def __init__(self, atom=None, **kwargs):
DependencyArg.__init__(self, **kwargs)
self.atom = atom
- self.pset = InternalPackageSet(initial_atoms=(self.atom,))
+ self.pset = InternalPackageSet(initial_atoms=(self.atom,), allow_repo=True)
portdb = root_config.trees[tree].dbapi
settings.setcpv(pkg)
settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
- ebuild_path = portdb.findname(pkg.cpv)
+ ebuild_path = portdb.findname(pkg.cpv, myrepo=pkg.repo)
if ebuild_path is None:
raise AssertionError("ebuild not found for '%s'" % pkg.cpv)
self._ebuild_path = ebuild_path
root_config = self.pkg.root_config
portdb = root_config.trees["porttree"].dbapi
- ebuild_path = portdb.findname(self.pkg.cpv)
+ ebuild_path = portdb.findname(self.pkg.cpv, myrepo=self.pkg.repo)
if ebuild_path is None:
raise AssertionError("ebuild not found for '%s'" % self.pkg.cpv)
settings = self.settings
pkg = self.pkg
portdb = pkg.root_config.trees["porttree"].dbapi
- ebuild_path = portdb.findname(pkg.cpv)
+ ebuild_path = portdb.findname(pkg.cpv, myrepo=pkg.repo)
if ebuild_path is None:
raise AssertionError("ebuild not found for '%s'" % pkg.cpv)
settings.setcpv(pkg)
pkg = self.pkg
root_config = pkg.root_config
portdb = root_config.trees["porttree"].dbapi
- ebuild_path = portdb.findname(pkg.cpv)
+ ebuild_path = portdb.findname(pkg.cpv, myrepo=pkg.repo)
if ebuild_path is None:
raise AssertionError("ebuild not found for '%s'" % pkg.cpv)
debug = settings.get("PORTAGE_DEBUG") == "1"
self._aux_get_wrapper(cpv, [])
return matches
- def _aux_get_wrapper(self, pkg, wants):
+ def _aux_get_wrapper(self, pkg, wants, myrepo=None):
if pkg in self._aux_get_history:
return self._aux_get(pkg, wants)
self._aux_get_history.add(pkg)
portdb = pkg.root_config.trees["porttree"].dbapi
portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
if portdir_repo_name:
- pkg_repo_name = pkg.metadata.get("repository")
+ pkg_repo_name = pkg.repo
if pkg_repo_name != portdir_repo_name:
if not pkg_repo_name:
pkg_repo_name = "unknown repo"
from portage.cache.mappings import slot_dict_class
from portage.const import EBUILD_PHASES
from portage.dep import Atom, check_required_use, use_reduce, \
- paren_enclose, _slot_re
+ paren_enclose, _slot_re, _slot_separator, _repo_separator
from portage.eapi import eapi_has_iuse_defaults, eapi_has_required_use
from portage.exception import InvalidDependString
from _emerge.Task import Task
"category", "counter", "cp", "cpv_split",
"inherited", "invalid", "iuse", "masks", "mtime",
"pf", "pv_split", "root", "slot", "slot_atom", "visible",) + \
- ("_raw_metadata", "_use",)
+ ("_raw_metadata", "_use", "_repo",)
metadata_keys = [
"BUILD_TIME", "CHOST", "COUNTER", "DEPEND", "EAPI",
if not self.installed:
self._invalid_metadata('EAPI.incompatible',
"IUSE contains defaults, but EAPI doesn't allow them")
- self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
+ self.slot_atom = portage.dep.Atom("%s%s%s" % (self.cp, _slot_separator, slot))
self.category, self.pf = portage.catsplit(self.cpv)
self.cpv_split = portage.catpkgsplit(self.cpv)
self.pv_split = self.cpv_split[1:]
cpv_color = "PKG_NOMERGE"
s = "(%s, %s" \
- % (portage.output.colorize(cpv_color, self.cpv) , self.type_name)
+ % (portage.output.colorize(cpv_color, self.cpv + _repo_separator + self.repo), self.type_name)
if self.type_name == "installed":
if self.root != "/":
def __init__(self, use):
self.enabled = frozenset(use)
+ @property
+ def repo(self):
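+ # Lazily read the repository name from the 'repository' metadata key and cache it.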
+ if self._repo is None:
+ self._repo = self.metadata['repository']
+ return self._repo
+
@property
def use(self):
if self._use is None:
if self.onlydeps or self.installed:
self.operation = "nomerge"
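+ # Include the repository in the hash key, so otherwise identical cpvs coming from different repositories are treated as distinct packages.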
self._hash_key = \
- (self.type_name, self.root, self.cpv, self.operation)
+ (self.type_name, self.root, self.cpv, self.operation, self.metadata.get('repository', None))
return self._hash_key
def __lt__(self, other):
del self._cpv_map[pkg.cpv]
self._clear_cache()
- def aux_get(self, cpv, wants):
+ def aux_get(self, cpv, wants, myrepo=None):
metadata = self._cpv_map[cpv].metadata
return [metadata.get(x, "") for x in wants]
self._spinner = spinner
self._mtimedb = mtimedb
self._favorites = favorites
- self._args_set = InternalPackageSet(favorites)
+ self._args_set = InternalPackageSet(favorites, allow_repo=True)
self._build_opts = self._build_opts_class()
for k in self._build_opts.__slots__:
setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
'digest' not in pkgsettings.features:
continue
portdb = x.root_config.trees['porttree'].dbapi
- ebuild_path = portdb.findname(x.cpv)
+ ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
if ebuild_path is None:
raise AssertionError("ebuild not found for '%s'" % x.cpv)
pkgsettings['O'] = os.path.dirname(ebuild_path)
root_config = x.root_config
portdb = root_config.trees["porttree"].dbapi
quiet_config = quiet_settings[root_config.root]
- ebuild_path = portdb.findname(x.cpv)
+ ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
if ebuild_path is None:
raise AssertionError("ebuild not found for '%s'" % x.cpv)
quiet_config["O"] = os.path.dirname(ebuild_path)
else:
tree = "porttree"
portdb = root_config.trees["porttree"].dbapi
- ebuild_path = portdb.findname(x.cpv)
+ ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
if ebuild_path is None:
raise AssertionError("ebuild not found for '%s'" % x.cpv)
else:
print("Configuring pkg...")
print()
- ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
+ ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg, myrepo=pkg.repo)
mysettings = portage.config(clone=settings)
vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
debug = mysettings.get("PORTAGE_DEBUG") == "1"
if pkg_type == "installed":
ebuildpath = vardb.findname(pkg.cpv)
elif pkg_type == "ebuild":
- ebuildpath = portdb.findname(pkg.cpv)
+ ebuildpath = portdb.findname(pkg.cpv, myrepo=pkg.repo)
elif pkg_type == "binary":
tbz2_file = bindb.bintree.getname(pkg.cpv)
ebuild_file_name = pkg.cpv.split("/")[1] + ".ebuild"
# contains all sets added to the graph
self.sets = {}
# contains non-set atoms given as arguments
- self.sets['__non_set_args__'] = InternalPackageSet()
+ self.sets['__non_set_args__'] = InternalPackageSet(allow_repo=True)
# contains all atoms from all sets added to the graph, including
# atoms given as arguments
- self.atoms = InternalPackageSet()
+ self.atoms = InternalPackageSet(allow_repo=True)
self.atom_arg_map = {}
class _dynamic_depgraph_config(object):
mykey, noiselevel=-1)
elif mytype == "ebuild":
portdb = self._frozen_config.roots[myroot].trees["porttree"].dbapi
- myebuild, mylocation = portdb.findname2(mykey)
+ myebuild, mylocation = portdb.findname2(mykey, myrepo=pkg.repo)
portage.writemsg("!!! This ebuild cannot be installed: " + \
"'%s'\n" % myebuild, noiselevel=-1)
portage.writemsg("!!! Please notify the package maintainer " + \
deps = []
for cat in categories:
deps.append(Atom(insert_category_into_atom(
- atom_without_category, cat)))
+ atom_without_category, cat), allow_repo=True))
return deps
def _have_new_virt(self, root, atom_cp):
args.append(SetArg(arg=x, pset=pset,
root_config=root_config))
continue
- if not is_valid_package_atom(x):
+ if not is_valid_package_atom(x, allow_repo=True):
portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
noiselevel=-1)
portage.writemsg("!!! Please check ebuild(5) for full details.\n")
# 2) It takes away freedom from the resolver to choose other
# possible expansions when necessary.
if "/" in x:
- args.append(AtomArg(arg=x, atom=Atom(x),
+ args.append(AtomArg(arg=x, atom=Atom(x, allow_repo=True),
root_config=root_config))
continue
expanded_atoms = self._dep_expand(root_config, x)
for root in self._dynamic_config.sets:
depgraph_sets = self._dynamic_config.sets[root]
depgraph_sets.sets.setdefault('__non_set_args__',
- InternalPackageSet()).clear()
+ InternalPackageSet(allow_repo=True)).clear()
depgraph_sets.atoms.clear()
depgraph_sets.atom_arg_map.clear()
set_atoms[root] = []
a matching package has been masked by backtracking.
"""
backtrack_mask = False
- atom_set = InternalPackageSet(initial_atoms=(atom,))
+ atom_set = InternalPackageSet(initial_atoms=(atom,), allow_repo=True)
xinfo = '"%s"' % atom.unevaluated_atom
if arg:
xinfo='"%s"' % arg
# descending order
cpv_list.reverse()
for cpv in cpv_list:
- metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings, db, \
- pkg_type, built, installed, db_keys, _pkg_use_enabled=self._pkg_use_enabled)
+ metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings, db, pkg_type, \
+ built, installed, db_keys, myrepo=atom.repo, _pkg_use_enabled=self._pkg_use_enabled)
if metadata is not None:
pkg = self._pkg(cpv, pkg_type, root_config,
- installed=installed)
+ installed=installed, myrepo=atom.repo)
# pkg.metadata contains calculated USE for ebuilds,
# required later for getMissingLicenses.
metadata = pkg.metadata
if not slot_available:
continue
inst_pkg = self._pkg(cpv, "installed",
- root_config, installed=installed)
+ root_config, installed=installed, myrepo=atom.repo)
# Remove the slot from the atom and verify that
# the package matches the resulting atom.
atom_without_slot = portage.dep.remove_slot(atom)
break
if cpv_list:
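+ # When the atom does not name a repository and the db can enumerate its repositories, try each of them; otherwise only the atom's repository (possibly None) is considered.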
+ if atom.repo is None and hasattr(db, "getRepositories"):
+ repo_list = db.getRepositories()
+ else:
+ repo_list = [atom.repo]
# descending order
cpv_list.reverse()
for cpv in cpv_list:
- try:
- pkg = self._pkg(cpv, pkg_type, root_config,
- installed=installed, onlydeps=onlydeps)
- except portage.exception.PackageNotFound:
- pass
- else:
- if pkg.cp != atom.cp:
- # A cpv can be returned from dbapi.match() as an
- # old-style virtual match even in cases when the
- # package does not actually PROVIDE the virtual.
- # Filter out any such false matches here.
- if not InternalPackageSet(initial_atoms=(atom,)
- ).findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
- continue
- yield pkg
+ for repo in repo_list:
+ try:
+ pkg = self._pkg(cpv, pkg_type, root_config,
+ installed=installed, onlydeps=onlydeps, myrepo=repo)
+ except portage.exception.PackageNotFound:
+ pass
+ else:
+ if pkg.cp != atom.cp:
+ # A cpv can be returned from dbapi.match() as an
+ # old-style virtual match even in cases when the
+ # package does not actually PROVIDE the virtual.
+ # Filter out any such false matches here.
+ if not InternalPackageSet(initial_atoms=(atom,), allow_repo=True
+ ).findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
+ continue
+ yield pkg
def _select_pkg_highest_available(self, root, atom, onlydeps=False):
cache_key = (root, atom, onlydeps)
if not isinstance(atom, portage.dep.Atom):
atom = portage.dep.Atom(atom)
atom_cp = atom.cp
- atom_set = InternalPackageSet(initial_atoms=(atom,))
+ atom_set = InternalPackageSet(initial_atoms=(atom,), allow_repo=True)
existing_node = None
myeb = None
rebuilt_binaries = 'rebuilt_binaries' in self._dynamic_config.myparams
else:
try:
pkg_eb = self._pkg(
- pkg.cpv, "ebuild", root_config)
+ pkg.cpv, "ebuild", root_config, myrepo=atom.repo)
except portage.exception.PackageNotFound:
continue
else:
return 1
def _pkg(self, cpv, type_name, root_config, installed=False,
- onlydeps=False):
+ onlydeps=False, myrepo=None):
"""
Get a package instance from the cache, or create a new
one if necessary. Raises PackageNotFound from aux_get if it
db_keys = list(self._frozen_config._trees_orig[root_config.root][
tree_type].dbapi._aux_cache_keys)
try:
- metadata = zip(db_keys, db.aux_get(cpv, db_keys))
+ metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
except KeyError:
raise portage.exception.PackageNotFound(cpv)
pkg = Package(built=(type_name != "ebuild"), cpv=cpv,
portdb = self._frozen_config.trees[self._frozen_config.target_root]["porttree"].dbapi
added_favorites = set()
for x in self._dynamic_config._set_nodes:
- pkg_type, root, pkg_key, pkg_status = x
+ pkg_type, root, pkg_key, pkg_status, pkg_repo = x
if pkg_status != "nomerge":
continue
serialized_tasks = []
masked_tasks = []
for x in mergelist:
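+ # Mergelist entries now carry a fifth field, the repository name.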
- if not (isinstance(x, list) and len(x) == 4):
+ if not (isinstance(x, list) and len(x) == 5):
continue
- pkg_type, myroot, pkg_key, action = x
+ pkg_type, myroot, pkg_key, action, pkg_repo = x
if pkg_type not in self.pkg_tree_map:
continue
if action != "merge":
continue
root_config = self._frozen_config.roots[myroot]
try:
- pkg = self._pkg(pkg_key, pkg_type, root_config)
+ pkg = self._pkg(pkg_key, pkg_type, root_config, myrepo=pkg_repo)
except portage.exception.PackageNotFound:
# It does not exist or is corrupt.
if skip_missing:
return (success, mydepgraph, dropped_tasks)
def get_mask_info(root_config, cpv, pkgsettings,
- db, pkg_type, built, installed, db_keys, _pkg_use_enabled=None):
+ db, pkg_type, built, installed, db_keys, myrepo=None, _pkg_use_enabled=None):
eapi_masked = False
try:
metadata = dict(zip(db_keys,
- db.aux_get(cpv, db_keys)))
+ db.aux_get(cpv, db_keys, myrepo=myrepo)))
except KeyError:
metadata = None
if _pkg_use_enabled is not None:
modified_use = _pkg_use_enabled(pkg)
- mreasons = get_masking_status(pkg, pkgsettings, root_config, use=modified_use)
+ mreasons = get_masking_status(pkg, pkgsettings, root_config, myrepo=myrepo, use=modified_use)
+
return metadata, mreasons
def show_masked_packages(masked_packages):
writemsg("section of the Gentoo Linux x86 Handbook (architecture is irrelevant):\n\n", noiselevel=-1)
writemsg("http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked\n\n", noiselevel=-1)
-def get_masking_status(pkg, pkgsettings, root_config, use=None):
+def get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
return [mreason.message for \
- mreason in _get_masking_status(pkg, pkgsettings, root_config, use=use)]
-
-def _get_masking_status(pkg, pkgsettings, root_config, use=None):
+ mreason in _get_masking_status(pkg, pkgsettings, root_config, myrepo=myrepo, use=use)]
+
+def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
mreasons = _getmaskingstatus(
pkg, settings=pkgsettings,
- portdb=root_config.trees["porttree"].dbapi)
+ portdb=root_config.trees["porttree"].dbapi, myrepo=myrepo)
if not pkg.installed:
if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
import re
import portage
+import _emerge.depgraph
-def is_valid_package_atom(x):
+def is_valid_package_atom(x, allow_repo=False):
if "/" not in x:
- alphanum = re.search(r'\w', x)
- if alphanum:
- x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
- return portage.isvalidatom(x, allow_blockers=False)
+ x2 = _emerge.depgraph.insert_category_into_atom(x, 'cat')
+ if x2 is not None:
+ x = x2
+ return portage.isvalidatom(x, allow_blockers=False, allow_repo=allow_repo)
for x in myfiles:
if x.startswith(SETPREFIX) or \
- is_valid_package_atom(x):
+ is_valid_package_atom(x, allow_repo=True):
continue
if x[:1] == os.sep:
continue
dynamic_config = depgraph._dynamic_config
self.mylist = mylist
- self.favorites = InternalPackageSet(favorites)
+ self.favorites = InternalPackageSet(favorites, allow_repo=True)
self.verbosity = verbosity
if self.verbosity is None:
pkg = x
metadata = pkg.metadata
ebuild_path = None
- repo_name = metadata["repository"]
+ repo_name = pkg.repo
if pkg.type_name == "ebuild":
- ebuild_path = portdb.findname(pkg.cpv)
+ ebuild_path = portdb.findname(pkg.cpv, myrepo=repo_name)
if ebuild_path is None:
raise AssertionError(
"ebuild not found for '%s'" % pkg.cpv)
fetch = red("F")
if ordered:
counters.restrict_fetch += 1
- if portdb.fetch_check(pkg_key, pkg_use):
+ if portdb.fetch_check(pkg_key, pkg_use, myrepo=pkg.repo):
fetch = green("f")
if ordered:
counters.restrict_fetch_satisfied += 1
_repopath_sub = re.compile(r'\$\{repository:(?P<reponame>.+)\}')
def __init__(self, filename, greedy=False, dbapi=None):
- super(StaticFileSet, self).__init__()
+ super(StaticFileSet, self).__init__(allow_repo=True)
self._filename = filename
self._mtime = None
self.description = "Package set loaded from file %s" % self._filename
setattr(self, key, " ".join(value))
def _validate(self, atom):
- return bool(atom[:1] == SETPREFIX or ValidAtomValidator(atom))
+ return bool(atom[:1] == SETPREFIX or ValidAtomValidator(atom, allow_repo=True))
def write(self):
write_atomic(self._filename, "".join("%s\n" % (atom,) \
"""
return NotImplementedError
- def aux_get(self, mycpv, mylist):
+ def aux_get(self, mycpv, mylist, myrepo=None):
"""Return the metadata keys in mylist for mycpv
Args:
mycpv - "sys-apps/foo-1.0"
mylist - ["SLOT","DEPEND","HOMEPAGE"]
+ myrepo - The repository name.
Returns:
a list of results, in order of keys in mylist, such as:
["0",">=sys-libs/bar-1.0","http://www.foo.com"] or [] if mycpv not found'
return list(self._iter_match(mydep,
self.cp_list(mydep.cp, use_cache=use_cache)))
- def _iter_match(self, atom, cpv_iter):
+ def _iter_match(self, atom, cpv_iter, myrepo=None):
cpv_iter = iter(match_from_list(atom, cpv_iter))
if atom.slot:
- cpv_iter = self._iter_match_slot(atom, cpv_iter)
+ cpv_iter = self._iter_match_slot(atom, cpv_iter, myrepo)
if atom.use:
- cpv_iter = self._iter_match_use(atom, cpv_iter)
+ cpv_iter = self._iter_match_use(atom, cpv_iter, myrepo)
+ if atom.repo:
+ cpv_iter = self._iter_match_repo(atom, cpv_iter, myrepo)
return cpv_iter
- def _iter_match_slot(self, atom, cpv_iter):
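+ # Keep only packages whose 'repository' metadata matches atom.repo.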
+ def _iter_match_repo(self, atom, cpv_iter, myrepo=None):
for cpv in cpv_iter:
try:
- if self.aux_get(cpv, ["SLOT"])[0] == atom.slot:
+ if self.aux_get(cpv, ["repository"], myrepo=myrepo)[0] == atom.repo:
yield cpv
except KeyError:
continue
- def _iter_match_use(self, atom, cpv_iter):
+ def _iter_match_slot(self, atom, cpv_iter, myrepo=None):
+ for cpv in cpv_iter:
+ try:
+ if self.aux_get(cpv, ["SLOT"], myrepo=myrepo)[0] == atom.slot:
+ yield cpv
+ except KeyError:
+ continue
+
+ def _iter_match_use(self, atom, cpv_iter, myrepo=None):
"""
1) Check for required IUSE intersection (need implicit IUSE here).
2) Check enabled/disabled flag states.
iuse_implicit_match = self.settings._iuse_implicit_match
for cpv in cpv_iter:
try:
- iuse, slot, use = self.aux_get(cpv, ["IUSE", "SLOT", "USE"])
+ iuse, slot, use = self.aux_get(cpv, ["IUSE", "SLOT", "USE"], myrepo=myrepo)
except KeyError:
continue
use = use.split()
self._aux_cache.pop(cpv, None)
fakedbapi.cpv_remove(self, cpv)
- def aux_get(self, mycpv, wants):
+ def aux_get(self, mycpv, wants, myrepo=None):
if self.bintree and not self.bintree.populated:
self.bintree.populate()
cache_me = False
mydep = mydep.cp
expanded = cpv_expand(mydep, mydb=mydb,
use_cache=use_cache, settings=settings)
- return Atom(orig_dep.replace(mydep, expanded, 1))
+ return Atom(orig_dep.replace(mydep, expanded, 1), allow_repo=True)
return license_path
return None
- def findname(self,mycpv):
- return self.findname2(mycpv)[0]
+ def findname(self, mycpv, mytree=None, myrepo=None):
+ return self.findname2(mycpv, mytree, myrepo)[0]
def getRepositoryPath(self, repository_id):
"""
"""
return [k for k in self.treemap if k]
- def findname2(self, mycpv, mytree=None):
+ def findname2(self, mycpv, mytree=None, myrepo=None):
"""
Returns the location of the CPV, and what overlay it was in.
Searches overlays first, then PORTDIR; this allows us to return the first
matching file. As opposed to starting in portdir and then doing overlays
second, we would have to exhaustively search the overlays until we found
the file we wanted.
+ If myrepo is not None, only packages from that repository (overlay) are found.
"""
if not mycpv:
return (None, 0)
+
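+ # Translate the repository name into its tree location via treemap; an unknown repository yields (None, 0), the same as a missing ebuild.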
+ if myrepo:
+ if myrepo in self.treemap:
+ mytree = self.treemap[myrepo]
+ else:
+ return (None, 0)
+
mysplit = mycpv.split("/")
psplit = pkgsplit(mysplit[1])
if psplit is None or len(mysplit) != 2:
return (metadata, st, emtime)
- def aux_get(self, mycpv, mylist, mytree=None):
+ def aux_get(self, mycpv, mylist, mytree=None, myrepo=None):
"stub code for returning auxilliary db information, such as SLOT, DEPEND, etc."
'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
cache_me = False
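+ # Resolve the repository name to its tree; an unknown repository raises KeyError, in line with the error contract documented above.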
+ if myrepo:
+ if myrepo in self.treemap:
+ mytree = self.treemap[myrepo]
+ else:
+ raise KeyError(myrepo)
+
if not mytree:
cache_me = True
if not mytree and not self._known_keys.intersection(
return _parse_uri_map(mypkg, {'EAPI':eapi,'SRC_URI':myuris},
use=useflags)
- def getfetchsizes(self, mypkg, useflags=None, debug=0):
+ def getfetchsizes(self, mypkg, useflags=None, debug=0, myrepo=None):
# returns a filename:size dictionary of remaining downloads
- myebuild = self.findname(mypkg)
+ myebuild = self.findname(mypkg, myrepo=myrepo)
if myebuild is None:
raise AssertionError(_("ebuild not found for '%s'") % mypkg)
pkgdir = os.path.dirname(myebuild)
filesdict[myfile] = int(checksums[myfile]["size"])
return filesdict
- def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False):
+ def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False, myrepo=None):
if all:
useflags = None
elif useflags is None:
if mysettings:
useflags = mysettings["USE"].split()
- myfiles = self.getFetchMap(mypkg, useflags=useflags)
- myebuild = self.findname(mypkg)
+ if myrepo:
+ if myrepo in self.treemap:
+ mytree = self.treemap[myrepo]
+ else:
+ return False
+ else:
+ mytree = None
+
+ myfiles = self.getFetchMap(mypkg, useflags=useflags, mytree=mytree)
+ myebuild = self.findname(mypkg, myrepo=myrepo)
if myebuild is None:
raise AssertionError(_("ebuild not found for '%s'") % mypkg)
pkgdir = os.path.dirname(myebuild)
if mydep == mykey:
mylist = self.cp_list(mykey)
else:
- mylist = match_from_list(mydep, self.cp_list(mykey))
+ mylist = match_from_list(mydep, self.cp_list(mykey, myrepo=mydep.repo))
myval = ""
settings = self.settings
local_config = settings.local_config
elif level == "match-visible":
#dep match -- find all visible matches
#get all visible packages, then get the matching ones
-
myval = list(self._iter_match(mydep,
- self.xmatch("list-visible", mykey, mydep=mykey, mykey=mykey)))
+ self.xmatch("list-visible", mykey, mydep=mykey, mykey=mykey), myrepo=mydep.repo))
elif level == "match-all":
#match *all* visible *and* masked packages
if mydep == mykey:
myval = self.cp_list(mykey)
else:
- myval = list(self._iter_match(mydep, self.cp_list(mykey)))
+ myval = list(self._iter_match(mydep, self.cp_list(mykey), myrepo=mydep.repo))
else:
raise AssertionError(
"Invalid level argument: '%s'" % level)
PORTAGE_PACKAGE_ATOM, PRIVATE_PATH, VDB_PATH
from portage.const import _ENABLE_DYN_LINK_MAP, _ENABLE_PRESERVE_LIBS
from portage.dbapi import dbapi
+from portage.dep import _slot_separator
from portage.exception import CommandNotFound, \
InvalidData, InvalidPackageName, \
FileNotFound, PermissionDenied, UnsupportedAPIException
aux_cache["modified"] = set()
self._aux_cache_obj = aux_cache
- def aux_get(self, mycpv, wants):
+ def aux_get(self, mycpv, wants, myrepo=None):
"""This automatically caches selected keys that are frequently needed
by emerge for dependency calculations. The cached metadata is
considered valid if the mtime of the package directory has not changed
from portage.util import grabfile
from portage.const import CACHE_PATH
from portage.localization import _
+from portage.dep import _slot_separator
# Note: the space for rgt and rlt is important !!
# FIXME: use slot deps instead, requires GLSA format versioning
pass
else:
if slot and slot != "*":
- rValue += ":" + slot
+ rValue += _slot_separator + slot
return str(rValue)
def makeVersion(versionNode):
pass
else:
if slot and slot != "*":
- rValue += ":" + slot
+ rValue += _slot_separator + slot
return rValue
def match(atom, dbapi, match_type="default"):
import portage
from portage import os
from portage.const import USER_CONFIG_PATH
-from portage.dep import match_from_list
+from portage.dep import match_from_list, _slot_separator, _repo_separator
from portage.localization import _
from portage.util import grablines, normalize_path
from portage.versions import catpkgsplit
if metadata is None:
db_keys = list(portdb._aux_cache_keys)
try:
- metadata = dict(zip(db_keys, portdb.aux_get(mycpv, db_keys)))
+ metadata = dict(zip(db_keys, portdb.aux_get(mycpv, db_keys, myrepo=myrepo)))
except KeyError:
if not portdb.cpv_exists(mycpv):
raise
# Can't access SLOT due to corruption.
cpv_slot_list = [mycpv]
else:
- cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
+ pkg = "".join((mycpv, _slot_separator, metadata["SLOT"]))
+ if 'repository' in metadata:
+ pkg = "".join((pkg, _repo_separator, metadata['repository']))
+ cpv_slot_list = [pkg]
mycp=mysplit[0]+"/"+mysplit[1]
# XXX- This is a temporary duplicate of code from the config constructor.
import portage
from portage import eapi_is_supported, _eapi_is_deprecated
-from portage.dep import match_from_list
+from portage.dep import match_from_list, _slot_separator, _repo_separator
from portage.localization import _
from portage.package.ebuild.config import config
from portage.versions import catpkgsplit, cpv_getkey
self.message = message
self.unmask_hint = unmask_hint
-def getmaskingstatus(mycpv, settings=None, portdb=None):
+def getmaskingstatus(mycpv, settings=None, portdb=None, myrepo=None):
if settings is None:
settings = config(clone=portage.settings)
if portdb is None:
portdb = portage.portdb
return [mreason.message for \
- mreason in _getmaskingstatus(mycpv, settings, portdb)]
+ mreason in _getmaskingstatus(mycpv, settings, portdb, myrepo)]
-def _getmaskingstatus(mycpv, settings, portdb):
+def _getmaskingstatus(mycpv, settings, portdb, myrepo=None):
metadata = None
installed = False
if metadata is None:
db_keys = list(portdb._aux_cache_keys)
try:
- metadata = dict(zip(db_keys, portdb.aux_get(mycpv, db_keys)))
+ metadata = dict(zip(db_keys, portdb.aux_get(mycpv, db_keys, myrepo=myrepo)))
except KeyError:
if not portdb.cpv_exists(mycpv):
raise
pkgdict = settings._keywords_manager.pkeywordsdict.get(cp)
matches = False
if pkgdict:
- cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
+ pkg = "".join((mycpv, _slot_separator, metadata["SLOT"]))
+ if 'repository' in metadata:
+ pkg = "".join((pkg, _repo_separator, metadata['repository']))
+ cpv_slot_list = [pkg]
for atom, pkgkeywords in pkgdict.items():
if match_from_list(atom, cpv_slot_list):
matches = True
--- /dev/null
+# test_dep_getrepo.py -- Portage Unit Testing Functionality
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import dep_getrepo
+
+class DepGetRepo(TestCase):
+ """ A simple testcase for isvalidatom
+ """
+
+ def testDepGetRepo(self):
+
+ repo_char = "::"
+ repos = ( "a", "repo-name", "repo_name", "repo123", None )
+ cpvs = ["sys-apps/portage"]
+ versions = ["2.1.1","2.1-r1", None]
+ uses = ["[use]", None]
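+ # Build every combination of version, ::repo suffix and USE conditional and verify that dep_getrepo() extracts the repository name (or None when no repository is given).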
+ for cpv in cpvs:
+ for version in versions:
+ for use in uses:
+ for repo in repos:
+ pkg = cpv
+ if version:
+ pkg = '=' + pkg + '-' + version
+ if repo is not None:
+ pkg = pkg + repo_char + repo
+ if use:
+ pkg = pkg + use
+ self.assertEqual( dep_getrepo( pkg ), repo )
f.close()
def _load_config(self):
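+ # Every repository other than the main tree is exposed through PORTDIR_OVERLAY so the additional repos are visible to the resolver.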
+ portdir_overlay = []
+ for repo_name in sorted(self.repo_dirs):
+ path = self.repo_dirs[repo_name]
+ if path != self.portdir:
+ portdir_overlay.append(path)
+
env = {
"ACCEPT_KEYWORDS": "x86",
"PORTDIR": self.portdir,
+ "PORTDIR_OVERLAY": " ".join(portdir_overlay),
'PORTAGE_TMPDIR' : os.path.join(self.eroot, 'var/tmp'),
}
--- /dev/null
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class MultirepoTestCase(TestCase):
+
+ def testMultirepo(self):
+ ebuilds = {
+ #Simple repo selection
+ "dev-libs/A-1": { },
+ "dev-libs/A-1::repo1": { },
+ "dev-libs/A-2::repo1": { },
+ "dev-libs/A-1::repo2": { },
+
+ #Packages in exactly one repo
+ "dev-libs/B-1": { },
+ "dev-libs/C-1::repo1": { },
+
+ #Package in both repo1 and repo2; repo1 must be used
+ "dev-libs/D-1::repo1": { },
+ "dev-libs/D-1::repo2": { },
+
+ "dev-libs/E-1": { },
+ "dev-libs/E-1::repo1": { },
+ "dev-libs/E-1::repo2": { "SLOT": "1" },
+
+ "dev-libs/F-1::repo1": { "SLOT": "1" },
+ "dev-libs/F-1::repo2": { "SLOT": "1" },
+ }
+
+ sets = {
+ "multirepotest":
+ ( "dev-libs/A::test_repo", )
+ }
+
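+ # check_repo_names=True asks the playground to compare the ::repo part of each mergelist entry as well.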
+ test_cases = (
+ #Simple repo selection
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-2::repo1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A::test_repo"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A::repo2"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-1::repo2"]),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1::repo1"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-1::repo1"]),
+ ResolverPlaygroundTestCase(
+ ["@multirepotest"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-1"]),
+
+ #Packages in exactly one repo
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/C-1::repo1"]),
+
+ #Package in both repo1 and repo2; repo1 must be used
+ ResolverPlaygroundTestCase(
+ ["dev-libs/D"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/D-1::repo1"]),
+
+ #Atoms with slots
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/E-1::repo1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E:1::repo2"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/E-1::repo2"]),
+ #FAIL: this case is currently expected to fail, so it is commented out
+ #~ ResolverPlaygroundTestCase(
+ #~ ["dev-libs/E:1"],
+ #~ success = True,
+ #~ check_repo_names = True,
+ #~ mergelist = ["dev-libs/E-1::repo2"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/F:1"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/F-1::repo1"]),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/F-1:1"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/F-1::repo1"]),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/F-1:1::repo2"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/F-1::repo2"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, sets=sets)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testMultirepoUserConfig(self):
+ ebuilds = {
+ #package.use test
+ "dev-libs/A-1": { "IUSE": "foo" },
+ "dev-libs/A-2::repo1": { "IUSE": "foo" },
+ "dev-libs/A-3::repo2": { },
+ "dev-libs/B-1": { "DEPEND": "dev-libs/A", "EAPI": 2 },
+ "dev-libs/B-2": { "DEPEND": "dev-libs/A[foo]", "EAPI": 2 },
+ "dev-libs/B-3": { "DEPEND": "dev-libs/A[-foo]", "EAPI": 2 },
+
+ #package.keywords test
+ "dev-libs/C-1": { "KEYWORDS": "~x86" },
+ "dev-libs/C-1::repo1": { "KEYWORDS": "~x86" },
+
+ #package.license
+ "dev-libs/D-1": { "LICENSE": "TEST" },
+ "dev-libs/D-1::repo1": { "LICENSE": "TEST" },
+
+ #package.mask
+ "dev-libs/E-1": { },
+ "dev-libs/E-1::repo1": { },
+
+ #package.properties
+ "dev-libs/F-1": { "PROPERTIES": "bar"},
+ "dev-libs/F-1::repo1": { "PROPERTIES": "bar"},
+
+ #package.unmask
+ "dev-libs/G-1": { },
+ "dev-libs/G-1::repo1": { },
+ }
+
+ user_config = {
+ "package.use":
+ (
+ "dev-libs/A::repo1 foo",
+ ),
+ "package.keywords":
+ (
+ "=dev-libs/C-1::test_repo",
+ ),
+ "package.license":
+ (
+ "=dev-libs/D-1::test_repo TEST",
+ ),
+ "package.mask":
+ (
+ "dev-libs/E::repo1",
+ #needed for package.unmask test
+ "dev-libs/G",
+ ),
+ "package.properties":
+ (
+ "dev-libs/F::repo1 -bar",
+ ),
+ "package.unmask":
+ (
+ "dev-libs/G::test_repo",
+ ),
+ }
+
+ test_cases = (
+ #package.use test
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/B-1"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-3::repo2", "dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/B-2"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-2::repo1", "dev-libs/B-2"]),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/B-3"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-1", "dev-libs/B-3"]),
+
+ #package.keywords test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/C-1"]),
+
+ #package.license test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/D"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/D-1"]),
+
+ #package.mask test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/E-1"]),
+
+ #package.properties test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/F"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/F-1"]),
+
+ #package.unmask test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/G"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/G-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, user_config=user_config)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()