# This block ensures that ^C interrupts are handled quietly.
try:
- def exithandler(signum,frame):
+ def exithandler(signum, _frame):
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
sys.exit(128 + signum)
except KeyboardInterrupt:
sys.exit(128 + signal.SIGINT)
-def debug_signal(signum, frame):
+def debug_signal(_signum, _frame):
import pdb
pdb.set_trace()
from repoman.utilities import FindVCS
if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
long = int
def parse_args(args):
def _write_cache(self, trg_cache, cpv, repo_path, metadata, ebuild_hash):
- if not hasattr(trg_cache, 'raise_stat_collision'):
- # This cache does not avoid redundant writes automatically,
- # so check for an identical existing entry before writing.
- # This prevents unnecessary disk writes and can also prevent
- # unnecessary rsync transfers.
- try:
- dest = trg_cache[cpv]
- except (KeyError, CacheError):
- pass
- else:
- if trg_cache.validate_entry(dest,
- ebuild_hash, self._eclass_db):
- identical = True
- for k in self._auxdbkeys:
- if dest.get(k, '') != metadata.get(k, ''):
- identical = False
- break
- if identical:
- return
+ if not hasattr(trg_cache, 'raise_stat_collision'):
+ # This cache does not avoid redundant writes automatically,
+ # so check for an identical existing entry before writing.
+ # This prevents unnecessary disk writes and can also prevent
+ # unnecessary rsync transfers.
+ try:
+ dest = trg_cache[cpv]
+ except (KeyError, CacheError):
+ pass
+ else:
+ if trg_cache.validate_entry(dest,
+ ebuild_hash, self._eclass_db):
+ identical = True
+ for k in self._auxdbkeys:
+ if dest.get(k, '') != metadata.get(k, ''):
+ identical = False
+ break
+ if identical:
+ return
+ try:
+ chf = trg_cache.validation_chf
+ metadata['_%s_' % chf] = getattr(ebuild_hash, chf)
try:
- chf = trg_cache.validation_chf
- metadata['_%s_' % chf] = getattr(ebuild_hash, chf)
+ trg_cache[cpv] = metadata
+ except StatCollision as sc:
+ # If the content of a cache entry changes and neither the
+ # file mtime nor size changes, it will prevent rsync from
+ # detecting changes. Cache backends may raise this
+ # exception from _setitem() if they detect this type of stat
+ # collision. These exceptions are handled by bumping the
+ # mtime on the ebuild (and the corresponding cache entry).
+ # See bug #139134. It is convenient to include checks for
+ # redundant writes along with the internal StatCollision
+ # detection code, so for caches with the
+ # raise_stat_collision attribute, we do not need to
+ # explicitly check for redundant writes like we do for the
+ # other cache types above.
+ max_mtime = sc.mtime
+ for _ec, ec_hash in metadata['_eclasses_'].items():
+ if max_mtime < ec_hash.mtime:
+ max_mtime = ec_hash.mtime
+ if max_mtime == sc.mtime:
+ max_mtime += 1
+ max_mtime = long(max_mtime)
try:
+ os.utime(ebuild_hash.location, (max_mtime, max_mtime))
+ except OSError as e:
+ self.returncode |= 1
+ writemsg_level(
+ "%s writing target: %s\n" % (cpv, e),
+ level=logging.ERROR, noiselevel=-1)
+ else:
+ ebuild_hash.mtime = max_mtime
+ metadata['_mtime_'] = max_mtime
trg_cache[cpv] = metadata
- except StatCollision as sc:
- # If the content of a cache entry changes and neither the
- # file mtime nor size changes, it will prevent rsync from
- # detecting changes. Cache backends may raise this
- # exception from _setitem() if they detect this type of stat
- # collision. These exceptions are handled by bumping the
- # mtime on the ebuild (and the corresponding cache entry).
- # See bug #139134. It is convenient to include checks for
- # redundant writes along with the internal StatCollision
- # detection code, so for caches with the
- # raise_stat_collision attribute, we do not need to
- # explicitly check for redundant writes like we do for the
- # other cache types above.
- max_mtime = sc.mtime
- for ec, ec_hash in metadata['_eclasses_'].items():
- if max_mtime < ec_hash.mtime:
- max_mtime = ec_hash.mtime
- if max_mtime == sc.mtime:
- max_mtime += 1
- max_mtime = long(max_mtime)
- try:
- os.utime(ebuild_hash.location, (max_mtime, max_mtime))
- except OSError as e:
- self.returncode |= 1
- writemsg_level(
- "%s writing target: %s\n" % (cpv, e),
- level=logging.ERROR, noiselevel=-1)
- else:
- ebuild_hash.mtime = max_mtime
- metadata['_mtime_'] = max_mtime
- trg_cache[cpv] = metadata
- self._portdb.auxdb[repo_path][cpv] = metadata
+ self._portdb.auxdb[repo_path][cpv] = metadata
- except CacheError as ce:
- self.returncode |= 1
- writemsg_level(
- "%s writing target: %s\n" % (cpv, ce),
- level=logging.ERROR, noiselevel=-1)
+ except CacheError as ce:
+ self.returncode |= 1
+ writemsg_level(
+ "%s writing target: %s\n" % (cpv, ce),
+ level=logging.ERROR, noiselevel=-1)
def run(self):
signum = run_main_scheduler(self._regen)
self.file_name = file_name
self.file_type = guessManifestFileType(file_name)
- def file_type_lt(self, a, b):
+ @staticmethod
+ def file_type_lt(a, b):
"""
Defines an ordering between file types.
"""
# This block ensures that ^C interrupts are handled quietly.
try:
- def exithandler(signum, frame):
+ def exithandler(signum, _frame):
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
sys.exit(128 + signum)
return 2
try:
for pack in argv[1:]:
- mylist=portage.db[argv[0]]["vartree"].dbapi.match(pack)
- print(pack+":"+portage.best(mylist))
+ mylist = portage.db[argv[0]]['vartree'].dbapi.match(pack)
+ print('%s:%s' % (pack, portage.best(mylist)))
except KeyError:
return 1
@uses_eroot
def metadata(argv):
if (len(argv) < 4):
- print("ERROR: insufficient parameters!", file=sys.stderr)
+ print('ERROR: insufficient parameters!', file=sys.stderr)
return 2
eroot, pkgtype, pkgspec = argv[0:3]
metakeys = argv[3:]
type_map = {
- "ebuild":"porttree",
- "binary":"bintree",
- "installed":"vartree"}
+ 'ebuild': 'porttree',
+ 'binary': 'bintree',
+ 'installed': 'vartree'
+ }
if pkgtype not in type_map:
print("Unrecognized package type: '%s'" % pkgtype, file=sys.stderr)
return 1
repo = portage.dep.dep_getrepo(pkgspec)
pkgspec = portage.dep.remove_slot(pkgspec)
try:
- values = trees[eroot][type_map[pkgtype]].dbapi.aux_get(
- pkgspec, metakeys, myrepo=repo)
- writemsg_stdout(''.join('%s\n' % x for x in values), noiselevel=-1)
+ values = trees[eroot][type_map[pkgtype]].dbapi.aux_get(
+ pkgspec, metakeys, myrepo=repo)
+ writemsg_stdout(''.join('%s\n' % x for x in values), noiselevel=-1)
except KeyError:
print("Package not found: '%s'" % pkgspec, file=sys.stderr)
return 1
settings.get("CONFIG_PROTECT_MASK", ""))
protect_obj = ConfigProtect(root, protect, protect_mask)
- protected = 0
errors = 0
for line in sys.stdin:
continue
if protect_obj.isprotected(f):
- protected += 1
out.write("%s\n" % filename)
out.flush()
noiselevel=-1)
return 2
- root_config = RootConfig(portage.settings,
- portage.db[eroot], None)
+ root_config = RootConfig(portage.settings, portage.db[eroot], None)
if hasattr(db, "xmatch"):
cpv_list = db.xmatch("match-all-cpv-only", atom)
return os.EX_OK
-def vdb_path(argv):
+def vdb_path(_argv):
"""
Returns the path used for the var(installed) package database for the
set environment/configuration options.
out.flush()
return os.EX_OK
-def gentoo_mirrors(argv):
+def gentoo_mirrors(_argv):
"""
Returns the mirrors set to use in the portage configuration.
"""
"""
return repositories_configuration(argv)
-def portdir(argv):
+def portdir(_argv):
"""
Returns the PORTDIR path.
"""
print(portage.settings["PORTDIR"])
-def config_protect(argv):
+def config_protect(_argv):
"""
Returns the CONFIG_PROTECT paths.
"""
print(portage.settings["CONFIG_PROTECT"])
-def config_protect_mask(argv):
+def config_protect_mask(_argv):
"""
Returns the CONFIG_PROTECT_MASK paths.
"""
print(portage.settings["CONFIG_PROTECT_MASK"])
-def portdir_overlay(argv):
+def portdir_overlay(_argv):
"""
Returns the PORTDIR_OVERLAY path.
"""
print(portage.settings["PORTDIR_OVERLAY"])
-def pkgdir(argv):
+def pkgdir(_argv):
"""
Returns the PKGDIR path.
"""
print(portage.settings["PKGDIR"])
-def distdir(argv):
+def distdir(_argv):
"""
Returns the DISTDIR path.
"""
print(portage.settings["DISTDIR"])
-def colormap(argv):
+def colormap(_argv):
"""
Display the color.map as environment variables.
"""
sys.stderr.write("ERROR: This version of portageq"
" only supports <eroot>s ending in"
" '%s'. The provided <eroot>, '%s',"
- " doesn't.\n" % (eprefix, eroot));
+ " doesn't.\n" % (eprefix, eroot))
sys.stderr.flush()
sys.exit(os.EX_USAGE)
- root = eroot[:1-len(eprefix)]
+ root = eroot[:1 - len(eprefix)]
else:
root = eroot
warn(txt)
sys.exit(1)
-def exithandler(signum=None, frame=None):
+def exithandler(signum=None, _frame=None):
logging.fatal("Interrupted; exiting...")
if signum is None:
sys.exit(1)
chain(mychanged, mynew, myremoved)))
for x in effective_scanlist:
- #ebuilds and digests added to cvs respectively.
+ # ebuilds and digests added to cvs respectively.
logging.info("checking package %s" % x)
# save memory by discarding xmatch caches from previous package(s)
arch_xmatch_caches.clear()
allvalid = True
for y in checkdirlist:
if (y in no_exec or y.endswith(".ebuild")) and \
- stat.S_IMODE(os.stat(os.path.join(checkdir, y)).st_mode) & 0o111:
- stats["file.executable"] += 1
- fails["file.executable"].append(os.path.join(checkdir, y))
+ stat.S_IMODE(os.stat(os.path.join(checkdir, y)).st_mode) & 0o111:
+ stats["file.executable"] += 1
+ fails["file.executable"].append(os.path.join(checkdir, y))
if y.endswith(".ebuild"):
pf = y[:-7]
ebuildlist.append(pf)
fails["metadata.bad"].append("%s/metadata.xml: %s" % (x, e))
del e
- #Only carry out if in package directory or check forced
+ # Only carry out if in package directory or check forced
if xmllint_capable and not metadata_bad:
# xmlint can produce garbage output even on success, so only dump
# the ouput when it fails.
fails['changelog.ebuildadded'].append(relative_path)
if vcs in ("cvs", "svn", "bzr") and check_ebuild_notadded and y not in eadded:
- #ebuild not added to vcs
+ # ebuild not added to vcs
stats["ebuild.notadded"] += 1
fails["ebuild.notadded"].append(x + "/" + y + ".ebuild")
myesplit = portage.pkgsplit(y)
for mypos in range(len(myuse)):
stats["IUSE.invalid"] += 1
fails["IUSE.invalid"].append(x + "/" + y + ".ebuild: %s" % myuse[mypos])
-
+
# Check for outdated RUBY targets
if "ruby-ng" in inherited or "ruby-fakegem" in inherited or "ruby" in inherited:
ruby_intersection = pkg.iuse.all.intersection(ruby_deprecated)
stats["LICENSE.deprecated"] += 1
fails["LICENSE.deprecated"].append("%s: %s" % (relative_path, lic))
- #keyword checks
+ # keyword checks
myuse = myaux["KEYWORDS"].split()
for mykey in myuse:
if mykey not in ("-*", "*", "~*"):
stats["KEYWORDS.invalid"] += 1
fails["KEYWORDS.invalid"].append(x + "/" + y + ".ebuild: %s (profile invalid)" % mykey)
- #restrict checks
+ # restrict checks
myrestrict = None
try:
myrestrict = portage.dep.use_reduce(myaux["RESTRICT"], matchall=1, flat=True)
stats["RESTRICT.invalid"] += len(mybadrestrict)
for mybad in mybadrestrict:
fails["RESTRICT.invalid"].append(x + "/" + y + ".ebuild: %s" % mybad)
- #REQUIRED_USE check
+ # REQUIRED_USE check
required_use = myaux["REQUIRED_USE"]
if required_use:
if not eapi_has_required_use(eapi):
for keyword, groups, prof in relevant_profiles:
- if not (prof.status == "stable" or \
- (prof.status == "dev" and options.include_dev) or \
- (prof.status == "exp" and options.include_exp_profiles == 'y')):
- continue
+ if not (prof.status == "stable" or \
+ (prof.status == "dev" and options.include_dev) or \
+ (prof.status == "exp" and options.include_exp_profiles == 'y')):
+ continue
- dep_settings = arch_caches.get(prof.sub_path)
- if dep_settings is None:
- dep_settings = portage.config(
- config_profile_path=prof.abs_path,
- config_incrementals=repoman_incrementals,
- config_root=config_root,
- local_config=False,
- _unmatched_removal=options.unmatched_removal,
- env=env, repositories=repoman_settings.repositories)
- dep_settings.categories = repoman_settings.categories
- if options.without_mask:
- dep_settings._mask_manager_obj = \
- copy.deepcopy(dep_settings._mask_manager)
- dep_settings._mask_manager._pmaskdict.clear()
- arch_caches[prof.sub_path] = dep_settings
-
- xmatch_cache_key = (prof.sub_path, tuple(groups))
- xcache = arch_xmatch_caches.get(xmatch_cache_key)
- if xcache is None:
- portdb.melt()
- portdb.freeze()
- xcache = portdb.xcache
- xcache.update(shared_xmatch_caches)
- arch_xmatch_caches[xmatch_cache_key] = xcache
-
- trees[root]["porttree"].settings = dep_settings
- portdb.settings = dep_settings
- portdb.xcache = xcache
-
- dep_settings["ACCEPT_KEYWORDS"] = " ".join(groups)
- # just in case, prevent config.reset() from nuking these.
- dep_settings.backup_changes("ACCEPT_KEYWORDS")
-
- # This attribute is used in dbapi._match_use() to apply
- # use.stable.{mask,force} settings based on the stable
- # status of the parent package. This is required in order
- # for USE deps of unstable packages to be resolved correctly,
- # since otherwise use.stable.{mask,force} settings of
- # dependencies may conflict (see bug #456342).
- dep_settings._parent_stable = dep_settings._isStable(pkg)
-
- # Handle package.use*.{force,mask) calculation, for use
- # in dep_check.
- dep_settings.useforce = dep_settings._use_manager.getUseForce(
- pkg, stable=dep_settings._parent_stable)
- dep_settings.usemask = dep_settings._use_manager.getUseMask(
- pkg, stable=dep_settings._parent_stable)
-
- if not baddepsyntax:
- ismasked = not ebuild_archs or \
- pkg.cpv not in portdb.xmatch("match-visible", pkg.cp)
- if ismasked:
- if not have_pmasked:
- have_pmasked = bool(dep_settings._getMaskAtom(
- pkg.cpv, pkg._metadata))
- if options.ignore_masked:
- continue
- #we are testing deps for a masked package; give it some lee-way
- suffix = "masked"
- matchmode = "minimum-all"
- else:
- suffix = ""
- matchmode = "minimum-visible"
-
- if not have_dev_keywords:
- have_dev_keywords = \
- bool(dev_keywords.intersection(keywords))
-
- if prof.status == "dev":
- suffix = suffix + "indev"
-
- for mytype in Package._dep_keys:
-
- mykey = "dependency.bad" + suffix
- myvalue = myaux[mytype]
- if not myvalue:
- continue
-
- success, atoms = portage.dep_check(myvalue, portdb,
- dep_settings, use="all", mode=matchmode,
- trees=trees)
-
- if success:
- if atoms:
-
- # Don't bother with dependency.unknown for
- # cases in which *DEPEND.bad is triggered.
- for atom in atoms:
- # dep_check returns all blockers and they
- # aren't counted for *DEPEND.bad, so we
- # ignore them here.
- if not atom.blocker:
- unknown_pkgs.discard(
- (mytype, atom.unevaluated_atom))
-
- if not prof.sub_path:
- # old-style virtuals currently aren't
- # resolvable with empty profile, since
- # 'virtuals' mappings are unavailable
- # (it would be expensive to search
- # for PROVIDE in all ebuilds)
- atoms = [atom for atom in atoms if not \
- (atom.cp.startswith('virtual/') and \
- not portdb.cp_list(atom.cp))]
-
- #we have some unsolvable deps
- #remove ! deps, which always show up as unsatisfiable
- atoms = [str(atom.unevaluated_atom) \
- for atom in atoms if not atom.blocker]
-
- #if we emptied out our list, continue:
- if not atoms:
- continue
- stats[mykey] += 1
- fails[mykey].append("%s: %s: %s(%s) %s" % \
- (relative_path, mytype, keyword,
- prof, repr(atoms)))
- else:
+ dep_settings = arch_caches.get(prof.sub_path)
+ if dep_settings is None:
+ dep_settings = portage.config(
+ config_profile_path=prof.abs_path,
+ config_incrementals=repoman_incrementals,
+ config_root=config_root,
+ local_config=False,
+ _unmatched_removal=options.unmatched_removal,
+ env=env, repositories=repoman_settings.repositories)
+ dep_settings.categories = repoman_settings.categories
+ if options.without_mask:
+ dep_settings._mask_manager_obj = \
+ copy.deepcopy(dep_settings._mask_manager)
+ dep_settings._mask_manager._pmaskdict.clear()
+ arch_caches[prof.sub_path] = dep_settings
+
+ xmatch_cache_key = (prof.sub_path, tuple(groups))
+ xcache = arch_xmatch_caches.get(xmatch_cache_key)
+ if xcache is None:
+ portdb.melt()
+ portdb.freeze()
+ xcache = portdb.xcache
+ xcache.update(shared_xmatch_caches)
+ arch_xmatch_caches[xmatch_cache_key] = xcache
+
+ trees[root]["porttree"].settings = dep_settings
+ portdb.settings = dep_settings
+ portdb.xcache = xcache
+
+ dep_settings["ACCEPT_KEYWORDS"] = " ".join(groups)
+ # just in case, prevent config.reset() from nuking these.
+ dep_settings.backup_changes("ACCEPT_KEYWORDS")
+
+ # This attribute is used in dbapi._match_use() to apply
+ # use.stable.{mask,force} settings based on the stable
+ # status of the parent package. This is required in order
+ # for USE deps of unstable packages to be resolved correctly,
+ # since otherwise use.stable.{mask,force} settings of
+ # dependencies may conflict (see bug #456342).
+ dep_settings._parent_stable = dep_settings._isStable(pkg)
+
+ # Handle package.use*.{force,mask} calculation, for use
+ # in dep_check.
+ dep_settings.useforce = dep_settings._use_manager.getUseForce(
+ pkg, stable=dep_settings._parent_stable)
+ dep_settings.usemask = dep_settings._use_manager.getUseMask(
+ pkg, stable=dep_settings._parent_stable)
+
+ if not baddepsyntax:
+ ismasked = not ebuild_archs or \
+ pkg.cpv not in portdb.xmatch("match-visible", pkg.cp)
+ if ismasked:
+ if not have_pmasked:
+ have_pmasked = bool(dep_settings._getMaskAtom(
+ pkg.cpv, pkg._metadata))
+ if options.ignore_masked:
+ continue
+ # we are testing deps for a masked package; give it some leeway
+ suffix = "masked"
+ matchmode = "minimum-all"
+ else:
+ suffix = ""
+ matchmode = "minimum-visible"
+
+ if not have_dev_keywords:
+ have_dev_keywords = \
+ bool(dev_keywords.intersection(keywords))
+
+ if prof.status == "dev":
+ suffix = suffix + "indev"
+
+ for mytype in Package._dep_keys:
+
+ mykey = "dependency.bad" + suffix
+ myvalue = myaux[mytype]
+ if not myvalue:
+ continue
+
+ success, atoms = portage.dep_check(myvalue, portdb,
+ dep_settings, use="all", mode=matchmode,
+ trees=trees)
+
+ if success:
+ if atoms:
+
+ # Don't bother with dependency.unknown for
+ # cases in which *DEPEND.bad is triggered.
+ for atom in atoms:
+ # dep_check returns all blockers and they
+ # aren't counted for *DEPEND.bad, so we
+ # ignore them here.
+ if not atom.blocker:
+ unknown_pkgs.discard(
+ (mytype, atom.unevaluated_atom))
+
+ if not prof.sub_path:
+ # old-style virtuals currently aren't
+ # resolvable with empty profile, since
+ # 'virtuals' mappings are unavailable
+ # (it would be expensive to search
+ # for PROVIDE in all ebuilds)
+ atoms = [atom for atom in atoms if not \
+ (atom.cp.startswith('virtual/') and \
+ not portdb.cp_list(atom.cp))]
+
+ # we have some unsolvable deps
+ # remove ! deps, which always show up as unsatisfiable
+ atoms = [str(atom.unevaluated_atom) \
+ for atom in atoms if not atom.blocker]
+
+ # if we emptied out our list, continue:
+ if not atoms:
+ continue
stats[mykey] += 1
fails[mykey].append("%s: %s: %s(%s) %s" % \
(relative_path, mytype, keyword,
prof, repr(atoms)))
+ else:
+ stats[mykey] += 1
+ fails[mykey].append("%s: %s: %s(%s) %s" % \
+ (relative_path, mytype, keyword,
+ prof, repr(atoms)))
if not baddepsyntax and unknown_pkgs:
type_map = {}