# mtime on the ebuild (and the corresponding cache entry).
# See bug #139134.
max_mtime = sc.mtime
- for ec, (loc, ec_mtime) in metadata['_eclasses_'].iteritems():
+ for ec, (loc, ec_mtime) in metadata['_eclasses_'].items():
if max_mtime < ec_mtime:
max_mtime = ec_mtime
if max_mtime == sc.mtime:
dead_nodes = set()
if self._global_cleanse:
try:
- for cpv in trg_cache.iterkeys():
+ for cpv in trg_cache.keys():
cp = cpv_getkey(cpv)
if cp is None:
self.returncode |= 1
else:
cp_set = self._cp_set
try:
- for cpv in trg_cache.iterkeys():
+ for cpv in trg_cache.keys():
cp = cpv_getkey(cpv)
if cp is None:
self.returncode |= 1
bintree.populated = False
del pkgindex.packages[:]
- pkgindex.packages.extend(metadata.itervalues())
+ pkgindex.packages.extend(metadata.values())
from portage.util import atomic_ofstream
f = atomic_ofstream(self._pkgindex_file)
try:
"cleanresume":CleanResume
}
- module_names = modules.keys()
+ module_names = list(modules.keys())
module_names.sort()
module_names.insert(0, "all")
fd1.write(color(myglsa.nr) + " " + color(status) + " " + color(access) + myglsa.title + " (")
if not verbose:
- for pkg in myglsa.packages.keys()[:3]:
+ for pkg in list(myglsa.packages.keys())[:3]:
fd1.write(" " + pkg + " ")
if len(myglsa.packages) > 3:
fd1.write("... ")
owners = vardb._owners.get_owners(files)
msg = []
- for pkg, owned_files in owners.iteritems():
+ for pkg, owned_files in owners.items():
cpv = pkg.mycpv
msg.append("%s\n" % cpv)
for f in sorted(owned_files):
#
non_commands = frozenset(['exithandler', 'main',
'usage', 'writemsg', 'writemsg_stdout'])
- commands = sorted(k for k, v in globals().iteritems() \
+ commands = sorted(k for k, v in globals().items() \
if type(v) is types.FunctionType and k not in non_commands)
for name in commands:
required_metadata["CATEGORY"] = category
required_metadata["PF"] = pf
update_metadata = {}
- for k, v in required_metadata.iteritems():
+ for k, v in required_metadata.items():
if v != existing_metadata[k]:
update_metadata[k] = v
if update_metadata:
'scan' : 'Scan directory tree for QA issues'
}
- mode_keys = modes.keys()
+ mode_keys = list(modes.keys())
mode_keys.sort()
parser = RepomanOptionParser(formatter=RepomanHelpFormatter(), usage="%prog [options] [mode]")
parser.add_option('--without-mask', dest='without_mask', action='store_true',
default=False, help='behave as if no package.mask entries exist (not allowed with commit mode)')
- parser.add_option('--mode', type='choice', dest='mode', choices=modes.keys(),
+ parser.add_option('--mode', type='choice', dest='mode', choices=list(modes.keys()),
help='specify which mode repoman will run in (default=full)')
parser.on_tail("\n " + green("Modes".ljust(20) + " Description\n"))
parser.on_tail("\n " + green("QA keyword".ljust(20) + " Description\n"))
- sorted_qa = qahelp.keys()
+ sorted_qa = list(qahelp.keys())
sorted_qa.sort()
for k in sorted_qa:
parser.on_tail(" %s %s\n" % (k.ljust(20), qahelp[k]))
"upstream.workaround":"The ebuild works around an upstream bug, an upstream bug should be filed and tracked in bugs.gentoo.org"
}
-qacats = qahelp.keys()
+qacats = list(qahelp.keys())
qacats.sort()
qawarnings = set((
want to add the --include-dev option.
"""
type_arch_map = {}
- for arch, arch_profiles in profiles.iteritems():
+ for arch, arch_profiles in profiles.items():
for prof in arch_profiles:
arch_set = type_arch_map.get(prof.status)
if arch_set is None:
(vcs == "svn" and repolevel < 3 and options.mode != "commit")
# Build a regex from thirdpartymirrors for the SRC_URI.mirror check.
-thirdpartymirrors = portage.flatten(repoman_settings.thirdpartymirrors().values())
+thirdpartymirrors = portage.flatten(list(repoman_settings.thirdpartymirrors().values()))
for x in scanlist:
#ebuilds and digests added to cvs respectively.
fetchlist_dict=fetchlist_dict)
mf.create(requiredDistfiles=None,
assumeDistHashesAlways=True)
- for distfiles in fetchlist_dict.itervalues():
+ for distfiles in fetchlist_dict.values():
for distfile in distfiles:
if os.path.isfile(os.path.join(distdir, distfile)):
mf.fhashdict['DIST'].pop(distfile, None)
pkg = pkgs[y]
if pkg.invalid:
- for k, msgs in pkg.invalid.iteritems():
+ for k, msgs in pkg.invalid.items():
for msg in msgs:
stats[k] = stats[k] + 1
fails[k].append("%s %s" % (relative_path, msg))
inherited = pkg.inherited
live_ebuild = live_eclasses.intersection(inherited)
- for k, v in myaux.iteritems():
+ for k, v in myaux.items():
if not isinstance(v, basestring):
continue
m = non_ascii_re.search(v)
"FILE" : os.path.basename(pkg_path)
}
- fetch_env = dict(settings.iteritems())
+ fetch_env = dict(settings.items())
fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
for x in portage.util.shlex_split(fcmd)]
# Validate all the atoms and counters so that
# corruption is detected as soon as possible.
invalid_items = set()
- for k, v in self._cache_data["blockers"].iteritems():
+ for k, v in self._cache_data["blockers"].items():
if not isinstance(k, basestring):
invalid_items.add(k)
continue
fd_pipes.setdefault(2, sys.stderr.fileno())
# flush any pending output
- for fd in fd_pipes.itervalues():
+ for fd in fd_pipes.values():
if fd == sys.stdout.fileno():
sys.stdout.flush()
if fd == sys.stderr.fileno():
object.__setattr__(self, "_isatty", isatty)
if not isatty or not self._init_term():
term_codes = {}
- for k, capname in self._termcap_name_map.iteritems():
+ for k, capname in self._termcap_name_map.items():
term_codes[k] = self._default_term_codes[capname]
object.__setattr__(self, "_term_codes", term_codes)
encoding = sys.getdefaultencoding()
return False
term_codes = {}
- for k, capname in self._termcap_name_map.iteritems():
+ for k, capname in self._termcap_name_map.items():
code = tigetstr(capname)
if code is None:
code = self._default_term_codes[capname]
if self._global_cleanse:
for mytree in portdb.porttrees:
try:
- dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
+ dead_nodes[mytree] = set(portdb.auxdb[mytree].keys())
except CacheError as e:
portage.writemsg("Error listing cache entries for " + \
"'%s': %s, continuing...\n" % (mytree, e),
for mytree in portdb.porttrees:
try:
dead_nodes[mytree] = set(cpv for cpv in \
- portdb.auxdb[mytree].iterkeys() \
+ portdb.auxdb[mytree].keys() \
if cpv_getkey(cpv) in cp_set)
except CacheError as e:
portage.writemsg("Error listing cache entries for " + \
if portdb.findname2(y, mytree=mytree)[0]:
dead_nodes[mytree].discard(y)
- for mytree, nodes in dead_nodes.iteritems():
+ for mytree, nodes in dead_nodes.items():
auxdb = portdb.auxdb[mytree]
for y in nodes:
try:
# USE is lazy, but we want it to show up in self.keys().
self['USE'] = ''
self.update(metadata)
- for k, v in self.iteritems():
+ for k, v in self.items():
if k == 'INHERITED':
if isinstance(v, basestring):
v = frozenset(v.split())
obj = PackageVirtualDbapi(self.settings)
obj._match_cache = self._match_cache.copy()
obj._cp_map = self._cp_map.copy()
- for k, v in obj._cp_map.iteritems():
+ for k, v in obj._cp_map.items():
obj._cp_map[k] = v[:]
obj._cpv_map = self._cpv_map.copy()
return obj
def __iter__(self):
- return self._cpv_map.itervalues()
+ return iter(self._cpv_map.values())
def __contains__(self, item):
existing = self._cpv_map.get(item.cpv)
def _start(self):
self._reg_ids = set()
self._read_data = []
- for k, f in self.input_files.iteritems():
+ for k, f in self.input_files.items():
fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
self._reg_ids.add(self.scheduler.register(f.fileno(),
if event & PollConstants.POLLIN:
- for f in self.input_files.itervalues():
+ for f in self.input_files.values():
if fd == f.fileno():
break
self._reg_ids = None
if self.input_files is not None:
- for f in self.input_files.itervalues():
+ for f in self.input_files.values():
f.close()
self.input_files = None
select_args = self._select_args
if select_args is None:
- select_args = [self._registered.keys(), [], []]
+ select_args = [list(self._registered.keys()), [], []]
if timeout is not None:
select_args = select_args[:]
self._shown_repos = {}
self._unknown_repo = False
repo_paths = set()
- for root_config in roots.itervalues():
+ for root_config in roots.values():
portdir = root_config.settings.get("PORTDIR")
if portdir:
repo_paths.add(portdir)
for repo_path in repo_paths ]
# pre-allocate index for PORTDIR so that it always has index 0.
- for root_config in roots.itervalues():
+ for root_config in roots.values():
portdb = root_config.trees["porttree"].dbapi
portdir = portdb.porttree_root
if portdir:
if shown_repos or self._unknown_repo:
output.append("Portage tree and overlays:\n")
show_repo_paths = list(shown_repos)
- for repo_path, repo_index in shown_repos.iteritems():
+ for repo_path, repo_index in shown_repos.items():
show_repo_paths[repo_index] = repo_path
if show_repo_paths:
for index, repo_path in enumerate(show_repo_paths):
}
tree_pkg_map = {}
- for k, v in pkg_tree_map.iteritems():
+ for k, v in pkg_tree_map.items():
tree_pkg_map[v] = k
def __init__(self, settings, trees, setconfig):
digest = '--digest' in self.myopts
if not digest:
- for pkgsettings in self.pkgsettings.itervalues():
+ for pkgsettings in self.pkgsettings.values():
if 'digest' in pkgsettings.features:
digest = True
break
shown_verifying_msg = False
quiet_settings = {}
- for myroot, pkgsettings in self.pkgsettings.iteritems():
+ for myroot, pkgsettings in self.pkgsettings.items():
quiet_config = portage.config(clone=pkgsettings)
quiet_config["PORTAGE_QUIET"] = "1"
quiet_config.backup_changes("PORTAGE_QUIET")
# any of bad_resume_opts from leaking in
# via EMERGE_DEFAULT_OPTS.
resume_opts["--ignore-default-opts"] = True
- for myopt, myarg in resume_opts.iteritems():
+ for myopt, myarg in resume_opts.items():
if myopt not in bad_resume_opts:
if myarg is True:
mynewargv.append(myopt)
fd_pipes.setdefault(2, sys.stderr.fileno())
# flush any pending output
- for fd in fd_pipes.itervalues():
+ for fd in fd_pipes.values():
if fd == sys.stdout.fileno():
sys.stdout.flush()
if fd == sys.stderr.fileno():
self._reg_id = None
if self._files is not None:
- for f in self._files.itervalues():
+ for f in self._files.values():
f.close()
self._files = None
continue
set_args = {}
- for s, package_set in required_sets.iteritems():
+ for s, package_set in required_sets.items():
set_atom = SETPREFIX + s
set_arg = SetArg(arg=set_atom, set=package_set,
root_config=resolver._frozen_config.roots[myroot])
if not consumers:
continue
- for lib, lib_consumers in consumers.items():
+ for lib, lib_consumers in list(consumers.items()):
for consumer_file in list(lib_consumers):
if pkg_dblink.isowner(consumer_file, myroot):
lib_consumers.remove(consumer_file)
if not consumers:
continue
- for lib, lib_consumers in consumers.iteritems():
+ for lib, lib_consumers in consumers.items():
soname = soname_cache.get(lib)
if soname is None:
if consumer_map:
search_files = set()
- for consumers in consumer_map.itervalues():
- for lib, consumer_providers in consumers.iteritems():
+ for consumers in consumer_map.values():
+ for lib, consumer_providers in consumers.items():
for lib_consumer, providers in consumer_providers:
search_files.add(lib_consumer)
search_files.update(providers)
writemsg_level(">>> Assigning files to packages...\n")
file_owners = real_vardb._owners.getFileOwnerMap(search_files)
- for pkg, consumers in consumer_map.items():
- for lib, consumer_providers in consumers.items():
+ for pkg, consumers in list(consumer_map.items()):
+ for lib, consumer_providers in list(consumers.items()):
lib_consumers = set()
for lib_consumer, providers in consumer_providers:
# and also add any dependencies pulled in by the provider.
writemsg_level(">>> Adding lib providers to graph...\n")
- for pkg, consumers in consumer_map.iteritems():
+ for pkg, consumers in consumer_map.items():
for consumer_dblink in set(chain(*consumers.values())):
consumer_pkg = vardb.get(("installed", myroot,
consumer_dblink.mycpv, "nomerge"))
libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
if "--verbose" in myopts:
- myvars=settings.keys()
+ myvars=list(settings.keys())
else:
myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
for tree_data in porttrees_data:
try:
- dead_nodes = set(tree_data.dest_db.iterkeys())
+ dead_nodes = set(tree_data.dest_db.keys())
except CacheError as e:
writemsg_level("Error listing cache entries for " + \
"'%s': %s, continuing...\n" % (tree_data.path, e),
continue
inconsistent = False
- for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
+ for ec, (ec_path, ec_mtime) in ec_mtimes.items():
updated_mtime = updated_ec_mtimes.get(ec)
if updated_mtime is not None and updated_mtime != ec_mtime:
writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
if current_eb_mtime != eb_mtime:
os.utime(eb_path, (eb_mtime, eb_mtime))
- for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
+ for ec, (ec_path, ec_mtime) in ec_mtimes.items():
if ec in updated_ec_mtimes:
continue
ec_path = os.path.join(ec_dir, ec + ".eclass")
kwargs[k] = v
trees = portage.create_trees(trees=trees, **kwargs)
- for root, root_trees in trees.iteritems():
+ for root, root_trees in trees.items():
settings = root_trees["vartree"].settings
setconfig = load_default_config(settings, root_trees)
root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
import portage
def clear_caches(trees):
- for d in trees.itervalues():
+ for d in trees.values():
d["porttree"].dbapi.melt()
d["porttree"].dbapi._aux_cache.clear()
d["bintree"].dbapi._aux_cache.clear()
runtime_pkg_mask = {}
else:
runtime_pkg_mask = dict((k, v.copy()) for (k, v) in \
- runtime_pkg_mask.iteritems())
+ runtime_pkg_mask.items())
self._runtime_pkg_mask = runtime_pkg_mask
self._need_restart = False
# missed update from each SLOT.
missed_updates = {}
for pkg, mask_reasons in \
- self._dynamic_config._runtime_pkg_mask.iteritems():
+ self._dynamic_config._runtime_pkg_mask.items():
if pkg.installed:
# Exclude installed here since we only
# want to show available updates.
other_pkg, mask_type, parent_atoms = missed_updates[k]
if other_pkg > pkg:
continue
- for mask_type, parent_atoms in mask_reasons.iteritems():
+ for mask_type, parent_atoms in mask_reasons.items():
if not parent_atoms:
continue
missed_updates[k] = (pkg, mask_type, parent_atoms)
return
missed_update_types = {}
- for pkg, mask_type, parent_atoms in missed_updates.itervalues():
+ for pkg, mask_type, parent_atoms in missed_updates.values():
missed_update_types.setdefault(mask_type,
[]).append((pkg, parent_atoms))
explanation_columns = 70
explanations = 0
for (slot_atom, root), slot_nodes \
- in self._dynamic_config._slot_collision_info.iteritems():
+ in self._dynamic_config._slot_collision_info.items():
msg.append(str(slot_atom))
if root != '/':
msg.append(" for %s" % (root,))
packages that have been pulled into a given slot.
"""
for (slot_atom, root), slot_nodes \
- in self._dynamic_config._slot_collision_info.iteritems():
+ in self._dynamic_config._slot_collision_info.items():
all_parent_atoms = set()
for pkg in slot_nodes:
# by dep_zapdeps. We preserve actual parent/child relationships
# here in order to avoid distorting the dependency graph like
# <=portage-2.1.6.x did.
- for virt_pkg, atoms in selected_atoms.iteritems():
+ for virt_pkg, atoms in selected_atoms.items():
# Just assume depth + 1 here for now, though it's not entirely
# accurate since multilple levels of indirect virtual deps may
args_set.add(atom)
self._dynamic_config._set_atoms.clear()
- self._dynamic_config._set_atoms.update(chain(*self._dynamic_config._sets.itervalues()))
+ self._dynamic_config._set_atoms.update(chain(*self._dynamic_config._sets.values()))
atom_arg_map = self._dynamic_config._atom_arg_map
atom_arg_map.clear()
for arg in args:
# Invalidate the package selection cache, since
# arguments influence package selections.
self._dynamic_config._highest_pkg_cache.clear()
- for trees in self._dynamic_config._filtered_trees.itervalues():
+ for trees in self._dynamic_config._filtered_trees.values():
trees["porttree"].dbapi._clear_cache()
def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
except portage.exception.InvalidDependString:
continue
blocker_atoms = []
- for atoms in selected_atoms.itervalues():
+ for atoms in selected_atoms.values():
blocker_atoms.extend(x for x in atoms if x.blocker)
blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
(running_root, running_portage.cpv, e), noiselevel=-1)
del e
portage_rdepend = {running_portage : []}
- for atoms in portage_rdepend.itervalues():
+ for atoms in portage_rdepend.values():
runtime_deps.update(atom for atom in atoms \
if not atom.blocker)
# Reduce noise by pruning packages that are only
# pulled in by other conflict packages.
pruned_pkgs = set()
- for pkg, parent_atoms in conflict_pkgs.iteritems():
+ for pkg, parent_atoms in conflict_pkgs.items():
relevant_parent = False
for parent, atom in parent_atoms:
if parent not in conflict_pkgs:
indent = " "
# Max number of parents shown, to avoid flooding the display.
max_parents = 3
- for pkg, parent_atoms in conflict_pkgs.iteritems():
+ for pkg, parent_atoms in conflict_pkgs.items():
pruned_list = set()
"merged because it is listed in\n")
msg.append("package.provided:\n\n")
problems_sets = set()
- for (arg, atom), refs in arg_refs.iteritems():
+ for (arg, atom), refs in arg_refs.items():
ref_string = ""
if refs:
problems_sets.update(refs)
mreasons.append("CHOST: %s" % \
pkg.metadata["CHOST"])
if pkg.invalid:
- for msg_type, msgs in pkg.invalid.iteritems():
+ for msg_type, msgs in pkg.invalid.items():
for msg in msgs:
mreasons.append("invalid: %s" % (msg,))
samefile_map[obj_key] = alt_paths
alt_paths.add(f)
- for alt_paths in samefile_map.itervalues():
+ for alt_paths in samefile_map.values():
alt_paths = sorted(alt_paths)
for p in alt_paths:
print(colorize("WARN", " * ") + " - %s" % (p,))
continue
match = None
- for k, arg_choices in short_arg_opts.iteritems():
+ for k, arg_choices in short_arg_opts.items():
if k in arg:
match = k
break
if match is None:
- for k, arg_choices in short_arg_opts_n.iteritems():
+ for k, arg_choices in short_arg_opts_n.items():
if k in arg:
match = k
break
for myopt in options:
parser.add_option(myopt, action="store_true",
dest=myopt.lstrip("--").replace("-", "_"), default=False)
- for shortopt, longopt in shortmapping.iteritems():
+ for shortopt, longopt in shortmapping.items():
parser.add_option("-" + shortopt, action="store_true",
dest=longopt.lstrip("--").replace("-", "_"), default=False)
- for myalias, myopt in longopt_aliases.iteritems():
+ for myalias, myopt in longopt_aliases.items():
parser.add_option(myalias, action="store_true",
dest=myopt.lstrip("--").replace("-", "_"), default=False)
- for myopt, kwargs in argument_options.iteritems():
+ for myopt, kwargs in argument_options.items():
shortopt = kwargs.pop("shortopt", None)
args = [myopt]
if shortopt is not None:
def repo_name_check(trees):
missing_repo_names = set()
- for root, root_trees in trees.iteritems():
+ for root, root_trees in trees.items():
if "porttree" in root_trees:
portdb = root_trees["porttree"].dbapi
missing_repo_names.update(portdb.porttrees)
def repo_name_duplicate_check(trees):
ignored_repos = {}
- for root, root_trees in trees.iteritems():
+ for root, root_trees in trees.items():
if 'porttree' in root_trees:
portdb = root_trees['porttree'].dbapi
if portdb.mysettings.get('PORTAGE_REPO_DUPLICATE_WARN') != '0':
return bool(ignored_repos)
def config_protect_check(trees):
- for root, root_trees in trees.iteritems():
+ for root, root_trees in trees.items():
if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
msg = "!!! CONFIG_PROTECT is empty"
if root != "/":
def profile_check(trees, myaction):
if myaction in ("help", "info", "sync", "version"):
return os.EX_OK
- for root, root_trees in trees.iteritems():
+ for root, root_trees in trees.items():
if root_trees["root_config"].settings.profiles:
continue
# generate some profile related warning messages
repo_name_duplicate_check(trees)
config_protect_check(trees)
- for mytrees in trees.itervalues():
+ for mytrees in trees.values():
mydb = mytrees["porttree"].dbapi
# Freeze the portdbapi for performance (memoize all xmatch results).
mydb.freeze()
slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
for myslot in slotmap:
- counterkeys = slotmap[myslot].keys()
+ counterkeys = list(slotmap[myslot].keys())
if not counterkeys:
continue
counterkeys.sort()
unordered[cp] = cp_dict
for k in d:
cp_dict[k] = set()
- for k, v in d.iteritems():
+ for k, v in d.items():
cp_dict[k].update(v)
pkgmap = [unordered[cp] for cp in sorted(unordered)]
selected = pkgmap[x]["selected"]
if not selected:
continue
- for mytype, mylist in pkgmap[x].iteritems():
+ for mytype, mylist in pkgmap[x].items():
if mytype == "selected":
continue
mylist.difference_update(all_selected)
wrapped_kwargs = dict(
(_unicode_encode(k, encoding=encoding, errors='strict'),
_unicode_encode(v, encoding=encoding, errors='strict'))
- for k, v in kwargs.iteritems())
+ for k, v in kwargs.items())
else:
wrapped_kwargs = {}
return list(self.nodes[node][0])
children = []
if hasattr(ignore_priority, '__call__'):
- for child, priorities in self.nodes[node][0].iteritems():
+ for child, priorities in self.nodes[node][0].items():
for priority in priorities:
if not ignore_priority(priority):
children.append(child)
break
else:
- for child, priorities in self.nodes[node][0].iteritems():
+ for child, priorities in self.nodes[node][0].items():
if ignore_priority < priorities[-1]:
children.append(child)
return children
return list(self.nodes[node][1])
parents = []
if hasattr(ignore_priority, '__call__'):
- for parent, priorities in self.nodes[node][1].iteritems():
+ for parent, priorities in self.nodes[node][1].items():
for priority in priorities:
if not ignore_priority(priority):
parents.append(parent)
break
else:
- for parent, priorities in self.nodes[node][1].iteritems():
+ for parent, priorities in self.nodes[node][1].items():
if ignore_priority < priorities[-1]:
parents.append(parent)
return parents
elif hasattr(ignore_priority, '__call__'):
for node in self.order:
is_leaf_node = True
- for child, priorities in self.nodes[node][0].iteritems():
+ for child, priorities in self.nodes[node][0].items():
for priority in priorities:
if not ignore_priority(priority):
is_leaf_node = False
else:
for node in self.order:
is_leaf_node = True
- for child, priorities in self.nodes[node][0].iteritems():
+ for child, priorities in self.nodes[node][0].items():
if ignore_priority < priorities[-1]:
is_leaf_node = False
break
elif hasattr(ignore_priority, '__call__'):
for node in self.order:
is_root_node = True
- for parent, priorities in self.nodes[node][1].iteritems():
+ for parent, priorities in self.nodes[node][1].items():
for priority in priorities:
if not ignore_priority(priority):
is_root_node = False
else:
for node in self.order:
is_root_node = True
- for parent, priorities in self.nodes[node][1].iteritems():
+ for parent, priorities in self.nodes[node][1].items():
if ignore_priority < priorities[-1]:
is_root_node = False
break
clone = digraph()
clone.nodes = {}
memo = {}
- for children, parents, node in self.nodes.itervalues():
+ for children, parents, node in self.nodes.values():
children_clone = {}
- for child, priorities in children.iteritems():
+ for child, priorities in children.items():
priorities_clone = memo.get(id(priorities))
if priorities_clone is None:
priorities_clone = priorities[:]
memo[id(priorities)] = priorities_clone
children_clone[child] = priorities_clone
parents_clone = {}
- for parent, priorities in parents.iteritems():
+ for parent, priorities in parents.items():
priorities_clone = memo.get(id(priorities))
if priorities_clone is None:
priorities_clone = priorities[:]
output("depends on\n")
else:
output("(no children)\n")
- for child, priorities in self.nodes[node][0].iteritems():
+ for child, priorities in self.nodes[node][0].items():
output(" %s (%s)\n" % (child, priorities[-1],))
#parse /etc/env.d and generate /etc/profile.env
not ld_cache_update and \
contents is not None:
libdir_contents_changed = False
- for mypath, mydata in contents.iteritems():
+ for mypath, mydata in contents.items():
if mydata[0] not in ("obj","sym"):
continue
head, tail = os.path.split(mypath)
for x in self.profiles]
for pkeyworddict in rawpkeywords:
cpdict = {}
- for k, v in pkeyworddict.iteritems():
+ for k, v in pkeyworddict.items():
cpdict.setdefault(k.cp, {})[k] = v
self._pkeywords_list.append(cpdict)
recursive=1) for x in self.profiles]
for pusemaskdict in rawpusemask:
cpdict = {}
- for k, v in pusemaskdict.iteritems():
+ for k, v in pusemaskdict.items():
cpdict.setdefault(k.cp, {})[k] = v
self.pusemask_list.append(cpdict)
del rawpusemask
juststrings=True, recursive=1) for x in self.profiles]
for rawpusedict in rawprofileuse:
cpdict = {}
- for k, v in rawpusedict.iteritems():
+ for k, v in rawpusedict.items():
cpdict.setdefault(k.cp, {})[k] = v
self.pkgprofileuse.append(cpdict)
del rawprofileuse
for x in self.profiles]
for rawpusefdict in rawpuseforce:
cpdict = {}
- for k, v in rawpusefdict.iteritems():
+ for k, v in rawpusefdict.items():
cpdict.setdefault(k.cp, {})[k] = v
self.puseforce_list.append(cpdict)
del rawpuseforce
# Avoid potential UnicodeDecodeError exceptions later.
env_unicode = dict((_unicode_decode(k), _unicode_decode(v))
- for k, v in env.iteritems())
+ for k, v in env.items())
self.backupenv = env_unicode
# Remove duplicate values so they don't override updated
# profile.env values later (profile.env is reloaded in each
# call to self.regenerate).
- for k, v in env_d.iteritems():
+ for k, v in env_d.items():
try:
if self.backupenv[k] == v:
del self.backupenv[k]
pmask_locations.append(abs_user_config)
pusedict = grabdict_package(
os.path.join(abs_user_config, "package.use"), recursive=1)
- for k, v in pusedict.iteritems():
+ for k, v in pusedict.items():
self.pusedict.setdefault(k.cp, {})[k] = v
#package.keywords
pkgdict = grabdict_package(
os.path.join(abs_user_config, "package.keywords"),
recursive=1)
- for k, v in pkgdict.iteritems():
+ for k, v in pkgdict.items():
# default to ~arch if no specific keyword is given
if not v:
mykeywordlist = []
#package.license
licdict = grabdict_package(os.path.join(
abs_user_config, "package.license"), recursive=1)
- for k, v in licdict.iteritems():
+ for k, v in licdict.items():
cp = k.cp
cp_dict = self._plicensedict.get(cp)
if not cp_dict:
#package.properties
propdict = grabdict_package(os.path.join(
abs_user_config, "package.properties"), recursive=1)
- for k, v in propdict.iteritems():
+ for k, v in propdict.items():
cp = k.cp
cp_dict = self._ppropertiesdict.get(cp)
if not cp_dict:
}
for mypath, (gid, mode, modemask, preserve_perms) \
- in dir_mode_map.iteritems():
+ in dir_mode_map.items():
mydir = os.path.join(self["ROOT"], mypath)
if preserve_perms and os.path.isdir(mydir):
# Only adjust permissions on some directories if
for i, pkgprofileuse_dict in enumerate(self.pkgprofileuse):
cpdict = pkgprofileuse_dict.get(cp)
if cpdict:
- keys = cpdict.keys()
+ keys = list(cpdict.keys())
while keys:
bestmatch = best_match_to_list(cpv_slot, keys)
if bestmatch:
self.puse = ""
cpdict = self.pusedict.get(cp)
if cpdict:
- keys = cpdict.keys()
+ keys = list(cpdict.keys())
while keys:
self.pusekey = best_match_to_list(cpv_slot, keys)
if self.pusekey:
# allows packages that support LINGUAS but don't
# declare it in IUSE to use the variable outside of the
# USE_EXPAND context.
- for k, use_expand_iuse in use_expand_iuses.iteritems():
+ for k, use_expand_iuse in use_expand_iuses.items():
if k + '_*' in use:
use.update( x for x in use_expand_iuse if x not in usemask )
k = k.upper()
for i, pusemask_dict in enumerate(self.pusemask_list):
cpdict = pusemask_dict.get(cp)
if cpdict:
- keys = cpdict.keys()
+ keys = list(cpdict.keys())
while keys:
best_match = best_match_to_list(pkg, keys)
if best_match:
for i, puseforce_dict in enumerate(self.puseforce_list):
cpdict = puseforce_dict.get(cp)
if cpdict:
- keys = cpdict.keys()
+ keys = list(cpdict.keys())
while keys:
best_match = best_match_to_list(pkg, keys)
if best_match:
matches = False
if pkgdict:
cpv_slot_list = ["%s:%s" % (cpv, metadata["SLOT"])]
- for atom, pkgkeywords in pkgdict.iteritems():
+ for atom, pkgkeywords in pkgdict.items():
if match_from_list(atom, cpv_slot_list):
matches = True
pgroups.extend(pkgkeywords)
if cpdict:
accept_license = list(self._accept_license)
cpv_slot = "%s:%s" % (cpv, metadata["SLOT"])
- for atom in match_to_list(cpv_slot, cpdict.keys()):
+ for atom in match_to_list(cpv_slot, list(cpdict.keys())):
accept_license.extend(cpdict[atom])
licenses = set(flatten(dep.use_reduce(dep.paren_reduce(
if cpdict:
accept_properties = list(self._accept_properties)
cpv_slot = "%s:%s" % (cpv, metadata["SLOT"])
- for atom in match_to_list(cpv_slot, cpdict.keys()):
+ for atom in match_to_list(cpv_slot, list(cpdict.keys())):
accept_properties.extend(cpdict[atom])
properties = set(flatten(dep.use_reduce(dep.paren_reduce(
virtuals_file = os.path.join(x, "virtuals")
virtuals_dict = grabdict(virtuals_file)
atoms_dict = {}
- for k, v in virtuals_dict.iteritems():
+ for k, v in virtuals_dict.items():
try:
virt_atom = portage.dep.Atom(k)
except portage.exception.InvalidAtom:
def _populate_treeVirtuals(self, vartree):
"""Reduce the provides into a list by CP."""
- for provide, cpv_list in vartree.get_all_provides().iteritems():
+ for provide, cpv_list in vartree.get_all_provides().items():
try:
provide = dep.Atom(provide)
except exception.InvalidAtom:
# Virtuals by profile+tree preferences.
ptVirtuals = {}
- for virt, installed_list in self.treeVirtuals.iteritems():
+ for virt, installed_list in self.treeVirtuals.items():
profile_list = self.dirVirtuals.get(virt, None)
if not profile_list:
continue
yield (k, self[k])
def items(self):
- return list(self.iteritems())
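+ # Build the pair list directly; delegating to self.items() here would recurse.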
+ return [(k, self[k]) for k in self]
def __setitem__(self,mykey,myvalue):
"set a value; will be thrown away at reset() time"
# it needs to be flushed before allowing a child process to use it
# so that output always shows in the correct order.
stdout_filenos = (sys.stdout.fileno(), sys.stderr.fileno())
- for fd in fd_pipes.itervalues():
+ for fd in fd_pipes.values():
if fd in stdout_filenos:
sys.stdout.flush()
sys.stderr.flush()
if args[0] != BASH_BINARY:
args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args
- rval = spawn_func(args, env=dict(settings.iteritems()), **kwargs)
+ rval = spawn_func(args, env=dict(settings.items()), **kwargs)
return rval
file_uri_tuples = []
if isinstance(myuris, dict):
- for myfile, uri_set in myuris.iteritems():
+ for myfile, uri_set in myuris.items():
for myuri in uri_set:
file_uri_tuples.append((myfile, myuri))
else:
# Prefer thirdpartymirrors over normal mirrors in cases when
# the file does not yet exist on the normal mirrors.
- for myfile, uris in thirdpartymirror_uris.iteritems():
+ for myfile, uris in thirdpartymirror_uris.items():
primaryuri_dict.setdefault(myfile, []).extend(uris)
can_fetch=True
mysettings.get("PORTAGE_QUIET", None) == "1"
digests = mydigests.get(myfile)
if digests:
- digests = digests.keys()
+ digests = list(digests.keys())
digests.sort()
eout.ebegin(
"%s %s ;-)" % (myfile, " ".join(digests)))
writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
return 0
if "assume-digests" not in mysettings.features:
- distlist = mf.fhashdict.get("DIST", {}).keys()
+ distlist = list(mf.fhashdict.get("DIST", {}).keys())
distlist.sort()
auto_assumed = []
for filename in distlist:
return 1
mf = Manifest(pkgdir, mysettings["DISTDIR"])
manifest_empty = True
- for d in mf.fhashdict.itervalues():
+ for d in mf.fhashdict.values():
if d:
manifest_empty = False
break
droppriv = secpass >= 2 and \
"userpriv" in mysettings.features and \
"userpriv" not in restrict
- for myfeature, kwargs in features_dirs.iteritems():
+ for myfeature, kwargs in features_dirs.items():
if myfeature in mysettings.features:
failure = False
basedir = mysettings.get(kwargs["basedir_var"])
matches = False
if pkgdict:
cpv_slot_list = ["%s:%s" % (mycpv, metadata["SLOT"])]
- for atom, pkgkeywords in pkgdict.iteritems():
+ for atom, pkgkeywords in pkgdict.items():
if match_from_list(atom, cpv_slot_list):
matches = True
pgroups.extend(pkgkeywords)
self.portdb = mydbapi
def __getitem__(self, pkg_key):
"""Returns the complete fetch list for a given package."""
- return self.portdb.getFetchMap(pkg_key, mytree=self.mytree).keys()
+ return list(self.portdb.getFetchMap(pkg_key, mytree=self.mytree).keys())
def __contains__(self, cpv):
- return cpv in self.keys()
+ return cpv in list(self.keys())
def has_key(self, pkg_key):
"""Returns true if the given package exists within pkgdir."""
return pkg_key in self
# until after _all_ of the above updates have
# been processed because the mtimedb will
# automatically commit when killed by ctrl C.
- for mykey, mtime in timestamps.iteritems():
+ for mykey, mtime in timestamps.items():
prev_mtimes[mykey] = mtime
# We gotta do the brute force updates for these now.
mtimedbkeys = set(("info", "ldpath", "resume", "resume_backup",
"starttime", "updates", "version"))
- for k in d.keys():
+ for k in list(d.keys()):
if k not in mtimedbkeys:
writemsg(_("Deleting invalid mtimedb key: %s\n") % str(k))
del d[k]
self._ensure_access(self._db_path)
def iteritems(self):
- return self.__db.iteritems()
+ return iter(self.__db.items())
def _getitem(self, cpv):
# we override getitem because it's just a cpickling of the data handed in.
del self.__db[cpv]
def __iter__(self):
- return iter(self.__db.keys())
+ return iter(list(self.__db.keys()))
def __contains__(self, cpv):
return cpv in self.__db
def _setitem(self, cpv, values):
path = self.__get_path(cpv)
max = self.max_len
- for key,value in values.iteritems():
+ for key,value in values.items():
# mtime comes in as long so need to convert to strings
s = str(value)
# We need to split long values
"""
def __iter__(self):
- return self.iterkeys()
+ return iter(self.keys())
def keys(self):
return list(self.__iter__())
return self.__iter__()
def itervalues(self):
- for _, v in self.iteritems():
+ for _, v in self.items():
yield v
def values(self):
- return [v for _, v in self.iteritems()]
+ return [v for _, v in self.items()]
def items(self):
- return list(self.iteritems())
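+ # Build the pairs directly; calling self.items() would recurse on Python 2 (Python 3 rebinds items to iteritems below).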
+ return [(k, self[k]) for k in self]
def get(self, key, default=None):
try:
return default
def __repr__(self):
- return repr(dict(self.iteritems()))
+ return repr(dict(self.items()))
def __len__(self):
- return len(self.keys())
+ return len(list(self.keys()))
if sys.hexversion >= 0x3000000:
items = iteritems
"""
def clear(self):
- for key in self.keys():
+ for key in list(self.keys()):
del self[key]
def setdefault(self, key, default=None):
def popitem(self):
try:
- k, v = self.iteritems().next()
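+ # Use the next() builtin; the .next() method does not exist on Python 3.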
+ k, v = next(iter(self.items()))
except StopIteration:
raise KeyError('container is empty')
del self[k]
def __iter__(self):
- for k in self.new.iterkeys():
+ for k in self.new.keys():
yield k
- for k in self.orig.iterkeys():
+ for k in self.orig.keys():
if k not in self.blacklist and k not in self.new:
yield k
self.update(kwargs)
def __iter__(self):
- for k, v in self.iteritems():
+ for k, v in self.items():
yield k
def __len__(self):
l = 0
- for i in self.iteritems():
+ for i in self.items():
l += 1
return l
pass
def items(self):
return list(self.iteritems())
def itervalues(self):
- for k, v in self.iteritems():
+ for k, v in self.items():
yield v
def values(self):
return list(self.itervalues())
def __delitem__(self, k):
try:
def popitem(self):
try:
- k, v = self.iteritems().next()
+ k, v = next(iter(self.items()))
except StopIteration:
raise KeyError('container is empty')
del self[k]
pass
def __str__(self):
- return str(dict(self.iteritems()))
+ return str(dict(self.items()))
if sys.hexversion >= 0x3000000:
items = iteritems
self._allowed_keys
cursor = self._db_cursor
- for k, v in self._db_table.iteritems():
+ for k, v in self._db_table.items():
if self._db_table_exists(v["table_name"]):
create_statement = self._db_table_get_create(v["table_name"])
if create_statement != v["create"]:
raise cache_errors.ReadOnlyRestriction()
if self.cleanse_keys:
d=ProtectedDict(values)
- for k in d.keys():
+ for k in list(d.keys()):
if d[k] == '':
del d[k]
if self.serialize_eclasses and "_eclasses_" in values:
return cpv in self
def keys(self):
- return tuple(self.iterkeys())
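+ # Iterate via __iter__; calling self.keys() here would recurse.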
+ return tuple(iter(self))
def iterkeys(self):
return iter(self)
def iteritems(self):
- for x in self.iterkeys():
+ for x in self.keys():
yield (x, self[x])
def items(self):
- return list(self.iteritems())
+ return [(x, self[x]) for x in self]
def sync(self, rate=0):
self.sync_rate = rate
if self.iterkeys is database.iterkeys:
# prevent a possible recursive loop
raise NotImplementedError(self)
return self.iterkeys()
def get(self, k, x=None):
try:
import re
restricts = {}
- for key,match in match_dict.iteritems():
+ for key,match in match_dict.items():
# XXX this sucks.
try:
if isinstance(match, basestring):
if key not in self.__known_keys:
raise InvalidRestriction(key, match, "Key isn't valid")
- for cpv in self.iterkeys():
+ for cpv in self.keys():
cont = True
vals = self[cpv]
- for key, match in restricts.iteritems():
+ for key, match in restricts.items():
if not match(vals[key]):
cont = False
break
return mydict
def get_valid_checksum_keys():
- return hashfunc_map.keys()
+ return list(hashfunc_map.keys())
def get_hash_origin(hashtype):
if hashtype not in hashfunc_map:
basedir."""
if not basedir.endswith("/"):
basedir += "/"
- for myfile, mydata in entries["files"].iteritems():
+ for myfile, mydata in entries["files"].items():
if "cvs" in mydata["status"]:
if pattern.search(mydata["flags"]):
yield basedir+myfile
if recursive:
- for mydir, mydata in entries["dirs"].iteritems():
+ for mydir, mydata in entries["dirs"].items():
for x in findoption(mydata, pattern,
recursive, basedir+mydir):
yield x
mytbz2 = portage.xpak.tbz2(tbz2path)
mydata = mytbz2.get_data()
- for k, v in values.iteritems():
+ for k, v in values.items():
k = _unicode_encode(k,
encoding=_encodings['repo.content'], errors='backslashreplace')
v = _unicode_encode(v,
# from xpak.
if update_pkgindex and os.access(self.pkgdir, os.W_OK):
del pkgindex.packages[:]
- pkgindex.packages.extend(metadata.itervalues())
+ pkgindex.packages.extend(metadata.values())
self._update_pkgindex_header(pkgindex.header)
from portage.util import atomic_ofstream
f = atomic_ofstream(self._pkgindex_file)
# Remote package instances override local package
# if they are not identical.
hash_names = ["SIZE"] + self._pkgindex_hashes
- for cpv, local_metadata in metadata.iteritems():
+ for cpv, local_metadata in metadata.items():
remote_metadata = self._remotepkgs.get(cpv)
if remote_metadata is None:
continue
# earlier ones that correspond to the same name.
porttrees = [x for x in porttrees if x not in identically_named_paths]
ignored_map = {}
- for path, repo_name in identically_named_paths.iteritems():
+ for path, repo_name in identically_named_paths.items():
ignored_map.setdefault(repo_name, []).append(path)
self._ignored_repos = tuple((repo_name, tuple(paths)) \
- for repo_name, paths in ignored_map.iteritems())
+ for repo_name, paths in ignored_map.items())
self.porttrees = porttrees
porttree_root = porttrees[0]
repo_aliases = {}
if local_repo_configs is not None:
default_loc_repo_config = local_repo_configs.get('DEFAULT')
- for repo_name, loc_repo_conf in local_repo_configs.iteritems():
+ for repo_name, loc_repo_conf in local_repo_configs.items():
if loc_repo_conf.aliases is not None:
for alias in loc_repo_conf.aliases:
overridden_alias = repo_aliases.get(alias)
i = metadata
if hasattr(metadata, "iteritems"):
- i = metadata.iteritems()
+ i = metadata.items()
metadata = dict(i)
if metadata.get("INHERITED", False):
all_uris = []
all_files = []
- for filename, uris in uri_map.iteritems():
+ for filename, uris in uri_map.items():
for uri in uris:
all_uris.append(uri)
all_files.append(filename)
y == "CVS":
continue
d[x+"/"+y] = None
- l = d.keys()
+ l = list(d.keys())
l.sort()
return l
(mycp, self.mysettings["PORTAGE_CONFIGROOT"]), noiselevel=-1)
mylist = []
else:
- mylist = d.keys()
+ mylist = list(d.keys())
# Always sort in ascending order here since it's handy
# and the result can be easily cached and reused.
self._cpv_sort_ascending(mylist)
rValue = []
if not self._libs:
self.rebuild()
- for arch_map in self._libs.itervalues():
- for soname_map in arch_map.itervalues():
+ for arch_map in self._libs.values():
+ for soname_map in arch_map.values():
for obj_key in soname_map.providers:
rValue.extend(self._obj_properties[obj_key][4])
return rValue
self.invalidentry(self.getpath(y))
continue
d[mysplit[0]+"/"+mysplit[1]] = None
- return d.keys()
+ return list(d.keys())
def checkblockers(self, origdep):
pass
cache_valid = cache_mtime == mydir_mtime
if cache_valid:
# Migrate old metadata to unicode.
- for k, v in metadata.iteritems():
+ for k, v in metadata.items():
metadata[k] = _unicode_decode(v,
encoding=_encodings['repo.content'], errors='replace')
treetype="vartree", vartree=self.vartree)
if not mylink.exists():
raise KeyError(cpv)
- for k, v in values.iteritems():
+ for k, v in values.items():
if v:
mylink.setfile(k, v)
else:
def getFileOwnerMap(self, path_iter):
owners = self.get_owners(path_iter)
file_owners = {}
- for pkg_dblink, files in owners.iteritems():
+ for pkg_dblink, files in owners.items():
for f in files:
owner_set = file_owners.get(f)
if owner_set is None:
cpv_lib_map = self._find_unused_preserved_libs()
if cpv_lib_map:
self._remove_preserved_libs(cpv_lib_map)
- for cpv, removed in cpv_lib_map.iteritems():
+ for cpv, removed in cpv_lib_map.items():
if not self.vartree.dbapi.cpv_exists(cpv):
for dblnk in others_in_slot:
if dblnk.mycpv == cpv:
if pkgfiles:
self.updateprotect()
- mykeys = pkgfiles.keys()
+ mykeys = list(pkgfiles.keys())
mykeys.sort()
mykeys.reverse()
# Create consumer nodes and add them to the graph.
# Note that consumers can also be providers.
- for provider_node, consumers in consumer_map.iteritems():
+ for provider_node, consumers in consumer_map.items():
for c in consumers:
if self.isowner(c, root):
continue
return node
linkmap = self.vartree.dbapi.linkmap
- for cpv, plibs in plib_dict.iteritems():
+ for cpv, plibs in plib_dict.items():
for f in plibs:
path_cpv_map[f] = cpv
preserved_node = path_to_node(f)
os = _os_merge
files_to_remove = set()
- for files in cpv_lib_map.itervalues():
+ for files in cpv_lib_map.values():
files_to_remove.update(files)
files_to_remove = sorted(files_to_remove)
showMessage = self._display_merge
plib_dict = self.vartree.dbapi.plib_registry.getPreservedLibs()
plib_cpv_map = {}
plib_paths = set()
- for cpv, paths in plib_dict.iteritems():
+ for cpv, paths in plib_dict.items():
plib_paths.update(paths)
for f in paths:
plib_cpv_map[f] = cpv
k = (s.st_dev, s.st_ino)
inode_map.setdefault(k, []).append((path, s))
suspicious_hardlinks = []
- for path_list in inode_map.itervalues():
+ for path_list in inode_map.values():
path, s = path_list[0]
if len(path_list) == s.st_nlink:
# All hardlinks seem to be owned by this package.
owners = self.vartree.dbapi._owners.get_owners(collisions)
self.vartree.dbapi.flush_cache()
- for pkg, owned_files in owners.iteritems():
+ for pkg, owned_files in owners.items():
cpv = pkg.mycpv
msg = []
msg.append("%s" % cpv)
# and update the contents of the packages that owned them.
plib_registry = self.vartree.dbapi.plib_registry
plib_dict = plib_registry.getPreservedLibs()
- for cpv, paths in plib_collisions.iteritems():
+ for cpv, paths in plib_collisions.items():
if cpv not in plib_dict:
continue
if cpv == self.mycpv:
cpv_lib_map = self._find_unused_preserved_libs()
if cpv_lib_map:
self._remove_preserved_libs(cpv_lib_map)
- for cpv, removed in cpv_lib_map.iteritems():
+ for cpv, removed in cpv_lib_map.items():
if not self.vartree.dbapi.cpv_exists(cpv):
continue
self.vartree.dbapi.removeFromContents(cpv, removed)
curval = 0
if onProgress:
onProgress(maxval, 0)
- paths = contents.keys()
+ paths = list(contents.keys())
paths.sort()
for path in paths:
curval += 1
return list(self.cpdict)
def cpv_all(self):
- return self.cpvdict.keys()
+ return list(self.cpvdict.keys())
def cpv_inject(self, mycpv, metadata=None):
"""Adds a cpv to the list of available packages. See the
# We omit items that will lead to unreasonable bloat of the trace
# output (and resulting log file).
- for k, v in my_locals.iteritems():
+ for k, v in my_locals.items():
my_repr = repr(v)
if len(my_repr) > self.max_repr_length:
my_locals[k] = "omitted"
self.required = frozenset(chain(
enabled_flags,
disabled_flags,
- *conditional.values()
+ *list(conditional.values())
))
self.enabled = frozenset(enabled_flags)
self.disabled = frozenset(disabled_flags)
self.conditional = None
- for v in conditional.itervalues():
+ for v in conditional.values():
if v:
- for k, v in conditional.iteritems():
+ for k, v in conditional.items():
conditional[k] = frozenset(v)
self.conditional = conditional
break
# Handle quote removal here, since KeyValuePairFileLoader doesn't do that.
quotes = "\"'"
- for k, v in opts.iteritems():
+ for k, v in opts.items():
if v[:1] in quotes and v[:1] == v[-1:]:
opts[k] = v[1:-1]
def is_eclass_data_valid(self, ec_dict):
if not isinstance(ec_dict, dict):
return False
- for eclass, tup in ec_dict.iteritems():
+ for eclass, tup in ec_dict.items():
cached_data = self.eclasses.get(eclass, None)
""" Only use the mtime for validation since the probability of a
collision is small and, depending on the cache implementation, the
else:
all_logentries[cpv] = ebuild_logentries
- for key in _preserve_logentries.keys():
+ for key in list(_preserve_logentries.keys()):
if key in all_logentries:
all_logentries[key] = _merge_logentries(_preserve_logentries[key], all_logentries[key])
else:
listener(mysettings, str(key), default_logentries, default_fulllog)
# pass the processing to the individual modules
- for s, levels in logsystems.iteritems():
+ for s, levels in logsystems.items():
# allow per module overrides of PORTAGE_ELOG_CLASSES
if levels:
mod_logentries = filter_loglevels(all_logentries[key], levels)
an older version of portage will import the module from a newer version
when it upgrades itself."""
global _items
- for mysettings, items in _items.itervalues():
+ for mysettings, items in _items.values():
_finalize(mysettings, items)
_items.clear()
mybody += "- %s\n" % key
mymessage = portage.mail.create_message(myfrom, myrecipient, mysubject,
- mybody, attachments=items.values())
+ mybody, attachments=list(items.values()))
def timeout_handler(signum, frame):
raise PortageException("Timeout in finalize() for elog system 'mail_summary'")
if not mycpv:
continue
if self._default_pkg_data:
- for k, v in self._default_pkg_data.iteritems():
+ for k, v in self._default_pkg_data.items():
d.setdefault(k, v)
if self._inherited_keys:
for k in self._inherited_keys:
if self.modified:
self.header["TIMESTAMP"] = str(long(time.time()))
self.header["PACKAGES"] = str(len(self.packages))
- keys = self.header.keys()
+ keys = list(self.header.keys())
keys.sort()
self._writepkgindex(pkgfile, [(k, self.header[k]) \
for k in keys if self.header[k]])
if v is not None and v == metadata.get(k):
del metadata[k]
if self._default_pkg_data:
- for k, v in self._default_pkg_data.iteritems():
+ for k, v in self._default_pkg_data.items():
if metadata.get(k) == v:
metadata.pop(k, None)
- keys = metadata.keys()
+ keys = list(metadata.keys())
keys.sort()
self._writepkgindex(pkgfile,
[(k, metadata[k]) for k in keys if metadata[k]])
class ManifestEntry(object):
__slots__ = ("type", "name", "hashes")
def __init__(self, **kwargs):
- for k, v in kwargs.iteritems():
+ for k, v in kwargs.items():
setattr(self, k, v)
class Manifest2Entry(ManifestEntry):
def __str__(self):
myline = " ".join([self.type, self.name, str(self.hashes["size"])])
- myhashkeys = self.hashes.keys()
+ myhashkeys = list(self.hashes.keys())
myhashkeys.remove("size")
myhashkeys.sort()
for h in myhashkeys:
return myhashdict
def _createManifestEntries(self):
- mytypes = self.fhashdict.keys()
+ mytypes = list(self.fhashdict.keys())
mytypes.sort()
for t in mytypes:
- myfiles = self.fhashdict[t].keys()
+ myfiles = list(self.fhashdict[t].keys())
myfiles.sort()
for f in myfiles:
myentry = Manifest2Entry(
type=t, name=f, hashes=self.fhashdict[t][f].copy())
- myhashkeys = myentry.hashes.keys()
+ myhashkeys = list(myentry.hashes.keys())
myhashkeys.sort()
for h in myhashkeys:
if h not in ["size"] + portage.const.MANIFEST2_HASH_FUNCTIONS:
'profile' : profile }
all_match = True
- for values in self.restrictions.itervalues():
+ for values in self.restrictions.values():
any_match = False
for restriction in values:
if restriction.checkRestriction(**kwargs):
restricts = { _installedRE : DisplayInstalledRestriction,
_profileRE : DisplayProfileRestriction,
_keywordRE : DisplayKeywordRestriction }
- for regex, restriction in restricts.iteritems():
+ for regex, restriction in restricts.items():
match = regex.match(line)
if match:
restrict = restriction(match.groups()[0].strip())
codes = object.__getattribute__(codes, '_attr')
_styles = object.__getattribute__(_styles, '_attr')
- for k, v in codes.iteritems():
+ for k, v in codes.items():
codes[k] = _unicode_decode(v)
- for k, v in _styles.iteritems():
+ for k, v in _styles.items():
_styles[k] = _unicode_decode(v)
try:
# Avoid a potential UnicodeEncodeError from os.execve().
env_bytes = {}
- for k, v in env.iteritems():
+ for k, v in env.items():
env_bytes[_unicode_encode(k, encoding=_encodings['content'])] = \
_unicode_encode(v, encoding=_encodings['content'])
env = env_bytes
object.__getattribute__(proxy, '_get_target')()
modules = sys.modules
- for name, proxy_list in list(_module_proxies.iteritems()):
+ for name, proxy_list in list(_module_proxies.items()):
if name not in modules:
continue
# First delete this name from the dict so that
rev_transform[atom] = atom
else:
rev_transform[Atom(atom.replace(atom.cp, pkg.cp, 1))] = atom
- best_match = best_match_to_list(pkg, rev_transform.iterkeys())
+ best_match = best_match_to_list(pkg, rev_transform.keys())
if best_match:
return rev_transform[best_match]
return None
# in the latest new slot that may be available.
atoms.append(a)
else:
- atoms = data.keys()
+ atoms = list(data.keys())
self._setAtoms(atoms)
self._mtime = mtime
def load(self):
data, errors = self.loader.load()
- self._setAtoms(data.keys())
+ self._setAtoms(list(data.keys()))
def singleBuilder(self, options, settings, trees):
if not "filename" in options:
raise
del e
data = {}
- atoms = data.keys()
+ atoms = list(data.keys())
self._mtime = mtime
atoms_changed = True
else:
raise
del e
data = {}
- nonatoms = data.keys()
+ nonatoms = list(data.keys())
self._mtime2 = mtime
atoms_changed = True
else:
consumers = set()
if reg:
plib_dict = reg.getPreservedLibs()
- for libs in plib_dict.itervalues():
+ for libs in plib_dict.values():
for lib in libs:
if self.debug:
print(lib)
consumers.update(self.dbapi.linkmap.findConsumers(lib))
# Don't rebuild packages just because they contain preserved
# libs that happen to be consumers of other preserved libs.
- for libs in plib_dict.itervalues():
+ for libs in plib_dict.values():
consumers.difference_update(libs)
else:
return
f = PackageKeywordsFile(self.fname)
f.load()
i = 0
- for cpv, keyword in f.iteritems():
+ for cpv, keyword in f.items():
self.assertEqual( cpv, self.cpv[i] )
[k for k in keyword if self.assertTrue(k in self.keywords)]
i = i + 1
try:
f = PackageUseFile(self.fname)
f.load()
- for cpv, use in f.iteritems():
+ for cpv, use in f.items():
self.assertEqual( cpv, self.cpv )
[flag for flag in use if self.assertTrue(flag in self.useflags)]
finally:
def BuildFile(self):
fd, self.fname = mkstemp()
f = os.fdopen(fd, 'w')
- for k, v in self.items.iteritems():
+ for k, v in self.items.items():
f.write('%s=%s\n' % (k,v))
f.close()
"""Performs update commands and returns a
dict containing only the updated items."""
updated_items = {}
- for k, mycontent in mydata.iteritems():
+ for k, mycontent in mydata.items():
k_unicode = _unicode_decode(k,
encoding=_encodings['repo.content'], errors='replace')
if k_unicode not in ignored_dbentries:
mode='r', encoding=_encodings['repo.content'],
errors='replace').read()
updated_items = update_dbentries(update_iter, mydata)
- for myfile, mycontent in updated_items.iteritems():
+ for myfile, mycontent in updated_items.items():
file_path = os.path.join(dbdir, myfile)
write_atomic(file_path, mycontent, encoding=_encodings['repo.content'])
return len(updated_items) > 0
# update /etc/portage/packages.*
ignore_line_re = re.compile(r'^#|^\s*$')
for update_cmd in update_iter:
- for x, contents in file_contents.iteritems():
+ for x, contents in file_contents.items():
for pos, line in enumerate(contents):
if ignore_line_re.match(line):
continue
return None
if final_dict is None:
final_dict = {}
- for y in mydict.keys():
+ for y in list(mydict.keys()):
if True:
if y in final_dict and (incremental or (y in incrementals)):
final_dict[y] += " "+mydict[y][:]
new_list[y] = True
else:
new_list[y] = True
- return new_list.keys()
+ return list(new_list.keys())
def grabdict(myfilename, juststrings=0, empty=0, recursive=0, incremental=1):
"""
else:
newdict[myline[0]] = myline[1:]
if juststrings:
- for k, v in newdict.iteritems():
+ for k, v in newdict.items():
newdict[k] = " ".join(v)
return newdict
# "RuntimeError: dictionary changed size during iteration"
# when an invalid atom is deleted.
atoms = {}
- for k, v in pkgs.iteritems():
+ for k, v in pkgs.items():
try:
k = Atom(k)
except InvalidAtom:
"""Create an xpack segement from a map object."""
mydata_encoded = {}
- for k, v in mydata.iteritems():
+ for k, v in mydata.items():
k = _unicode_encode(k,
encoding=_encodings['repo.content'], errors='backslashreplace')
v = _unicode_encode(v,
indexpos=0
dataglob = _unicode_encode('')
datapos=0
- for x, newglob in mydata.iteritems():
+ for x, newglob in mydata.items():
mydatasize=len(newglob)
indexglob=indexglob+encodeint(len(x))+x+encodeint(datapos)+encodeint(mydatasize)
indexpos=indexpos+4+len(x)+4+4
full = options.mode == 'full'
# we only want key value pairs where value > 0
for category, number in \
- filter(lambda myitem: myitem[1] > 0, stats.iteritems()):
+ filter(lambda myitem: myitem[1] > 0, stats.items()):
formatter.add_literal_data(_unicode_decode(" " + category.ljust(30)))
if category in qawarnings:
formatter.push_style("WARN")