-# Copyright 2010 Gentoo Foundation
+# Copyright 2010-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
__all__ = ['fetch']
-import codecs
import errno
+import io
import logging
import random
import re
-import shutil
import stat
import sys
import tempfile
+try:
+ from urllib.parse import urlparse, urlunparse
+except ImportError:
+ from urlparse import urlparse, urlunparse
import portage
portage.proxy.lazyimport.lazyimport(globals(),
'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
)
-from portage import OrderedDict, os, selinux, _encodings, \
+from portage import OrderedDict, os, selinux, shutil, _encodings, \
_shell_quote, _unicode_encode
-from portage.checksum import perform_md5, verify_all
+from portage.checksum import (hashfunc_map, perform_md5, verify_all,
+ _filter_unaccelarated_hashes, _hash_filter, _apply_hash_filter)
from portage.const import BASH_BINARY, CUSTOM_MIRRORS_FILE, \
GLOBAL_CONFIG_PATH
from portage.data import portage_gid, portage_uid, secpass, userpriv_groups
from portage.exception import FileNotFound, OperationNotPermitted, \
- PermissionDenied, PortageException, TryAgain
+ PortageException, TryAgain
from portage.localization import _
from portage.locks import lockfile, unlockfile
-from portage.manifest import Manifest
from portage.output import colorize, EOutput
from portage.util import apply_recursive_permissions, \
apply_secpass_permissions, ensure_dirs, grabdict, shlex_split, \
("umask", 0o02),
)
+def _hide_url_passwd(url):
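+	# Mask the password in URLs of the form //user:pass@host, e.g.
+	# 'ftp://u:secret@host/f' -> 'ftp://u:*password*@host/f'.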
+ return re.sub(r'//(.+):.+@(.+)', r'//\1:*password*@\2', url)
+
def _spawn_fetch(settings, args, **kwargs):
"""
Spawn a process with appropriate settings for fetching, including
if "fd_pipes" not in kwargs:
kwargs["fd_pipes"] = {
- 0 : sys.stdin.fileno(),
- 1 : sys.stdout.fileno(),
- 2 : sys.stdout.fileno(),
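+			# Use the underlying stdio streams, since sys.stdin and
+			# sys.stdout may be reassigned to objects whose fileno()
+			# is unusable in a child process.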
+ 0 : portage._get_stdin().fileno(),
+ 1 : sys.__stdout__.fileno(),
+ 2 : sys.__stdout__.fileno(),
}
if "userfetch" in settings.features and \
- os.getuid() == 0 and portage_gid and portage_uid:
+ os.getuid() == 0 and portage_gid and portage_uid and \
+ hasattr(os, "setgroups"):
kwargs.update(_userpriv_spawn_kwargs)
spawn_func = spawn
return rval
_userpriv_test_write_file_cache = {}
-_userpriv_test_write_cmd_script = "touch %(file_path)s 2>/dev/null ; rval=$? ; " + \
+_userpriv_test_write_cmd_script = ">> %(file_path)s 2>/dev/null ; rval=$? ; " + \
"rm -f %(file_path)s ; exit $rval"
def _userpriv_test_write_file(settings, file_path):
return False
return True
-def _check_distfile(filename, digests, eout, show_errors=1):
+def _check_distfile(filename, digests, eout, show_errors=1, hash_filter=None):
"""
@return a tuple of (match, stat_obj) where match is True if filename
matches all given digests (if any) and stat_obj is a stat result, or
# Zero-byte distfiles are always invalid.
return (False, st)
else:
+ digests = _filter_unaccelarated_hashes(digests)
+ if hash_filter is not None:
+ digests = _apply_hash_filter(digests, hash_filter)
if _check_digests(filename, digests, show_errors=show_errors):
eout.ebegin("%s %s ;-)" % (os.path.basename(filename),
" ".join(sorted(digests))))
'Y' : 80,
}
-def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
+
+def _get_checksum_failure_max_tries(settings, default=5):
+ """
+ Get the maximum number of failed download attempts.
+
+ Generally, downloading the same file repeatedly from
+ every single available mirror is a waste of bandwidth
+ and time, so there needs to be a cap.
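+
+	For example, PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS="3" caps download
+	attempts for a file that repeatedly fails verification at three;
+	unset, malformed, and non-positive values fall back to the default.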
+ """
+ key = 'PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS'
+ v = default
+ try:
+ v = int(settings.get(key, default))
+ except (ValueError, OverflowError):
+ writemsg(_("!!! Variable %s contains "
+ "non-integer value: '%s'\n")
+ % (key, settings[key]),
+ noiselevel=-1)
+ writemsg(_("!!! Using %s default value: %s\n")
+ % (key, default),
+ noiselevel=-1)
+ v = default
+ if v < 1:
+ writemsg(_("!!! Variable %s contains "
+ "value less than 1: '%s'\n")
+ % (key, v),
+ noiselevel=-1)
+ writemsg(_("!!! Using %s default value: %s\n")
+ % (key, default),
+ noiselevel=-1)
+ v = default
+ return v
+
+
+def _get_fetch_resume_size(settings, default='350K'):
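+	"""
+	Parse PORTAGE_FETCH_RESUME_MIN_SIZE into a byte count. Suffixes
+	map to powers of 1024 via _size_suffix_map, so for example
+	"350K" -> 350 * 2**10 and "2M" -> 2 * 2**20 bytes.
+	"""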
+ key = 'PORTAGE_FETCH_RESUME_MIN_SIZE'
+ v = settings.get(key, default)
+ if v is not None:
+ v = "".join(v.split())
+ if not v:
+ # If it's empty, silently use the default.
+ v = default
+ match = _fetch_resume_size_re.match(v)
+ if (match is None or
+ match.group(2).upper() not in _size_suffix_map):
+ writemsg(_("!!! Variable %s contains "
+ "an unrecognized format: '%s'\n")
+ % (key, settings[key]),
+ noiselevel=-1)
+ writemsg(_("!!! Using %s default value: %s\n")
+ % (key, default),
+ noiselevel=-1)
+ v = default
+ match = _fetch_resume_size_re.match(v)
+ v = int(match.group(1)) * \
+ 2 ** _size_suffix_map[match.group(2).upper()]
+ return v
+
+
+def _get_file_uri_tuples(uris):
+ """Return a list of (filename, URI) tuples."""
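+	# For example, {'foo.tgz': ['http://a/foo.tgz']} yields
+	# [('foo.tgz', 'http://a/foo.tgz')]; a plain URI sequence is keyed
+	# by basename, and entries without a URI scheme map to (name, None).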
+ file_uri_tuples = []
+ # Check for 'items' attribute since OrderedDict is not a dict.
+ if hasattr(uris, 'items'):
+ for filename, uri_set in uris.items():
+ for uri in uri_set:
+ file_uri_tuples.append((filename, uri))
+ if not uri_set:
+ file_uri_tuples.append((filename, None))
+ else:
+ for uri in uris:
+ if urlparse(uri).scheme:
+ file_uri_tuples.append(
+ (os.path.basename(uri), uri))
+ else:
+ file_uri_tuples.append(
+ (os.path.basename(uri), None))
+ return file_uri_tuples
+
+
+def _expand_mirror(uri, custom_mirrors=(), third_party_mirrors=()):
+ """
+ Replace the 'mirror://' scheme and netloc in the URI.
+
+ Returns an iterable listing expanded (group, URI) tuples,
+ where the group is either 'custom' or 'third-party'.
+ """
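+	# Illustration (hypothetical mirror list): for
+	# uri='mirror://gentoo/distfiles/foo.tar.gz' and
+	# third_party_mirrors={'gentoo': ['http://mirror.example.org']},
+	# this yields ('third-party',
+	# 'http://mirror.example.org/distfiles/foo.tar.gz').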
+ parsed = urlparse(uri)
+ mirror = parsed.netloc
+ path = parsed.path
+ if path:
+ # Try user-defined mirrors first
+		if mirror in custom_mirrors:
+			for cmirr in custom_mirrors[mirror]:
+				m_uri = urlparse(cmirr)
+				# Keep any path prefix of the mirror URL
+				# (e.g. http://mirror.example.com/gentoo).
+				yield ('custom', urlunparse((
+					m_uri.scheme, m_uri.netloc,
+					m_uri.path.rstrip('/') + path) +
+					parsed[3:]))
+
+		# now try the official mirrors
+		if mirror in third_party_mirrors:
+			uris = []
+			for locmirr in third_party_mirrors[mirror]:
+				m_uri = urlparse(locmirr)
+				uris.append(urlunparse((
+					m_uri.scheme, m_uri.netloc,
+					m_uri.path.rstrip('/') + path) +
+					parsed[3:]))
+ random.shuffle(uris)
+ for uri in uris:
+ yield ('third-party', uri)
+
+ if (not custom_mirrors.get(mirror, []) and
+ not third_party_mirrors.get(mirror, [])):
+ writemsg(
+ _("No known mirror by the name: %s\n")
+ % mirror)
+ else:
+ writemsg(_("Invalid mirror definition in SRC_URI:\n"),
+ noiselevel=-1)
+ writemsg(" %s\n" % uri, noiselevel=-1)
+
+
+def _get_uris(uris, settings, custom_mirrors=(), locations=()):
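+	"""
+	Expand the SRC_URI data in 'uris' into per-file candidate lists:
+	local distfiles paths first, then mirror expansions, then upstream
+	URIs, honoring RESTRICT=fetch/mirror/primaryuri and
+	FEATURES=force-mirror. Returns a (filedict, primaryuri_dict) tuple.
+	"""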
+ restrict = settings.get("PORTAGE_RESTRICT", "").split()
+ restrict_fetch = "fetch" in restrict
+ restrict_mirror = "mirror" in restrict or "nomirror" in restrict
+ force_mirror = (
+ "force-mirror" in settings.features and
+ not restrict_mirror)
+
+ third_party_mirrors = settings.thirdpartymirrors()
+ third_party_mirror_uris = {}
+ filedict = OrderedDict()
+ primaryuri_dict = {}
+ for filename, uri in _get_file_uri_tuples(uris=uris):
+ if filename not in filedict:
+ filedict[filename] = [
+ os.path.join(location, 'distfiles', filename)
+ for location in locations]
+ if uri is None:
+ continue
+ if uri.startswith('mirror://'):
+			# _expand_mirror() returns a generator; materialize it,
+			# since it is consumed twice below.
+			expanded_uris = list(_expand_mirror(
+				uri=uri, custom_mirrors=custom_mirrors,
+				third_party_mirrors=third_party_mirrors))
+			filedict[filename].extend(
+				uri for _, uri in expanded_uris)
+			third_party_mirror_uris.setdefault(filename, []).extend(
+				uri for group, uri in expanded_uris
+				if group == 'third-party')
+ else:
+ if restrict_fetch or force_mirror:
+				# Only fetching from specific mirrors is allowed.
+ continue
+ primaryuris = primaryuri_dict.get(filename)
+ if primaryuris is None:
+ primaryuris = []
+ primaryuri_dict[filename] = primaryuris
+ primaryuris.append(uri)
+
+ # Order primaryuri_dict values to match that in SRC_URI.
+ for uris in primaryuri_dict.values():
+ uris.reverse()
+
+ # Prefer third_party_mirrors over normal mirrors in cases when
+ # the file does not yet exist on the normal mirrors.
+ for filename, uris in third_party_mirror_uris.items():
+ primaryuri_dict.setdefault(filename, []).extend(uris)
+
+ # Now merge primaryuri values into filedict (includes mirrors
+ # explicitly referenced in SRC_URI).
+ if "primaryuri" in restrict:
+ for filename, uris in filedict.items():
+ filedict[filename] = primaryuri_dict.get(filename, []) + uris
+ else:
+ for filename in filedict:
+ filedict[filename] += primaryuri_dict.get(filename, [])
+
+ return filedict, primaryuri_dict
+
+
+def fetch(myuris, mysettings, listonly=0, fetchonly=0,
+ locks_in_subdir=".locks", use_locks=1, try_mirrors=1, digests=None,
+ allow_missing_digests=True):
"fetch files. Will use digest file if available."
if not myuris:
userpriv = secpass >= 2 and "userpriv" in features
# 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
- if "mirror" in restrict or \
- "nomirror" in restrict:
+ restrict_mirror = "mirror" in restrict or "nomirror" in restrict
+ if restrict_mirror:
if ("mirror" in features) and ("lmirror" not in features):
# lmirror should allow you to bypass mirror restrictions.
# XXX: This is not a good thing, and is temporary at best.
print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
return 1
- # Generally, downloading the same file repeatedly from
- # every single available mirror is a waste of bandwidth
- # and time, so there needs to be a cap.
- checksum_failure_max_tries = 5
- v = checksum_failure_max_tries
- try:
- v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
- checksum_failure_max_tries))
- except (ValueError, OverflowError):
- writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
- " contains non-integer value: '%s'\n") % \
- mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
- writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
- "default value: %s\n") % checksum_failure_max_tries,
- noiselevel=-1)
- v = checksum_failure_max_tries
- if v < 1:
- writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
- " contains value less than 1: '%s'\n") % v, noiselevel=-1)
- writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
- "default value: %s\n") % checksum_failure_max_tries,
- noiselevel=-1)
- v = checksum_failure_max_tries
- checksum_failure_max_tries = v
- del v
-
- fetch_resume_size_default = "350K"
- fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
- if fetch_resume_size is not None:
- fetch_resume_size = "".join(fetch_resume_size.split())
- if not fetch_resume_size:
- # If it's undefined or empty, silently use the default.
- fetch_resume_size = fetch_resume_size_default
- match = _fetch_resume_size_re.match(fetch_resume_size)
- if match is None or \
- (match.group(2).upper() not in _size_suffix_map):
- writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
- " contains an unrecognized format: '%s'\n") % \
- mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
- writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
- "default value: %s\n") % fetch_resume_size_default,
- noiselevel=-1)
- fetch_resume_size = None
- if fetch_resume_size is None:
- fetch_resume_size = fetch_resume_size_default
- match = _fetch_resume_size_re.match(fetch_resume_size)
- fetch_resume_size = int(match.group(1)) * \
- 2 ** _size_suffix_map[match.group(2).upper()]
+ checksum_failure_max_tries = _get_checksum_failure_max_tries(
+ settings=mysettings)
+ fetch_resume_size = _get_fetch_resume_size(settings=mysettings)
# Behave like the package has RESTRICT="primaryuri" after a
	# couple of checksum failures, to increase the probability
# of success before checksum_failure_max_tries is reached.
checksum_failure_primaryuri = 2
- thirdpartymirrors = mysettings.thirdpartymirrors()
# In the background parallel-fetch process, it's safe to skip checksum
# verification of pre-existing files in $DISTDIR that have the correct
_("!!! For fetching to a read-only filesystem, "
"locking should be turned off.\n")), noiselevel=-1)
writemsg(_("!!! This can be done by adding -distlocks to "
- "FEATURES in /etc/make.conf\n"), noiselevel=-1)
+ "FEATURES in /etc/portage/make.conf\n"), noiselevel=-1)
# use_locks = 0
# local mirrors are always added
if "local" in custommirrors:
mymirrors += custommirrors["local"]
- if "nomirror" in restrict or \
- "mirror" in restrict:
+ if restrict_mirror:
# We don't add any mirrors.
pass
else:
if try_mirrors:
mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
+ hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if hash_filter.transparent:
+ hash_filter = None
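+	# A transparent filter matches every hash name, so drop it and skip
+	# the filtering passes entirely.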
skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
+ if skip_manifest:
+ allow_missing_digests = True
pkgdir = mysettings.get("O")
- if not (pkgdir is None or skip_manifest):
- mydigests = Manifest(
+ if digests is None and not (pkgdir is None or skip_manifest):
+ mydigests = mysettings.repositories.get_repo_for_location(
+ os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
- else:
+ elif digests is None or skip_manifest:
# no digests because fetch was not called for a specific package
mydigests = {}
+ else:
+ mydigests = digests
ro_distdirs = [x for x in \
shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
else:
locations = mymirrors
- file_uri_tuples = []
- # Check for 'items' attribute since OrderedDict is not a dict.
- if hasattr(myuris, 'items'):
- for myfile, uri_set in myuris.items():
- for myuri in uri_set:
- file_uri_tuples.append((myfile, myuri))
- else:
- for myuri in myuris:
- file_uri_tuples.append((os.path.basename(myuri), myuri))
-
- filedict = OrderedDict()
- primaryuri_indexes={}
- primaryuri_dict = {}
- thirdpartymirror_uris = {}
- for myfile, myuri in file_uri_tuples:
- if myfile not in filedict:
- filedict[myfile]=[]
- for y in range(0,len(locations)):
- filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
- if myuri[:9]=="mirror://":
- eidx = myuri.find("/", 9)
- if eidx != -1:
- mirrorname = myuri[9:eidx]
- path = myuri[eidx+1:]
-
- # Try user-defined mirrors first
- if mirrorname in custommirrors:
- for cmirr in custommirrors[mirrorname]:
- filedict[myfile].append(
- cmirr.rstrip("/") + "/" + path)
-
- # now try the official mirrors
- if mirrorname in thirdpartymirrors:
- random.shuffle(thirdpartymirrors[mirrorname])
-
- uris = [locmirr.rstrip("/") + "/" + path \
- for locmirr in thirdpartymirrors[mirrorname]]
- filedict[myfile].extend(uris)
- thirdpartymirror_uris.setdefault(myfile, []).extend(uris)
-
- if not filedict[myfile]:
- writemsg(_("No known mirror by the name: %s\n") % (mirrorname))
- else:
- writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
- writemsg(" %s\n" % (myuri), noiselevel=-1)
- else:
- if restrict_fetch:
- # Only fetch from specific mirrors is allowed.
- continue
- if "primaryuri" in restrict:
- # Use the source site first.
- if myfile in primaryuri_indexes:
- primaryuri_indexes[myfile] += 1
- else:
- primaryuri_indexes[myfile] = 0
- filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
- else:
- filedict[myfile].append(myuri)
- primaryuris = primaryuri_dict.get(myfile)
- if primaryuris is None:
- primaryuris = []
- primaryuri_dict[myfile] = primaryuris
- primaryuris.append(myuri)
-
- # Prefer thirdpartymirrors over normal mirrors in cases when
- # the file does not yet exist on the normal mirrors.
- for myfile, uris in thirdpartymirror_uris.items():
- primaryuri_dict.setdefault(myfile, []).extend(uris)
+ filedict, primaryuri_dict = _get_uris(
+ uris=myuris, settings=mysettings,
+ custom_mirrors=custommirrors, locations=locations)
can_fetch=True
if can_fetch and not fetch_to_ro:
global _userpriv_test_write_file_cache
- dirmode = 0o2070
+ dirmode = 0o070
filemode = 0o60
modemask = 0o2
dir_gid = portage_gid
fetched = 0
orig_digests = mydigests.get(myfile, {})
+
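+		# When digests are mandatory, fail up front if the Manifest
+		# offers no hash types that this system can verify.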
+ if not (allow_missing_digests or listonly):
+ verifiable_hash_types = set(orig_digests).intersection(hashfunc_map)
+ verifiable_hash_types.discard("size")
+ if not verifiable_hash_types:
+ expected = set(hashfunc_map)
+ expected.discard("size")
+ expected = " ".join(sorted(expected))
+ got = set(orig_digests)
+ got.discard("size")
+ got = " ".join(sorted(got))
+ reason = (_("Insufficient data for checksum verification"),
+ got, expected)
+ writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
+ noiselevel=-1)
+ writemsg(_("!!! Reason: %s\n") % reason[0],
+ noiselevel=-1)
+ writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \
+ (reason[1], reason[2]), noiselevel=-1)
+
+ if fetchonly:
+ failed_files.add(myfile)
+ continue
+ else:
+ return 0
+
size = orig_digests.get("size")
if size == 0:
# Zero-byte distfiles are always invalid, so discard their digests.
else:
# check if there is enough space in DISTDIR to completely store myfile
# overestimate the filesize so we aren't bitten by FS overhead
+ vfs_stat = None
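+			# Tolerate statvfs() failure: skip the free-space check
+			# rather than aborting the fetch.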
if size is not None and hasattr(os, "statvfs"):
- vfs_stat = os.statvfs(mysettings["DISTDIR"])
+ try:
+ vfs_stat = os.statvfs(mysettings["DISTDIR"])
+ except OSError as e:
+ writemsg_level("!!! statvfs('%s'): %s\n" %
+ (mysettings["DISTDIR"], e),
+ noiselevel=-1, level=logging.ERROR)
+ del e
+
+ if vfs_stat is not None:
try:
mysize = os.stat(myfile_path).st_size
except OSError as e:
elif userfetch:
has_space = False
- if not has_space:
- writemsg(_("!!! Insufficient space to store %s in %s\n") % \
- (myfile, mysettings["DISTDIR"]), noiselevel=-1)
-
- if has_space_superuser:
- writemsg(_("!!! Insufficient privileges to use "
- "remaining space.\n"), noiselevel=-1)
- if userfetch:
- writemsg(_("!!! You may set FEATURES=\"-userfetch\""
- " in /etc/make.conf in order to fetch with\n"
- "!!! superuser privileges.\n"), noiselevel=-1)
-
if distdir_writable and use_locks:
lock_kwargs = {}
eout = EOutput()
eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
match, mystat = _check_distfile(
- myfile_path, pruned_digests, eout)
+ myfile_path, pruned_digests, eout, hash_filter=hash_filter)
if match:
- if distdir_writable:
+ # Skip permission adjustment for symlinks, since we don't
+ # want to modify anything outside of the primary DISTDIR,
+ # and symlinks typically point to PORTAGE_RO_DISTDIRS.
+ if distdir_writable and not os.path.islink(myfile_path):
try:
apply_secpass_permissions(myfile_path,
gid=portage_gid, mode=0o664, mask=0o2,
for x in ro_distdirs:
filename = os.path.join(x, myfile)
match, mystat = _check_distfile(
- filename, pruned_digests, eout)
+ filename, pruned_digests, eout, hash_filter=hash_filter)
if match:
readonly_file = filename
break
os.symlink(readonly_file, myfile_path)
continue
+ # this message is shown only after we know that
+ # the file is not already fetched
+ if not has_space:
+ writemsg(_("!!! Insufficient space to store %s in %s\n") % \
+ (myfile, mysettings["DISTDIR"]), noiselevel=-1)
+
+ if has_space_superuser:
+ writemsg(_("!!! Insufficient privileges to use "
+ "remaining space.\n"), noiselevel=-1)
+ if userfetch:
+ writemsg(_("!!! You may set FEATURES=\"-userfetch\""
+ " in /etc/portage/make.conf in order to fetch with\n"
+ "!!! superuser privileges.\n"), noiselevel=-1)
+
if fsmirrors and not os.path.exists(myfile_path) and has_space:
for mydir in fsmirrors:
mirror_file = os.path.join(mydir, myfile)
raise
del e
else:
- try:
- apply_secpass_permissions(
- myfile_path, gid=portage_gid, mode=0o664, mask=0o2,
- stat_cached=mystat)
- except PortageException as e:
- if not os.access(myfile_path, os.R_OK):
- writemsg(_("!!! Failed to adjust permissions:"
- " %s\n") % str(e), noiselevel=-1)
+ # Skip permission adjustment for symlinks, since we don't
+ # want to modify anything outside of the primary DISTDIR,
+ # and symlinks typically point to PORTAGE_RO_DISTDIRS.
+ if not os.path.islink(myfile_path):
+ try:
+ apply_secpass_permissions(myfile_path,
+ gid=portage_gid, mode=0o664, mask=0o2,
+ stat_cached=mystat)
+ except PortageException as e:
+ if not os.access(myfile_path, os.R_OK):
+ writemsg(_("!!! Failed to adjust permissions:"
+ " %s\n") % (e,), noiselevel=-1)
# If the file is empty then it's obviously invalid. Remove
# the empty file and try to download if possible.
eout.eend(0)
continue
else:
- verified_ok, reason = verify_all(
- myfile_path, mydigests[myfile])
+ digests = _filter_unaccelarated_hashes(mydigests[myfile])
+ if hash_filter is not None:
+ digests = _apply_hash_filter(digests, hash_filter)
+ verified_ok, reason = verify_all(myfile_path, digests)
if not verified_ok:
writemsg(_("!!! Previously fetched"
" file: '%s'\n") % myfile, noiselevel=-1)
eout = EOutput()
eout.quiet = \
mysettings.get("PORTAGE_QUIET", None) == "1"
- digests = mydigests.get(myfile)
if digests:
digests = list(digests)
digests.sort()
protocol = loc[0:loc.find("://")]
global_config_path = GLOBAL_CONFIG_PATH
- if mysettings['EPREFIX']:
- global_config_path = os.path.join(mysettings['EPREFIX'],
+ if portage.const.EPREFIX:
+ global_config_path = os.path.join(portage.const.EPREFIX,
GLOBAL_CONFIG_PATH.lstrip(os.sep))
missing_file_param = False
locfetch=fetchcommand
command_var = fetchcommand_var
writemsg_stdout(_(">>> Downloading '%s'\n") % \
- re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc))
+ _hide_url_passwd(loc))
variables = {
- "DISTDIR": mysettings["DISTDIR"],
"URI": loc,
"FILE": myfile
}
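+			# These variables are optional for fetch commands; omit them
+			# from the expansion when the configuration leaves them unset.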
+ for k in ("DISTDIR", "PORTAGE_SSH_OPTS"):
+ try:
+ variables[k] = mysettings[k]
+ except KeyError:
+ pass
+
myfetch = shlex_split(locfetch)
myfetch = [varexpand(x, mydict=variables) for x in myfetch]
myret = -1
# Fetch failed... Try the next one... Kill 404 files though.
if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
- if html404.search(codecs.open(
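+					# Read via a context manager so the handle is closed
+					# before the possible unlink below.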
+ with io.open(
_unicode_encode(myfile_path,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['content'], errors='replace'
- ).read()):
- try:
- os.unlink(mysettings["DISTDIR"]+"/"+myfile)
- writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
- fetched = 0
- continue
- except (IOError, OSError):
- pass
+ ) as f:
+ if html404.search(f.read()):
+ try:
+ os.unlink(mysettings["DISTDIR"]+"/"+myfile)
+ writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
+ fetched = 0
+ continue
+ except (IOError, OSError):
+ pass
fetched = 1
continue
if True:
# file NOW, for those users who don't have a stable/continuous
# net connection. This way we have a chance to try to download
# from another mirror...
- verified_ok,reason = verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
+ digests = _filter_unaccelarated_hashes(mydigests[myfile])
+ if hash_filter is not None:
+ digests = _apply_hash_filter(digests, hash_filter)
+ verified_ok, reason = verify_all(myfile_path, digests)
if not verified_ok:
- print(reason)
writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
noiselevel=-1)
writemsg(_("!!! Reason: %s\n") % reason[0],
else:
eout = EOutput()
eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
- digests = mydigests.get(myfile)
if digests:
eout.ebegin("%s %s ;-)" % \
(myfile, " ".join(sorted(digests))))
noiselevel=-1)
if listonly:
+ failed_files.add(myfile)
continue
elif fetchonly:
failed_files.add(myfile)