1 # Copyright 2010-2013 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
4 from __future__ import print_function
18 from urllib.parse import urlparse, urlunparse
20 from urlparse import urlparse, urlunparse
23 portage.proxy.lazyimport.lazyimport(globals(),
24 'portage.package.ebuild.config:check_config_instance,config',
25 'portage.package.ebuild.doebuild:doebuild_environment,' + \
27 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
30 from portage import OrderedDict, os, selinux, shutil, _encodings, \
31 _shell_quote, _unicode_encode
32 from portage.checksum import (hashfunc_map, perform_md5, verify_all,
33 _filter_unaccelarated_hashes, _hash_filter, _apply_hash_filter)
34 from portage.const import BASH_BINARY, CUSTOM_MIRRORS_FILE, \
36 from portage.data import portage_gid, portage_uid, secpass, userpriv_groups
37 from portage.exception import FileNotFound, OperationNotPermitted, \
38 PortageException, TryAgain
39 from portage.localization import _
40 from portage.locks import lockfile, unlockfile
41 from portage.output import colorize, EOutput
42 from portage.util import apply_recursive_permissions, \
43 apply_secpass_permissions, ensure_dirs, grabdict, shlex_split, \
44 varexpand, writemsg, writemsg_level, writemsg_stdout
45 from portage.process import spawn
# Keyword arguments applied by _spawn_fetch() in order to drop root
# privileges to the portage user/group before running a fetch command.
# NOTE(review): only the "groups" entry survived in this mangled copy;
# the uid/gid/umask pairs are restored here — confirm against upstream.
_userpriv_spawn_kwargs = (
	("uid",    portage_uid),
	("gid",    portage_gid),
	("groups", userpriv_groups),
	("umask",  0o02),
)
54 def _hide_url_passwd(url):
55 return re.sub(r'//(.+):.+@(.+)', r'//\1:*password*@\2', url)
def _spawn_fetch(settings, args, **kwargs):
	"""
	Spawn a process with appropriate settings for fetching, including
	userfetch and selinux support.

	@param settings: a config instance; its environ() is passed to the
		spawned process
	@param args: the fetch command argv
	@param kwargs: extra keyword arguments forwarded to the spawn function
	@return: the spawn function's return value (exit status)
	"""

	global _userpriv_spawn_kwargs

	# Redirect all output to stdout since some fetchers like
	# wget pollute stderr (if portage detects a problem then it
	# can send its own message to stderr).
	if "fd_pipes" not in kwargs:

		kwargs["fd_pipes"] = {
			0 : portage._get_stdin().fileno(),
			1 : sys.__stdout__.fileno(),
			2 : sys.__stdout__.fileno(),
		}

	# Drop privileges when running as root with the userfetch feature.
	if "userfetch" in settings.features and \
		os.getuid() == 0 and portage_gid and portage_uid and \
		hasattr(os, "setgroups"):
		kwargs.update(_userpriv_spawn_kwargs)

	spawn_func = spawn

	if settings.selinux_enabled():
		spawn_func = selinux.spawn_wrapper(spawn_func,
			settings["PORTAGE_FETCH_T"])

		# bash is an allowed entrypoint, while most binaries are not
		if args[0] != BASH_BINARY:
			args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args

	# Ensure that EBUILD_PHASE is set to fetch, so that config.environ()
	# does not filter the calling environment (which may contain needed
	# proxy variables, as in bug #315421).
	phase_backup = settings.get('EBUILD_PHASE')
	settings['EBUILD_PHASE'] = 'fetch'
	try:
		rval = spawn_func(args, env=settings.environ(), **kwargs)
	finally:
		# Restore the caller's phase even if the spawn raises.
		if phase_backup is None:
			settings.pop('EBUILD_PHASE', None)
		else:
			settings['EBUILD_PHASE'] = phase_backup

	return rval
# Cache of file_path -> bool results from _userpriv_test_write_file(),
# so each path is only probed once per process.
_userpriv_test_write_file_cache = {}
# Shell snippet (formatted with a quoted %(file_path)s, run via bash -c):
# append to the file, save the exit status, remove the file, then exit
# with the saved status.
_userpriv_test_write_cmd_script = ">> %(file_path)s 2>/dev/null ; rval=$? ; " + \
	"rm -f %(file_path)s ; exit $rval"
def _userpriv_test_write_file(settings, file_path):
	"""
	Drop privileges and try to open a file for writing. The file may or
	may not exist, and the parent directory is assumed to exist. The file
	is removed before returning.

	@param settings: A config instance which is passed to _spawn_fetch()
	@param file_path: A file path to open and write.
	@return: True if write succeeds, False otherwise.
	"""

	global _userpriv_test_write_file_cache, _userpriv_test_write_cmd_script

	# Return a cached result from a previous probe of the same path.
	rval = _userpriv_test_write_file_cache.get(file_path)
	if rval is not None:
		return rval

	args = [BASH_BINARY, "-c", _userpriv_test_write_cmd_script % \
		{"file_path" : _shell_quote(file_path)}]

	returncode = _spawn_fetch(settings, args)

	rval = returncode == os.EX_OK
	_userpriv_test_write_file_cache[file_path] = rval
	return rval
def _checksum_failure_temp_file(distdir, basename):
	"""
	First try to find a duplicate temp file with the same checksum and return
	that filename if available. Otherwise, use mkstemp to create a new unique
	filename._checksum_failure_.$RANDOM, rename the given file, and return the
	new filename. In any case, filename will be renamed or removed before this
	function returns a temp filename.
	"""

	filename = os.path.join(distdir, basename)
	size = os.stat(filename).st_size
	# Lazily computed md5 of the offending file, shared across candidates.
	checksum = None
	tempfile_re = re.compile(re.escape(basename) + r'\._checksum_failure_\..*')
	for temp_filename in os.listdir(distdir):
		if not tempfile_re.match(temp_filename):
			continue
		temp_filename = os.path.join(distdir, temp_filename)
		try:
			# Cheap size comparison before hashing anything.
			if size != os.stat(temp_filename).st_size:
				continue
		except OSError:
			continue
		try:
			temp_checksum = perform_md5(temp_filename)
		except FileNotFound:
			# Apparently the temp file disappeared. Let it go.
			continue
		if checksum is None:
			checksum = perform_md5(filename)
		if checksum == temp_checksum:
			# Duplicate found: drop the new copy, reuse the old temp file.
			os.remove(filename)
			return temp_filename

	fd, temp_filename = \
		tempfile.mkstemp("", basename + "._checksum_failure_.", distdir)
	os.close(fd)
	os.rename(filename, temp_filename)
	return temp_filename
def _check_digests(filename, digests, show_errors=1):
	"""
	Check digests and display a message if an error occurs.

	@param filename: path of the file to verify
	@param digests: mapping of hash name -> expected value
	@param show_errors: when true, describe any mismatch on stderr
	@return True if all digests match, False otherwise.
	"""
	verified_ok, reason = verify_all(filename, digests)
	if not verified_ok:
		if show_errors:
			writemsg(_("!!! Previously fetched"
				" file: '%s'\n") % filename, noiselevel=-1)
			writemsg(_("!!! Reason: %s\n") % reason[0],
				noiselevel=-1)
			writemsg(_("!!! Got: %s\n"
				"!!! Expected: %s\n") % \
				(reason[1], reason[2]), noiselevel=-1)
		return False
	return True
def _check_distfile(filename, digests, eout, show_errors=1, hash_filter=None):
	"""
	@return a tuple of (match, stat_obj) where match is True if filename
	matches all given digests (if any) and stat_obj is a stat result, or
	None if the file does not exist.
	"""
	if digests is None:
		digests = {}
	size = digests.get("size")
	if size is not None and len(digests) == 1:
		# Only a size is known, so there is nothing to hash.
		digests = None

	try:
		st = os.stat(filename)
	except OSError:
		return (False, None)
	if size is not None and size != st.st_size:
		return (False, st)
	if not digests:
		if size is not None:
			eout.ebegin(_("%s size ;-)") % os.path.basename(filename))
			eout.eend(0)
		elif st.st_size == 0:
			# Zero-byte distfiles are always invalid.
			return (False, st)
	else:
		digests = _filter_unaccelarated_hashes(digests)
		if hash_filter is not None:
			digests = _apply_hash_filter(digests, hash_filter)
		if _check_digests(filename, digests, show_errors=show_errors):
			eout.ebegin("%s %s ;-)" % (os.path.basename(filename),
				" ".join(sorted(digests))))
			eout.eend(0)
		else:
			return (False, st)
	return (True, st)
229 _fetch_resume_size_re = re.compile('(^[\d]+)([KMGTPEZY]?$)')
244 def _get_checksum_failure_max_tries(settings, default=5):
246 Get the maximum number of failed download attempts.
248 Generally, downloading the same file repeatedly from
249 every single available mirror is a waste of bandwidth
250 and time, so there needs to be a cap.
252 key = 'PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS'
255 v = int(settings.get(key, default))
256 except (ValueError, OverflowError):
257 writemsg(_("!!! Variable %s contains "
258 "non-integer value: '%s'\n")
259 % (key, settings[key]),
261 writemsg(_("!!! Using %s default value: %s\n")
266 writemsg(_("!!! Variable %s contains "
267 "value less than 1: '%s'\n")
270 writemsg(_("!!! Using %s default value: %s\n")
def _get_fetch_resume_size(settings, default='350K'):
	"""
	Parse PORTAGE_FETCH_RESUME_MIN_SIZE into a byte count.

	@param settings: a config instance (mapping-like)
	@param default: fallback value used when the variable is unset,
		empty, or unparseable
	@return: the minimum partial-download size in bytes; partial files
		smaller than this are discarded instead of resumed
	"""
	key = 'PORTAGE_FETCH_RESUME_MIN_SIZE'
	v = settings.get(key, default)
	if v is not None:
		# Strip all whitespace so values like "350 K" parse.
		v = "".join(v.split())
	if not v:
		# If it's empty, silently use the default.
		v = default
	match = _fetch_resume_size_re.match(v)
	if (match is None or
		match.group(2).upper() not in _size_suffix_map):
		writemsg(_("!!! Variable %s contains "
			"an unrecognized format: '%s'\n")
			% (key, settings[key]),
			noiselevel=-1)
		writemsg(_("!!! Using %s default value: %s\n")
			% (key, default),
			noiselevel=-1)
		v = default
		match = _fetch_resume_size_re.match(v)
	v = int(match.group(1)) * \
		2 ** _size_suffix_map[match.group(2).upper()]
	return v
302 def _get_file_uri_tuples(uris):
303 """Return a list of (filename, URI) tuples."""
305 # Check for 'items' attribute since OrderedDict is not a dict.
306 if hasattr(uris, 'items'):
307 for filename, uri_set in uris.items():
309 file_uri_tuples.append((filename, uri))
311 file_uri_tuples.append((filename, None))
314 if urlparse(uri).scheme:
315 file_uri_tuples.append(
316 (os.path.basename(uri), uri))
318 file_uri_tuples.append(
319 (os.path.basename(uri), None))
320 return file_uri_tuples
323 def _expand_mirror(uri, custom_mirrors=(), third_party_mirrors=()):
325 Replace the 'mirror://' scheme and netloc in the URI.
327 Returns an iterable listing expanded (group, URI) tuples,
328 where the group is either 'custom' or 'third-party'.
330 parsed = urlparse(uri)
331 mirror = parsed.netloc
334 # Try user-defined mirrors first
335 if mirror in custom_mirrors:
336 for cmirr in custom_mirrors[mirror]:
337 m_uri = urlparse(cmirr)
338 yield ('custom', urlunparse((
339 m_uri.scheme, m_uri.netloc, path) +
342 # now try the official mirrors
343 if mirror in third_party_mirrors:
345 for locmirr in third_party_mirrors[mirror]:
346 m_uri = urlparse(locmirr)
347 uris.append(urlunparse((
348 m_uri.scheme, m_uri.netloc, path) +
352 yield ('third-party', uri)
354 if (not custom_mirrors.get(mirror, []) and
355 not third_party_mirrors.get(mirror, [])):
357 _("No known mirror by the name: %s\n")
360 writemsg(_("Invalid mirror definition in SRC_URI:\n"),
362 writemsg(" %s\n" % uri, noiselevel=-1)
def _get_uris(uris, settings, custom_mirrors=(), locations=()):
	"""
	Expand a SRC_URI-style mapping into concrete candidate download URIs.

	@param uris: mapping of filename -> URI iterable (or flat URI iterable)
	@param settings: a config instance (provides PORTAGE_RESTRICT,
		features and thirdpartymirrors data)
	@param custom_mirrors: mapping of mirror name -> URI list from the
		user's custom mirrors file
	@param locations: mirror base locations that are tried first
	@return: a (filedict, primaryuri_dict) tuple where filedict maps each
		filename to an ordered list of candidate URIs
	"""
	restrict = settings.get("PORTAGE_RESTRICT", "").split()
	restrict_fetch = "fetch" in restrict
	restrict_mirror = "mirror" in restrict or "nomirror" in restrict
	force_mirror = (
		"force-mirror" in settings.features and
		not restrict_mirror)

	third_party_mirrors = settings.thirdpartymirrors()
	third_party_mirror_uris = {}
	filedict = OrderedDict()
	primaryuri_dict = {}
	for filename, uri in _get_file_uri_tuples(uris=uris):
		if filename not in filedict:
			filedict[filename] = [
				os.path.join(location, 'distfiles', filename)
				for location in locations]
		if uri is None:
			continue
		if uri.startswith('mirror://'):
			# Materialize the generator: it was previously iterated
			# twice, so the second pass always saw an exhausted
			# iterator and third_party_mirror_uris stayed empty.
			expanded_uris = list(_expand_mirror(
				uri=uri, custom_mirrors=custom_mirrors,
				third_party_mirrors=third_party_mirrors))
			filedict[filename].extend(
				uri for _, uri in expanded_uris)
			third_party_mirror_uris.setdefault(filename, []).extend(
				uri for group, uri in expanded_uris
				if group == 'third-party')
		else:
			if restrict_fetch or force_mirror:
				# Only fetch from specific mirrors is allowed.
				continue
			primaryuris = primaryuri_dict.get(filename)
			if primaryuris is None:
				primaryuris = []
				primaryuri_dict[filename] = primaryuris
			primaryuris.append(uri)

	# Order primaryuri_dict values to match that in SRC_URI.
	for uris in primaryuri_dict.values():
		uris.reverse()

	# Prefer third_party_mirrors over normal mirrors in cases when
	# the file does not yet exist on the normal mirrors.
	for filename, uris in third_party_mirror_uris.items():
		primaryuri_dict.setdefault(filename, []).extend(uris)

	# Now merge primaryuri values into filedict (includes mirrors
	# explicitly referenced in SRC_URI).
	if "primaryuri" in restrict:
		for filename, uris in filedict.items():
			filedict[filename] = primaryuri_dict.get(filename, []) + uris
	else:
		for filename in filedict:
			filedict[filename] += primaryuri_dict.get(filename, [])

	return filedict, primaryuri_dict
424 def fetch(myuris, mysettings, listonly=0, fetchonly=0,
425 locks_in_subdir=".locks", use_locks=1, try_mirrors=1, digests=None,
426 allow_missing_digests=True):
427 "fetch files. Will use digest file if available."
432 features = mysettings.features
433 restrict = mysettings.get("PORTAGE_RESTRICT","").split()
435 userfetch = secpass >= 2 and "userfetch" in features
436 userpriv = secpass >= 2 and "userpriv" in features
438 # 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
439 restrict_mirror = "mirror" in restrict or "nomirror" in restrict
441 if ("mirror" in features) and ("lmirror" not in features):
442 # lmirror should allow you to bypass mirror restrictions.
443 # XXX: This is not a good thing, and is temporary at best.
444 print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
447 checksum_failure_max_tries = _get_checksum_failure_max_tries(
449 fetch_resume_size = _get_fetch_resume_size(settings=mysettings)
451 # Behave like the package has RESTRICT="primaryuri" after a
452 # couple of checksum failures, to increase the probablility
453 # of success before checksum_failure_max_tries is reached.
454 checksum_failure_primaryuri = 2
456 # In the background parallel-fetch process, it's safe to skip checksum
457 # verification of pre-existing files in $DISTDIR that have the correct
458 # file size. The parent process will verify their checksums prior to
461 parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
462 if parallel_fetchonly:
465 check_config_instance(mysettings)
467 custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
468 CUSTOM_MIRRORS_FILE), recursive=1)
472 if listonly or ("distlocks" not in features):
476 if "skiprocheck" in features:
479 if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
481 writemsg(colorize("BAD",
482 _("!!! For fetching to a read-only filesystem, "
483 "locking should be turned off.\n")), noiselevel=-1)
484 writemsg(_("!!! This can be done by adding -distlocks to "
485 "FEATURES in /etc/portage/make.conf\n"), noiselevel=-1)
488 # local mirrors are always added
489 if "local" in custommirrors:
490 mymirrors += custommirrors["local"]
493 # We don't add any mirrors.
497 mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
499 hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", ""))
500 if hash_filter.transparent:
502 skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
504 allow_missing_digests = True
505 pkgdir = mysettings.get("O")
506 if digests is None and not (pkgdir is None or skip_manifest):
507 mydigests = mysettings.repositories.get_repo_for_location(
508 os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
509 pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
510 elif digests is None or skip_manifest:
511 # no digests because fetch was not called for a specific package
516 ro_distdirs = [x for x in \
517 shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
521 for x in range(len(mymirrors)-1,-1,-1):
522 if mymirrors[x] and mymirrors[x][0]=='/':
523 fsmirrors += [mymirrors[x]]
526 restrict_fetch = "fetch" in restrict
527 custom_local_mirrors = custommirrors.get("local", [])
529 # With fetch restriction, a normal uri may only be fetched from
530 # custom local mirrors (if available). A mirror:// uri may also
531 # be fetched from specific mirrors (effectively overriding fetch
532 # restriction, but only for specific mirrors).
533 locations = custom_local_mirrors
535 locations = mymirrors
537 filedict, primaryuri_dict = _get_uris(
538 uris=myuris, settings=mysettings,
539 custom_mirrors=custommirrors, locations=locations)
546 if can_fetch and not fetch_to_ro:
547 global _userpriv_test_write_file_cache
551 dir_gid = portage_gid
552 if "FAKED_MODE" in mysettings:
553 # When inside fakeroot, directories with portage's gid appear
554 # to have root's gid. Therefore, use root's gid instead of
555 # portage's gid to avoid spurrious permissions adjustments
556 # when inside fakeroot.
561 for x in distdir_dirs:
562 mydir = os.path.join(mysettings["DISTDIR"], x)
563 write_test_file = os.path.join(
564 mydir, ".__portage_test_write__")
571 if st is not None and stat.S_ISDIR(st.st_mode):
572 if not (userfetch or userpriv):
574 if _userpriv_test_write_file(mysettings, write_test_file):
577 _userpriv_test_write_file_cache.pop(write_test_file, None)
578 if ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
580 # The directory has just been created
581 # and therefore it must be empty.
583 writemsg(_("Adjusting permissions recursively: '%s'\n") % mydir,
586 raise # bail out on the first error that occurs during recursion
587 if not apply_recursive_permissions(mydir,
588 gid=dir_gid, dirmode=dirmode, dirmask=modemask,
589 filemode=filemode, filemask=modemask, onerror=onerror):
590 raise OperationNotPermitted(
591 _("Failed to apply recursive permissions for the portage group."))
592 except PortageException as e:
593 if not os.path.isdir(mysettings["DISTDIR"]):
594 writemsg("!!! %s\n" % str(e), noiselevel=-1)
595 writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
596 writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)
599 not fetch_to_ro and \
600 not os.access(mysettings["DISTDIR"], os.W_OK):
601 writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
605 distdir_writable = can_fetch and not fetch_to_ro
607 restrict_fetch_msg = False
609 for myfile in filedict:
613 1 partially downloaded
614 2 completely downloaded
618 orig_digests = mydigests.get(myfile, {})
620 if not (allow_missing_digests or listonly):
621 verifiable_hash_types = set(orig_digests).intersection(hashfunc_map)
622 verifiable_hash_types.discard("size")
623 if not verifiable_hash_types:
624 expected = set(hashfunc_map)
625 expected.discard("size")
626 expected = " ".join(sorted(expected))
627 got = set(orig_digests)
629 got = " ".join(sorted(got))
630 reason = (_("Insufficient data for checksum verification"),
632 writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
634 writemsg(_("!!! Reason: %s\n") % reason[0],
636 writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \
637 (reason[1], reason[2]), noiselevel=-1)
640 failed_files.add(myfile)
645 size = orig_digests.get("size")
647 # Zero-byte distfiles are always invalid, so discard their digests.
648 del mydigests[myfile]
651 pruned_digests = orig_digests
652 if parallel_fetchonly:
655 pruned_digests["size"] = size
657 myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
659 has_space_superuser = True
662 writemsg_stdout("\n", noiselevel=-1)
664 # check if there is enough space in DISTDIR to completely store myfile
665 # overestimate the filesize so we aren't bitten by FS overhead
667 if size is not None and hasattr(os, "statvfs"):
669 vfs_stat = os.statvfs(mysettings["DISTDIR"])
671 writemsg_level("!!! statvfs('%s'): %s\n" %
672 (mysettings["DISTDIR"], e),
673 noiselevel=-1, level=logging.ERROR)
676 if vfs_stat is not None:
678 mysize = os.stat(myfile_path).st_size
680 if e.errno not in (errno.ENOENT, errno.ESTALE):
684 if (size - mysize + vfs_stat.f_bsize) >= \
685 (vfs_stat.f_bsize * vfs_stat.f_bavail):
687 if (size - mysize + vfs_stat.f_bsize) >= \
688 (vfs_stat.f_bsize * vfs_stat.f_bfree):
689 has_space_superuser = False
691 if not has_space_superuser:
698 if distdir_writable and use_locks:
702 lock_kwargs["flags"] = os.O_NONBLOCK
705 file_lock = lockfile(myfile_path,
706 wantnewlockfile=1, **lock_kwargs)
708 writemsg(_(">>> File '%s' is already locked by "
709 "another fetcher. Continuing...\n") % myfile,
716 eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
717 match, mystat = _check_distfile(
718 myfile_path, pruned_digests, eout, hash_filter=hash_filter)
720 # Skip permission adjustment for symlinks, since we don't
721 # want to modify anything outside of the primary DISTDIR,
722 # and symlinks typically point to PORTAGE_RO_DISTDIRS.
723 if distdir_writable and not os.path.islink(myfile_path):
725 apply_secpass_permissions(myfile_path,
726 gid=portage_gid, mode=0o664, mask=0o2,
728 except PortageException as e:
729 if not os.access(myfile_path, os.R_OK):
730 writemsg(_("!!! Failed to adjust permissions:"
731 " %s\n") % str(e), noiselevel=-1)
735 if distdir_writable and mystat is None:
736 # Remove broken symlinks if necessary.
738 os.unlink(myfile_path)
742 if mystat is not None:
743 if stat.S_ISDIR(mystat.st_mode):
745 _("!!! Unable to fetch file since "
746 "a directory is in the way: \n"
747 "!!! %s\n") % myfile_path,
748 level=logging.ERROR, noiselevel=-1)
751 if mystat.st_size == 0:
754 os.unlink(myfile_path)
757 elif distdir_writable:
758 if mystat.st_size < fetch_resume_size and \
759 mystat.st_size < size:
760 # If the file already exists and the size does not
761 # match the existing digests, it may be that the
762 # user is attempting to update the digest. In this
763 # case, the digestgen() function will advise the
764 # user to use `ebuild --force foo.ebuild manifest`
765 # in order to force the old digests to be replaced.
766 # Since the user may want to keep this file, rename
767 # it instead of deleting it.
768 writemsg(_(">>> Renaming distfile with size "
769 "%d (smaller than " "PORTAGE_FETCH_RESU"
770 "ME_MIN_SIZE)\n") % mystat.st_size)
772 _checksum_failure_temp_file(
773 mysettings["DISTDIR"], myfile)
774 writemsg_stdout(_("Refetching... "
775 "File renamed to '%s'\n\n") % \
776 temp_filename, noiselevel=-1)
777 elif mystat.st_size >= size:
779 _checksum_failure_temp_file(
780 mysettings["DISTDIR"], myfile)
781 writemsg_stdout(_("Refetching... "
782 "File renamed to '%s'\n\n") % \
783 temp_filename, noiselevel=-1)
785 if distdir_writable and ro_distdirs:
787 for x in ro_distdirs:
788 filename = os.path.join(x, myfile)
789 match, mystat = _check_distfile(
790 filename, pruned_digests, eout, hash_filter=hash_filter)
792 readonly_file = filename
794 if readonly_file is not None:
796 os.unlink(myfile_path)
798 if e.errno not in (errno.ENOENT, errno.ESTALE):
801 os.symlink(readonly_file, myfile_path)
804 # this message is shown only after we know that
805 # the file is not already fetched
807 writemsg(_("!!! Insufficient space to store %s in %s\n") % \
808 (myfile, mysettings["DISTDIR"]), noiselevel=-1)
810 if has_space_superuser:
811 writemsg(_("!!! Insufficient privileges to use "
812 "remaining space.\n"), noiselevel=-1)
814 writemsg(_("!!! You may set FEATURES=\"-userfetch\""
815 " in /etc/portage/make.conf in order to fetch with\n"
816 "!!! superuser privileges.\n"), noiselevel=-1)
818 if fsmirrors and not os.path.exists(myfile_path) and has_space:
819 for mydir in fsmirrors:
820 mirror_file = os.path.join(mydir, myfile)
822 shutil.copyfile(mirror_file, myfile_path)
823 writemsg(_("Local mirror has file: %s\n") % myfile)
825 except (IOError, OSError) as e:
826 if e.errno not in (errno.ENOENT, errno.ESTALE):
831 mystat = os.stat(myfile_path)
833 if e.errno not in (errno.ENOENT, errno.ESTALE):
837 # Skip permission adjustment for symlinks, since we don't
838 # want to modify anything outside of the primary DISTDIR,
839 # and symlinks typically point to PORTAGE_RO_DISTDIRS.
840 if not os.path.islink(myfile_path):
842 apply_secpass_permissions(myfile_path,
843 gid=portage_gid, mode=0o664, mask=0o2,
845 except PortageException as e:
846 if not os.access(myfile_path, os.R_OK):
847 writemsg(_("!!! Failed to adjust permissions:"
848 " %s\n") % (e,), noiselevel=-1)
850 # If the file is empty then it's obviously invalid. Remove
851 # the empty file and try to download if possible.
852 if mystat.st_size == 0:
855 os.unlink(myfile_path)
856 except EnvironmentError:
858 elif myfile not in mydigests:
859 # We don't have a digest, but the file exists. We must
860 # assume that it is fully downloaded.
863 if mystat.st_size < mydigests[myfile]["size"] and \
865 fetched = 1 # Try to resume this download.
866 elif parallel_fetchonly and \
867 mystat.st_size == mydigests[myfile]["size"]:
870 mysettings.get("PORTAGE_QUIET") == "1"
872 "%s size ;-)" % (myfile, ))
876 digests = _filter_unaccelarated_hashes(mydigests[myfile])
877 if hash_filter is not None:
878 digests = _apply_hash_filter(digests, hash_filter)
879 verified_ok, reason = verify_all(myfile_path, digests)
881 writemsg(_("!!! Previously fetched"
882 " file: '%s'\n") % myfile, noiselevel=-1)
883 writemsg(_("!!! Reason: %s\n") % reason[0],
885 writemsg(_("!!! Got: %s\n"
886 "!!! Expected: %s\n") % \
887 (reason[1], reason[2]), noiselevel=-1)
888 if reason[0] == _("Insufficient data for checksum verification"):
892 _checksum_failure_temp_file(
893 mysettings["DISTDIR"], myfile)
894 writemsg_stdout(_("Refetching... "
895 "File renamed to '%s'\n\n") % \
896 temp_filename, noiselevel=-1)
900 mysettings.get("PORTAGE_QUIET", None) == "1"
902 digests = list(digests)
905 "%s %s ;-)" % (myfile, " ".join(digests)))
907 continue # fetch any remaining files
909 # Create a reversed list since that is optimal for list.pop().
910 uri_list = filedict[myfile][:]
912 checksum_failure_count = 0
913 tried_locations = set()
916 # Eliminate duplicates here in case we've switched to
917 # "primaryuri" mode on the fly due to a checksum failure.
918 if loc in tried_locations:
920 tried_locations.add(loc)
922 writemsg_stdout(loc+" ", noiselevel=-1)
924 # allow different fetchcommands per protocol
925 protocol = loc[0:loc.find("://")]
927 global_config_path = GLOBAL_CONFIG_PATH
928 if portage.const.EPREFIX:
929 global_config_path = os.path.join(portage.const.EPREFIX,
930 GLOBAL_CONFIG_PATH.lstrip(os.sep))
932 missing_file_param = False
933 fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
934 fetchcommand = mysettings.get(fetchcommand_var)
935 if fetchcommand is None:
936 fetchcommand_var = "FETCHCOMMAND"
937 fetchcommand = mysettings.get(fetchcommand_var)
938 if fetchcommand is None:
940 _("!!! %s is unset. It should "
941 "have been defined in\n!!! %s/make.globals.\n") \
942 % (fetchcommand_var, global_config_path),
943 level=logging.ERROR, noiselevel=-1)
945 if "${FILE}" not in fetchcommand:
947 _("!!! %s does not contain the required ${FILE}"
948 " parameter.\n") % fetchcommand_var,
949 level=logging.ERROR, noiselevel=-1)
950 missing_file_param = True
952 resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
953 resumecommand = mysettings.get(resumecommand_var)
954 if resumecommand is None:
955 resumecommand_var = "RESUMECOMMAND"
956 resumecommand = mysettings.get(resumecommand_var)
957 if resumecommand is None:
959 _("!!! %s is unset. It should "
960 "have been defined in\n!!! %s/make.globals.\n") \
961 % (resumecommand_var, global_config_path),
962 level=logging.ERROR, noiselevel=-1)
964 if "${FILE}" not in resumecommand:
966 _("!!! %s does not contain the required ${FILE}"
967 " parameter.\n") % resumecommand_var,
968 level=logging.ERROR, noiselevel=-1)
969 missing_file_param = True
971 if missing_file_param:
973 _("!!! Refer to the make.conf(5) man page for "
974 "information about how to\n!!! correctly specify "
975 "FETCHCOMMAND and RESUMECOMMAND.\n"),
976 level=logging.ERROR, noiselevel=-1)
977 if myfile != os.path.basename(loc):
983 mysize = os.stat(myfile_path).st_size
985 if e.errno not in (errno.ENOENT, errno.ESTALE):
991 writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile,
993 elif size is None or size > mysize:
994 writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile,
997 writemsg(_("!!! File %s is incorrect size, "
998 "but unable to retry.\n") % myfile, noiselevel=-1)
1003 if fetched != 2 and has_space:
1004 #we either need to resume or start the download
1007 mystat = os.stat(myfile_path)
1008 except OSError as e:
1009 if e.errno not in (errno.ENOENT, errno.ESTALE):
1014 if mystat.st_size < fetch_resume_size:
1015 writemsg(_(">>> Deleting distfile with size "
1016 "%d (smaller than " "PORTAGE_FETCH_RESU"
1017 "ME_MIN_SIZE)\n") % mystat.st_size)
1019 os.unlink(myfile_path)
1020 except OSError as e:
1022 (errno.ENOENT, errno.ESTALE):
1028 writemsg(_(">>> Resuming download...\n"))
1029 locfetch=resumecommand
1030 command_var = resumecommand_var
1033 locfetch=fetchcommand
1034 command_var = fetchcommand_var
1035 writemsg_stdout(_(">>> Downloading '%s'\n") % \
1036 _hide_url_passwd(loc))
1042 for k in ("DISTDIR", "PORTAGE_SSH_OPTS"):
1044 variables[k] = mysettings[k]
1048 myfetch = shlex_split(locfetch)
1049 myfetch = [varexpand(x, mydict=variables) for x in myfetch]
1053 myret = _spawn_fetch(mysettings, myfetch)
1057 apply_secpass_permissions(myfile_path,
1058 gid=portage_gid, mode=0o664, mask=0o2)
1059 except FileNotFound:
1061 except PortageException as e:
1062 if not os.access(myfile_path, os.R_OK):
1063 writemsg(_("!!! Failed to adjust permissions:"
1064 " %s\n") % str(e), noiselevel=-1)
1067 # If the file is empty then it's obviously invalid. Don't
1068 # trust the return value from the fetcher. Remove the
1069 # empty file and try to download again.
1071 if os.stat(myfile_path).st_size == 0:
1072 os.unlink(myfile_path)
1075 except EnvironmentError:
1078 if mydigests is not None and myfile in mydigests:
1080 mystat = os.stat(myfile_path)
1081 except OSError as e:
1082 if e.errno not in (errno.ENOENT, errno.ESTALE):
1088 if stat.S_ISDIR(mystat.st_mode):
1089 # This can happen if FETCHCOMMAND erroneously
1090 # contains wget's -P option where it should
1093 _("!!! The command specified in the "
1094 "%s variable appears to have\n!!! "
1095 "created a directory instead of a "
1096 "normal file.\n") % command_var,
1097 level=logging.ERROR, noiselevel=-1)
1099 _("!!! Refer to the make.conf(5) "
1100 "man page for information about how "
1101 "to\n!!! correctly specify "
1102 "FETCHCOMMAND and RESUMECOMMAND.\n"),
1103 level=logging.ERROR, noiselevel=-1)
1106 # no exception? file exists. let digestcheck() report
1107 # an appropriately for size or checksum errors
1109 # If the fetcher reported success and the file is
1110 # too small, it's probably because the digest is
1111 # bad (upstream changed the distfile). In this
1112 # case we don't want to attempt to resume. Show a
1113 # digest verification failure to that the user gets
1114 # a clue about what just happened.
1115 if myret != os.EX_OK and \
1116 mystat.st_size < mydigests[myfile]["size"]:
1117 # Fetch failed... Try the next one... Kill 404 files though.
1118 if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
1119 html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
1121 _unicode_encode(myfile_path,
1122 encoding=_encodings['fs'], errors='strict'),
1123 mode='r', encoding=_encodings['content'], errors='replace'
1125 if html404.search(f.read()):
1127 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
1128 writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
1131 except (IOError, OSError):
1136 # File is the correct size--check the checksums for the fetched
1137 # file NOW, for those users who don't have a stable/continuous
1138 # net connection. This way we have a chance to try to download
1139 # from another mirror...
1140 digests = _filter_unaccelarated_hashes(mydigests[myfile])
1141 if hash_filter is not None:
1142 digests = _apply_hash_filter(digests, hash_filter)
1143 verified_ok, reason = verify_all(myfile_path, digests)
1145 writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
1147 writemsg(_("!!! Reason: %s\n") % reason[0],
1149 writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \
1150 (reason[1], reason[2]), noiselevel=-1)
1151 if reason[0] == _("Insufficient data for checksum verification"):
1154 _checksum_failure_temp_file(
1155 mysettings["DISTDIR"], myfile)
1156 writemsg_stdout(_("Refetching... "
1157 "File renamed to '%s'\n\n") % \
1158 temp_filename, noiselevel=-1)
1160 checksum_failure_count += 1
1161 if checksum_failure_count == \
1162 checksum_failure_primaryuri:
1163 # Switch to "primaryuri" mode in order
1164 # to increase the probablility of
1167 primaryuri_dict.get(myfile)
1170 reversed(primaryuris))
1171 if checksum_failure_count >= \
1172 checksum_failure_max_tries:
1176 eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
1178 eout.ebegin("%s %s ;-)" % \
1179 (myfile, " ".join(sorted(digests))))
1187 elif mydigests!=None:
1188 writemsg(_("No digest file available and download failed.\n\n"),
1191 if use_locks and file_lock:
1192 unlockfile(file_lock)
1196 writemsg_stdout("\n", noiselevel=-1)
1198 if restrict_fetch and not restrict_fetch_msg:
1199 restrict_fetch_msg = True
1200 msg = _("\n!!! %s/%s"
1201 " has fetch restriction turned on.\n"
1202 "!!! This probably means that this "
1203 "ebuild's files must be downloaded\n"
1204 "!!! manually. See the comments in"
1205 " the ebuild for more information.\n\n") % \
1206 (mysettings["CATEGORY"], mysettings["PF"])
1208 level=logging.ERROR, noiselevel=-1)
1209 elif restrict_fetch:
1213 elif not filedict[myfile]:
1214 writemsg(_("Warning: No mirrors available for file"
1215 " '%s'\n") % (myfile), noiselevel=-1)
1217 writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
1221 failed_files.add(myfile)
1224 failed_files.add(myfile)