1 # Copyright 2010-2011 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
4 from __future__ import print_function
20 portage.proxy.lazyimport.lazyimport(globals(),
21 'portage.package.ebuild.config:check_config_instance,config',
22 'portage.package.ebuild.doebuild:doebuild_environment,' + \
24 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
27 from portage import OrderedDict, os, selinux, _encodings, \
28 _shell_quote, _unicode_encode
29 from portage.checksum import hashfunc_map, perform_md5, verify_all
30 from portage.const import BASH_BINARY, CUSTOM_MIRRORS_FILE, \
32 from portage.data import portage_gid, portage_uid, secpass, userpriv_groups
33 from portage.exception import FileNotFound, OperationNotPermitted, \
34 PortageException, TryAgain
35 from portage.localization import _
36 from portage.locks import lockfile, unlockfile
37 from portage.manifest import Manifest
38 from portage.output import colorize, EOutput
39 from portage.util import apply_recursive_permissions, \
40 apply_secpass_permissions, ensure_dirs, grabdict, shlex_split, \
41 varexpand, writemsg, writemsg_level, writemsg_stdout
42 from portage.process import spawn
# Keyword arguments applied to the spawn function in order to drop
# root privileges to the "portage" user/group for userfetch support.
_userpriv_spawn_kwargs = (
	("groups", userpriv_groups),
def _spawn_fetch(settings, args, **kwargs):
	"""
	Spawn a process with appropriate settings for fetching, including
	userfetch and selinux support.

	@param settings: config instance supplying features, environment and
		SELinux context (PORTAGE_FETCH_T)
	@param args: argv list for the fetch command
	@param kwargs: extra keyword arguments forwarded to the spawn function
	"""
	global _userpriv_spawn_kwargs

	# Redirect all output to stdout since some fetchers like
	# wget pollute stderr (if portage detects a problem then it
	# can send it's own message to stderr).
	if "fd_pipes" not in kwargs:
		kwargs["fd_pipes"] = {
			0 : sys.stdin.fileno(),
			1 : sys.stdout.fileno(),
			2 : sys.stdout.fileno(),

	# Drop privileges only when running as root and the portage ids exist.
	if "userfetch" in settings.features and \
		os.getuid() == 0 and portage_gid and portage_uid:
		kwargs.update(_userpriv_spawn_kwargs)

	if settings.selinux_enabled():
		spawn_func = selinux.spawn_wrapper(spawn_func,
			settings["PORTAGE_FETCH_T"])

		# bash is an allowed entrypoint, while most binaries are not
		if args[0] != BASH_BINARY:
			args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args

	# Ensure that EBUILD_PHASE is set to fetch, so that config.environ()
	# does not filter the calling environment (which may contain needed
	# proxy variables, as in bug #315421).
	phase_backup = settings.get('EBUILD_PHASE')
	settings['EBUILD_PHASE'] = 'fetch'

	rval = spawn_func(args, env=settings.environ(), **kwargs)

	# Restore the caller's EBUILD_PHASE (or remove it if it was unset).
	if phase_backup is None:
		settings.pop('EBUILD_PHASE', None)
		settings['EBUILD_PHASE'] = phase_backup
# Per-path cache of _userpriv_test_write_file() results, so each
# candidate path is only probed once per process.
_userpriv_test_write_file_cache = {}
# Shell snippet for the write probe: append to the target file, save the
# exit status, remove the file, then exit with the saved status.
_userpriv_test_write_cmd_script = ">> %(file_path)s 2>/dev/null ; rval=$? ; " + \
	"rm -f %(file_path)s ; exit $rval"
def _userpriv_test_write_file(settings, file_path):
	"""
	Drop privileges and try to open a file for writing. The file may or
	may not exist, and the parent directory is assumed to exist. The file
	is removed before returning.

	@param settings: A config instance which is passed to _spawn_fetch()
	@param file_path: A file path to open and write.
	@return: True if write succeeds, False otherwise.
	"""

	global _userpriv_test_write_file_cache, _userpriv_test_write_cmd_script
	# Reuse a previously cached probe result for this path, if any.
	rval = _userpriv_test_write_file_cache.get(file_path)

	args = [BASH_BINARY, "-c", _userpriv_test_write_cmd_script % \
		{"file_path" : _shell_quote(file_path)}]

	# The probe runs through _spawn_fetch so it uses the same dropped
	# privileges that a real fetch would use.
	returncode = _spawn_fetch(settings, args)

	rval = returncode == os.EX_OK
	_userpriv_test_write_file_cache[file_path] = rval
def _checksum_failure_temp_file(distdir, basename):
	"""
	First try to find a duplicate temp file with the same checksum and return
	that filename if available. Otherwise, use mkstemp to create a new unique
	filename._checksum_failure_.$RANDOM, rename the given file, and return the
	new filename. In any case, filename will be renamed or removed before this
	function returns a temp filename.
	"""

	filename = os.path.join(distdir, basename)
	size = os.stat(filename).st_size

	tempfile_re = re.compile(re.escape(basename) + r'\._checksum_failure_\..*')
	for temp_filename in os.listdir(distdir):
		if not tempfile_re.match(temp_filename):
		temp_filename = os.path.join(distdir, temp_filename)
			# Cheap size comparison before the expensive md5 below.
			if size != os.stat(temp_filename).st_size:
			temp_checksum = perform_md5(temp_filename)
			# Apparently the temp file disappeared. Let it go.
		checksum = perform_md5(filename)
		if checksum == temp_checksum:

	fd, temp_filename = \
		tempfile.mkstemp("", basename + "._checksum_failure_.", distdir)
	os.rename(filename, temp_filename)
def _check_digests(filename, digests, show_errors=1):
	"""
	Check digests and display a message if an error occurs.

	@param filename: path of the distfile to verify
	@param digests: dict of expected digest values for the file
	@param show_errors: when true, print the reason for any mismatch
	@return True if all digests match, False otherwise.
	"""
	verified_ok, reason = verify_all(filename, digests)
		writemsg(_("!!! Previously fetched"
			" file: '%s'\n") % filename, noiselevel=-1)
		writemsg(_("!!! Reason: %s\n") % reason[0],
		writemsg(_("!!! Got: %s\n"
			"!!! Expected: %s\n") % \
			(reason[1], reason[2]), noiselevel=-1)
def _check_distfile(filename, digests, eout, show_errors=1):
	"""
	@return a tuple of (match, stat_obj) where match is True if filename
	matches all given digests (if any) and stat_obj is a stat result, or
	None if the file does not exist.
	"""
	size = digests.get("size")
	# If "size" is the only digest available, a size check is all we can do.
	if size is not None and len(digests) == 1:

		st = os.stat(filename)
	if size is not None and size != st.st_size:
		eout.ebegin(_("%s size ;-)") % os.path.basename(filename))
	elif st.st_size == 0:
		# Zero-byte distfiles are always invalid.
		if _check_digests(filename, digests, show_errors=show_errors):
			eout.ebegin("%s %s ;-)" % (os.path.basename(filename),
				" ".join(sorted(digests))))
219 _fetch_resume_size_re = re.compile('(^[\d]+)([KMGTPEZY]?$)')
def fetch(myuris, mysettings, listonly=0, fetchonly=0,
	locks_in_subdir=".locks", use_locks=1, try_mirrors=1, digests=None,
	allow_missing_digests=True):
	"""
	Fetch files. Will use digest file if available.

	@param myuris: maps each distfile name to its candidate URIs (any
		object with an items() method), or an iterable of URIs whose
		basenames are used as the distfile names
	@param mysettings: config instance (FEATURES, DISTDIR, RESTRICT,
		mirror settings, FETCHCOMMAND/RESUMECOMMAND, ...)
	@param listonly: only print the URIs instead of downloading
	@param fetchonly: record failures and keep fetching remaining files
	@param locks_in_subdir: not referenced in the visible code -- confirm
	@param use_locks: take a per-file distfile lock while downloading
	@param try_mirrors: also consult GENTOO_MIRRORS for each file
	@param digests: optional digest dict overriding the manifest digests
	@param allow_missing_digests: permit fetching files with no digests
	@return: an int success flag (return statements elided in this view
		-- confirm against the full source)
	"""

	features = mysettings.features
	restrict = mysettings.get("PORTAGE_RESTRICT","").split()

	userfetch = secpass >= 2 and "userfetch" in features
	userpriv = secpass >= 2 and "userpriv" in features

	# 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
	restrict_mirror = "mirror" in restrict or "nomirror" in restrict
		if ("mirror" in features) and ("lmirror" not in features):
			# lmirror should allow you to bypass mirror restrictions.
			# XXX: This is not a good thing, and is temporary at best.
			print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))

	# Generally, downloading the same file repeatedly from
	# every single available mirror is a waste of bandwidth
	# and time, so there needs to be a cap.
	checksum_failure_max_tries = 5
	v = checksum_failure_max_tries
		v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
			checksum_failure_max_tries))
	except (ValueError, OverflowError):
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains non-integer value: '%s'\n") % \
			mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
		v = checksum_failure_max_tries
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains value less than 1: '%s'\n") % v, noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
		v = checksum_failure_max_tries
	checksum_failure_max_tries = v

	fetch_resume_size_default = "350K"
	fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
	if fetch_resume_size is not None:
		fetch_resume_size = "".join(fetch_resume_size.split())
		if not fetch_resume_size:
			# If it's undefined or empty, silently use the default.
			fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
		if match is None or \
			(match.group(2).upper() not in _size_suffix_map):
			writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
				" contains an unrecognized format: '%s'\n") % \
				mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
			writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
				"default value: %s\n") % fetch_resume_size_default,
			fetch_resume_size = None
	if fetch_resume_size is None:
		fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
	# Convert the human-readable value (e.g. "350K") into bytes.
	fetch_resume_size = int(match.group(1)) * \
		2 ** _size_suffix_map[match.group(2).upper()]

	# Behave like the package has RESTRICT="primaryuri" after a
	# couple of checksum failures, to increase the probablility
	# of success before checksum_failure_max_tries is reached.
	checksum_failure_primaryuri = 2
	thirdpartymirrors = mysettings.thirdpartymirrors()

	# In the background parallel-fetch process, it's safe to skip checksum
	# verification of pre-existing files in $DISTDIR that have the correct
	# file size. The parent process will verify their checksums prior to
	parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
	if parallel_fetchonly:

	check_config_instance(mysettings)

	custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
		CUSTOM_MIRRORS_FILE), recursive=1)

	# Distfile locks are pointless when only listing, or when disabled.
	if listonly or ("distlocks" not in features):

	if "skiprocheck" in features:

	if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
			writemsg(colorize("BAD",
				_("!!! For fetching to a read-only filesystem, "
				"locking should be turned off.\n")), noiselevel=-1)
			writemsg(_("!!! This can be done by adding -distlocks to "
				"FEATURES in /etc/make.conf\n"), noiselevel=-1)

	# local mirrors are always added
	if "local" in custommirrors:
		mymirrors += custommirrors["local"]

		# We don't add any mirrors.
			mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]

	skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
		allow_missing_digests = True
	pkgdir = mysettings.get("O")
	if digests is None and not (pkgdir is None or skip_manifest):
		# Load the DIST digests from the repository's manifest.
		mydigests = mysettings.repositories.get_repo_for_location(
			os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
			pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
	elif digests is None or skip_manifest:
		# no digests because fetch was not called for a specific package

	ro_distdirs = [x for x in \
		shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \

	# Separate filesystem-local mirrors (absolute paths) from remote ones.
	for x in range(len(mymirrors)-1,-1,-1):
		if mymirrors[x] and mymirrors[x][0]=='/':
			fsmirrors += [mymirrors[x]]

	restrict_fetch = "fetch" in restrict
	force_mirror = "force-mirror" in features and not restrict_mirror
	custom_local_mirrors = custommirrors.get("local", [])
		# With fetch restriction, a normal uri may only be fetched from
		# custom local mirrors (if available). A mirror:// uri may also
		# be fetched from specific mirrors (effectively overriding fetch
		# restriction, but only for specific mirrors).
		locations = custom_local_mirrors
		locations = mymirrors

	# Check for 'items' attribute since OrderedDict is not a dict.
	if hasattr(myuris, 'items'):
		for myfile, uri_set in myuris.items():
			for myuri in uri_set:
				file_uri_tuples.append((myfile, myuri))
			file_uri_tuples.append((os.path.basename(myuri), myuri))

	filedict = OrderedDict()
	primaryuri_indexes={}
	thirdpartymirror_uris = {}
	for myfile, myuri in file_uri_tuples:
		if myfile not in filedict:
			# Seed the candidate list with the configured mirror locations.
			for y in range(0,len(locations)):
				filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
		if myuri[:9]=="mirror://":
			eidx = myuri.find("/", 9)
				mirrorname = myuri[9:eidx]
				path = myuri[eidx+1:]

				# Try user-defined mirrors first
				if mirrorname in custommirrors:
					for cmirr in custommirrors[mirrorname]:
						filedict[myfile].append(
							cmirr.rstrip("/") + "/" + path)

				# now try the official mirrors
				if mirrorname in thirdpartymirrors:
					random.shuffle(thirdpartymirrors[mirrorname])

					uris = [locmirr.rstrip("/") + "/" + path \
						for locmirr in thirdpartymirrors[mirrorname]]
					filedict[myfile].extend(uris)
					thirdpartymirror_uris.setdefault(myfile, []).extend(uris)

				if not filedict[myfile]:
					writemsg(_("No known mirror by the name: %s\n") % (mirrorname))
				writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
				writemsg(" %s\n" % (myuri), noiselevel=-1)
			if restrict_fetch or force_mirror:
				# Only fetch from specific mirrors is allowed.
			if "primaryuri" in restrict:
				# Use the source site first.
				if myfile in primaryuri_indexes:
					primaryuri_indexes[myfile] += 1
					primaryuri_indexes[myfile] = 0
				filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
				filedict[myfile].append(myuri)
			primaryuris = primaryuri_dict.get(myfile)
			if primaryuris is None:
				primaryuri_dict[myfile] = primaryuris
			primaryuris.append(myuri)

	# Prefer thirdpartymirrors over normal mirrors in cases when
	# the file does not yet exist on the normal mirrors.
	for myfile, uris in thirdpartymirror_uris.items():
		primaryuri_dict.setdefault(myfile, []).extend(uris)

	if can_fetch and not fetch_to_ro:
		global _userpriv_test_write_file_cache
		dir_gid = portage_gid
		if "FAKED_MODE" in mysettings:
			# When inside fakeroot, directories with portage's gid appear
			# to have root's gid. Therefore, use root's gid instead of
			# portage's gid to avoid spurrious permissions adjustments
			# when inside fakeroot.

			for x in distdir_dirs:
				mydir = os.path.join(mysettings["DISTDIR"], x)
				write_test_file = os.path.join(
					mydir, ".__portage_test_write__")

				if st is not None and stat.S_ISDIR(st.st_mode):
					if not (userfetch or userpriv):
					if _userpriv_test_write_file(mysettings, write_test_file):

				_userpriv_test_write_file_cache.pop(write_test_file, None)
				if ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
					# The directory has just been created
					# and therefore it must be empty.

				writemsg(_("Adjusting permissions recursively: '%s'\n") % mydir,
					raise # bail out on the first error that occurs during recursion
				if not apply_recursive_permissions(mydir,
					gid=dir_gid, dirmode=dirmode, dirmask=modemask,
					filemode=filemode, filemask=modemask, onerror=onerror):
					raise OperationNotPermitted(
						_("Failed to apply recursive permissions for the portage group."))
		except PortageException as e:
			if not os.path.isdir(mysettings["DISTDIR"]):
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
				writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)

		not fetch_to_ro and \
		not os.access(mysettings["DISTDIR"], os.W_OK):
		writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],

	distdir_writable = can_fetch and not fetch_to_ro
	restrict_fetch_msg = False

	for myfile in filedict:
		"""
		1 partially downloaded
		2 completely downloaded
		"""
		orig_digests = mydigests.get(myfile, {})

		if not (allow_missing_digests or listonly):
			verifiable_hash_types = set(orig_digests).intersection(hashfunc_map)
			verifiable_hash_types.discard("size")
			if not verifiable_hash_types:
				# No usable checksums for this file -- report a
				# verification failure.
				expected = set(hashfunc_map)
				expected.discard("size")
				expected = " ".join(sorted(expected))
				got = set(orig_digests)
				got = " ".join(sorted(got))
				reason = (_("Insufficient data for checksum verification"),
				writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
				writemsg(_("!!! Reason: %s\n") % reason[0],
				writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \
					(reason[1], reason[2]), noiselevel=-1)

					failed_files.add(myfile)

		size = orig_digests.get("size")
			# Zero-byte distfiles are always invalid, so discard their digests.
			del mydigests[myfile]

		pruned_digests = orig_digests
		if parallel_fetchonly:
				pruned_digests["size"] = size

		myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
		has_space_superuser = True
			writemsg_stdout("\n", noiselevel=-1)

			# check if there is enough space in DISTDIR to completely store myfile
			# overestimate the filesize so we aren't bitten by FS overhead
			if size is not None and hasattr(os, "statvfs"):
					vfs_stat = os.statvfs(mysettings["DISTDIR"])
					writemsg_level("!!! statvfs('%s'): %s\n" %
						(mysettings["DISTDIR"], e),
						noiselevel=-1, level=logging.ERROR)

			if vfs_stat is not None:
					mysize = os.stat(myfile_path).st_size
					if e.errno not in (errno.ENOENT, errno.ESTALE):
				if (size - mysize + vfs_stat.f_bsize) >= \
					(vfs_stat.f_bsize * vfs_stat.f_bavail):
					if (size - mysize + vfs_stat.f_bsize) >= \
						(vfs_stat.f_bsize * vfs_stat.f_bfree):
						has_space_superuser = False

					if not has_space_superuser:

			writemsg(_("!!! Insufficient space to store %s in %s\n") % \
				(myfile, mysettings["DISTDIR"]), noiselevel=-1)

			if has_space_superuser:
				writemsg(_("!!! Insufficient privileges to use "
					"remaining space.\n"), noiselevel=-1)
					writemsg(_("!!! You may set FEATURES=\"-userfetch\""
						" in /etc/make.conf in order to fetch with\n"
						"!!! superuser privileges.\n"), noiselevel=-1)

		if distdir_writable and use_locks:
				lock_kwargs["flags"] = os.O_NONBLOCK

				file_lock = lockfile(myfile_path,
					wantnewlockfile=1, **lock_kwargs)
				writemsg(_(">>> File '%s' is already locked by "
					"another fetcher. Continuing...\n") % myfile,

				eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
				match, mystat = _check_distfile(
					myfile_path, pruned_digests, eout)
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2,
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)

				if distdir_writable and mystat is None:
					# Remove broken symlinks if necessary.
						os.unlink(myfile_path)

				if mystat is not None:
					if stat.S_ISDIR(mystat.st_mode):
							_("!!! Unable to fetch file since "
							"a directory is in the way: \n"
							"!!! %s\n") % myfile_path,
							level=logging.ERROR, noiselevel=-1)

					if mystat.st_size == 0:
								os.unlink(myfile_path)
					elif distdir_writable:
						if mystat.st_size < fetch_resume_size and \
							mystat.st_size < size:
							# If the file already exists and the size does not
							# match the existing digests, it may be that the
							# user is attempting to update the digest. In this
							# case, the digestgen() function will advise the
							# user to use `ebuild --force foo.ebuild manifest`
							# in order to force the old digests to be replaced.
							# Since the user may want to keep this file, rename
							# it instead of deleting it.
							writemsg(_(">>> Renaming distfile with size "
								"%d (smaller than " "PORTAGE_FETCH_RESU"
								"ME_MIN_SIZE)\n") % mystat.st_size)
								_checksum_failure_temp_file(
									mysettings["DISTDIR"], myfile)
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)
						elif mystat.st_size >= size:
								_checksum_failure_temp_file(
									mysettings["DISTDIR"], myfile)
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)

				if distdir_writable and ro_distdirs:
					for x in ro_distdirs:
						filename = os.path.join(x, myfile)
						match, mystat = _check_distfile(
							filename, pruned_digests, eout)
							readonly_file = filename

					if readonly_file is not None:
							os.unlink(myfile_path)
							if e.errno not in (errno.ENOENT, errno.ESTALE):
						os.symlink(readonly_file, myfile_path)

				if fsmirrors and not os.path.exists(myfile_path) and has_space:
					for mydir in fsmirrors:
						mirror_file = os.path.join(mydir, myfile)
							shutil.copyfile(mirror_file, myfile_path)
							writemsg(_("Local mirror has file: %s\n") % myfile)
						except (IOError, OSError) as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):

					mystat = os.stat(myfile_path)
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						apply_secpass_permissions(
							myfile_path, gid=portage_gid, mode=0o664, mask=0o2,
					except PortageException as e:
						if not os.access(myfile_path, os.R_OK):
							writemsg(_("!!! Failed to adjust permissions:"
								" %s\n") % str(e), noiselevel=-1)

					# If the file is empty then it's obviously invalid. Remove
					# the empty file and try to download if possible.
					if mystat.st_size == 0:
								os.unlink(myfile_path)
							except EnvironmentError:
					elif myfile not in mydigests:
						# We don't have a digest, but the file exists. We must
						# assume that it is fully downloaded.
						if mystat.st_size < mydigests[myfile]["size"] and \
							fetched = 1 # Try to resume this download.
						elif parallel_fetchonly and \
							mystat.st_size == mydigests[myfile]["size"]:
								mysettings.get("PORTAGE_QUIET") == "1"
								"%s size ;-)" % (myfile, ))
							verified_ok, reason = verify_all(
								myfile_path, mydigests[myfile])
								writemsg(_("!!! Previously fetched"
									" file: '%s'\n") % myfile, noiselevel=-1)
								writemsg(_("!!! Reason: %s\n") % reason[0],
								writemsg(_("!!! Got: %s\n"
									"!!! Expected: %s\n") % \
									(reason[1], reason[2]), noiselevel=-1)
								if reason[0] == _("Insufficient data for checksum verification"):
										_checksum_failure_temp_file(
											mysettings["DISTDIR"], myfile)
									writemsg_stdout(_("Refetching... "
										"File renamed to '%s'\n\n") % \
										temp_filename, noiselevel=-1)
									mysettings.get("PORTAGE_QUIET", None) == "1"
								digests = mydigests.get(myfile)
									digests = list(digests)
										"%s %s ;-)" % (myfile, " ".join(digests)))
				continue # fetch any remaining files

			# Create a reversed list since that is optimal for list.pop().
			uri_list = filedict[myfile][:]
			checksum_failure_count = 0
			tried_locations = set()
				# Eliminate duplicates here in case we've switched to
				# "primaryuri" mode on the fly due to a checksum failure.
				if loc in tried_locations:
				tried_locations.add(loc)
					writemsg_stdout(loc+" ", noiselevel=-1)
				# allow different fetchcommands per protocol
				protocol = loc[0:loc.find("://")]

				global_config_path = GLOBAL_CONFIG_PATH
				if mysettings['EPREFIX']:
					global_config_path = os.path.join(mysettings['EPREFIX'],
						GLOBAL_CONFIG_PATH.lstrip(os.sep))

				missing_file_param = False
				fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
				fetchcommand = mysettings.get(fetchcommand_var)
				if fetchcommand is None:
					fetchcommand_var = "FETCHCOMMAND"
					fetchcommand = mysettings.get(fetchcommand_var)
					if fetchcommand is None:
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (fetchcommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
				if "${FILE}" not in fetchcommand:
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % fetchcommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
				resumecommand = mysettings.get(resumecommand_var)
				if resumecommand is None:
					resumecommand_var = "RESUMECOMMAND"
					resumecommand = mysettings.get(resumecommand_var)
					if resumecommand is None:
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (resumecommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
				if "${FILE}" not in resumecommand:
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % resumecommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				if missing_file_param:
						_("!!! Refer to the make.conf(5) man page for "
						"information about how to\n!!! correctly specify "
						"FETCHCOMMAND and RESUMECOMMAND.\n"),
						level=logging.ERROR, noiselevel=-1)
					if myfile != os.path.basename(loc):
							mysize = os.stat(myfile_path).st_size
							if e.errno not in (errno.ENOENT, errno.ESTALE):
							writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile,
						elif size is None or size > mysize:
							writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile,
							writemsg(_("!!! File %s is incorrect size, "
								"but unable to retry.\n") % myfile, noiselevel=-1)

				if fetched != 2 and has_space:
					#we either need to resume or start the download
						mystat = os.stat(myfile_path)
							if e.errno not in (errno.ENOENT, errno.ESTALE):
							if mystat.st_size < fetch_resume_size:
								writemsg(_(">>> Deleting distfile with size "
									"%d (smaller than " "PORTAGE_FETCH_RESU"
									"ME_MIN_SIZE)\n") % mystat.st_size)
									os.unlink(myfile_path)
										(errno.ENOENT, errno.ESTALE):
						writemsg(_(">>> Resuming download...\n"))
						locfetch=resumecommand
						command_var = resumecommand_var
						locfetch=fetchcommand
						command_var = fetchcommand_var
					writemsg_stdout(_(">>> Downloading '%s'\n") % \
						re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc))
						"DISTDIR": mysettings["DISTDIR"],

					# Expand DISTDIR/URI/FILE placeholders in the command.
					myfetch = shlex_split(locfetch)
					myfetch = [varexpand(x, mydict=variables) for x in myfetch]

						myret = _spawn_fetch(mysettings, myfetch)

							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2)
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)

					# If the file is empty then it's obviously invalid. Don't
					# trust the return value from the fetcher. Remove the
					# empty file and try to download again.
						if os.stat(myfile_path).st_size == 0:
							os.unlink(myfile_path)
					except EnvironmentError:

					if mydigests is not None and myfile in mydigests:
							mystat = os.stat(myfile_path)
							if e.errno not in (errno.ENOENT, errno.ESTALE):
							if stat.S_ISDIR(mystat.st_mode):
								# This can happen if FETCHCOMMAND erroneously
								# contains wget's -P option where it should
									_("!!! The command specified in the "
									"%s variable appears to have\n!!! "
									"created a directory instead of a "
									"normal file.\n") % command_var,
									level=logging.ERROR, noiselevel=-1)
									_("!!! Refer to the make.conf(5) "
									"man page for information about how "
									"to\n!!! correctly specify "
									"FETCHCOMMAND and RESUMECOMMAND.\n"),
									level=logging.ERROR, noiselevel=-1)

							# no exception? file exists. let digestcheck() report
							# an appropriately for size or checksum errors

							# If the fetcher reported success and the file is
							# too small, it's probably because the digest is
							# bad (upstream changed the distfile). In this
							# case we don't want to attempt to resume. Show a
							# digest verification failure to that the user gets
							# a clue about what just happened.
							if myret != os.EX_OK and \
								mystat.st_size < mydigests[myfile]["size"]:
								# Fetch failed... Try the next one... Kill 404 files though.
								if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
									html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
									if html404.search(io.open(
										_unicode_encode(myfile_path,
										encoding=_encodings['fs'], errors='strict'),
										mode='r', encoding=_encodings['content'], errors='replace'
											os.unlink(mysettings["DISTDIR"]+"/"+myfile)
											writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
										except (IOError, OSError):
								# File is the correct size--check the checksums for the fetched
								# file NOW, for those users who don't have a stable/continuous
								# net connection. This way we have a chance to try to download
								# from another mirror...
								verified_ok,reason = verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
									writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
									writemsg(_("!!! Reason: %s\n") % reason[0],
									writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \
										(reason[1], reason[2]), noiselevel=-1)
									if reason[0] == _("Insufficient data for checksum verification"):
										_checksum_failure_temp_file(
											mysettings["DISTDIR"], myfile)
									writemsg_stdout(_("Refetching... "
										"File renamed to '%s'\n\n") % \
										temp_filename, noiselevel=-1)

									checksum_failure_count += 1
									if checksum_failure_count == \
										checksum_failure_primaryuri:
										# Switch to "primaryuri" mode in order
										# to increase the probablility of
											primaryuri_dict.get(myfile)
												reversed(primaryuris))
									if checksum_failure_count >= \
										checksum_failure_max_tries:
									eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
									digests = mydigests.get(myfile)
										eout.ebegin("%s %s ;-)" % \
											(myfile, " ".join(sorted(digests))))
						elif mydigests!=None:
							writemsg(_("No digest file available and download failed.\n\n"),

			# Always release the distfile lock, even on failure.
			if use_locks and file_lock:
				unlockfile(file_lock)

			writemsg_stdout("\n", noiselevel=-1)

			if restrict_fetch and not restrict_fetch_msg:
				restrict_fetch_msg = True
				msg = _("\n!!! %s/%s"
					" has fetch restriction turned on.\n"
					"!!! This probably means that this "
					"ebuild's files must be downloaded\n"
					"!!! manually. See the comments in"
					" the ebuild for more information.\n\n") % \
					(mysettings["CATEGORY"], mysettings["PF"])
					level=logging.ERROR, noiselevel=-1)
			elif restrict_fetch:
			elif not filedict[myfile]:
				writemsg(_("Warning: No mirrors available for file"
					" '%s'\n") % (myfile), noiselevel=-1)
				writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,

				failed_files.add(myfile)
				failed_files.add(myfile)