1 # Copyright 2010-2013 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
4 from __future__ import print_function
18 from urllib.parse import urlparse
20 from urlparse import urlparse
23 portage.proxy.lazyimport.lazyimport(globals(),
24 'portage.package.ebuild.config:check_config_instance,config',
25 'portage.package.ebuild.doebuild:doebuild_environment,' + \
27 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
30 from portage import OrderedDict, os, selinux, shutil, _encodings, \
31 _shell_quote, _unicode_encode
32 from portage.checksum import (hashfunc_map, perform_md5, verify_all,
33 _filter_unaccelarated_hashes, _hash_filter, _apply_hash_filter)
34 from portage.const import BASH_BINARY, CUSTOM_MIRRORS_FILE, \
36 from portage.data import portage_gid, portage_uid, secpass, userpriv_groups
37 from portage.exception import FileNotFound, OperationNotPermitted, \
38 PortageException, TryAgain
39 from portage.localization import _
40 from portage.locks import lockfile, unlockfile
41 from portage.output import colorize, EOutput
42 from portage.util import apply_recursive_permissions, \
43 apply_secpass_permissions, ensure_dirs, grabdict, shlex_split, \
44 varexpand, writemsg, writemsg_level, writemsg_stdout
45 from portage.process import spawn
# Keyword arguments merged into spawn() calls in order to drop root
# privileges for FEATURES=userfetch (runs the fetcher with the portage
# user's supplementary groups).
_userpriv_spawn_kwargs = (
	("groups", userpriv_groups),
54 def _hide_url_passwd(url):
55 return re.sub(r'//(.+):.+@(.+)', r'//\1:*password*@\2', url)
def _spawn_fetch(settings, args, **kwargs):
	"""
	Spawn a process with appropriate settings for fetching, including
	userfetch and selinux support.
	"""

	global _userpriv_spawn_kwargs

	# Redirect all output to stdout since some fetchers like
	# wget pollute stderr (if portage detects a problem then it
	# can send its own message to stderr).
	if "fd_pipes" not in kwargs:
		kwargs["fd_pipes"] = {
			0 : portage._get_stdin().fileno(),
			1 : sys.__stdout__.fileno(),
			2 : sys.__stdout__.fileno(),

	# Drop privileges for FEATURES=userfetch, but only when actually
	# running as root and os.setgroups is available on this platform.
	if "userfetch" in settings.features and \
		os.getuid() == 0 and portage_gid and portage_uid and \
		hasattr(os, "setgroups"):
		kwargs.update(_userpriv_spawn_kwargs)

	# Run the fetcher inside the SELinux PORTAGE_FETCH_T domain
	# when SELinux support is enabled.
	if settings.selinux_enabled():
		spawn_func = selinux.spawn_wrapper(spawn_func,
			settings["PORTAGE_FETCH_T"])

	# bash is an allowed entrypoint, while most binaries are not
	if args[0] != BASH_BINARY:
		args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args

	# Ensure that EBUILD_PHASE is set to fetch, so that config.environ()
	# does not filter the calling environment (which may contain needed
	# proxy variables, as in bug #315421).
	phase_backup = settings.get('EBUILD_PHASE')
	settings['EBUILD_PHASE'] = 'fetch'

	rval = spawn_func(args, env=settings.environ(), **kwargs)

	# Restore the caller's original EBUILD_PHASE value.
	if phase_backup is None:
		settings.pop('EBUILD_PHASE', None)

		settings['EBUILD_PHASE'] = phase_backup
# Per-path memo of _userpriv_test_write_file() results, so the
# write-test subprocess is only spawned once for each path.
_userpriv_test_write_file_cache = {}
# Shell snippet (run via bash -c) that appends to %(file_path)s to test
# write access, removes the test file, and exits with the redirect's
# status so failure to write is reported as a nonzero exit code.
_userpriv_test_write_cmd_script = ">> %(file_path)s 2>/dev/null ; rval=$? ; " + \
	"rm -f %(file_path)s ; exit $rval"
def _userpriv_test_write_file(settings, file_path):
	"""
	Drop privileges and try to open a file for writing. The file may or
	may not exist, and the parent directory is assumed to exist. The file
	is removed before returning.

	@param settings: A config instance which is passed to _spawn_fetch()
	@param file_path: A file path to open and write.
	@return: True if write succeeds, False otherwise.
	"""
	global _userpriv_test_write_file_cache, _userpriv_test_write_cmd_script
	# Reuse a previously computed result for this path, if any.
	rval = _userpriv_test_write_file_cache.get(file_path)

	# Run the write-test shell snippet via bash; _spawn_fetch() drops
	# privileges when FEATURES=userfetch applies.
	args = [BASH_BINARY, "-c", _userpriv_test_write_cmd_script % \
		{"file_path" : _shell_quote(file_path)}]

	returncode = _spawn_fetch(settings, args)

	# A zero exit status means the redirected write succeeded.
	rval = returncode == os.EX_OK
	_userpriv_test_write_file_cache[file_path] = rval
def _checksum_failure_temp_file(distdir, basename):
	"""
	First try to find a duplicate temp file with the same checksum and return
	that filename if available. Otherwise, use mkstemp to create a new unique
	filename._checksum_failure_.$RANDOM, rename the given file, and return the
	new filename. In any case, filename will be renamed or removed before this
	function returns a temp filename.
	"""

	filename = os.path.join(distdir, basename)
	size = os.stat(filename).st_size

	# Match temp files produced by previous checksum failures of this
	# same distfile.
	tempfile_re = re.compile(re.escape(basename) + r'\._checksum_failure_\..*')
	for temp_filename in os.listdir(distdir):
		if not tempfile_re.match(temp_filename):
		temp_filename = os.path.join(distdir, temp_filename)
			# Only candidates with an identical size can be duplicates.
			if size != os.stat(temp_filename).st_size:
			temp_checksum = perform_md5(temp_filename)
			# Apparently the temp file disappeared. Let it go.
		checksum = perform_md5(filename)
		if checksum == temp_checksum:
	# No existing duplicate found: move the file aside under a fresh
	# unique "._checksum_failure_." temp name in the same directory.
	fd, temp_filename = \
		tempfile.mkstemp("", basename + "._checksum_failure_.", distdir)
	os.rename(filename, temp_filename)
def _check_digests(filename, digests, show_errors=1):
	"""
	Check digests and display a message if an error occurs.
	@return True if all digests match, False otherwise.
	"""
	verified_ok, reason = verify_all(filename, digests)
			# Verification failed: report which digest failed and the
			# got/expected values (suppressed when show_errors is false).
			writemsg(_("!!! Previously fetched"
				" file: '%s'\n") % filename, noiselevel=-1)
			writemsg(_("!!! Reason: %s\n") % reason[0],
			writemsg(_("!!! Got: %s\n"
				"!!! Expected: %s\n") % \
				(reason[1], reason[2]), noiselevel=-1)
def _check_distfile(filename, digests, eout, show_errors=1, hash_filter=None):
	"""
	@return a tuple of (match, stat_obj) where match is True if filename
	matches all given digests (if any) and stat_obj is a stat result, or
	None if the file does not exist.
	"""
	size = digests.get("size")
	if size is not None and len(digests) == 1:

		st = os.stat(filename)
	if size is not None and size != st.st_size:

		# Only a size digest is available; report success on size alone.
		eout.ebegin(_("%s size ;-)") % os.path.basename(filename))
	elif st.st_size == 0:
		# Zero-byte distfiles are always invalid.
		# Verify the remaining content hashes (after dropping
		# unaccelerated variants and applying any user hash filter).
		digests = _filter_unaccelarated_hashes(digests)
		if hash_filter is not None:
			digests = _apply_hash_filter(digests, hash_filter)
		if _check_digests(filename, digests, show_errors=show_errors):
			eout.ebegin("%s %s ;-)" % (os.path.basename(filename),
				" ".join(sorted(digests))))
229 _fetch_resume_size_re = re.compile('(^[\d]+)([KMGTPEZY]?$)')
243 def fetch(myuris, mysettings, listonly=0, fetchonly=0,
244 locks_in_subdir=".locks", use_locks=1, try_mirrors=1, digests=None,
245 allow_missing_digests=True):
246 "fetch files. Will use digest file if available."
251 features = mysettings.features
252 restrict = mysettings.get("PORTAGE_RESTRICT","").split()
254 userfetch = secpass >= 2 and "userfetch" in features
255 userpriv = secpass >= 2 and "userpriv" in features
257 # 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
258 restrict_mirror = "mirror" in restrict or "nomirror" in restrict
260 if ("mirror" in features) and ("lmirror" not in features):
261 # lmirror should allow you to bypass mirror restrictions.
262 # XXX: This is not a good thing, and is temporary at best.
263 print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
266 # Generally, downloading the same file repeatedly from
267 # every single available mirror is a waste of bandwidth
268 # and time, so there needs to be a cap.
269 checksum_failure_max_tries = 5
270 v = checksum_failure_max_tries
272 v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
273 checksum_failure_max_tries))
274 except (ValueError, OverflowError):
275 writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
276 " contains non-integer value: '%s'\n") % \
277 mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
278 writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
279 "default value: %s\n") % checksum_failure_max_tries,
281 v = checksum_failure_max_tries
283 writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
284 " contains value less than 1: '%s'\n") % v, noiselevel=-1)
285 writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
286 "default value: %s\n") % checksum_failure_max_tries,
288 v = checksum_failure_max_tries
289 checksum_failure_max_tries = v
292 fetch_resume_size_default = "350K"
293 fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
294 if fetch_resume_size is not None:
295 fetch_resume_size = "".join(fetch_resume_size.split())
296 if not fetch_resume_size:
297 # If it's undefined or empty, silently use the default.
298 fetch_resume_size = fetch_resume_size_default
299 match = _fetch_resume_size_re.match(fetch_resume_size)
300 if match is None or \
301 (match.group(2).upper() not in _size_suffix_map):
302 writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
303 " contains an unrecognized format: '%s'\n") % \
304 mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
305 writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
306 "default value: %s\n") % fetch_resume_size_default,
308 fetch_resume_size = None
309 if fetch_resume_size is None:
310 fetch_resume_size = fetch_resume_size_default
311 match = _fetch_resume_size_re.match(fetch_resume_size)
312 fetch_resume_size = int(match.group(1)) * \
313 2 ** _size_suffix_map[match.group(2).upper()]
315 # Behave like the package has RESTRICT="primaryuri" after a
	# couple of checksum failures, to increase the probability
317 # of success before checksum_failure_max_tries is reached.
318 checksum_failure_primaryuri = 2
319 thirdpartymirrors = mysettings.thirdpartymirrors()
321 # In the background parallel-fetch process, it's safe to skip checksum
322 # verification of pre-existing files in $DISTDIR that have the correct
323 # file size. The parent process will verify their checksums prior to
326 parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
327 if parallel_fetchonly:
330 check_config_instance(mysettings)
332 custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
333 CUSTOM_MIRRORS_FILE), recursive=1)
337 if listonly or ("distlocks" not in features):
341 if "skiprocheck" in features:
344 if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
346 writemsg(colorize("BAD",
347 _("!!! For fetching to a read-only filesystem, "
348 "locking should be turned off.\n")), noiselevel=-1)
349 writemsg(_("!!! This can be done by adding -distlocks to "
350 "FEATURES in /etc/portage/make.conf\n"), noiselevel=-1)
353 # local mirrors are always added
354 if "local" in custommirrors:
355 mymirrors += custommirrors["local"]
358 # We don't add any mirrors.
362 mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
364 hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", ""))
365 if hash_filter.transparent:
367 skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
369 allow_missing_digests = True
370 pkgdir = mysettings.get("O")
371 if digests is None and not (pkgdir is None or skip_manifest):
372 mydigests = mysettings.repositories.get_repo_for_location(
373 os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
374 pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
375 elif digests is None or skip_manifest:
376 # no digests because fetch was not called for a specific package
381 ro_distdirs = [x for x in \
382 shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
386 for x in range(len(mymirrors)-1,-1,-1):
387 if mymirrors[x] and mymirrors[x][0]=='/':
388 fsmirrors += [mymirrors[x]]
391 restrict_fetch = "fetch" in restrict
392 force_mirror = "force-mirror" in features and not restrict_mirror
393 custom_local_mirrors = custommirrors.get("local", [])
395 # With fetch restriction, a normal uri may only be fetched from
396 # custom local mirrors (if available). A mirror:// uri may also
397 # be fetched from specific mirrors (effectively overriding fetch
398 # restriction, but only for specific mirrors).
399 locations = custom_local_mirrors
401 locations = mymirrors
404 # Check for 'items' attribute since OrderedDict is not a dict.
405 if hasattr(myuris, 'items'):
406 for myfile, uri_set in myuris.items():
407 for myuri in uri_set:
408 file_uri_tuples.append((myfile, myuri))
410 file_uri_tuples.append((myfile, None))
413 if urlparse(myuri).scheme:
414 file_uri_tuples.append((os.path.basename(myuri), myuri))
416 file_uri_tuples.append((os.path.basename(myuri), None))
418 filedict = OrderedDict()
420 thirdpartymirror_uris = {}
421 for myfile, myuri in file_uri_tuples:
422 if myfile not in filedict:
424 for y in range(0,len(locations)):
425 filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
428 if myuri[:9]=="mirror://":
429 eidx = myuri.find("/", 9)
431 mirrorname = myuri[9:eidx]
432 path = myuri[eidx+1:]
434 # Try user-defined mirrors first
435 if mirrorname in custommirrors:
436 for cmirr in custommirrors[mirrorname]:
437 filedict[myfile].append(
438 cmirr.rstrip("/") + "/" + path)
440 # now try the official mirrors
441 if mirrorname in thirdpartymirrors:
442 uris = [locmirr.rstrip("/") + "/" + path \
443 for locmirr in thirdpartymirrors[mirrorname]]
445 filedict[myfile].extend(uris)
446 thirdpartymirror_uris.setdefault(myfile, []).extend(uris)
448 if not filedict[myfile]:
449 writemsg(_("No known mirror by the name: %s\n") % (mirrorname))
451 writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
452 writemsg(" %s\n" % (myuri), noiselevel=-1)
454 if restrict_fetch or force_mirror:
455 # Only fetch from specific mirrors is allowed.
457 primaryuris = primaryuri_dict.get(myfile)
458 if primaryuris is None:
460 primaryuri_dict[myfile] = primaryuris
461 primaryuris.append(myuri)
463 # Order primaryuri_dict values to match that in SRC_URI.
464 for uris in primaryuri_dict.values():
467 # Prefer thirdpartymirrors over normal mirrors in cases when
468 # the file does not yet exist on the normal mirrors.
469 for myfile, uris in thirdpartymirror_uris.items():
470 primaryuri_dict.setdefault(myfile, []).extend(uris)
472 # Now merge primaryuri values into filedict (includes mirrors
473 # explicitly referenced in SRC_URI).
474 if "primaryuri" in restrict:
475 for myfile, uris in filedict.items():
476 filedict[myfile] = primaryuri_dict.get(myfile, []) + uris
478 for myfile in filedict:
479 filedict[myfile] += primaryuri_dict.get(myfile, [])
486 if can_fetch and not fetch_to_ro:
487 global _userpriv_test_write_file_cache
491 dir_gid = portage_gid
492 if "FAKED_MODE" in mysettings:
493 # When inside fakeroot, directories with portage's gid appear
494 # to have root's gid. Therefore, use root's gid instead of
			# portage's gid to avoid spurious permissions adjustments
496 # when inside fakeroot.
501 for x in distdir_dirs:
502 mydir = os.path.join(mysettings["DISTDIR"], x)
503 write_test_file = os.path.join(
504 mydir, ".__portage_test_write__")
511 if st is not None and stat.S_ISDIR(st.st_mode):
512 if not (userfetch or userpriv):
514 if _userpriv_test_write_file(mysettings, write_test_file):
517 _userpriv_test_write_file_cache.pop(write_test_file, None)
518 if ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
520 # The directory has just been created
521 # and therefore it must be empty.
523 writemsg(_("Adjusting permissions recursively: '%s'\n") % mydir,
526 raise # bail out on the first error that occurs during recursion
527 if not apply_recursive_permissions(mydir,
528 gid=dir_gid, dirmode=dirmode, dirmask=modemask,
529 filemode=filemode, filemask=modemask, onerror=onerror):
530 raise OperationNotPermitted(
531 _("Failed to apply recursive permissions for the portage group."))
532 except PortageException as e:
533 if not os.path.isdir(mysettings["DISTDIR"]):
534 writemsg("!!! %s\n" % str(e), noiselevel=-1)
535 writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
536 writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)
539 not fetch_to_ro and \
540 not os.access(mysettings["DISTDIR"], os.W_OK):
541 writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
545 distdir_writable = can_fetch and not fetch_to_ro
547 restrict_fetch_msg = False
549 for myfile in filedict:
553 1 partially downloaded
554 2 completely downloaded
558 orig_digests = mydigests.get(myfile, {})
560 if not (allow_missing_digests or listonly):
561 verifiable_hash_types = set(orig_digests).intersection(hashfunc_map)
562 verifiable_hash_types.discard("size")
563 if not verifiable_hash_types:
564 expected = set(hashfunc_map)
565 expected.discard("size")
566 expected = " ".join(sorted(expected))
567 got = set(orig_digests)
569 got = " ".join(sorted(got))
570 reason = (_("Insufficient data for checksum verification"),
572 writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
574 writemsg(_("!!! Reason: %s\n") % reason[0],
576 writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \
577 (reason[1], reason[2]), noiselevel=-1)
580 failed_files.add(myfile)
585 size = orig_digests.get("size")
587 # Zero-byte distfiles are always invalid, so discard their digests.
588 del mydigests[myfile]
591 pruned_digests = orig_digests
592 if parallel_fetchonly:
595 pruned_digests["size"] = size
597 myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
599 has_space_superuser = True
602 writemsg_stdout("\n", noiselevel=-1)
604 # check if there is enough space in DISTDIR to completely store myfile
605 # overestimate the filesize so we aren't bitten by FS overhead
607 if size is not None and hasattr(os, "statvfs"):
609 vfs_stat = os.statvfs(mysettings["DISTDIR"])
611 writemsg_level("!!! statvfs('%s'): %s\n" %
612 (mysettings["DISTDIR"], e),
613 noiselevel=-1, level=logging.ERROR)
616 if vfs_stat is not None:
618 mysize = os.stat(myfile_path).st_size
620 if e.errno not in (errno.ENOENT, errno.ESTALE):
624 if (size - mysize + vfs_stat.f_bsize) >= \
625 (vfs_stat.f_bsize * vfs_stat.f_bavail):
627 if (size - mysize + vfs_stat.f_bsize) >= \
628 (vfs_stat.f_bsize * vfs_stat.f_bfree):
629 has_space_superuser = False
631 if not has_space_superuser:
638 if distdir_writable and use_locks:
642 lock_kwargs["flags"] = os.O_NONBLOCK
645 file_lock = lockfile(myfile_path,
646 wantnewlockfile=1, **lock_kwargs)
648 writemsg(_(">>> File '%s' is already locked by "
649 "another fetcher. Continuing...\n") % myfile,
656 eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
657 match, mystat = _check_distfile(
658 myfile_path, pruned_digests, eout, hash_filter=hash_filter)
660 # Skip permission adjustment for symlinks, since we don't
661 # want to modify anything outside of the primary DISTDIR,
662 # and symlinks typically point to PORTAGE_RO_DISTDIRS.
663 if distdir_writable and not os.path.islink(myfile_path):
665 apply_secpass_permissions(myfile_path,
666 gid=portage_gid, mode=0o664, mask=0o2,
668 except PortageException as e:
669 if not os.access(myfile_path, os.R_OK):
670 writemsg(_("!!! Failed to adjust permissions:"
671 " %s\n") % str(e), noiselevel=-1)
675 if distdir_writable and mystat is None:
676 # Remove broken symlinks if necessary.
678 os.unlink(myfile_path)
682 if mystat is not None:
683 if stat.S_ISDIR(mystat.st_mode):
685 _("!!! Unable to fetch file since "
686 "a directory is in the way: \n"
687 "!!! %s\n") % myfile_path,
688 level=logging.ERROR, noiselevel=-1)
691 if mystat.st_size == 0:
694 os.unlink(myfile_path)
697 elif distdir_writable:
698 if mystat.st_size < fetch_resume_size and \
699 mystat.st_size < size:
700 # If the file already exists and the size does not
701 # match the existing digests, it may be that the
702 # user is attempting to update the digest. In this
703 # case, the digestgen() function will advise the
704 # user to use `ebuild --force foo.ebuild manifest`
705 # in order to force the old digests to be replaced.
706 # Since the user may want to keep this file, rename
707 # it instead of deleting it.
708 writemsg(_(">>> Renaming distfile with size "
709 "%d (smaller than " "PORTAGE_FETCH_RESU"
710 "ME_MIN_SIZE)\n") % mystat.st_size)
712 _checksum_failure_temp_file(
713 mysettings["DISTDIR"], myfile)
714 writemsg_stdout(_("Refetching... "
715 "File renamed to '%s'\n\n") % \
716 temp_filename, noiselevel=-1)
717 elif mystat.st_size >= size:
719 _checksum_failure_temp_file(
720 mysettings["DISTDIR"], myfile)
721 writemsg_stdout(_("Refetching... "
722 "File renamed to '%s'\n\n") % \
723 temp_filename, noiselevel=-1)
725 if distdir_writable and ro_distdirs:
727 for x in ro_distdirs:
728 filename = os.path.join(x, myfile)
729 match, mystat = _check_distfile(
730 filename, pruned_digests, eout, hash_filter=hash_filter)
732 readonly_file = filename
734 if readonly_file is not None:
736 os.unlink(myfile_path)
738 if e.errno not in (errno.ENOENT, errno.ESTALE):
741 os.symlink(readonly_file, myfile_path)
744 # this message is shown only after we know that
745 # the file is not already fetched
747 writemsg(_("!!! Insufficient space to store %s in %s\n") % \
748 (myfile, mysettings["DISTDIR"]), noiselevel=-1)
750 if has_space_superuser:
751 writemsg(_("!!! Insufficient privileges to use "
752 "remaining space.\n"), noiselevel=-1)
754 writemsg(_("!!! You may set FEATURES=\"-userfetch\""
755 " in /etc/portage/make.conf in order to fetch with\n"
756 "!!! superuser privileges.\n"), noiselevel=-1)
758 if fsmirrors and not os.path.exists(myfile_path) and has_space:
759 for mydir in fsmirrors:
760 mirror_file = os.path.join(mydir, myfile)
762 shutil.copyfile(mirror_file, myfile_path)
763 writemsg(_("Local mirror has file: %s\n") % myfile)
765 except (IOError, OSError) as e:
766 if e.errno not in (errno.ENOENT, errno.ESTALE):
771 mystat = os.stat(myfile_path)
773 if e.errno not in (errno.ENOENT, errno.ESTALE):
777 # Skip permission adjustment for symlinks, since we don't
778 # want to modify anything outside of the primary DISTDIR,
779 # and symlinks typically point to PORTAGE_RO_DISTDIRS.
780 if not os.path.islink(myfile_path):
782 apply_secpass_permissions(myfile_path,
783 gid=portage_gid, mode=0o664, mask=0o2,
785 except PortageException as e:
786 if not os.access(myfile_path, os.R_OK):
787 writemsg(_("!!! Failed to adjust permissions:"
788 " %s\n") % (e,), noiselevel=-1)
790 # If the file is empty then it's obviously invalid. Remove
791 # the empty file and try to download if possible.
792 if mystat.st_size == 0:
795 os.unlink(myfile_path)
796 except EnvironmentError:
798 elif myfile not in mydigests:
799 # We don't have a digest, but the file exists. We must
800 # assume that it is fully downloaded.
803 if mystat.st_size < mydigests[myfile]["size"] and \
805 fetched = 1 # Try to resume this download.
806 elif parallel_fetchonly and \
807 mystat.st_size == mydigests[myfile]["size"]:
810 mysettings.get("PORTAGE_QUIET") == "1"
812 "%s size ;-)" % (myfile, ))
816 digests = _filter_unaccelarated_hashes(mydigests[myfile])
817 if hash_filter is not None:
818 digests = _apply_hash_filter(digests, hash_filter)
819 verified_ok, reason = verify_all(myfile_path, digests)
821 writemsg(_("!!! Previously fetched"
822 " file: '%s'\n") % myfile, noiselevel=-1)
823 writemsg(_("!!! Reason: %s\n") % reason[0],
825 writemsg(_("!!! Got: %s\n"
826 "!!! Expected: %s\n") % \
827 (reason[1], reason[2]), noiselevel=-1)
828 if reason[0] == _("Insufficient data for checksum verification"):
832 _checksum_failure_temp_file(
833 mysettings["DISTDIR"], myfile)
834 writemsg_stdout(_("Refetching... "
835 "File renamed to '%s'\n\n") % \
836 temp_filename, noiselevel=-1)
840 mysettings.get("PORTAGE_QUIET", None) == "1"
842 digests = list(digests)
845 "%s %s ;-)" % (myfile, " ".join(digests)))
847 continue # fetch any remaining files
849 # Create a reversed list since that is optimal for list.pop().
850 uri_list = filedict[myfile][:]
852 checksum_failure_count = 0
853 tried_locations = set()
856 # Eliminate duplicates here in case we've switched to
857 # "primaryuri" mode on the fly due to a checksum failure.
858 if loc in tried_locations:
860 tried_locations.add(loc)
862 writemsg_stdout(loc+" ", noiselevel=-1)
864 # allow different fetchcommands per protocol
865 protocol = loc[0:loc.find("://")]
867 global_config_path = GLOBAL_CONFIG_PATH
868 if portage.const.EPREFIX:
869 global_config_path = os.path.join(portage.const.EPREFIX,
870 GLOBAL_CONFIG_PATH.lstrip(os.sep))
872 missing_file_param = False
873 fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
874 fetchcommand = mysettings.get(fetchcommand_var)
875 if fetchcommand is None:
876 fetchcommand_var = "FETCHCOMMAND"
877 fetchcommand = mysettings.get(fetchcommand_var)
878 if fetchcommand is None:
880 _("!!! %s is unset. It should "
881 "have been defined in\n!!! %s/make.globals.\n") \
882 % (fetchcommand_var, global_config_path),
883 level=logging.ERROR, noiselevel=-1)
885 if "${FILE}" not in fetchcommand:
887 _("!!! %s does not contain the required ${FILE}"
888 " parameter.\n") % fetchcommand_var,
889 level=logging.ERROR, noiselevel=-1)
890 missing_file_param = True
892 resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
893 resumecommand = mysettings.get(resumecommand_var)
894 if resumecommand is None:
895 resumecommand_var = "RESUMECOMMAND"
896 resumecommand = mysettings.get(resumecommand_var)
897 if resumecommand is None:
899 _("!!! %s is unset. It should "
900 "have been defined in\n!!! %s/make.globals.\n") \
901 % (resumecommand_var, global_config_path),
902 level=logging.ERROR, noiselevel=-1)
904 if "${FILE}" not in resumecommand:
906 _("!!! %s does not contain the required ${FILE}"
907 " parameter.\n") % resumecommand_var,
908 level=logging.ERROR, noiselevel=-1)
909 missing_file_param = True
911 if missing_file_param:
913 _("!!! Refer to the make.conf(5) man page for "
914 "information about how to\n!!! correctly specify "
915 "FETCHCOMMAND and RESUMECOMMAND.\n"),
916 level=logging.ERROR, noiselevel=-1)
917 if myfile != os.path.basename(loc):
923 mysize = os.stat(myfile_path).st_size
925 if e.errno not in (errno.ENOENT, errno.ESTALE):
931 writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile,
933 elif size is None or size > mysize:
934 writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile,
937 writemsg(_("!!! File %s is incorrect size, "
938 "but unable to retry.\n") % myfile, noiselevel=-1)
943 if fetched != 2 and has_space:
944 #we either need to resume or start the download
947 mystat = os.stat(myfile_path)
949 if e.errno not in (errno.ENOENT, errno.ESTALE):
954 if mystat.st_size < fetch_resume_size:
955 writemsg(_(">>> Deleting distfile with size "
956 "%d (smaller than " "PORTAGE_FETCH_RESU"
957 "ME_MIN_SIZE)\n") % mystat.st_size)
959 os.unlink(myfile_path)
962 (errno.ENOENT, errno.ESTALE):
968 writemsg(_(">>> Resuming download...\n"))
969 locfetch=resumecommand
970 command_var = resumecommand_var
973 locfetch=fetchcommand
974 command_var = fetchcommand_var
975 writemsg_stdout(_(">>> Downloading '%s'\n") % \
976 _hide_url_passwd(loc))
982 for k in ("DISTDIR", "PORTAGE_SSH_OPTS"):
984 variables[k] = mysettings[k]
988 myfetch = shlex_split(locfetch)
989 myfetch = [varexpand(x, mydict=variables) for x in myfetch]
993 myret = _spawn_fetch(mysettings, myfetch)
997 apply_secpass_permissions(myfile_path,
998 gid=portage_gid, mode=0o664, mask=0o2)
1001 except PortageException as e:
1002 if not os.access(myfile_path, os.R_OK):
1003 writemsg(_("!!! Failed to adjust permissions:"
1004 " %s\n") % str(e), noiselevel=-1)
1007 # If the file is empty then it's obviously invalid. Don't
1008 # trust the return value from the fetcher. Remove the
1009 # empty file and try to download again.
1011 if os.stat(myfile_path).st_size == 0:
1012 os.unlink(myfile_path)
1015 except EnvironmentError:
1018 if mydigests is not None and myfile in mydigests:
1020 mystat = os.stat(myfile_path)
1021 except OSError as e:
1022 if e.errno not in (errno.ENOENT, errno.ESTALE):
1028 if stat.S_ISDIR(mystat.st_mode):
1029 # This can happen if FETCHCOMMAND erroneously
1030 # contains wget's -P option where it should
1033 _("!!! The command specified in the "
1034 "%s variable appears to have\n!!! "
1035 "created a directory instead of a "
1036 "normal file.\n") % command_var,
1037 level=logging.ERROR, noiselevel=-1)
1039 _("!!! Refer to the make.conf(5) "
1040 "man page for information about how "
1041 "to\n!!! correctly specify "
1042 "FETCHCOMMAND and RESUMECOMMAND.\n"),
1043 level=logging.ERROR, noiselevel=-1)
1046 # no exception? file exists. let digestcheck() report
1047 # an appropriately for size or checksum errors
1049 # If the fetcher reported success and the file is
1050 # too small, it's probably because the digest is
1051 # bad (upstream changed the distfile). In this
1052 # case we don't want to attempt to resume. Show a
							# digest verification failure so that the user gets
1054 # a clue about what just happened.
1055 if myret != os.EX_OK and \
1056 mystat.st_size < mydigests[myfile]["size"]:
1057 # Fetch failed... Try the next one... Kill 404 files though.
1058 if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
1059 html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
1061 _unicode_encode(myfile_path,
1062 encoding=_encodings['fs'], errors='strict'),
1063 mode='r', encoding=_encodings['content'], errors='replace'
1065 if html404.search(f.read()):
1067 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
1068 writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
1071 except (IOError, OSError):
1076 # File is the correct size--check the checksums for the fetched
1077 # file NOW, for those users who don't have a stable/continuous
1078 # net connection. This way we have a chance to try to download
1079 # from another mirror...
1080 digests = _filter_unaccelarated_hashes(mydigests[myfile])
1081 if hash_filter is not None:
1082 digests = _apply_hash_filter(digests, hash_filter)
1083 verified_ok, reason = verify_all(myfile_path, digests)
1085 writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
1087 writemsg(_("!!! Reason: %s\n") % reason[0],
1089 writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \
1090 (reason[1], reason[2]), noiselevel=-1)
1091 if reason[0] == _("Insufficient data for checksum verification"):
1094 _checksum_failure_temp_file(
1095 mysettings["DISTDIR"], myfile)
1096 writemsg_stdout(_("Refetching... "
1097 "File renamed to '%s'\n\n") % \
1098 temp_filename, noiselevel=-1)
1100 checksum_failure_count += 1
1101 if checksum_failure_count == \
1102 checksum_failure_primaryuri:
1103 # Switch to "primaryuri" mode in order
								# to increase the probability of
1107 primaryuri_dict.get(myfile)
1110 reversed(primaryuris))
1111 if checksum_failure_count >= \
1112 checksum_failure_max_tries:
1116 eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
1118 eout.ebegin("%s %s ;-)" % \
1119 (myfile, " ".join(sorted(digests))))
1127 elif mydigests!=None:
1128 writemsg(_("No digest file available and download failed.\n\n"),
1131 if use_locks and file_lock:
1132 unlockfile(file_lock)
1136 writemsg_stdout("\n", noiselevel=-1)
1138 if restrict_fetch and not restrict_fetch_msg:
1139 restrict_fetch_msg = True
1140 msg = _("\n!!! %s/%s"
1141 " has fetch restriction turned on.\n"
1142 "!!! This probably means that this "
1143 "ebuild's files must be downloaded\n"
1144 "!!! manually. See the comments in"
1145 " the ebuild for more information.\n\n") % \
1146 (mysettings["CATEGORY"], mysettings["PF"])
1148 level=logging.ERROR, noiselevel=-1)
1149 elif restrict_fetch:
1153 elif not filedict[myfile]:
1154 writemsg(_("Warning: No mirrors available for file"
1155 " '%s'\n") % (myfile), noiselevel=-1)
1157 writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
1161 failed_files.add(myfile)
1164 failed_files.add(myfile)