1 # Copyright 2010 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
4 from __future__ import print_function
20 portage.proxy.lazyimport.lazyimport(globals(),
21 'portage.package.ebuild.config:check_config_instance,config',
22 'portage.package.ebuild.doebuild:doebuild_environment,' + \
23 'spawn@doebuild_spawn',
24 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
27 from portage import OrderedDict, os, selinux, _encodings, \
28 _shell_quote, _unicode_encode
29 from portage.checksum import perform_md5, verify_all
30 from portage.const import BASH_BINARY, CUSTOM_MIRRORS_FILE, \
31 EBUILD_SH_BINARY, GLOBAL_CONFIG_PATH
32 from portage.data import portage_gid, portage_uid, secpass, userpriv_groups
33 from portage.exception import FileNotFound, OperationNotPermitted, \
34 PermissionDenied, PortageException, TryAgain
35 from portage.localization import _
36 from portage.locks import lockfile, unlockfile
37 from portage.manifest import Manifest
38 from portage.output import colorize, EOutput
39 from portage.util import apply_recursive_permissions, \
40 apply_secpass_permissions, ensure_dirs, grabdict, shlex_split, \
41 varexpand, writemsg, writemsg_level, writemsg_stdout
42 from portage.process import spawn
# spawn() keyword arguments used to drop root privileges when the
# "userfetch" feature is active (see _spawn_fetch below).
# NOTE(review): this tuple appears truncated in this excerpt — the
# closing parenthesis and any uid/gid/umask entries are not visible;
# confirm against the complete file.
_userpriv_spawn_kwargs = (
	("groups", userpriv_groups),
# Run `args` as a fetcher child process: stdout and stderr are merged,
# privileges are dropped when "userfetch" applies, SELinux context is
# applied when enabled, and non-bash entry points are wrapped in bash.
# NOTE(review): several lines are missing from this excerpt (the
# docstring delimiters, the initial binding of spawn_func, the fd_pipes
# dict close, and the final return) — verify against the complete file.
def _spawn_fetch(settings, args, **kwargs):
	Spawn a process with appropriate settings for fetching, including
	userfetch and selinux support.
	global _userpriv_spawn_kwargs
	# Redirect all output to stdout since some fetchers like
	# wget pollute stderr (if portage detects a problem then it
	# can send it's own message to stderr).
	if "fd_pipes" not in kwargs:
		kwargs["fd_pipes"] = {
			0 : sys.stdin.fileno(),
			1 : sys.stdout.fileno(),
			2 : sys.stdout.fileno(),  # stderr deliberately aliased to stdout
	# Drop root privileges for the fetch when "userfetch" is enabled
	# and the portage uid/gid are known (non-zero).
	if "userfetch" in settings.features and \
		os.getuid() == 0 and portage_gid and portage_uid:
		kwargs.update(_userpriv_spawn_kwargs)
	# Wrap the spawn function so the child runs in the SELinux fetch
	# context configured via PORTAGE_FETCH_T.
	if settings.selinux_enabled():
		spawn_func = selinux.spawn_wrapper(spawn_func,
			settings["PORTAGE_FETCH_T"])
	# bash is an allowed entrypoint, while most binaries are not
	if args[0] != BASH_BINARY:
		args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args
	rval = spawn_func(args, env=settings.environ(), **kwargs)
88 _userpriv_test_write_file_cache = {}
89 _userpriv_test_write_cmd_script = "touch %(file_path)s 2>/dev/null ; rval=$? ; " + \
90 "rm -f %(file_path)s ; exit $rval"
# NOTE(review): this excerpt is missing lines (the docstring
# delimiters, the early return on a cache hit, and the final return of
# rval) — verify against the complete file.
def _userpriv_test_write_file(settings, file_path):
	Drop privileges and try to open a file for writing. The file may or
	may not exist, and the parent directory is assumed to exist. The file
	is removed before returning.
	@param settings: A config instance which is passed to _spawn_fetch()
	@param file_path: A file path to open and write.
	@return: True if write succeeds, False otherwise.
	global _userpriv_test_write_file_cache, _userpriv_test_write_cmd_script
	# Reuse a previously computed result for this path, if any.
	rval = _userpriv_test_write_file_cache.get(file_path)
	# Probe writability with a small bash script; the path is shell-quoted
	# to survive word splitting and special characters.
	args = [BASH_BINARY, "-c", _userpriv_test_write_cmd_script % \
		{"file_path" : _shell_quote(file_path)}]
	returncode = _spawn_fetch(settings, args)
	# Success is a zero exit status from the touch/rm probe.
	rval = returncode == os.EX_OK
	_userpriv_test_write_file_cache[file_path] = rval
# NOTE(review): this excerpt is missing lines (the docstring
# delimiters, the loop's continue statements, the try/except around
# stat/perform_md5, os.close of the mkstemp fd, and the final return
# of temp_filename) — verify against the complete file.
def _checksum_failure_temp_file(distdir, basename):
	First try to find a duplicate temp file with the same checksum and return
	that filename if available. Otherwise, use mkstemp to create a new unique
	filename._checksum_failure_.$RANDOM, rename the given file, and return the
	new filename. In any case, filename will be renamed or removed before this
	function returns a temp filename.
	filename = os.path.join(distdir, basename)
	size = os.stat(filename).st_size
	# Scan for an existing "<basename>._checksum_failure_.*" temp file
	# with identical content so duplicates don't pile up in DISTDIR.
	tempfile_re = re.compile(re.escape(basename) + r'\._checksum_failure_\..*')
	for temp_filename in os.listdir(distdir):
		if not tempfile_re.match(temp_filename):
		temp_filename = os.path.join(distdir, temp_filename)
		# Cheap size comparison before the expensive md5 pass.
		if size != os.stat(temp_filename).st_size:
		temp_checksum = perform_md5(temp_filename)
		# Apparently the temp file disappeared. Let it go.
		checksum = perform_md5(filename)
		if checksum == temp_checksum:
	# No matching temp file found: make a fresh unique name and rename
	# the failed distfile onto it.
	fd, temp_filename = \
		tempfile.mkstemp("", basename + "._checksum_failure_.", distdir)
	os.rename(filename, temp_filename)
# NOTE(review): this excerpt is missing lines (the docstring
# delimiters, the branch on verified_ok / show_errors, the noiselevel
# argument closing one writemsg call, and the return statements) —
# verify against the complete file.
def _check_digests(filename, digests, show_errors=1):
	Check digests and display a message if an error occurs.
	@return True if all digests match, False otherwise.
	verified_ok, reason = verify_all(filename, digests)
	# On mismatch, reason is a (summary, got, expected) triple.
	writemsg(_("!!! Previously fetched"
		" file: '%s'\n") % filename, noiselevel=-1)
	writemsg(_("!!! Reason: %s\n") % reason[0],
	writemsg(_("!!! Got: %s\n"
		"!!! Expected: %s\n") % \
		(reason[1], reason[2]), noiselevel=-1)
# NOTE(review): this excerpt is missing lines (the docstring
# delimiters, the try/except around os.stat, and the return
# statements for each branch) — verify against the complete file.
def _check_distfile(filename, digests, eout, show_errors=1):
	@return a tuple of (match, stat_obj) where match is True if filename
	matches all given digests (if any) and stat_obj is a stat result, or
	None if the file does not exist.
	size = digests.get("size")
	# When "size" is the only digest available, a bare size check is
	# all that can be done.
	if size is not None and len(digests) == 1:
	st = os.stat(filename)
	# A size mismatch is an immediate failure.
	if size is not None and size != st.st_size:
	eout.ebegin(_("%s size ;-)") % os.path.basename(filename))
	elif st.st_size == 0:
	# Zero-byte distfiles are always invalid.
	if _check_digests(filename, digests, show_errors=show_errors):
		eout.ebegin("%s %s ;-)" % (os.path.basename(filename),
			" ".join(sorted(digests))))
208 _fetch_resume_size_re = re.compile('(^[\d]+)([KMGTPEZY]?$)')
222 def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
223 "fetch files. Will use digest file if available."
228 features = mysettings.features
229 restrict = mysettings.get("PORTAGE_RESTRICT","").split()
231 userfetch = secpass >= 2 and "userfetch" in features
232 userpriv = secpass >= 2 and "userpriv" in features
234 # 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
235 if "mirror" in restrict or \
236 "nomirror" in restrict:
237 if ("mirror" in features) and ("lmirror" not in features):
238 # lmirror should allow you to bypass mirror restrictions.
239 # XXX: This is not a good thing, and is temporary at best.
240 print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
243 # Generally, downloading the same file repeatedly from
244 # every single available mirror is a waste of bandwidth
245 # and time, so there needs to be a cap.
246 checksum_failure_max_tries = 5
247 v = checksum_failure_max_tries
249 v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
250 checksum_failure_max_tries))
251 except (ValueError, OverflowError):
252 writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
253 " contains non-integer value: '%s'\n") % \
254 mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
255 writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
256 "default value: %s\n") % checksum_failure_max_tries,
258 v = checksum_failure_max_tries
260 writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
261 " contains value less than 1: '%s'\n") % v, noiselevel=-1)
262 writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
263 "default value: %s\n") % checksum_failure_max_tries,
265 v = checksum_failure_max_tries
266 checksum_failure_max_tries = v
269 fetch_resume_size_default = "350K"
270 fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
271 if fetch_resume_size is not None:
272 fetch_resume_size = "".join(fetch_resume_size.split())
273 if not fetch_resume_size:
274 # If it's undefined or empty, silently use the default.
275 fetch_resume_size = fetch_resume_size_default
276 match = _fetch_resume_size_re.match(fetch_resume_size)
277 if match is None or \
278 (match.group(2).upper() not in _size_suffix_map):
279 writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
280 " contains an unrecognized format: '%s'\n") % \
281 mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
282 writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
283 "default value: %s\n") % fetch_resume_size_default,
285 fetch_resume_size = None
286 if fetch_resume_size is None:
287 fetch_resume_size = fetch_resume_size_default
288 match = _fetch_resume_size_re.match(fetch_resume_size)
289 fetch_resume_size = int(match.group(1)) * \
290 2 ** _size_suffix_map[match.group(2).upper()]
292 # Behave like the package has RESTRICT="primaryuri" after a
293 # couple of checksum failures, to increase the probablility
294 # of success before checksum_failure_max_tries is reached.
295 checksum_failure_primaryuri = 2
296 thirdpartymirrors = mysettings.thirdpartymirrors()
298 # In the background parallel-fetch process, it's safe to skip checksum
299 # verification of pre-existing files in $DISTDIR that have the correct
300 # file size. The parent process will verify their checksums prior to
303 parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
304 if parallel_fetchonly:
307 check_config_instance(mysettings)
309 custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
310 CUSTOM_MIRRORS_FILE), recursive=1)
314 if listonly or ("distlocks" not in features):
318 if "skiprocheck" in features:
321 if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
323 writemsg(colorize("BAD",
324 _("!!! For fetching to a read-only filesystem, "
325 "locking should be turned off.\n")), noiselevel=-1)
326 writemsg(_("!!! This can be done by adding -distlocks to "
327 "FEATURES in /etc/make.conf\n"), noiselevel=-1)
330 # local mirrors are always added
331 if "local" in custommirrors:
332 mymirrors += custommirrors["local"]
334 if "nomirror" in restrict or \
335 "mirror" in restrict:
336 # We don't add any mirrors.
340 mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
342 skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
343 pkgdir = mysettings.get("O")
344 if not (pkgdir is None or skip_manifest):
345 mydigests = Manifest(
346 pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
348 # no digests because fetch was not called for a specific package
351 ro_distdirs = [x for x in \
352 shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
356 for x in range(len(mymirrors)-1,-1,-1):
357 if mymirrors[x] and mymirrors[x][0]=='/':
358 fsmirrors += [mymirrors[x]]
361 restrict_fetch = "fetch" in restrict
362 custom_local_mirrors = custommirrors.get("local", [])
364 # With fetch restriction, a normal uri may only be fetched from
365 # custom local mirrors (if available). A mirror:// uri may also
366 # be fetched from specific mirrors (effectively overriding fetch
367 # restriction, but only for specific mirrors).
368 locations = custom_local_mirrors
370 locations = mymirrors
373 # Check for 'items' attribute since OrderedDict is not a dict.
374 if hasattr(myuris, 'items'):
375 for myfile, uri_set in myuris.items():
376 for myuri in uri_set:
377 file_uri_tuples.append((myfile, myuri))
380 file_uri_tuples.append((os.path.basename(myuri), myuri))
382 filedict = OrderedDict()
383 primaryuri_indexes={}
385 thirdpartymirror_uris = {}
386 for myfile, myuri in file_uri_tuples:
387 if myfile not in filedict:
389 for y in range(0,len(locations)):
390 filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
391 if myuri[:9]=="mirror://":
392 eidx = myuri.find("/", 9)
394 mirrorname = myuri[9:eidx]
395 path = myuri[eidx+1:]
397 # Try user-defined mirrors first
398 if mirrorname in custommirrors:
399 for cmirr in custommirrors[mirrorname]:
400 filedict[myfile].append(
401 cmirr.rstrip("/") + "/" + path)
403 # now try the official mirrors
404 if mirrorname in thirdpartymirrors:
405 random.shuffle(thirdpartymirrors[mirrorname])
407 uris = [locmirr.rstrip("/") + "/" + path \
408 for locmirr in thirdpartymirrors[mirrorname]]
409 filedict[myfile].extend(uris)
410 thirdpartymirror_uris.setdefault(myfile, []).extend(uris)
412 if not filedict[myfile]:
413 writemsg(_("No known mirror by the name: %s\n") % (mirrorname))
415 writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
416 writemsg(" %s\n" % (myuri), noiselevel=-1)
419 # Only fetch from specific mirrors is allowed.
421 if "primaryuri" in restrict:
422 # Use the source site first.
423 if myfile in primaryuri_indexes:
424 primaryuri_indexes[myfile] += 1
426 primaryuri_indexes[myfile] = 0
427 filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
429 filedict[myfile].append(myuri)
430 primaryuris = primaryuri_dict.get(myfile)
431 if primaryuris is None:
433 primaryuri_dict[myfile] = primaryuris
434 primaryuris.append(myuri)
436 # Prefer thirdpartymirrors over normal mirrors in cases when
437 # the file does not yet exist on the normal mirrors.
438 for myfile, uris in thirdpartymirror_uris.items():
439 primaryuri_dict.setdefault(myfile, []).extend(uris)
446 if can_fetch and not fetch_to_ro:
447 global _userpriv_test_write_file_cache
451 dir_gid = portage_gid
452 if "FAKED_MODE" in mysettings:
453 # When inside fakeroot, directories with portage's gid appear
454 # to have root's gid. Therefore, use root's gid instead of
455 # portage's gid to avoid spurrious permissions adjustments
456 # when inside fakeroot.
459 if "distlocks" in features:
460 distdir_dirs.append(".locks")
463 for x in distdir_dirs:
464 mydir = os.path.join(mysettings["DISTDIR"], x)
465 write_test_file = os.path.join(
466 mydir, ".__portage_test_write__")
473 if st is not None and stat.S_ISDIR(st.st_mode):
474 if not (userfetch or userpriv):
476 if _userpriv_test_write_file(mysettings, write_test_file):
479 _userpriv_test_write_file_cache.pop(write_test_file, None)
480 if ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
482 # The directory has just been created
483 # and therefore it must be empty.
485 writemsg(_("Adjusting permissions recursively: '%s'\n") % mydir,
488 raise # bail out on the first error that occurs during recursion
489 if not apply_recursive_permissions(mydir,
490 gid=dir_gid, dirmode=dirmode, dirmask=modemask,
491 filemode=filemode, filemask=modemask, onerror=onerror):
492 raise OperationNotPermitted(
493 _("Failed to apply recursive permissions for the portage group."))
494 except PortageException as e:
495 if not os.path.isdir(mysettings["DISTDIR"]):
496 writemsg("!!! %s\n" % str(e), noiselevel=-1)
497 writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
498 writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)
501 not fetch_to_ro and \
502 not os.access(mysettings["DISTDIR"], os.W_OK):
503 writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
507 if can_fetch and use_locks and locks_in_subdir:
508 distlocks_subdir = os.path.join(mysettings["DISTDIR"], locks_in_subdir)
509 if not os.access(distlocks_subdir, os.W_OK):
510 writemsg(_("!!! No write access to write to %s. Aborting.\n") % distlocks_subdir,
515 distdir_writable = can_fetch and not fetch_to_ro
517 restrict_fetch_msg = False
519 for myfile in filedict:
523 1 partially downloaded
524 2 completely downloaded
528 orig_digests = mydigests.get(myfile, {})
529 size = orig_digests.get("size")
531 # Zero-byte distfiles are always invalid, so discard their digests.
532 del mydigests[myfile]
535 pruned_digests = orig_digests
536 if parallel_fetchonly:
539 pruned_digests["size"] = size
541 myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
543 has_space_superuser = True
546 writemsg_stdout("\n", noiselevel=-1)
548 # check if there is enough space in DISTDIR to completely store myfile
549 # overestimate the filesize so we aren't bitten by FS overhead
550 if size is not None and hasattr(os, "statvfs"):
551 vfs_stat = os.statvfs(mysettings["DISTDIR"])
553 mysize = os.stat(myfile_path).st_size
555 if e.errno not in (errno.ENOENT, errno.ESTALE):
559 if (size - mysize + vfs_stat.f_bsize) >= \
560 (vfs_stat.f_bsize * vfs_stat.f_bavail):
562 if (size - mysize + vfs_stat.f_bsize) >= \
563 (vfs_stat.f_bsize * vfs_stat.f_bfree):
564 has_space_superuser = False
566 if not has_space_superuser:
574 writemsg(_("!!! Insufficient space to store %s in %s\n") % \
575 (myfile, mysettings["DISTDIR"]), noiselevel=-1)
577 if has_space_superuser:
578 writemsg(_("!!! Insufficient privileges to use "
579 "remaining space.\n"), noiselevel=-1)
581 writemsg(_("!!! You may set FEATURES=\"-userfetch\""
582 " in /etc/make.conf in order to fetch with\n"
583 "!!! superuser privileges.\n"), noiselevel=-1)
585 if distdir_writable and use_locks:
589 lock_kwargs["flags"] = os.O_NONBLOCK
592 file_lock = lockfile(myfile_path,
593 wantnewlockfile=1, **lock_kwargs)
595 writemsg(_(">>> File '%s' is already locked by "
596 "another fetcher. Continuing...\n") % myfile,
603 eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
604 match, mystat = _check_distfile(
605 myfile_path, pruned_digests, eout)
609 apply_secpass_permissions(myfile_path,
610 gid=portage_gid, mode=0o664, mask=0o2,
612 except PortageException as e:
613 if not os.access(myfile_path, os.R_OK):
614 writemsg(_("!!! Failed to adjust permissions:"
615 " %s\n") % str(e), noiselevel=-1)
619 if distdir_writable and mystat is None:
620 # Remove broken symlinks if necessary.
622 os.unlink(myfile_path)
626 if mystat is not None:
627 if stat.S_ISDIR(mystat.st_mode):
629 _("!!! Unable to fetch file since "
630 "a directory is in the way: \n"
631 "!!! %s\n") % myfile_path,
632 level=logging.ERROR, noiselevel=-1)
635 if mystat.st_size == 0:
638 os.unlink(myfile_path)
641 elif distdir_writable:
642 if mystat.st_size < fetch_resume_size and \
643 mystat.st_size < size:
644 # If the file already exists and the size does not
645 # match the existing digests, it may be that the
646 # user is attempting to update the digest. In this
647 # case, the digestgen() function will advise the
648 # user to use `ebuild --force foo.ebuild manifest`
649 # in order to force the old digests to be replaced.
650 # Since the user may want to keep this file, rename
651 # it instead of deleting it.
652 writemsg(_(">>> Renaming distfile with size "
653 "%d (smaller than " "PORTAGE_FETCH_RESU"
654 "ME_MIN_SIZE)\n") % mystat.st_size)
656 _checksum_failure_temp_file(
657 mysettings["DISTDIR"], myfile)
658 writemsg_stdout(_("Refetching... "
659 "File renamed to '%s'\n\n") % \
660 temp_filename, noiselevel=-1)
661 elif mystat.st_size >= size:
663 _checksum_failure_temp_file(
664 mysettings["DISTDIR"], myfile)
665 writemsg_stdout(_("Refetching... "
666 "File renamed to '%s'\n\n") % \
667 temp_filename, noiselevel=-1)
669 if distdir_writable and ro_distdirs:
671 for x in ro_distdirs:
672 filename = os.path.join(x, myfile)
673 match, mystat = _check_distfile(
674 filename, pruned_digests, eout)
676 readonly_file = filename
678 if readonly_file is not None:
680 os.unlink(myfile_path)
682 if e.errno not in (errno.ENOENT, errno.ESTALE):
685 os.symlink(readonly_file, myfile_path)
688 if fsmirrors and not os.path.exists(myfile_path) and has_space:
689 for mydir in fsmirrors:
690 mirror_file = os.path.join(mydir, myfile)
692 shutil.copyfile(mirror_file, myfile_path)
693 writemsg(_("Local mirror has file: %s\n") % myfile)
695 except (IOError, OSError) as e:
696 if e.errno not in (errno.ENOENT, errno.ESTALE):
701 mystat = os.stat(myfile_path)
703 if e.errno not in (errno.ENOENT, errno.ESTALE):
708 apply_secpass_permissions(
709 myfile_path, gid=portage_gid, mode=0o664, mask=0o2,
711 except PortageException as e:
712 if not os.access(myfile_path, os.R_OK):
713 writemsg(_("!!! Failed to adjust permissions:"
714 " %s\n") % str(e), noiselevel=-1)
716 # If the file is empty then it's obviously invalid. Remove
717 # the empty file and try to download if possible.
718 if mystat.st_size == 0:
721 os.unlink(myfile_path)
722 except EnvironmentError:
724 elif myfile not in mydigests:
725 # We don't have a digest, but the file exists. We must
726 # assume that it is fully downloaded.
729 if mystat.st_size < mydigests[myfile]["size"] and \
731 fetched = 1 # Try to resume this download.
732 elif parallel_fetchonly and \
733 mystat.st_size == mydigests[myfile]["size"]:
736 mysettings.get("PORTAGE_QUIET") == "1"
738 "%s size ;-)" % (myfile, ))
742 verified_ok, reason = verify_all(
743 myfile_path, mydigests[myfile])
745 writemsg(_("!!! Previously fetched"
746 " file: '%s'\n") % myfile, noiselevel=-1)
747 writemsg(_("!!! Reason: %s\n") % reason[0],
749 writemsg(_("!!! Got: %s\n"
750 "!!! Expected: %s\n") % \
751 (reason[1], reason[2]), noiselevel=-1)
752 if reason[0] == _("Insufficient data for checksum verification"):
756 _checksum_failure_temp_file(
757 mysettings["DISTDIR"], myfile)
758 writemsg_stdout(_("Refetching... "
759 "File renamed to '%s'\n\n") % \
760 temp_filename, noiselevel=-1)
764 mysettings.get("PORTAGE_QUIET", None) == "1"
765 digests = mydigests.get(myfile)
767 digests = list(digests)
770 "%s %s ;-)" % (myfile, " ".join(digests)))
772 continue # fetch any remaining files
774 # Create a reversed list since that is optimal for list.pop().
775 uri_list = filedict[myfile][:]
777 checksum_failure_count = 0
778 tried_locations = set()
781 # Eliminate duplicates here in case we've switched to
782 # "primaryuri" mode on the fly due to a checksum failure.
783 if loc in tried_locations:
785 tried_locations.add(loc)
787 writemsg_stdout(loc+" ", noiselevel=-1)
789 # allow different fetchcommands per protocol
790 protocol = loc[0:loc.find("://")]
792 missing_file_param = False
793 fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
794 fetchcommand = mysettings.get(fetchcommand_var)
795 if fetchcommand is None:
796 fetchcommand_var = "FETCHCOMMAND"
797 fetchcommand = mysettings.get(fetchcommand_var)
798 if fetchcommand is None:
800 _("!!! %s is unset. It should "
801 "have been defined in\n!!! %s/make.globals.\n") \
802 % (fetchcommand_var, GLOBAL_CONFIG_PATH),
803 level=logging.ERROR, noiselevel=-1)
805 if "${FILE}" not in fetchcommand:
807 _("!!! %s does not contain the required ${FILE}"
808 " parameter.\n") % fetchcommand_var,
809 level=logging.ERROR, noiselevel=-1)
810 missing_file_param = True
812 resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
813 resumecommand = mysettings.get(resumecommand_var)
814 if resumecommand is None:
815 resumecommand_var = "RESUMECOMMAND"
816 resumecommand = mysettings.get(resumecommand_var)
817 if resumecommand is None:
819 _("!!! %s is unset. It should "
820 "have been defined in\n!!! %s/make.globals.\n") \
821 % (resumecommand_var, GLOBAL_CONFIG_PATH),
822 level=logging.ERROR, noiselevel=-1)
824 if "${FILE}" not in resumecommand:
826 _("!!! %s does not contain the required ${FILE}"
827 " parameter.\n") % resumecommand_var,
828 level=logging.ERROR, noiselevel=-1)
829 missing_file_param = True
831 if missing_file_param:
833 _("!!! Refer to the make.conf(5) man page for "
834 "information about how to\n!!! correctly specify "
835 "FETCHCOMMAND and RESUMECOMMAND.\n"),
836 level=logging.ERROR, noiselevel=-1)
837 if myfile != os.path.basename(loc):
843 mysize = os.stat(myfile_path).st_size
845 if e.errno not in (errno.ENOENT, errno.ESTALE):
851 writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile,
853 elif size is None or size > mysize:
854 writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile,
857 writemsg(_("!!! File %s is incorrect size, "
858 "but unable to retry.\n") % myfile, noiselevel=-1)
863 if fetched != 2 and has_space:
864 #we either need to resume or start the download
867 mystat = os.stat(myfile_path)
869 if e.errno not in (errno.ENOENT, errno.ESTALE):
874 if mystat.st_size < fetch_resume_size:
875 writemsg(_(">>> Deleting distfile with size "
876 "%d (smaller than " "PORTAGE_FETCH_RESU"
877 "ME_MIN_SIZE)\n") % mystat.st_size)
879 os.unlink(myfile_path)
882 (errno.ENOENT, errno.ESTALE):
888 writemsg(_(">>> Resuming download...\n"))
889 locfetch=resumecommand
890 command_var = resumecommand_var
893 locfetch=fetchcommand
894 command_var = fetchcommand_var
895 writemsg_stdout(_(">>> Downloading '%s'\n") % \
896 re.sub(r'//(.+):.+@(.+)/',r'//\1:*password*@\2/', loc))
898 "DISTDIR": mysettings["DISTDIR"],
903 myfetch = shlex_split(locfetch)
904 myfetch = [varexpand(x, mydict=variables) for x in myfetch]
908 myret = _spawn_fetch(mysettings, myfetch)
912 apply_secpass_permissions(myfile_path,
913 gid=portage_gid, mode=0o664, mask=0o2)
916 except PortageException as e:
917 if not os.access(myfile_path, os.R_OK):
918 writemsg(_("!!! Failed to adjust permissions:"
919 " %s\n") % str(e), noiselevel=-1)
922 # If the file is empty then it's obviously invalid. Don't
923 # trust the return value from the fetcher. Remove the
924 # empty file and try to download again.
926 if os.stat(myfile_path).st_size == 0:
927 os.unlink(myfile_path)
930 except EnvironmentError:
933 if mydigests is not None and myfile in mydigests:
935 mystat = os.stat(myfile_path)
937 if e.errno not in (errno.ENOENT, errno.ESTALE):
943 if stat.S_ISDIR(mystat.st_mode):
944 # This can happen if FETCHCOMMAND erroneously
945 # contains wget's -P option where it should
948 _("!!! The command specified in the "
949 "%s variable appears to have\n!!! "
950 "created a directory instead of a "
951 "normal file.\n") % command_var,
952 level=logging.ERROR, noiselevel=-1)
954 _("!!! Refer to the make.conf(5) "
955 "man page for information about how "
956 "to\n!!! correctly specify "
957 "FETCHCOMMAND and RESUMECOMMAND.\n"),
958 level=logging.ERROR, noiselevel=-1)
961 # no exception? file exists. let digestcheck() report
962 # an appropriately for size or checksum errors
964 # If the fetcher reported success and the file is
965 # too small, it's probably because the digest is
966 # bad (upstream changed the distfile). In this
967 # case we don't want to attempt to resume. Show a
968 # digest verification failure to that the user gets
969 # a clue about what just happened.
970 if myret != os.EX_OK and \
971 mystat.st_size < mydigests[myfile]["size"]:
972 # Fetch failed... Try the next one... Kill 404 files though.
973 if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
974 html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
975 if html404.search(codecs.open(
976 _unicode_encode(myfile_path,
977 encoding=_encodings['fs'], errors='strict'),
978 mode='r', encoding=_encodings['content'], errors='replace'
981 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
982 writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
985 except (IOError, OSError):
990 # File is the correct size--check the checksums for the fetched
991 # file NOW, for those users who don't have a stable/continuous
992 # net connection. This way we have a chance to try to download
993 # from another mirror...
994 verified_ok,reason = verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
997 writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
999 writemsg(_("!!! Reason: %s\n") % reason[0],
1001 writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \
1002 (reason[1], reason[2]), noiselevel=-1)
1003 if reason[0] == _("Insufficient data for checksum verification"):
1006 _checksum_failure_temp_file(
1007 mysettings["DISTDIR"], myfile)
1008 writemsg_stdout(_("Refetching... "
1009 "File renamed to '%s'\n\n") % \
1010 temp_filename, noiselevel=-1)
1012 checksum_failure_count += 1
1013 if checksum_failure_count == \
1014 checksum_failure_primaryuri:
1015 # Switch to "primaryuri" mode in order
1016 # to increase the probablility of
1019 primaryuri_dict.get(myfile)
1022 reversed(primaryuris))
1023 if checksum_failure_count >= \
1024 checksum_failure_max_tries:
1028 eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
1029 digests = mydigests.get(myfile)
1031 eout.ebegin("%s %s ;-)" % \
1032 (myfile, " ".join(sorted(digests))))
1040 elif mydigests!=None:
1041 writemsg(_("No digest file available and download failed.\n\n"),
1044 if use_locks and file_lock:
1045 unlockfile(file_lock)
1048 writemsg_stdout("\n", noiselevel=-1)
1050 if restrict_fetch and not restrict_fetch_msg:
1051 restrict_fetch_msg = True
1052 msg = _("\n!!! %s/%s"
1053 " has fetch restriction turned on.\n"
1054 "!!! This probably means that this "
1055 "ebuild's files must be downloaded\n"
1056 "!!! manually. See the comments in"
1057 " the ebuild for more information.\n\n") % \
1058 (mysettings["CATEGORY"], mysettings["PF"])
1060 level=logging.ERROR, noiselevel=-1)
1061 have_builddir = "PORTAGE_BUILDDIR" in mysettings and \
1062 os.path.isdir(mysettings["PORTAGE_BUILDDIR"])
1064 global_tmpdir = mysettings["PORTAGE_TMPDIR"]
1065 private_tmpdir = None
1066 if not parallel_fetchonly and not have_builddir:
1067 # When called by digestgen(), it's normal that
1068 # PORTAGE_BUILDDIR doesn't exist. It's helpful
1069 # to show the pkg_nofetch output though, so go
1070 # ahead and create a temporary PORTAGE_BUILDDIR.
1071 # Use a temporary config instance to avoid altering
1072 # the state of the one that's been passed in.
1073 mysettings = config(clone=mysettings)
1075 private_tmpdir = tempfile.mkdtemp("", "._portage_fetch_.",
1077 except OSError as e:
1078 if e.errno != PermissionDenied.errno:
1080 raise PermissionDenied(global_tmpdir)
1081 mysettings["PORTAGE_TMPDIR"] = private_tmpdir
1082 mysettings.backup_changes("PORTAGE_TMPDIR")
1083 debug = mysettings.get("PORTAGE_DEBUG") == "1"
1084 doebuild_environment(mysettings["EBUILD"], "fetch",
1085 mysettings["ROOT"], mysettings, debug, 1, None)
1086 prepare_build_dirs(mysettings["ROOT"], mysettings, 0)
1087 have_builddir = True
1089 if not parallel_fetchonly and have_builddir:
1090 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1091 # ensuring sane $PWD (bug #239560) and storing elog
1092 # messages. Therefore, calling code needs to ensure that
1093 # PORTAGE_BUILDDIR is already clean and locked here.
1095 # All the pkg_nofetch goes to stderr since it's considered
1096 # to be an error message.
1098 0 : sys.stdin.fileno(),
1099 1 : sys.stderr.fileno(),
1100 2 : sys.stderr.fileno(),
1103 ebuild_phase = mysettings.get("EBUILD_PHASE")
1105 mysettings["EBUILD_PHASE"] = "nofetch"
1106 doebuild_spawn(_shell_quote(EBUILD_SH_BINARY) + \
1107 " nofetch", mysettings, fd_pipes=fd_pipes)
1109 if ebuild_phase is None:
1110 mysettings.pop("EBUILD_PHASE", None)
1112 mysettings["EBUILD_PHASE"] = ebuild_phase
1113 if private_tmpdir is not None:
1114 shutil.rmtree(private_tmpdir)
1116 elif restrict_fetch:
1120 elif not filedict[myfile]:
1121 writemsg(_("Warning: No mirrors available for file"
1122 " '%s'\n") % (myfile), noiselevel=-1)
1124 writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
1130 failed_files.add(myfile)