1 # Copyright 1999-2011 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
4 from __future__ import print_function
7 from subprocess import getstatusoutput as subprocess_getstatusoutput
9 from commands import getstatusoutput as subprocess_getstatusoutput
24 from itertools import chain
27 from portage import os
28 from portage import digraph
29 from portage import _unicode_decode
30 from portage.cache.cache_errors import CacheError
31 from portage.const import GLOBAL_CONFIG_PATH, NEWS_LIB_PATH
32 from portage.const import _ENABLE_DYN_LINK_MAP
33 from portage.dbapi.dep_expand import dep_expand
34 from portage.dep import Atom, extended_cp_match
35 from portage.exception import InvalidAtom
36 from portage.output import blue, bold, colorize, create_color_func, darkgreen, \
38 good = create_color_func("GOOD")
39 bad = create_color_func("BAD")
40 from portage.package.ebuild._ipc.QueryCommand import QueryCommand
41 from portage.package.ebuild.doebuild import _check_temp_dir
42 from portage._sets import load_default_config, SETPREFIX
43 from portage._sets.base import InternalPackageSet
44 from portage.util import cmp_sort_key, writemsg, \
45 writemsg_level, writemsg_stdout
46 from portage._global_updates import _global_updates
48 from _emerge.clear_caches import clear_caches
49 from _emerge.countdown import countdown
50 from _emerge.create_depgraph_params import create_depgraph_params
51 from _emerge.Dependency import Dependency
52 from _emerge.depgraph import backtrack_depgraph, depgraph, resume_depgraph
53 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
54 from _emerge.emergelog import emergelog
55 from _emerge.is_valid_package_atom import is_valid_package_atom
56 from _emerge.MetadataRegen import MetadataRegen
57 from _emerge.Package import Package
58 from _emerge.ProgressHandler import ProgressHandler
59 from _emerge.RootConfig import RootConfig
60 from _emerge.Scheduler import Scheduler
61 from _emerge.search import search
62 from _emerge.SetArg import SetArg
63 from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
64 from _emerge.sync.getaddrinfo_validate import getaddrinfo_validate
65 from _emerge.sync.old_tree_timestamp import old_tree_timestamp_warn
66 from _emerge.unmerge import unmerge
67 from _emerge.UnmergeDepPriority import UnmergeDepPriority
68 from _emerge.UseFlagDisplay import pkg_use_display
69 from _emerge.userquery import userquery
71 if sys.hexversion >= 0x3000000:
# emerge "build" action: resolve and merge the requested packages/sets.
# Flow: warn about stale trees, validate saved resume state in mtimedb,
# fold resumed options into myopts, build a depgraph (resume_depgraph or
# backtrack_depgraph), display/prompt per --ask/--pretend/--tree, then
# hand the graph to the Scheduler and optionally auto-clean afterwards.
# NOTE(review): the leading digits on each line are original-file line
# numbers from the paste; gaps in them show that statements are elided
# from this excerpt, so some branches below appear without their bodies.
74 def action_build(settings, trees, mtimedb,
75 myopts, myaction, myfiles, spinner):
# Warn if the portage tree timestamp looks old (skipped for binary-only
# installs, where the tree is not consulted).
77 if '--usepkgonly' not in myopts:
78 old_tree_timestamp_warn(settings['PORTDIR'], settings)
80 # It's best for config updates in /etc/portage to be processed
81 # before we get here, so warn if they're not (bug #267103).
82 chk_updated_cfg_files(settings['EROOT'], ['/etc/portage'])
84 # validate the state of the resume data
85 # so that we can make assumptions later.
# Each saved entry must be a dict with a "mergelist" of 4-element
# [type, root, key, action] lists whose roots are still configured;
# presumably malformed entries are discarded (handling elided here).
86 for k in ("resume", "resume_backup"):
89 resume_data = mtimedb[k]
90 if not isinstance(resume_data, dict):
93 mergelist = resume_data.get("mergelist")
94 if not isinstance(mergelist, list):
98 if not (isinstance(x, list) and len(x) == 4):
100 pkg_type, pkg_root, pkg_key, pkg_action = x
101 if pkg_root not in trees:
102 # Current $ROOT setting differs,
103 # so the list must be stale.
# "myopts"/"favorites" in the resume data must also have sane types.
109 resume_opts = resume_data.get("myopts")
110 if not isinstance(resume_opts, (dict, list)):
113 favorites = resume_data.get("favorites")
114 if not isinstance(favorites, list):
# When resuming: promote resume_backup to resume if needed, then merge
# the options saved with the interrupted run into the current ones.
119 if "--resume" in myopts and \
120 ("resume" in mtimedb or
121 "resume_backup" in mtimedb):
123 if "resume" not in mtimedb:
124 mtimedb["resume"] = mtimedb["resume_backup"]
125 del mtimedb["resume_backup"]
127 # "myopts" is a list for backward compatibility.
128 resume_opts = mtimedb["resume"].get("myopts", [])
129 if isinstance(resume_opts, list):
130 resume_opts = dict((k,True) for k in resume_opts)
# These options are interactive/one-shot in nature and must not be
# replayed automatically on resume.
131 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
132 resume_opts.pop(opt, None)
134 # Current options always override resume_opts.
135 resume_opts.update(myopts)
137 myopts.update(resume_opts)
139 if "--debug" in myopts:
140 writemsg_level("myopts %s\n" % (myopts,))
142 # Adjust config according to options of the command being resumed.
# NOTE(review): myroot's assignment is elided from this excerpt.
144 mysettings = trees[myroot]["vartree"].settings
146 adjust_config(myopts, mysettings)
148 del myroot, mysettings
150 ldpath_mtimes = mtimedb["ldpath"]
# Cache commonly tested option flags as booleans.
152 buildpkgonly = "--buildpkgonly" in myopts
153 pretend = "--pretend" in myopts
154 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
155 ask = "--ask" in myopts
156 enter_invalid = '--ask-enter-invalid' in myopts
157 nodeps = "--nodeps" in myopts
158 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
159 tree = "--tree" in myopts
# --tree output cannot be rendered without dependency information.
163 portage.writemsg(colorize("WARN", " * ") + \
164 "--tree is broken with --nodeps. Disabling...\n")
165 debug = "--debug" in myopts
166 verbose = "--verbose" in myopts
167 quiet = "--quiet" in myopts
168 if pretend or fetchonly:
169 # make the mtimedb readonly
170 mtimedb.filename = None
# Warn that digest regeneration can mask Manifest corruption; repoman
# is the preferred tool for that job.
171 if '--digest' in myopts or 'digest' in settings.features:
172 if '--digest' in myopts:
173 msg = "The --digest option"
175 msg = "The FEATURES=digest setting"
177 msg += " can prevent corruption from being" + \
178 " noticed. The `repoman manifest` command is the preferred" + \
179 " way to generate manifests and it is capable of doing an" + \
180 " entire repository or category at once."
182 writemsg(prefix + "\n")
183 from textwrap import wrap
184 for line in wrap(msg, 72):
185 writemsg("%s%s\n" % (prefix, line))
186 writemsg(prefix + "\n")
# Resume path: rebuild the depgraph from the saved mergelist.
189 favorites = mtimedb["resume"].get("favorites")
190 if not isinstance(favorites, list):
192 myparams = create_depgraph_params(myopts, myaction)
194 resume_data = mtimedb["resume"]
195 mergelist = resume_data["mergelist"]
# --skipfirst: locate the first pending "merge" task in the saved list.
196 if mergelist and "--skipfirst" in myopts:
197 for i, task in enumerate(mergelist):
198 if isinstance(task, list) and \
199 task and task[-1] == "merge":
206 success, mydepgraph, dropped_tasks = resume_depgraph(
207 settings, trees, mtimedb, myopts, myparams, spinner)
# Resume can fail because a saved package vanished or its deps are now
# unsatisfiable; report the stale list in detail.
208 except (portage.exception.PackageNotFound,
209 depgraph.UnsatisfiedResumeDep) as e:
210 if isinstance(e, depgraph.UnsatisfiedResumeDep):
211 mydepgraph = e.depgraph
213 from textwrap import wrap
214 from portage.output import EOutput
217 resume_data = mtimedb["resume"]
218 mergelist = resume_data.get("mergelist")
219 if not isinstance(mergelist, list):
221 if mergelist and debug or (verbose and not quiet):
222 out.eerror("Invalid resume list:")
225 for task in mergelist:
226 if isinstance(task, list):
227 out.eerror(indent + str(tuple(task)))
230 if isinstance(e, depgraph.UnsatisfiedResumeDep):
231 out.eerror("One or more packages are either masked or " + \
232 "have missing dependencies:")
237 out.eerror(indent + "Masked package:")
238 out.eerror(2 * indent + str(dep.parent))
241 out.eerror(indent + str(dep.atom) + " pulled in by:")
242 out.eerror(2 * indent + str(dep.parent))
244 msg = "The resume list contains packages " + \
245 "that are either masked or have " + \
246 "unsatisfied dependencies. " + \
247 "Please restart/continue " + \
248 "the operation manually, or use --skipfirst " + \
249 "to skip the first package in the list and " + \
250 "any other packages that may be " + \
251 "masked or have missing dependencies."
252 for line in wrap(msg, 72):
254 elif isinstance(e, portage.exception.PackageNotFound):
255 out.eerror("An expected package is " + \
256 "not available: %s" % str(e))
258 msg = "The resume list contains one or more " + \
259 "packages that are no longer " + \
260 "available. Please restart/continue " + \
261 "the operation manually."
262 for line in wrap(msg, 72):
# Report any tasks that were dropped from the resumed graph.
267 portage.writemsg("!!! One or more packages have been " + \
268 "dropped due to\n" + \
269 "!!! masking or unsatisfied dependencies:\n\n",
271 for task in dropped_tasks:
272 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
273 portage.writemsg("\n", noiselevel=-1)
276 if mydepgraph is not None:
277 mydepgraph.display_problems()
# On unattended failure, discard the stale resume lists entirely.
278 if not (ask or pretend):
279 # delete the current list and also the backup
280 # since it's probably stale too.
281 for k in ("resume", "resume_backup"):
287 if ("--resume" in myopts):
288 print(darkgreen("emerge: It seems we have nothing to resume..."))
# Fresh (non-resume) path: compute the depgraph with backtracking.
291 myparams = create_depgraph_params(myopts, myaction)
293 success, mydepgraph, favorites = backtrack_depgraph(
294 settings, trees, myopts, myparams, myaction, myfiles, spinner)
295 except portage.exception.PackageSetNotFound as e:
296 root_config = trees[settings["ROOT"]]["root_config"]
297 display_missing_pkg_set(root_config, e.value)
301 mydepgraph.display_problems()
# Interactive/verbose display: show the merge list and prompt the user
# before acting (unless --pretend/--quiet suppress it).
304 if "--pretend" not in myopts and \
305 ("--ask" in myopts or "--tree" in myopts or \
306 "--verbose" in myopts) and \
307 not ("--quiet" in myopts and "--ask" not in myopts):
308 if "--resume" in myopts:
309 mymergelist = mydepgraph.altlist()
310 if len(mymergelist) == 0:
311 print(colorize("INFORM", "emerge: It seems we have nothing to resume..."))
313 favorites = mtimedb["resume"]["favorites"]
314 retval = mydepgraph.display(
315 mydepgraph.altlist(reversed=tree),
317 mydepgraph.display_problems()
318 if retval != os.EX_OK:
320 prompt="Would you like to resume merging these packages?"
322 retval = mydepgraph.display(
323 mydepgraph.altlist(reversed=("--tree" in myopts)),
325 mydepgraph.display_problems()
326 if retval != os.EX_OK:
# Decide which prompt to show: world-favorite additions, auto-clean,
# fetch-only, or a plain merge confirmation.
329 for x in mydepgraph.altlist():
330 if isinstance(x, Package) and x.operation == "merge":
334 sets = trees[settings["ROOT"]]["root_config"].sets
335 world_candidates = None
336 if "--noreplace" in myopts and \
337 not oneshot and favorites:
338 # Sets that are not world candidates are filtered
339 # out here since the favorites list needs to be
340 # complete for depgraph.loadResumeCommand() to
342 world_candidates = [x for x in favorites \
343 if not (x.startswith(SETPREFIX) and \
344 not sets[x[1:]].world_candidate)]
345 if "--noreplace" in myopts and \
346 not oneshot and world_candidates:
348 for x in world_candidates:
349 print(" %s %s" % (good("*"), x))
350 prompt="Would you like to add these packages to your world favorites?"
351 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
352 prompt="Nothing to merge; would you like to auto-clean packages?"
355 print("Nothing to merge; quitting.")
358 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
359 prompt="Would you like to fetch the source files for these packages?"
361 prompt="Would you like to merge these packages?"
363 if "--ask" in myopts and userquery(prompt, enter_invalid) == "No":
368 # Don't ask again (e.g. when auto-cleaning packages after merge)
369 myopts.pop("--ask", None)
# --pretend (without fetch): just display the list, never merge.
371 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
372 if ("--resume" in myopts):
373 mymergelist = mydepgraph.altlist()
374 if len(mymergelist) == 0:
375 print(colorize("INFORM", "emerge: It seems we have nothing to resume..."))
377 favorites = mtimedb["resume"]["favorites"]
378 retval = mydepgraph.display(
379 mydepgraph.altlist(reversed=tree),
381 mydepgraph.display_problems()
382 if retval != os.EX_OK:
# --buildpkgonly requires a fully-merged dependency set: strip non-merge
# nodes and verify no medium-or-stronger dependency edges remain.
385 retval = mydepgraph.display(
386 mydepgraph.altlist(reversed=("--tree" in myopts)),
388 mydepgraph.display_problems()
389 if retval != os.EX_OK:
391 if "--buildpkgonly" in myopts:
392 graph_copy = mydepgraph._dynamic_config.digraph.copy()
393 removed_nodes = set()
394 for node in graph_copy:
395 if not isinstance(node, Package) or \
396 node.operation == "nomerge":
397 removed_nodes.add(node)
398 graph_copy.difference_update(removed_nodes)
399 if not graph_copy.hasallzeros(ignore_priority = \
400 DepPrioritySatisfiedRange.ignore_medium):
401 print("\n!!! --buildpkgonly requires all dependencies to be merged.")
402 print("!!! You have to merge the dependencies before you can build this package.\n")
# Same --buildpkgonly check on the non-pretend path (different message).
405 if "--buildpkgonly" in myopts:
406 graph_copy = mydepgraph._dynamic_config.digraph.copy()
407 removed_nodes = set()
408 for node in graph_copy:
409 if not isinstance(node, Package) or \
410 node.operation == "nomerge":
411 removed_nodes.add(node)
412 graph_copy.difference_update(removed_nodes)
413 if not graph_copy.hasallzeros(ignore_priority = \
414 DepPrioritySatisfiedRange.ignore_medium):
415 print("\n!!! --buildpkgonly requires all dependencies to be merged.")
416 print("!!! Cannot merge requested packages. Merge deps and try again.\n")
# Actually merge: back up the resume list, save nomerge favorites, and
# run the Scheduler over the computed graph.
419 if ("--resume" in myopts):
420 favorites=mtimedb["resume"]["favorites"]
423 if "resume" in mtimedb and \
424 "mergelist" in mtimedb["resume"] and \
425 len(mtimedb["resume"]["mergelist"]) > 1:
426 mtimedb["resume_backup"] = mtimedb["resume"]
427 del mtimedb["resume"]
430 mydepgraph.saveNomergeFavorites()
432 mergetask = Scheduler(settings, trees, mtimedb, myopts,
433 spinner, favorites=favorites,
434 graph_config=mydepgraph.schedulerGraph())
439 retval = mergetask.merge()
# Post-merge AUTOCLEAN pass (skipped for pretend/fetch/buildpkg-only).
441 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
442 if "yes" == settings.get("AUTOCLEAN"):
443 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
444 unmerge(trees[settings["ROOT"]]["root_config"],
446 ldpath_mtimes, autoclean=1)
448 portage.writemsg_stdout(colorize("WARN", "WARNING:")
449 + " AUTOCLEAN is disabled. This can cause serious"
450 + " problems due to overlapping packages.\n")
# Prune stale entries from the preserved-libs registry, if enabled.
452 trees[settings["ROOT"]]["vartree"].dbapi._plib_registry
453 if plib_registry is None:
454 # preserve-libs is entirely disabled
457 plib_registry.pruneNonExisting()
# emerge --config: run the pkg_config phase for a single installed package.
# Exactly one package atom is accepted; with --ask the user can pick among
# multiple matches and confirm before configuring.
# NOTE(review): leading digits are original-file line numbers from the
# paste; gaps in them mark elided statements.
461 def action_config(settings, trees, myopts, myfiles):
462 enter_invalid = '--ask-enter-invalid' in myopts
463 if len(myfiles) != 1:
464 print(red("!!! config can only take a single package atom at this time\n"))
466 if not is_valid_package_atom(myfiles[0]):
467 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
469 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
470 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
# Match the atom against installed packages (vartree).
474 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
475 except portage.exception.AmbiguousPackageName as e:
476 # Multiple matches thrown from cpv_expand
479 print("No packages found.\n")
# With --ask, let the user choose one of several matching versions;
# without it, just list them and require a more specific atom.
482 if "--ask" in myopts:
484 print("Please select a package to configure:")
488 options.append(str(idx))
489 print(options[-1]+") "+pkg)
# userquery returns one of the offered responses; map it back to a pkg.
492 idx = userquery("Selection?", enter_invalid, responses=options)
495 pkg = pkgs[int(idx)-1]
497 print("The following packages available:")
500 print("\nPlease use a specific atom or the --ask option.")
506 if "--ask" in myopts:
507 if userquery("Ready to configure %s?" % pkg, enter_invalid) == "No":
510 print("Configuring pkg...")
# Run the ebuild "config" phase, then "clean" on success.
512 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
513 mysettings = portage.config(clone=settings)
514 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
515 debug = mysettings.get("PORTAGE_DEBUG") == "1"
516 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
# NOTE(review): this compares PORTAGE_DEBUG (a string) against the int 1,
# so it is always False — inconsistent with the `debug` variable computed
# above using == "1"; looks like a latent bug, verify upstream.
518 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
519 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
520 if retval == os.EX_OK:
521 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
522 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
# emerge --depclean/--prune: remove installed packages that nothing in the
# world/system sets still requires. The graph calculation is delegated to
# calc_depclean(); this wrapper prints the safety warning, runs the
# calculation, unmerges the result, and prints summary statistics.
# NOTE(review): leading digits are original-file line numbers from the
# paste; gaps in them mark elided statements.
525 def action_depclean(settings, trees, ldpath_mtimes,
526 myopts, action, myfiles, spinner, scheduler=None):
527 # Kill packages that aren't explicitly merged or are required as a
528 # dependency of another package. World file is explicit.
530 # Global depclean or prune operations are not very safe when there are
531 # missing dependencies since it's unknown how badly incomplete
532 # the dependency graph is, and we might accidentally remove packages
533 # that should have been pulled into the graph. On the other hand, it's
534 # relatively safe to ignore missing deps when only asked to remove
# Standard safety warning assembled here; the revdep-rebuild advice is
# only needed when the dynamic link map is unavailable.
538 if not _ENABLE_DYN_LINK_MAP:
539 msg.append("Depclean may break link level dependencies. Thus, it is\n")
540 msg.append("recommended to use a tool such as " + good("`revdep-rebuild`") + " (from\n")
541 msg.append("app-portage/gentoolkit) in order to detect such breakage.\n")
543 msg.append("Always study the list of packages to be cleaned for any obvious\n")
544 msg.append("mistakes. Packages that are part of the world set will always\n")
545 msg.append("be kept. They can be manually added to this set with\n")
546 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
547 msg.append("package.provided (see portage(5)) will be removed by\n")
548 msg.append("depclean, even if they are part of the world set.\n")
550 msg.append("As a safety measure, depclean will not remove any packages\n")
551 msg.append("unless *all* required dependencies have been resolved. As a\n")
552 msg.append("consequence, it is often necessary to run %s\n" % \
553 good("`emerge --update"))
554 msg.append(good("--newuse --deep @world`") + \
555 " prior to depclean.\n")
# Emit the warning only for a full, non-quiet depclean (no atoms given).
557 if action == "depclean" and "--quiet" not in myopts and not myfiles:
558 portage.writemsg_stdout("\n")
560 portage.writemsg_stdout(colorize("WARN", " * ") + x)
562 root_config = trees[settings['ROOT']]['root_config']
563 vardb = root_config.trees['vartree'].dbapi
# Build the argument-atom set and bail out early when nothing installed
# matches it.
565 args_set = InternalPackageSet(allow_repo=True)
567 args_set.update(myfiles)
568 matched_packages = False
571 matched_packages = True
573 if not matched_packages:
574 writemsg_level(">>> No packages selected for removal by %s\n" % \
578 # The calculation is done in a separate function so that depgraph
579 # references go out of scope and the corresponding memory
580 # is freed before we call unmerge().
581 rval, cleanlist, ordered, req_pkg_count = \
582 calc_depclean(settings, trees, ldpath_mtimes,
583 myopts, action, args_set, spinner)
591 unmerge(root_config, myopts, "unmerge",
592 cleanlist, ldpath_mtimes, ordered=ordered,
# Final summary: installed/world/system/required counts, plus how many
# packages were (or, with --pretend, would be) removed.
595 if action == "prune":
598 if not cleanlist and "--quiet" in myopts:
601 print("Packages installed: " + str(len(vardb.cpv_all())))
602 print("Packages in world: " + \
603 str(len(root_config.sets["selected"].getAtoms())))
604 print("Packages in system: " + \
605 str(len(root_config.sets["system"].getAtoms())))
606 print("Required packages: "+str(req_pkg_count))
607 if "--pretend" in myopts:
608 print("Number to remove: "+str(len(cleanlist)))
610 print("Number removed: "+str(len(cleanlist)))
612 def calc_depclean(settings, trees, ldpath_mtimes,
613 myopts, action, args_set, spinner):
614 allow_missing_deps = bool(args_set)
616 debug = '--debug' in myopts
617 xterm_titles = "notitles" not in settings.features
618 myroot = settings["ROOT"]
619 root_config = trees[myroot]["root_config"]
620 psets = root_config.setconfig.psets
621 deselect = myopts.get('--deselect') != 'n'
623 required_sets['world'] = psets['world']
625 # When removing packages, a temporary version of the world 'selected'
626 # set may be used which excludes packages that are intended to be
627 # eligible for removal.
628 selected_set = psets['selected']
629 required_sets['selected'] = selected_set
630 protected_set = InternalPackageSet()
631 protected_set_name = '____depclean_protected_set____'
632 required_sets[protected_set_name] = protected_set
633 system_set = psets["system"]
635 if not system_set or not selected_set:
638 writemsg_level("!!! You have no system list.\n",
639 level=logging.ERROR, noiselevel=-1)
642 writemsg_level("!!! You have no world file.\n",
643 level=logging.WARNING, noiselevel=-1)
645 writemsg_level("!!! Proceeding is likely to " + \
646 "break your installation.\n",
647 level=logging.WARNING, noiselevel=-1)
648 if "--pretend" not in myopts:
649 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
651 if action == "depclean":
652 emergelog(xterm_titles, " >>> depclean")
654 writemsg_level("\nCalculating dependencies ")
655 resolver_params = create_depgraph_params(myopts, "remove")
656 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
658 vardb = resolver._frozen_config.trees[myroot]["vartree"].dbapi
659 real_vardb = trees[myroot]["vartree"].dbapi
661 if action == "depclean":
666 # Start with an empty set.
667 selected_set = InternalPackageSet()
668 required_sets['selected'] = selected_set
669 # Pull in any sets nested within the selected set.
670 selected_set.update(psets['selected'].getNonAtoms())
672 # Pull in everything that's installed but not matched
673 # by an argument atom since we don't want to clean any
674 # package if something depends on it.
680 if args_set.findAtomForPackage(pkg) is None:
681 protected_set.add("=" + pkg.cpv)
683 except portage.exception.InvalidDependString as e:
684 show_invalid_depstring_notice(pkg,
685 pkg.metadata["PROVIDE"], str(e))
687 protected_set.add("=" + pkg.cpv)
690 elif action == "prune":
693 # Start with an empty set.
694 selected_set = InternalPackageSet()
695 required_sets['selected'] = selected_set
696 # Pull in any sets nested within the selected set.
697 selected_set.update(psets['selected'].getNonAtoms())
699 # Pull in everything that's installed since we don't
700 # to prune a package if something depends on it.
701 protected_set.update(vardb.cp_all())
705 # Try to prune everything that's slotted.
706 for cp in vardb.cp_all():
707 if len(vardb.cp_list(cp)) > 1:
710 # Remove atoms from world that match installed packages
711 # that are also matched by argument atoms, but do not remove
712 # them if they match the highest installed version.
715 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
716 if not pkgs_for_cp or pkg not in pkgs_for_cp:
717 raise AssertionError("package expected in matches: " + \
718 "cp = %s, cpv = %s matches = %s" % \
719 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
721 highest_version = pkgs_for_cp[-1]
722 if pkg == highest_version:
723 # pkg is the highest version
724 protected_set.add("=" + pkg.cpv)
727 if len(pkgs_for_cp) <= 1:
728 raise AssertionError("more packages expected: " + \
729 "cp = %s, cpv = %s matches = %s" % \
730 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
733 if args_set.findAtomForPackage(pkg) is None:
734 protected_set.add("=" + pkg.cpv)
736 except portage.exception.InvalidDependString as e:
737 show_invalid_depstring_notice(pkg,
738 pkg.metadata["PROVIDE"], str(e))
740 protected_set.add("=" + pkg.cpv)
743 if resolver._frozen_config.excluded_pkgs:
744 excluded_set = resolver._frozen_config.excluded_pkgs
745 required_sets['__excluded__'] = InternalPackageSet()
752 if excluded_set.findAtomForPackage(pkg):
753 required_sets['__excluded__'].add("=" + pkg.cpv)
754 except portage.exception.InvalidDependString as e:
755 show_invalid_depstring_notice(pkg,
756 pkg.metadata["PROVIDE"], str(e))
758 required_sets['__excluded__'].add("=" + pkg.cpv)
760 success = resolver._complete_graph(required_sets={myroot:required_sets})
761 writemsg_level("\b\b... done!\n")
763 resolver.display_problems()
766 return 1, [], False, 0
768 def unresolved_deps():
771 for dep in resolver._dynamic_config._initially_unsatisfied_deps:
772 if isinstance(dep.parent, Package) and \
773 (dep.priority > UnmergeDepPriority.SOFT):
774 unresolvable.add((dep.atom, dep.parent.cpv))
779 if unresolvable and not allow_missing_deps:
781 if "--debug" in myopts:
782 writemsg("\ndigraph:\n\n", noiselevel=-1)
783 resolver._dynamic_config.digraph.debug_print()
784 writemsg("\n", noiselevel=-1)
788 msg.append("Dependencies could not be completely resolved due to")
789 msg.append("the following required packages not being installed:")
791 for atom, parent in unresolvable:
792 msg.append(" %s pulled in by:" % (atom,))
793 msg.append(" %s" % (parent,))
795 msg.append("Have you forgotten to run " + \
796 good("`emerge --update --newuse --deep @world`") + " prior")
797 msg.append(("to %s? It may be necessary to manually " + \
798 "uninstall packages that no longer") % action)
799 msg.append("exist in the portage tree since " + \
800 "it may not be possible to satisfy their")
801 msg.append("dependencies. Also, be aware of " + \
802 "the --with-bdeps option that is documented")
803 msg.append("in " + good("`man emerge`") + ".")
804 if action == "prune":
806 msg.append("If you would like to ignore " + \
807 "dependencies then use %s." % good("--nodeps"))
808 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
809 level=logging.ERROR, noiselevel=-1)
813 if unresolved_deps():
814 return 1, [], False, 0
816 graph = resolver._dynamic_config.digraph.copy()
817 required_pkgs_total = 0
819 if isinstance(node, Package):
820 required_pkgs_total += 1
822 def show_parents(child_node):
823 parent_nodes = graph.parent_nodes(child_node)
825 # With --prune, the highest version can be pulled in without any
826 # real parent since all installed packages are pulled in. In that
827 # case there's nothing to show here.
830 for node in parent_nodes:
831 parent_strs.append(str(getattr(node, "cpv", node)))
834 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
835 for parent_str in parent_strs:
836 msg.append(" %s\n" % (parent_str,))
838 portage.writemsg_stdout("".join(msg), noiselevel=-1)
840 def cmp_pkg_cpv(pkg1, pkg2):
841 """Sort Package instances by cpv."""
842 if pkg1.cpv > pkg2.cpv:
844 elif pkg1.cpv == pkg2.cpv:
849 def create_cleanlist():
851 if "--debug" in myopts:
852 writemsg("\ndigraph:\n\n", noiselevel=-1)
854 writemsg("\n", noiselevel=-1)
856 # Never display the special internal protected_set.
858 if isinstance(node, SetArg) and node.name == protected_set_name:
864 if action == "depclean":
867 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
870 arg_atom = args_set.findAtomForPackage(pkg)
871 except portage.exception.InvalidDependString:
872 # this error has already been displayed by now
877 pkgs_to_remove.append(pkg)
878 elif "--verbose" in myopts:
882 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
884 pkgs_to_remove.append(pkg)
885 elif "--verbose" in myopts:
888 elif action == "prune":
890 for atom in args_set:
891 for pkg in vardb.match_pkgs(atom):
893 pkgs_to_remove.append(pkg)
894 elif "--verbose" in myopts:
897 if not pkgs_to_remove:
899 ">>> No packages selected for removal by %s\n" % action)
900 if "--verbose" not in myopts:
902 ">>> To see reverse dependencies, use %s\n" % \
904 if action == "prune":
906 ">>> To ignore dependencies, use %s\n" % \
909 return pkgs_to_remove
911 cleanlist = create_cleanlist()
912 clean_set = set(cleanlist)
915 real_vardb._linkmap is not None and \
916 myopts.get('--depclean-lib-check') != 'n':
918 # Check if any of these packages are the sole providers of libraries
919 # with consumers that have not been selected for removal. If so, these
920 # packages and any dependencies need to be added to the graph.
921 linkmap = real_vardb._linkmap
926 writemsg_level(">>> Checking for lib consumers...\n")
928 for pkg in cleanlist:
929 pkg_dblink = real_vardb._dblink(pkg.cpv)
932 for lib in pkg_dblink.getcontents():
933 lib = lib[len(myroot):]
934 lib_key = linkmap._obj_key(lib)
935 lib_consumers = consumer_cache.get(lib_key)
936 if lib_consumers is None:
938 lib_consumers = linkmap.findConsumers(lib_key)
941 consumer_cache[lib_key] = lib_consumers
943 consumers[lib_key] = lib_consumers
948 for lib, lib_consumers in list(consumers.items()):
949 for consumer_file in list(lib_consumers):
950 if pkg_dblink.isowner(consumer_file):
951 lib_consumers.remove(consumer_file)
952 if not lib_consumers:
958 for lib, lib_consumers in consumers.items():
960 soname = linkmap.getSoname(lib)
962 consumer_providers = []
963 for lib_consumer in lib_consumers:
964 providers = provider_cache.get(lib)
965 if providers is None:
966 providers = linkmap.findProviders(lib_consumer)
967 provider_cache[lib_consumer] = providers
968 if soname not in providers:
969 # Why does this happen?
971 consumer_providers.append(
972 (lib_consumer, providers[soname]))
974 consumers[lib] = consumer_providers
976 consumer_map[pkg] = consumers
981 for consumers in consumer_map.values():
982 for lib, consumer_providers in consumers.items():
983 for lib_consumer, providers in consumer_providers:
984 search_files.add(lib_consumer)
985 search_files.update(providers)
987 writemsg_level(">>> Assigning files to packages...\n")
988 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
990 for pkg, consumers in list(consumer_map.items()):
991 for lib, consumer_providers in list(consumers.items()):
992 lib_consumers = set()
994 for lib_consumer, providers in consumer_providers:
995 owner_set = file_owners.get(lib_consumer)
996 provider_dblinks = set()
997 provider_pkgs = set()
999 if len(providers) > 1:
1000 for provider in providers:
1001 provider_set = file_owners.get(provider)
1002 if provider_set is not None:
1003 provider_dblinks.update(provider_set)
1005 if len(provider_dblinks) > 1:
1006 for provider_dblink in provider_dblinks:
1007 provider_pkg = resolver._pkg(
1008 provider_dblink.mycpv, "installed",
1009 root_config, installed=True)
1010 if provider_pkg not in clean_set:
1011 provider_pkgs.add(provider_pkg)
1016 if owner_set is not None:
1017 lib_consumers.update(owner_set)
1019 for consumer_dblink in list(lib_consumers):
1020 if resolver._pkg(consumer_dblink.mycpv, "installed",
1021 root_config, installed=True) in clean_set:
1022 lib_consumers.remove(consumer_dblink)
1026 consumers[lib] = lib_consumers
1030 del consumer_map[pkg]
1033 # TODO: Implement a package set for rebuilding consumer packages.
1035 msg = "In order to avoid breakage of link level " + \
1036 "dependencies, one or more packages will not be removed. " + \
1037 "This can be solved by rebuilding " + \
1038 "the packages that pulled them in."
1041 from textwrap import wrap
1042 writemsg_level("".join(prefix + "%s\n" % line for \
1043 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
1046 for pkg in sorted(consumer_map, key=cmp_sort_key(cmp_pkg_cpv)):
1047 consumers = consumer_map[pkg]
1049 for lib, lib_consumers in consumers.items():
1050 for consumer in lib_consumers:
1051 consumer_libs.setdefault(
1052 consumer.mycpv, set()).add(linkmap.getSoname(lib))
1053 unique_consumers = set(chain(*consumers.values()))
1054 unique_consumers = sorted(consumer.mycpv \
1055 for consumer in unique_consumers)
1057 msg.append(" %s pulled in by:" % (pkg.cpv,))
1058 for consumer in unique_consumers:
1059 libs = consumer_libs[consumer]
1060 msg.append(" %s needs %s" % \
1061 (consumer, ', '.join(sorted(libs))))
1063 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
1064 level=logging.WARNING, noiselevel=-1)
1066 # Add lib providers to the graph as children of lib consumers,
1067 # and also add any dependencies pulled in by the provider.
1068 writemsg_level(">>> Adding lib providers to graph...\n")
1070 for pkg, consumers in consumer_map.items():
1071 for consumer_dblink in set(chain(*consumers.values())):
1072 consumer_pkg = resolver._pkg(consumer_dblink.mycpv,
1073 "installed", root_config, installed=True)
1074 if not resolver._add_pkg(pkg,
1075 Dependency(parent=consumer_pkg,
1076 priority=UnmergeDepPriority(runtime=True),
1078 resolver.display_problems()
1079 return 1, [], False, 0
1081 writemsg_level("\nCalculating dependencies ")
1082 success = resolver._complete_graph(
1083 required_sets={myroot:required_sets})
1084 writemsg_level("\b\b... done!\n")
1085 resolver.display_problems()
1087 return 1, [], False, 0
1088 if unresolved_deps():
1089 return 1, [], False, 0
1091 graph = resolver._dynamic_config.digraph.copy()
1092 required_pkgs_total = 0
1094 if isinstance(node, Package):
1095 required_pkgs_total += 1
1096 cleanlist = create_cleanlist()
1098 return 0, [], False, required_pkgs_total
1099 clean_set = set(cleanlist)
1102 writemsg_level(">>> Calculating removal order...\n")
1103 # Use a topological sort to create an unmerge order such that
1104 # each package is unmerged before it's dependencies. This is
1105 # necessary to avoid breaking things that may need to run
1106 # during pkg_prerm or pkg_postrm phases.
1108 # Create a new graph to account for dependencies between the
1109 # packages being unmerged.
1113 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1114 runtime = UnmergeDepPriority(runtime=True)
1115 runtime_post = UnmergeDepPriority(runtime_post=True)
1116 buildtime = UnmergeDepPriority(buildtime=True)
1119 "PDEPEND": runtime_post,
1120 "DEPEND": buildtime,
1123 for node in clean_set:
1124 graph.add(node, None)
1126 for dep_type in dep_keys:
1127 depstr = node.metadata[dep_type]
1130 priority = priority_map[dep_type]
1133 writemsg_level(_unicode_decode("\nParent: %s\n") \
1134 % (node,), noiselevel=-1, level=logging.DEBUG)
1135 writemsg_level(_unicode_decode( "Depstring: %s\n") \
1136 % (depstr,), noiselevel=-1, level=logging.DEBUG)
1137 writemsg_level(_unicode_decode( "Priority: %s\n") \
1138 % (priority,), noiselevel=-1, level=logging.DEBUG)
1141 atoms = resolver._select_atoms(myroot, depstr,
1142 myuse=node.use.enabled, parent=node,
1143 priority=priority)[node]
1144 except portage.exception.InvalidDependString:
1145 # Ignore invalid deps of packages that will
1146 # be uninstalled anyway.
1150 writemsg_level("Candidates: [%s]\n" % \
1151 ', '.join(_unicode_decode("'%s'") % (x,) for x in atoms),
1152 noiselevel=-1, level=logging.DEBUG)
1155 if not isinstance(atom, portage.dep.Atom):
1156 # Ignore invalid atoms returned from dep_check().
1160 matches = vardb.match_pkgs(atom)
1163 for child_node in matches:
1164 if child_node in clean_set:
1165 graph.add(child_node, node, priority=priority)
1168 writemsg_level("\nunmerge digraph:\n\n",
1169 noiselevel=-1, level=logging.DEBUG)
1171 writemsg_level("\n", noiselevel=-1, level=logging.DEBUG)
1174 if len(graph.order) == len(graph.root_nodes()):
1175 # If there are no dependencies between packages
1176 # let unmerge() group them by cat/pn.
1178 cleanlist = [pkg.cpv for pkg in graph.order]
1180 # Order nodes from lowest to highest overall reference count for
1181 # optimal root node selection (this can help minimize issues
1182 # with unaccounted implicit dependencies).
1184 for node in graph.order:
1185 node_refcounts[node] = len(graph.parent_nodes(node))
1186 def cmp_reference_count(node1, node2):
1187 return node_refcounts[node1] - node_refcounts[node2]
1188 graph.order.sort(key=cmp_sort_key(cmp_reference_count))
1190 ignore_priority_range = [None]
1191 ignore_priority_range.extend(
1192 range(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
1193 while not graph.empty():
1194 for ignore_priority in ignore_priority_range:
1195 nodes = graph.root_nodes(ignore_priority=ignore_priority)
1199 raise AssertionError("no root nodes")
1200 if ignore_priority is not None:
1201 # Some deps have been dropped due to circular dependencies,
1202 # so only pop one node in order to minimize the number that
1207 cleanlist.append(node.cpv)
1209 return 0, cleanlist, ordered, required_pkgs_total
1210 return 0, [], False, required_pkgs_total
1212 def action_deselect(settings, trees, opts, atoms):
# Remove the given atoms (and/or @set names) from the world "selected" set.
# NOTE(review): this extract is missing interleaved source lines (gaps in
# the embedded numbering), e.g. the early return after the mutability check
# and the for-loop header over `atoms` — confirm against the full file.
1213 enter_invalid = '--ask-enter-invalid' in opts
1214 root_config = trees[settings['ROOT']]['root_config']
1215 world_set = root_config.sets['selected']
# A world set without an update() method cannot be modified; report and bail.
1216 if not hasattr(world_set, 'update'):
1217 writemsg_level("World @selected set does not appear to be mutable.\n",
1218 level=logging.ERROR, noiselevel=-1)
1221 vardb = root_config.trees['vartree'].dbapi
1222 expanded_atoms = set(atoms)
1223 from portage.dep import Atom
# Expand plain package atoms to slot atoms (cat/pkg:slot) for each
# installed match, so slotted world entries are discarded too.
1225 if not atom.startswith(SETPREFIX):
1226 for cpv in vardb.match(atom):
1227 slot, = vardb.aux_get(cpv, ['SLOT'])
1230 expanded_atoms.add(Atom('%s:%s' % (portage.cpv_getkey(cpv), slot)))
1232 pretend = '--pretend' in opts
# Lock the world file before mutating it (skipped in --pretend mode).
1234 if not pretend and hasattr(world_set, 'lock'):
1238 discard_atoms = set()
# Collect world entries matched by any argument atom. Set arguments
# (SETPREFIX) match set entries; package atoms match via intersects(),
# but only when the argument is at least as specific w.r.t. slot/repo.
1240 for atom in world_set:
1241 for arg_atom in expanded_atoms:
1242 if arg_atom.startswith(SETPREFIX):
1243 if atom.startswith(SETPREFIX) and \
1245 discard_atoms.add(atom)
1248 if not atom.startswith(SETPREFIX) and \
1249 arg_atom.intersects(atom) and \
1250 not (arg_atom.slot and not atom.slot) and \
1251 not (arg_atom.repo and not atom.repo):
1252 discard_atoms.add(atom)
# Report each entry to be removed; wording differs for --pretend.
1255 for atom in sorted(discard_atoms):
1257 print(">>> Would remove %s from \"world\" favorites file..." % \
1258 colorize("INFORM", str(atom)))
1260 print(">>> Removing %s from \"world\" favorites file..." % \
1261 colorize("INFORM", str(atom)))
# Interactive confirmation before actually rewriting the world set.
1264 prompt = "Would you like to remove these " + \
1265 "packages from your world favorites?"
1266 if userquery(prompt, enter_invalid) == 'No':
# Replace the world set contents with everything not discarded.
1269 remaining = set(world_set)
1270 remaining.difference_update(discard_atoms)
1272 world_set.replace(remaining)
1274 print(">>> No matching atoms found in \"world\" favorites file...")
# Sortable wrapper for a package version string plus display suffixes,
# used by action_info when printing "info_pkgs" version lists.
1280 class _info_pkgs_ver(object):
1281 def __init__(self, ver, repo_suffix, provide_suffix):
# NOTE(review): the `self.ver = ver` assignment (orig line 1282) is not
# visible in this extract but is read by __lt__/toString below.
1283 self.repo_suffix = repo_suffix
1284 self.provide_suffix = provide_suffix
# Order instances by Portage version comparison of .ver.
1286 def __lt__(self, other):
1287 return portage.versions.vercmp(self.ver, other.ver) < 0
1291 This may return unicode if repo_name contains unicode.
1292 Don't use __str__ and str() since unicode triggers compatibility
1293 issues between python 2.x and 3.x.
1295 return self.ver + self.repo_suffix + self.provide_suffix
1297 def action_info(settings, trees, myopts, myfiles):
# Implement `emerge --info`: print system settings (portage version, uname,
# tree timestamp, tool versions, key variables, USE flags), and for any
# packages named in myfiles, print their build-time settings and run their
# pkg_info() phase. NOTE(review): this extract is missing many interleaved
# source lines (try/except headers, else branches, returns) — the added
# comments describe only the visible statements.
1298 print(getportageversion(settings["PORTDIR"], settings["ROOT"],
1299 settings.profile_path, settings["CHOST"],
1300 trees[settings["ROOT"]]["vartree"].dbapi))
# --- "System Settings" banner ---
1302 header_title = "System Settings"
1304 print(header_width * "=")
1305 print(header_title.rjust(int(header_width/2 + len(header_title)/2)))
1306 print(header_width * "=")
1307 print("System uname: "+platform.platform(aliased=1))
# Last sync timestamp is read from the tree's metadata/timestamp.chk.
1309 lastSync = portage.grabfile(os.path.join(
1310 settings["PORTDIR"], "metadata", "timestamp.chk"))
1311 print("Timestamp of tree:", end=' ')
# Probe distcc/ccache versions via shell; only relevant when the
# corresponding FEATURES flag is set.
1317 output=subprocess_getstatusoutput("distcc --version")
1319 print(str(output[1].split("\n",1)[0]), end=' ')
1320 if "distcc" in settings.features:
1325 output=subprocess_getstatusoutput("ccache -V")
1327 print(str(output[1].split("\n",1)[0]), end=' ')
1328 if "ccache" in settings.features:
# Toolchain packages whose installed versions are reported, extended by
# the profile's info_pkgs list.
1333 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
1334 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
1335 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
1336 myvars = portage.util.unique_array(myvars)
1339 portdb = trees["/"]["porttree"].dbapi
1340 vardb = trees["/"]["vartree"].dbapi
1341 main_repo = portdb.getRepositoryName(portdb.porttree_root)
# For each valid atom, collect installed versions with a ::repo suffix
# (omitted for the main repo) and a "(cat/pn)" suffix for virtual matches.
1344 if portage.isvalidatom(x):
1345 pkg_matches = vardb.match(x)
1348 for cpv in pkg_matches:
1349 ver = portage.versions.cpv_getversion(cpv)
1350 repo = vardb.aux_get(cpv, ["repository"])[0]
1351 if repo == main_repo:
1354 repo_suffix = "::<unknown repository>"
1356 repo_suffix = "::" + repo
1358 matched_cp = portage.versions.cpv_getkey(cpv)
1362 provide_suffix = " (%s)" % matched_cp
1365 _info_pkgs_ver(ver, repo_suffix, provide_suffix))
1370 versions = ", ".join(ver.toString() for ver in versions)
1371 writemsg_stdout("%-20s %s\n" % (x+":", versions),
1374 writemsg_stdout("%-20s %s\n" % (x+":", "[NOT VALID]"),
1377 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
# Repository summary: verbose mode prints full info_string() per repo.
1379 repos = portdb.settings.repositories
1380 if "--verbose" in myopts:
1381 writemsg_stdout("Repositories:\n\n")
1383 writemsg_stdout(repo.info_string())
1385 writemsg_stdout("Repositories: %s\n" % \
1386 " ".join(repo.name for repo in repos))
# Variable dump: all settings in verbose mode, otherwise a curated list
# plus the profile's info_vars additions.
1388 if "--verbose" in myopts:
1389 myvars = list(settings)
1391 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
1392 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
1393 'PORTDIR_OVERLAY', 'PORTAGE_BUNZIP2_COMMAND',
1394 'PORTAGE_BZIP2_COMMAND',
1395 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
1396 'ACCEPT_KEYWORDS', 'ACCEPT_LICENSE', 'SYNC', 'FEATURES',
1397 'EMERGE_DEFAULT_OPTS']
1399 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
# Variables whose default values are suppressed from the output.
1401 myvars_ignore_defaults = {
1402 'PORTAGE_BZIP2_COMMAND' : 'bzip2',
1405 myvars = portage.util.unique_array(myvars)
1406 use_expand = settings.get('USE_EXPAND', '').split()
1408 use_expand_hidden = set(
1409 settings.get('USE_EXPAND_HIDDEN', '').upper().split())
1410 alphabetical_use = '--alphabetical' in myopts
1411 root_config = trees[settings["ROOT"]]['root_config']
# Skip variables that still have their known default value.
1417 default = myvars_ignore_defaults.get(x)
1418 if default is not None and \
1419 default == settings[x]:
1421 writemsg_stdout('%s="%s"\n' % (x, settings[x]), noiselevel=-1)
# USE is printed specially: USE_EXPAND-prefixed flags are split out of
# the plain USE list and reported under their own variable names.
1423 use = set(settings["USE"].split())
1424 for varname in use_expand:
1425 flag_prefix = varname.lower() + "_"
1427 if f.startswith(flag_prefix):
1431 print('USE="%s"' % " ".join(use), end=' ')
1432 for varname in use_expand:
1433 myval = settings.get(varname)
1435 print('%s="%s"' % (varname, myval), end=' ')
1438 unset_vars.append(x)
1440 print("Unset: "+", ".join(unset_vars))
# --debug: dump cvs_id_string of every portage submodule that has one.
1443 if "--debug" in myopts:
1444 for x in dir(portage):
1445 module = getattr(portage, x)
1446 if "cvs_id_string" in dir(module):
1447 print("%s: %s" % (str(x), str(module.cvs_id_string)))
1449 # See if we can find any packages installed matching the strings
1450 # passed on the command line
1452 vardb = trees[settings["ROOT"]]["vartree"].dbapi
1453 portdb = trees[settings["ROOT"]]["porttree"].dbapi
1454 bindb = trees[settings["ROOT"]]["bintree"].dbapi
1457 installed_match = vardb.match(x)
1458 for installed in installed_match:
1459 mypkgs.append((installed, "installed"))
# Fall back to ebuild/binary matches; binaries only with --usepkg, and
# only non-remote ones whose EAPI defines an info phase.
1465 for db, pkg_type in ((portdb, "ebuild"), (bindb, "binary")):
1466 if pkg_type == "binary" and "--usepkg" not in myopts:
1469 matches = db.match(x)
1471 for match in matches:
1472 if pkg_type == "binary":
1473 if db.bintree.isremote(match):
1475 auxkeys = ["EAPI", "DEFINED_PHASES"]
1476 metadata = dict(zip(auxkeys, db.aux_get(match, auxkeys)))
1477 if metadata["EAPI"] not in ("0", "1", "2", "3") and \
1478 "info" in metadata["DEFINED_PHASES"].split():
1479 mypkgs.append((match, pkg_type))
1482 # If some packages were found...
1484 # Get our global settings (we only print stuff if it varies from
1485 # the current config)
1486 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
1487 auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
1488 auxkeys.append('DEFINED_PHASES')
1490 pkgsettings = portage.config(clone=settings)
1492 # Loop through each package
1493 # Only print settings if they differ from global settings
1494 header_title = "Package Settings"
1495 print(header_width * "=")
1496 print(header_title.rjust(int(header_width/2 + len(header_title)/2)))
1497 print(header_width * "=")
1498 from portage.output import EOutput
1500 for mypkg in mypkgs:
1503 # Get all package specific variables
1504 if pkg_type == "installed":
1505 metadata = dict(zip(auxkeys, vardb.aux_get(cpv, auxkeys)))
1506 elif pkg_type == "ebuild":
1507 metadata = dict(zip(auxkeys, portdb.aux_get(cpv, auxkeys)))
1508 elif pkg_type == "binary":
1509 metadata = dict(zip(auxkeys, bindb.aux_get(cpv, auxkeys)))
1511 pkg = Package(built=(pkg_type!="ebuild"), cpv=cpv,
1512 installed=(pkg_type=="installed"), metadata=zip(Package.metadata_keys,
1513 (metadata.get(x, '') for x in Package.metadata_keys)),
1514 root_config=root_config, type_name=pkg_type)
1516 if pkg_type == "installed":
1517 print("\n%s was built with the following:" % \
1518 colorize("INFORM", str(pkg.cpv)))
1519 elif pkg_type == "ebuild":
1520 print("\n%s would be build with the following:" % \
1521 colorize("INFORM", str(pkg.cpv)))
1522 elif pkg_type == "binary":
1523 print("\n%s (non-installed binary) was built with the following:" % \
1524 colorize("INFORM", str(pkg.cpv)))
1526 writemsg_stdout('%s\n' % pkg_use_display(pkg, myopts),
# For installed packages, show build-time vars that differ from the
# current global settings.
1528 if pkg_type == "installed":
1529 for myvar in mydesiredvars:
1530 if metadata[myvar].split() != settings.get(myvar, '').split():
1531 print("%s=\"%s\"" % (myvar, metadata[myvar]))
# Only run pkg_info() when the ebuild actually defines an info phase.
1534 if metadata['DEFINED_PHASES']:
1535 if 'info' not in metadata['DEFINED_PHASES'].split():
1538 print(">>> Attempting to run pkg_info() for '%s'" % pkg.cpv)
# Locate the ebuild. For binaries, extract it from the tbz2 into a
# temp dir (removed at orig line 1573 below).
1540 if pkg_type == "installed":
1541 ebuildpath = vardb.findname(pkg.cpv)
1542 elif pkg_type == "ebuild":
1543 ebuildpath = portdb.findname(pkg.cpv, myrepo=pkg.repo)
1544 elif pkg_type == "binary":
1545 tbz2_file = bindb.bintree.getname(pkg.cpv)
1546 ebuild_file_name = pkg.cpv.split("/")[1] + ".ebuild"
1547 ebuild_file_contents = portage.xpak.tbz2(tbz2_file).getfile(ebuild_file_name)
1548 tmpdir = tempfile.mkdtemp()
1549 ebuildpath = os.path.join(tmpdir, ebuild_file_name)
1550 file = open(ebuildpath, 'w')
1551 file.write(ebuild_file_contents)
1554 if not ebuildpath or not os.path.exists(ebuildpath):
1555 out.ewarn("No ebuild found for '%s'" % pkg.cpv)
# Run the "info" phase against the dbapi matching the package type.
1558 if pkg_type == "installed":
1559 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
1560 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
1561 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
1563 elif pkg_type == "ebuild":
1564 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
1565 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
1566 mydbapi=trees[settings["ROOT"]]["porttree"].dbapi,
1568 elif pkg_type == "binary":
1569 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
1570 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
1571 mydbapi=trees[settings["ROOT"]]["bintree"].dbapi,
1573 shutil.rmtree(tmpdir)
1575 def action_metadata(settings, portdb, myopts, porttrees=None):
# Transfer pregenerated metadata cache (metadata/cache) into the local
# depcache for each port tree, validating eclass data, then prune stale
# cache entries. NOTE(review): this extract is missing interleaved source
# lines (try headers, continues, returns) — comments describe only the
# visible statements.
1576 if porttrees is None:
1577 porttrees = portdb.porttrees
1578 portage.writemsg_stdout("\n>>> Updating Portage cache\n")
1579 old_umask = os.umask(0o002)
1580 cachedir = os.path.normpath(settings.depcachedir)
# Refuse to operate on a cache dir pointing at a primary system directory.
1581 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
1582 "/lib", "/opt", "/proc", "/root", "/sbin",
1583 "/sys", "/tmp", "/usr", "/var"]:
1584 print("!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
1585 "ROOT DIRECTORY ON YOUR SYSTEM.", file=sys.stderr)
1586 print("!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir, file=sys.stderr)
1588 if not os.path.exists(cachedir):
1589 os.makedirs(cachedir)
# Cache keys, minus placeholder UNUSED_0* slots.
1591 auxdbkeys = [x for x in portage.auxdbkeys if not x.startswith("UNUSED_0")]
1592 auxdbkeys = tuple(auxdbkeys)
# Per-tree bundle: destination cache, eclass db, tree path, source cache,
# and the set of cpvs seen (used later to compute dead cache entries).
1594 class TreeData(object):
1595 __slots__ = ('dest_db', 'eclass_db', 'path', 'src_db', 'valid_nodes')
1596 def __init__(self, dest_db, eclass_db, path, src_db):
1597 self.dest_db = dest_db
1598 self.eclass_db = eclass_db
1600 self.src_db = src_db
1601 self.valid_nodes = set()
# Build TreeData for every tree that has a usable source cache, either
# pregenerated or read from metadata/cache on disk.
1604 for path in porttrees:
1605 src_db = portdb._pregen_auxdb.get(path)
1606 if src_db is None and \
1607 os.path.isdir(os.path.join(path, 'metadata', 'cache')):
1608 src_db = portdb.metadbmodule(
1609 path, 'metadata/cache', auxdbkeys, readonly=True)
1611 src_db.ec = portdb._repo_info[path].eclass_db
1612 except AttributeError:
1615 if src_db is not None:
1616 porttrees_data.append(TreeData(portdb.auxdb[path],
1617 portdb._repo_info[path].eclass_db, path, src_db))
1619 porttrees = [tree_data.path for tree_data in porttrees_data]
# Suppress the progress bar on dumb terminals, --quiet, or non-ttys.
1621 quiet = settings.get('TERM') == 'dumb' or \
1622 '--quiet' in myopts or \
1623 not sys.stdout.isatty()
1627 progressBar = portage.output.TermProgressBar()
1628 progressHandler = ProgressHandler()
1629 onProgress = progressHandler.onProgress
1631 progressBar.set(progressHandler.curval, progressHandler.maxval)
1632 progressHandler.display = display
# Resize the progress bar when the terminal window changes size.
1633 def sigwinch_handler(signum, frame):
1634 lines, progressBar.term_columns = \
1635 portage.output.get_term_size()
1636 signal.signal(signal.SIGWINCH, sigwinch_handler)
1638 # Temporarily override portdb.porttrees so portdb.cp_all()
1639 # will only return the relevant subset.
1640 portdb_porttrees = portdb.porttrees
1641 portdb.porttrees = porttrees
1643 cp_all = portdb.cp_all()
1645 portdb.porttrees = portdb_porttrees
1648 maxval = len(cp_all)
1649 if onProgress is not None:
1650 onProgress(maxval, curval)
1652 from portage.cache.util import quiet_mirroring
1653 from portage import eapi_is_supported, \
1654 _validate_cache_for_unsupported_eapis
1656 # TODO: Display error messages, but do not interfere with the progress bar.
1658 # 1) erase the progress bar
1659 # 2) show the error message
1660 # 3) redraw the progress bar on a new line
1661 noise = quiet_mirroring()
# Main transfer loop: for each category/package, copy each version's
# source cache entry into the destination cache if it differs.
1664 for tree_data in porttrees_data:
1665 for cpv in portdb.cp_list(cp, mytree=tree_data.path):
1666 tree_data.valid_nodes.add(cpv)
1668 src = tree_data.src_db[cpv]
1669 except KeyError as e:
1670 noise.missing_entry(cpv)
1673 except CacheError as ce:
1674 noise.exception(cpv, ce)
# A leading '-' on EAPI marks a previously-detected unsupported EAPI.
1678 eapi = src.get('EAPI')
1681 eapi = eapi.lstrip('-')
1682 eapi_supported = eapi_is_supported(eapi)
1683 if not eapi_supported:
1684 if not _validate_cache_for_unsupported_eapis:
1685 noise.misc(cpv, "unable to validate " + \
1686 "cache for EAPI='%s'" % eapi)
1691 dest = tree_data.dest_db[cpv]
1692 except (KeyError, CacheError):
1695 for d in (src, dest):
1696 if d is not None and d.get('EAPI') in ('', '0'):
# Compare mtime, eclass validity and eclass sets to decide whether the
# existing destination entry can be reused.
1699 if dest is not None:
1700 if not (dest['_mtime_'] == src['_mtime_'] and \
1701 tree_data.eclass_db.is_eclass_data_valid(
1702 dest['_eclasses_']) and \
1703 set(dest['_eclasses_']) == set(src['_eclasses_'])):
1706 # We don't want to skip the write unless we're really
1707 # sure that the existing cache is identical, so don't
1708 # trust _mtime_ and _eclasses_ alone.
1709 for k in set(chain(src, dest)).difference(
1710 ('_mtime_', '_eclasses_')):
1711 if dest.get(k, '') != src.get(k, ''):
1715 if dest is not None:
1716 # The existing data is valid and identical,
1717 # so there's no need to overwrite it.
1721 inherited = src.get('INHERITED', '')
1722 eclasses = src.get('_eclasses_')
1723 except CacheError as ce:
1724 noise.exception(cpv, ce)
1728 if eclasses is not None:
1729 if not tree_data.eclass_db.is_eclass_data_valid(
1731 noise.eclass_stale(cpv)
1733 inherited = eclasses
1735 inherited = inherited.split()
1737 if tree_data.src_db.complete_eclass_entries and \
1739 noise.corruption(cpv, "missing _eclasses_ field")
1743 # Even if _eclasses_ already exists, replace it with data from
1744 # eclass_cache, in order to insert local eclass paths.
1746 eclasses = tree_data.eclass_db.get_eclass_data(inherited)
1748 # INHERITED contains a non-existent eclass.
1749 noise.eclass_stale(cpv)
1752 if eclasses is None:
1753 noise.eclass_stale(cpv)
1755 src['_eclasses_'] = eclasses
1757 src['_eclasses_'] = {}
# For unsupported EAPIs, store a stub entry ('-'-prefixed EAPI) instead
# of full metadata.
1759 if not eapi_supported:
1761 'EAPI' : '-' + eapi,
1762 '_mtime_' : src['_mtime_'],
1763 '_eclasses_' : src['_eclasses_'],
1767 tree_data.dest_db[cpv] = src
1768 except CacheError as ce:
1769 noise.exception(cpv, ce)
1773 if onProgress is not None:
1774 onProgress(maxval, curval)
1776 if onProgress is not None:
1777 onProgress(maxval, curval)
# Prune destination-cache entries for cpvs no longer present in the tree.
1779 for tree_data in porttrees_data:
1781 dead_nodes = set(tree_data.dest_db)
1782 except CacheError as e:
1783 writemsg_level("Error listing cache entries for " + \
1784 "'%s': %s, continuing...\n" % (tree_data.path, e),
1785 level=logging.ERROR, noiselevel=-1)
1788 dead_nodes.difference_update(tree_data.valid_nodes)
1789 for cpv in dead_nodes:
1791 del tree_data.dest_db[cpv]
1792 except (KeyError, CacheError):
1796 # make sure the final progress is displayed
1797 progressHandler.display()
# Restore the default SIGWINCH handler installed above.
1799 signal.signal(signal.SIGWINCH, signal.SIG_DFL)
1804 def action_regen(settings, portdb, max_jobs, max_load):
# Implement `emerge --regen`: regenerate metadata cache entries via
# MetadataRegen, with SIGINT/SIGTERM handled so a partial run exits with
# the conventional 128+signum status. NOTE(review): the try/finally
# structure around the run (orig lines 1829-1832) is not visible in this
# extract — confirm against the full file.
1805 xterm_titles = "notitles" not in settings.features
1806 emergelog(xterm_titles, " === regen")
1807 #regenerate cache entries
# Close stdin; child ebuild processes must not block reading from it.
1809 os.close(sys.stdin.fileno())
1810 except SystemExit as e:
1811 raise # Needed else can't exit
1816 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
1817 received_signal = []
# Signal handler: ignore further signals, log the event, and record the
# shell-style exit status (128 + signal number) for later sys.exit().
1819 def emergeexitsig(signum, frame):
1820 signal.signal(signal.SIGINT, signal.SIG_IGN)
1821 signal.signal(signal.SIGTERM, signal.SIG_IGN)
1822 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % \
1825 received_signal.append(128 + signum)
1827 earlier_sigint_handler = signal.signal(signal.SIGINT, emergeexitsig)
1828 earlier_sigterm_handler = signal.signal(signal.SIGTERM, emergeexitsig)
1833 # Restore previous handlers
1834 if earlier_sigint_handler is not None:
1835 signal.signal(signal.SIGINT, earlier_sigint_handler)
1837 signal.signal(signal.SIGINT, signal.SIG_DFL)
1838 if earlier_sigterm_handler is not None:
1839 signal.signal(signal.SIGTERM, earlier_sigterm_handler)
1841 signal.signal(signal.SIGTERM, signal.SIG_DFL)
# If a signal arrived during the run, exit with its recorded status.
1844 sys.exit(received_signal[0])
1846 portage.writemsg_stdout("done!\n")
1847 return regen.returncode
1849 def action_search(root_config, myopts, myfiles, spinner):
# Implement `emerge --search`: run each search term through a search
# instance configured from the relevant command-line options.
# NOTE(review): the guard around the no-terms message and the try header
# for execute() (orig lines 1850, 1858) are not visible in this extract.
1851 print("emerge: no search terms provided.")
1853 searchinstance = search(root_config,
1854 spinner, "--searchdesc" in myopts,
1855 "--quiet" not in myopts, "--usepkg" in myopts,
1856 "--usepkgonly" in myopts)
1857 for mysearch in myfiles:
1859 searchinstance.execute(mysearch)
# Search terms are treated as regular expressions; report bad patterns.
1860 except re.error as comment:
1861 print("\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment ))
1863 searchinstance.output()
1865 def action_sync(settings, trees, mtimedb, myopts, myaction):
1866 enter_invalid = '--ask-enter-invalid' in myopts
1867 xterm_titles = "notitles" not in settings.features
1868 emergelog(xterm_titles, " === sync")
1869 portdb = trees[settings["ROOT"]]["porttree"].dbapi
1870 myportdir = portdb.porttree_root
1872 myportdir = settings.get('PORTDIR', '')
1873 if myportdir and myportdir.strip():
1874 myportdir = os.path.realpath(myportdir)
1877 out = portage.output.EOutput()
1878 global_config_path = GLOBAL_CONFIG_PATH
1879 if settings['EPREFIX']:
1880 global_config_path = os.path.join(settings['EPREFIX'],
1881 GLOBAL_CONFIG_PATH.lstrip(os.sep))
1883 sys.stderr.write("!!! PORTDIR is undefined. " + \
1884 "Is %s/make.globals missing?\n" % global_config_path)
1886 if myportdir[-1]=="/":
1887 myportdir=myportdir[:-1]
1889 st = os.stat(myportdir)
1893 print(">>>",myportdir,"not found, creating it.")
1894 portage.util.ensure_dirs(myportdir, mode=0o755)
1895 st = os.stat(myportdir)
1899 spawn_kwargs["env"] = settings.environ()
1900 if 'usersync' in settings.features and \
1901 portage.data.secpass >= 2 and \
1902 (st.st_uid != os.getuid() and st.st_mode & 0o700 or \
1903 st.st_gid != os.getgid() and st.st_mode & 0o070):
1905 homedir = pwd.getpwuid(st.st_uid).pw_dir
1909 # Drop privileges when syncing, in order to match
1910 # existing uid/gid settings.
1911 usersync_uid = st.st_uid
1912 spawn_kwargs["uid"] = st.st_uid
1913 spawn_kwargs["gid"] = st.st_gid
1914 spawn_kwargs["groups"] = [st.st_gid]
1915 spawn_kwargs["env"]["HOME"] = homedir
1917 if not st.st_mode & 0o020:
1918 umask = umask | 0o020
1919 spawn_kwargs["umask"] = umask
1921 if usersync_uid is not None:
1922 # PORTAGE_TMPDIR is used below, so validate it and
1923 # bail out if necessary.
1924 rval = _check_temp_dir(settings)
1925 if rval != os.EX_OK:
1928 syncuri = settings.get("SYNC", "").strip()
1930 writemsg_level("!!! SYNC is undefined. " + \
1931 "Is %s/make.globals missing?\n" % global_config_path,
1932 noiselevel=-1, level=logging.ERROR)
1935 vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
1936 vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
1940 updatecache_flg = False
1941 if myaction == "metadata":
1942 print("skipping sync")
1943 updatecache_flg = True
1944 elif ".git" in vcs_dirs:
1945 # Update existing git repository, and ignore the syncuri. We are
1946 # going to trust the user and assume that the user is in the branch
1947 # that he/she wants updated. We'll let the user manage branches with
1949 if portage.process.find_binary("git") is None:
1950 msg = ["Command not found: git",
1951 "Type \"emerge dev-util/git\" to enable git support."]
1953 writemsg_level("!!! %s\n" % l,
1954 level=logging.ERROR, noiselevel=-1)
1956 msg = ">>> Starting git pull in %s..." % myportdir
1957 emergelog(xterm_titles, msg )
1958 writemsg_level(msg + "\n")
1959 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
1960 (portage._shell_quote(myportdir),), **spawn_kwargs)
1961 if exitcode != os.EX_OK:
1962 msg = "!!! git pull error in %s." % myportdir
1963 emergelog(xterm_titles, msg)
1964 writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
1966 msg = ">>> Git pull in %s successful" % myportdir
1967 emergelog(xterm_titles, msg)
1968 writemsg_level(msg + "\n")
1969 exitcode = git_sync_timestamps(settings, myportdir)
1970 if exitcode == os.EX_OK:
1971 updatecache_flg = True
1972 elif syncuri[:8]=="rsync://" or syncuri[:6]=="ssh://":
1973 for vcs_dir in vcs_dirs:
1974 writemsg_level(("!!! %s appears to be under revision " + \
1975 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
1976 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
1978 if not os.path.exists("/usr/bin/rsync"):
1979 print("!!! /usr/bin/rsync does not exist, so rsync support is disabled.")
1980 print("!!! Type \"emerge net-misc/rsync\" to enable rsync support.")
1985 if settings["PORTAGE_RSYNC_OPTS"] == "":
1986 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
1988 "--recursive", # Recurse directories
1989 "--links", # Consider symlinks
1990 "--safe-links", # Ignore links outside of tree
1991 "--perms", # Preserve permissions
1992 "--times", # Preserive mod times
1993 "--compress", # Compress the data transmitted
1994 "--force", # Force deletion on non-empty dirs
1995 "--whole-file", # Don't do block transfers, only entire files
1996 "--delete", # Delete files that aren't in the master tree
1997 "--stats", # Show final statistics about what was transfered
1998 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
1999 "--exclude=/distfiles", # Exclude distfiles from consideration
2000 "--exclude=/local", # Exclude local from consideration
2001 "--exclude=/packages", # Exclude packages from consideration
2005 # The below validation is not needed when using the above hardcoded
2008 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
2009 rsync_opts.extend(portage.util.shlex_split(
2010 settings.get("PORTAGE_RSYNC_OPTS", "")))
2011 for opt in ("--recursive", "--times"):
2012 if opt not in rsync_opts:
2013 portage.writemsg(yellow("WARNING:") + " adding required option " + \
2014 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
2015 rsync_opts.append(opt)
2017 for exclude in ("distfiles", "local", "packages"):
2018 opt = "--exclude=/%s" % exclude
2019 if opt not in rsync_opts:
2020 portage.writemsg(yellow("WARNING:") + \
2021 " adding required option %s not included in " % opt + \
2022 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
2023 rsync_opts.append(opt)
2025 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
2026 def rsync_opt_startswith(opt_prefix):
2027 for x in rsync_opts:
2028 if x.startswith(opt_prefix):
2032 if not rsync_opt_startswith("--timeout="):
2033 rsync_opts.append("--timeout=%d" % mytimeout)
2035 for opt in ("--compress", "--whole-file"):
2036 if opt not in rsync_opts:
2037 portage.writemsg(yellow("WARNING:") + " adding required option " + \
2038 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
2039 rsync_opts.append(opt)
2041 if "--quiet" in myopts:
2042 rsync_opts.append("--quiet") # Shut up a lot
2044 rsync_opts.append("--verbose") # Print filelist
2046 if "--verbose" in myopts:
2047 rsync_opts.append("--progress") # Progress meter for each file
2049 if "--debug" in myopts:
2050 rsync_opts.append("--checksum") # Force checksum on all files
2052 # Real local timestamp file.
2053 servertimestampfile = os.path.join(
2054 myportdir, "metadata", "timestamp.chk")
2056 content = portage.util.grabfile(servertimestampfile)
2060 mytimestamp = time.mktime(time.strptime(content[0],
2061 "%a, %d %b %Y %H:%M:%S +0000"))
2062 except (OverflowError, ValueError):
2067 rsync_initial_timeout = \
2068 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
2070 rsync_initial_timeout = 15
2073 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
2074 except SystemExit as e:
2075 raise # Needed else can't exit
2077 maxretries = -1 #default number of retries
2080 proto, user_name, hostname, port = re.split(
2081 "(rsync|ssh)://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=4)[1:5]
2084 if user_name is None:
2086 updatecache_flg=True
2087 all_rsync_opts = set(rsync_opts)
2088 extra_rsync_opts = portage.util.shlex_split(
2089 settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
2090 all_rsync_opts.update(extra_rsync_opts)
2092 family = socket.AF_UNSPEC
2093 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
2094 family = socket.AF_INET
2095 elif socket.has_ipv6 and \
2096 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
2097 family = socket.AF_INET6
2103 addrinfos = getaddrinfo_validate(
2104 socket.getaddrinfo(hostname, None,
2105 family, socket.SOCK_STREAM))
2106 except socket.error as e:
2108 "!!! getaddrinfo failed for '%s': %s\n" % (hostname, e),
2109 noiselevel=-1, level=logging.ERROR)
2113 AF_INET = socket.AF_INET
2116 AF_INET6 = socket.AF_INET6
2121 for addrinfo in addrinfos:
2122 if addrinfo[0] == AF_INET:
2123 ips_v4.append("%s" % addrinfo[4][0])
2124 elif AF_INET6 is not None and addrinfo[0] == AF_INET6:
2125 # IPv6 addresses need to be enclosed in square brackets
2126 ips_v6.append("[%s]" % addrinfo[4][0])
2128 random.shuffle(ips_v4)
2129 random.shuffle(ips_v6)
2131 # Give priority to the address family that
2132 # getaddrinfo() returned first.
2133 if AF_INET6 is not None and addrinfos and \
2134 addrinfos[0][0] == AF_INET6:
2135 ips = ips_v6 + ips_v4
2137 ips = ips_v4 + ips_v6
2140 uris.append(syncuri.replace(
2141 "//" + user_name + hostname + port + "/",
2142 "//" + user_name + ip + port + "/", 1))
2145 # With some configurations we need to use the plain hostname
2146 # rather than try to resolve the ip addresses (bug #340817).
2147 uris.append(syncuri)
2149 # reverse, for use with pop()
2152 effective_maxretries = maxretries
2153 if effective_maxretries < 0:
2154 effective_maxretries = len(uris) - 1
2156 SERVER_OUT_OF_DATE = -1
2157 EXCEEDED_MAX_RETRIES = -2
2160 dosyncuri = uris.pop()
2162 writemsg("!!! Exhausted addresses for %s\n" % \
2163 hostname, noiselevel=-1)
2167 if "--ask" in myopts:
2168 if userquery("Do you want to sync your Portage tree " + \
2169 "with the mirror at\n" + blue(dosyncuri) + bold("?"),
2170 enter_invalid) == "No":
2175 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
2176 if "--quiet" not in myopts:
2177 print(">>> Starting rsync with "+dosyncuri+"...")
2179 emergelog(xterm_titles,
2180 ">>> Starting retry %d of %d with %s" % \
2181 (retries, effective_maxretries, dosyncuri))
2183 "\n\n>>> Starting retry %d of %d with %s\n" % \
2184 (retries, effective_maxretries, dosyncuri), noiselevel=-1)
2186 if dosyncuri.startswith('ssh://'):
2187 dosyncuri = dosyncuri[6:].replace('/', ':/', 1)
2189 if mytimestamp != 0 and "--quiet" not in myopts:
2190 print(">>> Checking server timestamp ...")
2192 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
2194 if "--debug" in myopts:
2199 # Even if there's no timestamp available locally, fetch the
2200 # timestamp anyway as an initial probe to verify that the server is
2201 # responsive. This protects us from hanging indefinitely on a
2202 # connection attempt to an unresponsive server which rsync's
2203 # --timeout option does not prevent.
2205 # Temporary file for remote server timestamp comparison.
2206 # NOTE: If FEATURES=usersync is enabled then the tempfile
2207 # needs to be in a directory that's readable by the usersync
2208 # user. We assume that PORTAGE_TMPDIR will satisfy this
2209 # requirement, since that's not necessarily true for the
2210 # default directory used by the tempfile module.
2211 if usersync_uid is not None:
2212 tmpdir = settings['PORTAGE_TMPDIR']
2214 # use default dir from tempfile module
2216 fd, tmpservertimestampfile = \
2217 tempfile.mkstemp(dir=tmpdir)
2219 if usersync_uid is not None:
2220 portage.util.apply_permissions(tmpservertimestampfile,
2222 mycommand = rsynccommand[:]
2223 mycommand.append(dosyncuri.rstrip("/") + \
2224 "/metadata/timestamp.chk")
2225 mycommand.append(tmpservertimestampfile)
2229 # Timeout here in case the server is unresponsive. The
2230 # --timeout rsync option doesn't apply to the initial
2231 # connection attempt.
2233 if rsync_initial_timeout:
2234 portage.exception.AlarmSignal.register(
2235 rsync_initial_timeout)
2237 mypids.extend(portage.process.spawn(
2238 mycommand, returnpid=True, **spawn_kwargs))
2239 exitcode = os.waitpid(mypids[0], 0)[1]
2240 if usersync_uid is not None:
2241 portage.util.apply_permissions(tmpservertimestampfile,
2243 content = portage.grabfile(tmpservertimestampfile)
2245 if rsync_initial_timeout:
2246 portage.exception.AlarmSignal.unregister()
2248 os.unlink(tmpservertimestampfile)
2251 except portage.exception.AlarmSignal:
2254 # With waitpid and WNOHANG, only check the
2255 # first element of the tuple since the second
2256 # element may vary (bug #337465).
2257 if mypids and os.waitpid(mypids[0], os.WNOHANG)[0] == 0:
2258 os.kill(mypids[0], signal.SIGTERM)
2259 os.waitpid(mypids[0], 0)
2260 # This is the same code rsync uses for timeout.
2263 if exitcode != os.EX_OK:
2265 exitcode = (exitcode & 0xff) << 8
2267 exitcode = exitcode >> 8
2269 portage.process.spawned_pids.remove(mypids[0])
2272 servertimestamp = time.mktime(time.strptime(
2273 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
2274 except (OverflowError, ValueError):
2276 del mycommand, mypids, content
2277 if exitcode == os.EX_OK:
2278 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
2279 emergelog(xterm_titles,
2280 ">>> Cancelling sync -- Already current.")
2283 print(">>> Timestamps on the server and in the local repository are the same.")
2284 print(">>> Cancelling all further sync action. You are already up to date.")
2286 print(">>> In order to force sync, remove '%s'." % servertimestampfile)
2290 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
2291 emergelog(xterm_titles,
2292 ">>> Server out of date: %s" % dosyncuri)
2295 print(">>> SERVER OUT OF DATE: %s" % dosyncuri)
2297 print(">>> In order to force sync, remove '%s'." % servertimestampfile)
2300 exitcode = SERVER_OUT_OF_DATE
2301 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
2303 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
2304 exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
2305 if exitcode in [0,1,3,4,11,14,20,21]:
2307 elif exitcode in [1,3,4,11,14,20,21]:
2310 # Code 2 indicates protocol incompatibility, which is expected
2311 # for servers with protocol < 29 that don't support
2312 # --prune-empty-directories. Retry for a server that supports
2313 # at least rsync protocol version 29 (>=rsync-2.6.4).
2318 if maxretries < 0 or retries <= maxretries:
2319 print(">>> Retrying...")
2323 updatecache_flg=False
2324 exitcode = EXCEEDED_MAX_RETRIES
2328 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
2329 elif exitcode == SERVER_OUT_OF_DATE:
2331 elif exitcode == EXCEEDED_MAX_RETRIES:
2333 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
2338 msg.append("Rsync has reported that there is a syntax error. Please ensure")
2339 msg.append("that your SYNC statement is proper.")
2340 msg.append("SYNC=" + settings["SYNC"])
2342 msg.append("Rsync has reported that there is a File IO error. Normally")
2343 msg.append("this means your disk is full, but can be caused by corruption")
2344 msg.append("on the filesystem that contains PORTDIR. Please investigate")
2345 msg.append("and try again after the problem has been fixed.")
2346 msg.append("PORTDIR=" + settings["PORTDIR"])
2348 msg.append("Rsync was killed before it finished.")
2350 msg.append("Rsync has not successfully finished. It is recommended that you keep")
2351 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
2352 msg.append("to use rsync due to firewall or other restrictions. This should be a")
2353 msg.append("temporary problem unless complications exist with your network")
2354 msg.append("(and possibly your system's filesystem) configuration.")
2358 elif syncuri[:6]=="cvs://":
2359 if not os.path.exists("/usr/bin/cvs"):
2360 print("!!! /usr/bin/cvs does not exist, so CVS support is disabled.")
2361 print("!!! Type \"emerge dev-vcs/cvs\" to enable CVS support.")
2364 cvsdir=os.path.dirname(myportdir)
2365 if not os.path.exists(myportdir+"/CVS"):
2367 print(">>> Starting initial cvs checkout with "+syncuri+"...")
2368 if os.path.exists(cvsdir+"/gentoo-x86"):
2369 print("!!! existing",cvsdir+"/gentoo-x86 directory; exiting.")
2373 except OSError as e:
2374 if e.errno != errno.ENOENT:
2376 "!!! existing '%s' directory; exiting.\n" % myportdir)
2379 if portage.process.spawn_bash(
2380 "cd %s; exec cvs -z0 -d %s co -P gentoo-x86" % \
2381 (portage._shell_quote(cvsdir), portage._shell_quote(cvsroot)),
2382 **spawn_kwargs) != os.EX_OK:
2383 print("!!! cvs checkout error; exiting.")
2385 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
2388 print(">>> Starting cvs update with "+syncuri+"...")
2389 retval = portage.process.spawn_bash(
2390 "cd %s; exec cvs -z0 -q update -dP" % \
2391 (portage._shell_quote(myportdir),), **spawn_kwargs)
2392 if retval != os.EX_OK:
2396 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
2397 noiselevel=-1, level=logging.ERROR)
2400 if updatecache_flg and \
2401 myaction != "metadata" and \
2402 "metadata-transfer" not in settings.features:
2403 updatecache_flg = False
2405 # Reload the whole config from scratch.
2406 settings, trees, mtimedb = load_emerge_config(trees=trees)
2407 adjust_configs(myopts, trees)
2408 root_config = trees[settings["ROOT"]]["root_config"]
2409 portdb = trees[settings["ROOT"]]["porttree"].dbapi
2411 if updatecache_flg and \
2412 os.path.exists(os.path.join(myportdir, 'metadata', 'cache')):
2414 # Only update cache for myportdir since that's
2415 # the only one that's been synced here.
2416 action_metadata(settings, portdb, myopts, porttrees=[myportdir])
2418 if myopts.get('--package-moves') != 'n' and \
2419 _global_updates(trees, mtimedb["updates"], quiet=("--quiet" in myopts)):
2421 # Reload the whole config from scratch.
2422 settings, trees, mtimedb = load_emerge_config(trees=trees)
2423 adjust_configs(myopts, trees)
2424 portdb = trees[settings["ROOT"]]["porttree"].dbapi
2425 root_config = trees[settings["ROOT"]]["root_config"]
2427 mybestpv = portdb.xmatch("bestmatch-visible",
2428 portage.const.PORTAGE_PACKAGE_ATOM)
2429 mypvs = portage.best(
2430 trees[settings["ROOT"]]["vartree"].dbapi.match(
2431 portage.const.PORTAGE_PACKAGE_ATOM))
2433 chk_updated_cfg_files(settings["EROOT"],
2434 portage.util.shlex_split(settings.get("CONFIG_PROTECT", "")))
2436 if myaction != "metadata":
2437 postsync = os.path.join(settings["PORTAGE_CONFIGROOT"],
2438 portage.USER_CONFIG_PATH, "bin", "post_sync")
2439 if os.access(postsync, os.X_OK):
2440 retval = portage.process.spawn(
2441 [postsync, dosyncuri], env=settings.environ())
2442 if retval != os.EX_OK:
2443 print(red(" * ") + bold("spawn failed of " + postsync))
2445 if(mybestpv != mypvs) and not "--quiet" in myopts:
2447 print(red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended")
2448 print(red(" * ")+"that you update portage now, before any other packages are updated.")
2450 print(red(" * ")+"To update portage, run 'emerge portage' now.")
2453 display_news_notification(root_config, myopts)
2456 def action_uninstall(settings, trees, ldpath_mtimes,
2457 opts, action, files, spinner):
2458 # For backward compat, some actions do not require leading '='.
2459 ignore_missing_eq = action in ('clean', 'unmerge')
2460 root = settings['ROOT']
2461 vardb = trees[root]['vartree'].dbapi
2465 # Ensure atoms are valid before calling unmerge().
2466 # For backward compat, leading '=' is not required.
2468 if is_valid_package_atom(x, allow_repo=True) or \
2469 (ignore_missing_eq and is_valid_package_atom('=' + x)):
2473 dep_expand(x, mydb=vardb, settings=settings))
2474 except portage.exception.AmbiguousPackageName as e:
2475 msg = "The short ebuild name \"" + x + \
2476 "\" is ambiguous. Please specify " + \
2477 "one of the following " + \
2478 "fully-qualified ebuild names instead:"
2479 for line in textwrap.wrap(msg, 70):
2480 writemsg_level("!!! %s\n" % (line,),
2481 level=logging.ERROR, noiselevel=-1)
2483 writemsg_level(" %s\n" % colorize("INFORM", i),
2484 level=logging.ERROR, noiselevel=-1)
2485 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
2488 elif x.startswith(os.sep):
2489 if not x.startswith(root):
2490 writemsg_level(("!!! '%s' does not start with" + \
2491 " $ROOT.\n") % x, level=logging.ERROR, noiselevel=-1)
2493 # Queue these up since it's most efficient to handle
2494 # multiple files in a single iter_owners() call.
2495 lookup_owners.append(x)
2497 elif x.startswith(SETPREFIX) and action == "deselect":
2498 valid_atoms.append(x)
2502 ext_atom = Atom(x, allow_repo=True, allow_wildcard=True)
2505 msg.append("'%s' is not a valid package atom." % (x,))
2506 msg.append("Please check ebuild(5) for full details.")
2507 writemsg_level("".join("!!! %s\n" % line for line in msg),
2508 level=logging.ERROR, noiselevel=-1)
2511 for cp in vardb.cp_all():
2512 if extended_cp_match(ext_atom.cp, cp):
2515 atom += ":" + ext_atom.slot
2517 atom += "::" + ext_atom.repo
2519 if vardb.match(atom):
2520 valid_atoms.append(Atom(atom))
2524 msg.append("'%s' is not a valid package atom." % (x,))
2525 msg.append("Please check ebuild(5) for full details.")
2526 writemsg_level("".join("!!! %s\n" % line for line in msg),
2527 level=logging.ERROR, noiselevel=-1)
2532 search_for_multiple = False
2533 if len(lookup_owners) > 1:
2534 search_for_multiple = True
2536 for x in lookup_owners:
2537 if not search_for_multiple and os.path.isdir(x):
2538 search_for_multiple = True
2539 relative_paths.append(x[len(root)-1:])
2542 for pkg, relative_path in \
2543 vardb._owners.iter_owners(relative_paths):
2544 owners.add(pkg.mycpv)
2545 if not search_for_multiple:
2550 slot = vardb.aux_get(cpv, ['SLOT'])[0]
2552 # portage now masks packages with missing slot, but it's
2553 # possible that one was installed by an older version
2554 atom = portage.cpv_getkey(cpv)
2556 atom = '%s:%s' % (portage.cpv_getkey(cpv), slot)
2557 valid_atoms.append(portage.dep.Atom(atom))
2559 writemsg_level(("!!! '%s' is not claimed " + \
2560 "by any package.\n") % lookup_owners[0],
2561 level=logging.WARNING, noiselevel=-1)
2563 if files and not valid_atoms:
2566 if action == 'unmerge' and \
2567 '--quiet' not in opts and \
2568 '--quiet-unmerge-warn' not in opts:
2569 msg = "This action can remove important packages! " + \
2570 "In order to be safer, use " + \
2571 "`emerge -pv --depclean <atom>` to check for " + \
2572 "reverse dependencies before removing packages."
2573 out = portage.output.EOutput()
2574 for line in textwrap.wrap(msg, 72):
2577 if action == 'deselect':
2578 return action_deselect(settings, trees, opts, valid_atoms)
2580 # Create a Scheduler for calls to unmerge(), in order to cause
2581 # redirection of ebuild phase output to logs as required for
2582 # options such as --quiet.
2583 sched = Scheduler(settings, trees, None, opts,
2585 sched._background = sched._background_mode()
2586 sched._status_display.quiet = True
2588 if action in ('clean', 'unmerge') or \
2589 (action == 'prune' and "--nodeps" in opts):
2590 # When given a list of atoms, unmerge them in the order given.
2591 ordered = action == 'unmerge'
2592 unmerge(trees[settings["ROOT"]]['root_config'], opts, action,
2593 valid_atoms, ldpath_mtimes, ordered=ordered,
2594 scheduler=sched._sched_iface)
2597 rval = action_depclean(settings, trees, ldpath_mtimes,
2598 opts, action, valid_atoms, spinner, scheduler=sched._sched_iface)
def adjust_configs(myopts, trees):
	"""Apply emerge-specific option adjustments (adjust_config) to the
	configuration of every root in the trees mapping."""
	for myroot in trees:
		mysettings = trees[myroot]["vartree"].settings
		adjust_config(myopts, mysettings)
def adjust_config(myopts, settings):
	"""Make emerge specific adjustments to the config.

	NOTE(review): several lines (including some ``try:`` statements)
	appear to be elided in this view of the function.
	"""

	# Kill noauto as it will break merges otherwise.
	if "noauto" in settings.features:
		settings.features.remove('noauto')

	# --fail-clean y/n on the command line overrides FEATURES.
	fail_clean = myopts.get('--fail-clean')
	if fail_clean is not None:
		if fail_clean is True and \
			'fail-clean' not in settings.features:
			settings.features.add('fail-clean')
		elif fail_clean == 'n' and \
			'fail-clean' in settings.features:
			settings.features.remove('fail-clean')

	# Parse CLEAN_DELAY as an integer, warning and keeping the
	# previous/default value when it is malformed.
		CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
	except ValueError as e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
			settings["CLEAN_DELAY"], noiselevel=-1)
	settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
	settings.backup_changes("CLEAN_DELAY")

	# Same treatment for EMERGE_WARNING_DELAY (default 10).
	EMERGE_WARNING_DELAY = 10
		EMERGE_WARNING_DELAY = int(settings.get(
			"EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
	except ValueError as e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
			settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
	settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
	settings.backup_changes("EMERGE_WARNING_DELAY")

	if "--quiet" in myopts or "--quiet-build" in myopts:
		settings["PORTAGE_QUIET"]="1"
		settings.backup_changes("PORTAGE_QUIET")

	if "--verbose" in myopts:
		settings["PORTAGE_VERBOSE"] = "1"
		settings.backup_changes("PORTAGE_VERBOSE")

	# Set so that configs will be merged regardless of remembered status
	if ("--noconfmem" in myopts):
		settings["NOCONFMEM"]="1"
		settings.backup_changes("NOCONFMEM")

	# Set various debug markers... They should be merged somehow.
		# PORTAGE_DEBUG is only meaningful as 0 or 1.
		PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
		if PORTAGE_DEBUG not in (0, 1):
			portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
				PORTAGE_DEBUG, noiselevel=-1)
			portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
	except ValueError as e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
			settings["PORTAGE_DEBUG"], noiselevel=-1)

	if "--debug" in myopts:
	settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
	settings.backup_changes("PORTAGE_DEBUG")

	if settings.get("NOCOLOR") not in ("yes","true"):
		portage.output.havecolor = 1

	"""The explicit --color < y | n > option overrides the NOCOLOR environment
	variable and stdout auto-detection."""
	if "--color" in myopts:
		if "y" == myopts["--color"]:
			portage.output.havecolor = 1
			settings["NOCOLOR"] = "false"
			portage.output.havecolor = 0
			settings["NOCOLOR"] = "true"
		settings.backup_changes("NOCOLOR")
	elif settings.get('TERM') == 'dumb' or \
		not sys.stdout.isatty():
		# Dumb terminal or non-tty stdout: disable color output.
		portage.output.havecolor = 0
		settings["NOCOLOR"] = "true"
		settings.backup_changes("NOCOLOR")
def display_missing_pkg_set(root_config, set_name):
	"""Report, as an error, that the requested package set does not exist,
	listing the set names that are available in root_config.sets.

	NOTE(review): the initialization of ``msg`` appears elided here.
	"""
	msg.append(("emerge: There are no sets to satisfy '%s'. " + \
		"The following sets exist:") % \
		colorize("INFORM", set_name))

	for s in sorted(root_config.sets):
		msg.append(" %s" % s)

	writemsg_level("".join("%s\n" % l for l in msg),
		level=logging.ERROR, noiselevel=-1)
def relative_profile_path(portdir, abs_profile):
	"""Express the given profile path relative to $PORTDIR/profiles.

	NOTE(review): the non-matching branch and the return statement appear
	to be elided from this view.
	"""
	realpath = os.path.realpath(abs_profile)
	basepath = os.path.realpath(os.path.join(portdir, "profiles"))
	if realpath.startswith(basepath):
		# Strip the base path plus the following '/' separator.
		profilever = realpath[1 + len(basepath):]
def getportageversion(portdir, target_root, profile, chost, vardb):
	"""Build the "Portage <version> (<profile>, <gcc>, <libc>, <uname>)"
	banner string from the live system state."""
		# First try to express the profile relative to $PORTDIR/profiles.
		profilever = relative_profile_path(portdir, profile)
		if profilever is None:
				# Fall back to resolving each entry of the profile's
				# 'parent' file relative to $PORTDIR/profiles.
				for parent in portage.grabfile(
					os.path.join(profile, 'parent')):
					profilever = relative_profile_path(portdir,
						os.path.join(profile, parent))
					if profilever is not None:
			except portage.exception.PortageException:

			if profilever is None:
					# Last resort: show the raw symlink target.
					profilever = "!" + os.readlink(profile)

	if profilever is None:
		profilever = "unavailable"

	# Determine installed libc version(s) via the virtual/libc and
	# virtual/glibc providers recorded in the installed-package db.
	libclist = vardb.match("virtual/libc")
	libclist += vardb.match("virtual/glibc")
	libclist = portage.util.unique_array(libclist)
		xs=portage.catpkgsplit(x)
			libcver+=","+"-".join(xs[1:])
			libcver="-".join(xs[1:])
		libcver="unavailable"

	gccver = getgccversion(chost)
	unameout=platform.release()+" "+platform.machine()

	return "Portage %s (%s, %s, %s, %s)" % \
		(portage.VERSION, profilever, gccver, libcver, unameout)
def git_sync_timestamps(settings, portdir):
	"""
	Since git doesn't preserve timestamps, synchronize timestamps between
	entries and ebuilds/eclasses. Assume the cache has the correct timestamp
	for a given file as long as the file in the working tree is not modified
	(relative to HEAD, per the git diff-index call below).
	"""
	cache_dir = os.path.join(portdir, "metadata", "cache")
	if not os.path.isdir(cache_dir):
	writemsg_level(">>> Synchronizing timestamps...\n")

	from portage.cache.cache_errors import CacheError
		cache_db = settings.load_best_module("portdbapi.metadbmodule")(
			portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
	except CacheError as e:
		writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
			level=logging.ERROR, noiselevel=-1)

	# Collect all eclass names (".eclass" suffix stripped via f[:-7]).
	ec_dir = os.path.join(portdir, "eclass")
		ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
			if f.endswith(".eclass"))
	except OSError as e:
		writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
			level=logging.ERROR, noiselevel=-1)

	# Ask git which files are modified relative to HEAD; cached
	# timestamps are only trusted for unmodified files.
	args = [portage.const.BASH_BINARY, "-c",
		"cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
		portage._shell_quote(portdir)]
	proc = subprocess.Popen(args, stdout=subprocess.PIPE)
	modified_files = set(_unicode_decode(l).rstrip("\n") for l in proc.stdout)
	if rval != os.EX_OK:

	modified_eclasses = set(ec for ec in ec_names \
		if os.path.join("eclass", ec + ".eclass") in modified_files)

	# eclass name -> mtime already applied to that file during this run.
	updated_ec_mtimes = {}

	for cpv in cache_db:
		cpv_split = portage.catpkgsplit(cpv)
		if cpv_split is None:
			writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
				level=logging.ERROR, noiselevel=-1)

		cat, pn, ver, rev = cpv_split
		cat, pf = portage.catsplit(cpv)
		relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
		# Skip ebuilds that are locally modified; their cached mtimes
		# cannot be trusted.
		if relative_eb_path in modified_files:

			cache_entry = cache_db[cpv]
			eb_mtime = cache_entry.get("_mtime_")
			ec_mtimes = cache_entry.get("_eclasses_")
			writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
				level=logging.ERROR, noiselevel=-1)
		except CacheError as e:
			writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
				(cpv, e), level=logging.ERROR, noiselevel=-1)

		if eb_mtime is None:
			writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
				level=logging.ERROR, noiselevel=-1)

			eb_mtime = long(eb_mtime)
			writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
				(cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)

		if ec_mtimes is None:
			writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
				level=logging.ERROR, noiselevel=-1)

		# Entries that depend on a locally modified eclass are skipped too.
		if modified_eclasses.intersection(ec_mtimes):

		missing_eclasses = set(ec_mtimes).difference(ec_names)
		if missing_eclasses:
			writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
				(cpv, sorted(missing_eclasses)), level=logging.ERROR,
				noiselevel=-1)

		eb_path = os.path.join(portdir, relative_eb_path)
			current_eb_mtime = os.stat(eb_path)
			writemsg_level("!!! Missing ebuild: %s\n" % \
				(cpv,), level=logging.ERROR, noiselevel=-1)

		# Verify this entry's recorded eclass mtimes agree with any
		# mtimes already applied for earlier entries.
		inconsistent = False
		for ec, (ec_path, ec_mtime) in ec_mtimes.items():
			updated_mtime = updated_ec_mtimes.get(ec)
			if updated_mtime is not None and updated_mtime != ec_mtime:
				writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
					(cpv, ec), level=logging.ERROR, noiselevel=-1)

		# Apply the cached mtimes to the ebuild and its eclasses.
		if current_eb_mtime != eb_mtime:
			os.utime(eb_path, (eb_mtime, eb_mtime))

		for ec, (ec_path, ec_mtime) in ec_mtimes.items():
			if ec in updated_ec_mtimes:
			ec_path = os.path.join(ec_dir, ec + ".eclass")
			current_mtime = os.stat(ec_path)[stat.ST_MTIME]
			if current_mtime != ec_mtime:
				os.utime(ec_path, (ec_mtime, ec_mtime))
			updated_ec_mtimes[ec] = ec_mtime
def load_emerge_config(trees=None):
	"""Create/refresh the portage trees, per-root RootConfig objects and
	the mtimedb; returns (settings, trees, mtimedb).

	NOTE(review): the initialization of ``kwargs`` and some loop
	conditions appear elided in this view.
	"""
	# Forward PORTAGE_CONFIGROOT / ROOT from the environment to
	# portage.create_trees() as keyword arguments.
	for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
		v = os.environ.get(envvar, None)
	trees = portage.create_trees(trees=trees, **kwargs)

	# Attach a RootConfig (with its set configuration) to every root.
	for root, root_trees in trees.items():
		settings = root_trees["vartree"].settings
		settings._init_dirs()
		setconfig = load_default_config(settings, root_trees)
		root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)

	settings = trees["/"]["vartree"].settings

	for myroot in trees:
			settings = trees[myroot]["vartree"].settings

	mtimedbfile = os.path.join(settings['EROOT'], portage.CACHE_PATH, "mtimedb")
	mtimedb = portage.MtimeDB(mtimedbfile)
	portage.output._init(config_root=settings['PORTAGE_CONFIGROOT'])
	# Make the trees visible to the ebuild IPC query command handler.
	QueryCommand._db = trees
	return settings, trees, mtimedb
def chk_updated_cfg_files(eroot, config_protect):
	"""Warn the user about CONFIG_PROTECT-ed files/directories that have
	pending configuration updates.

	NOTE(review): ``target_root`` is referenced below but the parameter is
	named ``eroot``; the binding line appears to be elided — verify.
	"""
		portage.util.find_updated_config_files(target_root, config_protect))

		print("\n"+colorize("WARN", " * IMPORTANT:"), end=' ')
		if not x[1]: # it's a protected file
			print("config file '%s' needs updating." % x[0])
		else: # it's a protected dir
			print("%d config files in '%s' need updating." % (len(x[1]), x[0]))

		print(" "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")\
			+ " section of the " + bold("emerge"))
		print(" "+yellow("*")+" man page to learn how to update config files.")
def display_news_notification(root_config, myopts):
	"""Check every repository for unread GLEP 42 news items and print a
	notice pointing the user at 'eselect news'."""
	target_root = root_config.settings['EROOT']
	trees = root_config.trees
	settings = trees["vartree"].settings
	portdb = trees["porttree"].dbapi
	vardb = trees["vartree"].dbapi
	NEWS_PATH = os.path.join("metadata", "news")
	UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
	newsReaderDisplay = False
	# Only update the unread-item state when not in --pretend mode.
	update = "--pretend" not in myopts
	if "news" not in settings.features:

	# Populate these using our existing vartree, to avoid
	# having a temporary one instantiated.
	settings._populate_treeVirtuals_if_needed(trees["vartree"])

	for repo in portdb.getRepositories():
		unreadItems = checkUpdatedNewsItems(
			portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
			if not newsReaderDisplay:
				newsReaderDisplay = True
			print(colorize("WARN", " * IMPORTANT:"), end=' ')
			print("%s news items need reading for repository '%s'." % (unreadItems, repo))

	if newsReaderDisplay:
		print(colorize("WARN", " *"), end=' ')
		print("Use " + colorize("GOOD", "eselect news") + " to read news items.")
def getgccversion(chost):
	"""
	return: the current in-use gcc version
	"""
	gcc_ver_command = 'gcc -dumpversion'
	gcc_ver_prefix = 'gcc-'

	gcc_not_found_error = red(
	"!!! No gcc found. You probably need to 'source /etc/profile'\n" +
	"!!! to update the environment of this terminal and possibly\n" +
	"!!! other terminals also.\n"

	# Preferred: ask gcc-config for the active profile ("<chost>-<ver>").
		mystatus, myoutput = subprocess_getstatusoutput("gcc-config -c")
		if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
			return myoutput.replace(chost + "-", gcc_ver_prefix, 1)

	# Fall back to invoking the CHOST-prefixed gcc directly.
		mystatus, myoutput = subprocess_getstatusoutput(
			chost + "-" + gcc_ver_command)
		if mystatus == os.EX_OK:
			return gcc_ver_prefix + myoutput

	# Last resort: a plain 'gcc -dumpversion'.
		mystatus, myoutput = subprocess_getstatusoutput(gcc_ver_command)
		if mystatus == os.EX_OK:
			return gcc_ver_prefix + myoutput

	portage.writemsg(gcc_not_found_error, noiselevel=-1)
	return "[unavailable]"
def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
	"""
	Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
	Returns the number of unread (yet relevent) items.

	@param portdb: a portage tree database
	@type portdb: pordbapi
	@param vardb: an installed package database
	@type vardb: vardbapi
	@returns:
	1. The number of unread but relevant news items.

	NOTE(review): the signature's continuation line (the ``update``
	keyword parameter used below) appears to be elided from this view.
	"""
	from portage.news import NewsManager
	manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
	return manager.getUnreadItems( repo_id, update=update )