1 # Copyright 1999-2010 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
4 from __future__ import print_function
7 from subprocess import getstatusoutput as subprocess_getstatusoutput
9 from commands import getstatusoutput as subprocess_getstatusoutput
24 from itertools import chain
27 from portage import os
28 from portage import digraph
29 from portage import _unicode_decode
30 from portage.cache.cache_errors import CacheError
31 from portage.const import GLOBAL_CONFIG_PATH, NEWS_LIB_PATH
32 from portage.const import _ENABLE_DYN_LINK_MAP
33 from portage.dbapi.dep_expand import dep_expand
34 from portage.output import blue, bold, colorize, create_color_func, darkgreen, \
36 good = create_color_func("GOOD")
37 bad = create_color_func("BAD")
38 from portage.package.ebuild._ipc.QueryCommand import QueryCommand
39 from portage.package.ebuild.doebuild import _check_temp_dir
40 from portage._sets import load_default_config, SETPREFIX
41 from portage._sets.base import InternalPackageSet
42 from portage.util import cmp_sort_key, writemsg, \
43 writemsg_level, writemsg_stdout
44 from portage._global_updates import _global_updates
46 from _emerge.clear_caches import clear_caches
47 from _emerge.countdown import countdown
48 from _emerge.create_depgraph_params import create_depgraph_params
49 from _emerge.Dependency import Dependency
50 from _emerge.depgraph import backtrack_depgraph, depgraph, resume_depgraph
51 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
52 from _emerge.emergelog import emergelog
53 from _emerge.is_valid_package_atom import is_valid_package_atom
54 from _emerge.MetadataRegen import MetadataRegen
55 from _emerge.Package import Package
56 from _emerge.ProgressHandler import ProgressHandler
57 from _emerge.RootConfig import RootConfig
58 from _emerge.Scheduler import Scheduler
59 from _emerge.search import search
60 from _emerge.SetArg import SetArg
61 from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
62 from _emerge.sync.getaddrinfo_validate import getaddrinfo_validate
63 from _emerge.sync.old_tree_timestamp import old_tree_timestamp_warn
64 from _emerge.unmerge import unmerge
65 from _emerge.UnmergeDepPriority import UnmergeDepPriority
66 from _emerge.UseFlagDisplay import UseFlagDisplay
67 from _emerge.userquery import userquery
69 if sys.hexversion >= 0x3000000:
# action_build: resolve dependencies for the packages requested on the
# emerge command line (or for a resumed merge list saved in mtimedb) and
# hand the resulting graph to the Scheduler for merging.
# NOTE(review): this extract is gapped -- the bare number starting each
# line is the original file's line number, and non-consecutive numbers
# mean source lines are missing between them.  All comments below are
# based only on the visible lines; confirm against the complete file.
72 def action_build(settings, trees, mtimedb,
73 myopts, myaction, myfiles, spinner):
# Warn if the portage tree timestamp looks old; skipped for --usepkgonly
# since a binary-only install does not read the tree.
75 if '--usepkgonly' not in myopts:
76 old_tree_timestamp_warn(settings['PORTDIR'], settings)
78 # It's best for config updates in /etc/portage to be processed
79 # before we get here, so warn if they're not (bug #267103).
80 chk_updated_cfg_files(settings['EROOT'], ['/etc/portage'])
82 # validate the state of the resume data
83 # so that we can make assumptions later.
84 for k in ("resume", "resume_backup"):
87 resume_data = mtimedb[k]
88 if not isinstance(resume_data, dict):
# Each mergelist entry is expected to be a 4-item list:
# (pkg_type, pkg_root, pkg_key, pkg_action) -- see the unpack below.
91 mergelist = resume_data.get("mergelist")
92 if not isinstance(mergelist, list):
96 if not (isinstance(x, list) and len(x) == 4):
98 pkg_type, pkg_root, pkg_key, pkg_action = x
99 if pkg_root not in trees:
100 # Current $ROOT setting differs,
101 # so the list must be stale.
107 resume_opts = resume_data.get("myopts")
108 if not isinstance(resume_opts, (dict, list)):
111 favorites = resume_data.get("favorites")
112 if not isinstance(favorites, list):
# --resume handling: fall back to the "resume_backup" entry when the
# primary "resume" entry is absent, then merge the saved options into
# myopts (minus per-invocation flags like --ask/--skipfirst).
117 if "--resume" in myopts and \
118 ("resume" in mtimedb or
119 "resume_backup" in mtimedb):
121 if "resume" not in mtimedb:
122 mtimedb["resume"] = mtimedb["resume_backup"]
123 del mtimedb["resume_backup"]
125 # "myopts" is a list for backward compatibility.
126 resume_opts = mtimedb["resume"].get("myopts", [])
127 if isinstance(resume_opts, list):
128 resume_opts = dict((k,True) for k in resume_opts)
129 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
130 resume_opts.pop(opt, None)
132 # Current options always override resume_opts.
133 resume_opts.update(myopts)
135 myopts.update(resume_opts)
137 if "--debug" in myopts:
138 writemsg_level("myopts %s\n" % (myopts,))
140 # Adjust config according to options of the command being resumed.
# NOTE(review): `myroot` is assigned on a line missing from this
# extract (it is deleted again just below).
142 mysettings = trees[myroot]["vartree"].settings
144 adjust_config(myopts, mysettings)
146 del myroot, mysettings
148 ldpath_mtimes = mtimedb["ldpath"]
# Cache frequently-tested option flags as plain booleans.
151 buildpkgonly = "--buildpkgonly" in myopts
152 pretend = "--pretend" in myopts
153 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
154 ask = "--ask" in myopts
155 enter_invalid = '--ask-enter-invalid' in myopts
156 nodeps = "--nodeps" in myopts
157 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
158 tree = "--tree" in myopts
160 spinner.update = spinner.update_quiet
162 portage.writemsg(colorize("WARN", " * ") + \
163 "--tree is broken with --nodeps. Disabling...\n")
164 debug = "--debug" in myopts
165 verbose = "--verbose" in myopts
166 quiet = "--quiet" in myopts
167 if pretend or fetchonly:
168 # make the mtimedb readonly
169 mtimedb.filename = None
# Warn that digest regeneration can hide Manifest corruption; repoman
# is the preferred tool for generating manifests.
170 if '--digest' in myopts or 'digest' in settings.features:
171 if '--digest' in myopts:
172 msg = "The --digest option"
174 msg = "The FEATURES=digest setting"
176 msg += " can prevent corruption from being" + \
177 " noticed. The `repoman manifest` command is the preferred" + \
178 " way to generate manifests and it is capable of doing an" + \
179 " entire repository or category at once."
181 writemsg(prefix + "\n")
182 from textwrap import wrap
183 for line in wrap(msg, 72):
184 writemsg("%s%s\n" % (prefix, line))
185 writemsg(prefix + "\n")
188 favorites = mtimedb["resume"].get("favorites")
189 if not isinstance(favorites, list):
# Resume path: rebuild the dependency graph from the saved mergelist,
# honoring --skipfirst by dropping the first "merge" task.
191 myparams = create_depgraph_params(myopts, myaction)
193 resume_data = mtimedb["resume"]
194 mergelist = resume_data["mergelist"]
195 if mergelist and "--skipfirst" in myopts:
196 for i, task in enumerate(mergelist):
197 if isinstance(task, list) and \
198 task and task[-1] == "merge":
205 success, mydepgraph, dropped_tasks = resume_depgraph(
206 settings, trees, mtimedb, myopts, myparams, spinner)
# Resume failure: explain which packages are masked / missing / have
# unsatisfied deps, and drop the stale resume lists when non-interactive.
207 except (portage.exception.PackageNotFound,
208 depgraph.UnsatisfiedResumeDep) as e:
209 if isinstance(e, depgraph.UnsatisfiedResumeDep):
210 mydepgraph = e.depgraph
212 from textwrap import wrap
213 from portage.output import EOutput
216 resume_data = mtimedb["resume"]
217 mergelist = resume_data.get("mergelist")
218 if not isinstance(mergelist, list):
220 if mergelist and debug or (verbose and not quiet):
221 out.eerror("Invalid resume list:")
224 for task in mergelist:
225 if isinstance(task, list):
226 out.eerror(indent + str(tuple(task)))
229 if isinstance(e, depgraph.UnsatisfiedResumeDep):
230 out.eerror("One or more packages are either masked or " + \
231 "have missing dependencies:")
236 out.eerror(indent + "Masked package:")
237 out.eerror(2 * indent + str(dep.parent))
240 out.eerror(indent + str(dep.atom) + " pulled in by:")
241 out.eerror(2 * indent + str(dep.parent))
243 msg = "The resume list contains packages " + \
244 "that are either masked or have " + \
245 "unsatisfied dependencies. " + \
246 "Please restart/continue " + \
247 "the operation manually, or use --skipfirst " + \
248 "to skip the first package in the list and " + \
249 "any other packages that may be " + \
250 "masked or have missing dependencies."
251 for line in wrap(msg, 72):
253 elif isinstance(e, portage.exception.PackageNotFound):
254 out.eerror("An expected package is " + \
255 "not available: %s" % str(e))
257 msg = "The resume list contains one or more " + \
258 "packages that are no longer " + \
259 "available. Please restart/continue " + \
260 "the operation manually."
261 for line in wrap(msg, 72):
266 portage.writemsg("!!! One or more packages have been " + \
267 "dropped due to\n" + \
268 "!!! masking or unsatisfied dependencies:\n\n",
270 for task in dropped_tasks:
271 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
272 portage.writemsg("\n", noiselevel=-1)
275 if mydepgraph is not None:
276 mydepgraph.display_problems()
277 if not (ask or pretend):
278 # delete the current list and also the backup
279 # since it's probably stale too.
280 for k in ("resume", "resume_backup"):
286 if ("--resume" in myopts):
287 print(darkgreen("emerge: It seems we have nothing to resume..."))
# Fresh (non-resume) calculation via backtrack_depgraph for the atoms
# given on the command line.
290 myparams = create_depgraph_params(myopts, myaction)
292 success, mydepgraph, favorites = backtrack_depgraph(
293 settings, trees, myopts, myparams, myaction, myfiles, spinner)
294 except portage.exception.PackageSetNotFound as e:
295 root_config = trees[settings["ROOT"]]["root_config"]
296 display_missing_pkg_set(root_config, e.value)
300 mydepgraph.display_problems()
# Interactive / verbose display of the merge list, followed by the
# appropriate --ask prompt (resume, world favorites, autoclean, fetch,
# or plain merge).
303 if "--pretend" not in myopts and \
304 ("--ask" in myopts or "--tree" in myopts or \
305 "--verbose" in myopts) and \
306 not ("--quiet" in myopts and "--ask" not in myopts):
307 if "--resume" in myopts:
308 mymergelist = mydepgraph.altlist()
309 if len(mymergelist) == 0:
310 print(colorize("INFORM", "emerge: It seems we have nothing to resume..."))
312 favorites = mtimedb["resume"]["favorites"]
313 retval = mydepgraph.display(
314 mydepgraph.altlist(reversed=tree),
316 mydepgraph.display_problems()
317 if retval != os.EX_OK:
319 prompt="Would you like to resume merging these packages?"
321 retval = mydepgraph.display(
322 mydepgraph.altlist(reversed=("--tree" in myopts)),
324 mydepgraph.display_problems()
325 if retval != os.EX_OK:
328 for x in mydepgraph.altlist():
329 if isinstance(x, Package) and x.operation == "merge":
333 sets = trees[settings["ROOT"]]["root_config"].sets
334 world_candidates = None
335 if "--noreplace" in myopts and \
336 not oneshot and favorites:
337 # Sets that are not world candidates are filtered
338 # out here since the favorites list needs to be
339 # complete for depgraph.loadResumeCommand() to
341 world_candidates = [x for x in favorites \
342 if not (x.startswith(SETPREFIX) and \
343 not sets[x[1:]].world_candidate)]
344 if "--noreplace" in myopts and \
345 not oneshot and world_candidates:
347 for x in world_candidates:
348 print(" %s %s" % (good("*"), x))
349 prompt="Would you like to add these packages to your world favorites?"
350 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
351 prompt="Nothing to merge; would you like to auto-clean packages?"
354 print("Nothing to merge; quitting.")
357 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
358 prompt="Would you like to fetch the source files for these packages?"
360 prompt="Would you like to merge these packages?"
362 if "--ask" in myopts and userquery(prompt, enter_invalid) == "No":
367 # Don't ask again (e.g. when auto-cleaning packages after merge)
368 myopts.pop("--ask", None)
# --pretend: display the merge list only; no merging is performed.
370 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
371 if ("--resume" in myopts):
372 mymergelist = mydepgraph.altlist()
373 if len(mymergelist) == 0:
374 print(colorize("INFORM", "emerge: It seems we have nothing to resume..."))
376 favorites = mtimedb["resume"]["favorites"]
377 retval = mydepgraph.display(
378 mydepgraph.altlist(reversed=tree),
380 mydepgraph.display_problems()
381 if retval != os.EX_OK:
384 retval = mydepgraph.display(
385 mydepgraph.altlist(reversed=("--tree" in myopts)),
387 mydepgraph.display_problems()
388 if retval != os.EX_OK:
# --buildpkgonly sanity check (pretend branch): all dependencies must
# already be merged, since only zero-priority edges may remain in the
# graph once nomerge/non-Package nodes are stripped.
390 if "--buildpkgonly" in myopts:
391 graph_copy = mydepgraph._dynamic_config.digraph.copy()
392 removed_nodes = set()
393 for node in graph_copy:
394 if not isinstance(node, Package) or \
395 node.operation == "nomerge":
396 removed_nodes.add(node)
397 graph_copy.difference_update(removed_nodes)
398 if not graph_copy.hasallzeros(ignore_priority = \
399 DepPrioritySatisfiedRange.ignore_medium):
400 print("\n!!! --buildpkgonly requires all dependencies to be merged.")
401 print("!!! You have to merge the dependencies before you can build this package.\n")
# Same --buildpkgonly check repeated on the real (non-pretend) path;
# here it aborts the merge rather than just warning.
404 if "--buildpkgonly" in myopts:
405 graph_copy = mydepgraph._dynamic_config.digraph.copy()
406 removed_nodes = set()
407 for node in graph_copy:
408 if not isinstance(node, Package) or \
409 node.operation == "nomerge":
410 removed_nodes.add(node)
411 graph_copy.difference_update(removed_nodes)
412 if not graph_copy.hasallzeros(ignore_priority = \
413 DepPrioritySatisfiedRange.ignore_medium):
414 print("\n!!! --buildpkgonly requires all dependencies to be merged.")
415 print("!!! Cannot merge requested packages. Merge deps and try again.\n")
# Perform the actual merge via the Scheduler.  On the resume path the
# current "resume" list is rotated into "resume_backup" first so a
# subsequent failure can still be resumed.
418 if ("--resume" in myopts):
419 favorites=mtimedb["resume"]["favorites"]
420 mergetask = Scheduler(settings, trees, mtimedb, myopts,
421 spinner, favorites=favorites,
422 graph_config=mydepgraph.schedulerGraph())
426 retval = mergetask.merge()
427 merge_count = mergetask.curval
429 if "resume" in mtimedb and \
430 "mergelist" in mtimedb["resume"] and \
431 len(mtimedb["resume"]["mergelist"]) > 1:
432 mtimedb["resume_backup"] = mtimedb["resume"]
433 del mtimedb["resume"]
436 mydepgraph.saveNomergeFavorites()
437 mergetask = Scheduler(settings, trees, mtimedb, myopts,
438 spinner, favorites=favorites,
439 graph_config=mydepgraph.schedulerGraph())
443 retval = mergetask.merge()
444 merge_count = mergetask.curval
# Post-merge: auto-clean overlapping packages when AUTOCLEAN=yes, then
# prune dead entries from the preserved-libs registry (if enabled).
446 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
447 if "yes" == settings.get("AUTOCLEAN"):
448 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
449 unmerge(trees[settings["ROOT"]]["root_config"],
451 ldpath_mtimes, autoclean=1)
453 portage.writemsg_stdout(colorize("WARN", "WARNING:")
454 + " AUTOCLEAN is disabled. This can cause serious"
455 + " problems due to overlapping packages.\n")
457 trees[settings["ROOT"]]["vartree"].dbapi._plib_registry
458 if plib_registry is None:
459 # preserve-libs is entirely disabled
462 plib_registry.pruneNonExisting()
# action_config: run the pkg_config() phase ("config" followed by
# "clean") of a single installed package selected by the one atom given
# on the command line, optionally prompting with --ask.
# NOTE(review): this extract is gapped -- the bare number starting each
# line is the original file's line number, and non-consecutive numbers
# mean source lines (returns, try:, loop headers, ...) are missing here.
466 def action_config(settings, trees, myopts, myfiles):
467 enter_invalid = '--ask-enter-invalid' in myopts
# Exactly one package atom is accepted; anything else is an error.
468 if len(myfiles) != 1:
469 print(red("!!! config can only take a single package atom at this time\n"))
471 if not is_valid_package_atom(myfiles[0]):
472 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
474 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
475 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
# Match the atom against installed packages (vartree).
479 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
480 except portage.exception.AmbiguousPackageName as e:
481 # Multiple matches thrown from cpv_expand
484 print("No packages found.\n")
# Multiple matches: with --ask let the user pick one; otherwise list
# them and require a more specific atom.
487 if "--ask" in myopts:
489 print("Please select a package to configure:")
493 options.append(str(idx))
494 print(options[-1]+") "+pkg)
497 idx = userquery("Selection?", enter_invalid, responses=options)
500 pkg = pkgs[int(idx)-1]
502 print("The following packages available:")
505 print("\nPlease use a specific atom or the --ask option.")
511 if "--ask" in myopts:
512 if userquery("Ready to configure %s?" % pkg, enter_invalid) == "No":
515 print("Configuring pkg...")
517 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
518 mysettings = portage.config(clone=settings)
519 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
520 debug = mysettings.get("PORTAGE_DEBUG") == "1"
# NOTE(review): line 523 passes debug=(settings.get("PORTAGE_DEBUG", "") == 1),
# comparing a *string* against the int 1 -- always False -- while line 520
# computes `debug` correctly against the string "1".  The "config" phase
# therefore never runs with debug enabled even when PORTAGE_DEBUG=1; the
# precomputed `debug` (used for the "clean" phase below) looks like the
# intended value.  Confirm and fix upstream.
521 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
523 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
524 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
# Only clean up the build dir if the config phase succeeded.
525 if retval == os.EX_OK:
526 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
527 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
# action_depclean: entry point for `emerge --depclean` / `--prune`.
# Prints the standard safety warning, delegates the graph calculation to
# calc_depclean(), unmerges the resulting clean list, and prints summary
# statistics.
# NOTE(review): this extract is gapped -- the bare number starting each
# line is the original file's line number, and non-consecutive numbers
# mean source lines are missing (e.g. the `msg = []` initialization and
# several loop/return lines).  Comments are hedged accordingly.
530 def action_depclean(settings, trees, ldpath_mtimes,
531 myopts, action, myfiles, spinner, scheduler=None):
532 # Kill packages that aren't explicitly merged or are required as a
533 # dependency of another package. World file is explicit.
535 # Global depclean or prune operations are not very safe when there are
536 # missing dependencies since it's unknown how badly incomplete
537 # the dependency graph is, and we might accidentally remove packages
538 # that should have been pulled into the graph. On the other hand, it's
539 # relatively safe to ignore missing deps when only asked to remove
# Build the multi-paragraph warning text.  NOTE(review): `msg` is
# initialized on a line missing from this extract.
543 if not _ENABLE_DYN_LINK_MAP:
544 msg.append("Depclean may break link level dependencies. Thus, it is\n")
545 msg.append("recommended to use a tool such as " + good("`revdep-rebuild`") + " (from\n")
546 msg.append("app-portage/gentoolkit) in order to detect such breakage.\n")
548 msg.append("Always study the list of packages to be cleaned for any obvious\n")
549 msg.append("mistakes. Packages that are part of the world set will always\n")
550 msg.append("be kept. They can be manually added to this set with\n")
551 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
552 msg.append("package.provided (see portage(5)) will be removed by\n")
553 msg.append("depclean, even if they are part of the world set.\n")
555 msg.append("As a safety measure, depclean will not remove any packages\n")
556 msg.append("unless *all* required dependencies have been resolved. As a\n")
557 msg.append("consequence, it is often necessary to run %s\n" % \
558 good("`emerge --update"))
559 msg.append(good("--newuse --deep @world`") + \
560 " prior to depclean.\n")
# The warning is only shown for a full (argument-less) depclean that is
# not running with --quiet.
562 if action == "depclean" and "--quiet" not in myopts and not myfiles:
563 portage.writemsg_stdout("\n")
565 portage.writemsg_stdout(colorize("WARN", " * ") + x)
567 root_config = trees[settings['ROOT']]['root_config']
568 vardb = root_config.trees['vartree'].dbapi
# Turn the command-line atoms into a package set and verify that at
# least one installed package matches before doing any work.
570 args_set = InternalPackageSet(allow_repo=True)
572 args_set.update(myfiles)
573 matched_packages = False
576 matched_packages = True
578 if not matched_packages:
579 writemsg_level(">>> No packages selected for removal by %s\n" % \
583 # The calculation is done in a separate function so that depgraph
584 # references go out of scope and the corresponding memory
585 # is freed before we call unmerge().
586 rval, cleanlist, ordered, req_pkg_count = \
587 calc_depclean(settings, trees, ldpath_mtimes,
588 myopts, action, args_set, spinner)
596 unmerge(root_config, myopts, "unmerge",
597 cleanlist, ldpath_mtimes, ordered=ordered,
600 if action == "prune":
603 if not cleanlist and "--quiet" in myopts:
# Summary statistics: totals come from the vartree dbapi and the
# "selected"/"system" package sets.
606 print("Packages installed: " + str(len(vardb.cpv_all())))
607 print("Packages in world: " + \
608 str(len(root_config.sets["selected"].getAtoms())))
609 print("Packages in system: " + \
610 str(len(root_config.sets["system"].getAtoms())))
611 print("Required packages: "+str(req_pkg_count))
612 if "--pretend" in myopts:
613 print("Number to remove: "+str(len(cleanlist)))
615 print("Number removed: "+str(len(cleanlist)))
617 def calc_depclean(settings, trees, ldpath_mtimes,
618 myopts, action, args_set, spinner):
619 allow_missing_deps = bool(args_set)
621 debug = '--debug' in myopts
622 xterm_titles = "notitles" not in settings.features
623 myroot = settings["ROOT"]
624 root_config = trees[myroot]["root_config"]
625 psets = root_config.setconfig.psets
626 deselect = myopts.get('--deselect') != 'n'
628 required_sets['world'] = psets['world']
630 # When removing packages, a temporary version of the world 'selected'
631 # set may be used which excludes packages that are intended to be
632 # eligible for removal.
633 selected_set = psets['selected']
634 required_sets['selected'] = selected_set
635 protected_set = InternalPackageSet()
636 protected_set_name = '____depclean_protected_set____'
637 required_sets[protected_set_name] = protected_set
638 system_set = psets["system"]
640 if not system_set or not selected_set:
643 writemsg_level("!!! You have no system list.\n",
644 level=logging.ERROR, noiselevel=-1)
647 writemsg_level("!!! You have no world file.\n",
648 level=logging.WARNING, noiselevel=-1)
650 writemsg_level("!!! Proceeding is likely to " + \
651 "break your installation.\n",
652 level=logging.WARNING, noiselevel=-1)
653 if "--pretend" not in myopts:
654 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
656 if action == "depclean":
657 emergelog(xterm_titles, " >>> depclean")
659 writemsg_level("\nCalculating dependencies ")
660 resolver_params = create_depgraph_params(myopts, "remove")
661 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
663 vardb = resolver._frozen_config.trees[myroot]["vartree"].dbapi
664 real_vardb = trees[myroot]["vartree"].dbapi
666 if action == "depclean":
671 # Start with an empty set.
672 selected_set = InternalPackageSet()
673 required_sets['selected'] = selected_set
674 # Pull in any sets nested within the selected set.
675 selected_set.update(psets['selected'].getNonAtoms())
677 # Pull in everything that's installed but not matched
678 # by an argument atom since we don't want to clean any
679 # package if something depends on it.
685 if args_set.findAtomForPackage(pkg) is None:
686 protected_set.add("=" + pkg.cpv)
688 except portage.exception.InvalidDependString as e:
689 show_invalid_depstring_notice(pkg,
690 pkg.metadata["PROVIDE"], str(e))
692 protected_set.add("=" + pkg.cpv)
695 elif action == "prune":
698 # Start with an empty set.
699 selected_set = InternalPackageSet()
700 required_sets['selected'] = selected_set
701 # Pull in any sets nested within the selected set.
702 selected_set.update(psets['selected'].getNonAtoms())
704 # Pull in everything that's installed since we don't
705 # to prune a package if something depends on it.
706 protected_set.update(vardb.cp_all())
710 # Try to prune everything that's slotted.
711 for cp in vardb.cp_all():
712 if len(vardb.cp_list(cp)) > 1:
715 # Remove atoms from world that match installed packages
716 # that are also matched by argument atoms, but do not remove
717 # them if they match the highest installed version.
720 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
721 if not pkgs_for_cp or pkg not in pkgs_for_cp:
722 raise AssertionError("package expected in matches: " + \
723 "cp = %s, cpv = %s matches = %s" % \
724 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
726 highest_version = pkgs_for_cp[-1]
727 if pkg == highest_version:
728 # pkg is the highest version
729 protected_set.add("=" + pkg.cpv)
732 if len(pkgs_for_cp) <= 1:
733 raise AssertionError("more packages expected: " + \
734 "cp = %s, cpv = %s matches = %s" % \
735 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
738 if args_set.findAtomForPackage(pkg) is None:
739 protected_set.add("=" + pkg.cpv)
741 except portage.exception.InvalidDependString as e:
742 show_invalid_depstring_notice(pkg,
743 pkg.metadata["PROVIDE"], str(e))
745 protected_set.add("=" + pkg.cpv)
748 if resolver._frozen_config.excluded_pkgs:
749 excluded_set = resolver._frozen_config.excluded_pkgs
750 required_sets['__excluded__'] = InternalPackageSet()
757 if excluded_set.findAtomForPackage(pkg):
758 required_sets['__excluded__'].add("=" + pkg.cpv)
759 except portage.exception.InvalidDependString as e:
760 show_invalid_depstring_notice(pkg,
761 pkg.metadata["PROVIDE"], str(e))
763 required_sets['__excluded__'].add("=" + pkg.cpv)
765 success = resolver._complete_graph(required_sets={myroot:required_sets})
766 writemsg_level("\b\b... done!\n")
768 resolver.display_problems()
771 return 1, [], False, 0
773 def unresolved_deps():
776 for dep in resolver._dynamic_config._initially_unsatisfied_deps:
777 if isinstance(dep.parent, Package) and \
778 (dep.priority > UnmergeDepPriority.SOFT):
779 unresolvable.add((dep.atom, dep.parent.cpv))
784 if unresolvable and not allow_missing_deps:
786 if "--debug" in myopts:
787 writemsg("\ndigraph:\n\n", noiselevel=-1)
788 resolver._dynamic_config.digraph.debug_print()
789 writemsg("\n", noiselevel=-1)
793 msg.append("Dependencies could not be completely resolved due to")
794 msg.append("the following required packages not being installed:")
796 for atom, parent in unresolvable:
797 msg.append(" %s pulled in by:" % (atom,))
798 msg.append(" %s" % (parent,))
800 msg.append("Have you forgotten to run " + \
801 good("`emerge --update --newuse --deep @world`") + " prior")
802 msg.append(("to %s? It may be necessary to manually " + \
803 "uninstall packages that no longer") % action)
804 msg.append("exist in the portage tree since " + \
805 "it may not be possible to satisfy their")
806 msg.append("dependencies. Also, be aware of " + \
807 "the --with-bdeps option that is documented")
808 msg.append("in " + good("`man emerge`") + ".")
809 if action == "prune":
811 msg.append("If you would like to ignore " + \
812 "dependencies then use %s." % good("--nodeps"))
813 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
814 level=logging.ERROR, noiselevel=-1)
818 if unresolved_deps():
819 return 1, [], False, 0
821 graph = resolver._dynamic_config.digraph.copy()
822 required_pkgs_total = 0
824 if isinstance(node, Package):
825 required_pkgs_total += 1
827 def show_parents(child_node):
828 parent_nodes = graph.parent_nodes(child_node)
830 # With --prune, the highest version can be pulled in without any
831 # real parent since all installed packages are pulled in. In that
832 # case there's nothing to show here.
835 for node in parent_nodes:
836 parent_strs.append(str(getattr(node, "cpv", node)))
839 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
840 for parent_str in parent_strs:
841 msg.append(" %s\n" % (parent_str,))
843 portage.writemsg_stdout("".join(msg), noiselevel=-1)
845 def cmp_pkg_cpv(pkg1, pkg2):
846 """Sort Package instances by cpv."""
847 if pkg1.cpv > pkg2.cpv:
849 elif pkg1.cpv == pkg2.cpv:
854 def create_cleanlist():
856 if "--debug" in myopts:
857 writemsg("\ndigraph:\n\n", noiselevel=-1)
859 writemsg("\n", noiselevel=-1)
861 # Never display the special internal protected_set.
863 if isinstance(node, SetArg) and node.name == protected_set_name:
869 if action == "depclean":
872 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
875 arg_atom = args_set.findAtomForPackage(pkg)
876 except portage.exception.InvalidDependString:
877 # this error has already been displayed by now
882 pkgs_to_remove.append(pkg)
883 elif "--verbose" in myopts:
887 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
889 pkgs_to_remove.append(pkg)
890 elif "--verbose" in myopts:
893 elif action == "prune":
895 for atom in args_set:
896 for pkg in vardb.match_pkgs(atom):
898 pkgs_to_remove.append(pkg)
899 elif "--verbose" in myopts:
902 if not pkgs_to_remove:
904 ">>> No packages selected for removal by %s\n" % action)
905 if "--verbose" not in myopts:
907 ">>> To see reverse dependencies, use %s\n" % \
909 if action == "prune":
911 ">>> To ignore dependencies, use %s\n" % \
914 return pkgs_to_remove
916 cleanlist = create_cleanlist()
917 clean_set = set(cleanlist)
920 real_vardb._linkmap is not None and \
921 myopts.get('--depclean-lib-check') != 'n':
923 # Check if any of these packages are the sole providers of libraries
924 # with consumers that have not been selected for removal. If so, these
925 # packages and any dependencies need to be added to the graph.
926 linkmap = real_vardb._linkmap
931 writemsg_level(">>> Checking for lib consumers...\n")
933 for pkg in cleanlist:
934 pkg_dblink = real_vardb._dblink(pkg.cpv)
937 for lib in pkg_dblink.getcontents():
938 lib = lib[len(myroot):]
939 lib_key = linkmap._obj_key(lib)
940 lib_consumers = consumer_cache.get(lib_key)
941 if lib_consumers is None:
943 lib_consumers = linkmap.findConsumers(lib_key)
946 consumer_cache[lib_key] = lib_consumers
948 consumers[lib_key] = lib_consumers
953 for lib, lib_consumers in list(consumers.items()):
954 for consumer_file in list(lib_consumers):
955 if pkg_dblink.isowner(consumer_file):
956 lib_consumers.remove(consumer_file)
957 if not lib_consumers:
963 for lib, lib_consumers in consumers.items():
965 soname = linkmap.getSoname(lib)
967 consumer_providers = []
968 for lib_consumer in lib_consumers:
969 providers = provider_cache.get(lib)
970 if providers is None:
971 providers = linkmap.findProviders(lib_consumer)
972 provider_cache[lib_consumer] = providers
973 if soname not in providers:
974 # Why does this happen?
976 consumer_providers.append(
977 (lib_consumer, providers[soname]))
979 consumers[lib] = consumer_providers
981 consumer_map[pkg] = consumers
986 for consumers in consumer_map.values():
987 for lib, consumer_providers in consumers.items():
988 for lib_consumer, providers in consumer_providers:
989 search_files.add(lib_consumer)
990 search_files.update(providers)
992 writemsg_level(">>> Assigning files to packages...\n")
993 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
995 for pkg, consumers in list(consumer_map.items()):
996 for lib, consumer_providers in list(consumers.items()):
997 lib_consumers = set()
999 for lib_consumer, providers in consumer_providers:
1000 owner_set = file_owners.get(lib_consumer)
1001 provider_dblinks = set()
1002 provider_pkgs = set()
1004 if len(providers) > 1:
1005 for provider in providers:
1006 provider_set = file_owners.get(provider)
1007 if provider_set is not None:
1008 provider_dblinks.update(provider_set)
1010 if len(provider_dblinks) > 1:
1011 for provider_dblink in provider_dblinks:
1012 pkg_key = ("installed", myroot,
1013 provider_dblink.mycpv, "nomerge")
1014 if pkg_key not in clean_set:
1015 provider_pkgs.add(vardb.get(pkg_key))
1020 if owner_set is not None:
1021 lib_consumers.update(owner_set)
1023 for consumer_dblink in list(lib_consumers):
1024 if ("installed", myroot, consumer_dblink.mycpv,
1025 "nomerge") in clean_set:
1026 lib_consumers.remove(consumer_dblink)
1030 consumers[lib] = lib_consumers
1034 del consumer_map[pkg]
1037 # TODO: Implement a package set for rebuilding consumer packages.
1039 msg = "In order to avoid breakage of link level " + \
1040 "dependencies, one or more packages will not be removed. " + \
1041 "This can be solved by rebuilding " + \
1042 "the packages that pulled them in."
1045 from textwrap import wrap
1046 writemsg_level("".join(prefix + "%s\n" % line for \
1047 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
1050 for pkg in sorted(consumer_map, key=cmp_sort_key(cmp_pkg_cpv)):
1051 consumers = consumer_map[pkg]
1053 for lib, lib_consumers in consumers.items():
1054 for consumer in lib_consumers:
1055 consumer_libs.setdefault(
1056 consumer.mycpv, set()).add(linkmap.getSoname(lib))
1057 unique_consumers = set(chain(*consumers.values()))
1058 unique_consumers = sorted(consumer.mycpv \
1059 for consumer in unique_consumers)
1061 msg.append(" %s pulled in by:" % (pkg.cpv,))
1062 for consumer in unique_consumers:
1063 libs = consumer_libs[consumer]
1064 msg.append(" %s needs %s" % \
1065 (consumer, ', '.join(sorted(libs))))
1067 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
1068 level=logging.WARNING, noiselevel=-1)
1070 # Add lib providers to the graph as children of lib consumers,
1071 # and also add any dependencies pulled in by the provider.
1072 writemsg_level(">>> Adding lib providers to graph...\n")
1074 for pkg, consumers in consumer_map.items():
1075 for consumer_dblink in set(chain(*consumers.values())):
1076 consumer_pkg = vardb.get(("installed", myroot,
1077 consumer_dblink.mycpv, "nomerge"))
1078 if not resolver._add_pkg(pkg,
1079 Dependency(parent=consumer_pkg,
1080 priority=UnmergeDepPriority(runtime=True),
1082 resolver.display_problems()
1083 return 1, [], False, 0
1085 writemsg_level("\nCalculating dependencies ")
1086 success = resolver._complete_graph(
1087 required_sets={myroot:required_sets})
1088 writemsg_level("\b\b... done!\n")
1089 resolver.display_problems()
1091 return 1, [], False, 0
1092 if unresolved_deps():
1093 return 1, [], False, 0
1095 graph = resolver._dynamic_config.digraph.copy()
1096 required_pkgs_total = 0
1098 if isinstance(node, Package):
1099 required_pkgs_total += 1
1100 cleanlist = create_cleanlist()
1102 return 0, [], False, required_pkgs_total
1103 clean_set = set(cleanlist)
1106 writemsg_level(">>> Calculating removal order...\n")
1107 # Use a topological sort to create an unmerge order such that
1108 # each package is unmerged before it's dependencies. This is
1109 # necessary to avoid breaking things that may need to run
1110 # during pkg_prerm or pkg_postrm phases.
1112 # Create a new graph to account for dependencies between the
1113 # packages being unmerged.
1117 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1118 runtime = UnmergeDepPriority(runtime=True)
1119 runtime_post = UnmergeDepPriority(runtime_post=True)
1120 buildtime = UnmergeDepPriority(buildtime=True)
1123 "PDEPEND": runtime_post,
1124 "DEPEND": buildtime,
1127 for node in clean_set:
1128 graph.add(node, None)
1130 for dep_type in dep_keys:
1131 depstr = node.metadata[dep_type]
1134 priority = priority_map[dep_type]
1137 writemsg_level(_unicode_decode("\nParent: %s\n") \
1138 % (node,), noiselevel=-1, level=logging.DEBUG)
1139 writemsg_level(_unicode_decode( "Depstring: %s\n") \
1140 % (depstr,), noiselevel=-1, level=logging.DEBUG)
1141 writemsg_level(_unicode_decode( "Priority: %s\n") \
1142 % (priority,), noiselevel=-1, level=logging.DEBUG)
1145 atoms = resolver._select_atoms(myroot, depstr,
1146 myuse=node.use.enabled, parent=node,
1147 priority=priority)[node]
1148 except portage.exception.InvalidDependString:
1149 # Ignore invalid deps of packages that will
1150 # be uninstalled anyway.
1154 writemsg_level("Candidates: [%s]\n" % \
1155 ', '.join(_unicode_decode("'%s'") % (x,) for x in atoms),
1156 noiselevel=-1, level=logging.DEBUG)
1159 if not isinstance(atom, portage.dep.Atom):
1160 # Ignore invalid atoms returned from dep_check().
1164 matches = vardb.match_pkgs(atom)
1167 for child_node in matches:
1168 if child_node in clean_set:
1169 graph.add(child_node, node, priority=priority)
1172 writemsg_level("\nunmerge digraph:\n\n",
1173 noiselevel=-1, level=logging.DEBUG)
1175 writemsg_level("\n", noiselevel=-1, level=logging.DEBUG)
1178 if len(graph.order) == len(graph.root_nodes()):
1179 # If there are no dependencies between packages
1180 # let unmerge() group them by cat/pn.
1182 cleanlist = [pkg.cpv for pkg in graph.order]
1184 # Order nodes from lowest to highest overall reference count for
1185 # optimal root node selection (this can help minimize issues
1186 # with unaccounted implicit dependencies).
1188 for node in graph.order:
1189 node_refcounts[node] = len(graph.parent_nodes(node))
1190 def cmp_reference_count(node1, node2):
1191 return node_refcounts[node1] - node_refcounts[node2]
1192 graph.order.sort(key=cmp_sort_key(cmp_reference_count))
1194 ignore_priority_range = [None]
1195 ignore_priority_range.extend(
1196 range(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
1197 while not graph.empty():
1198 for ignore_priority in ignore_priority_range:
1199 nodes = graph.root_nodes(ignore_priority=ignore_priority)
1203 raise AssertionError("no root nodes")
1204 if ignore_priority is not None:
1205 # Some deps have been dropped due to circular dependencies,
1206 # so only pop one node in order to minimize the number that
1211 cleanlist.append(node.cpv)
1213 return 0, cleanlist, ordered, required_pkgs_total
1214 return 0, [], False, required_pkgs_total
# Handle `emerge --deselect`: remove the given atoms (or @set names) from
# the world "selected" favorites file.  Honors --pretend (report only) and
# --ask (confirm via userquery before rewriting the file).
# NOTE(review): this excerpt has elided lines (gaps in the embedded
# numbering), so some control-flow statements are not visible here.
1216 def action_deselect(settings, trees, opts, atoms):
1217 enter_invalid = '--ask-enter-invalid' in opts
1218 root_config = trees[settings['ROOT']]['root_config']
1219 world_set = root_config.sets['selected']
# The @selected set must support update()/replace(); bail out otherwise.
1220 if not hasattr(world_set, 'update'):
1221 writemsg_level("World @selected set does not appear to be mutable.\n",
1222 level=logging.ERROR, noiselevel=-1)
1225 vardb = root_config.trees['vartree'].dbapi
1226 expanded_atoms = set(atoms)
1227 from portage.dep import Atom
# Expand each plain (non-set) argument atom into slot-qualified atoms for
# every installed match, so slot-specific world entries are also matched.
1229 if not atom.startswith(SETPREFIX):
1230 for cpv in vardb.match(atom):
1231 slot, = vardb.aux_get(cpv, ['SLOT'])
1234 expanded_atoms.add(Atom('%s:%s' % (portage.cpv_getkey(cpv), slot)))
1236 pretend = '--pretend' in opts
# Lock the world file while modifying it, unless this is a dry run.
1238 if not pretend and hasattr(world_set, 'lock'):
1242 discard_atoms = set()
# Collect every world entry matched by any (expanded) argument atom.
1244 for atom in world_set:
1245 for arg_atom in expanded_atoms:
1246 if arg_atom.startswith(SETPREFIX):
# A @set argument only matches set entries in the world file.
1247 if atom.startswith(SETPREFIX) and \
1249 discard_atoms.add(atom)
# A plain atom matches intersecting world atoms, except that a slot-
# or repo-qualified argument must not discard an unqualified entry.
1252 if not atom.startswith(SETPREFIX) and \
1253 arg_atom.intersects(atom) and \
1254 not (arg_atom.slot and not atom.slot) and \
1255 not (arg_atom.repo and not atom.repo):
1256 discard_atoms.add(atom)
# Report each atom that is (or would be) removed.
1259 for atom in sorted(discard_atoms):
1261 print(">>> Would remove %s from \"world\" favorites file..." % \
1262 colorize("INFORM", str(atom)))
1264 print(">>> Removing %s from \"world\" favorites file..." % \
1265 colorize("INFORM", str(atom)))
# With --ask, give the user a chance to abort before rewriting the file.
1268 prompt = "Would you like to remove these " + \
1269 "packages from your world favorites?"
1270 if userquery(prompt, enter_invalid) == 'No':
# Rewrite the world set without the discarded atoms.
1273 remaining = set(world_set)
1274 remaining.difference_update(discard_atoms)
1276 world_set.replace(remaining)
1278 print(">>> No matching atoms found in \"world\" favorites file...")
# Small value object used by action_info() to display one installed version
# of a package: the version string plus a "::repo" suffix and an optional
# "(cat/pn)" provide suffix.  Instances sort by Portage version comparison.
1284 class _info_pkgs_ver(object):
1285 def __init__(self, ver, repo_suffix, provide_suffix):
# NOTE(review): the `self.ver = ver` assignment is elided from this
# excerpt; __lt__ and the lines below read self.ver.
1287 self.repo_suffix = repo_suffix
1288 self.provide_suffix = provide_suffix
# Order instances by vercmp() on the raw version strings.
1290 def __lt__(self, other):
1291 return portage.versions.vercmp(self.ver, other.ver) < 0
# Body of a toString()-style method (its def line is elided here): build
# the display string.  Deliberately not __str__, per the comment below.
1295 This may return unicode if repo_name contains unicode.
1296 Don't use __str__ and str() since unicode triggers compatibility
1297 issues between python 2.x and 3.x.
1299 return self.ver + self.repo_suffix + self.provide_suffix
# Handle `emerge --info`: print the system configuration report (Portage
# version, uname, tree timestamp, toolchain package versions, repository
# list, relevant make.conf variables, USE flags), then, for any packages
# named on the command line, print their per-package build settings and
# attempt to run their pkg_info() ebuild phase.
# NOTE(review): this excerpt has elided lines (gaps in the embedded
# numbering); several loop headers and try/except lines are not visible.
1301 def action_info(settings, trees, myopts, myfiles):
# Banner: Portage/profile identification line.
1302 print(getportageversion(settings["PORTDIR"], settings["ROOT"],
1303 settings.profile_path, settings["CHOST"],
1304 trees[settings["ROOT"]]["vartree"].dbapi))
# Centered "System Settings" header between two '=' rules.
1306 header_title = "System Settings"
1308 print(header_width * "=")
1309 print(header_title.rjust(int(header_width/2 + len(header_title)/2)))
1310 print(header_width * "=")
1311 print("System uname: "+platform.platform(aliased=1))
# Last-sync timestamp read from $PORTDIR/metadata/timestamp.chk.
1313 lastSync = portage.grabfile(os.path.join(
1314 settings["PORTDIR"], "metadata", "timestamp.chk"))
1315 print("Timestamp of tree:", end=' ')
# Report distcc / ccache versions and whether each FEATURE is enabled.
1321 output=subprocess_getstatusoutput("distcc --version")
1323 print(str(output[1].split("\n",1)[0]), end=' ')
1324 if "distcc" in settings.features:
1329 output=subprocess_getstatusoutput("ccache -V")
1331 print(str(output[1].split("\n",1)[0]), end=' ')
1332 if "ccache" in settings.features:
# Key toolchain packages to report, extended by the profile's info_pkgs.
1337 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
1338 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
1339 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
1340 myvars = portage.util.unique_array(myvars)
1343 portdb = trees["/"]["porttree"].dbapi
1344 vardb = trees["/"]["vartree"].dbapi
1345 main_repo = portdb.getRepositoryName(portdb.porttree_root)
# For each valid atom, list the installed versions with a ::repo suffix
# (suppressed details for main repo / unknown repo handled below).
1348 if portage.isvalidatom(x):
1349 pkg_matches = vardb.match(x)
1352 for cpv in pkg_matches:
1353 ver = portage.versions.cpv_getversion(cpv)
1354 repo = vardb.aux_get(cpv, ["repository"])[0]
1355 if repo == main_repo:
1358 repo_suffix = "::<unknown repository>"
1360 repo_suffix = "::" + repo
# When the match came via PROVIDE, show the actual cat/pn in parens.
1362 matched_cp = portage.versions.cpv_getkey(cpv)
1366 provide_suffix = " (%s)" % matched_cp
1369 _info_pkgs_ver(ver, repo_suffix, provide_suffix))
1374 versions = ", ".join(ver.toString() for ver in versions)
1375 writemsg_stdout("%-20s %s\n" % (x+":", versions),
1378 writemsg_stdout("%-20s %s\n" % (x+":", "[NOT VALID]"),
1381 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
# Repository list: detailed with --verbose, one-line summary otherwise.
1383 repos = portdb.settings.repositories
1384 if "--verbose" in myopts:
1385 writemsg_stdout("Repositories:\n\n")
1387 writemsg_stdout(repo.info_string())
1389 writemsg_stdout("Repositories: %s\n" % \
1390 " ".join(repo.name for repo in repos))
# Variable report: everything with --verbose, a curated list otherwise
# (plus the profile's info_vars additions).
1392 if "--verbose" in myopts:
1393 myvars = list(settings)
1395 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
1396 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
1397 'PORTDIR_OVERLAY', 'PORTAGE_BUNZIP2_COMMAND',
1398 'PORTAGE_BZIP2_COMMAND',
1399 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
1400 'ACCEPT_KEYWORDS', 'ACCEPT_LICENSE', 'SYNC', 'FEATURES',
1401 'EMERGE_DEFAULT_OPTS']
1403 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
# Variables skipped when they still have their well-known default value.
1405 myvars_ignore_defaults = {
1406 'PORTAGE_BZIP2_COMMAND' : 'bzip2',
1409 myvars = portage.util.unique_array(myvars)
1410 use_expand = settings.get('USE_EXPAND', '').split()
1412 use_expand_hidden = set(
1413 settings.get('USE_EXPAND_HIDDEN', '').upper().split())
1414 alphabetical_use = '--alphabetical' in myopts
1415 root_config = trees[settings["ROOT"]]['root_config']
# Print each variable as NAME="value", skipping known defaults.
1421 default = myvars_ignore_defaults.get(x)
1422 if default is not None and \
1423 default == settings[x]:
1425 writemsg_stdout('%s="%s"\n' % (x, settings[x]), noiselevel=-1)
# Split USE into the plain flags and the USE_EXPAND-prefixed ones, so
# each expand variable can be printed separately below.
1427 use = set(settings["USE"].split())
1428 for varname in use_expand:
1429 flag_prefix = varname.lower() + "_"
1431 if f.startswith(flag_prefix):
1435 print('USE="%s"' % " ".join(use), end=' ')
1436 for varname in use_expand:
1437 myval = settings.get(varname)
1439 print('%s="%s"' % (varname, myval), end=' ')
# Report curated variables that are not set at all.
1442 unset_vars.append(x)
1444 print("Unset: "+", ".join(unset_vars))
# With --debug, dump cvs_id_string of every portage submodule.
1447 if "--debug" in myopts:
1448 for x in dir(portage):
1449 module = getattr(portage, x)
1450 if "cvs_id_string" in dir(module):
1451 print("%s: %s" % (str(x), str(module.cvs_id_string)))
1453 # See if we can find any packages installed matching the strings
1454 # passed on the command line
1456 vardb = trees[settings["ROOT"]]["vartree"].dbapi
1457 portdb = trees[settings["ROOT"]]["porttree"].dbapi
1458 bindb = trees[settings["ROOT"]]["bintree"].dbapi
1461 installed_match = vardb.match(x)
1462 for installed in installed_match:
1463 mypkgs.append((installed, "installed"))
# Also consider ebuild/binary matches; binaries only with --usepkg, and
# only when their EAPI/DEFINED_PHASES indicate a pkg_info phase exists.
1469 for db, pkg_type in ((portdb, "ebuild"), (bindb, "binary")):
1470 if pkg_type == "binary" and "--usepkg" not in myopts:
1473 matches = db.match(x)
1475 for match in matches:
1476 if pkg_type == "binary":
1477 if db.bintree.isremote(match):
1479 auxkeys = ["EAPI", "DEFINED_PHASES"]
1480 metadata = dict(zip(auxkeys, db.aux_get(match, auxkeys)))
1481 if metadata["EAPI"] not in ("0", "1", "2", "3") and \
1482 "info" in metadata["DEFINED_PHASES"].split():
1483 mypkgs.append((match, pkg_type))
1486 # If some packages were found...
1488 # Get our global settings (we only print stuff if it varies from
1489 # the current config)
1490 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
1491 auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
1492 auxkeys.append('DEFINED_PHASES')
1494 pkgsettings = portage.config(clone=settings)
1496 # Loop through each package
1497 # Only print settings if they differ from global settings
1498 header_title = "Package Settings"
1499 print(header_width * "=")
1500 print(header_title.rjust(int(header_width/2 + len(header_title)/2)))
1501 print(header_width * "=")
1502 from portage.output import EOutput
1504 for mypkg in mypkgs:
1507 # Get all package specific variables
# Pull metadata from whichever db matches the package's type.
1508 if pkg_type == "installed":
1509 metadata = dict(zip(auxkeys, vardb.aux_get(cpv, auxkeys)))
1510 elif pkg_type == "ebuild":
1511 metadata = dict(zip(auxkeys, portdb.aux_get(cpv, auxkeys)))
1512 elif pkg_type == "binary":
1513 metadata = dict(zip(auxkeys, bindb.aux_get(cpv, auxkeys)))
1515 pkg = Package(built=(pkg_type!="ebuild"), cpv=cpv,
1516 installed=(pkg_type=="installed"), metadata=zip(Package.metadata_keys,
1517 (metadata.get(x, '') for x in Package.metadata_keys)),
1518 root_config=root_config, type_name=pkg_type)
1520 if pkg_type == "installed":
1521 print("\n%s was built with the following:" % \
1522 colorize("INFORM", str(pkg.cpv)))
1523 elif pkg_type == "ebuild":
# NOTE(review): "would be build" is an upstream typo for "would be
# built" in this user-facing message; left byte-identical here.
1524 print("\n%s would be build with the following:" % \
1525 colorize("INFORM", str(pkg.cpv)))
1526 elif pkg_type == "binary":
1527 print("\n%s (non-installed binary) was built with the following:" % \
1528 colorize("INFORM", str(pkg.cpv)))
# Compute the per-package USE display: split flags into USE proper and
# each USE_EXPAND variable, tracking forced/masked flags for styling.
1530 pkgsettings.setcpv(pkg)
1531 forced_flags = set(chain(pkgsettings.useforce,
1532 pkgsettings.usemask))
1533 use = set(pkg.use.enabled)
1534 use.discard(pkgsettings.get('ARCH'))
1535 use_expand_flags = set()
1538 for varname in use_expand:
1539 flag_prefix = varname.lower() + "_"
1541 if f.startswith(flag_prefix):
1542 use_expand_flags.add(f)
1543 use_enabled.setdefault(
1544 varname.upper(), []).append(f[len(flag_prefix):])
# Flags in IUSE but not enabled go into the disabled buckets.
1546 for f in pkg.iuse.all:
1547 if f.startswith(flag_prefix):
1548 use_expand_flags.add(f)
1550 use_disabled.setdefault(
1551 varname.upper(), []).append(f[len(flag_prefix):])
# Display order: USE first, then the expand variables alphabetically.
1553 var_order = set(use_enabled)
1554 var_order.update(use_disabled)
1555 var_order = sorted(var_order)
1556 var_order.insert(0, 'USE')
1557 use.difference_update(use_expand_flags)
1558 use_enabled['USE'] = list(use)
1559 use_disabled['USE'] = []
1561 for f in pkg.iuse.all:
1562 if f not in use and \
1563 f not in use_expand_flags:
1564 use_disabled['USE'].append(f)
# Render each variable's flags, skipping USE_EXPAND_HIDDEN entries.
1567 for varname in var_order:
1568 if varname in use_expand_hidden:
1571 for f in use_enabled.get(varname, []):
1572 flags.append(UseFlagDisplay(f, True, f in forced_flags))
1573 for f in use_disabled.get(varname, []):
1574 flags.append(UseFlagDisplay(f, False, f in forced_flags))
1575 if alphabetical_use:
1576 flags.sort(key=UseFlagDisplay.sort_combined)
1578 flags.sort(key=UseFlagDisplay.sort_separated)
1579 # Use _unicode_decode() to force unicode format string so
1580 # that UseFlagDisplay.__unicode__() is called in python2.
1581 flag_displays.append('%s="%s"' % (varname,
1582 ' '.join(_unicode_decode("%s") % (f,) for f in flags)))
1583 writemsg_stdout('%s\n' % ' '.join(flag_displays), noiselevel=-1)
# For installed packages, show build-time vars that differ from the
# current global configuration.
1584 if pkg_type == "installed":
1585 for myvar in mydesiredvars:
1586 if metadata[myvar].split() != settings.get(myvar, '').split():
1587 print("%s=\"%s\"" % (myvar, metadata[myvar]))
# Only run pkg_info() when the ebuild actually defines that phase.
1590 if metadata['DEFINED_PHASES']:
1591 if 'info' not in metadata['DEFINED_PHASES'].split():
1594 print(">>> Attempting to run pkg_info() for '%s'" % pkg.cpv)
# Locate the ebuild: vardb/portdb lookups, or for binaries extract the
# ebuild from the tbz2 xpak into a temporary directory.
1596 if pkg_type == "installed":
1597 ebuildpath = vardb.findname(pkg.cpv)
1598 elif pkg_type == "ebuild":
1599 ebuildpath = portdb.findname(pkg.cpv, myrepo=pkg.repo)
1600 elif pkg_type == "binary":
1601 tbz2_file = bindb.bintree.getname(pkg.cpv)
1602 ebuild_file_name = pkg.cpv.split("/")[1] + ".ebuild"
1603 ebuild_file_contents = portage.xpak.tbz2(tbz2_file).getfile(ebuild_file_name)
1604 tmpdir = tempfile.mkdtemp()
1605 ebuildpath = os.path.join(tmpdir, ebuild_file_name)
1606 file = open(ebuildpath, 'w')
1607 file.write(ebuild_file_contents)
1610 if not ebuildpath or not os.path.exists(ebuildpath):
1611 out.ewarn("No ebuild found for '%s'" % pkg.cpv)
# Run the "info" phase against the matching dbapi for each type.
# NOTE(review): settings.get("PORTAGE_DEBUG", "") == 1 compares a
# string to int 1 and is always False — looks like a latent upstream
# bug ('"1"' was presumably intended); left unchanged here.
1614 if pkg_type == "installed":
1615 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
1616 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
1617 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
1619 elif pkg_type == "ebuild":
1620 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
1621 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
1622 mydbapi=trees[settings["ROOT"]]["porttree"].dbapi,
1624 elif pkg_type == "binary":
1625 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
1626 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
1627 mydbapi=trees[settings["ROOT"]]["bintree"].dbapi,
# Clean up the temporary directory used for a binary package's ebuild.
1629 shutil.rmtree(tmpdir)
# Handle `emerge --metadata`: transfer pregenerated metadata cache entries
# (e.g. from $repo/metadata/cache) into the local depcache for each
# portdir tree, validating EAPI and eclass data, and pruning stale
# entries afterwards.  Shows a terminal progress bar unless quiet.
# NOTE(review): this excerpt has elided lines (gaps in the embedded
# numbering); several try/else/continue lines are not visible.
1631 def action_metadata(settings, portdb, myopts, porttrees=None):
1632 if porttrees is None:
1633 porttrees = portdb.porttrees
1634 portage.writemsg_stdout("\n>>> Updating Portage cache\n")
# Group-writable cache files; original umask restored elsewhere.
1635 old_umask = os.umask(0o002)
1636 cachedir = os.path.normpath(settings.depcachedir)
# Refuse to operate on a top-level system directory — pruning dead
# cache nodes below would delete from it.
1637 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
1638 "/lib", "/opt", "/proc", "/root", "/sbin",
1639 "/sys", "/tmp", "/usr", "/var"]:
1640 print("!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
1641 "ROOT DIRECTORY ON YOUR SYSTEM.", file=sys.stderr)
1642 print("!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir, file=sys.stderr)
1644 if not os.path.exists(cachedir):
1645 os.makedirs(cachedir)
# Metadata keys to transfer (skip the UNUSED_0x placeholders).
1647 auxdbkeys = [x for x in portage.auxdbkeys if not x.startswith("UNUSED_0")]
1648 auxdbkeys = tuple(auxdbkeys)
# Per-tree bundle: source cache, destination cache, eclass db, and the
# set of cpvs seen (used later to prune dead destination entries).
1650 class TreeData(object):
1651 __slots__ = ('dest_db', 'eclass_db', 'path', 'src_db', 'valid_nodes')
1652 def __init__(self, dest_db, eclass_db, path, src_db):
1653 self.dest_db = dest_db
1654 self.eclass_db = eclass_db
1656 self.src_db = src_db
1657 self.valid_nodes = set()
# Build a TreeData for every tree that actually has a source cache.
1660 for path in porttrees:
1661 src_db = portdb._pregen_auxdb.get(path)
1662 if src_db is None and \
1663 os.path.isdir(os.path.join(path, 'metadata', 'cache')):
1664 src_db = portdb.metadbmodule(
1665 path, 'metadata/cache', auxdbkeys, readonly=True)
# Some cache modules accept no 'ec' attribute; best-effort only.
1667 src_db.ec = portdb._repo_info[path].eclass_db
1668 except AttributeError:
1671 if src_db is not None:
1672 porttrees_data.append(TreeData(portdb.auxdb[path],
1673 portdb._repo_info[path].eclass_db, path, src_db))
1675 porttrees = [tree_data.path for tree_data in porttrees_data]
# Suppress the progress bar for dumb terminals, --quiet, or pipes.
1677 quiet = settings.get('TERM') == 'dumb' or \
1678 '--quiet' in myopts or \
1679 not sys.stdout.isatty()
1683 progressBar = portage.output.TermProgressBar()
1684 progressHandler = ProgressHandler()
1685 onProgress = progressHandler.onProgress
1687 progressBar.set(progressHandler.curval, progressHandler.maxval)
1688 progressHandler.display = display
# Keep the bar sized correctly when the terminal is resized.
1689 def sigwinch_handler(signum, frame):
1690 lines, progressBar.term_columns = \
1691 portage.output.get_term_size()
1692 signal.signal(signal.SIGWINCH, sigwinch_handler)
1694 # Temporarily override portdb.porttrees so portdb.cp_all()
1695 # will only return the relevant subset.
1696 portdb_porttrees = portdb.porttrees
1697 portdb.porttrees = porttrees
1699 cp_all = portdb.cp_all()
1701 portdb.porttrees = portdb_porttrees
1704 maxval = len(cp_all)
1705 if onProgress is not None:
1706 onProgress(maxval, curval)
1708 from portage.cache.util import quiet_mirroring
1709 from portage import eapi_is_supported, \
1710 _validate_cache_for_unsupported_eapis
1712 # TODO: Display error messages, but do not interfere with the progress bar.
1714 # 1) erase the progress bar
1715 # 2) show the error message
1716 # 3) redraw the progress bar on a new line
1717 noise = quiet_mirroring()
# Main transfer loop: for each category/package, copy each version's
# source cache entry into the destination cache if needed.
1720 for tree_data in porttrees_data:
1721 for cpv in portdb.cp_list(cp, mytree=tree_data.path):
1722 tree_data.valid_nodes.add(cpv)
1724 src = tree_data.src_db[cpv]
1725 except KeyError as e:
1726 noise.missing_entry(cpv)
1729 except CacheError as ce:
1730 noise.exception(cpv, ce)
# A leading '-' on EAPI marks a previously-flagged unsupported EAPI.
1734 eapi = src.get('EAPI')
1737 eapi = eapi.lstrip('-')
1738 eapi_supported = eapi_is_supported(eapi)
1739 if not eapi_supported:
1740 if not _validate_cache_for_unsupported_eapis:
1741 noise.misc(cpv, "unable to validate " + \
1742 "cache for EAPI='%s'" % eapi)
1747 dest = tree_data.dest_db[cpv]
1748 except (KeyError, CacheError):
1751 for d in (src, dest):
1752 if d is not None and d.get('EAPI') in ('', '0'):
# Skip the write only when the existing destination entry is provably
# identical (mtime, eclasses, and every other key).
1755 if dest is not None:
1756 if not (dest['_mtime_'] == src['_mtime_'] and \
1757 tree_data.eclass_db.is_eclass_data_valid(
1758 dest['_eclasses_']) and \
1759 set(dest['_eclasses_']) == set(src['_eclasses_'])):
1762 # We don't want to skip the write unless we're really
1763 # sure that the existing cache is identical, so don't
1764 # trust _mtime_ and _eclasses_ alone.
1765 for k in set(chain(src, dest)).difference(
1766 ('_mtime_', '_eclasses_')):
1767 if dest.get(k, '') != src.get(k, ''):
1771 if dest is not None:
1772 # The existing data is valid and identical,
1773 # so there's no need to overwrite it.
# Resolve the entry's eclass data, preferring _eclasses_ over the
# INHERITED field, and validate it against the tree's eclass db.
1777 inherited = src.get('INHERITED', '')
1778 eclasses = src.get('_eclasses_')
1779 except CacheError as ce:
1780 noise.exception(cpv, ce)
1784 if eclasses is not None:
1785 if not tree_data.eclass_db.is_eclass_data_valid(
1787 noise.eclass_stale(cpv)
1789 inherited = eclasses
1791 inherited = inherited.split()
1793 if tree_data.src_db.complete_eclass_entries and \
1795 noise.corruption(cpv, "missing _eclasses_ field")
1799 # Even if _eclasses_ already exists, replace it with data from
1800 # eclass_cache, in order to insert local eclass paths.
1802 eclasses = tree_data.eclass_db.get_eclass_data(inherited)
1804 # INHERITED contains a non-existent eclass.
1805 noise.eclass_stale(cpv)
1808 if eclasses is None:
1809 noise.eclass_stale(cpv)
1811 src['_eclasses_'] = eclasses
1813 src['_eclasses_'] = {}
# Unsupported EAPIs are cached with a '-' prefix so they are
# recognized (and not re-parsed) on subsequent runs.
1815 if not eapi_supported:
1817 'EAPI' : '-' + eapi,
1818 '_mtime_' : src['_mtime_'],
1819 '_eclasses_' : src['_eclasses_'],
# Write the transferred entry into the destination cache.
1823 tree_data.dest_db[cpv] = src
1824 except CacheError as ce:
1825 noise.exception(cpv, ce)
1829 if onProgress is not None:
1830 onProgress(maxval, curval)
1832 if onProgress is not None:
1833 onProgress(maxval, curval)
# Prune destination-cache entries whose cpv no longer exists in the tree.
1835 for tree_data in porttrees_data:
1837 dead_nodes = set(tree_data.dest_db)
1838 except CacheError as e:
1839 writemsg_level("Error listing cache entries for " + \
1840 "'%s': %s, continuing...\n" % (tree_data.path, e),
1841 level=logging.ERROR, noiselevel=-1)
1844 dead_nodes.difference_update(tree_data.valid_nodes)
1845 for cpv in dead_nodes:
1847 del tree_data.dest_db[cpv]
1848 except (KeyError, CacheError):
1852 # make sure the final progress is displayed
1853 progressHandler.display()
# Restore the default SIGWINCH handler installed above.
1855 signal.signal(signal.SIGWINCH, signal.SIG_DFL)
# Handle `emerge --regen`: regenerate metadata cache entries for all
# ebuilds using a parallel MetadataRegen job pool, bounded by max_jobs
# and max_load.  Returns the regen pool's exit code.
# NOTE(review): this excerpt has elided lines — the try: wrapping the
# os.close() call and the regen.run() invocation are not visible.
1860 def action_regen(settings, portdb, max_jobs, max_load):
1861 xterm_titles = "notitles" not in settings.features
1862 emergelog(xterm_titles, " === regen")
1863 #regenerate cache entries
# Close stdin; the regen job pool does not read from it.
1865 os.close(sys.stdin.fileno())
1866 except SystemExit as e:
1867 raise # Needed else can't exit
1872 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
1875 portage.writemsg_stdout("done!\n")
1876 return regen.returncode
# Handle `emerge --search`: run each command-line term through the search
# engine and print the combined results.  Search terms may be regular
# expressions, hence the re.error handling below.
# NOTE(review): this excerpt has elided lines — the empty-myfiles guard
# and the try: around execute() are not visible.
1878 def action_search(root_config, myopts, myfiles, spinner):
1880 print("emerge: no search terms provided.")
# Search options derive from emerge flags: description search, verbosity,
# and whether binary packages are considered.
1882 searchinstance = search(root_config,
1883 spinner, "--searchdesc" in myopts,
1884 "--quiet" not in myopts, "--usepkg" in myopts,
1885 "--usepkgonly" in myopts)
1886 for mysearch in myfiles:
1888 searchinstance.execute(mysearch)
# A malformed regex in the search term is reported, not fatal.
1889 except re.error as comment:
1890 print("\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment ))
1892 searchinstance.output()
1894 def action_sync(settings, trees, mtimedb, myopts, myaction):
1895 enter_invalid = '--ask-enter-invalid' in myopts
1896 xterm_titles = "notitles" not in settings.features
1897 emergelog(xterm_titles, " === sync")
1898 portdb = trees[settings["ROOT"]]["porttree"].dbapi
1899 myportdir = portdb.porttree_root
1900 out = portage.output.EOutput()
1901 global_config_path = GLOBAL_CONFIG_PATH
1902 if settings['EPREFIX']:
1903 global_config_path = os.path.join(settings['EPREFIX'],
1904 GLOBAL_CONFIG_PATH.lstrip(os.sep))
1906 sys.stderr.write("!!! PORTDIR is undefined. " + \
1907 "Is %s/make.globals missing?\n" % global_config_path)
1909 if myportdir[-1]=="/":
1910 myportdir=myportdir[:-1]
1912 st = os.stat(myportdir)
1916 print(">>>",myportdir,"not found, creating it.")
1917 os.makedirs(myportdir,0o755)
1918 st = os.stat(myportdir)
1922 spawn_kwargs["env"] = settings.environ()
1923 if 'usersync' in settings.features and \
1924 portage.data.secpass >= 2 and \
1925 (st.st_uid != os.getuid() and st.st_mode & 0o700 or \
1926 st.st_gid != os.getgid() and st.st_mode & 0o070):
1928 homedir = pwd.getpwuid(st.st_uid).pw_dir
1932 # Drop privileges when syncing, in order to match
1933 # existing uid/gid settings.
1934 usersync_uid = st.st_uid
1935 spawn_kwargs["uid"] = st.st_uid
1936 spawn_kwargs["gid"] = st.st_gid
1937 spawn_kwargs["groups"] = [st.st_gid]
1938 spawn_kwargs["env"]["HOME"] = homedir
1940 if not st.st_mode & 0o020:
1941 umask = umask | 0o020
1942 spawn_kwargs["umask"] = umask
1944 if usersync_uid is not None:
1945 # PORTAGE_TMPDIR is used below, so validate it and
1946 # bail out if necessary.
1947 rval = _check_temp_dir(settings)
1948 if rval != os.EX_OK:
1951 syncuri = settings.get("SYNC", "").strip()
1953 writemsg_level("!!! SYNC is undefined. " + \
1954 "Is %s/make.globals missing?\n" % global_config_path,
1955 noiselevel=-1, level=logging.ERROR)
1958 vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
1959 vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
1963 updatecache_flg = False
1964 if myaction == "metadata":
1965 print("skipping sync")
1966 updatecache_flg = True
1967 elif ".git" in vcs_dirs:
1968 # Update existing git repository, and ignore the syncuri. We are
1969 # going to trust the user and assume that the user is in the branch
1970 # that he/she wants updated. We'll let the user manage branches with
1972 if portage.process.find_binary("git") is None:
1973 msg = ["Command not found: git",
1974 "Type \"emerge dev-util/git\" to enable git support."]
1976 writemsg_level("!!! %s\n" % l,
1977 level=logging.ERROR, noiselevel=-1)
1979 msg = ">>> Starting git pull in %s..." % myportdir
1980 emergelog(xterm_titles, msg )
1981 writemsg_level(msg + "\n")
1982 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
1983 (portage._shell_quote(myportdir),), **spawn_kwargs)
1984 if exitcode != os.EX_OK:
1985 msg = "!!! git pull error in %s." % myportdir
1986 emergelog(xterm_titles, msg)
1987 writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
1989 msg = ">>> Git pull in %s successful" % myportdir
1990 emergelog(xterm_titles, msg)
1991 writemsg_level(msg + "\n")
1992 exitcode = git_sync_timestamps(settings, myportdir)
1993 if exitcode == os.EX_OK:
1994 updatecache_flg = True
1995 elif syncuri[:8]=="rsync://":
1996 for vcs_dir in vcs_dirs:
1997 writemsg_level(("!!! %s appears to be under revision " + \
1998 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
1999 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
2001 if not os.path.exists("/usr/bin/rsync"):
2002 print("!!! /usr/bin/rsync does not exist, so rsync support is disabled.")
2003 print("!!! Type \"emerge net-misc/rsync\" to enable rsync support.")
2008 if settings["PORTAGE_RSYNC_OPTS"] == "":
2009 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
2011 "--recursive", # Recurse directories
2012 "--links", # Consider symlinks
2013 "--safe-links", # Ignore links outside of tree
2014 "--perms", # Preserve permissions
2015 "--times", # Preserive mod times
2016 "--compress", # Compress the data transmitted
2017 "--force", # Force deletion on non-empty dirs
2018 "--whole-file", # Don't do block transfers, only entire files
2019 "--delete", # Delete files that aren't in the master tree
2020 "--stats", # Show final statistics about what was transfered
2021 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
2022 "--exclude=/distfiles", # Exclude distfiles from consideration
2023 "--exclude=/local", # Exclude local from consideration
2024 "--exclude=/packages", # Exclude packages from consideration
2028 # The below validation is not needed when using the above hardcoded
2031 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
2032 rsync_opts.extend(portage.util.shlex_split(
2033 settings.get("PORTAGE_RSYNC_OPTS", "")))
2034 for opt in ("--recursive", "--times"):
2035 if opt not in rsync_opts:
2036 portage.writemsg(yellow("WARNING:") + " adding required option " + \
2037 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
2038 rsync_opts.append(opt)
2040 for exclude in ("distfiles", "local", "packages"):
2041 opt = "--exclude=/%s" % exclude
2042 if opt not in rsync_opts:
2043 portage.writemsg(yellow("WARNING:") + \
2044 " adding required option %s not included in " % opt + \
2045 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
2046 rsync_opts.append(opt)
2048 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
2049 def rsync_opt_startswith(opt_prefix):
2050 for x in rsync_opts:
2051 if x.startswith(opt_prefix):
2055 if not rsync_opt_startswith("--timeout="):
2056 rsync_opts.append("--timeout=%d" % mytimeout)
2058 for opt in ("--compress", "--whole-file"):
2059 if opt not in rsync_opts:
2060 portage.writemsg(yellow("WARNING:") + " adding required option " + \
2061 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
2062 rsync_opts.append(opt)
2064 if "--quiet" in myopts:
2065 rsync_opts.append("--quiet") # Shut up a lot
2067 rsync_opts.append("--verbose") # Print filelist
2069 if "--verbose" in myopts:
2070 rsync_opts.append("--progress") # Progress meter for each file
2072 if "--debug" in myopts:
2073 rsync_opts.append("--checksum") # Force checksum on all files
2075 # Real local timestamp file.
2076 servertimestampfile = os.path.join(
2077 myportdir, "metadata", "timestamp.chk")
2079 content = portage.util.grabfile(servertimestampfile)
2083 mytimestamp = time.mktime(time.strptime(content[0],
2084 "%a, %d %b %Y %H:%M:%S +0000"))
2085 except (OverflowError, ValueError):
2090 rsync_initial_timeout = \
2091 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
2093 rsync_initial_timeout = 15
2096 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
2097 except SystemExit as e:
2098 raise # Needed else can't exit
2100 maxretries = -1 #default number of retries
2103 user_name, hostname, port = re.split(
2104 "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
2107 if user_name is None:
2109 updatecache_flg=True
2110 all_rsync_opts = set(rsync_opts)
2111 extra_rsync_opts = portage.util.shlex_split(
2112 settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
2113 all_rsync_opts.update(extra_rsync_opts)
2115 family = socket.AF_UNSPEC
2116 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
2117 family = socket.AF_INET
2118 elif socket.has_ipv6 and \
2119 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
2120 family = socket.AF_INET6
2126 addrinfos = getaddrinfo_validate(
2127 socket.getaddrinfo(hostname, None,
2128 family, socket.SOCK_STREAM))
2129 except socket.error as e:
2131 "!!! getaddrinfo failed for '%s': %s\n" % (hostname, e),
2132 noiselevel=-1, level=logging.ERROR)
2136 AF_INET = socket.AF_INET
2139 AF_INET6 = socket.AF_INET6
2144 for addrinfo in addrinfos:
2145 if addrinfo[0] == AF_INET:
2146 ips_v4.append("%s" % addrinfo[4][0])
2147 elif AF_INET6 is not None and addrinfo[0] == AF_INET6:
2148 # IPv6 addresses need to be enclosed in square brackets
2149 ips_v6.append("[%s]" % addrinfo[4][0])
2151 random.shuffle(ips_v4)
2152 random.shuffle(ips_v6)
2154 # Give priority to the address family that
2155 # getaddrinfo() returned first.
2156 if AF_INET6 is not None and addrinfos and \
2157 addrinfos[0][0] == AF_INET6:
2158 ips = ips_v6 + ips_v4
2160 ips = ips_v4 + ips_v6
2163 uris.append(syncuri.replace(
2164 "//" + user_name + hostname + port + "/",
2165 "//" + user_name + ip + port + "/", 1))
2168 # With some configurations we need to use the plain hostname
2169 # rather than try to resolve the ip addresses (bug #340817).
2170 uris.append(syncuri)
2172 # reverse, for use with pop()
2175 effective_maxretries = maxretries
2176 if effective_maxretries < 0:
2177 effective_maxretries = len(uris) - 1
2179 SERVER_OUT_OF_DATE = -1
2180 EXCEEDED_MAX_RETRIES = -2
2183 dosyncuri = uris.pop()
2185 writemsg("!!! Exhausted addresses for %s\n" % \
2186 hostname, noiselevel=-1)
2190 if "--ask" in myopts:
2191 if userquery("Do you want to sync your Portage tree " + \
2192 "with the mirror at\n" + blue(dosyncuri) + bold("?"),
2193 enter_invalid) == "No":
2198 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
2199 if "--quiet" not in myopts:
2200 print(">>> Starting rsync with "+dosyncuri+"...")
2202 emergelog(xterm_titles,
2203 ">>> Starting retry %d of %d with %s" % \
2204 (retries, effective_maxretries, dosyncuri))
2206 "\n\n>>> Starting retry %d of %d with %s\n" % \
2207 (retries, effective_maxretries, dosyncuri), noiselevel=-1)
2209 if mytimestamp != 0 and "--quiet" not in myopts:
2210 print(">>> Checking server timestamp ...")
2212 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
2214 if "--debug" in myopts:
2219 # Even if there's no timestamp available locally, fetch the
2220 # timestamp anyway as an initial probe to verify that the server is
2221 # responsive. This protects us from hanging indefinitely on a
2222 # connection attempt to an unresponsive server which rsync's
2223 # --timeout option does not prevent.
2225 # Temporary file for remote server timestamp comparison.
2226 # NOTE: If FEATURES=usersync is enabled then the tempfile
2227 # needs to be in a directory that's readable by the usersync
2228 # user. We assume that PORTAGE_TMPDIR will satisfy this
2229 # requirement, since that's not necessarily true for the
2230 # default directory used by the tempfile module.
2231 if usersync_uid is not None:
2232 tmpdir = settings['PORTAGE_TMPDIR']
2234 # use default dir from tempfile module
2236 fd, tmpservertimestampfile = \
2237 tempfile.mkstemp(dir=tmpdir)
2239 if usersync_uid is not None:
2240 portage.util.apply_permissions(tmpservertimestampfile,
2242 mycommand = rsynccommand[:]
2243 mycommand.append(dosyncuri.rstrip("/") + \
2244 "/metadata/timestamp.chk")
2245 mycommand.append(tmpservertimestampfile)
2249 # Timeout here in case the server is unresponsive. The
2250 # --timeout rsync option doesn't apply to the initial
2251 # connection attempt.
2253 if rsync_initial_timeout:
2254 portage.exception.AlarmSignal.register(
2255 rsync_initial_timeout)
2257 mypids.extend(portage.process.spawn(
2258 mycommand, returnpid=True, **spawn_kwargs))
2259 exitcode = os.waitpid(mypids[0], 0)[1]
2260 if usersync_uid is not None:
2261 portage.util.apply_permissions(tmpservertimestampfile,
2263 content = portage.grabfile(tmpservertimestampfile)
2265 if rsync_initial_timeout:
2266 portage.exception.AlarmSignal.unregister()
2268 os.unlink(tmpservertimestampfile)
2271 except portage.exception.AlarmSignal:
2274 # With waitpid and WNOHANG, only check the
2275 # first element of the tuple since the second
2276 # element may vary (bug #337465).
2277 if mypids and os.waitpid(mypids[0], os.WNOHANG)[0] == 0:
2278 os.kill(mypids[0], signal.SIGTERM)
2279 os.waitpid(mypids[0], 0)
2280 # This is the same code rsync uses for timeout.
2283 if exitcode != os.EX_OK:
2285 exitcode = (exitcode & 0xff) << 8
2287 exitcode = exitcode >> 8
2289 portage.process.spawned_pids.remove(mypids[0])
2292 servertimestamp = time.mktime(time.strptime(
2293 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
2294 except (OverflowError, ValueError):
2296 del mycommand, mypids, content
2297 if exitcode == os.EX_OK:
2298 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
2299 emergelog(xterm_titles,
2300 ">>> Cancelling sync -- Already current.")
2303 print(">>> Timestamps on the server and in the local repository are the same.")
2304 print(">>> Cancelling all further sync action. You are already up to date.")
2306 print(">>> In order to force sync, remove '%s'." % servertimestampfile)
2310 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
2311 emergelog(xterm_titles,
2312 ">>> Server out of date: %s" % dosyncuri)
2315 print(">>> SERVER OUT OF DATE: %s" % dosyncuri)
2317 print(">>> In order to force sync, remove '%s'." % servertimestampfile)
2320 exitcode = SERVER_OUT_OF_DATE
2321 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
2323 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
2324 exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
2325 if exitcode in [0,1,3,4,11,14,20,21]:
2327 elif exitcode in [1,3,4,11,14,20,21]:
2330 # Code 2 indicates protocol incompatibility, which is expected
2331 # for servers with protocol < 29 that don't support
2332 # --prune-empty-directories. Retry for a server that supports
2333 # at least rsync protocol version 29 (>=rsync-2.6.4).
2338 if maxretries < 0 or retries <= maxretries:
2339 print(">>> Retrying...")
2343 updatecache_flg=False
2344 exitcode = EXCEEDED_MAX_RETRIES
2348 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
2349 elif exitcode == SERVER_OUT_OF_DATE:
2351 elif exitcode == EXCEEDED_MAX_RETRIES:
2353 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
2358 msg.append("Rsync has reported that there is a syntax error. Please ensure")
2359 msg.append("that your SYNC statement is proper.")
2360 msg.append("SYNC=" + settings["SYNC"])
2362 msg.append("Rsync has reported that there is a File IO error. Normally")
2363 msg.append("this means your disk is full, but can be caused by corruption")
2364 msg.append("on the filesystem that contains PORTDIR. Please investigate")
2365 msg.append("and try again after the problem has been fixed.")
2366 msg.append("PORTDIR=" + settings["PORTDIR"])
2368 msg.append("Rsync was killed before it finished.")
2370 msg.append("Rsync has not successfully finished. It is recommended that you keep")
2371 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
2372 msg.append("to use rsync due to firewall or other restrictions. This should be a")
2373 msg.append("temporary problem unless complications exist with your network")
2374 msg.append("(and possibly your system's filesystem) configuration.")
2378 elif syncuri[:6]=="cvs://":
2379 if not os.path.exists("/usr/bin/cvs"):
2380 print("!!! /usr/bin/cvs does not exist, so CVS support is disabled.")
2381 print("!!! Type \"emerge dev-vcs/cvs\" to enable CVS support.")
2384 cvsdir=os.path.dirname(myportdir)
2385 if not os.path.exists(myportdir+"/CVS"):
2387 print(">>> Starting initial cvs checkout with "+syncuri+"...")
2388 if os.path.exists(cvsdir+"/gentoo-x86"):
2389 print("!!! existing",cvsdir+"/gentoo-x86 directory; exiting.")
2393 except OSError as e:
2394 if e.errno != errno.ENOENT:
2396 "!!! existing '%s' directory; exiting.\n" % myportdir)
2399 if portage.spawn_bash("cd %s; exec cvs -z0 -d %s co -P gentoo-x86" % \
2400 (portage._shell_quote(cvsdir), portage._shell_quote(cvsroot)),
2402 print("!!! cvs checkout error; exiting.")
2404 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
2407 print(">>> Starting cvs update with "+syncuri+"...")
2408 retval = portage.process.spawn_bash(
2409 "cd %s; exec cvs -z0 -q update -dP" % \
2410 (portage._shell_quote(myportdir),), **spawn_kwargs)
2411 if retval != os.EX_OK:
2415 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
2416 noiselevel=-1, level=logging.ERROR)
2419 if updatecache_flg and \
2420 myaction != "metadata" and \
2421 "metadata-transfer" not in settings.features:
2422 updatecache_flg = False
2424 # Reload the whole config from scratch.
2425 settings, trees, mtimedb = load_emerge_config(trees=trees)
2426 adjust_configs(myopts, trees)
2427 root_config = trees[settings["ROOT"]]["root_config"]
2428 portdb = trees[settings["ROOT"]]["porttree"].dbapi
2430 if updatecache_flg and \
2431 os.path.exists(os.path.join(myportdir, 'metadata', 'cache')):
2433 # Only update cache for myportdir since that's
2434 # the only one that's been synced here.
2435 action_metadata(settings, portdb, myopts, porttrees=[myportdir])
2437 if myopts.get('--package-moves') != 'n' and \
2438 _global_updates(trees, mtimedb["updates"], quiet=("--quiet" in myopts)):
2440 # Reload the whole config from scratch.
2441 settings, trees, mtimedb = load_emerge_config(trees=trees)
2442 adjust_configs(myopts, trees)
2443 portdb = trees[settings["ROOT"]]["porttree"].dbapi
2444 root_config = trees[settings["ROOT"]]["root_config"]
2446 mybestpv = portdb.xmatch("bestmatch-visible",
2447 portage.const.PORTAGE_PACKAGE_ATOM)
2448 mypvs = portage.best(
2449 trees[settings["ROOT"]]["vartree"].dbapi.match(
2450 portage.const.PORTAGE_PACKAGE_ATOM))
2452 chk_updated_cfg_files(settings["EROOT"],
2453 portage.util.shlex_split(settings.get("CONFIG_PROTECT", "")))
2455 if myaction != "metadata":
2456 postsync = os.path.join(settings["PORTAGE_CONFIGROOT"],
2457 portage.USER_CONFIG_PATH, "bin", "post_sync")
2458 if os.access(postsync, os.X_OK):
2459 retval = portage.process.spawn(
2460 [postsync, dosyncuri], env=settings.environ())
2461 if retval != os.EX_OK:
2462 print(red(" * ") + bold("spawn failed of " + postsync))
2464 if(mybestpv != mypvs) and not "--quiet" in myopts:
2466 print(red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended")
2467 print(red(" * ")+"that you update portage now, before any other packages are updated.")
2469 print(red(" * ")+"To update portage, run 'emerge portage' now.")
2472 display_news_notification(root_config, myopts)
def action_uninstall(settings, trees, ldpath_mtimes,
    opts, action, files, spinner):
    """Validate the arguments of an uninstall-style action ('clean',
    'unmerge', 'prune', 'deselect') and dispatch to the appropriate
    handler (unmerge(), action_depclean(), or action_deselect()).

    Arguments in `files` may be package atoms, absolute file paths
    (resolved to owning packages via vardb), or set names (for
    'deselect' only).
    """
    # For backward compat, some actions do not require leading '='.
    ignore_missing_eq = action in ('clean', 'unmerge')
    root = settings['ROOT']
    vardb = trees[root]['vartree'].dbapi

    # Ensure atoms are valid before calling unmerge().
    # For backward compat, leading '=' is not required.
    if is_valid_package_atom(x, allow_repo=True) or \
        (ignore_missing_eq and is_valid_package_atom('=' + x)):
            # Expand a short name (e.g. "vim") to a full category/package
            # atom using the installed-package database.
            dep_expand(x, mydb=vardb, settings=settings))
    except portage.exception.AmbiguousPackageName as e:
        # Multiple categories match the short name; list all candidates
        # and refuse to guess.
        msg = "The short ebuild name \"" + x + \
            "\" is ambiguous. Please specify " + \
            "one of the following " + \
            "fully-qualified ebuild names instead:"
        for line in textwrap.wrap(msg, 70):
            writemsg_level("!!! %s\n" % (line,),
                level=logging.ERROR, noiselevel=-1)
            writemsg_level(" %s\n" % colorize("INFORM", i),
                level=logging.ERROR, noiselevel=-1)
        writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
    elif x.startswith(os.sep):
        # Argument is an absolute file path: it must live under $ROOT
        # so it can be mapped back to an owning package.
        if not x.startswith(root):
            writemsg_level(("!!! '%s' does not start with" + \
                " $ROOT.\n") % x, level=logging.ERROR, noiselevel=-1)
        # Queue these up since it's most efficient to handle
        # multiple files in a single iter_owners() call.
        lookup_owners.append(x)
    elif x.startswith(SETPREFIX) and action == "deselect":
        valid_atoms.append(x)
        msg.append("'%s' is not a valid package atom." % (x,))
        msg.append("Please check ebuild(5) for full details.")
        writemsg_level("".join("!!! %s\n" % line for line in msg),
            level=logging.ERROR, noiselevel=-1)

    # If multiple paths (or any directory) were given, all owners are
    # reported; for a single regular file only the first owner is used.
    search_for_multiple = False
    if len(lookup_owners) > 1:
        search_for_multiple = True

    for x in lookup_owners:
        if not search_for_multiple and os.path.isdir(x):
            search_for_multiple = True
        # Strip $ROOT (keeping a leading '/') to get the path as stored
        # in the vardb contents.
        relative_paths.append(x[len(root)-1:])

    for pkg, relative_path in \
        vardb._owners.iter_owners(relative_paths):
        owners.add(pkg.mycpv)
        if not search_for_multiple:

        slot = vardb.aux_get(cpv, ['SLOT'])[0]
        # portage now masks packages with missing slot, but it's
        # possible that one was installed by an older version
        atom = portage.cpv_getkey(cpv)
        atom = '%s:%s' % (portage.cpv_getkey(cpv), slot)
        valid_atoms.append(portage.dep.Atom(atom))
        writemsg_level(("!!! '%s' is not claimed " + \
            "by any package.\n") % lookup_owners[0],
            level=logging.WARNING, noiselevel=-1)

    if files and not valid_atoms:

    # Warn interactive unmerge users about reverse dependencies.
    if action == 'unmerge' and \
        '--quiet' not in opts and \
        '--quiet-unmerge-warn' not in opts:
        msg = "This action can remove important packages! " + \
            "In order to be safer, use " + \
            "`emerge -pv --depclean <atom>` to check for " + \
            "reverse dependencies before removing packages."
        out = portage.output.EOutput()
        for line in textwrap.wrap(msg, 72):

    if action == 'deselect':
        return action_deselect(settings, trees, opts, valid_atoms)

    # Create a Scheduler for calls to unmerge(), in order to cause
    # redirection of ebuild phase output to logs as required for
    # options such as --quiet.
    sched = Scheduler(settings, trees, None, opts,
    sched._background = sched._background_mode()
    sched._status_display.quiet = True

    if action in ('clean', 'unmerge') or \
        (action == 'prune' and "--nodeps" in opts):
        # When given a list of atoms, unmerge them in the order given.
        ordered = action == 'unmerge'
        unmerge(trees[settings["ROOT"]]['root_config'], opts, action,
            valid_atoms, ldpath_mtimes, ordered=ordered,
            scheduler=sched._sched_iface)

        # 'prune'/'depclean' compute the removal set via the depgraph.
        rval = action_depclean(settings, trees, ldpath_mtimes,
            opts, action, valid_atoms, spinner, scheduler=sched._sched_iface)
def adjust_configs(myopts, trees):
    """Apply emerge-specific config adjustments to every root's settings.

    Iterates all roots in `trees` and runs adjust_config() on each
    vartree's settings object.
    """
    for myroot in trees:
        mysettings = trees[myroot]["vartree"].settings
        adjust_config(myopts, mysettings)
def adjust_config(myopts, settings):
    """Make emerge specific adjustments to the config."""

    # Kill noauto as it will break merges otherwise.
    if "noauto" in settings.features:
        settings.features.remove('noauto')

    # --fail-clean: tri-state option; True enables the feature,
    # 'n' disables it, None leaves FEATURES untouched.
    fail_clean = myopts.get('--fail-clean')
    if fail_clean is not None:
        if fail_clean is True and \
            'fail-clean' not in settings.features:
            settings.features.add('fail-clean')
        elif fail_clean == 'n' and \
            'fail-clean' in settings.features:
            settings.features.remove('fail-clean')

        # Parse CLEAN_DELAY from config; fall back to the previous
        # value and rewrite the setting on parse failure.
        CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
    except ValueError as e:
        portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
        portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
            settings["CLEAN_DELAY"], noiselevel=-1)
    settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
    settings.backup_changes("CLEAN_DELAY")

    # EMERGE_WARNING_DELAY: same parse-with-fallback pattern as above.
    EMERGE_WARNING_DELAY = 10
        EMERGE_WARNING_DELAY = int(settings.get(
            "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
    except ValueError as e:
        portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
        portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
            settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
    settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
    settings.backup_changes("EMERGE_WARNING_DELAY")

    if "--quiet" in myopts or "--quiet-build" in myopts:
        settings["PORTAGE_QUIET"]="1"
        settings.backup_changes("PORTAGE_QUIET")

    if "--verbose" in myopts:
        settings["PORTAGE_VERBOSE"] = "1"
        settings.backup_changes("PORTAGE_VERBOSE")

    # Set so that configs will be merged regardless of remembered status
    if ("--noconfmem" in myopts):
        settings["NOCONFMEM"]="1"
        settings.backup_changes("NOCONFMEM")

    # Set various debug markers... They should be merged somehow.
        # PORTAGE_DEBUG is only valid as 0 or 1.
        PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
        if PORTAGE_DEBUG not in (0, 1):
            portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
                PORTAGE_DEBUG, noiselevel=-1)
            portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
    except ValueError as e:
        portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
        portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
            settings["PORTAGE_DEBUG"], noiselevel=-1)

    if "--debug" in myopts:
        settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
        settings.backup_changes("PORTAGE_DEBUG")

    # Color handling: NOCOLOR from config, overridden by --color,
    # overridden in turn by dumb-terminal / non-tty detection.
    if settings.get("NOCOLOR") not in ("yes","true"):
        portage.output.havecolor = 1

    """The explicit --color < y | n > option overrides the NOCOLOR environment
    variable and stdout auto-detection."""
    if "--color" in myopts:
        if "y" == myopts["--color"]:
            portage.output.havecolor = 1
            settings["NOCOLOR"] = "false"
            portage.output.havecolor = 0
            settings["NOCOLOR"] = "true"
        settings.backup_changes("NOCOLOR")
    elif settings.get('TERM') == 'dumb' or \
        not sys.stdout.isatty():
        portage.output.havecolor = 0
        settings["NOCOLOR"] = "true"
        settings.backup_changes("NOCOLOR")
def display_missing_pkg_set(root_config, set_name):
    """Print an error explaining that `set_name` does not exist,
    followed by the list of sets that do exist for this root."""
    msg.append(("emerge: There are no sets to satisfy '%s'. " + \
        "The following sets exist:") % \
        colorize("INFORM", set_name))

    for s in sorted(root_config.sets):
        msg.append(" %s" % s)

    writemsg_level("".join("%s\n" % l for l in msg),
        level=logging.ERROR, noiselevel=-1)
def relative_profile_path(portdir, abs_profile):
    """Return the profile path relative to ``portdir/profiles``.

    @param portdir: the PORTDIR root (the directory containing 'profiles')
    @param abs_profile: an absolute profile path (symlinks are resolved)
    @return: the sub-path of abs_profile inside portdir/profiles (empty
        string when it is the profiles directory itself), or None when
        abs_profile does not live inside portdir/profiles
    """
    realpath = os.path.realpath(abs_profile)
    basepath = os.path.realpath(os.path.join(portdir, "profiles"))
    if realpath == basepath:
        # The profiles directory itself maps to the empty relative path.
        profilever = ""
    elif realpath.startswith(basepath + os.sep):
        # Require a separator boundary: a bare startswith(basepath) would
        # also match sibling entries such as ".../profilesX" and then
        # mis-slice the result.
        profilever = realpath[1 + len(basepath):]
    else:
        profilever = None
    return profilever
def getportageversion(portdir, target_root, profile, chost, vardb):
    """Build the one-line Portage banner string: version, profile,
    gcc version, libc version, and kernel release/arch."""
    # First try to express the profile relative to $PORTDIR/profiles;
    # otherwise walk the profile's 'parent' entries, then fall back to
    # showing the raw symlink target prefixed with '!'.
    profilever = relative_profile_path(portdir, profile)
    if profilever is None:
        for parent in portage.grabfile(
            os.path.join(profile, 'parent')):
            profilever = relative_profile_path(portdir,
                os.path.join(profile, parent))
            if profilever is not None:
    except portage.exception.PortageException:

    if profilever is None:
        profilever = "!" + os.readlink(profile)

    if profilever is None:
        profilever = "unavailable"

    # Collect installed libc versions from both virtuals, deduplicated;
    # multiple matches are joined with commas.
    libclist = vardb.match("virtual/libc")
    libclist += vardb.match("virtual/glibc")
    libclist = portage.util.unique_array(libclist)
    xs=portage.catpkgsplit(x)
    libcver+=","+"-".join(xs[1:])
    libcver="-".join(xs[1:])
    libcver="unavailable"

    gccver = getgccversion(chost)
    unameout=platform.release()+" "+platform.machine()

    return "Portage %s (%s, %s, %s, %s)" % \
        (portage.VERSION, profilever, gccver, libcver, unameout)
def git_sync_timestamps(settings, portdir):
    """
    Since git doesn't preserve timestamps, synchronize timestamps between
    entries and ebuilds/eclasses. Assume the cache has the correct timestamp
    for a given file as long as the file in the working tree is not modified
    according to `git diff-index`.
    """
    cache_dir = os.path.join(portdir, "metadata", "cache")
    if not os.path.isdir(cache_dir):
    writemsg_level(">>> Synchronizing timestamps...\n")

    from portage.cache.cache_errors import CacheError
        # Open the metadata cache read-only via the configured module.
        cache_db = settings.load_best_module("portdbapi.metadbmodule")(
            portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
    except CacheError as e:
        writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
            level=logging.ERROR, noiselevel=-1)

    # Strip the ".eclass" suffix to get bare eclass names.
    ec_dir = os.path.join(portdir, "eclass")
        ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
            if f.endswith(".eclass"))
    except OSError as e:
        writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
            level=logging.ERROR, noiselevel=-1)

    # Ask git which tracked files are locally modified; those must keep
    # their working-tree timestamps rather than the cached ones.
    args = [portage.const.BASH_BINARY, "-c",
        "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
        portage._shell_quote(portdir)]
    proc = subprocess.Popen(args, stdout=subprocess.PIPE)
    modified_files = set(_unicode_decode(l).rstrip("\n") for l in proc.stdout)
    if rval != os.EX_OK:

    modified_eclasses = set(ec for ec in ec_names \
        if os.path.join("eclass", ec + ".eclass") in modified_files)

    # ec name -> mtime already applied to the eclass file, used to detect
    # cache entries that disagree about an eclass's timestamp.
    updated_ec_mtimes = {}

    for cpv in cache_db:
        cpv_split = portage.catpkgsplit(cpv)
        if cpv_split is None:
            writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
                level=logging.ERROR, noiselevel=-1)

        cat, pn, ver, rev = cpv_split
        cat, pf = portage.catsplit(cpv)
        relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
        # Locally modified ebuilds keep their working-tree timestamps.
        if relative_eb_path in modified_files:

            cache_entry = cache_db[cpv]
            eb_mtime = cache_entry.get("_mtime_")
            ec_mtimes = cache_entry.get("_eclasses_")
            writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
                level=logging.ERROR, noiselevel=-1)
        except CacheError as e:
            writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
                (cpv, e), level=logging.ERROR, noiselevel=-1)

        if eb_mtime is None:
            writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
                level=logging.ERROR, noiselevel=-1)

            # Python 2 era: cache stores the mtime as a string.
            eb_mtime = long(eb_mtime)
            writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
                (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)

        if ec_mtimes is None:
            writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
                level=logging.ERROR, noiselevel=-1)

        # Skip entries that depend on a locally modified eclass.
        if modified_eclasses.intersection(ec_mtimes):

        missing_eclasses = set(ec_mtimes).difference(ec_names)
        if missing_eclasses:
            writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
                (cpv, sorted(missing_eclasses)), level=logging.ERROR,

        eb_path = os.path.join(portdir, relative_eb_path)
            current_eb_mtime = os.stat(eb_path)
            writemsg_level("!!! Missing ebuild: %s\n" % \
                (cpv,), level=logging.ERROR, noiselevel=-1)

        # Two cache entries that reference the same eclass must agree on
        # its mtime, otherwise the cache is internally inconsistent.
        inconsistent = False
        for ec, (ec_path, ec_mtime) in ec_mtimes.items():
            updated_mtime = updated_ec_mtimes.get(ec)
            if updated_mtime is not None and updated_mtime != ec_mtime:
                writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
                    (cpv, ec), level=logging.ERROR, noiselevel=-1)

        # Stamp the ebuild and its eclasses with the cached mtimes.
        if current_eb_mtime != eb_mtime:
            os.utime(eb_path, (eb_mtime, eb_mtime))

        for ec, (ec_path, ec_mtime) in ec_mtimes.items():
            if ec in updated_ec_mtimes:
            ec_path = os.path.join(ec_dir, ec + ".eclass")
            current_mtime = os.stat(ec_path)[stat.ST_MTIME]
            if current_mtime != ec_mtime:
                os.utime(ec_path, (ec_mtime, ec_mtime))
            updated_ec_mtimes[ec] = ec_mtime
def load_emerge_config(trees=None):
    """Load (or reload) the full emerge configuration.

    Honors PORTAGE_CONFIGROOT and ROOT from the environment, builds the
    trees via portage.create_trees(), attaches a RootConfig (with set
    configuration) to each root, and opens the mtimedb.

    @return: a (settings, trees, mtimedb) tuple
    """
    for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
        v = os.environ.get(envvar, None)
    trees = portage.create_trees(trees=trees, **kwargs)

    for root, root_trees in trees.items():
        settings = root_trees["vartree"].settings
        settings._init_dirs()
        setconfig = load_default_config(settings, root_trees)
        root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)

    settings = trees["/"]["vartree"].settings

    for myroot in trees:
        settings = trees[myroot]["vartree"].settings

    mtimedbfile = os.path.join(settings['EROOT'], portage.CACHE_PATH, "mtimedb")
    mtimedb = portage.MtimeDB(mtimedbfile)
    # Initialize color configuration and expose the trees to the IPC
    # QueryCommand used by ebuild helpers.
    portage.output._init(config_root=settings['PORTAGE_CONFIGROOT'])
    QueryCommand._db = trees
    return settings, trees, mtimedb
def chk_updated_cfg_files(eroot, config_protect):
    """Warn the user about CONFIG_PROTECTed config files (or directories)
    under `eroot` that have pending updates."""
    # NOTE(review): target_root is presumably bound from eroot just above
    # this call — confirm against the unabridged source.
        portage.util.find_updated_config_files(target_root, config_protect))
        print("\n"+colorize("WARN", " * IMPORTANT:"), end=' ')
        if not x[1]: # it's a protected file
            print("config file '%s' needs updating." % x[0])
        else: # it's a protected dir
            print("%d config files in '%s' need updating." % (len(x[1]), x[0]))

        # Point the user at the emerge(1) CONFIGURATION FILES section.
        print(" "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")\
            + " section of the " + bold("emerge"))
        print(" "+yellow("*")+" man page to learn how to update config files.")
def display_news_notification(root_config, myopts):
    """Print a per-repository count of unread news items and a hint to
    use `eselect news`. Skipped when 'news' is not in FEATURES."""
    target_root = root_config.settings['EROOT']
    trees = root_config.trees
    settings = trees["vartree"].settings
    portdb = trees["porttree"].dbapi
    vardb = trees["vartree"].dbapi
    NEWS_PATH = os.path.join("metadata", "news")
    UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
    newsReaderDisplay = False
    # Only mark items as read/update state when not in --pretend mode.
    update = "--pretend" not in myopts
    if "news" not in settings.features:

    # Populate these using our existing vartree, to avoid
    # having a temporary one instantiated.
    settings._populate_treeVirtuals_if_needed(trees["vartree"])

    for repo in portdb.getRepositories():
        unreadItems = checkUpdatedNewsItems(
            portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
            if not newsReaderDisplay:
                newsReaderDisplay = True
            print(colorize("WARN", " * IMPORTANT:"), end=' ')
            print("%s news items need reading for repository '%s'." % (unreadItems, repo))

    if newsReaderDisplay:
        print(colorize("WARN", " *"), end=' ')
        print("Use " + colorize("GOOD", "eselect news") + " to read news items.")
def getgccversion(chost):
    """Detect the gcc version in use for *chost*.

    Tries, in order: `gcc-config -c`, `<chost>-gcc -dumpversion`, and a
    plain `gcc -dumpversion`.

    rtype: C{str}
    return: a string such as "gcc-4.5.2", or "[unavailable]" (after
        printing a warning) when no gcc could be found
    """
    version_command = 'gcc -dumpversion'
    version_prefix = 'gcc-'

    not_found_warning = red(
    "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
    "!!! to update the environment of this terminal and possibly\n" +
    "!!! other terminals also.\n"
    )

    # Preferred source: gcc-config reports the selected compiler profile,
    # e.g. "x86_64-pc-linux-gnu-4.5.2"; swap the CHOST prefix for "gcc-".
    status, output = subprocess_getstatusoutput("gcc-config -c")
    if status == os.EX_OK and output.startswith(chost + "-"):
        return output.replace(chost + "-", version_prefix, 1)

    # Fall back to asking the CHOST-prefixed compiler directly.
    status, output = subprocess_getstatusoutput(
        chost + "-" + version_command)
    if status == os.EX_OK:
        return version_prefix + output

    # Last resort: whatever unprefixed gcc is on PATH.
    status, output = subprocess_getstatusoutput(version_command)
    if status == os.EX_OK:
        return version_prefix + output

    portage.writemsg(not_found_warning, noiselevel=-1)
    return "[unavailable]"
def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
    update=False):
    """Count unread news items for one repository.

    Examines news items in repodir + '/' + NEWS_PATH and attempts to find
    unread items.

    @param portdb: a portage tree database
    @type portdb: portdbapi
    @param vardb: an installed package database
    @type vardb: vardbapi
    @param repo_id: the repository to examine
    @param update: refresh the unread-item state before counting
    @rtype: int
    @return: the number of unread but relevant news items
    """
    from portage.news import NewsManager
    news_manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
    return news_manager.getUnreadItems(repo_id, update=update)