1 # Copyright 1999-2011 Gentoo Foundation
2 # Distributed under the terms of the GNU General Public License v2
4 from __future__ import print_function
7 from subprocess import getstatusoutput as subprocess_getstatusoutput
9 from commands import getstatusoutput as subprocess_getstatusoutput
24 from itertools import chain
27 from portage import os
28 from portage import digraph
29 from portage import _unicode_decode
30 from portage.cache.cache_errors import CacheError
31 from portage.const import GLOBAL_CONFIG_PATH, NEWS_LIB_PATH
32 from portage.const import _ENABLE_DYN_LINK_MAP, _ENABLE_SET_CONFIG
33 from portage.dbapi.dep_expand import dep_expand
34 from portage.dbapi._expand_new_virt import expand_new_virt
35 from portage.dep import Atom, extended_cp_match
36 from portage.exception import InvalidAtom
37 from portage.output import blue, bold, colorize, create_color_func, darkgreen, \
39 good = create_color_func("GOOD")
40 bad = create_color_func("BAD")
41 from portage.package.ebuild._ipc.QueryCommand import QueryCommand
42 from portage.package.ebuild.doebuild import _check_temp_dir
43 from portage._sets import load_default_config, SETPREFIX
44 from portage._sets.base import InternalPackageSet
45 from portage.util import cmp_sort_key, writemsg, \
46 writemsg_level, writemsg_stdout
47 from portage._global_updates import _global_updates
49 from _emerge.clear_caches import clear_caches
50 from _emerge.countdown import countdown
51 from _emerge.create_depgraph_params import create_depgraph_params
52 from _emerge.Dependency import Dependency
53 from _emerge.depgraph import backtrack_depgraph, depgraph, resume_depgraph
54 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
55 from _emerge.emergelog import emergelog
56 from _emerge.is_valid_package_atom import is_valid_package_atom
57 from _emerge.MetadataRegen import MetadataRegen
58 from _emerge.Package import Package
59 from _emerge.ProgressHandler import ProgressHandler
60 from _emerge.RootConfig import RootConfig
61 from _emerge.Scheduler import Scheduler
62 from _emerge.search import search
63 from _emerge.SetArg import SetArg
64 from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
65 from _emerge.sync.getaddrinfo_validate import getaddrinfo_validate
66 from _emerge.sync.old_tree_timestamp import old_tree_timestamp_warn
67 from _emerge.unmerge import unmerge
68 from _emerge.UnmergeDepPriority import UnmergeDepPriority
69 from _emerge.UseFlagDisplay import pkg_use_display
70 from _emerge.userquery import userquery
72 if sys.hexversion >= 0x3000000:
# action_build: perform the main emerge "build/merge" action — validate any
# saved resume data, (re)build the dependency graph, display it, prompt the
# user where appropriate, then hand the graph to the Scheduler to merge.
# NOTE(review): many original lines are elided in this excerpt (the leading
# numbers are the original file's line numbers); comments below describe
# only what the visible code shows.
75 def action_build(settings, trees, mtimedb,
76 myopts, myaction, myfiles, spinner):
# Warn about an old portage tree timestamp unless only binary packages
# will be used.
78 if '--usepkgonly' not in myopts:
79 old_tree_timestamp_warn(settings['PORTDIR'], settings)
81 # It's best for config updates in /etc/portage to be processed
82 # before we get here, so warn if they're not (bug #267103).
83 chk_updated_cfg_files(settings['EROOT'], ['/etc/portage'])
85 # validate the state of the resume data
86 # so that we can make assumptions later.
87 for k in ("resume", "resume_backup"):
90 resume_data = mtimedb[k]
# Each resume entry must be a dict whose "mergelist" is a list of
# 4-element lists (pkg_type, pkg_root, pkg_key, pkg_action); anything
# else is treated as stale.
91 if not isinstance(resume_data, dict):
94 mergelist = resume_data.get("mergelist")
95 if not isinstance(mergelist, list):
99 if not (isinstance(x, list) and len(x) == 4):
101 pkg_type, pkg_root, pkg_key, pkg_action = x
102 if pkg_root not in trees:
103 # Current $ROOT setting differs,
104 # so the list must be stale.
110 resume_opts = resume_data.get("myopts")
111 if not isinstance(resume_opts, (dict, list)):
114 favorites = resume_data.get("favorites")
115 if not isinstance(favorites, list):
# --resume handling: fall back to "resume_backup" when "resume" is
# absent, then merge the saved options into myopts (current options win).
120 if "--resume" in myopts and \
121 ("resume" in mtimedb or
122 "resume_backup" in mtimedb):
124 if "resume" not in mtimedb:
125 mtimedb["resume"] = mtimedb["resume_backup"]
126 del mtimedb["resume_backup"]
128 # "myopts" is a list for backward compatibility.
129 resume_opts = mtimedb["resume"].get("myopts", [])
130 if isinstance(resume_opts, list):
131 resume_opts = dict((k,True) for k in resume_opts)
# These one-shot options must not be resurrected from a saved session.
132 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
133 resume_opts.pop(opt, None)
135 # Current options always override resume_opts.
136 resume_opts.update(myopts)
138 myopts.update(resume_opts)
140 if "--debug" in myopts:
141 writemsg_level("myopts %s\n" % (myopts,))
143 # Adjust config according to options of the command being resumed.
145 mysettings = trees[myroot]["vartree"].settings
147 adjust_config(myopts, mysettings)
149 del myroot, mysettings
151 ldpath_mtimes = mtimedb["ldpath"]
# Cache commonly tested option flags as local booleans.
153 buildpkgonly = "--buildpkgonly" in myopts
154 pretend = "--pretend" in myopts
155 fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
156 ask = "--ask" in myopts
157 enter_invalid = '--ask-enter-invalid' in myopts
158 nodeps = "--nodeps" in myopts
159 oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
160 tree = "--tree" in myopts
164 portage.writemsg(colorize("WARN", " * ") + \
165 "--tree is broken with --nodeps. Disabling...\n")
166 debug = "--debug" in myopts
167 verbose = "--verbose" in myopts
168 quiet = "--quiet" in myopts
169 if pretend or fetchonly:
170 # make the mtimedb readonly
171 mtimedb.filename = None
# Warn that implicit digest generation can mask corruption; `repoman
# manifest` is the preferred tool.
172 if '--digest' in myopts or 'digest' in settings.features:
173 if '--digest' in myopts:
174 msg = "The --digest option"
176 msg = "The FEATURES=digest setting"
178 msg += " can prevent corruption from being" + \
179 " noticed. The `repoman manifest` command is the preferred" + \
180 " way to generate manifests and it is capable of doing an" + \
181 " entire repository or category at once."
183 writemsg(prefix + "\n")
184 from textwrap import wrap
185 for line in wrap(msg, 72):
186 writemsg("%s%s\n" % (prefix, line))
187 writemsg(prefix + "\n")
190 favorites = mtimedb["resume"].get("favorites")
191 if not isinstance(favorites, list):
193 myparams = create_depgraph_params(myopts, myaction)
# --skipfirst: drop the first "merge" task from the saved mergelist.
195 resume_data = mtimedb["resume"]
196 mergelist = resume_data["mergelist"]
197 if mergelist and "--skipfirst" in myopts:
198 for i, task in enumerate(mergelist):
199 if isinstance(task, list) and \
200 task and task[-1] == "merge":
# Rebuild the dependency graph from the resume data; known failure
# modes fall through to the detailed error reporting below.
207 success, mydepgraph, dropped_tasks = resume_depgraph(
208 settings, trees, mtimedb, myopts, myparams, spinner)
209 except (portage.exception.PackageNotFound,
210 depgraph.UnsatisfiedResumeDep) as e:
211 if isinstance(e, depgraph.UnsatisfiedResumeDep):
212 mydepgraph = e.depgraph
214 from textwrap import wrap
215 from portage.output import EOutput
218 resume_data = mtimedb["resume"]
219 mergelist = resume_data.get("mergelist")
220 if not isinstance(mergelist, list):
# Only dump the raw resume list in debug/verbose modes.
222 if mergelist and debug or (verbose and not quiet):
223 out.eerror("Invalid resume list:")
226 for task in mergelist:
227 if isinstance(task, list):
228 out.eerror(indent + str(tuple(task)))
231 if isinstance(e, depgraph.UnsatisfiedResumeDep):
232 out.eerror("One or more packages are either masked or " + \
233 "have missing dependencies:")
238 out.eerror(indent + "Masked package:")
239 out.eerror(2 * indent + str(dep.parent))
242 out.eerror(indent + str(dep.atom) + " pulled in by:")
243 out.eerror(2 * indent + str(dep.parent))
245 msg = "The resume list contains packages " + \
246 "that are either masked or have " + \
247 "unsatisfied dependencies. " + \
248 "Please restart/continue " + \
249 "the operation manually, or use --skipfirst " + \
250 "to skip the first package in the list and " + \
251 "any other packages that may be " + \
252 "masked or have missing dependencies."
253 for line in wrap(msg, 72):
255 elif isinstance(e, portage.exception.PackageNotFound):
256 out.eerror("An expected package is " + \
257 "not available: %s" % str(e))
259 msg = "The resume list contains one or more " + \
260 "packages that are no longer " + \
261 "available. Please restart/continue " + \
262 "the operation manually."
263 for line in wrap(msg, 72):
# Report any tasks dropped from the graph due to masking or
# unsatisfied dependencies.
268 portage.writemsg("!!! One or more packages have been " + \
269 "dropped due to\n" + \
270 "!!! masking or unsatisfied dependencies:\n\n",
272 for task in dropped_tasks:
273 portage.writemsg(" " + str(task) + "\n", noiselevel=-1)
274 portage.writemsg("\n", noiselevel=-1)
277 if mydepgraph is not None:
278 mydepgraph.display_problems()
279 if not (ask or pretend):
280 # delete the current list and also the backup
281 # since it's probably stale too.
282 for k in ("resume", "resume_backup"):
288 if ("--resume" in myopts):
289 print(darkgreen("emerge: It seems we have nothing to resume..."))
# Non-resume path: build a fresh depgraph (with backtracking support).
292 myparams = create_depgraph_params(myopts, myaction)
294 success, mydepgraph, favorites = backtrack_depgraph(
295 settings, trees, myopts, myparams, myaction, myfiles, spinner)
296 except portage.exception.PackageSetNotFound as e:
297 root_config = trees[settings["ROOT"]]["root_config"]
298 display_missing_pkg_set(root_config, e.value)
302 mydepgraph.display_problems()
# Interactive/display path: show the merge list and prompt the user
# (skipped under --pretend, or --quiet without --ask).
305 if "--pretend" not in myopts and \
306 ("--ask" in myopts or "--tree" in myopts or \
307 "--verbose" in myopts) and \
308 not ("--quiet" in myopts and "--ask" not in myopts):
309 if "--resume" in myopts:
310 mymergelist = mydepgraph.altlist()
311 if len(mymergelist) == 0:
312 print(colorize("INFORM", "emerge: It seems we have nothing to resume..."))
314 favorites = mtimedb["resume"]["favorites"]
315 retval = mydepgraph.display(
316 mydepgraph.altlist(reversed=tree),
318 mydepgraph.display_problems()
319 if retval != os.EX_OK:
321 prompt="Would you like to resume merging these packages?"
323 retval = mydepgraph.display(
324 mydepgraph.altlist(reversed=("--tree" in myopts)),
326 mydepgraph.display_problems()
327 if retval != os.EX_OK:
330 for x in mydepgraph.altlist():
331 if isinstance(x, Package) and x.operation == "merge":
# Offer to record new favorites in the world set (--noreplace only).
335 sets = trees[settings["ROOT"]]["root_config"].sets
336 world_candidates = None
337 if "--noreplace" in myopts and \
338 not oneshot and favorites:
339 # Sets that are not world candidates are filtered
340 # out here since the favorites list needs to be
341 # complete for depgraph.loadResumeCommand() to
343 world_candidates = [x for x in favorites \
344 if not (x.startswith(SETPREFIX) and \
345 not sets[x[1:]].world_candidate)]
346 if "--noreplace" in myopts and \
347 not oneshot and world_candidates:
349 for x in world_candidates:
350 print(" %s %s" % (good("*"), x))
351 prompt="Would you like to add these packages to your world favorites?"
352 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
353 prompt="Nothing to merge; would you like to auto-clean packages?"
356 print("Nothing to merge; quitting.")
359 elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
360 prompt="Would you like to fetch the source files for these packages?"
362 prompt="Would you like to merge these packages?"
364 if "--ask" in myopts and userquery(prompt, enter_invalid) == "No":
369 # Don't ask again (e.g. when auto-cleaning packages after merge)
370 myopts.pop("--ask", None)
# --pretend (without fetch options): display only, then verify the
# --buildpkgonly dependency constraint.
372 if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
373 if ("--resume" in myopts):
374 mymergelist = mydepgraph.altlist()
375 if len(mymergelist) == 0:
376 print(colorize("INFORM", "emerge: It seems we have nothing to resume..."))
378 favorites = mtimedb["resume"]["favorites"]
379 retval = mydepgraph.display(
380 mydepgraph.altlist(reversed=tree),
382 mydepgraph.display_problems()
383 if retval != os.EX_OK:
386 retval = mydepgraph.display(
387 mydepgraph.altlist(reversed=("--tree" in myopts)),
389 mydepgraph.display_problems()
390 if retval != os.EX_OK:
# --buildpkgonly requires all deps merged: strip non-merge nodes from a
# copy of the digraph and require the remainder to have no (medium or
# stronger) dependency edges.
392 if "--buildpkgonly" in myopts:
393 graph_copy = mydepgraph._dynamic_config.digraph.copy()
394 removed_nodes = set()
395 for node in graph_copy:
396 if not isinstance(node, Package) or \
397 node.operation == "nomerge":
398 removed_nodes.add(node)
399 graph_copy.difference_update(removed_nodes)
400 if not graph_copy.hasallzeros(ignore_priority = \
401 DepPrioritySatisfiedRange.ignore_medium):
402 print("\n!!! --buildpkgonly requires all dependencies to be merged.")
403 print("!!! You have to merge the dependencies before you can build this package.\n")
# Same --buildpkgonly check on the non-pretend (real merge) path.
406 if "--buildpkgonly" in myopts:
407 graph_copy = mydepgraph._dynamic_config.digraph.copy()
408 removed_nodes = set()
409 for node in graph_copy:
410 if not isinstance(node, Package) or \
411 node.operation == "nomerge":
412 removed_nodes.add(node)
413 graph_copy.difference_update(removed_nodes)
414 if not graph_copy.hasallzeros(ignore_priority = \
415 DepPrioritySatisfiedRange.ignore_medium):
416 print("\n!!! --buildpkgonly requires all dependencies to be merged.")
417 print("!!! Cannot merge requested packages. Merge deps and try again.\n")
# Back up the resume list (when it has more than one entry) before
# merging, so an interrupted merge can still be resumed later.
420 if ("--resume" in myopts):
421 favorites=mtimedb["resume"]["favorites"]
424 if "resume" in mtimedb and \
425 "mergelist" in mtimedb["resume"] and \
426 len(mtimedb["resume"]["mergelist"]) > 1:
427 mtimedb["resume_backup"] = mtimedb["resume"]
428 del mtimedb["resume"]
431 mydepgraph.saveNomergeFavorites()
# Hand the computed graph to the Scheduler and run the actual merge.
433 mergetask = Scheduler(settings, trees, mtimedb, myopts,
434 spinner, favorites=favorites,
435 graph_config=mydepgraph.schedulerGraph())
440 retval = mergetask.merge()
# Post-merge: auto-clean unneeded packages when AUTOCLEAN=yes, or warn
# loudly that disabling AUTOCLEAN is dangerous.
442 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
443 if "yes" == settings.get("AUTOCLEAN"):
444 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
445 unmerge(trees[settings["ROOT"]]["root_config"],
447 ldpath_mtimes, autoclean=1)
449 portage.writemsg_stdout(colorize("WARN", "WARNING:")
450 + " AUTOCLEAN is disabled. This can cause serious"
451 + " problems due to overlapping packages.\n")
455 def action_config(settings, trees, myopts, myfiles):
456 enter_invalid = '--ask-enter-invalid' in myopts
457 if len(myfiles) != 1:
458 print(red("!!! config can only take a single package atom at this time\n"))
460 if not is_valid_package_atom(myfiles[0]):
461 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
463 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
464 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
468 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
469 except portage.exception.AmbiguousPackageName as e:
470 # Multiple matches thrown from cpv_expand
473 print("No packages found.\n")
476 if "--ask" in myopts:
478 print("Please select a package to configure:")
482 options.append(str(idx))
483 print(options[-1]+") "+pkg)
486 idx = userquery("Selection?", enter_invalid, responses=options)
489 pkg = pkgs[int(idx)-1]
491 print("The following packages available:")
494 print("\nPlease use a specific atom or the --ask option.")
500 if "--ask" in myopts:
501 if userquery("Ready to configure %s?" % pkg, enter_invalid) == "No":
504 print("Configuring pkg...")
506 ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
507 mysettings = portage.config(clone=settings)
508 vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
509 debug = mysettings.get("PORTAGE_DEBUG") == "1"
510 retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
512 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
513 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
514 if retval == os.EX_OK:
515 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
516 mysettings, debug=debug, mydbapi=vardb, tree="vartree")
# action_depclean: remove installed packages that nothing in the world or
# system sets requires (or, for action == "prune", old versions), after
# printing safety warnings; the heavy graph work is delegated to
# calc_depclean() and the removal itself to unmerge().
# NOTE(review): lines are elided in this excerpt (leading numbers are the
# original file's line numbers); only visible code is documented here.
519 def action_depclean(settings, trees, ldpath_mtimes,
520 myopts, action, myfiles, spinner, scheduler=None):
521 # Kill packages that aren't explicitly merged or are required as a
522 # dependency of another package. World file is explicit.
524 # Global depclean or prune operations are not very safe when there are
525 # missing dependencies since it's unknown how badly incomplete
526 # the dependency graph is, and we might accidentally remove packages
527 # that should have been pulled into the graph. On the other hand, it's
528 # relatively safe to ignore missing deps when only asked to remove
# Build the advisory message shown before a bare "emerge --depclean".
532 if not _ENABLE_DYN_LINK_MAP:
533 msg.append("Depclean may break link level dependencies. Thus, it is\n")
534 msg.append("recommended to use a tool such as " + good("`revdep-rebuild`") + " (from\n")
535 msg.append("app-portage/gentoolkit) in order to detect such breakage.\n")
537 msg.append("Always study the list of packages to be cleaned for any obvious\n")
538 msg.append("mistakes. Packages that are part of the world set will always\n")
539 msg.append("be kept. They can be manually added to this set with\n")
540 msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
541 msg.append("package.provided (see portage(5)) will be removed by\n")
542 msg.append("depclean, even if they are part of the world set.\n")
544 msg.append("As a safety measure, depclean will not remove any packages\n")
545 msg.append("unless *all* required dependencies have been resolved. As a\n")
546 msg.append("consequence, it is often necessary to run %s\n" % \
547 good("`emerge --update"))
548 msg.append(good("--newuse --deep @world`") + \
549 " prior to depclean.\n")
# Only show the warning for a full depclean (no explicit atoms, not quiet).
551 if action == "depclean" and "--quiet" not in myopts and not myfiles:
552 portage.writemsg_stdout("\n")
554 portage.writemsg_stdout(colorize("WARN", " * ") + x)
556 root_config = trees[settings['ROOT']]['root_config']
557 vardb = root_config.trees['vartree'].dbapi
# Collect any argument atoms into a set (repo-qualified atoms allowed)
# and bail out early if they match nothing installed.
559 args_set = InternalPackageSet(allow_repo=True)
561 args_set.update(myfiles)
562 matched_packages = False
565 matched_packages = True
567 if not matched_packages:
568 writemsg_level(">>> No packages selected for removal by %s\n" % \
572 # The calculation is done in a separate function so that depgraph
573 # references go out of scope and the corresponding memory
574 # is freed before we call unmerge().
575 rval, cleanlist, ordered, req_pkg_count = \
576 calc_depclean(settings, trees, ldpath_mtimes,
577 myopts, action, args_set, spinner)
585 unmerge(root_config, myopts, "unmerge",
586 cleanlist, ldpath_mtimes, ordered=ordered,
589 if action == "prune":
# Print summary statistics after the clean (suppressed in quiet mode
# when nothing was removed).
592 if not cleanlist and "--quiet" in myopts:
595 print("Packages installed: " + str(len(vardb.cpv_all())))
596 print("Packages in world: " + \
597 str(len(root_config.sets["selected"].getAtoms())))
598 print("Packages in system: " + \
599 str(len(root_config.sets["system"].getAtoms())))
600 print("Required packages: "+str(req_pkg_count))
601 if "--pretend" in myopts:
602 print("Number to remove: "+str(len(cleanlist)))
604 print("Number removed: "+str(len(cleanlist)))
606 def calc_depclean(settings, trees, ldpath_mtimes,
607 myopts, action, args_set, spinner):
608 allow_missing_deps = bool(args_set)
610 debug = '--debug' in myopts
611 xterm_titles = "notitles" not in settings.features
612 myroot = settings["ROOT"]
613 root_config = trees[myroot]["root_config"]
614 psets = root_config.setconfig.psets
615 deselect = myopts.get('--deselect') != 'n'
617 required_sets['world'] = psets['world']
619 # When removing packages, a temporary version of the world 'selected'
620 # set may be used which excludes packages that are intended to be
621 # eligible for removal.
622 selected_set = psets['selected']
623 required_sets['selected'] = selected_set
624 protected_set = InternalPackageSet()
625 protected_set_name = '____depclean_protected_set____'
626 required_sets[protected_set_name] = protected_set
627 system_set = psets["system"]
629 if not system_set or not selected_set:
632 writemsg_level("!!! You have no system list.\n",
633 level=logging.ERROR, noiselevel=-1)
636 writemsg_level("!!! You have no world file.\n",
637 level=logging.WARNING, noiselevel=-1)
639 writemsg_level("!!! Proceeding is likely to " + \
640 "break your installation.\n",
641 level=logging.WARNING, noiselevel=-1)
642 if "--pretend" not in myopts:
643 countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
645 if action == "depclean":
646 emergelog(xterm_titles, " >>> depclean")
648 writemsg_level("\nCalculating dependencies ")
649 resolver_params = create_depgraph_params(myopts, "remove")
650 resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
652 vardb = resolver._frozen_config.trees[myroot]["vartree"].dbapi
653 real_vardb = trees[myroot]["vartree"].dbapi
655 if action == "depclean":
660 # Start with an empty set.
661 selected_set = InternalPackageSet()
662 required_sets['selected'] = selected_set
663 # Pull in any sets nested within the selected set.
664 selected_set.update(psets['selected'].getNonAtoms())
666 # Pull in everything that's installed but not matched
667 # by an argument atom since we don't want to clean any
668 # package if something depends on it.
674 if args_set.findAtomForPackage(pkg) is None:
675 protected_set.add("=" + pkg.cpv)
677 except portage.exception.InvalidDependString as e:
678 show_invalid_depstring_notice(pkg,
679 pkg.metadata["PROVIDE"], str(e))
681 protected_set.add("=" + pkg.cpv)
684 elif action == "prune":
687 # Start with an empty set.
688 selected_set = InternalPackageSet()
689 required_sets['selected'] = selected_set
690 # Pull in any sets nested within the selected set.
691 selected_set.update(psets['selected'].getNonAtoms())
693 # Pull in everything that's installed since we don't
694 # to prune a package if something depends on it.
695 protected_set.update(vardb.cp_all())
699 # Try to prune everything that's slotted.
700 for cp in vardb.cp_all():
701 if len(vardb.cp_list(cp)) > 1:
704 # Remove atoms from world that match installed packages
705 # that are also matched by argument atoms, but do not remove
706 # them if they match the highest installed version.
709 pkgs_for_cp = vardb.match_pkgs(pkg.cp)
710 if not pkgs_for_cp or pkg not in pkgs_for_cp:
711 raise AssertionError("package expected in matches: " + \
712 "cp = %s, cpv = %s matches = %s" % \
713 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
715 highest_version = pkgs_for_cp[-1]
716 if pkg == highest_version:
717 # pkg is the highest version
718 protected_set.add("=" + pkg.cpv)
721 if len(pkgs_for_cp) <= 1:
722 raise AssertionError("more packages expected: " + \
723 "cp = %s, cpv = %s matches = %s" % \
724 (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
727 if args_set.findAtomForPackage(pkg) is None:
728 protected_set.add("=" + pkg.cpv)
730 except portage.exception.InvalidDependString as e:
731 show_invalid_depstring_notice(pkg,
732 pkg.metadata["PROVIDE"], str(e))
734 protected_set.add("=" + pkg.cpv)
737 if resolver._frozen_config.excluded_pkgs:
738 excluded_set = resolver._frozen_config.excluded_pkgs
739 required_sets['__excluded__'] = InternalPackageSet()
746 if excluded_set.findAtomForPackage(pkg):
747 required_sets['__excluded__'].add("=" + pkg.cpv)
748 except portage.exception.InvalidDependString as e:
749 show_invalid_depstring_notice(pkg,
750 pkg.metadata["PROVIDE"], str(e))
752 required_sets['__excluded__'].add("=" + pkg.cpv)
754 success = resolver._complete_graph(required_sets={myroot:required_sets})
755 writemsg_level("\b\b... done!\n")
757 resolver.display_problems()
760 return 1, [], False, 0
762 def unresolved_deps():
765 for dep in resolver._dynamic_config._initially_unsatisfied_deps:
766 if isinstance(dep.parent, Package) and \
767 (dep.priority > UnmergeDepPriority.SOFT):
768 unresolvable.add((dep.atom, dep.parent.cpv))
773 if unresolvable and not allow_missing_deps:
775 if "--debug" in myopts:
776 writemsg("\ndigraph:\n\n", noiselevel=-1)
777 resolver._dynamic_config.digraph.debug_print()
778 writemsg("\n", noiselevel=-1)
782 msg.append("Dependencies could not be completely resolved due to")
783 msg.append("the following required packages not being installed:")
785 for atom, parent in unresolvable:
786 msg.append(" %s pulled in by:" % (atom,))
787 msg.append(" %s" % (parent,))
789 msg.append("Have you forgotten to run " + \
790 good("`emerge --update --newuse --deep @world`") + " prior")
791 msg.append(("to %s? It may be necessary to manually " + \
792 "uninstall packages that no longer") % action)
793 msg.append("exist in the portage tree since " + \
794 "it may not be possible to satisfy their")
795 msg.append("dependencies. Also, be aware of " + \
796 "the --with-bdeps option that is documented")
797 msg.append("in " + good("`man emerge`") + ".")
798 if action == "prune":
800 msg.append("If you would like to ignore " + \
801 "dependencies then use %s." % good("--nodeps"))
802 writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
803 level=logging.ERROR, noiselevel=-1)
807 if unresolved_deps():
808 return 1, [], False, 0
810 graph = resolver._dynamic_config.digraph.copy()
811 required_pkgs_total = 0
813 if isinstance(node, Package):
814 required_pkgs_total += 1
816 def show_parents(child_node):
817 parent_nodes = graph.parent_nodes(child_node)
819 # With --prune, the highest version can be pulled in without any
820 # real parent since all installed packages are pulled in. In that
821 # case there's nothing to show here.
824 for node in parent_nodes:
825 parent_strs.append(str(getattr(node, "cpv", node)))
828 msg.append(" %s pulled in by:\n" % (child_node.cpv,))
829 for parent_str in parent_strs:
830 msg.append(" %s\n" % (parent_str,))
832 portage.writemsg_stdout("".join(msg), noiselevel=-1)
834 def cmp_pkg_cpv(pkg1, pkg2):
835 """Sort Package instances by cpv."""
836 if pkg1.cpv > pkg2.cpv:
838 elif pkg1.cpv == pkg2.cpv:
843 def create_cleanlist():
845 if "--debug" in myopts:
846 writemsg("\ndigraph:\n\n", noiselevel=-1)
848 writemsg("\n", noiselevel=-1)
850 # Never display the special internal protected_set.
852 if isinstance(node, SetArg) and node.name == protected_set_name:
858 if action == "depclean":
861 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
864 arg_atom = args_set.findAtomForPackage(pkg)
865 except portage.exception.InvalidDependString:
866 # this error has already been displayed by now
871 pkgs_to_remove.append(pkg)
872 elif "--verbose" in myopts:
876 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
878 pkgs_to_remove.append(pkg)
879 elif "--verbose" in myopts:
882 elif action == "prune":
884 for atom in args_set:
885 for pkg in vardb.match_pkgs(atom):
887 pkgs_to_remove.append(pkg)
888 elif "--verbose" in myopts:
891 if not pkgs_to_remove:
893 ">>> No packages selected for removal by %s\n" % action)
894 if "--verbose" not in myopts:
896 ">>> To see reverse dependencies, use %s\n" % \
898 if action == "prune":
900 ">>> To ignore dependencies, use %s\n" % \
903 return pkgs_to_remove
905 cleanlist = create_cleanlist()
906 clean_set = set(cleanlist)
909 real_vardb._linkmap is not None and \
910 myopts.get("--depclean-lib-check") != "n" and \
911 "preserve-libs" not in settings.features:
913 # Check if any of these packages are the sole providers of libraries
914 # with consumers that have not been selected for removal. If so, these
915 # packages and any dependencies need to be added to the graph.
916 linkmap = real_vardb._linkmap
921 writemsg_level(">>> Checking for lib consumers...\n")
923 for pkg in cleanlist:
924 pkg_dblink = real_vardb._dblink(pkg.cpv)
927 for lib in pkg_dblink.getcontents():
928 lib = lib[len(myroot):]
929 lib_key = linkmap._obj_key(lib)
930 lib_consumers = consumer_cache.get(lib_key)
931 if lib_consumers is None:
933 lib_consumers = linkmap.findConsumers(lib_key)
936 consumer_cache[lib_key] = lib_consumers
938 consumers[lib_key] = lib_consumers
943 for lib, lib_consumers in list(consumers.items()):
944 for consumer_file in list(lib_consumers):
945 if pkg_dblink.isowner(consumer_file):
946 lib_consumers.remove(consumer_file)
947 if not lib_consumers:
953 for lib, lib_consumers in consumers.items():
955 soname = linkmap.getSoname(lib)
957 consumer_providers = []
958 for lib_consumer in lib_consumers:
959 providers = provider_cache.get(lib)
960 if providers is None:
961 providers = linkmap.findProviders(lib_consumer)
962 provider_cache[lib_consumer] = providers
963 if soname not in providers:
964 # Why does this happen?
966 consumer_providers.append(
967 (lib_consumer, providers[soname]))
969 consumers[lib] = consumer_providers
971 consumer_map[pkg] = consumers
976 for consumers in consumer_map.values():
977 for lib, consumer_providers in consumers.items():
978 for lib_consumer, providers in consumer_providers:
979 search_files.add(lib_consumer)
980 search_files.update(providers)
982 writemsg_level(">>> Assigning files to packages...\n")
983 file_owners = real_vardb._owners.getFileOwnerMap(search_files)
985 for pkg, consumers in list(consumer_map.items()):
986 for lib, consumer_providers in list(consumers.items()):
987 lib_consumers = set()
989 for lib_consumer, providers in consumer_providers:
990 owner_set = file_owners.get(lib_consumer)
991 provider_dblinks = set()
992 provider_pkgs = set()
994 if len(providers) > 1:
995 for provider in providers:
996 provider_set = file_owners.get(provider)
997 if provider_set is not None:
998 provider_dblinks.update(provider_set)
1000 if len(provider_dblinks) > 1:
1001 for provider_dblink in provider_dblinks:
1002 provider_pkg = resolver._pkg(
1003 provider_dblink.mycpv, "installed",
1004 root_config, installed=True)
1005 if provider_pkg not in clean_set:
1006 provider_pkgs.add(provider_pkg)
1011 if owner_set is not None:
1012 lib_consumers.update(owner_set)
1014 for consumer_dblink in list(lib_consumers):
1015 if resolver._pkg(consumer_dblink.mycpv, "installed",
1016 root_config, installed=True) in clean_set:
1017 lib_consumers.remove(consumer_dblink)
1021 consumers[lib] = lib_consumers
1025 del consumer_map[pkg]
1028 # TODO: Implement a package set for rebuilding consumer packages.
1030 msg = "In order to avoid breakage of link level " + \
1031 "dependencies, one or more packages will not be removed. " + \
1032 "This can be solved by rebuilding " + \
1033 "the packages that pulled them in."
1036 from textwrap import wrap
1037 writemsg_level("".join(prefix + "%s\n" % line for \
1038 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
1041 for pkg in sorted(consumer_map, key=cmp_sort_key(cmp_pkg_cpv)):
1042 consumers = consumer_map[pkg]
1044 for lib, lib_consumers in consumers.items():
1045 for consumer in lib_consumers:
1046 consumer_libs.setdefault(
1047 consumer.mycpv, set()).add(linkmap.getSoname(lib))
1048 unique_consumers = set(chain(*consumers.values()))
1049 unique_consumers = sorted(consumer.mycpv \
1050 for consumer in unique_consumers)
1052 msg.append(" %s pulled in by:" % (pkg.cpv,))
1053 for consumer in unique_consumers:
1054 libs = consumer_libs[consumer]
1055 msg.append(" %s needs %s" % \
1056 (consumer, ', '.join(sorted(libs))))
1058 writemsg_level("".join(prefix + "%s\n" % line for line in msg),
1059 level=logging.WARNING, noiselevel=-1)
1061 # Add lib providers to the graph as children of lib consumers,
1062 # and also add any dependencies pulled in by the provider.
1063 writemsg_level(">>> Adding lib providers to graph...\n")
1065 for pkg, consumers in consumer_map.items():
1066 for consumer_dblink in set(chain(*consumers.values())):
1067 consumer_pkg = resolver._pkg(consumer_dblink.mycpv,
1068 "installed", root_config, installed=True)
1069 if not resolver._add_pkg(pkg,
1070 Dependency(parent=consumer_pkg,
1071 priority=UnmergeDepPriority(runtime=True),
1073 resolver.display_problems()
1074 return 1, [], False, 0
1076 writemsg_level("\nCalculating dependencies ")
1077 success = resolver._complete_graph(
1078 required_sets={myroot:required_sets})
1079 writemsg_level("\b\b... done!\n")
1080 resolver.display_problems()
1082 return 1, [], False, 0
1083 if unresolved_deps():
1084 return 1, [], False, 0
1086 graph = resolver._dynamic_config.digraph.copy()
1087 required_pkgs_total = 0
1089 if isinstance(node, Package):
1090 required_pkgs_total += 1
1091 cleanlist = create_cleanlist()
1093 return 0, [], False, required_pkgs_total
1094 clean_set = set(cleanlist)
1097 writemsg_level(">>> Calculating removal order...\n")
1098 # Use a topological sort to create an unmerge order such that
1099 # each package is unmerged before it's dependencies. This is
1100 # necessary to avoid breaking things that may need to run
1101 # during pkg_prerm or pkg_postrm phases.
1103 # Create a new graph to account for dependencies between the
1104 # packages being unmerged.
1108 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1109 runtime = UnmergeDepPriority(runtime=True)
1110 runtime_post = UnmergeDepPriority(runtime_post=True)
1111 buildtime = UnmergeDepPriority(buildtime=True)
1114 "PDEPEND": runtime_post,
1115 "DEPEND": buildtime,
1118 for node in clean_set:
1119 graph.add(node, None)
1121 for dep_type in dep_keys:
1122 depstr = node.metadata[dep_type]
1125 priority = priority_map[dep_type]
1128 writemsg_level(_unicode_decode("\nParent: %s\n") \
1129 % (node,), noiselevel=-1, level=logging.DEBUG)
1130 writemsg_level(_unicode_decode( "Depstring: %s\n") \
1131 % (depstr,), noiselevel=-1, level=logging.DEBUG)
1132 writemsg_level(_unicode_decode( "Priority: %s\n") \
1133 % (priority,), noiselevel=-1, level=logging.DEBUG)
1136 atoms = resolver._select_atoms(myroot, depstr,
1137 myuse=node.use.enabled, parent=node,
1138 priority=priority)[node]
1139 except portage.exception.InvalidDependString:
1140 # Ignore invalid deps of packages that will
1141 # be uninstalled anyway.
1145 writemsg_level("Candidates: [%s]\n" % \
1146 ', '.join(_unicode_decode("'%s'") % (x,) for x in atoms),
1147 noiselevel=-1, level=logging.DEBUG)
1150 if not isinstance(atom, portage.dep.Atom):
1151 # Ignore invalid atoms returned from dep_check().
1155 matches = vardb.match_pkgs(atom)
1158 for child_node in matches:
1159 if child_node in clean_set:
1160 graph.add(child_node, node, priority=priority)
1163 writemsg_level("\nunmerge digraph:\n\n",
1164 noiselevel=-1, level=logging.DEBUG)
1166 writemsg_level("\n", noiselevel=-1, level=logging.DEBUG)
1169 if len(graph.order) == len(graph.root_nodes()):
1170 # If there are no dependencies between packages
1171 # let unmerge() group them by cat/pn.
1173 cleanlist = [pkg.cpv for pkg in graph.order]
1175 # Order nodes from lowest to highest overall reference count for
1176 # optimal root node selection (this can help minimize issues
1177 # with unaccounted implicit dependencies).
1179 for node in graph.order:
1180 node_refcounts[node] = len(graph.parent_nodes(node))
1181 def cmp_reference_count(node1, node2):
1182 return node_refcounts[node1] - node_refcounts[node2]
1183 graph.order.sort(key=cmp_sort_key(cmp_reference_count))
1185 ignore_priority_range = [None]
1186 ignore_priority_range.extend(
1187 range(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
1188 while not graph.empty():
1189 for ignore_priority in ignore_priority_range:
1190 nodes = graph.root_nodes(ignore_priority=ignore_priority)
1194 raise AssertionError("no root nodes")
1195 if ignore_priority is not None:
1196 # Some deps have been dropped due to circular dependencies,
1197 # so only pop one node in order to minimize the number that
1202 cleanlist.append(node.cpv)
1204 return 0, cleanlist, ordered, required_pkgs_total
1205 return 0, [], False, required_pkgs_total
# Implements `emerge --deselect`: removes the given atoms (and set names) from
# the @selected "world" favorites set.
# NOTE(review): this extract has original line numbers baked into each line and
# several source lines missing (embedded numbering jumps, e.g. 1213->1216,
# 1219->1223); indentation was flattened. Comments below describe only what the
# visible code shows.
1207 def action_deselect(settings, trees, opts, atoms):
1208 enter_invalid = '--ask-enter-invalid' in opts
1209 root_config = trees[settings['ROOT']]['root_config']
1210 world_set = root_config.sets['selected']
# Guard: the selected set must support mutation; otherwise log an error
# (the return after the writemsg_level call is not visible in this extract).
1211 if not hasattr(world_set, 'update'):
1212 writemsg_level("World @selected set does not appear to be mutable.\n",
1213 level=logging.ERROR, noiselevel=-1)
1216 pretend = '--pretend' in opts
# Presumably world_set.lock() is called here when not pretending — the call
# itself is among the missing lines; TODO confirm against upstream source.
1218 if not pretend and hasattr(world_set, 'lock'):
1223 world_atoms = world_set.getAtoms()
1224 vardb = root_config.trees["vartree"].dbapi
1225 expanded_atoms = set(atoms)
# Expand user-supplied atoms: "null/<pn>" placeholders get their category
# filled in from matching world atoms, and installed matches gain SLOT info.
1228 if not atom.startswith(SETPREFIX):
1229 if atom.cp.startswith("null/"):
1230 # try to expand category from world set
1231 null_cat, pn = portage.catsplit(atom.cp)
1232 for world_atom in world_atoms:
1233 cat, world_pn = portage.catsplit(world_atom.cp)
1236 Atom(atom.replace("null", cat, 1),
1237 allow_repo=True, allow_wildcard=True))
1239 for cpv in vardb.match(atom):
1240 slot, = vardb.aux_get(cpv, ["SLOT"])
1243 expanded_atoms.add(Atom("%s:%s" % \
1244 (portage.cpv_getkey(cpv), slot)))
# Collect the world entries to drop: set names match set args; package atoms
# must intersect, and an arg's slot/repo restriction must not be stricter
# than the world atom's.
1246 discard_atoms = set()
1247 for atom in world_set:
1248 for arg_atom in expanded_atoms:
1249 if arg_atom.startswith(SETPREFIX):
1250 if atom.startswith(SETPREFIX) and \
1252 discard_atoms.add(atom)
1255 if not atom.startswith(SETPREFIX) and \
1256 arg_atom.intersects(atom) and \
1257 not (arg_atom.slot and not atom.slot) and \
1258 not (arg_atom.repo and not atom.repo):
1259 discard_atoms.add(atom)
# Report (or, with --pretend, only announce) each removal, optionally ask,
# then replace the world set's contents with the remaining atoms.
1262 for atom in sorted(discard_atoms):
1264 print(">>> Would remove %s from \"world\" favorites file..." % \
1265 colorize("INFORM", str(atom)))
1267 print(">>> Removing %s from \"world\" favorites file..." % \
1268 colorize("INFORM", str(atom)))
1271 prompt = "Would you like to remove these " + \
1272 "packages from your world favorites?"
1273 if userquery(prompt, enter_invalid) == 'No':
1276 remaining = set(world_set)
1277 remaining.difference_update(discard_atoms)
1279 world_set.replace(remaining)
1281 print(">>> No matching atoms found in \"world\" favorites file...")
# Small value object used by action_info() to render one installed version of
# a package, carrying the version string plus repository/provider suffixes.
# NOTE(review): lines are missing in this extract — e.g. `self.ver = ver` in
# __init__ and the `def toString(self):` header for the trailing docstring
# fragment and return statement. TODO confirm against upstream source.
1287 class _info_pkgs_ver(object):
1288 def __init__(self, ver, repo_suffix, provide_suffix):
1290 self.repo_suffix = repo_suffix
1291 self.provide_suffix = provide_suffix
# Orders instances by Portage version comparison, so sorted() lists
# versions oldest-first.
1293 def __lt__(self, other):
1294 return portage.versions.vercmp(self.ver, other.ver) < 0
# The following docstring/return belong to a method (presumably toString)
# whose def line is not visible here.
1298 This may return unicode if repo_name contains unicode.
1299 Don't use __str__ and str() since unicode triggers compatibility
1300 issues between python 2.x and 3.x.
1302 return self.ver + self.repo_suffix + self.provide_suffix
# Implements `emerge --info`: prints the system settings report (Portage
# version, uname, tool versions, key package versions, repositories,
# configuration variables, USE flags) and, for any packages named on the
# command line, per-package build settings and pkg_info() output.
# NOTE(review): this extract has baked-in line numbers and many missing lines
# (the embedded numbering jumps repeatedly); comments describe only what is
# visible.
1304 def action_info(settings, trees, myopts, myfiles):
1306 root_config = trees[settings['ROOT']]['root_config']
1308 print(getportageversion(settings["PORTDIR"], settings["ROOT"],
1309 settings.profile_path, settings["CHOST"],
1310 trees[settings["ROOT"]]["vartree"].dbapi))
# Centered "System Settings" banner between '=' rules (header_width is
# defined on a line not visible in this extract).
1313 header_title = "System Settings"
1315 print(header_width * "=")
1316 print(header_title.rjust(int(header_width/2 + len(header_title)/2)))
1317 print(header_width * "=")
1318 print("System uname: "+platform.platform(aliased=1))
# Last-sync timestamp read from $PORTDIR/metadata/timestamp.chk.
1320 lastSync = portage.grabfile(os.path.join(
1321 settings["PORTDIR"], "metadata", "timestamp.chk"))
1322 print("Timestamp of tree:", end=' ')
# Probe distcc/ccache versions by running them; output[1] is the command's
# combined output, first line only.
1328 output=subprocess_getstatusoutput("distcc --version")
1330 print(str(output[1].split("\n",1)[0]), end=' ')
1331 if "distcc" in settings.features:
1336 output=subprocess_getstatusoutput("ccache -V")
1338 print(str(output[1].split("\n",1)[0]), end=' ')
1339 if "ccache" in settings.features:
# Key toolchain packages to report, extended from profiles/info_pkgs.
1344 myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
1345 "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
1346 myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
1348 vardb = trees["/"]["vartree"].dbapi
1353 writemsg_stdout("%-20s %s\n" % (x+":", "[NOT VALID]"),
# Expand new-style virtuals so e.g. virtual/os-headers reports its
# concrete provider; blockers are skipped.
1356 for atom in expand_new_virt(vardb, x):
1357 if not atom.blocker:
1358 atoms.append((x, atom))
1360 myvars = sorted(set(atoms))
1362 portdb = trees["/"]["porttree"].dbapi
1363 main_repo = portdb.getRepositoryName(portdb.porttree_root)
# Build cp -> {version -> _info_pkgs_ver} from installed matches, tracking
# the widest cp name for column alignment.
1367 for orig_atom, x in myvars:
1368 pkg_matches = vardb.match(x)
1371 for cpv in pkg_matches:
1372 matched_cp = portage.versions.cpv_getkey(cpv)
1373 ver = portage.versions.cpv_getversion(cpv)
1374 ver_map = cp_map.setdefault(matched_cp, {})
1375 prev_match = ver_map.get(ver)
1376 if prev_match is not None:
1377 if prev_match.provide_suffix:
1378 # prefer duplicate matches that include
1379 # additional virtual provider info
1382 if len(matched_cp) > cp_max_len:
1383 cp_max_len = len(matched_cp)
# Annotate the repository unless it is the main repo; unknown repos get a
# placeholder suffix.
1384 repo = vardb.aux_get(cpv, ["repository"])[0]
1385 if repo == main_repo:
1388 repo_suffix = "::<unknown repository>"
1390 repo_suffix = "::" + repo
# When the match came via a virtual, record the originating atom.
1392 if matched_cp == orig_atom.cp:
1395 provide_suffix = " (%s)" % (orig_atom,)
1397 ver_map[ver] = _info_pkgs_ver(ver, repo_suffix, provide_suffix)
1399 for cp in sorted(cp_map):
1400 versions = sorted(cp_map[cp].values())
1401 versions = ", ".join(ver.toString() for ver in versions)
1402 writemsg_stdout("%s %s\n" % \
1403 ((cp + ":").ljust(cp_max_len + 1), versions),
1406 libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
# Repository listing: verbose mode prints full info strings, otherwise
# just the space-separated repo names.
1408 repos = portdb.settings.repositories
1409 if "--verbose" in myopts:
1410 writemsg_stdout("Repositories:\n\n", noiselevel=-1)
1412 writemsg_stdout(repo.info_string(), noiselevel=-1)
1414 writemsg_stdout("Repositories: %s\n" % \
1415 " ".join(repo.name for repo in repos), noiselevel=-1)
1417 if _ENABLE_SET_CONFIG:
1418 sets_line = "Installed sets: "
1419 sets_line += ", ".join(s for s in \
1420 sorted(root_config.sets['selected'].getNonAtoms()) \
1421 if s.startswith(SETPREFIX))
1423 writemsg_stdout(sets_line, noiselevel=-1)
# Variable report: verbose prints everything, otherwise a curated list
# extended from profiles/info_vars.
1425 if "--verbose" in myopts:
1426 myvars = list(settings)
1428 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
1429 'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
1430 'PORTDIR_OVERLAY', 'PORTAGE_BUNZIP2_COMMAND',
1431 'PORTAGE_BZIP2_COMMAND',
1432 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
1433 'ACCEPT_KEYWORDS', 'ACCEPT_LICENSE', 'SYNC', 'FEATURES',
1434 'EMERGE_DEFAULT_OPTS']
1436 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
# Variables whose value is suppressed when it equals the stock default.
1438 myvars_ignore_defaults = {
1439 'PORTAGE_BZIP2_COMMAND' : 'bzip2',
1442 myvars = portage.util.unique_array(myvars)
1443 use_expand = settings.get('USE_EXPAND', '').split()
1445 use_expand_hidden = set(
1446 settings.get('USE_EXPAND_HIDDEN', '').upper().split())
1447 alphabetical_use = '--alphabetical' in myopts
1453 default = myvars_ignore_defaults.get(x)
1454 if default is not None and \
1455 default == settings[x]:
1457 writemsg_stdout('%s="%s"\n' % (x, settings[x]), noiselevel=-1)
# USE display: strip USE_EXPAND-prefixed flags from the plain USE line,
# then print each USE_EXPAND variable separately.
1459 use = set(settings["USE"].split())
1460 for varname in use_expand:
1461 flag_prefix = varname.lower() + "_"
1463 if f.startswith(flag_prefix):
1467 print('USE="%s"' % " ".join(use), end=' ')
1468 for varname in use_expand:
1469 myval = settings.get(varname)
1471 print('%s="%s"' % (varname, myval), end=' ')
1474 unset_vars.append(x)
1476 print("Unset: "+", ".join(unset_vars))
1479 if "--debug" in myopts:
1480 for x in dir(portage):
1481 module = getattr(portage, x)
1482 if "cvs_id_string" in dir(module):
1483 print("%s: %s" % (str(x), str(module.cvs_id_string)))
1485 # See if we can find any packages installed matching the strings
1486 # passed on the command line
1488 vardb = trees[settings["ROOT"]]["vartree"].dbapi
1489 portdb = trees[settings["ROOT"]]["porttree"].dbapi
1490 bindb = trees[settings["ROOT"]]["bintree"].dbapi
1493 installed_match = vardb.match(x)
1494 for installed in installed_match:
1495 mypkgs.append((installed, "installed"))
# Fall back to ebuild/binary matches; binaries only with --usepkg, remote
# binaries skipped, and only EAPI>=4 packages defining pkg_info qualify.
1501 for db, pkg_type in ((portdb, "ebuild"), (bindb, "binary")):
1502 if pkg_type == "binary" and "--usepkg" not in myopts:
1505 matches = db.match(x)
1507 for match in matches:
1508 if pkg_type == "binary":
1509 if db.bintree.isremote(match):
1511 auxkeys = ["EAPI", "DEFINED_PHASES"]
1512 metadata = dict(zip(auxkeys, db.aux_get(match, auxkeys)))
1513 if metadata["EAPI"] not in ("0", "1", "2", "3") and \
1514 "info" in metadata["DEFINED_PHASES"].split():
1515 mypkgs.append((match, pkg_type))
1518 # If some packages were found...
1520 # Get our global settings (we only print stuff if it varies from
1521 # the current config)
1522 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
1523 auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
1524 auxkeys.append('DEFINED_PHASES')
1526 pkgsettings = portage.config(clone=settings)
1528 # Loop through each package
1529 # Only print settings if they differ from global settings
1530 header_title = "Package Settings"
1531 print(header_width * "=")
1532 print(header_title.rjust(int(header_width/2 + len(header_title)/2)))
1533 print(header_width * "=")
1534 from portage.output import EOutput
1536 for mypkg in mypkgs:
1539 # Get all package specific variables
# Pull metadata from whichever db matches the package type.
1540 if pkg_type == "installed":
1541 metadata = dict(zip(auxkeys, vardb.aux_get(cpv, auxkeys)))
1542 elif pkg_type == "ebuild":
1543 metadata = dict(zip(auxkeys, portdb.aux_get(cpv, auxkeys)))
1544 elif pkg_type == "binary":
1545 metadata = dict(zip(auxkeys, bindb.aux_get(cpv, auxkeys)))
1547 pkg = Package(built=(pkg_type!="ebuild"), cpv=cpv,
1548 installed=(pkg_type=="installed"), metadata=zip(Package.metadata_keys,
1549 (metadata.get(x, '') for x in Package.metadata_keys)),
1550 root_config=root_config, type_name=pkg_type)
1552 if pkg_type == "installed":
1553 print("\n%s was built with the following:" % \
1554 colorize("INFORM", str(pkg.cpv)))
1555 elif pkg_type == "ebuild":
1556 print("\n%s would be build with the following:" % \
1557 colorize("INFORM", str(pkg.cpv)))
1558 elif pkg_type == "binary":
1559 print("\n%s (non-installed binary) was built with the following:" % \
1560 colorize("INFORM", str(pkg.cpv)))
1562 writemsg_stdout('%s\n' % pkg_use_display(pkg, myopts),
# For installed packages, show build-time vars only where they differ
# from the current global settings.
1564 if pkg_type == "installed":
1565 for myvar in mydesiredvars:
1566 if metadata[myvar].split() != settings.get(myvar, '').split():
1567 print("%s=\"%s\"" % (myvar, metadata[myvar]))
1570 if metadata['DEFINED_PHASES']:
1571 if 'info' not in metadata['DEFINED_PHASES'].split():
1574 print(">>> Attempting to run pkg_info() for '%s'" % pkg.cpv)
# Locate an ebuild to run pkg_info from; for binaries, extract the
# embedded ebuild from the tbz2 into a temp dir.
1576 if pkg_type == "installed":
1577 ebuildpath = vardb.findname(pkg.cpv)
1578 elif pkg_type == "ebuild":
1579 ebuildpath = portdb.findname(pkg.cpv, myrepo=pkg.repo)
1580 elif pkg_type == "binary":
1581 tbz2_file = bindb.bintree.getname(pkg.cpv)
1582 ebuild_file_name = pkg.cpv.split("/")[1] + ".ebuild"
1583 ebuild_file_contents = portage.xpak.tbz2(tbz2_file).getfile(ebuild_file_name)
1584 tmpdir = tempfile.mkdtemp()
1585 ebuildpath = os.path.join(tmpdir, ebuild_file_name)
# NOTE(review): `file` shadows a builtin and no close() is visible in
# this extract — presumably closed on a missing line; TODO confirm.
1586 file = open(ebuildpath, 'w')
1587 file.write(ebuild_file_contents)
1590 if not ebuildpath or not os.path.exists(ebuildpath):
1591 out.ewarn("No ebuild found for '%s'" % pkg.cpv)
# Run the "info" phase against the dbapi matching the package type.
1594 if pkg_type == "installed":
1595 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
1596 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
1597 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
1599 elif pkg_type == "ebuild":
1600 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
1601 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
1602 mydbapi=trees[settings["ROOT"]]["porttree"].dbapi,
1604 elif pkg_type == "binary":
1605 portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
1606 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
1607 mydbapi=trees[settings["ROOT"]]["bintree"].dbapi,
# Clean up the temp dir used for the extracted binary-package ebuild.
1609 shutil.rmtree(tmpdir)
# Implements `emerge --metadata`: transfers pregenerated metadata cache
# entries from each repository's source cache into the local depcache,
# validating EAPI and eclass data, then prunes stale (dead) cache entries.
# NOTE(review): this extract has baked-in line numbers and missing lines
# (embedded numbering jumps); comments describe only the visible code.
1611 def action_metadata(settings, portdb, myopts, porttrees=None):
1612 if porttrees is None:
1613 porttrees = portdb.porttrees
1614 portage.writemsg_stdout("\n>>> Updating Portage cache\n")
# Ensure group-writable cache files while this runs (umask restored on a
# line not visible in this extract — TODO confirm).
1615 old_umask = os.umask(0o002)
1616 cachedir = os.path.normpath(settings.depcachedir)
# Refuse to operate on obviously-wrong cache locations (top-level system
# directories), since the prune phase deletes entries.
1617 if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
1618 "/lib", "/opt", "/proc", "/root", "/sbin",
1619 "/sys", "/tmp", "/usr", "/var"]:
1620 print("!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
1621 "ROOT DIRECTORY ON YOUR SYSTEM.", file=sys.stderr)
1622 print("!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir, file=sys.stderr)
1624 if not os.path.exists(cachedir):
1625 os.makedirs(cachedir)
1627 auxdbkeys = [x for x in portage.auxdbkeys if not x.startswith("UNUSED_0")]
1628 auxdbkeys = tuple(auxdbkeys)
# Per-tree bookkeeping: destination cache, eclass db, repo path, source
# cache, and the set of cpvs seen (used later to find dead entries).
# NOTE(review): `self.path = path` is on a line missing from this extract.
1630 class TreeData(object):
1631 __slots__ = ('dest_db', 'eclass_db', 'path', 'src_db', 'valid_nodes')
1632 def __init__(self, dest_db, eclass_db, path, src_db):
1633 self.dest_db = dest_db
1634 self.eclass_db = eclass_db
1636 self.src_db = src_db
1637 self.valid_nodes = set()
# Collect trees that actually have a pregenerated source cache.
1640 for path in porttrees:
1641 src_db = portdb._pregen_auxdb.get(path)
1642 if src_db is None and \
1643 os.path.isdir(os.path.join(path, 'metadata', 'cache')):
1644 src_db = portdb.metadbmodule(
1645 path, 'metadata/cache', auxdbkeys, readonly=True)
1647 src_db.ec = portdb._repo_info[path].eclass_db
1648 except AttributeError:
1651 if src_db is not None:
1652 porttrees_data.append(TreeData(portdb.auxdb[path],
1653 portdb._repo_info[path].eclass_db, path, src_db))
1655 porttrees = [tree_data.path for tree_data in porttrees_data]
# Suppress the progress bar on dumb terminals, --quiet, or non-tty stdout.
1657 quiet = settings.get('TERM') == 'dumb' or \
1658 '--quiet' in myopts or \
1659 not sys.stdout.isatty()
1663 progressBar = portage.output.TermProgressBar()
1664 progressHandler = ProgressHandler()
1665 onProgress = progressHandler.onProgress
1667 progressBar.set(progressHandler.curval, progressHandler.maxval)
1668 progressHandler.display = display
# Resize the progress bar when the terminal window changes size.
1669 def sigwinch_handler(signum, frame):
1670 lines, progressBar.term_columns = \
1671 portage.output.get_term_size()
1672 signal.signal(signal.SIGWINCH, sigwinch_handler)
1674 # Temporarily override portdb.porttrees so portdb.cp_all()
1675 # will only return the relevant subset.
1676 portdb_porttrees = portdb.porttrees
1677 portdb.porttrees = porttrees
1679 cp_all = portdb.cp_all()
1681 portdb.porttrees = portdb_porttrees
1684 maxval = len(cp_all)
1685 if onProgress is not None:
1686 onProgress(maxval, curval)
1688 from portage.cache.util import quiet_mirroring
1689 from portage import eapi_is_supported, \
1690 _validate_cache_for_unsupported_eapis
1692 # TODO: Display error messages, but do not interfere with the progress bar.
1694 # 1) erase the progress bar
1695 # 2) show the error message
1696 # 3) redraw the progress bar on a new line
1697 noise = quiet_mirroring()
# Main transfer loop: for every cp, for every tree, for every cpv in that
# tree, validate the source cache entry and copy it to the destination.
1700 for tree_data in porttrees_data:
1701 for cpv in portdb.cp_list(cp, mytree=tree_data.path):
1702 tree_data.valid_nodes.add(cpv)
1704 src = tree_data.src_db[cpv]
1705 except KeyError as e:
1706 noise.missing_entry(cpv)
1709 except CacheError as ce:
1710 noise.exception(cpv, ce)
# A leading '-' on EAPI marks a previously-recorded unsupported EAPI.
1714 eapi = src.get('EAPI')
1717 eapi = eapi.lstrip('-')
1718 eapi_supported = eapi_is_supported(eapi)
1719 if not eapi_supported:
1720 if not _validate_cache_for_unsupported_eapis:
1721 noise.misc(cpv, "unable to validate " + \
1722 "cache for EAPI='%s'" % eapi)
1727 dest = tree_data.dest_db[cpv]
1728 except (KeyError, CacheError):
1731 for d in (src, dest):
1732 if d is not None and d.get('EAPI') in ('', '0'):
# Skip the write only when mtime, eclass data, and every other field of
# an existing destination entry already match the source.
1735 if dest is not None:
1736 if not (dest['_mtime_'] == src['_mtime_'] and \
1737 tree_data.eclass_db.is_eclass_data_valid(
1738 dest['_eclasses_']) and \
1739 set(dest['_eclasses_']) == set(src['_eclasses_'])):
1742 # We don't want to skip the write unless we're really
1743 # sure that the existing cache is identical, so don't
1744 # trust _mtime_ and _eclasses_ alone.
1745 for k in set(chain(src, dest)).difference(
1746 ('_mtime_', '_eclasses_')):
1747 if dest.get(k, '') != src.get(k, ''):
1751 if dest is not None:
1752 # The existing data is valid and identical,
1753 # so there's no need to overwrite it.
# Rebuild _eclasses_ from the local eclass db (inserting local eclass
# paths); stale or missing eclass data marks the entry as stale.
1757 inherited = src.get('INHERITED', '')
1758 eclasses = src.get('_eclasses_')
1759 except CacheError as ce:
1760 noise.exception(cpv, ce)
1764 if eclasses is not None:
1765 if not tree_data.eclass_db.is_eclass_data_valid(
1767 noise.eclass_stale(cpv)
1769 inherited = eclasses
1771 inherited = inherited.split()
1773 if tree_data.src_db.complete_eclass_entries and \
1775 noise.corruption(cpv, "missing _eclasses_ field")
1779 # Even if _eclasses_ already exists, replace it with data from
1780 # eclass_cache, in order to insert local eclass paths.
1782 eclasses = tree_data.eclass_db.get_eclass_data(inherited)
1784 # INHERITED contains a non-existent eclass.
1785 noise.eclass_stale(cpv)
1788 if eclasses is None:
1789 noise.eclass_stale(cpv)
1791 src['_eclasses_'] = eclasses
1793 src['_eclasses_'] = {}
# Unsupported EAPIs are stored with a '-' prefix so they can be
# recognized (and skipped) on later runs.
1795 if not eapi_supported:
1797 'EAPI' : '-' + eapi,
1798 '_mtime_' : src['_mtime_'],
1799 '_eclasses_' : src['_eclasses_'],
1803 tree_data.dest_db[cpv] = src
1804 except CacheError as ce:
1805 noise.exception(cpv, ce)
1809 if onProgress is not None:
1810 onProgress(maxval, curval)
1812 if onProgress is not None:
1813 onProgress(maxval, curval)
# Prune: delete destination-cache entries for cpvs that no longer exist
# in the corresponding tree.
1815 for tree_data in porttrees_data:
1817 dead_nodes = set(tree_data.dest_db)
1818 except CacheError as e:
1819 writemsg_level("Error listing cache entries for " + \
1820 "'%s': %s, continuing...\n" % (tree_data.path, e),
1821 level=logging.ERROR, noiselevel=-1)
1824 dead_nodes.difference_update(tree_data.valid_nodes)
1825 for cpv in dead_nodes:
1827 del tree_data.dest_db[cpv]
1828 except (KeyError, CacheError):
1832 # make sure the final progress is displayed
1833 progressHandler.display()
# Restore the default SIGWINCH handler installed earlier.
1835 signal.signal(signal.SIGWINCH, signal.SIG_DFL)
# Implements `emerge --regen`: regenerates metadata cache entries via
# MetadataRegen, with SIGINT/SIGTERM handlers installed for clean shutdown.
# NOTE(review): this extract has baked-in line numbers and missing lines
# (e.g. the try: before os.close, the regen.run() call presumably between
# installing and restoring the signal handlers); comments describe only the
# visible code. Returns regen.returncode.
1840 def action_regen(settings, portdb, max_jobs, max_load):
1841 xterm_titles = "notitles" not in settings.features
1842 emergelog(xterm_titles, " === regen")
1843 #regenerate cache entries
1845 os.close(sys.stdin.fileno())
1846 except SystemExit as e:
1847 raise # Needed else can't exit
1852 regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
1853 received_signal = []
# Signal handler: ignore further signals, log the exit, and record the
# conventional 128+signum exit status for later sys.exit().
1855 def emergeexitsig(signum, frame):
1856 signal.signal(signal.SIGINT, signal.SIG_IGN)
1857 signal.signal(signal.SIGTERM, signal.SIG_IGN)
1858 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % \
1861 received_signal.append(128 + signum)
# Install the handler, remembering the previous handlers for restoration.
1863 earlier_sigint_handler = signal.signal(signal.SIGINT, emergeexitsig)
1864 earlier_sigterm_handler = signal.signal(signal.SIGTERM, emergeexitsig)
1869 # Restore previous handlers
1870 if earlier_sigint_handler is not None:
1871 signal.signal(signal.SIGINT, earlier_sigint_handler)
1873 signal.signal(signal.SIGINT, signal.SIG_DFL)
1874 if earlier_sigterm_handler is not None:
1875 signal.signal(signal.SIGTERM, earlier_sigterm_handler)
1877 signal.signal(signal.SIGTERM, signal.SIG_DFL)
# If a signal arrived during the regen, exit with 128+signum.
1880 sys.exit(received_signal[0])
1882 portage.writemsg_stdout("done!\n")
1883 return regen.returncode
# Implements `emerge --search`: runs each search term through a `search`
# instance configured from command-line options, then prints the results.
# NOTE(review): lines are missing in this extract (e.g. the empty-terms
# check around the "no search terms" print, and the try: before execute);
# comments describe only the visible code.
1885 def action_search(root_config, myopts, myfiles, spinner):
1887 print("emerge: no search terms provided.")
# Search behavior is driven by options: --searchdesc, quiet output, and
# whether binary packages are considered.
1889 searchinstance = search(root_config,
1890 spinner, "--searchdesc" in myopts,
1891 "--quiet" not in myopts, "--usepkg" in myopts,
1892 "--usepkgonly" in myopts)
1893 for mysearch in myfiles:
1895 searchinstance.execute(mysearch)
# Search terms are treated as regular expressions; report bad patterns.
1896 except re.error as comment:
1897 print("\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment ))
1899 searchinstance.output()
1901 def action_sync(settings, trees, mtimedb, myopts, myaction):
1902 enter_invalid = '--ask-enter-invalid' in myopts
1903 xterm_titles = "notitles" not in settings.features
1904 emergelog(xterm_titles, " === sync")
1905 portdb = trees[settings["ROOT"]]["porttree"].dbapi
1906 myportdir = portdb.porttree_root
1908 myportdir = settings.get('PORTDIR', '')
1909 if myportdir and myportdir.strip():
1910 myportdir = os.path.realpath(myportdir)
1913 out = portage.output.EOutput()
1914 global_config_path = GLOBAL_CONFIG_PATH
1915 if settings['EPREFIX']:
1916 global_config_path = os.path.join(settings['EPREFIX'],
1917 GLOBAL_CONFIG_PATH.lstrip(os.sep))
1919 sys.stderr.write("!!! PORTDIR is undefined. " + \
1920 "Is %s/make.globals missing?\n" % global_config_path)
1922 if myportdir[-1]=="/":
1923 myportdir=myportdir[:-1]
1925 st = os.stat(myportdir)
1929 print(">>>",myportdir,"not found, creating it.")
1930 portage.util.ensure_dirs(myportdir, mode=0o755)
1931 st = os.stat(myportdir)
1935 spawn_kwargs["env"] = settings.environ()
1936 if 'usersync' in settings.features and \
1937 portage.data.secpass >= 2 and \
1938 (st.st_uid != os.getuid() and st.st_mode & 0o700 or \
1939 st.st_gid != os.getgid() and st.st_mode & 0o070):
1941 homedir = pwd.getpwuid(st.st_uid).pw_dir
1945 # Drop privileges when syncing, in order to match
1946 # existing uid/gid settings.
1947 usersync_uid = st.st_uid
1948 spawn_kwargs["uid"] = st.st_uid
1949 spawn_kwargs["gid"] = st.st_gid
1950 spawn_kwargs["groups"] = [st.st_gid]
1951 spawn_kwargs["env"]["HOME"] = homedir
1953 if not st.st_mode & 0o020:
1954 umask = umask | 0o020
1955 spawn_kwargs["umask"] = umask
1957 if usersync_uid is not None:
1958 # PORTAGE_TMPDIR is used below, so validate it and
1959 # bail out if necessary.
1960 rval = _check_temp_dir(settings)
1961 if rval != os.EX_OK:
1964 syncuri = settings.get("SYNC", "").strip()
1966 writemsg_level("!!! SYNC is undefined. " + \
1967 "Is %s/make.globals missing?\n" % global_config_path,
1968 noiselevel=-1, level=logging.ERROR)
1971 vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
1972 vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
1976 updatecache_flg = False
1977 if myaction == "metadata":
1978 print("skipping sync")
1979 updatecache_flg = True
1980 elif ".git" in vcs_dirs:
1981 # Update existing git repository, and ignore the syncuri. We are
1982 # going to trust the user and assume that the user is in the branch
1983 # that he/she wants updated. We'll let the user manage branches with
1985 if portage.process.find_binary("git") is None:
1986 msg = ["Command not found: git",
1987 "Type \"emerge dev-util/git\" to enable git support."]
1989 writemsg_level("!!! %s\n" % l,
1990 level=logging.ERROR, noiselevel=-1)
1992 msg = ">>> Starting git pull in %s..." % myportdir
1993 emergelog(xterm_titles, msg )
1994 writemsg_level(msg + "\n")
1995 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
1996 (portage._shell_quote(myportdir),), **spawn_kwargs)
1997 if exitcode != os.EX_OK:
1998 msg = "!!! git pull error in %s." % myportdir
1999 emergelog(xterm_titles, msg)
2000 writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
2002 msg = ">>> Git pull in %s successful" % myportdir
2003 emergelog(xterm_titles, msg)
2004 writemsg_level(msg + "\n")
2005 exitcode = git_sync_timestamps(settings, myportdir)
2006 if exitcode == os.EX_OK:
2007 updatecache_flg = True
2008 elif syncuri[:8]=="rsync://" or syncuri[:6]=="ssh://":
2009 for vcs_dir in vcs_dirs:
2010 writemsg_level(("!!! %s appears to be under revision " + \
2011 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
2012 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
2014 if not os.path.exists("/usr/bin/rsync"):
2015 print("!!! /usr/bin/rsync does not exist, so rsync support is disabled.")
2016 print("!!! Type \"emerge net-misc/rsync\" to enable rsync support.")
2021 if settings["PORTAGE_RSYNC_OPTS"] == "":
2022 portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
2024 "--recursive", # Recurse directories
2025 "--links", # Consider symlinks
2026 "--safe-links", # Ignore links outside of tree
2027 "--perms", # Preserve permissions
2028 "--times", # Preserive mod times
2029 "--compress", # Compress the data transmitted
2030 "--force", # Force deletion on non-empty dirs
2031 "--whole-file", # Don't do block transfers, only entire files
2032 "--delete", # Delete files that aren't in the master tree
2033 "--stats", # Show final statistics about what was transfered
2034 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
2035 "--exclude=/distfiles", # Exclude distfiles from consideration
2036 "--exclude=/local", # Exclude local from consideration
2037 "--exclude=/packages", # Exclude packages from consideration
2041 # The below validation is not needed when using the above hardcoded
2044 portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
2045 rsync_opts.extend(portage.util.shlex_split(
2046 settings.get("PORTAGE_RSYNC_OPTS", "")))
2047 for opt in ("--recursive", "--times"):
2048 if opt not in rsync_opts:
2049 portage.writemsg(yellow("WARNING:") + " adding required option " + \
2050 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
2051 rsync_opts.append(opt)
2053 for exclude in ("distfiles", "local", "packages"):
2054 opt = "--exclude=/%s" % exclude
2055 if opt not in rsync_opts:
2056 portage.writemsg(yellow("WARNING:") + \
2057 " adding required option %s not included in " % opt + \
2058 "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
2059 rsync_opts.append(opt)
2061 if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
2062 def rsync_opt_startswith(opt_prefix):
2063 for x in rsync_opts:
2064 if x.startswith(opt_prefix):
2068 if not rsync_opt_startswith("--timeout="):
2069 rsync_opts.append("--timeout=%d" % mytimeout)
2071 for opt in ("--compress", "--whole-file"):
2072 if opt not in rsync_opts:
2073 portage.writemsg(yellow("WARNING:") + " adding required option " + \
2074 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
2075 rsync_opts.append(opt)
2077 if "--quiet" in myopts:
2078 rsync_opts.append("--quiet") # Shut up a lot
2080 rsync_opts.append("--verbose") # Print filelist
2082 if "--verbose" in myopts:
2083 rsync_opts.append("--progress") # Progress meter for each file
2085 if "--debug" in myopts:
2086 rsync_opts.append("--checksum") # Force checksum on all files
2088 # Real local timestamp file.
2089 servertimestampfile = os.path.join(
2090 myportdir, "metadata", "timestamp.chk")
2092 content = portage.util.grabfile(servertimestampfile)
2096 mytimestamp = time.mktime(time.strptime(content[0],
2097 "%a, %d %b %Y %H:%M:%S +0000"))
2098 except (OverflowError, ValueError):
2103 rsync_initial_timeout = \
2104 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
2106 rsync_initial_timeout = 15
2109 maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
2110 except SystemExit as e:
2111 raise # Needed else can't exit
2113 maxretries = -1 #default number of retries
2117 proto, user_name, hostname, port = re.split(
2118 r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
2119 syncuri, maxsplit=4)[1:5]
2121 writemsg_level("!!! SYNC is invalid: %s\n" % syncuri,
2122 noiselevel=-1, level=logging.ERROR)
2126 if user_name is None:
2128 if re.match(r"^\[[:\da-fA-F]*\]$", hostname) is None:
2129 getaddrinfo_host = hostname
2131 # getaddrinfo needs the brackets stripped
2132 getaddrinfo_host = hostname[1:-1]
2133 updatecache_flg=True
2134 all_rsync_opts = set(rsync_opts)
2135 extra_rsync_opts = portage.util.shlex_split(
2136 settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
2137 all_rsync_opts.update(extra_rsync_opts)
2139 family = socket.AF_UNSPEC
2140 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
2141 family = socket.AF_INET
2142 elif socket.has_ipv6 and \
2143 ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
2144 family = socket.AF_INET6
2150 addrinfos = getaddrinfo_validate(
2151 socket.getaddrinfo(getaddrinfo_host, None,
2152 family, socket.SOCK_STREAM))
2153 except socket.error as e:
2155 "!!! getaddrinfo failed for '%s': %s\n" % (hostname, e),
2156 noiselevel=-1, level=logging.ERROR)
2160 AF_INET = socket.AF_INET
2163 AF_INET6 = socket.AF_INET6
2168 for addrinfo in addrinfos:
2169 if addrinfo[0] == AF_INET:
2170 ips_v4.append("%s" % addrinfo[4][0])
2171 elif AF_INET6 is not None and addrinfo[0] == AF_INET6:
2172 # IPv6 addresses need to be enclosed in square brackets
2173 ips_v6.append("[%s]" % addrinfo[4][0])
2175 random.shuffle(ips_v4)
2176 random.shuffle(ips_v6)
2178 # Give priority to the address family that
2179 # getaddrinfo() returned first.
2180 if AF_INET6 is not None and addrinfos and \
2181 addrinfos[0][0] == AF_INET6:
2182 ips = ips_v6 + ips_v4
2184 ips = ips_v4 + ips_v6
2187 uris.append(syncuri.replace(
2188 "//" + user_name + hostname + port + "/",
2189 "//" + user_name + ip + port + "/", 1))
2192 # With some configurations we need to use the plain hostname
2193 # rather than try to resolve the ip addresses (bug #340817).
2194 uris.append(syncuri)
2196 # reverse, for use with pop()
2199 effective_maxretries = maxretries
2200 if effective_maxretries < 0:
2201 effective_maxretries = len(uris) - 1
2203 SERVER_OUT_OF_DATE = -1
2204 EXCEEDED_MAX_RETRIES = -2
2207 dosyncuri = uris.pop()
2209 writemsg("!!! Exhausted addresses for %s\n" % \
2210 hostname, noiselevel=-1)
2214 if "--ask" in myopts:
2215 if userquery("Do you want to sync your Portage tree " + \
2216 "with the mirror at\n" + blue(dosyncuri) + bold("?"),
2217 enter_invalid) == "No":
2222 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
2223 if "--quiet" not in myopts:
2224 print(">>> Starting rsync with "+dosyncuri+"...")
2226 emergelog(xterm_titles,
2227 ">>> Starting retry %d of %d with %s" % \
2228 (retries, effective_maxretries, dosyncuri))
2230 "\n\n>>> Starting retry %d of %d with %s\n" % \
2231 (retries, effective_maxretries, dosyncuri), noiselevel=-1)
2233 if dosyncuri.startswith('ssh://'):
2234 dosyncuri = dosyncuri[6:].replace('/', ':/', 1)
2236 if mytimestamp != 0 and "--quiet" not in myopts:
2237 print(">>> Checking server timestamp ...")
2239 rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
2241 if "--debug" in myopts:
2246 # Even if there's no timestamp available locally, fetch the
2247 # timestamp anyway as an initial probe to verify that the server is
2248 # responsive. This protects us from hanging indefinitely on a
2249 # connection attempt to an unresponsive server which rsync's
2250 # --timeout option does not prevent.
2252 # Temporary file for remote server timestamp comparison.
2253 # NOTE: If FEATURES=usersync is enabled then the tempfile
2254 # needs to be in a directory that's readable by the usersync
2255 # user. We assume that PORTAGE_TMPDIR will satisfy this
2256 # requirement, since that's not necessarily true for the
2257 # default directory used by the tempfile module.
2258 if usersync_uid is not None:
2259 tmpdir = settings['PORTAGE_TMPDIR']
2261 # use default dir from tempfile module
2263 fd, tmpservertimestampfile = \
2264 tempfile.mkstemp(dir=tmpdir)
2266 if usersync_uid is not None:
2267 portage.util.apply_permissions(tmpservertimestampfile,
2269 mycommand = rsynccommand[:]
2270 mycommand.append(dosyncuri.rstrip("/") + \
2271 "/metadata/timestamp.chk")
2272 mycommand.append(tmpservertimestampfile)
2276 # Timeout here in case the server is unresponsive. The
2277 # --timeout rsync option doesn't apply to the initial
2278 # connection attempt.
2280 if rsync_initial_timeout:
2281 portage.exception.AlarmSignal.register(
2282 rsync_initial_timeout)
2284 mypids.extend(portage.process.spawn(
2285 mycommand, returnpid=True, **spawn_kwargs))
2286 exitcode = os.waitpid(mypids[0], 0)[1]
2287 if usersync_uid is not None:
2288 portage.util.apply_permissions(tmpservertimestampfile,
2290 content = portage.grabfile(tmpservertimestampfile)
2292 if rsync_initial_timeout:
2293 portage.exception.AlarmSignal.unregister()
2295 os.unlink(tmpservertimestampfile)
2298 except portage.exception.AlarmSignal:
2301 # With waitpid and WNOHANG, only check the
2302 # first element of the tuple since the second
2303 # element may vary (bug #337465).
2304 if mypids and os.waitpid(mypids[0], os.WNOHANG)[0] == 0:
2305 os.kill(mypids[0], signal.SIGTERM)
2306 os.waitpid(mypids[0], 0)
2307 # This is the same code rsync uses for timeout.
2310 if exitcode != os.EX_OK:
2312 exitcode = (exitcode & 0xff) << 8
2314 exitcode = exitcode >> 8
2316 portage.process.spawned_pids.remove(mypids[0])
2319 servertimestamp = time.mktime(time.strptime(
2320 content[0], "%a, %d %b %Y %H:%M:%S +0000"))
2321 except (OverflowError, ValueError):
2323 del mycommand, mypids, content
2324 if exitcode == os.EX_OK:
2325 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
2326 emergelog(xterm_titles,
2327 ">>> Cancelling sync -- Already current.")
2330 print(">>> Timestamps on the server and in the local repository are the same.")
2331 print(">>> Cancelling all further sync action. You are already up to date.")
2333 print(">>> In order to force sync, remove '%s'." % servertimestampfile)
2337 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
2338 emergelog(xterm_titles,
2339 ">>> Server out of date: %s" % dosyncuri)
2342 print(">>> SERVER OUT OF DATE: %s" % dosyncuri)
2344 print(">>> In order to force sync, remove '%s'." % servertimestampfile)
2347 exitcode = SERVER_OUT_OF_DATE
2348 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
2350 mycommand = rsynccommand + [dosyncuri+"/", myportdir]
2351 exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
2352 if exitcode in [0,1,3,4,11,14,20,21]:
2354 elif exitcode in [1,3,4,11,14,20,21]:
2357 # Code 2 indicates protocol incompatibility, which is expected
2358 # for servers with protocol < 29 that don't support
2359 # --prune-empty-directories. Retry for a server that supports
2360 # at least rsync protocol version 29 (>=rsync-2.6.4).
2365 if maxretries < 0 or retries <= maxretries:
2366 print(">>> Retrying...")
2370 updatecache_flg=False
2371 exitcode = EXCEEDED_MAX_RETRIES
2375 emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
2376 elif exitcode == SERVER_OUT_OF_DATE:
2378 elif exitcode == EXCEEDED_MAX_RETRIES:
2380 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
2385 msg.append("Rsync has reported that there is a syntax error. Please ensure")
2386 msg.append("that your SYNC statement is proper.")
2387 msg.append("SYNC=" + settings["SYNC"])
2389 msg.append("Rsync has reported that there is a File IO error. Normally")
2390 msg.append("this means your disk is full, but can be caused by corruption")
2391 msg.append("on the filesystem that contains PORTDIR. Please investigate")
2392 msg.append("and try again after the problem has been fixed.")
2393 msg.append("PORTDIR=" + settings["PORTDIR"])
2395 msg.append("Rsync was killed before it finished.")
2397 msg.append("Rsync has not successfully finished. It is recommended that you keep")
2398 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
2399 msg.append("to use rsync due to firewall or other restrictions. This should be a")
2400 msg.append("temporary problem unless complications exist with your network")
2401 msg.append("(and possibly your system's filesystem) configuration.")
2405 elif syncuri[:6]=="cvs://":
2406 if not os.path.exists("/usr/bin/cvs"):
2407 print("!!! /usr/bin/cvs does not exist, so CVS support is disabled.")
2408 print("!!! Type \"emerge dev-vcs/cvs\" to enable CVS support.")
2411 cvsdir=os.path.dirname(myportdir)
2412 if not os.path.exists(myportdir+"/CVS"):
2414 print(">>> Starting initial cvs checkout with "+syncuri+"...")
2415 if os.path.exists(cvsdir+"/gentoo-x86"):
2416 print("!!! existing",cvsdir+"/gentoo-x86 directory; exiting.")
2420 except OSError as e:
2421 if e.errno != errno.ENOENT:
2423 "!!! existing '%s' directory; exiting.\n" % myportdir)
2426 if portage.process.spawn_bash(
2427 "cd %s; exec cvs -z0 -d %s co -P gentoo-x86" % \
2428 (portage._shell_quote(cvsdir), portage._shell_quote(cvsroot)),
2429 **spawn_kwargs) != os.EX_OK:
2430 print("!!! cvs checkout error; exiting.")
2432 os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
2435 print(">>> Starting cvs update with "+syncuri+"...")
2436 retval = portage.process.spawn_bash(
2437 "cd %s; exec cvs -z0 -q update -dP" % \
2438 (portage._shell_quote(myportdir),), **spawn_kwargs)
2439 if retval != os.EX_OK:
2440 writemsg_level("!!! cvs update error; exiting.\n",
2441 noiselevel=-1, level=logging.ERROR)
2445 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
2446 noiselevel=-1, level=logging.ERROR)
2449 if updatecache_flg and \
2450 myaction != "metadata" and \
2451 "metadata-transfer" not in settings.features:
2452 updatecache_flg = False
2454 # Reload the whole config from scratch.
2455 settings, trees, mtimedb = load_emerge_config(trees=trees)
2456 adjust_configs(myopts, trees)
2457 root_config = trees[settings["ROOT"]]["root_config"]
2458 portdb = trees[settings["ROOT"]]["porttree"].dbapi
2460 if updatecache_flg and \
2461 os.path.exists(os.path.join(myportdir, 'metadata', 'cache')):
2463 # Only update cache for myportdir since that's
2464 # the only one that's been synced here.
2465 action_metadata(settings, portdb, myopts, porttrees=[myportdir])
2467 if myopts.get('--package-moves') != 'n' and \
2468 _global_updates(trees, mtimedb["updates"], quiet=("--quiet" in myopts)):
2470 # Reload the whole config from scratch.
2471 settings, trees, mtimedb = load_emerge_config(trees=trees)
2472 adjust_configs(myopts, trees)
2473 portdb = trees[settings["ROOT"]]["porttree"].dbapi
2474 root_config = trees[settings["ROOT"]]["root_config"]
2476 mybestpv = portdb.xmatch("bestmatch-visible",
2477 portage.const.PORTAGE_PACKAGE_ATOM)
2478 mypvs = portage.best(
2479 trees[settings["ROOT"]]["vartree"].dbapi.match(
2480 portage.const.PORTAGE_PACKAGE_ATOM))
2482 chk_updated_cfg_files(settings["EROOT"],
2483 portage.util.shlex_split(settings.get("CONFIG_PROTECT", "")))
2485 if myaction != "metadata":
2486 postsync = os.path.join(settings["PORTAGE_CONFIGROOT"],
2487 portage.USER_CONFIG_PATH, "bin", "post_sync")
2488 if os.access(postsync, os.X_OK):
2489 retval = portage.process.spawn(
2490 [postsync, dosyncuri], env=settings.environ())
2491 if retval != os.EX_OK:
2493 " %s spawn failed of %s\n" % (bad("*"), postsync,),
2494 level=logging.ERROR, noiselevel=-1)
2496 if(mybestpv != mypvs) and not "--quiet" in myopts:
2498 print(red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended")
2499 print(red(" * ")+"that you update portage now, before any other packages are updated.")
2501 print(red(" * ")+"To update portage, run 'emerge portage' now.")
2504 display_news_notification(root_config, myopts)
def action_uninstall(settings, trees, ldpath_mtimes,
	opts, action, files, spinner):
	"""
	Resolve command-line arguments (atoms, @set names, or filesystem
	paths) into valid installed-package atoms and dispatch to the
	requested removal action (unmerge/clean/prune/depclean/deselect).

	@param settings: config for the target root (reads ROOT)
	@param trees: per-root databases; the vartree dbapi is queried here
	@param ldpath_mtimes: passed through to unmerge()/action_depclean()
	@param opts: parsed emerge options
	@param action: 'clean', 'unmerge', 'prune', 'depclean' or 'deselect'
	@param files: raw arguments to resolve into atoms
	@param spinner: progress indicator, forwarded to action_depclean()
	"""
	# For backward compat, some actions do not require leading '='.
	ignore_missing_eq = action in ('clean', 'unmerge')
	root = settings['ROOT']
	vardb = trees[root]['vartree'].dbapi
	# Ensure atoms are valid before calling unmerge().
	# For backward compat, leading '=' is not required.
		if is_valid_package_atom(x, allow_repo=True) or \
			(ignore_missing_eq and is_valid_package_atom('=' + x)):
			# Expand short names against the installed-package db; this
			# may raise AmbiguousPackageName for a bare name like "foo".
				dep_expand(x, mydb=vardb, settings=settings))
			except portage.exception.AmbiguousPackageName as e:
				msg = "The short ebuild name \"" + x + \
					"\" is ambiguous. Please specify " + \
					"one of the following " + \
					"fully-qualified ebuild names instead:"
				for line in textwrap.wrap(msg, 70):
					writemsg_level("!!! %s\n" % (line,),
						level=logging.ERROR, noiselevel=-1)
				# Print each candidate name carried by the exception
				# (presumably iterated as i) in INFORM color.
					writemsg_level(" %s\n" % colorize("INFORM", i),
						level=logging.ERROR, noiselevel=-1)
				writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
		elif x.startswith(os.sep):
			# Argument is an absolute path; it must live under $ROOT.
			if not x.startswith(root):
				writemsg_level(("!!! '%s' does not start with" + \
					" $ROOT.\n") % x, level=logging.ERROR, noiselevel=-1)
			# Queue these up since it's most efficient to handle
			# multiple files in a single iter_owners() call.
			lookup_owners.append(x)
		elif x.startswith(SETPREFIX) and action == "deselect":
			# @set arguments are accepted verbatim for deselect only.
			valid_atoms.append(x)
			# Last resort: treat the argument as an extended atom with
			# wildcards and match it against installed packages.
				ext_atom = Atom(x, allow_repo=True, allow_wildcard=True)
				msg.append("'%s' is not a valid package atom." % (x,))
				msg.append("Please check ebuild(5) for full details.")
				writemsg_level("".join("!!! %s\n" % line for line in msg),
					level=logging.ERROR, noiselevel=-1)
			for cp in vardb.cp_all():
				if extended_cp_match(ext_atom.cp, cp):
					# Carry any slot/repo restriction over to the
					# concrete atom built from the matched cp.
						atom += ":" + ext_atom.slot
						atom += "::" + ext_atom.repo
					if vardb.match(atom):
						valid_atoms.append(Atom(atom, allow_repo=True))
			msg.append("'%s' is not a valid package atom." % (x,))
			msg.append("Please check ebuild(5) for full details.")
			writemsg_level("".join("!!! %s\n" % line for line in msg),
				level=logging.ERROR, noiselevel=-1)
	# Resolve queued paths to owning packages in one iter_owners() pass.
	# Searching for multiple owners is needed for >1 path or any dir.
	search_for_multiple = False
	if len(lookup_owners) > 1:
		search_for_multiple = True
	for x in lookup_owners:
		if not search_for_multiple and os.path.isdir(x):
			search_for_multiple = True
		# Convert the absolute path to a $ROOT-relative one (keeps the
		# leading '/') as expected by the owners index.
		relative_paths.append(x[len(root)-1:])
	for pkg, relative_path in \
		vardb._owners.iter_owners(relative_paths):
		owners.add(pkg.mycpv)
		if not search_for_multiple:
			# Build a slot atom for each owning package found above.
			slot = vardb.aux_get(cpv, ['SLOT'])[0]
			# portage now masks packages with missing slot, but it's
			# possible that one was installed by an older version
			atom = portage.cpv_getkey(cpv)
			atom = '%s:%s' % (portage.cpv_getkey(cpv), slot)
			valid_atoms.append(portage.dep.Atom(atom))
			writemsg_level(("!!! '%s' is not claimed " + \
				"by any package.\n") % lookup_owners[0],
				level=logging.WARNING, noiselevel=-1)
	if files and not valid_atoms:
	# Warn before a destructive unmerge unless explicitly quieted.
	if action == 'unmerge' and \
		'--quiet' not in opts and \
		'--quiet-unmerge-warn' not in opts:
		msg = "This action can remove important packages! " + \
			"In order to be safer, use " + \
			"`emerge -pv --depclean <atom>` to check for " + \
			"reverse dependencies before removing packages."
		out = portage.output.EOutput()
		for line in textwrap.wrap(msg, 72):
	if action == 'deselect':
		return action_deselect(settings, trees, opts, valid_atoms)
	# Create a Scheduler for calls to unmerge(), in order to cause
	# redirection of ebuild phase output to logs as required for
	# options such as --quiet.
	sched = Scheduler(settings, trees, None, opts,
	sched._background = sched._background_mode()
	sched._status_display.quiet = True
	if sched._background:
		# Background mode: flag it in the config (clone + re-lock) so
		# ebuild phase output is redirected to logs.
		sched.settings.unlock()
		sched.settings["PORTAGE_BACKGROUND"] = "1"
		sched.settings.backup_changes("PORTAGE_BACKGROUND")
		sched.settings.lock()
		sched.pkgsettings[root] = portage.config(clone=sched.settings)
	if action in ('clean', 'unmerge') or \
		(action == 'prune' and "--nodeps" in opts):
		# When given a list of atoms, unmerge them in the order given.
		ordered = action == 'unmerge'
		unmerge(trees[settings["ROOT"]]['root_config'], opts, action,
			valid_atoms, ldpath_mtimes, ordered=ordered,
			scheduler=sched._sched_iface)
		# Otherwise the dependency-aware depclean/prune logic runs.
		rval = action_depclean(settings, trees, ldpath_mtimes,
			opts, action, valid_atoms, spinner, scheduler=sched._sched_iface)
def adjust_configs(myopts, trees):
	"""Apply emerge-specific config adjustments (adjust_config) to the
	vartree settings of every configured root."""
	for myroot in trees:
		# Each root carries its own config instance on its vartree.
		mysettings = trees[myroot]["vartree"].settings
		adjust_config(myopts, mysettings)
def adjust_config(myopts, settings):
	"""Make emerge specific adjustments to the config."""
	# Kill noauto as it will break merges otherwise.
	if "noauto" in settings.features:
		settings.features.remove('noauto')
	# --fail-clean toggles the FEATURES flag of the same name.
	fail_clean = myopts.get('--fail-clean')
	if fail_clean is not None:
		if fail_clean is True and \
			'fail-clean' not in settings.features:
			settings.features.add('fail-clean')
		elif fail_clean == 'n' and \
			'fail-clean' in settings.features:
			settings.features.remove('fail-clean')
	# Validate CLEAN_DELAY as an integer; on parse failure warn and keep
	# the previous default, then persist the normalized value.
		CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
	except ValueError as e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
			settings["CLEAN_DELAY"], noiselevel=-1)
	settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
	settings.backup_changes("CLEAN_DELAY")
	# Same integer validation for EMERGE_WARNING_DELAY (default 10).
	EMERGE_WARNING_DELAY = 10
		EMERGE_WARNING_DELAY = int(settings.get(
			"EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
	except ValueError as e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
			settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
	settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
	settings.backup_changes("EMERGE_WARNING_DELAY")
	# --quiet/--quiet-build and --verbose are mirrored into the config
	# so that ebuild.sh and helpers can see them.
	if "--quiet" in myopts or "--quiet-build" in myopts:
		settings["PORTAGE_QUIET"]="1"
		settings.backup_changes("PORTAGE_QUIET")
	if "--verbose" in myopts:
		settings["PORTAGE_VERBOSE"] = "1"
		settings.backup_changes("PORTAGE_VERBOSE")
	# Set so that configs will be merged regardless of remembered status
	if ("--noconfmem" in myopts):
		settings["NOCONFMEM"]="1"
		settings.backup_changes("NOCONFMEM")
	# Set various debug markers... They should be merged somehow.
		PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
		if PORTAGE_DEBUG not in (0, 1):
			portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
				PORTAGE_DEBUG, noiselevel=-1)
			portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
	except ValueError as e:
		portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
		portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
			settings["PORTAGE_DEBUG"], noiselevel=-1)
	if "--debug" in myopts:
	settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
	settings.backup_changes("PORTAGE_DEBUG")
	# Color handling: NOCOLOR from config enables color by default ...
	if settings.get("NOCOLOR") not in ("yes","true"):
		portage.output.havecolor = 1
	"""The explicit --color < y | n > option overrides the NOCOLOR environment
	variable and stdout auto-detection."""
	if "--color" in myopts:
		if "y" == myopts["--color"]:
			portage.output.havecolor = 1
			settings["NOCOLOR"] = "false"
			portage.output.havecolor = 0
			settings["NOCOLOR"] = "true"
		settings.backup_changes("NOCOLOR")
	elif settings.get('TERM') == 'dumb' or \
		not sys.stdout.isatty():
		# Dumb terminal or non-tty stdout: force color off.
		portage.output.havecolor = 0
		settings["NOCOLOR"] = "true"
		settings.backup_changes("NOCOLOR")
def display_missing_pkg_set(root_config, set_name):
	"""Report at ERROR level that 'set_name' matches no known package
	set, and list the set names that do exist for this root."""
	msg.append(("emerge: There are no sets to satisfy '%s'. " + \
		"The following sets exist:") % \
		colorize("INFORM", set_name))
	# One indented line per available set, sorted for stable output.
	for s in sorted(root_config.sets):
		msg.append(" %s" % s)
	writemsg_level("".join("%s\n" % l for l in msg),
		level=logging.ERROR, noiselevel=-1)
def relative_profile_path(portdir, abs_profile):
	"""Map an absolute profile path to a path relative to
	<portdir>/profiles; only set when the profile lies under it."""
	realpath = os.path.realpath(abs_profile)
	basepath = os.path.realpath(os.path.join(portdir, "profiles"))
	if realpath.startswith(basepath):
		# Strip the base path plus the '/' separator that follows it.
		profilever = realpath[1 + len(basepath):]
def getportageversion(portdir, target_root, profile, chost, vardb):
	"""
	Build the "Portage <ver> (<profile>, <gcc>, <libc>, <uname>)" banner
	string used by emerge's version/info output.

	@param portdir: PORTDIR path, used to resolve the profile path
	@param profile: absolute path of the active profile
	@param chost: CHOST, used to query the in-use gcc version
	@param vardb: installed-package dbapi, queried for the libc version
	@rtype: str
	"""
		profilever = relative_profile_path(portdir, profile)
		if profilever is None:
			# Fall back to resolving each parent listed in the
			# profile's 'parent' file until one maps under profiles/.
				for parent in portage.grabfile(
					os.path.join(profile, 'parent')):
					profilever = relative_profile_path(portdir,
						os.path.join(profile, parent))
					if profilever is not None:
	except portage.exception.PortageException:
	if profilever is None:
		# Last resort: show the raw symlink target, marked with '!'.
			profilever = "!" + os.readlink(profile)
	if profilever is None:
		profilever = "unavailable"
	# Collect installed libc package versions via the expanded
	# virtual/libc provider atoms (skipping blockers).
	for atom in expand_new_virt(vardb, portage.const.LIBC_PACKAGE_ATOM):
		if not atom.blocker:
			libclist.update(vardb.match(atom))
	for cpv in sorted(libclist):
		# Keep only the pkgname-version-rev portion of each cpv.
		libcver.append("-".join(portage.catpkgsplit(cpv)[1:]))
		libcver = ["unavailable"]
	gccver = getgccversion(chost)
	unameout=platform.release()+" "+platform.machine()
	return "Portage %s (%s, %s, %s, %s)" % \
		(portage.VERSION, profilever, gccver, ",".join(libcver), unameout)
def git_sync_timestamps(settings, portdir):
	"""
	Since git doesn't preserve timestamps, synchronize timestamps between
	entries and ebuilds/eclasses. Assume the cache has the correct timestamp
	for a given file as long as the file in the working tree is not modified
	"""
	cache_dir = os.path.join(portdir, "metadata", "cache")
	if not os.path.isdir(cache_dir):
	writemsg_level(">>> Synchronizing timestamps...\n")
	from portage.cache.cache_errors import CacheError
	# Open the metadata cache read-only; abort on cache errors.
		cache_db = settings.load_best_module("portdbapi.metadbmodule")(
			portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
	except CacheError as e:
		writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
			level=logging.ERROR, noiselevel=-1)
	# Enumerate eclass names by stripping the ".eclass" suffix (7 chars).
	ec_dir = os.path.join(portdir, "eclass")
		ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
			if f.endswith(".eclass"))
	except OSError as e:
		writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
			level=logging.ERROR, noiselevel=-1)
	# Ask git which tracked files are locally modified (diff-filter=M);
	# cached timestamps for those files cannot be trusted.
	args = [portage.const.BASH_BINARY, "-c",
		"cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
		portage._shell_quote(portdir)]
	proc = subprocess.Popen(args, stdout=subprocess.PIPE)
	modified_files = set(_unicode_decode(l).rstrip("\n") for l in proc.stdout)
	if rval != os.EX_OK:
	modified_eclasses = set(ec for ec in ec_names \
		if os.path.join("eclass", ec + ".eclass") in modified_files)
	# Tracks eclass mtimes already applied during this run, to detect
	# cache entries that disagree with each other.
	updated_ec_mtimes = {}
	for cpv in cache_db:
		cpv_split = portage.catpkgsplit(cpv)
		if cpv_split is None:
			writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
				level=logging.ERROR, noiselevel=-1)
		cat, pn, ver, rev = cpv_split
		cat, pf = portage.catsplit(cpv)
		relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
		# Locally modified ebuilds are skipped.
		if relative_eb_path in modified_files:
			cache_entry = cache_db[cpv]
			eb_mtime = cache_entry.get("_mtime_")
			ec_mtimes = cache_entry.get("_eclasses_")
			writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
				level=logging.ERROR, noiselevel=-1)
		except CacheError as e:
			writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
				(cpv, e), level=logging.ERROR, noiselevel=-1)
		if eb_mtime is None:
			writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
				level=logging.ERROR, noiselevel=-1)
			# long() (py2) validates that the stored mtime is numeric.
			eb_mtime = long(eb_mtime)
			writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
				(cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
		if ec_mtimes is None:
			writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
				level=logging.ERROR, noiselevel=-1)
		# Entries depending on a locally-modified eclass are skipped too.
		if modified_eclasses.intersection(ec_mtimes):
		missing_eclasses = set(ec_mtimes).difference(ec_names)
		if missing_eclasses:
			writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
				(cpv, sorted(missing_eclasses)), level=logging.ERROR,
		eb_path = os.path.join(portdir, relative_eb_path)
			current_eb_mtime = os.stat(eb_path)
			writemsg_level("!!! Missing ebuild: %s\n" % \
				(cpv,), level=logging.ERROR, noiselevel=-1)
		# An eclass mtime already applied this run must agree with this
		# entry's recorded mtime; otherwise the entry is inconsistent.
		inconsistent = False
		for ec, (ec_path, ec_mtime) in ec_mtimes.items():
			updated_mtime = updated_ec_mtimes.get(ec)
			if updated_mtime is not None and updated_mtime != ec_mtime:
				writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
					(cpv, ec), level=logging.ERROR, noiselevel=-1)
		# Restore the cached timestamps onto the working-tree files.
		if current_eb_mtime != eb_mtime:
			os.utime(eb_path, (eb_mtime, eb_mtime))
		for ec, (ec_path, ec_mtime) in ec_mtimes.items():
			if ec in updated_ec_mtimes:
			ec_path = os.path.join(ec_dir, ec + ".eclass")
			current_mtime = os.stat(ec_path)[stat.ST_MTIME]
			if current_mtime != ec_mtime:
				os.utime(ec_path, (ec_mtime, ec_mtime))
			updated_ec_mtimes[ec] = ec_mtime
def load_emerge_config(trees=None):
	"""
	(Re)load the complete emerge configuration: create/refresh the
	per-root trees, attach a RootConfig (with set config) to each, and
	open the mtime database.

	@param trees: existing trees to refresh, or None to create fresh ones
	@return: (settings, trees, mtimedb) tuple
	"""
	# Honor PORTAGE_CONFIGROOT / ROOT overrides from the environment.
	for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
		v = os.environ.get(envvar, None)
	trees = portage.create_trees(trees=trees, **kwargs)
	for root, root_trees in trees.items():
		settings = root_trees["vartree"].settings
		settings._init_dirs()
		# Attach set configuration and a RootConfig to every root.
		setconfig = load_default_config(settings, root_trees)
		root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
	settings = trees["/"]["vartree"].settings
	for myroot in trees:
			settings = trees[myroot]["vartree"].settings
	mtimedbfile = os.path.join(settings['EROOT'], portage.CACHE_PATH, "mtimedb")
	mtimedb = portage.MtimeDB(mtimedbfile)
	# Re-initialize color/output config and expose the trees to the
	# ebuild IPC query command.
	portage.output._init(config_root=settings['PORTAGE_CONFIGROOT'])
	QueryCommand._db = trees
	return settings, trees, mtimedb
def chk_updated_cfg_files(eroot, config_protect):
	"""Warn at INFO level about CONFIG_PROTECT-ed files/directories that
	have pending ._cfg0000_ updates, then point at the emerge man page."""
		portage.util.find_updated_config_files(target_root, config_protect))
		writemsg_level("\n %s " % (colorize("WARN", "* IMPORTANT:"),),
			level=logging.INFO, noiselevel=-1)
		if not x[1]: # it's a protected file
			writemsg_level("config file '%s' needs updating.\n" % x[0],
				level=logging.INFO, noiselevel=-1)
		else: # it's a protected dir
			# Single pending update in the dir: show the target file
			# name with the ._cfg0000_ prefix stripped.
				head, tail = os.path.split(x[1][0])
				tail = tail[len("._cfg0000_"):]
				fpath = os.path.join(head, tail)
				writemsg_level("config file '%s' needs updating.\n" % fpath,
					level=logging.INFO, noiselevel=-1)
				# Multiple pending updates: just report the count.
				writemsg_level("%d config files in '%s' need updating.\n" % \
					(len(x[1]), x[0]), level=logging.INFO, noiselevel=-1)
		print(" "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")\
			+ " section of the " + bold("emerge"))
		print(" "+yellow("*")+" man page to learn how to update config files.")
def display_news_notification(root_config, myopts):
	"""Print a notice for each repository that has unread news items
	and, if any were found, suggest `eselect news` to read them."""
	target_root = root_config.settings['EROOT']
	trees = root_config.trees
	settings = trees["vartree"].settings
	portdb = trees["porttree"].dbapi
	vardb = trees["vartree"].dbapi
	NEWS_PATH = os.path.join("metadata", "news")
	UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
	newsReaderDisplay = False
	# Only update (mark/count) unread state when not in --pretend mode.
	update = "--pretend" not in myopts
	# The 'news' FEATURES flag gates the whole notification.
	if "news" not in settings.features:
	for repo in portdb.getRepositories():
		unreadItems = checkUpdatedNewsItems(
			portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
			if not newsReaderDisplay:
				newsReaderDisplay = True
			print(colorize("WARN", " * IMPORTANT:"), end=' ')
			print("%s news items need reading for repository '%s'." % (unreadItems, repo))
	if newsReaderDisplay:
		print(colorize("WARN", " *"), end=' ')
		print("Use " + colorize("GOOD", "eselect news") + " to read news items.")
def getgccversion(chost):
	"""
	Determine the version string of the gcc currently in use,
	formatted as e.g. "gcc-4.5.3".

	@param chost: CHOST value, used to prefer the target-specific gcc
	rtype: C{str}
	return: the current in-use gcc version, or "[unavailable]" when
		no working gcc could be queried
	"""
	gcc_ver_command = 'gcc -dumpversion'
	gcc_ver_prefix = 'gcc-'

	gcc_not_found_error = red(
	"!!! No gcc found. You probably need to 'source /etc/profile'\n" +
	"!!! to update the environment of this terminal and possibly\n" +
	"!!! other terminals also.\n"
	)

	# Preferred source: the profile selected via gcc-config. Its output
	# is "<chost>-<version>", which we rewrite to "gcc-<version>".
	status, output = subprocess_getstatusoutput("gcc-config -c")
	if status == os.EX_OK and output.startswith(chost + "-"):
		return output.replace(chost + "-", gcc_ver_prefix, 1)

	# Fallbacks: ask the CHOST-prefixed compiler directly, then plain
	# "gcc", for the -dumpversion value.
	for probe in (chost + "-" + gcc_ver_command, gcc_ver_command):
		status, output = subprocess_getstatusoutput(probe)
		if status == os.EX_OK:
			return gcc_ver_prefix + output

	# Nothing answered: warn the user and return a placeholder.
	portage.writemsg(gcc_not_found_error, noiselevel=-1)
	return "[unavailable]"
def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
	Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
	Returns the number of unread (yet relevent) items.

	@param portdb: a portage tree database
	@type portdb: pordbapi
	@param vardb: an installed package database
	@type vardb: vardbapi
	1. The number of unread but relevant news items.
	# Delegate to NewsManager; imported here rather than at module
	# level, presumably to avoid the import cost on unrelated actions.
	from portage.news import NewsManager
	manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
	return manager.getUnreadItems( repo_id, update=update )