1 # portage.py -- core Portage functionality
2 # Copyright 1998-2004 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id: /var/cvsroot/gentoo-src/portage/pym/portage.py,v 1.524.2.76 2005/05/29 12:40:08 jstubbs Exp $
# Portage release identifier: the SVN keyword "$Rev$" (expanded to
# "$Rev: NNNN $" in checkouts) sliced down to the bare revision number,
# tagged with an "-svn" suffix.
VERSION = "%s-svn" % "$Rev$"[6:-2]
9 # ===========================================================================
10 # START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
11 # ===========================================================================
18 print "Failed to import sys! Something is _VERY_ wrong with python."
22 import os,string,types,signal,fcntl,errno
23 import time,traceback,copy
24 import re,pwd,grp,commands
29 import pickle as cPickle
33 from time import sleep
34 from random import shuffle
35 from cache.cache_errors import CacheError
39 sys.stderr.write("\n\n")
40 sys.stderr.write("!!! Failed to complete python imports. There are internal modules for\n")
41 sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
42 sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
44 sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
45 sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
47 sys.stderr.write(" "+str(e)+"\n\n");
50 sys.stderr.write("\n\n")
51 sys.stderr.write("!!! Failed to complete python imports. There are internal modules for\n")
52 sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
53 sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
55 sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
56 sys.stderr.write("!!! gone wrong. The exception was non-standard and we were unable to catch it.\n\n")
60 # XXX: This should get renamed to bsd_chflags, I think.
66 # XXX: This should get renamed to bsd_chflags, I think.
75 # XXX: This needs to get cleaned up.
77 from output import blue, bold, brown, darkblue, darkgreen, darkred, darkteal, \
78 darkyellow, fuchsia, fuscia, green, purple, red, teal, turquoise, white, \
79 xtermTitle, xtermTitleReset, yellow
82 from portage_const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
83 USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
84 PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
85 EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
86 MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
87 DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
88 INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
89 INCREMENTALS, STICKIES, EAPI
91 from portage_data import ostype, lchown, userland, secpass, uid, wheelgid, \
92 portage_uid, portage_gid
95 from portage_util import atomic_ofstream, dump_traceback, getconfig, grabdict, \
96 grabdict_package, grabfile, grabfile_package, \
97 map_dictlist_vals, pickle_read, pickle_write, stack_dictlist, stack_dicts, stack_lists, \
98 unique_array, varexpand, writedict, writemsg, writemsg_stdout, write_atomic
99 import portage_exception
103 from portage_exec import atexit_register, run_exitfuncs
104 from portage_locks import unlockfile,unlockdir,lockfile,lockdir
105 import portage_checksum
106 from portage_checksum import perform_md5,perform_checksum,prelink_capable
108 from portage_localization import _
110 # Need these functions directly in portage namespace to not break every external tool in existence
111 from portage_versions import ververify,vercmp,catsplit,catpkgsplit,pkgsplit,pkgcmp
113 except SystemExit, e:
116 sys.stderr.write("\n\n")
117 sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
118 sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
119 sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
120 sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
121 sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
122 sys.stderr.write("!!! a recovery of portage.\n")
124 sys.stderr.write(" "+str(e)+"\n\n")
128 # ===========================================================================
129 # END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
130 # ===========================================================================
def exithandler(signum,frame):
	"""Handles ^C interrupts in a sane manner.

	Installed for SIGINT/SIGTERM (see registrations below).  First blocks
	re-entry by ignoring further INT/TERM while shutting down.
	NOTE(review): the lines that forward the signal to the process group
	and exit are elided in this extract.
	"""
	# Ignore repeated ^C / TERM while cleanup is in progress.
	signal.signal(signal.SIGINT, signal.SIG_IGN)
	signal.signal(signal.SIGTERM, signal.SIG_IGN)
	# 0=send to *everybody* in process group
# Install signal dispositions at import time: INT/TERM are routed to
# exithandler; CHLD and PIPE are reset to the OS defaults.
for _signum, _disposition in (
	(signal.SIGCHLD, signal.SIG_DFL),
	(signal.SIGINT, exithandler),
	(signal.SIGTERM, exithandler),
	(signal.SIGPIPE, signal.SIG_DFL),
):
	signal.signal(_signum, _disposition)
# NOTE(review): body of a dotted-name loader helper -- its ``def`` header
# line is elided in this extract.  Imports the containing module, then
# getattr()s down the remaining dotted components to reach the target.
	modname = string.join(string.split(name,".")[:-1],".")
	mod = __import__(modname)
	components = name.split('.')
	for comp in components[1:]:
		mod = getattr(mod, comp)
def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
	# Return top_dict[x][key] for the first x (scanned in key_order order)
	# that has it; deep-copied when FullCopy is true, by reference otherwise.
	# Raises KeyError when no source dict holds the key.
	# NOTE(review): the ``for x in key_order`` header and the FullCopy
	# if/else around the two returns are elided in this extract.
	if top_dict.has_key(x) and top_dict[x].has_key(key):
		return copy.deepcopy(top_dict[x][key])
		return top_dict[x][key]
	raise KeyError, "Key not found in list; '%s'" % key
	# NOTE(review): fragment of a working-directory sanity helper; its
	# ``def`` header and the chdir fallback body are elided in this extract.
	"this fixes situations where the current directory doesn't exist"
	except SystemExit, e:
def abssymlink(symlink):
	"""This reads symlinks, resolving the relative symlinks, and returning the absolute.

	Relative targets are resolved against the symlink's own directory.
	NOTE(review): the conditional guarding the relative-target case is
	elided in this extract.
	"""
	mylink=os.readlink(symlink)
	mydir=os.path.dirname(symlink)
	mylink=mydir+"/"+mylink
	return os.path.normpath(mylink)
def suffix_array(array,suffix,doblanks=1):
	"""Appends a given suffix to each element in an Array/List/Tuple.
	Returns a new list; raises TypeError for any other input type.
	NOTE(review): result-list setup, loop header, blank-element handling
	(doblanks) and the return are elided in this extract."""
	if type(array) not in [types.ListType, types.TupleType]:
		raise TypeError, "List or Tuple expected. Got %s" % type(array)
	newarray.append(x + suffix)
def prefix_array(array,prefix,doblanks=1):
	"""Prepends a given prefix to each element in an Array/List/Tuple.
	Returns a new list; raises TypeError for any other input type.
	NOTE(review): result-list setup, loop header, blank-element handling
	(doblanks) and the return are elided in this extract."""
	if type(array) not in [types.ListType, types.TupleType]:
		raise TypeError, "List or Tuple expected. Got %s" % type(array)
	newarray.append(prefix + x)
def normalize_path(mypath):
	# os.path.normpath() plus one POSIX nicety: normpath preserves a
	# leading "//" (special per POSIX), so collapse it to a single "/".
	# NOTE(review): the return statement is elided in this extract.
	newpath = os.path.normpath(mypath)
	if newpath[:2] == "//":
		newpath = newpath[1:]
def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
	"""Cached directory listing.

	Returns (names, ftypes): two parallel lists for the entries of
	*my_original_path*.  Results live in the module-global ``dircache``
	keyed by normalized path, invalidated by mtime comparison.
	Directories are coded 1 in ftypes (see listdir's ``ftype[x]==1`` test);
	the codes appended for regular files and symlinks are elided here.
	NOTE(review): several lines (hit/miss/stale counter updates, error-path
	bodies) are elided in this extract -- structure below is partial.
	"""
	global cacheHit,cacheMiss,cacheStale
	mypath = normalize_path(my_original_path)
	if dircache.has_key(mypath):
		cached_mtime, list, ftype = dircache[mypath]
	cached_mtime, list, ftype = -1, [], []
	pathstat = os.stat(mypath)
	if stat.S_ISDIR(pathstat[stat.ST_MODE]):
		mtime = pathstat[stat.ST_MTIME]
	except SystemExit, e:
	# Python returns mtime in seconds, so if it was changed in the last few seconds, it could be invalid
	if mtime != cached_mtime or time.time() - mtime < 4:
		if dircache.has_key(mypath):
		list = os.listdir(mypath)
		# stat() follows symlinks, lstat() does not (followSymlinks toggle).
		pathstat = os.stat(mypath+"/"+x)
		pathstat = os.lstat(mypath+"/"+x)
		if stat.S_ISREG(pathstat[stat.ST_MODE]):
		elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
		elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
		except SystemExit, e:
		dircache[mypath] = mtime, list, ftype
	# Filter: with ignorecvs, keep names longer than 2 chars that do not
	# start with ".#"; otherwise keep anything not in ignorelist.
	for x in range(0, len(list)):
		if(ignorecvs and (len(list[x]) > 2) and (list[x][:2]!=".#")):
			ret_list.append(list[x])
			ret_ftype.append(ftype[x])
		elif (list[x] not in ignorelist):
			ret_list.append(list[x])
			ret_ftype.append(ftype[x])
	writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
	return ret_list, ret_ftype
def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
	EmptyOnError=False, dirsonly=False):
	# Directory listing built on cacheddir(), with optional recursion and
	# files-only / dirs-only filtering.
	# NOTE(review): mutable default ``ignorelist=[]`` is the classic shared-
	# default pitfall -- flagged here, not changed.  Several lines (branch
	# bodies, rlist setup, the return) are elided in this extract.
	list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
	if not (filesonly or dirsonly or recursive):
	# ftype code 1 == directory; skip VCS metadata dirs when ignorecvs.
	if ftype[x]==1 and not (ignorecvs and os.path.basename(list[x]) in ('CVS','.svn','SCCS')):
		l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
		# Prefix recursed entries with their parent directory name.
		for y in range(0,len(l)):
			l[y]=list[x]+"/"+l[y]
	for x in range(0,len(ftype)):
		rlist=rlist+[list[x]]
	for x in range(0, len(ftype)):
		rlist = rlist + [list[x]]
# Wall-clock time (whole seconds since the epoch) when this module loaded.
starttime=long(time.time())
def tokenize(mystring):
	"""breaks a string like 'foo? (bar) oni? (blah (blah))'
	into embedded lists; returns None on paren mismatch"""

	# This function is obsoleted.
	# Use dep_parenreduce
	# NOTE(review): most of the scanner (state variables, the character
	# loop, accumulator resets, the final return) is elided in this
	# extract; only fragments of the paren/whitespace handling remain.
	curlist.append(accum)
	prevlists.append(curlist)
	curlist.append(accum)
	writemsg("!!! tokenizer: Unmatched left parenthesis in:\n'"+str(mystring)+"'\n")
	curlist=prevlists.pop()
	curlist.append(newlist)
	elif x in string.whitespace:
	curlist.append(accum)
	curlist.append(accum)
	writemsg("!!! tokenizer: Exiting with unterminated parenthesis in:\n'"+str(mystring)+"'\n")
def flatten(mytokens):
	"""this function now turns a [1,[2,3]] list into
	a [1,2,3] list and returns it."""
	# Recursively splices nested lists into a flat result list.
	# NOTE(review): newlist initialisation, the loop header, the non-list
	# else-branch and the return are elided in this extract.
	if type(x)==types.ListType:
		newlist.extend(flatten(x))
#beautiful directed graph object
#okeys = keys, in order they were added (to optimize firstzero() ordering)
# NOTE(review): the ``class digraph:`` header, __init__, and several method
# headers/bodies are elided in this extract.  Visible invariant (from
# addnode): self.dict[node] == [refcount, [parent, ...]]
def addnode(self,mykey,myparent):
	# Register mykey and an edge from myparent, bumping the parent's
	# reference count.
	if not self.dict.has_key(mykey):
		self.okeys.append(mykey)
	self.dict[mykey]=[0,[]]
	self.dict[mykey]=[0,[myparent]]
	self.dict[myparent][0]=self.dict[myparent][0]+1
	if myparent and (not myparent in self.dict[mykey][1]):
		self.dict[mykey][1].append(myparent)
		self.dict[myparent][0]=self.dict[myparent][0]+1
def delnode(self,mykey):
	# Remove mykey, decrementing each parent's reference count.
	if not self.dict.has_key(mykey):
	for x in self.dict[mykey][1]:
		self.dict[x][0]=self.dict[x][0]-1
	self.okeys.remove(mykey)
"returns all nodes in the dictionary"
return self.dict.keys()
"returns first node with zero references, or NULL if no such node exists"
if self.dict[x][0]==0:
def depth(self, mykey):
	# Walk the first-parent chain upward to measure the node's depth.
	while (self.dict[mykey][1]):
		mykey=self.dict[mykey][1][0]
"returns all nodes with zero references, or NULL if no such node exists"
for x in self.dict.keys():
	mys = string.split(x)
	# "blocks ..." pseudo-nodes are excluded from the zero-reference scan.
	if mys[0] != "blocks" and self.dict[x][0]==0:
def hasallzeros(self):
	"returns 0/1, Are all nodes zeros? 1 : 0"
	for x in self.dict.keys():
		if self.dict[x][0]!=0:
	if len(self.dict)==0:
def hasnode(self,mynode):
	return self.dict.has_key(mynode)
# copy(): duplicate per-node entries and the ordered key list.
for x in self.dict.keys():
	mygraph.dict[x]=self.dict[x][:]
mygraph.okeys=self.okeys[:]
def elog_process(cpv, mysettings):
	"""Collect per-phase elog messages recorded under $T/logging and hand
	them to each module named in PORTAGE_ELOG_SYSTEM.
	NOTE(review): loop headers, continue/return statements and the bodies
	of the except clauses are elided in this extract.
	"""
	mylogfiles = listdir(mysettings["T"]+"/logging/")
	# shortcut for packages without any messages
	if len(mylogfiles) == 0:
	# exploit listdir() file order so we process log entries in chronological order
	# Log file names are "<phase>.<class>"; classes not selected by
	# PORTAGE_ELOG_CLASSES (matched case-insensitively) are skipped.
	msgfunction, msgtype = f.split(".")
	if not msgtype.upper() in mysettings["PORTAGE_ELOG_CLASSES"].split() \
		and not msgtype.lower() in mysettings["PORTAGE_ELOG_CLASSES"].split():
	if msgfunction not in portage_const.EBUILD_PHASES:
		print "!!! can't process invalid log file: %s" % f
	if not msgfunction in mylogentries:
		mylogentries[msgfunction] = []
	msgcontent = open(mysettings["T"]+"/logging/"+f, "r").readlines()
	mylogentries[msgfunction].append((msgtype, msgcontent))
	# in case the filters matched all messages
	if len(mylogentries) == 0:
	# generate a single string with all log messages
	for phase in portage_const.EBUILD_PHASES:
		if not phase in mylogentries:
		for msgtype,msgcontent in mylogentries[phase]:
			fulllog += "%s: %s\n" % (msgtype, phase)
			for line in msgcontent:
	# pass the processing to the individual modules
	logsystems = mysettings["PORTAGE_ELOG_SYSTEM"].split()
	# FIXME: ugly ad-hoc import code
	# TODO: implement a common portage module loader
	logmodule = __import__("elog_modules.mod_"+s)
	m = getattr(logmodule, "mod_"+s)
	m.process(mysettings, cpv, mylogentries, fulllog)
	except (ImportError, AttributeError), e:
		print "!!! Error while importing logging modules while loading \"mod_%s\":" % s
	except portage_exception.PortageException, e:
# valid end of version components; integers specify offset from release version
# pre=prerelease, p=patchlevel (should always be followed by an int), rc=release candidate
# all but _p (where it is required) can be followed by an optional trailing integer
endversion={"pre":-2,"p":0,"alpha":-4,"beta":-3,"rc":-1}
# as there's no reliable way to set {}.keys() order
# endversion_keys will be used instead of endversion.keys
# to have fixed search order, so that "pre" is checked
endversion_keys = ["pre", "p", "alpha", "beta", "rc"]
#parse /etc/env.d and generate /etc/profile.env
def env_update(makelinks=1):
	"""Merge /etc/env.d/* into profile.env and csh.env, regenerate
	ld.so.conf and prelink.conf, and run ldconfig when needed.
	NOTE(review): many lines are elided in this extract (loop headers,
	else-branches, mtimedb bookkeeping, file closes, variable setup);
	the structure below is partial.
	"""
	if not os.path.exists(root+"etc/env.d"):
		os.makedirs(root+"etc/env.d",0755)
	fns=listdir(root+"etc/env.d",EmptyOnError=1)
	# env.d file names must begin with two digits; others are pruned.
	while (pos<len(fns)):
		if (fns[pos][0] not in string.digits) or (fns[pos][1] not in string.digits):
	# Fragments of the "specials" accumulator dict (variables merged across
	# env.d files) and of the colon-separated variable list follow.
	"KDEDIRS":[],"PATH":[],"CLASSPATH":[],"LDPATH":[],"MANPATH":[],
	"INFODIR":[],"INFOPATH":[],"ROOTPATH":[],"CONFIG_PROTECT":[],
	"CONFIG_PROTECT_MASK":[],"PRELINK_PATH":[],"PRELINK_PATH_MASK":[],
	"PYTHONPATH":[], "ADA_INCLUDE_PATH":[], "ADA_OBJECTS_PATH":[]
	"ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
	"PATH", "PRELINK_PATH",
	"PRELINK_PATH_MASK", "PYTHONPATH"
	# don't process backup files
	if x[-1]=='~' or x[-4:]==".bak":
	myconfig=getconfig(root+"etc/env.d/"+x)
	writemsg("!!! Parsing error in "+str(root)+"etc/env.d/"+str(x)+"\n")
	# process PATH, CLASSPATH, LDPATH
	for myspec in specials.keys():
		if myconfig.has_key(myspec):
			if myspec in colon_separated:
				specials[myspec].extend(myconfig[myspec].split(":"))
			specials[myspec].append(myconfig[myspec])
	# process all other variables
	for myenv in myconfig.keys():
		env[myenv]=myconfig[myenv]
	if os.path.exists(root+"etc/ld.so.conf"):
		myld=open(root+"etc/ld.so.conf")
		myldlines=myld.readlines()
		#each line has at least one char (a newline)
	# os.rename(root+"etc/ld.so.conf",root+"etc/ld.so.conf.bak")
	# Where is the new ld.so.conf generated? (achim)
	ld_cache_update=False
	newld=specials["LDPATH"]
	#ld.so.conf needs updating and ldconfig needs to be run
	myfd = atomic_ofstream(os.path.join(root, "etc", "ld.so.conf"))
	myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
	myfd.write("# contents of /etc/env.d directory\n")
	for x in specials["LDPATH"]:
	# Update prelink.conf if we are prelink-enabled
	newprelink = atomic_ofstream(os.path.join(root, "etc", "prelink.conf"))
	newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
	newprelink.write("# contents of /etc/env.d directory\n")
	for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
		newprelink.write("-l "+x+"\n");
	for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
		for y in specials["PRELINK_PATH_MASK"]:
	newprelink.write("-h "+x+"\n")
	for x in specials["PRELINK_PATH_MASK"]:
		newprelink.write("-b "+x+"\n")
	# mtimedb["ldpath"] caches per-directory mtimes so ldconfig is only run
	# when an LDPATH directory actually changed.
	if not mtimedb.has_key("ldpath"):
	for x in specials["LDPATH"]+['/usr/lib','/lib']:
		newldpathtime=os.stat(x)[stat.ST_MTIME]
		except SystemExit, e:
		if mtimedb["ldpath"].has_key(x):
			if mtimedb["ldpath"][x]==newldpathtime:
			mtimedb["ldpath"][x]=newldpathtime
		mtimedb["ldpath"][x]=newldpathtime
	# Only run ldconfig as needed
	if (ld_cache_update or makelinks):
		# ldconfig has very different behaviour between FreeBSD and Linux
		if ostype=="Linux" or ostype.lower().endswith("gnu"):
			# We can't update links if we haven't cleaned other versions first, as
			# an older package installed ON TOP of a newer version will cause ldconfig
			# to overwrite the symlinks we just made. -X means no links. After 'clean'
			# we can safely create links.
			writemsg(">>> Regenerating "+str(root)+"etc/ld.so.cache...\n")
			commands.getstatusoutput("cd / ; /sbin/ldconfig -r "+root)
			commands.getstatusoutput("cd / ; /sbin/ldconfig -X -r "+root)
		elif ostype in ("FreeBSD","DragonFly"):
			writemsg(">>> Regenerating "+str(root)+"var/run/ld-elf.so.hints...\n")
			commands.getstatusoutput("cd / ; /sbin/ldconfig -elf -i -f "+str(root)+"var/run/ld-elf.so.hints "+str(root)+"etc/ld.so.conf")
	del specials["LDPATH"]
	penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
	penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
	cenvnotice = penvnotice[:]
	penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
	cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
	#create /etc/profile.env for bash support
	outfile = atomic_ofstream(os.path.join(root, "etc", "profile.env"))
	outfile.write(penvnotice)
	for path in specials.keys():
		if len(specials[path])==0:
		outstring="export "+path+"='"
		if path in ["CONFIG_PROTECT","CONFIG_PROTECT_MASK"]:
			for x in specials[path][:-1]:
		for x in specials[path][:-1]:
			outstring=outstring+x+":"
		outstring=outstring+specials[path][-1]+"'"
		outfile.write(outstring+"\n")
	#create /etc/profile.env
	if type(env[x])!=types.StringType:
	outfile.write("export "+x+"='"+env[x]+"'\n")
	#create /etc/csh.env for (t)csh support
	outfile = atomic_ofstream(os.path.join(root, "etc", "csh.env"))
	outfile.write(cenvnotice)
	for path in specials.keys():
		if len(specials[path])==0:
		outstring="setenv "+path+" '"
		if path in ["CONFIG_PROTECT","CONFIG_PROTECT_MASK"]:
			for x in specials[path][:-1]:
		for x in specials[path][:-1]:
			outstring=outstring+x+":"
		outstring=outstring+specials[path][-1]+"'"
		outfile.write(outstring+"\n")
	#get it out of the way
	if type(env[x])!=types.StringType:
	outfile.write("setenv "+x+" '"+env[x]+"'\n")
def new_protect_filename(mydest, newmd5=None):
	"""Resolves a config-protect filename for merging, optionally
	using the last filename if the md5 matches.
	(dest,md5) ==> 'string' --- path_to_target_filename
	(dest) ==> ('next', 'highest') --- next_target and most-recent_target
	"""
	# config protection filename format: ._cfgNNNN_<name> (4-digit counter)
	# NOTE(review): prot_num/last_pfile initialisation, the branch for a
	# nonexistent dest, and the matched-md5 return are elided here.
	if (len(mydest) == 0):
		raise ValueError, "Empty path provided where a filename is required"
	if (mydest[-1]=="/"): # XXX add better directory checking
		raise ValueError, "Directory provided but this function requires a filename"
	if not os.path.exists(mydest):
	real_filename = os.path.basename(mydest)
	real_dirname = os.path.dirname(mydest)
	# Scan sibling ._cfgNNNN_<name> files to find the highest counter used.
	for pfile in listdir(real_dirname):
		if pfile[0:5] != "._cfg":
		if pfile[10:] != real_filename:
		new_prot_num = int(pfile[5:9])
		if new_prot_num > prot_num:
			prot_num = new_prot_num
		except SystemExit, e:
	prot_num = prot_num + 1
	new_pfile = os.path.normpath(real_dirname+"/._cfg"+string.zfill(prot_num,4)+"_"+real_filename)
	old_pfile = os.path.normpath(real_dirname+"/"+last_pfile)
	if last_pfile and newmd5:
		# Reuse the newest existing ._cfg file when it already has this md5.
		if portage_checksum.perform_md5(real_dirname+"/"+last_pfile) == newmd5:
	return (new_pfile, old_pfile)
785 #XXX: These two are now implemented in portage_util.py but are needed here
786 #XXX: until the isvalidatom() dependency is sorted out.
def grabdict_package(myfilename,juststrings=0,recursive=0):
	# grabdict() wrapper that drops (and warns about) entries whose key is
	# not a valid package atom.  NOTE(review): the deletion of the invalid
	# entry and the return are elided in this extract.
	pkgs=grabdict(myfilename, juststrings=juststrings, empty=1,recursive=recursive)
	for x in pkgs.keys():
		if not isvalidatom(x):
			writemsg("--- Invalid atom in %s: %s\n" % (myfilename, x))
def grabfile_package(myfilename,compatlevel=0,recursive=0):
	# grabfile() wrapper that validates each line as a package atom,
	# dropping (and warning about) invalid ones.  NOTE(review): the lines
	# deriving ``pkg`` from pkgs[x], the deletion and the return are
	# elided in this extract.
	pkgs=grabfile(myfilename,compatlevel,recursive=recursive)
	for x in range(len(pkgs)-1,-1,-1):
		if not isvalidatom(pkg):
			writemsg("--- Invalid atom in %s: %s\n" % (myfilename, pkgs[x]))
# returns a tuple. (version[string], error[string])
# They are pretty much mutually exclusive.
# Either version is a string and error is none, or
# version is None and error is a string
def ExtractKernelVersion(base_dir):
	"""Extract the kernel version from *base_dir*/Makefile, appending any
	localversion* file contents and the .config CONFIG_LOCALVERSION value.
	NOTE(review): the try headers, line-read loop, version-assembly
	assignments and localversion deletion/sort are elided in this extract.
	"""
	pathname = os.path.join(base_dir, 'Makefile')
	f = open(pathname, 'r')
	except OSError, details:
		return (None, str(details))
	except IOError, details:
		return (None, str(details))
	lines.append(f.readline())
	except OSError, details:
		return (None, str(details))
	except IOError, details:
		return (None, str(details))
	lines = map(string.strip, lines)
	#XXX: The following code relies on the ordering of vars within the Makefile
	# split on the '=' then remove annoying whitespace
	items = string.split(line, '=')
	items = map(string.strip, items)
	if items[0] == 'VERSION' or \
		items[0] == 'PATCHLEVEL':
	elif items[0] == 'SUBLEVEL':
	elif items[0] == 'EXTRAVERSION' and \
		items[-1] != items[0]:
	# Grab a list of files named localversion* and sort them
	localversions = os.listdir(base_dir)
	for x in range(len(localversions)-1,-1,-1):
		if localversions[x][:12] != "localversion":
	# Append the contents of each to the version string, stripping ALL whitespace
	for lv in localversions:
		version += string.join(string.split(string.join(grabfile(base_dir+"/"+lv))), "")
	# Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
	kernelconfig = getconfig(base_dir+"/.config")
	if kernelconfig and kernelconfig.has_key("CONFIG_LOCALVERSION"):
		version += string.join(string.split(kernelconfig["CONFIG_LOCALVERSION"]), "")
	return (version,None)
def autouse(myvartree,use_cache=1):
	"returns set of USE variables auto-enabled due to packages being installed"
	global usedefaults, autouse_val
	# Memoized: once computed, autouse_val is reused on later calls.
	if autouse_val is not None:
	# usedefaults maps USE flag -> list of deps; each dep is tested via
	# dep_match() against the vartree and matching flags accumulate in
	# myusevars.  NOTE(review): branch bodies and the return are elided.
	for myuse in usedefaults:
		for mydep in usedefaults[myuse]:
			if not myvartree.dep_match(mydep,use_cache=True):
		myusevars += " "+myuse
	autouse_val = myusevars
def check_config_instance(test):
	"""Validate that *test* is a portage.config instance.

	Raises TypeError when *test* is falsy or its class does not stringify
	to 'portage.config'.  NOTE(review): compares the class name as a
	string rather than using isinstance() -- presumably to tolerate module
	reloads; confirm before changing.
	"""
	looks_like_config = bool(test) and str(test.__class__) == 'portage.config'
	if not looks_like_config:
		raise TypeError("Invalid type for config object: %s" % test.__class__)
896 def __init__(self, clone=None, mycpv=None, config_profile_path=None, config_incrementals=None):
898 self.already_in_regenerate = 0
903 self.modifiedkeys = []
908 # Virtuals obtained from the vartree
909 self.treeVirtuals = {}
910 # Virtuals by user specification. Includes negatives.
911 self.userVirtuals = {}
912 # Virtual negatives from user specifications.
913 self.negVirtuals = {}
915 self.user_profile_dir = None
918 self.incrementals = copy.deepcopy(clone.incrementals)
919 self.profile_path = copy.deepcopy(clone.profile_path)
920 self.user_profile_dir = copy.deepcopy(clone.user_profile_dir)
922 self.module_priority = copy.deepcopy(clone.module_priority)
923 self.modules = copy.deepcopy(clone.modules)
925 self.depcachedir = copy.deepcopy(clone.depcachedir)
927 self.packages = copy.deepcopy(clone.packages)
928 self.virtuals = copy.deepcopy(clone.virtuals)
930 self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
931 self.userVirtuals = copy.deepcopy(clone.userVirtuals)
932 self.negVirtuals = copy.deepcopy(clone.negVirtuals)
934 self.use_defs = copy.deepcopy(clone.use_defs)
935 self.usemask = copy.deepcopy(clone.usemask)
937 self.configlist = copy.deepcopy(clone.configlist)
938 self.configlist[-1] = os.environ.copy()
939 self.configdict = { "globals": self.configlist[0],
940 "defaults": self.configlist[1],
941 "conf": self.configlist[2],
942 "pkg": self.configlist[3],
943 "auto": self.configlist[4],
944 "backupenv": self.configlist[5],
945 "env": self.configlist[6] }
946 self.profiles = copy.deepcopy(clone.profiles)
947 self.backupenv = copy.deepcopy(clone.backupenv)
948 self.pusedict = copy.deepcopy(clone.pusedict)
949 self.categories = copy.deepcopy(clone.categories)
950 self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict)
951 self.pmaskdict = copy.deepcopy(clone.pmaskdict)
952 self.punmaskdict = copy.deepcopy(clone.punmaskdict)
953 self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
954 self.pprovideddict = copy.deepcopy(clone.pprovideddict)
955 self.lookuplist = copy.deepcopy(clone.lookuplist)
956 self.uvlist = copy.deepcopy(clone.uvlist)
957 self.dirVirtuals = copy.deepcopy(clone.dirVirtuals)
958 self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
960 self.depcachedir = DEPCACHE_PATH
962 if not config_profile_path:
964 writemsg("config_profile_path not specified to class config\n")
965 self.profile_path = profiledir[:]
967 self.profile_path = config_profile_path[:]
969 if not config_incrementals:
970 writemsg("incrementals not specified to class config\n")
971 self.incrementals = copy.deepcopy(portage_const.INCREMENTALS)
973 self.incrementals = copy.deepcopy(config_incrementals)
975 self.module_priority = ["user","default"]
977 self.modules["user"] = getconfig(MODULES_FILE_PATH)
978 if self.modules["user"] == None:
979 self.modules["user"] = {}
980 self.modules["default"] = {
981 "portdbapi.metadbmodule": "cache.metadata.database",
982 "portdbapi.auxdbmodule": "cache.flat_hash.database",
988 # back up our incremental variables:
990 # configlist will contain: [ globals, defaults, conf, pkg, auto, backupenv (incrementals), origenv ]
992 # The symlink might not exist or might not be a symlink.
994 self.profiles=[abssymlink(self.profile_path)]
995 except SystemExit, e:
998 self.profiles=[self.profile_path]
1000 mypath = self.profiles[0]
1001 while os.path.exists(mypath+"/parent"):
1002 mypath = os.path.normpath(mypath+"///"+grabfile(mypath+"/parent")[0])
1003 if os.path.exists(mypath):
1004 self.profiles.insert(0,mypath)
1006 if os.environ.has_key("PORTAGE_CALLER") and os.environ["PORTAGE_CALLER"] == "repoman":
1009 # XXX: This should depend on ROOT?
1010 if os.path.exists("/"+CUSTOM_PROFILE_PATH):
1011 self.user_profile_dir = os.path.normpath("/"+"///"+CUSTOM_PROFILE_PATH)
1012 self.profiles.append(self.user_profile_dir[:])
1014 self.packages_list = [grabfile_package(os.path.join(x, "packages")) for x in self.profiles]
1015 self.packages = stack_lists(self.packages_list, incremental=1)
1016 del self.packages_list
1017 #self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)
1020 self.prevmaskdict={}
1021 for x in self.packages:
1022 mycatpkg=dep_getkey(x)
1023 if not self.prevmaskdict.has_key(mycatpkg):
1024 self.prevmaskdict[mycatpkg]=[x]
1026 self.prevmaskdict[mycatpkg].append(x)
1028 # get profile-masked use flags -- INCREMENTAL Child over parent
1029 usemask_lists = [grabfile(os.path.join(x, "use.mask")) for x in self.profiles]
1030 self.usemask = stack_lists(usemask_lists, incremental=True)
1032 use_defs_lists = [grabdict(os.path.join(x, "use.defaults")) for x in self.profiles]
1033 self.use_defs = stack_dictlist(use_defs_lists, incremental=True)
1037 mygcfg_dlists = [getconfig(os.path.join(x, "make.globals")) for x in self.profiles+["/etc"]]
1038 self.mygcfg = stack_dicts(mygcfg_dlists, incrementals=portage_const.INCREMENTALS, ignore_none=1)
1040 if self.mygcfg == None:
1042 except SystemExit, e:
1044 except Exception, e:
1045 writemsg("!!! %s\n" % (e))
1046 writemsg("!!! Incorrect multiline literals can cause this. Do not use them.\n")
1047 writemsg("!!! Errors in this file should be reported on bugs.gentoo.org.\n")
1049 self.configlist.append(self.mygcfg)
1050 self.configdict["globals"]=self.configlist[-1]
1055 mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults")) for x in self.profiles]
1056 self.mygcfg = stack_dicts(mygcfg_dlists, incrementals=portage_const.INCREMENTALS, ignore_none=1)
1057 #self.mygcfg = grab_stacked("make.defaults", self.profiles, getconfig)
1058 if self.mygcfg == None:
1060 except SystemExit, e:
1062 except Exception, e:
1063 writemsg("!!! %s\n" % (e))
1064 writemsg("!!! 'rm -Rf /usr/portage/profiles; emerge sync' may fix this. If it does\n")
1065 writemsg("!!! not then please report this to bugs.gentoo.org and, if possible, a dev\n")
1066 writemsg("!!! on #gentoo (irc.freenode.org)\n")
1068 self.configlist.append(self.mygcfg)
1069 self.configdict["defaults"]=self.configlist[-1]
1072 # XXX: Should depend on root?
1073 self.mygcfg=getconfig("/"+MAKE_CONF_FILE,allow_sourcing=True)
1074 if self.mygcfg == None:
1076 except SystemExit, e:
1078 except Exception, e:
1079 writemsg("!!! %s\n" % (e))
1080 writemsg("!!! Incorrect multiline literals can cause this. Do not use them.\n")
1084 self.configlist.append(self.mygcfg)
1085 self.configdict["conf"]=self.configlist[-1]
1087 self.configlist.append({})
1088 self.configdict["pkg"]=self.configlist[-1]
1091 self.configlist.append({})
1092 self.configdict["auto"]=self.configlist[-1]
1094 #backup-env (for recording our calculated incremental variables:)
1095 self.backupenv = os.environ.copy()
1096 self.configlist.append(self.backupenv) # XXX Why though?
1097 self.configdict["backupenv"]=self.configlist[-1]
1099 self.configlist.append(os.environ.copy())
1100 self.configdict["env"]=self.configlist[-1]
1103 # make lookuplist for loading package.*
1104 self.lookuplist=self.configlist[:]
1105 self.lookuplist.reverse()
1107 if os.environ.get("PORTAGE_CALLER","") == "repoman":
1108 # repoman shouldn't use local settings.
1109 locations = [self["PORTDIR"] + "/profiles"]
1111 self.pkeywordsdict = {}
1112 self.punmaskdict = {}
1114 locations = [self["PORTDIR"] + "/profiles", USER_CONFIG_PATH]
1115 for ov in self["PORTDIR_OVERLAY"].split():
1116 ov = os.path.normpath(ov)
1117 if os.path.isdir(ov+"/profiles"):
1118 locations.append(ov+"/profiles")
1120 pusedict=grabdict_package(USER_CONFIG_PATH+"/package.use", recursive=1)
1122 for key in pusedict.keys():
1123 cp = dep_getkey(key)
1124 if not self.pusedict.has_key(cp):
1125 self.pusedict[cp] = {}
1126 self.pusedict[cp][key] = pusedict[key]
# NOTE(review): tail of config.__init__ from an elided excerpt; the leading
# numbers are original-file line numbers and several lines between them are
# missing. Code is left byte-identical; comments only.
# --- per-package keyword overrides from the user's package.keywords file ---
1129 pkgdict=grabdict_package(USER_CONFIG_PATH+"/package.keywords", recursive=1)
1130 self.pkeywordsdict = {}
1131 for key in pkgdict.keys():
1132 # default to ~arch if no specific keyword is given
1133 if not pkgdict[key]:
1135 if self.configdict["defaults"] and self.configdict["defaults"].has_key("ACCEPT_KEYWORDS"):
1136 groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split()
# prefix every plain keyword with "~" (keywords already starting with ~ or - pass through)
1139 for keyword in groups:
1140 if not keyword[0] in "~-":
1141 mykeywordlist.append("~"+keyword)
1142 pkgdict[key] = mykeywordlist
# index the overrides by category/package (cp) for fast lookup later
1143 cp = dep_getkey(key)
1144 if not self.pkeywordsdict.has_key(cp):
1145 self.pkeywordsdict[cp] = {}
1146 self.pkeywordsdict[cp][key] = pkgdict[key]
# --- user package.unmask entries, grouped by cp ---
1149 pkgunmasklines = grabfile_package(USER_CONFIG_PATH+"/package.unmask",recursive=1)
1150 self.punmaskdict = {}
1151 for x in pkgunmasklines:
1152 mycatpkg=dep_getkey(x)
1153 if self.punmaskdict.has_key(mycatpkg):
1154 self.punmaskdict[mycatpkg].append(x)
1156 self.punmaskdict[mycatpkg]=[x]
1158 #getting categories from an external file now
1159 categories = [grabfile(os.path.join(x, "categories")) for x in locations]
1160 self.categories = stack_lists(categories, incremental=1)
# stacked arch.list from all config locations, exported as PORTAGE_ARCHLIST
1163 archlist = [grabfile(os.path.join(x, "arch.list")) for x in locations]
1164 archlist = stack_lists(archlist, incremental=1)
1165 self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
1167 # get virtuals -- needs categories
1168 self.loadVirtuals('/')
# --- package.mask from all profiles (plus an extra recursive grab), stacked ---
1171 pkgmasklines = [grabfile_package(os.path.join(x, "package.mask")) for x in self.profiles]
1173 pkgmasklines.append(grabfile_package(l+os.path.sep+"package.mask", recursive=1))
1174 pkgmasklines = stack_lists(pkgmasklines, incremental=1)
1177 for x in pkgmasklines:
1178 mycatpkg=dep_getkey(x)
1179 if self.pmaskdict.has_key(mycatpkg):
1180 self.pmaskdict[mycatpkg].append(x)
1182 self.pmaskdict[mycatpkg]=[x]
# --- package.provided: drop malformed entries (iterate backwards so del is safe) ---
1184 pkgprovidedlines = [grabfile(os.path.join(x, "package.provided")) for x in self.profiles]
1185 pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
1186 for x in range(len(pkgprovidedlines)-1, -1, -1):
1187 cpvr = catpkgsplit(pkgprovidedlines[x])
1188 if not cpvr or cpvr[0] == "null":
1189 writemsg("Invalid package name in package.provided: "+pkgprovidedlines[x]+"\n")
1190 del pkgprovidedlines[x]
1192 self.pprovideddict = {}
1193 for x in pkgprovidedlines:
1197 mycatpkg=dep_getkey(x)
1198 if self.pprovideddict.has_key(mycatpkg):
1199 self.pprovideddict[mycatpkg].append(x)
1201 self.pprovideddict[mycatpkg]=[x]
# lookuplist is configlist reversed: highest-priority dict first for lookups
1203 self.lookuplist=self.configlist[:]
1204 self.lookuplist.reverse()
1206 useorder=self["USE_ORDER"]
1208 # reasonable defaults; this is important as without USE_ORDER,
1209 # USE will always be "" (nothing set)!
1210 useorder="env:pkg:conf:auto:defaults"
1211 useordersplit=useorder.split(":")
# build uvlist per USE_ORDER; PKGUSE may only be set internally, never by config
1214 for x in useordersplit:
1215 if self.configdict.has_key(x):
1216 if "PKGUSE" in self.configdict[x].keys():
1217 del self.configdict[x]["PKGUSE"] # Delete PkgUse, Not legal to set.
1218 #prepend db to list to get correct order
1219 self.uvlist[0:0]=[self.configdict[x]]
1221 self.configdict["env"]["PORTAGE_GID"]=str(portage_gid)
1222 self.backupenv["PORTAGE_GID"]=str(portage_gid)
# an empty (but set) PORT_LOGDIR is known to break doebuild; drop it with a warning
1224 if self.has_key("PORT_LOGDIR") and not self["PORT_LOGDIR"]:
1225 # port_logdir is defined, but empty. this causes a traceback in doebuild.
1226 writemsg(yellow("!!!")+" PORT_LOGDIR was defined, but set to nothing.\n")
1227 writemsg(yellow("!!!")+" Disabling it. Please set it to a non null value.\n")
1228 del self["PORT_LOGDIR"]
# PORTAGE_CACHEDIR is deprecated in favor of PORTAGE_DEPCACHEDIR; honor then remove it
1230 if self["PORTAGE_CACHEDIR"]:
1231 # XXX: Deprecated -- April 15 -- NJ
1232 writemsg(yellow(">>> PORTAGE_CACHEDIR has been deprecated!")+"\n")
1233 writemsg(">>> Please use PORTAGE_DEPCACHEDIR instead.\n")
1234 self.depcachedir = self["PORTAGE_CACHEDIR"]
1235 del self["PORTAGE_CACHEDIR"]
1237 if self["PORTAGE_DEPCACHEDIR"]:
1238 #the auxcache is the only /var/cache/edb/ entry that stays at / even when "root" changes.
1239 # XXX: Could move with a CHROOT functionality addition.
1240 self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
1241 del self["PORTAGE_DEPCACHEDIR"]
# validate PORTDIR_OVERLAY entries: keep only existing directories
1243 overlays = string.split(self["PORTDIR_OVERLAY"])
1247 ov=os.path.normpath(ov)
1248 if os.path.isdir(ov):
1251 writemsg(red("!!! Invalid PORTDIR_OVERLAY (not a dir): "+ov+"\n"))
1252 self["PORTDIR_OVERLAY"] = string.join(new_ov)
1253 self.backup_changes("PORTDIR_OVERLAY")
# --- FEATURES sanity: dedupe, then drop features whose prerequisites are missing ---
1257 self.features = portage_util.unique_array(self["FEATURES"].split())
1259 #XXX: Should this be temporary? Is it possible at all to have a default?
1260 if "gpg" in self.features:
1261 if not os.path.exists(self["PORTAGE_GPG_DIR"]) or not os.path.isdir(self["PORTAGE_GPG_DIR"]):
1262 writemsg("PORTAGE_GPG_DIR is invalid. Removing gpg from FEATURES.\n")
1263 self.features.remove("gpg")
1265 if not portage_exec.sandbox_capable and ("sandbox" in self.features or "usersandbox" in self.features):
1266 writemsg(red("!!! Problem with sandbox binary. Disabling...\n\n"))
1267 if "sandbox" in self.features:
1268 self.features.remove("sandbox")
1269 if "usersandbox" in self.features:
1270 self.features.remove("usersandbox")
# canonicalize FEATURES: sorted, prefixed with "-*" so the stored value is absolute
1272 self.features.sort()
1273 self["FEATURES"] = " ".join(["-*"]+self.features)
1274 self.backup_changes("FEATURES")
# default CBUILD to CHOST when unset
1276 if not len(self["CBUILD"]) and len(self["CHOST"]):
1277 self["CBUILD"] = self["CHOST"]
1278 self.backup_changes("CBUILD")
def loadVirtuals(self, root):
	"""Populate self.virtuals from the virtuals computed for *root*."""
	self.virtuals = self.getvirtuals(root)
# Resolve and import the highest-priority module registered for
# property_string; dumps a traceback (without aborting here) when the
# import fails. NOTE(review): lines between these are elided in this
# excerpt (likely the try/except around load_mod) — code untouched.
1286 def load_best_module(self,property_string):
1287 best_mod = best_from_dict(property_string,self.modules,self.module_priority)
1289 mod = load_mod(best_mod)
1291 dump_traceback(red("Error: Failed to import module '%s'") % best_mod, noiselevel=0)
# Guard called before mutating config state: raises when the instance is
# locked. NOTE(review): the condition line (presumably checking a lock
# flag) is elided in this excerpt.
1301 def modifying(self):
1303 raise Exception, "Configuration is locked."
# Persist the current value of an "env" config key into backupenv so it
# survives reset(). Raises KeyError when the key is absent from the env
# dict. NOTE(review): the else line between 1307 and 1309 is elided.
1305 def backup_changes(self,key=None):
1306 if key and self.configdict["env"].has_key(key):
1307 self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
1309 raise KeyError, "No such key defined in environment: %s" % key
1311 def reset(self,keeping_pkg=0,use_cache=1):
1312 "reset environment to original settings"
# drop keys that were added to the live environment but are not in the backup
1313 for x in self.configlist[-1].keys():
1314 if x not in self.backupenv.keys():
1315 del self.configlist[-1][x]
1317 self.configdict["env"].update(self.backupenv)
1319 self.modifiedkeys = []
# per-package settings are cleared unless keeping_pkg asked to retain them
# NOTE(review): the guard using keeping_pkg is elided in this excerpt.
1322 self.configdict["pkg"].clear()
1323 self.regenerate(use_cache=use_cache)
# Load per-package build metadata files from a vdb/build info directory
# into configdict["pkg"]. NOTE(review): several lines (else/try/except
# bodies) are elided in this excerpt — code untouched.
1325 def load_infodir(self,infodir):
1326 if self.configdict.has_key("pkg"):
1327 for x in self.configdict["pkg"].keys():
1328 del self.configdict["pkg"][x]
1330 writemsg("No pkg setup for settings instance?\n")
1333 if os.path.exists(infodir):
1334 if os.path.exists(infodir+"/environment"):
1335 self.configdict["pkg"]["PORT_ENV_FILE"] = infodir+"/environment"
# only all-uppercase filenames are treated as saved variables
1337 myre = re.compile('^[A-Z]+$')
1338 for filename in listdir(infodir,filesonly=1,EmptyOnError=1):
1339 if myre.match(filename):
1341 mydata = string.strip(open(infodir+"/"+filename).read())
# cap at 2048 bytes; USE gets a "-*" prefix so the saved flags are absolute
1342 if len(mydata)<2048:
1343 if filename == "USE":
1344 self.configdict["pkg"][filename] = "-* "+mydata
1346 self.configdict["pkg"][filename] = mydata
1347 except SystemExit, e:
1350 writemsg("!!! Unable to read file: %s\n" % infodir+"/"+filename)
# Switch the config to a specific package (cpv): apply matching
# package.use (pusedict) entries as PKGUSE/USE, then reset with the pkg
# dict kept. NOTE(review): lines between are elided in this excerpt.
1355 def setcpv(self,mycpv,use_cache=1):
1358 cp = dep_getkey(mycpv)
1360 if self.pusedict.has_key(cp):
1361 self.pusekey = best_match_to_list(self.mycpv, self.pusedict[cp].keys())
1363 newpuse = string.join(self.pusedict[cp][self.pusekey])
# skip the (elided) update path when the per-package USE did not change
1364 if newpuse == self.puse:
1367 self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
1368 self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE
1369 self.reset(keeping_pkg=1,use_cache=use_cache)
1371 def setinst(self,mycpv,mydbapi):
1372 # Grab the virtuals this package provides and add them into the tree virtuals.
1373 provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
1374 if isinstance(mydbapi, portdbapi):
# reduce conditional PROVIDE against the package's USE flags
1377 myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
1378 virts = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(provides), uselist=myuse.split()))
1380 cp = dep_getkey(mycpv)
# NOTE(review): the loop header over virts is elided in this excerpt.
1382 virt = dep_getkey(virt)
1383 if not self.treeVirtuals.has_key(virt):
1384 self.treeVirtuals[virt] = []
1385 # XXX: Is this bad? -- It's a permanent modification
1386 if cp not in self.treeVirtuals[virt]:
1387 self.treeVirtuals[virt].append(cp)
# recompile the merged virtuals mapping after mutating treeVirtuals
1389 self.virtuals = self.__getvirtuals_compile()
# Recompute incremental variables (USE and friends) by folding the config
# stack in priority order, honoring "-" / "-*" negations, then expand
# USE_EXPAND vars and prepend ARCH. NOTE(review): many lines are elided
# in this excerpt (loop headers, else branches) — code untouched.
1392 def regenerate(self,useonly=0,use_cache=1):
1393 global usesplit,profiledir
# re-entrancy guard: autouse() can call back into regenerate
1395 if self.already_in_regenerate:
1396 # XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops.
1397 writemsg("!!! Looping in regenerate.\n",1)
1400 self.already_in_regenerate = 1
1403 myincrementals=["USE"]
1405 myincrementals=portage_const.INCREMENTALS
1406 for mykey in myincrementals:
1409 # XXX Global usage of db... Needs to go away somehow.
1410 if db.has_key(root) and db[root].has_key("vartree"):
1411 self.configdict["auto"]["USE"]=autouse(db[root]["vartree"],use_cache=use_cache)
1413 self.configdict["auto"]["USE"]=""
# fold each config dict (except the final merged one) into myflags
1415 mydbs=self.configlist[:-1]
1419 if not curdb.has_key(mykey):
1421 #variables are already expanded
1422 mysplit=curdb[mykey].split()
1426 # "-*" is a special "minus" var that means "unset all settings".
1427 # so USE="-* gnome" will have *just* gnome enabled.
1432 # Not legal. People assume too much. Complain.
1433 writemsg(red("USE flags should not start with a '+': %s\n" % x))
# "-flag" removes a previously accumulated flag
1437 if (x[1:] in myflags):
1439 del myflags[myflags.index(x[1:])]
1442 # We got here, so add it now.
1443 if x not in myflags:
1447 #store setting in last element of configlist, the original environment:
1448 self.configlist[-1][mykey]=string.join(myflags," ")
1451 #cache split-up USE var in a global
1454 for x in string.split(self.configlist[-1]["USE"]):
1455 if x not in self.usemask:
# USE_EXPAND variables become lowercase-prefixed USE flags (e.g. VIDEO_CARDS=x -> video_cards_x)
1458 if self.has_key("USE_EXPAND"):
1459 for var in string.split(self["USE_EXPAND"]):
1460 if self.has_key(var):
1461 for x in string.split(self[var]):
1462 mystr = string.lower(var)+"_"+x
1463 if mystr not in usesplit and mystr not in self.usemask:
1464 usesplit.append(mystr)
1466 # Pre-Pend ARCH variable to USE settings so '-*' in env doesn't kill arch.
1467 if self.configdict["defaults"].has_key("ARCH"):
1468 if self.configdict["defaults"]["ARCH"]:
1469 if self.configdict["defaults"]["ARCH"] not in usesplit:
1470 usesplit.insert(0,self.configdict["defaults"]["ARCH"])
1472 self.configlist[-1]["USE"]=string.join(usesplit," ")
1474 self.already_in_regenerate = 0
# Compute the virtuals mapping for myroot from profile dirs, the user
# profile dir, and (outside repoman) the installed-package tree; returns
# a cached self.virtuals early when available. NOTE(review): several
# lines are elided in this excerpt — code untouched.
1476 def getvirtuals(self, myroot):
1478 return self.virtuals
1482 # This breaks catalyst/portage when setting to a fresh/empty root.
1483 # Virtuals cannot be calculated because there is nothing to work
1484 # from. So the only ROOT prefixed dir should be local configs.
1485 #myvirtdirs = prefix_array(self.profiles,myroot+"/")
1486 myvirtdirs = copy.deepcopy(self.profiles)
# the user profile dir is handled separately below, so drop it from the list
1487 while self.user_profile_dir in myvirtdirs:
1488 myvirtdirs.remove(self.user_profile_dir)
1492 # R1: Collapse profile virtuals
1493 # R2: Extract user-negatives.
1494 # R3: Collapse user-virtuals.
1495 # R4: Apply user negatives to all except user settings.
1497 # Order of preference:
1498 # 1. user-declared that are installed
1499 # 3. installed and in profile
1501 # 2. user-declared set
# per-profile "virtuals" files, reversed so later profiles take precedence
1504 self.dirVirtuals = [grabdict(os.path.join(x, "virtuals")) for x in myvirtdirs]
1505 self.dirVirtuals.reverse()
1507 if self.user_profile_dir and os.path.exists(self.user_profile_dir+"/virtuals"):
1508 self.userVirtuals = grabdict(self.user_profile_dir+"/virtuals")
1510 # Store all the negatives for later.
1511 for x in self.userVirtuals.keys():
1512 self.negVirtuals[x] = []
1513 for y in self.userVirtuals[x]:
# NOTE(review): the guard selecting negative ("-"-prefixed) entries is elided
1515 self.negVirtuals[x].append(y[:])
1517 # Collapse the user virtuals so that we don't deal with negatives.
1518 self.userVirtuals = stack_dictlist([self.userVirtuals],incremental=1)
1520 # Collapse all the profile virtuals including user negations.
1521 self.dirVirtuals = stack_dictlist([self.negVirtuals]+self.dirVirtuals,incremental=1)
1523 # Repoman does not use user or tree virtuals.
1524 if os.environ.get("PORTAGE_CALLER","") != "repoman":
1525 # XXX: vartree does not use virtuals, does user set matter?
1526 temp_vartree = vartree(myroot,self.dirVirtuals,categories=self.categories)
1527 # Reduce the provides into a list by CP.
1528 self.treeVirtuals = map_dictlist_vals(getCPFromCPV,temp_vartree.get_all_provides())
1530 return self.__getvirtuals_compile()
1532 def __getvirtuals_compile(self):
1533 """Actually generate the virtuals we have collected.
1534 The results are reversed so the list order is left to right.
1535 Given data is [Best,Better,Good] sets of [Good, Better, Best]"""
1537 # Virtuals by profile+tree preferences.
1539 # Virtuals by user+tree preferences.
1542 # If a user virtual is already installed, we preference it.
1543 for x in self.userVirtuals.keys():
# NOTE(review): utVirtuals/ptVirtuals initialization lines are elided in this excerpt
1545 if self.treeVirtuals.has_key(x):
1546 for y in self.userVirtuals[x]:
1547 if y in self.treeVirtuals[x]:
1548 utVirtuals[x].append(y)
1549 #print "F:",utVirtuals
1550 #utVirtuals[x].reverse()
1551 #print "R:",utVirtuals
1553 # If a profile virtual is already installed, we preference it.
1554 for x in self.dirVirtuals.keys():
1556 if self.treeVirtuals.has_key(x):
1557 for y in self.dirVirtuals[x]:
1558 if y in self.treeVirtuals[x]:
1559 ptVirtuals[x].append(y)
1561 # UserInstalled, ProfileInstalled, Installed, User, Profile
1562 biglist = [utVirtuals, ptVirtuals, self.treeVirtuals,
1563 self.userVirtuals, self.dirVirtuals]
1565 # We reverse each dictlist so that the order matches everything
1566 # else in portage. [-*, a, b] [b, c, d] ==> [b, a]
1567 for dictlist in biglist:
1568 for key in dictlist:
1569 dictlist[key].reverse()
1571 # User settings and profile settings take precedence over tree.
1572 val = stack_dictlist(biglist,incremental=1)
# Delete mykey from every dict in the lookup chain.
# NOTE(review): the loop body is elided in this excerpt.
1576 def __delitem__(self,mykey):
1577 for x in self.lookuplist:
# Dictionary-style lookup: first hit in priority-ordered lookuplist wins.
# Contains a disabled (if 0) URL-escaping path for PORTAGE_BINHOST and a
# special case appending /etc/env.d to CONFIG_PROTECT_MASK.
# NOTE(review): several lines (returns, try) are elided in this excerpt.
1582 def __getitem__(self,mykey):
1584 for x in self.lookuplist:
1586 writemsg("!!! lookuplist is null.\n")
1587 elif x.has_key(mykey):
# dead code: guarded by "0 and", kept for reference
1591 if 0 and match and mykey in ["PORTAGE_BINHOST"]:
1592 # These require HTTP Encoding
1595 if urllib.unquote(match) != match:
1596 writemsg("Note: %s already contains escape codes." % (mykey))
1598 match = urllib.quote(match)
1599 except SystemExit, e:
1602 writemsg("Failed to fix %s using urllib, attempting to continue.\n" % (mykey))
1605 elif mykey == "CONFIG_PROTECT_MASK":
1606 match += " /etc/env.d"
# True when any dict in the lookup chain holds mykey.
# NOTE(review): the return lines are elided in this excerpt.
1610 def has_key(self,mykey):
1611 for x in self.lookuplist:
1612 if x.has_key(mykey):
# NOTE(review): orphan fragment of a following method (def line elided)
1618 for x in self.lookuplist:
1624 def __setitem__(self,mykey,myvalue):
1625 "set a value; will be thrown away at reset() time"
# values must be plain strings; records the key as modified and writes
# into the "env" layer only
1626 if type(myvalue) != types.StringType:
1627 raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
1629 self.modifiedkeys += [mykey]
1630 self.configdict["env"][mykey]=myvalue
# NOTE(review): body of environ() — the def line (original 1632) is
# elided in this excerpt. Builds a plain dict of all keys; defaults HOME
# to BUILD_PREFIX when unset.
1633 "return our locally-maintained environment"
1635 for x in self.keys():
1637 if not mydict.has_key("HOME") and mydict.has_key("BUILD_PREFIX"):
1638 writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
1639 mydict["HOME"]=mydict["BUILD_PREFIX"][:]
1644 # XXX This would be to replace getstatusoutput completely.
1645 # XXX Issue: cannot block execution. Deadlock condition.
1646 def spawn(mystring,mysettings,debug=0,free=0,droppriv=0,fd_pipes=None,**keywords):
1647 """spawn a subprocess with optional sandbox protection,
1648 depending on whether sandbox is enabled. The "free" argument,
1649 when set to 1, will disable sandboxing. This allows us to
1650 spawn processes that are supposed to modify files outside of the
1651 sandbox. We can't use os.system anymore because it messes up
1652 signal handling. Using spawn allows our Portage signal handler
# mysettings may be a plain dict (used as env directly) or a config instance
1655 if type(mysettings) == types.DictType:
1657 keywords["opt_name"]="[ %s ]" % "portage"
1659 check_config_instance(mysettings)
1660 env=mysettings.environ()
1661 keywords["opt_name"]="[%s]" % mysettings["PF"]
1663 # XXX: Negative RESTRICT word
# droppriv only honored when userpriv feature is on and RESTRICT doesn't forbid it
1664 droppriv=(droppriv and ("userpriv" in features) and not \
1665 (("nouserpriv" in string.split(mysettings["RESTRICT"])) or \
1666 ("userpriv" in string.split(mysettings["RESTRICT"]))))
1668 if droppriv and not uid and portage_gid and portage_uid:
1669 keywords.update({"uid":portage_uid,"gid":portage_gid,"groups":[portage_gid],"umask":002})
# sandbox is skipped when neither (user)sandbox feature applies to this privilege mode
1672 free=((droppriv and "usersandbox" not in features) or \
1673 (not droppriv and "sandbox" not in features and "usersandbox" not in features))
1676 keywords["opt_name"] += " sandbox"
1677 return portage_exec.spawn_sandbox(mystring,env=env,**keywords)
1679 keywords["opt_name"] += " bash"
1680 return portage_exec.spawn_bash(mystring,env=env,**keywords)
# NOTE(review): large function from an elided excerpt; many original
# lines (else branches, try headers, returns) are missing between the
# numbered lines below. Code untouched; comments only.
1684 def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
1685 "fetch files. Will use digest file if available."
1687 # 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
1688 if ("mirror" in mysettings["RESTRICT"].split()) or \
1689 ("nomirror" in mysettings["RESTRICT"].split()):
1690 if ("mirror" in features) and ("lmirror" not in features):
1691 # lmirror should allow you to bypass mirror restrictions.
1692 # XXX: This is not a good thing, and is temporary at best.
1693 print ">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."
1696 global thirdpartymirrors
1698 check_config_instance(mysettings)
1700 custommirrors=grabdict(CUSTOM_MIRRORS_FILE,recursive=1)
# distfile locking is skipped for listonly mode or when distlocks is off
1704 if listonly or ("distlocks" not in features):
1708 if "skiprocheck" in features:
# read-only DISTDIR with locking enabled is a misconfiguration; warn the user
1711 if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
1713 writemsg(red("!!! You are fetching to a read-only filesystem, you should turn locking off"));
1714 writemsg("!!! This can be done by adding -distlocks to FEATURES in /etc/make.conf");
1717 # local mirrors are always added
1718 if custommirrors.has_key("local"):
1719 mymirrors += custommirrors["local"]
1721 if ("nomirror" in mysettings["RESTRICT"].split()) or \
1722 ("mirror" in mysettings["RESTRICT"].split()):
1723 # We don't add any mirrors.
1727 mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
# load the digest file (if present) so sizes/checksums can steer resume/skip logic
1730 digestfn = mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
1731 if os.path.exists(digestfn):
1732 mydigests = digestParseFile(digestfn)
# split filesystem-path mirrors (leading '/') out of the mirror list
1735 for x in range(len(mymirrors)-1,-1,-1):
1736 if mymirrors[x] and mymirrors[x][0]=='/':
1737 fsmirrors += [mymirrors[x]]
# first pass: satisfy requests from local filesystem mirrors by copying
1740 for myuri in myuris:
1741 myfile=os.path.basename(myuri)
1743 destdir = mysettings["DISTDIR"]+"/"
1744 if not os.path.exists(destdir+myfile):
1745 for mydir in fsmirrors:
1746 if os.path.exists(mydir+"/"+myfile):
1747 writemsg(_("Local mirror has file: %(file)s\n" % {"file":myfile}))
1748 shutil.copyfile(mydir+"/"+myfile,destdir+"/"+myfile)
1750 except (OSError,IOError),e:
1751 # file does not exist
1752 writemsg(_("!!! %(file)s not found in %(dir)s\n") % {"file":myfile, "dir":mysettings["DISTDIR"]})
1755 if "fetch" in mysettings["RESTRICT"].split():
1756 # fetch is restricted. Ensure all files have already been downloaded; otherwise,
1757 # print message and exit.
1759 for myuri in myuris:
1760 myfile=os.path.basename(myuri)
1762 mystat=os.stat(mysettings["DISTDIR"]+"/"+myfile)
1763 except (OSError,IOError),e:
1764 # file does not exist
1765 writemsg(_("!!! %(file)s not found in %(dir)s\n") % {"file":myfile, "dir":mysettings["DISTDIR"]})
1769 print "!!!",mysettings["CATEGORY"]+"/"+mysettings["PF"],"has fetch restriction turned on."
1770 print "!!! This probably means that this ebuild's files must be downloaded"
1771 print "!!! manually. See the comments in the ebuild for more information."
# give the ebuild a chance to print its own instructions via pkg_nofetch
1773 spawn(EBUILD_SH_BINARY+" nofetch",mysettings)
# build filedict: for each file, the ordered list of candidate URIs
1776 locations=mymirrors[:]
1778 primaryuri_indexes={}
1779 for myuri in myuris:
1780 myfile=os.path.basename(myuri)
1781 if not filedict.has_key(myfile):
1783 for y in range(0,len(locations)):
1784 filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
# mirror:// URIs expand to custom mirrors first, then shuffled official ones
1785 if myuri[:9]=="mirror://":
1786 eidx = myuri.find("/", 9)
1788 mirrorname = myuri[9:eidx]
1790 # Try user-defined mirrors first
1791 if custommirrors.has_key(mirrorname):
1792 for cmirr in custommirrors[mirrorname]:
1793 filedict[myfile].append(cmirr+"/"+myuri[eidx+1:])
1794 # remove the mirrors we tried from the list of official mirrors
1795 if cmirr.strip() in thirdpartymirrors[mirrorname]:
1796 thirdpartymirrors[mirrorname].remove(cmirr)
1797 # now try the official mirrors
1798 if thirdpartymirrors.has_key(mirrorname):
1800 shuffle(thirdpartymirrors[mirrorname])
1801 except SystemExit, e:
# historical workaround: random.shuffle crashing indicated a miscompiled python
1804 writemsg(red("!!! YOU HAVE A BROKEN PYTHON/GLIBC.\n"))
1805 writemsg( "!!! You are most likely on a pentium4 box and have specified -march=pentium4\n")
1806 writemsg( "!!! or -fpmath=sse2. GCC was generating invalid sse2 instructions in versions\n")
1807 writemsg( "!!! prior to 3.2.3. Please merge the latest gcc or rebuid python with either\n")
1808 writemsg( "!!! -march=pentium3 or set -mno-sse2 in your cflags.\n\n\n")
1811 for locmirr in thirdpartymirrors[mirrorname]:
1812 filedict[myfile].append(locmirr+"/"+myuri[eidx+1:])
1814 if not filedict[myfile]:
1815 writemsg("No known mirror by the name: %s\n" % (mirrorname))
1817 writemsg("Invalid mirror definition in SRC_URI:\n")
1818 writemsg(" %s\n" % (myuri))
# RESTRICT=primaryuri: upstream URIs go to the front, preserving SRC_URI order
1820 if "primaryuri" in mysettings["RESTRICT"].split():
1821 # Use the source site first.
1822 if primaryuri_indexes.has_key(myfile):
1823 primaryuri_indexes[myfile] += 1
1825 primaryuri_indexes[myfile] = 0
1826 filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
1828 filedict[myfile].append(myuri)
1830 missingSourceHost = False
1831 for myfile in filedict.keys(): # Gives a list, not just the first one
1832 if not filedict[myfile]:
1833 writemsg("Warning: No mirrors available for file '%s'\n" % (myfile))
1834 missingSourceHost = True
1835 if missingSourceHost:
1837 del missingSourceHost
1840 if not os.access(mysettings["DISTDIR"]+"/",os.W_OK):
1842 print "!!! No write access to %s" % mysettings["DISTDIR"]+"/"
# local helper: force portage group ownership/perms on DISTDIR paths
1845 def distdir_perms(filename):
1847 portage_util.apply_permissions(filename, gid=portage_gid, mode=0775)
1849 if oe.errno == errno.EPERM:
1850 writemsg("!!! Unable to apply group permissions to '%s'. Non-root users may experience issues.\n"
1854 distdir_perms(mysettings["DISTDIR"])
# ensure the .locks subdirectory exists and is writable before taking locks
1855 if use_locks and locks_in_subdir:
1856 distlocks_subdir = os.path.join(mysettings["DISTDIR"], locks_in_subdir)
1858 distdir_perms(distlocks_subdir)
1860 if oe.errno == errno.ENOENT:
1861 os.mkdir(distlocks_subdir)
1862 distdir_perms(distlocks_subdir)
1865 if not os.access(distlocks_subdir, os.W_OK):
1866 writemsg("!!! No write access to write to %s. Aborting.\n" % distlocks_subdir)
1868 del distlocks_subdir
# --- main download loop: one file at a time, under an optional lock ---
1871 for myfile in filedict.keys():
1877 if use_locks and can_fetch:
1879 file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+locks_in_subdir+"/"+myfile,wantnewlockfile=1)
1881 file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+myfile,wantnewlockfile=1)
1883 for loc in filedict[myfile]:
1887 # allow different fetchcommands per protocol
1888 protocol = loc[0:loc.find("://")]
1889 if mysettings.has_key("FETCHCOMMAND_"+protocol.upper()):
1890 fetchcommand=mysettings["FETCHCOMMAND_"+protocol.upper()]
1892 fetchcommand=mysettings["FETCHCOMMAND"]
1893 if mysettings.has_key("RESUMECOMMAND_"+protocol.upper()):
1894 resumecommand=mysettings["RESUMECOMMAND_"+protocol.upper()]
1896 resumecommand=mysettings["RESUMECOMMAND"]
1898 fetchcommand=string.replace(fetchcommand,"${DISTDIR}",mysettings["DISTDIR"])
1899 resumecommand=string.replace(resumecommand,"${DISTDIR}",mysettings["DISTDIR"])
# pre-check any existing local copy against the digest before downloading
1902 mystat=os.stat(mysettings["DISTDIR"]+"/"+myfile)
1903 if mydigests.has_key(myfile):
1904 #if we have the digest file, we know the final size and can resume the download.
1905 if mystat[stat.ST_SIZE]<mydigests[myfile]["size"]:
1908 #we already have it downloaded, skip.
1909 #if our file is bigger than the recorded size, digestcheck should catch it.
1913 # Verify checksums at each fetch for fetchonly.
1914 verified_ok,reason = portage_checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
1917 writemsg("!!! Previously fetched file: "+str(myfile)+"\n")
1918 writemsg("!!! Reason: "+reason[0]+"\n")
1919 writemsg("!!! Got: %s\n!!! Expected: %s\n" % (reason[0], reason[1]))
1920 writemsg("Refetching...\n\n")
1921 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
1924 for x_key in mydigests[myfile].keys():
1925 writemsg(">>> Previously fetched file: "+str(myfile)+" "+x_key+" ;-)\n")
1927 break #No need to keep looking for this file, we have it!
1929 #we don't have the digest file, but the file exists. Assume it is fully downloaded.
1931 except (OSError,IOError),e:
1932 writemsg("An exception was caught(1)...\nFailing the download: %s.\n" % (str(e)),1)
1938 writemsg("!!! File %s isn't fetched but unable to get it.\n" % myfile)
1940 writemsg("!!! File %s isn't fully fetched, but unable to complete it\n" % myfile)
1945 # check if we can actually write to the directory/existing file.
1946 if fetched!=2 and os.path.exists(mysettings["DISTDIR"]+"/"+myfile) != \
1947 os.access(mysettings["DISTDIR"]+"/"+myfile, os.W_OK) and not fetch_to_ro:
1948 writemsg(red("***")+" Lack write access to %s, failing fetch\n" % str(mysettings["DISTDIR"]+"/"+myfile))
1952 #we either need to resume or start the download
1953 #you can't use "continue" when you're inside a "try" block
1956 writemsg(">>> Resuming download...\n")
1957 locfetch=resumecommand
1960 locfetch=fetchcommand
1961 writemsg(">>> Downloading "+str(loc)+"\n")
# substitute URI/FILE placeholders in the configured fetch command
1962 myfetch=string.replace(locfetch,"${URI}",loc)
1963 myfetch=string.replace(myfetch,"${FILE}",myfile)
# under selinux, run the fetch in the dedicated fetch context
1966 con=selinux.getcontext()
1967 con=string.replace(con,mysettings["PORTAGE_T"],mysettings["PORTAGE_FETCH_T"])
1968 selinux.setexec(con)
1969 myret=spawn(myfetch,mysettings,free=1, droppriv=("userfetch" in mysettings.features))
1970 selinux.setexec(None)
1972 myret=spawn(myfetch,mysettings,free=1, droppriv=("userfetch" in mysettings.features))
1974 #if root, -always- set the perms.
1975 if os.path.exists(mysettings["DISTDIR"]+"/"+myfile) and (fetched != 1 or os.getuid() == 0) \
1976 and os.access(mysettings["DISTDIR"]+"/",os.W_OK):
1977 if os.stat(mysettings["DISTDIR"]+"/"+myfile).st_gid != portage_gid:
1979 os.chown(mysettings["DISTDIR"]+"/"+myfile,-1,portage_gid)
1980 except SystemExit, e:
1983 portage_util.writemsg("chown failed on distfile: " + str(myfile))
1984 os.chmod(mysettings["DISTDIR"]+"/"+myfile,0664)
# post-download verification against the digest
1986 if mydigests!=None and mydigests.has_key(myfile):
1988 mystat=os.stat(mysettings["DISTDIR"]+"/"+myfile)
1989 # no exception? file exists. let digestcheck() report
1990 # an appropriately for size or checksum errors
1991 if (mystat[stat.ST_SIZE]<mydigests[myfile]["size"]):
1992 # Fetch failed... Try the next one... Kill 404 files though.
# small HTML-looking files are sniffed for a 404 page and deleted if found
1993 if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
1994 html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
1996 if html404.search(open(mysettings["DISTDIR"]+"/"+myfile).read()):
1998 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
1999 writemsg(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n")
2000 except SystemExit, e:
2004 except SystemExit, e:
2013 # File is the correct size--check the checksums for the fetched
2014 # file NOW, for those users who don't have a stable/continuous
2015 # net connection. This way we have a chance to try to download
2016 # from another mirror...
2017 verified_ok,reason = portage_checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
2020 writemsg("!!! Fetched file: "+str(myfile)+" VERIFY FAILED!\n")
2021 writemsg("!!! Reason: "+reason[0]+"\n")
2022 writemsg("!!! Got: %s\n!!! Expected: %s\n" % (reason[0], reason[1]))
2023 writemsg("Removing corrupt distfile...\n")
2024 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
2027 for x_key in mydigests[myfile].keys():
2028 writemsg(">>> "+str(myfile)+" "+x_key+" ;-)\n")
2031 except (OSError,IOError),e:
2032 writemsg("An exception was caught(2)...\nFailing the download: %s.\n" % (str(e)),1)
2038 elif mydigests!=None:
2039 writemsg("No digest file available and download failed.\n\n")
2041 if use_locks and file_lock:
2042 portage_locks.unlockfile(file_lock)
# fetched==2 means success; anything else (outside listonly) is a hard failure
2046 if (fetched!=2) and not listonly:
2047 writemsg("!!! Couldn't download "+str(myfile)+". Aborting.\n")
2052 def digestCreate(myfiles,basedir,oldDigest={}):
2053 """Takes a list of files and the directory they are in and returns the
2054 dict of dict[filename][CHECKSUM_KEY] = hash
2055 returns None on error."""
# NOTE(review): loop header over myfiles and some branches are elided in
# this excerpt — code untouched. NOTE(review): the mutable default
# oldDigest={} is shared across calls; it appears read-only here but
# verify before relying on that.
2059 myfile=os.path.normpath(basedir+"///"+x)
2060 if os.path.exists(myfile):
2061 if not os.access(myfile, os.R_OK):
2062 print "!!! Given file does not appear to be readable. Does it exist?"
2063 print "!!! File:",myfile
# checksum present files with all MANIFEST1 hash functions
2065 mydigests[x] = portage_checksum.perform_multiple_checksums(myfile, hashes=portage_const.MANIFEST1_HASH_FUNCTIONS)
2066 mysize = os.stat(myfile)[stat.ST_SIZE]
# missing files may be covered by the previous digest (assume old values)
2069 # DeepCopy because we might not have a unique reference.
2070 mydigests[x] = copy.deepcopy(oldDigest[x])
2071 mysize = copy.deepcopy(oldDigest[x]["size"])
2073 print "!!! We have a source URI, but no file..."
2074 print "!!! File:",myfile
# a recorded size that disagrees with the measured one is fatal
2077 if mydigests[x].has_key("size") and (mydigests[x]["size"] != mysize):
2078 raise portage_exception.DigestException, "Size mismatch during checksums"
2079 mydigests[x]["size"] = copy.deepcopy(mysize)
# Format a digest dict into output lines ("<KEY> <hash> <file> <size>").
# Raises DigestException for a file with no digests at all.
# NOTE(review): some lines (myline construction, return) are elided.
2082 def digestCreateLines(filelist, mydict):
2084 mydigests = copy.deepcopy(mydict)
2085 for myarchive in filelist:
2086 mysize = mydigests[myarchive]["size"]
2087 if len(mydigests[myarchive]) == 0:
2088 raise portage_exception.DigestException, "No generate digest for '%(file)s'" % {"file":myarchive}
2089 for sumName in mydigests[myarchive].keys():
# skip entries that are not real checksum keys (e.g. the "size" entry)
2090 if sumName not in portage_checksum.get_valid_checksum_keys():
2092 mysum = mydigests[myarchive][sumName]
2096 myline += " "+myarchive
2097 myline += " "+str(mysize)
2098 mylines.append(myline)
2101 def digestgen(myarchives,mysettings,overwrite=1,manifestonly=0):
2102 """generates digest file if missing. Assumes all files are available. If
2103 overwrite=0, the digest will only be created if it doesn't already exist."""
# NOTE(review): elided excerpt — try/except bodies and returns between
# the numbered lines are missing. Code untouched.
# distfiles digest lives under FILESDIR; the Manifest under the ebuild dir
2106 basedir=mysettings["DISTDIR"]+"/"
2107 digestfn=mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
2109 # portage files -- p(ortagefiles)basedir
2110 pbasedir=mysettings["O"]+"/"
2111 manifestfn=pbasedir+"Manifest"
2113 if not manifestonly:
2114 if not os.path.isdir(mysettings["FILESDIR"]):
2115 os.makedirs(mysettings["FILESDIR"])
2116 mycvstree=cvstree.getentries(pbasedir, recursive=1)
# optionally auto-add the files/ dir to CVS when working in a checkout
2118 if ("cvs" in features) and os.path.exists(pbasedir+"/CVS"):
2119 if not cvstree.isadded(mycvstree,"files"):
2120 if "autoaddcvs" in features:
2121 print ">>> Auto-adding files/ dir to CVS..."
2122 spawn("cd "+pbasedir+"; cvs add files",mysettings,free=1)
2124 print "--- Warning: files/ is not added to cvs."
2126 if (not overwrite) and os.path.exists(digestfn):
2129 print green(">>> Generating the digest file...")
2131 # Track the old digest so we can assume checksums without requiring
2132 # all files to be downloaded. 'Assuming'
2134 if os.path.exists(digestfn):
2135 myolddigest = digestParseFile(digestfn)
2139 mydigests=digestCreate(myarchives, basedir, oldDigest=myolddigest)
2140 except portage_exception.DigestException, s:
2143 if mydigests==None: # There was a problem, exit with an errorcode.
2147 outfile=open(digestfn, "w+")
2148 except SystemExit, e:
2150 except Exception, e:
2151 print "!!! Filesystem error skipping generation. (Read-Only?)"
2154 for x in digestCreateLines(myarchives, mydigests):
2155 outfile.write(x+"\n")
# best-effort ownership/permission fixup on the new digest file
2158 os.chown(digestfn,os.getuid(),portage_gid)
2159 os.chmod(digestfn,0664)
2160 except SystemExit, e:
# --- Manifest generation: checksums of all non-CVS files in the ebuild dir ---
2165 print green(">>> Generating the manifest file...")
2166 mypfiles=listdir(pbasedir,recursive=1,filesonly=1,ignorecvs=1,EmptyOnError=1)
2167 mypfiles=cvstree.apply_cvsignore_filter(mypfiles)
# the Manifest itself must not be checksummed into the Manifest
2169 for x in ["Manifest"]:
2173 mydigests=digestCreate(mypfiles, pbasedir)
2174 if mydigests==None: # There was a problem, exit with an errorcode.
2178 outfile=open(manifestfn, "w+")
2179 except SystemExit, e:
2181 except Exception, e:
2182 print "!!! Filesystem error skipping generation. (Read-Only?)"
2185 for x in digestCreateLines(mypfiles, mydigests):
2186 outfile.write(x+"\n")
2189 os.chown(manifestfn,os.getuid(),portage_gid)
2190 os.chmod(manifestfn,0664)
2191 except SystemExit, e:
# --- optionally add the new digest/Manifest to CVS ---
2196 if "cvs" in features and os.path.exists(pbasedir+"/CVS"):
2197 mycvstree=cvstree.getentries(pbasedir, recursive=1)
2199 if not manifestonly and not cvstree.isadded(mycvstree,digestfn):
2200 if digestfn[:len(pbasedir)]==pbasedir:
2201 myunaddedfiles=digestfn[len(pbasedir):]+" "
2203 myunaddedfiles=digestfn+" "
2204 if not cvstree.isadded(mycvstree,manifestfn[len(pbasedir):]):
2205 if manifestfn[:len(pbasedir)]==pbasedir:
2206 myunaddedfiles+=manifestfn[len(pbasedir):]+" "
2208 myunaddedfiles+=manifestfn
2210 if "autoaddcvs" in features:
2211 print blue(">>> Auto-adding digest file(s) to CVS...")
2212 spawn("cd "+pbasedir+"; cvs add "+myunaddedfiles,mysettings,free=1)
2214 print "--- Warning: digests are not yet added into CVS."
2215 print darkgreen(">>> Computed message digests.")
2220 def digestParseFile(myfilename):
2221 """(filename) -- Parses a given file for entries matching:
2222 <checksumkey> <checksum_hex_string> <filename> <filesize>
2223 Ignores lines that don't start with a valid checksum identifier
2224 and returns a dict with the filenames as keys and {checksumkey:checksum}
# NOTE(review): elided excerpt — loop header over mylines, mydigests
# init, and returns are missing between the numbered lines.
2227 if not os.path.exists(myfilename):
2229 mylines = portage_util.grabfile(myfilename, compat_level=1)
2233 myline=string.split(x)
2237 if myline[0] not in portage_checksum.get_valid_checksum_keys():
# fields: key, hash, then size last; everything in between is the filename
# (rejoined with spaces to tolerate filenames containing spaces)
2239 mykey = myline.pop(0)
2240 myhash = myline.pop(0)
2241 mysize = long(myline.pop())
2242 myfn = string.join(myline, " ")
2243 if myfn not in mydigests:
2244 mydigests[myfn] = {}
2245 mydigests[myfn][mykey] = myhash
# two entries for the same file must agree on its size
2246 if "size" in mydigests[myfn]:
2247 if mydigests[myfn]["size"] != mysize:
2248 raise portage_exception.DigestException, "Conflicting sizes in digest: %(filename)s" % {"filename":myfilename}
2250 mydigests[myfn]["size"] = mysize
2253 # XXXX strict was added here to fix a missing name error.
2254 # XXXX It's used below, but we're not paying attention to how we get it?
# Verify each file in myfiles against its entry in mydigests, printing
# user-facing diagnostics on mismatch.
# NOTE(review): fragmentary listing -- original line numbers jump, so
# statements (including the loop header and return statements) are missing
# between the visible lines.
2255 def digestCheckFiles(myfiles, mydigests, basedir, note="", strict=0):
2256 """(fileslist, digestdict, basedir) -- Takes a list of files and a dict
2257 of their digests and checks the digests against the indicated files in
2258 the basedir given. Returns 1 only if all files exist and match the checksums.
2261 if not mydigests.has_key(x):
2263 print red("!!! No message digest entry found for file \""+x+".\"")
2264 print "!!! Most likely a temporary problem. Try 'emerge sync' again later."
2265 print "!!! If you are certain of the authenticity of the file then you may type"
2266 print "!!! the following to generate a new digest:"
2267 print "!!! ebuild /usr/portage/category/package/package-version.ebuild digest"
2269 myfile=os.path.normpath(basedir+"/"+x)
2270 if not os.path.exists(myfile):
2272 print "!!! File does not exist:",myfile
# verify_all() returns (ok, reason) where reason[0..2] are
# message / got / expected (see the prints below).
2276 ok,reason = portage_checksum.verify_all(myfile,mydigests[x])
2279 print red("!!! Digest verification Failed:")
2280 print red("!!!")+" "+str(os.path.realpath(myfile))
2281 print red("!!! Reason: ")+reason[0]
2282 print red("!!! Got: ")+str(reason[1])
2283 print red("!!! Expected: ")+str(reason[2])
2287 writemsg_stdout(">>> checksums "+note+" ;-) %s\n" % x)
# Verify both the Manifest (portage tree files) and the distfile digests
# for a package.  Delegates the actual per-file checking to
# digestCheckFiles() and may auto-generate digests via digestgen().
# NOTE(review): fragmentary listing -- the embedded original line numbers
# jump, so statements (try/except bodies, returns) are missing between the
# visible lines.
2291 def digestcheck(myfiles, mysettings, strict=0, justmanifest=0):
2292 """Verifies checksums. Assumes all files have been downloaded."""
2294 basedir=mysettings["DISTDIR"]+"/"
2295 digestfn=mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
2297 # portage files -- p(ortagefiles)basedir
2298 pbasedir=mysettings["O"]+"/"
2299 manifestfn=pbasedir+"Manifest"
2301 if not (os.path.exists(digestfn) and os.path.exists(manifestfn)):
2302 if "digest" in features:
2303 print ">>> No package digest/Manifest file found."
2304 print ">>> \"digest\" mode enabled; auto-generating new digest..."
2305 return digestgen(myfiles,mysettings)
2307 if not os.path.exists(manifestfn):
2309 print red("!!! No package manifest found:"),manifestfn
2312 print "--- No package manifest found:",manifestfn
2313 if not os.path.exists(digestfn):
2314 print "!!! No package digest file found:",digestfn
2315 print "!!! Type \"ebuild foo.ebuild digest\" to generate it."
2318 mydigests=digestParseFile(digestfn)
2320 print "!!! Failed to parse digest file:",digestfn
2322 mymdigests=digestParseFile(manifestfn)
2323 if "strict" not in features:
2324 # XXX: Remove this when manifests become mainstream.
2326 elif mymdigests==None:
2327 print "!!! Failed to parse manifest file:",manifestfn
2331 # Check the portage-related files here.
2332 mymfiles=listdir(pbasedir,recursive=1,filesonly=1,ignorecvs=1,EmptyOnError=1)
2333 manifest_files = mymdigests.keys()
2334 # Files unrelated to the build process are ignored for verification by default
2335 for x in ["Manifest", "ChangeLog", "metadata.xml"]:
2336 while x in mymfiles:
2338 while x in manifest_files:
2339 manifest_files.remove(x)
# Walk the on-disk file list backwards so entries can be removed in place.
2340 for x in range(len(mymfiles)-1,-1,-1):
2341 if mymfiles[x] in manifest_files:
2342 manifest_files.remove(mymfiles[x])
2343 elif len(cvstree.apply_cvsignore_filter([mymfiles[x]]))==0:
2344 # we filter here, rather then above; manifest might have files flagged by the filter.
2345 # if something is returned, then it's flagged as a bad file
2346 # manifest doesn't know about it, so we kill it here.
2349 print red("!!! Security Violation: A file exists that is not in the manifest.")
2350 print "!!! File:",mymfiles[x]
2353 if manifest_files and strict:
2354 print red("!!! Files listed in the manifest do not exist!")
2355 for x in manifest_files:
2359 if not digestCheckFiles(mymfiles, mymdigests, pbasedir, note="files ", strict=strict):
2361 print ">>> Please ensure you have sync'd properly. Please try '"+bold("emerge sync")+"' and"
2362 print ">>> optionally examine the file(s) for corruption. "+bold("A sync will fix most cases.")
2366 print "--- Manifest check failed. 'strict' not enabled; ignoring."
2372 # Just return the status, as it's the last check.
2373 return digestCheckFiles(myfiles, mydigests, basedir, note="src_uri", strict=strict)
2375 # parse actionmap to spawn ebuild with the appropriate args
# Recursively run the ebuild phase *mydo* (and its "dep" prerequisite phase
# first, unless FEATURES=noauto and alwaysdep is unset) by spawning
# ebuild.sh; sandbox/privilege options come from actionmap[mydo]["args"].
# NOTE(review): fragmentary listing -- lines are missing between the
# numbered statements (e.g. the retval check after the recursive call).
2376 def spawnebuild(mydo,actionmap,mysettings,debug,alwaysdep=0,logfile=None):
2377 if alwaysdep or ("noauto" not in features):
2378 # process dependency first
2379 if "dep" in actionmap[mydo].keys():
2380 retval=spawnebuild(actionmap[mydo]["dep"],actionmap,mysettings,debug,alwaysdep=alwaysdep,logfile=logfile)
2384 mycommand = EBUILD_SH_BINARY + " "
# With SELinux + FEATURES=sesandbox, run the build phases under the
# sandbox security context, then restore the previous exec context.
2385 if selinux_enabled and ("sesandbox" in features) and (mydo in ["unpack","compile","test","install"]):
2386 con=selinux.getcontext()
2387 con=string.replace(con,mysettings["PORTAGE_T"],mysettings["PORTAGE_SANDBOX_T"])
2388 selinux.setexec(con)
2389 retval=spawn(mycommand + mydo,mysettings,debug=debug,
2390 free=actionmap[mydo]["args"][0],
2391 droppriv=actionmap[mydo]["args"][1],logfile=logfile)
2392 selinux.setexec(None)
2394 retval=spawn(mycommand + mydo,mysettings, debug=debug,
2395 free=actionmap[mydo]["args"][0],
2396 droppriv=actionmap[mydo]["args"][1],logfile=logfile)
2399 # chunked out deps for each phase, so that ebuild binary can use it
2400 # to collapse targets down.
# NOTE(review): fragmentary listing -- the dict's opening assignment
# (actionmap_deps={...) and some phase entries are missing from this view;
# each visible entry maps a phase to the phase(s) it depends on.
2404 "unpack": ["setup"],
2405 "compile":["unpack"],
2406 "test": ["compile"],
2409 "package":["install"],
def eapi_is_supported(eapi):
	"""Return True when the stringified, whitespace-stripped *eapi*
	equals the EAPI this portage supports (portage_const.EAPI)."""
	supported = str(portage_const.EAPI).strip()
	return supported == str(eapi).strip()
# Main ebuild driver: validate *mydo*, populate mysettings with the package
# environment (P/PN/PV, dirs, KV, ...), prepare build/ccache/confcache/
# distcc/log directories, fetch and digest-check distfiles, then dispatch
# to the requested phase via spawnebuild()/merge()/unmerge().
# NOTE(review): fragmentary listing -- the embedded original line numbers
# jump throughout, so many statements (try/except frames, returns, loop
# headers) are missing between the visible lines.
2417 def doebuild(myebuild,mydo,myroot,mysettings,debug=0,listonly=0,fetchonly=0,cleanup=0,dbkey=None,use_cache=1,fetchall=0,tree=None):
2418 global db, actionmap_deps
2421 dump_traceback("Warning: tree not specified to doebuild")
# Derive category/package-version identifiers from the ebuild path.
2424 ebuild_path = os.path.abspath(myebuild)
2425 pkg_dir = os.path.dirname(ebuild_path)
2427 if mysettings.configdict["pkg"].has_key("CATEGORY"):
2428 cat = mysettings.configdict["pkg"]["CATEGORY"]
2430 cat = os.path.basename(os.path.normpath(pkg_dir+"/.."))
2431 mypv = os.path.basename(ebuild_path)[:-7]
2432 mycpv = cat+"/"+mypv
2434 mysplit=pkgsplit(mypv,silent=0)
2436 writemsg("!!! Error: PF is null '%s'; exiting.\n" % mypv)
2439 if mydo != "depend":
2440 # XXX: We're doing a little hack here to curtain the gvisible locking
2441 # XXX: that creates a deadlock... Really need to isolate that.
2442 mysettings.reset(use_cache=use_cache)
2443 mysettings.setcpv(mycpv,use_cache=use_cache)
2445 validcommands = ["help","clean","prerm","postrm","preinst","postinst",
2446 "config","setup","depend","fetch","digest",
2447 "unpack","compile","test","install","rpm","qmerge","merge",
2448 "package","unmerge", "manifest"]
2450 if mydo not in validcommands:
2451 validcommands.sort()
2452 writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo)
2453 for vcount in range(len(validcommands)):
2456 writemsg(string.ljust(validcommands[vcount], 11))
2460 if not os.path.exists(myebuild):
2461 writemsg("!!! doebuild: "+str(myebuild)+" not found for "+str(mydo)+"\n")
2464 if debug: # Otherwise it overrides emerge's settings.
2465 # We have no other way to set debug... debug can't be passed in
2466 # due to how it's coded... Don't overwrite this so we can use it.
2467 mysettings["PORTAGE_DEBUG"]=str(debug)
# Export the standard ebuild environment variables.
2469 mysettings["ROOT"] = myroot
2470 mysettings["STARTDIR"] = getcwd()
2472 mysettings["EBUILD"] = ebuild_path
2473 mysettings["O"] = pkg_dir
2474 mysettings["CATEGORY"] = cat
2475 mysettings["FILESDIR"] = pkg_dir+"/files"
2476 mysettings["PF"] = mypv
2478 mysettings["ECLASSDIR"] = mysettings["PORTDIR"]+"/eclass"
2479 mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")
2481 mysettings["PROFILE_PATHS"] = string.join(mysettings.profiles,"\n")+"\n"+CUSTOM_PROFILE_PATH
2482 mysettings["P"] = mysplit[0]+"-"+mysplit[1]
2483 mysettings["PN"] = mysplit[0]
2484 mysettings["PV"] = mysplit[1]
2485 mysettings["PR"] = mysplit[2]
2487 if portage_util.noiselimit < 0:
2488 mysettings["PORTAGE_QUIET"] = "1"
2490 if mydo != "depend":
2492 mysettings["INHERITED"], mysettings["RESTRICT"] = db[root][tree].dbapi.aux_get( \
2493 mycpv,["INHERITED","RESTRICT"])
2494 mysettings["PORTAGE_RESTRICT"]=string.join(flatten(portage_dep.use_reduce(portage_dep.paren_reduce( \
2495 mysettings["RESTRICT"]), uselist=mysettings["USE"].split())),' ')
2496 except SystemExit, e:
2500 eapi = db[root][tree].dbapi.aux_get(mycpv, ["EAPI"])[0]
2501 if not eapi_is_supported(eapi):
2502 # can't do anything with this.
2503 raise portage_exception.UnsupportedAPIException(mycpv, eapi)
2505 if mysplit[2] == "r0":
2506 mysettings["PVR"]=mysplit[1]
2508 mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]
2510 mysettings["SLOT"]=""
# Make sure portage's own bin dir is on PATH for the spawned scripts.
2512 if mysettings.has_key("PATH"):
2513 mysplit=string.split(mysettings["PATH"],":")
2516 if PORTAGE_BIN_PATH not in mysplit:
2517 mysettings["PATH"]=PORTAGE_BIN_PATH+":"+mysettings["PATH"]
2520 mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
2521 mysettings["HOME"] = mysettings["BUILD_PREFIX"]+"/homedir"
2522 mysettings["PKG_TMPDIR"] = mysettings["PORTAGE_TMPDIR"]+"/portage-pkg"
2523 mysettings["PORTAGE_BUILDDIR"] = mysettings["BUILD_PREFIX"]+"/"+mysettings["PF"]
2525 mysettings["PORTAGE_BASHRC"] = EBUILD_SH_ENV_FILE
2527 #set up KV variable -- DEP SPEEDUP :: Don't waste time. Keep var persistent.
2528 if (mydo!="depend") or not mysettings.has_key("KV"):
2529 mykv,err1=ExtractKernelVersion(root+"usr/src/linux")
2531 # Regular source tree
2532 mysettings["KV"]=mykv
2536 if (mydo!="depend") or not mysettings.has_key("KVERS"):
2538 mysettings["KVERS"]=myso[1]
2541 # get possible slot information from the deps file
2543 if mysettings.has_key("PORTAGE_DEBUG") and mysettings["PORTAGE_DEBUG"]=="1":
2544 # XXX: This needs to use a FD for saving the output into a file.
2545 # XXX: Set this up through spawn
2547 writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
2549 mysettings["dbkey"] = dbkey
2551 mysettings["dbkey"] = mysettings.depcachedir+"/aux_db_key_temp"
2553 retval = spawn(EBUILD_SH_BINARY+" depend",mysettings)
2557 # Build directory creation isn't required for any of these.
2558 if mydo not in ["fetch","digest","manifest"]:
2560 if not os.path.exists(mysettings["BUILD_PREFIX"]):
2561 os.makedirs(mysettings["BUILD_PREFIX"])
2562 if (os.getuid() == 0):
2563 os.chown(mysettings["BUILD_PREFIX"],portage_uid,portage_gid)
2564 os.chmod(mysettings["BUILD_PREFIX"],00775)
2566 # Should be ok again to set $T, as sandbox does not depend on it
2567 # XXX Bug. no way in hell this is valid for clean handling.
2568 mysettings["T"]=mysettings["PORTAGE_BUILDDIR"]+"/temp"
2569 if cleanup or mydo=="clean":
2570 if os.path.exists(mysettings["T"]):
2571 shutil.rmtree(mysettings["T"])
2572 if not os.path.exists(mysettings["T"]):
2573 os.makedirs(mysettings["T"])
2574 if (os.getuid() == 0):
2575 os.chown(mysettings["T"],portage_uid,portage_gid)
2576 os.chmod(mysettings["T"],02770)
2578 logdir = mysettings["T"]+"/logging"
2579 if not os.path.exists(logdir):
2582 os.chown(logdir, portage_uid, portage_gid)
2583 os.chmod(logdir, 0770)
# Recreate HOME for userpriv builds (unless RESTRICT forbids it).
2585 try: # XXX: negative RESTRICT
2586 if not (("nouserpriv" in string.split(mysettings["PORTAGE_RESTRICT"])) or \
2587 ("userpriv" in string.split(mysettings["PORTAGE_RESTRICT"]))):
2588 if ("userpriv" in features) and (portage_uid and portage_gid):
2590 if os.path.exists(mysettings["HOME"]):
2591 # XXX: Potentially bad, but held down by HOME replacement above.
2592 spawn("rm -Rf "+mysettings["HOME"],mysettings, free=1)
2593 if not os.path.exists(mysettings["HOME"]):
2594 os.makedirs(mysettings["HOME"])
2595 elif ("userpriv" in features):
2596 print "!!! Disabling userpriv from features... Portage UID/GID not valid."
2597 del features[features.index("userpriv")]
2598 except SystemExit, e:
2600 except Exception, e:
2601 print "!!! Couldn't empty HOME:",mysettings["HOME"]
2605 # no reason to check for depend since depend returns above.
2606 if not os.path.exists(mysettings["BUILD_PREFIX"]):
2607 os.makedirs(mysettings["BUILD_PREFIX"])
2608 if (os.getuid() == 0):
2609 os.chown(mysettings["BUILD_PREFIX"],portage_uid,portage_gid)
2610 if not os.path.exists(mysettings["PORTAGE_BUILDDIR"]):
2611 os.makedirs(mysettings["PORTAGE_BUILDDIR"])
2612 if (os.getuid() == 0):
2613 os.chown(mysettings["PORTAGE_BUILDDIR"],portage_uid,portage_gid)
2615 print "!!! File system problem. (ReadOnly? Out of space?)"
2616 print "!!! Perhaps: rm -Rf",mysettings["BUILD_PREFIX"]
2621 if not os.path.exists(mysettings["HOME"]):
2622 os.makedirs(mysettings["HOME"])
2623 if (os.getuid() == 0):
2624 os.chown(mysettings["HOME"],portage_uid,portage_gid)
2625 os.chmod(mysettings["HOME"],02770)
2627 print "!!! File system problem. (ReadOnly? Out of space?)"
2628 print "!!! Failed to create fake home directory in PORTAGE_BUILDDIR"
# FEATURES=ccache: create/fix the shared ccache dir permissions.
2633 if ("ccache" in features):
2634 if (not mysettings.has_key("CCACHE_DIR")) or (mysettings["CCACHE_DIR"]==""):
2635 mysettings["CCACHE_DIR"]=mysettings["PORTAGE_TMPDIR"]+"/ccache"
2636 if not os.path.exists(mysettings["CCACHE_DIR"]):
2637 os.makedirs(mysettings["CCACHE_DIR"])
2638 mystat = os.stat(mysettings["CCACHE_DIR"])
2639 if ("userpriv" in features):
2640 if mystat[stat.ST_UID] != portage_uid or ((mystat[stat.ST_MODE]&02070)!=02070):
2641 writemsg("* Adjusting permissions on ccache in %s\n" % mysettings["CCACHE_DIR"])
2642 spawn("chgrp -R "+str(portage_gid)+" "+mysettings["CCACHE_DIR"], mysettings, free=1)
2643 spawn("chown "+str(portage_uid)+":"+str(portage_gid)+" "+mysettings["CCACHE_DIR"], mysettings, free=1)
2644 spawn("chmod -R ug+rw "+mysettings["CCACHE_DIR"], mysettings, free=1)
2645 spawn("find "+mysettings["CCACHE_DIR"]+" -type d -exec chmod g+xs \{\} \;", mysettings, free=1)
2647 if mystat[stat.ST_UID] != 0 or ((mystat[stat.ST_MODE]&02070)!=02070):
2648 writemsg("* Adjusting permissions on ccache in %s\n" % mysettings["CCACHE_DIR"])
2649 spawn("chgrp -R "+str(portage_gid)+" "+mysettings["CCACHE_DIR"], mysettings, free=1)
2650 spawn("chown 0:"+str(portage_gid)+" "+mysettings["CCACHE_DIR"], mysettings, free=1)
2651 spawn("chmod -R ug+rw "+mysettings["CCACHE_DIR"], mysettings, free=1)
2652 spawn("find "+mysettings["CCACHE_DIR"]+" -type d -exec chmod g+xs \{\} \;", mysettings, free=1)
2654 print "!!! File system problem. (ReadOnly? Out of space?)"
2655 print "!!! Perhaps: rm -Rf",mysettings["BUILD_PREFIX"]
# FEATURES=confcache: set up the confcache dir (root only) and normalize
# group/permissions on it and its contents.
2659 if "confcache" in features:
2660 if not mysettings.has_key("CONFCACHE_DIR"):
2661 mysettings["CONFCACHE_DIR"] = os.path.join(mysettings["PORTAGE_TMPDIR"], "confcache")
2662 if not os.path.exists(mysettings["CONFCACHE_DIR"]):
2663 if not os.getuid() == 0:
2665 features.remove("confcache")
2666 mysettings["FEATURES"] = " ".join(features)
2668 os.makedirs(mysettings["CONFCACHE_DIR"], mode=0775)
2669 os.chown(mysettings["CONFCACHE_DIR"], -1, portage_gid)
2671 st = os.stat(mysettings["CONFCACHE_DIR"])
2672 if not (st.st_mode & 07777) == 0775:
2673 os.chmod(mysettings["CONFCACHE_DIR"], 0775)
2674 if not st.st_gid == portage_gid:
2675 os.chown(mysettings["CONFCACHE_DIR"], -1, portage_gid)
2677 # check again, since it may have been disabled.
2678 if "confcache" in features:
2679 for x in listdir(mysettings["CONFCACHE_DIR"]):
2680 p = os.path.join(mysettings["CONFCACHE_DIR"], x)
2682 if not (st.st_mode & 07777) & 07660 == 0660:
2683 os.chmod(p, (st.st_mode & 0777) | 0660)
2684 if not st.st_gid == portage_gid:
2685 os.chown(p, -1, portage_gid)
2688 print "!!! Failed resetting perms on confcachedir %s" % mysettings["CONFCACHE_DIR"]
2691 # mystat=os.stat(mysettings["CCACHE_DIR"])
2692 # if (mystat[stat.ST_GID]!=portage_gid) or ((mystat[stat.ST_MODE]&02070)!=02070):
2693 # print "*** Adjusting ccache permissions for portage user..."
2694 # os.chown(mysettings["CCACHE_DIR"],portage_uid,portage_gid)
2695 # os.chmod(mysettings["CCACHE_DIR"],02770)
2696 # spawn("chown -R "+str(portage_uid)+":"+str(portage_gid)+" "+mysettings["CCACHE_DIR"],mysettings, free=1)
2697 # spawn("chmod -R g+rw "+mysettings["CCACHE_DIR"],mysettings, free=1)
2698 #except SystemExit, e:
# FEATURES=distcc: create the distcc state dirs; disable distcc on failure.
2703 if "distcc" in features:
2705 if (not mysettings.has_key("DISTCC_DIR")) or (mysettings["DISTCC_DIR"]==""):
2706 mysettings["DISTCC_DIR"]=mysettings["PORTAGE_TMPDIR"]+"/portage/.distcc"
2707 if not os.path.exists(mysettings["DISTCC_DIR"]):
2708 os.makedirs(mysettings["DISTCC_DIR"])
2709 os.chown(mysettings["DISTCC_DIR"],portage_uid,portage_gid)
2710 os.chmod(mysettings["DISTCC_DIR"],02775)
2711 for x in ("/lock", "/state"):
2712 if not os.path.exists(mysettings["DISTCC_DIR"]+x):
2713 os.mkdir(mysettings["DISTCC_DIR"]+x)
2714 os.chown(mysettings["DISTCC_DIR"]+x,portage_uid,portage_gid)
2715 os.chmod(mysettings["DISTCC_DIR"]+x,02775)
2717 writemsg("\n!!! File system problem when setting DISTCC_DIR directory permissions.\n")
2718 writemsg( "!!! DISTCC_DIR="+str(mysettings["DISTCC_DIR"]+"\n"))
2719 writemsg( "!!! "+str(e)+"\n\n")
2721 features.remove("distcc")
2722 mysettings["DISTCC_DIR"]=""
2724 mysettings["WORKDIR"]=mysettings["PORTAGE_BUILDDIR"]+"/work"
2725 mysettings["D"]=mysettings["PORTAGE_BUILDDIR"]+"/image/"
# PORT_LOGDIR handling: create it, fix ownership/mode, and compute the
# per-build logfile name; disable logging if anything fails.
2727 if mysettings.has_key("PORT_LOGDIR"):
2728 if not os.access(mysettings["PORT_LOGDIR"],os.F_OK):
2730 os.mkdir(mysettings["PORT_LOGDIR"])
2732 print "!!! Unable to create PORT_LOGDIR"
2734 if os.access(mysettings["PORT_LOGDIR"]+"/",os.W_OK):
2736 perms = os.stat(mysettings["PORT_LOGDIR"])
2737 if perms[stat.ST_UID] != portage_uid or perms[stat.ST_GID] != portage_gid:
2738 os.chown(mysettings["PORT_LOGDIR"],portage_uid,portage_gid)
2739 if stat.S_IMODE(perms[stat.ST_MODE]) != 02770:
2740 os.chmod(mysettings["PORT_LOGDIR"],02770)
2741 if not mysettings.has_key("LOG_PF") or (mysettings["LOG_PF"] != mysettings["PF"]):
2742 mysettings["LOG_PF"]=mysettings["PF"]
2743 mysettings["LOG_COUNTER"]=str(db[myroot]["vartree"].dbapi.get_counter_tick_core("/"))
2744 logfile="%s/%s-%s.log" % (mysettings["PORT_LOGDIR"],mysettings["LOG_COUNTER"],mysettings["LOG_PF"])
2746 mysettings["PORT_LOGDIR"]=""
2747 print "!!! Unable to chown/chmod PORT_LOGDIR. Disabling logging."
2750 print "!!! Cannot create log... No write access / Does not exist"
2751 print "!!! PORT_LOGDIR:",mysettings["PORT_LOGDIR"]
2752 mysettings["PORT_LOGDIR"]=""
2755 return unmerge(mysettings["CATEGORY"],mysettings["PF"],myroot,mysettings)
2757 # if any of these are being called, handle them -- running them out of the sandbox -- and stop now.
2760 if mydo in ["help","clean","setup"]:
2761 return spawn(EBUILD_SH_BINARY+" "+mydo,mysettings,debug=debug,free=1,logfile=logfile)
2762 elif mydo in ["prerm","postrm","preinst","postinst","config"]:
2763 mysettings.load_infodir(pkg_dir)
2764 return spawn(EBUILD_SH_BINARY+" "+mydo,mysettings,debug=debug,free=1,logfile=logfile)
2767 mysettings["SLOT"],mysettings["RESTRICT"] = db["/"]["porttree"].dbapi.aux_get(mycpv,["SLOT","RESTRICT"])
2768 except (IOError,KeyError):
2769 print red("doebuild():")+" aux_get() error reading "+mycpv+"; aborting."
# Compute the fetch lists: A = this-USE archives, AA = all archives.
2772 newuris, alist = db["/"]["porttree"].dbapi.getfetchlist(mycpv,mysettings=mysettings)
2773 alluris, aalist = db["/"]["porttree"].dbapi.getfetchlist(mycpv,mysettings=mysettings,all=1)
2774 mysettings["A"]=string.join(alist," ")
2775 mysettings["AA"]=string.join(aalist," ")
2776 if ("mirror" in features) or fetchall:
2779 elif mydo=="digest":
2782 digestfn=mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
2783 if os.path.exists(digestfn):
2784 mydigests=digestParseFile(digestfn)
2788 i = checkme.index(x)
2796 if not os.path.exists(mysettings["DISTDIR"]):
2797 os.makedirs(mysettings["DISTDIR"])
2798 if not os.path.exists(mysettings["DISTDIR"]+"/cvs-src"):
2799 os.makedirs(mysettings["DISTDIR"]+"/cvs-src")
2801 print "!!! File system problem. (Bad Symlink?)"
2802 print "!!! Fetching may fail:",str(e)
2805 mystat=os.stat(mysettings["DISTDIR"]+"/cvs-src")
2806 if ((mystat[stat.ST_GID]!=portage_gid) or ((mystat[stat.ST_MODE]&02770)!=02770)) and not listonly:
2807 print "*** Adjusting cvs-src permissions for portage user..."
2808 os.chown(mysettings["DISTDIR"]+"/cvs-src",0,portage_gid)
2809 os.chmod(mysettings["DISTDIR"]+"/cvs-src",02770)
2810 spawn("chgrp -R "+str(portage_gid)+" "+mysettings["DISTDIR"]+"/cvs-src", free=1)
2811 spawn("chmod -R g+rw "+mysettings["DISTDIR"]+"/cvs-src", free=1)
2812 except SystemExit, e:
2817 # Only try and fetch the files if we are going to need them ... otherwise,
2818 # if user has FEATURES=noauto and they run `ebuild clean unpack compile install`,
2819 # we will try and fetch 4 times :/
2820 need_distfiles = (mydo in ("digest", "fetch", "unpack") or
2821 mydo != "manifest" and "noauto" not in features)
2822 if need_distfiles and not fetch(fetchme, mysettings, listonly=listonly, fetchonly=fetchonly):
2825 # inefficient. improve this logic via making actionmap easily searchable to see if we're in the chain of what
2826 # will be executed, either that or forced N doebuild calls instead of a single set of phase calls.
2827 if (mydo not in ("setup", "clean", "postinst", "preinst", "prerm", "fetch", "digest", "manifest") and
2828 "noauto" not in features) or mydo == "unpack":
2829 # remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
2830 mysettings["PORTAGE_ACTUAL_DISTDIR"] = orig_distdir = mysettings["DISTDIR"]
2831 edpath = mysettings["DISTDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "distdir")
2832 if os.path.exists(edpath):
2834 if os.path.isdir(edpath) and not os.path.islink(edpath):
2835 shutil.rmtree(edpath)
2839 print "!!! Failed reseting ebuild distdir path, " + edpath
2842 os.chown(edpath, -1, portage_gid)
2843 os.chmod(edpath, 0775)
2846 os.symlink(os.path.join(orig_distdir, file), os.path.join(edpath, file))
2848 print "!!! Failed symlinking in '%s' to ebuild distdir" % file
2851 if mydo=="fetch" and listonly:
2854 if "digest" in features:
2855 #generate digest if it doesn't exist.
2857 return (not digestgen(aalist,mysettings,overwrite=1))
2859 digestgen(aalist,mysettings,overwrite=0)
2860 elif mydo=="digest":
2861 #since we are calling "digest" directly, recreate the digest even if it already exists
2862 return (not digestgen(aalist,mysettings,overwrite=1))
2863 if mydo=="manifest":
2864 return (not digestgen(aalist,mysettings,overwrite=1,manifestonly=1))
2866 # See above comment about fetching only when needed
2867 if not digestcheck(checkme, mysettings, ("strict" in features), (mydo not in ["digest","fetch","unpack"] and settings["PORTAGE_CALLER"] == "ebuild" and "noauto" in features)):
2873 #initial dep checks complete; time to process main commands
# Decide whether the sandbox can be skipped for userpriv builds.
2875 nosandbox=(("userpriv" in features) and ("usersandbox" not in features) and \
2876 ("userpriv" not in mysettings["RESTRICT"]) and ("nouserpriv" not in mysettings["RESTRICT"]))
2877 if nosandbox and ("userpriv" not in features or "userpriv" in mysettings["RESTRICT"] or \
2878 "nouserpriv" in mysettings["RESTRICT"]):
2879 nosandbox = ("sandbox" not in features and "usersandbox" not in features)
# Per-phase (sandbox-free?, drop-privileges?) flags.
2882 "depend": {"args":(0,1)}, # sandbox / portage
2883 "setup": {"args":(1,0)}, # without / root
2884 "unpack": {"args":(0,1)}, # sandbox / portage
2885 "compile":{"args":(nosandbox,1)}, # optional / portage
2886 "test": {"args":(nosandbox,1)}, # optional / portage
2887 "install":{"args":(0,0)}, # sandbox / root
2888 "rpm": {"args":(0,0)}, # sandbox / root
2889 "package":{"args":(0,0)}, # sandbox / root
2892 # merge the deps in so we have again a 'full' actionmap
2893 # be glad when this can die.
2894 for x in actionmap.keys():
2895 if len(actionmap_deps.get(x, [])):
2896 actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
2898 if mydo in actionmap.keys():
2900 for x in ["","/"+mysettings["CATEGORY"],"/All"]:
2901 if not os.path.exists(mysettings["PKGDIR"]+x):
2902 os.makedirs(mysettings["PKGDIR"]+x)
2903 # REBUILD CODE FOR TBZ2 --- XXXX
2904 return spawnebuild(mydo,actionmap,mysettings,debug,logfile=logfile)
2905 elif mydo=="qmerge":
2906 #check to ensure install was run. this *only* pops up when users forget it and are using ebuild
2907 if not os.path.exists(mysettings["PORTAGE_BUILDDIR"]+"/.installed"):
2908 print "!!! mydo=qmerge, but install phase hasn't been ran"
2910 #qmerge is specifically not supposed to do a runtime dep check
2911 return merge(mysettings["CATEGORY"],mysettings["PF"],mysettings["D"],mysettings["PORTAGE_BUILDDIR"]+"/build-info",myroot,mysettings,myebuild=mysettings["EBUILD"],mytree=tree)
2913 retval=spawnebuild("install",actionmap,mysettings,debug,alwaysdep=1,logfile=logfile)
2916 return merge(mysettings["CATEGORY"],mysettings["PF"],mysettings["D"],mysettings["PORTAGE_BUILDDIR"]+"/build-info",myroot,mysettings,myebuild=mysettings["EBUILD"],mytree=tree)
2918 print "!!! Unknown mydo:",mydo
# NOTE(review): fragmentary listing -- the embedded original line numbers
# jump, so try/except frames and other statements are missing between the
# visible lines; the bsd_chflags branches presumably run only when that
# module is available -- the guarding conditions are not visible here.
2923 def movefile(src,dest,newmtime=None,sstat=None,mysettings=None):
2924 """moves a file from src to dest, preserving all permissions and attributes; mtime will
2925 be preserved even when moving across filesystems. Returns true on success and false on
2926 failure. Move is atomic."""
2927 #print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"
2934 sflags=bsd_chflags.lgetflags(src)
2936 # Problem getting flags...
2937 writemsg("!!! Couldn't get flags for "+dest+"\n")
2940 except SystemExit, e:
2942 except Exception, e:
2943 print "!!! Stating source file failed... movefile()"
2949 dstat=os.lstat(dest)
2950 except SystemExit, e:
# Destination missing: stat its parent directory instead.
2953 dstat=os.lstat(os.path.dirname(dest))
2957 # Check that we can actually unset schg etc flags...
2958 # Clear the flags on source and destination; we'll reinstate them after merging
2960 if bsd_chflags.lchflags(dest, 0) < 0:
2961 writemsg("!!! Couldn't clear flags on file being merged: \n ")
2962 # We might have an immutable flag on the parent dir; save and clear.
2963 pflags=bsd_chflags.lgetflags(os.path.dirname(dest))
2964 bsd_chflags.lchflags(os.path.dirname(dest), 0)
2966 # Don't bother checking the return value here; if it fails then the next line will catch it.
2967 bsd_chflags.lchflags(src, 0)
2969 if bsd_chflags.lhasproblems(src)>0 or (destexists and bsd_chflags.lhasproblems(dest)>0) or bsd_chflags.lhasproblems(os.path.dirname(dest))>0:
2970 # This is bad: we can't merge the file with these flags set.
2971 writemsg("!!! Can't merge file "+dest+" because of flags set\n")
2975 if stat.S_ISLNK(dstat[stat.ST_MODE]):
2979 except SystemExit, e:
2981 except Exception, e:
# Symlink source: recreate the link at dest (stripping the ${D} image
# prefix from its target when merging from an image dir).
2984 if stat.S_ISLNK(sstat[stat.ST_MODE]):
2986 target=os.readlink(src)
2987 if mysettings and mysettings["D"]:
2988 if target.find(mysettings["D"])==0:
2989 target=target[len(mysettings["D"]):]
2990 if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
2993 sid = selinux.get_lsid(src)
2994 selinux.secure_symlink(target,dest,sid)
2996 os.symlink(target,dest)
2997 lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
2999 # Restore the flags we saved before moving
3000 if bsd_chflags.lchflags(dest, sflags) < 0 or bsd_chflags.lchflags(os.path.dirname(dest), pflags) < 0:
3001 writemsg("!!! Couldn't restore flags ("+str(flags)+") on " + dest+":\n")
3002 writemsg("!!! %s\n" % str(e))
3004 return os.lstat(dest)[stat.ST_MTIME]
3005 except SystemExit, e:
3007 except Exception, e:
3008 print "!!! failed to properly create symlink:"
3009 print "!!!",dest,"->",target
# Same device (or selinux): a plain rename is atomic and sufficient.
3014 if sstat[stat.ST_DEV]==dstat[stat.ST_DEV] or selinux_enabled:
3017 ret=selinux.secure_rename(src,dest)
3019 ret=os.rename(src,dest)
3021 except SystemExit, e:
3023 except Exception, e:
3024 if e[0]!=errno.EXDEV:
3025 # Some random error.
3026 print "!!! Failed to move",src,"to",dest
3029 # Invalid cross-device-link 'bind' mounted or actually Cross-Device
# Cross-device regular file: copy to dest#new, then rename into place.
3032 if stat.S_ISREG(sstat[stat.ST_MODE]):
3033 try: # For safety copy then move it over.
3035 selinux.secure_copy(src,dest+"#new")
3036 selinux.secure_rename(dest+"#new",dest)
3038 shutil.copyfile(src,dest+"#new")
3039 os.rename(dest+"#new",dest)
3041 except SystemExit, e:
3043 except Exception, e:
3044 print '!!! copy',src,'->',dest,'failed.'
3048 #we don't yet handle special, so we need to fall back to /bin/mv
3050 a=commands.getstatusoutput(MOVE_BINARY+" -c -f "+"'"+src+"' '"+dest+"'")
3052 a=commands.getstatusoutput(MOVE_BINARY+" -f "+"'"+src+"' '"+dest+"'")
3054 print "!!! Failed to move special file:"
3055 print "!!! '"+src+"' to '"+dest+"'"
3057 return None # failure
# Re-apply ownership and mode from the source stat data.
3060 if stat.S_ISLNK(sstat[stat.ST_MODE]):
3061 lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
3063 os.chown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
3064 os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
3066 except SystemExit, e:
3068 except Exception, e:
3069 print "!!! Failed to chown/chmod/unlink in movefile()"
3075 os.utime(dest,(newmtime,newmtime))
3077 os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
3078 newmtime=sstat[stat.ST_MTIME]
3081 # Restore the flags we saved before moving
3082 if bsd_chflags.lchflags(dest, sflags) < 0 or bsd_chflags.lchflags(os.path.dirname(dest), pflags) < 0:
3083 writemsg("!!! Couldn't restore flags ("+str(sflags)+") on " + dest+":\n")
def merge(mycat,mypkg,pkgloc,infloc,myroot,mysettings,myebuild=None,mytree=None):
	"""Merge a built package image into *myroot*.

	Thin convenience wrapper: constructs a dblink for the package and
	delegates to its merge() method, returning its result."""
	link = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree)
	return link.merge(pkgloc, infloc, myroot, myebuild)
# Unmerge an installed package via a vartree dblink.
# NOTE(review): fragmentary listing -- original line 3094 is missing
# between the two visible body lines.
3092 def unmerge(cat,pkg,myroot,mysettings,mytrimworld=1):
3093 mylink=dblink(cat,pkg,myroot,mysettings,treetype="vartree")
3095 mylink.unmerge(trimworld=mytrimworld,cleanup=1)
# Validate a dependency atom string (operator + category/package[-version]).
# NOTE(review): fragmentary listing -- the return statements and several
# branches are missing between the visible lines.
3098 def isvalidatom(atom):
3099 mycpv_cps = catpkgsplit(dep_getcpv(atom))
3100 operator = get_operator(atom)
# A trailing '*' is only meaningful with '=' -- not with < or > operators.
3102 if operator[0] in "<>" and atom[-1] == "*":
3104 if mycpv_cps and mycpv_cps[0] != "null":
3108 # >=cat/pkg or >=pkg-1.0 (no category)
3114 if (len(string.split(atom, '/'))==2):
# Test whether mypkg is a bare package name (no version suffix).
# NOTE(review): fragmentary listing -- the rest of the body (original
# lines 3122+) is missing from this view.
3120 def isjustname(mypkg):
3121 myparts=string.split(mypkg,'-')
# Test whether mypkg names a specific package (version and/or category).
# NOTE(review): fragmentary listing -- lines are missing between the
# visible statements; 'iscache' appears to be a memoization lookup, with
# the cache-store path not visible here.
3128 def isspecific(mypkg):
3129 "now supports packages with no category"
3131 return iscache[mypkg]
3132 except SystemExit, e:
3136 mysplit=string.split(mypkg,"/")
3137 if not isjustname(mysplit[-1]):
def getCPFromCPV(mycpv):
	"""Return only the cp (category/package) portion of *mycpv*.

	Delegates splitting to pkgsplit() and keeps the first element."""
	split_result = pkgsplit(mycpv)
	return split_result[0]
# Expand virtual/* atoms in a (possibly nested) dependency list using
# mysettings.virtuals; a virtual with several providers becomes an
# alternative list.
# NOTE(review): fragmentary listing -- loop headers, the mykey derivation,
# and the return are missing between the visible lines.
3148 def dep_virtual(mysplit, mysettings):
3149 "Does virtual dependency conversion"
# Nested lists are converted recursively.
3152 if type(x)==types.ListType:
3153 newsplit.append(dep_virtual(x, mysettings))
3156 if mysettings.virtuals.has_key(mykey):
# Exactly one provider: substitute it directly into the atom.
3157 if len(mysettings.virtuals[mykey])==1:
3158 a=string.replace(x, mykey, mysettings.virtuals[mykey][0])
3161 # blocker needs "and" not "or(||)".
3165 for y in mysettings.virtuals[mykey]:
3166 a.append(string.replace(x, mykey, y))
# Evaluate a reduced dependency list of 0/1 flags and nested lists;
# a leading "||" marks an or-group.
# NOTE(review): fragmentary listing -- the returns and the base case are
# missing between the visible lines.
3172 def dep_eval(deplist):
3175 if deplist[0]=="||":
3176 #or list; we just need one "1"
3177 for x in deplist[1:]:
3178 if type(x)==types.ListType:
3183 #XXX: unless there's no available atoms in the list
3184 #in which case we need to assume that everything is
3185 #okay as some ebuilds are relying on an old bug.
3186 if len(deplist) == 1:
3191 if type(x)==types.ListType:
# NOTE(review): fragmentary listing -- the embedded original line numbers
# jump, so statements (returns, accumulator initializations such as
# 'unresolved' and 'available_pkgs') are missing between the visible lines.
3198 def dep_zapdeps(unreduced,reduced,myroot,use_binaries=0):
3199 """Takes an unreduced and reduced deplist and removes satisfied dependencies.
3200 Returned deplist contains steps that must be taken to satisfy dependencies."""
3201 writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
# Everything already satisfied: nothing to do.
3202 if not reduced or unreduced == ["||"] or dep_eval(reduced):
# AND-level list: recurse into sublists, keep unsatisfied plain atoms.
3205 if unreduced[0] != "||":
3207 for (dep, satisfied) in zip(unreduced, reduced):
3208 if isinstance(dep, list):
3209 unresolved += dep_zapdeps(dep, satisfied, myroot, use_binaries=use_binaries)
3211 unresolved.append(dep)
3214 # We're at a ( || atom ... ) type level
3215 deps = unreduced[1:]
3216 satisfieds = reduced[1:]
# First preference: an alternative whose atoms are all already installed.
3219 for (dep, satisfied) in zip(deps, satisfieds):
3220 if isinstance(dep, list):
3221 atoms = dep_zapdeps(dep, satisfied, myroot, use_binaries=use_binaries)
3224 missing_atoms = [atom for atom in atoms if not db[myroot]["vartree"].dbapi.match(atom)]
3226 if not missing_atoms:
3227 if isinstance(dep, list):
3228 return atoms # Sorted out by the recursed dep_zapdeps call
3230 target = dep_getkey(dep) # An installed package that's not yet in the graph
# Second preference: an alternative available as binaries or visible ebuilds.
3235 missing_atoms = [atom for atom in atoms if not db[myroot]["bintree"].dbapi.match(atom)]
3237 missing_atoms = [atom for atom in atoms if not db[myroot]["porttree"].dbapi.xmatch("match-visible", atom)]
3238 if not missing_atoms:
3239 target = (dep, satisfied)
# Fallback: take the first alternative.
3242 if isinstance(deps[0], list):
3243 return dep_zapdeps(deps[0], satisfieds[0], myroot, use_binaries=use_binaries)
3247 if isinstance(target, tuple): # Nothing matching installed
3248 if isinstance(target[0], list): # ... and the first available was a sublist
3249 return dep_zapdeps(target[0], target[1], myroot, use_binaries=use_binaries)
3250 else: # ... and the first available was a single atom
3251 target = dep_getkey(target[0])
3253 relevant_atoms = [dep for dep in deps if not isinstance(dep, list) and dep_getkey(dep) == target]
# Pick the best (highest) matching package among the relevant atoms.
3256 for atom in relevant_atoms:
3258 pkg_list = db["/"]["bintree"].dbapi.match(atom)
3260 pkg_list = db["/"]["porttree"].dbapi.xmatch("match-visible", atom)
3263 pkg = best(pkg_list)
3264 available_pkgs[pkg] = atom
3266 if not available_pkgs:
3267 return [relevant_atoms[0]] # All masked
3269 target_pkg = best(available_pkgs.keys())
3270 suitable_atom = available_pkgs[target_pkg]
3271 return [suitable_atom]
3275 def dep_getkey(mydep):
3284 if mydep[:2] in [ ">=", "<=" ]:
3286 elif mydep[:1] in "=<>~":
3288 if isspecific(mydep):
3289 mysplit=catpkgsplit(mydep)
3292 return mysplit[0]+"/"+mysplit[1]
3296 def dep_getcpv(mydep):
3305 if mydep[:2] in [ ">=", "<=" ]:
3307 elif mydep[:1] in "=<>~":
3311 def dep_transform(mydep,oldkey,newkey):
3322 if mydep[:2] in [ ">=", "<=" ]:
3325 elif mydep[:1] in "=<>~!":
3329 return prefix+newkey+postfix
3333 def dep_expand(mydep,mydb=None,use_cache=1):
3343 if mydep[:2] in [ ">=", "<=" ]:
3346 elif mydep[:1] in "=<>~!":
3349 return prefix+cpv_expand(mydep,mydb=mydb,use_cache=use_cache)+postfix
3351 def dep_check(depstring,mydbapi,mysettings,use="yes",mode=None,myuse=None,use_cache=1,use_binaries=0,myroot="/"):
3352 """Takes a depend string and parses the condition."""
3354 #check_config_instance(mysettings)
3359 myusesplit = string.split(mysettings["USE"])
3362 # We've been given useflags to use.
3363 #print "USE FLAGS PASSED IN."
3365 #if "bindist" in myusesplit:
3366 # print "BINDIST is set!"
3368 # print "BINDIST NOT set."
3370 #we are being run by autouse(), don't consult USE vars yet.
3371 # WE ALSO CANNOT USE SETTINGS
3374 #convert parenthesis to sublists
3375 mysplit = portage_dep.paren_reduce(depstring)
3378 # XXX: use="all" is only used by repoman. Why would repoman checks want
3379 # profile-masked USE flags to be enabled?
3381 # mymasks=archlist[:]
3383 mymasks=mysettings.usemask+archlist[:]
3385 while mysettings["ARCH"] in mymasks:
3386 del mymasks[mymasks.index(mysettings["ARCH"])]
3387 mysplit = portage_dep.use_reduce(mysplit,uselist=myusesplit,masklist=mymasks,matchall=(use=="all"),excludeall=[mysettings["ARCH"]])
3389 mysplit = portage_dep.use_reduce(mysplit,uselist=myusesplit,matchall=(use=="all"))
3391 # Do the || conversions
3392 mysplit=portage_dep.dep_opconvert(mysplit)
3394 #convert virtual dependencies to normal packages.
3395 mysplit=dep_virtual(mysplit, mysettings)
3396 #if mysplit==None, then we have a parse error (paren mismatch or misplaced ||)
3397 #up until here, we haven't needed to look at the database tree
3400 return [0,"Parse Error (parentheses mismatch?)"]
3402 #dependencies were reduced to nothing
3405 mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
3407 return [0,"Invalid token"]
3409 writemsg("\n\n\n", 1)
3410 writemsg("mysplit: %s\n" % (mysplit), 1)
3411 writemsg("mysplit2: %s\n" % (mysplit2), 1)
3412 myeval=dep_eval(mysplit2)
3413 writemsg("myeval: %s\n" % (myeval), 1)
3418 myzaps = dep_zapdeps(mysplit,mysplit2,myroot,use_binaries=use_binaries)
3419 mylist = flatten(myzaps)
3420 writemsg("myzaps: %s\n" % (myzaps), 1)
3421 writemsg("mylist: %s\n" % (mylist), 1)
3426 writemsg("mydict: %s\n" % (mydict), 1)
3427 return [1,mydict.keys()]
3429 def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
3430 "Reduces the deplist to ones and zeros"
3432 deplist=mydeplist[:]
3433 while mypos<len(deplist):
3434 if type(deplist[mypos])==types.ListType:
3436 deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
3437 elif deplist[mypos]=="||":
3440 mykey = dep_getkey(deplist[mypos])
3441 if mysettings and mysettings.pprovideddict.has_key(mykey) and \
3442 match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
3446 mydep=mydbapi.xmatch(mode,deplist[mypos])
3448 mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
3451 if deplist[mypos][0]=="!":
3455 #encountered invalid string
3460 def cpv_getkey(mycpv):
3461 myslash=mycpv.split("/")
3462 mysplit=pkgsplit(myslash[-1])
3465 return myslash[0]+"/"+mysplit[0]
3471 def key_expand(mykey,mydb=None,use_cache=1):
3472 mysplit=mykey.split("/")
3474 if mydb and type(mydb)==types.InstanceType:
3475 for x in settings.categories:
3476 if mydb.cp_list(x+"/"+mykey,use_cache=use_cache):
3478 if virts_p.has_key(mykey):
3479 return(virts_p[mykey][0])
3480 return "null/"+mykey
3482 if type(mydb)==types.InstanceType:
3483 if (not mydb.cp_list(mykey,use_cache=use_cache)) and virts and virts.has_key(mykey):
3484 return virts[mykey][0]
3487 def cpv_expand(mycpv,mydb=None,use_cache=1):
3488 """Given a string (packagename or virtual) expand it into a valid
3489 cat/package string. Virtuals use the mydb to determine which provided
3490 virtual is a valid choice and defaults to the first element when there
3491 are no installed/available candidates."""
3492 myslash=mycpv.split("/")
3493 mysplit=pkgsplit(myslash[-1])
3495 # this is illegal case.
3498 elif len(myslash)==2:
3500 mykey=myslash[0]+"/"+mysplit[0]
3504 writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
3505 if type(mydb)==types.InstanceType:
3506 if (not mydb.cp_list(mykey,use_cache=use_cache)) and virts and virts.has_key(mykey):
3507 writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1)
3508 mykey_orig = mykey[:]
3509 for vkey in virts[mykey]:
3510 if mydb.cp_list(vkey,use_cache=use_cache):
3512 writemsg("virts chosen: %s\n" % (mykey), 1)
3514 if mykey == mykey_orig:
3515 mykey=virts[mykey][0]
3516 writemsg("virts defaulted: %s\n" % (mykey), 1)
3517 #we only perform virtual expansion if we are passed a dbapi
3519 #specific cpv, no category, ie. "foo-1.0"
3528 for x in settings.categories:
3529 if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
3530 matches.append(x+"/"+myp)
3531 if (len(matches)>1):
3532 raise ValueError, matches
3536 if not mykey and type(mydb)!=types.ListType:
3537 if virts_p.has_key(myp):
3538 mykey=virts_p[myp][0]
3539 #again, we only perform virtual expansion if we have a dbapi (not a list)
3543 if mysplit[2]=="r0":
3544 return mykey+"-"+mysplit[1]
3546 return mykey+"-"+mysplit[1]+"-"+mysplit[2]
3550 def getmaskingreason(mycpv):
3551 from portage_util import grablines
3553 mysplit = catpkgsplit(mycpv)
3555 raise ValueError("invalid CPV: %s" % mycpv)
3556 if not portdb.cpv_exists(mycpv):
3557 raise KeyError("CPV %s does not exist" % mycpv)
3558 mycp=mysplit[0]+"/"+mysplit[1]
3560 pmasklines = grablines(settings["PORTDIR"]+"/profiles/package.mask", recursive=1)
3561 if settings.pmaskdict.has_key(mycp):
3562 for x in settings.pmaskdict[mycp]:
3563 if mycpv in portdb.xmatch("match-all", x):
3567 while i < len(pmasklines):
3568 l = pmasklines[i].strip()
3578 def getmaskingstatus(mycpv):
3580 mysplit = catpkgsplit(mycpv)
3582 raise ValueError("invalid CPV: %s" % mycpv)
3583 if not portdb.cpv_exists(mycpv):
3584 raise KeyError("CPV %s does not exist" % mycpv)
3585 mycp=mysplit[0]+"/"+mysplit[1]
3590 revmaskdict=settings.prevmaskdict
3591 if revmaskdict.has_key(mycp):
3592 for x in revmaskdict[mycp]:
3597 if not match_to_list(mycpv, [myatom]):
3598 rValue.append("profile")
3601 # package.mask checking
3602 maskdict=settings.pmaskdict
3603 unmaskdict=settings.punmaskdict
3604 if maskdict.has_key(mycp):
3605 for x in maskdict[mycp]:
3606 if mycpv in portdb.xmatch("match-all", x):
3608 if unmaskdict.has_key(mycp):
3609 for z in unmaskdict[mycp]:
3610 if mycpv in portdb.xmatch("match-all",z):
3614 rValue.append("package.mask")
3617 mygroups, eapi = portdb.aux_get(mycpv, ["KEYWORDS", "EAPI"])
3618 if not eapi_is_supported(eapi):
3619 return ["required EAPI %s, supported EAPI %s" % (eapi, portage_const.EAPI)]
3620 mygroups = mygroups.split()
3622 myarch = settings["ARCH"]
3623 pkgdict = settings.pkeywordsdict
3625 cp = dep_getkey(mycpv)
3626 if pkgdict.has_key(cp):
3627 matches = match_to_list(mycpv, pkgdict[cp].keys())
3628 for match in matches:
3629 pgroups.extend(pkgdict[cp][match])
3633 for keyword in pgroups:
3634 if keyword in mygroups:
3643 elif gp=="-"+myarch:
3646 elif gp=="~"+myarch:
3651 rValue.append(kmask+" keyword")
3654 def fixdbentries(update_iter, dbdir):
3655 """Performs update commands which result in search and replace operations
3656 for each of the files in dbdir (excluding CONTENTS and environment.bz2).
3657 Returns True when actual modifications are necessary and False otherwise."""
3659 for myfile in [f for f in os.listdir(dbdir) if f not in ("CONTENTS", "environment.bz2")]:
3660 file_path = os.path.join(dbdir, myfile)
3661 f = open(file_path, "r")
3662 mycontent = f.read()
3664 orig_content = mycontent
3665 for update_cmd in update_iter:
3666 if update_cmd[0] == "move":
3667 old_value, new_value = update_cmd[1], update_cmd[2]
3668 if not mycontent.count(old_value):
3670 old_value = re.escape(old_value);
3671 mycontent = re.sub(old_value+"$", new_value, mycontent)
3672 mycontent = re.sub(old_value+"(\\s)", new_value+"\\1", mycontent)
3673 mycontent = re.sub(old_value+"(-[^a-zA-Z])", new_value+"\\1", mycontent)
3674 mycontent = re.sub(old_value+"([^a-zA-Z0-9-])", new_value+"\\1", mycontent)
3675 if mycontent is not orig_content:
3676 write_atomic(file_path, mycontent)
3681 def __init__(self,virtual,clone=None):
3683 self.tree=clone.tree.copy()
3684 self.populated=clone.populated
3685 self.virtual=clone.virtual
3690 self.virtual=virtual
3693 def resolve_key(self,mykey):
3694 return key_expand(mykey,mydb=self.dbapi)
3696 def dep_nomatch(self,mypkgdep):
3697 mykey=dep_getkey(mypkgdep)
3698 nolist=self.dbapi.cp_list(mykey)
3699 mymatch=self.dbapi.match(mypkgdep)
3707 def depcheck(self,mycheck,use="yes",myusesplit=None):
3708 return dep_check(mycheck,self.dbapi,use=use,myuse=myusesplit)
3711 "populates the tree with values"
3715 def best(mymatches):
3716 "accepts None arguments; assumes matches are valid."
3720 if not len(mymatches):
3722 bestmatch=mymatches[0]
3723 p2=catpkgsplit(bestmatch)[1:]
3724 for x in mymatches[1:]:
3725 p1=catpkgsplit(x)[1:]
3728 p2=catpkgsplit(bestmatch)[1:]
3731 def match_to_list(mypkg,mylist):
3733 Searches list for entries that matches the package.
3737 if match_from_list(x,[mypkg]):
3738 if x not in matches:
3742 def best_match_to_list(mypkg,mylist):
3744 Returns the most specific entry (assumed to be the longest one)
3745 that matches the package given.
3747 # XXX Assumption is wrong sometimes.
3750 for x in match_to_list(mypkg,mylist):
3756 def catsplit(mydep):
3757 return mydep.split("/", 1)
3759 def get_operator(mydep):
3761 returns '~', '=', '>', '<', '=*', '>=', or '<='
3765 elif mydep[0] == "=":
3766 if mydep[-1] == "*":
3770 elif mydep[0] in "><":
3771 if len(mydep) > 1 and mydep[1] == "=":
3772 operator = mydep[0:2]
3781 def match_from_list(mydep,candidate_list):
3785 mycpv = dep_getcpv(mydep)
3786 mycpv_cps = catpkgsplit(mycpv) # Can be None if not specific
3789 cat,pkg = catsplit(mycpv)
3793 cat,pkg,ver,rev = mycpv_cps
3795 raise KeyError, "Specific key requires an operator (%s) (try adding an '=')" % (mydep)
3798 operator = get_operator(mydep)
3800 writemsg("!!! Invalid atom: %s\n" % mydep)
3807 if operator == None:
3808 for x in candidate_list:
3813 elif xs[0] != mycpv:
3817 elif operator == "=": # Exact match
3818 if mycpv in candidate_list:
3821 elif operator == "=*": # glob match
3822 # The old verion ignored _tag suffixes... This one doesn't.
3823 for x in candidate_list:
3824 if x[0:len(mycpv)] == mycpv:
3827 elif operator == "~": # version, any revision, match
3828 for x in candidate_list:
3830 if xs[0:2] != mycpv_cps[0:2]:
3836 elif operator in [">", ">=", "<", "<="]:
3837 for x in candidate_list:
3839 result = pkgcmp(pkgsplit(x), [cat+"/"+pkg,ver,rev])
3840 except SystemExit, e:
3843 writemsg("\nInvalid package name: %s\n" % x)
3847 elif operator == ">":
3850 elif operator == ">=":
3853 elif operator == "<":
3856 elif operator == "<=":
3860 raise KeyError, "Unknown operator: %s" % mydep
3862 raise KeyError, "Unknown operator: %s" % mydep
3868 def match_from_list_original(mydep,mylist):
3870 Reduces the list down to those that fit the dep
3872 mycpv=dep_getcpv(mydep)
3873 if isspecific(mycpv):
3874 cp_key=catpkgsplit(mycpv)
3879 #Otherwise, this is a special call; we can only select out of the ebuilds specified in the specified mylist
3884 #example: "=sys-apps/foo-1.0*"
3886 #now, we grab the version of our dependency...
3887 mynewsplit=string.split(cp_key[2],'.')
3889 mynewsplit[-1]=`int(mynewsplit[-1])+1`
3890 #and increment the last digit of the version by one.
3891 #We don't need to worry about _pre and friends because they're not supported with '*' deps.
3892 new_v=string.join(mynewsplit,".")+"_alpha0"
3893 #new_v will be used later in the code when we do our comparisons using pkgcmp()
3894 except SystemExit, e:
3901 cmp1[1]=cmp1[1]+"_alpha0"
3902 cmp2=[cp_key[1],new_v,"r0"]
3906 #hrm, invalid entry. Continue.
3908 #skip entries in our list that do not have matching categories
3909 if cp_key[0]!=cp_x[0]:
3911 # ok, categories match. Continue to next step.
3912 if ((pkgcmp(cp_x[1:],cmp1)>=0) and (pkgcmp(cp_x[1:],cmp2)<0)):
3913 # entry is >= the version in specified in our dependency, and <= the version in our dep + 1; add it:
3917 # Does our stripped key appear literally in our list? If so, we have a match; if not, we don't.
3922 elif (mydep[0]==">") or (mydep[0]=="<"):
3925 if (len(mydep)>1) and (mydep[1]=="="):
3933 #invalid entry; continue.
3935 if cp_key[0]!=cp_x[0]:
3937 if eval("pkgcmp(cp_x[1:],cp_key[1:])"+cmpstr+"0"):
3947 #invalid entry; continue
3949 if cp_key[0]!=cp_x[0]:
3951 if cp_key[2]!=cp_x[2]:
3952 #if version doesn't match, skip it
3954 myint = int(cp_x[3][1:])
3965 #we check ! deps in emerge itself, so always returning [] is correct.
3967 cp_key=mycpv.split("/")
3971 #invalid entry; continue
3973 if cp_key[0]!=cp_x[0]:
3975 if cp_key[1]!=cp_x[1]:
3984 def __init__(self,root="/",virtual=None,clone=None):
3987 self.root=clone.root
3988 self.portroot=clone.portroot
3989 self.pkglines=clone.pkglines
3992 self.portroot=settings["PORTDIR"]
3993 self.virtual=virtual
3996 def dep_bestmatch(self,mydep):
3997 "compatibility method"
3998 mymatch=self.dbapi.xmatch("bestmatch-visible",mydep)
4003 def dep_match(self,mydep):
4004 "compatibility method"
4005 mymatch=self.dbapi.xmatch("match-visible",mydep)
4010 def exists_specific(self,cpv):
4011 return self.dbapi.cpv_exists(cpv)
4013 def getallnodes(self):
4014 """new behavior: these are all *unmasked* nodes. There may or may not be available
4015 masked package for nodes in this nodes list."""
4016 return self.dbapi.cp_all()
4018 def getname(self,pkgname):
4019 "returns file location for this particular package (DEPRECATED)"
4022 mysplit=string.split(pkgname,"/")
4023 psplit=pkgsplit(mysplit[1])
4024 return self.portroot+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
4026 def resolve_specific(self,myspec):
4027 cps=catpkgsplit(myspec)
4030 mykey=key_expand(cps[0]+"/"+cps[1],mydb=self.dbapi)
4031 mykey=mykey+"-"+cps[2]
4033 mykey=mykey+"-"+cps[3]
4036 def depcheck(self,mycheck,use="yes",myusesplit=None):
4037 return dep_check(mycheck,self.dbapi,use=use,myuse=myusesplit)
4039 def getslot(self,mycatpkg):
4040 "Get a slot for a catpkg; assume it exists."
4043 myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
4044 except SystemExit, e:
4046 except Exception, e:
4055 def close_caches(self):
4058 def cp_list(self,cp,use_cache=1):
4061 def aux_get(self,mycpv,mylist):
4062 "stub code for returning auxiliary db information, such as SLOT, DEPEND, etc."
4063 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
4064 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or [] if mycpv not found'
4065 raise NotImplementedError
4067 def match(self,origdep,use_cache=1):
4068 mydep=dep_expand(origdep,mydb=self)
4069 mykey=dep_getkey(mydep)
4070 mycat=mykey.split("/")[0]
4071 return match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
4073 def match2(self,mydep,mykey,mylist):
4074 writemsg("DEPRECATED: dbapi.match2\n")
4075 match_from_list(mydep,mylist)
4077 def counter_tick(self,myroot,mycpv=None):
4078 return self.counter_tick_core(myroot,incrementing=1,mycpv=mycpv)
4080 def get_counter_tick_core(self,myroot,mycpv=None):
4081 return self.counter_tick_core(myroot,incrementing=0,mycpv=mycpv)+1
4083 def counter_tick_core(self,myroot,incrementing=1,mycpv=None):
4084 "This method will grab the next COUNTER value and record it back to the global file. Returns new counter value."
4085 cpath=myroot+"var/cache/edb/counter"
4089 mysplit = pkgsplit(mycpv)
4090 for x in self.match(mysplit[0],use_cache=0):
4094 old_counter = long(self.aux_get(x,["COUNTER"])[0])
4095 writemsg("COUNTER '%d' '%s'\n" % (old_counter, x),1)
4096 except SystemExit, e:
4100 writemsg("!!! BAD COUNTER in '%s'\n" % (x))
4101 if old_counter > min_counter:
4102 min_counter = old_counter
4104 # We write our new counter value to a new file that gets moved into
4105 # place to avoid filesystem corruption.
4106 if os.path.exists(cpath):
4107 cfile=open(cpath, "r")
4109 counter=long(cfile.readline())
4110 except (ValueError,OverflowError):
4112 counter=long(commands.getoutput("for FILE in $(find /"+VDB_PATH+" -type f -name COUNTER); do echo $(<${FILE}); done | sort -n | tail -n1 | tr -d '\n'"))
4113 writemsg("!!! COUNTER was corrupted; resetting to value of %d\n" % counter)
4115 except (ValueError,OverflowError):
4116 writemsg("!!! COUNTER data is corrupt in pkg db. The values need to be\n")
4117 writemsg("!!! corrected/normalized so that portage can operate properly.\n")
4118 writemsg("!!! A simple solution is not yet available so try #gentoo on IRC.\n")
4123 counter=long(commands.getoutput("for FILE in $(find /"+VDB_PATH+" -type f -name COUNTER); do echo $(<${FILE}); done | sort -n | tail -n1 | tr -d '\n'"))
4124 writemsg("!!! Global counter missing. Regenerated from counter files to: %s\n" % counter)
4125 except SystemExit, e:
4128 writemsg("!!! Initializing global counter.\n")
4132 if counter < min_counter:
4133 counter = min_counter+1000
4136 if incrementing or changed:
4140 # update new global counter file
4141 write_atomic(cpath, str(counter))
4144 def invalidentry(self, mypath):
4145 if re.search("portage_lockfile$",mypath):
4146 if not os.environ.has_key("PORTAGE_MASTER_PID"):
4147 writemsg("Lockfile removed: %s\n" % mypath, 1)
4148 portage_locks.unlockfile((mypath,None,None))
4150 # Nothing we can do about it. We're probably sandboxed.
4152 elif re.search(".*/-MERGING-(.*)",mypath):
4153 if os.path.exists(mypath):
4154 writemsg(red("INCOMPLETE MERGE:")+" "+mypath+"\n")
4156 writemsg("!!! Invalid db entry: %s\n" % mypath)
4160 class fakedbapi(dbapi):
4161 "This is a dbapi to use for the emptytree function. It's empty, but things can be added to it."
4166 def cpv_exists(self,mycpv):
4167 return self.cpvdict.has_key(mycpv)
4169 def cp_list(self,mycp,use_cache=1):
4170 if not self.cpdict.has_key(mycp):
4173 return self.cpdict[mycp]
4177 for x in self.cpdict.keys():
4178 returnme.extend(self.cpdict[x])
4181 def cpv_inject(self,mycpv):
4182 """Adds a cpv from the list of available packages."""
4183 mycp=cpv_getkey(mycpv)
4184 self.cpvdict[mycpv]=1
4185 if not self.cpdict.has_key(mycp):
4186 self.cpdict[mycp]=[]
4187 if not mycpv in self.cpdict[mycp]:
4188 self.cpdict[mycp].append(mycpv)
4190 #def cpv_virtual(self,oldcpv,newcpv):
4191 # """Maps a cpv to the list of available packages."""
4192 # mycp=cpv_getkey(newcpv)
4193 # self.cpvdict[newcpv]=1
4194 # if not self.virtdict.has_key(mycp):
4195 # self.virtdict[mycp]=[]
4196 # if not mycpv in self.virtdict[mycp]:
4197 # self.virtdict[mycp].append(oldcpv)
4198 # cpv_remove(oldcpv)
4200 def cpv_remove(self,mycpv):
4201 """Removes a cpv from the list of available packages."""
4202 mycp=cpv_getkey(mycpv)
4203 if self.cpvdict.has_key(mycpv):
4204 del self.cpvdict[mycpv]
4205 if not self.cpdict.has_key(mycp):
4207 while mycpv in self.cpdict[mycp]:
4208 del self.cpdict[mycp][self.cpdict[mycp].index(mycpv)]
4209 if not len(self.cpdict[mycp]):
4210 del self.cpdict[mycp]
4212 class bindbapi(fakedbapi):
4213 def __init__(self,mybintree=None):
4214 self.bintree = mybintree
4218 def aux_get(self,mycpv,wants):
4219 mysplit = string.split(mycpv,"/")
4221 tbz2name = mysplit[1]+".tbz2"
4222 if self.bintree and not self.bintree.isremote(mycpv):
4223 tbz2 = xpak.tbz2(self.bintree.getname(mycpv))
4225 if self.bintree and self.bintree.isremote(mycpv):
4226 # We use the cache for remote packages
4227 if self.bintree.remotepkgs[tbz2name].has_key(x):
4228 mylist.append(self.bintree.remotepkgs[tbz2name][x][:]) # [:] Copy String
4232 myval = tbz2.getfile(x)
4236 myval = string.join(myval.split(),' ')
4237 mylist.append(myval)
4239 idx = wants.index("EAPI")
4246 class vardbapi(dbapi):
4247 def __init__(self,root,categories=None):
4249 #cache for category directory mtimes
4250 self.mtdircache = {}
4251 #cache for dependency checks
4252 self.matchcache = {}
4253 #cache for cp_list results
4255 self.blockers = None
4256 self.categories = copy.deepcopy(categories)
4258 def cpv_exists(self,mykey):
4259 "Tells us whether an actual ebuild exists on disk (no masking)"
4260 return os.path.exists(self.root+VDB_PATH+"/"+mykey)
4262 def cpv_counter(self,mycpv):
4263 "This method will grab the COUNTER. Returns a counter value."
4264 cdir=self.root+VDB_PATH+"/"+mycpv
4265 cpath=self.root+VDB_PATH+"/"+mycpv+"/COUNTER"
4267 # We write our new counter value to a new file that gets moved into
4268 # place to avoid filesystem corruption on XFS (unexpected reboot.)
4270 if os.path.exists(cpath):
4271 cfile=open(cpath, "r")
4273 counter=long(cfile.readline())
4275 print "portage: COUNTER for",mycpv,"was corrupted; resetting to value of 0"
4279 elif os.path.exists(cdir):
4280 mys = pkgsplit(mycpv)
4281 myl = self.match(mys[0],use_cache=0)
4285 # Only one package... Counter doesn't matter.
4286 write_atomic(cpath, "1")
4288 except SystemExit, e:
4290 except Exception, e:
4291 writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n")
4292 writemsg("!!! Please run /usr/lib/portage/bin/fix-db.pl or\n")
4293 writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n")
4294 writemsg("!!! unmerge this exact version.\n")
4295 writemsg("!!! %s\n" % e)
4298 writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n")
4299 writemsg("!!! Please run /usr/lib/portage/bin/fix-db.pl or\n")
4300 writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n")
4301 writemsg("!!! remerge the package.\n")
4306 # update new global counter file
4307 write_atomic(cpath, str(counter))
4310 def cpv_inject(self,mycpv):
4311 "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
4312 os.makedirs(self.root+VDB_PATH+"/"+mycpv)
4313 counter=db[self.root]["vartree"].dbapi.counter_tick(self.root,mycpv=mycpv)
4314 # write local package counter so that emerge clean does the right thing
4315 write_atomic(os.path.join(self.root, VDB_PATH, mycpv, "COUNTER"), str(counter))
4317 def isInjected(self,mycpv):
4318 if self.cpv_exists(mycpv):
4319 if os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/INJECTED"):
4321 if not os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/CONTENTS"):
4325 def move_ent(self,mylist):
4329 for cp in [origcp,newcp]:
4330 if not (isvalidatom(cp) and isjustname(cp)):
4331 raise portage_exception.InvalidPackageName(cp)
4332 origmatches=self.match(origcp,use_cache=0)
4335 for mycpv in origmatches:
4336 mycpsplit=catpkgsplit(mycpv)
4337 mynewcpv=newcp+"-"+mycpsplit[2]
4338 mynewcat=newcp.split("/")[0]
4339 if mycpsplit[3]!="r0":
4340 mynewcpv += "-"+mycpsplit[3]
4341 mycpsplit_new = catpkgsplit(mynewcpv)
4342 origpath=self.root+VDB_PATH+"/"+mycpv
4343 if not os.path.exists(origpath):
4346 if not os.path.exists(self.root+VDB_PATH+"/"+mynewcat):
4347 #create the directory
4348 os.makedirs(self.root+VDB_PATH+"/"+mynewcat)
4349 newpath=self.root+VDB_PATH+"/"+mynewcpv
4350 if os.path.exists(newpath):
4351 #dest already exists; keep this puppy where it is.
4353 spawn(MOVE_BINARY+" "+origpath+" "+newpath,settings, free=1)
4355 # We need to rename the ebuild now.
4356 old_eb_path = newpath+"/"+mycpsplit[1] +"-"+mycpsplit[2]
4357 new_eb_path = newpath+"/"+mycpsplit_new[1]+"-"+mycpsplit[2]
4358 if mycpsplit[3] != "r0":
4359 old_eb_path += "-"+mycpsplit[3]
4360 new_eb_path += "-"+mycpsplit[3]
4361 if os.path.exists(old_eb_path+".ebuild"):
4362 os.rename(old_eb_path+".ebuild", new_eb_path+".ebuild")
4364 write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
4365 fixdbentries([mylist], newpath)
4367 def update_ents(self, update_iter):
4368 """Run fixdbentries on all installed packages (time consuming). Like
4369 fixpackages, this should be run from a helper script and display
4370 a progress indicator."""
4371 dbdir = os.path.join(self.root, VDB_PATH)
4372 for catdir in listdir(dbdir):
4373 catdir = dbdir+"/"+catdir
4374 if os.path.isdir(catdir):
4375 for pkgdir in listdir(catdir):
4376 pkgdir = catdir+"/"+pkgdir
4377 if os.path.isdir(pkgdir):
4378 fixdbentries(update_iter, pkgdir)
4380 def move_slot_ent(self,mylist):
4385 if not isvalidatom(pkg):
4386 raise portage_exception.InvalidAtom(pkg)
4388 origmatches=self.match(pkg,use_cache=0)
4392 for mycpv in origmatches:
4393 origpath=self.root+VDB_PATH+"/"+mycpv
4394 if not os.path.exists(origpath):
4397 slot=grabfile(origpath+"/SLOT");
4401 if (slot[0]!=origslot):
4405 write_atomic(os.path.join(origpath, "SLOT"), newslot+"\n")
4407 def cp_list(self,mycp,use_cache=1):
4408 mysplit=mycp.split("/")
4409 if mysplit[0] == '*':
4410 mysplit[0] = mysplit[0][1:]
4412 mystat=os.stat(self.root+VDB_PATH+"/"+mysplit[0])[stat.ST_MTIME]
4415 if use_cache and self.cpcache.has_key(mycp):
4416 cpc=self.cpcache[mycp]
4419 list=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
4426 #writemsg(red("INCOMPLETE MERGE:")+str(x[len("-MERGING-"):])+"\n")
4430 self.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
4432 if len(mysplit) > 1:
4433 if ps[0]==mysplit[1]:
4434 returnme.append(mysplit[0]+"/"+x)
4436 self.cpcache[mycp]=[mystat,returnme]
4437 elif self.cpcache.has_key(mycp):
4438 del self.cpcache[mycp]
4441 def cpv_all(self,use_cache=1):
4443 basepath = self.root+VDB_PATH+"/"
4445 mycats = self.categories
4447 # XXX: CIRCULAR DEP! This helps backwards compat. --NJ (10 Sept 2004)
4448 mycats = settings.categories
4451 for y in listdir(basepath+x,EmptyOnError=1):
4453 # -MERGING- should never be a cpv, nor should files.
4454 if os.path.isdir(basepath+subpath) and (pkgsplit(y) is not None):
4455 returnme += [subpath]
4458 def cp_all(self,use_cache=1):
4459 mylist = self.cpv_all(use_cache=use_cache)
4464 mysplit=catpkgsplit(y)
4466 self.invalidentry(self.root+VDB_PATH+"/"+y)
4468 d[mysplit[0]+"/"+mysplit[1]] = None
4471 def checkblockers(self,origdep):
4474 def match(self,origdep,use_cache=1):
4475 "caching match function"
4476 mydep=dep_expand(origdep,mydb=self,use_cache=use_cache)
4477 mykey=dep_getkey(mydep)
4478 mycat=mykey.split("/")[0]
4480 if self.matchcache.has_key(mycat):
4481 del self.mtdircache[mycat]
4482 del self.matchcache[mycat]
4483 return match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
4485 curmtime=os.stat(self.root+VDB_PATH+"/"+mycat)[stat.ST_MTIME]
4486 except SystemExit, e:
4491 if not self.matchcache.has_key(mycat) or not self.mtdircache[mycat]==curmtime:
4493 self.mtdircache[mycat]=curmtime
4494 self.matchcache[mycat]={}
4495 if not self.matchcache[mycat].has_key(mydep):
4496 mymatch=match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
4497 self.matchcache[mycat][mydep]=mymatch
4498 return self.matchcache[mycat][mydep][:]
4500 def findname(self, mycpv):
4501 return self.root+VDB_PATH+"/"+str(mycpv)+"/"+mycpv.split("/")[1]+".ebuild"
4503 def aux_get(self, mycpv, wants):
4507 myfn = self.root+VDB_PATH+"/"+str(mycpv)+"/"+str(x)
4508 if os.access(myfn,os.R_OK):
4509 myf = open(myfn, "r")
4512 myd = re.sub("[\n\r\t]+"," ",myd)
4513 myd = re.sub(" +"," ",myd)
4514 myd = string.strip(myd)
4519 idx = wants.index("EAPI")
4520 if not results[idx]:
4525 class vartree(packagetree):
4526 "this tree will scan a var/db/pkg database located at root (passed to init)"
4527 def __init__(self,root="/",virtual=None,clone=None,categories=None):
4529 self.root = clone.root[:]
4530 self.dbapi = copy.deepcopy(clone.dbapi)
4534 self.dbapi = vardbapi(self.root,categories=categories)
4537 def zap(self,mycpv):
4540 def inject(self,mycpv):
4543 def get_provide(self,mycpv):
4546 mylines = grabfile(self.root+VDB_PATH+"/"+mycpv+"/PROVIDE")
4548 myuse = grabfile(self.root+VDB_PATH+"/"+mycpv+"/USE")
4549 myuse = string.split(string.join(myuse))
4550 mylines = string.join(mylines)
4551 mylines = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(mylines), uselist=myuse))
4552 for myprovide in mylines:
4553 mys = catpkgsplit(myprovide)
4555 mys = string.split(myprovide, "/")
4556 myprovides += [mys[0] + "/" + mys[1]]
4558 except SystemExit, e:
4560 except Exception, e:
4562 print "Check " + self.root+VDB_PATH+"/"+mycpv+"/PROVIDE and USE."
4563 print "Possibly Invalid: " + str(mylines)
4564 print "Exception: "+str(e)
4568 def get_all_provides(self):
4570 for node in self.getallcpv():
4571 for mykey in self.get_provide(node):
4572 if myprovides.has_key(mykey):
4573 myprovides[mykey] += [node]
4575 myprovides[mykey] = [node]
4578 def dep_bestmatch(self,mydep,use_cache=1):
4579 "compatibility method -- all matches, not just visible ones"
4580 #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
4581 mymatch=best(self.dbapi.match(dep_expand(mydep,mydb=self.dbapi),use_cache=use_cache))
4587 def dep_match(self,mydep,use_cache=1):
4588 "compatibility method -- we want to see all matches, not just visible ones"
4589 #mymatch=match(mydep,self.dbapi)
4590 mymatch=self.dbapi.match(mydep,use_cache=use_cache)
4596 def exists_specific(self,cpv):
4597 return self.dbapi.cpv_exists(cpv)
4599 def getallcpv(self):
4600 """temporary function, probably to be renamed --- Gets a list of all
4601 category/package-versions installed on the system."""
4602 return self.dbapi.cpv_all()
4604 def getallnodes(self):
4605 """new behavior: these are all *unmasked* nodes. There may or may not be available
4606 masked package for nodes in this nodes list."""
4607 return self.dbapi.cp_all()
4609 def exists_specific_cat(self,cpv,use_cache=1):
4610 cpv=key_expand(cpv,mydb=self.dbapi,use_cache=use_cache)
4614 mylist=listdir(self.root+VDB_PATH+"/"+a[0],EmptyOnError=1)
4618 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+a[0]+"/"+x)
4624 def getebuildpath(self,fullpackage):
4625 cat,package=fullpackage.split("/")
4626 return self.root+VDB_PATH+"/"+fullpackage+"/"+package+".ebuild"
4628 def getnode(self,mykey,use_cache=1):
4629 mykey=key_expand(mykey,mydb=self.dbapi,use_cache=use_cache)
4632 mysplit=mykey.split("/")
4633 mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
4636 mypsplit=pkgsplit(x)
4638 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
4640 if mypsplit[0]==mysplit[1]:
4641 appendme=[mysplit[0]+"/"+x,[mysplit[0],mypsplit[0],mypsplit[1],mypsplit[2]]]
4642 returnme.append(appendme)
4646 def getslot(self,mycatpkg):
4647 "Get a slot for a catpkg; assume it exists."
4650 myslot=string.join(grabfile(self.root+VDB_PATH+"/"+mycatpkg+"/SLOT"))
4651 except SystemExit, e:
4653 except Exception, e:
# Membership test: does any installed version of this cat/pkg key exist in
# the VDB?  The loop header and return statements are elided in this listing.
4657 def hasnode(self,mykey,use_cache):
4658 """Does the particular node (cat/pkg key) exist?"""
4659 mykey=key_expand(mykey,mydb=self.dbapi,use_cache=use_cache)
4660 mysplit=mykey.split("/")
4661 mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
4663 mypsplit=pkgsplit(x)
# Unsplittable directory entries are reported as invalid, mirroring the
# handling in getnode() above.
4665 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
4667 if mypsplit[0]==mysplit[1]:
# Tail of the auxdbkeys metadata-key tuple/list (its opening lines are
# elided in this listing).  Order is significant: aux_get() assigns line N
# of the ebuild "depend" output to auxdbkeys[N], so keys must not be
# reordered; retired slots are kept as UNUSED_xx placeholders.
4675 'DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
4676 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
4677 'KEYWORDS', 'INHERITED', 'IUSE', 'CDEPEND',
4678 'PDEPEND', 'PROVIDE', 'EAPI',
4679 'UNUSED_01', 'UNUSED_02', 'UNUSED_03', 'UNUSED_04',
4680 'UNUSED_05', 'UNUSED_06', 'UNUSED_07',
# Cached length of the key list.
4682 auxdbkeylen=len(auxdbkeys)
# Module-level helper: flush the metadata caches of every live portdbapi
# instance.  The loop body is elided in this listing (presumably
# i.close_caches() -- confirm against the full source).
4684 def close_portdbapi_caches():
4685 for i in portdbapi.portdbapi_instances:
4689 class portdbapi(dbapi):
4690 """this tree will scan a portage directory located at root (passed to init)"""
4691 portdbapi_instances = []
4693 def __init__(self,porttree_root,mysettings=None):
4694 portdbapi.portdbapi_instances.append(self)
4698 self.mysettings = mysettings
4700 self.mysettings = config(clone=settings)
4702 self.manifestVerifyLevel = None
4703 self.manifestVerifier = None
4704 self.manifestCache = {} # {location: [stat, md5]}
4705 self.manifestMissingCache = []
4707 if "gpg" in self.mysettings.features:
4708 self.manifestVerifyLevel = portage_gpg.EXISTS
4709 if "strict" in self.mysettings.features:
4710 self.manifestVerifyLevel = portage_gpg.MARGINAL
4711 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
4712 elif "severe" in self.mysettings.features:
4713 self.manifestVerifyLevel = portage_gpg.TRUSTED
4714 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", requireSignedRing=True, minimumTrust=self.manifestVerifyLevel)
4716 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
4718 #self.root=settings["PORTDIR"]
4719 self.porttree_root = porttree_root
4721 self.depcachedir = self.mysettings.depcachedir[:]
4723 self.tmpfs = self.mysettings["PORTAGE_TMPFS"]
4724 if self.tmpfs and not os.path.exists(self.tmpfs):
4726 if self.tmpfs and not os.access(self.tmpfs, os.W_OK):
4728 if self.tmpfs and not os.access(self.tmpfs, os.R_OK):
4731 self.eclassdb = eclass_cache.cache(self.porttree_root, overlays=settings["PORTDIR_OVERLAY"].split())
4734 self.metadbmodule = self.mysettings.load_best_module("portdbapi.metadbmodule")
4736 #if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
4740 self.porttrees=[self.porttree_root]+self.mysettings["PORTDIR_OVERLAY"].split()
4741 self.auxdbmodule = self.mysettings.load_best_module("portdbapi.auxdbmodule")
4744 # XXX: REMOVE THIS ONCE UNUSED_0 IS YANKED FROM auxdbkeys
4746 filtered_auxdbkeys = filter(lambda x: not x.startswith("UNUSED_0"), auxdbkeys)
4747 for x in self.porttrees:
4748 # location, label, auxdbkeys
4749 self.auxdb[x] = self.auxdbmodule(portage_const.DEPCACHE_PATH, x, filtered_auxdbkeys, gid=portage_gid)
4751 def close_caches(self):
# Sync every per-tree auxiliary metadata cache to its backing store.
# NOTE(review): any further cleanup after the loop (e.g. clearing
# self.auxdb) would be elided from this listing -- confirm in full source.
4752 for x in self.auxdb.keys():
4753 self.auxdb[x].sync()
4756 def flush_cache(self):
4760 def finddigest(self,mycpv):
4762 mydig = self.findname2(mycpv)[0]
4763 mydigs = string.split(mydig, "/")[:-1]
4764 mydig = string.join(mydigs, "/")
4766 mysplit = mycpv.split("/")
4767 except SystemExit, e:
4771 return mydig+"/files/digest-"+mysplit[-1]
def findname(self, mycpv):
    """Return just the ebuild file location for mycpv.

    Thin convenience wrapper over findname2(), discarding the
    in-overlay flag it also returns.
    """
    result = self.findname2(mycpv)
    return result[0]
4776 def findname2(self,mycpv):
4777 "returns file location for this particular package and in_overlay flag"
4780 mysplit=mycpv.split("/")
4782 psplit=pkgsplit(mysplit[1])
4785 for x in self.porttrees:
4786 file=x+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
4787 if os.access(file, os.R_OK):
4791 return ret[0], ret[1]
4796 def aux_get(self, mycpv, mylist):
4797 "stub code for returning auxilliary db information, such as SLOT, DEPEND, etc."
4798 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
4799 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
4800 global auxdbkeys,auxdbkeylen
4802 cat,pkg = string.split(mycpv, "/", 1)
4804 myebuild, mylocation=self.findname2(mycpv)
4807 writemsg("!!! aux_get(): ebuild path for '%(cpv)s' not specified:\n" % {"cpv":mycpv})
4808 writemsg("!!! %s\n" % myebuild)
4809 raise KeyError, "'%(cpv)s' at %(path)s" % {"cpv":mycpv,"path":myebuild}
4811 myManifestPath = string.join(myebuild.split("/")[:-1],"/")+"/Manifest"
4812 if "gpg" in self.mysettings.features:
4814 mys = portage_gpg.fileStats(myManifestPath)
4815 if (myManifestPath in self.manifestCache) and \
4816 (self.manifestCache[myManifestPath] == mys):
4818 elif self.manifestVerifier:
4819 if not self.manifestVerifier.verify(myManifestPath):
4820 # Verification failed the desired level.
4821 raise portage_exception.UntrustedSignature, "Untrusted Manifest: %(manifest)s" % {"manifest":myManifestPath}
4823 if ("severe" in self.mysettings.features) and \
4824 (mys != portage_gpg.fileStats(myManifestPath)):
4825 raise portage_exception.SecurityViolation, "Manifest changed: %(manifest)s" % {"manifest":myManifestPath}
4827 except portage_exception.InvalidSignature, e:
4828 if ("strict" in self.mysettings.features) or \
4829 ("severe" in self.mysettings.features):
4831 writemsg("!!! INVALID MANIFEST SIGNATURE DETECTED: %(manifest)s\n" % {"manifest":myManifestPath})
4832 except portage_exception.MissingSignature, e:
4833 if ("severe" in self.mysettings.features):
4835 if ("strict" in self.mysettings.features):
4836 if myManifestPath not in self.manifestMissingCache:
4837 writemsg("!!! WARNING: Missing signature in: %(manifest)s\n" % {"manifest":myManifestPath})
4838 self.manifestMissingCache.insert(0,myManifestPath)
4839 except (OSError,portage_exception.FileNotFound), e:
4840 if ("strict" in self.mysettings.features) or \
4841 ("severe" in self.mysettings.features):
4842 raise portage_exception.SecurityViolation, "Error in verification of signatures: %(errormsg)s" % {"errormsg":str(e)}
4843 writemsg("!!! Manifest is missing or inaccessable: %(manifest)s\n" % {"manifest":myManifestPath})
4846 if os.access(myebuild, os.R_OK):
4847 emtime=os.stat(myebuild)[stat.ST_MTIME]
4849 writemsg("!!! aux_get(): ebuild for '%(cpv)s' does not exist at:\n" % {"cpv":mycpv})
4850 writemsg("!!! %s\n" % myebuild)
4854 mydata = self.auxdb[mylocation][mycpv]
4855 if emtime != long(mydata.get("_mtime_", 0)):
4857 elif len(mydata.get("_eclasses_", [])) > 0:
4858 doregen = not self.eclassdb.is_eclass_data_valid(mydata["_eclasses_"])
4866 try: del self.auxdb[mylocation][mycpv]
4867 except KeyError: pass
4869 writemsg("auxdb is valid: "+str(not doregen)+" "+str(pkg)+"\n", 2)
4872 writemsg("doregen: %s %s\n" % (doregen,mycpv), 2)
4873 writemsg("Generating cache entry(0) for: "+str(myebuild)+"\n",1)
4876 mydbkey = self.tmpfs+"/aux_db_key_temp"
4878 mydbkey = self.depcachedir+"/aux_db_key_temp"
4880 # XXX: Part of the gvisible hack/fix to prevent deadlock
4881 # XXX: through doebuild. Need to isolate this somehow...
4882 self.mysettings.reset()
4885 raise "Lock is already held by me?"
4887 mylock = portage_locks.lockfile(mydbkey, wantnewlockfile=1)
4889 if os.path.exists(mydbkey):
4892 except (IOError, OSError), e:
4893 portage_locks.unlockfile(mylock)
4895 writemsg("Uncaught handled exception: %(exception)s\n" % {"exception":str(e)})
4898 myret=doebuild(myebuild,"depend","/",self.mysettings,dbkey=mydbkey,tree="porttree")
4900 portage_locks.unlockfile(mylock)
4902 #depend returned non-zero exit code...
4903 writemsg(str(red("\naux_get():")+" (0) Error in "+mycpv+" ebuild. ("+str(myret)+")\n"
4904 " Check for syntax error or corruption in the ebuild. (--debug)\n\n"))
4908 mycent=open(mydbkey,"r")
4910 mylines=mycent.readlines()
4913 except (IOError, OSError):
4914 portage_locks.unlockfile(mylock)
4916 writemsg(str(red("\naux_get():")+" (1) Error in "+mycpv+" ebuild.\n"
4917 " Check for syntax error or corruption in the ebuild. (--debug)\n\n"))
4920 portage_locks.unlockfile(mylock)
4924 for x in range(0,len(mylines)):
4925 if mylines[x][-1] == '\n':
4926 mylines[x] = mylines[x][:-1]
4927 mydata[auxdbkeys[x]] = mylines[x]
4929 if "EAPI" not in mydata or not mydata["EAPI"].strip():
4930 mydata["EAPI"] = "0"
4932 if not eapi_is_supported(mydata["EAPI"]):
4933 # if newer version, wipe everything and negate eapi
4934 eapi = mydata["EAPI"]
4936 map(lambda x:mydata.setdefault(x, ""), auxdbkeys)
4937 mydata["EAPI"] = "-"+eapi
4939 if mydata.get("INHERITED", False):
4940 mydata["_eclasses_"] = self.eclassdb.get_eclass_data(mydata["INHERITED"].split())
4942 mydata["_eclasses_"] = {}
4944 del mydata["INHERITED"]
4946 mydata["_mtime_"] = emtime
4948 self.auxdb[mylocation][mycpv] = mydata
4950 #finally, we look at our internal cache entry and return the requested data.
4953 if x == "INHERITED":
4954 returnme.append(' '.join(mydata.get("_eclasses_", {}).keys()))
4956 returnme.append(mydata.get(x,""))
4958 if "EAPI" in mylist:
4959 idx = mylist.index("EAPI")
4960 if not returnme[idx]:
4965 def getfetchlist(self,mypkg,useflags=None,mysettings=None,all=0):
4966 if mysettings == None:
4967 mysettings = self.mysettings
4969 myuris = self.aux_get(mypkg,["SRC_URI"])[0]
4970 except (IOError,KeyError):
4971 print red("getfetchlist():")+" aux_get() error reading "+mypkg+"; aborting."
4974 if useflags is None:
4975 useflags = string.split(mysettings["USE"])
4977 myurilist = portage_dep.paren_reduce(myuris)
4978 myurilist = portage_dep.use_reduce(myurilist,uselist=useflags,matchall=all)
4979 newuris = flatten(myurilist)
4983 mya = os.path.basename(x)
4984 if not mya in myfiles:
4986 return [newuris, myfiles]
4988 def getfetchsizes(self,mypkg,useflags=None,debug=0):
4989 # returns a filename:size dictionnary of remaining downloads
4990 mydigest=self.finddigest(mypkg)
4991 checksums=digestParseFile(mydigest)
4993 if debug: print "[empty/missing/bad digest]: "+mypkg
4996 if useflags == None:
4997 myuris, myfiles = self.getfetchlist(mypkg,all=1)
4999 myuris, myfiles = self.getfetchlist(mypkg,useflags=useflags)
5000 #XXX: maybe this should be improved: take partial downloads
5001 # into account? check checksums?
5002 for myfile in myfiles:
5003 if debug and myfile not in checksums.keys():
5004 print "[bad digest]: missing",myfile,"for",mypkg
5005 elif myfile in checksums.keys():
5006 distfile=settings["DISTDIR"]+"/"+myfile
5007 if not os.access(distfile, os.R_OK):
5008 filesdict[myfile]=int(checksums[myfile]["size"])
5011 def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False):
5014 useflags = mysettings["USE"].split()
5015 myuri, myfiles = self.getfetchlist(mypkg, useflags=useflags, mysettings=mysettings, all=all)
5016 mydigest = self.finddigest(mypkg)
5017 mysums = digestParseFile(mydigest)
5021 if not mysums or x not in mysums:
5023 reason = "digest missing"
5025 ok,reason = portage_checksum.verify_all(self.mysettings["DISTDIR"]+"/"+x, mysums[x])
5027 failures[x] = reason
5032 def getsize(self,mypkg,useflags=None,debug=0):
5033 # returns the total size of remaining downloads
5035 # we use getfetchsizes() now, so this function would be obsoleted
5037 filesdict=self.getfetchsizes(mypkg,useflags=useflags,debug=debug)
5039 return "[empty/missing/bad digest]"
5041 for myfile in filesdict.keys():
5042 mysum+=filesdict[myfile]
5045 def cpv_exists(self,mykey):
5046 "Tells us whether an actual ebuild exists on disk (no masking)"
5047 cps2=mykey.split("/")
5048 cps=catpkgsplit(mykey,silent=0)
# Look the ebuild up by "cat/pkg-version"; the branch handling a failed
# catpkgsplit() and the return statements are elided from this listing.
5052 if self.findname(cps[0]+"/"+cps2[1]):
5058 "returns a list of all keys in our tree"
5060 for x in self.mysettings.categories:
5061 for oroot in self.porttrees:
5062 for y in listdir(oroot+"/"+x,EmptyOnError=1,ignorecvs=1,dirsonly=1):
5068 def p_list(self,mycp):
5070 for oroot in self.porttrees:
5071 for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
5072 if x[-7:]==".ebuild":
# Collect "cat/pkg-version" keys for every ebuild of mycp across all
# configured porttrees; duplicates across overlays collapse via the dict d.
# d's initialisation and the method's return are elided from this listing.
5076 def cp_list(self,mycp,use_cache=1):
5077 mysplit=mycp.split("/")
5079 for oroot in self.porttrees:
5080 for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
# Only files with the ".ebuild" suffix count; the suffix is stripped to
# form the version-qualified key.
5081 if x[-7:]==".ebuild":
5082 d[mysplit[0]+"/"+x[:-7]] = None
5086 for x in ["list-visible","bestmatch-visible","match-visible","match-all"]:
5094 def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None):
5095 "caching match function; very trick stuff"
5096 #if no updates are being made to the tree, we can consult our xcache...
5099 return self.xcache[level][origdep]
5104 #this stuff only runs on first call of xmatch()
5105 #create mydep, mykey from origdep
5106 mydep=dep_expand(origdep,mydb=self)
5107 mykey=dep_getkey(mydep)
5109 if level=="list-visible":
5110 #a list of all visible packages, not called directly (just by xmatch())
5111 #myval=self.visible(self.cp_list(mykey))
5112 myval=self.gvisible(self.visible(self.cp_list(mykey)))
5113 elif level=="bestmatch-visible":
5114 #dep match -- best match of all visible packages
5115 myval=best(self.xmatch("match-visible",None,mydep=mydep,mykey=mykey))
5116 #get all visible matches (from xmatch()), then choose the best one
5117 elif level=="bestmatch-list":
5118 #dep match -- find best match but restrict search to sublist
5119 myval=best(match_from_list(mydep,mylist))
5120 #no point is calling xmatch again since we're not caching list deps
5121 elif level=="match-list":
5122 #dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
5123 myval=match_from_list(mydep,mylist)
5124 elif level=="match-visible":
5125 #dep match -- find all visible matches
5126 myval=match_from_list(mydep,self.xmatch("list-visible",None,mydep=mydep,mykey=mykey))
5127 #get all visible packages, then get the matching ones
5128 elif level=="match-all":
5129 #match *all* visible *and* masked packages
5130 myval=match_from_list(mydep,self.cp_list(mykey))
5132 print "ERROR: xmatch doesn't handle",level,"query!"
5134 if self.frozen and (level not in ["match-list","bestmatch-list"]):
5135 self.xcache[level][mydep]=myval
def match(self, mydep, use_cache=1):
    """Return all visible packages matching mydep, via the xmatch() cache.

    use_cache is accepted for dbapi interface compatibility only;
    xmatch() manages its own caching, so the flag is not consulted here.
    """
    return self.xmatch("match-visible", mydep)
5141 def visible(self,mylist):
5142 """two functions in one. Accepts a list of cpv values and uses the package.mask *and*
5143 packages file to remove invisible entries, returning remaining items. This function assumes
5144 that all entries in mylist have the same category and package name."""
5145 if (mylist==None) or (len(mylist)==0):
5148 #first, we mask out packages in the package.mask file
5150 cpv=catpkgsplit(mykey)
5153 print "visible(): invalid cat/pkg-v:",mykey
5155 mycp=cpv[0]+"/"+cpv[1]
5156 maskdict=self.mysettings.pmaskdict
5157 unmaskdict=self.mysettings.punmaskdict
5158 if maskdict.has_key(mycp):
5159 for x in maskdict[mycp]:
5160 mymatches=self.xmatch("match-all",x)
5162 #error in package.mask file; print warning and continue:
5163 print "visible(): package.mask entry \""+x+"\" is invalid, ignoring..."
5167 if unmaskdict.has_key(mycp):
5168 for z in unmaskdict[mycp]:
5169 mymatches_unmask=self.xmatch("match-all",z)
5170 if y in mymatches_unmask:
5179 revmaskdict=self.mysettings.prevmaskdict
5180 if revmaskdict.has_key(mycp):
5181 for x in revmaskdict[mycp]:
5182 #important: only match against the still-unmasked entries...
5183 #notice how we pass "newlist" to the xmatch() call below....
5184 #Without this, ~ deps in the packages files are broken.
5185 mymatches=self.xmatch("match-list",x,mylist=newlist)
5187 #error in packages file; print warning and continue:
5188 print "emerge: visible(): profile packages entry \""+x+"\" is invalid, ignoring..."
5191 while pos<len(newlist):
5192 if newlist[pos] not in mymatches:
5198 def gvisible(self,mylist):
5199 "strip out group-masked (not in current group) entries"
5205 pkgdict = self.mysettings.pkeywordsdict
5206 for mycpv in mylist:
5207 #we need to update this next line when we have fully integrated the new db api
5210 keys, eapi = db["/"]["porttree"].dbapi.aux_get(mycpv, ["KEYWORDS", "EAPI"])
5211 except (KeyError,IOError,TypeError):
5215 #print "!!! No KEYWORDS for "+str(mycpv)+" -- Untested Status"
5217 mygroups=keys.split()
5220 cp = dep_getkey(mycpv)
5221 if pkgdict.has_key(cp):
5222 matches = match_to_list(mycpv, pkgdict[cp].keys())
5223 for atom in matches:
5224 pgroups.extend(pkgdict[cp][atom])
5229 writemsg("--- WARNING: Package '%s' uses '*' keyword.\n" % mycpv)
5232 elif "-"+gp in pgroups:
5242 if not match and ((hastesting and "~*" in pgroups) or (hasstable and "*" in pgroups)):
5244 if match and eapi_is_supported(eapi):
5245 newlist.append(mycpv)
5248 class binarytree(packagetree):
5249 "this tree scans for a list of all packages available in PKGDIR"
5250 def __init__(self,root,pkgdir,virtual=None,clone=None):
5253 # XXX This isn't cloning. It's an instance of the same thing.
5254 self.root=clone.root
5255 self.pkgdir=clone.pkgdir
5256 self.dbapi=clone.dbapi
5257 self.populated=clone.populated
5258 self.tree=clone.tree
5259 self.remotepkgs=clone.remotepkgs
5260 self.invalids=clone.invalids
5263 #self.pkgdir=settings["PKGDIR"]
5265 self.dbapi=bindbapi(self)
5271 def move_ent(self,mylist):
5272 if not self.populated:
5277 for cp in [origcp,newcp]:
5278 if not (isvalidatom(cp) and isjustname(cp)):
5279 raise portage_exception.InvalidPackageName(cp)
5280 mynewcat=newcp.split("/")[0]
5281 origmatches=self.dbapi.cp_list(origcp)
5284 for mycpv in origmatches:
5286 mycpsplit=catpkgsplit(mycpv)
5287 mynewcpv=newcp+"-"+mycpsplit[2]
5288 if mycpsplit[3]!="r0":
5289 mynewcpv += "-"+mycpsplit[3]
5290 myoldpkg=mycpv.split("/")[1]
5291 mynewpkg=mynewcpv.split("/")[1]
5293 if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
5294 writemsg("!!! Cannot update binary: Destination exists.\n")
5295 writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n")
5298 tbz2path=self.getname(mycpv)
5299 if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
5300 writemsg("!!! Cannot update readonly binary: "+mycpv+"\n")
5303 #print ">>> Updating data in:",mycpv
5304 sys.stdout.write("%")
5306 mytmpdir=settings["PORTAGE_TMPDIR"]+"/tbz2"
5307 mytbz2=xpak.tbz2(tbz2path)
5308 mytbz2.decompose(mytmpdir, cleanup=1)
5310 fixdbentries([mylist], mytmpdir)
5312 write_atomic(os.path.join(mytmpdir, "CATEGORY"), mynewcat+"\n")
5314 os.rename(mytmpdir+"/"+string.split(mycpv,"/")[1]+".ebuild", mytmpdir+"/"+string.split(mynewcpv, "/")[1]+".ebuild")
5315 except SystemExit, e:
5317 except Exception, e:
5320 mytbz2.recompose(mytmpdir, cleanup=1)
5322 self.dbapi.cpv_remove(mycpv)
5323 if (mynewpkg != myoldpkg):
5324 os.rename(tbz2path,self.getname(mynewcpv))
5325 self.dbapi.cpv_inject(mynewcpv)
5328 def move_slot_ent(self,mylist,mytmpdir):
5329 #mytmpdir=settings["PORTAGE_TMPDIR"]+"/tbz2"
5330 mytmpdir=mytmpdir+"/tbz2"
5331 if not self.populated:
5337 if not isvalidatom(pkg):
5338 raise portage_exception.InvalidAtom(pkg)
5340 origmatches=self.dbapi.match(pkg)
5343 for mycpv in origmatches:
5344 mycpsplit=catpkgsplit(mycpv)
5345 myoldpkg=mycpv.split("/")[1]
5346 tbz2path=self.getname(mycpv)
5347 if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
5348 writemsg("!!! Cannot update readonly binary: "+mycpv+"\n")
5351 #print ">>> Updating data in:",mycpv
5352 mytbz2=xpak.tbz2(tbz2path)
5353 mytbz2.decompose(mytmpdir, cleanup=1)
5355 slot=grabfile(mytmpdir+"/SLOT");
5359 if (slot[0]!=origslot):
5362 sys.stdout.write("S")
5365 write_atomic(os.path.join(mytmpdir, "SLOT"), newslot+"\n")
5366 mytbz2.recompose(mytmpdir, cleanup=1)
5369 def update_ents(self,mybiglist,mytmpdir):
5370 #XXX mytmpdir=settings["PORTAGE_TMPDIR"]+"/tbz2"
5371 if not self.populated:
5374 for mycpv in self.dbapi.cp_all():
5375 tbz2path=self.getname(mycpv)
5376 if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
5377 writemsg("!!! Cannot update readonly binary: "+mycpv+"\n")
5379 #print ">>> Updating binary data:",mycpv
5381 mytbz2=xpak.tbz2(tbz2path)
5382 mytbz2.decompose(mytmpdir,cleanup=1)
5383 if fixdbentries(mybiglist, mytmpdir):
5384 mytbz2.recompose(mytmpdir,cleanup=1)
5386 mytbz2.cleanup(mytmpdir)
5389 def populate(self, getbinpkgs=0,getbinpkgsonly=0):
5390 "populates the binarytree"
5391 if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
5393 if (not os.path.isdir(self.pkgdir+"/All") and not getbinpkgs):
5396 if (not getbinpkgsonly) and os.path.exists(self.pkgdir+"/All"):
5397 for mypkg in listdir(self.pkgdir+"/All"):
5398 if mypkg[-5:]!=".tbz2":
5400 mytbz2=xpak.tbz2(self.pkgdir+"/All/"+mypkg)
5401 mycat=mytbz2.getfile("CATEGORY")
5403 #old-style or corrupt package
5404 writemsg("!!! Invalid binary package: "+mypkg+"\n")
5405 self.invalids.append(mypkg)
5407 mycat=string.strip(mycat)
5408 fullpkg=mycat+"/"+mypkg[:-5]
5409 mykey=dep_getkey(fullpkg)
5411 # invalid tbz2's can hurt things.
5412 self.dbapi.cpv_inject(fullpkg)
5413 except SystemExit, e:
5418 if getbinpkgs and not settings["PORTAGE_BINHOST"]:
5419 writemsg(red("!!! PORTAGE_BINHOST unset, but use is requested.\n"))
5421 if getbinpkgs and settings["PORTAGE_BINHOST"] and not self.remotepkgs:
5423 chunk_size = long(settings["PORTAGE_BINHOST_CHUNKSIZE"])
5426 except SystemExit, e:
5431 writemsg(green("Fetching binary packages info...\n"))
5432 self.remotepkgs = getbinpkg.dir_get_metadata(settings["PORTAGE_BINHOST"], chunk_size=chunk_size)
5433 writemsg(green(" -- DONE!\n\n"))
5435 for mypkg in self.remotepkgs.keys():
5436 if not self.remotepkgs[mypkg].has_key("CATEGORY"):
5437 #old-style or corrupt package
5438 writemsg("!!! Invalid remote binary package: "+mypkg+"\n")
5439 del self.remotepkgs[mypkg]
5441 mycat=string.strip(self.remotepkgs[mypkg]["CATEGORY"])
5442 fullpkg=mycat+"/"+mypkg[:-5]
5443 mykey=dep_getkey(fullpkg)
5445 # invalid tbz2's can hurt things.
5446 #print "cpv_inject("+str(fullpkg)+")"
5447 self.dbapi.cpv_inject(fullpkg)
5448 #print " -- Injected"
5449 except SystemExit, e:
5452 writemsg("!!! Failed to inject remote binary package:"+str(fullpkg)+"\n")
5453 del self.remotepkgs[mypkg]
def inject(self, cpv):
    """Register cpv with the binary-package dbapi and return its result."""
    outcome = self.dbapi.cpv_inject(cpv)
    return outcome
5460 def exists_specific(self,cpv):
5461 if not self.populated:
5463 return self.dbapi.match(dep_expand("="+cpv,mydb=self.dbapi))
5465 def dep_bestmatch(self,mydep):
5466 "compatibility method -- all matches, not just visible ones"
5467 if not self.populated:
5470 writemsg("mydep: %s\n" % mydep, 1)
5471 mydep=dep_expand(mydep,mydb=self.dbapi)
5472 writemsg("mydep: %s\n" % mydep, 1)
5473 mykey=dep_getkey(mydep)
5474 writemsg("mykey: %s\n" % mykey, 1)
5475 mymatch=best(match_from_list(mydep,self.dbapi.cp_list(mykey)))
5476 writemsg("mymatch: %s\n" % mymatch, 1)
5481 def getname(self,pkgname):
5482 "returns file location for this particular package"
5483 mysplit=string.split(pkgname,"/")
5485 return self.pkgdir+"/All/"+self.resolve_specific(pkgname)+".tbz2"
5487 return self.pkgdir+"/All/"+mysplit[1]+".tbz2"
5489 def isremote(self,pkgname):
5490 "Returns true if the package is kept remotely."
# Remote means: no local tbz2 exists on disk AND the basename appears in
# the fetched remote-package index.
5491 mysplit=string.split(pkgname,"/")
5492 remote = (not os.path.exists(self.getname(pkgname))) and self.remotepkgs.has_key(mysplit[1]+".tbz2")
# NOTE(review): the return of `remote` is elided from this listing.
def get_use(self, pkgname):
    """Return the USE flags recorded in a binary package, as a list.

    Remote packages are answered from the cached remote index; local
    packages are read out of the tbz2's embedded xpak data.
    """
    basename = string.split(pkgname, "/")[1]
    if self.isremote(pkgname):
        use_field = self.remotepkgs[basename + ".tbz2"]["USE"][:]
        return string.split(use_field)
    local_pkg = xpak.tbz2(self.getname(pkgname))
    return string.split(local_pkg.getfile("USE"))
5502 def gettbz2(self,pkgname):
5503 "fetches the package from a remote site, if necessary."
5504 print "Fetching '"+str(pkgname)+"'"
5505 mysplit = string.split(pkgname,"/")
5506 tbz2name = mysplit[1]+".tbz2"
5507 if not self.isremote(pkgname):
5508 if (tbz2name not in self.invalids):
5511 writemsg("Resuming download of this tbz2, but it is possible that it is corrupt.\n")
5512 mydest = self.pkgdir+"/All/"
5514 os.makedirs(mydest, 0775)
5515 except SystemExit, e:
5519 return getbinpkg.file_get(settings["PORTAGE_BINHOST"]+"/"+tbz2name, mydest, fcmd=settings["RESUMECOMMAND"])
# Binary-package variant of getslot(): SLOT comes from the package's xpak
# metadata via the bindbapi.  The try/return scaffolding and the except
# handler bodies are elided from this listing.
5521 def getslot(self,mycatpkg):
5522 "Get a slot for a catpkg; assume it exists."
5525 myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
# Python 2 except syntax; SystemExit handled separately from the broad
# Exception catch (bodies elided -- presumably re-raised; confirm).
5526 except SystemExit, e:
5528 except Exception, e:
5533 "this class provides an interface to the standard text package database"
5534 def __init__(self,cat,pkg,myroot,mysettings,treetype=None):
5535 "create a dblink object for cat/pkg. This dblink entry may or may not exist"
5538 self.mycpv = self.cat+"/"+self.pkg
5539 self.mysplit = pkgsplit(self.mycpv)
5540 self.treetype = treetype
5542 self.dbroot = os.path.normpath(myroot+VDB_PATH)
5543 self.dbcatdir = self.dbroot+"/"+cat
5544 self.dbpkgdir = self.dbcatdir+"/"+pkg
5545 self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
5546 self.dbdir = self.dbpkgdir
5548 self.lock_pkg = None
5549 self.lock_tmp = None
5550 self.lock_num = 0 # Count of the held locks on the db.
5552 self.settings = mysettings
5553 if self.settings==1:
5557 self.updateprotect()
5558 self.contentscache=[]
5561 if self.lock_num == 0:
5562 self.lock_pkg = portage_locks.lockdir(self.dbpkgdir)
5563 self.lock_tmp = portage_locks.lockdir(self.dbtmpdir)
5568 if self.lock_num == 0:
5569 portage_locks.unlockdir(self.lock_tmp)
5570 portage_locks.unlockdir(self.lock_pkg)
5573 "return path to location of db information (for >>> informational display)"
5577 "does the db entry exist? boolean."
5578 return os.path.exists(self.dbdir)
5581 "create the skeleton db directory structure. No contents, virtuals, provides or anything. Also will create /var/db/pkg if necessary."
5582 # XXXXX Delete this eventually
5583 raise Exception, "This is bad. Don't use it."
5584 if not os.path.exists(self.dbdir):
5585 os.makedirs(self.dbdir)
5588 "erase this db entry completely"
5589 if not os.path.exists(self.dbdir):
5592 for x in listdir(self.dbdir):
5593 os.unlink(self.dbdir+"/"+x)
5594 os.rmdir(self.dbdir)
5596 print "!!! Unable to remove db entry for this package."
5597 print "!!! It is possible that a directory is in this one. Portage will still"
5598 print "!!! register this package as installed as long as this directory exists."
5599 print "!!! You may delete this directory with 'rm -Rf "+self.dbdir+"'"
def clearcontents(self):
    """Remove this package's CONTENTS file from its VDB directory, if present."""
    contents_path = self.dbdir + "/CONTENTS"
    if os.path.exists(contents_path):
        os.unlink(contents_path)
5608 def getcontents(self):
5609 if not os.path.exists(self.dbdir+"/CONTENTS"):
5611 if self.contentscache != []:
5612 return self.contentscache
5614 myc=open(self.dbdir+"/CONTENTS","r")
5615 mylines=myc.readlines()
5618 for line in mylines:
5619 mydat = string.split(line)
5620 # we do this so we can remove from non-root filesystems
5621 # (use the ROOT var to allow maintenance on other partitions)
5623 mydat[1]=os.path.normpath(root+mydat[1][1:])
5625 #format: type, mtime, md5sum
5626 pkgfiles[string.join(mydat[1:-2]," ")]=[mydat[0], mydat[-1], mydat[-2]]
5627 elif mydat[0]=="dir":
5629 pkgfiles[string.join(mydat[1:])]=[mydat[0] ]
5630 elif mydat[0]=="sym":
5631 #format: type, mtime, dest
5633 if (x >= 13) and (mydat[-1][-1]==')'): # Old/Broken symlink entry
5634 mydat = mydat[:-10]+[mydat[-10:][stat.ST_MTIME][:-1]]
5635 writemsg("FIXED SYMLINK LINE: %s\n" % mydat, 1)
5645 pkgfiles[string.join(mydat[1:splitter]," ")]=[mydat[0], mydat[-1], string.join(mydat[(splitter+1):-1]," ")]
5646 elif mydat[0]=="dev":
5648 pkgfiles[string.join(mydat[1:]," ")]=[mydat[0] ]
5649 elif mydat[0]=="fif":
5651 pkgfiles[string.join(mydat[1:]," ")]=[mydat[0]]
5654 except (KeyError,IndexError):
5655 print "portage: CONTENTS line",pos,"corrupt!"
5657 self.contentscache=pkgfiles
5660 def updateprotect(self):
5661 #do some config file management prep
5663 for x in string.split(self.settings["CONFIG_PROTECT"]):
5664 ppath=normalize_path(self.myroot+x)+"/"
5665 if os.path.isdir(ppath):
5666 self.protect.append(ppath)
5669 for x in string.split(self.settings["CONFIG_PROTECT_MASK"]):
5670 ppath=normalize_path(self.myroot+x)+"/"
5671 if os.path.isdir(ppath):
5672 self.protectmask.append(ppath)
5673 #if it doesn't exist, silently skip it
5675 def isprotected(self,obj):
5676 """Checks if obj is in the current protect/mask directories. Returns
5677 0 on unprotected/masked, and 1 on protected."""
5680 for ppath in self.protect:
5681 if (len(ppath) > masked) and (obj[0:len(ppath)]==ppath):
5682 protected=len(ppath)
5683 #config file management
5684 for pmpath in self.protectmask:
5685 if (len(pmpath) >= protected) and (obj[0:len(pmpath)]==pmpath):
5686 #skip, it's in the mask
5688 return (protected > masked)
5690 def unmerge(self,pkgfiles=None,trimworld=1,cleanup=0):
5696 self.settings.load_infodir(self.dbdir)
5699 print "No package files given... Grabbing a set."
5700 pkgfiles=self.getcontents()
5702 # Now, don't assume that the name of the ebuild is the same as the
5703 # name of the dir; the package may have been moved.
5706 # We should use the environement file if possible,
5707 # as it has all sourced files already included.
5708 # XXX: Need to ensure it doesn't overwrite any important vars though.
5709 if os.access(self.dbdir+"/environment.bz2", os.R_OK):
5710 spawn("bzip2 -d "+self.dbdir+"/environment.bz2",self.settings,free=1)
5712 if not myebuildpath:
5713 mystuff=listdir(self.dbdir,EmptyOnError=1)
5715 if x[-7:]==".ebuild":
5716 myebuildpath=self.dbdir+"/"+x
5720 if myebuildpath and os.path.exists(myebuildpath):
5721 a=doebuild(myebuildpath,"prerm",self.myroot,self.settings,cleanup=cleanup,use_cache=0,tree=self.treetype)
5722 # XXX: Decide how to handle failures here.
5724 writemsg("!!! FAILED prerm: "+str(a)+"\n")
5728 mykeys=pkgfiles.keys()
5732 self.updateprotect()
5734 #process symlinks second-to-last, directories last.
5736 modprotect="/lib/modules/"
5737 for objkey in mykeys:
5738 obj=os.path.normpath(objkey)
5743 statobj = os.stat(obj)
5748 lstatobj = os.lstat(obj)
5749 except (OSError, AttributeError):
5751 islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
5754 #we skip this if we're dealing with a symlink
5755 #because os.stat() will operate on the
5756 #link target rather than the link itself.
5757 writemsg_stdout("--- !found "+str(pkgfiles[objkey][0])+ " %s\n" % obj)
5759 # next line includes a tweak to protect modules from being unmerged,
5760 # but we don't protect modules from being overwritten if they are
5761 # upgraded. We effectively only want one half of the config protection
5762 # functionality for /lib/modules. For portage-ng both capabilities
5763 # should be able to be independently specified.
5764 if self.isprotected(obj) or ((len(obj) > len(modprotect)) and (obj[0:len(modprotect)]==modprotect)):
5765 writemsg_stdout("--- cfgpro %s %s\n" % (pkgfiles[objkey][0], obj))
5768 lmtime=str(lstatobj[stat.ST_MTIME])
5769 if (pkgfiles[objkey][0] not in ("dir","fif","dev")) and (lmtime != pkgfiles[objkey][1]):
5770 writemsg_stdout("--- !mtime %s %s\n" % (pkgfiles[objkey][0], obj))
5773 if pkgfiles[objkey][0]=="dir":
5774 if statobj is None or not stat.S_ISDIR(statobj.st_mode):
5775 writemsg_stdout("--- !dir %s %s\n" % ("dir", obj))
5778 elif pkgfiles[objkey][0]=="sym":
5780 writemsg_stdout("--- !sym %s %s\n" % ("sym", obj))
5784 writemsg_stdout("<<< %s %s\n" % ("sym",obj))
5785 except (OSError,IOError),e:
5786 writemsg_stdout("!!! %s %s\n" % ("sym",obj))
5787 elif pkgfiles[objkey][0]=="obj":
5788 if statobj is None or not stat.S_ISREG(statobj.st_mode):
5789 writemsg_stdout("--- !obj %s %s\n" % ("obj", obj))
5793 mymd5 = portage_checksum.perform_md5(obj, calc_prelink=1)
5794 except portage_exception.FileNotFound, e:
5795 # the file has disappeared between now and our stat call
5796 writemsg_stdout("--- !obj %s %s\n" % ("obj", obj))
5799 # string.lower is needed because db entries used to be in upper-case. The
5800 # string.lower allows for backwards compatibility.
5801 if mymd5 != string.lower(pkgfiles[objkey][2]):
5802 writemsg_stdout("--- !md5 %s %s\n" % ("obj", obj))
5806 except (OSError,IOError),e:
5808 writemsg_stdout("<<< %s %s\n" % ("obj",obj))
5809 elif pkgfiles[objkey][0]=="fif":
5810 if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
5811 writemsg_stdout("--- !fif %s %s\n" % ("fif", obj))
5815 except (OSError,IOError),e:
5817 writemsg_stdout("<<< %s %s\n" % ("fif",obj))
5818 elif pkgfiles[objkey][0]=="dev":
5819 writemsg_stdout("--- %s %s\n" % ("dev",obj))
5826 if not last_non_empty.startswith(obj) and not listdir(obj):
5829 writemsg_stdout("<<< %s %s\n" % ("dir",obj))
5832 except (OSError,IOError),e:
5836 writemsg_stdout("--- !empty dir %s\n" % obj)
5837 last_non_empty = obj
5840 #remove self from vartree database so that our own virtual gets zapped if we're the last node
5841 db[self.myroot]["vartree"].zap(self.mycpv)
5843 # New code to remove stuff from the world and virtuals files when unmerged.
5845 worldlist=grabfile(self.myroot+WORLD_FILE)
5846 mykey=cpv_getkey(self.mycpv)
5849 if dep_getkey(x)==mykey:
5850 matches=db[self.myroot]["vartree"].dbapi.match(x,use_cache=0)
5852 #zap our world entry
5854 elif (len(matches)==1) and (matches[0]==self.mycpv):
5855 #zap our world entry
5858 #others are around; keep it.
5859 newworldlist.append(x)
5861 #this doesn't match the package we're unmerging; keep it.
5862 newworldlist.append(x)
5864 # if the base dir doesn't exist, create it.
5865 # (spanky noticed bug)
5866 # XXX: dumb question, but abstracting the root uid might be wise/useful for
5867 # 2nd pkg manager installation setups.
5868 if not os.path.exists(os.path.dirname(self.myroot+WORLD_FILE)):
5869 pdir = os.path.dirname(self.myroot + WORLD_FILE)
5870 os.makedirs(pdir, mode=0755)
5871 os.chown(pdir, 0, portage_gid)
5872 os.chmod(pdir, 02770)
5874 write_atomic(os.path.join(self.myroot,WORLD_FILE),"\n".join(newworldlist))
5877 if myebuildpath and os.path.exists(myebuildpath):
5878 # XXX: This should be the old config, not the current one.
5879 # XXX: Use vardbapi to load up env vars.
5880 a=doebuild(myebuildpath,"postrm",self.myroot,self.settings,use_cache=0,tree=self.treetype)
5881 # XXX: Decide how to handle failures here.
5883 writemsg("!!! FAILED postrm: "+str(a)+"\n")
# NOTE(review): sampled listing — original lines 5893/5895+ (the actual
# return statements for the "new file" and "ours" cases) are not visible here.
5888 def isowner(self,filename,destroot):
5889 """ check if filename is a new file or belongs to this package
5890 (for this or a previous version)"""
# Full path the file would occupy under the live filesystem root.
5891 destfile = os.path.normpath(destroot+"/"+filename)
# If nothing exists at the destination, the file is "new" (collides with nobody).
5892 if not os.path.exists(destfile):
# Otherwise the file counts as ours only when it is recorded in this
# package instance's CONTENTS (getcontents()).
5894 if self.getcontents() and filename in self.getcontents().keys():
# NOTE(review): sampled listing — many original lines are missing between the
# numbered lines below (locking, loop headers, try/except scaffolding, etc.).
# treewalk() is the main merge driver: collision check, preinst, file merge via
# mergeme(), unmerge of the old instance, db dir swap, postinst, env update.
5899 def treewalk(self,srcroot,destroot,inforoot,myebuild,cleanup=0):
5902 # destroot = where to merge, ie. ${ROOT},
5903 # inforoot = root of db entry,
5904 # secondhand = list of symlinks that have been skipped due to
5905 # their target not existing (will merge later),
# Ensure the category directory exists in the package database.
5907 if not os.path.exists(self.dbcatdir):
5908 os.makedirs(self.dbcatdir)
5910 # This blocks until we can get the dirs to ourselves.
# Collect other installed versions of the same package (cp_list on the vartree).
5914 for v in db[self.myroot]["vartree"].dbapi.cp_list(self.mysplit[0]):
5915 otherversions.append(v.split("/")[1])
5917 # check for package collisions
5918 if "collision-protect" in features:
5919 myfilelist = listdir(srcroot, recursive=1, filesonly=1, followSymlinks=False)
5921 # the linkcheck only works if we are in srcroot
5924 mysymlinks = filter(os.path.islink, listdir(srcroot, recursive=1, filesonly=0, followSymlinks=False))
5925 myfilelist.extend(mysymlinks)
5928 starttime=time.time()
5934 if self.pkg in otherversions:
5935 otherversions.remove(self.pkg) # we already checked this package
# Build dblink objects for every other installed version so ownership of a
# colliding file can be tested against each of them below.
5937 for v in otherversions:
5938 # should we check for same SLOT here ?
5939 mypkglist.append(dblink(self.cat,v,destroot,self.settings))
5941 print green("*")+" checking "+str(len(myfilelist))+" files for package collisions"
5942 for f in myfilelist:
5944 # listdir isn't intelligent enough to exclude symlinked dirs,
5945 # so we have to do it ourself
5946 for s in mysymlinks:
5947 # the length comparison makes sure that the symlink itself is checked
5948 if f[:len(s)] == s and len(f) > len(s):
5954 print str(i)+" files checked ..."
# A file is acceptable if this package (or any version of it) owns it,
# or if it lives under a config-protected path.
5958 for ver in [self]+mypkglist:
5959 if (ver.isowner(f, destroot) or ver.isprotected(f)):
5963 print "existing file "+f+" is not owned by this package"
5965 print green("*")+" spent "+str(time.time()-starttime)+" seconds checking for file collisions"
5967 print red("*")+" This package is blocked because it wants to overwrite"
5968 print red("*")+" files belonging to other packages (see messages above)."
5969 print red("*")+" If you have no clue what this is all about report it "
5970 print red("*")+" as a bug for this package on http://bugs.gentoo.org"
5972 print red("package "+self.cat+"/"+self.pkg+" NOT merged")
5974 # Why is the package already merged here db-wise? Shouldn't be the case
5975 # only unmerge if it ia new package and has no contents
5976 if not self.getcontents():
5983 except SystemExit, e:
5989 # get old contents info for later unmerging
5990 oldcontents = self.getcontents()
# Work in the temporary db dir until the merge is complete, then swap.
5992 self.dbdir = self.dbtmpdir
5994 if not os.path.exists(self.dbtmpdir):
5995 os.makedirs(self.dbtmpdir)
5997 writemsg_stdout(">>> Merging %s %s %s\n" % (self.mycpv,"to",destroot))
5999 # run preinst script
6001 # if we are merging a new ebuild, use *its* pre/postinst rather than using the one in /var/db/pkg
6003 a=doebuild(myebuild,"preinst",root,self.settings,cleanup=cleanup,use_cache=0,tree=self.treetype)
6005 a=doebuild(inforoot+"/"+self.pkg+".ebuild","preinst",root,self.settings,cleanup=cleanup,use_cache=0,tree=self.treetype)
6007 # XXX: Decide how to handle failures here.
6009 writemsg("!!! FAILED preinst: "+str(a)+"\n")
6012 # copy "info" files (like SLOT, CFLAGS, etc.) into the database
6013 for x in listdir(inforoot):
6014 self.copyfile(inforoot+"/"+x)
6016 # get current counter value (counter_tick also takes care of incrementing it)
6017 # XXX Need to make this destroot, but it needs to be initialized first. XXX
6018 # XXX bis: leads to some invalidentry() call through cp_all().
6019 counter = db["/"]["vartree"].dbapi.counter_tick(self.myroot,mycpv=self.mycpv)
6020 # write local package counter for recording
6021 lcfile = open(self.dbtmpdir+"/COUNTER","w")
6022 lcfile.write(str(counter))
6025 # open CONTENTS file (possibly overwriting old one) for recording
6026 outfile=open(self.dbtmpdir+"/CONTENTS","w")
6028 self.updateprotect()
6030 #if we have a file containing previously-merged config file md5sums, grab it.
6031 if os.path.exists(destroot+CONFIG_MEMORY_FILE):
6032 cfgfiledict=grabdict(destroot+CONFIG_MEMORY_FILE)
# NOCONFMEM disables the "already merged this config md5" memory below.
6035 if self.settings.has_key("NOCONFMEM"):
6036 cfgfiledict["IGNORE"]=1
6038 cfgfiledict["IGNORE"]=0
6040 # set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
6041 mymtime = long(time.time())
6042 prevmask = os.umask(0)
6045 # we do a first merge; this will recurse through all files in our srcroot but also build up a
6046 # "second hand" of symlinks to merge later
6047 if self.mergeme(srcroot,destroot,outfile,secondhand,"",cfgfiledict,mymtime):
6050 # now, it's time for dealing our second hand; we'll loop until we can't merge anymore. The rest are
6051 # broken symlinks. We'll merge them too.
# Loop terminates when a full pass makes no progress (len unchanged).
6053 while len(secondhand) and len(secondhand)!=lastlen:
6054 # clear the thirdhand. Anything from our second hand that
6055 # couldn't get merged will be added to thirdhand.
6058 self.mergeme(srcroot,destroot,outfile,thirdhand,secondhand,cfgfiledict,mymtime)
6061 lastlen=len(secondhand)
6063 # our thirdhand now becomes our secondhand. It's ok to throw
6064 # away secondhand since thirdhand contains all the stuff that
6065 # couldn't be merged.
6066 secondhand = thirdhand
6069 # force merge of remaining symlinks (broken or circular; oh well)
# secondhand=None signals "force mode" to mergeme (see mergeme line 6235).
6070 self.mergeme(srcroot,destroot,outfile,None,secondhand,cfgfiledict,mymtime)
6075 #if we opened it, close it
6079 writemsg_stdout(">>> Safely unmerging already-installed instance...\n")
6080 self.dbdir = self.dbpkgdir
6081 self.unmerge(oldcontents,trimworld=0)
6082 self.dbdir = self.dbtmpdir
6083 writemsg_stdout(">>> Original instance of package unmerged safely.\n")
6085 # We hold both directory locks.
6086 self.dbdir = self.dbpkgdir
# Promote the temporary db entry to the final package db location.
6088 movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
6092 #write out our collection of md5sums
6093 if cfgfiledict.has_key("IGNORE"):
6094 del cfgfiledict["IGNORE"]
6096 # XXXX: HACK! PathSpec is very necessary here.
6097 if not os.path.exists(destroot+PRIVATE_PATH):
6098 os.makedirs(destroot+PRIVATE_PATH)
6099 os.chown(destroot+PRIVATE_PATH,os.getuid(),portage_gid)
6100 os.chmod(destroot+PRIVATE_PATH,02770)
6101 dirlist = prefix_array(listdir(destroot+PRIVATE_PATH),destroot+PRIVATE_PATH+"/")
6104 dirlist.reverse() # Gets them in file-before basedir order
6106 if os.path.isdir(x):
6107 dirlist += prefix_array(listdir(x),x+"/")
6109 os.unlink(destroot+PRIVATE_PATH+"/"+x)
# Serialize writes to the shared config-md5 memory file.
6111 mylock = portage_locks.lockfile(destroot+CONFIG_MEMORY_FILE)
6112 writedict(cfgfiledict,destroot+CONFIG_MEMORY_FILE)
6113 portage_locks.unlockfile(mylock)
6117 # if we are merging a new ebuild, use *its* pre/postinst rather than using the one in /var/db/pkg
6119 a=doebuild(myebuild,"postinst",root,self.settings,use_cache=0,tree=self.treetype)
6121 a=doebuild(inforoot+"/"+self.pkg+".ebuild","postinst",root,self.settings,use_cache=0,tree=self.treetype)
6123 # XXX: Decide how to handle failures here.
6125 writemsg("!!! FAILED postinst: "+str(a)+"\n")
# Detect a downgrade: any remaining installed version newer than ours.
6129 for v in otherversions:
6130 if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
6133 #update environment settings, library paths. DO NOT change symlinks.
6134 env_update(makelinks=(not downgrade))
6135 #dircache may break autoclean because it remembers the -MERGING-pkg file
6137 if dircache.has_key(self.dbcatdir):
6138 del dircache[self.dbcatdir]
6139 writemsg_stdout(">>> %s %s\n" % (self.mycpv,"merged."))
6141 # Process ebuild logfiles
6142 elog_process(self.mycpv, self.settings)
# NOTE(review): sampled listing — interior lines (try/else/continue scaffolding,
# some assignments) are missing between the numbered lines below.
# mergeme() merges one directory level (or an explicit file list) from the
# image (srcroot) into the live filesystem (destroot), recording entries in
# the CONTENTS file and applying CONFIG_PROTECT logic.  Returns nonzero on
# fatal error (propagated by the recursive call at line 6306).
6146 def mergeme(self,srcroot,destroot,outfile,secondhand,stufftomerge,cfgfiledict,thismtime):
# Normalize both roots and force a trailing slash so prefix math works.
6147 srcroot=os.path.normpath("///"+srcroot)+"/"
6148 destroot=os.path.normpath("///"+destroot)+"/"
6149 # this is supposed to merge a list of files. There will be 2 forms of argument passing.
6150 if type(stufftomerge)==types.StringType:
6151 #A directory is specified. Figure out protection paths, listdir() it and process it.
6152 mergelist=listdir(srcroot+stufftomerge)
6154 # We need mydest defined up here to calc. protection paths. This is now done once per
6155 # directory rather than once per file merge. This should really help merge performance.
6156 # Trailing / ensures that protects/masks with trailing /'s match.
6157 mytruncpath="/"+offset+"/"
6158 myppath=self.isprotected(mytruncpath)
6160 mergelist=stufftomerge
6163 mysrc=os.path.normpath("///"+srcroot+offset+x)
6164 mydest=os.path.normpath("///"+destroot+offset+x)
6165 # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
6166 myrealdest="/"+offset+x
6167 # stat file once, test using S_* macros many times (faster that way)
6169 mystat=os.lstat(mysrc)
6170 except SystemExit, e:
6174 writemsg(red("!!! ERROR: There appears to be ")+bold("FILE SYSTEM CORRUPTION.")+red(" A file that is listed\n"))
6175 writemsg(red("!!! as existing is not capable of being stat'd. If you are using an\n"))
6176 writemsg(red("!!! experimental kernel, please boot into a stable one, force an fsck,\n"))
6177 writemsg(red("!!! and ensure your filesystem is in a sane state. ")+bold("'shutdown -Fr now'\n"))
6178 writemsg(red("!!! File: ")+str(mysrc)+"\n")
6179 writemsg(red("!!! Error: ")+str(e)+"\n")
6181 except Exception, e:
6183 writemsg(red("!!! ERROR: An unknown error has occurred during the merge process.\n"))
6184 writemsg(red("!!! A stat call returned the following error for the following file:"))
6185 writemsg( "!!! Please ensure that your filesystem is intact, otherwise report\n")
6186 writemsg( "!!! this as a portage bug at bugs.gentoo.org. Append 'emerge info'.\n")
6187 writemsg( "!!! File: "+str(mysrc)+"\n")
6188 writemsg( "!!! Error: "+str(e)+"\n")
6192 mymode=mystat[stat.ST_MODE]
6193 # handy variables; mydest is the target object on the live filesystems;
6194 # mysrc is the source object in the temporary install dir
6196 mydmode=os.lstat(mydest)[stat.ST_MODE]
6197 except SystemExit, e:
6200 #dest file doesn't exist
6203 if stat.S_ISLNK(mymode):
6204 # we are merging a symbolic link
6205 myabsto=abssymlink(mysrc)
# Strip the image-root prefix so the link target is relative to destroot.
6206 if myabsto[0:len(srcroot)]==srcroot:
6207 myabsto=myabsto[len(srcroot):]
6210 myto=os.readlink(mysrc)
# Also strip ${D} (the install image path) if the link target embeds it.
6211 if self.settings and self.settings["D"]:
6212 if myto.find(self.settings["D"])==0:
6213 myto=myto[len(self.settings["D"]):]
6214 # myrealto contains the path of the real file to which this symlink points.
6215 # we can simply test for existence of this file to see if the target has been merged yet
6216 myrealto=os.path.normpath(os.path.join(destroot,myabsto))
6219 if not stat.S_ISLNK(mydmode):
6220 if stat.S_ISDIR(mydmode):
6221 # directory in the way: we can't merge a symlink over a directory
6222 # we won't merge this, continue with next file...
6224 srctarget = os.path.normpath(os.path.dirname(mysrc)+"/"+myto)
6225 if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
6226 # Kill file blocking installation of symlink to dir #71787
6228 elif self.isprotected(mydest):
6229 # Use md5 of the target in ${D} if it exists...
6230 if os.path.exists(os.path.normpath(srcroot+myabsto)):
6231 mydest = new_protect_filename(myrealdest, newmd5=portage_checksum.perform_md5(srcroot+myabsto))
6233 mydest = new_protect_filename(myrealdest, newmd5=portage_checksum.perform_md5(myabsto))
6235 # if secondhand==None it means we're operating in "force" mode and should not create a second hand.
6236 if (secondhand!=None) and (not os.path.exists(myrealto)):
6237 # either the target directory doesn't exist yet or the target file doesn't exist -- or
6238 # the target is a broken symlink. We will add this file to our "second hand" and merge
6240 secondhand.append(mysrc[len(srcroot):])
6242 # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
6243 mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
6245 print ">>>",mydest,"->",myto
# CONTENTS record for a symlink: "sym path -> target mtime".
6246 outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
6248 print "!!! Failed to move file."
6249 print "!!!",mydest,"->",myto
6251 elif stat.S_ISDIR(mymode):
6252 # we are merging a directory
6254 # destination exists
6257 # Save then clear flags on dest.
# BSD file flags (chflags) can block the merge; cleared here, restored later.
6258 dflags=bsd_chflags.lgetflags(mydest)
6259 if(bsd_chflags.lchflags(mydest, 0)<0):
6260 writemsg("!!! Couldn't clear flags on '"+mydest+"'.\n")
6262 if not os.access(mydest, os.W_OK):
6263 pkgstuff = pkgsplit(self.pkg)
6264 writemsg("\n!!! Cannot write to '"+mydest+"'.\n")
6265 writemsg("!!! Please check permissions and directories for broken symlinks.\n")
6266 writemsg("!!! You may start the merge process again by using ebuild:\n")
6267 writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
6268 writemsg("!!! And finish by running this: env-update\n\n")
6271 if stat.S_ISLNK(mydmode) or stat.S_ISDIR(mydmode):
6272 # a symlink to an existing directory will work for us; keep it:
6273 writemsg_stdout("--- %s/\n" % mydest)
6275 bsd_chflags.lchflags(mydest, dflags)
6277 # a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
6278 if movefile(mydest,mydest+".backup", mysettings=self.settings) == None:
6280 print "bak",mydest,mydest+".backup"
6281 #now create our directory
# SELinux builds create the directory with the source's security context.
6283 sid = selinux.get_sid(mysrc)
6284 selinux.secure_mkdir(mydest,sid)
6288 bsd_chflags.lchflags(mydest, dflags)
# mystat[0]=mode, mystat[4]=uid, mystat[5]=gid (os.lstat tuple indices).
6289 os.chmod(mydest,mystat[0])
6290 os.chown(mydest,mystat[4],mystat[5])
6291 writemsg_stdout(">>> %s/\n" % mydest)
6293 #destination doesn't exist
6295 sid = selinux.get_sid(mysrc)
6296 selinux.secure_mkdir(mydest,sid)
6299 os.chmod(mydest,mystat[0])
6301 bsd_chflags.lchflags(mydest, bsd_chflags.lgetflags(mysrc))
6302 os.chown(mydest,mystat[4],mystat[5])
6303 writemsg_stdout(">>> %s/\n" % mydest)
6304 outfile.write("dir "+myrealdest+"\n")
6305 # recurse and merge this directory
6306 if self.mergeme(srcroot,destroot,outfile,secondhand,offset+x+"/",cfgfiledict,thismtime):
6308 elif stat.S_ISREG(mymode):
6309 # we are merging a regular file
6310 mymd5=portage_checksum.perform_md5(mysrc,calc_prelink=1)
6311 # calculate config file protection stuff
6312 mydestdir=os.path.dirname(mydest)
6316 # destination file exists
6317 if stat.S_ISDIR(mydmode):
6318 # install of destination is blocked by an existing directory with the same name
6320 writemsg_stdout("!!! %s\n" % mydest)
6321 elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
6323 # install of destination is blocked by an existing regular file,
6324 # or by a symlink to an existing regular file;
6325 # now, config file management may come into play.
6326 # we only need to tweak mydest if cfg file management is in play.
6328 # we have a protection path; enable config file management.
6329 destmd5=portage_checksum.perform_md5(mydest,calc_prelink=1)
6331 if cfgfiledict.has_key(myrealdest):
6332 if destmd5 in cfgfiledict[myrealdest]:
6335 del cfgfiledict[myrealdest]
6338 #file already in place; simply update mtimes of destination
6339 os.utime(mydest,(thismtime,thismtime))
6343 #mymd5!=destmd5 and we've cycled; move mysrc into place as a ._cfg file
6345 cfgfiledict[myrealdest]=[mymd5]
6347 elif cfgfiledict.has_key(myrealdest) and (mymd5 in cfgfiledict[myrealdest]):
6348 #myd5!=destmd5, we haven't cycled, and the file we're merging has been already merged previously
# "IGNORE" flag (set from NOCONFMEM in treewalk) controls whether the
# previously-merged md5 memory is honored.
6350 moveme=cfgfiledict["IGNORE"]
6351 cfgprot=cfgfiledict["IGNORE"]
6353 #mymd5!=destmd5, we haven't cycled, and the file we're merging hasn't been merged before
6356 if not cfgfiledict.has_key(myrealdest):
6357 cfgfiledict[myrealdest]=[]
6358 if mymd5 not in cfgfiledict[myrealdest]:
6359 cfgfiledict[myrealdest].append(mymd5)
6360 # only record the last md5
6361 if len(cfgfiledict[myrealdest])>1:
6362 del cfgfiledict[myrealdest][0]
# Redirect the write to a ._cfg0000_* protected filename.
6365 mydest = new_protect_filename(myrealdest, newmd5=mymd5)
6367 # whether config protection or not, we merge the new file the
6368 # same way. Unless moveme=0 (blocking directory)
6370 mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
6376 # We need to touch the destination so that on --update the
6377 # old package won't yank the file with it. (non-cfgprot related)
6378 os.utime(myrealdest,(thismtime,thismtime))
6380 if self.settings["USERLAND"] == "Darwin" and myrealdest[-2:] == ".a":
6382 # XXX kludge, can be killed when portage stops relying on
6383 # md5+mtime, and uses refcounts
6384 # alright, we've fooled w/ mtime on the file; this pisses off static archives
6385 # basically internal mtime != file's mtime, so the linker (falsely) thinks
6386 # the archive is stale, and needs to have it's toc rebuilt.
6388 myf=open(myrealdest,"r+")
6390 # ar mtime field is digits padded with spaces, 12 bytes.
6391 lms=str(thismtime+5).ljust(12)
6394 if magic != "!<arch>\n":
6395 # not an archive (dolib.a from portage.py makes it here fex)
6398 st=os.stat(myrealdest)
# Walk every ar(1) member header, patching its mtime field in place.
6399 while myf.tell() < st.st_size - 12:
6406 # skip uid/gid/mperm
6409 # read the archive member's size
6410 x=long(myf.read(10))
6412 # skip the trailing newlines, and add the potential
6413 # extra padding byte if it's not an even size
6414 myf.seek(x + 2 + (x % 2),1)
6416 # and now we're at the end. yay.
# Re-checksum after rewriting the archive so CONTENTS records the final md5.
6418 mymd5=portage_checksum.perform_md5(myrealdest,calc_prelink=1)
6419 os.utime(myrealdest,(thismtime,thismtime))
6423 outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
6424 writemsg_stdout("%s %s\n" % (zing,mydest))
6426 # we are merging a fifo or device node
6429 # destination doesn't exist
6430 if movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)!=None:
6432 if stat.S_ISFIFO(mymode):
6433 # we don't record device nodes in CONTENTS,
6434 # although we do merge them.
6435 outfile.write("fif "+myrealdest+"\n")
6438 writemsg_stdout(zing+" "+mydest+"\n")
def merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0):
    """Merge the install image at mergeroot into the live root.

    Thin convenience wrapper around treewalk(); note the argument
    reordering: treewalk() expects (srcroot, destroot, inforoot, ...).
    """
    return self.treewalk(mergeroot, myroot, inforoot, myebuild, cleanup=cleanup)
# NOTE(review): sampled listing — original lines 6446 (missing-file return)
# and 6449 (presumably myfile.close() — confirm) are not visible here.
6443 def getstring(self,name):
6444 "returns contents of a file with whitespace converted to spaces"
6445 if not os.path.exists(self.dbdir+"/"+name):
6447 myfile=open(self.dbdir+"/"+name,"r")
# Split on any whitespace, then rejoin with single spaces.
6448 mydata=string.split(myfile.read())
6450 return string.join(mydata," ")
def copyfile(self, fname):
    """Copy fname into this package's db directory, keeping its basename."""
    target = self.dbdir + "/" + os.path.basename(fname)
    shutil.copyfile(fname, target)
# Return the raw contents of a file in the db directory.
# NOTE(review): sampled listing — lines 6457 (missing-file return) and
# 6460-6462 (presumably close + return mydata — confirm) are not visible.
6455 def getfile(self,fname):
6456 if not os.path.exists(self.dbdir+"/"+fname):
6458 myfile=open(self.dbdir+"/"+fname,"r")
6459 mydata=myfile.read()
# Write `data` to a file in the db directory (overwriting any existing file).
# NOTE(review): sampled listing — the write/close lines after 6464 are
# not visible here.
6463 def setfile(self,fname,data):
6464 myfile=open(self.dbdir+"/"+fname,"w")
# Read a db file and return its whitespace-separated tokens.
# NOTE(review): sampled listing — the missing-file return (6470), the outer
# loop header over mylines, the append, and the final return are not visible.
6468 def getelements(self,ename):
6469 if not os.path.exists(self.dbdir+"/"+ename):
6471 myelement=open(self.dbdir+"/"+ename,"r")
6472 mylines=myelement.readlines()
# x[:-1] strips the trailing newline from each line before tokenizing.
6475 for y in string.split(x[:-1]):
# Write mylist to a db file, one element per line.
# NOTE(review): sampled listing — the loop header over mylist (6482) and the
# file close are not visible here.
6480 def setelements(self,mylist,ename):
6481 myelement=open(self.dbdir+"/"+ename,"w")
6483 myelement.write(x+"\n")
def isregular(self):
    """Return True when this is a regular package, i.e. its db directory
    holds a CATEGORY file.  A dblink can be virtual *and* regular."""
    category_marker = self.dbdir + "/CATEGORY"
    return os.path.exists(category_marker)
# Remove the temporary extraction tree for a binary-package merge and the
# stale saved ebuild environment, if present.
# NOTE(review): sampled listing — lines 6494-6495 are not visible; `origdir`
# is unused in the visible lines (presumably an os.chdir(origdir) — confirm).
6490 def cleanup_pkgmerge(mypkg,origdir):
6491 shutil.rmtree(settings["PORTAGE_TMPDIR"]+"/portage-pkg/"+mypkg)
6492 if os.path.exists(settings["PORTAGE_TMPDIR"]+"/portage/"+mypkg+"/temp/environment"):
6493 os.unlink(settings["PORTAGE_TMPDIR"]+"/portage/"+mypkg+"/temp/environment")
# NOTE(review): sampled listing — interior lines (early returns, os.chdir
# calls, makedirs, file close) are missing between the numbered lines below.
6496 def pkgmerge(mytbz2,myroot,mysettings):
6497 """will merge a .tbz2 file, returning a list of runtime dependencies
6498 that must be satisfied, or None if there was a merge error. This
6499 code assumes the package exists."""
6500 if mytbz2[-5:]!=".tbz2":
6501 print "!!! Not a .tbz2 file"
# Package name is the tbz2 basename without the ".tbz2" extension.
6503 mypkg=os.path.basename(mytbz2)[:-5]
6504 xptbz2=xpak.tbz2(mytbz2)
6506 mycat=xptbz2.getfile("CATEGORY")
6508 print "!!! CATEGORY info missing from info chunk, aborting..."
6511 mycatpkg=mycat+"/"+mypkg
# Layout of the temporary extraction tree: bin/ for the image, inf/ for
# the xpak metadata (SLOT, CATEGORY, the embedded ebuild, ...).
6512 tmploc=mysettings["PORTAGE_TMPDIR"]+"/portage-pkg/"
6513 pkgloc=tmploc+"/"+mypkg+"/bin/"
6514 infloc=tmploc+"/"+mypkg+"/inf/"
6515 myebuild=tmploc+"/"+mypkg+"/inf/"+os.path.basename(mytbz2)[:-4]+"ebuild"
6516 if os.path.exists(tmploc+"/"+mypkg):
6517 shutil.rmtree(tmploc+"/"+mypkg,1)
6520 writemsg_stdout(">>> Extracting info\n")
6521 xptbz2.unpackinfo(infloc)
6522 # run pkg_setup early, so we can bail out early
6523 # (before extracting binaries) if there's a problem
6527 mysettings.configdict["pkg"]["CATEGORY"] = mycat;
6528 a=doebuild(myebuild,"setup",myroot,mysettings,tree="bintree")
6529 writemsg_stdout(">>> Extracting %s\n" % mypkg)
# Unpack the binary image; `free=1` runs the pipeline without the sandbox.
6530 notok=spawn("bzip2 -dqc -- '"+mytbz2+"' | tar xpf -",mysettings,free=1)
6532 print "!!! Error Extracting",mytbz2
6533 cleanup_pkgmerge(mypkg,origdir)
6536 # the merge takes care of pre/postinst and old instance
6537 # auto-unmerge, virtual/provides updates, etc.
6538 mysettings.load_infodir(infloc)
6539 mylink=dblink(mycat,mypkg,myroot,mysettings,treetype="bintree")
6540 mylink.merge(pkgloc,infloc,myroot,myebuild,cleanup=1)
6542 if not os.path.exists(infloc+"/RDEPEND"):
6545 #get runtime dependencies
6546 a=open(infloc+"/RDEPEND","r")
# Normalize RDEPEND whitespace to single spaces for the return value.
6547 returnme=string.join(string.split(a.read())," ")
6549 cleanup_pkgmerge(mypkg,origdir)
# --- Module-level initialization: determine ROOT and create the standard
# --- writable directories under it (tmp, var/tmp, var/lib/portage).
# NOTE(review): sampled listing — the else branches, try: headers and
# sys.exit calls between the numbered lines are not visible here.
6553 if os.environ.has_key("ROOT"):
6554 root=os.environ["ROOT"]
# root carries a trailing slash; root[:-1] strips it for the existence tests.
6562 if not os.path.exists(root[:-1]):
6563 writemsg("!!! Error: ROOT "+root+" does not exist. Please correct this.\n")
6564 writemsg("!!! Exiting.\n\n")
6566 elif not os.path.isdir(root[:-1]):
6567 writemsg("!!! Error: ROOT "+root[:-1]+" is not a directory. Please correct this.\n")
6568 writemsg("!!! Exiting.\n\n")
6571 #create tmp and var/tmp if they don't exist; read config
6573 if not os.path.exists(root+"tmp"):
6574 writemsg(">>> "+root+"tmp doesn't exist, creating it...\n")
# 01777: world-writable sticky dir, the conventional mode for tmp.
6575 os.mkdir(root+"tmp",01777)
6576 if not os.path.exists(root+"var/tmp"):
6577 writemsg(">>> "+root+"var/tmp doesn't exist, creating it...\n")
6579 os.mkdir(root+"var",0755)
# Parent may already exist; that failure is deliberately ignored.
6580 except (OSError,IOError):
6583 os.mkdir(root+"var/tmp",01777)
6584 except SystemExit, e:
6587 writemsg("portage: couldn't create /var/tmp; exiting.\n")
6589 if not os.path.exists(root+"var/lib/portage"):
6590 writemsg(">>> "+root+"var/lib/portage doesn't exist, creating it...\n")
6592 os.mkdir(root+"var",0755)
6593 except (OSError,IOError):
6596 os.mkdir(root+"var/lib",0755)
6597 except (OSError,IOError):
6600 os.mkdir(root+"var/lib/portage",02750)
6601 except SystemExit, e:
6604 writemsg("portage: couldn't create /var/lib/portage; exiting.\n")
6608 #####################################
6609 # Deprecation Checks
# Warn emerge users whose active profile ships a "deprecated" marker file.
6613 if os.path.isdir(PROFILE_PATH):
6614 profiledir = PROFILE_PATH
6615 if "PORTAGE_CALLER" in os.environ and os.environ["PORTAGE_CALLER"] == "emerge" and os.access(DEPRECATED_PROFILE_FILE, os.R_OK):
6616 deprecatedfile = open(DEPRECATED_PROFILE_FILE, "r")
6617 dcontent = deprecatedfile.readlines()
6618 deprecatedfile.close()
# First line of the marker file names the replacement profile; any
# further lines are upgrade instructions printed verbatim.
6619 newprofile = dcontent[0]
6620 writemsg(red("\n!!! Your current profile is deprecated and not supported anymore.\n"))
6621 writemsg(red("!!! Please upgrade to the following profile if possible:\n"))
6622 writemsg(8*" "+green(newprofile)+"\n")
6623 if len(dcontent) > 1:
6624 writemsg("To upgrade do the following steps:\n")
6625 for myline in dcontent[1:]:
6629 if os.path.exists(USER_VIRTUALS_FILE):
6630 writemsg(red("\n!!! /etc/portage/virtuals is deprecated in favor of\n"))
6631 writemsg(red("!!! /etc/portage/profile/virtuals. Please move it to\n"))
6632 writemsg(red("!!! this new location.\n\n"))
6635 #####################################
6639 # =============================================================================
6640 # =============================================================================
6641 # -----------------------------------------------------------------------------
6642 # We're going to lock the global config to prevent changes, but we need
6643 # to ensure the global settings are right.
# The global config object used by the rest of the module.
6644 settings=config(config_profile_path=PROFILE_PATH,config_incrementals=portage_const.INCREMENTALS)
# Record the master pid and protect both values from later resets via
# backup_changes().
6647 settings["PORTAGE_MASTER_PID"]=str(os.getpid())
6648 settings.backup_changes("PORTAGE_MASTER_PID")
6649 # We are disabling user-specific bashrc files.
6650 settings["BASH_ENV"] = INVALID_ENV_FILE
6651 settings.backup_changes("BASH_ENV")
6653 # gets virtual package settings
# Deprecated module-level shim retained for backward compatibility:
# forwards to the global settings object's getvirtuals().
6654 def getvirtuals(myroot):
6656 writemsg("--- DEPRECATED call to getvirtual\n")
6657 return settings.getvirtuals(myroot)
# Populate the module-global virtuals maps and the db[] tree registry for
# "/" and for the configured root.
# NOTE(review): sampled listing — loop headers and the virts_p initialization
# between the numbered lines are not visible here.
6659 def do_vartree(mysettings):
6660 global virts,virts_p
6661 virts=mysettings.getvirtuals("/")
6665 myvkeys=virts.keys()
# virts_p indexes virtuals by package name only (the part after the "/").
6667 vkeysplit=x.split("/")
6668 if not virts_p.has_key(vkeysplit[1]):
6669 virts_p[vkeysplit[1]]=virts[x]
6670 db["/"]={"virtuals":virts,"vartree":vartree("/",virts)}
# When root != "/", register a second vartree for the configured root.
6672 virts=mysettings.getvirtuals(root)
6673 db[root]={"virtuals":virts,"vartree":vartree(root,virts)}
6674 #We need to create the vartree first, then load our settings, and then set up our other trees
6676 usedefaults=settings.use_defs
6678 # XXX: This is a circular fix.
6679 #do_vartree(settings)
6680 #settings.loadVirtuals('/')
6681 do_vartree(settings)
6682 #settings.loadVirtuals('/')
6684 settings.reset() # XXX: Regenerate use after we get a vartree -- GLOBAL
6687 # XXX: Might cause problems with root="/" assumptions
# Global portage-tree database handle built from the configured PORTDIR.
6688 portdb=portdbapi(settings["PORTDIR"])
6691 # -----------------------------------------------------------------------------
6692 # =============================================================================
6693 # =============================================================================
# Enable SELinux support only when USE=selinux and the selinux module loads.
6696 if 'selinux' in settings["USE"].split(" "):
6699 if hasattr(selinux, "enabled"):
6700 selinux_enabled = selinux.enabled
6704 writemsg(red("!!! SELinux not loaded: ")+str(e)+"\n")
6707 writemsg(red("!!! SELinux module not found.")+" Please verify that it was installed.\n")
6709 if selinux_enabled == 0:
# Drop the module entirely when SELinux is present but disabled.
6711 del sys.modules["selinux"]
# Create/repair the dependency cache directories, skipped under sandbox.
6717 cachedirs=[CACHE_PATH]
6719 cachedirs.append(root+CACHE_PATH)
6720 if not os.environ.has_key("SANDBOX_ACTIVE"):
6721 for cachedir in cachedirs:
6722 if not os.path.exists(cachedir):
6723 os.makedirs(cachedir,0755)
6724 writemsg(">>> "+cachedir+" doesn't exist, creating it...\n")
6725 if not os.path.exists(cachedir+"/dep"):
# NOTE(review): `2755` here is a *decimal* literal, unlike the octal
# 0755/02775 used on the surrounding lines — looks like a missing
# leading zero (would yield mode 05303). Confirm before relying on it.
6726 os.makedirs(cachedir+"/dep",2755)
6727 writemsg(">>> "+cachedir+"/dep doesn't exist, creating it...\n")
6729 os.chown(cachedir,uid,portage_gid)
6730 os.chmod(cachedir,0775)
6734 mystat=os.lstat(cachedir+"/dep")
6735 os.chown(cachedir+"/dep",uid,portage_gid)
6736 os.chmod(cachedir+"/dep",02775)
# Recursively fix ownership only when the group is actually wrong.
6737 if mystat[stat.ST_GID]!=portage_gid:
6738 spawn("chown -R "+str(uid)+":"+str(portage_gid)+" "+cachedir+"/dep",settings,free=1)
6739 spawn("chmod -R u+rw,g+rw "+cachedir+"/dep",settings,free=1)
# Clear one record from the global mtimedb, warning on unknown keys.
# NOTE(review): sampled listing — the line that actually clears the record
# (between 6745 and 6749) is not visible here.
6743 def flushmtimedb(record):
6745 if record in mtimedb.keys():
6747 #print "mtimedb["+record+"] is cleared."
6749 writemsg("Invalid or unset record '"+record+"' in mtimedb.\n")
6751 #grab mtimes for eclasses and upgrades
# Fragment of the valid-key whitelist (mtimedbkeys) used for pruning below.
6755 "version", "starttime",
6758 mtimedbfile=root+"var/cache/edb/mtimedb"
# find_global=None disables resolving arbitrary globals while unpickling,
# restricting what the pickle stream can instantiate.
6760 mypickle=cPickle.Unpickler(open(mtimedbfile))
6761 mypickle.find_global=None
6762 mtimedb=mypickle.load()
# Migrate legacy key names from older portage versions.
6763 if mtimedb.has_key("old"):
6764 mtimedb["updates"]=mtimedb["old"]
6766 if mtimedb.has_key("cur"):
6768 except SystemExit, e:
# On any load failure, start from a fresh, minimal mtimedb.
6772 mtimedb={"updates":{},"version":"","starttime":0}
# Drop any keys not in the whitelist.
6774 for x in mtimedb.keys():
6775 if x not in mtimedbkeys:
6776 writemsg("Deleting invalid mtimedb key: "+str(x)+"\n")
6779 #,"porttree":portagetree(root,virts),"bintree":binarytree(root,virts)}
6780 features=settings["FEATURES"].split()
# NOTE(review): sampled listing — continue statements, the myupd/processed
# initialization and parts of the validation between the numbered lines are
# not visible here.
6782 def do_upgrade(mykey):
6783 """Valid updates are returned as a list of split update commands."""
6785 writemsg(green("Performing Global Updates: ")+bold(mykey)+"\n")
6786 writemsg("(Could take a couple of minutes if you have a lot of binary packages.)\n")
6787 writemsg(" "+bold(".")+"='update pass' "+bold("*")+"='binary update' "+bold("@")+"='/var/db move'\n"+" "+bold("s")+"='/var/db SLOT move' "+bold("S")+"='binary SLOT move' "+bold("p")+"='update /etc/portage/package.*'\n")
# Each line of the updates file is one whitespace-separated command.
6790 mylines = grabfile(mykey)
6791 for myline in mylines:
6792 mysplit = myline.split()
6793 if len(mysplit) == 0:
# Only "move" and "slotmove" are recognized command types.
6795 if mysplit[0] not in ("move", "slotmove"):
6796 writemsg("portage: Update type \""+mysplit[0]+"\" not recognized.\n")
6799 if mysplit[0]=="move":
6801 writemsg("portage: Update command \""+myline+"\" invalid; skipping.\n")
# move takes two bare category/package atoms (no version, no operators).
6804 orig_value, new_value = mysplit[1], mysplit[2]
6805 for cp in (orig_value, new_value):
6806 if not (isvalidatom(cp) and isjustname(cp)):
6807 writemsg("\nERROR: Malformed update entry '%s'\n" % myline)
6810 if mysplit[0]=="slotmove":
6812 writemsg("portage: Update command \""+myline+"\" invalid; skipping.\n")
# slotmove takes an atom plus the old and new SLOT values.
6815 pkg, origslot, newslot = mysplit[1], mysplit[2], mysplit[3]
6816 if not isvalidatom(pkg):
6817 writemsg("\nERROR: Malformed update entry '%s'\n" % myline)
6821 # The list of valid updates is filtered by continue statements above.
6822 myupd.append(mysplit)
# Progress indicator: one dot per accepted update command.
6823 sys.stdout.write(".")
6825 return myupd, processed == 1
# Persist the global mtimedb to disk via an atomic write, then best-effort
# fix its ownership/permissions.  Failures are tolerated (this runs at exit).
# NOTE(review): sampled listing — the mymfn assignment, try: headers and the
# except-body lines between the numbered lines are not visible here.
6827 def commit_mtimedb():
6833 mtimedb["version"]=VERSION
# atomic_ofstream writes to a temp file and renames, so a crash mid-write
# cannot leave a truncated mtimedb behind.
6834 f = atomic_ofstream(mymfn)
6835 cPickle.dump(mtimedb, f, -1)
6837 except SystemExit, e:
6839 except Exception, e:
6844 os.chown(mymfn,uid,portage_gid)
6845 os.chmod(mymfn,0664)
6846 except SystemExit, e:
6848 except Exception, e:
# NOTE(review): sampled listing — the enclosing `def portageexit():` line
# (original 6851) and the commit_mtimedb() call are not visible here.
6852 global uid,portage_gid,portdb,db
# Only flush caches when we have privileges and are not inside the sandbox.
6853 if secpass and not os.environ.has_key("SANDBOX_ACTIVE"):
6854 close_portdbapi_caches()
# Run portageexit automatically on interpreter shutdown.
6857 atexit_register(portageexit)
# NOTE(review): sampled listing — loop headers, the recursivefiles/
# file_contents/update_files initialization and except-clauses between the
# numbered lines are not visible here.
6859 def update_config_files(update_iter):
6860 """Perform global updates on /etc/portage/package.* and the world file."""
# The four package.* files, each also checked under profile/.
6863 myxfiles = ["package.mask","package.unmask","package.keywords","package.use"]
6864 myxfiles.extend(prefix_array(myxfiles, "profile/"))
# Any of these may be a directory; expand directories to their files.
6867 if os.path.isdir(USER_CONFIG_PATH+os.path.sep+x):
6868 recursivefiles.extend([x+os.path.sep+y for y in listdir(USER_CONFIG_PATH+os.path.sep+x, filesonly=1, recursive=1)])
6870 recursivefiles.append(x)
6871 myxfiles = recursivefiles
6874 myfile = open(USER_CONFIG_PATH+os.path.sep+x,"r")
6875 file_contents[x] = myfile.readlines()
# On read failure the file is dropped from the working set.
6878 if file_contents.has_key(x):
6879 del file_contents[x]
6881 worldlist = grabfile(WORLD_FILE)
6883 for update_cmd in update_iter:
6884 if update_cmd[0] == "move":
6885 old_value, new_value = update_cmd[1], update_cmd[2]
6886 #update world entries:
6887 for x in range(0,len(worldlist)):
6888 #update world entries, if any.
6889 worldlist[x] = dep_transform(worldlist[x], old_value, new_value)
6891 #update /etc/portage/packages.*
6892 for x in file_contents:
6893 for mypos in range(0,len(file_contents[x])):
6894 line = file_contents[x][mypos]
# Skip comments and blank lines.
6895 if line[0] == "#" or string.strip(line) == "":
# Compare on the category/package key of the line's first atom.
6897 key = dep_getkey(line.split()[0])
6898 if key == old_value:
6899 file_contents[x][mypos] = string.replace(line, old_value, new_value)
# Progress indicator: one "p" per rewritten config line.
6901 sys.stdout.write("p")
6904 write_atomic(WORLD_FILE,"\n".join(worldlist))
# Write back each touched file, honoring CONFIG_PROTECT by diverting to a
# ._cfg* name when the target path is protected.
6906 for x in update_files:
6907 mydblink = dblink('','','/',settings)
6908 updating_file = os.path.join(USER_CONFIG_PATH, x)
6909 if mydblink.isprotected(updating_file):
6910 updating_file = new_protect_filename(updating_file)[0]
6912 write_atomic(updating_file, "".join(file_contents[x]))
def global_updates():
	"""Apply the profiles/updates/* directives (package moves and
	slot moves) to the world file, /etc/portage/package.*, the
	installed-package db and on-disk binary packages.

	NOTE(review): this view is elided -- loop/branch headers and some
	statements (the sort between the two re-keying comprehensions,
	the initialization of myupd/timestamps, an "if no_errors:" guard,
	an "else:" before the fixpackages message flag) are not visible.
	Only the visible lines are annotated; indentation reconstructed.
	"""
	updpath = os.path.join(settings["PORTDIR"], "profiles", "updates")
	# Tolerate a missing/empty updates directory (fresh tree).
	mylist = listdir(updpath, EmptyOnError=1)
	# Re-key "1Q-2005"-style filenames to "2005-1Q" so a plain sort
	# (elided between these two lines) orders them chronologically...
	mylist = [myfile[3:]+"-"+myfile[:2] for myfile in mylist]
	# ...then restore the original "1Q-2005" naming after sorting.
	mylist = [myfile[5:]+"-"+myfile[:4] for myfile in mylist]
	if not mtimedb.has_key("updates"):
		mtimedb["updates"] = {}
	do_upgrade_packagesmessage = 0
	for myfile in mylist:
		mykey = os.path.join(updpath, myfile)
		mystat = os.stat(mykey)
		# Skip anything that is not a regular file ("continue" elided).
		if not stat.S_ISREG(mystat.st_mode):
		# Re-process a file when it is new, its mtime changed, or the
		# user explicitly ran fixpackages.
		if mykey not in mtimedb["updates"] or \
		   mtimedb["updates"][mykey] != mystat.st_mtime or \
		   settings["PORTAGE_CALLER"] == "fixpackages":
			valid_updates, no_errors = do_upgrade(mykey)
			myupd.extend(valid_updates)
			# (an "if no_errors:" guard appears to be elided here)
			# Update our internal mtime since we
			# processed all of our directives.
			timestamps[mykey] = mystat.st_mtime
	update_config_files(myupd)
	# Apply the moves to the installed-package db and to binary
	# packages on disk.
	db["/"]["bintree"] = binarytree("/", settings["PKGDIR"], virts)
	for update_cmd in myupd:
		if update_cmd[0] == "move":
			db["/"]["vartree"].dbapi.move_ent(update_cmd)
			db["/"]["bintree"].move_ent(update_cmd)
		elif update_cmd[0] == "slotmove":
			db["/"]["vartree"].dbapi.move_slot_ent(update_cmd)
			db["/"]["bintree"].move_slot_ent(update_cmd, os.path.join(settings["PORTAGE_TMPDIR"], "tbz2"))

	# The above global updates proceed quickly, so they
	# are considered a single mtimedb transaction.
	if len(timestamps) > 0:
		# We do not update the mtime in the mtimedb
		# until after _all_ of the above updates have
		# been processed because the mtimedb will
		# automatically commit when killed by ctrl C.
		for mykey, mtime in timestamps.iteritems():
			mtimedb["updates"][mykey] = mtime

	# We gotta do the brute force updates for these now.
	if settings["PORTAGE_CALLER"] == "fixpackages" or \
	   "fixpackages" in features:
		db["/"]["bintree"].update_ents(myupd, os.path.join(settings["PORTAGE_TMPDIR"], "tbz2"))
		# (an "else:" appears to be elided here -- without
		# fixpackages we only remember to tell the user later)
		do_upgrade_packagesmessage = 1

	#make sure our internal databases are consistent; recreate our virts and vartree
	do_vartree(settings)
	# Only nag about un-updated binary packages if any exist.
	if do_upgrade_packagesmessage and \
	   listdir(os.path.join(settings["PKGDIR"], "All"), EmptyOnError=1):
		writemsg("\n\n\n ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the")
		writemsg("\n tbz2's in the packages directory. "+bold("Note: This can take a very long time."))
# Run pending profile updates automatically, but only when privileged
# (secpass==2 -- presumably full/root rights; TODO confirm), outside
# the build sandbox, and invoked by emerge/fixpackages.
if (secpass==2) and (not os.environ.has_key("SANDBOX_ACTIVE")):
	if settings["PORTAGE_CALLER"] in ["emerge","fixpackages"]:
		#only do this if we're root and not running repoman/ebuild digest
		# (the actual call made here is elided from this view)

#continue setting up other trees
db["/"]["porttree"]=portagetree("/",virts)
db["/"]["bintree"]=binarytree("/",settings["PKGDIR"],virts)
# These two assignments target db[root]; the guard introducing them
# (presumably a root != "/" check) is elided from this view.
db[root]["porttree"]=portagetree(root,virts)
db[root]["bintree"]=binarytree(root,settings["PKGDIR"],virts)
# Build the stacked thirdpartymirrors mapping.  Overlay profiles are
# placed ahead of the main tree's profiles directory so that, with
# incremental stacking, overlay entries take effect first.
profileroots = [settings["PORTDIR"] + "/profiles/"]
for overlaydir in settings["PORTDIR_OVERLAY"].split():
	profileroots = [overlaydir + "/profiles/"] + profileroots
thirdparty_lists = [grabdict(os.path.join(profiledir, "thirdpartymirrors")) for profiledir in profileroots]
thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
# Sanity-check PORTAGE_TMPDIR: it must exist and be a directory.
# (Whatever abort/exit follows each message pair is elided from view.)
if not os.path.exists(settings["PORTAGE_TMPDIR"]):
	writemsg("portage: the directory specified in your PORTAGE_TMPDIR variable, \""+settings["PORTAGE_TMPDIR"]+",\"\n")
	writemsg("does not exist. Please create this directory or correct your PORTAGE_TMPDIR setting.\n")
if not os.path.isdir(settings["PORTAGE_TMPDIR"]):
	writemsg("portage: the directory specified in your PORTAGE_TMPDIR variable, \""+settings["PORTAGE_TMPDIR"]+",\"\n")
	writemsg("is not a directory. Please correct your PORTAGE_TMPDIR setting.\n")
# COMPATIBILITY -- This shouldn't be used.
pkglines = settings.packages

# Validate ACCEPT_KEYWORDS against the known architecture list; every
# arch is acceptable both as stable "arch" and as testing "~arch".
groups = settings["ACCEPT_KEYWORDS"].split()
archlist = flatten([[myarch, "~"+myarch] for myarch in settings["PORTAGE_ARCHLIST"].split()])
for group in groups:
	# (a guard for an empty archlist is elided from this view)
	writemsg("--- 'profiles/arch.list' is empty or not available. Empty portage tree?\n")
	# Leading "-" entries are negations and are intentionally allowed.
	elif (group not in archlist) and group[0]!='-':
		writemsg("\n"+red("!!! INVALID ACCEPT_KEYWORDS: ")+str(group)+"\n")
# Warn loudly when the profile path is a plain directory/file instead
# of a symlink into $PORTDIR/profiles -- that setup will likely break
# merges (but is expected mid-sync, hence the final caveat).
if not os.path.islink(PROFILE_PATH) and os.path.exists(settings["PORTDIR"]+"/profiles"):
	for profile_warning in (
			"\a\n\n!!! "+PROFILE_PATH+" is not a symlink and will probably prevent most merges.\n",
			"!!! It should point into a profile within %s/profiles/\n" % settings["PORTDIR"],
			"!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n"):
		writemsg(red(profile_warning))
7036 # ============================================================================
7037 # ============================================================================