1 # portage.py -- core Portage functionality
2 # Copyright 1998-2004 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id: /var/cvsroot/gentoo-src/portage/pym/portage.py,v 1.524.2.76 2005/05/29 12:40:08 jstubbs Exp $
7 VERSION="$Rev$"[6:-2] + "-svn"
9 # ===========================================================================
10 # START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
11 # ===========================================================================
18 print "Failed to import sys! Something is _VERY_ wrong with python."
22 import os,string,types,signal,fcntl,errno
23 import time,traceback,copy
24 import re,pwd,grp,commands
29 import pickle as cPickle
33 from time import sleep
34 from random import shuffle
35 from cache.cache_errors import CacheError
39 sys.stderr.write("\n\n")
40 sys.stderr.write("!!! Failed to complete python imports. There are internal modules for\n")
41 sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
42 sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
44 sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
45 sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
47 sys.stderr.write(" "+str(e)+"\n\n");
50 sys.stderr.write("\n\n")
51 sys.stderr.write("!!! Failed to complete python imports. There are internal modules for\n")
52 sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
53 sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
55 sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
56 sys.stderr.write("!!! gone wrong. The exception was non-standard and we were unable to catch it.\n\n")
60 # XXX: This should get renamed to bsd_chflags, I think.
66 # XXX: This should get renamed to bsd_chflags, I think.
75 # XXX: This needs to get cleaned up.
77 from output import blue, bold, brown, darkblue, darkgreen, darkred, darkteal, \
78 darkyellow, fuchsia, fuscia, green, purple, red, teal, turquoise, white, \
79 xtermTitle, xtermTitleReset, yellow
82 from portage_const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
83 USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
84 PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
85 EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
86 MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
87 DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
88 INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
89 INCREMENTALS, STICKIES, EAPI
91 from portage_data import ostype, lchown, userland, secpass, uid, wheelgid, \
92 portage_uid, portage_gid
95 from portage_util import atomic_ofstream, dump_traceback, getconfig, grabdict, \
96 grabdict_package, grabfile, grabfile_package, \
97 map_dictlist_vals, pickle_read, pickle_write, stack_dictlist, stack_dicts, stack_lists, \
98 unique_array, varexpand, writedict, writemsg, writemsg_stdout, write_atomic
99 import portage_exception
103 from portage_exec import atexit_register, run_exitfuncs
104 from portage_locks import unlockfile,unlockdir,lockfile,lockdir
105 import portage_checksum
106 from portage_checksum import perform_md5,perform_checksum,prelink_capable
108 from portage_localization import _
110 # Need these functions directly in portage namespace to not break every external tool in existence
111 from portage_versions import ververify,vercmp,catsplit,catpkgsplit,pkgsplit,pkgcmp
113 except SystemExit, e:
116 sys.stderr.write("\n\n")
117 sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
118 sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
119 sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
120 sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
121 sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
122 sys.stderr.write("!!! a recovery of portage.\n")
124 sys.stderr.write(" "+str(e)+"\n\n")
128 # ===========================================================================
129 # END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
130 # ===========================================================================
def exithandler(signum,frame):
	"""Signal callback: silence further SIGINT/SIGTERM while shutting down."""
	# Ignore repeated termination signals so the shutdown path is not
	# re-entered while it is already running.
	for _sig in (signal.SIGINT, signal.SIGTERM):
		signal.signal(_sig, signal.SIG_IGN)
# NOTE(review): the comment below belongs to an os.kill(0, ...) call inside
# exithandler that is not present in this excerpt.
# 0=send to *everybody* in process group
# Module-level signal wiring: restore default child handling, route
# SIGINT/SIGTERM through exithandler, and reset SIGPIPE to the OS default.
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
signal.signal(signal.SIGINT, exithandler)
signal.signal(signal.SIGTERM, exithandler)
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
# NOTE(review): headless interior of load_mod(name) -- the 'def' line and the
# trailing 'return mod' are missing from this excerpt.
# Strip the final dotted component to get the containing module path.
modname = string.join(string.split(name,".")[:-1],".")
# __import__ returns the top-level package; walk down attribute by attribute.
mod = __import__(modname)
components = name.split('.')
for comp in components[1:]:
	mod = getattr(mod, comp)
def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
	"""Look up key in the sub-dicts of top_dict, trying them in key_order.

	NOTE(review): this excerpt is fragmentary -- the 'for x in key_order:'
	loop and the FullCopy/EmptyOnError branching are missing, so 'x' below
	is unbound as shown.
	"""
	if top_dict.has_key(x) and top_dict[x].has_key(key):
		# FullCopy path: return an independent deep copy so callers cannot
		# mutate the shared configuration dictionaries.
		return copy.deepcopy(top_dict[x][key])
		# Non-copy path (presumably on an 'else' branch in the full file).
		return top_dict[x][key]
	# No sub-dict supplied the key: hard failure.
	raise KeyError, "Key not found in list; '%s'" % key
167 "this fixes situations where the current directory doesn't exist"
170 except SystemExit, e:
def abssymlink(symlink):
	"""Read a symlink and return its target as an absolute, normalized path.

	Relative targets are resolved against the directory containing the
	symlink itself; absolute targets are returned (normalized) unchanged.
	OSError from os.readlink() propagates to the caller.
	"""
	mylink=os.readlink(symlink)
	if mylink[0] != "/":
		# Relative target: anchor it at the symlink's own directory.
		# (The excerpt prefixed the dirname unconditionally, which mangles
		# absolute targets into dir-relative paths.)
		mydir=os.path.dirname(symlink)
		mylink=mydir+"/"+mylink
	return os.path.normpath(mylink)
def suffix_array(array,suffix,doblanks=1):
	"""Appends a given suffix to each element in an Array/List/Tuple.

	Returns a new list.  Falsy elements (e.g. "") receive the bare suffix
	when doblanks is true, and are passed through unchanged otherwise.
	Raises TypeError for anything that is not a list or tuple.
	"""
	# isinstance() replaces the Python-2-only types.ListType/TupleType
	# membership test and also accepts list/tuple subclasses.
	if not isinstance(array, (list, tuple)):
		raise TypeError("List or Tuple expected. Got %s" % type(array))
	newarray=[]
	for x in array:
		if x:
			newarray.append(x + suffix)
		elif doblanks:
			# Blank entry: the suffix alone stands in for it.
			newarray.append(suffix)
		else:
			newarray.append(x)
	return newarray
def prefix_array(array,prefix,doblanks=1):
	"""Prepends a given prefix to each element in an Array/List/Tuple.

	Returns a new list.  Falsy elements (e.g. "") receive the bare prefix
	when doblanks is true, and are passed through unchanged otherwise.
	Raises TypeError for anything that is not a list or tuple.
	"""
	# isinstance() replaces the Python-2-only types.ListType/TupleType
	# membership test and also accepts list/tuple subclasses.
	if not isinstance(array, (list, tuple)):
		raise TypeError("List or Tuple expected. Got %s" % type(array))
	newarray=[]
	for x in array:
		if x:
			newarray.append(prefix + x)
		elif doblanks:
			# Blank entry: the prefix alone stands in for it.
			newarray.append(prefix)
		else:
			newarray.append(x)
	return newarray
def normalize_path(mypath):
	"""Return os.path.normpath(mypath) with a leading '//' collapsed to '/'.

	POSIX allows normpath() to preserve exactly two leading slashes; portage
	treats them as equivalent to one.  The excerpt lacked a return statement
	(implicitly returning None); the computed path is now returned.
	"""
	newpath = os.path.normpath(mypath)
	if newpath[:2] == "//":
		newpath = newpath[1:]
	return newpath
def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
	"""Directory listing with an mtime-keyed cache (fragmentary excerpt).

	Returns (ret_list, ret_ftype) after applying ignorecvs/ignorelist
	filtering.  NOTE(review): many lines of the original function are
	missing here, so several blocks below are syntactically incomplete.
	"""
	global cacheHit,cacheMiss,cacheStale
	mypath = normalize_path(my_original_path)
	if dircache.has_key(mypath):
		# Cache hit: reuse the previously scanned entries and type codes.
		cached_mtime, list, ftype = dircache[mypath]
	# Cache miss: sentinel values (presumably on an 'else' branch).
	cached_mtime, list, ftype = -1, [], []
	pathstat = os.stat(mypath)
	if stat.S_ISDIR(pathstat[stat.ST_MODE]):
		mtime = pathstat[stat.ST_MTIME]
	except SystemExit, e:
	# Python returns mtime in seconds, so if it was changed in the last few seconds, it could be invalid
	if mtime != cached_mtime or time.time() - mtime < 4:
		if dircache.has_key(mypath):
		list = os.listdir(mypath)
		# followSymlinks selects stat() (follow) vs lstat() (don't).
		pathstat = os.stat(mypath+"/"+x)
		pathstat = os.lstat(mypath+"/"+x)
		# ftype codes: 1 appears to mean directory (see listdir()'s
		# 'ftype[x]==1' check for recursion) -- other codes TODO confirm.
		if stat.S_ISREG(pathstat[stat.ST_MODE]):
		elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
		elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
		except SystemExit, e:
		# Refresh the cache with the newly scanned listing.
		dircache[mypath] = mtime, list, ftype
	# Filter the cached listing: keep ".#"-free entries when ignorecvs,
	# otherwise anything not in ignorelist.
	for x in range(0, len(list)):
		if(ignorecvs and (len(list[x]) > 2) and (list[x][:2]!=".#")):
			ret_list.append(list[x])
			ret_ftype.append(ftype[x])
		elif (list[x] not in ignorelist):
			ret_list.append(list[x])
			ret_ftype.append(ftype[x])
	writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
	return ret_list, ret_ftype
def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
	EmptyOnError=False, dirsonly=False):
	"""Filtered (optionally recursive) directory listing built on cacheddir().

	NOTE(review): fragmentary excerpt -- result assembly and the return are
	partially missing.  The mutable default ignorelist=[] is shared across
	calls; it is only read here, but callers must not mutate it.
	"""
	list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
	# Fast path: no filtering or recursion requested.
	if not (filesonly or dirsonly or recursive):
	# Recurse into subdirectories (ftype 1), skipping VCS bookkeeping dirs.
	if ftype[x]==1 and not (ignorecvs and os.path.basename(list[x]) in ('CVS','.svn','SCCS')):
		l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
		# Prefix each child entry with its parent directory name.
		for y in range(0,len(l)):
			l[y]=list[x]+"/"+l[y]
	# filesonly / dirsonly selection passes (conditions missing in excerpt).
	for x in range(0,len(ftype)):
		rlist=rlist+[list[x]]
	for x in range(0, len(ftype)):
		rlist = rlist + [list[x]]
# Wall-clock time captured at module import.
# NOTE(review): long() is Python-2-only; int() suffices on Python 3.
starttime=long(time.time())
def tokenize(mystring):
	"""breaks a string like 'foo? (bar) oni? (blah (blah))'
	into embedded lists; returns None on paren mismatch"""

	# This function is obsoleted.
	# Use dep_parenreduce

	# NOTE(review): fragmentary excerpt -- the character loop, accumulator
	# resets and paren push/pop bookkeeping are partially missing below.
	# Flush the pending token into the current list.
	curlist.append(accum)
	# '(' presumably: push the current list and start a nested one.
	prevlists.append(curlist)
	curlist.append(accum)
	# ')' with no matching '(': report and bail (returns None per docstring).
	writemsg("!!! tokenizer: Unmatched left parenthesis in:\n'"+str(mystring)+"'\n")
	# Close the nested list and reattach it to its parent.
	curlist=prevlists.pop()
	curlist.append(newlist)
	elif x in string.whitespace:
		# Whitespace terminates the pending token.
		curlist.append(accum)
	curlist.append(accum)
	# End of input with unclosed '(' -- mismatch.
	writemsg("!!! tokenizer: Exiting with unterminated parenthesis in:\n'"+str(mystring)+"'\n")
def flatten(mytokens):
	"""this function now turns a [1,[2,3]] list into
	a [1,2,3] list and returns it.

	Nesting of arbitrary depth is flattened recursively; non-list
	elements are appended unchanged, in order.
	"""
	newlist=[]
	for x in mytokens:
		# isinstance() replaces the Python-2-only types.ListType check.
		if isinstance(x, list):
			newlist.extend(flatten(x))
		else:
			newlist.append(x)
	return newlist
#beautiful directed graph object
#okeys = keys, in order they were added (to optimize firstzero() ordering)
# NOTE(review): the 'class digraph:' header is missing from this excerpt;
# the method fragments below are its interior.  self.dict maps
# node -> [refcount, [parents]] (refcount = number of nodes naming this
# node as parent, per the increments in addnode below).
def addnode(self,mykey,myparent):
	# Register mykey (and its optional parent edge) in the graph.
	if not self.dict.has_key(mykey):
		self.okeys.append(mykey)
		# New parentless node.
		self.dict[mykey]=[0,[]]
		# New node with an initial parent (presumably on an 'else' branch).
		self.dict[mykey]=[0,[myparent]]
		self.dict[myparent][0]=self.dict[myparent][0]+1
	# Existing node: append the parent edge only if not already present.
	if myparent and (not myparent in self.dict[mykey][1]):
		self.dict[mykey][1].append(myparent)
		self.dict[myparent][0]=self.dict[myparent][0]+1
def delnode(self,mykey):
	# Remove mykey, decrementing each parent's reference count.
	if not self.dict.has_key(mykey):
	for x in self.dict[mykey][1]:
		self.dict[x][0]=self.dict[x][0]-1
	self.okeys.remove(mykey)
# NOTE(review): interior of allnodes() -- its 'def' line is missing.
"returns all nodes in the dictionary"
return self.dict.keys()
# NOTE(review): interior of firstzero() -- 'def' and loop lines missing.
"returns first node with zero references, or NULL if no such node exists"
if self.dict[x][0]==0:
def depth(self, mykey):
	# Walk up the first-parent chain (counter lines missing in excerpt).
	while (self.dict[mykey][1]):
		mykey=self.dict[mykey][1][0]
# NOTE(review): interior of allzeros() -- its 'def' line is missing.
"returns all nodes with zero references, or NULL if no such node exists"
for x in self.dict.keys():
	# "blocks ..." pseudo-nodes are excluded from the zero-ref scan.
	mys = string.split(x)
	if mys[0] != "blocks" and self.dict[x][0]==0:
def hasallzeros(self):
	"returns 0/1, Are all nodes zeros? 1 : 0"
	for x in self.dict.keys():
		if self.dict[x][0]!=0:
# NOTE(review): interior of empty() -- its 'def' line is missing.
if len(self.dict)==0:
def hasnode(self,mynode):
	"""Return whether mynode is a node of this graph."""
	# 'in' is the idiomatic membership test; dict.has_key() is deprecated
	# in Python 2 and removed in Python 3.
	return mynode in self.dict
# NOTE(review): headless interior of digraph.copy() -- the 'def copy(self):'
# line, the new graph's construction and its return are missing here.
for x in self.dict.keys():
	# [:] duplicates the [refcount, parents] pair list; the inner parents
	# list is still shared with the original -- presumably intentional,
	# TODO confirm.
	mygraph.dict[x]=self.dict[x][:]
mygraph.okeys=self.okeys[:]
def elog_process(cpv, mysettings):
	"""Collect per-phase elog messages for cpv and hand them to the
	configured PORTAGE_ELOG_SYSTEM modules (fragmentary excerpt).
	"""
	mylogfiles = listdir(mysettings["T"]+"/logging/")
	# shortcut for packages without any messages
	if len(mylogfiles) == 0:
	# exploit listdir() file order so we process log entries in chronological order
	# Log files are named "<phase>.<class>".
	msgfunction, msgtype = f.split(".")
	# Skip message classes not enabled via PORTAGE_ELOG_CLASSES.
	if not msgtype.upper() in mysettings["PORTAGE_ELOG_CLASSES"].split() \
		and not msgtype.lower() in mysettings["PORTAGE_ELOG_CLASSES"].split():
	if msgfunction not in portage_const.EBUILD_PHASES:
		print "!!! can't process invalid log file: %s" % f
	if not msgfunction in mylogentries:
		mylogentries[msgfunction] = []
	msgcontent = open(mysettings["T"]+"/logging/"+f, "r").readlines()
	mylogentries[msgfunction].append((msgtype, msgcontent))
	# in case the filters matched all messages
	if len(mylogentries) == 0:
	# generate a single string with all log messages
	for phase in portage_const.EBUILD_PHASES:
		if not phase in mylogentries:
		for msgtype,msgcontent in mylogentries[phase]:
			fulllog += "%s: %s\n" % (msgtype, phase)
			for line in msgcontent:
	# pass the processing to the individual modules
	logsystems = mysettings["PORTAGE_ELOG_SYSTEM"].split()
	# FIXME: ugly ad.hoc import code
	# TODO: implement a common portage module loader
	logmodule = __import__("elog_modules.mod_"+s)
	m = getattr(logmodule, "mod_"+s)
	m.process(mysettings, cpv, mylogentries, fulllog)
	except (ImportError, AttributeError), e:
		print "!!! Error while importing logging modules while loading \"mod_%s\":" % s
	# A failing elog backend must not abort the merge itself.
	except portage_exception.PortageException, e:
# valid end of version components; integers specify offset from release version
# pre=prerelease, p=patchlevel (should always be followed by an int), rc=release candidate
# all but _p (where it is required) can be followed by an optional trailing integer
endversion={"pre":-2,"p":0,"alpha":-4,"beta":-3,"rc":-1}
# as there's no reliable way to set {}.keys() order
# endversion_keys will be used instead of endversion.keys
# to have fixed search order, so that "pre" is checked
endversion_keys = ["pre", "p", "alpha", "beta", "rc"]
#parse /etc/env.d and generate /etc/profile.env
def env_update(makelinks=1):
	"""Regenerate the caches derived from /etc/env.d (fragmentary excerpt).

	Collates /etc/env.d/* into /etc/profile.env (bash) and /etc/csh.env
	(csh), rewrites ld.so.conf and prelink.conf, and runs ldconfig when
	library paths changed.  NOTE(review): many lines of the original
	function are missing, so several blocks below are incomplete.
	"""
	if not os.path.exists(root+"etc/env.d"):
		# 0755 is a Python-2 octal literal.
		os.makedirs(root+"etc/env.d",0755)
	fns=listdir(root+"etc/env.d",EmptyOnError=1)
	# Drop entries that do not start with two digits (env.d naming rule).
	while (pos<len(fns)):
		if (fns[pos][0] not in string.digits) or (fns[pos][1] not in string.digits):
	# Fragment of the 'specials' dict literal: path-like variables that are
	# accumulated across env.d files rather than overwritten.
	"KDEDIRS":[],"PATH":[],"CLASSPATH":[],"LDPATH":[],"MANPATH":[],
	"INFODIR":[],"INFOPATH":[],"ROOTPATH":[],"CONFIG_PROTECT":[],
	"CONFIG_PROTECT_MASK":[],"PRELINK_PATH":[],"PRELINK_PATH_MASK":[],
	"PYTHONPATH":[], "ADA_INCLUDE_PATH":[], "ADA_OBJECTS_PATH":[]
	# Fragment of the 'colon_separated' sequence: variables joined with ':'.
	"ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
	"PATH", "PRELINK_PATH",
	"PRELINK_PATH_MASK", "PYTHONPATH"
	# don't process backup files
	if x[-1]=='~' or x[-4:]==".bak":
	myconfig=getconfig(root+"etc/env.d/"+x)
	writemsg("!!! Parsing error in "+str(root)+"etc/env.d/"+str(x)+"\n")
	# process PATH, CLASSPATH, LDPATH
	for myspec in specials.keys():
		if myconfig.has_key(myspec):
			if myspec in colon_separated:
				specials[myspec].extend(myconfig[myspec].split(":"))
			specials[myspec].append(myconfig[myspec])
	# process all other variables
	for myenv in myconfig.keys():
		env[myenv]=myconfig[myenv]
	if os.path.exists(root+"etc/ld.so.conf"):
		myld=open(root+"etc/ld.so.conf")
		myldlines=myld.readlines()
		#each line has at least one char (a newline)
	#	os.rename(root+"etc/ld.so.conf",root+"etc/ld.so.conf.bak")
	# Where is the new ld.so.conf generated? (achim)
	ld_cache_update=False
	newld=specials["LDPATH"]
	#ld.so.conf needs updating and ldconfig needs to be run
	# atomic_ofstream writes to a temp file and renames on close.
	myfd = atomic_ofstream(os.path.join(root, "etc", "ld.so.conf"))
	myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
	myfd.write("# contents of /etc/env.d directory\n")
	for x in specials["LDPATH"]:
	# Update prelink.conf if we are prelink-enabled
	newprelink = atomic_ofstream(os.path.join(root, "etc", "prelink.conf"))
	newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
	newprelink.write("# contents of /etc/env.d directory\n")
	for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
		newprelink.write("-l "+x+"\n");
	for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
		for y in specials["PRELINK_PATH_MASK"]:
		newprelink.write("-h "+x+"\n")
	for x in specials["PRELINK_PATH_MASK"]:
		newprelink.write("-b "+x+"\n")
	# Track mtimes of library dirs so ldconfig only runs when something
	# actually changed.
	if not mtimedb.has_key("ldpath"):
	for x in specials["LDPATH"]+['/usr/lib','/lib']:
		newldpathtime=os.stat(x)[stat.ST_MTIME]
		except SystemExit, e:
		if mtimedb["ldpath"].has_key(x):
			if mtimedb["ldpath"][x]==newldpathtime:
			mtimedb["ldpath"][x]=newldpathtime
		mtimedb["ldpath"][x]=newldpathtime
	# Only run ldconfig as needed
	if (ld_cache_update or makelinks):
		# ldconfig has very different behaviour between FreeBSD and Linux
		if ostype=="Linux" or ostype.lower().endswith("gnu"):
			# We can't update links if we haven't cleaned other versions first, as
			# an older package installed ON TOP of a newer version will cause ldconfig
			# to overwrite the symlinks we just made. -X means no links. After 'clean'
			# we can safely create links.
			writemsg(">>> Regenerating "+str(root)+"etc/ld.so.cache...\n")
			commands.getstatusoutput("cd / ; /sbin/ldconfig -r "+root)
			commands.getstatusoutput("cd / ; /sbin/ldconfig -X -r "+root)
		elif ostype in ("FreeBSD","DragonFly"):
			writemsg(">>> Regenerating "+str(root)+"var/run/ld-elf.so.hints...\n")
			commands.getstatusoutput("cd / ; /sbin/ldconfig -elf -i -f "+str(root)+"var/run/ld-elf.so.hints "+str(root)+"etc/ld.so.conf")
	# LDPATH goes to ld.so.conf only, never to the profile env files.
	del specials["LDPATH"]
	penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
	penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
	cenvnotice = penvnotice[:]
	penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
	cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
	#create /etc/profile.env for bash support
	outfile = atomic_ofstream(os.path.join(root, "etc", "profile.env"))
	outfile.write(penvnotice)
	for path in specials.keys():
		if len(specials[path])==0:
		outstring="export "+path+"='"
		# CONFIG_PROTECT* are space-separated; the rest colon-separated.
		if path in ["CONFIG_PROTECT","CONFIG_PROTECT_MASK"]:
			for x in specials[path][:-1]:
		for x in specials[path][:-1]:
			outstring=outstring+x+":"
		outstring=outstring+specials[path][-1]+"'"
		outfile.write(outstring+"\n")
	#create /etc/profile.env
	if type(env[x])!=types.StringType:
	outfile.write("export "+x+"='"+env[x]+"'\n")
	#create /etc/csh.env for (t)csh support
	outfile = atomic_ofstream(os.path.join(root, "etc", "csh.env"))
	outfile.write(cenvnotice)
	for path in specials.keys():
		if len(specials[path])==0:
		outstring="setenv "+path+" '"
		if path in ["CONFIG_PROTECT","CONFIG_PROTECT_MASK"]:
			for x in specials[path][:-1]:
		for x in specials[path][:-1]:
			outstring=outstring+x+":"
		outstring=outstring+specials[path][-1]+"'"
		outfile.write(outstring+"\n")
	#get it out of the way
	if type(env[x])!=types.StringType:
	outfile.write("setenv "+x+" '"+env[x]+"'\n")
def new_protect_filename(mydest, newmd5=None):
	"""Resolves a config-protect filename for merging, optionally
	using the last filename if the md5 matches.
	(dest,md5) ==> 'string' --- path_to_target_filename
	(dest) ==> ('next', 'highest') --- next_target and most-recent_target

	NOTE(review): fragmentary excerpt; initialization of prot_num and
	last_pfile and the early-return branches are missing below.
	"""

	# config protection filename format:
	# ._cfgNNNN_<filename>, NNNN = zero-padded counter (per the slicing
	# below: pfile[0:5] prefix, pfile[5:9] counter, pfile[10:] filename).
	if (len(mydest) == 0):
		raise ValueError, "Empty path provided where a filename is required"
	if (mydest[-1]=="/"): # XXX add better directory checking
		raise ValueError, "Directory provided but this function requires a filename"
	if not os.path.exists(mydest):
	real_filename = os.path.basename(mydest)
	real_dirname = os.path.dirname(mydest)
	# Scan existing ._cfg####_<file> entries to find the highest counter.
	for pfile in listdir(real_dirname):
		if pfile[0:5] != "._cfg":
		if pfile[10:] != real_filename:
		new_prot_num = int(pfile[5:9])
		if new_prot_num > prot_num:
			prot_num = new_prot_num
		except SystemExit, e:
	prot_num = prot_num + 1
	new_pfile = os.path.normpath(real_dirname+"/._cfg"+string.zfill(prot_num,4)+"_"+real_filename)
	old_pfile = os.path.normpath(real_dirname+"/"+last_pfile)
	if last_pfile and newmd5:
		# If the newest protected copy already has the target's md5,
		# reuse it instead of creating yet another ._cfg file.
		if portage_checksum.perform_md5(real_dirname+"/"+last_pfile) == newmd5:
	return (new_pfile, old_pfile)
#XXX: These two are now implemented in portage_util.py but are needed here
#XXX: until the isvalidatom() dependency is sorted out.
# NOTE(review): this definition shadows the grabdict_package imported from
# portage_util at the top of the file.
def grabdict_package(myfilename,juststrings=0,recursive=0):
	"""grabdict() variant that validates each key as a package atom
	(fragmentary excerpt -- the invalid-entry removal and return are
	missing below).
	"""
	pkgs=grabdict(myfilename, juststrings=juststrings, empty=1,recursive=recursive)
	for x in pkgs.keys():
		if not isvalidatom(x):
			# Warn about invalid atoms (the deletion line is missing here).
			writemsg("--- Invalid atom in %s: %s\n" % (myfilename, x))
def grabfile_package(myfilename,compatlevel=0,recursive=0):
	"""grabfile() variant that validates each line as a package atom
	(fragmentary excerpt -- the atom-normalization lines and return are
	missing below).
	"""
	pkgs=grabfile(myfilename,compatlevel,recursive=recursive)
	# Iterate backwards so entries can be deleted while scanning.
	for x in range(len(pkgs)-1,-1,-1):
		if not isvalidatom(pkg):
			writemsg("--- Invalid atom in %s: %s\n" % (myfilename, pkgs[x]))
# returns a tuple. (version[string], error[string])
# They are pretty much mutually exclusive.
# Either version is a string and error is none, or
# version is None and error is a string
def ExtractKernelVersion(base_dir):
	"""Derive the kernel version string from a kernel source tree.

	Parses VERSION/PATCHLEVEL/SUBLEVEL/EXTRAVERSION from <base_dir>/Makefile,
	then appends localversion* file contents and CONFIG_LOCALVERSION from
	.config.  NOTE(review): fragmentary excerpt -- the try: headers, the
	version-string assembly lines and the sort of localversions are missing.
	"""
	pathname = os.path.join(base_dir, 'Makefile')
	f = open(pathname, 'r')
	except OSError, details:
		return (None, str(details))
	except IOError, details:
		return (None, str(details))
	# Only the first few Makefile lines are needed (read loop header missing).
	lines.append(f.readline())
	except OSError, details:
		return (None, str(details))
	except IOError, details:
		return (None, str(details))
	lines = map(string.strip, lines)
	#XXX: The following code relies on the ordering of vars within the Makefile
	# split on the '=' then remove annoying whitespace
	items = string.split(line, '=')
	items = map(string.strip, items)
	if items[0] == 'VERSION' or \
		items[0] == 'PATCHLEVEL':
	elif items[0] == 'SUBLEVEL':
	elif items[0] == 'EXTRAVERSION' and \
		items[-1] != items[0]:
	# Grab a list of files named localversion* and sort them
	localversions = os.listdir(base_dir)
	# Iterate backwards so non-matching entries can be deleted in place.
	for x in range(len(localversions)-1,-1,-1):
		if localversions[x][:12] != "localversion":
	# Append the contents of each to the version string, stripping ALL whitespace
	for lv in localversions:
		version += string.join(string.split(string.join(grabfile(base_dir+"/"+lv))), "")
	# Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
	kernelconfig = getconfig(base_dir+"/.config")
	if kernelconfig and kernelconfig.has_key("CONFIG_LOCALVERSION"):
		version += string.join(string.split(kernelconfig["CONFIG_LOCALVERSION"]), "")
	return (version,None)
def autouse(myvartree,use_cache=1):
	"returns set of USE variables auto-enabled due to packages being installed"
	# Memoized in the module-global autouse_val (fragmentary excerpt --
	# the early return and myusevars initialization are missing below).
	global usedefaults, autouse_val
	if autouse_val is not None:
	for myuse in usedefaults:
		# A USE flag is auto-enabled only if every dep that triggers it
		# matches an installed package.
		for mydep in usedefaults[myuse]:
			# NOTE(review): the use_cache parameter is ignored here --
			# dep_match is always called with use_cache=True.  Confirm.
			if not myvartree.dep_match(mydep,use_cache=True):
			myusevars += " "+myuse
	autouse_val = myusevars
def check_config_instance(test):
	"""Raise TypeError unless test looks like a portage.config instance.

	The check compares the class's string form against 'portage.config'
	rather than using isinstance() -- preserved as-is from the original.
	Falsy values (None, 0, "") are rejected outright.
	"""
	if not test or (str(test.__class__) != 'portage.config'):
		# Callable raise form is valid on both Python 2 and 3; the old
		# 'raise TypeError, msg' statement form is Python-2-only syntax.
		raise TypeError("Invalid type for config object: %s" % test.__class__)
896 def __init__(self, clone=None, mycpv=None, config_profile_path=None, config_incrementals=None):
898 self.already_in_regenerate = 0
903 self.modifiedkeys = []
908 # Virtuals obtained from the vartree
909 self.treeVirtuals = {}
910 # Virtuals by user specification. Includes negatives.
911 self.userVirtuals = {}
912 # Virtual negatives from user specifications.
913 self.negVirtuals = {}
915 self.user_profile_dir = None
918 self.incrementals = copy.deepcopy(clone.incrementals)
919 self.profile_path = copy.deepcopy(clone.profile_path)
920 self.user_profile_dir = copy.deepcopy(clone.user_profile_dir)
922 self.module_priority = copy.deepcopy(clone.module_priority)
923 self.modules = copy.deepcopy(clone.modules)
925 self.depcachedir = copy.deepcopy(clone.depcachedir)
927 self.packages = copy.deepcopy(clone.packages)
928 self.virtuals = copy.deepcopy(clone.virtuals)
930 self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
931 self.userVirtuals = copy.deepcopy(clone.userVirtuals)
932 self.negVirtuals = copy.deepcopy(clone.negVirtuals)
934 self.use_defs = copy.deepcopy(clone.use_defs)
935 self.usemask = copy.deepcopy(clone.usemask)
937 self.configlist = copy.deepcopy(clone.configlist)
938 self.configlist[-1] = os.environ.copy()
939 self.configdict = { "globals": self.configlist[0],
940 "defaults": self.configlist[1],
941 "conf": self.configlist[2],
942 "pkg": self.configlist[3],
943 "auto": self.configlist[4],
944 "backupenv": self.configlist[5],
945 "env": self.configlist[6] }
946 self.profiles = copy.deepcopy(clone.profiles)
947 self.backupenv = copy.deepcopy(clone.backupenv)
948 self.pusedict = copy.deepcopy(clone.pusedict)
949 self.categories = copy.deepcopy(clone.categories)
950 self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict)
951 self.pmaskdict = copy.deepcopy(clone.pmaskdict)
952 self.punmaskdict = copy.deepcopy(clone.punmaskdict)
953 self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
954 self.pprovideddict = copy.deepcopy(clone.pprovideddict)
955 self.lookuplist = copy.deepcopy(clone.lookuplist)
956 self.uvlist = copy.deepcopy(clone.uvlist)
957 self.dirVirtuals = copy.deepcopy(clone.dirVirtuals)
958 self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
960 self.depcachedir = DEPCACHE_PATH
962 if not config_profile_path:
964 writemsg("config_profile_path not specified to class config\n")
965 self.profile_path = profiledir[:]
967 self.profile_path = config_profile_path[:]
969 if not config_incrementals:
970 writemsg("incrementals not specified to class config\n")
971 self.incrementals = copy.deepcopy(portage_const.INCREMENTALS)
973 self.incrementals = copy.deepcopy(config_incrementals)
975 self.module_priority = ["user","default"]
977 self.modules["user"] = getconfig(MODULES_FILE_PATH)
978 if self.modules["user"] == None:
979 self.modules["user"] = {}
980 self.modules["default"] = {
981 "portdbapi.metadbmodule": "cache.metadata.database",
982 "portdbapi.auxdbmodule": "cache.flat_hash.database",
988 # back up our incremental variables:
990 # configlist will contain: [ globals, defaults, conf, pkg, auto, backupenv (incrementals), origenv ]
992 # The symlink might not exist or might not be a symlink.
994 self.profiles=[abssymlink(self.profile_path)]
995 except SystemExit, e:
998 self.profiles=[self.profile_path]
1000 mypath = self.profiles[0]
1001 while os.path.exists(mypath+"/parent"):
1002 mypath = os.path.normpath(mypath+"///"+grabfile(mypath+"/parent")[0])
1003 if os.path.exists(mypath):
1004 self.profiles.insert(0,mypath)
1006 if os.environ.has_key("PORTAGE_CALLER") and os.environ["PORTAGE_CALLER"] == "repoman":
1009 # XXX: This should depend on ROOT?
1010 if os.path.exists("/"+CUSTOM_PROFILE_PATH):
1011 self.user_profile_dir = os.path.normpath("/"+"///"+CUSTOM_PROFILE_PATH)
1012 self.profiles.append(self.user_profile_dir[:])
1014 self.packages_list = [grabfile_package(os.path.join(x, "packages")) for x in self.profiles]
1015 self.packages = stack_lists(self.packages_list, incremental=1)
1016 del self.packages_list
1017 #self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)
1020 self.prevmaskdict={}
1021 for x in self.packages:
1022 mycatpkg=dep_getkey(x)
1023 if not self.prevmaskdict.has_key(mycatpkg):
1024 self.prevmaskdict[mycatpkg]=[x]
1026 self.prevmaskdict[mycatpkg].append(x)
1028 # get profile-masked use flags -- INCREMENTAL Child over parent
1029 usemask_lists = [grabfile(os.path.join(x, "use.mask")) for x in self.profiles]
1030 self.usemask = stack_lists(usemask_lists, incremental=True)
1032 use_defs_lists = [grabdict(os.path.join(x, "use.defaults")) for x in self.profiles]
1033 self.use_defs = stack_dictlist(use_defs_lists, incremental=True)
1037 mygcfg_dlists = [getconfig(os.path.join(x, "make.globals")) for x in self.profiles+["/etc"]]
1038 self.mygcfg = stack_dicts(mygcfg_dlists, incrementals=portage_const.INCREMENTALS, ignore_none=1)
1040 if self.mygcfg == None:
1042 except SystemExit, e:
1044 except Exception, e:
1045 writemsg("!!! %s\n" % (e))
1046 writemsg("!!! Incorrect multiline literals can cause this. Do not use them.\n")
1047 writemsg("!!! Errors in this file should be reported on bugs.gentoo.org.\n")
1049 self.configlist.append(self.mygcfg)
1050 self.configdict["globals"]=self.configlist[-1]
1055 mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults")) for x in self.profiles]
1056 self.mygcfg = stack_dicts(mygcfg_dlists, incrementals=portage_const.INCREMENTALS, ignore_none=1)
1057 #self.mygcfg = grab_stacked("make.defaults", self.profiles, getconfig)
1058 if self.mygcfg == None:
1060 except SystemExit, e:
1062 except Exception, e:
1063 writemsg("!!! %s\n" % (e))
1064 writemsg("!!! 'rm -Rf /usr/portage/profiles; emerge sync' may fix this. If it does\n")
1065 writemsg("!!! not then please report this to bugs.gentoo.org and, if possible, a dev\n")
1066 writemsg("!!! on #gentoo (irc.freenode.org)\n")
1068 self.configlist.append(self.mygcfg)
1069 self.configdict["defaults"]=self.configlist[-1]
1072 # XXX: Should depend on root?
1073 self.mygcfg=getconfig("/"+MAKE_CONF_FILE,allow_sourcing=True)
1074 if self.mygcfg == None:
1076 except SystemExit, e:
1078 except Exception, e:
1079 writemsg("!!! %s\n" % (e))
1080 writemsg("!!! Incorrect multiline literals can cause this. Do not use them.\n")
1084 self.configlist.append(self.mygcfg)
1085 self.configdict["conf"]=self.configlist[-1]
1087 self.configlist.append({})
1088 self.configdict["pkg"]=self.configlist[-1]
1091 self.configlist.append({})
1092 self.configdict["auto"]=self.configlist[-1]
1094 #backup-env (for recording our calculated incremental variables:)
1095 self.backupenv = os.environ.copy()
1096 self.configlist.append(self.backupenv) # XXX Why though?
1097 self.configdict["backupenv"]=self.configlist[-1]
1099 self.configlist.append(os.environ.copy())
1100 self.configdict["env"]=self.configlist[-1]
1103 # make lookuplist for loading package.*
1104 self.lookuplist=self.configlist[:]
1105 self.lookuplist.reverse()
1107 if os.environ.get("PORTAGE_CALLER","") == "repoman":
1108 # repoman shouldn't use local settings.
1109 locations = [self["PORTDIR"] + "/profiles"]
1111 self.pkeywordsdict = {}
1112 self.punmaskdict = {}
1114 locations = [self["PORTDIR"] + "/profiles", USER_CONFIG_PATH]
1115 for ov in self["PORTDIR_OVERLAY"].split():
1116 ov = os.path.normpath(ov)
1117 if os.path.isdir(ov+"/profiles"):
1118 locations.append(ov+"/profiles")
1120 pusedict=grabdict_package(USER_CONFIG_PATH+"/package.use", recursive=1)
1122 for key in pusedict.keys():
1123 cp = dep_getkey(key)
1124 if not self.pusedict.has_key(cp):
1125 self.pusedict[cp] = {}
1126 self.pusedict[cp][key] = pusedict[key]
1129 pkgdict=grabdict_package(USER_CONFIG_PATH+"/package.keywords", recursive=1)
1130 self.pkeywordsdict = {}
1131 for key in pkgdict.keys():
1132 # default to ~arch if no specific keyword is given
1133 if not pkgdict[key]:
1135 if self.configdict["defaults"] and self.configdict["defaults"].has_key("ACCEPT_KEYWORDS"):
1136 groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split()
1139 for keyword in groups:
1140 if not keyword[0] in "~-":
1141 mykeywordlist.append("~"+keyword)
1142 pkgdict[key] = mykeywordlist
1143 cp = dep_getkey(key)
1144 if not self.pkeywordsdict.has_key(cp):
1145 self.pkeywordsdict[cp] = {}
1146 self.pkeywordsdict[cp][key] = pkgdict[key]
1149 pkgunmasklines = grabfile_package(USER_CONFIG_PATH+"/package.unmask",recursive=1)
1150 self.punmaskdict = {}
1151 for x in pkgunmasklines:
1152 mycatpkg=dep_getkey(x)
1153 if self.punmaskdict.has_key(mycatpkg):
1154 self.punmaskdict[mycatpkg].append(x)
1156 self.punmaskdict[mycatpkg]=[x]
1158 #getting categories from an external file now
1159 categories = [grabfile(os.path.join(x, "categories")) for x in locations]
1160 self.categories = stack_lists(categories, incremental=1)
1163 archlist = [grabfile(os.path.join(x, "arch.list")) for x in locations]
1164 archlist = stack_lists(archlist, incremental=1)
1165 self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
1167 # get virtuals -- needs categories
1168 self.loadVirtuals('/')
1171 pkgmasklines = [grabfile_package(os.path.join(x, "package.mask")) for x in self.profiles]
1173 pkgmasklines.append(grabfile_package(l+os.path.sep+"package.mask", recursive=1))
1174 pkgmasklines = stack_lists(pkgmasklines, incremental=1)
1177 for x in pkgmasklines:
1178 mycatpkg=dep_getkey(x)
1179 if self.pmaskdict.has_key(mycatpkg):
1180 self.pmaskdict[mycatpkg].append(x)
1182 self.pmaskdict[mycatpkg]=[x]
1184 pkgprovidedlines = [grabfile(os.path.join(x, "package.provided")) for x in self.profiles]
1185 pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
1186 for x in range(len(pkgprovidedlines)-1, -1, -1):
1187 cpvr = catpkgsplit(pkgprovidedlines[x])
1188 if not cpvr or cpvr[0] == "null":
1189 writemsg("Invalid package name in package.provided: "+pkgprovidedlines[x]+"\n")
1190 del pkgprovidedlines[x]
1192 self.pprovideddict = {}
1193 for x in pkgprovidedlines:
1197 mycatpkg=dep_getkey(x)
1198 if self.pprovideddict.has_key(mycatpkg):
1199 self.pprovideddict[mycatpkg].append(x)
1201 self.pprovideddict[mycatpkg]=[x]
1203 self.lookuplist=self.configlist[:]
1204 self.lookuplist.reverse()
1206 useorder=self["USE_ORDER"]
1208 # reasonable defaults; this is important as without USE_ORDER,
1209 # USE will always be "" (nothing set)!
1210 useorder="env:pkg:conf:auto:defaults"
1211 useordersplit=useorder.split(":")
1214 for x in useordersplit:
1215 if self.configdict.has_key(x):
1216 if "PKGUSE" in self.configdict[x].keys():
1217 del self.configdict[x]["PKGUSE"] # Delete PkgUse, Not legal to set.
1218 #prepend db to list to get correct order
1219 self.uvlist[0:0]=[self.configdict[x]]
1221 self.configdict["env"]["PORTAGE_GID"]=str(portage_gid)
1222 self.backupenv["PORTAGE_GID"]=str(portage_gid)
1224 if self.has_key("PORT_LOGDIR") and not self["PORT_LOGDIR"]:
1225 # port_logdir is defined, but empty. this causes a traceback in doebuild.
1226 writemsg(yellow("!!!")+" PORT_LOGDIR was defined, but set to nothing.\n")
1227 writemsg(yellow("!!!")+" Disabling it. Please set it to a non null value.\n")
1228 del self["PORT_LOGDIR"]
1230 if self["PORTAGE_CACHEDIR"]:
1231 # XXX: Deprecated -- April 15 -- NJ
1232 writemsg(yellow(">>> PORTAGE_CACHEDIR has been deprecated!")+"\n")
1233 writemsg(">>> Please use PORTAGE_DEPCACHEDIR instead.\n")
1234 self.depcachedir = self["PORTAGE_CACHEDIR"]
1235 del self["PORTAGE_CACHEDIR"]
1237 if self["PORTAGE_DEPCACHEDIR"]:
1238 #the auxcache is the only /var/cache/edb/ entry that stays at / even when "root" changes.
1239 # XXX: Could move with a CHROOT functionality addition.
1240 self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
1241 del self["PORTAGE_DEPCACHEDIR"]
1243 overlays = string.split(self["PORTDIR_OVERLAY"])
1247 ov=os.path.normpath(ov)
1248 if os.path.isdir(ov):
1251 writemsg(red("!!! Invalid PORTDIR_OVERLAY (not a dir): "+ov+"\n"))
1252 self["PORTDIR_OVERLAY"] = string.join(new_ov)
1253 self.backup_changes("PORTDIR_OVERLAY")
1257 self.features = portage_util.unique_array(self["FEATURES"].split())
1259 #XXX: Should this be temporary? Is it possible at all to have a default?
1260 if "gpg" in self.features:
1261 if not os.path.exists(self["PORTAGE_GPG_DIR"]) or not os.path.isdir(self["PORTAGE_GPG_DIR"]):
1262 writemsg("PORTAGE_GPG_DIR is invalid. Removing gpg from FEATURES.\n")
1263 self.features.remove("gpg")
1265 if not portage_exec.sandbox_capable and ("sandbox" in self.features or "usersandbox" in self.features):
1266 writemsg(red("!!! Problem with sandbox binary. Disabling...\n\n"))
1267 if "sandbox" in self.features:
1268 self.features.remove("sandbox")
1269 if "usersandbox" in self.features:
1270 self.features.remove("usersandbox")
1272 self.features.sort()
1273 self["FEATURES"] = " ".join(["-*"]+self.features)
1274 self.backup_changes("FEATURES")
1276 if not len(self["CBUILD"]) and len(self["CHOST"]):
1277 self["CBUILD"] = self["CHOST"]
1278 self.backup_changes("CBUILD")
def loadVirtuals(self,root):
	"""Resolve the virtuals mapping for *root* and cache it on this
	config instance as ``self.virtuals``."""
	resolved = self.getvirtuals(root)
	self.virtuals = resolved
# Resolve a dotted property such as "portdbapi.auxdbmodule" to the
# highest-priority module name (per self.modules / self.module_priority)
# and import it; dump_traceback below is the import-failure path.
# NOTE(review): original lines 1288/1290 (presumably try/except around
# load_mod) are missing from this excerpt; indentation has been lost.
1286 def load_best_module(self,property_string):
1287 best_mod = best_from_dict(property_string,self.modules,self.module_priority)
1289 mod = load_mod(best_mod)
1291 dump_traceback(red("Error: Failed to import module '%s'") % best_mod, noiselevel=0)
# Guard invoked before mutating config state: raises when the instance
# is locked. NOTE(review): original line 1302 (presumably the
# "if self.locked:" condition) is missing from this excerpt.
1301 def modifying(self):
1303 raise Exception, "Configuration is locked."
# Snapshot one key from the "env" layer into self.backupenv so that
# reset() can restore it later; raises KeyError for keys absent from the
# env layer. NOTE(review): original line 1308 (presumably "else:") is
# missing from this excerpt.
1305 def backup_changes(self,key=None):
1306 if key and self.configdict["env"].has_key(key):
1307 self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
1309 raise KeyError, "No such key defined in environment: %s" % key
1311 def reset(self,keeping_pkg=0,use_cache=1):
1312 "reset environment to original settings"
# Drop keys added after construction (i.e. not present in backupenv),
# then restore the backed-up env values and regenerate incrementals.
1313 for x in self.configlist[-1].keys():
1314 if x not in self.backupenv.keys():
1315 del self.configlist[-1][x]
1317 self.configdict["env"].update(self.backupenv)
1319 self.modifiedkeys = []
# NOTE(review): original lines 1320-1321 are missing from this excerpt
# -- presumably the "if not keeping_pkg:" guard that makes clearing the
# per-package layer conditional on the keeping_pkg argument.
1322 self.configdict["pkg"].clear()
1323 self.regenerate(use_cache=use_cache)
# Load per-package build metadata from a vardb info directory into the
# "pkg" config layer. Only all-uppercase filenames are read as variables.
1325 def load_infodir(self,infodir):
1326 if self.configdict.has_key("pkg"):
1327 for x in self.configdict["pkg"].keys():
1328 del self.configdict["pkg"][x]
# NOTE(review): original line 1329 (presumably "else:") is missing from
# this excerpt; the writemsg below looks like the no-pkg-layer branch.
1330 writemsg("No pkg setup for settings instance?\n")
1333 if os.path.exists(infodir):
1334 if os.path.exists(infodir+"/environment"):
1335 self.configdict["pkg"]["PORT_ENV_FILE"] = infodir+"/environment"
1337 myre = re.compile('^[A-Z]+$')
1338 for filename in listdir(infodir,filesonly=1,EmptyOnError=1):
1339 if myre.match(filename):
# NOTE(review): the "try:" opening for the except on 1347 is among the
# missing lines (circa 1340).
1341 mydata = string.strip(open(infodir+"/"+filename).read())
# Values of 2048 bytes or more are skipped -- presumably a sanity cap
# on variable size; confirm against other portage versions.
1342 if len(mydata)<2048:
1343 if filename == "USE":
# Saved USE gets a "-*" prefix so it fully overrides earlier layers.
1344 self.configdict["pkg"][filename] = "-* "+mydata
1346 self.configdict["pkg"][filename] = mydata
1347 except SystemExit, e:
# NOTE(review): "%" binds tighter than "+", so only infodir is
# interpolated and "/"+filename is appended after the newline --
# the message likely wants parentheses around the concatenation.
1350 writemsg("!!! Unable to read file: %s\n" % infodir+"/"+filename)
# Select the per-package USE settings (package.use / pusedict) matching
# *mycpv* and rebuild the effective config via reset(keeping_pkg=1).
1355 def setcpv(self,mycpv,use_cache=1):
# NOTE(review): original lines 1356-1357 are missing from this excerpt
# -- presumably the early return when mycpv is already current and the
# self.mycpv assignment.
1358 cp = dep_getkey(mycpv)
1360 if self.pusedict.has_key(cp):
1361 self.pusekey = best_match_to_list(self.mycpv, self.pusedict[cp].keys())
# NOTE(review): line 1362 is missing (likely "if self.pusekey:").
1363 newpuse = string.join(self.pusedict[cp][self.pusekey])
# Unchanged PUSE means nothing to do; the short-circuit body on the
# missing line 1365 presumably returns early.
1364 if newpuse == self.puse:
1367 self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
1368 self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE
1369 self.reset(keeping_pkg=1,use_cache=use_cache)
1371 def setinst(self,mycpv,mydbapi):
1372 # Grab the virtuals this package provides and add them into the tree virtuals.
1373 provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
1374 if isinstance(mydbapi, portdbapi):
# For ebuilds, PROVIDE may contain USE conditionals -- evaluate them
# against this package's USE flags before flattening.
1377 myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
1378 virts = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(provides), uselist=myuse.split()))
1380 cp = dep_getkey(mycpv)
# NOTE(review): the loop header over virts (original line 1381,
# presumably "for virt in virts:") is missing from this excerpt.
1382 virt = dep_getkey(virt)
1383 if not self.treeVirtuals.has_key(virt):
1384 self.treeVirtuals[virt] = []
1385 # XXX: Is this bad? -- It's a permanent modification
1386 if cp not in self.treeVirtuals[virt]:
1387 self.treeVirtuals[virt].append(cp)
# Recompile the effective virtuals now that treeVirtuals changed.
1389 self.virtuals = self.__getvirtuals_compile()
# Recompute incremental variables (USE and, unless useonly, all of
# portage_const.INCREMENTALS) by stacking every config layer; the result
# is written into the last configlist element.
# NOTE(review): this listing has many missing original lines and all
# indentation is lost; gap locations are flagged below.
1392 def regenerate(self,useonly=0,use_cache=1):
1393 global usesplit,profiledir
1395 if self.already_in_regenerate:
1396 # XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops.
1397 writemsg("!!! Looping in regenerate.\n",1)
1400 self.already_in_regenerate = 1
1403 myincrementals=["USE"]
1405 myincrementals=portage_const.INCREMENTALS
1406 for mykey in myincrementals:
# Auto-USE is derived from installed packages via the global db --
# flagged below as something that "needs to go away".
1409 # XXX Global usage of db... Needs to go away somehow.
1410 if db.has_key(root) and db[root].has_key("vartree"):
1411 self.configdict["auto"]["USE"]=autouse(db[root]["vartree"],use_cache=use_cache)
1413 self.configdict["auto"]["USE"]=""
1415 mydbs=self.configlist[:-1]
# NOTE(review): the per-layer loop header and myflags initialization
# (circa lines 1416-1418) are missing from this excerpt.
1419 if not curdb.has_key(mykey):
1421 #variables are already expanded
1422 mysplit=curdb[mykey].split()
1426 # "-*" is a special "minus" var that means "unset all settings".
1427 # so USE="-* gnome" will have *just* gnome enabled.
1432 # Not legal. People assume too much. Complain.
1433 writemsg(red("USE flags should not start with a '+': %s\n" % x))
# "-flag" removes a previously accumulated "flag".
1437 if (x[1:] in myflags):
1439 del myflags[myflags.index(x[1:])]
1442 # We got here, so add it now.
1443 if x not in myflags:
1447 #store setting in last element of configlist, the original environment:
1448 self.configlist[-1][mykey]=string.join(myflags," ")
1451 #cache split-up USE var in a global
# usemask filtering: masked flags never reach the effective USE.
1454 for x in string.split(self.configlist[-1]["USE"]):
1455 if x not in self.usemask:
# USE_EXPAND variables (e.g. LINGUAS) are folded into USE as
# lowercased "var_value" flags.
1458 if self.has_key("USE_EXPAND"):
1459 for var in string.split(self["USE_EXPAND"]):
1460 if self.has_key(var):
1461 for x in string.split(self[var]):
1462 mystr = string.lower(var)+"_"+x
1463 if mystr not in usesplit and mystr not in self.usemask:
1464 usesplit.append(mystr)
1466 # Pre-Pend ARCH variable to USE settings so '-*' in env doesn't kill arch.
1467 if self.configdict["defaults"].has_key("ARCH"):
1468 if self.configdict["defaults"]["ARCH"]:
1469 if self.configdict["defaults"]["ARCH"] not in usesplit:
1470 usesplit.insert(0,self.configdict["defaults"]["ARCH"])
1472 self.configlist[-1]["USE"]=string.join(usesplit," ")
1474 self.already_in_regenerate = 0
# Build (and memoize) the virtuals mapping for *myroot* by combining
# profile, user, and installed-tree virtuals.
1476 def getvirtuals(self, myroot):
# NOTE(review): the memoization guard (original line 1477, presumably
# "if self.virtuals:") is missing from this excerpt.
1478 return self.virtuals
1482 # This breaks catalyst/portage when setting to a fresh/empty root.
1483 # Virtuals cannot be calculated because there is nothing to work
1484 # from. So the only ROOT prefixed dir should be local configs.
1485 #myvirtdirs = prefix_array(self.profiles,myroot+"/")
1486 myvirtdirs = copy.deepcopy(self.profiles)
# The user profile dir is handled separately below, so strip it here.
1487 while self.user_profile_dir in myvirtdirs:
1488 myvirtdirs.remove(self.user_profile_dir)
1492 # R1: Collapse profile virtuals
1493 # R2: Extract user-negatives.
1494 # R3: Collapse user-virtuals.
1495 # R4: Apply user negatives to all except user settings.
1497 # Order of preference:
1498 # 1. user-declared that are installed
1499 # 3. installed and in profile
1501 # 2. user-declared set
1504 self.dirVirtuals = [grabdict(os.path.join(x, "virtuals")) for x in myvirtdirs]
1505 self.dirVirtuals.reverse()
1507 if self.user_profile_dir and os.path.exists(self.user_profile_dir+"/virtuals"):
1508 self.userVirtuals = grabdict(self.user_profile_dir+"/virtuals")
1510 # Store all the negatives for later.
1511 for x in self.userVirtuals.keys():
1512 self.negVirtuals[x] = []
1513 for y in self.userVirtuals[x]:
# NOTE(review): the condition selecting negative entries (original line
# 1514, presumably testing for a leading "-") is missing here.
1515 self.negVirtuals[x].append(y[:])
1517 # Collapse the user virtuals so that we don't deal with negatives.
1518 self.userVirtuals = stack_dictlist([self.userVirtuals],incremental=1)
1520 # Collapse all the profile virtuals including user negations.
1521 self.dirVirtuals = stack_dictlist([self.negVirtuals]+self.dirVirtuals,incremental=1)
1523 # Repoman does not use user or tree virtuals.
1524 if os.environ.get("PORTAGE_CALLER","") != "repoman":
1525 # XXX: vartree does not use virtuals, does user set matter?
1526 temp_vartree = vartree(myroot,self.dirVirtuals,categories=self.categories)
1527 # Reduce the provides into a list by CP.
1528 self.treeVirtuals = map_dictlist_vals(getCPFromCPV,temp_vartree.get_all_provides())
1530 return self.__getvirtuals_compile()
1532 def __getvirtuals_compile(self):
1533 """Actually generate the virtuals we have collected.
1534 The results are reversed so the list order is left to right.
1535 Given data is [Best,Better,Good] sets of [Good, Better, Best]"""
1537 # Virtuals by profile+tree preferences.
1539 # Virtuals by user+tree preferences.
# NOTE(review): the initializations of ptVirtuals/utVirtuals (original
# lines 1536/1538, presumably empty dicts) are missing from this excerpt.
1542 # If a user virtual is already installed, we preference it.
1543 for x in self.userVirtuals.keys():
# NOTE(review): original line 1544 (presumably "utVirtuals[x] = []")
# is missing here.
1545 if self.treeVirtuals.has_key(x):
1546 for y in self.userVirtuals[x]:
1547 if y in self.treeVirtuals[x]:
1548 utVirtuals[x].append(y)
1549 #print "F:",utVirtuals
1550 #utVirtuals[x].reverse()
1551 #print "R:",utVirtuals
1553 # If a profile virtual is already installed, we preference it.
1554 for x in self.dirVirtuals.keys():
# NOTE(review): original line 1555 (presumably "ptVirtuals[x] = []")
# is missing here.
1556 if self.treeVirtuals.has_key(x):
1557 for y in self.dirVirtuals[x]:
1558 if y in self.treeVirtuals[x]:
1559 ptVirtuals[x].append(y)
1561 # UserInstalled, ProfileInstalled, Installed, User, Profile
1562 biglist = [utVirtuals, ptVirtuals, self.treeVirtuals,
1563 self.userVirtuals, self.dirVirtuals]
1565 # We reverse each dictlist so that the order matches everything
1566 # else in portage. [-*, a, b] [b, c, d] ==> [b, a]
1567 for dictlist in biglist:
1568 for key in dictlist:
1569 dictlist[key].reverse()
1571 # User settings and profile settings take precedence over tree.
1572 val = stack_dictlist(biglist,incremental=1)
# NOTE(review): the trailing "return val" (circa original line 1574)
# is missing from this excerpt.
# Mapping-protocol deletion: remove *mykey* from every layer in
# lookuplist. NOTE(review): the loop body (original lines 1578-1581)
# is missing from this excerpt.
1576 def __delitem__(self,mykey):
1577 for x in self.lookuplist:
# Mapping-protocol lookup: walk the layered lookuplist and return the
# first layer's value for *mykey*, with special handling for a couple of
# keys below. NOTE(review): many original lines are missing from this
# excerpt (e.g. 1583, 1585, 1588-1590, the final return) and
# indentation has been lost.
1582 def __getitem__(self,mykey):
1584 for x in self.lookuplist:
1586 writemsg("!!! lookuplist is null.\n")
1587 elif x.has_key(mykey):
# Dead branch: "if 0 and ..." can never run -- a deliberately disabled
# experiment to HTTP-escape PORTAGE_BINHOST values via urllib.
1591 if 0 and match and mykey in ["PORTAGE_BINHOST"]:
1592 # These require HTTP Encoding
1595 if urllib.unquote(match) != match:
1596 writemsg("Note: %s already contains escape codes." % (mykey))
1598 match = urllib.quote(match)
1599 except SystemExit, e:
1602 writemsg("Failed to fix %s using urllib, attempting to continue.\n" % (mykey))
# CONFIG_PROTECT_MASK always gets /etc/env.d appended to its value.
1605 elif mykey == "CONFIG_PROTECT_MASK":
1606 match += " /etc/env.d"
# Mapping-protocol membership test: true when any layer in lookuplist
# defines *mykey*. NOTE(review): the return statements (original lines
# 1613-1616) are missing from this excerpt.
1610 def has_key(self,mykey):
1611 for x in self.lookuplist:
1612 if x.has_key(mykey):
# NOTE(review): orphaned fragment -- the enclosing def (circa original
# line 1617, presumably keys(), aggregating keys over all layers) is
# missing from this excerpt.
1618 for x in self.lookuplist:
1624 def __setitem__(self,mykey,myvalue):
1625 "set a value; will be thrown away at reset() time"
# Only string values are accepted; the assignment lands in the "env"
# layer and the key is tracked in modifiedkeys so reset() can discard
# it later. NOTE(review): original line 1628 is missing from this
# excerpt (possibly a self.modifying() guard).
1626 if type(myvalue) != types.StringType:
1627 raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
1629 self.modifiedkeys += [mykey]
1630 self.configdict["env"][mykey]=myvalue
# NOTE(review): the enclosing "def environ(self):" header (original
# line 1632) and the mydict initialization/return are missing from this
# excerpt. The visible code builds a plain dict snapshot of all visible
# keys and defaults HOME to BUILD_PREFIX when HOME is unset.
1633 "return our locally-maintained environment"
1635 for x in self.keys():
1637 if not mydict.has_key("HOME") and mydict.has_key("BUILD_PREFIX"):
1638 writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
1639 mydict["HOME"]=mydict["BUILD_PREFIX"][:]
1644 # XXX This would be to replace getstatusoutput completely.
1645 # XXX Issue: cannot block execution. Deadlock condition.
1646 def spawn(mystring,mysettings,debug=0,free=0,droppriv=0,fd_pipes=None,**keywords):
1647 """spawn a subprocess with optional sandbox protection,
1648 depending on whether sandbox is enabled. The "free" argument,
1649 when set to 1, will disable sandboxing. This allows us to
1650 spawn processes that are supposed to modify files outside of the
1651 sandbox. We can't use os.system anymore because it messes up
1652 signal handling. Using spawn allows our Portage signal handler
# NOTE(review): the docstring's closing line (original 1653) and
# several other lines are missing from this excerpt; indentation lost.
# Legacy path: mysettings may be a plain env dict instead of a config
# instance.
1655 if type(mysettings) == types.DictType:
1657 keywords["opt_name"]="[ %s ]" % "portage"
1659 check_config_instance(mysettings)
1660 env=mysettings.environ()
1661 keywords["opt_name"]="[%s]" % mysettings["PF"]
1663 # XXX: Negative RESTRICT word
# userpriv is honored only when FEATURES request it and the ebuild's
# RESTRICT does not forbid it.
1664 droppriv=(droppriv and ("userpriv" in features) and not \
1665 (("nouserpriv" in string.split(mysettings["RESTRICT"])) or \
1666 ("userpriv" in string.split(mysettings["RESTRICT"]))))
# Only drop privileges when running as root (uid falsy) and the portage
# uid/gid are known.
1668 if droppriv and not uid and portage_gid and portage_uid:
1669 keywords.update({"uid":portage_uid,"gid":portage_gid,"groups":[portage_gid],"umask":002})
# Recompute "free": skip the sandbox when FEATURES do not request it
# for this privilege context.
1672 free=((droppriv and "usersandbox" not in features) or \
1673 (not droppriv and "sandbox" not in features and "usersandbox" not in features))
1676 keywords["opt_name"] += " sandbox"
1677 return portage_exec.spawn_sandbox(mystring,env=env,**keywords)
1679 keywords["opt_name"] += " bash"
1680 return portage_exec.spawn_bash(mystring,env=env,**keywords)
# Download each URI in myuris into DISTDIR, consulting the digest file
# (when present) for sizes/checksums, honoring RESTRICT=mirror/fetch/
# primaryuri, GENTOO_MIRRORS, custom mirrors, per-file locks, and
# resume support. NOTE(review): this listing is missing very many
# original lines (else branches, try/except openers, returns, variable
# initializations such as fetched/filedict/mymirrors) and all
# indentation; gaps are flagged only at the most confusing points.
1684 def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
1685 "fetch files. Will use digest file if available."
1687 # 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
1688 if ("mirror" in mysettings["RESTRICT"].split()) or \
1689 ("nomirror" in mysettings["RESTRICT"].split()):
1690 if ("mirror" in features) and ("lmirror" not in features):
1691 # lmirror should allow you to bypass mirror restrictions.
1692 # XXX: This is not a good thing, and is temporary at best.
1693 print ">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."
1696 global thirdpartymirrors
1698 check_config_instance(mysettings)
1700 custommirrors=grabdict(CUSTOM_MIRRORS_FILE,recursive=1)
# Locking is disabled for listonly mode or when FEATURES lack distlocks.
1704 if listonly or ("distlocks" not in features):
1708 if "skiprocheck" in features:
# fetch_to_ro: fetching onto a read-only DISTDIR -- warn that locking
# should be off in that configuration.
1711 if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
1713 writemsg(red("!!! You are fetching to a read-only filesystem, you should turn locking off"));
1714 writemsg("!!! This can be done by adding -distlocks to FEATURES in /etc/make.conf");
1717 # local mirrors are always added
1718 if custommirrors.has_key("local"):
1719 mymirrors += custommirrors["local"]
1721 if ("nomirror" in mysettings["RESTRICT"].split()) or \
1722 ("mirror" in mysettings["RESTRICT"].split()):
1723 # We don't add any mirrors.
1727 mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
1730 digestfn = mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
1731 if os.path.exists(digestfn):
1732 mydigests = digestParseFile(digestfn)
# Filesystem mirrors (absolute paths) are separated out so files can be
# copied locally instead of downloaded.
1735 for x in range(len(mymirrors)-1,-1,-1):
1736 if mymirrors[x] and mymirrors[x][0]=='/':
1737 fsmirrors += [mymirrors[x]]
1740 for myuri in myuris:
1741 myfile=os.path.basename(myuri)
1743 destdir = mysettings["DISTDIR"]+"/"
1744 if not os.path.exists(destdir+myfile):
1745 for mydir in fsmirrors:
1746 if os.path.exists(mydir+"/"+myfile):
1747 writemsg(_("Local mirror has file: %(file)s\n" % {"file":myfile}))
1748 shutil.copyfile(mydir+"/"+myfile,destdir+"/"+myfile)
1750 except (OSError,IOError),e:
1751 # file does not exist
1752 writemsg(_("!!! %(file)s not found in %(dir)s\n") % {"file":myfile, "dir":mysettings["DISTDIR"]})
# RESTRICT=fetch: every file must already be present; otherwise run the
# ebuild's pkg_nofetch and abort.
1755 if "fetch" in mysettings["RESTRICT"].split():
1756 # fetch is restricted. Ensure all files have already been downloaded; otherwise,
1757 # print message and exit.
1759 for myuri in myuris:
1760 myfile=os.path.basename(myuri)
1762 mystat=os.stat(mysettings["DISTDIR"]+"/"+myfile)
1763 except (OSError,IOError),e:
1764 # file does not exist
1765 writemsg(_("!!! %(file)s not found in %(dir)s\n") % {"file":myfile, "dir":mysettings["DISTDIR"]})
1769 print "!!!",mysettings["CATEGORY"]+"/"+mysettings["PF"],"has fetch restriction turned on."
1770 print "!!! This probably means that this ebuild's files must be downloaded"
1771 print "!!! manually. See the comments in the ebuild for more information."
1773 spawn(EBUILD_SH_BINARY+" nofetch",mysettings)
# Build filedict: for each file, the ordered list of candidate URLs
# (mirror distfiles dirs, custom mirrors, third-party mirrors, source).
1776 locations=mymirrors[:]
1778 primaryuri_indexes={}
1779 for myuri in myuris:
1780 myfile=os.path.basename(myuri)
1781 if not filedict.has_key(myfile):
1783 for y in range(0,len(locations)):
1784 filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
1785 if myuri[:9]=="mirror://":
1786 eidx = myuri.find("/", 9)
1788 mirrorname = myuri[9:eidx]
1790 # Try user-defined mirrors first
1791 if custommirrors.has_key(mirrorname):
1792 for cmirr in custommirrors[mirrorname]:
1793 filedict[myfile].append(cmirr+"/"+myuri[eidx+1:])
1794 # remove the mirrors we tried from the list of official mirrors
1795 if cmirr.strip() in thirdpartymirrors[mirrorname]:
1796 thirdpartymirrors[mirrorname].remove(cmirr)
1797 # now try the official mirrors
1798 if thirdpartymirrors.has_key(mirrorname):
# shuffle spreads load across mirrors; the except below guards a known
# broken-libc crash in random.shuffle.
1800 shuffle(thirdpartymirrors[mirrorname])
1801 except SystemExit, e:
1804 writemsg(red("!!! YOU HAVE A BROKEN PYTHON/GLIBC.\n"))
1805 writemsg( "!!! You are most likely on a pentium4 box and have specified -march=pentium4\n")
1806 writemsg( "!!! or -fpmath=sse2. GCC was generating invalid sse2 instructions in versions\n")
1807 writemsg( "!!! prior to 3.2.3. Please merge the latest gcc or rebuid python with either\n")
1808 writemsg( "!!! -march=pentium3 or set -mno-sse2 in your cflags.\n\n\n")
1811 for locmirr in thirdpartymirrors[mirrorname]:
1812 filedict[myfile].append(locmirr+"/"+myuri[eidx+1:])
1814 if not filedict[myfile]:
1815 writemsg("No known mirror by the name: %s\n" % (mirrorname))
1817 writemsg("Invalid mirror definition in SRC_URI:\n")
1818 writemsg(" %s\n" % (myuri))
# RESTRICT=primaryuri: prefer the upstream source URL over mirrors,
# preserving SRC_URI order among multiple primary URIs.
1820 if "primaryuri" in mysettings["RESTRICT"].split():
1821 # Use the source site first.
1822 if primaryuri_indexes.has_key(myfile):
1823 primaryuri_indexes[myfile] += 1
1825 primaryuri_indexes[myfile] = 0
1826 filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
1828 filedict[myfile].append(myuri)
1830 missingSourceHost = False
1831 for myfile in filedict.keys(): # Gives a list, not just the first one
1832 if not filedict[myfile]:
1833 writemsg("Warning: No mirrors available for file '%s'\n" % (myfile))
1834 missingSourceHost = True
1835 if missingSourceHost:
1837 del missingSourceHost
1840 if not os.access(mysettings["DISTDIR"]+"/",os.W_OK):
1842 print "!!! No write access to %s" % mysettings["DISTDIR"]+"/"
# Local helper: best-effort group-perm fixups on DISTDIR and the lock
# subdir (EPERM is tolerated for non-root users).
1845 def distdir_perms(filename):
1847 portage_util.apply_permissions(filename, gid=portage_gid, mode=0775)
1849 if oe.errno == errno.EPERM:
1850 writemsg("!!! Unable to apply group permissions to '%s'. Non-root users may experience issues.\n"
1854 distdir_perms(mysettings["DISTDIR"])
1855 if use_locks and locks_in_subdir:
1856 distlocks_subdir = os.path.join(mysettings["DISTDIR"], locks_in_subdir)
1858 distdir_perms(distlocks_subdir)
1860 if oe.errno == errno.ENOENT:
1861 os.mkdir(distlocks_subdir)
1862 distdir_perms(distlocks_subdir)
1865 if not os.access(distlocks_subdir, os.W_OK):
1866 writemsg("!!! No write access to write to %s. Aborting.\n" % distlocks_subdir)
1868 del distlocks_subdir
# Main per-file loop: take a lock, then try each candidate URL until
# the file verifies (fetched==2) or the candidates are exhausted.
1871 for myfile in filedict.keys():
1877 if use_locks and can_fetch:
1879 file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+locks_in_subdir+"/"+myfile,wantnewlockfile=1)
1881 file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+myfile,wantnewlockfile=1)
1883 for loc in filedict[myfile]:
1887 # allow different fetchcommands per protocol
1888 protocol = loc[0:loc.find("://")]
1889 if mysettings.has_key("FETCHCOMMAND_"+protocol.upper()):
1890 fetchcommand=mysettings["FETCHCOMMAND_"+protocol.upper()]
1892 fetchcommand=mysettings["FETCHCOMMAND"]
1893 if mysettings.has_key("RESUMECOMMAND_"+protocol.upper()):
1894 resumecommand=mysettings["RESUMECOMMAND_"+protocol.upper()]
1896 resumecommand=mysettings["RESUMECOMMAND"]
1898 fetchcommand=string.replace(fetchcommand,"${DISTDIR}",mysettings["DISTDIR"])
1899 resumecommand=string.replace(resumecommand,"${DISTDIR}",mysettings["DISTDIR"])
# Pre-flight: stat any existing partial/complete file to decide between
# resume, skip, or refetch.
1902 mystat=os.stat(mysettings["DISTDIR"]+"/"+myfile)
1903 if mydigests.has_key(myfile):
1904 #if we have the digest file, we know the final size and can resume the download.
1905 if mystat[stat.ST_SIZE]<mydigests[myfile]["size"]:
1908 #we already have it downloaded, skip.
1909 #if our file is bigger than the recorded size, digestcheck should catch it.
1913 # Verify checksums at each fetch for fetchonly.
1914 verified_ok,reason = portage_checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
1917 writemsg("!!! Previously fetched file: "+str(myfile)+"\n")
1918 writemsg("!!! Reason: "+reason[0]+"\n")
1919 writemsg("!!! Got: %s\n!!! Expected: %s\n" % (reason[0], reason[1]))
1920 writemsg("Refetching...\n\n")
1921 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
1924 for x_key in mydigests[myfile].keys():
1925 writemsg(">>> Previously fetched file: "+str(myfile)+" "+x_key+" ;-)\n")
1927 break #No need to keep looking for this file, we have it!
1929 #we don't have the digest file, but the file exists. Assume it is fully downloaded.
1931 except (OSError,IOError),e:
1932 writemsg("An exception was caught(1)...\nFailing the download: %s.\n" % (str(e)),1)
1938 writemsg("!!! File %s isn't fetched but unable to get it.\n" % myfile)
1940 writemsg("!!! File %s isn't fully fetched, but unable to complete it\n" % myfile)
1945 # check if we can actually write to the directory/existing file.
1946 if fetched!=2 and os.path.exists(mysettings["DISTDIR"]+"/"+myfile) != \
1947 os.access(mysettings["DISTDIR"]+"/"+myfile, os.W_OK) and not fetch_to_ro:
1948 writemsg(red("***")+" Lack write access to %s, failing fetch\n" % str(mysettings["DISTDIR"]+"/"+myfile))
1952 #we either need to resume or start the download
1953 #you can't use "continue" when you're inside a "try" block
1956 writemsg(">>> Resuming download...\n")
1957 locfetch=resumecommand
1960 locfetch=fetchcommand
1961 writemsg(">>> Downloading "+str(loc)+"\n")
1962 myfetch=string.replace(locfetch,"${URI}",loc)
1963 myfetch=string.replace(myfetch,"${FILE}",myfile)
# SELinux path: switch to the fetch execution context around the spawn.
1966 con=selinux.getcontext()
1967 con=string.replace(con,mysettings["PORTAGE_T"],mysettings["PORTAGE_FETCH_T"])
1968 selinux.setexec(con)
1969 myret=spawn(myfetch,mysettings,free=1, droppriv=("userfetch" in mysettings.features))
1970 selinux.setexec(None)
1972 myret=spawn(myfetch,mysettings,free=1, droppriv=("userfetch" in mysettings.features))
1974 #if root, -always- set the perms.
1975 if os.path.exists(mysettings["DISTDIR"]+"/"+myfile) and (fetched != 1 or os.getuid() == 0) \
1976 and os.access(mysettings["DISTDIR"]+"/",os.W_OK):
1977 if os.stat(mysettings["DISTDIR"]+"/"+myfile).st_gid != portage_gid:
1979 os.chown(mysettings["DISTDIR"]+"/"+myfile,-1,portage_gid)
1980 except SystemExit, e:
1983 portage_util.writemsg("chown failed on distfile: " + str(myfile))
1984 os.chmod(mysettings["DISTDIR"]+"/"+myfile,0664)
# Post-download verification against the digest: undersized files are
# retried (and HTML 404 pages deleted); full-size files are checksummed.
1986 if mydigests!=None and mydigests.has_key(myfile):
1988 mystat=os.stat(mysettings["DISTDIR"]+"/"+myfile)
1989 # no exception? file exists. let digestcheck() report
1990 # an appropriately for size or checksum errors
1991 if (mystat[stat.ST_SIZE]<mydigests[myfile]["size"]):
1992 # Fetch failed... Try the next one... Kill 404 files though.
1993 if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
1994 html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
1996 if html404.search(open(mysettings["DISTDIR"]+"/"+myfile).read()):
1998 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
1999 writemsg(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n")
2000 except SystemExit, e:
2004 except SystemExit, e:
2013 # File is the correct size--check the checksums for the fetched
2014 # file NOW, for those users who don't have a stable/continuous
2015 # net connection. This way we have a chance to try to download
2016 # from another mirror...
2017 verified_ok,reason = portage_checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
2020 writemsg("!!! Fetched file: "+str(myfile)+" VERIFY FAILED!\n")
2021 writemsg("!!! Reason: "+reason[0]+"\n")
2022 writemsg("!!! Got: %s\n!!! Expected: %s\n" % (reason[0], reason[1]))
2023 writemsg("Removing corrupt distfile...\n")
2024 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
2027 for x_key in mydigests[myfile].keys():
2028 writemsg(">>> "+str(myfile)+" "+x_key+" ;-)\n")
2031 except (OSError,IOError),e:
2032 writemsg("An exception was caught(2)...\nFailing the download: %s.\n" % (str(e)),1)
2038 elif mydigests!=None:
2039 writemsg("No digest file available and download failed.\n\n")
2041 if use_locks and file_lock:
2042 portage_locks.unlockfile(file_lock)
2046 if (fetched!=2) and not listonly:
2047 writemsg("!!! Couldn't download "+str(myfile)+". Aborting.\n")
# NOTE(review): mutable default argument oldDigest={} -- shared across
# calls if ever mutated; the body only reads from it here, but a None
# sentinel would be safer.
2052 def digestCreate(myfiles,basedir,oldDigest={}):
2053 """Takes a list of files and the directory they are in and returns the
2054 dict of dict[filename][CHECKSUM_KEY] = hash
2055 returns None on error."""
# NOTE(review): the mydigests initialization and the "for x in myfiles:"
# loop header (circa original lines 2056-2058) are missing from this
# excerpt, as are several else/return lines below.
2059 myfile=os.path.normpath(basedir+"///"+x)
2060 if os.path.exists(myfile):
2061 if not os.access(myfile, os.R_OK):
2062 print "!!! Given file does not appear to be readable. Does it exist?"
2063 print "!!! File:",myfile
2065 mydigests[x] = portage_checksum.perform_multiple_checksums(myfile, hashes=portage_const.MANIFEST1_HASH_FUNCTIONS)
2066 mysize = os.stat(myfile)[stat.ST_SIZE]
# Missing file: fall back to the previously recorded digest, if any.
2069 # DeepCopy because we might not have a unique reference.
2070 mydigests[x] = copy.deepcopy(oldDigest[x])
2071 mysize = copy.deepcopy(oldDigest[x]["size"])
2073 print "!!! We have a source URI, but no file..."
2074 print "!!! File:",myfile
# Guard against a recorded size disagreeing with the on-disk size.
2077 if mydigests[x].has_key("size") and (mydigests[x]["size"] != mysize):
2078 raise portage_exception.DigestException, "Size mismatch during checksums"
2079 mydigests[x]["size"] = copy.deepcopy(mysize)
# Render a digest dict (as built by digestCreate) into the textual
# "<KEY> <hash> <file> <size>" lines of a digest/Manifest file.
# NOTE(review): the mylines initialization, the per-sum line assembly
# (circa 2093-2095) and the trailing return are missing from this
# excerpt.
2082 def digestCreateLines(filelist, mydict):
2084 mydigests = copy.deepcopy(mydict)
2085 for myarchive in filelist:
2086 mysize = mydigests[myarchive]["size"]
2087 if len(mydigests[myarchive]) == 0:
# NOTE(review): message grammar ("No generate digest") -- runtime
# string left untouched in this documentation pass.
2088 raise portage_exception.DigestException, "No generate digest for '%(file)s'" % {"file":myarchive}
2089 for sumName in mydigests[myarchive].keys():
# Skip the "size" entry and any unknown checksum keys.
2090 if sumName not in portage_checksum.get_valid_checksum_keys():
2092 mysum = mydigests[myarchive][sumName]
2096 myline += " "+myarchive
2097 myline += " "+str(mysize)
2098 mylines.append(myline)
2101 def digestgen(myarchives,mysettings,overwrite=1,manifestonly=0):
2102 """generates digest file if missing. Assumes all files are available. If
2103 overwrite=0, the digest will only be created if it doesn't already exist."""
# NOTE(review): this listing is missing many original lines (try/except
# openers, else branches, returns, outfile.close() calls) and all
# indentation; gaps are flagged at the most confusing points only.
# distfiles checksums live relative to DISTDIR; the digest/Manifest
# files themselves live in the package dir (pbasedir).
2106 basedir=mysettings["DISTDIR"]+"/"
2107 digestfn=mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
2109 # portage files -- p(ortagefiles)basedir
2110 pbasedir=mysettings["O"]+"/"
2111 manifestfn=pbasedir+"Manifest"
2113 if not manifestonly:
2114 if not os.path.isdir(mysettings["FILESDIR"]):
2115 os.makedirs(mysettings["FILESDIR"])
2116 mycvstree=cvstree.getentries(pbasedir, recursive=1)
2118 if ("cvs" in features) and os.path.exists(pbasedir+"/CVS"):
2119 if not cvstree.isadded(mycvstree,"files"):
2120 if "autoaddcvs" in features:
2121 print ">>> Auto-adding files/ dir to CVS..."
2122 spawn("cd "+pbasedir+"; cvs add files",mysettings,free=1)
2124 print "--- Warning: files/ is not added to cvs."
2126 if (not overwrite) and os.path.exists(digestfn):
2129 print green(">>> Generating the digest file...")
2131 # Track the old digest so we can assume checksums without requiring
2132 # all files to be downloaded. 'Assuming'
2134 if os.path.exists(digestfn):
2135 myolddigest = digestParseFile(digestfn)
2139 mydigests=digestCreate(myarchives, basedir, oldDigest=myolddigest)
2140 except portage_exception.DigestException, s:
2143 if mydigests==None: # There was a problem, exit with an errorcode.
2147 outfile=open(digestfn, "w+")
2148 except SystemExit, e:
2150 except Exception, e:
2151 print "!!! Filesystem error skipping generation. (Read-Only?)"
2154 for x in digestCreateLines(myarchives, mydigests):
2155 outfile.write(x+"\n")
# Best-effort ownership/permissions on the new digest file; failures
# are tolerated (the except below).
2158 os.chown(digestfn,os.getuid(),portage_gid)
2159 os.chmod(digestfn,0664)
2160 except SystemExit, e:
# Manifest covers all package files (recursively, minus CVS noise).
2165 print green(">>> Generating the manifest file...")
2166 mypfiles=listdir(pbasedir,recursive=1,filesonly=1,ignorecvs=1,EmptyOnError=1)
2167 mypfiles=cvstree.apply_cvsignore_filter(mypfiles)
2169 for x in ["Manifest"]:
# NOTE(review): the loop body removing "Manifest" from mypfiles (circa
# lines 2170-2171) is missing from this excerpt.
2173 mydigests=digestCreate(mypfiles, pbasedir)
2174 if mydigests==None: # There was a problem, exit with an errorcode.
2178 outfile=open(manifestfn, "w+")
2179 except SystemExit, e:
2181 except Exception, e:
2182 print "!!! Filesystem error skipping generation. (Read-Only?)"
2185 for x in digestCreateLines(mypfiles, mydigests):
2186 outfile.write(x+"\n")
2189 os.chown(manifestfn,os.getuid(),portage_gid)
2190 os.chmod(manifestfn,0664)
2191 except SystemExit, e:
# Offer/perform CVS adds for the newly written digest and Manifest.
2196 if "cvs" in features and os.path.exists(pbasedir+"/CVS"):
2197 mycvstree=cvstree.getentries(pbasedir, recursive=1)
2199 if not manifestonly and not cvstree.isadded(mycvstree,digestfn):
2200 if digestfn[:len(pbasedir)]==pbasedir:
2201 myunaddedfiles=digestfn[len(pbasedir):]+" "
2203 myunaddedfiles=digestfn+" "
2204 if not cvstree.isadded(mycvstree,manifestfn[len(pbasedir):]):
2205 if manifestfn[:len(pbasedir)]==pbasedir:
2206 myunaddedfiles+=manifestfn[len(pbasedir):]+" "
2208 myunaddedfiles+=manifestfn
2210 if "autoaddcvs" in features:
2211 print blue(">>> Auto-adding digest file(s) to CVS...")
2212 spawn("cd "+pbasedir+"; cvs add "+myunaddedfiles,mysettings,free=1)
2214 print "--- Warning: digests are not yet added into CVS."
2215 print darkgreen(">>> Computed message digests.")
2220 def digestParseFile(myfilename):
2221 """(filename) -- Parses a given file for entries matching:
2222 <checksumkey> <checksum_hex_string> <filename> <filesize>
2223 Ignores lines that don't start with a valid checksum identifier
2224 and returns a dict with the filenames as keys and {checksumkey:checksum}
# NOTE(review): the docstring close, the mydigests initialization, the
# per-line loop header and short-line guard (circa 2225-2238), and the
# final return are missing from this excerpt.
2227 if not os.path.exists(myfilename):
2229 mylines = portage_util.grabfile(myfilename, compat_level=1)
2233 myline=string.split(x)
2237 if myline[0] not in portage_checksum.get_valid_checksum_keys():
# Token layout: KEY HASH <filename possibly containing spaces> SIZE --
# hence pop front twice, pop the size off the back, and rejoin the rest.
2239 mykey = myline.pop(0)
2240 myhash = myline.pop(0)
2241 mysize = long(myline.pop())
2242 myfn = string.join(myline, " ")
2243 if myfn not in mydigests:
2244 mydigests[myfn] = {}
2245 mydigests[myfn][mykey] = myhash
# A file listed twice with differing sizes is a hard error.
2246 if "size" in mydigests[myfn]:
2247 if mydigests[myfn]["size"] != mysize:
2248 raise portage_exception.DigestException, "Conflicting sizes in digest: %(filename)s" % {"filename":myfilename}
2250 mydigests[myfn]["size"] = mysize
2253 # XXXX strict was added here to fix a missing name error.
2254 # XXXX It's used below, but we're not paying attention to how we get it?
# NOTE(review): partial snapshot -- some body lines (including the final
# success return) are elided and indentation was stripped.
2255 def digestCheckFiles(myfiles, mydigests, basedir, note="", strict=0):
2256 """(fileslist, digestdict, basedir) -- Takes a list of files and a dict
2257 of their digests and checks the digests against the indicated files in
2258 the basedir given. Returns 1 only if all files exist and match the checksums.
# A file with no digest entry is reported; presumably fatal only when strict -- TODO confirm.
2261 if not mydigests.has_key(x):
2263 print red("!!! No message digest entry found for file \""+x+".\"")
2264 print "!!! Most likely a temporary problem. Try 'emerge sync' again later."
2265 print "!!! If you are certain of the authenticity of the file then you may type"
2266 print "!!! the following to generate a new digest:"
2267 print "!!! ebuild /usr/portage/category/package/package-version.ebuild digest"
2269 myfile=os.path.normpath(basedir+"/"+x)
2270 if not os.path.exists(myfile):
2272 print "!!! File does not exist:",myfile
# verify_all returns (ok, (reason_text, got, expected)).
2276 ok,reason = portage_checksum.verify_all(myfile,mydigests[x])
2279 print red("!!! Digest verification Failed:")
2280 print red("!!!")+" "+str(os.path.realpath(myfile))
2281 print red("!!! Reason: ")+reason[0]
2282 print red("!!! Got: ")+str(reason[1])
2283 print red("!!! Expected: ")+str(reason[2])
2287 writemsg_stdout(">>> checksums "+note+" ;-) %s\n" % x)
# NOTE(review): partial snapshot -- try/except framing and several branches are
# elided and indentation was stripped; do not restructure without the full file.
2291 def digestcheck(myfiles, mysettings, strict=0, justmanifest=0):
2292 """Verifies checksums. Assumes all files have been downloaded."""
2294 basedir=mysettings["DISTDIR"]+"/"
2295 digestfn=mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
2297 # portage files -- p(ortagefiles)basedir
2298 pbasedir=mysettings["O"]+"/"
2299 manifestfn=pbasedir+"Manifest"
# Missing digest/Manifest: auto-generate when FEATURES=digest is enabled.
2301 if not (os.path.exists(digestfn) and os.path.exists(manifestfn)):
2302 if "digest" in features:
2303 print ">>> No package digest/Manifest file found."
2304 print ">>> \"digest\" mode enabled; auto-generating new digest..."
2305 return digestgen(myfiles,mysettings)
2307 if not os.path.exists(manifestfn):
2309 print red("!!! No package manifest found:"),manifestfn
2312 print "--- No package manifest found:",manifestfn
2313 if not os.path.exists(digestfn):
2314 print "!!! No package digest file found:",digestfn
2315 print "!!! Type \"ebuild foo.ebuild digest\" to generate it."
2318 mydigests=digestParseFile(digestfn)
2320 print "!!! Failed to parse digest file:",digestfn
2322 mymdigests=digestParseFile(manifestfn)
2323 if "strict" not in features:
2324 # XXX: Remove this when manifests become mainstream.
2326 elif mymdigests==None:
2327 print "!!! Failed to parse manifest file:",manifestfn
2331 # Check the portage-related files here.
2332 mymfiles=listdir(pbasedir,recursive=1,filesonly=1,ignorecvs=1,EmptyOnError=1)
2333 manifest_files = mymdigests.keys()
2334 # Files unrelated to the build process are ignored for verification by default
2335 for x in ["Manifest", "ChangeLog", "metadata.xml"]:
2336 while x in mymfiles:
2338 while x in manifest_files:
2339 manifest_files.remove(x)
# Walk backwards so on-disk files can be removed from the worklist while iterating.
2340 for x in range(len(mymfiles)-1,-1,-1):
2341 if mymfiles[x] in manifest_files:
2342 manifest_files.remove(mymfiles[x])
2343 elif len(cvstree.apply_cvsignore_filter([mymfiles[x]]))==0:
2344 # we filter here, rather then above; manifest might have files flagged by the filter.
2345 # if something is returned, then it's flagged as a bad file
2346 # manifest doesn't know about it, so we kill it here.
2349 print red("!!! Security Violation: A file exists that is not in the manifest.")
2350 print "!!! File:",mymfiles[x]
2353 if manifest_files and strict:
2354 print red("!!! Files listed in the manifest do not exist!")
2355 for x in manifest_files:
2359 if not digestCheckFiles(mymfiles, mymdigests, pbasedir, note="files ", strict=strict):
2361 print ">>> Please ensure you have sync'd properly. Please try '"+bold("emerge sync")+"' and"
2362 print ">>> optionally examine the file(s) for corruption. "+bold("A sync will fix most cases.")
2366 print "--- Manifest check failed. 'strict' not enabled; ignoring."
2372 # Just return the status, as it's the last check.
2373 return digestCheckFiles(myfiles, mydigests, basedir, note="src_uri", strict=strict)
2375 # parse actionmap to spawn ebuild with the appropriate args
# NOTE(review): partial snapshot -- the early-return on dep failure and the
# final return of retval are elided; indentation was stripped.
2376 def spawnebuild(mydo,actionmap,mysettings,debug,alwaysdep=0,logfile=None):
2377 if alwaysdep or ("noauto" not in features):
2378 # process dependency first
# Recurse into the prerequisite phase named by actionmap[mydo]["dep"] before running mydo.
2379 if "dep" in actionmap[mydo].keys():
2380 retval=spawnebuild(actionmap[mydo]["dep"],actionmap,mysettings,debug,alwaysdep=alwaysdep,logfile=logfile)
2384 mycommand = EBUILD_SH_BINARY + " "
# Under SELinux with FEATURES=sesandbox, run the build phases in the sandbox context.
2385 if selinux_enabled and ("sesandbox" in features) and (mydo in ["unpack","compile","test","install"]):
2386 con=selinux.getcontext()
2387 con=string.replace(con,mysettings["PORTAGE_T"],mysettings["PORTAGE_SANDBOX_T"])
2388 selinux.setexec(con)
2389 retval=spawn(mycommand + mydo,mysettings,debug=debug,
2390 free=actionmap[mydo]["args"][0],
2391 droppriv=actionmap[mydo]["args"][1],logfile=logfile)
2392 selinux.setexec(None)
2394 retval=spawn(mycommand + mydo,mysettings, debug=debug,
2395 free=actionmap[mydo]["args"][0],
2396 droppriv=actionmap[mydo]["args"][1],logfile=logfile)
2399 # chunked out deps for each phase, so that ebuild binary can use it
2400 # to collapse targets down.
# NOTE(review): the opening `actionmap_deps={` line, several entries, and the
# closing brace are elided from this snapshot. Each visible entry maps a phase
# to the list of phases that must run before it.
2404 "unpack": ["setup"],
2405 "compile":["unpack"],
2406 "test": ["compile"],
2409 "package":["install"],
def eapi_is_supported(eapi):
	"""Return True when the given EAPI value matches the EAPI this portage release implements.

	Both sides are normalized via str() and strip() so ints and padded
	strings compare equal to the constant in portage_const.EAPI.
	"""
	supported = str(portage_const.EAPI).strip()
	requested = str(eapi).strip()
	return requested == supported
# NOTE(review): this is the phase driver for a single ebuild. The snapshot is
# partial -- many interior lines (try/else framing, several branches, returns)
# are elided and indentation was stripped; treat structure as indicative only.
2417 def doebuild(myebuild,mydo,myroot,mysettings,debug=0,listonly=0,fetchonly=0,cleanup=0,dbkey=None,use_cache=1,fetchall=0,tree=None):
2418 global db, actionmap_deps
2421 dump_traceback("Warning: tree not specified to doebuild")
# Derive category/package-version identifiers from the ebuild path.
2424 ebuild_path = os.path.abspath(myebuild)
2425 pkg_dir = os.path.dirname(ebuild_path)
2427 if mysettings.configdict["pkg"].has_key("CATEGORY"):
2428 cat = mysettings.configdict["pkg"]["CATEGORY"]
2430 cat = os.path.basename(os.path.normpath(pkg_dir+"/.."))
2431 mypv = os.path.basename(ebuild_path)[:-7]
2432 mycpv = cat+"/"+mypv
2434 mysplit=pkgsplit(mypv,silent=0)
2436 writemsg("!!! Error: PF is null '%s'; exiting.\n" % mypv)
2439 if mydo != "depend":
2440 # XXX: We're doing a little hack here to curtain the gvisible locking
2441 # XXX: that creates a deadlock... Really need to isolate that.
2442 mysettings.reset(use_cache=use_cache)
2443 mysettings.setcpv(mycpv,use_cache=use_cache)
# Validate the requested phase name.
2445 validcommands = ["help","clean","prerm","postrm","preinst","postinst",
2446 "config","setup","depend","fetch","digest",
2447 "unpack","compile","test","install","rpm","qmerge","merge",
2448 "package","unmerge", "manifest"]
2450 if mydo not in validcommands:
2451 validcommands.sort()
2452 writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo)
2453 for vcount in range(len(validcommands)):
2456 writemsg(string.ljust(validcommands[vcount], 11))
2460 if not os.path.exists(myebuild):
2461 writemsg("!!! doebuild: "+str(myebuild)+" not found for "+str(mydo)+"\n")
2464 if debug: # Otherwise it overrides emerge's settings.
2465 # We have no other way to set debug... debug can't be passed in
2466 # due to how it's coded... Don't overwrite this so we can use it.
2467 mysettings["PORTAGE_DEBUG"]=str(debug)
# Populate the standard ebuild environment variables.
2469 mysettings["ROOT"] = myroot
2470 mysettings["STARTDIR"] = getcwd()
2472 mysettings["EBUILD"] = ebuild_path
2473 mysettings["O"] = pkg_dir
2474 mysettings["CATEGORY"] = cat
2475 mysettings["FILESDIR"] = pkg_dir+"/files"
2476 mysettings["PF"] = mypv
2478 mysettings["ECLASSDIR"] = mysettings["PORTDIR"]+"/eclass"
2479 mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")
2481 mysettings["PROFILE_PATHS"] = string.join(mysettings.profiles,"\n")+"\n"+CUSTOM_PROFILE_PATH
2482 mysettings["P"] = mysplit[0]+"-"+mysplit[1]
2483 mysettings["PN"] = mysplit[0]
2484 mysettings["PV"] = mysplit[1]
2485 mysettings["PR"] = mysplit[2]
2487 if portage_util.noiselimit < 0:
2488 mysettings["PORTAGE_QUIET"] = "1"
2490 if mydo != "depend":
2492 mysettings["INHERITED"], mysettings["RESTRICT"] = db[root][tree].dbapi.aux_get( \
2493 mycpv,["INHERITED","RESTRICT"])
2494 mysettings["PORTAGE_RESTRICT"]=string.join(flatten(portage_dep.use_reduce(portage_dep.paren_reduce( \
2495 mysettings["RESTRICT"]), uselist=mysettings["USE"].split())),' ')
2496 except SystemExit, e:
2500 eapi = db[root][tree].dbapi.aux_get(mycpv, ["EAPI"])[0]
2501 if not eapi_is_supported(eapi):
2502 # can't do anything with this.
2503 raise portage_exception.UnsupportedAPIException(mycpv, eapi)
2505 if mysplit[2] == "r0":
2506 mysettings["PVR"]=mysplit[1]
2508 mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]
2510 mysettings["SLOT"]=""
# Make sure portage's helper scripts are on PATH.
2512 if mysettings.has_key("PATH"):
2513 mysplit=string.split(mysettings["PATH"],":")
2516 if PORTAGE_BIN_PATH not in mysplit:
2517 mysettings["PATH"]=PORTAGE_BIN_PATH+":"+mysettings["PATH"]
# Build-tree layout under PORTAGE_TMPDIR.
2520 mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
2521 mysettings["HOME"] = mysettings["BUILD_PREFIX"]+"/homedir"
2522 mysettings["PKG_TMPDIR"] = mysettings["PORTAGE_TMPDIR"]+"/portage-pkg"
2523 mysettings["PORTAGE_BUILDDIR"] = mysettings["BUILD_PREFIX"]+"/"+mysettings["PF"]
2525 mysettings["PORTAGE_BASHRC"] = EBUILD_SH_ENV_FILE
2527 #set up KV variable -- DEP SPEEDUP :: Don't waste time. Keep var persistent.
2528 if (mydo!="depend") or not mysettings.has_key("KV"):
2529 mykv,err1=ExtractKernelVersion(root+"usr/src/linux")
2531 # Regular source tree
2532 mysettings["KV"]=mykv
2536 if (mydo!="depend") or not mysettings.has_key("KVERS"):
2538 mysettings["KVERS"]=myso[1]
2541 # get possible slot information from the deps file
# The "depend" phase spawns ebuild.sh and returns early (metadata only).
2543 if mysettings.has_key("PORTAGE_DEBUG") and mysettings["PORTAGE_DEBUG"]=="1":
2544 # XXX: This needs to use a FD for saving the output into a file.
2545 # XXX: Set this up through spawn
2547 writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
2549 mysettings["dbkey"] = dbkey
2551 mysettings["dbkey"] = mysettings.depcachedir+"/aux_db_key_temp"
2553 retval = spawn(EBUILD_SH_BINARY+" depend",mysettings)
2557 # Build directory creation isn't required for any of these.
2558 if mydo not in ["fetch","digest","manifest"]:
2560 if not os.path.exists(mysettings["BUILD_PREFIX"]):
2561 os.makedirs(mysettings["BUILD_PREFIX"])
2562 if (os.getuid() == 0):
2563 os.chown(mysettings["BUILD_PREFIX"],portage_uid,portage_gid)
2564 os.chmod(mysettings["BUILD_PREFIX"],00775)
2566 # Should be ok again to set $T, as sandbox does not depend on it
2567 # XXX Bug. no way in hell this is valid for clean handling.
2568 mysettings["T"]=mysettings["PORTAGE_BUILDDIR"]+"/temp"
2569 if cleanup or mydo=="clean":
2570 if os.path.exists(mysettings["T"]):
2571 shutil.rmtree(mysettings["T"])
2572 if not os.path.exists(mysettings["T"]):
2573 os.makedirs(mysettings["T"])
2574 if (os.getuid() == 0):
2575 os.chown(mysettings["T"],portage_uid,portage_gid)
2576 os.chmod(mysettings["T"],02770)
2578 logdir = mysettings["T"]+"/logging"
2579 if not os.path.exists(logdir):
2582 os.chown(logdir, portage_uid, portage_gid)
2583 os.chmod(logdir, 0770)
# Recreate HOME when userpriv is active and not RESTRICTed.
2585 try: # XXX: negative RESTRICT
2586 if not (("nouserpriv" in string.split(mysettings["PORTAGE_RESTRICT"])) or \
2587 ("userpriv" in string.split(mysettings["PORTAGE_RESTRICT"]))):
2588 if ("userpriv" in features) and (portage_uid and portage_gid):
2590 if os.path.exists(mysettings["HOME"]):
2591 # XXX: Potentially bad, but held down by HOME replacement above.
2592 spawn("rm -Rf "+mysettings["HOME"],mysettings, free=1)
2593 if not os.path.exists(mysettings["HOME"]):
2594 os.makedirs(mysettings["HOME"])
2595 elif ("userpriv" in features):
2596 print "!!! Disabling userpriv from features... Portage UID/GID not valid."
2597 del features[features.index("userpriv")]
2598 except SystemExit, e:
2600 except Exception, e:
2601 print "!!! Couldn't empty HOME:",mysettings["HOME"]
2605 # no reason to check for depend since depend returns above.
2606 if not os.path.exists(mysettings["BUILD_PREFIX"]):
2607 os.makedirs(mysettings["BUILD_PREFIX"])
2608 if (os.getuid() == 0):
2609 os.chown(mysettings["BUILD_PREFIX"],portage_uid,portage_gid)
2610 if not os.path.exists(mysettings["PORTAGE_BUILDDIR"]):
2611 os.makedirs(mysettings["PORTAGE_BUILDDIR"])
2612 if (os.getuid() == 0):
2613 os.chown(mysettings["PORTAGE_BUILDDIR"],portage_uid,portage_gid)
2615 print "!!! File system problem. (ReadOnly? Out of space?)"
2616 print "!!! Perhaps: rm -Rf",mysettings["BUILD_PREFIX"]
2621 if not os.path.exists(mysettings["HOME"]):
2622 os.makedirs(mysettings["HOME"])
2623 if (os.getuid() == 0):
2624 os.chown(mysettings["HOME"],portage_uid,portage_gid)
2625 os.chmod(mysettings["HOME"],02770)
2627 print "!!! File system problem. (ReadOnly? Out of space?)"
2628 print "!!! Failed to create fake home directory in PORTAGE_BUILDDIR"
# FEATURES=ccache: ensure the cache dir exists with portage-group permissions.
2633 if ("ccache" in features):
2634 if (not mysettings.has_key("CCACHE_DIR")) or (mysettings["CCACHE_DIR"]==""):
2635 mysettings["CCACHE_DIR"]=mysettings["PORTAGE_TMPDIR"]+"/ccache"
2636 if not os.path.exists(mysettings["CCACHE_DIR"]):
2637 os.makedirs(mysettings["CCACHE_DIR"])
2638 mystat = os.stat(mysettings["CCACHE_DIR"])
2639 if ("userpriv" in features):
2640 if mystat[stat.ST_UID] != portage_uid or ((mystat[stat.ST_MODE]&02070)!=02070):
2641 writemsg("* Adjusting permissions on ccache in %s\n" % mysettings["CCACHE_DIR"])
2642 spawn("chgrp -R "+str(portage_gid)+" "+mysettings["CCACHE_DIR"], mysettings, free=1)
2643 spawn("chown "+str(portage_uid)+":"+str(portage_gid)+" "+mysettings["CCACHE_DIR"], mysettings, free=1)
2644 spawn("chmod -R ug+rw "+mysettings["CCACHE_DIR"], mysettings, free=1)
2645 spawn("find "+mysettings["CCACHE_DIR"]+" -type d -exec chmod g+xs \{\} \;", mysettings, free=1)
2647 if mystat[stat.ST_UID] != 0 or ((mystat[stat.ST_MODE]&02070)!=02070):
2648 writemsg("* Adjusting permissions on ccache in %s\n" % mysettings["CCACHE_DIR"])
2649 spawn("chgrp -R "+str(portage_gid)+" "+mysettings["CCACHE_DIR"], mysettings, free=1)
2650 spawn("chown 0:"+str(portage_gid)+" "+mysettings["CCACHE_DIR"], mysettings, free=1)
2651 spawn("chmod -R ug+rw "+mysettings["CCACHE_DIR"], mysettings, free=1)
2652 spawn("find "+mysettings["CCACHE_DIR"]+" -type d -exec chmod g+xs \{\} \;", mysettings, free=1)
2654 print "!!! File system problem. (ReadOnly? Out of space?)"
2655 print "!!! Perhaps: rm -Rf",mysettings["BUILD_PREFIX"]
# FEATURES=confcache: requires root to set up; otherwise disabled in-place.
2659 if "confcache" in features:
2660 if not mysettings.has_key("CONFCACHE_DIR"):
2661 mysettings["CONFCACHE_DIR"] = os.path.join(mysettings["PORTAGE_TMPDIR"], "confcache")
2662 if not os.path.exists(mysettings["CONFCACHE_DIR"]):
2663 if not os.getuid() == 0:
2665 features.remove("confcache")
2666 mysettings["FEATURES"] = " ".join(features)
2668 os.makedirs(mysettings["CONFCACHE_DIR"], mode=0775)
2669 os.chown(mysettings["CONFCACHE_DIR"], -1, portage_gid)
2671 st = os.stat(mysettings["CONFCACHE_DIR"])
2672 if not (st.st_mode & 07777) == 0775:
2673 os.chmod(mysettings["CONFCACHE_DIR"], 0775)
2674 if not st.st_gid == portage_gid:
2675 os.chown(mysettings["CONFCACHE_DIR"], -1, portage_gid)
2677 # check again, since it may have been disabled.
2678 if "confcache" in features:
2679 for x in listdir(mysettings["CONFCACHE_DIR"]):
2680 p = os.path.join(mysettings["CONFCACHE_DIR"], x)
2682 if not (st.st_mode & 07777) & 07660 == 0660:
2683 os.chmod(p, (st.st_mode & 0777) | 0660)
2684 if not st.st_gid == portage_gid:
2685 os.chown(p, -1, portage_gid)
2688 print "!!! Failed resetting perms on confcachedir %s" % mysettings["CONFCACHE_DIR"]
2691 # mystat=os.stat(mysettings["CCACHE_DIR"])
2692 # if (mystat[stat.ST_GID]!=portage_gid) or ((mystat[stat.ST_MODE]&02070)!=02070):
2693 # print "*** Adjusting ccache permissions for portage user..."
2694 # os.chown(mysettings["CCACHE_DIR"],portage_uid,portage_gid)
2695 # os.chmod(mysettings["CCACHE_DIR"],02770)
2696 # spawn("chown -R "+str(portage_uid)+":"+str(portage_gid)+" "+mysettings["CCACHE_DIR"],mysettings, free=1)
2697 # spawn("chmod -R g+rw "+mysettings["CCACHE_DIR"],mysettings, free=1)
2698 #except SystemExit, e:
# FEATURES=distcc: set up state dirs; disable the feature on filesystem errors.
2703 if "distcc" in features:
2705 if (not mysettings.has_key("DISTCC_DIR")) or (mysettings["DISTCC_DIR"]==""):
2706 mysettings["DISTCC_DIR"]=mysettings["PORTAGE_TMPDIR"]+"/portage/.distcc"
2707 if not os.path.exists(mysettings["DISTCC_DIR"]):
2708 os.makedirs(mysettings["DISTCC_DIR"])
2709 os.chown(mysettings["DISTCC_DIR"],portage_uid,portage_gid)
2710 os.chmod(mysettings["DISTCC_DIR"],02775)
2711 for x in ("/lock", "/state"):
2712 if not os.path.exists(mysettings["DISTCC_DIR"]+x):
2713 os.mkdir(mysettings["DISTCC_DIR"]+x)
2714 os.chown(mysettings["DISTCC_DIR"]+x,portage_uid,portage_gid)
2715 os.chmod(mysettings["DISTCC_DIR"]+x,02775)
2717 writemsg("\n!!! File system problem when setting DISTCC_DIR directory permissions.\n")
2718 writemsg( "!!! DISTCC_DIR="+str(mysettings["DISTCC_DIR"]+"\n"))
2719 writemsg( "!!! "+str(e)+"\n\n")
2721 features.remove("distcc")
2722 mysettings["DISTCC_DIR"]=""
2724 mysettings["WORKDIR"]=mysettings["PORTAGE_BUILDDIR"]+"/work"
2725 mysettings["D"]=mysettings["PORTAGE_BUILDDIR"]+"/image/"
# Optional per-build log file under PORT_LOGDIR; logging disabled on any failure.
2727 if mysettings.has_key("PORT_LOGDIR"):
2728 if not os.access(mysettings["PORT_LOGDIR"],os.F_OK):
2730 os.mkdir(mysettings["PORT_LOGDIR"])
2732 print "!!! Unable to create PORT_LOGDIR"
2734 if os.access(mysettings["PORT_LOGDIR"]+"/",os.W_OK):
2736 perms = os.stat(mysettings["PORT_LOGDIR"])
2737 if perms[stat.ST_UID] != portage_uid or perms[stat.ST_GID] != portage_gid:
2738 os.chown(mysettings["PORT_LOGDIR"],portage_uid,portage_gid)
2739 if stat.S_IMODE(perms[stat.ST_MODE]) != 02770:
2740 os.chmod(mysettings["PORT_LOGDIR"],02770)
2741 if not mysettings.has_key("LOG_PF") or (mysettings["LOG_PF"] != mysettings["PF"]):
2742 mysettings["LOG_PF"]=mysettings["PF"]
2743 mysettings["LOG_COUNTER"]=str(db[myroot]["vartree"].dbapi.get_counter_tick_core("/"))
2744 logfile="%s/%s-%s.log" % (mysettings["PORT_LOGDIR"],mysettings["LOG_COUNTER"],mysettings["LOG_PF"])
2746 mysettings["PORT_LOGDIR"]=""
2747 print "!!! Unable to chown/chmod PORT_LOGDIR. Disabling logging."
2750 print "!!! Cannot create log... No write access / Does not exist"
2751 print "!!! PORT_LOGDIR:",mysettings["PORT_LOGDIR"]
2752 mysettings["PORT_LOGDIR"]=""
2755 return unmerge(mysettings["CATEGORY"],mysettings["PF"],myroot,mysettings)
2757 # if any of these are being called, handle them -- running them out of the sandbox -- and stop now.
2760 if mydo in ["help","clean","setup"]:
2761 return spawn(EBUILD_SH_BINARY+" "+mydo,mysettings,debug=debug,free=1,logfile=logfile)
2762 elif mydo in ["prerm","postrm","preinst","postinst","config"]:
2763 mysettings.load_infodir(pkg_dir)
2764 return spawn(EBUILD_SH_BINARY+" "+mydo,mysettings,debug=debug,free=1,logfile=logfile)
2767 mysettings["SLOT"],mysettings["RESTRICT"] = db["/"]["porttree"].dbapi.aux_get(mycpv,["SLOT","RESTRICT"])
2768 except (IOError,KeyError):
2769 print red("doebuild():")+" aux_get() error reading "+mycpv+"; aborting."
# Resolve SRC_URI fetch lists: A (this USE config) and AA (all USE configs).
2772 newuris, alist = db["/"]["porttree"].dbapi.getfetchlist(mycpv,mysettings=mysettings)
2773 alluris, aalist = db["/"]["porttree"].dbapi.getfetchlist(mycpv,mysettings=mysettings,all=1)
2774 mysettings["A"]=string.join(alist," ")
2775 mysettings["AA"]=string.join(aalist," ")
2776 if ("mirror" in features) or fetchall:
2779 elif mydo=="digest":
2782 digestfn=mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
2783 if os.path.exists(digestfn):
2784 mydigests=digestParseFile(digestfn)
2788 i = checkme.index(x)
2796 if not os.path.exists(mysettings["DISTDIR"]):
2797 os.makedirs(mysettings["DISTDIR"])
2798 if not os.path.exists(mysettings["DISTDIR"]+"/cvs-src"):
2799 os.makedirs(mysettings["DISTDIR"]+"/cvs-src")
2801 print "!!! File system problem. (Bad Symlink?)"
2802 print "!!! Fetching may fail:",str(e)
2805 mystat=os.stat(mysettings["DISTDIR"]+"/cvs-src")
2806 if ((mystat[stat.ST_GID]!=portage_gid) or ((mystat[stat.ST_MODE]&02770)!=02770)) and not listonly:
2807 print "*** Adjusting cvs-src permissions for portage user..."
2808 os.chown(mysettings["DISTDIR"]+"/cvs-src",0,portage_gid)
2809 os.chmod(mysettings["DISTDIR"]+"/cvs-src",02770)
2810 spawn("chgrp -R "+str(portage_gid)+" "+mysettings["DISTDIR"]+"/cvs-src", free=1)
2811 spawn("chmod -R g+rw "+mysettings["DISTDIR"]+"/cvs-src", free=1)
2812 except SystemExit, e:
2817 # Only try and fetch the files if we are going to need them ... otherwise,
2818 # if user has FEATURES=noauto and they run `ebuild clean unpack compile install`,
2819 # we will try and fetch 4 times :/
2820 need_distfiles = (mydo in ("digest", "fetch", "unpack") or
2821 mydo != "manifest" and "noauto" not in features)
2822 if need_distfiles and not fetch(fetchme, mysettings, listonly=listonly, fetchonly=fetchonly):
2825 # inefficient. improve this logic via making actionmap easily searchable to see if we're in the chain of what
2826 # will be executed, either that or forced N doebuild calls instead of a single set of phase calls.
2827 if (mydo not in ("setup", "clean", "postinst", "preinst", "prerm", "fetch", "digest", "manifest") and
2828 "noauto" not in features) or mydo == "unpack":
2829 # remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
2830 mysettings["PORTAGE_ACTUAL_DISTDIR"] = orig_distdir = mysettings["DISTDIR"]
2831 edpath = mysettings["DISTDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "distdir")
2832 if os.path.exists(edpath):
2834 if os.path.isdir(edpath) and not os.path.islink(edpath):
2835 shutil.rmtree(edpath)
2839 print "!!! Failed reseting ebuild distdir path, " + edpath
2842 os.chown(edpath, -1, portage_gid)
2843 os.chmod(edpath, 0775)
2846 os.symlink(os.path.join(orig_distdir, file), os.path.join(edpath, file))
2848 print "!!! Failed symlinking in '%s' to ebuild distdir" % file
2851 if mydo=="fetch" and listonly:
2854 if "digest" in features:
2855 #generate digest if it doesn't exist.
2857 return (not digestgen(aalist,mysettings,overwrite=1))
2859 digestgen(aalist,mysettings,overwrite=0)
2860 elif mydo=="digest":
2861 #since we are calling "digest" directly, recreate the digest even if it already exists
2862 return (not digestgen(aalist,mysettings,overwrite=1))
2863 if mydo=="manifest":
2864 return (not digestgen(aalist,mysettings,overwrite=1,manifestonly=1))
2866 # See above comment about fetching only when needed
2867 if not digestcheck(checkme, mysettings, ("strict" in features), (mydo not in ["digest","fetch","unpack"] and settings["PORTAGE_CALLER"] == "ebuild" and "noauto" in features)):
2873 #initial dep checks complete; time to process main commands
# Decide whether the sandbox can be skipped for userpriv builds.
2875 nosandbox=(("userpriv" in features) and ("usersandbox" not in features) and \
2876 ("userpriv" not in mysettings["RESTRICT"]) and ("nouserpriv" not in mysettings["RESTRICT"]))
2877 if nosandbox and ("userpriv" not in features or "userpriv" in mysettings["RESTRICT"] or \
2878 "nouserpriv" in mysettings["RESTRICT"]):
2879 nosandbox = ("sandbox" not in features and "usersandbox" not in features)
# Per-phase spawn args: (free-of-sandbox?, drop-privileges?).
2882 "depend": {"args":(0,1)}, # sandbox / portage
2883 "setup": {"args":(1,0)}, # without / root
2884 "unpack": {"args":(0,1)}, # sandbox / portage
2885 "compile":{"args":(nosandbox,1)}, # optional / portage
2886 "test": {"args":(nosandbox,1)}, # optional / portage
2887 "install":{"args":(0,0)}, # sandbox / root
2888 "rpm": {"args":(0,0)}, # sandbox / root
2889 "package":{"args":(0,0)}, # sandbox / root
2892 # merge the deps in so we have again a 'full' actionmap
2893 # be glad when this can die.
2894 for x in actionmap.keys():
2895 if len(actionmap_deps.get(x, [])):
2896 actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
2898 if mydo in actionmap.keys():
2900 for x in ["","/"+mysettings["CATEGORY"],"/All"]:
2901 if not os.path.exists(mysettings["PKGDIR"]+x):
2902 os.makedirs(mysettings["PKGDIR"]+x)
2903 # REBUILD CODE FOR TBZ2 --- XXXX
2904 return spawnebuild(mydo,actionmap,mysettings,debug,logfile=logfile)
2905 elif mydo=="qmerge":
2906 #check to ensure install was run. this *only* pops up when users forget it and are using ebuild
2907 if not os.path.exists(mysettings["PORTAGE_BUILDDIR"]+"/.installed"):
2908 print "!!! mydo=qmerge, but install phase hasn't been ran"
2910 #qmerge is specifically not supposed to do a runtime dep check
2911 return merge(mysettings["CATEGORY"],mysettings["PF"],mysettings["D"],mysettings["PORTAGE_BUILDDIR"]+"/build-info",myroot,mysettings,myebuild=mysettings["EBUILD"],mytree=tree)
2913 retval=spawnebuild("install",actionmap,mysettings,debug,alwaysdep=1,logfile=logfile)
2916 return merge(mysettings["CATEGORY"],mysettings["PF"],mysettings["D"],mysettings["PORTAGE_BUILDDIR"]+"/build-info",myroot,mysettings,myebuild=mysettings["EBUILD"],mytree=tree)
2918 print "!!! Unknown mydo:",mydo
# NOTE(review): partial snapshot -- try/else framing, several branches, and the
# final return are elided; indentation was stripped. Do not restructure.
2923 def movefile(src,dest,newmtime=None,sstat=None,mysettings=None):
2924 """moves a file from src to dest, preserving all permissions and attributes; mtime will
2925 be preserved even when moving across filesystems. Returns true on success and false on
2926 failure. Move is atomic."""
2927 #print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"
# On *BSD, file flags (schg etc.) must be saved and cleared before the move.
2934 sflags=bsd_chflags.lgetflags(src)
2936 # Problem getting flags...
2937 writemsg("!!! Couldn't get flags for "+dest+"\n")
2940 except SystemExit, e:
2942 except Exception, e:
2943 print "!!! Stating source file failed... movefile()"
2949 dstat=os.lstat(dest)
2950 except SystemExit, e:
# Destination absent: stat its parent directory instead (for ST_DEV comparison).
2953 dstat=os.lstat(os.path.dirname(dest))
2957 # Check that we can actually unset schg etc flags...
2958 # Clear the flags on source and destination; we'll reinstate them after merging
2960 if bsd_chflags.lchflags(dest, 0) < 0:
2961 writemsg("!!! Couldn't clear flags on file being merged: \n ")
2962 # We might have an immutable flag on the parent dir; save and clear.
2963 pflags=bsd_chflags.lgetflags(os.path.dirname(dest))
2964 bsd_chflags.lchflags(os.path.dirname(dest), 0)
2966 # Don't bother checking the return value here; if it fails then the next line will catch it.
2967 bsd_chflags.lchflags(src, 0)
2969 if bsd_chflags.lhasproblems(src)>0 or (destexists and bsd_chflags.lhasproblems(dest)>0) or bsd_chflags.lhasproblems(os.path.dirname(dest))>0:
2970 # This is bad: we can't merge the file with these flags set.
2971 writemsg("!!! Can't merge file "+dest+" because of flags set\n")
2975 if stat.S_ISLNK(dstat[stat.ST_MODE]):
2979 except SystemExit, e:
2981 except Exception, e:
# Symlink source: re-create the link at dest, rewriting ${D}-prefixed targets.
2984 if stat.S_ISLNK(sstat[stat.ST_MODE]):
2986 target=os.readlink(src)
2987 if mysettings and mysettings["D"]:
2988 if target.find(mysettings["D"])==0:
2989 target=target[len(mysettings["D"]):]
2990 if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
2993 sid = selinux.get_lsid(src)
2994 selinux.secure_symlink(target,dest,sid)
2996 os.symlink(target,dest)
2997 lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
2999 # Restore the flags we saved before moving
3000 if bsd_chflags.lchflags(dest, sflags) < 0 or bsd_chflags.lchflags(os.path.dirname(dest), pflags) < 0:
3001 writemsg("!!! Couldn't restore flags ("+str(flags)+") on " + dest+":\n")
3002 writemsg("!!! %s\n" % str(e))
3004 return os.lstat(dest)[stat.ST_MTIME]
3005 except SystemExit, e:
3007 except Exception, e:
3008 print "!!! failed to properly create symlink:"
3009 print "!!!",dest,"->",target
# Same filesystem (or SELinux): attempt an atomic rename first.
3014 if sstat[stat.ST_DEV]==dstat[stat.ST_DEV] or selinux_enabled:
3017 ret=selinux.secure_rename(src,dest)
3019 ret=os.rename(src,dest)
3021 except SystemExit, e:
3023 except Exception, e:
3024 if e[0]!=errno.EXDEV:
3025 # Some random error.
3026 print "!!! Failed to move",src,"to",dest
3029 # Invalid cross-device-link 'bind' mounted or actually Cross-Device
# Cross-device regular file: copy to dest#new, then rename over dest.
3032 if stat.S_ISREG(sstat[stat.ST_MODE]):
3033 try: # For safety copy then move it over.
3035 selinux.secure_copy(src,dest+"#new")
3036 selinux.secure_rename(dest+"#new",dest)
3038 shutil.copyfile(src,dest+"#new")
3039 os.rename(dest+"#new",dest)
3041 except SystemExit, e:
3043 except Exception, e:
3044 print '!!! copy',src,'->',dest,'failed.'
3048 #we don't yet handle special, so we need to fall back to /bin/mv
3050 a=commands.getstatusoutput(MOVE_BINARY+" -c -f "+"'"+src+"' '"+dest+"'")
3052 a=commands.getstatusoutput(MOVE_BINARY+" -f "+"'"+src+"' '"+dest+"'")
3054 print "!!! Failed to move special file:"
3055 print "!!! '"+src+"' to '"+dest+"'"
3057 return None # failure
# After a copy-based move, re-apply ownership and mode from the source stat.
3060 if stat.S_ISLNK(sstat[stat.ST_MODE]):
3061 lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
3063 os.chown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
3064 os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
3066 except SystemExit, e:
3068 except Exception, e:
3069 print "!!! Failed to chown/chmod/unlink in movefile()"
3075 os.utime(dest,(newmtime,newmtime))
3077 os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
3078 newmtime=sstat[stat.ST_MTIME]
3081 # Restore the flags we saved before moving
3082 if bsd_chflags.lchflags(dest, sflags) < 0 or bsd_chflags.lchflags(os.path.dirname(dest), pflags) < 0:
3083 writemsg("!!! Couldn't restore flags ("+str(sflags)+") on " + dest+":\n")
def merge(mycat,mypkg,pkgloc,infloc,myroot,mysettings,myebuild=None,mytree=None):
	"""Merge a built package image into the live filesystem.

	Thin convenience wrapper: builds a dblink for mycat/mypkg on myroot
	and delegates to its merge() method.
	"""
	pkg_link = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree)
	return pkg_link.merge(pkgloc, infloc, myroot, myebuild)
# NOTE(review): partial snapshot -- a line between the dblink construction and
# the unmerge call is elided (likely an existence guard); indentation stripped.
# Unmerges an installed package (vartree) from myroot.
3092 def unmerge(cat,pkg,myroot,mysettings,mytrimworld=1):
3093 mylink=dblink(cat,pkg,myroot,mysettings,treetype="vartree")
3095 mylink.unmerge(trimworld=mytrimworld,cleanup=1)
# NOTE(review): partial snapshot -- return statements and several branches are
# elided; indentation stripped. Validates a dependency atom's syntax.
3098 def isvalidatom(atom):
3099 mycpv_cps = catpkgsplit(dep_getcpv(atom))
3100 operator = get_operator(atom)
# A '*' suffix is only meaningful with the '=' operator, not with < or >.
3102 if operator[0] in "<>" and atom[-1] == "*":
3104 if mycpv_cps and mycpv_cps[0] != "null":
3108 # >=cat/pkg or >=pkg-1.0 (no category)
3114 if (len(string.split(atom, '/'))==2):
# NOTE(review): body truncated in this snapshot -- only the hyphen-split setup
# is visible; the version-detection logic and return are elided.
# Presumably returns true when mypkg has no version component -- TODO confirm.
3120 def isjustname(mypkg):
3121 myparts=string.split(mypkg,'-')
# NOTE(review): partial snapshot -- the cache (iscache) population and the
# return paths are elided; indentation stripped.
3128 def isspecific(mypkg):
3129 "now supports packages with no category"
# Fast path: answer from the iscache memo if this package was seen before.
3131 return iscache[mypkg]
3132 except SystemExit, e:
3136 mysplit=string.split(mypkg,"/")
3137 if not isjustname(mysplit[-1]):
def getCPFromCPV(mycpv):
	"""Return the category/package ("cp") part of a cpv, dropping the version.

	Delegates the split to pkgsplit and keeps only its first element.
	"""
	split_parts = pkgsplit(mycpv)
	return split_parts[0]
# NOTE(review): partial snapshot -- loop header, else branches and the return
# of the rebuilt list are elided; indentation stripped.
3148 def dep_virtual(mysplit, mysettings):
3149 "Does virtual dependency conversion"
# Recurse into nested dependency sub-lists.
3152 if type(x)==types.ListType:
3153 newsplit.append(dep_virtual(x, mysettings))
3156 if mysettings.virtuals.has_key(mykey):
# Single provider: substitute it directly into the atom.
3157 if len(mysettings.virtuals[mykey])==1:
3158 a=string.replace(x, mykey, mysettings.virtuals[mykey][0])
3161 # blocker needs "and" not "or(||)".
# Multiple providers: expand to one atom per provider.
3165 for y in mysettings.virtuals[mykey]:
3166 a.append(string.replace(x, mykey, y))
# NOTE(review): partial snapshot -- base cases and returns are elided;
# indentation stripped. Evaluates a reduced boolean dependency list.
3172 def dep_eval(deplist):
3175 if deplist[0]=="||":
3176 #or list; we just need one "1"
3177 for x in deplist[1:]:
3178 if type(x)==types.ListType:
3183 #XXX: unless there's no available atoms in the list
3184 #in which case we need to assume that everything is
3185 #okay as some ebuilds are relying on an old bug.
3186 if len(deplist) == 1:
# AND semantics for plain lists: every element must evaluate true.
3191 if type(x)==types.ListType:
# NOTE(review): partial snapshot -- a few framing lines are elided;
# indentation stripped.
3198 def dep_zapdeps(unreduced,reduced,myroot,use_binaries=0):
3199 """Takes an unreduced and reduced deplist and removes satisfied dependencies.
3200 Returned deplist contains steps that must be taken to satisfy dependencies."""
3201 writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
# Already satisfied (or trivially empty): nothing to do.
3202 if not reduced or unreduced == ["||"] or dep_eval(reduced):
3205 if unreduced[0] != "||":
# AND level: recurse into sub-lists, keep unsatisfied plain atoms.
3207 for (dep, satisfied) in zip(unreduced, reduced):
3208 if isinstance(dep, list):
3209 unresolved += dep_zapdeps(dep, satisfied, myroot, use_binaries=use_binaries)
3211 unresolved.append(dep)
3214 # We're at a ( || atom ... ) type level
3215 deps = unreduced[1:]
3216 satisfieds = reduced[1:]
# First preference: an alternative whose atoms are all already installed.
3219 for (dep, satisfied) in zip(deps, satisfieds):
3220 if isinstance(dep, list):
3221 atoms = dep_zapdeps(dep, satisfied, myroot, use_binaries=use_binaries)
3224 missing_atoms = [atom for atom in atoms if not db[myroot]["vartree"].dbapi.match(atom)]
3226 if not missing_atoms:
3227 if isinstance(dep, list):
3228 return atoms # Sorted out by the recursed dep_zapdeps call
3230 target = dep_getkey(dep) # An installed package that's not yet in the graph
# Second preference: an alternative fully available from bintree/porttree.
3235 missing_atoms = [atom for atom in atoms if not db[myroot]["bintree"].dbapi.match(atom)]
3237 missing_atoms = [atom for atom in atoms if not db[myroot]["porttree"].dbapi.xmatch("match-visible", atom)]
3238 if not missing_atoms:
3239 target = (dep, satisfied)
# Fallback: take the first alternative in the || list.
3242 if isinstance(deps[0], list):
3243 return dep_zapdeps(deps[0], satisfieds[0], myroot, use_binaries=use_binaries)
3247 if isinstance(target, tuple): # Nothing matching installed
3248 if isinstance(target[0], list): # ... and the first available was a sublist
3249 return dep_zapdeps(target[0], target[1], myroot, use_binaries=use_binaries)
3250 else: # ... and the first available was a single atom
3251 target = dep_getkey(target[0])
# Of the atoms naming the chosen package, pick the one whose best visible
# candidate version is highest.
3253 relevant_atoms = [dep for dep in deps if not isinstance(dep, list) and dep_getkey(dep) == target]
3256 for atom in relevant_atoms:
3258 pkg_list = db["/"]["bintree"].dbapi.match(atom)
3260 pkg_list = db["/"]["porttree"].dbapi.xmatch("match-visible", atom)
3263 pkg = best(pkg_list)
3264 available_pkgs[pkg] = atom
3266 if not available_pkgs:
3267 return [relevant_atoms[0]] # All masked
3269 target_pkg = best(available_pkgs.keys())
3270 suitable_atom = available_pkgs[target_pkg]
3271 return [suitable_atom]
3275 def dep_getkey(mydep):
3284 if mydep[:2] in [ ">=", "<=" ]:
3286 elif mydep[:1] in "=<>~":
3288 if isspecific(mydep):
3289 mysplit=catpkgsplit(mydep)
3292 return mysplit[0]+"/"+mysplit[1]
3296 def dep_getcpv(mydep):
3305 if mydep[:2] in [ ">=", "<=" ]:
3307 elif mydep[:1] in "=<>~":
3311 def dep_transform(mydep,oldkey,newkey):
3322 if mydep[:2] in [ ">=", "<=" ]:
3325 elif mydep[:1] in "=<>~!":
3329 return prefix+newkey+postfix
3333 def dep_expand(mydep,mydb=None,use_cache=1):
3343 if mydep[:2] in [ ">=", "<=" ]:
3346 elif mydep[:1] in "=<>~!":
3349 return prefix+cpv_expand(mydep,mydb=mydb,use_cache=use_cache)+postfix
3351 def dep_check(depstring,mydbapi,mysettings,use="yes",mode=None,myuse=None,use_cache=1,use_binaries=0,myroot="/"):
3352 """Takes a depend string and parses the condition."""
3354 #check_config_instance(mysettings)
3359 myusesplit = string.split(mysettings["USE"])
3362 # We've been given useflags to use.
3363 #print "USE FLAGS PASSED IN."
3365 #if "bindist" in myusesplit:
3366 # print "BINDIST is set!"
3368 # print "BINDIST NOT set."
3370 #we are being run by autouse(), don't consult USE vars yet.
3371 # WE ALSO CANNOT USE SETTINGS
3374 #convert parenthesis to sublists
3375 mysplit = portage_dep.paren_reduce(depstring)
3378 # XXX: use="all" is only used by repoman. Why would repoman checks want
3379 # profile-masked USE flags to be enabled?
3381 # mymasks=archlist[:]
3383 mymasks=mysettings.usemask+archlist[:]
3385 while mysettings["ARCH"] in mymasks:
3386 del mymasks[mymasks.index(mysettings["ARCH"])]
3387 mysplit = portage_dep.use_reduce(mysplit,uselist=myusesplit,masklist=mymasks,matchall=(use=="all"),excludeall=[mysettings["ARCH"]])
3389 mysplit = portage_dep.use_reduce(mysplit,uselist=myusesplit,matchall=(use=="all"))
3391 # Do the || conversions
3392 mysplit=portage_dep.dep_opconvert(mysplit)
3394 #convert virtual dependencies to normal packages.
3395 mysplit=dep_virtual(mysplit, mysettings)
3396 #if mysplit==None, then we have a parse error (paren mismatch or misplaced ||)
3397 #up until here, we haven't needed to look at the database tree
3400 return [0,"Parse Error (parentheses mismatch?)"]
3402 #dependencies were reduced to nothing
3405 mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
3407 return [0,"Invalid token"]
3409 writemsg("\n\n\n", 1)
3410 writemsg("mysplit: %s\n" % (mysplit), 1)
3411 writemsg("mysplit2: %s\n" % (mysplit2), 1)
3412 myeval=dep_eval(mysplit2)
3413 writemsg("myeval: %s\n" % (myeval), 1)
3418 myzaps = dep_zapdeps(mysplit,mysplit2,myroot,use_binaries=use_binaries)
3419 mylist = flatten(myzaps)
3420 writemsg("myzaps: %s\n" % (myzaps), 1)
3421 writemsg("mylist: %s\n" % (mylist), 1)
3426 writemsg("mydict: %s\n" % (mydict), 1)
3427 return [1,mydict.keys()]
3429 def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
3430 "Reduces the deplist to ones and zeros"
3432 deplist=mydeplist[:]
3433 while mypos<len(deplist):
3434 if type(deplist[mypos])==types.ListType:
3436 deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
3437 elif deplist[mypos]=="||":
3440 mykey = dep_getkey(deplist[mypos])
3441 if mysettings and mysettings.pprovideddict.has_key(mykey) and \
3442 match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
3446 mydep=mydbapi.xmatch(mode,deplist[mypos])
3448 mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
3451 if deplist[mypos][0]=="!":
3455 #encountered invalid string
3460 def cpv_getkey(mycpv):
3461 myslash=mycpv.split("/")
3462 mysplit=pkgsplit(myslash[-1])
3465 return myslash[0]+"/"+mysplit[0]
3471 def key_expand(mykey,mydb=None,use_cache=1):
3472 mysplit=mykey.split("/")
3474 if mydb and type(mydb)==types.InstanceType:
3475 for x in settings.categories:
3476 if mydb.cp_list(x+"/"+mykey,use_cache=use_cache):
3478 if virts_p.has_key(mykey):
3479 return(virts_p[mykey][0])
3480 return "null/"+mykey
3482 if type(mydb)==types.InstanceType:
3483 if (not mydb.cp_list(mykey,use_cache=use_cache)) and virts and virts.has_key(mykey):
3484 return virts[mykey][0]
3487 def cpv_expand(mycpv,mydb=None,use_cache=1):
3488 """Given a string (packagename or virtual) expand it into a valid
3489 cat/package string. Virtuals use the mydb to determine which provided
3490 virtual is a valid choice and defaults to the first element when there
3491 are no installed/available candidates."""
3492 myslash=mycpv.split("/")
3493 mysplit=pkgsplit(myslash[-1])
3495 # this is illegal case.
3498 elif len(myslash)==2:
3500 mykey=myslash[0]+"/"+mysplit[0]
3504 writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
3505 if type(mydb)==types.InstanceType:
3506 if (not mydb.cp_list(mykey,use_cache=use_cache)) and virts and virts.has_key(mykey):
3507 writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1)
3508 mykey_orig = mykey[:]
3509 for vkey in virts[mykey]:
3510 if mydb.cp_list(vkey,use_cache=use_cache):
3512 writemsg("virts chosen: %s\n" % (mykey), 1)
3514 if mykey == mykey_orig:
3515 mykey=virts[mykey][0]
3516 writemsg("virts defaulted: %s\n" % (mykey), 1)
3517 #we only perform virtual expansion if we are passed a dbapi
3519 #specific cpv, no category, ie. "foo-1.0"
3528 for x in settings.categories:
3529 if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
3530 matches.append(x+"/"+myp)
3531 if (len(matches)>1):
3532 raise ValueError, matches
3536 if not mykey and type(mydb)!=types.ListType:
3537 if virts_p.has_key(myp):
3538 mykey=virts_p[myp][0]
3539 #again, we only perform virtual expansion if we have a dbapi (not a list)
3543 if mysplit[2]=="r0":
3544 return mykey+"-"+mysplit[1]
3546 return mykey+"-"+mysplit[1]+"-"+mysplit[2]
3550 def getmaskingreason(mycpv):
3551 from portage_util import grablines
3553 mysplit = catpkgsplit(mycpv)
3555 raise ValueError("invalid CPV: %s" % mycpv)
3556 if not portdb.cpv_exists(mycpv):
3557 raise KeyError("CPV %s does not exist" % mycpv)
3558 mycp=mysplit[0]+"/"+mysplit[1]
3560 pmasklines = grablines(settings["PORTDIR"]+"/profiles/package.mask", recursive=1)
3561 if settings.pmaskdict.has_key(mycp):
3562 for x in settings.pmaskdict[mycp]:
3563 if mycpv in portdb.xmatch("match-all", x):
3567 while i < len(pmasklines):
3568 l = pmasklines[i].strip()
3578 def getmaskingstatus(mycpv):
3580 mysplit = catpkgsplit(mycpv)
3582 raise ValueError("invalid CPV: %s" % mycpv)
3583 if not portdb.cpv_exists(mycpv):
3584 raise KeyError("CPV %s does not exist" % mycpv)
3585 mycp=mysplit[0]+"/"+mysplit[1]
3590 revmaskdict=settings.prevmaskdict
3591 if revmaskdict.has_key(mycp):
3592 for x in revmaskdict[mycp]:
3597 if not match_to_list(mycpv, [myatom]):
3598 rValue.append("profile")
3601 # package.mask checking
3602 maskdict=settings.pmaskdict
3603 unmaskdict=settings.punmaskdict
3604 if maskdict.has_key(mycp):
3605 for x in maskdict[mycp]:
3606 if mycpv in portdb.xmatch("match-all", x):
3608 if unmaskdict.has_key(mycp):
3609 for z in unmaskdict[mycp]:
3610 if mycpv in portdb.xmatch("match-all",z):
3614 rValue.append("package.mask")
3617 mygroups, eapi = portdb.aux_get(mycpv, ["KEYWORDS", "EAPI"])
3618 if not eapi_is_supported(eapi):
3619 return ["required EAPI %s, supported EAPI %s" % (eapi, portage_const.EAPI)]
3620 mygroups = mygroups.split()
3622 myarch = settings["ARCH"]
3623 pkgdict = settings.pkeywordsdict
3625 cp = dep_getkey(mycpv)
3626 if pkgdict.has_key(cp):
3627 matches = match_to_list(mycpv, pkgdict[cp].keys())
3628 for match in matches:
3629 pgroups.extend(pkgdict[cp][match])
3633 for keyword in pgroups:
3634 if keyword in mygroups:
3643 elif gp=="-"+myarch:
3646 elif gp=="~"+myarch:
3651 rValue.append(kmask+" keyword")
3654 def fixdbentries(old_value, new_value, dbdir):
3655 """python replacement for the fixdbentries script, replaces old_value
3656 with new_value for package names in files in dbdir."""
3657 for myfile in [f for f in os.listdir(dbdir) if not f == "CONTENTS"]:
3658 file_path = os.path.join(dbdir, myfile)
3659 f = open(file_path, "r")
3660 mycontent = f.read()
3662 if not mycontent.count(old_value):
3664 old_value = re.escape(old_value);
3665 mycontent = re.sub(old_value+"$", new_value, mycontent)
3666 mycontent = re.sub(old_value+"(\\s)", new_value+"\\1", mycontent)
3667 mycontent = re.sub(old_value+"(-[^a-zA-Z])", new_value+"\\1", mycontent)
3668 mycontent = re.sub(old_value+"([^a-zA-Z0-9-])", new_value+"\\1", mycontent)
3669 write_atomic(file_path, mycontent)
3672 def __init__(self,virtual,clone=None):
3674 self.tree=clone.tree.copy()
3675 self.populated=clone.populated
3676 self.virtual=clone.virtual
3681 self.virtual=virtual
def resolve_key(self,mykey):
	"""Expand a possibly-partial package key into a full cat/pkg key via key_expand(), consulting this tree's dbapi."""
	return key_expand(mykey, mydb=self.dbapi)
3687 def dep_nomatch(self,mypkgdep):
3688 mykey=dep_getkey(mypkgdep)
3689 nolist=self.dbapi.cp_list(mykey)
3690 mymatch=self.dbapi.match(mypkgdep)
def depcheck(self,mycheck,use="yes",myusesplit=None):
	"""Parse and evaluate the dependency string mycheck against this tree's dbapi.

	Fix: dep_check() takes mysettings as its third required positional
	argument; the old call omitted it, so this method always raised
	TypeError. The global settings config is now passed through.
	"""
	return dep_check(mycheck, self.dbapi, settings, use=use, myuse=myusesplit)
3702 "populates the tree with values"
3706 def best(mymatches):
3707 "accepts None arguments; assumes matches are valid."
3711 if not len(mymatches):
3713 bestmatch=mymatches[0]
3714 p2=catpkgsplit(bestmatch)[1:]
3715 for x in mymatches[1:]:
3716 p1=catpkgsplit(x)[1:]
3719 p2=catpkgsplit(bestmatch)[1:]
3722 def match_to_list(mypkg,mylist):
3724 Searches list for entries that matches the package.
3728 if match_from_list(x,[mypkg]):
3729 if x not in matches:
3733 def best_match_to_list(mypkg,mylist):
3735 Returns the most specific entry (assumed to be the longest one)
3736 that matches the package given.
3738 # XXX Assumption is wrong sometimes.
3741 for x in match_to_list(mypkg,mylist):
def catsplit(mydep):
	"""Split a dependency string at the first '/' into [category, remainder].

	A string with no '/' comes back as a one-element list.
	"""
	parts = mydep.split("/", 1)
	return parts
3750 def get_operator(mydep):
3752 returns '~', '=', '>', '<', '=*', '>=', or '<='
3756 elif mydep[0] == "=":
3757 if mydep[-1] == "*":
3761 elif mydep[0] in "><":
3762 if len(mydep) > 1 and mydep[1] == "=":
3763 operator = mydep[0:2]
3772 def match_from_list(mydep,candidate_list):
3776 mycpv = dep_getcpv(mydep)
3777 mycpv_cps = catpkgsplit(mycpv) # Can be None if not specific
3780 cat,pkg = catsplit(mycpv)
3784 cat,pkg,ver,rev = mycpv_cps
3786 raise KeyError, "Specific key requires an operator (%s) (try adding an '=')" % (mydep)
3789 operator = get_operator(mydep)
3791 writemsg("!!! Invalid atom: %s\n" % mydep)
3798 if operator == None:
3799 for x in candidate_list:
3804 elif xs[0] != mycpv:
3808 elif operator == "=": # Exact match
3809 if mycpv in candidate_list:
3812 elif operator == "=*": # glob match
3813 # The old verion ignored _tag suffixes... This one doesn't.
3814 for x in candidate_list:
3815 if x[0:len(mycpv)] == mycpv:
3818 elif operator == "~": # version, any revision, match
3819 for x in candidate_list:
3821 if xs[0:2] != mycpv_cps[0:2]:
3827 elif operator in [">", ">=", "<", "<="]:
3828 for x in candidate_list:
3830 result = pkgcmp(pkgsplit(x), [cat+"/"+pkg,ver,rev])
3831 except SystemExit, e:
3834 writemsg("\nInvalid package name: %s\n" % x)
3838 elif operator == ">":
3841 elif operator == ">=":
3844 elif operator == "<":
3847 elif operator == "<=":
3851 raise KeyError, "Unknown operator: %s" % mydep
3853 raise KeyError, "Unknown operator: %s" % mydep
3859 def match_from_list_original(mydep,mylist):
3861 Reduces the list down to those that fit the dep
3863 mycpv=dep_getcpv(mydep)
3864 if isspecific(mycpv):
3865 cp_key=catpkgsplit(mycpv)
3870 #Otherwise, this is a special call; we can only select out of the ebuilds specified in the specified mylist
3875 #example: "=sys-apps/foo-1.0*"
3877 #now, we grab the version of our dependency...
3878 mynewsplit=string.split(cp_key[2],'.')
3880 mynewsplit[-1]=`int(mynewsplit[-1])+1`
3881 #and increment the last digit of the version by one.
3882 #We don't need to worry about _pre and friends because they're not supported with '*' deps.
3883 new_v=string.join(mynewsplit,".")+"_alpha0"
3884 #new_v will be used later in the code when we do our comparisons using pkgcmp()
3885 except SystemExit, e:
3892 cmp1[1]=cmp1[1]+"_alpha0"
3893 cmp2=[cp_key[1],new_v,"r0"]
3897 #hrm, invalid entry. Continue.
3899 #skip entries in our list that do not have matching categories
3900 if cp_key[0]!=cp_x[0]:
3902 # ok, categories match. Continue to next step.
3903 if ((pkgcmp(cp_x[1:],cmp1)>=0) and (pkgcmp(cp_x[1:],cmp2)<0)):
3904 # entry is >= the version in specified in our dependency, and <= the version in our dep + 1; add it:
3908 # Does our stripped key appear literally in our list? If so, we have a match; if not, we don't.
3913 elif (mydep[0]==">") or (mydep[0]=="<"):
3916 if (len(mydep)>1) and (mydep[1]=="="):
3924 #invalid entry; continue.
3926 if cp_key[0]!=cp_x[0]:
3928 if eval("pkgcmp(cp_x[1:],cp_key[1:])"+cmpstr+"0"):
3938 #invalid entry; continue
3940 if cp_key[0]!=cp_x[0]:
3942 if cp_key[2]!=cp_x[2]:
3943 #if version doesn't match, skip it
3945 myint = int(cp_x[3][1:])
3956 #we check ! deps in emerge itself, so always returning [] is correct.
3958 cp_key=mycpv.split("/")
3962 #invalid entry; continue
3964 if cp_key[0]!=cp_x[0]:
3966 if cp_key[1]!=cp_x[1]:
3975 def __init__(self,root="/",virtual=None,clone=None):
3978 self.root=clone.root
3979 self.portroot=clone.portroot
3980 self.pkglines=clone.pkglines
3983 self.portroot=settings["PORTDIR"]
3984 self.virtual=virtual
3987 def dep_bestmatch(self,mydep):
3988 "compatibility method"
3989 mymatch=self.dbapi.xmatch("bestmatch-visible",mydep)
3994 def dep_match(self,mydep):
3995 "compatibility method"
3996 mymatch=self.dbapi.xmatch("match-visible",mydep)
def exists_specific(self,cpv):
	"""Return True when the exact cat/pkg-version cpv exists in this tree's dbapi."""
	found = self.dbapi.cpv_exists(cpv)
	return found
def getallnodes(self):
	"""Return every *unmasked* cat/pkg node; masked packages may still exist
	for any node in the returned list."""
	nodes = self.dbapi.cp_all()
	return nodes
4009 def getname(self,pkgname):
4010 "returns file location for this particular package (DEPRECATED)"
4013 mysplit=string.split(pkgname,"/")
4014 psplit=pkgsplit(mysplit[1])
4015 return self.portroot+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
4017 def resolve_specific(self,myspec):
4018 cps=catpkgsplit(myspec)
4021 mykey=key_expand(cps[0]+"/"+cps[1],mydb=self.dbapi)
4022 mykey=mykey+"-"+cps[2]
4024 mykey=mykey+"-"+cps[3]
def depcheck(self,mycheck,use="yes",myusesplit=None):
	"""Parse and evaluate the dependency string mycheck against this tree's dbapi.

	Fix: dep_check() takes mysettings as its third required positional
	argument; the old call omitted it, so this method always raised
	TypeError. The global settings config is now passed through.
	"""
	return dep_check(mycheck, self.dbapi, settings, use=use, myuse=myusesplit)
4030 def getslot(self,mycatpkg):
4031 "Get a slot for a catpkg; assume it exists."
4034 myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
4035 except SystemExit, e:
4037 except Exception, e:
4046 def close_caches(self):
4049 def cp_list(self,cp,use_cache=1):
def aux_get(self,mycpv,mylist):
	"""Stub for returning auxiliary db information, such as SLOT, DEPEND, etc.

	input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]
	return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or [] if mycpv not found

	Subclasses must override; the base implementation always raises.
	"""
	# The original carried the input/return notes as bare (no-op) string
	# statements; folding them into the docstring changes no behavior.
	raise NotImplementedError
def match(self,origdep,use_cache=1):
	"""Expand origdep into a full atom via this db, then return the matching
	cpvs from cp_list() for the atom's category/package key.

	origdep   -- possibly-unexpanded dependency atom (e.g. "portage")
	use_cache -- forwarded to cp_list()
	"""
	mydep = dep_expand(origdep, mydb=self)
	mykey = dep_getkey(mydep)
	# NOTE(review): the original also computed the category (mykey.split("/")[0])
	# here but never used it; the dead local has been removed.
	return match_from_list(mydep, self.cp_list(mykey, use_cache=use_cache))
def match2(self,mydep,mykey,mylist):
	"""DEPRECATED: match mydep against an explicit candidate list.

	mykey is accepted for backward compatibility but is unused. Fix: the
	result of match_from_list() used to be discarded so the method always
	returned None; it is now returned (None-callers are unaffected).
	"""
	writemsg("DEPRECATED: dbapi.match2\n")
	return match_from_list(mydep, mylist)
def counter_tick(self,myroot,mycpv=None):
	"""Increment the global package COUNTER and return the new value.

	Thin wrapper around counter_tick_core() with incrementing=1.
	"""
	return self.counter_tick_core(myroot, incrementing=1, mycpv=mycpv)
def get_counter_tick_core(self,myroot,mycpv=None):
	"""Return the COUNTER value the next tick would produce, without writing
	anything back (peek: counter_tick_core() with incrementing=0, plus one)."""
	next_value = self.counter_tick_core(myroot, incrementing=0, mycpv=mycpv) + 1
	return next_value
4074 def counter_tick_core(self,myroot,incrementing=1,mycpv=None):
4075 "This method will grab the next COUNTER value and record it back to the global file. Returns new counter value."
4076 cpath=myroot+"var/cache/edb/counter"
4080 mysplit = pkgsplit(mycpv)
4081 for x in self.match(mysplit[0],use_cache=0):
4085 old_counter = long(self.aux_get(x,["COUNTER"])[0])
4086 writemsg("COUNTER '%d' '%s'\n" % (old_counter, x),1)
4087 except SystemExit, e:
4091 writemsg("!!! BAD COUNTER in '%s'\n" % (x))
4092 if old_counter > min_counter:
4093 min_counter = old_counter
4095 # We write our new counter value to a new file that gets moved into
4096 # place to avoid filesystem corruption.
4097 if os.path.exists(cpath):
4098 cfile=open(cpath, "r")
4100 counter=long(cfile.readline())
4101 except (ValueError,OverflowError):
4103 counter=long(commands.getoutput("for FILE in $(find /"+VDB_PATH+" -type f -name COUNTER); do echo $(<${FILE}); done | sort -n | tail -n1 | tr -d '\n'"))
4104 writemsg("!!! COUNTER was corrupted; resetting to value of %d\n" % counter)
4106 except (ValueError,OverflowError):
4107 writemsg("!!! COUNTER data is corrupt in pkg db. The values need to be\n")
4108 writemsg("!!! corrected/normalized so that portage can operate properly.\n")
4109 writemsg("!!! A simple solution is not yet available so try #gentoo on IRC.\n")
4114 counter=long(commands.getoutput("for FILE in $(find /"+VDB_PATH+" -type f -name COUNTER); do echo $(<${FILE}); done | sort -n | tail -n1 | tr -d '\n'"))
4115 writemsg("!!! Global counter missing. Regenerated from counter files to: %s\n" % counter)
4116 except SystemExit, e:
4119 writemsg("!!! Initializing global counter.\n")
4123 if counter < min_counter:
4124 counter = min_counter+1000
4127 if incrementing or changed:
4131 # update new global counter file
4132 write_atomic(cpath, str(counter))
4135 def invalidentry(self, mypath):
4136 if re.search("portage_lockfile$",mypath):
4137 if not os.environ.has_key("PORTAGE_MASTER_PID"):
4138 writemsg("Lockfile removed: %s\n" % mypath, 1)
4139 portage_locks.unlockfile((mypath,None,None))
4141 # Nothing we can do about it. We're probably sandboxed.
4143 elif re.search(".*/-MERGING-(.*)",mypath):
4144 if os.path.exists(mypath):
4145 writemsg(red("INCOMPLETE MERGE:")+" "+mypath+"\n")
4147 writemsg("!!! Invalid db entry: %s\n" % mypath)
4151 class fakedbapi(dbapi):
4152 "This is a dbapi to use for the emptytree function. It's empty, but things can be added to it."
def cpv_exists(self,mycpv):
	"""Return whether mycpv has been injected into this fake db."""
	# dict.has_key() is deprecated (and gone in Python 3); the "in"
	# operator is the exact equivalent.
	return mycpv in self.cpvdict
4160 def cp_list(self,mycp,use_cache=1):
4161 if not self.cpdict.has_key(mycp):
4164 return self.cpdict[mycp]
4168 for x in self.cpdict.keys():
4169 returnme.extend(self.cpdict[x])
def cpv_inject(self,mycpv):
	"""Adds a cpv from the list of available packages.

	Registers mycpv in cpvdict and appends it (at most once) to the
	per-category/package list in cpdict, creating that list on first use.
	"""
	mycp = cpv_getkey(mycpv)
	self.cpvdict[mycpv] = 1
	# setdefault replaces the deprecated has_key() check-then-create pair;
	# "not in" replaces the non-idiomatic "not x in".
	cp_list = self.cpdict.setdefault(mycp, [])
	if mycpv not in cp_list:
		cp_list.append(mycpv)
4181 #def cpv_virtual(self,oldcpv,newcpv):
4182 # """Maps a cpv to the list of available packages."""
4183 # mycp=cpv_getkey(newcpv)
4184 # self.cpvdict[newcpv]=1
4185 # if not self.virtdict.has_key(mycp):
4186 # self.virtdict[mycp]=[]
4187 # if not mycpv in self.virtdict[mycp]:
4188 # self.virtdict[mycp].append(oldcpv)
4189 # cpv_remove(oldcpv)
4191 def cpv_remove(self,mycpv):
4192 """Removes a cpv from the list of available packages."""
4193 mycp=cpv_getkey(mycpv)
4194 if self.cpvdict.has_key(mycpv):
4195 del self.cpvdict[mycpv]
4196 if not self.cpdict.has_key(mycp):
4198 while mycpv in self.cpdict[mycp]:
4199 del self.cpdict[mycp][self.cpdict[mycp].index(mycpv)]
4200 if not len(self.cpdict[mycp]):
4201 del self.cpdict[mycp]
4203 class bindbapi(fakedbapi):
4204 def __init__(self,mybintree=None):
4205 self.bintree = mybintree
4209 def aux_get(self,mycpv,wants):
4210 mysplit = string.split(mycpv,"/")
4212 tbz2name = mysplit[1]+".tbz2"
4213 if self.bintree and not self.bintree.isremote(mycpv):
4214 tbz2 = xpak.tbz2(self.bintree.getname(mycpv))
4216 if self.bintree and self.bintree.isremote(mycpv):
4217 # We use the cache for remote packages
4218 if self.bintree.remotepkgs[tbz2name].has_key(x):
4219 mylist.append(self.bintree.remotepkgs[tbz2name][x][:]) # [:] Copy String
4223 myval = tbz2.getfile(x)
4227 myval = string.join(myval.split(),' ')
4228 mylist.append(myval)
4230 idx = wants.index("EAPI")
4237 class vardbapi(dbapi):
4238 def __init__(self,root,categories=None):
4240 #cache for category directory mtimes
4241 self.mtdircache = {}
4242 #cache for dependency checks
4243 self.matchcache = {}
4244 #cache for cp_list results
4246 self.blockers = None
4247 self.categories = copy.deepcopy(categories)
def cpv_exists(self,mykey):
	"""Tells us whether an actual ebuild exists on disk (no masking) --
	i.e. whether the vdb directory for mykey is present under root."""
	vdb_entry = self.root + VDB_PATH + "/" + mykey
	return os.path.exists(vdb_entry)
4253 def cpv_counter(self,mycpv):
4254 "This method will grab the COUNTER. Returns a counter value."
4255 cdir=self.root+VDB_PATH+"/"+mycpv
4256 cpath=self.root+VDB_PATH+"/"+mycpv+"/COUNTER"
4258 # We write our new counter value to a new file that gets moved into
4259 # place to avoid filesystem corruption on XFS (unexpected reboot.)
4261 if os.path.exists(cpath):
4262 cfile=open(cpath, "r")
4264 counter=long(cfile.readline())
4266 print "portage: COUNTER for",mycpv,"was corrupted; resetting to value of 0"
4270 elif os.path.exists(cdir):
4271 mys = pkgsplit(mycpv)
4272 myl = self.match(mys[0],use_cache=0)
4276 # Only one package... Counter doesn't matter.
4277 write_atomic(cpath, "1")
4279 except SystemExit, e:
4281 except Exception, e:
4282 writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n")
4283 writemsg("!!! Please run /usr/lib/portage/bin/fix-db.pl or\n")
4284 writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n")
4285 writemsg("!!! unmerge this exact version.\n")
4286 writemsg("!!! %s\n" % e)
4289 writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n")
4290 writemsg("!!! Please run /usr/lib/portage/bin/fix-db.pl or\n")
4291 writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n")
4292 writemsg("!!! remerge the package.\n")
4297 # update new global counter file
4298 write_atomic(cpath, str(counter))
4301 def cpv_inject(self,mycpv):
4302 "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
4303 os.makedirs(self.root+VDB_PATH+"/"+mycpv)
4304 counter=db[self.root]["vartree"].dbapi.counter_tick(self.root,mycpv=mycpv)
4305 # write local package counter so that emerge clean does the right thing
4306 write_atomic(os.path.join(self.root, VDB_PATH, mycpv, "COUNTER"), str(counter))
4308 def isInjected(self,mycpv):
4309 if self.cpv_exists(mycpv):
4310 if os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/INJECTED"):
4312 if not os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/CONTENTS"):
4316 def move_ent(self,mylist):
4320 for cp in [origcp,newcp]:
4321 if not (isvalidatom(cp) and isjustname(cp)):
4322 raise portage_exception.InvalidPackageName(cp)
4323 origmatches=self.match(origcp,use_cache=0)
4326 for mycpv in origmatches:
4327 mycpsplit=catpkgsplit(mycpv)
4328 mynewcpv=newcp+"-"+mycpsplit[2]
4329 mynewcat=newcp.split("/")[0]
4330 if mycpsplit[3]!="r0":
4331 mynewcpv += "-"+mycpsplit[3]
4332 mycpsplit_new = catpkgsplit(mynewcpv)
4333 origpath=self.root+VDB_PATH+"/"+mycpv
4334 if not os.path.exists(origpath):
4337 if not os.path.exists(self.root+VDB_PATH+"/"+mynewcat):
4338 #create the directory
4339 os.makedirs(self.root+VDB_PATH+"/"+mynewcat)
4340 newpath=self.root+VDB_PATH+"/"+mynewcpv
4341 if os.path.exists(newpath):
4342 #dest already exists; keep this puppy where it is.
4344 spawn(MOVE_BINARY+" "+origpath+" "+newpath,settings, free=1)
4346 # We need to rename the ebuild now.
4347 old_eb_path = newpath+"/"+mycpsplit[1] +"-"+mycpsplit[2]
4348 new_eb_path = newpath+"/"+mycpsplit_new[1]+"-"+mycpsplit[2]
4349 if mycpsplit[3] != "r0":
4350 old_eb_path += "-"+mycpsplit[3]
4351 new_eb_path += "-"+mycpsplit[3]
4352 if os.path.exists(old_eb_path+".ebuild"):
4353 os.rename(old_eb_path+".ebuild", new_eb_path+".ebuild")
4355 write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
4357 dbdir = self.root+VDB_PATH
4358 for catdir in listdir(dbdir):
4359 catdir = dbdir+"/"+catdir
4360 if os.path.isdir(catdir):
4361 for pkgdir in listdir(catdir):
4362 pkgdir = catdir+"/"+pkgdir
4363 if os.path.isdir(pkgdir):
4364 fixdbentries(origcp, newcp, pkgdir)
4366 def move_slot_ent(self,mylist):
4371 if not isvalidatom(pkg):
4372 raise portage_exception.InvalidAtom(pkg)
4374 origmatches=self.match(pkg,use_cache=0)
4378 for mycpv in origmatches:
4379 origpath=self.root+VDB_PATH+"/"+mycpv
4380 if not os.path.exists(origpath):
4383 slot=grabfile(origpath+"/SLOT");
4387 if (slot[0]!=origslot):
4391 write_atomic(os.path.join(origpath, "SLOT"), newslot+"\n")
4393 def cp_list(self,mycp,use_cache=1):
4394 mysplit=mycp.split("/")
4395 if mysplit[0] == '*':
4396 mysplit[0] = mysplit[0][1:]
4398 mystat=os.stat(self.root+VDB_PATH+"/"+mysplit[0])[stat.ST_MTIME]
4401 if use_cache and self.cpcache.has_key(mycp):
4402 cpc=self.cpcache[mycp]
4405 list=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
4412 #writemsg(red("INCOMPLETE MERGE:")+str(x[len("-MERGING-"):])+"\n")
4416 self.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
4418 if len(mysplit) > 1:
4419 if ps[0]==mysplit[1]:
4420 returnme.append(mysplit[0]+"/"+x)
4422 self.cpcache[mycp]=[mystat,returnme]
4423 elif self.cpcache.has_key(mycp):
4424 del self.cpcache[mycp]
4427 def cpv_all(self,use_cache=1):
4429 basepath = self.root+VDB_PATH+"/"
4431 mycats = self.categories
4433 # XXX: CIRCULAR DEP! This helps backwards compat. --NJ (10 Sept 2004)
4434 mycats = settings.categories
4437 for y in listdir(basepath+x,EmptyOnError=1):
4439 # -MERGING- should never be a cpv, nor should files.
4440 if os.path.isdir(basepath+subpath) and (pkgsplit(y) is not None):
4441 returnme += [subpath]
4444 def cp_all(self,use_cache=1):
4445 mylist = self.cpv_all(use_cache=use_cache)
4450 mysplit=catpkgsplit(y)
4452 self.invalidentry(self.root+VDB_PATH+"/"+y)
4454 d[mysplit[0]+"/"+mysplit[1]] = None
4457 def checkblockers(self,origdep):
4460 def match(self,origdep,use_cache=1):
4461 "caching match function"
4462 mydep=dep_expand(origdep,mydb=self,use_cache=use_cache)
4463 mykey=dep_getkey(mydep)
4464 mycat=mykey.split("/")[0]
4466 if self.matchcache.has_key(mycat):
4467 del self.mtdircache[mycat]
4468 del self.matchcache[mycat]
4469 return match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
4471 curmtime=os.stat(self.root+VDB_PATH+"/"+mycat)[stat.ST_MTIME]
4472 except SystemExit, e:
4477 if not self.matchcache.has_key(mycat) or not self.mtdircache[mycat]==curmtime:
4479 self.mtdircache[mycat]=curmtime
4480 self.matchcache[mycat]={}
4481 if not self.matchcache[mycat].has_key(mydep):
4482 mymatch=match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
4483 self.matchcache[mycat][mydep]=mymatch
4484 return self.matchcache[mycat][mydep][:]
def findname(self, mycpv):
	"""Return the path of the ebuild copy stored in the vdb entry for mycpv."""
	pkg_name = mycpv.split("/")[1]
	return self.root + VDB_PATH + "/" + str(mycpv) + "/" + pkg_name + ".ebuild"
4489 def aux_get(self, mycpv, wants):
4493 myfn = self.root+VDB_PATH+"/"+str(mycpv)+"/"+str(x)
4494 if os.access(myfn,os.R_OK):
4495 myf = open(myfn, "r")
4498 myd = re.sub("[\n\r\t]+"," ",myd)
4499 myd = re.sub(" +"," ",myd)
4500 myd = string.strip(myd)
4505 idx = wants.index("EAPI")
4506 if not results[idx]:
4511 class vartree(packagetree):
4512 "this tree will scan a var/db/pkg database located at root (passed to init)"
4513 def __init__(self,root="/",virtual=None,clone=None,categories=None):
4515 self.root = clone.root[:]
4516 self.dbapi = copy.deepcopy(clone.dbapi)
4520 self.dbapi = vardbapi(self.root,categories=categories)
4523 def zap(self,mycpv):
4526 def inject(self,mycpv):
4529 def get_provide(self,mycpv):
4532 mylines = grabfile(self.root+VDB_PATH+"/"+mycpv+"/PROVIDE")
4534 myuse = grabfile(self.root+VDB_PATH+"/"+mycpv+"/USE")
4535 myuse = string.split(string.join(myuse))
4536 mylines = string.join(mylines)
4537 mylines = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(mylines), uselist=myuse))
4538 for myprovide in mylines:
4539 mys = catpkgsplit(myprovide)
4541 mys = string.split(myprovide, "/")
4542 myprovides += [mys[0] + "/" + mys[1]]
4544 except SystemExit, e:
4546 except Exception, e:
4548 print "Check " + self.root+VDB_PATH+"/"+mycpv+"/PROVIDE and USE."
4549 print "Possibly Invalid: " + str(mylines)
4550 print "Exception: "+str(e)
4554 def get_all_provides(self):
4556 for node in self.getallcpv():
4557 for mykey in self.get_provide(node):
4558 if myprovides.has_key(mykey):
4559 myprovides[mykey] += [node]
4561 myprovides[mykey] = [node]
4564 def dep_bestmatch(self,mydep,use_cache=1):
4565 "compatibility method -- all matches, not just visible ones"
4566 #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
4567 mymatch=best(self.dbapi.match(dep_expand(mydep,mydb=self.dbapi),use_cache=use_cache))
4573 def dep_match(self,mydep,use_cache=1):
4574 "compatibility method -- we want to see all matches, not just visible ones"
4575 #mymatch=match(mydep,self.dbapi)
4576 mymatch=self.dbapi.match(mydep,use_cache=use_cache)
def exists_specific(self,cpv):
	"""Return True when the exact cat/pkg-version cpv is installed (per dbapi)."""
	installed = self.dbapi.cpv_exists(cpv)
	return installed
def getallcpv(self):
	"""temporary function, probably to be renamed --- Gets a list of all
	category/package-versions installed on the system."""
	installed = self.dbapi.cpv_all()
	return installed
def getallnodes(self):
	"""new behavior: these are all *unmasked* nodes. There may or may not be
	available masked package for nodes in this nodes list."""
	nodes = self.dbapi.cp_all()
	return nodes
4595 def exists_specific_cat(self,cpv,use_cache=1):
4596 cpv=key_expand(cpv,mydb=self.dbapi,use_cache=use_cache)
4600 mylist=listdir(self.root+VDB_PATH+"/"+a[0],EmptyOnError=1)
4604 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+a[0]+"/"+x)
def getebuildpath(self,fullpackage):
	"""Return the path of the ebuild saved in the vdb for the installed
	cat/pkg-version fullpackage (e.g. "sys-apps/foo-1.0").

	Fix: the old tuple-unpack bound an unused local for the category (and
	raised ValueError on any extra '/'); only the package part is needed.
	"""
	package = fullpackage.split("/")[1]
	return self.root + VDB_PATH + "/" + fullpackage + "/" + package + ".ebuild"
4614 def getnode(self,mykey,use_cache=1):
4615 mykey=key_expand(mykey,mydb=self.dbapi,use_cache=use_cache)
4618 mysplit=mykey.split("/")
4619 mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
4622 mypsplit=pkgsplit(x)
4624 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
4626 if mypsplit[0]==mysplit[1]:
4627 appendme=[mysplit[0]+"/"+x,[mysplit[0],mypsplit[0],mypsplit[1],mypsplit[2]]]
4628 returnme.append(appendme)
4632 def getslot(self,mycatpkg):
4633 "Get a slot for a catpkg; assume it exists."
4636 myslot=string.join(grabfile(self.root+VDB_PATH+"/"+mycatpkg+"/SLOT"))
4637 except SystemExit, e:
4639 except Exception, e:
4643 def hasnode(self,mykey,use_cache):
4644 """Does the particular node (cat/pkg key) exist?"""
4645 mykey=key_expand(mykey,mydb=self.dbapi,use_cache=use_cache)
4646 mysplit=mykey.split("/")
4647 mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
4649 mypsplit=pkgsplit(x)
4651 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
4653 if mypsplit[0]==mysplit[1]:
# Ordered metadata keys cached per-ebuild in the aux db; the order here must
# match the line order emitted by the ebuild "depend" phase (see aux_get()).
# UNUSED_* slots are reserved padding (see the XXX about yanking them below).
4661 'DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
4662 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
4663 'KEYWORDS', 'INHERITED', 'IUSE', 'CDEPEND',
4664 'PDEPEND', 'PROVIDE', 'EAPI',
4665 'UNUSED_01', 'UNUSED_02', 'UNUSED_03', 'UNUSED_04',
4666 'UNUSED_05', 'UNUSED_06', 'UNUSED_07',
# number of cache keys; used when slicing the "depend" phase output lines
4668 auxdbkeylen=len(auxdbkeys)
# Module-level helper: walk every live portdbapi instance and close/sync its
# caches (loop body elided in this view — presumably calls close_caches()
# on each instance; TODO confirm). Typically invoked at exit.
4670 def close_portdbapi_caches():
4671 for i in portdbapi.portdbapi_instances:
# portdbapi: dbapi implementation backed by an on-disk portage tree (PORTDIR
# plus any PORTDIR_OVERLAY trees). Provides ebuild location lookup, cached
# metadata access (aux_get), fetch-list/digest helpers, and the xmatch()
# visibility/matching machinery. Many interior lines are elided in this view;
# comments below describe only what the visible code shows.
4675 class portdbapi(dbapi):
4676 """this tree will scan a portage directory located at root (passed to init)"""
# class-level registry of every constructed instance, used by
# close_portdbapi_caches() above
4677 portdbapi_instances = []
# Set up settings, GPG manifest verification level, cache modules and the
# per-tree aux db caches. Falls back to cloning the global `settings`
# when no mysettings is passed (branch lines elided).
4679 def __init__(self,porttree_root,mysettings=None):
4680 portdbapi.portdbapi_instances.append(self)
4684 self.mysettings = mysettings
4686 self.mysettings = config(clone=settings)
# manifest/GPG state: verification level, verifier object, and caches of
# already-verified / known-missing Manifest files
4688 self.manifestVerifyLevel = None
4689 self.manifestVerifier = None
4690 self.manifestCache = {} # {location: [stat, md5]}
4691 self.manifestMissingCache = []
# FEATURES escalate the required GPG trust: gpg < strict < severe
4693 if "gpg" in self.mysettings.features:
4694 self.manifestVerifyLevel = portage_gpg.EXISTS
4695 if "strict" in self.mysettings.features:
4696 self.manifestVerifyLevel = portage_gpg.MARGINAL
4697 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
4698 elif "severe" in self.mysettings.features:
4699 self.manifestVerifyLevel = portage_gpg.TRUSTED
4700 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", requireSignedRing=True, minimumTrust=self.manifestVerifyLevel)
4702 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
4704 #self.root=settings["PORTDIR"]
4705 self.porttree_root = porttree_root
# copy (not alias) the configured depcache directory
4707 self.depcachedir = self.mysettings.depcachedir[:]
# optional tmpfs scratch area; cleared if missing or not read/writable
# (the clearing assignments are elided in this view)
4709 self.tmpfs = self.mysettings["PORTAGE_TMPFS"]
4710 if self.tmpfs and not os.path.exists(self.tmpfs):
4712 if self.tmpfs and not os.access(self.tmpfs, os.W_OK):
4714 if self.tmpfs and not os.access(self.tmpfs, os.R_OK):
4717 self.eclassdb = eclass_cache.cache(self.porttree_root, overlays=settings["PORTDIR_OVERLAY"].split())
4720 self.metadbmodule = self.mysettings.load_best_module("portdbapi.metadbmodule")
4722 #if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
# search order: main tree first, then each overlay
4726 self.porttrees=[self.porttree_root]+self.mysettings["PORTDIR_OVERLAY"].split()
4727 self.auxdbmodule = self.mysettings.load_best_module("portdbapi.auxdbmodule")
4730 # XXX: REMOVE THIS ONCE UNUSED_0 IS YANKED FROM auxdbkeys
4732 filtered_auxdbkeys = filter(lambda x: not x.startswith("UNUSED_0"), auxdbkeys)
4733 for x in self.porttrees:
4734 # location, label, auxdbkeys
4735 self.auxdb[x] = self.auxdbmodule(portage_const.DEPCACHE_PATH, x, filtered_auxdbkeys, gid=portage_gid)
# flush every per-tree metadata cache to disk
4737 def close_caches(self):
4738 for x in self.auxdb.keys():
4739 self.auxdb[x].sync()
# drop in-memory cache state (body elided in this view)
4742 def flush_cache(self):
# Return the expected path of the digest file for mycpv:
# <pkgdir>/files/digest-<pv>. Derived from the ebuild location.
4746 def finddigest(self,mycpv):
4748 mydig = self.findname2(mycpv)[0]
4749 mydigs = string.split(mydig, "/")[:-1]
4750 mydig = string.join(mydigs, "/")
4752 mysplit = mycpv.split("/")
# NOTE(review): Python-2-only except syntax; handler body elided
4753 except SystemExit, e:
4757 return mydig+"/files/digest-"+mysplit[-1]
# convenience wrapper: just the ebuild path, without the overlay flag
4759 def findname(self,mycpv):
4760 return self.findname2(mycpv)[0]
# Locate the ebuild for mycpv by probing each configured tree in order;
# returns (path, in_overlay_flag). The overlay-flag bookkeeping between
# these lines is elided in this view.
4762 def findname2(self,mycpv):
4763 "returns file location for this particular package and in_overlay flag"
4766 mysplit=mycpv.split("/")
4768 psplit=pkgsplit(mysplit[1])
4771 for x in self.porttrees:
# NOTE(review): `file` shadows the builtin of the same name
4772 file=x+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
4773 if os.access(file, os.R_OK):
4777 return ret[0], ret[1]
# Central metadata accessor: return the requested auxdbkeys values for
# mycpv, regenerating the cache entry (by running the ebuild "depend"
# phase) when the ebuild mtime or eclass data is stale. Raises KeyError
# when the ebuild cannot be located. Many control-flow lines are elided
# in this view; comments are limited to what is visible.
4782 def aux_get(self, mycpv, mylist):
4783 "stub code for returning auxilliary db information, such as SLOT, DEPEND, etc."
4784 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
4785 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
4786 global auxdbkeys,auxdbkeylen
4788 cat,pkg = string.split(mycpv, "/", 1)
4790 myebuild, mylocation=self.findname2(mycpv)
4793 writemsg("!!! aux_get(): ebuild path for '%(cpv)s' not specified:\n" % {"cpv":mycpv})
4794 writemsg("!!! %s\n" % myebuild)
4795 raise KeyError, "'%(cpv)s' at %(path)s" % {"cpv":mycpv,"path":myebuild}
# --- optional GPG verification of the package Manifest -------------
4797 myManifestPath = string.join(myebuild.split("/")[:-1],"/")+"/Manifest"
4798 if "gpg" in self.mysettings.features:
4800 mys = portage_gpg.fileStats(myManifestPath)
# unchanged since last successful verification -> skip re-verifying
4801 if (myManifestPath in self.manifestCache) and \
4802 (self.manifestCache[myManifestPath] == mys):
4804 elif self.manifestVerifier:
4805 if not self.manifestVerifier.verify(myManifestPath):
4806 # Verification failed the desired level.
4807 raise portage_exception.UntrustedSignature, "Untrusted Manifest: %(manifest)s" % {"manifest":myManifestPath}
# under "severe", guard against the file changing mid-check
4809 if ("severe" in self.mysettings.features) and \
4810 (mys != portage_gpg.fileStats(myManifestPath)):
4811 raise portage_exception.SecurityViolation, "Manifest changed: %(manifest)s" % {"manifest":myManifestPath}
4813 except portage_exception.InvalidSignature, e:
4814 if ("strict" in self.mysettings.features) or \
4815 ("severe" in self.mysettings.features):
4817 writemsg("!!! INVALID MANIFEST SIGNATURE DETECTED: %(manifest)s\n" % {"manifest":myManifestPath})
4818 except portage_exception.MissingSignature, e:
4819 if ("severe" in self.mysettings.features):
4821 if ("strict" in self.mysettings.features):
# warn only once per Manifest path
4822 if myManifestPath not in self.manifestMissingCache:
4823 writemsg("!!! WARNING: Missing signature in: %(manifest)s\n" % {"manifest":myManifestPath})
4824 self.manifestMissingCache.insert(0,myManifestPath)
4825 except (OSError,portage_exception.FileNotFound), e:
4826 if ("strict" in self.mysettings.features) or \
4827 ("severe" in self.mysettings.features):
4828 raise portage_exception.SecurityViolation, "Error in verification of signatures: %(errormsg)s" % {"errormsg":str(e)}
4829 writemsg("!!! Manifest is missing or inaccessable: %(manifest)s\n" % {"manifest":myManifestPath})
# --- cache validity check ------------------------------------------
4832 if os.access(myebuild, os.R_OK):
4833 emtime=os.stat(myebuild)[stat.ST_MTIME]
4835 writemsg("!!! aux_get(): ebuild for '%(cpv)s' does not exist at:\n" % {"cpv":mycpv})
4836 writemsg("!!! %s\n" % myebuild)
4840 mydata = self.auxdb[mylocation][mycpv]
# stale if the ebuild mtime changed or any inherited eclass changed
4841 if emtime != long(mydata.get("_mtime_", 0)):
4843 elif len(mydata.get("_eclasses_", [])) > 0:
4844 doregen = not self.eclassdb.is_eclass_data_valid(mydata["_eclasses_"])
4852 try: del self.auxdb[mylocation][mycpv]
4853 except KeyError: pass
4855 writemsg("auxdb is valid: "+str(not doregen)+" "+str(pkg)+"\n", 2)
# --- regeneration via the ebuild "depend" phase --------------------
4858 writemsg("doregen: %s %s\n" % (doregen,mycpv), 2)
4859 writemsg("Generating cache entry(0) for: "+str(myebuild)+"\n",1)
# scratch file for the depend-phase output: prefer tmpfs if set up
4862 mydbkey = self.tmpfs+"/aux_db_key_temp"
4864 mydbkey = self.depcachedir+"/aux_db_key_temp"
4866 # XXX: Part of the gvisible hack/fix to prevent deadlock
4867 # XXX: through doebuild. Need to isolate this somehow...
4868 self.mysettings.reset()
# NOTE(review): raising a bare string is long-deprecated (removed in
# later Pythons); this should raise a proper Exception subclass.
4871 raise "Lock is already held by me?"
4873 mylock = portage_locks.lockfile(mydbkey, wantnewlockfile=1)
4875 if os.path.exists(mydbkey):
4878 except (IOError, OSError), e:
4879 portage_locks.unlockfile(mylock)
4881 writemsg("Uncaught handled exception: %(exception)s\n" % {"exception":str(e)})
4884 myret=doebuild(myebuild,"depend","/",self.mysettings,dbkey=mydbkey,tree="porttree")
4886 portage_locks.unlockfile(mylock)
4888 #depend returned non-zero exit code...
4889 writemsg(str(red("\naux_get():")+" (0) Error in "+mycpv+" ebuild. ("+str(myret)+")\n"
4890 " Check for syntax error or corruption in the ebuild. (--debug)\n\n"))
# read the depend-phase output back: one line per auxdbkeys entry
4894 mycent=open(mydbkey,"r")
4896 mylines=mycent.readlines()
4899 except (IOError, OSError):
4900 portage_locks.unlockfile(mylock)
4902 writemsg(str(red("\naux_get():")+" (1) Error in "+mycpv+" ebuild.\n"
4903 " Check for syntax error or corruption in the ebuild. (--debug)\n\n"))
4906 portage_locks.unlockfile(mylock)
# map output lines positionally onto auxdbkeys, stripping newlines
4910 for x in range(0,len(mylines)):
4911 if mylines[x][-1] == '\n':
4912 mylines[x] = mylines[x][:-1]
4913 mydata[auxdbkeys[x]] = mylines[x]
# empty/absent EAPI means EAPI 0
4915 if "EAPI" not in mydata or not mydata["EAPI"].strip():
4916 mydata["EAPI"] = "0"
4918 if not eapi_is_supported(mydata["EAPI"]):
4919 # if newer version, wipe everything and negate eapi
4920 eapi = mydata["EAPI"]
4922 map(lambda x:mydata.setdefault(x, ""), auxdbkeys)
# a leading "-" marks the cached entry as unsupported-EAPI
4923 mydata["EAPI"] = "-"+eapi
# INHERITED is normalized into eclass-data form before caching
4925 if mydata.get("INHERITED", False):
4926 mydata["_eclasses_"] = self.eclassdb.get_eclass_data(mydata["INHERITED"].split())
4928 mydata["_eclasses_"] = {}
4930 del mydata["INHERITED"]
4932 mydata["_mtime_"] = emtime
4934 self.auxdb[mylocation][mycpv] = mydata
4936 #finally, we look at our internal cache entry and return the requested data.
# INHERITED is reconstructed from the cached _eclasses_ mapping
4939 if x == "INHERITED":
4940 returnme.append(' '.join(mydata.get("_eclasses_", {}).keys()))
4942 returnme.append(mydata.get(x,""))
# empty EAPI in the result is presented as "0"
4944 if "EAPI" in mylist:
4945 idx = mylist.index("EAPI")
4946 if not returnme[idx]:
# Evaluate SRC_URI under the given USE flags (or the configured USE) and
# return [uris, filenames]; filenames are de-duplicated basenames.
4951 def getfetchlist(self,mypkg,useflags=None,mysettings=None,all=0):
4952 if mysettings == None:
4953 mysettings = self.mysettings
4955 myuris = self.aux_get(mypkg,["SRC_URI"])[0]
4956 except (IOError,KeyError):
4957 print red("getfetchlist():")+" aux_get() error reading "+mypkg+"; aborting."
4960 if useflags is None:
4961 useflags = string.split(mysettings["USE"])
4963 myurilist = portage_dep.paren_reduce(myuris)
4964 myurilist = portage_dep.use_reduce(myurilist,uselist=useflags,matchall=all)
4965 newuris = flatten(myurilist)
4969 mya = os.path.basename(x)
4970 if not mya in myfiles:
4972 return [newuris, myfiles]
# Return {filename: size} for distfiles that still need downloading,
# based on the digest file; None-ish on digest problems (elided branch).
4974 def getfetchsizes(self,mypkg,useflags=None,debug=0):
4975 # returns a filename:size dictionnary of remaining downloads
4976 mydigest=self.finddigest(mypkg)
4977 checksums=digestParseFile(mydigest)
4979 if debug: print "[empty/missing/bad digest]: "+mypkg
4982 if useflags == None:
4983 myuris, myfiles = self.getfetchlist(mypkg,all=1)
4985 myuris, myfiles = self.getfetchlist(mypkg,useflags=useflags)
4986 #XXX: maybe this should be improved: take partial downloads
4987 # into account? check checksums?
4988 for myfile in myfiles:
4989 if debug and myfile not in checksums.keys():
4990 print "[bad digest]: missing",myfile,"for",mypkg
4991 elif myfile in checksums.keys():
4992 distfile=settings["DISTDIR"]+"/"+myfile
# only files not already present in DISTDIR count as remaining
4993 if not os.access(distfile, os.R_OK):
4994 filesdict[myfile]=int(checksums[myfile]["size"])
# Verify already-downloaded distfiles against digest checksums; collects
# per-file failure reasons (return path elided in this view).
4997 def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False):
5000 useflags = mysettings["USE"].split()
5001 myuri, myfiles = self.getfetchlist(mypkg, useflags=useflags, mysettings=mysettings, all=all)
5002 mydigest = self.finddigest(mypkg)
5003 mysums = digestParseFile(mydigest)
5007 if not mysums or x not in mysums:
5009 reason = "digest missing"
5011 ok,reason = portage_checksum.verify_all(self.mysettings["DISTDIR"]+"/"+x, mysums[x])
5013 failures[x] = reason
# Total byte count of remaining downloads; kept for compatibility,
# superseded by getfetchsizes().
5018 def getsize(self,mypkg,useflags=None,debug=0):
5019 # returns the total size of remaining downloads
5021 # we use getfetchsizes() now, so this function would be obsoleted
5023 filesdict=self.getfetchsizes(mypkg,useflags=useflags,debug=debug)
5025 return "[empty/missing/bad digest]"
5027 for myfile in filesdict.keys():
5028 mysum+=filesdict[myfile]
# True if an ebuild file for mykey exists on disk, ignoring masking.
5031 def cpv_exists(self,mykey):
5032 "Tells us whether an actual ebuild exists on disk (no masking)"
5033 cps2=mykey.split("/")
5034 cps=catpkgsplit(mykey,silent=0)
5038 if self.findname(cps[0]+"/"+cps2[1]):
# cp_all: every cat/pkg key found under any configured tree.
5044 "returns a list of all keys in our tree"
5046 for x in self.mysettings.categories:
5047 for oroot in self.porttrees:
5048 for y in listdir(oroot+"/"+x,EmptyOnError=1,ignorecvs=1,dirsonly=1):
# p_list: package-version names (no category) for one cat/pkg.
5054 def p_list(self,mycp):
5056 for oroot in self.porttrees:
5057 for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
5058 if x[-7:]==".ebuild":
# cp_list: full cpv keys for one cat/pkg, de-duplicated across trees
# via a dict used as a set.
5062 def cp_list(self,mycp,use_cache=1):
5063 mysplit=mycp.split("/")
5065 for oroot in self.porttrees:
5066 for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
5067 if x[-7:]==".ebuild":
5068 d[mysplit[0]+"/"+x[:-7]] = None
# xcache levels primed when the tree is frozen (freeze/melt lines elided)
5072 for x in ["list-visible","bestmatch-visible","match-visible","match-all"]:
# Caching dependency-match dispatcher. When frozen, results for the
# cacheable levels are memoized in self.xcache.
5080 def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None):
5081 "caching match function; very trick stuff"
5082 #if no updates are being made to the tree, we can consult our xcache...
5085 return self.xcache[level][origdep]
5090 #this stuff only runs on first call of xmatch()
5091 #create mydep, mykey from origdep
5092 mydep=dep_expand(origdep,mydb=self)
5093 mykey=dep_getkey(mydep)
5095 if level=="list-visible":
5096 #a list of all visible packages, not called directly (just by xmatch())
5097 #myval=self.visible(self.cp_list(mykey))
5098 myval=self.gvisible(self.visible(self.cp_list(mykey)))
5099 elif level=="bestmatch-visible":
5100 #dep match -- best match of all visible packages
5101 myval=best(self.xmatch("match-visible",None,mydep=mydep,mykey=mykey))
5102 #get all visible matches (from xmatch()), then choose the best one
5103 elif level=="bestmatch-list":
5104 #dep match -- find best match but restrict search to sublist
5105 myval=best(match_from_list(mydep,mylist))
5106 #no point is calling xmatch again since we're not caching list deps
5107 elif level=="match-list":
5108 #dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
5109 myval=match_from_list(mydep,mylist)
5110 elif level=="match-visible":
5111 #dep match -- find all visible matches
5112 myval=match_from_list(mydep,self.xmatch("list-visible",None,mydep=mydep,mykey=mykey))
5113 #get all visible packages, then get the matching ones
5114 elif level=="match-all":
5115 #match *all* visible *and* masked packages
5116 myval=match_from_list(mydep,self.cp_list(mykey))
5118 print "ERROR: xmatch doesn't handle",level,"query!"
# list-restricted levels are never cached (mylist varies per call)
5120 if self.frozen and (level not in ["match-list","bestmatch-list"]):
5121 self.xcache[level][mydep]=myval
# standard dbapi match(): visible matches only
5124 def match(self,mydep,use_cache=1):
5125 return self.xmatch("match-visible",mydep)
# Filter a same-cat/pkg cpv list through package.mask / package.unmask
# and the profile packages file; returns the surviving entries.
5127 def visible(self,mylist):
5128 """two functions in one. Accepts a list of cpv values and uses the package.mask *and*
5129 packages file to remove invisible entries, returning remaining items. This function assumes
5130 that all entries in mylist have the same category and package name."""
5131 if (mylist==None) or (len(mylist)==0):
5134 #first, we mask out packages in the package.mask file
5136 cpv=catpkgsplit(mykey)
5139 print "visible(): invalid cat/pkg-v:",mykey
5141 mycp=cpv[0]+"/"+cpv[1]
5142 maskdict=self.mysettings.pmaskdict
5143 unmaskdict=self.mysettings.punmaskdict
# NOTE(review): has_key() is Python-2-only; `x in maskdict` is preferred
5144 if maskdict.has_key(mycp):
5145 for x in maskdict[mycp]:
5146 mymatches=self.xmatch("match-all",x)
5148 #error in package.mask file; print warning and continue:
5149 print "visible(): package.mask entry \""+x+"\" is invalid, ignoring..."
# package.unmask entries rescue otherwise-masked cpvs
5153 if unmaskdict.has_key(mycp):
5154 for z in unmaskdict[mycp]:
5155 mymatches_unmask=self.xmatch("match-all",z)
5156 if y in mymatches_unmask:
# profile "packages" file (system/profile restrictions) second pass
5165 revmaskdict=self.mysettings.prevmaskdict
5166 if revmaskdict.has_key(mycp):
5167 for x in revmaskdict[mycp]:
5168 #important: only match against the still-unmasked entries...
5169 #notice how we pass "newlist" to the xmatch() call below....
5170 #Without this, ~ deps in the packages files are broken.
5171 mymatches=self.xmatch("match-list",x,mylist=newlist)
5173 #error in packages file; print warning and continue:
5174 print "emerge: visible(): profile packages entry \""+x+"\" is invalid, ignoring..."
5177 while pos<len(newlist):
5178 if newlist[pos] not in mymatches:
# Filter a cpv list by KEYWORDS against the accepted keyword groups
# (ACCEPT_KEYWORDS plus package.keywords overrides); also drops
# entries whose cached EAPI is unsupported.
5184 def gvisible(self,mylist):
5185 "strip out group-masked (not in current group) entries"
5191 pkgdict = self.mysettings.pkeywordsdict
5192 for mycpv in mylist:
5193 #we need to update this next line when we have fully integrated the new db api
5196 keys, eapi = db["/"]["porttree"].dbapi.aux_get(mycpv, ["KEYWORDS", "EAPI"])
5197 except (KeyError,IOError,TypeError):
5201 #print "!!! No KEYWORDS for "+str(mycpv)+" -- Untested Status"
5203 mygroups=keys.split()
# per-package keyword additions from package.keywords
5206 cp = dep_getkey(mycpv)
5207 if pkgdict.has_key(cp):
5208 matches = match_to_list(mycpv, pkgdict[cp].keys())
5209 for atom in matches:
5210 pgroups.extend(pkgdict[cp][atom])
5215 writemsg("--- WARNING: Package '%s' uses '*' keyword.\n" % mycpv)
# a "-kw" entry in the accepted groups explicitly rejects keyword kw
5218 elif "-"+gp in pgroups:
# "~*" accepts any testing keyword, "*" any stable keyword
5228 if not match and ((hastesting and "~*" in pgroups) or (hasstable and "*" in pgroups)):
5230 if match and eapi_is_supported(eapi):
5231 newlist.append(mycpv)
# binarytree: packagetree over the binary-package directory (PKGDIR/All of
# .tbz2 files), optionally augmented with remote packages fetched from
# PORTAGE_BINHOST. Many interior lines are elided in this view; comments
# describe only what the visible code shows.
5234 class binarytree(packagetree):
5235 "this tree scans for a list of all packages available in PKGDIR"
# clone=... shares state with an existing instance rather than copying it
5236 def __init__(self,root,pkgdir,virtual=None,clone=None):
5239 # XXX This isn't cloning. It's an instance of the same thing.
5240 self.root=clone.root
5241 self.pkgdir=clone.pkgdir
5242 self.dbapi=clone.dbapi
5243 self.populated=clone.populated
5244 self.tree=clone.tree
5245 self.remotepkgs=clone.remotepkgs
5246 self.invalids=clone.invalids
5249 #self.pkgdir=settings["PKGDIR"]
5251 self.dbapi=bindbapi(self)
# Apply a "move oldcp newcp" package rename to every matching binary
# package: rewrite the embedded db entries, CATEGORY and ebuild name
# inside the tbz2, then rename the tbz2 file and update the dbapi.
5257 def move_ent(self,mylist):
5258 if not self.populated:
5263 for cp in [origcp,newcp]:
5264 if not (isvalidatom(cp) and isjustname(cp)):
5265 raise portage_exception.InvalidPackageName(cp)
5266 mynewcat=newcp.split("/")[0]
5267 origmatches=self.dbapi.cp_list(origcp)
5270 for mycpv in origmatches:
5272 mycpsplit=catpkgsplit(mycpv)
# rebuild the cpv under the new name, keeping version and revision
5273 mynewcpv=newcp+"-"+mycpsplit[2]
5274 if mycpsplit[3]!="r0":
5275 mynewcpv += "-"+mycpsplit[3]
5276 myoldpkg=mycpv.split("/")[1]
5277 mynewpkg=mynewcpv.split("/")[1]
# refuse to clobber an existing destination tbz2
5279 if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
5280 writemsg("!!! Cannot update binary: Destination exists.\n")
5281 writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n")
5284 tbz2path=self.getname(mycpv)
5285 if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
5286 writemsg("!!! Cannot update readonly binary: "+mycpv+"\n")
5289 #print ">>> Updating data in:",mycpv
# progress marker, one char per updated package
5290 sys.stdout.write("%")
# unpack the xpak segment, patch it, repack it
5292 mytmpdir=settings["PORTAGE_TMPDIR"]+"/tbz2"
5293 mytbz2=xpak.tbz2(tbz2path)
5294 mytbz2.decompose(mytmpdir, cleanup=1)
5296 fixdbentries(origcp, newcp, mytmpdir)
5298 write_atomic(os.path.join(mytmpdir, "CATEGORY"), mynewcat+"\n")
5300 os.rename(mytmpdir+"/"+string.split(mycpv,"/")[1]+".ebuild", mytmpdir+"/"+string.split(mynewcpv, "/")[1]+".ebuild")
# NOTE(review): Python-2-only except syntax; handler bodies elided
5301 except SystemExit, e:
5303 except Exception, e:
5306 mytbz2.recompose(mytmpdir, cleanup=1)
# swap the dbapi registration over to the new cpv
5308 self.dbapi.cpv_remove(mycpv)
5309 if (mynewpkg != myoldpkg):
5310 os.rename(tbz2path,self.getname(mynewcpv))
5311 self.dbapi.cpv_inject(mynewcpv)
# Apply a SLOT move to every binary package matching the given atom:
# rewrite the embedded SLOT file when it still carries the old slot.
5314 def move_slot_ent(self,mylist,mytmpdir):
5315 #mytmpdir=settings["PORTAGE_TMPDIR"]+"/tbz2"
5316 mytmpdir=mytmpdir+"/tbz2"
5317 if not self.populated:
5323 if not isvalidatom(pkg):
5324 raise portage_exception.InvalidAtom(pkg)
5326 origmatches=self.dbapi.match(pkg)
5329 for mycpv in origmatches:
5330 mycpsplit=catpkgsplit(mycpv)
5331 myoldpkg=mycpv.split("/")[1]
5332 tbz2path=self.getname(mycpv)
5333 if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
5334 writemsg("!!! Cannot update readonly binary: "+mycpv+"\n")
5337 #print ">>> Updating data in:",mycpv
5338 mytbz2=xpak.tbz2(tbz2path)
5339 mytbz2.decompose(mytmpdir, cleanup=1)
5341 slot=grabfile(mytmpdir+"/SLOT");
# only rewrite when the package actually carries the origin slot
5345 if (slot[0]!=origslot):
5348 sys.stdout.write("S")
5351 write_atomic(os.path.join(mytmpdir, "SLOT"), newslot+"\n")
5352 mytbz2.recompose(mytmpdir, cleanup=1)
# Replay a batch of "move" update entries against every binary package
# in the tree (used when processing profile update files).
5355 def update_ents(self,mybiglist,mytmpdir):
5356 #XXX mytmpdir=settings["PORTAGE_TMPDIR"]+"/tbz2"
5357 if not self.populated:
5359 for mycpv in self.dbapi.cp_all():
5360 tbz2path=self.getname(mycpv)
5361 if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
5362 writemsg("!!! Cannot update readonly binary: "+mycpv+"\n")
5364 #print ">>> Updating binary data:",mycpv
5366 mytbz2=xpak.tbz2(tbz2path)
5367 mytbz2.decompose(mytmpdir,cleanup=1)
5368 for mylist in mybiglist:
5369 mylist=string.split(mylist)
# only "move" entries are handled here
5370 if mylist[0] != "move":
5372 fixdbentries(mylist[1], mylist[2], mytmpdir)
5373 mytbz2.recompose(mytmpdir,cleanup=1)
# Scan PKGDIR/All (and optionally the remote PORTAGE_BINHOST index) and
# register every valid .tbz2 with the dbapi; bad packages are recorded
# in self.invalids.
5376 def populate(self, getbinpkgs=0,getbinpkgsonly=0):
5377 "populates the binarytree"
5378 if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
5380 if (not os.path.isdir(self.pkgdir+"/All") and not getbinpkgs):
# --- local packages -----------------------------------------------
5383 if (not getbinpkgsonly) and os.path.exists(self.pkgdir+"/All"):
5384 for mypkg in listdir(self.pkgdir+"/All"):
5385 if mypkg[-5:]!=".tbz2":
5387 mytbz2=xpak.tbz2(self.pkgdir+"/All/"+mypkg)
# the embedded CATEGORY file is required to form the full cpv
5388 mycat=mytbz2.getfile("CATEGORY")
5390 #old-style or corrupt package
5391 writemsg("!!! Invalid binary package: "+mypkg+"\n")
5392 self.invalids.append(mypkg)
5394 mycat=string.strip(mycat)
5395 fullpkg=mycat+"/"+mypkg[:-5]
5396 mykey=dep_getkey(fullpkg)
5398 # invalid tbz2's can hurt things.
5399 self.dbapi.cpv_inject(fullpkg)
5400 except SystemExit, e:
# --- remote packages ----------------------------------------------
5405 if getbinpkgs and not settings["PORTAGE_BINHOST"]:
5406 writemsg(red("!!! PORTAGE_BINHOST unset, but use is requested.\n"))
5408 if getbinpkgs and settings["PORTAGE_BINHOST"] and not self.remotepkgs:
5410 chunk_size = long(settings["PORTAGE_BINHOST_CHUNKSIZE"])
5413 except SystemExit, e:
5418 writemsg(green("Fetching binary packages info...\n"))
5419 self.remotepkgs = getbinpkg.dir_get_metadata(settings["PORTAGE_BINHOST"], chunk_size=chunk_size)
5420 writemsg(green(" -- DONE!\n\n"))
5422 for mypkg in self.remotepkgs.keys():
5423 if not self.remotepkgs[mypkg].has_key("CATEGORY"):
5424 #old-style or corrupt package
5425 writemsg("!!! Invalid remote binary package: "+mypkg+"\n")
5426 del self.remotepkgs[mypkg]
5428 mycat=string.strip(self.remotepkgs[mypkg]["CATEGORY"])
5429 fullpkg=mycat+"/"+mypkg[:-5]
5430 mykey=dep_getkey(fullpkg)
5432 # invalid tbz2's can hurt things.
5433 #print "cpv_inject("+str(fullpkg)+")"
5434 self.dbapi.cpv_inject(fullpkg)
5435 #print " -- Injected"
5436 except SystemExit, e:
5439 writemsg("!!! Failed to inject remote binary package:"+str(fullpkg)+"\n")
5440 del self.remotepkgs[mypkg]
# register one cpv with the backing dbapi
5444 def inject(self,cpv):
5445 return self.dbapi.cpv_inject(cpv)
# exact-version existence check via an "=" atom match
5447 def exists_specific(self,cpv):
5448 if not self.populated:
5450 return self.dbapi.match(dep_expand("="+cpv,mydb=self.dbapi))
# best match over ALL packages (masked included); debug-traced
5452 def dep_bestmatch(self,mydep):
5453 "compatibility method -- all matches, not just visible ones"
5454 if not self.populated:
5457 writemsg("mydep: %s\n" % mydep, 1)
5458 mydep=dep_expand(mydep,mydb=self.dbapi)
5459 writemsg("mydep: %s\n" % mydep, 1)
5460 mykey=dep_getkey(mydep)
5461 writemsg("mykey: %s\n" % mykey, 1)
5462 mymatch=best(match_from_list(mydep,self.dbapi.cp_list(mykey)))
5463 writemsg("mymatch: %s\n" % mymatch, 1)
# path of the .tbz2 under PKGDIR/All for a cpv or bare package name
5468 def getname(self,pkgname):
5469 "returns file location for this particular package"
5470 mysplit=string.split(pkgname,"/")
5472 return self.pkgdir+"/All/"+self.resolve_specific(pkgname)+".tbz2"
5474 return self.pkgdir+"/All/"+mysplit[1]+".tbz2"
# remote = known on the BINHOST index and not yet present locally
5476 def isremote(self,pkgname):
5477 "Returns true if the package is kept remotely."
5478 mysplit=string.split(pkgname,"/")
5479 remote = (not os.path.exists(self.getname(pkgname))) and self.remotepkgs.has_key(mysplit[1]+".tbz2")
# USE flags recorded in the package: remote metadata or embedded xpak
5482 def get_use(self,pkgname):
5483 mysplit=string.split(pkgname,"/")
5484 if self.isremote(pkgname):
5485 return string.split(self.remotepkgs[mysplit[1]+".tbz2"]["USE"][:])
5486 tbz2=xpak.tbz2(self.getname(pkgname))
5487 return string.split(tbz2.getfile("USE"))
# Download the .tbz2 from PORTAGE_BINHOST into PKGDIR/All when it is
# not already local; resumes partial downloads via RESUMECOMMAND.
5489 def gettbz2(self,pkgname):
5490 "fetches the package from a remote site, if necessary."
5491 print "Fetching '"+str(pkgname)+"'"
5492 mysplit = string.split(pkgname,"/")
5493 tbz2name = mysplit[1]+".tbz2"
5494 if not self.isremote(pkgname):
5495 if (tbz2name not in self.invalids):
5498 writemsg("Resuming download of this tbz2, but it is possible that it is corrupt.\n")
5499 mydest = self.pkgdir+"/All/"
# NOTE(review): Python-2 octal literal (0775)
5501 os.makedirs(mydest, 0775)
5502 except SystemExit, e:
5506 return getbinpkg.file_get(settings["PORTAGE_BINHOST"]+"/"+tbz2name, mydest, fcmd=settings["RESUMECOMMAND"])
# SLOT from the binary package's cached metadata; broad except (handler
# bodies elided) presumably falls back on error — TODO confirm.
5508 def getslot(self,mycatpkg):
5509 "Get a slot for a catpkg; assume it exists."
5512 myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
5513 except SystemExit, e:
5515 except Exception, e:
# dblink: interface to one installed package's VDB entry under
# ROOT + VDB_PATH + "/cat/pkg". (The `class dblink:` header precedes this
# excerpt.)
5520 "this class provides an interface to the standard text package database"
# Set up all VDB paths for cat/pkg under myroot; the entry itself may or
# may not exist on disk yet. A "-MERGING-" sibling directory is used as
# the staging entry while a merge is in progress.
5521 def __init__(self,cat,pkg,myroot,mysettings,treetype=None):
5522 "create a dblink object for cat/pkg. This dblink entry may or may not exist"
5525 self.mycpv = self.cat+"/"+self.pkg
5526 self.mysplit = pkgsplit(self.mycpv)
5527 self.treetype = treetype
5529 self.dbroot = os.path.normpath(myroot+VDB_PATH)
5530 self.dbcatdir = self.dbroot+"/"+cat
5531 self.dbpkgdir = self.dbcatdir+"/"+pkg
5532 self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
# dbdir starts pointed at the final entry; presumably re-pointed at
# dbtmpdir during merges — TODO confirm against elided code
5533 self.dbdir = self.dbpkgdir
# lock handles plus a reentrancy counter for lockdb()/unlockdb()
5535 self.lock_pkg = None
5536 self.lock_tmp = None
5537 self.lock_num = 0 # Count of the held locks on the db.
5539 self.settings = mysettings
# legacy calling convention: settings==1 triggers a fallback (elided)
5540 if self.settings==1:
5544 self.updateprotect()
5545 self.contentscache=[]
# lockdb()/unlockdb() bodies (the def lines are elided in this excerpt):
# reentrant locking of both the final entry dir and the -MERGING- staging
# dir; the real lock is taken/released only at nesting depth zero, with
# lock_num presumably incremented/decremented around these (TODO confirm).
5548 if self.lock_num == 0:
5549 self.lock_pkg = portage_locks.lockdir(self.dbpkgdir)
5550 self.lock_tmp = portage_locks.lockdir(self.dbtmpdir)
# unlock in reverse acquisition order
5555 if self.lock_num == 0:
5556 portage_locks.unlockdir(self.lock_tmp)
5557 portage_locks.unlockdir(self.lock_pkg)
# getpath()/exists()/create() bodies (def lines elided in this excerpt).
5560 "return path to location of db information (for >>> informational display)"
5564 "does the db entry exist? boolean."
5565 return os.path.exists(self.dbdir)
# create(): dead code kept only as a tombstone — it raises immediately.
5568 "create the skeleton db directory structure. No contents, virtuals, provides or anything. Also will create /var/db/pkg if necessary."
5569 # XXXXX Delete this eventually
# NOTE(review): Python-2-only `raise Exception, "..."` syntax; the lines
# below it are unreachable.
5570 raise Exception, "This is bad. Don't use it."
5571 if not os.path.exists(self.dbdir):
5572 os.makedirs(self.dbdir)
# delete() body (def line elided): remove every file in the entry dir and
# then the dir itself; a non-empty subdirectory makes the rmdir fail, in
# which case the user is told how to finish the removal by hand.
5575 "erase this db entry completely"
5576 if not os.path.exists(self.dbdir):
5579 for x in listdir(self.dbdir):
5580 os.unlink(self.dbdir+"/"+x)
5581 os.rmdir(self.dbdir)
# NOTE(review): Python-2 print statements
5583 print "!!! Unable to remove db entry for this package."
5584 print "!!! It is possible that a directory is in this one. Portage will still"
5585 print "!!! register this package as installed as long as this directory exists."
5586 print "!!! You may delete this directory with 'rm -Rf "+self.dbdir+"'"
# Remove the CONTENTS manifest from this VDB entry, if present.
# NOTE(review): does not reset self.contentscache, so a previously cached
# contents dict may survive the deletion — verify against callers.
5591 def clearcontents(self):
5592 if os.path.exists(self.dbdir+"/CONTENTS"):
5593 os.unlink(self.dbdir+"/CONTENTS")
# Parse this entry's CONTENTS file into {path: [type, ...]} and memoize it
# in self.contentscache. Value layout per type (from the visible code):
# obj -> [type, mtime, md5], sym -> [type, mtime, dest],
# dir/dev/fif -> [type]. Paths are rebased under the global `root` so
# removal works on non-/ ROOTs. Corrupt lines are reported and skipped.
5595 def getcontents(self):
5596 if not os.path.exists(self.dbdir+"/CONTENTS"):
# serve the memoized parse when available
5598 if self.contentscache != []:
5599 return self.contentscache
5601 myc=open(self.dbdir+"/CONTENTS","r")
5602 mylines=myc.readlines()
5605 for line in mylines:
5606 mydat = string.split(line)
5607 # we do this so we can remove from non-root filesystems
5608 # (use the ROOT var to allow maintenance on other partitions)
5610 mydat[1]=os.path.normpath(root+mydat[1][1:])
5612 #format: type, mtime, md5sum
# path may contain spaces: everything between the type token and the
# trailing two fields is rejoined as the filename
5613 pkgfiles[string.join(mydat[1:-2]," ")]=[mydat[0], mydat[-1], mydat[-2]]
5614 elif mydat[0]=="dir":
5616 pkgfiles[string.join(mydat[1:])]=[mydat[0] ]
5617 elif mydat[0]=="sym":
5618 #format: type, mtime, dest
# repair legacy malformed symlink lines before splitting on "->"
5620 if (x >= 13) and (mydat[-1][-1]==')'): # Old/Broken symlink entry
5621 mydat = mydat[:-10]+[mydat[-10:][stat.ST_MTIME][:-1]]
5622 writemsg("FIXED SYMLINK LINE: %s\n" % mydat, 1)
5632 pkgfiles[string.join(mydat[1:splitter]," ")]=[mydat[0], mydat[-1], string.join(mydat[(splitter+1):-1]," ")]
5633 elif mydat[0]=="dev":
5635 pkgfiles[string.join(mydat[1:]," ")]=[mydat[0] ]
5636 elif mydat[0]=="fif":
5638 pkgfiles[string.join(mydat[1:]," ")]=[mydat[0]]
5641 except (KeyError,IndexError):
5642 print "portage: CONTENTS line",pos,"corrupt!"
5644 self.contentscache=pkgfiles
# Rebuild self.protect / self.protectmask from CONFIG_PROTECT and
# CONFIG_PROTECT_MASK: each configured path is rebased under myroot,
# normalized with a trailing "/", and kept only if it is an existing
# directory. Consumed by isprotected() below.
5647 def updateprotect(self):
5648 #do some config file management prep
5650 for x in string.split(self.settings["CONFIG_PROTECT"]):
5651 ppath=normalize_path(self.myroot+x)+"/"
5652 if os.path.isdir(ppath):
5653 self.protect.append(ppath)
5656 for x in string.split(self.settings["CONFIG_PROTECT_MASK"]):
5657 ppath=normalize_path(self.myroot+x)+"/"
5658 if os.path.isdir(ppath):
5659 self.protectmask.append(ppath)
5660 #if it doesn't exist, silently skip it
# Longest-prefix contest between protect and protect-mask directories:
# obj is protected iff the longest matching CONFIG_PROTECT prefix beats
# the longest matching CONFIG_PROTECT_MASK prefix (mask wins ties, per
# the >= below). `protected`/`masked` hold the winning prefix lengths
# (their initializations are elided in this view).
5662 def isprotected(self,obj):
5663 """Checks if obj is in the current protect/mask directories. Returns
5664 0 on unprotected/masked, and 1 on protected."""
5667 for ppath in self.protect:
5668 if (len(ppath) > masked) and (obj[0:len(ppath)]==ppath):
5669 protected=len(ppath)
5670 #config file management
5671 for pmpath in self.protectmask:
5672 if (len(pmpath) >= protected) and (obj[0:len(pmpath)]==pmpath):
5673 #skip, it's in the mask
5675 return (protected > masked)
# Remove this installed package from the filesystem and the VDB:
# 1. run the ebuild prerm phase,
# 2. walk CONTENTS and delete each object that still matches its
# recorded mtime/md5 (symlinks second-to-last, dirs last),
# 3. drop the package from the vartree db and the world file,
# 4. run the postrm phase.
# Many interior lines (try headers, unlink calls, counters) are elided in
# this view; comments describe only what is visible.
5677 def unmerge(self,pkgfiles=None,trimworld=1,cleanup=0):
5683 self.settings.load_infodir(self.dbdir)
5686 print "No package files given... Grabbing a set."
5687 pkgfiles=self.getcontents()
5689 # Now, don't assume that the name of the ebuild is the same as the
5690 # name of the dir; the package may have been moved.
5693 # We should use the environement file if possible,
5694 # as it has all sourced files already included.
5695 # XXX: Need to ensure it doesn't overwrite any important vars though.
5696 if os.access(self.dbdir+"/environment.bz2", os.R_OK):
5697 spawn("bzip2 -d "+self.dbdir+"/environment.bz2",self.settings,free=1)
# fall back to locating the stored *.ebuild inside the db entry
5699 if not myebuildpath:
5700 mystuff=listdir(self.dbdir,EmptyOnError=1)
5702 if x[-7:]==".ebuild":
5703 myebuildpath=self.dbdir+"/"+x
# --- phase: prerm ---------------------------------------------------
5707 if myebuildpath and os.path.exists(myebuildpath):
5708 a=doebuild(myebuildpath,"prerm",self.myroot,self.settings,cleanup=cleanup,use_cache=0,tree=self.treetype)
5709 # XXX: Decide how to handle failures here.
5711 writemsg("!!! FAILED prerm: "+str(a)+"\n")
# --- phase: file removal -------------------------------------------
5715 mykeys=pkgfiles.keys()
5719 self.updateprotect()
5721 #process symlinks second-to-last, directories last.
5723 modprotect="/lib/modules/"
5724 for objkey in mykeys:
5725 obj=os.path.normpath(objkey)
5730 statobj = os.stat(obj)
5735 lstatobj = os.lstat(obj)
5736 except (OSError, AttributeError):
5738 islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
5741 #we skip this if we're dealing with a symlink
5742 #because os.stat() will operate on the
5743 #link target rather than the link itself.
5744 writemsg_stdout("--- !found "+str(pkgfiles[objkey][0])+ " %s\n" % obj)
5746 # next line includes a tweak to protect modules from being unmerged,
5747 # but we don't protect modules from being overwritten if they are
5748 # upgraded. We effectively only want one half of the config protection
5749 # functionality for /lib/modules. For portage-ng both capabilities
5750 # should be able to be independently specified.
5751 if self.isprotected(obj) or ((len(obj) > len(modprotect)) and (obj[0:len(modprotect)]==modprotect)):
5752 writemsg_stdout("--- cfgpro %s %s\n" % (pkgfiles[objkey][0], obj))
# mtime guard: only remove files unchanged since they were merged
5755 lmtime=str(lstatobj[stat.ST_MTIME])
5756 if (pkgfiles[objkey][0] not in ("dir","fif","dev")) and (lmtime != pkgfiles[objkey][1]):
5757 writemsg_stdout("--- !mtime %s %s\n" % (pkgfiles[objkey][0], obj))
# dispatch per CONTENTS entry type: dir/sym/obj/fif/dev
5760 if pkgfiles[objkey][0]=="dir":
5761 if statobj is None or not stat.S_ISDIR(statobj.st_mode):
5762 writemsg_stdout("--- !dir %s %s\n" % ("dir", obj))
5765 elif pkgfiles[objkey][0]=="sym":
5767 writemsg_stdout("--- !sym %s %s\n" % ("sym", obj))
5771 writemsg_stdout("<<< %s %s\n" % ("sym",obj))
5772 except (OSError,IOError),e:
5773 writemsg_stdout("!!! %s %s\n" % ("sym",obj))
5774 elif pkgfiles[objkey][0]=="obj":
5775 if statobj is None or not stat.S_ISREG(statobj.st_mode):
5776 writemsg_stdout("--- !obj %s %s\n" % ("obj", obj))
5780 mymd5 = portage_checksum.perform_md5(obj, calc_prelink=1)
5781 except portage_exception.FileNotFound, e:
5782 # the file has disappeared between now and our stat call
5783 writemsg_stdout("--- !obj %s %s\n" % ("obj", obj))
5786 # string.lower is needed because db entries used to be in upper-case. The
5787 # string.lower allows for backwards compatibility.
# md5 guard: never remove a file whose content changed since merge
5788 if mymd5 != string.lower(pkgfiles[objkey][2]):
5789 writemsg_stdout("--- !md5 %s %s\n" % ("obj", obj))
5793 except (OSError,IOError),e:
5795 writemsg_stdout("<<< %s %s\n" % ("obj",obj))
5796 elif pkgfiles[objkey][0]=="fif":
5797 if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
5798 writemsg_stdout("--- !fif %s %s\n" % ("fif", obj))
5802 except (OSError,IOError),e:
5804 writemsg_stdout("<<< %s %s\n" % ("fif",obj))
5805 elif pkgfiles[objkey][0]=="dev":
# device nodes are reported but never removed here
5806 writemsg_stdout("--- %s %s\n" % ("dev",obj))
# directories: removed only when empty; last_non_empty short-circuits
# rmdir attempts on ancestors of a known-non-empty dir
5813 if not last_non_empty.startswith(obj) and not listdir(obj):
5816 writemsg_stdout("<<< %s %s\n" % ("dir",obj))
5819 except (OSError,IOError),e:
5823 writemsg_stdout("--- !empty dir %s\n" % obj)
5824 last_non_empty = obj
# --- phase: db/world cleanup ---------------------------------------
5827 #remove self from vartree database so that our own virtual gets zapped if we're the last node
5828 db[self.myroot]["vartree"].zap(self.mycpv)
5830 # New code to remove stuff from the world and virtuals files when unmerged.
5832 worldlist=grabfile(self.myroot+WORLD_FILE)
5833 mykey=cpv_getkey(self.mycpv)
# drop world atoms that only this package satisfied
5836 if dep_getkey(x)==mykey:
5837 matches=db[self.myroot]["vartree"].dbapi.match(x,use_cache=0)
5839 #zap our world entry
5841 elif (len(matches)==1) and (matches[0]==self.mycpv):
5842 #zap our world entry
5845 #others are around; keep it.
5846 newworldlist.append(x)
5848 #this doesn't match the package we're unmerging; keep it.
5849 newworldlist.append(x)
5851 # if the base dir doesn't exist, create it.
5852 # (spanky noticed bug)
5853 # XXX: dumb question, but abstracting the root uid might be wise/useful for
5854 # 2nd pkg manager installation setups.
# NOTE(review): exists-then-makedirs is TOCTOU-racy, and the mode/chmod
# literals are Python-2 octals (0755 / 02770)
5855 if not os.path.exists(os.path.dirname(self.myroot+WORLD_FILE)):
5856 pdir = os.path.dirname(self.myroot + WORLD_FILE)
5857 os.makedirs(pdir, mode=0755)
5858 os.chown(pdir, 0, portage_gid)
5859 os.chmod(pdir, 02770)
5861 write_atomic(os.path.join(self.myroot,WORLD_FILE),"\n".join(newworldlist))
# --- phase: postrm --------------------------------------------------
5864 if myebuildpath and os.path.exists(myebuildpath):
5865 # XXX: This should be the old config, not the current one.
5866 # XXX: Use vardbapi to load up env vars.
5867 a=doebuild(myebuildpath,"postrm",self.myroot,self.settings,use_cache=0,tree=self.treetype)
5868 # XXX: Decide how to handle failures here.
5870 writemsg("!!! FAILED postrm: "+str(a)+"\n")
# NOTE(review): this rendering has lines elided (embedded line numbers jump);
# comments describe only the visible code.
5875 def isowner(self,filename,destroot):
5876 """ check if filename is a new file or belongs to this package
5877 (for this or a previous version)"""
# Re-root filename under destroot to get the live-filesystem path.
5878 destfile = os.path.normpath(destroot+"/"+filename)
# If the file is absent on the live filesystem it counts as "new"
# (the branch's return value is on an elided line).
5879 if not os.path.exists(destfile):
# Otherwise consult CONTENTS; getcontents() is called twice here and the
# membership test goes through .keys() — both as in the original.
5881 if self.getcontents() and filename in self.getcontents().keys():
# Merge the image in srcroot into destroot, recording everything in the
# vardb entry under construction (dbtmpdir), running preinst/postinst,
# and safely unmerging any previously installed instance.
# NOTE(review): many original lines are elided in this rendering; comments
# below are limited to what the visible code shows.
5886 def treewalk(self,srcroot,destroot,inforoot,myebuild,cleanup=0):
5889 # destroot = where to merge, ie. ${ROOT},
5890 # inforoot = root of db entry,
5891 # secondhand = list of symlinks that have been skipped due to
5892 # their target not existing (will merge later),
5894 if not os.path.exists(self.dbcatdir):
5895 os.makedirs(self.dbcatdir)
5897 # This blocks until we can get the dirs to ourselves.
# Collect the other installed versions of the same category/package.
5901 for v in db[self.myroot]["vartree"].dbapi.cp_list(self.mysplit[0]):
5902 otherversions.append(v.split("/")[1])
5904 # check for package collisions
5905 if "collision-protect" in features:
5906 myfilelist = listdir(srcroot, recursive=1, filesonly=1, followSymlinks=False)
5908 # the linkcheck only works if we are in srcroot
5911 mysymlinks = filter(os.path.islink, listdir(srcroot, recursive=1, filesonly=0, followSymlinks=False))
5912 myfilelist.extend(mysymlinks)
5915 starttime=time.time()
5921 if self.pkg in otherversions:
5922 otherversions.remove(self.pkg) # we already checked this package
# Build dblink objects for every other installed version so ownership
# of colliding files can be tested against each of them.
5924 for v in otherversions:
5925 # should we check for same SLOT here ?
5926 mypkglist.append(dblink(self.cat,v,destroot,self.settings))
5928 print green("*")+" checking "+str(len(myfilelist))+" files for package collisions"
5929 for f in myfilelist:
5931 # listdir isn't intelligent enough to exclude symlinked dirs,
5932 # so we have to do it ourself
5933 for s in mysymlinks:
5934 # the length comparison makes sure that the symlink itself is checked
5935 if f[:len(s)] == s and len(f) > len(s):
5941 print str(i)+" files checked ..."
5945 for ver in [self]+mypkglist:
5946 if (ver.isowner(f, destroot) or ver.isprotected(f)):
5950 print "existing file "+f+" is not owned by this package"
5952 print green("*")+" spent "+str(time.time()-starttime)+" seconds checking for file collisions"
5954 print red("*")+" This package is blocked because it wants to overwrite"
5955 print red("*")+" files belonging to other packages (see messages above)."
5956 print red("*")+" If you have no clue what this is all about report it "
5957 print red("*")+" as a bug for this package on http://bugs.gentoo.org"
5959 print red("package "+self.cat+"/"+self.pkg+" NOT merged")
5961 # Why is the package already merged here db-wise? Shouldn't be the case
5962 # only unmerge if it ia new package and has no contents
5963 if not self.getcontents():
5970 except SystemExit, e:
5976 # get old contents info for later unmerging
5977 oldcontents = self.getcontents()
# Point dbdir at the temporary db entry while the merge is in progress.
5979 self.dbdir = self.dbtmpdir
5981 if not os.path.exists(self.dbtmpdir):
5982 os.makedirs(self.dbtmpdir)
5984 writemsg_stdout(">>> Merging %s %s %s\n" % (self.mycpv,"to",destroot))
5986 # run preinst script
5988 # if we are merging a new ebuild, use *its* pre/postinst rather than using the one in /var/db/pkg
5990 a=doebuild(myebuild,"preinst",root,self.settings,cleanup=cleanup,use_cache=0,tree=self.treetype)
5992 a=doebuild(inforoot+"/"+self.pkg+".ebuild","preinst",root,self.settings,cleanup=cleanup,use_cache=0,tree=self.treetype)
5994 # XXX: Decide how to handle failures here.
5996 writemsg("!!! FAILED preinst: "+str(a)+"\n")
5999 # copy "info" files (like SLOT, CFLAGS, etc.) into the database
6000 for x in listdir(inforoot):
6001 self.copyfile(inforoot+"/"+x)
6003 # get current counter value (counter_tick also takes care of incrementing it)
6004 # XXX Need to make this destroot, but it needs to be initialized first. XXX
6005 # XXX bis: leads to some invalidentry() call through cp_all().
6006 counter = db["/"]["vartree"].dbapi.counter_tick(self.myroot,mycpv=self.mycpv)
6007 # write local package counter for recording
6008 lcfile = open(self.dbtmpdir+"/COUNTER","w")
6009 lcfile.write(str(counter))
6012 # open CONTENTS file (possibly overwriting old one) for recording
6013 outfile=open(self.dbtmpdir+"/CONTENTS","w")
6015 self.updateprotect()
6017 #if we have a file containing previously-merged config file md5sums, grab it.
6018 if os.path.exists(destroot+CONFIG_MEMORY_FILE):
6019 cfgfiledict=grabdict(destroot+CONFIG_MEMORY_FILE)
# NOCONFMEM forces cfg-file memory to be ignored for this merge.
6022 if self.settings.has_key("NOCONFMEM"):
6023 cfgfiledict["IGNORE"]=1
6025 cfgfiledict["IGNORE"]=0
6027 # set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
6028 mymtime = long(time.time())
6029 prevmask = os.umask(0)
6032 # we do a first merge; this will recurse through all files in our srcroot but also build up a
6033 # "second hand" of symlinks to merge later
6034 if self.mergeme(srcroot,destroot,outfile,secondhand,"",cfgfiledict,mymtime):
6037 # now, it's time for dealing our second hand; we'll loop until we can't merge anymore. The rest are
6038 # broken symlinks. We'll merge them too.
# Loop terminates when a pass makes no progress (length unchanged).
6040 while len(secondhand) and len(secondhand)!=lastlen:
6041 # clear the thirdhand. Anything from our second hand that
6042 # couldn't get merged will be added to thirdhand.
6045 self.mergeme(srcroot,destroot,outfile,thirdhand,secondhand,cfgfiledict,mymtime)
6048 lastlen=len(secondhand)
6050 # our thirdhand now becomes our secondhand. It's ok to throw
6051 # away secondhand since thirdhand contains all the stuff that
6052 # couldn't be merged.
6053 secondhand = thirdhand
6056 # force merge of remaining symlinks (broken or circular; oh well)
# secondhand=None puts mergeme into "force" mode (no new second hand).
6057 self.mergeme(srcroot,destroot,outfile,None,secondhand,cfgfiledict,mymtime)
6062 #if we opened it, close it
6066 writemsg_stdout(">>> Safely unmerging already-installed instance...\n")
6067 self.dbdir = self.dbpkgdir
6068 self.unmerge(oldcontents,trimworld=0)
6069 self.dbdir = self.dbtmpdir
6070 writemsg_stdout(">>> Original instance of package unmerged safely.\n")
6072 # We hold both directory locks.
# Promote the temporary db entry to the permanent location.
6073 self.dbdir = self.dbpkgdir
6075 movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
6079 #write out our collection of md5sums
6080 if cfgfiledict.has_key("IGNORE"):
6081 del cfgfiledict["IGNORE"]
6083 # XXXX: HACK! PathSpec is very necessary here.
6084 if not os.path.exists(destroot+PRIVATE_PATH):
6085 os.makedirs(destroot+PRIVATE_PATH)
6086 os.chown(destroot+PRIVATE_PATH,os.getuid(),portage_gid)
6087 os.chmod(destroot+PRIVATE_PATH,02770)
6088 dirlist = prefix_array(listdir(destroot+PRIVATE_PATH),destroot+PRIVATE_PATH+"/")
6091 dirlist.reverse() # Gets them in file-before basedir order
6093 if os.path.isdir(x):
6094 dirlist += prefix_array(listdir(x),x+"/")
6096 os.unlink(destroot+PRIVATE_PATH+"/"+x)
# Write the cfg-file md5 memory under a lock to avoid concurrent writers.
6098 mylock = portage_locks.lockfile(destroot+CONFIG_MEMORY_FILE)
6099 writedict(cfgfiledict,destroot+CONFIG_MEMORY_FILE)
6100 portage_locks.unlockfile(mylock)
6104 # if we are merging a new ebuild, use *its* pre/postinst rather than using the one in /var/db/pkg
6106 a=doebuild(myebuild,"postinst",root,self.settings,use_cache=0,tree=self.treetype)
6108 a=doebuild(inforoot+"/"+self.pkg+".ebuild","postinst",root,self.settings,use_cache=0,tree=self.treetype)
6110 # XXX: Decide how to handle failures here.
6112 writemsg("!!! FAILED postinst: "+str(a)+"\n")
# Detect a downgrade by comparing against every other installed version.
6116 for v in otherversions:
6117 if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
6120 #update environment settings, library paths. DO NOT change symlinks.
6121 env_update(makelinks=(not downgrade))
6122 #dircache may break autoclean because it remembers the -MERGING-pkg file
6124 if dircache.has_key(self.dbcatdir):
6125 del dircache[self.dbcatdir]
6126 writemsg_stdout(">>> %s %s\n" % (self.mycpv,"merged."))
6128 # Process ebuild logfiles
6129 elog_process(self.mycpv, self.settings)
# Merge files from srcroot into destroot, one directory level or one
# explicit file list at a time, writing each merged entry to the open
# CONTENTS file `outfile`. Symlinks whose targets do not exist yet are
# deferred into `secondhand` (unless it is None, which means force mode).
# NOTE(review): this rendering has elided lines; comments cover only the
# visible code.
6133 def mergeme(self,srcroot,destroot,outfile,secondhand,stufftomerge,cfgfiledict,thismtime):
# Normalize both roots and guarantee a single trailing slash.
6134 srcroot=os.path.normpath("///"+srcroot)+"/"
6135 destroot=os.path.normpath("///"+destroot)+"/"
6136 # this is supposed to merge a list of files. There will be 2 forms of argument passing.
6137 if type(stufftomerge)==types.StringType:
6138 #A directory is specified. Figure out protection paths, listdir() it and process it.
6139 mergelist=listdir(srcroot+stufftomerge)
6141 # We need mydest defined up here to calc. protection paths. This is now done once per
6142 # directory rather than once per file merge. This should really help merge performance.
6143 # Trailing / ensures that protects/masks with trailing /'s match.
6144 mytruncpath="/"+offset+"/"
6145 myppath=self.isprotected(mytruncpath)
6147 mergelist=stufftomerge
6150 mysrc=os.path.normpath("///"+srcroot+offset+x)
6151 mydest=os.path.normpath("///"+destroot+offset+x)
6152 # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
6153 myrealdest="/"+offset+x
6154 # stat file once, test using S_* macros many times (faster that way)
6156 mystat=os.lstat(mysrc)
6157 except SystemExit, e:
6161 writemsg(red("!!! ERROR: There appears to be ")+bold("FILE SYSTEM CORRUPTION.")+red(" A file that is listed\n"))
6162 writemsg(red("!!! as existing is not capable of being stat'd. If you are using an\n"))
6163 writemsg(red("!!! experimental kernel, please boot into a stable one, force an fsck,\n"))
6164 writemsg(red("!!! and ensure your filesystem is in a sane state. ")+bold("'shutdown -Fr now'\n"))
6165 writemsg(red("!!! File: ")+str(mysrc)+"\n")
6166 writemsg(red("!!! Error: ")+str(e)+"\n")
6168 except Exception, e:
6170 writemsg(red("!!! ERROR: An unknown error has occurred during the merge process.\n"))
6171 writemsg(red("!!! A stat call returned the following error for the following file:"))
6172 writemsg( "!!! Please ensure that your filesystem is intact, otherwise report\n")
6173 writemsg( "!!! this as a portage bug at bugs.gentoo.org. Append 'emerge info'.\n")
6174 writemsg( "!!! File: "+str(mysrc)+"\n")
6175 writemsg( "!!! Error: "+str(e)+"\n")
6179 mymode=mystat[stat.ST_MODE]
6180 # handy variables; mydest is the target object on the live filesystems;
6181 # mysrc is the source object in the temporary install dir
# lstat the destination too; the except path below means "doesn't exist".
6183 mydmode=os.lstat(mydest)[stat.ST_MODE]
6184 except SystemExit, e:
6187 #dest file doesn't exist
6190 if stat.S_ISLNK(mymode):
6191 # we are merging a symbolic link
6192 myabsto=abssymlink(mysrc)
# Strip the image-dir prefix so the link target is expressed
# relative to the eventual live filesystem.
6193 if myabsto[0:len(srcroot)]==srcroot:
6194 myabsto=myabsto[len(srcroot):]
6197 myto=os.readlink(mysrc)
6198 if self.settings and self.settings["D"]:
6199 if myto.find(self.settings["D"])==0:
6200 myto=myto[len(self.settings["D"]):]
6201 # myrealto contains the path of the real file to which this symlink points.
6202 # we can simply test for existence of this file to see if the target has been merged yet
6203 myrealto=os.path.normpath(os.path.join(destroot,myabsto))
6206 if not stat.S_ISLNK(mydmode):
6207 if stat.S_ISDIR(mydmode):
6208 # directory in the way: we can't merge a symlink over a directory
6209 # we won't merge this, continue with next file...
6211 srctarget = os.path.normpath(os.path.dirname(mysrc)+"/"+myto)
6212 if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
6213 # Kill file blocking installation of symlink to dir #71787
6215 elif self.isprotected(mydest):
6216 # Use md5 of the target in ${D} if it exists...
6217 if os.path.exists(os.path.normpath(srcroot+myabsto)):
6218 mydest = new_protect_filename(myrealdest, newmd5=portage_checksum.perform_md5(srcroot+myabsto))
6220 mydest = new_protect_filename(myrealdest, newmd5=portage_checksum.perform_md5(myabsto))
6222 # if secondhand==None it means we're operating in "force" mode and should not create a second hand.
6223 if (secondhand!=None) and (not os.path.exists(myrealto)):
6224 # either the target directory doesn't exist yet or the target file doesn't exist -- or
6225 # the target is a broken symlink. We will add this file to our "second hand" and merge
6227 secondhand.append(mysrc[len(srcroot):])
6229 # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
6230 mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
6232 print ">>>",mydest,"->",myto
6233 outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
6235 print "!!! Failed to move file."
6236 print "!!!",mydest,"->",myto
6238 elif stat.S_ISDIR(mymode):
6239 # we are merging a directory
6241 # destination exists
6244 # Save then clear flags on dest.
6245 dflags=bsd_chflags.lgetflags(mydest)
6246 if(bsd_chflags.lchflags(mydest, 0)<0):
6247 writemsg("!!! Couldn't clear flags on '"+mydest+"'.\n")
6249 if not os.access(mydest, os.W_OK):
6250 pkgstuff = pkgsplit(self.pkg)
6251 writemsg("\n!!! Cannot write to '"+mydest+"'.\n")
6252 writemsg("!!! Please check permissions and directories for broken symlinks.\n")
6253 writemsg("!!! You may start the merge process again by using ebuild:\n")
6254 writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
6255 writemsg("!!! And finish by running this: env-update\n\n")
6258 if stat.S_ISLNK(mydmode) or stat.S_ISDIR(mydmode):
6259 # a symlink to an existing directory will work for us; keep it:
6260 writemsg_stdout("--- %s/\n" % mydest)
6262 bsd_chflags.lchflags(mydest, dflags)
6264 # a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
6265 if movefile(mydest,mydest+".backup", mysettings=self.settings) == None:
6267 print "bak",mydest,mydest+".backup"
6268 #now create our directory
6270 sid = selinux.get_sid(mysrc)
6271 selinux.secure_mkdir(mydest,sid)
6275 bsd_chflags.lchflags(mydest, dflags)
# Propagate mode/uid/gid from the image directory onto the new one.
6276 os.chmod(mydest,mystat[0])
6277 os.chown(mydest,mystat[4],mystat[5])
6278 writemsg_stdout(">>> %s/\n" % mydest)
6280 #destination doesn't exist
6282 sid = selinux.get_sid(mysrc)
6283 selinux.secure_mkdir(mydest,sid)
6286 os.chmod(mydest,mystat[0])
6288 bsd_chflags.lchflags(mydest, bsd_chflags.lgetflags(mysrc))
6289 os.chown(mydest,mystat[4],mystat[5])
6290 writemsg_stdout(">>> %s/\n" % mydest)
6291 outfile.write("dir "+myrealdest+"\n")
6292 # recurse and merge this directory
6293 if self.mergeme(srcroot,destroot,outfile,secondhand,offset+x+"/",cfgfiledict,thismtime):
6295 elif stat.S_ISREG(mymode):
6296 # we are merging a regular file
6297 mymd5=portage_checksum.perform_md5(mysrc,calc_prelink=1)
6298 # calculate config file protection stuff
6299 mydestdir=os.path.dirname(mydest)
6303 # destination file exists
6304 if stat.S_ISDIR(mydmode):
6305 # install of destination is blocked by an existing directory with the same name
6307 writemsg_stdout("!!! %s\n" % mydest)
6308 elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
6310 # install of destination is blocked by an existing regular file,
6311 # or by a symlink to an existing regular file;
6312 # now, config file management may come into play.
6313 # we only need to tweak mydest if cfg file management is in play.
6315 # we have a protection path; enable config file management.
6316 destmd5=portage_checksum.perform_md5(mydest,calc_prelink=1)
6318 if cfgfiledict.has_key(myrealdest):
6319 if destmd5 in cfgfiledict[myrealdest]:
6322 del cfgfiledict[myrealdest]
6325 #file already in place; simply update mtimes of destination
6326 os.utime(mydest,(thismtime,thismtime))
6330 #mymd5!=destmd5 and we've cycled; move mysrc into place as a ._cfg file
6332 cfgfiledict[myrealdest]=[mymd5]
6334 elif cfgfiledict.has_key(myrealdest) and (mymd5 in cfgfiledict[myrealdest]):
6335 #myd5!=destmd5, we haven't cycled, and the file we're merging has been already merged previously
6337 moveme=cfgfiledict["IGNORE"]
6338 cfgprot=cfgfiledict["IGNORE"]
6340 #mymd5!=destmd5, we haven't cycled, and the file we're merging hasn't been merged before
6343 if not cfgfiledict.has_key(myrealdest):
6344 cfgfiledict[myrealdest]=[]
6345 if mymd5 not in cfgfiledict[myrealdest]:
6346 cfgfiledict[myrealdest].append(mymd5)
6347 # only record the last md5
6348 if len(cfgfiledict[myrealdest])>1:
6349 del cfgfiledict[myrealdest][0]
6352 mydest = new_protect_filename(myrealdest, newmd5=mymd5)
6354 # whether config protection or not, we merge the new file the
6355 # same way. Unless moveme=0 (blocking directory)
6357 mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
6363 # We need to touch the destination so that on --update the
6364 # old package won't yank the file with it. (non-cfgprot related)
6365 os.utime(myrealdest,(thismtime,thismtime))
# Darwin static-archive workaround: rewrite the ar member mtime
# fields in-place so ranlib/ld don't see a stale table of contents.
6367 if self.settings["USERLAND"] == "Darwin" and myrealdest[-2:] == ".a":
6369 # XXX kludge, can be killed when portage stops relying on
6370 # md5+mtime, and uses refcounts
6371 # alright, we've fooled w/ mtime on the file; this pisses off static archives
6372 # basically internal mtime != file's mtime, so the linker (falsely) thinks
6373 # the archive is stale, and needs to have it's toc rebuilt.
6375 myf=open(myrealdest,"r+")
6377 # ar mtime field is digits padded with spaces, 12 bytes.
6378 lms=str(thismtime+5).ljust(12)
6381 if magic != "!<arch>\n":
6382 # not an archive (dolib.a from portage.py makes it here fex)
6385 st=os.stat(myrealdest)
6386 while myf.tell() < st.st_size - 12:
6393 # skip uid/gid/mperm
6396 # read the archive member's size
6397 x=long(myf.read(10))
6399 # skip the trailing newlines, and add the potential
6400 # extra padding byte if it's not an even size
6401 myf.seek(x + 2 + (x % 2),1)
6403 # and now we're at the end. yay.
# Re-md5 after the in-place edit so CONTENTS records the final bytes.
6405 mymd5=portage_checksum.perform_md5(myrealdest,calc_prelink=1)
6406 os.utime(myrealdest,(thismtime,thismtime))
6410 outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
6411 writemsg_stdout("%s %s\n" % (zing,mydest))
6413 # we are merging a fifo or device node
6416 # destination doesn't exist
6417 if movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)!=None:
6419 if stat.S_ISFIFO(mymode):
6420 # we don't record device nodes in CONTENTS,
6421 # although we do merge them.
6422 outfile.write("fif "+myrealdest+"\n")
6425 writemsg_stdout(zing+" "+mydest+"\n")
def merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0):
	"""Merge the image under *mergeroot* into *myroot*.

	Thin wrapper around treewalk(). Note the argument reordering:
	treewalk() takes (srcroot, destroot, inforoot, ...), so inforoot
	and myroot swap positions in the delegated call.
	"""
	return self.treewalk(
		mergeroot, myroot, inforoot, myebuild, cleanup=cleanup)
# NOTE(review): lines are elided in this rendering (e.g. no close() or
# early-return is visible); comments cover only the visible code.
6430 def getstring(self,name):
6431 "returns contents of a file with whitespace converted to spaces"
# Missing db file: the branch body is on an elided line.
6432 if not os.path.exists(self.dbdir+"/"+name):
6434 myfile=open(self.dbdir+"/"+name,"r")
# Split on any whitespace, then re-join with single spaces.
6435 mydata=string.split(myfile.read())
6437 return string.join(mydata," ")
def copyfile(self, fname):
	"""Copy *fname* into this package's db directory, keeping its basename."""
	target = self.dbdir + "/" + os.path.basename(fname)
	shutil.copyfile(fname, target)
# NOTE(review): elided lines here (no close()/return visible); comments
# cover only the visible code.
6442 def getfile(self,fname):
# Missing db file: the branch body is on an elided line.
6443 if not os.path.exists(self.dbdir+"/"+fname):
6445 myfile=open(self.dbdir+"/"+fname,"r")
6446 mydata=myfile.read()
# Write *data* to the named file inside the package's db directory.
# NOTE(review): the write()/close() lines appear elided in this rendering.
6450 def setfile(self,fname,data):
6451 myfile=open(self.dbdir+"/"+fname,"w")
# Read the named db file and return its whitespace-separated tokens
# (the accumulator and return lines are elided in this rendering).
6455 def getelements(self,ename):
6456 if not os.path.exists(self.dbdir+"/"+ename):
6458 myelement=open(self.dbdir+"/"+ename,"r")
6459 mylines=myelement.readlines()
# x[:-1] strips the trailing newline before splitting each line.
6462 for y in string.split(x[:-1]):
# Write one list element per line to the named db file.
# NOTE(review): the loop header and close() appear elided in this rendering.
6467 def setelements(self,mylist,ename):
6468 myelement=open(self.dbdir+"/"+ename,"w")
6470 myelement.write(x+"\n")
def isregular(self):
	"""Return whether this is a regular package.

	A regular package has a CATEGORY file in its db directory; a dblink
	can be virtual *and* regular at the same time.
	"""
	category_file = self.dbdir + "/CATEGORY"
	return os.path.exists(category_file)
# Remove the temporary extraction area for a binary-package merge.
# NOTE(review): origdir is unused in the visible lines — the original
# likely chdir'd back to it on an elided line; verify against upstream.
6477 def cleanup_pkgmerge(mypkg,origdir):
6478 shutil.rmtree(settings["PORTAGE_TMPDIR"]+"/portage-pkg/"+mypkg)
# Also drop the saved build environment, if one was written.
6479 if os.path.exists(settings["PORTAGE_TMPDIR"]+"/portage/"+mypkg+"/temp/environment"):
6480 os.unlink(settings["PORTAGE_TMPDIR"]+"/portage/"+mypkg+"/temp/environment")
# NOTE(review): lines are elided in this rendering; comments cover only
# the visible code.
6483 def pkgmerge(mytbz2,myroot,mysettings):
6484 """will merge a .tbz2 file, returning a list of runtime dependencies
6485 that must be satisfied, or None if there was a merge error. This
6486 code assumes the package exists."""
6487 if mytbz2[-5:]!=".tbz2":
6488 print "!!! Not a .tbz2 file"
# Strip the ".tbz2" suffix to get the package name.
6490 mypkg=os.path.basename(mytbz2)[:-5]
6491 xptbz2=xpak.tbz2(mytbz2)
6493 mycat=xptbz2.getfile("CATEGORY")
6495 print "!!! CATEGORY info missing from info chunk, aborting..."
6498 mycatpkg=mycat+"/"+mypkg
# Layout: <tmp>/portage-pkg/<pkg>/bin/ holds the image,
# .../inf/ holds the xpak metadata (SLOT, the ebuild, etc.).
6499 tmploc=mysettings["PORTAGE_TMPDIR"]+"/portage-pkg/"
6500 pkgloc=tmploc+"/"+mypkg+"/bin/"
6501 infloc=tmploc+"/"+mypkg+"/inf/"
6502 myebuild=tmploc+"/"+mypkg+"/inf/"+os.path.basename(mytbz2)[:-4]+"ebuild"
6503 if os.path.exists(tmploc+"/"+mypkg):
6504 shutil.rmtree(tmploc+"/"+mypkg,1)
6507 writemsg_stdout(">>> Extracting info\n")
6508 xptbz2.unpackinfo(infloc)
6509 # run pkg_setup early, so we can bail out early
6510 # (before extracting binaries) if there's a problem
6514 mysettings.configdict["pkg"]["CATEGORY"] = mycat;
6515 a=doebuild(myebuild,"setup",myroot,mysettings,tree="bintree")
6516 writemsg_stdout(">>> Extracting %s\n" % mypkg)
# Extraction goes through a shell pipeline; mytbz2 is single-quoted.
6517 notok=spawn("bzip2 -dqc -- '"+mytbz2+"' | tar xpf -",mysettings,free=1)
6519 print "!!! Error Extracting",mytbz2
6520 cleanup_pkgmerge(mypkg,origdir)
6523 # the merge takes care of pre/postinst and old instance
6524 # auto-unmerge, virtual/provides updates, etc.
6525 mysettings.load_infodir(infloc)
6526 mylink=dblink(mycat,mypkg,myroot,mysettings,treetype="bintree")
6527 mylink.merge(pkgloc,infloc,myroot,myebuild,cleanup=1)
6529 if not os.path.exists(infloc+"/RDEPEND"):
6532 #get runtime dependencies
6533 a=open(infloc+"/RDEPEND","r")
# Collapse RDEPEND's whitespace into single spaces for the return value.
6534 returnme=string.join(string.split(a.read())," ")
6536 cleanup_pkgmerge(mypkg,origdir)
# --- module-level startup: validate $ROOT and create required dirs ---
# NOTE(review): lines are elided in this rendering; comments cover only
# the visible code.
6540 if os.environ.has_key("ROOT"):
6541 root=os.environ["ROOT"]
# root carries a trailing slash at this point, hence root[:-1] below.
6549 if not os.path.exists(root[:-1]):
6550 writemsg("!!! Error: ROOT "+root+" does not exist. Please correct this.\n")
6551 writemsg("!!! Exiting.\n\n")
6553 elif not os.path.isdir(root[:-1]):
6554 writemsg("!!! Error: ROOT "+root[:-1]+" is not a directory. Please correct this.\n")
6555 writemsg("!!! Exiting.\n\n")
6558 #create tmp and var/tmp if they don't exist; read config
6560 if not os.path.exists(root+"tmp"):
6561 writemsg(">>> "+root+"tmp doesn't exist, creating it...\n")
6562 os.mkdir(root+"tmp",01777)
6563 if not os.path.exists(root+"var/tmp"):
6564 writemsg(">>> "+root+"var/tmp doesn't exist, creating it...\n")
# var may already exist; the except below swallows that case.
6566 os.mkdir(root+"var",0755)
6567 except (OSError,IOError):
6570 os.mkdir(root+"var/tmp",01777)
6571 except SystemExit, e:
6574 writemsg("portage: couldn't create /var/tmp; exiting.\n")
6576 if not os.path.exists(root+"var/lib/portage"):
6577 writemsg(">>> "+root+"var/lib/portage doesn't exist, creating it...\n")
6579 os.mkdir(root+"var",0755)
6580 except (OSError,IOError):
6583 os.mkdir(root+"var/lib",0755)
6584 except (OSError,IOError):
6587 os.mkdir(root+"var/lib/portage",02750)
6588 except SystemExit, e:
6591 writemsg("portage: couldn't create /var/lib/portage; exiting.\n")
6595 #####################################
6596 # Deprecation Checks
6600 if os.path.isdir(PROFILE_PATH):
6601 profiledir = PROFILE_PATH
# Only warn about deprecated profiles when invoked via emerge.
6602 if "PORTAGE_CALLER" in os.environ and os.environ["PORTAGE_CALLER"] == "emerge" and os.access(DEPRECATED_PROFILE_FILE, os.R_OK):
6603 deprecatedfile = open(DEPRECATED_PROFILE_FILE, "r")
6604 dcontent = deprecatedfile.readlines()
6605 deprecatedfile.close()
# First line of the deprecated file names the replacement profile.
6606 newprofile = dcontent[0]
6607 writemsg(red("\n!!! Your current profile is deprecated and not supported anymore.\n"))
6608 writemsg(red("!!! Please upgrade to the following profile if possible:\n"))
6609 writemsg(8*" "+green(newprofile)+"\n")
6610 if len(dcontent) > 1:
6611 writemsg("To upgrade do the following steps:\n")
6612 for myline in dcontent[1:]:
6616 if os.path.exists(USER_VIRTUALS_FILE):
6617 writemsg(red("\n!!! /etc/portage/virtuals is deprecated in favor of\n"))
6618 writemsg(red("!!! /etc/portage/profile/virtuals. Please move it to\n"))
6619 writemsg(red("!!! this new location.\n\n"))
6622 #####################################
6626 # =============================================================================
6627 # =============================================================================
6628 # -----------------------------------------------------------------------------
6629 # We're going to lock the global config to prevent changes, but we need
6630 # to ensure the global settings are right.
6631 settings=config(config_profile_path=PROFILE_PATH,config_incrementals=portage_const.INCREMENTALS)
# Record the master pid and pin it so ebuild env regeneration keeps it.
6634 settings["PORTAGE_MASTER_PID"]=str(os.getpid())
6635 settings.backup_changes("PORTAGE_MASTER_PID")
6636 # We are disabling user-specific bashrc files.
6637 settings["BASH_ENV"] = INVALID_ENV_FILE
6638 settings.backup_changes("BASH_ENV")
6640 # gets virtual package settings
# Deprecated module-level shim; delegates to the global settings object.
# NOTE(review): one line between the def and the writemsg is elided here.
6641 def getvirtuals(myroot):
6643 writemsg("--- DEPRECATED call to getvirtual\n")
6644 return settings.getvirtuals(myroot)
# Build the global virtuals maps and the vartree entries in `db`.
# NOTE(review): lines are elided in this rendering.
6646 def do_vartree(mysettings):
6647 global virts,virts_p
6648 virts=mysettings.getvirtuals("/")
6652 myvkeys=virts.keys()
# virts_p maps the bare package name (no category) to the same targets.
6654 vkeysplit=x.split("/")
6655 if not virts_p.has_key(vkeysplit[1]):
6656 virts_p[vkeysplit[1]]=virts[x]
6657 db["/"]={"virtuals":virts,"vartree":vartree("/",virts)}
# When root != "/", a separate entry is built for the real root.
6659 virts=mysettings.getvirtuals(root)
6660 db[root]={"virtuals":virts,"vartree":vartree(root,virts)}
# --- module-level startup: vartree, portdb, selinux, cache dirs ---
# NOTE(review): lines are elided in this rendering; comments cover only
# the visible code.
6661 #We need to create the vartree first, then load our settings, and then set up our other trees
6663 usedefaults=settings.use_defs
6665 # XXX: This is a circular fix.
6666 #do_vartree(settings)
6667 #settings.loadVirtuals('/')
6668 do_vartree(settings)
6669 #settings.loadVirtuals('/')
6671 settings.reset() # XXX: Regenerate use after we get a vartree -- GLOBAL
6674 # XXX: Might cause problems with root="/" assumptions
6675 portdb=portdbapi(settings["PORTDIR"])
6678 # -----------------------------------------------------------------------------
6679 # =============================================================================
6680 # =============================================================================
# SELinux support is only wired up when USE contains "selinux".
6683 if 'selinux' in settings["USE"].split(" "):
6686 if hasattr(selinux, "enabled"):
6687 selinux_enabled = selinux.enabled
6691 writemsg(red("!!! SELinux not loaded: ")+str(e)+"\n")
6694 writemsg(red("!!! SELinux module not found.")+" Please verify that it was installed.\n")
6696 if selinux_enabled == 0:
# Drop the imported module so later `selinux.` references fail loudly.
6698 del sys.modules["selinux"]
6704 cachedirs=[CACHE_PATH]
6706 cachedirs.append(root+CACHE_PATH)
# Skip cache-dir fixups while sandboxed (no privileged writes allowed).
6707 if not os.environ.has_key("SANDBOX_ACTIVE"):
6708 for cachedir in cachedirs:
6709 if not os.path.exists(cachedir):
6710 os.makedirs(cachedir,0755)
6711 writemsg(">>> "+cachedir+" doesn't exist, creating it...\n")
6712 if not os.path.exists(cachedir+"/dep"):
# NOTE(review): 2755 here is DECIMAL, unlike the octal 0755/02775
# modes nearby — looks like a missing leading 0; verify upstream.
6713 os.makedirs(cachedir+"/dep",2755)
6714 writemsg(">>> "+cachedir+"/dep doesn't exist, creating it...\n")
6716 os.chown(cachedir,uid,portage_gid)
6717 os.chmod(cachedir,0775)
6721 mystat=os.lstat(cachedir+"/dep")
6722 os.chown(cachedir+"/dep",uid,portage_gid)
6723 os.chmod(cachedir+"/dep",02775)
# If group ownership was wrong, fix the whole tree recursively.
6724 if mystat[stat.ST_GID]!=portage_gid:
6725 spawn("chown -R "+str(uid)+":"+str(portage_gid)+" "+cachedir+"/dep",settings,free=1)
6726 spawn("chmod -R u+rw,g+rw "+cachedir+"/dep",settings,free=1)
# Drop one record from the global mtimedb, warning if it is unknown.
# NOTE(review): the `global mtimedb` / deletion / else lines are elided
# in this rendering.
6730 def flushmtimedb(record):
6732 if record in mtimedb.keys():
6734 #print "mtimedb["+record+"] is cleared."
6736 writemsg("Invalid or unset record '"+record+"' in mtimedb.\n")
# --- module-level startup: load (or re-initialize) the pickled mtimedb ---
# NOTE(review): lines are elided in this rendering.
6738 #grab mtimes for eclasses and upgrades
# Fragment of the valid-keys tuple (its delimiters are elided).
6742 "version", "starttime",
6745 mtimedbfile=root+"var/cache/edb/mtimedb"
# find_global=None disables class/function resolution during unpickling,
# so a tampered mtimedb cannot instantiate arbitrary objects.
6747 mypickle=cPickle.Unpickler(open(mtimedbfile))
6748 mypickle.find_global=None
6749 mtimedb=mypickle.load()
# Migrate the legacy "old" key to the current "updates" key.
6750 if mtimedb.has_key("old"):
6751 mtimedb["updates"]=mtimedb["old"]
6753 if mtimedb.has_key("cur"):
6755 except SystemExit, e:
# Unreadable/corrupt mtimedb: start from an empty structure.
6759 mtimedb={"updates":{},"version":"","starttime":0}
6761 for x in mtimedb.keys():
6762 if x not in mtimedbkeys:
6763 writemsg("Deleting invalid mtimedb key: "+str(x)+"\n")
6766 #,"porttree":portagetree(root,virts),"bintree":binarytree(root,virts)}
6767 features=settings["FEATURES"].split()
6769 do_upgrade_packagesmessage=0
# Apply one profiles/updates file (`mykey`): package moves and slot moves
# across the vardb, binary packages, the world file, and the user's
# /etc/portage/package.* files.
# NOTE(review): lines are elided in this rendering; comments cover only
# the visible code.
6770 def do_upgrade(mykey):
6771 global do_upgrade_packagesmessage
6773 writemsg(green("Performing Global Updates: ")+bold(mykey)+"\n")
6774 writemsg("(Could take a couple of minutes if you have a lot of binary packages.)\n")
6775 writemsg(" "+bold(".")+"='update pass' "+bold("*")+"='binary update' "+bold("@")+"='/var/db move'\n"+" "+bold("s")+"='/var/db SLOT move' "+bold("S")+"='binary SLOT move' "+bold("p")+"='update /etc/portage/package.*'\n")
6777 #remove stale virtual entries (mappings for packages that no longer exist)
# User config files that may contain atoms needing rewriting; the same
# names are also checked under profile/.
6781 myxfiles = ["package.mask","package.unmask","package.keywords","package.use"]
6782 myxfiles.extend(prefix_array(myxfiles, "profile/"))
# Any of these that is a directory is expanded to its contained files.
6785 if os.path.isdir(USER_CONFIG_PATH+os.path.sep+x):
6786 recursivefiles.extend([x+os.path.sep+y for y in listdir(USER_CONFIG_PATH+os.path.sep+x, filesonly=1, recursive=1)])
6788 recursivefiles.append(x)
6789 myxfiles = recursivefiles
6792 myfile = open(USER_CONFIG_PATH+os.path.sep+x,"r")
6793 file_contents[x] = myfile.readlines()
# Unreadable files are dropped from the rewrite set.
6796 if file_contents.has_key(x):
6797 del file_contents[x]
6800 worldlist=grabfile("/"+WORLD_FILE)
6801 myupd=grabfile(mykey)
6802 db["/"]["bintree"]=binarytree("/",settings["PKGDIR"],virts)
6803 for myline in myupd:
# Each update line is "move <from> <to>" or "slotmove <atom> <from> <to>".
6804 mysplit=myline.split()
6805 if not len(mysplit):
6807 if mysplit[0]!="move" and mysplit[0]!="slotmove":
6808 writemsg("portage: Update type \""+mysplit[0]+"\" not recognized.\n")
6811 if mysplit[0]=="move" and len(mysplit)!=3:
6812 writemsg("portage: Update command \""+myline+"\" invalid; skipping.\n")
6815 if mysplit[0]=="slotmove" and len(mysplit)!=4:
6816 writemsg("portage: Update command \""+myline+"\" invalid; skipping.\n")
# Progress marker: one dot per processed directive.
6819 sys.stdout.write(".")
6822 if mysplit[0]=="move":
6824 db["/"]["vartree"].dbapi.move_ent(mysplit)
6825 db["/"]["bintree"].move_ent(mysplit)
6826 except portage_exception.InvalidPackageName, e:
6827 writemsg("\nERROR: Malformed update entry '%s'\n" % myline)
6828 myupd.remove(myline) # myupd is used by fixpackages later
6830 #update world entries:
6831 for x in range(0,len(worldlist)):
6832 #update world entries, if any.
6833 worldlist[x]=dep_transform(worldlist[x],mysplit[1],mysplit[2])
6835 #update /etc/portage/packages.*
6836 for x in file_contents:
6837 for mypos in range(0,len(file_contents[x])):
6838 line=file_contents[x][mypos]
# Comments and blank lines are left untouched.
6839 if line[0]=="#" or string.strip(line)=="":
6841 key=dep_getkey(line.split()[0])
# NOTE(review): plain substring replace on the whole line — may also
# hit matching substrings elsewhere on the line; verify upstream intent.
6843 file_contents[x][mypos]=string.replace(line,mysplit[1],mysplit[2])
6845 sys.stdout.write("p")
6848 elif mysplit[0]=="slotmove":
6850 db["/"]["vartree"].dbapi.move_slot_ent(mysplit)
6851 db["/"]["bintree"].move_slot_ent(mysplit,settings["PORTAGE_TMPDIR"]+"/tbz2")
6852 except portage_exception.InvalidAtom, e:
6853 writemsg("\nERROR: Malformed update entry '%s'\n" % myline)
# Write rewritten config files back, honoring CONFIG_PROTECT.
6855 for x in update_files:
6856 mydblink = dblink('','','/',settings)
6857 if mydblink.isprotected(USER_CONFIG_PATH+os.path.sep+x):
6858 updating_file=new_protect_filename(USER_CONFIG_PATH+os.path.sep+x)[0]
6860 updating_file=USER_CONFIG_PATH+os.path.sep+x
6862 write_atomic(updating_file, "".join(file_contents[x]))
6866 # We gotta do the brute force updates for these now.
6867 if (settings["PORTAGE_CALLER"] in ["fixpackages"]) or \
6868 ("fixpackages" in features):
6869 db["/"]["bintree"].update_ents(myupd,settings["PORTAGE_TMPDIR"]+"/tbz2")
6871 do_upgrade_packagesmessage = 1
6874 #update our internal mtime since we processed all our directives.
6875 mtimedb["updates"][mykey]=os.stat(mykey)[stat.ST_MTIME]
6876 write_atomic(WORLD_FILE,"\n".join(worldlist))
# commit_mtimedb(): serialize the in-memory mtimedb dict to disk atomically.
# NOTE(review): this is a line-numbered, non-contiguous listing — gaps in the
# left-hand numbering (6880-6884, 6888, 6890, 6892-6895, 6899) mean statements
# (the enclosing try:, pass/return bodies, mymfn assignment) are missing here.
6879 def commit_mtimedb():
# Stamp the running portage VERSION so a later load can detect stale formats.
6885 	mtimedb["version"]=VERSION
# atomic_ofstream writes to a temp file and renames into place on close, so a
# crash mid-dump cannot leave a truncated mtimedb behind.
# NOTE(review): mymfn is assigned in a missing line — presumably the mtimedb
# file path; confirm against the full source.
6886 	f = atomic_ofstream(mymfn)
# Protocol -1: highest cPickle protocol available (binary).
6887 	cPickle.dump(mtimedb, f, -1)
# SystemExit is caught separately so interpreter shutdown / Ctrl-C is not
# swallowed by the broad Exception handler below (Python 2 comma syntax).
6889 	except SystemExit, e:
6891 	except Exception, e:
# Best-effort permission fixup: group-own the file by portage and make it
# group-writable so non-root portage invocations can update it.
6896 		os.chown(mymfn,uid,portage_gid)
6897 	os.chmod(mymfn,0664)
6898 	except SystemExit, e:
6900 	except Exception, e:
6904 global uid,portage_gid,portdb,db
6905 if secpass and not os.environ.has_key("SANDBOX_ACTIVE"):
6906 close_portdbapi_caches()
# ---------------------------------------------------------------------------
# Module-level initialization (runs at import time, after the definitions
# above).
# NOTE(review): line-numbered, non-contiguous listing — gaps in the left-hand
# numbering mean statements (else-branches, try/except lines, sys.exit calls,
# continue statements) are missing from this view.
# ---------------------------------------------------------------------------
# Flush portage state via portageexit() when the interpreter shuts down.
6909 atexit_register(portageexit)
# Only full-root (secpass==2) callers outside the sandbox process profile
# updates; repoman/ebuild-digest runs must not mutate the installed-pkg DBs.
6911 if (secpass==2) and (not os.environ.has_key("SANDBOX_ACTIVE")):
6912 	if settings["PORTAGE_CALLER"] in ["emerge","fixpackages"]:
6913 		#only do this if we're root and not running repoman/ebuild digest
# normpath collapses the repeated slashes in the constructed path.
6914 		updpath=os.path.normpath(settings["PORTDIR"]+"///profiles/updates")
6916 		if not mtimedb.has_key("updates"):
6917 			mtimedb["updates"]={}
6919 			mylist=listdir(updpath,EmptyOnError=1)
# Re-key update filenames so they sort chronologically (e.g. "1Q-2005" ->
# "2005-1Q"). NOTE(review): only one of the two slicing lines presumably runs
# per filename scheme — the selecting condition sits in the missing lines.
6921 			mylist=[myfile[3:]+"-"+myfile[:2] for myfile in mylist]
6923 			mylist=[myfile[5:]+"-"+myfile[:4] for myfile in mylist]
6924 			for myfile in mylist:
6925 				mykey=updpath+"/"+myfile
6926 				if not os.path.isfile(mykey):
# Process an update file when it is new, its mtime differs from the cached
# value, or the caller is fixpackages (forced full re-run).
6928 				if (not mtimedb["updates"].has_key(mykey)) or \
6929 				   (mtimedb["updates"][mykey] != os.stat(mykey)[stat.ST_MTIME]) or \
6930 				   (settings["PORTAGE_CALLER"] == "fixpackages"):
6933 					commit_mtimedb() # This lets us save state for C-c.
6935 			#directory doesn't exist
6938 		#make sure our internal databases are consistent; recreate our virts and vartree
6939 		do_vartree(settings)
# Warn that binary packages in PKGDIR/All were left un-updated;
# do_upgrade_packagesmessage is set by the update processing above when
# package moves occurred without the fixpackages path being taken.
6940 		if do_upgrade_packagesmessage and \
6941 			 listdir(settings["PKGDIR"]+"/All/",EmptyOnError=1):
6942 			writemsg("\n\n\n ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the")
6943 			writemsg("\n    tbz2's in the packages directory. "+bold("Note: This can take a very long time."))
6950 #continue setting up other trees
6951 db["/"]["porttree"]=portagetree("/",virts)
6952 db["/"]["bintree"]=binarytree("/",settings["PKGDIR"],virts)
# Mirror the same trees under the active ROOT (which may differ from "/").
6954 	db[root]["porttree"]=portagetree(root,virts)
6955 	db[root]["bintree"]=binarytree(root,settings["PKGDIR"],virts)
# Collect thirdpartymirrors from PORTDIR plus every overlay; overlays are
# inserted at the front of profileroots — NOTE(review): presumably so overlay
# mirror definitions take precedence; confirm stack_dictlist ordering.
6957 profileroots = [settings["PORTDIR"]+"/profiles/"]
6958 for x in settings["PORTDIR_OVERLAY"].split():
6959 	profileroots.insert(0, x+"/profiles/")
6960 thirdparty_lists = [grabdict(os.path.join(x, "thirdpartymirrors")) for x in profileroots]
6961 thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
# Sanity-check PORTAGE_TMPDIR; the abort/exit statements live in the missing
# lines after each message pair.
6963 if not os.path.exists(settings["PORTAGE_TMPDIR"]):
6964 	writemsg("portage: the directory specified in your PORTAGE_TMPDIR variable, \""+settings["PORTAGE_TMPDIR"]+",\"\n")
6965 	writemsg("does not exist.  Please create this directory or correct your PORTAGE_TMPDIR setting.\n")
6967 if not os.path.isdir(settings["PORTAGE_TMPDIR"]):
6968 	writemsg("portage: the directory specified in your PORTAGE_TMPDIR variable, \""+settings["PORTAGE_TMPDIR"]+",\"\n")
6969 	writemsg("is not a directory.  Please correct your PORTAGE_TMPDIR setting.\n")
6972 # COMPATIBILITY -- This shouldn't be used.
6973 pkglines = settings.packages
# Validate ACCEPT_KEYWORDS against the known arch list: each arch from
# PORTAGE_ARCHLIST plus its "~arch" (unstable) variant.
6975 groups = settings["ACCEPT_KEYWORDS"].split()
6976 archlist = flatten([[myarch, "~"+myarch] for myarch in settings["PORTAGE_ARCHLIST"].split()])
6978 for group in groups:
6980 		writemsg("--- 'profiles/arch.list' is empty or not available. Empty portage tree?\n")
# Leading '-' marks a negated keyword, which is not required to be in archlist.
6982 	elif (group not in archlist) and group[0]!='-':
6983 		writemsg("\n"+red("!!! INVALID ACCEPT_KEYWORDS: ")+str(group)+"\n")
# A copied (non-symlink) profile path will not follow profile updates; warn
# loudly ("\a" rings the terminal bell) but do not abort.
6988 if not os.path.islink(PROFILE_PATH) and os.path.exists(settings["PORTDIR"]+"/profiles"):
6989 	writemsg(red("\a\n\n!!! "+PROFILE_PATH+" is not a symlink and will probably prevent most merges.\n"))
6990 	writemsg(red("!!! It should point into a profile within %s/profiles/\n" % settings["PORTDIR"]))
6991 	writemsg(red("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n"))
6994 # ============================================================================
6995 # ============================================================================
6994 # ============================================================================
6995 # ============================================================================