1 # portage.py -- core Portage functionality
2 # Copyright 1998-2004 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id: /var/cvsroot/gentoo-src/portage/pym/portage.py,v 1.524.2.76 2005/05/29 12:40:08 jstubbs Exp $
# Version string derived from the SVN $Rev$ keyword: slice off the "$Rev: "
# prefix and trailing " $", then tag with "-svn" to mark a live checkout.
7 VERSION="$Rev$"[6:-2] + "-svn"
9 # ===========================================================================
10 # START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
11 # ===========================================================================
18 print "Failed to import sys! Something is _VERY_ wrong with python."
22 import os,string,types,signal,fcntl,errno
23 import time,traceback,copy
24 import re,pwd,grp,commands
29 import pickle as cPickle
33 from time import sleep
34 from random import shuffle
35 from cache.cache_errors import CacheError
39 sys.stderr.write("\n\n")
40 sys.stderr.write("!!! Failed to complete python imports. There are internal modules for\n")
41 sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
42 sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
44 sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
45 sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
47 sys.stderr.write(" "+str(e)+"\n\n");
50 sys.stderr.write("\n\n")
51 sys.stderr.write("!!! Failed to complete python imports. There are internal modules for\n")
52 sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
53 sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
55 sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
56 sys.stderr.write("!!! gone wrong. The exception was non-standard and we were unable to catch it.\n\n")
60 # XXX: This should get renamed to bsd_chflags, I think.
66 # XXX: This should get renamed to bsd_chflags, I think.
75 # XXX: This needs to get cleaned up.
77 from output import blue, bold, brown, darkblue, darkgreen, darkred, darkteal, \
78 darkyellow, fuchsia, fuscia, green, purple, red, teal, turquoise, white, \
79 xtermTitle, xtermTitleReset, yellow
82 from portage_const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
83 USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
84 PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
85 EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
86 MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
87 DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
88 INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
89 INCREMENTALS, STICKIES, EAPI
91 from portage_data import ostype, lchown, userland, secpass, uid, wheelgid, \
92 portage_uid, portage_gid
95 from portage_util import grabdict, grabdict_package, grabfile, grabfile_package, write_atomic, \
96 map_dictlist_vals, pickle_read, pickle_write, stack_dictlist, stack_dicts, stack_lists, \
97 unique_array, varexpand, writedict, writemsg, writemsg_stdout, getconfig, dump_traceback
98 import portage_exception
102 from portage_exec import atexit_register, run_exitfuncs
103 from portage_locks import unlockfile,unlockdir,lockfile,lockdir
104 import portage_checksum
105 from portage_checksum import perform_md5,perform_checksum,prelink_capable
107 from portage_localization import _
109 # Need these functions directly in portage namespace to not break every external tool in existence
110 from portage_versions import ververify,vercmp,catsplit,catpkgsplit,pkgsplit,pkgcmp
112 except SystemExit, e:
115 sys.stderr.write("\n\n")
116 sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
117 sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
118 sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
119 sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
120 sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
121 sys.stderr.write("!!! a recovery of portage.\n")
123 sys.stderr.write(" "+str(e)+"\n\n")
127 # ===========================================================================
128 # END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
129 # ===========================================================================
# Signal handler for SIGINT/SIGTERM: first ignore further INT/TERM so the
# shutdown path cannot be re-entered.
# NOTE(review): intermediate source lines of the body are not visible in this
# chunk; the actual kill/exit step is not shown here.
132 def exithandler(signum,frame):
133 """Handles ^C interrupts in a sane manner"""
134 signal.signal(signal.SIGINT, signal.SIG_IGN)
135 signal.signal(signal.SIGTERM, signal.SIG_IGN)
137 # 0=send to *everybody* in process group
# Module-level signal wiring: restore default SIGCHLD handling, route
# INT/TERM through exithandler, and reset SIGPIPE to the OS default
# (terminate) instead of raising a Python exception on broken pipes.
140 signal.signal(signal.SIGCHLD, signal.SIG_DFL)
141 signal.signal(signal.SIGINT, exithandler)
142 signal.signal(signal.SIGTERM, exithandler)
143 signal.signal(signal.SIGPIPE, signal.SIG_DFL)
# Body of load_mod(name) -- the def line is not visible in this chunk.
# Imports the module containing the dotted name, then walks the remaining
# attribute path to return the leaf object.
146 modname = string.join(string.split(name,".")[:-1],".")
147 mod = __import__(modname)
148 components = name.split('.')
# __import__ returns the top-level package, so descend one dotted
# component at a time to reach the requested target.
149 for comp in components[1:]:
150 mod = getattr(mod, comp)
# Look up `key` in the sub-dicts of top_dict, trying each name in key_order;
# the first sub-dict that contains the key wins.  FullCopy selects between
# returning a deep copy and returning the stored value by reference.
# NOTE(review): the EmptyOnError/AllowEmpty branches are not visible in this
# chunk; only the found/not-found paths are shown.
153 def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
155 if top_dict.has_key(x) and top_dict[x].has_key(key):
157 return copy.deepcopy(top_dict[x][key])
159 return top_dict[x][key]
# Reached only when no sub-dict in key_order supplied the key.
163 raise KeyError, "Key not found in list; '%s'" % key
# Fragment of a getcwd-style helper (def line not visible): guards against
# the process sitting in a directory that has been deleted, presumably by
# falling back to chdir("/") -- TODO confirm against full source.
166 "this fixes situations where the current directory doesn't exist"
169 except SystemExit, e:
176 def abssymlink(symlink):
177 "This reads symlinks, resolving the relative symlinks, and returning the absolute."
178 mylink=os.readlink(symlink)
# Relative targets are resolved against the symlink's own directory.
# NOTE(review): the guard that skips this for absolute targets is not
# visible in this chunk.
180 mydir=os.path.dirname(symlink)
181 mylink=mydir+"/"+mylink
# normpath collapses any "dir/../" produced by the join above.
182 return os.path.normpath(mylink)
184 def suffix_array(array,suffix,doblanks=1):
185 """Appends a given suffix to each element in an Array/List/Tuple.
# Only list/tuple inputs are accepted; any other type is a caller error.
187 if type(array) not in [types.ListType, types.TupleType]:
188 raise TypeError, "List or Tuple expected. Got %s" % type(array)
# NOTE(review): the doblanks/empty-element handling is not visible in this
# chunk; only the plain append path is shown.
192 newarray.append(x + suffix)
197 def prefix_array(array,prefix,doblanks=1):
198 """Prepends a given prefix to each element in an Array/List/Tuple.
# Mirror image of suffix_array(): same type restriction, prefix instead of
# suffix.
200 if type(array) not in [types.ListType, types.TupleType]:
201 raise TypeError, "List or Tuple expected. Got %s" % type(array)
# NOTE(review): the doblanks/empty-element handling is not visible in this
# chunk; only the plain prepend path is shown.
205 newarray.append(prefix + x)
210 def normalize_path(mypath):
# os.path.normpath collapses redundant separators but, per POSIX, preserves
# a leading "//"; strip the extra slash so every path has one canonical form.
211 newpath = os.path.normpath(mypath)
213 if newpath[:2] == "//":
214 newpath = newpath[1:]
# Cached directory listing: returns (names, types) for mypath, where types
# parallel names (regular file / directory / symlink markers).  Results are
# memoized in the module-level `dircache` keyed by normalized path and
# invalidated by directory mtime.
221 def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
222 global cacheHit,cacheMiss,cacheStale
223 mypath = normalize_path(my_original_path)
224 if dircache.has_key(mypath):
226 cached_mtime, list, ftype = dircache[mypath]
# Cache miss: sentinel mtime -1 forces the rescan branch below.
229 cached_mtime, list, ftype = -1, [], []
231 pathstat = os.stat(mypath)
232 if stat.S_ISDIR(pathstat[stat.ST_MODE]):
233 mtime = pathstat[stat.ST_MTIME]
236 except SystemExit, e:
242 # Python returns mtime in seconds, so if it was changed in the last few seconds, it could be invalid
243 if mtime != cached_mtime or time.time() - mtime < 4:
244 if dircache.has_key(mypath):
246 list = os.listdir(mypath)
# followSymlinks selects stat (follow) vs lstat (report the link itself).
251 pathstat = os.stat(mypath+"/"+x)
253 pathstat = os.lstat(mypath+"/"+x)
# Classify each entry; the type codes appended are not visible in this
# chunk -- presumably small ints consumed by listdir() below.
255 if stat.S_ISREG(pathstat[stat.ST_MODE]):
257 elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
259 elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
263 except SystemExit, e:
# Store the fresh scan for subsequent calls.
267 dircache[mypath] = mtime, list, ftype
# Filtering pass: with ignorecvs, keep entries longer than 2 chars that do
# not start with ".#"; otherwise drop anything named in ignorelist.
271 for x in range(0, len(list)):
272 if(ignorecvs and (len(list[x]) > 2) and (list[x][:2]!=".#")):
273 ret_list.append(list[x])
274 ret_ftype.append(ftype[x])
275 elif (list[x] not in ignorelist):
276 ret_list.append(list[x])
277 ret_ftype.append(ftype[x])
# Verbosity level 10: emit cache hit/miss/stale statistics.
279 writemsg("cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),10)
280 return ret_list, ret_ftype
# Public directory-listing wrapper over cacheddir() with filtering
# (filesonly/dirsonly) and optional recursion.
# NOTE(review): mutable default `ignorelist=[]` is shared across calls --
# safe only as long as no caller or callee ever mutates it; confirm before
# changing.
283 def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
284 EmptyOnError=False, dirsonly=False):
286 list, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
# Fast path: no filtering or recursion requested, return the raw listing.
293 if not (filesonly or dirsonly or recursive):
# Recursive walk: ftype code 1 marks directories; skip VCS metadata dirs
# when ignorecvs is set.
299 if ftype[x]==1 and not (ignorecvs and os.path.basename(list[x]) in ('CVS','.svn','SCCS')):
300 l,f = cacheddir(mypath+"/"+list[x], ignorecvs, ignorelist, EmptyOnError,
# Prefix child entries with their parent path so results stay relative to
# mypath.
304 for y in range(0,len(l)):
305 l[y]=list[x]+"/"+l[y]
# filesonly/dirsonly selection loops -- the ftype test on each branch is not
# visible in this chunk.
311 for x in range(0,len(ftype)):
313 rlist=rlist+[list[x]]
316 for x in range(0, len(ftype)):
318 rlist = rlist + [list[x]]
# Wall-clock timestamp (whole seconds) captured once at module import.
324 starttime=long(time.time())
327 def tokenize(mystring):
328 """breaks a string like 'foo? (bar) oni? (blah (blah))'
329 into embedded lists; returns None on paren mismatch"""
331 # This function is obsoleted.
332 # Use dep_parenreduce
# Parser state: `accum` collects the current token, `curlist` the current
# nesting level, `prevlists` the stack of enclosing levels.
342 curlist.append(accum)
344 prevlists.append(curlist)
# Closing paren: flush any pending token, then pop back to the parent list.
349 curlist.append(accum)
# More ")" than "(" seen -- report and (per the docstring) return None.
352 writemsg("!!! tokenizer: Unmatched left parenthesis in:\n'"+str(mystring)+"'\n")
355 curlist=prevlists.pop()
356 curlist.append(newlist)
# Whitespace terminates the current token.
358 elif x in string.whitespace:
360 curlist.append(accum)
# End of input: flush the final token; unterminated "(" is an error.
365 curlist.append(accum)
367 writemsg("!!! tokenizer: Exiting with unterminated parenthesis in:\n'"+str(mystring)+"'\n")
371 def flatten(mytokens):
372 """this function now turns a [1,[2,3]] list into
373 a [1,2,3] list and returns it."""
# Recurse into nested lists; non-list elements are kept as-is (the append
# branch is not visible in this chunk).
376 if type(x)==types.ListType:
377 newlist.extend(flatten(x))
382 #beautiful directed graph object
# digraph stores each node as dict[key] = [refcount, [parents...]]; refcount
# is the number of children pointing at the node (its in-use count for
# dependency ordering).  The class line itself is not visible in this chunk.
387 #okeys = keys, in order they were added (to optimize firstzero() ordering)
# Add mykey with an edge from myparent (myparent may be falsy for roots).
# Each new child reference bumps the parent's refcount.
390 def addnode(self,mykey,myparent):
391 if not self.dict.has_key(mykey):
392 self.okeys.append(mykey)
394 self.dict[mykey]=[0,[]]
396 self.dict[mykey]=[0,[myparent]]
397 self.dict[myparent][0]=self.dict[myparent][0]+1
# Existing node: only record a parent we have not seen before.
399 if myparent and (not myparent in self.dict[mykey][1]):
400 self.dict[mykey][1].append(myparent)
401 self.dict[myparent][0]=self.dict[myparent][0]+1
# Remove mykey, decrementing the refcount of every parent it pointed to.
403 def delnode(self,mykey):
404 if not self.dict.has_key(mykey):
406 for x in self.dict[mykey][1]:
407 self.dict[x][0]=self.dict[x][0]-1
411 self.okeys.remove(mykey)
416 "returns all nodes in the dictionary"
417 return self.dict.keys()
# Scan in insertion order (okeys) -- the loop header is not visible here.
420 "returns first node with zero references, or NULL if no such node exists"
422 if self.dict[x][0]==0:
# Depth = number of parent hops following the first parent at each level.
426 def depth(self, mykey):
428 while (self.dict[mykey][1]):
430 mykey=self.dict[mykey][1][0]
434 "returns all nodes with zero references, or NULL if no such node exists"
436 for x in self.dict.keys():
# Nodes whose key starts with "blocks" are excluded from the zero list.
437 mys = string.split(x)
438 if mys[0] != "blocks" and self.dict[x][0]==0:
442 def hasallzeros(self):
443 "returns 0/1, Are all nodes zeros? 1 : 0"
445 for x in self.dict.keys():
446 if self.dict[x][0]!=0:
# Empty-graph check (method header not visible in this chunk).
451 if len(self.dict)==0:
455 def hasnode(self,mynode):
456 return self.dict.has_key(mynode)
# Copy: shallow-copies each [refcount, parents] entry and the key order.
460 for x in self.dict.keys():
461 mygraph.dict[x]=self.dict[x][:]
462 mygraph.okeys=self.okeys[:]
# Collect per-phase elog messages written under $T/logging/ during an ebuild
# run, filter them by PORTAGE_ELOG_CLASSES, and dispatch the result to every
# module named in PORTAGE_ELOG_SYSTEM.
465 def elog_process(cpv, mysettings):
466 mylogfiles = listdir(mysettings["T"]+"/logging/")
467 # shortcut for packages without any messages
468 if len(mylogfiles) == 0:
470 # exploit listdir() file order so we process log entries in chronological order
# Log file names are "<phase>.<msgtype>"; drop messages whose type is not
# listed (either case) in PORTAGE_ELOG_CLASSES.
474 msgfunction, msgtype = f.split(".")
475 if not msgtype.upper() in mysettings["PORTAGE_ELOG_CLASSES"].split() \
476 and not msgtype.lower() in mysettings["PORTAGE_ELOG_CLASSES"].split():
478 if msgfunction not in portage_const.EBUILD_PHASES:
479 print "!!! can't process invalid log file: %s" % f
481 if not msgfunction in mylogentries:
482 mylogentries[msgfunction] = []
483 msgcontent = open(mysettings["T"]+"/logging/"+f, "r").readlines()
484 mylogentries[msgfunction].append((msgtype, msgcontent))
486 # in case the filters matched all messages
487 if len(mylogentries) == 0:
490 # generate a single string with all log messages
# Iterate phases in canonical EBUILD_PHASES order so the combined log reads
# chronologically.
492 for phase in portage_const.EBUILD_PHASES:
493 if not phase in mylogentries:
495 for msgtype,msgcontent in mylogentries[phase]:
496 fulllog += "%s: %s\n" % (msgtype, phase)
497 for line in msgcontent:
501 # pass the processing to the individual modules
502 logsystems = mysettings["PORTAGE_ELOG_SYSTEM"].split()
505 # FIXME: ugly ad.hoc import code
506 # TODO: implement a common portage module loader
# __import__("elog_modules.mod_<s>") returns the package; getattr pulls the
# actual submodule, whose process() does the delivery.
507 logmodule = __import__("elog_modules.mod_"+s)
508 m = getattr(logmodule, "mod_"+s)
509 m.process(mysettings, cpv, mylogentries, fulllog)
510 except (ImportError, AttributeError), e:
511 print "!!! Error while importing logging modules while loading \"mod_%s\":" % s
513 except portage_exception.PortageException, e:
516 # valid end of version components; integers specify offset from release version
517 # pre=prerelease, p=patchlevel (should always be followed by an int), rc=release candidate
518 # all but _p (where it is required) can be followed by an optional trailing integer
520 endversion={"pre":-2,"p":0,"alpha":-4,"beta":-3,"rc":-1}
521 # as there's no reliable way to set {}.keys() order
522 # endversion_keys will be used instead of endversion.keys
523 # to have fixed search order, so that "pre" is checked
525 endversion_keys = ["pre", "p", "alpha", "beta", "rc"]
527 #parse /etc/env.d and generate /etc/profile.env
# Rebuilds /etc/profile.env, /etc/csh.env, ld.so.conf (and the ld cache) and
# prelink.conf from the numbered files in ${ROOT}etc/env.d.  makelinks is
# forwarded to ldconfig link handling on Linux.
529 def env_update(makelinks=1):
531 if not os.path.exists(root+"etc/env.d"):
533 os.makedirs(root+"etc/env.d",0755)
535 fns=listdir(root+"etc/env.d",EmptyOnError=1)
# Drop entries that do not start with two digits (env.d files are processed
# in numeric order).
538 while (pos<len(fns)):
542 if (fns[pos][0] not in string.digits) or (fns[pos][1] not in string.digits):
# `specials` accumulates list-valued variables that are merged across all
# env.d files; everything else is last-writer-wins via `env`.
548 "KDEDIRS":[],"PATH":[],"CLASSPATH":[],"LDPATH":[],"MANPATH":[],
549 "INFODIR":[],"INFOPATH":[],"ROOTPATH":[],"CONFIG_PROTECT":[],
550 "CONFIG_PROTECT_MASK":[],"PRELINK_PATH":[],"PRELINK_PATH_MASK":[],
551 "PYTHONPATH":[], "ADA_INCLUDE_PATH":[], "ADA_OBJECTS_PATH":[]
# Variables in colon_separated are split on ":" before merging.
554 "ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
556 "PATH", "PRELINK_PATH",
557 "PRELINK_PATH_MASK", "PYTHONPATH"
563 # don't process backup files
564 if x[-1]=='~' or x[-4:]==".bak":
566 myconfig=getconfig(root+"etc/env.d/"+x)
568 writemsg("!!! Parsing error in "+str(root)+"etc/env.d/"+str(x)+"\n")
571 # process PATH, CLASSPATH, LDPATH
572 for myspec in specials.keys():
573 if myconfig.has_key(myspec):
574 if myspec in colon_separated:
575 specials[myspec].extend(myconfig[myspec].split(":"))
577 specials[myspec].append(myconfig[myspec])
579 # process all other variables
580 for myenv in myconfig.keys():
581 env[myenv]=myconfig[myenv]
# Read the existing ld.so.conf so we can detect whether it needs rewriting.
583 if os.path.exists(root+"etc/ld.so.conf"):
584 myld=open(root+"etc/ld.so.conf")
585 myldlines=myld.readlines()
589 #each line has at least one char (a newline)
593 # os.rename(root+"etc/ld.so.conf",root+"etc/ld.so.conf.bak")
594 # Where is the new ld.so.conf generated? (achim)
598 ld_cache_update=False
600 newld=specials["LDPATH"]
602 #ld.so.conf needs updating and ldconfig needs to be run
603 myfd=open(root+"etc/ld.so.conf","w")
604 myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
605 myfd.write("# contents of /etc/env.d directory\n")
606 for x in specials["LDPATH"]:
611 # Update prelink.conf if we are prelink-enabled
613 newprelink=open(root+"etc/prelink.conf","w")
614 newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
615 newprelink.write("# contents of /etc/env.d directory\n")
# Standard system dirs are prelinked with library sub-dirs (-l).
617 for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
618 newprelink.write("-l "+x+"\n");
619 for x in specials["LDPATH"]+specials["PATH"]+specials["PRELINK_PATH"]:
# Paths covered by PRELINK_PATH_MASK are excluded from the -h entries.
625 for y in specials["PRELINK_PATH_MASK"]:
634 newprelink.write("-h "+x+"\n")
635 for x in specials["PRELINK_PATH_MASK"]:
636 newprelink.write("-b "+x+"\n")
# Compare stored mtimes of every LDPATH dir against the current stat to
# decide whether the ld cache must be regenerated.
639 if not mtimedb.has_key("ldpath"):
642 for x in specials["LDPATH"]+['/usr/lib','/lib']:
644 newldpathtime=os.stat(x)[stat.ST_MTIME]
645 except SystemExit, e:
649 if mtimedb["ldpath"].has_key(x):
650 if mtimedb["ldpath"][x]==newldpathtime:
653 mtimedb["ldpath"][x]=newldpathtime
656 mtimedb["ldpath"][x]=newldpathtime
659 # Only run ldconfig as needed
660 if (ld_cache_update or makelinks):
661 # ldconfig has very different behaviour between FreeBSD and Linux
662 if ostype=="Linux" or ostype.lower().endswith("gnu"):
663 # We can't update links if we haven't cleaned other versions first, as
664 # an older package installed ON TOP of a newer version will cause ldconfig
665 # to overwrite the symlinks we just made. -X means no links. After 'clean'
666 # we can safely create links.
667 writemsg(">>> Regenerating "+str(root)+"etc/ld.so.cache...\n")
669 commands.getstatusoutput("cd / ; /sbin/ldconfig -r "+root)
671 commands.getstatusoutput("cd / ; /sbin/ldconfig -X -r "+root)
672 elif ostype in ("FreeBSD","DragonFly"):
673 writemsg(">>> Regenerating "+str(root)+"var/run/ld-elf.so.hints...\n")
674 commands.getstatusoutput("cd / ; /sbin/ldconfig -elf -i -f "+str(root)+"var/run/ld-elf.so.hints "+str(root)+"etc/ld.so.conf")
# LDPATH must not leak into profile.env/csh.env; it lives in ld.so.conf.
676 del specials["LDPATH"]
678 penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
679 penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
680 cenvnotice = penvnotice[:]
681 penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
682 cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
684 #create /etc/profile.env for bash support
685 outfile=open(root+"/etc/profile.env","w")
686 outfile.write(penvnotice)
# List-valued variables: CONFIG_PROTECT* are space-separated, everything
# else is colon-separated (the space-join branch is not visible here).
688 for path in specials.keys():
689 if len(specials[path])==0:
691 outstring="export "+path+"='"
692 if path in ["CONFIG_PROTECT","CONFIG_PROTECT_MASK"]:
693 for x in specials[path][:-1]:
696 for x in specials[path][:-1]:
697 outstring=outstring+x+":"
698 outstring=outstring+specials[path][-1]+"'"
699 outfile.write(outstring+"\n")
701 #create /etc/profile.env
703 if type(env[x])!=types.StringType:
705 outfile.write("export "+x+"='"+env[x]+"'\n")
708 #create /etc/csh.env for (t)csh support
709 outfile=open(root+"/etc/csh.env","w")
710 outfile.write(cenvnotice)
# Same emission logic as above, in csh "setenv" syntax.
712 for path in specials.keys():
713 if len(specials[path])==0:
715 outstring="setenv "+path+" '"
716 if path in ["CONFIG_PROTECT","CONFIG_PROTECT_MASK"]:
717 for x in specials[path][:-1]:
720 for x in specials[path][:-1]:
721 outstring=outstring+x+":"
722 outstring=outstring+specials[path][-1]+"'"
723 outfile.write(outstring+"\n")
724 #get it out of the way
729 if type(env[x])!=types.StringType:
731 outfile.write("setenv "+x+" '"+env[x]+"'\n")
734 def new_protect_filename(mydest, newmd5=None):
735 """Resolves a config-protect filename for merging, optionally
736 using the last filename if the md5 matches.
737 (dest,md5) ==> 'string' --- path_to_target_filename
738 (dest) ==> ('next', 'highest') --- next_target and most-recent_target
741 # config protection filename format:
# Protected files are named "._cfg<NNNN>_<name>"; chars 5-8 are the 4-digit
# counter, chars 10+ are the original filename.
747 if (len(mydest) == 0):
748 raise ValueError, "Empty path provided where a filename is required"
749 if (mydest[-1]=="/"): # XXX add better directory checking
750 raise ValueError, "Directory provided but this function requires a filename"
751 if not os.path.exists(mydest):
754 real_filename = os.path.basename(mydest)
755 real_dirname = os.path.dirname(mydest)
# Find the highest existing counter for this filename in the directory.
756 for pfile in listdir(real_dirname):
757 if pfile[0:5] != "._cfg":
759 if pfile[10:] != real_filename:
762 new_prot_num = int(pfile[5:9])
763 if new_prot_num > prot_num:
764 prot_num = new_prot_num
766 except SystemExit, e:
# Next free slot is one past the highest seen.
770 prot_num = prot_num + 1
772 new_pfile = os.path.normpath(real_dirname+"/._cfg"+string.zfill(prot_num,4)+"_"+real_filename)
773 old_pfile = os.path.normpath(real_dirname+"/"+last_pfile)
# If the newest existing protected file already has the target md5, reuse it
# instead of creating another copy (the return in that branch is not visible
# here).
774 if last_pfile and newmd5:
775 if portage_checksum.perform_md5(real_dirname+"/"+last_pfile) == newmd5:
782 return (new_pfile, old_pfile)
784 #XXX: These two are now implemented in portage_util.py but are needed here
785 #XXX: until the isvalidatom() dependency is sorted out.
# Like portage_util.grabdict, but validates every key as a package atom and
# warns about (and presumably drops) invalid entries.
787 def grabdict_package(myfilename,juststrings=0,recursive=0):
788 pkgs=grabdict(myfilename, juststrings=juststrings, empty=1,recursive=recursive)
789 for x in pkgs.keys():
790 if not isvalidatom(x):
792 writemsg("--- Invalid atom in %s: %s\n" % (myfilename, x))
# Like portage_util.grabfile, but validates each line as a package atom,
# iterating backwards so invalid entries can be deleted in place.
795 def grabfile_package(myfilename,compatlevel=0,recursive=0):
796 pkgs=grabfile(myfilename,compatlevel,recursive=recursive)
797 for x in range(len(pkgs)-1,-1,-1):
# `pkg` is presumably pkgs[x] with leading markers stripped -- the
# intermediate lines are not visible in this chunk.
803 if not isvalidatom(pkg):
804 writemsg("--- Invalid atom in %s: %s\n" % (myfilename, pkgs[x]))
808 # returns a tuple. (version[string], error[string])
809 # They are pretty much mutually exclusive.
810 # Either version is a string and error is none, or
811 # version is None and error is a string
# Parses VERSION/PATCHLEVEL/SUBLEVEL/EXTRAVERSION from <base_dir>/Makefile,
# then appends localversion* file contents and CONFIG_LOCALVERSION from
# .config, all with whitespace stripped.
813 def ExtractKernelVersion(base_dir):
815 pathname = os.path.join(base_dir, 'Makefile')
817 f = open(pathname, 'r')
818 except OSError, details:
819 return (None, str(details))
820 except IOError, details:
821 return (None, str(details))
# Only the first few Makefile lines are read (loop header not visible).
825 lines.append(f.readline())
826 except OSError, details:
827 return (None, str(details))
828 except IOError, details:
829 return (None, str(details))
831 lines = map(string.strip, lines)
835 #XXX: The following code relies on the ordering of vars within the Makefile
837 # split on the '=' then remove annoying whitespace
838 items = string.split(line, '=')
839 items = map(string.strip, items)
840 if items[0] == 'VERSION' or \
841 items[0] == 'PATCHLEVEL':
844 elif items[0] == 'SUBLEVEL':
# EXTRAVERSION may be empty; items[-1] != items[0] filters the "no value"
# case where split returned only the key.
846 elif items[0] == 'EXTRAVERSION' and \
847 items[-1] != items[0]:
850 # Grab a list of files named localversion* and sort them
# Iterate backwards so non-matching entries can be deleted in place.
851 localversions = os.listdir(base_dir)
852 for x in range(len(localversions)-1,-1,-1):
853 if localversions[x][:12] != "localversion":
857 # Append the contents of each to the version string, stripping ALL whitespace
858 for lv in localversions:
859 version += string.join(string.split(string.join(grabfile(base_dir+"/"+lv))), "")
861 # Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
862 kernelconfig = getconfig(base_dir+"/.config")
863 if kernelconfig and kernelconfig.has_key("CONFIG_LOCALVERSION"):
864 version += string.join(string.split(kernelconfig["CONFIG_LOCALVERSION"]), "")
866 return (version,None)
870 def autouse(myvartree,use_cache=1):
871 "returns set of USE variables auto-enabled due to packages being installed"
# Result is memoized in the module global autouse_val; usedefaults maps a
# USE flag to the list of dependency atoms that must all be installed for
# the flag to be auto-enabled.
872 global usedefaults, autouse_val
873 if autouse_val is not None:
879 for myuse in usedefaults:
881 for mydep in usedefaults[myuse]:
# A single unmatched dep disqualifies the flag (the flag/skip bookkeeping
# between these lines is not visible in this chunk).
882 if not myvartree.dep_match(mydep,use_cache=True):
886 myusevars += " "+myuse
887 autouse_val = myusevars
# Sanity check: raise TypeError unless `test` is a portage.config instance.
# The class is identified by its __class__ string to avoid import-order
# issues with isinstance.
890 def check_config_instance(test):
891 if not test or (str(test.__class__) != 'portage.config'):
892 raise TypeError, "Invalid type for config object: %s" % test.__class__
# config.__init__ (enclosing class line not visible in this chunk, and the
# method continues past the end of this view).  Two construction modes:
# clone an existing config via deep copies, or build a fresh one from the
# profile stack, /etc/make.globals, make.defaults, make.conf and the
# process environment.
895 def __init__(self, clone=None, mycpv=None, config_profile_path=None, config_incrementals=None):
897 self.already_in_regenerate = 0
902 self.modifiedkeys = []
907 # Virtuals obtained from the vartree
908 self.treeVirtuals = {}
909 # Virtuals by user specification. Includes negatives.
910 self.userVirtuals = {}
911 # Virtual negatives from user specifications.
912 self.negVirtuals = {}
914 self.user_profile_dir = None
# --- clone path: deep-copy every piece of state from the donor config. ---
917 self.incrementals = copy.deepcopy(clone.incrementals)
918 self.profile_path = copy.deepcopy(clone.profile_path)
919 self.user_profile_dir = copy.deepcopy(clone.user_profile_dir)
921 self.module_priority = copy.deepcopy(clone.module_priority)
922 self.modules = copy.deepcopy(clone.modules)
924 self.depcachedir = copy.deepcopy(clone.depcachedir)
926 self.packages = copy.deepcopy(clone.packages)
927 self.virtuals = copy.deepcopy(clone.virtuals)
929 self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
930 self.userVirtuals = copy.deepcopy(clone.userVirtuals)
931 self.negVirtuals = copy.deepcopy(clone.negVirtuals)
933 self.use_defs = copy.deepcopy(clone.use_defs)
934 self.usemask = copy.deepcopy(clone.usemask)
# The env layer (last configlist entry) is refreshed from the live
# environment rather than copied from the clone.
936 self.configlist = copy.deepcopy(clone.configlist)
937 self.configlist[-1] = os.environ.copy()
938 self.configdict = { "globals": self.configlist[0],
939 "defaults": self.configlist[1],
940 "conf": self.configlist[2],
941 "pkg": self.configlist[3],
942 "auto": self.configlist[4],
943 "backupenv": self.configlist[5],
944 "env": self.configlist[6] }
945 self.profiles = copy.deepcopy(clone.profiles)
946 self.backupenv = copy.deepcopy(clone.backupenv)
947 self.pusedict = copy.deepcopy(clone.pusedict)
948 self.categories = copy.deepcopy(clone.categories)
949 self.pkeywordsdict = copy.deepcopy(clone.pkeywordsdict)
950 self.pmaskdict = copy.deepcopy(clone.pmaskdict)
951 self.punmaskdict = copy.deepcopy(clone.punmaskdict)
952 self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
953 self.pprovideddict = copy.deepcopy(clone.pprovideddict)
954 self.lookuplist = copy.deepcopy(clone.lookuplist)
955 self.uvlist = copy.deepcopy(clone.uvlist)
956 self.dirVirtuals = copy.deepcopy(clone.dirVirtuals)
957 self.treeVirtuals = copy.deepcopy(clone.treeVirtuals)
# --- fresh-construction path (no clone). ---
959 self.depcachedir = DEPCACHE_PATH
961 if not config_profile_path:
963 writemsg("config_profile_path not specified to class config\n")
964 self.profile_path = profiledir[:]
966 self.profile_path = config_profile_path[:]
968 if not config_incrementals:
969 writemsg("incrementals not specified to class config\n")
970 self.incrementals = copy.deepcopy(portage_const.INCREMENTALS)
972 self.incrementals = copy.deepcopy(config_incrementals)
# Module lookups consult "user" overrides before built-in "default"s.
974 self.module_priority = ["user","default"]
976 self.modules["user"] = getconfig(MODULES_FILE_PATH)
977 if self.modules["user"] == None:
978 self.modules["user"] = {}
979 self.modules["default"] = {
980 "portdbapi.metadbmodule": "cache.metadata.database",
981 "portdbapi.auxdbmodule": "cache.flat_hash.database",
987 # back up our incremental variables:
989 # configlist will contain: [ globals, defaults, conf, pkg, auto, backupenv (incrementals), origenv ]
991 # The symlink might not exist or might not be a symlink.
993 self.profiles=[abssymlink(self.profile_path)]
994 except SystemExit, e:
997 self.profiles=[self.profile_path]
# Walk "parent" files up the profile inheritance chain, inserting each
# ancestor at the front so parents precede children.
999 mypath = self.profiles[0]
1000 while os.path.exists(mypath+"/parent"):
1001 mypath = os.path.normpath(mypath+"///"+grabfile(mypath+"/parent")[0])
1002 if os.path.exists(mypath):
1003 self.profiles.insert(0,mypath)
# repoman must not pick up the local custom profile.
1005 if os.environ.has_key("PORTAGE_CALLER") and os.environ["PORTAGE_CALLER"] == "repoman":
1008 # XXX: This should depend on ROOT?
1009 if os.path.exists("/"+CUSTOM_PROFILE_PATH):
1010 self.user_profile_dir = os.path.normpath("/"+"///"+CUSTOM_PROFILE_PATH)
1011 self.profiles.append(self.user_profile_dir[:])
1013 self.packages_list = [grabfile_package(os.path.join(x, "packages")) for x in self.profiles]
1014 self.packages = stack_lists(self.packages_list, incremental=1)
1015 del self.packages_list
1016 #self.packages = grab_stacked("packages", self.profiles, grabfile, incremental_lines=1)
# Index profile "packages" entries by category/package for fast lookup.
1019 self.prevmaskdict={}
1020 for x in self.packages:
1021 mycatpkg=dep_getkey(x)
1022 if not self.prevmaskdict.has_key(mycatpkg):
1023 self.prevmaskdict[mycatpkg]=[x]
1025 self.prevmaskdict[mycatpkg].append(x)
1027 # get profile-masked use flags -- INCREMENTAL Child over parent
1028 usemask_lists = [grabfile(os.path.join(x, "use.mask")) for x in self.profiles]
1029 self.usemask = stack_lists(usemask_lists, incremental=True)
1031 use_defs_lists = [grabdict(os.path.join(x, "use.defaults")) for x in self.profiles]
1032 self.use_defs = stack_dictlist(use_defs_lists, incremental=True)
# Layer 1: make.globals from each profile plus /etc.
1036 mygcfg_dlists = [getconfig(os.path.join(x, "make.globals")) for x in self.profiles+["/etc"]]
1037 self.mygcfg = stack_dicts(mygcfg_dlists, incrementals=portage_const.INCREMENTALS, ignore_none=1)
1039 if self.mygcfg == None:
1041 except SystemExit, e:
1043 except Exception, e:
1044 writemsg("!!! %s\n" % (e))
1045 writemsg("!!! Incorrect multiline literals can cause this. Do not use them.\n")
1046 writemsg("!!! Errors in this file should be reported on bugs.gentoo.org.\n")
1048 self.configlist.append(self.mygcfg)
1049 self.configdict["globals"]=self.configlist[-1]
# Layer 2: make.defaults stacked across the profile chain.
1054 mygcfg_dlists = [getconfig(os.path.join(x, "make.defaults")) for x in self.profiles]
1055 self.mygcfg = stack_dicts(mygcfg_dlists, incrementals=portage_const.INCREMENTALS, ignore_none=1)
1056 #self.mygcfg = grab_stacked("make.defaults", self.profiles, getconfig)
1057 if self.mygcfg == None:
1059 except SystemExit, e:
1061 except Exception, e:
1062 writemsg("!!! %s\n" % (e))
1063 writemsg("!!! 'rm -Rf /usr/portage/profiles; emerge sync' may fix this. If it does\n")
1064 writemsg("!!! not then please report this to bugs.gentoo.org and, if possible, a dev\n")
1065 writemsg("!!! on #gentoo (irc.freenode.org)\n")
1067 self.configlist.append(self.mygcfg)
1068 self.configdict["defaults"]=self.configlist[-1]
# Layer 3: the user's make.conf (may source other files).
1071 # XXX: Should depend on root?
1072 self.mygcfg=getconfig("/"+MAKE_CONF_FILE,allow_sourcing=True)
1073 if self.mygcfg == None:
1075 except SystemExit, e:
1077 except Exception, e:
1078 writemsg("!!! %s\n" % (e))
1079 writemsg("!!! Incorrect multiline literals can cause this. Do not use them.\n")
1083 self.configlist.append(self.mygcfg)
1084 self.configdict["conf"]=self.configlist[-1]
# Layers 4-5: per-package ("pkg") and auto-USE ("auto") start empty.
1086 self.configlist.append({})
1087 self.configdict["pkg"]=self.configlist[-1]
1090 self.configlist.append({})
1091 self.configdict["auto"]=self.configlist[-1]
1093 #backup-env (for recording our calculated incremental variables:)
1094 self.backupenv = os.environ.copy()
1095 self.configlist.append(self.backupenv) # XXX Why though?
1096 self.configdict["backupenv"]=self.configlist[-1]
# Layer 7: the live process environment (highest priority).
1098 self.configlist.append(os.environ.copy())
1099 self.configdict["env"]=self.configlist[-1]
1102 # make lookuplist for loading package.*
1103 self.lookuplist=self.configlist[:]
1104 self.lookuplist.reverse()
1106 if os.environ.get("PORTAGE_CALLER","") == "repoman":
1107 # repoman shouldn't use local settings.
1108 locations = [self["PORTDIR"] + "/profiles"]
1110 self.pkeywordsdict = {}
1111 self.punmaskdict = {}
# Normal callers also consult /etc/portage and any overlay profiles.
1113 locations = [self["PORTDIR"] + "/profiles", USER_CONFIG_PATH]
1114 for ov in self["PORTDIR_OVERLAY"].split():
1115 ov = os.path.normpath(ov)
1116 if os.path.isdir(ov+"/profiles"):
1117 locations.append(ov+"/profiles")
# package.use: per-package USE flags, indexed by category/package then atom.
1119 pusedict=grabdict_package(USER_CONFIG_PATH+"/package.use", recursive=1)
1121 for key in pusedict.keys():
1122 cp = dep_getkey(key)
1123 if not self.pusedict.has_key(cp):
1124 self.pusedict[cp] = {}
1125 self.pusedict[cp][key] = pusedict[key]
# package.keywords: per-package ACCEPT_KEYWORDS overrides.
1128 pkgdict=grabdict_package(USER_CONFIG_PATH+"/package.keywords", recursive=1)
1129 self.pkeywordsdict = {}
1130 for key in pkgdict.keys():
1131 # default to ~arch if no specific keyword is given
1132 if not pkgdict[key]:
1134 if self.configdict["defaults"] and self.configdict["defaults"].has_key("ACCEPT_KEYWORDS"):
1135 groups = self.configdict["defaults"]["ACCEPT_KEYWORDS"].split()
# Tilde-prefix every stable keyword from the profile default.
1138 for keyword in groups:
1139 if not keyword[0] in "~-":
1140 mykeywordlist.append("~"+keyword)
1141 pkgdict[key] = mykeywordlist
1142 cp = dep_getkey(key)
1143 if not self.pkeywordsdict.has_key(cp):
1144 self.pkeywordsdict[cp] = {}
1145 self.pkeywordsdict[cp][key] = pkgdict[key]
# package.unmask: user-requested unmasking, indexed by category/package.
1148 pkgunmasklines = grabfile_package(USER_CONFIG_PATH+"/package.unmask",recursive=1)
1149 self.punmaskdict = {}
1150 for x in pkgunmasklines:
1151 mycatpkg=dep_getkey(x)
1152 if self.punmaskdict.has_key(mycatpkg):
1153 self.punmaskdict[mycatpkg].append(x)
1155 self.punmaskdict[mycatpkg]=[x]
1157 #getting categories from an external file now
1158 categories = [grabfile(os.path.join(x, "categories")) for x in locations]
1159 self.categories = stack_lists(categories, incremental=1)
# Architecture list is exported as a space-separated config variable.
1162 archlist = [grabfile(os.path.join(x, "arch.list")) for x in locations]
1163 archlist = stack_lists(archlist, incremental=1)
1164 self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
1166 # get virtuals -- needs categories
1167 self.loadVirtuals('/')
# package.mask: profile masks stacked, then per-location user masks.
1170 pkgmasklines = [grabfile_package(os.path.join(x, "package.mask")) for x in self.profiles]
1172 pkgmasklines.append(grabfile_package(l+os.path.sep+"package.mask", recursive=1))
1173 pkgmasklines = stack_lists(pkgmasklines, incremental=1)
1176 for x in pkgmasklines:
1177 mycatpkg=dep_getkey(x)
1178 if self.pmaskdict.has_key(mycatpkg):
1179 self.pmaskdict[mycatpkg].append(x)
1181 self.pmaskdict[mycatpkg]=[x]
# package.provided: entries must be full cat/pkg-version; reverse iteration
# allows in-place deletion of invalid lines.
1183 pkgprovidedlines = [grabfile(os.path.join(x, "package.provided")) for x in self.profiles]
1184 pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
1185 for x in range(len(pkgprovidedlines)-1, -1, -1):
1186 cpvr = catpkgsplit(pkgprovidedlines[x])
1187 if not cpvr or cpvr[0] == "null":
1188 writemsg("Invalid package name in package.provided: "+pkgprovidedlines[x]+"\n")
1189 del pkgprovidedlines[x]
1191 self.pprovideddict = {}
1192 for x in pkgprovidedlines:
1196 mycatpkg=dep_getkey(x)
1197 if self.pprovideddict.has_key(mycatpkg):
1198 self.pprovideddict[mycatpkg].append(x)
1200 self.pprovideddict[mycatpkg]=[x]
# Rebuild lookuplist now that all layers exist (env has highest priority).
1202 self.lookuplist=self.configlist[:]
1203 self.lookuplist.reverse()
1205 useorder=self["USE_ORDER"]
1207 # reasonable defaults; this is important as without USE_ORDER,
1208 # USE will always be "" (nothing set)!
1209 useorder="env:pkg:conf:auto:defaults"
1210 useordersplit=useorder.split(":")
# Build uvlist (USE evaluation order); PKGUSE may only come from the vdb,
# never from config layers.
1213 for x in useordersplit:
1214 if self.configdict.has_key(x):
1215 if "PKGUSE" in self.configdict[x].keys():
1216 del self.configdict[x]["PKGUSE"] # Delete PkgUse, Not legal to set.
1217 #prepend db to list to get correct order
1218 self.uvlist[0:0]=[self.configdict[x]]
1220 self.configdict["env"]["PORTAGE_GID"]=str(portage_gid)
1221 self.backupenv["PORTAGE_GID"]=str(portage_gid)
1223 if self.has_key("PORT_LOGDIR") and not self["PORT_LOGDIR"]:
1224 # port_logdir is defined, but empty. this causes a traceback in doebuild.
1225 writemsg(yellow("!!!")+" PORT_LOGDIR was defined, but set to nothing.\n")
1226 writemsg(yellow("!!!")+" Disabling it. Please set it to a non null value.\n")
1227 del self["PORT_LOGDIR"]
1229 if self["PORTAGE_CACHEDIR"]:
1230 # XXX: Deprecated -- April 15 -- NJ
1231 writemsg(yellow(">>> PORTAGE_CACHEDIR has been deprecated!")+"\n")
1232 writemsg(">>> Please use PORTAGE_DEPCACHEDIR instead.\n")
1233 self.depcachedir = self["PORTAGE_CACHEDIR"]
1234 del self["PORTAGE_CACHEDIR"]
1236 if self["PORTAGE_DEPCACHEDIR"]:
1237 #the auxcache is the only /var/cache/edb/ entry that stays at / even when "root" changes.
1238 # XXX: Could move with a CHROOT functionality addition.
1239 self.depcachedir = self["PORTAGE_DEPCACHEDIR"]
1240 del self["PORTAGE_DEPCACHEDIR"]
# Validate PORTDIR_OVERLAY entries; non-directories are reported and
# presumably skipped.
1242 overlays = string.split(self["PORTDIR_OVERLAY"])
1246 ov=os.path.normpath(ov)
1247 if os.path.isdir(ov):
1250 writemsg(red("!!! Invalid PORTDIR_OVERLAY (not a dir): "+ov+"\n"))
1251 self["PORTDIR_OVERLAY"] = string.join(new_ov)
1252 self.backup_changes("PORTDIR_OVERLAY")
1256 self.features = portage_util.unique_array(self["FEATURES"].split())
1258 #XXX: Should this be temporary? Is it possible at all to have a default?
1259 if "gpg" in self.features:
1260 if not os.path.exists(self["PORTAGE_GPG_DIR"]) or not os.path.isdir(self["PORTAGE_GPG_DIR"]):
1261 writemsg("PORTAGE_GPG_DIR is invalid. Removing gpg from FEATURES.\n")
1262 self.features.remove("gpg")
1264 if not portage_exec.sandbox_capable and ("sandbox" in self.features or "usersandbox" in self.features):
1265 writemsg(red("!!! Problem with sandbox binary. Disabling...\n\n"))
1266 if "sandbox" in self.features:
1267 self.features.remove("sandbox")
1268 if "usersandbox" in self.features:
1269 self.features.remove("usersandbox")
1271 self.features.sort()
1272 self["FEATURES"] = " ".join(["-*"]+self.features)
1273 self.backup_changes("FEATURES")
1275 if not len(self["CBUILD"]) and len(self["CHOST"]):
1276 self["CBUILD"] = self["CHOST"]
1277 self.backup_changes("CBUILD")
# Convenience wrapper: compute the virtuals mapping for the given root via
# getvirtuals() and cache the result on self.virtuals.
1282 def loadVirtuals(self,root):
1283 self.virtuals = self.getvirtuals(root)
# Pick the highest-priority module implementing the given property string
# (best_from_dict consults self.modules / self.module_priority) and import
# it via load_mod().  NOTE(review): lines 1287/1289 are missing from this
# dump -- almost certainly the try:/except wrapping load_mod; the visible
# failure branch dumps a traceback.
1285 def load_best_module(self,property_string):
1286 best_mod = best_from_dict(property_string,self.modules,self.module_priority)
1288 mod = load_mod(best_mod)
1290 dump_traceback(red("Error: Failed to import module '%s'") % best_mod, noiselevel=0)
# Guard helper: raise when this config instance must not be modified.
# NOTE(review): the guard condition (line 1301, presumably "if self.locked:")
# is missing from this dump, so the raise is conditional in the real source.
1300 def modifying(self):
1302 raise Exception, "Configuration is locked."
# Snapshot the current value of an "env"-layer key into self.backupenv so a
# later reset() restores it.  Raises KeyError for keys absent from the env
# layer.  NOTE(review): the branch keyword on line 1307 is elided here.
1304 def backup_changes(self,key=None):
1305 if key and self.configdict["env"].has_key(key):
1306 self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
1308 raise KeyError, "No such key defined in environment: %s" % key
# Restore the environment layer from backupenv and regenerate incremental
# variables.  keeping_pkg controls whether per-package ("pkg") settings are
# cleared (the guard line between 1318 and 1321 is elided in this dump).
1310 def reset(self,keeping_pkg=0,use_cache=1):
1311 "reset environment to original settings"
# Drop any keys introduced since the backup was taken.
1312 for x in self.configlist[-1].keys():
1313 if x not in self.backupenv.keys():
1314 del self.configlist[-1][x]
1316 self.configdict["env"].update(self.backupenv)
1318 self.modifiedkeys = []
1321 self.configdict["pkg"].clear()
1322 self.regenerate(use_cache=use_cache)
# Populate configdict["pkg"] from a build-info directory: records the saved
# environment file path and reads each metadata file whose name is entirely
# uppercase letters (USE, CFLAGS, ...).  USE is prefixed with "-*" so only
# the recorded flags take effect.
1324 def load_infodir(self,infodir):
1325 if self.configdict.has_key("pkg"):
1326 for x in self.configdict["pkg"].keys():
1327 del self.configdict["pkg"][x]
1329 writemsg("No pkg setup for settings instance?\n")
1332 if os.path.exists(infodir):
1333 if os.path.exists(infodir+"/environment"):
1334 self.configdict["pkg"]["PORT_ENV_FILE"] = infodir+"/environment"
1336 myre = re.compile('^[A-Z]+$')
1337 for filename in listdir(infodir,filesonly=1,EmptyOnError=1):
1338 if myre.match(filename):
1340 mydata = string.strip(open(infodir+"/"+filename).read())
# Values of 2 KiB or more are skipped -- presumably a sanity cap; confirm.
1341 if len(mydata)<2048:
1342 if filename == "USE":
1343 self.configdict["pkg"][filename] = "-* "+mydata
1345 self.configdict["pkg"][filename] = mydata
1346 except SystemExit, e:
1349 writemsg("!!! Unable to read file: %s\n" % infodir+"/"+filename)
# Make mycpv (cat/pkg-version) the "current" package: look up matching
# per-package USE flags (pusedict, from package.use), store them in the
# "pkg" layer, and reset so the incremental USE is recomputed.
1354 def setcpv(self,mycpv,use_cache=1):
1357 cp = dep_getkey(mycpv)
1359 if self.pusedict.has_key(cp):
1360 self.pusekey = best_match_to_list(self.mycpv, self.pusedict[cp].keys())
1362 newpuse = string.join(self.pusedict[cp][self.pusekey])
# Early-out when the effective per-package USE is unchanged (the branch
# body is not visible in this dump).
1363 if newpuse == self.puse:
1366 self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
1367 self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE
1368 self.reset(keeping_pkg=1,use_cache=use_cache)
# Register the virtuals PROVIDEd by a (newly installed) package in
# self.treeVirtuals, then recompile self.virtuals so lookups see them.
1370 def setinst(self,mycpv,mydbapi):
1371 # Grab the virtuals this package provides and add them into the tree virtuals.
1372 provides = mydbapi.aux_get(mycpv, ["PROVIDE"])[0]
1373 if isinstance(mydbapi, portdbapi):
# portdb source: resolve USE-conditional PROVIDE entries against the
# package's USE flags before flattening to a plain list.
1376 myuse = mydbapi.aux_get(mycpv, ["USE"])[0]
1377 virts = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(provides), uselist=myuse.split()))
1379 cp = dep_getkey(mycpv)
# The loop header over virts is elided in this dump; each entry is reduced
# to its category/pkg key before being recorded.
1381 virt = dep_getkey(virt)
1382 if not self.treeVirtuals.has_key(virt):
1383 self.treeVirtuals[virt] = []
1384 # XXX: Is this bad? -- It's a permanent modification
1385 if cp not in self.treeVirtuals[virt]:
1386 self.treeVirtuals[virt].append(cp)
1388 self.virtuals = self.__getvirtuals_compile()
# Recompute all incremental variables (USE and portage_const.INCREMENTALS)
# by stacking every config layer: handles "-*" reset, "-flag" removal,
# USE_EXPAND expansion (var_value tokens), usemask filtering, and forces
# ARCH to the front so "-*" in the environment cannot drop it.
# A reentrancy flag guards against autouse() recursion.
1391 def regenerate(self,useonly=0,use_cache=1):
1392 global usesplit,profiledir
1394 if self.already_in_regenerate:
1395 # XXX: THIS REALLY NEEDS TO GET FIXED. autouse() loops.
1396 writemsg("!!! Looping in regenerate.\n",1)
1399 self.already_in_regenerate = 1
# useonly restricts the recompute to USE alone; otherwise all INCREMENTALS.
1402 myincrementals=["USE"]
1404 myincrementals=portage_const.INCREMENTALS
1405 for mykey in myincrementals:
1408 # XXX Global usage of db... Needs to go away somehow.
1409 if db.has_key(root) and db[root].has_key("vartree"):
1410 self.configdict["auto"]["USE"]=autouse(db[root]["vartree"],use_cache=use_cache)
1412 self.configdict["auto"]["USE"]=""
1414 mydbs=self.configlist[:-1]
# Per-layer flag stacking (the loop over mydbs and the myflags accumulator
# setup are elided in this dump).
1418 if not curdb.has_key(mykey):
1420 #variables are already expanded
1421 mysplit=curdb[mykey].split()
1425 # "-*" is a special "minus" var that means "unset all settings".
1426 # so USE="-* gnome" will have *just* gnome enabled.
1431 # Not legal. People assume too much. Complain.
1432 writemsg(red("USE flags should not start with a '+': %s\n" % x))
1436 if (x[1:] in myflags):
1438 del myflags[myflags.index(x[1:])]
1441 # We got here, so add it now.
1442 if x not in myflags:
1446 #store setting in last element of configlist, the original environment:
1447 self.configlist[-1][mykey]=string.join(myflags," ")
1450 #cache split-up USE var in a global
1453 for x in string.split(self.configlist[-1]["USE"]):
1454 if x not in self.usemask:
# USE_EXPAND variables contribute lowercase "var_value" pseudo-flags.
1457 if self.has_key("USE_EXPAND"):
1458 for var in string.split(self["USE_EXPAND"]):
1459 if self.has_key(var):
1460 for x in string.split(self[var]):
1461 mystr = string.lower(var)+"_"+x
1462 if mystr not in usesplit and mystr not in self.usemask:
1463 usesplit.append(mystr)
1465 # Pre-Pend ARCH variable to USE settings so '-*' in env doesn't kill arch.
1466 if self.configdict["defaults"].has_key("ARCH"):
1467 if self.configdict["defaults"]["ARCH"]:
1468 if self.configdict["defaults"]["ARCH"] not in usesplit:
1469 usesplit.insert(0,self.configdict["defaults"]["ARCH"])
1471 self.configlist[-1]["USE"]=string.join(usesplit," ")
1473 self.already_in_regenerate = 0
# Build (or return the cached) virtuals mapping for myroot by layering
# profile "virtuals" files, user ~/portage virtuals (with negatives), and
# virtuals provided by installed packages (vartree), then compiling them
# via __getvirtuals_compile().  Returns early with self.virtuals when the
# cache exists (the guard line is elided in this dump).
1475 def getvirtuals(self, myroot):
1477 return self.virtuals
1481 # This breaks catalyst/portage when setting to a fresh/empty root.
1482 # Virtuals cannot be calculated because there is nothing to work
1483 # from. So the only ROOT prefixed dir should be local configs.
1484 #myvirtdirs = prefix_array(self.profiles,myroot+"/")
1485 myvirtdirs = copy.deepcopy(self.profiles)
1486 while self.user_profile_dir in myvirtdirs:
1487 myvirtdirs.remove(self.user_profile_dir)
1491 # R1: Collapse profile virtuals
1492 # R2: Extract user-negatives.
1493 # R3: Collapse user-virtuals.
1494 # R4: Apply user negatives to all except user settings.
1496 # Order of preference:
1497 # 1. user-declared that are installed
1498 # 3. installed and in profile
1500 # 2. user-declared set
# Profile virtuals, reversed so later (more specific) profiles win.
1503 self.dirVirtuals = [grabdict(os.path.join(x, "virtuals")) for x in myvirtdirs]
1504 self.dirVirtuals.reverse()
1506 if self.user_profile_dir and os.path.exists(self.user_profile_dir+"/virtuals"):
1507 self.userVirtuals = grabdict(self.user_profile_dir+"/virtuals")
1509 # Store all the negatives for later.
1510 for x in self.userVirtuals.keys():
1511 self.negVirtuals[x] = []
1512 for y in self.userVirtuals[x]:
# Only "-"-prefixed entries are negatives; the filter line is elided here.
1514 self.negVirtuals[x].append(y[:])
1516 # Collapse the user virtuals so that we don't deal with negatives.
1517 self.userVirtuals = stack_dictlist([self.userVirtuals],incremental=1)
1519 # Collapse all the profile virtuals including user negations.
1520 self.dirVirtuals = stack_dictlist([self.negVirtuals]+self.dirVirtuals,incremental=1)
1522 # Repoman does not use user or tree virtuals.
1523 if os.environ.get("PORTAGE_CALLER","") != "repoman":
1524 # XXX: vartree does not use virtuals, does user set matter?
1525 temp_vartree = vartree(myroot,self.dirVirtuals,categories=self.categories)
1526 # Reduce the provides into a list by CP.
1527 self.treeVirtuals = map_dictlist_vals(getCPFromCPV,temp_vartree.get_all_provides())
1529 return self.__getvirtuals_compile()
# Merge the collected virtuals layers into the final mapping.  Preference
# stacking (highest first): user+installed, profile+installed, installed,
# user, profile.  The utVirtuals/ptVirtuals initializers are elided from
# this dump.
1531 def __getvirtuals_compile(self):
1532 """Actually generate the virtuals we have collected.
1533 The results are reversed so the list order is left to right.
1534 Given data is [Best,Better,Good] sets of [Good, Better, Best]"""
1536 # Virtuals by profile+tree preferences.
1538 # Virtuals by user+tree preferences.
1541 # If a user virtual is already installed, we preference it.
1542 for x in self.userVirtuals.keys():
1544 if self.treeVirtuals.has_key(x):
1545 for y in self.userVirtuals[x]:
1546 if y in self.treeVirtuals[x]:
1547 utVirtuals[x].append(y)
1548 #print "F:",utVirtuals
1549 #utVirtuals[x].reverse()
1550 #print "R:",utVirtuals
1552 # If a profile virtual is already installed, we preference it.
1553 for x in self.dirVirtuals.keys():
1555 if self.treeVirtuals.has_key(x):
1556 for y in self.dirVirtuals[x]:
1557 if y in self.treeVirtuals[x]:
1558 ptVirtuals[x].append(y)
1560 # UserInstalled, ProfileInstalled, Installed, User, Profile
1561 biglist = [utVirtuals, ptVirtuals, self.treeVirtuals,
1562 self.userVirtuals, self.dirVirtuals]
1564 # We reverse each dictlist so that the order matches everything
1565 # else in portage. [-*, a, b] [b, c, d] ==> [b, a]
1566 for dictlist in biglist:
1567 for key in dictlist:
1568 dictlist[key].reverse()
1570 # User settings and profile settings take precedence over tree.
1571 val = stack_dictlist(biglist,incremental=1)
# Delete mykey from every layer in lookuplist that holds it (the per-layer
# deletion lines are not visible in this dump).
1575 def __delitem__(self,mykey):
1576 for x in self.lookuplist:
# Dictionary-style lookup: scan lookuplist (highest priority first) for
# mykey.  Contains a disabled ("if 0 and ...") URL-escaping branch for
# PORTAGE_BINHOST, and appends " /etc/env.d" to CONFIG_PROTECT_MASK.
1581 def __getitem__(self,mykey):
1583 for x in self.lookuplist:
1585 writemsg("!!! lookuplist is null.\n")
1586 elif x.has_key(mykey):
# "match" is assigned in lines elided from this dump.
1590 if 0 and match and mykey in ["PORTAGE_BINHOST"]:
1591 # These require HTTP Encoding
1594 if urllib.unquote(match) != match:
1595 writemsg("Note: %s already contains escape codes." % (mykey))
1597 match = urllib.quote(match)
1598 except SystemExit, e:
1601 writemsg("Failed to fix %s using urllib, attempting to continue.\n" % (mykey))
1604 elif mykey == "CONFIG_PROTECT_MASK":
1605 match += " /etc/env.d"
# True if any layer in lookuplist defines mykey (the "return 1"/"return 0"
# lines are elided from this dump).
1609 def has_key(self,mykey):
1610 for x in self.lookuplist:
1611 if x.has_key(mykey):
# NOTE(review): line 1617 belongs to the next method (keys(), judging by
# the iteration over lookuplist); its "def" line is missing from this dump.
1617 for x in self.lookuplist:
# Assign into the volatile "env" layer; reset() discards these values.
# Only string values are accepted.
1623 def __setitem__(self,mykey,myvalue):
1624 "set a value; will be thrown away at reset() time"
1625 if type(myvalue) != types.StringType:
1626 raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
1628 self.modifiedkeys += [mykey]
1629 self.configdict["env"][mykey]=myvalue
# environ(): flatten all visible settings into a plain dict suitable for a
# child-process environment, defaulting HOME to BUILD_PREFIX when unset.
# NOTE(review): the "def environ(self):" line itself is missing from this
# dump, as is the mydict initialization.
1632 "return our locally-maintained environment"
1634 for x in self.keys():
1636 if not mydict.has_key("HOME") and mydict.has_key("BUILD_PREFIX"):
1637 writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
1638 mydict["HOME"]=mydict["BUILD_PREFIX"][:]
1643 # XXX This would be to replace getstatusoutput completely.
1644 # XXX Issue: cannot block execution. Deadlock condition.
# Spawn a subprocess, optionally under the sandbox and optionally dropping
# privileges to the portage user/group.  mysettings may be either a plain
# dict (raw env) or a config instance (converted via environ()).
1645 def spawn(mystring,mysettings,debug=0,free=0,droppriv=0,fd_pipes=None,**keywords):
1646 """spawn a subprocess with optional sandbox protection,
1647 depending on whether sandbox is enabled. The "free" argument,
1648 when set to 1, will disable sandboxing. This allows us to
1649 spawn processes that are supposed to modify files outside of the
1650 sandbox. We can't use os.system anymore because it messes up
1651 signal handling. Using spawn allows our Portage signal handler
1654 if type(mysettings) == types.DictType:
1656 keywords["opt_name"]="[ %s ]" % "portage"
1658 check_config_instance(mysettings)
1659 env=mysettings.environ()
1660 keywords["opt_name"]="[%s]" % mysettings["PF"]
# droppriv only survives when userpriv is in FEATURES and RESTRICT does not
# forbid it (both "userpriv" and the negative "nouserpriv" disable it).
1662 # XXX: Negative RESTRICT word
1663 droppriv=(droppriv and ("userpriv" in features) and not \
1664 (("nouserpriv" in string.split(mysettings["RESTRICT"])) or \
1665 ("userpriv" in string.split(mysettings["RESTRICT"]))))
# Only drop privileges when actually running as root (uid == 0).
1667 if droppriv and not uid and portage_gid and portage_uid:
1668 keywords.update({"uid":portage_uid,"gid":portage_gid,"groups":[portage_gid],"umask":002})
# Recompute "free" from FEATURES when not forced by the caller (the guard
# line is elided from this dump).
1671 free=((droppriv and "usersandbox" not in features) or \
1672 (not droppriv and "sandbox" not in features and "usersandbox" not in features))
1675 keywords["opt_name"] += " sandbox"
1676 return portage_exec.spawn_sandbox(mystring,env=env,**keywords)
1678 keywords["opt_name"] += " bash"
1679 return portage_exec.spawn_bash(mystring,env=env,**keywords)
# Download distfiles for myuris into DISTDIR, honoring RESTRICT=mirror/
# fetch/primaryuri, GENTOO_MIRRORS, custom/third-party mirror definitions,
# per-protocol FETCHCOMMAND/RESUMECOMMAND, distfile locking, digest-based
# resume/verify, and SELinux context switching.  Heavily elided in this
# dump (else:/try:/return lines missing) -- comments below describe only
# the visible logic.
1683 def fetch(myuris, mysettings, listonly=0, fetchonly=0, locks_in_subdir=".locks",use_locks=1, try_mirrors=1):
1684 "fetch files. Will use digest file if available."
1686 # 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
1687 if ("mirror" in mysettings["RESTRICT"].split()) or \
1688 ("nomirror" in mysettings["RESTRICT"].split()):
1689 if ("mirror" in features) and ("lmirror" not in features):
1690 # lmirror should allow you to bypass mirror restrictions.
1691 # XXX: This is not a good thing, and is temporary at best.
1692 print ">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."
1695 global thirdpartymirrors
1697 check_config_instance(mysettings)
1699 custommirrors=grabdict(CUSTOM_MIRRORS_FILE,recursive=1)
# Locking is disabled for listonly runs or when distlocks is not enabled.
1703 if listonly or ("distlocks" not in features):
1707 if "skiprocheck" in features:
1710 if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
1712 writemsg(red("!!! You are fetching to a read-only filesystem, you should turn locking off"));
1713 writemsg("!!! This can be done by adding -distlocks to FEATURES in /etc/make.conf");
1716 # local mirrors are always added
1717 if custommirrors.has_key("local"):
1718 mymirrors += custommirrors["local"]
1720 if ("nomirror" in mysettings["RESTRICT"].split()) or \
1721 ("mirror" in mysettings["RESTRICT"].split()):
1722 # We don't add any mirrors.
1726 mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
# Load the digest file (size + checksums) for resume/verify decisions.
1729 digestfn = mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
1730 if os.path.exists(digestfn):
1731 mydigests = digestParseFile(digestfn)
# Mirror entries that are absolute paths are filesystem mirrors.
1734 for x in range(len(mymirrors)-1,-1,-1):
1735 if mymirrors[x] and mymirrors[x][0]=='/':
1736 fsmirrors += [mymirrors[x]]
# First pass: satisfy files from local filesystem mirrors by copying.
1739 for myuri in myuris:
1740 myfile=os.path.basename(myuri)
1742 destdir = mysettings["DISTDIR"]+"/"
1743 if not os.path.exists(destdir+myfile):
1744 for mydir in fsmirrors:
1745 if os.path.exists(mydir+"/"+myfile):
1746 writemsg(_("Local mirror has file: %(file)s\n" % {"file":myfile}))
1747 shutil.copyfile(mydir+"/"+myfile,destdir+"/"+myfile)
1749 except (OSError,IOError),e:
1750 # file does not exist
1751 writemsg(_("!!! %(file)s not found in %(dir)s\n") % {"file":myfile, "dir":mysettings["DISTDIR"]})
1754 if "fetch" in mysettings["RESTRICT"].split():
1755 # fetch is restricted. Ensure all files have already been downloaded; otherwise,
1756 # print message and exit.
1758 for myuri in myuris:
1759 myfile=os.path.basename(myuri)
1761 mystat=os.stat(mysettings["DISTDIR"]+"/"+myfile)
1762 except (OSError,IOError),e:
1763 # file does not exist
1764 writemsg(_("!!! %(file)s not found in %(dir)s\n") % {"file":myfile, "dir":mysettings["DISTDIR"]})
1768 print "!!!",mysettings["CATEGORY"]+"/"+mysettings["PF"],"has fetch restriction turned on."
1769 print "!!! This probably means that this ebuild's files must be downloaded"
1770 print "!!! manually. See the comments in the ebuild for more information."
# Give the ebuild a chance to print its own instructions via pkg_nofetch.
1772 spawn(EBUILD_SH_BINARY+" nofetch",mysettings)
# Build filedict: filename -> ordered list of candidate URIs.
1775 locations=mymirrors[:]
1777 primaryuri_indexes={}
1778 for myuri in myuris:
1779 myfile=os.path.basename(myuri)
1780 if not filedict.has_key(myfile):
1782 for y in range(0,len(locations)):
1783 filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
# mirror:// URIs expand to custom mirrors first, then shuffled official
# third-party mirrors.
1784 if myuri[:9]=="mirror://":
1785 eidx = myuri.find("/", 9)
1787 mirrorname = myuri[9:eidx]
1789 # Try user-defined mirrors first
1790 if custommirrors.has_key(mirrorname):
1791 for cmirr in custommirrors[mirrorname]:
1792 filedict[myfile].append(cmirr+"/"+myuri[eidx+1:])
1793 # remove the mirrors we tried from the list of official mirrors
1794 if cmirr.strip() in thirdpartymirrors[mirrorname]:
1795 thirdpartymirrors[mirrorname].remove(cmirr)
1796 # now try the official mirrors
1797 if thirdpartymirrors.has_key(mirrorname):
1799 shuffle(thirdpartymirrors[mirrorname])
1800 except SystemExit, e:
1803 writemsg(red("!!! YOU HAVE A BROKEN PYTHON/GLIBC.\n"))
1804 writemsg( "!!! You are most likely on a pentium4 box and have specified -march=pentium4\n")
1805 writemsg( "!!! or -fpmath=sse2. GCC was generating invalid sse2 instructions in versions\n")
1806 writemsg( "!!! prior to 3.2.3. Please merge the latest gcc or rebuid python with either\n")
1807 writemsg( "!!! -march=pentium3 or set -mno-sse2 in your cflags.\n\n\n")
1810 for locmirr in thirdpartymirrors[mirrorname]:
1811 filedict[myfile].append(locmirr+"/"+myuri[eidx+1:])
1813 if not filedict[myfile]:
1814 writemsg("No known mirror by the name: %s\n" % (mirrorname))
1816 writemsg("Invalid mirror definition in SRC_URI:\n")
1817 writemsg(" %s\n" % (myuri))
# Non-mirror URIs: primaryuri restriction puts the source site first.
1819 if "primaryuri" in mysettings["RESTRICT"].split():
1820 # Use the source site first.
1821 if primaryuri_indexes.has_key(myfile):
1822 primaryuri_indexes[myfile] += 1
1824 primaryuri_indexes[myfile] = 0
1825 filedict[myfile].insert(primaryuri_indexes[myfile], myuri)
1827 filedict[myfile].append(myuri)
1829 missingSourceHost = False
1830 for myfile in filedict.keys(): # Gives a list, not just the first one
1831 if not filedict[myfile]:
1832 writemsg("Warning: No mirrors available for file '%s'\n" % (myfile))
1833 missingSourceHost = True
1834 if missingSourceHost:
1836 del missingSourceHost
1839 if not os.access(mysettings["DISTDIR"]+"/",os.W_OK):
1841 print "!!! No write access to %s" % mysettings["DISTDIR"]+"/"
# Local helper: best-effort group-permission fixup on DISTDIR paths.
1844 def distdir_perms(filename):
1846 portage_util.apply_permissions(filename, gid=portage_gid, mode=0775)
1848 if oe.errno == errno.EPERM:
1849 writemsg("!!! Unable to apply group permissions to '%s'. Non-root users may experience issues.\n"
1853 distdir_perms(mysettings["DISTDIR"])
1854 if use_locks and locks_in_subdir:
1855 distlocks_subdir = os.path.join(mysettings["DISTDIR"], locks_in_subdir)
1857 distdir_perms(distlocks_subdir)
1859 if oe.errno == errno.ENOENT:
1860 os.mkdir(distlocks_subdir)
1861 distdir_perms(distlocks_subdir)
1864 if not os.access(distlocks_subdir, os.W_OK):
1865 writemsg("!!! No write access to write to %s. Aborting.\n" % distlocks_subdir)
1867 del distlocks_subdir
# Main per-file download loop: lock, then try each candidate URI in order.
1870 for myfile in filedict.keys():
1876 if use_locks and can_fetch:
1878 file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+locks_in_subdir+"/"+myfile,wantnewlockfile=1)
1880 file_lock = portage_locks.lockfile(mysettings["DISTDIR"]+"/"+myfile,wantnewlockfile=1)
1882 for loc in filedict[myfile]:
1886 # allow different fetchcommands per protocol
1887 protocol = loc[0:loc.find("://")]
1888 if mysettings.has_key("FETCHCOMMAND_"+protocol.upper()):
1889 fetchcommand=mysettings["FETCHCOMMAND_"+protocol.upper()]
1891 fetchcommand=mysettings["FETCHCOMMAND"]
1892 if mysettings.has_key("RESUMECOMMAND_"+protocol.upper()):
1893 resumecommand=mysettings["RESUMECOMMAND_"+protocol.upper()]
1895 resumecommand=mysettings["RESUMECOMMAND"]
1897 fetchcommand=string.replace(fetchcommand,"${DISTDIR}",mysettings["DISTDIR"])
1898 resumecommand=string.replace(resumecommand,"${DISTDIR}",mysettings["DISTDIR"])
# Inspect any already-present file to decide: resume, skip, or refetch.
1901 mystat=os.stat(mysettings["DISTDIR"]+"/"+myfile)
1902 if mydigests.has_key(myfile):
1903 #if we have the digest file, we know the final size and can resume the download.
1904 if mystat[stat.ST_SIZE]<mydigests[myfile]["size"]:
1907 #we already have it downloaded, skip.
1908 #if our file is bigger than the recorded size, digestcheck should catch it.
1912 # Verify checksums at each fetch for fetchonly.
1913 verified_ok,reason = portage_checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
1916 writemsg("!!! Previously fetched file: "+str(myfile)+"\n")
1917 writemsg("!!! Reason: "+reason[0]+"\n")
1918 writemsg("!!! Got: %s\n!!! Expected: %s\n" % (reason[0], reason[1]))
1919 writemsg("Refetching...\n\n")
1920 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
1923 for x_key in mydigests[myfile].keys():
1924 writemsg(">>> Previously fetched file: "+str(myfile)+" "+x_key+" ;-)\n")
1926 break #No need to keep looking for this file, we have it!
1928 #we don't have the digest file, but the file exists. Assume it is fully downloaded.
1930 except (OSError,IOError),e:
1931 writemsg("An exception was caught(1)...\nFailing the download: %s.\n" % (str(e)),1)
1937 writemsg("!!! File %s isn't fetched but unable to get it.\n" % myfile)
1939 writemsg("!!! File %s isn't fully fetched, but unable to complete it\n" % myfile)
1944 # check if we can actually write to the directory/existing file.
1945 if fetched!=2 and os.path.exists(mysettings["DISTDIR"]+"/"+myfile) != \
1946 os.access(mysettings["DISTDIR"]+"/"+myfile, os.W_OK) and not fetch_to_ro:
1947 writemsg(red("***")+" Lack write access to %s, failing fetch\n" % str(mysettings["DISTDIR"]+"/"+myfile))
1951 #we either need to resume or start the download
1952 #you can't use "continue" when you're inside a "try" block
1955 writemsg(">>> Resuming download...\n")
1956 locfetch=resumecommand
1959 locfetch=fetchcommand
1960 writemsg(">>> Downloading "+str(loc)+"\n")
1961 myfetch=string.replace(locfetch,"${URI}",loc)
1962 myfetch=string.replace(myfetch,"${FILE}",myfile)
# Under SELinux, switch to the fetch execution context for the child.
1965 con=selinux.getcontext()
1966 con=string.replace(con,mysettings["PORTAGE_T"],mysettings["PORTAGE_FETCH_T"])
1967 selinux.setexec(con)
1968 myret=spawn(myfetch,mysettings,free=1, droppriv=("userfetch" in mysettings.features))
1969 selinux.setexec(None)
1971 myret=spawn(myfetch,mysettings,free=1, droppriv=("userfetch" in mysettings.features))
1973 #if root, -always- set the perms.
1974 if os.path.exists(mysettings["DISTDIR"]+"/"+myfile) and (fetched != 1 or os.getuid() == 0) \
1975 and os.access(mysettings["DISTDIR"]+"/",os.W_OK):
1976 if os.stat(mysettings["DISTDIR"]+"/"+myfile).st_gid != portage_gid:
1978 os.chown(mysettings["DISTDIR"]+"/"+myfile,-1,portage_gid)
1979 except SystemExit, e:
1982 portage_util.writemsg("chown failed on distfile: " + str(myfile))
1983 os.chmod(mysettings["DISTDIR"]+"/"+myfile,0664)
# Post-download validation against the digest: undersized files may be
# HTML 404 pages (deleted); correct-size files are checksum-verified now.
1985 if mydigests!=None and mydigests.has_key(myfile):
1987 mystat=os.stat(mysettings["DISTDIR"]+"/"+myfile)
1988 # no exception? file exists. let digestcheck() report
1989 # appropriately for size or checksum errors
1990 if (mystat[stat.ST_SIZE]<mydigests[myfile]["size"]):
1991 # Fetch failed... Try the next one... Kill 404 files though.
1992 if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
1993 html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
1995 if html404.search(open(mysettings["DISTDIR"]+"/"+myfile).read()):
1997 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
1998 writemsg(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n")
1999 except SystemExit, e:
2003 except SystemExit, e:
2012 # File is the correct size--check the checksums for the fetched
2013 # file NOW, for those users who don't have a stable/continuous
2014 # net connection. This way we have a chance to try to download
2015 # from another mirror...
2016 verified_ok,reason = portage_checksum.verify_all(mysettings["DISTDIR"]+"/"+myfile, mydigests[myfile])
2019 writemsg("!!! Fetched file: "+str(myfile)+" VERIFY FAILED!\n")
2020 writemsg("!!! Reason: "+reason[0]+"\n")
2021 writemsg("!!! Got: %s\n!!! Expected: %s\n" % (reason[0], reason[1]))
2022 writemsg("Removing corrupt distfile...\n")
2023 os.unlink(mysettings["DISTDIR"]+"/"+myfile)
2026 for x_key in mydigests[myfile].keys():
2027 writemsg(">>> "+str(myfile)+" "+x_key+" ;-)\n")
2030 except (OSError,IOError),e:
2031 writemsg("An exception was caught(2)...\nFailing the download: %s.\n" % (str(e)),1)
2037 elif mydigests!=None:
2038 writemsg("No digest file available and download failed.\n\n")
# Always release the per-file lock before moving to the next file.
2040 if use_locks and file_lock:
2041 portage_locks.unlockfile(file_lock)
2045 if (fetched!=2) and not listonly:
2046 writemsg("!!! Couldn't download "+str(myfile)+". Aborting.\n")
# NOTE(review): mutable default argument oldDigest={} is shared across
# calls -- it appears to be read-only here, but prefer oldDigest=None with
# an in-body default; verify against the full source before changing.
2051 def digestCreate(myfiles,basedir,oldDigest={}):
2052 """Takes a list of files and the directory they are in and returns the
2053 dict of dict[filename][CHECKSUM_KEY] = hash
2054 returns None on error."""
2058 myfile=os.path.normpath(basedir+"///"+x)
2059 if os.path.exists(myfile):
2060 if not os.access(myfile, os.R_OK):
2061 print "!!! Given file does not appear to be readable. Does it exist?"
2062 print "!!! File:",myfile
# Present and readable: compute all Manifest-1 checksums plus size.
2064 mydigests[x] = portage_checksum.perform_multiple_checksums(myfile, hashes=portage_const.MANIFEST1_HASH_FUNCTIONS)
2065 mysize = os.stat(myfile)[stat.ST_SIZE]
# Missing file but present in oldDigest: carry the old entry forward
# (the branch keywords are elided in this dump).
2068 # DeepCopy because we might not have a unique reference.
2069 mydigests[x] = copy.deepcopy(oldDigest[x])
2070 mysize = copy.deepcopy(oldDigest[x]["size"])
2072 print "!!! We have a source URI, but no file..."
2073 print "!!! File:",myfile
# Guard against a size disagreement between old and new data.
2076 if mydigests[x].has_key("size") and (mydigests[x]["size"] != mysize):
2077 raise portage_exception.DigestException, "Size mismatch during checksums"
2078 mydigests[x]["size"] = copy.deepcopy(mysize)
# Serialize a digest dict into the text lines of a digest/Manifest file:
# one line per (file, checksum-key) pair, ending with the file size.
# The line-prefix construction (key + hash) is elided from this dump.
2081 def digestCreateLines(filelist, mydict):
2083 mydigests = copy.deepcopy(mydict)
2084 for myarchive in filelist:
2085 mysize = mydigests[myarchive]["size"]
2086 if len(mydigests[myarchive]) == 0:
2087 raise portage_exception.DigestException, "No generate digest for '%(file)s'" % {"file":myarchive}
2088 for sumName in mydigests[myarchive].keys():
# Skip non-checksum entries (e.g. the "size" key itself).
2089 if sumName not in portage_checksum.get_valid_checksum_keys():
2091 mysum = mydigests[myarchive][sumName]
2095 myline += " "+myarchive
2096 myline += " "+str(mysize)
2097 mylines.append(myline)
# Generate the distfile digest file and the package Manifest, optionally
# auto-adding both to CVS when FEATURES=cvs,autoaddcvs.  manifestonly
# skips digest regeneration.
2100 def digestgen(myarchives,mysettings,overwrite=1,manifestonly=0):
2101 """generates digest file if missing. Assumes all files are available. If
2102 overwrite=0, the digest will only be created if it doesn't already exist."""
2105 basedir=mysettings["DISTDIR"]+"/"
2106 digestfn=mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
2108 # portage files -- p(ortagefiles)basedir
2109 pbasedir=mysettings["O"]+"/"
2110 manifestfn=pbasedir+"Manifest"
2112 if not manifestonly:
2113 if not os.path.isdir(mysettings["FILESDIR"]):
2114 os.makedirs(mysettings["FILESDIR"])
2115 mycvstree=cvstree.getentries(pbasedir, recursive=1)
2117 if ("cvs" in features) and os.path.exists(pbasedir+"/CVS"):
2118 if not cvstree.isadded(mycvstree,"files"):
2119 if "autoaddcvs" in features:
2120 print ">>> Auto-adding files/ dir to CVS..."
2121 spawn("cd "+pbasedir+"; cvs add files",mysettings,free=1)
2123 print "--- Warning: files/ is not added to cvs."
2125 if (not overwrite) and os.path.exists(digestfn):
2128 print green(">>> Generating the digest file...")
2130 # Track the old digest so we can assume checksums without requiring
2131 # all files to be downloaded. 'Assuming'
2133 if os.path.exists(digestfn):
2134 myolddigest = digestParseFile(digestfn)
2138 mydigests=digestCreate(myarchives, basedir, oldDigest=myolddigest)
2139 except portage_exception.DigestException, s:
2142 if mydigests==None: # There was a problem, exit with an errorcode.
2146 outfile=open(digestfn, "w+")
2147 except SystemExit, e:
2149 except Exception, e:
2150 print "!!! Filesystem error skipping generation. (Read-Only?)"
2153 for x in digestCreateLines(myarchives, mydigests):
2154 outfile.write(x+"\n")
# Best-effort ownership/mode fixup; failures are tolerated (the except
# bodies are elided in this dump).
2157 os.chown(digestfn,os.getuid(),portage_gid)
2158 os.chmod(digestfn,0664)
2159 except SystemExit, e:
2164 print green(">>> Generating the manifest file...")
2165 mypfiles=listdir(pbasedir,recursive=1,filesonly=1,ignorecvs=1,EmptyOnError=1)
2166 mypfiles=cvstree.apply_cvsignore_filter(mypfiles)
# The Manifest must not list itself (removal lines are elided here).
2168 for x in ["Manifest"]:
2172 mydigests=digestCreate(mypfiles, pbasedir)
2173 if mydigests==None: # There was a problem, exit with an errorcode.
2177 outfile=open(manifestfn, "w+")
2178 except SystemExit, e:
2180 except Exception, e:
2181 print "!!! Filesystem error skipping generation. (Read-Only?)"
2184 for x in digestCreateLines(mypfiles, mydigests):
2185 outfile.write(x+"\n")
2188 os.chown(manifestfn,os.getuid(),portage_gid)
2189 os.chmod(manifestfn,0664)
2190 except SystemExit, e:
# CVS bookkeeping: collect any digest/Manifest paths not yet under CVS,
# and either auto-add them or warn.
2195 if "cvs" in features and os.path.exists(pbasedir+"/CVS"):
2196 mycvstree=cvstree.getentries(pbasedir, recursive=1)
2198 if not manifestonly and not cvstree.isadded(mycvstree,digestfn):
2199 if digestfn[:len(pbasedir)]==pbasedir:
2200 myunaddedfiles=digestfn[len(pbasedir):]+" "
2202 myunaddedfiles=digestfn+" "
2203 if not cvstree.isadded(mycvstree,manifestfn[len(pbasedir):]):
2204 if manifestfn[:len(pbasedir)]==pbasedir:
2205 myunaddedfiles+=manifestfn[len(pbasedir):]+" "
2207 myunaddedfiles+=manifestfn
2209 if "autoaddcvs" in features:
2210 print blue(">>> Auto-adding digest file(s) to CVS...")
2211 spawn("cd "+pbasedir+"; cvs add "+myunaddedfiles,mysettings,free=1)
2213 print "--- Warning: digests are not yet added into CVS."
2214 print darkgreen(">>> Computed message digests.")
# Parse a digest/Manifest file into {filename: {checksum_key: hash,
# "size": long}}.  Filenames may contain spaces (everything between the
# hash and the trailing size is re-joined).
2219 def digestParseFile(myfilename):
2220 """(filename) -- Parses a given file for entries matching:
2221 <checksumkey> <checksum_hex_string> <filename> <filesize>
2222 Ignores lines that don't start with a valid checksum identifier
2223 and returns a dict with the filenames as keys and {checksumkey:checksum}
2226 if not os.path.exists(myfilename):
2228 mylines = portage_util.grabfile(myfilename, compat_level=1)
2232 myline=string.split(x)
# Skip lines whose first token is not a known checksum key.
2236 if myline[0] not in portage_checksum.get_valid_checksum_keys():
2238 mykey = myline.pop(0)
2239 myhash = myline.pop(0)
2240 mysize = long(myline.pop())
2241 myfn = string.join(myline, " ")
2242 if myfn not in mydigests:
2243 mydigests[myfn] = {}
2244 mydigests[myfn][mykey] = myhash
# A file listed twice with differing sizes is a hard error.
2245 if "size" in mydigests[myfn]:
2246 if mydigests[myfn]["size"] != mysize:
2247 raise portage_exception.DigestException, "Conflicting sizes in digest: %(filename)s" % {"filename":myfilename}
2249 mydigests[myfn]["size"] = mysize
# Verify each file in myfiles against mydigests under basedir, printing
# user guidance for missing entries/files and checksum mismatches.  The
# failure "return 0" lines and the loop header are elided in this dump.
2252 # XXXX strict was added here to fix a missing name error.
2253 # XXXX It's used below, but we're not paying attention to how we get it?
2254 def digestCheckFiles(myfiles, mydigests, basedir, note="", strict=0):
2255 """(fileslist, digestdict, basedir) -- Takes a list of files and a dict
2256 of their digests and checks the digests against the indicated files in
2257 the basedir given. Returns 1 only if all files exist and match the checksums.
2260 if not mydigests.has_key(x):
2262 print red("!!! No message digest entry found for file \""+x+".\"")
2263 print "!!! Most likely a temporary problem. Try 'emerge sync' again later."
2264 print "!!! If you are certain of the authenticity of the file then you may type"
2265 print "!!! the following to generate a new digest:"
2266 print "!!! ebuild /usr/portage/category/package/package-version.ebuild digest"
2268 myfile=os.path.normpath(basedir+"/"+x)
2269 if not os.path.exists(myfile):
2271 print "!!! File does not exist:",myfile
2275 ok,reason = portage_checksum.verify_all(myfile,mydigests[x])
2278 print red("!!! Digest verification Failed:")
2279 print red("!!!")+" "+str(os.path.realpath(myfile))
2280 print red("!!! Reason: ")+reason[0]
2281 print red("!!! Got: ")+str(reason[1])
2282 print red("!!! Expected: ")+str(reason[2])
2286 writemsg_stdout(">>> checksums "+note+" ;-) %s\n" % x)
# digestcheck: top-level checksum verification for both SRC_URI distfiles and
# the portage-controlled files listed in the package Manifest.
# NOTE(review): this listing is elided (line numbers jump); several return
# statements and except-clauses are not visible here.
2290 def digestcheck(myfiles, mysettings, strict=0, justmanifest=0):
2291 """Verifies checksums. Assumes all files have been downloaded."""
# Distfiles live under DISTDIR; the per-package digest lives under FILESDIR.
2293 basedir=mysettings["DISTDIR"]+"/"
2294 digestfn=mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
2296 # portage files -- p(ortagefiles)basedir
2297 pbasedir=mysettings["O"]+"/"
2298 manifestfn=pbasedir+"Manifest"
# With FEATURES=digest, a missing digest/Manifest is auto-regenerated
# instead of being treated as an error.
2300 if not (os.path.exists(digestfn) and os.path.exists(manifestfn)):
2301 if "digest" in features:
2302 print ">>> No package digest/Manifest file found."
2303 print ">>> \"digest\" mode enabled; auto-generating new digest..."
2304 return digestgen(myfiles,mysettings)
2306 if not os.path.exists(manifestfn):
2308 print red("!!! No package manifest found:"),manifestfn
2311 print "--- No package manifest found:",manifestfn
2312 if not os.path.exists(digestfn):
2313 print "!!! No package digest file found:",digestfn
2314 print "!!! Type \"ebuild foo.ebuild digest\" to generate it."
2317 mydigests=digestParseFile(digestfn)
2319 print "!!! Failed to parse digest file:",digestfn
2321 mymdigests=digestParseFile(manifestfn)
2322 if "strict" not in features:
2323 # XXX: Remove this when manifests become mainstream.
2325 elif mymdigests==None:
2326 print "!!! Failed to parse manifest file:",manifestfn
2330 # Check the portage-related files here.
2331 mymfiles=listdir(pbasedir,recursive=1,filesonly=1,ignorecvs=1,EmptyOnError=1)
2332 manifest_files = mymdigests.keys()
2333 # Files unrelated to the build process are ignored for verification by default
2334 for x in ["Manifest", "ChangeLog", "metadata.xml"]:
2335 while x in mymfiles:
2337 while x in manifest_files:
2338 manifest_files.remove(x)
# Walk backwards so removal during iteration is safe.
2339 for x in range(len(mymfiles)-1,-1,-1):
2340 if mymfiles[x] in manifest_files:
2341 manifest_files.remove(mymfiles[x])
2342 elif len(cvstree.apply_cvsignore_filter([mymfiles[x]]))==0:
2343 # we filter here, rather then above; manifest might have files flagged by the filter.
2344 # if something is returned, then it's flagged as a bad file
2345 # manifest doesn't know about it, so we kill it here.
# An on-disk file not named in the manifest is treated as a security violation.
2348 print red("!!! Security Violation: A file exists that is not in the manifest.")
2349 print "!!! File:",mymfiles[x]
# Under strict, entries left in manifest_files are listed-but-missing files.
2352 if manifest_files and strict:
2353 print red("!!! Files listed in the manifest do not exist!")
2354 for x in manifest_files:
2358 if not digestCheckFiles(mymfiles, mymdigests, pbasedir, note="files ", strict=strict):
2360 print ">>> Please ensure you have sync'd properly. Please try '"+bold("emerge sync")+"' and"
2361 print ">>> optionally examine the file(s) for corruption. "+bold("A sync will fix most cases.")
2365 print "--- Manifest check failed. 'strict' not enabled; ignoring."
2371 # Just return the status, as it's the last check.
2372 return digestCheckFiles(myfiles, mydigests, basedir, note="src_uri", strict=strict)
# NOTE(review): listing is elided (line numbers jump); the early-return on a
# failed dependency phase and the function's final return are not visible.
2374 # parse actionmap to spawn ebuild with the appropriate args
2375 def spawnebuild(mydo,actionmap,mysettings,debug,alwaysdep=0,logfile=None):
# Recursively run the prerequisite phase first (unless FEATURES=noauto
# suppresses implicit dependencies and alwaysdep was not forced).
2376 if alwaysdep or ("noauto" not in features):
2377 # process dependency first
2378 if "dep" in actionmap[mydo].keys():
2379 retval=spawnebuild(actionmap[mydo]["dep"],actionmap,mysettings,debug,alwaysdep=alwaysdep,logfile=logfile)
2383 mycommand = EBUILD_SH_BINARY + " "
# With SELinux + FEATURES=sesandbox, run build phases in the sandboxed
# security context, then restore the exec context afterwards.
2384 if selinux_enabled and ("sesandbox" in features) and (mydo in ["unpack","compile","test","install"]):
2385 con=selinux.getcontext()
2386 con=string.replace(con,mysettings["PORTAGE_T"],mysettings["PORTAGE_SANDBOX_T"])
2387 selinux.setexec(con)
# args[0]/args[1] come from the actionmap: (sandbox-free?, drop-privileges?).
2388 retval=spawn(mycommand + mydo,mysettings,debug=debug,
2389 free=actionmap[mydo]["args"][0],
2390 droppriv=actionmap[mydo]["args"][1],logfile=logfile)
2391 selinux.setexec(None)
# Non-SELinux path: same spawn, no context switching.
2393 retval=spawn(mycommand + mydo,mysettings, debug=debug,
2394 free=actionmap[mydo]["args"][0],
2395 droppriv=actionmap[mydo]["args"][1],logfile=logfile)
2398 # chunked out deps for each phase, so that ebuild binary can use it
2399 # to collapse targets down.
# Maps each ebuild phase to the phase(s) that must run before it;
# merged into the actionmap in doebuild().
# NOTE(review): the dict's opening line and some entries are elided here.
2403 "unpack": ["setup"],
2404 "compile":["unpack"],
2405 "test": ["compile"],
2408 "package":["install"],
def eapi_is_supported(eapi):
	"""Return True when the given EAPI string matches the EAPI this
	version of portage implements (portage_const.EAPI), ignoring
	surrounding whitespace."""
	offered = str(eapi).strip()
	supported = str(portage_const.EAPI).strip()
	return offered == supported
# doebuild: run one ebuild phase (fetch/unpack/compile/install/merge/...) for
# the given ebuild file, setting up the whole build environment first.
# NOTE(review): this listing is massively elided (original line numbers jump);
# try/except scaffolding, else-branches and many returns are not visible, so
# the comments below only describe the code that is shown.
2416 def doebuild(myebuild,mydo,myroot,mysettings,debug=0,listonly=0,fetchonly=0,cleanup=0,dbkey=None,use_cache=1,fetchall=0,tree=None):
2417 global db, actionmap_deps
2420 dump_traceback("Warning: tree not specified to doebuild")
# Derive category/package-version identifiers from the ebuild path.
2423 ebuild_path = os.path.abspath(myebuild)
2424 pkg_dir = os.path.dirname(ebuild_path)
2426 if mysettings.configdict["pkg"].has_key("CATEGORY"):
2427 cat = mysettings.configdict["pkg"]["CATEGORY"]
2429 cat = os.path.basename(os.path.normpath(pkg_dir+"/.."))
# [:-7] strips the ".ebuild" suffix to get ${PF}.
2430 mypv = os.path.basename(ebuild_path)[:-7]
2431 mycpv = cat+"/"+mypv
2433 mysplit=pkgsplit(mypv,silent=0)
2435 writemsg("!!! Error: PF is null '%s'; exiting.\n" % mypv)
2438 if mydo != "depend":
2439 # XXX: We're doing a little hack here to curtain the gvisible locking
2440 # XXX: that creates a deadlock... Really need to isolate that.
2441 mysettings.reset(use_cache=use_cache)
2442 mysettings.setcpv(mycpv,use_cache=use_cache)
# Reject unknown phase names with the full sorted command list.
2444 validcommands = ["help","clean","prerm","postrm","preinst","postinst",
2445 "config","setup","depend","fetch","digest",
2446 "unpack","compile","test","install","rpm","qmerge","merge",
2447 "package","unmerge", "manifest"]
2449 if mydo not in validcommands:
2450 validcommands.sort()
2451 writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo)
2452 for vcount in range(len(validcommands)):
2455 writemsg(string.ljust(validcommands[vcount], 11))
2459 if not os.path.exists(myebuild):
2460 writemsg("!!! doebuild: "+str(myebuild)+" not found for "+str(mydo)+"\n")
2463 if debug: # Otherwise it overrides emerge's settings.
2464 # We have no other way to set debug... debug can't be passed in
2465 # due to how it's coded... Don't overwrite this so we can use it.
2466 mysettings["PORTAGE_DEBUG"]=str(debug)
# Export the standard ebuild environment variables (ROOT, P, PV, PR, ...).
2468 mysettings["ROOT"] = myroot
2469 mysettings["STARTDIR"] = getcwd()
2471 mysettings["EBUILD"] = ebuild_path
2472 mysettings["O"] = pkg_dir
2473 mysettings["CATEGORY"] = cat
2474 mysettings["FILESDIR"] = pkg_dir+"/files"
2475 mysettings["PF"] = mypv
2477 mysettings["ECLASSDIR"] = mysettings["PORTDIR"]+"/eclass"
2478 mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")
2480 mysettings["PROFILE_PATHS"] = string.join(mysettings.profiles,"\n")+"\n"+CUSTOM_PROFILE_PATH
2481 mysettings["P"] = mysplit[0]+"-"+mysplit[1]
2482 mysettings["PN"] = mysplit[0]
2483 mysettings["PV"] = mysplit[1]
2484 mysettings["PR"] = mysplit[2]
2486 if portage_util.noiselimit < 0:
2487 mysettings["PORTAGE_QUIET"] = "1"
# For non-depend phases, pull INHERITED/RESTRICT metadata from the tree's
# dbapi and pre-reduce RESTRICT against the active USE flags.
2489 if mydo != "depend":
2491 mysettings["INHERITED"], mysettings["RESTRICT"] = db[root][tree].dbapi.aux_get( \
2492 mycpv,["INHERITED","RESTRICT"])
2493 mysettings["PORTAGE_RESTRICT"]=string.join(flatten(portage_dep.use_reduce(portage_dep.paren_reduce( \
2494 mysettings["RESTRICT"]), uselist=mysettings["USE"].split())),' ')
2495 except SystemExit, e:
# Abort early on an EAPI this portage does not implement.
2499 eapi = db[root][tree].dbapi.aux_get(mycpv, ["EAPI"])[0]
2500 if not eapi_is_supported(eapi):
2501 # can't do anything with this.
2502 raise portage_exception.UnsupportedAPIException(mycpv, eapi)
# PVR drops the "-r0" suffix for the default revision.
2504 if mysplit[2] == "r0":
2505 mysettings["PVR"]=mysplit[1]
2507 mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]
2509 mysettings["SLOT"]=""
# Make sure PORTAGE_BIN_PATH is on PATH for the spawned ebuild.sh.
2511 if mysettings.has_key("PATH"):
2512 mysplit=string.split(mysettings["PATH"],":")
2515 if PORTAGE_BIN_PATH not in mysplit:
2516 mysettings["PATH"]=PORTAGE_BIN_PATH+":"+mysettings["PATH"]
# Lay out the build tree under PORTAGE_TMPDIR.
2519 mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
2520 mysettings["HOME"] = mysettings["BUILD_PREFIX"]+"/homedir"
2521 mysettings["PKG_TMPDIR"] = mysettings["PORTAGE_TMPDIR"]+"/portage-pkg"
2522 mysettings["PORTAGE_BUILDDIR"] = mysettings["BUILD_PREFIX"]+"/"+mysettings["PF"]
2524 mysettings["PORTAGE_BASHRC"] = EBUILD_SH_ENV_FILE
2526 #set up KV variable -- DEP SPEEDUP :: Don't waste time. Keep var persistent.
2527 if (mydo!="depend") or not mysettings.has_key("KV"):
2528 mykv,err1=ExtractKernelVersion(root+"usr/src/linux")
2530 # Regular source tree
2531 mysettings["KV"]=mykv
2535 if (mydo!="depend") or not mysettings.has_key("KVERS"):
2537 mysettings["KVERS"]=myso[1]
# "depend" phase: compute metadata only, optionally into dbkey, then return.
2540 # get possible slot information from the deps file
2542 if mysettings.has_key("PORTAGE_DEBUG") and mysettings["PORTAGE_DEBUG"]=="1":
2543 # XXX: This needs to use a FD for saving the output into a file.
2544 # XXX: Set this up through spawn
2546 writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
2548 mysettings["dbkey"] = dbkey
2550 mysettings["dbkey"] = mysettings.depcachedir+"/aux_db_key_temp"
2552 retval = spawn(EBUILD_SH_BINARY+" depend",mysettings)
2556 # Build directory creation isn't required for any of these.
2557 if mydo not in ["fetch","digest","manifest"]:
2559 if not os.path.exists(mysettings["BUILD_PREFIX"]):
2560 os.makedirs(mysettings["BUILD_PREFIX"])
2561 if (os.getuid() == 0):
2562 os.chown(mysettings["BUILD_PREFIX"],portage_uid,portage_gid)
2563 os.chmod(mysettings["BUILD_PREFIX"],00775)
2565 # Should be ok again to set $T, as sandbox does not depend on it
2566 # XXX Bug. no way in hell this is valid for clean handling.
2567 mysettings["T"]=mysettings["PORTAGE_BUILDDIR"]+"/temp"
# Recreate $T from scratch on clean/cleanup runs.
2568 if cleanup or mydo=="clean":
2569 if os.path.exists(mysettings["T"]):
2570 shutil.rmtree(mysettings["T"])
2571 if not os.path.exists(mysettings["T"]):
2572 os.makedirs(mysettings["T"])
2573 if (os.getuid() == 0):
2574 os.chown(mysettings["T"],portage_uid,portage_gid)
2575 os.chmod(mysettings["T"],02770)
2577 logdir = mysettings["T"]+"/logging"
2578 if not os.path.exists(logdir):
2581 os.chown(logdir, portage_uid, portage_gid)
2582 os.chmod(logdir, 0770)
# FEATURES=userpriv: give the build a throwaway HOME owned by portage,
# unless RESTRICT says otherwise.
2584 try: # XXX: negative RESTRICT
2585 if not (("nouserpriv" in string.split(mysettings["PORTAGE_RESTRICT"])) or \
2586 ("userpriv" in string.split(mysettings["PORTAGE_RESTRICT"]))):
2587 if ("userpriv" in features) and (portage_uid and portage_gid):
2589 if os.path.exists(mysettings["HOME"]):
2590 # XXX: Potentially bad, but held down by HOME replacement above.
2591 spawn("rm -Rf "+mysettings["HOME"],mysettings, free=1)
2592 if not os.path.exists(mysettings["HOME"]):
2593 os.makedirs(mysettings["HOME"])
2594 elif ("userpriv" in features):
2595 print "!!! Disabling userpriv from features... Portage UID/GID not valid."
2596 del features[features.index("userpriv")]
2597 except SystemExit, e:
2599 except Exception, e:
2600 print "!!! Couldn't empty HOME:",mysettings["HOME"]
2604 # no reason to check for depend since depend returns above.
2605 if not os.path.exists(mysettings["BUILD_PREFIX"]):
2606 os.makedirs(mysettings["BUILD_PREFIX"])
2607 if (os.getuid() == 0):
2608 os.chown(mysettings["BUILD_PREFIX"],portage_uid,portage_gid)
2609 if not os.path.exists(mysettings["PORTAGE_BUILDDIR"]):
2610 os.makedirs(mysettings["PORTAGE_BUILDDIR"])
2611 if (os.getuid() == 0):
2612 os.chown(mysettings["PORTAGE_BUILDDIR"],portage_uid,portage_gid)
2614 print "!!! File system problem. (ReadOnly? Out of space?)"
2615 print "!!! Perhaps: rm -Rf",mysettings["BUILD_PREFIX"]
2620 if not os.path.exists(mysettings["HOME"]):
2621 os.makedirs(mysettings["HOME"])
2622 if (os.getuid() == 0):
2623 os.chown(mysettings["HOME"],portage_uid,portage_gid)
2624 os.chmod(mysettings["HOME"],02770)
2626 print "!!! File system problem. (ReadOnly? Out of space?)"
2627 print "!!! Failed to create fake home directory in PORTAGE_BUILDDIR"
# FEATURES=ccache: create/repair the shared ccache dir and its permissions
# (group-writable + setgid dirs so the portage group can share it).
2632 if ("ccache" in features):
2633 if (not mysettings.has_key("CCACHE_DIR")) or (mysettings["CCACHE_DIR"]==""):
2634 mysettings["CCACHE_DIR"]=mysettings["PORTAGE_TMPDIR"]+"/ccache"
2635 if not os.path.exists(mysettings["CCACHE_DIR"]):
2636 os.makedirs(mysettings["CCACHE_DIR"])
2637 mystat = os.stat(mysettings["CCACHE_DIR"])
2638 if ("userpriv" in features):
2639 if mystat[stat.ST_UID] != portage_uid or ((mystat[stat.ST_MODE]&02070)!=02070):
2640 writemsg("* Adjusting permissions on ccache in %s\n" % mysettings["CCACHE_DIR"])
2641 spawn("chgrp -R "+str(portage_gid)+" "+mysettings["CCACHE_DIR"], mysettings, free=1)
2642 spawn("chown "+str(portage_uid)+":"+str(portage_gid)+" "+mysettings["CCACHE_DIR"], mysettings, free=1)
2643 spawn("chmod -R ug+rw "+mysettings["CCACHE_DIR"], mysettings, free=1)
2644 spawn("find "+mysettings["CCACHE_DIR"]+" -type d -exec chmod g+xs \{\} \;", mysettings, free=1)
2646 if mystat[stat.ST_UID] != 0 or ((mystat[stat.ST_MODE]&02070)!=02070):
2647 writemsg("* Adjusting permissions on ccache in %s\n" % mysettings["CCACHE_DIR"])
2648 spawn("chgrp -R "+str(portage_gid)+" "+mysettings["CCACHE_DIR"], mysettings, free=1)
2649 spawn("chown 0:"+str(portage_gid)+" "+mysettings["CCACHE_DIR"], mysettings, free=1)
2650 spawn("chmod -R ug+rw "+mysettings["CCACHE_DIR"], mysettings, free=1)
2651 spawn("find "+mysettings["CCACHE_DIR"]+" -type d -exec chmod g+xs \{\} \;", mysettings, free=1)
2653 print "!!! File system problem. (ReadOnly? Out of space?)"
2654 print "!!! Perhaps: rm -Rf",mysettings["BUILD_PREFIX"]
# FEATURES=confcache: shared configure-cache dir; requires root to create,
# otherwise the feature is disabled for this run.
2658 if "confcache" in features:
2659 if not mysettings.has_key("CONFCACHE_DIR"):
2660 mysettings["CONFCACHE_DIR"] = os.path.join(mysettings["PORTAGE_TMPDIR"], "confcache")
2661 if not os.path.exists(mysettings["CONFCACHE_DIR"]):
2662 if not os.getuid() == 0:
2664 features.remove("confcache")
2665 mysettings["FEATURES"] = " ".join(features)
2667 os.makedirs(mysettings["CONFCACHE_DIR"], mode=0775)
2668 os.chown(mysettings["CONFCACHE_DIR"], -1, portage_gid)
2670 st = os.stat(mysettings["CONFCACHE_DIR"])
2671 if not (st.st_mode & 07777) == 0775:
2672 os.chmod(mysettings["CONFCACHE_DIR"], 0775)
2673 if not st.st_gid == portage_gid:
2674 os.chown(mysettings["CONFCACHE_DIR"], -1, portage_gid)
2676 # check again, since it may have been disabled.
2677 if "confcache" in features:
2678 for x in listdir(mysettings["CONFCACHE_DIR"]):
2679 p = os.path.join(mysettings["CONFCACHE_DIR"], x)
2681 if not (st.st_mode & 07777) & 07660 == 0660:
2682 os.chmod(p, (st.st_mode & 0777) | 0660)
2683 if not st.st_gid == portage_gid:
2684 os.chown(p, -1, portage_gid)
2687 print "!!! Failed resetting perms on confcachedir %s" % mysettings["CONFCACHE_DIR"]
2690 # mystat=os.stat(mysettings["CCACHE_DIR"])
2691 # if (mystat[stat.ST_GID]!=portage_gid) or ((mystat[stat.ST_MODE]&02070)!=02070):
2692 # print "*** Adjusting ccache permissions for portage user..."
2693 # os.chown(mysettings["CCACHE_DIR"],portage_uid,portage_gid)
2694 # os.chmod(mysettings["CCACHE_DIR"],02770)
2695 # spawn("chown -R "+str(portage_uid)+":"+str(portage_gid)+" "+mysettings["CCACHE_DIR"],mysettings, free=1)
2696 # spawn("chmod -R g+rw "+mysettings["CCACHE_DIR"],mysettings, free=1)
2697 #except SystemExit, e:
# FEATURES=distcc: create lock/state dirs; on failure, disable distcc.
2702 if "distcc" in features:
2704 if (not mysettings.has_key("DISTCC_DIR")) or (mysettings["DISTCC_DIR"]==""):
2705 mysettings["DISTCC_DIR"]=mysettings["PORTAGE_TMPDIR"]+"/portage/.distcc"
2706 if not os.path.exists(mysettings["DISTCC_DIR"]):
2707 os.makedirs(mysettings["DISTCC_DIR"])
2708 os.chown(mysettings["DISTCC_DIR"],portage_uid,portage_gid)
2709 os.chmod(mysettings["DISTCC_DIR"],02775)
2710 for x in ("/lock", "/state"):
2711 if not os.path.exists(mysettings["DISTCC_DIR"]+x):
2712 os.mkdir(mysettings["DISTCC_DIR"]+x)
2713 os.chown(mysettings["DISTCC_DIR"]+x,portage_uid,portage_gid)
2714 os.chmod(mysettings["DISTCC_DIR"]+x,02775)
2716 writemsg("\n!!! File system problem when setting DISTCC_DIR directory permissions.\n")
2717 writemsg( "!!! DISTCC_DIR="+str(mysettings["DISTCC_DIR"]+"\n"))
2718 writemsg( "!!! "+str(e)+"\n\n")
2720 features.remove("distcc")
2721 mysettings["DISTCC_DIR"]=""
2723 mysettings["WORKDIR"]=mysettings["PORTAGE_BUILDDIR"]+"/work"
2724 mysettings["D"]=mysettings["PORTAGE_BUILDDIR"]+"/image/"
# Optional per-package build logging into PORT_LOGDIR; disabled on any
# permission problem.
2726 if mysettings.has_key("PORT_LOGDIR"):
2727 if not os.access(mysettings["PORT_LOGDIR"],os.F_OK):
2729 os.mkdir(mysettings["PORT_LOGDIR"])
2731 print "!!! Unable to create PORT_LOGDIR"
2733 if os.access(mysettings["PORT_LOGDIR"]+"/",os.W_OK):
2735 perms = os.stat(mysettings["PORT_LOGDIR"])
2736 if perms[stat.ST_UID] != portage_uid or perms[stat.ST_GID] != portage_gid:
2737 os.chown(mysettings["PORT_LOGDIR"],portage_uid,portage_gid)
2738 if stat.S_IMODE(perms[stat.ST_MODE]) != 02770:
2739 os.chmod(mysettings["PORT_LOGDIR"],02770)
2740 if not mysettings.has_key("LOG_PF") or (mysettings["LOG_PF"] != mysettings["PF"]):
2741 mysettings["LOG_PF"]=mysettings["PF"]
2742 mysettings["LOG_COUNTER"]=str(db[myroot]["vartree"].dbapi.get_counter_tick_core("/"))
2743 logfile="%s/%s-%s.log" % (mysettings["PORT_LOGDIR"],mysettings["LOG_COUNTER"],mysettings["LOG_PF"])
2745 mysettings["PORT_LOGDIR"]=""
2746 print "!!! Unable to chown/chmod PORT_LOGDIR. Disabling logging."
2749 print "!!! Cannot create log... No write access / Does not exist"
2750 print "!!! PORT_LOGDIR:",mysettings["PORT_LOGDIR"]
2751 mysettings["PORT_LOGDIR"]=""
2754 return unmerge(mysettings["CATEGORY"],mysettings["PF"],myroot,mysettings)
2756 # if any of these are being called, handle them -- running them out of the sandbox -- and stop now.
2759 if mydo in ["help","clean","setup"]:
2760 return spawn(EBUILD_SH_BINARY+" "+mydo,mysettings,debug=debug,free=1,logfile=logfile)
2761 elif mydo in ["prerm","postrm","preinst","postinst","config"]:
2762 mysettings.load_infodir(pkg_dir)
2763 return spawn(EBUILD_SH_BINARY+" "+mydo,mysettings,debug=debug,free=1,logfile=logfile)
# Compute the fetch lists (A = this-USE archives, AA = all archives).
2766 mysettings["SLOT"],mysettings["RESTRICT"] = db["/"]["porttree"].dbapi.aux_get(mycpv,["SLOT","RESTRICT"])
2767 except (IOError,KeyError):
2768 print red("doebuild():")+" aux_get() error reading "+mycpv+"; aborting."
2771 newuris, alist = db["/"]["porttree"].dbapi.getfetchlist(mycpv,mysettings=mysettings)
2772 alluris, aalist = db["/"]["porttree"].dbapi.getfetchlist(mycpv,mysettings=mysettings,all=1)
2773 mysettings["A"]=string.join(alist," ")
2774 mysettings["AA"]=string.join(aalist," ")
2775 if ("mirror" in features) or fetchall:
2778 elif mydo=="digest":
2781 digestfn=mysettings["FILESDIR"]+"/digest-"+mysettings["PF"]
2782 if os.path.exists(digestfn):
2783 mydigests=digestParseFile(digestfn)
2787 i = checkme.index(x)
# Make sure DISTDIR and its cvs-src subdir exist with sane group perms.
2795 if not os.path.exists(mysettings["DISTDIR"]):
2796 os.makedirs(mysettings["DISTDIR"])
2797 if not os.path.exists(mysettings["DISTDIR"]+"/cvs-src"):
2798 os.makedirs(mysettings["DISTDIR"]+"/cvs-src")
2800 print "!!! File system problem. (Bad Symlink?)"
2801 print "!!! Fetching may fail:",str(e)
2804 mystat=os.stat(mysettings["DISTDIR"]+"/cvs-src")
2805 if ((mystat[stat.ST_GID]!=portage_gid) or ((mystat[stat.ST_MODE]&02770)!=02770)) and not listonly:
2806 print "*** Adjusting cvs-src permissions for portage user..."
2807 os.chown(mysettings["DISTDIR"]+"/cvs-src",0,portage_gid)
2808 os.chmod(mysettings["DISTDIR"]+"/cvs-src",02770)
2809 spawn("chgrp -R "+str(portage_gid)+" "+mysettings["DISTDIR"]+"/cvs-src", free=1)
2810 spawn("chmod -R g+rw "+mysettings["DISTDIR"]+"/cvs-src", free=1)
2811 except SystemExit, e:
2816 # Only try and fetch the files if we are going to need them ... otherwise,
2817 # if user has FEATURES=noauto and they run `ebuild clean unpack compile install`,
2818 # we will try and fetch 4 times :/
2819 need_distfiles = (mydo in ("digest", "fetch", "unpack") or
2820 mydo != "manifest" and "noauto" not in features)
2821 if need_distfiles and not fetch(fetchme, mysettings, listonly=listonly, fetchonly=fetchonly):
2824 # inefficient. improve this logic via making actionmap easily searchable to see if we're in the chain of what
2825 # will be executed, either that or forced N doebuild calls instead of a single set of phase calls.
# For real build phases, give the ebuild a private distdir of symlinks
# into the real DISTDIR (PORTAGE_ACTUAL_DISTDIR keeps the original path).
2826 if (mydo not in ("setup", "clean", "postinst", "preinst", "prerm", "fetch", "digest", "manifest") and
2827 "noauto" not in features) or mydo == "unpack":
2828 # remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
2829 mysettings["PORTAGE_ACTUAL_DISTDIR"] = orig_distdir = mysettings["DISTDIR"]
2830 edpath = mysettings["DISTDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "distdir")
2831 if os.path.exists(edpath):
2833 if os.path.isdir(edpath) and not os.path.islink(edpath):
2834 shutil.rmtree(edpath)
2838 print "!!! Failed reseting ebuild distdir path, " + edpath
2841 os.chown(edpath, -1, portage_gid)
2842 os.chmod(edpath, 0775)
2845 os.symlink(os.path.join(orig_distdir, file), os.path.join(edpath, file))
2847 print "!!! Failed symlinking in '%s' to ebuild distdir" % file
2850 if mydo=="fetch" and listonly:
2853 if "digest" in features:
2854 #generate digest if it doesn't exist.
2856 return (not digestgen(aalist,mysettings,overwrite=1))
2858 digestgen(aalist,mysettings,overwrite=0)
2859 elif mydo=="digest":
2860 #since we are calling "digest" directly, recreate the digest even if it already exists
2861 return (not digestgen(aalist,mysettings,overwrite=1))
2862 if mydo=="manifest":
2863 return (not digestgen(aalist,mysettings,overwrite=1,manifestonly=1))
2865 # See above comment about fetching only when needed
2866 if not digestcheck(checkme, mysettings, ("strict" in features), (mydo not in ["digest","fetch","unpack"] and settings["PORTAGE_CALLER"] == "ebuild" and "noauto" in features)):
2872 #initial dep checks complete; time to process main commands
# Decide whether the build may run without the sandbox (userpriv without
# usersandbox, and RESTRICT does not mention userpriv either way).
2874 nosandbox=(("userpriv" in features) and ("usersandbox" not in features) and \
2875 ("userpriv" not in mysettings["RESTRICT"]) and ("nouserpriv" not in mysettings["RESTRICT"]))
2876 if nosandbox and ("userpriv" not in features or "userpriv" in mysettings["RESTRICT"] or \
2877 "nouserpriv" in mysettings["RESTRICT"]):
2878 nosandbox = ("sandbox" not in features and "usersandbox" not in features)
# actionmap args tuples are (sandbox-free?, drop-privileges?).
2881 "depend": {"args":(0,1)}, # sandbox / portage
2882 "setup": {"args":(1,0)}, # without / root
2883 "unpack": {"args":(0,1)}, # sandbox / portage
2884 "compile":{"args":(nosandbox,1)}, # optional / portage
2885 "test": {"args":(nosandbox,1)}, # optional / portage
2886 "install":{"args":(0,0)}, # sandbox / root
2887 "rpm": {"args":(0,0)}, # sandbox / root
2888 "package":{"args":(0,0)}, # sandbox / root
2891 # merge the deps in so we have again a 'full' actionmap
2892 # be glad when this can die.
2893 for x in actionmap.keys():
2894 if len(actionmap_deps.get(x, [])):
2895 actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
2897 if mydo in actionmap.keys():
2899 for x in ["","/"+mysettings["CATEGORY"],"/All"]:
2900 if not os.path.exists(mysettings["PKGDIR"]+x):
2901 os.makedirs(mysettings["PKGDIR"]+x)
2902 # REBUILD CODE FOR TBZ2 --- XXXX
2903 return spawnebuild(mydo,actionmap,mysettings,debug,logfile=logfile)
2904 elif mydo=="qmerge":
2905 #check to ensure install was run. this *only* pops up when users forget it and are using ebuild
2906 if not os.path.exists(mysettings["PORTAGE_BUILDDIR"]+"/.installed"):
2907 print "!!! mydo=qmerge, but install phase hasn't been ran"
2909 #qmerge is specifically not supposed to do a runtime dep check
2910 return merge(mysettings["CATEGORY"],mysettings["PF"],mysettings["D"],mysettings["PORTAGE_BUILDDIR"]+"/build-info",myroot,mysettings,myebuild=mysettings["EBUILD"],mytree=tree)
2912 retval=spawnebuild("install",actionmap,mysettings,debug,alwaysdep=1,logfile=logfile)
2915 return merge(mysettings["CATEGORY"],mysettings["PF"],mysettings["D"],mysettings["PORTAGE_BUILDDIR"]+"/build-info",myroot,mysettings,myebuild=mysettings["EBUILD"],mytree=tree)
2917 print "!!! Unknown mydo:",mydo
# movefile: atomic move preserving ownership, mode, mtime and (on *BSD)
# file flags, with cross-filesystem and SELinux fallbacks.
# NOTE(review): this listing is elided (line numbers jump); try/else
# scaffolding and several returns are not visible here.
2922 def movefile(src,dest,newmtime=None,sstat=None,mysettings=None):
2923 """moves a file from src to dest, preserving all permissions and attributes; mtime will
2924 be preserved even when moving across filesystems. Returns true on success and false on
2925 failure. Move is atomic."""
2926 #print "movefile("+str(src)+","+str(dest)+","+str(newmtime)+","+str(sstat)+")"
# bsd_chflags path: remember the source's flags so they can be restored.
2933 sflags=bsd_chflags.lgetflags(src)
2935 # Problem getting flags...
2936 writemsg("!!! Couldn't get flags for "+dest+"\n")
2939 except SystemExit, e:
2941 except Exception, e:
2942 print "!!! Stating source file failed... movefile()"
2948 dstat=os.lstat(dest)
2949 except SystemExit, e:
# Destination absent: stat its parent directory instead.
2952 dstat=os.lstat(os.path.dirname(dest))
2956 # Check that we can actually unset schg etc flags...
2957 # Clear the flags on source and destination; we'll reinstate them after merging
2959 if bsd_chflags.lchflags(dest, 0) < 0:
2960 writemsg("!!! Couldn't clear flags on file being merged: \n ")
2961 # We might have an immutable flag on the parent dir; save and clear.
2962 pflags=bsd_chflags.lgetflags(os.path.dirname(dest))
2963 bsd_chflags.lchflags(os.path.dirname(dest), 0)
2965 # Don't bother checking the return value here; if it fails then the next line will catch it.
2966 bsd_chflags.lchflags(src, 0)
2968 if bsd_chflags.lhasproblems(src)>0 or (destexists and bsd_chflags.lhasproblems(dest)>0) or bsd_chflags.lhasproblems(os.path.dirname(dest))>0:
2969 # This is bad: we can't merge the file with these flags set.
2970 writemsg("!!! Can't merge file "+dest+" because of flags set\n")
# A symlink at the destination is removed before replacement (elided here).
2974 if stat.S_ISLNK(dstat[stat.ST_MODE]):
2978 except SystemExit, e:
2980 except Exception, e:
# Source is a symlink: re-create it at dest, stripping the ${D} image
# prefix from the target so the link points into the live filesystem.
2983 if stat.S_ISLNK(sstat[stat.ST_MODE]):
2985 target=os.readlink(src)
2986 if mysettings and mysettings["D"]:
2987 if target.find(mysettings["D"])==0:
2988 target=target[len(mysettings["D"]):]
2989 if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
2992 sid = selinux.get_lsid(src)
2993 selinux.secure_symlink(target,dest,sid)
2995 os.symlink(target,dest)
2996 lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
2998 # Restore the flags we saved before moving
2999 if bsd_chflags.lchflags(dest, sflags) < 0 or bsd_chflags.lchflags(os.path.dirname(dest), pflags) < 0:
3000 writemsg("!!! Couldn't restore flags ("+str(flags)+") on " + dest+":\n")
3001 writemsg("!!! %s\n" % str(e))
3003 return os.lstat(dest)[stat.ST_MTIME]
3004 except SystemExit, e:
3006 except Exception, e:
3007 print "!!! failed to properly create symlink:"
3008 print "!!!",dest,"->",target
# Same-device (or SELinux) fast path: plain rename is atomic.
3013 if sstat[stat.ST_DEV]==dstat[stat.ST_DEV] or selinux_enabled:
3016 ret=selinux.secure_rename(src,dest)
3018 ret=os.rename(src,dest)
3020 except SystemExit, e:
3022 except Exception, e:
# EXDEV means cross-device; anything else is a genuine failure.
3023 if e[0]!=errno.EXDEV:
3024 # Some random error.
3025 print "!!! Failed to move",src,"to",dest
3028 # Invalid cross-device-link 'bind' mounted or actually Cross-Device
# Cross-device regular file: copy to dest#new, then rename over dest.
3031 if stat.S_ISREG(sstat[stat.ST_MODE]):
3032 try: # For safety copy then move it over.
3034 selinux.secure_copy(src,dest+"#new")
3035 selinux.secure_rename(dest+"#new",dest)
3037 shutil.copyfile(src,dest+"#new")
3038 os.rename(dest+"#new",dest)
3040 except SystemExit, e:
3042 except Exception, e:
3043 print '!!! copy',src,'->',dest,'failed.'
3047 #we don't yet handle special, so we need to fall back to /bin/mv
3049 a=commands.getstatusoutput(MOVE_BINARY+" -c -f "+"'"+src+"' '"+dest+"'")
3051 a=commands.getstatusoutput(MOVE_BINARY+" -f "+"'"+src+"' '"+dest+"'")
3053 print "!!! Failed to move special file:"
3054 print "!!! '"+src+"' to '"+dest+"'"
3056 return None # failure
# Re-apply ownership/mode; lchown for symlinks so the link itself is owned.
3059 if stat.S_ISLNK(sstat[stat.ST_MODE]):
3060 lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
3062 os.chown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
3063 os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
3065 except SystemExit, e:
3067 except Exception, e:
3068 print "!!! Failed to chown/chmod/unlink in movefile()"
# Preserve (or explicitly set) the mtime on the moved file.
3074 os.utime(dest,(newmtime,newmtime))
3076 os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
3077 newmtime=sstat[stat.ST_MTIME]
3080 # Restore the flags we saved before moving
3081 if bsd_chflags.lchflags(dest, sflags) < 0 or bsd_chflags.lchflags(os.path.dirname(dest), pflags) < 0:
3082 writemsg("!!! Couldn't restore flags ("+str(sflags)+") on " + dest+":\n")
def merge(mycat,mypkg,pkgloc,infloc,myroot,mysettings,myebuild=None,mytree=None):
	"""Merge a built package image (pkgloc) plus its build-info (infloc)
	into myroot, delegating the actual work to a dblink instance."""
	link = dblink(mycat, mypkg, myroot, mysettings, treetype=mytree)
	return link.merge(pkgloc, infloc, myroot, myebuild)
# unmerge: remove an installed package from myroot via its vartree dblink.
# NOTE(review): one line is elided here (original 3093) — presumably a guard
# that the package is actually installed before unmerging; TODO confirm.
3091 def unmerge(cat,pkg,myroot,mysettings,mytrimworld=1):
3092 mylink=dblink(cat,pkg,myroot,mysettings,treetype="vartree")
3094 mylink.unmerge(trimworld=mytrimworld,cleanup=1)
# isvalidatom: sanity-check a dependency atom string (operator + cat/pkg[-ver]).
# NOTE(review): listing is elided; the return values for each branch are not
# visible here.
3097 def isvalidatom(atom):
3098 mycpv_cps = catpkgsplit(dep_getcpv(atom))
3099 operator = get_operator(atom)
# A '*' suffix only makes sense with '=' globs, not with </> comparisons.
3101 if operator[0] in "<>" and atom[-1] == "*":
# catpkgsplit returns "null" as the category when none was given.
3103 if mycpv_cps and mycpv_cps[0] != "null":
3107 # >=cat/pkg or >=pkg-1.0 (no category)
# Unversioned atoms are valid when they are a simple cat/pkg pair.
3113 if (len(string.split(atom, '/'))==2):
# isjustname: true when mypkg is a bare package name with no version suffix.
# NOTE(review): the loop/return body is elided here (original 3121-3126) —
# it presumably inspects the '-'-separated parts for a version; TODO confirm.
3119 def isjustname(mypkg):
3120 myparts=string.split(mypkg,'-')
# isspecific: true when mypkg pins a specific package (category and/or version),
# memoized in a module-level iscache dict.
# NOTE(review): listing is elided; cache-store and return lines are missing.
3127 def isspecific(mypkg):
3128 "now supports packages with no category"
# Fast path: answer previously computed for this key.
3130 return iscache[mypkg]
3131 except SystemExit, e:
3135 mysplit=string.split(mypkg,"/")
# The last path component carrying a version makes the atom specific.
3136 if not isjustname(mysplit[-1]):
def getCPFromCPV(mycpv):
	"""Calls pkgsplit on a cpv and returns only the cp."""
	parts = pkgsplit(mycpv)
	return parts[0]
# dep_virtual: rewrite virtual/* atoms in a parsed dep list into their
# concrete providers from mysettings.virtuals.
# NOTE(review): listing is elided; the non-virtual passthrough branch and the
# final return are not visible here.
3147 def dep_virtual(mysplit, mysettings):
3148 "Does virtual dependency conversion"
# Recurse into nested dep groups (|| blocks etc.).
3151 if type(x)==types.ListType:
3152 newsplit.append(dep_virtual(x, mysettings))
3155 if mysettings.virtuals.has_key(mykey):
# Single provider: a simple textual substitution suffices.
3156 if len(mysettings.virtuals[mykey])==1:
3157 a=string.replace(x, mykey, mysettings.virtuals[mykey][0])
3160 # blocker needs "and" not "or(||)".
# Multiple providers: expand to one atom per provider.
3164 for y in mysettings.virtuals[mykey]:
3165 a.append(string.replace(x, mykey, y))
# dep_eval: evaluate a reduced dependency list of 0/1 leaves; '||' groups need
# any satisfied member, plain groups need all members satisfied.
# NOTE(review): listing is elided; the returns for each branch are missing here.
3171 def dep_eval(deplist):
3174 if deplist[0]=="||":
3175 #or list; we just need one "1"
3176 for x in deplist[1:]:
3177 if type(x)==types.ListType:
3182 #XXX: unless there's no available atoms in the list
3183 #in which case we need to assume that everything is
3184 #okay as some ebuilds are relying on an old bug.
# An empty '||' group (just the marker) is treated as satisfied.
3185 if len(deplist) == 1:
# AND semantics: recurse into sublists; any falsy member fails the group.
3190 if type(x)==types.ListType:
# dep_zapdeps: given a dep tree and its satisfied/unsatisfied reduction, pick
# the concrete atoms that still must be merged; for '||' groups it prefers an
# already-installed choice, then a visible/available one.
# NOTE(review): listing is elided (line numbers jump); several returns and
# else-branches are not visible here.
3197 def dep_zapdeps(unreduced,reduced,myroot,use_binaries=0):
3198 """Takes an unreduced and reduced deplist and removes satisfied dependencies.
3199 Returned deplist contains steps that must be taken to satisfy dependencies."""
3200 writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
# Nothing to do when the group is already satisfied (or trivially empty).
3201 if not reduced or unreduced == ["||"] or dep_eval(reduced):
# AND group: recurse into sublists, collect unsatisfied plain atoms.
3204 if unreduced[0] != "||":
3206 for (dep, satisfied) in zip(unreduced, reduced):
3207 if isinstance(dep, list):
3208 unresolved += dep_zapdeps(dep, satisfied, myroot, use_binaries=use_binaries)
3210 unresolved.append(dep)
3213 # We're at a ( || atom ... ) type level
3214 deps = unreduced[1:]
3215 satisfieds = reduced[1:]
# First pass: prefer a choice whose atoms are all already installed (vartree).
3218 for (dep, satisfied) in zip(deps, satisfieds):
3219 if isinstance(dep, list):
3220 atoms = dep_zapdeps(dep, satisfied, myroot, use_binaries=use_binaries)
3223 missing_atoms = [atom for atom in atoms if not db[myroot]["vartree"].dbapi.match(atom)]
3225 if not missing_atoms:
3226 if isinstance(dep, list):
3227 return atoms # Sorted out by the recursed dep_zapdeps call
3229 target = dep_getkey(dep) # An installed package that's not yet in the graph
# Second pass: fall back to what is available — binaries or visible ebuilds.
3234 missing_atoms = [atom for atom in atoms if not db[myroot]["bintree"].dbapi.match(atom)]
3236 missing_atoms = [atom for atom in atoms if not db[myroot]["porttree"].dbapi.xmatch("match-visible", atom)]
3237 if not missing_atoms:
3238 target = (dep, satisfied)
# No viable choice found: default to the first alternative in the group.
3241 if isinstance(deps[0], list):
3242 return dep_zapdeps(deps[0], satisfieds[0], myroot, use_binaries=use_binaries)
3246 if isinstance(target, tuple): # Nothing matching installed
3247 if isinstance(target[0], list): # ... and the first available was a sublist
3248 return dep_zapdeps(target[0], target[1], myroot, use_binaries=use_binaries)
3249 else: # ... and the first available was a single atom
3250 target = dep_getkey(target[0])
# Among atoms for the chosen package key, pick the best visible version.
3252 relevant_atoms = [dep for dep in deps if not isinstance(dep, list) and dep_getkey(dep) == target]
3255 for atom in relevant_atoms:
3257 pkg_list = db["/"]["bintree"].dbapi.match(atom)
3259 pkg_list = db["/"]["porttree"].dbapi.xmatch("match-visible", atom)
3262 pkg = best(pkg_list)
3263 available_pkgs[pkg] = atom
3265 if not available_pkgs:
3266 return [relevant_atoms[0]] # All masked
3268 target_pkg = best(available_pkgs.keys())
3269 suitable_atom = available_pkgs[target_pkg]
3270 return [suitable_atom]
# dep_getkey(): strip the version-comparison operator from a dependency
# atom and return its bare "cat/pkg" key (e.g. ">=sys-apps/foo-1.0" ->
# "sys-apps/foo").  NOTE(review): gapped listing -- the operator-stripping
# statements between the prefix tests are missing from this view.
3274 def dep_getkey(mydep):
3283 if mydep[:2] in [ ">=", "<=" ]:
3285 elif mydep[:1] in "=<>~":
# Fully specific atoms (with version) are split so only cat/pkg remains.
3287 if isspecific(mydep):
3288 mysplit=catpkgsplit(mydep)
3291 return mysplit[0]+"/"+mysplit[1]
# dep_getcpv(): strip the leading comparison operator from an atom,
# returning the bare cpv (category/package-version) part.
# NOTE(review): gapped listing -- the slicing/return statements are
# missing between the visible prefix tests.
3295 def dep_getcpv(mydep):
3304 if mydep[:2] in [ ">=", "<=" ]:
3306 elif mydep[:1] in "=<>~":
# dep_transform(): rewrite an atom so that occurrences of oldkey become
# newkey while preserving the operator prefix and any version postfix.
# NOTE(review): gapped listing -- prefix/postfix extraction lines are
# missing from this view; only the prefix tests and return are visible.
3310 def dep_transform(mydep,oldkey,newkey):
3321 if mydep[:2] in [ ">=", "<=" ]:
3324 elif mydep[:1] in "=<>~!":
3328 return prefix+newkey+postfix
# dep_expand(): expand a possibly-unqualified atom into a full
# cat/pkg form via cpv_expand(), keeping the operator prefix/postfix.
# NOTE(review): gapped listing -- the prefix/postfix computation lines
# are missing between the visible rows.
3332 def dep_expand(mydep,mydb=None,use_cache=1):
3342 if mydep[:2] in [ ">=", "<=" ]:
3345 elif mydep[:1] in "=<>~!":
3348 return prefix+cpv_expand(mydep,mydb=mydb,use_cache=use_cache)+postfix
# dep_check(): top-level dependency-string evaluator.  Parses depstring,
# reduces it against USE flags, expands virtuals, word-reduces each atom
# against mydbapi, and either reports success or returns the atoms still
# needed (via dep_zapdeps).  Returns [0, message] on error, [1, ...] on
# success.  NOTE(review): gapped listing -- many statements (returns,
# assignments) are missing between the visible rows.
3350 def dep_check(depstring,mydbapi,mysettings,use="yes",mode=None,myuse=None,use_cache=1,use_binaries=0,myroot="/"):
3351 """Takes a depend string and parses the condition."""
3353 #check_config_instance(mysettings)
# USE flags come from mysettings unless an explicit myuse list was given.
3358 myusesplit = string.split(mysettings["USE"])
3361 # We've been given useflags to use.
3362 #print "USE FLAGS PASSED IN."
3364 #if "bindist" in myusesplit:
3365 # print "BINDIST is set!"
3367 # print "BINDIST NOT set."
3369 #we are being run by autouse(), don't consult USE vars yet.
3370 # WE ALSO CANNOT USE SETTINGS
3373 #convert parenthesis to sublists
3374 mysplit = portage_dep.paren_reduce(depstring)
3377 # XXX: use="all" is only used by repoman. Why would repoman checks want
3378 # profile-masked USE flags to be enabled?
3380 # mymasks=archlist[:]
# Masked flags are the profile's usemask plus all arch keywords, minus
# the current ARCH itself so native-arch conditionals still apply.
3382 mymasks=mysettings.usemask+archlist[:]
3384 while mysettings["ARCH"] in mymasks:
3385 del mymasks[mymasks.index(mysettings["ARCH"])]
3386 mysplit = portage_dep.use_reduce(mysplit,uselist=myusesplit,masklist=mymasks,matchall=(use=="all"),excludeall=[mysettings["ARCH"]])
3388 mysplit = portage_dep.use_reduce(mysplit,uselist=myusesplit,matchall=(use=="all"))
3390 # Do the || conversions
3391 mysplit=portage_dep.dep_opconvert(mysplit)
3393 #convert virtual dependencies to normal packages.
3394 mysplit=dep_virtual(mysplit, mysettings)
3395 #if mysplit==None, then we have a parse error (paren mismatch or misplaced ||)
3396 #up until here, we haven't needed to look at the database tree
3399 return [0,"Parse Error (parentheses mismatch?)"]
3401 #dependencies were reduced to nothing
# Reduce each atom to 0/1 by matching against mydbapi.
3404 mysplit2=dep_wordreduce(mysplit2,mysettings,mydbapi,mode,use_cache=use_cache)
3406 return [0,"Invalid token"]
3408 writemsg("\n\n\n", 1)
3409 writemsg("mysplit: %s\n" % (mysplit), 1)
3410 writemsg("mysplit2: %s\n" % (mysplit2), 1)
3411 myeval=dep_eval(mysplit2)
3412 writemsg("myeval: %s\n" % (myeval), 1)
# Not fully satisfied: compute the remaining atoms to merge.
3417 myzaps = dep_zapdeps(mysplit,mysplit2,myroot,use_binaries=use_binaries)
3418 mylist = flatten(myzaps)
3419 writemsg("myzaps: %s\n" % (myzaps), 1)
3420 writemsg("mylist: %s\n" % (mylist), 1)
# mydict is presumably used to de-duplicate mylist -- its construction
# falls in a gap; verify against the full file.
3425 writemsg("mydict: %s\n" % (mydict), 1)
3426 return [1,mydict.keys()]
# dep_wordreduce(): walk a dep list and replace every atom with 0 or 1
# depending on whether mydbapi (or package.provided) satisfies it;
# sublists are reduced recursively, "||" is preserved.
# NOTE(review): gapped listing -- assignments of the 0/1 results and the
# final return are missing between the visible rows.
3428 def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
3429 "Reduces the deplist to ones and zeros"
3431 deplist=mydeplist[:]
3432 while mypos<len(deplist):
3433 if type(deplist[mypos])==types.ListType:
3435 deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
3436 elif deplist[mypos]=="||":
3439 mykey = dep_getkey(deplist[mypos])
# package.provided entries satisfy an atom without a db hit.
3440 if mysettings and mysettings.pprovideddict.has_key(mykey) and \
3441 match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
# When a mode is given, use the porttree's extended xmatch; otherwise
# the plain dbapi match.
3445 mydep=mydbapi.xmatch(mode,deplist[mypos])
3447 mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
# Blocker atoms ("!...") invert the match result.
3450 if deplist[mypos][0]=="!":
3454 #encountered invalid string
# cpv_getkey(): reduce "cat/pkg-ver[-rev]" to its "cat/pkg" key.
# NOTE(review): gapped listing -- the branch handling a failed pkgsplit
# (rows 3462-3463) is missing from this view.
3459 def cpv_getkey(mycpv):
3460 myslash=mycpv.split("/")
3461 mysplit=pkgsplit(myslash[-1])
3464 return myslash[0]+"/"+mysplit[0]
# key_expand(): qualify a bare package name with a category by probing
# mydb's categories, falling back to virtuals and finally "null/<name>".
# A dbapi instance is required for db-backed expansion (type check on
# types.InstanceType is the old-style-class idiom).
# NOTE(review): gapped listing -- returns/branches are missing between rows.
3470 def key_expand(mykey,mydb=None,use_cache=1):
3471 mysplit=mykey.split("/")
3473 if mydb and type(mydb)==types.InstanceType:
3474 for x in settings.categories:
3475 if mydb.cp_list(x+"/"+mykey,use_cache=use_cache):
# virts_p maps bare virtual names to provider lists; first entry wins.
3477 if virts_p.has_key(mykey):
3478 return(virts_p[mykey][0])
3479 return "null/"+mykey
3481 if type(mydb)==types.InstanceType:
3482 if (not mydb.cp_list(mykey,use_cache=use_cache)) and virts and virts.has_key(mykey):
3483 return virts[mykey][0]
# cpv_expand(): expand a package name or virtual into a valid cat/pkg
# (plus version suffix when one was given).  Virtual expansion happens
# only when a dbapi is supplied; ambiguous bare names raise ValueError.
# NOTE(review): gapped listing -- several branches (error raise, bare-name
# setup, final fallthrough) are missing between the visible rows.
3486 def cpv_expand(mycpv,mydb=None,use_cache=1):
3487 """Given a string (packagename or virtual) expand it into a valid
3488 cat/package string. Virtuals use the mydb to determine which provided
3489 virtual is a valid choice and defaults to the first element when there
3490 are no installed/available candidates."""
3491 myslash=mycpv.split("/")
3492 mysplit=pkgsplit(myslash[-1])
3494 # this is illegal case.
3497 elif len(myslash)==2:
3499 mykey=myslash[0]+"/"+mysplit[0]
3503 writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
3504 if type(mydb)==types.InstanceType:
# Unknown virtual key: pick the first provider that has entries in the
# db, else default to the first configured provider.
3505 if (not mydb.cp_list(mykey,use_cache=use_cache)) and virts and virts.has_key(mykey):
3506 writemsg("virts[%s]: %s\n" % (str(mykey),virts[mykey]), 1)
3507 mykey_orig = mykey[:]
3508 for vkey in virts[mykey]:
3509 if mydb.cp_list(vkey,use_cache=use_cache):
3511 writemsg("virts chosen: %s\n" % (mykey), 1)
3513 if mykey == mykey_orig:
3514 mykey=virts[mykey][0]
3515 writemsg("virts defaulted: %s\n" % (mykey), 1)
3516 #we only perform virtual expansion if we are passed a dbapi
3518 #specific cpv, no category, ie. "foo-1.0"
# Bare name: probe every category; more than one hit is ambiguous.
3527 for x in settings.categories:
3528 if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
3529 matches.append(x+"/"+myp)
3530 if (len(matches)>1):
3531 raise ValueError, matches
3535 if not mykey and type(mydb)!=types.ListType:
3536 if virts_p.has_key(myp):
3537 mykey=virts_p[myp][0]
3538 #again, we only perform virtual expansion if we have a dbapi (not a list)
# Reattach the version; "r0" revisions are canonicalized away.
3542 if mysplit[2]=="r0":
3543 return mykey+"-"+mysplit[1]
3545 return mykey+"-"+mysplit[1]+"-"+mysplit[2]
# getmaskingreason(): for a package.mask'ed cpv, scan the profile's
# package.mask lines and return the comment block that precedes the
# matching atom.  Raises ValueError for a malformed cpv and KeyError
# when the cpv is unknown to the porttree.
# NOTE(review): gapped listing -- the comment-collection loop body and
# the return are missing past row 3567.
3549 def getmaskingreason(mycpv):
3550 from portage_util import grablines
3552 mysplit = catpkgsplit(mycpv)
3554 raise ValueError("invalid CPV: %s" % mycpv)
3555 if not portdb.cpv_exists(mycpv):
3556 raise KeyError("CPV %s does not exist" % mycpv)
3557 mycp=mysplit[0]+"/"+mysplit[1]
3559 pmasklines = grablines(settings["PORTDIR"]+"/profiles/package.mask", recursive=1)
3560 if settings.pmaskdict.has_key(mycp):
3561 for x in settings.pmaskdict[mycp]:
3562 if mycpv in portdb.xmatch("match-all", x):
3566 while i < len(pmasklines):
3567 l = pmasklines[i].strip()
# getmaskingstatus(): return the list of reasons a cpv is masked --
# "profile", "package.mask", unsupported EAPI, and/or a keyword mask.
# Raises ValueError on malformed cpv, KeyError when cpv is unknown.
# NOTE(review): gapped listing -- initializations (rValue, kmask, pgroups)
# and several branches are missing between the visible rows.
3577 def getmaskingstatus(mycpv):
3579 mysplit = catpkgsplit(mycpv)
3581 raise ValueError("invalid CPV: %s" % mycpv)
3582 if not portdb.cpv_exists(mycpv):
3583 raise KeyError("CPV %s does not exist" % mycpv)
3584 mycp=mysplit[0]+"/"+mysplit[1]
# Profile masking: prevmaskdict atoms that do NOT match mean the profile
# excludes this version.
3589 revmaskdict=settings.prevmaskdict
3590 if revmaskdict.has_key(mycp):
3591 for x in revmaskdict[mycp]:
3596 if not match_to_list(mycpv, [myatom]):
3597 rValue.append("profile")
3600 # package.mask checking
3601 maskdict=settings.pmaskdict
3602 unmaskdict=settings.punmaskdict
3603 if maskdict.has_key(mycp):
3604 for x in maskdict[mycp]:
3605 if mycpv in portdb.xmatch("match-all", x):
# package.unmask entries cancel a package.mask hit.
3607 if unmaskdict.has_key(mycp):
3608 for z in unmaskdict[mycp]:
3609 if mycpv in portdb.xmatch("match-all",z):
3613 rValue.append("package.mask")
# Keyword checking: unsupported EAPI short-circuits everything else.
3616 mygroups, eapi = portdb.aux_get(mycpv, ["KEYWORDS", "EAPI"])
3617 if not eapi_is_supported(eapi):
3618 return ["required EAPI %s, supported EAPI %s" % (eapi, portage_const.EAPI)]
3619 mygroups = mygroups.split()
3621 myarch = settings["ARCH"]
3622 pkgdict = settings.pkeywordsdict
# Per-package ACCEPT_KEYWORDS additions extend the accepted groups.
3624 cp = dep_getkey(mycpv)
3625 if pkgdict.has_key(cp):
3626 matches = match_to_list(mycpv, pkgdict[cp].keys())
3627 for match in matches:
3628 pgroups.extend(pkgdict[cp][match])
3632 for keyword in pgroups:
3633 if keyword in mygroups:
3642 elif gp=="-"+myarch:
3645 elif gp=="~"+myarch:
3650 rValue.append(kmask+" keyword")
# fixdbentries(): rewrite old_value -> new_value in every file of a vdb
# package directory (except CONTENTS), using boundary-aware regexes so
# only whole package-name tokens are replaced.
# NOTE(review): gapped listing -- rows 3660/3662 are missing (likely the
# f.close() and the skip-on-no-match continue); verify against full file.
3653 def fixdbentries(old_value, new_value, dbdir):
3654 """python replacement for the fixdbentries script, replaces old_value
3655 with new_value for package names in files in dbdir."""
3656 for myfile in [f for f in os.listdir(dbdir) if not f == "CONTENTS"]:
3657 file_path = os.path.join(dbdir, myfile)
3658 f = open(file_path, "r")
3659 mycontent = f.read()
3661 if not mycontent.count(old_value):
# NOTE(review): old_value is re-escaped *inside* the loop and the result
# is assigned back to old_value, so from the second file onward the
# pattern is double-escaped -- looks like a latent bug; confirm upstream.
3663 old_value = re.escape(old_value);
3664 mycontent = re.sub(old_value+"$", new_value, mycontent)
3665 mycontent = re.sub(old_value+"(\\s)", new_value+"\\1", mycontent)
3666 mycontent = re.sub(old_value+"(-[^a-zA-Z])", new_value+"\\1", mycontent)
3667 mycontent = re.sub(old_value+"([^a-zA-Z0-9-])", new_value+"\\1", mycontent)
3668 write_atomic(file_path, mycontent)
# Method fragments of an enclosing tree class whose "class" line is not
# visible in this listing: __init__ (clone-or-fresh pattern), resolve_key,
# dep_nomatch and depcheck.  NOTE(review): gapped listing -- tree/dbapi
# initialization and several returns are missing between the rows.
3671 def __init__(self,virtual,clone=None):
3673 self.tree=clone.tree.copy()
3674 self.populated=clone.populated
3675 self.virtual=clone.virtual
3680 self.virtual=virtual
# Delegate bare-key expansion to the module-level key_expand().
3683 def resolve_key(self,mykey):
3684 return key_expand(mykey,mydb=self.dbapi)
3686 def dep_nomatch(self,mypkgdep):
3687 mykey=dep_getkey(mypkgdep)
3688 nolist=self.dbapi.cp_list(mykey)
3689 mymatch=self.dbapi.match(mypkgdep)
3697 def depcheck(self,mycheck,use="yes",myusesplit=None):
3698 return dep_check(mycheck,self.dbapi,use=use,myuse=myusesplit)
# Fragment: the docstring of a populate() method (its def line is not in
# this view) followed by the module-level best() helper, which returns
# the highest-version cpv from a match list via pkgcmp comparisons.
# NOTE(review): gapped listing -- the comparison and return lines of
# best() are missing between rows.
3701 "populates the tree with values"
3705 def best(mymatches):
3706 "accepts None arguments; assumes matches are valid."
3710 if not len(mymatches):
3712 bestmatch=mymatches[0]
3713 p2=catpkgsplit(bestmatch)[1:]
3714 for x in mymatches[1:]:
3715 p1=catpkgsplit(x)[1:]
# p2 is refreshed whenever bestmatch changes so each candidate is
# compared against the current leader.
3718 p2=catpkgsplit(bestmatch)[1:]
# match_to_list(): return the (de-duplicated) atoms from mylist that
# match mypkg.  NOTE(review): gapped listing -- matches initialization,
# the append, and the return are missing from this view.
3721 def match_to_list(mypkg,mylist):
3723 Searches list for entries that matches the package.
3727 if match_from_list(x,[mypkg]):
3728 if x not in matches:
# best_match_to_list(): of all atoms in mylist matching mypkg, return the
# most specific one, approximated as the longest string (the XXX below
# admits that heuristic is imperfect).  NOTE(review): gapped listing --
# the length comparison and return are missing from this view.
3732 def best_match_to_list(mypkg,mylist):
3734 Returns the most specific entry (assumed to be the longest one)
3735 that matches the package given.
3737 # XXX Assumption is wrong sometimes.
3740 for x in match_to_list(mypkg,mylist):
def catsplit(mydep):
	"""Split a dependency string on its first '/' into [category, remainder].

	Input without a '/' yields a single-element list, mirroring
	str.split(sep, 1) semantics.
	"""
	head, sep, tail = mydep.partition("/")
	if sep:
		return [head, tail]
	return [head]
# get_operator(): return the comparison operator that prefixes an atom
# ('~', '=', '=*', '>', '<', '>=', '<=') or, per the missing branches,
# presumably None for an unprefixed atom.  NOTE(review): gapped listing --
# the '~' branch and several assignments/returns are missing.
3749 def get_operator(mydep):
3751 returns '~', '=', '>', '<', '=*', '>=', or '<='
3755 elif mydep[0] == "=":
# "=cat/pkg-1.0*" is the glob-match operator "=*".
3756 if mydep[-1] == "*":
3760 elif mydep[0] in "><":
3761 if len(mydep) > 1 and mydep[1] == "=":
3762 operator = mydep[0:2]
# match_from_list(): return the members of candidate_list that satisfy
# the atom mydep, dispatching on the atom's operator (None, '=', '=*',
# '~', '>', '>=', '<', '<=').  Unknown operators raise KeyError.
# NOTE(review): gapped listing -- mylist initialization, appends, and
# several returns/continues are missing between the visible rows.
3771 def match_from_list(mydep,candidate_list):
3775 mycpv = dep_getcpv(mydep)
3776 mycpv_cps = catpkgsplit(mycpv) # Can be None if not specific
3779 cat,pkg = catsplit(mycpv)
3783 cat,pkg,ver,rev = mycpv_cps
# A fully version-specific atom without an operator is an input error.
3785 raise KeyError, "Specific key requires an operator (%s) (try adding an '=')" % (mydep)
3788 operator = get_operator(mydep)
3790 writemsg("!!! Invalid atom: %s\n" % mydep)
# No operator: match on the bare cat/pkg key only.
3797 if operator == None:
3798 for x in candidate_list:
3803 elif xs[0] != mycpv:
3807 elif operator == "=": # Exact match
3808 if mycpv in candidate_list:
3811 elif operator == "=*": # glob match
3812 # The old verion ignored _tag suffixes... This one doesn't.
3813 for x in candidate_list:
3814 if x[0:len(mycpv)] == mycpv:
3817 elif operator == "~": # version, any revision, match
3818 for x in candidate_list:
# Compare cat/pkg and version, deliberately ignoring the revision field.
3820 if xs[0:2] != mycpv_cps[0:2]:
3826 elif operator in [">", ">=", "<", "<="]:
3827 for x in candidate_list:
3829 result = pkgcmp(pkgsplit(x), [cat+"/"+pkg,ver,rev])
3830 except SystemExit, e:
3833 writemsg("\nInvalid package name: %s\n" % x)
3837 elif operator == ">":
3840 elif operator == ">=":
3843 elif operator == "<":
3846 elif operator == "<=":
3850 raise KeyError, "Unknown operator: %s" % mydep
3852 raise KeyError, "Unknown operator: %s" % mydep
# match_from_list_original(): older implementation of atom matching kept
# for reference/compatibility; filters mylist down to entries that fit
# mydep, handling '*' globs by bumping the version and comparing ranges.
# NOTE(review): heavily gapped listing -- loop headers, appends and
# returns are missing throughout; comments only describe visible lines.
3858 def match_from_list_original(mydep,mylist):
3860 Reduces the list down to those that fit the dep
3862 mycpv=dep_getcpv(mydep)
3863 if isspecific(mycpv):
3864 cp_key=catpkgsplit(mycpv)
3869 #Otherwise, this is a special call; we can only select out of the ebuilds specified in the specified mylist
3874 #example: "=sys-apps/foo-1.0*"
3876 #now, we grab the version of our dependency...
# Glob match is implemented as the half-open range
# [ver+"_alpha0", (ver with last digit + 1)+"_alpha0" ).
3877 mynewsplit=string.split(cp_key[2],'.')
3879 mynewsplit[-1]=`int(mynewsplit[-1])+1`
3880 #and increment the last digit of the version by one.
3881 #We don't need to worry about _pre and friends because they're not supported with '*' deps.
3882 new_v=string.join(mynewsplit,".")+"_alpha0"
3883 #new_v will be used later in the code when we do our comparisons using pkgcmp()
3884 except SystemExit, e:
3891 cmp1[1]=cmp1[1]+"_alpha0"
3892 cmp2=[cp_key[1],new_v,"r0"]
3896 #hrm, invalid entry. Continue.
3898 #skip entries in our list that do not have matching categories
3899 if cp_key[0]!=cp_x[0]:
3901 # ok, categories match. Continue to next step.
3902 if ((pkgcmp(cp_x[1:],cmp1)>=0) and (pkgcmp(cp_x[1:],cmp2)<0)):
3903 # entry is >= the version in specified in our dependency, and <= the version in our dep + 1; add it:
3907 # Does our stripped key appear literally in our list? If so, we have a match; if not, we don't.
# Relational operators are dispatched through an eval()'d comparison
# string built from the operator prefix.
3912 elif (mydep[0]==">") or (mydep[0]=="<"):
3915 if (len(mydep)>1) and (mydep[1]=="="):
3923 #invalid entry; continue.
3925 if cp_key[0]!=cp_x[0]:
3927 if eval("pkgcmp(cp_x[1:],cp_key[1:])"+cmpstr+"0"):
3937 #invalid entry; continue
3939 if cp_key[0]!=cp_x[0]:
3941 if cp_key[2]!=cp_x[2]:
3942 #if version doesn't match, skip it
3944 myint = int(cp_x[3][1:])
3955 #we check ! deps in emerge itself, so always returning [] is correct.
# Unversioned atom: plain cat/pkg equality against each entry.
3957 cp_key=mycpv.split("/")
3961 #invalid entry; continue
3963 if cp_key[0]!=cp_x[0]:
3965 if cp_key[1]!=cp_x[1]:
# portagetree.__init__ fragment (the enclosing class line is outside this
# view): clone-or-fresh initialization; a clone copies root/portroot/
# pkglines, otherwise portroot comes from settings["PORTDIR"].
# NOTE(review): gapped listing -- dbapi construction lines are missing.
3974 def __init__(self,root="/",virtual=None,clone=None):
3977 self.root=clone.root
3978 self.portroot=clone.portroot
3979 self.pkglines=clone.pkglines
3982 self.portroot=settings["PORTDIR"]
3983 self.virtual=virtual
# Compatibility wrappers: delegate to the dbapi's xmatch() with the
# "bestmatch-visible" / "match-visible" modes.  NOTE(review): gapped
# listing -- the return statements are missing from this view.
3986 def dep_bestmatch(self,mydep):
3987 "compatibility method"
3988 mymatch=self.dbapi.xmatch("bestmatch-visible",mydep)
3993 def dep_match(self,mydep):
3994 "compatibility method"
3995 mymatch=self.dbapi.xmatch("match-visible",mydep)
def exists_specific(self, cpv):
	"""Return True when *cpv* is known to this tree's backing dbapi."""
	backend = self.dbapi
	return backend.cpv_exists(cpv)
def getallnodes(self):
	"""Return every *unmasked* cat/pkg node.

	Masked packages may or may not also exist for the returned nodes;
	this simply delegates to the dbapi's cp_all().
	"""
	return self.dbapi.cp_all()
# getname(): DEPRECATED path builder for an ebuild in the main portroot;
# resolve_specific(): expand a cpv's cat/pkg via key_expand and reattach
# version/revision.  NOTE(review): gapped listing -- validation branches
# and the final return are missing between the visible rows.
4008 def getname(self,pkgname):
4009 "returns file location for this particular package (DEPRECATED)"
4012 mysplit=string.split(pkgname,"/")
4013 psplit=pkgsplit(mysplit[1])
4014 return self.portroot+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
4016 def resolve_specific(self,myspec):
4017 cps=catpkgsplit(myspec)
4020 mykey=key_expand(cps[0]+"/"+cps[1],mydb=self.dbapi)
4021 mykey=mykey+"-"+cps[2]
# Revision is appended only when present (non-"r0", presumably -- the
# guarding condition falls in a gap; verify against the full file).
4023 mykey=mykey+"-"+cps[3]
def depcheck(self, mycheck, use="yes", myusesplit=None):
	"""Convenience wrapper: run the module-level dep_check() against this tree's dbapi."""
	return dep_check(mycheck, self.dbapi, use=use, myuse=myusesplit)
# getslot(): fetch the SLOT metadata for a catpkg, swallowing ordinary
# failures (SystemExit is re-raised, everything else presumably yields a
# default).  NOTE(review): gapped listing -- the try header, re-raise and
# return lines are missing from this view.
4029 def getslot(self,mycatpkg):
4030 "Get a slot for a catpkg; assume it exists."
4033 myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
4034 except SystemExit, e:
4036 except Exception, e:
# Base-class stubs of the dbapi interface; their (trivial) bodies fall in
# listing gaps.  close_caches() releases cache resources; cp_list()
# returns the versions available for a cat/pkg key.
4045 def close_caches(self):
4048 def cp_list(self,cp,use_cache=1):
def aux_get(self, mycpv, mylist):
	"""Stub: return auxiliary db metadata (SLOT, DEPEND, ...) for *mycpv*.

	input:  "sys-apps/foo-1.0", ["SLOT", "DEPEND", "HOMEPAGE"]
	return: ["0", ">=sys-libs/bar-1.0", "http://www.foo.com"]
	        or [] if mycpv not found

	Concrete dbapi subclasses must override; the base always raises.
	"""
	raise NotImplementedError
def match(self, origdep, use_cache=1):
	"""Expand *origdep* to a fully-qualified atom and return matching cpvs.

	origdep may be unqualified or a virtual; dep_expand() resolves it
	against this dbapi.  The result is the subset of cp_list(mykey)
	that satisfies the expanded atom.
	"""
	mydep = dep_expand(origdep, mydb=self)
	mykey = dep_getkey(mydep)
	# Fix: dropped the dead local `mycat = mykey.split("/")[0]` that the
	# original computed and never used.
	return match_from_list(mydep, self.cp_list(mykey, use_cache=use_cache))
def match2(self, mydep, mykey, mylist):
	"""DEPRECATED: filter *mylist* down to entries matching atom *mydep*.

	mykey is accepted only for signature compatibility with old callers.
	Bug fix: the original computed match_from_list() and discarded the
	result, so this method always returned None; the matches are now
	returned as any caller of a match API would expect.
	"""
	writemsg("DEPRECATED: dbapi.match2\n")
	return match_from_list(mydep, mylist)
def counter_tick(self, myroot, mycpv=None):
	"""Increment the global package COUNTER and return its new value."""
	return self.counter_tick_core(myroot, incrementing=1, mycpv=mycpv)
def get_counter_tick_core(self, myroot, mycpv=None):
	"""Return the COUNTER value the next tick would yield, without writing it."""
	current = self.counter_tick_core(myroot, incrementing=0, mycpv=mycpv)
	return current + 1
# counter_tick_core(): read the global COUNTER file, reconcile it with
# the highest per-package COUNTER (for mycpv's key), optionally bump it,
# and atomically write it back.  Returns the resulting counter value.
# NOTE(review): gapped listing -- try headers, min_counter init, cfile
# close, and the final return are missing between the visible rows.
4073 def counter_tick_core(self,myroot,incrementing=1,mycpv=None):
4074 "This method will grab the next COUNTER value and record it back to the global file. Returns new counter value."
4075 cpath=myroot+"var/cache/edb/counter"
# Scan installed versions of mycpv's package for the largest COUNTER so
# the global value never moves backwards.
4079 mysplit = pkgsplit(mycpv)
4080 for x in self.match(mysplit[0],use_cache=0):
4084 old_counter = long(self.aux_get(x,["COUNTER"])[0])
4085 writemsg("COUNTER '%d' '%s'\n" % (old_counter, x),1)
4086 except SystemExit, e:
4090 writemsg("!!! BAD COUNTER in '%s'\n" % (x))
4091 if old_counter > min_counter:
4092 min_counter = old_counter
4094 # We write our new counter value to a new file that gets moved into
4095 # place to avoid filesystem corruption.
4096 if os.path.exists(cpath):
4097 cfile=open(cpath, "r")
4099 counter=long(cfile.readline())
# Corrupt counter file: rebuild the value by shelling out to scan every
# per-package COUNTER under VDB_PATH.
4100 except (ValueError,OverflowError):
4102 counter=long(commands.getoutput("for FILE in $(find /"+VDB_PATH+" -type f -name COUNTER); do echo $(<${FILE}); done | sort -n | tail -n1 | tr -d '\n'"))
4103 writemsg("!!! COUNTER was corrupted; resetting to value of %d\n" % counter)
4105 except (ValueError,OverflowError):
4106 writemsg("!!! COUNTER data is corrupt in pkg db. The values need to be\n")
4107 writemsg("!!! corrected/normalized so that portage can operate properly.\n")
4108 writemsg("!!! A simple solution is not yet available so try #gentoo on IRC.\n")
# Missing counter file: regenerate from the per-package files as well.
4113 counter=long(commands.getoutput("for FILE in $(find /"+VDB_PATH+" -type f -name COUNTER); do echo $(<${FILE}); done | sort -n | tail -n1 | tr -d '\n'"))
4114 writemsg("!!! Global counter missing. Regenerated from counter files to: %s\n" % counter)
4115 except SystemExit, e:
4118 writemsg("!!! Initializing global counter.\n")
4122 if counter < min_counter:
4123 counter = min_counter+1000
4126 if incrementing or changed:
4130 # update new global counter file
4131 write_atomic(cpath, str(counter))
# invalidentry(): handle a bogus vdb path -- silently unlock stale
# portage lockfiles (when not running under a master process), warn about
# leftover -MERGING- directories, and complain about anything else.
# NOTE(review): gapped listing -- the except handler and removal logic
# fall in gaps between the visible rows.
4135 def invalidentry(self, mypath):
4136 if re.search("portage_lockfile$",mypath):
4137 if not os.environ.has_key("PORTAGE_MASTER_PID"):
4138 writemsg("Lockfile removed: %s\n" % mypath, 1)
4139 portage_locks.unlockfile((mypath,None,None))
4140 # Nothing we can do about it. We're probably sandboxed.
4142 elif re.search(".*/-MERGING-(.*)",mypath):
4143 if os.path.exists(mypath):
4144 writemsg(red("INCOMPLETE MERGE:")+" "+mypath+"\n")
4146 writemsg("!!! Invalid db entry: %s\n" % mypath)
# In-memory dbapi used by emptytree and friends: starts empty, entries
# are added via cpv_inject().  NOTE(review): its __init__ (rows 4152-4155)
# is missing from this listing; methods below reference self.cpvdict and
# self.cpdict, so __init__ presumably creates those dicts.
4150 class fakedbapi(dbapi):
4151 "This is a dbapi to use for the emptytree function. It's empty, but things can be added to it."
def cpv_exists(self, mycpv):
	"""True when *mycpv* has been injected into this in-memory database."""
	return mycpv in self.cpvdict
# cp_list(): versions recorded for a cat/pkg key, or (per the gap) an
# empty result when the key is unknown.  The trailing rows are the body
# of cp_all() whose def line (row 4166) is missing from this listing.
4159 def cp_list(self,mycp,use_cache=1):
4160 if not self.cpdict.has_key(mycp):
4163 return self.cpdict[mycp]
# cp_all() body fragment: flatten every per-key version list.
4167 for x in self.cpdict.keys():
4168 returnme.extend(self.cpdict[x])
def cpv_inject(self, mycpv):
	"""Adds a cpv from the list of available packages."""
	mycp = cpv_getkey(mycpv)
	self.cpvdict[mycpv] = 1
	# Group by cat/pkg key, creating the bucket on first sight.
	versions = self.cpdict.setdefault(mycp, [])
	if mycpv not in versions:
		versions.append(mycpv)
4180 #def cpv_virtual(self,oldcpv,newcpv):
4181 # """Maps a cpv to the list of available packages."""
4182 # mycp=cpv_getkey(newcpv)
4183 # self.cpvdict[newcpv]=1
4184 # if not self.virtdict.has_key(mycp):
4185 # self.virtdict[mycp]=[]
4186 # if not mycpv in self.virtdict[mycp]:
4187 # self.virtdict[mycp].append(oldcpv)
4188 # cpv_remove(oldcpv)
# cpv_remove(): drop a cpv from both indexes; the per-key list is purged
# of every occurrence and deleted entirely once empty.
# NOTE(review): gapped listing -- row 4196 (presumably an early return
# when the key is absent) is missing from this view.
4190 def cpv_remove(self,mycpv):
4191 """Removes a cpv from the list of available packages."""
4192 mycp=cpv_getkey(mycpv)
4193 if self.cpvdict.has_key(mycpv):
4194 del self.cpvdict[mycpv]
4195 if not self.cpdict.has_key(mycp):
4197 while mycpv in self.cpdict[mycp]:
4198 del self.cpdict[mycp][self.cpdict[mycp].index(mycpv)]
4199 if not len(self.cpdict[mycp]):
4200 del self.cpdict[mycp]
# bindbapi: fakedbapi backed by a binary-package tree.  aux_get() pulls
# metadata from the remote-package cache for remote cpvs, or from the
# tbz2's embedded xpak data for local ones, whitespace-normalizing each
# value.  NOTE(review): gapped listing -- parts of __init__, the wants
# loop header, and the EAPI fixup fall in gaps between the rows.
4202 class bindbapi(fakedbapi):
4203 def __init__(self,mybintree=None):
4204 self.bintree = mybintree
4208 def aux_get(self,mycpv,wants):
4209 mysplit = string.split(mycpv,"/")
4211 tbz2name = mysplit[1]+".tbz2"
# Local package: open the tbz2 once up front for getfile() below.
4212 if self.bintree and not self.bintree.isremote(mycpv):
4213 tbz2 = xpak.tbz2(self.bintree.getname(mycpv))
4215 if self.bintree and self.bintree.isremote(mycpv):
4216 # We use the cache for remote packages
4217 if self.bintree.remotepkgs[tbz2name].has_key(x):
4218 mylist.append(self.bintree.remotepkgs[tbz2name][x][:]) # [:] Copy String
4222 myval = tbz2.getfile(x)
# Collapse all whitespace runs to single spaces.
4226 myval = string.join(myval.split(),' ')
4227 mylist.append(myval)
# EAPI post-processing -- the fixup applied at this index falls in a gap.
4229 idx = wants.index("EAPI")
# vardbapi: dbapi over the installed-packages database (var/db/pkg).
# __init__ sets up per-category mtime/match caches so match() can
# invalidate on directory changes.  NOTE(review): gapped listing -- the
# root assignment (row 4238) and cpcache init (row 4244) are missing.
4236 class vardbapi(dbapi):
4237 def __init__(self,root,categories=None):
4239 #cache for category directory mtimes
4240 self.mtdircache = {}
4241 #cache for dependency checks
4242 self.matchcache = {}
4243 #cache for cp_list results
4245 self.blockers = None
# Deep-copied so later mutation of the caller's list cannot leak in.
4246 self.categories = copy.deepcopy(categories)
def cpv_exists(self, mykey):
	"""True if an installed-package entry exists on disk for *mykey* (no masking)."""
	vdb_entry = "%s%s/%s" % (self.root, VDB_PATH, mykey)
	return os.path.exists(vdb_entry)
# cpv_counter(): return (and, when repairable, rewrite) the COUNTER value
# of an installed package, healing corrupted/missing COUNTER files.
# NOTE(review): gapped listing -- try/except headers, close() calls and
# several returns are missing between the visible rows.
4252 def cpv_counter(self,mycpv):
4253 "This method will grab the COUNTER. Returns a counter value."
4254 cdir=self.root+VDB_PATH+"/"+mycpv
4255 cpath=self.root+VDB_PATH+"/"+mycpv+"/COUNTER"
4257 # We write our new counter value to a new file that gets moved into
4258 # place to avoid filesystem corruption on XFS (unexpected reboot.)
4260 if os.path.exists(cpath):
4261 cfile=open(cpath, "r")
4263 counter=long(cfile.readline())
4265 print "portage: COUNTER for",mycpv,"was corrupted; resetting to value of 0"
# Package dir exists but COUNTER is missing: synthesize one.
4269 elif os.path.exists(cdir):
4270 mys = pkgsplit(mycpv)
4271 myl = self.match(mys[0],use_cache=0)
4275 # Only one package... Counter doesn't matter.
4276 myf = open(cpath, "w")
4281 except SystemExit, e:
4283 except Exception, e:
4284 writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n")
4285 writemsg("!!! Please run /usr/lib/portage/bin/fix-db.pl or\n")
4286 writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n")
4287 writemsg("!!! unmerge this exact version.\n")
4288 writemsg("!!! %s\n" % e)
4291 writemsg("!!! COUNTER file is missing for "+str(mycpv)+" in /var/db.\n")
4292 writemsg("!!! Please run /usr/lib/portage/bin/fix-db.pl or\n")
4293 writemsg("!!! Please run /usr/lib/portage/bin/fix-db.py or\n")
4294 writemsg("!!! remerge the package.\n")
# Write-then-rename so a crash cannot leave a half-written COUNTER.
4299 newcpath=cpath+".new"
4300 # update new global counter file
4301 newcfile=open(newcpath,"w")
4302 newcfile.write(str(counter))
4304 # now move global counter file into place
4305 os.rename(newcpath,cpath)
# cpv_inject(): create an on-disk vdb entry for mycpv and record a fresh
# global COUNTER value in it so emerge clean behaves correctly.
# NOTE(review): gapped listing -- the lcfile.close()/return tail (rows
# 4315-4316) is missing from this view.
4308 def cpv_inject(self,mycpv):
4309 "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
4310 os.makedirs(self.root+VDB_PATH+"/"+mycpv)
4311 counter=db[self.root]["vartree"].dbapi.counter_tick(self.root,mycpv=mycpv)
4312 # write local package counter so that emerge clean does the right thing
4313 lcfile=open(self.root+VDB_PATH+"/"+mycpv+"/COUNTER","w")
4314 lcfile.write(str(counter))
# isInjected(): an entry counts as "injected" when it exists but either
# carries an INJECTED marker or lacks a CONTENTS file.
# NOTE(review): gapped listing -- the return statements fall in gaps.
4317 def isInjected(self,mycpv):
4318 if self.cpv_exists(mycpv):
4319 if os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/INJECTED"):
4321 if not os.path.exists(self.root+VDB_PATH+"/"+mycpv+"/CONTENTS"):
# move_ent(): implement a package move (old cat/pkg -> new cat/pkg) in
# the installed-package db: relocate the entry directory, rename the
# stored ebuild, rewrite CATEGORY, then patch every vdb file via
# fixdbentries().  NOTE(review): gapped listing -- origcp/newcp unpacking
# and several continues are missing between the visible rows.
4325 def move_ent(self,mylist):
4328 origmatches=self.match(origcp,use_cache=0)
4331 for mycpv in origmatches:
4332 mycpsplit=catpkgsplit(mycpv)
4333 mynewcpv=newcp+"-"+mycpsplit[2]
4334 mynewcat=newcp.split("/")[0]
# Reattach a non-default revision to the new cpv.
4335 if mycpsplit[3]!="r0":
4336 mynewcpv += "-"+mycpsplit[3]
4337 mycpsplit_new = catpkgsplit(mynewcpv)
4338 origpath=self.root+VDB_PATH+"/"+mycpv
4339 if not os.path.exists(origpath):
4342 if not os.path.exists(self.root+VDB_PATH+"/"+mynewcat):
4343 #create the directory
4344 os.makedirs(self.root+VDB_PATH+"/"+mynewcat)
4345 newpath=self.root+VDB_PATH+"/"+mynewcpv
4346 if os.path.exists(newpath):
4347 #dest already exists; keep this puppy where it is.
4349 spawn(MOVE_BINARY+" "+origpath+" "+newpath,settings, free=1)
4351 # We need to rename the ebuild now.
4352 old_eb_path = newpath+"/"+mycpsplit[1] +"-"+mycpsplit[2]
4353 new_eb_path = newpath+"/"+mycpsplit_new[1]+"-"+mycpsplit[2]
4354 if mycpsplit[3] != "r0":
4355 old_eb_path += "-"+mycpsplit[3]
4356 new_eb_path += "-"+mycpsplit[3]
4357 if os.path.exists(old_eb_path+".ebuild"):
4358 os.rename(old_eb_path+".ebuild", new_eb_path+".ebuild")
4360 write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
# Sweep the whole vdb so dependency strings referencing the old name are
# rewritten everywhere, not just in the moved entry.
4362 dbdir = self.root+VDB_PATH
4363 for catdir in listdir(dbdir):
4364 catdir = dbdir+"/"+catdir
4365 if os.path.isdir(catdir):
4366 for pkgdir in listdir(catdir):
4367 pkgdir = catdir+"/"+pkgdir
4368 if os.path.isdir(pkgdir):
4369 fixdbentries(origcp, newcp, pkgdir)
# move_slot_ent(): for every installed match of an atom, rewrite its SLOT
# file from origslot to newslot.  NOTE(review): gapped listing --
# pkg/origslot/newslot unpacking, continues, and the close() are missing
# between the visible rows.
4371 def move_slot_ent(self,mylist):
4376 origmatches=self.match(pkg,use_cache=0)
4379 for mycpv in origmatches:
4380 origpath=self.root+VDB_PATH+"/"+mycpv
4381 if not os.path.exists(origpath):
4384 slot=grabfile(origpath+"/SLOT");
# Only entries currently in origslot are rewritten.
4388 if (slot[0]!=origslot):
4392 slotfile=open(origpath+"/SLOT", "w")
4393 slotfile.write(newslot+"\n")
# cp_list(): list installed cpvs for a cat/pkg key by scanning the
# category directory, using an mtime-validated cache (self.cpcache) and
# reporting invalid entries (e.g. -MERGING- leftovers) via invalidentry.
# NOTE(review): gapped listing -- the cache-hit return, the pkgsplit of
# each entry, and the final return are missing between the rows.
4396 def cp_list(self,mycp,use_cache=1):
4397 mysplit=mycp.split("/")
# A leading '*' category marker is stripped before hitting the disk.
4398 if mysplit[0] == '*':
4399 mysplit[0] = mysplit[0][1:]
4401 mystat=os.stat(self.root+VDB_PATH+"/"+mysplit[0])[stat.ST_MTIME]
4404 if use_cache and self.cpcache.has_key(mycp):
4405 cpc=self.cpcache[mycp]
4408 list=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
4415 #writemsg(red("INCOMPLETE MERGE:")+str(x[len("-MERGING-"):])+"\n")
4419 self.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
4421 if len(mysplit) > 1:
4422 if ps[0]==mysplit[1]:
4423 returnme.append(mysplit[0]+"/"+x)
# Cache keyed by the directory mtime captured above.
4425 self.cpcache[mycp]=[mystat,returnme]
4426 elif self.cpcache.has_key(mycp):
4427 del self.cpcache[mycp]
# cpv_all(): walk every category directory under the vdb and collect all
# installed cat/pkg-version entries (directories that pkgsplit accepts).
# NOTE(review): gapped listing -- returnme init, the subpath assignment
# and the final return are missing between the visible rows.
4430 def cpv_all(self,use_cache=1):
4432 basepath = self.root+VDB_PATH+"/"
4434 mycats = self.categories
4436 # XXX: CIRCULAR DEP! This helps backwards compat. --NJ (10 Sept 2004)
4437 mycats = settings.categories
4440 for y in listdir(basepath+x,EmptyOnError=1):
4442 # -MERGING- should never be a cpv, nor should files.
4443 if os.path.isdir(basepath+subpath) and (pkgsplit(y) is not None):
4444 returnme += [subpath]
# cp_all(): derive the unique set of installed cat/pkg keys from
# cpv_all(), reporting entries that fail catpkgsplit.  checkblockers()'s
# body falls entirely in a listing gap.
4447 def cp_all(self,use_cache=1):
4448 mylist = self.cpv_all(use_cache=use_cache)
4453 mysplit=catpkgsplit(y)
4455 self.invalidentry(self.root+VDB_PATH+"/"+y)
# Dict used as a set for de-duplication (pre-set() Python 2 idiom).
4457 d[mysplit[0]+"/"+mysplit[1]] = None
4460 def checkblockers(self,origdep):
# match(): atom matching with a per-category result cache invalidated by
# the category directory's mtime; use_cache=0 bypasses and clears it.
# NOTE(review): gapped listing -- the use_cache==0 guard and exception
# fallback branches are missing between the visible rows.
4463 def match(self,origdep,use_cache=1):
4464 "caching match function"
4465 mydep=dep_expand(origdep,mydb=self,use_cache=use_cache)
4466 mykey=dep_getkey(mydep)
4467 mycat=mykey.split("/")[0]
# Cache bypass path: drop any stale cached data and match directly.
4469 if self.matchcache.has_key(mycat):
4470 del self.mtdircache[mycat]
4471 del self.matchcache[mycat]
4472 return match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
4474 curmtime=os.stat(self.root+VDB_PATH+"/"+mycat)[stat.ST_MTIME]
4475 except SystemExit, e:
# Directory mtime change invalidates the whole category's cache.
4480 if not self.matchcache.has_key(mycat) or not self.mtdircache[mycat]==curmtime:
4482 self.mtdircache[mycat]=curmtime
4483 self.matchcache[mycat]={}
4484 if not self.matchcache[mycat].has_key(mydep):
4485 mymatch=match_from_list(mydep,self.cp_list(mykey,use_cache=use_cache))
4486 self.matchcache[mycat][mydep]=mymatch
# Return a copy so callers cannot mutate the cached list.
4487 return self.matchcache[mycat][mydep][:]
def findname(self, mycpv):
	"""Return the path of the ebuild stored in the vdb entry for installed *mycpv*."""
	pf = mycpv.split("/")[1]
	return "%s%s/%s/%s.ebuild" % (self.root, VDB_PATH, str(mycpv), pf)
# aux_get(): read each requested metadata key from its file in the vdb
# entry, collapsing all whitespace to single spaces; includes an EAPI
# default fixup.  NOTE(review): gapped listing -- results init, the
# read/close lines, the else branch and the return are missing.
4492 def aux_get(self, mycpv, wants):
4496 myfn = self.root+VDB_PATH+"/"+str(mycpv)+"/"+str(x)
4497 if os.access(myfn,os.R_OK):
4498 myf = open(myfn, "r")
4501 myd = re.sub("[\n\r\t]+"," ",myd)
4502 myd = re.sub(" +"," ",myd)
4503 myd = string.strip(myd)
# Empty EAPI is normalized (the replacement value falls in a gap).
4508 idx = wants.index("EAPI")
4509 if not results[idx]:
# vartree: packagetree over the installed-package db at <root>/var/db/pkg.
# __init__ clones root+dbapi from an existing instance or builds a fresh
# vardbapi.  NOTE(review): gapped listing -- root assignment in the fresh
# branch and the superclass init are missing between the rows.
4514 class vartree(packagetree):
4515 "this tree will scan a var/db/pkg database located at root (passed to init)"
4516 def __init__(self,root="/",virtual=None,clone=None,categories=None):
4518 self.root = clone.root[:]
# Deep copy so the clone's caches are independent of the source tree.
4519 self.dbapi = copy.deepcopy(clone.dbapi)
4523 self.dbapi = vardbapi(self.root,categories=categories)
# zap()/inject() stubs -- their bodies fall entirely in listing gaps
# (presumably no-ops or thin dbapi delegations; verify in the full file).
4526 def zap(self,mycpv):
4529 def inject(self,mycpv):
# get_provide(): compute the virtuals provided by an installed package by
# USE-reducing its recorded PROVIDE string against its recorded USE flags;
# parse failures are reported but tolerated.  NOTE(review): gapped
# listing -- myprovides init, try header, and returns are missing.
4532 def get_provide(self,mycpv):
4535 mylines = grabfile(self.root+VDB_PATH+"/"+mycpv+"/PROVIDE")
4537 myuse = grabfile(self.root+VDB_PATH+"/"+mycpv+"/USE")
4538 myuse = string.split(string.join(myuse))
4539 mylines = string.join(mylines)
# Conditional PROVIDE entries are resolved against the package's USE.
4540 mylines = flatten(portage_dep.use_reduce(portage_dep.paren_reduce(mylines), uselist=myuse))
4541 for myprovide in mylines:
4542 mys = catpkgsplit(myprovide)
4544 mys = string.split(myprovide, "/")
4545 myprovides += [mys[0] + "/" + mys[1]]
4547 except SystemExit, e:
4549 except Exception, e:
4551 print "Check " + self.root+VDB_PATH+"/"+mycpv+"/PROVIDE and USE."
4552 print "Possibly Invalid: " + str(mylines)
4553 print "Exception: "+str(e)
# get_all_provides(): map each provided virtual to the list of installed
# cpvs that provide it, across the whole tree.  NOTE(review): gapped
# listing -- the myprovides init and return are missing from this view.
4557 def get_all_provides(self):
4559 for node in self.getallcpv():
4560 for mykey in self.get_provide(node):
4561 if myprovides.has_key(mykey):
4562 myprovides[mykey] += [node]
4564 myprovides[mykey] = [node]
# Compatibility matchers over ALL installed entries (visibility/masking
# does not apply to the vdb).  NOTE(review): gapped listing -- the return
# statements are missing from this view.
4567 def dep_bestmatch(self,mydep,use_cache=1):
4568 "compatibility method -- all matches, not just visible ones"
4569 #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
4570 mymatch=best(self.dbapi.match(dep_expand(mydep,mydb=self.dbapi),use_cache=use_cache))
4576 def dep_match(self,mydep,use_cache=1):
4577 "compatibility method -- we want to see all matches, not just visible ones"
4578 #mymatch=match(mydep,self.dbapi)
4579 mymatch=self.dbapi.match(mydep,use_cache=use_cache)
def exists_specific(self, cpv):
	"""Return True when an installed entry for *cpv* exists (delegates to the dbapi)."""
	return self.dbapi.cpv_exists(cpv)
def getallcpv(self):
	"""Temporary helper (rename pending): every installed
	category/package-version on the system, via the dbapi."""
	return self.dbapi.cpv_all()
def getallnodes(self):
	"""Every *unmasked* cat/pkg node; masked versions may or may not also
	exist for the nodes returned.  Thin wrapper over dbapi.cp_all()."""
	return self.dbapi.cp_all()
# exists_specific_cat(): after key-expanding cpv, scan the category dir
# for a package-name match, flagging unparsable entries.
# NOTE(review): gapped listing -- the split of cpv, the per-entry
# pkgsplit, and the returns are missing from this view.
4598 def exists_specific_cat(self,cpv,use_cache=1):
4599 cpv=key_expand(cpv,mydb=self.dbapi,use_cache=use_cache)
4603 mylist=listdir(self.root+VDB_PATH+"/"+a[0],EmptyOnError=1)
4607 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+a[0]+"/"+x)
def getebuildpath(self, fullpackage):
	"""Return the vdb path of the ebuild stored for installed *fullpackage* (cat/pkg-ver)."""
	# The two-way unpack enforces exactly one '/' in the argument, as before.
	cat, package = fullpackage.split("/")
	return "%s%s/%s/%s.ebuild" % (self.root, VDB_PATH, fullpackage, package)
# getnode(): list [cpv, [cat, pkg, ver, rev]] pairs for every installed
# version of a cat/pkg key, flagging unparsable directory entries.
# NOTE(review): gapped listing -- returnme init, loop header, and the
# final return are missing between the visible rows.
4617 def getnode(self,mykey,use_cache=1):
4618 mykey=key_expand(mykey,mydb=self.dbapi,use_cache=use_cache)
4621 mysplit=mykey.split("/")
4622 mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
4625 mypsplit=pkgsplit(x)
4627 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
4629 if mypsplit[0]==mysplit[1]:
4630 appendme=[mysplit[0]+"/"+x,[mysplit[0],mypsplit[0],mypsplit[1],mypsplit[2]]]
4631 returnme.append(appendme)
# getslot(): read the SLOT file of an installed catpkg, tolerating
# failures (SystemExit re-raised); hasnode(): True when any installed
# version of the cat/pkg key exists.  NOTE(review): gapped listing --
# try header, returns, and loop headers are missing between the rows.
4635 def getslot(self,mycatpkg):
4636 "Get a slot for a catpkg; assume it exists."
4639 myslot=string.join(grabfile(self.root+VDB_PATH+"/"+mycatpkg+"/SLOT"))
4640 except SystemExit, e:
4642 except Exception, e:
4646 def hasnode(self,mykey,use_cache):
4647 """Does the particular node (cat/pkg key) exist?"""
4648 mykey=key_expand(mykey,mydb=self.dbapi,use_cache=use_cache)
4649 mysplit=mykey.split("/")
4650 mydirlist=listdir(self.root+VDB_PATH+"/"+mysplit[0],EmptyOnError=1)
4652 mypsplit=pkgsplit(x)
4654 self.dbapi.invalidentry(self.root+VDB_PATH+"/"+mysplit[0]+"/"+x)
4656 if mypsplit[0]==mysplit[1]:
4664 'DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
4665 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
4666 'KEYWORDS', 'INHERITED', 'IUSE', 'CDEPEND',
4667 'PDEPEND', 'PROVIDE', 'EAPI',
4668 'UNUSED_01', 'UNUSED_02', 'UNUSED_03', 'UNUSED_04',
4669 'UNUSED_05', 'UNUSED_06', 'UNUSED_07',
4671 auxdbkeylen=len(auxdbkeys)
# close_portdbapi_caches(): iterate every live portdbapi instance; the
# loop body (presumably i.close_caches()) falls in a listing gap.
4673 def close_portdbapi_caches():
4674 for i in portdbapi.portdbapi_instances:
4678 class portdbapi(dbapi):
4679 """this tree will scan a portage directory located at root (passed to init)"""
4680 portdbapi_instances = []
4682 def __init__(self,porttree_root,mysettings=None):
4683 portdbapi.portdbapi_instances.append(self)
4687 self.mysettings = mysettings
4689 self.mysettings = config(clone=settings)
4691 self.manifestVerifyLevel = None
4692 self.manifestVerifier = None
4693 self.manifestCache = {} # {location: [stat, md5]}
4694 self.manifestMissingCache = []
4696 if "gpg" in self.mysettings.features:
4697 self.manifestVerifyLevel = portage_gpg.EXISTS
4698 if "strict" in self.mysettings.features:
4699 self.manifestVerifyLevel = portage_gpg.MARGINAL
4700 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
4701 elif "severe" in self.mysettings.features:
4702 self.manifestVerifyLevel = portage_gpg.TRUSTED
4703 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", requireSignedRing=True, minimumTrust=self.manifestVerifyLevel)
4705 self.manifestVerifier = portage_gpg.FileChecker(self.mysettings["PORTAGE_GPG_DIR"], "gentoo.gpg", minimumTrust=self.manifestVerifyLevel)
4707 #self.root=settings["PORTDIR"]
4708 self.porttree_root = porttree_root
4710 self.depcachedir = self.mysettings.depcachedir[:]
4712 self.tmpfs = self.mysettings["PORTAGE_TMPFS"]
4713 if self.tmpfs and not os.path.exists(self.tmpfs):
4715 if self.tmpfs and not os.access(self.tmpfs, os.W_OK):
4717 if self.tmpfs and not os.access(self.tmpfs, os.R_OK):
4720 self.eclassdb = eclass_cache.cache(self.porttree_root, overlays=settings["PORTDIR_OVERLAY"].split())
4723 self.metadbmodule = self.mysettings.load_best_module("portdbapi.metadbmodule")
4725 #if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
4729 self.porttrees=[self.porttree_root]+self.mysettings["PORTDIR_OVERLAY"].split()
4730 self.auxdbmodule = self.mysettings.load_best_module("portdbapi.auxdbmodule")
4733 # XXX: REMOVE THIS ONCE UNUSED_0 IS YANKED FROM auxdbkeys
4735 filtered_auxdbkeys = filter(lambda x: not x.startswith("UNUSED_0"), auxdbkeys)
4736 for x in self.porttrees:
4737 # location, label, auxdbkeys
4738 self.auxdb[x] = self.auxdbmodule(portage_const.DEPCACHE_PATH, x, filtered_auxdbkeys, gid=portage_gid)
def close_caches(self):
    """Flush every per-tree metadata cache in self.auxdb to disk.

    self.auxdb maps a porttree location to an auxdb cache module instance;
    each one exposes a sync() method. Reconstructed from a whitespace-mangled
    dump (stray line-number prefixes removed, indentation restored);
    logic unchanged.
    """
    for x in self.auxdb.keys():
        self.auxdb[x].sync()
4745 def flush_cache(self):
4749 def finddigest(self,mycpv):
4751 mydig = self.findname2(mycpv)[0]
4752 mydigs = string.split(mydig, "/")[:-1]
4753 mydig = string.join(mydigs, "/")
4755 mysplit = mycpv.split("/")
4756 except SystemExit, e:
4760 return mydig+"/files/digest-"+mysplit[-1]
def findname(self, mycpv):
    """Return the ebuild file path for mycpv.

    Thin convenience wrapper: findname2() returns (path, in_overlay);
    this keeps only the path. Reconstructed from a whitespace-mangled
    dump (stray line-number prefixes removed); logic unchanged.
    """
    return self.findname2(mycpv)[0]
4765 def findname2(self,mycpv):
4766 "returns file location for this particular package and in_overlay flag"
4769 mysplit=mycpv.split("/")
4771 psplit=pkgsplit(mysplit[1])
4774 for x in self.porttrees:
4775 file=x+"/"+mysplit[0]+"/"+psplit[0]+"/"+mysplit[1]+".ebuild"
4776 if os.access(file, os.R_OK):
4780 return ret[0], ret[1]
4785 def aux_get(self, mycpv, mylist):
4786 "stub code for returning auxilliary db information, such as SLOT, DEPEND, etc."
4787 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
4788 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise KeyError if error'
4789 global auxdbkeys,auxdbkeylen
4791 cat,pkg = string.split(mycpv, "/", 1)
4793 myebuild, mylocation=self.findname2(mycpv)
4796 writemsg("!!! aux_get(): ebuild path for '%(cpv)s' not specified:\n" % {"cpv":mycpv})
4797 writemsg("!!! %s\n" % myebuild)
4798 raise KeyError, "'%(cpv)s' at %(path)s" % {"cpv":mycpv,"path":myebuild}
4800 myManifestPath = string.join(myebuild.split("/")[:-1],"/")+"/Manifest"
4801 if "gpg" in self.mysettings.features:
4803 mys = portage_gpg.fileStats(myManifestPath)
4804 if (myManifestPath in self.manifestCache) and \
4805 (self.manifestCache[myManifestPath] == mys):
4807 elif self.manifestVerifier:
4808 if not self.manifestVerifier.verify(myManifestPath):
4809 # Verification failed the desired level.
4810 raise portage_exception.UntrustedSignature, "Untrusted Manifest: %(manifest)s" % {"manifest":myManifestPath}
4812 if ("severe" in self.mysettings.features) and \
4813 (mys != portage_gpg.fileStats(myManifestPath)):
4814 raise portage_exception.SecurityViolation, "Manifest changed: %(manifest)s" % {"manifest":myManifestPath}
4816 except portage_exception.InvalidSignature, e:
4817 if ("strict" in self.mysettings.features) or \
4818 ("severe" in self.mysettings.features):
4820 writemsg("!!! INVALID MANIFEST SIGNATURE DETECTED: %(manifest)s\n" % {"manifest":myManifestPath})
4821 except portage_exception.MissingSignature, e:
4822 if ("severe" in self.mysettings.features):
4824 if ("strict" in self.mysettings.features):
4825 if myManifestPath not in self.manifestMissingCache:
4826 writemsg("!!! WARNING: Missing signature in: %(manifest)s\n" % {"manifest":myManifestPath})
4827 self.manifestMissingCache.insert(0,myManifestPath)
4828 except (OSError,portage_exception.FileNotFound), e:
4829 if ("strict" in self.mysettings.features) or \
4830 ("severe" in self.mysettings.features):
4831 raise portage_exception.SecurityViolation, "Error in verification of signatures: %(errormsg)s" % {"errormsg":str(e)}
4832 writemsg("!!! Manifest is missing or inaccessable: %(manifest)s\n" % {"manifest":myManifestPath})
4835 if os.access(myebuild, os.R_OK):
4836 emtime=os.stat(myebuild)[stat.ST_MTIME]
4838 writemsg("!!! aux_get(): ebuild for '%(cpv)s' does not exist at:\n" % {"cpv":mycpv})
4839 writemsg("!!! %s\n" % myebuild)
4843 mydata = self.auxdb[mylocation][mycpv]
4844 if emtime != long(mydata.get("_mtime_", 0)):
4846 elif len(mydata.get("_eclasses_", [])) > 0:
4847 doregen = not self.eclassdb.is_eclass_data_valid(mydata["_eclasses_"])
4855 try: del self.auxdb[mylocation][mycpv]
4856 except KeyError: pass
4858 writemsg("auxdb is valid: "+str(not doregen)+" "+str(pkg)+"\n", 2)
4861 writemsg("doregen: %s %s\n" % (doregen,mycpv), 2)
4862 writemsg("Generating cache entry(0) for: "+str(myebuild)+"\n",1)
4865 mydbkey = self.tmpfs+"/aux_db_key_temp"
4867 mydbkey = self.depcachedir+"/aux_db_key_temp"
4869 # XXX: Part of the gvisible hack/fix to prevent deadlock
4870 # XXX: through doebuild. Need to isolate this somehow...
4871 self.mysettings.reset()
4874 raise "Lock is already held by me?"
4876 mylock = portage_locks.lockfile(mydbkey, wantnewlockfile=1)
4878 if os.path.exists(mydbkey):
4881 except (IOError, OSError), e:
4882 portage_locks.unlockfile(mylock)
4884 writemsg("Uncaught handled exception: %(exception)s\n" % {"exception":str(e)})
4887 myret=doebuild(myebuild,"depend","/",self.mysettings,dbkey=mydbkey,tree="porttree")
4889 portage_locks.unlockfile(mylock)
4891 #depend returned non-zero exit code...
4892 writemsg(str(red("\naux_get():")+" (0) Error in "+mycpv+" ebuild. ("+str(myret)+")\n"
4893 " Check for syntax error or corruption in the ebuild. (--debug)\n\n"))
4897 mycent=open(mydbkey,"r")
4899 mylines=mycent.readlines()
4902 except (IOError, OSError):
4903 portage_locks.unlockfile(mylock)
4905 writemsg(str(red("\naux_get():")+" (1) Error in "+mycpv+" ebuild.\n"
4906 " Check for syntax error or corruption in the ebuild. (--debug)\n\n"))
4909 portage_locks.unlockfile(mylock)
4913 for x in range(0,len(mylines)):
4914 if mylines[x][-1] == '\n':
4915 mylines[x] = mylines[x][:-1]
4916 mydata[auxdbkeys[x]] = mylines[x]
4918 if "EAPI" not in mydata or not mydata["EAPI"].strip():
4919 mydata["EAPI"] = "0"
4921 if not eapi_is_supported(mydata["EAPI"]):
4922 # if newer version, wipe everything and negate eapi
4923 eapi = mydata["EAPI"]
4925 map(lambda x:mydata.setdefault(x, ""), auxdbkeys)
4926 mydata["EAPI"] = "-"+eapi
4928 if mydata.get("INHERITED", False):
4929 mydata["_eclasses_"] = self.eclassdb.get_eclass_data(mydata["INHERITED"].split())
4931 mydata["_eclasses_"] = {}
4933 del mydata["INHERITED"]
4935 mydata["_mtime_"] = emtime
4937 self.auxdb[mylocation][mycpv] = mydata
4939 #finally, we look at our internal cache entry and return the requested data.
4942 if x == "INHERITED":
4943 returnme.append(' '.join(mydata.get("_eclasses_", {}).keys()))
4945 returnme.append(mydata.get(x,""))
4947 if "EAPI" in mylist:
4948 idx = mylist.index("EAPI")
4949 if not returnme[idx]:
4954 def getfetchlist(self,mypkg,useflags=None,mysettings=None,all=0):
4955 if mysettings == None:
4956 mysettings = self.mysettings
4958 myuris = self.aux_get(mypkg,["SRC_URI"])[0]
4959 except (IOError,KeyError):
4960 print red("getfetchlist():")+" aux_get() error reading "+mypkg+"; aborting."
4963 if useflags is None:
4964 useflags = string.split(mysettings["USE"])
4966 myurilist = portage_dep.paren_reduce(myuris)
4967 myurilist = portage_dep.use_reduce(myurilist,uselist=useflags,matchall=all)
4968 newuris = flatten(myurilist)
4972 mya = os.path.basename(x)
4973 if not mya in myfiles:
4975 return [newuris, myfiles]
4977 def getfetchsizes(self,mypkg,useflags=None,debug=0):
4978 # returns a filename:size dictionnary of remaining downloads
4979 mydigest=self.finddigest(mypkg)
4980 checksums=digestParseFile(mydigest)
4982 if debug: print "[empty/missing/bad digest]: "+mypkg
4985 if useflags == None:
4986 myuris, myfiles = self.getfetchlist(mypkg,all=1)
4988 myuris, myfiles = self.getfetchlist(mypkg,useflags=useflags)
4989 #XXX: maybe this should be improved: take partial downloads
4990 # into account? check checksums?
4991 for myfile in myfiles:
4992 if debug and myfile not in checksums.keys():
4993 print "[bad digest]: missing",myfile,"for",mypkg
4994 elif myfile in checksums.keys():
4995 distfile=settings["DISTDIR"]+"/"+myfile
4996 if not os.access(distfile, os.R_OK):
4997 filesdict[myfile]=int(checksums[myfile]["size"])
5000 def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False):
5003 useflags = mysettings["USE"].split()
5004 myuri, myfiles = self.getfetchlist(mypkg, useflags=useflags, mysettings=mysettings, all=all)
5005 mydigest = self.finddigest(mypkg)
5006 mysums = digestParseFile(mydigest)
5010 if not mysums or x not in mysums:
5012 reason = "digest missing"
5014 ok,reason = portage_checksum.verify_all(self.mysettings["DISTDIR"]+"/"+x, mysums[x])
5016 failures[x] = reason
5021 def getsize(self,mypkg,useflags=None,debug=0):
5022 # returns the total size of remaining downloads
5024 # we use getfetchsizes() now, so this function would be obsoleted
5026 filesdict=self.getfetchsizes(mypkg,useflags=useflags,debug=debug)
5028 return "[empty/missing/bad digest]"
5030 for myfile in filesdict.keys():
5031 mysum+=filesdict[myfile]
5034 def cpv_exists(self,mykey):
5035 "Tells us whether an actual ebuild exists on disk (no masking)"
5036 cps2=mykey.split("/")
5037 cps=catpkgsplit(mykey,silent=0)
5041 if self.findname(cps[0]+"/"+cps2[1]):
5047 "returns a list of all keys in our tree"
5049 for x in self.mysettings.categories:
5050 for oroot in self.porttrees:
5051 for y in listdir(oroot+"/"+x,EmptyOnError=1,ignorecvs=1,dirsonly=1):
5057 def p_list(self,mycp):
5059 for oroot in self.porttrees:
5060 for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
5061 if x[-7:]==".ebuild":
5065 def cp_list(self,mycp,use_cache=1):
5066 mysplit=mycp.split("/")
5068 for oroot in self.porttrees:
5069 for x in listdir(oroot+"/"+mycp,EmptyOnError=1,ignorecvs=1):
5070 if x[-7:]==".ebuild":
5071 d[mysplit[0]+"/"+x[:-7]] = None
5075 for x in ["list-visible","bestmatch-visible","match-visible","match-all"]:
5083 def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None):
5084 "caching match function; very trick stuff"
5085 #if no updates are being made to the tree, we can consult our xcache...
5088 return self.xcache[level][origdep]
5093 #this stuff only runs on first call of xmatch()
5094 #create mydep, mykey from origdep
5095 mydep=dep_expand(origdep,mydb=self)
5096 mykey=dep_getkey(mydep)
5098 if level=="list-visible":
5099 #a list of all visible packages, not called directly (just by xmatch())
5100 #myval=self.visible(self.cp_list(mykey))
5101 myval=self.gvisible(self.visible(self.cp_list(mykey)))
5102 elif level=="bestmatch-visible":
5103 #dep match -- best match of all visible packages
5104 myval=best(self.xmatch("match-visible",None,mydep=mydep,mykey=mykey))
5105 #get all visible matches (from xmatch()), then choose the best one
5106 elif level=="bestmatch-list":
5107 #dep match -- find best match but restrict search to sublist
5108 myval=best(match_from_list(mydep,mylist))
5109 #no point is calling xmatch again since we're not caching list deps
5110 elif level=="match-list":
5111 #dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
5112 myval=match_from_list(mydep,mylist)
5113 elif level=="match-visible":
5114 #dep match -- find all visible matches
5115 myval=match_from_list(mydep,self.xmatch("list-visible",None,mydep=mydep,mykey=mykey))
5116 #get all visible packages, then get the matching ones
5117 elif level=="match-all":
5118 #match *all* visible *and* masked packages
5119 myval=match_from_list(mydep,self.cp_list(mykey))
5121 print "ERROR: xmatch doesn't handle",level,"query!"
5123 if self.frozen and (level not in ["match-list","bestmatch-list"]):
5124 self.xcache[level][mydep]=myval
def match(self, mydep, use_cache=1):
    """Return all visible packages matching the dependency atom mydep.

    Delegates to the caching xmatch() dispatcher at the "match-visible"
    level. The use_cache parameter is kept for interface compatibility;
    xmatch() manages its own cache. Reconstructed from a whitespace-mangled
    dump (stray line-number prefixes removed); logic unchanged.
    """
    return self.xmatch("match-visible", mydep)
5130 def visible(self,mylist):
5131 """two functions in one. Accepts a list of cpv values and uses the package.mask *and*
5132 packages file to remove invisible entries, returning remaining items. This function assumes
5133 that all entries in mylist have the same category and package name."""
5134 if (mylist==None) or (len(mylist)==0):
5137 #first, we mask out packages in the package.mask file
5139 cpv=catpkgsplit(mykey)
5142 print "visible(): invalid cat/pkg-v:",mykey
5144 mycp=cpv[0]+"/"+cpv[1]
5145 maskdict=self.mysettings.pmaskdict
5146 unmaskdict=self.mysettings.punmaskdict
5147 if maskdict.has_key(mycp):
5148 for x in maskdict[mycp]:
5149 mymatches=self.xmatch("match-all",x)
5151 #error in package.mask file; print warning and continue:
5152 print "visible(): package.mask entry \""+x+"\" is invalid, ignoring..."
5156 if unmaskdict.has_key(mycp):
5157 for z in unmaskdict[mycp]:
5158 mymatches_unmask=self.xmatch("match-all",z)
5159 if y in mymatches_unmask:
5168 revmaskdict=self.mysettings.prevmaskdict
5169 if revmaskdict.has_key(mycp):
5170 for x in revmaskdict[mycp]:
5171 #important: only match against the still-unmasked entries...
5172 #notice how we pass "newlist" to the xmatch() call below....
5173 #Without this, ~ deps in the packages files are broken.
5174 mymatches=self.xmatch("match-list",x,mylist=newlist)
5176 #error in packages file; print warning and continue:
5177 print "emerge: visible(): profile packages entry \""+x+"\" is invalid, ignoring..."
5180 while pos<len(newlist):
5181 if newlist[pos] not in mymatches:
5187 def gvisible(self,mylist):
5188 "strip out group-masked (not in current group) entries"
5194 pkgdict = self.mysettings.pkeywordsdict
5195 for mycpv in mylist:
5196 #we need to update this next line when we have fully integrated the new db api
5199 keys, eapi = db["/"]["porttree"].dbapi.aux_get(mycpv, ["KEYWORDS", "EAPI"])
5200 except (KeyError,IOError,TypeError):
5204 #print "!!! No KEYWORDS for "+str(mycpv)+" -- Untested Status"
5206 mygroups=keys.split()
5209 cp = dep_getkey(mycpv)
5210 if pkgdict.has_key(cp):
5211 matches = match_to_list(mycpv, pkgdict[cp].keys())
5212 for atom in matches:
5213 pgroups.extend(pkgdict[cp][atom])
5218 writemsg("--- WARNING: Package '%s' uses '*' keyword.\n" % mycpv)
5221 elif "-"+gp in pgroups:
5231 if not match and ((hastesting and "~*" in pgroups) or (hasstable and "*" in pgroups)):
5233 if match and eapi_is_supported(eapi):
5234 newlist.append(mycpv)
5237 class binarytree(packagetree):
5238 "this tree scans for a list of all packages available in PKGDIR"
5239 def __init__(self,root,pkgdir,virtual=None,clone=None):
5242 # XXX This isn't cloning. It's an instance of the same thing.
5243 self.root=clone.root
5244 self.pkgdir=clone.pkgdir
5245 self.dbapi=clone.dbapi
5246 self.populated=clone.populated
5247 self.tree=clone.tree
5248 self.remotepkgs=clone.remotepkgs
5249 self.invalids=clone.invalids
5252 #self.pkgdir=settings["PKGDIR"]
5254 self.dbapi=bindbapi(self)
5260 def move_ent(self,mylist):
5261 if not self.populated:
5265 mynewcat=newcp.split("/")[0]
5266 origmatches=self.dbapi.cp_list(origcp)
5269 for mycpv in origmatches:
5271 mycpsplit=catpkgsplit(mycpv)
5272 mynewcpv=newcp+"-"+mycpsplit[2]
5273 if mycpsplit[3]!="r0":
5274 mynewcpv += "-"+mycpsplit[3]
5275 myoldpkg=mycpv.split("/")[1]
5276 mynewpkg=mynewcpv.split("/")[1]
5278 if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
5279 writemsg("!!! Cannot update binary: Destination exists.\n")
5280 writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n")
5283 tbz2path=self.getname(mycpv)
5284 if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
5285 writemsg("!!! Cannot update readonly binary: "+mycpv+"\n")
5288 #print ">>> Updating data in:",mycpv
5289 sys.stdout.write("%")
5291 mytmpdir=settings["PORTAGE_TMPDIR"]+"/tbz2"
5292 mytbz2=xpak.tbz2(tbz2path)
5293 mytbz2.decompose(mytmpdir, cleanup=1)
5295 fixdbentries(origcp, newcp, mytmpdir)
5297 write_atomic(os.path.join(mytmpdir, "CATEGORY"), mynewcat+"\n")
5299 os.rename(mytmpdir+"/"+string.split(mycpv,"/")[1]+".ebuild", mytmpdir+"/"+string.split(mynewcpv, "/")[1]+".ebuild")
5300 except SystemExit, e:
5302 except Exception, e:
5305 mytbz2.recompose(mytmpdir, cleanup=1)
5307 self.dbapi.cpv_remove(mycpv)
5308 if (mynewpkg != myoldpkg):
5309 os.rename(tbz2path,self.getname(mynewcpv))
5310 self.dbapi.cpv_inject(mynewcpv)
5313 def move_slot_ent(self,mylist,mytmpdir):
5314 #mytmpdir=settings["PORTAGE_TMPDIR"]+"/tbz2"
5315 mytmpdir=mytmpdir+"/tbz2"
5316 if not self.populated:
5321 origmatches=self.dbapi.match(pkg)
5324 for mycpv in origmatches:
5325 mycpsplit=catpkgsplit(mycpv)
5326 myoldpkg=mycpv.split("/")[1]
5327 tbz2path=self.getname(mycpv)
5328 if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
5329 writemsg("!!! Cannot update readonly binary: "+mycpv+"\n")
5332 #print ">>> Updating data in:",mycpv
5333 mytbz2=xpak.tbz2(tbz2path)
5334 mytbz2.decompose(mytmpdir, cleanup=1)
5336 slot=grabfile(mytmpdir+"/SLOT");
5340 if (slot[0]!=origslot):
5343 sys.stdout.write("S")
5346 slotfile=open(mytmpdir+"/SLOT", "w")
5347 slotfile.write(newslot+"\n")
5349 mytbz2.recompose(mytmpdir, cleanup=1)
5352 def update_ents(self,mybiglist,mytmpdir):
5353 #XXX mytmpdir=settings["PORTAGE_TMPDIR"]+"/tbz2"
5354 if not self.populated:
5356 for mycpv in self.dbapi.cp_all():
5357 tbz2path=self.getname(mycpv)
5358 if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
5359 writemsg("!!! Cannot update readonly binary: "+mycpv+"\n")
5361 #print ">>> Updating binary data:",mycpv
5363 mytbz2=xpak.tbz2(tbz2path)
5364 mytbz2.decompose(mytmpdir,cleanup=1)
5365 for mylist in mybiglist:
5366 mylist=string.split(mylist)
5367 if mylist[0] != "move":
5369 fixdbentries(mylist[1], mylist[2], mytmpdir)
5370 mytbz2.recompose(mytmpdir,cleanup=1)
5373 def populate(self, getbinpkgs=0,getbinpkgsonly=0):
5374 "populates the binarytree"
5375 if (not os.path.isdir(self.pkgdir) and not getbinpkgs):
5377 if (not os.path.isdir(self.pkgdir+"/All") and not getbinpkgs):
5380 if (not getbinpkgsonly) and os.path.exists(self.pkgdir+"/All"):
5381 for mypkg in listdir(self.pkgdir+"/All"):
5382 if mypkg[-5:]!=".tbz2":
5384 mytbz2=xpak.tbz2(self.pkgdir+"/All/"+mypkg)
5385 mycat=mytbz2.getfile("CATEGORY")
5387 #old-style or corrupt package
5388 writemsg("!!! Invalid binary package: "+mypkg+"\n")
5389 self.invalids.append(mypkg)
5391 mycat=string.strip(mycat)
5392 fullpkg=mycat+"/"+mypkg[:-5]
5393 mykey=dep_getkey(fullpkg)
5395 # invalid tbz2's can hurt things.
5396 self.dbapi.cpv_inject(fullpkg)
5397 except SystemExit, e:
5402 if getbinpkgs and not settings["PORTAGE_BINHOST"]:
5403 writemsg(red("!!! PORTAGE_BINHOST unset, but use is requested.\n"))
5405 if getbinpkgs and settings["PORTAGE_BINHOST"] and not self.remotepkgs:
5407 chunk_size = long(settings["PORTAGE_BINHOST_CHUNKSIZE"])
5410 except SystemExit, e:
5415 writemsg(green("Fetching binary packages info...\n"))
5416 self.remotepkgs = getbinpkg.dir_get_metadata(settings["PORTAGE_BINHOST"], chunk_size=chunk_size)
5417 writemsg(green(" -- DONE!\n\n"))
5419 for mypkg in self.remotepkgs.keys():
5420 if not self.remotepkgs[mypkg].has_key("CATEGORY"):
5421 #old-style or corrupt package
5422 writemsg("!!! Invalid remote binary package: "+mypkg+"\n")
5423 del self.remotepkgs[mypkg]
5425 mycat=string.strip(self.remotepkgs[mypkg]["CATEGORY"])
5426 fullpkg=mycat+"/"+mypkg[:-5]
5427 mykey=dep_getkey(fullpkg)
5429 # invalid tbz2's can hurt things.
5430 #print "cpv_inject("+str(fullpkg)+")"
5431 self.dbapi.cpv_inject(fullpkg)
5432 #print " -- Injected"
5433 except SystemExit, e:
5436 writemsg("!!! Failed to inject remote binary package:"+str(fullpkg)+"\n")
5437 del self.remotepkgs[mypkg]
def inject(self, cpv):
    """Register cpv in the binary-package database (delegates to bindbapi).

    Reconstructed from a whitespace-mangled dump (stray line-number
    prefixes removed); logic unchanged.
    """
    return self.dbapi.cpv_inject(cpv)
5444 def exists_specific(self,cpv):
5445 if not self.populated:
5447 return self.dbapi.match(dep_expand("="+cpv,mydb=self.dbapi))
5449 def dep_bestmatch(self,mydep):
5450 "compatibility method -- all matches, not just visible ones"
5451 if not self.populated:
5454 writemsg("mydep: %s\n" % mydep, 1)
5455 mydep=dep_expand(mydep,mydb=self.dbapi)
5456 writemsg("mydep: %s\n" % mydep, 1)
5457 mykey=dep_getkey(mydep)
5458 writemsg("mykey: %s\n" % mykey, 1)
5459 mymatch=best(match_from_list(mydep,self.dbapi.cp_list(mykey)))
5460 writemsg("mymatch: %s\n" % mymatch, 1)
5465 def getname(self,pkgname):
5466 "returns file location for this particular package"
5467 mysplit=string.split(pkgname,"/")
5469 return self.pkgdir+"/All/"+self.resolve_specific(pkgname)+".tbz2"
5471 return self.pkgdir+"/All/"+mysplit[1]+".tbz2"
5473 def isremote(self,pkgname):
5474 "Returns true if the package is kept remotely."
5475 mysplit=string.split(pkgname,"/")
5476 remote = (not os.path.exists(self.getname(pkgname))) and self.remotepkgs.has_key(mysplit[1]+".tbz2")
def get_use(self, pkgname):
    """Return the list of USE flags a binary package was built with.

    For remote packages the USE string comes from the fetched metadata in
    self.remotepkgs; for local packages it is read out of the tbz2's xpak
    segment. Reconstructed from a whitespace-mangled dump (indentation of
    the remote-branch return inferred from context -- it only makes sense
    inside the isremote() guard); logic otherwise unchanged.
    """
    mysplit = string.split(pkgname, "/")
    if self.isremote(pkgname):
        return string.split(self.remotepkgs[mysplit[1] + ".tbz2"]["USE"][:])
    tbz2 = xpak.tbz2(self.getname(pkgname))
    return string.split(tbz2.getfile("USE"))
5486 def gettbz2(self,pkgname):
5487 "fetches the package from a remote site, if necessary."
5488 print "Fetching '"+str(pkgname)+"'"
5489 mysplit = string.split(pkgname,"/")
5490 tbz2name = mysplit[1]+".tbz2"
5491 if not self.isremote(pkgname):
5492 if (tbz2name not in self.invalids):
5495 writemsg("Resuming download of this tbz2, but it is possible that it is corrupt.\n")
5496 mydest = self.pkgdir+"/All/"
5498 os.makedirs(mydest, 0775)
5499 except SystemExit, e:
5503 return getbinpkg.file_get(settings["PORTAGE_BINHOST"]+"/"+tbz2name, mydest, fcmd=settings["RESUMECOMMAND"])
5505 def getslot(self,mycatpkg):
5506 "Get a slot for a catpkg; assume it exists."
5509 myslot=self.dbapi.aux_get(mycatpkg,["SLOT"])[0]
5510 except SystemExit, e:
5512 except Exception, e:
5517 "this class provides an interface to the standard text package database"
5518 def __init__(self,cat,pkg,myroot,mysettings,treetype=None):
5519 "create a dblink object for cat/pkg. This dblink entry may or may not exist"
5522 self.mycpv = self.cat+"/"+self.pkg
5523 self.mysplit = pkgsplit(self.mycpv)
5524 self.treetype = treetype
5526 self.dbroot = os.path.normpath(myroot+VDB_PATH)
5527 self.dbcatdir = self.dbroot+"/"+cat
5528 self.dbpkgdir = self.dbcatdir+"/"+pkg
5529 self.dbtmpdir = self.dbcatdir+"/-MERGING-"+pkg
5530 self.dbdir = self.dbpkgdir
5532 self.lock_pkg = None
5533 self.lock_tmp = None
5534 self.lock_num = 0 # Count of the held locks on the db.
5536 self.settings = mysettings
5537 if self.settings==1:
5541 self.updateprotect()
5542 self.contentscache=[]
5545 if self.lock_num == 0:
5546 self.lock_pkg = portage_locks.lockdir(self.dbpkgdir)
5547 self.lock_tmp = portage_locks.lockdir(self.dbtmpdir)
5552 if self.lock_num == 0:
5553 portage_locks.unlockdir(self.lock_tmp)
5554 portage_locks.unlockdir(self.lock_pkg)
5557 "return path to location of db information (for >>> informational display)"
5561 "does the db entry exist? boolean."
5562 return os.path.exists(self.dbdir)
5565 "create the skeleton db directory structure. No contents, virtuals, provides or anything. Also will create /var/db/pkg if necessary."
5566 # XXXXX Delete this eventually
5567 raise Exception, "This is bad. Don't use it."
5568 if not os.path.exists(self.dbdir):
5569 os.makedirs(self.dbdir)
5572 "erase this db entry completely"
5573 if not os.path.exists(self.dbdir):
5576 for x in listdir(self.dbdir):
5577 os.unlink(self.dbdir+"/"+x)
5578 os.rmdir(self.dbdir)
5580 print "!!! Unable to remove db entry for this package."
5581 print "!!! It is possible that a directory is in this one. Portage will still"
5582 print "!!! register this package as installed as long as this directory exists."
5583 print "!!! You may delete this directory with 'rm -Rf "+self.dbdir+"'"
def clearcontents(self):
    """Delete this package's CONTENTS file from its vdb entry, if present.

    A missing CONTENTS file is not an error -- the method is a no-op then.
    Reconstructed from a whitespace-mangled dump (stray line-number
    prefixes removed, indentation restored); logic unchanged.
    """
    if os.path.exists(self.dbdir + "/CONTENTS"):
        os.unlink(self.dbdir + "/CONTENTS")
5592 def getcontents(self):
5593 if not os.path.exists(self.dbdir+"/CONTENTS"):
5595 if self.contentscache != []:
5596 return self.contentscache
5598 myc=open(self.dbdir+"/CONTENTS","r")
5599 mylines=myc.readlines()
5602 for line in mylines:
5603 mydat = string.split(line)
5604 # we do this so we can remove from non-root filesystems
5605 # (use the ROOT var to allow maintenance on other partitions)
5607 mydat[1]=os.path.normpath(root+mydat[1][1:])
5609 #format: type, mtime, md5sum
5610 pkgfiles[string.join(mydat[1:-2]," ")]=[mydat[0], mydat[-1], mydat[-2]]
5611 elif mydat[0]=="dir":
5613 pkgfiles[string.join(mydat[1:])]=[mydat[0] ]
5614 elif mydat[0]=="sym":
5615 #format: type, mtime, dest
5617 if (x >= 13) and (mydat[-1][-1]==')'): # Old/Broken symlink entry
5618 mydat = mydat[:-10]+[mydat[-10:][stat.ST_MTIME][:-1]]
5619 writemsg("FIXED SYMLINK LINE: %s\n" % mydat, 1)
5629 pkgfiles[string.join(mydat[1:splitter]," ")]=[mydat[0], mydat[-1], string.join(mydat[(splitter+1):-1]," ")]
5630 elif mydat[0]=="dev":
5632 pkgfiles[string.join(mydat[1:]," ")]=[mydat[0] ]
5633 elif mydat[0]=="fif":
5635 pkgfiles[string.join(mydat[1:]," ")]=[mydat[0]]
5638 except (KeyError,IndexError):
5639 print "portage: CONTENTS line",pos,"corrupt!"
5641 self.contentscache=pkgfiles
5644 def updateprotect(self):
5645 #do some config file management prep
5647 for x in string.split(self.settings["CONFIG_PROTECT"]):
5648 ppath=normalize_path(self.myroot+x)+"/"
5649 if os.path.isdir(ppath):
5650 self.protect.append(ppath)
5653 for x in string.split(self.settings["CONFIG_PROTECT_MASK"]):
5654 ppath=normalize_path(self.myroot+x)+"/"
5655 if os.path.isdir(ppath):
5656 self.protectmask.append(ppath)
5657 #if it doesn't exist, silently skip it
5659 def isprotected(self,obj):
5660 """Checks if obj is in the current protect/mask directories. Returns
5661 0 on unprotected/masked, and 1 on protected."""
5664 for ppath in self.protect:
5665 if (len(ppath) > masked) and (obj[0:len(ppath)]==ppath):
5666 protected=len(ppath)
5667 #config file management
5668 for pmpath in self.protectmask:
5669 if (len(pmpath) >= protected) and (obj[0:len(pmpath)]==pmpath):
5670 #skip, it's in the mask
5672 return (protected > masked)
5674 def unmerge(self,pkgfiles=None,trimworld=1,cleanup=0):
5680 self.settings.load_infodir(self.dbdir)
5683 print "No package files given... Grabbing a set."
5684 pkgfiles=self.getcontents()
5686 # Now, don't assume that the name of the ebuild is the same as the
5687 # name of the dir; the package may have been moved.
5690 # We should use the environement file if possible,
5691 # as it has all sourced files already included.
5692 # XXX: Need to ensure it doesn't overwrite any important vars though.
5693 if os.access(self.dbdir+"/environment.bz2", os.R_OK):
5694 spawn("bzip2 -d "+self.dbdir+"/environment.bz2",self.settings,free=1)
5696 if not myebuildpath:
5697 mystuff=listdir(self.dbdir,EmptyOnError=1)
5699 if x[-7:]==".ebuild":
5700 myebuildpath=self.dbdir+"/"+x
5704 if myebuildpath and os.path.exists(myebuildpath):
5705 a=doebuild(myebuildpath,"prerm",self.myroot,self.settings,cleanup=cleanup,use_cache=0,tree=self.treetype)
5706 # XXX: Decide how to handle failures here.
5708 writemsg("!!! FAILED prerm: "+str(a)+"\n")
5712 mykeys=pkgfiles.keys()
5716 self.updateprotect()
5718 #process symlinks second-to-last, directories last.
5720 modprotect="/lib/modules/"
5721 for objkey in mykeys:
5722 obj=os.path.normpath(objkey)
5727 statobj = os.stat(obj)
5732 lstatobj = os.lstat(obj)
5733 except (OSError, AttributeError):
5735 islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
5738 #we skip this if we're dealing with a symlink
5739 #because os.stat() will operate on the
5740 #link target rather than the link itself.
5741 writemsg_stdout("--- !found "+str(pkgfiles[objkey][0])+ " %s\n" % obj)
5743 # next line includes a tweak to protect modules from being unmerged,
5744 # but we don't protect modules from being overwritten if they are
5745 # upgraded. We effectively only want one half of the config protection
5746 # functionality for /lib/modules. For portage-ng both capabilities
5747 # should be able to be independently specified.
5748 if self.isprotected(obj) or ((len(obj) > len(modprotect)) and (obj[0:len(modprotect)]==modprotect)):
5749 writemsg_stdout("--- cfgpro %s %s\n" % (pkgfiles[objkey][0], obj))
5752 lmtime=str(lstatobj[stat.ST_MTIME])
5753 if (pkgfiles[objkey][0] not in ("dir","fif","dev")) and (lmtime != pkgfiles[objkey][1]):
5754 writemsg_stdout("--- !mtime %s %s\n" % (pkgfiles[objkey][0], obj))
5757 if pkgfiles[objkey][0]=="dir":
5758 if statobj is None or not stat.S_ISDIR(statobj.st_mode):
5759 writemsg_stdout("--- !dir %s %s\n" % ("dir", obj))
5762 elif pkgfiles[objkey][0]=="sym":
# NOTE(review): fragment -- the enclosing method's "def" line lies above this
# extract and the embedded original line numbers (e.g. "5764") jump, so
# statements are missing between most visible lines.  Do not treat as
# runnable; comments below annotate only what the visible lines show.
# Tail of the per-entry unmerge loop: each CONTENTS entry type ("sym",
# "obj", "fif", "dev", plus directories) is verified before removal and a
# "<<<" (removed) / "---" (skipped) status line is printed.
5764 writemsg_stdout("--- !sym %s %s\n" % ("sym", obj))
5768 writemsg_stdout("<<< %s %s\n" % ("sym",obj))
5769 except (OSError,IOError),e:
5770 writemsg_stdout("!!! %s %s\n" % ("sym",obj))
5771 elif pkgfiles[objkey][0]=="obj":
# Regular file: skipped unless it is still a regular file whose MD5 matches
# the recorded one (see the string.lower note below).
5772 if statobj is None or not stat.S_ISREG(statobj.st_mode):
5773 writemsg_stdout("--- !obj %s %s\n" % ("obj", obj))
5777 mymd5 = portage_checksum.perform_md5(obj, calc_prelink=1)
5778 except portage_exception.FileNotFound, e:
5779 # the file has disappeared between now and our stat call
5780 writemsg_stdout("--- !obj %s %s\n" % ("obj", obj))
5783 # string.lower is needed because db entries used to be in upper-case. The
5784 # string.lower allows for backwards compatibility.
5785 if mymd5 != string.lower(pkgfiles[objkey][2]):
5786 writemsg_stdout("--- !md5 %s %s\n" % ("obj", obj))
5790 except (OSError,IOError),e:
5792 writemsg_stdout("<<< %s %s\n" % ("obj",obj))
5793 elif pkgfiles[objkey][0]=="fif":
5794 if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
5795 writemsg_stdout("--- !fif %s %s\n" % ("fif", obj))
5799 except (OSError,IOError),e:
5801 writemsg_stdout("<<< %s %s\n" % ("fif",obj))
5802 elif pkgfiles[objkey][0]=="dev":
# Device nodes are only reported, never removed here.
5803 writemsg_stdout("--- %s %s\n" % ("dev",obj))
# Directories: removed only when empty; last_non_empty caches the deepest
# directory known to still contain files so its parents are skipped fast.
5810 if not last_non_empty.startswith(obj) and not listdir(obj):
5813 writemsg_stdout("<<< %s %s\n" % ("dir",obj))
5816 except (OSError,IOError),e:
5820 writemsg_stdout("--- !empty dir %s\n" % obj)
5821 last_non_empty = obj
5824 #remove self from vartree database so that our own virtual gets zapped if we're the last node
5825 db[self.myroot]["vartree"].zap(self.mycpv)
5827 # New code to remove stuff from the world and virtuals files when unmerged.
5829 worldlist=grabfile(self.myroot+WORLD_FILE)
5830 mykey=cpv_getkey(self.mycpv)
# Rebuild the world file, dropping world atoms satisfied only by the
# package being unmerged (the decision branches are partly elided here).
5833 if dep_getkey(x)==mykey:
5834 matches=db[self.myroot]["vartree"].dbapi.match(x,use_cache=0)
5836 #zap our world entry
5838 elif (len(matches)==1) and (matches[0]==self.mycpv):
5839 #zap our world entry
5842 #others are around; keep it.
5843 newworldlist.append(x)
5845 #this doesn't match the package we're unmerging; keep it.
5846 newworldlist.append(x)
5848 # if the base dir doesn't exist, create it.
5849 # (spanky noticed bug)
5850 # XXX: dumb question, but abstracting the root uid might be wise/useful for
5851 # 2nd pkg manager installation setups.
5852 if not os.path.exists(os.path.dirname(self.myroot+WORLD_FILE)):
5853 pdir = os.path.dirname(self.myroot + WORLD_FILE)
5854 os.makedirs(pdir, mode=0755)
5855 os.chown(pdir, 0, portage_gid)
5856 os.chmod(pdir, 02770)
5858 write_atomic(os.path.join(self.myroot,WORLD_FILE),"\n".join(newworldlist))
# Finally run the ebuild's pkg_postrm phase if its ebuild still exists.
5861 if myebuildpath and os.path.exists(myebuildpath):
5862 # XXX: This should be the old config, not the current one.
5863 # XXX: Use vardbapi to load up env vars.
5864 a=doebuild(myebuildpath,"postrm",self.myroot,self.settings,use_cache=0,tree=self.treetype)
5865 # XXX: Decide how to handle failures here.
5867 writemsg("!!! FAILED postrm: "+str(a)+"\n")
def isowner(self, filename, destroot):
    """Check whether *filename* is safe for this package to claim.

    Returns True when the file either does not yet exist under *destroot*
    (a brand-new file) or is already recorded in this package's CONTENTS
    (this or a previous version owns it); returns False when some other
    package's file is in the way.

    NOTE(review): the extracted original was missing its return statements
    (elided lines); they are restored here to match the docstring's
    stated contract.
    """
    destfile = os.path.normpath(destroot + "/" + filename)
    if not os.path.exists(destfile):
        # Nothing is in the way: a new file is always "ours".
        return True
    # Call getcontents() once instead of twice (it may parse CONTENTS).
    contents = self.getcontents()
    if contents and filename in contents.keys():
        return True
    return False
# NOTE(review): elided extract -- the embedded original line numbers jump,
# so interior statements (try/else bodies, loop headers, returns) are
# missing.  Comments annotate visible code only; do not treat as runnable.
5883 def treewalk(self,srcroot,destroot,inforoot,myebuild,cleanup=0):
# Purpose (from visible code): drive a full package merge -- collision
# check, preinst, file merge via mergeme(), unmerge of the old instance,
# DB-entry promotion, postinst, env update, and elog processing.
5886 # destroot = where to merge, ie. ${ROOT},
5887 # inforoot = root of db entry,
5888 # secondhand = list of symlinks that have been skipped due to
5889 # their target not existing (will merge later),
5891 if not os.path.exists(self.dbcatdir):
5892 os.makedirs(self.dbcatdir)
5894 # This blocks until we can get the dirs to ourselves.
# Collect sibling versions of this package from the vartree.
5898 for v in db[self.myroot]["vartree"].dbapi.cp_list(self.mysplit[0]):
5899 otherversions.append(v.split("/")[1])
5901 # check for package collisions
5902 if "collision-protect" in features:
5903 myfilelist = listdir(srcroot, recursive=1, filesonly=1, followSymlinks=False)
5905 # the linkcheck only works if we are in srcroot
5908 mysymlinks = filter(os.path.islink, listdir(srcroot, recursive=1, filesonly=0, followSymlinks=False))
5909 myfilelist.extend(mysymlinks)
5912 starttime=time.time()
5918 if self.pkg in otherversions:
5919 otherversions.remove(self.pkg) # we already checked this package
5921 for v in otherversions:
5922 # should we check for same SLOT here ?
5923 mypkglist.append(dblink(self.cat,v,destroot,self.settings))
5925 print green("*")+" checking "+str(len(myfilelist))+" files for package collisions"
5926 for f in myfilelist:
5928 # listdir isn't intelligent enough to exclude symlinked dirs,
5929 # so we have to do it ourself
5930 for s in mysymlinks:
5931 # the length comparison makes sure that the symlink itself is checked
5932 if f[:len(s)] == s and len(f) > len(s):
5938 print str(i)+" files checked ..."
# A file collides unless some installed version of this package owns it
# or it is config-protected.
5942 for ver in [self]+mypkglist:
5943 if (ver.isowner(f, destroot) or ver.isprotected(f)):
5947 print "existing file "+f+" is not owned by this package"
5949 print green("*")+" spent "+str(time.time()-starttime)+" seconds checking for file collisions"
5951 print red("*")+" This package is blocked because it wants to overwrite"
5952 print red("*")+" files belonging to other packages (see messages above)."
5953 print red("*")+" If you have no clue what this is all about report it "
5954 print red("*")+" as a bug for this package on http://bugs.gentoo.org"
5956 print red("package "+self.cat+"/"+self.pkg+" NOT merged")
5958 # Why is the package already merged here db-wise? Shouldn't be the case
5959 # only unmerge if it ia new package and has no contents
5960 if not self.getcontents():
5967 except SystemExit, e:
5973 # get old contents info for later unmerging
5974 oldcontents = self.getcontents()
# Merge is staged into a temporary DB dir, promoted to dbpkgdir at the end.
5976 self.dbdir = self.dbtmpdir
5978 if not os.path.exists(self.dbtmpdir):
5979 os.makedirs(self.dbtmpdir)
5981 writemsg_stdout(">>> Merging %s %s %s\n" % (self.mycpv,"to",destroot))
5983 # run preinst script
5985 # if we are merging a new ebuild, use *its* pre/postinst rather than using the one in /var/db/pkg
5987 a=doebuild(myebuild,"preinst",root,self.settings,cleanup=cleanup,use_cache=0,tree=self.treetype)
5989 a=doebuild(inforoot+"/"+self.pkg+".ebuild","preinst",root,self.settings,cleanup=cleanup,use_cache=0,tree=self.treetype)
5991 # XXX: Decide how to handle failures here.
5993 writemsg("!!! FAILED preinst: "+str(a)+"\n")
5996 # copy "info" files (like SLOT, CFLAGS, etc.) into the database
5997 for x in listdir(inforoot):
5998 self.copyfile(inforoot+"/"+x)
6000 # get current counter value (counter_tick also takes care of incrementing it)
6001 # XXX Need to make this destroot, but it needs to be initialized first. XXX
6002 # XXX bis: leads to some invalidentry() call through cp_all().
6003 counter = db["/"]["vartree"].dbapi.counter_tick(self.myroot,mycpv=self.mycpv)
6004 # write local package counter for recording
6005 lcfile = open(self.dbtmpdir+"/COUNTER","w")
6006 lcfile.write(str(counter))
6009 # open CONTENTS file (possibly overwriting old one) for recording
6010 outfile=open(self.dbtmpdir+"/CONTENTS","w")
6012 self.updateprotect()
6014 #if we have a file containing previously-merged config file md5sums, grab it.
6015 if os.path.exists(destroot+CONFIG_MEMORY_FILE):
6016 cfgfiledict=grabdict(destroot+CONFIG_MEMORY_FILE)
# NOCONFMEM forces config-file "memory" to be ignored for this merge.
6019 if self.settings.has_key("NOCONFMEM"):
6020 cfgfiledict["IGNORE"]=1
6022 cfgfiledict["IGNORE"]=0
6024 # set umask to 0 for merging; back up umask, save old one in prevmask (since this is a global change)
6025 mymtime = long(time.time())
6026 prevmask = os.umask(0)
6029 # we do a first merge; this will recurse through all files in our srcroot but also build up a
6030 # "second hand" of symlinks to merge later
6031 if self.mergeme(srcroot,destroot,outfile,secondhand,"",cfgfiledict,mymtime):
6034 # now, it's time for dealing our second hand; we'll loop until we can't merge anymore. The rest are
6035 # broken symlinks. We'll merge them too.
6037 while len(secondhand) and len(secondhand)!=lastlen:
6038 # clear the thirdhand. Anything from our second hand that
6039 # couldn't get merged will be added to thirdhand.
6042 self.mergeme(srcroot,destroot,outfile,thirdhand,secondhand,cfgfiledict,mymtime)
6045 lastlen=len(secondhand)
6047 # our thirdhand now becomes our secondhand. It's ok to throw
6048 # away secondhand since thirdhand contains all the stuff that
6049 # couldn't be merged.
6050 secondhand = thirdhand
6053 # force merge of remaining symlinks (broken or circular; oh well)
6054 self.mergeme(srcroot,destroot,outfile,None,secondhand,cfgfiledict,mymtime)
6059 #if we opened it, close it
6063 writemsg_stdout(">>> Safely unmerging already-installed instance...\n")
6064 self.dbdir = self.dbpkgdir
6065 self.unmerge(oldcontents,trimworld=0)
6066 self.dbdir = self.dbtmpdir
6067 writemsg_stdout(">>> Original instance of package unmerged safely.\n")
6069 # We hold both directory locks.
6070 self.dbdir = self.dbpkgdir
# Promote the staged temp DB entry to its final location.
6072 movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
6076 #write out our collection of md5sums
6077 if cfgfiledict.has_key("IGNORE"):
6078 del cfgfiledict["IGNORE"]
6080 # XXXX: HACK! PathSpec is very necessary here.
6081 if not os.path.exists(destroot+PRIVATE_PATH):
6082 os.makedirs(destroot+PRIVATE_PATH)
6083 os.chown(destroot+PRIVATE_PATH,os.getuid(),portage_gid)
6084 os.chmod(destroot+PRIVATE_PATH,02770)
6085 dirlist = prefix_array(listdir(destroot+PRIVATE_PATH),destroot+PRIVATE_PATH+"/")
6088 dirlist.reverse() # Gets them in file-before basedir order
6090 if os.path.isdir(x):
6091 dirlist += prefix_array(listdir(x),x+"/")
6093 os.unlink(destroot+PRIVATE_PATH+"/"+x)
# Persist config-file md5 memory under a lock.
6095 mylock = portage_locks.lockfile(destroot+CONFIG_MEMORY_FILE)
6096 writedict(cfgfiledict,destroot+CONFIG_MEMORY_FILE)
6097 portage_locks.unlockfile(mylock)
6101 # if we are merging a new ebuild, use *its* pre/postinst rather than using the one in /var/db/pkg
6103 a=doebuild(myebuild,"postinst",root,self.settings,use_cache=0,tree=self.treetype)
6105 a=doebuild(inforoot+"/"+self.pkg+".ebuild","postinst",root,self.settings,use_cache=0,tree=self.treetype)
6107 # XXX: Decide how to handle failures here.
6109 writemsg("!!! FAILED postinst: "+str(a)+"\n")
# Detect a downgrade by comparing against the other installed versions.
6113 for v in otherversions:
6114 if pkgcmp(catpkgsplit(self.pkg)[1:], catpkgsplit(v)[1:]) < 0:
6117 #update environment settings, library paths. DO NOT change symlinks.
6118 env_update(makelinks=(not downgrade))
6119 #dircache may break autoclean because it remembers the -MERGING-pkg file
6121 if dircache.has_key(self.dbcatdir):
6122 del dircache[self.dbcatdir]
6123 writemsg_stdout(">>> %s %s\n" % (self.mycpv,"merged."))
6125 # Process ebuild logfiles
6126 elog_process(self.mycpv, self.settings)
# NOTE(review): elided extract -- interior statements (try headers, else
# branches, returns, loop headers) are missing where the embedded original
# line numbers jump.  Comments annotate visible code only.
6130 def mergeme(self,srcroot,destroot,outfile,secondhand,stufftomerge,cfgfiledict,thismtime):
# Purpose (from visible code): merge one directory level (or an explicit
# file list) from the staging root into destroot, writing CONTENTS records
# to *outfile* and deferring symlinks with missing targets to *secondhand*.
6131 srcroot=os.path.normpath("///"+srcroot)+"/"
6132 destroot=os.path.normpath("///"+destroot)+"/"
6133 # this is supposed to merge a list of files. There will be 2 forms of argument passing.
6134 if type(stufftomerge)==types.StringType:
6135 #A directory is specified. Figure out protection paths, listdir() it and process it.
6136 mergelist=listdir(srcroot+stufftomerge)
6138 # We need mydest defined up here to calc. protection paths. This is now done once per
6139 # directory rather than once per file merge. This should really help merge performance.
6140 # Trailing / ensures that protects/masks with trailing /'s match.
6141 mytruncpath="/"+offset+"/"
6142 myppath=self.isprotected(mytruncpath)
6144 mergelist=stufftomerge
# Per-entry loop: compute source, destination, and the ROOT-relative path.
6147 mysrc=os.path.normpath("///"+srcroot+offset+x)
6148 mydest=os.path.normpath("///"+destroot+offset+x)
6149 # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
6150 myrealdest="/"+offset+x
6151 # stat file once, test using S_* macros many times (faster that way)
6153 mystat=os.lstat(mysrc)
6154 except SystemExit, e:
6158 writemsg(red("!!! ERROR: There appears to be ")+bold("FILE SYSTEM CORRUPTION.")+red(" A file that is listed\n"))
6159 writemsg(red("!!! as existing is not capable of being stat'd. If you are using an\n"))
6160 writemsg(red("!!! experimental kernel, please boot into a stable one, force an fsck,\n"))
6161 writemsg(red("!!! and ensure your filesystem is in a sane state. ")+bold("'shutdown -Fr now'\n"))
6162 writemsg(red("!!! File: ")+str(mysrc)+"\n")
6163 writemsg(red("!!! Error: ")+str(e)+"\n")
6165 except Exception, e:
6167 writemsg(red("!!! ERROR: An unknown error has occurred during the merge process.\n"))
6168 writemsg(red("!!! A stat call returned the following error for the following file:"))
6169 writemsg( "!!! Please ensure that your filesystem is intact, otherwise report\n")
6170 writemsg( "!!! this as a portage bug at bugs.gentoo.org. Append 'emerge info'.\n")
6171 writemsg( "!!! File: "+str(mysrc)+"\n")
6172 writemsg( "!!! Error: "+str(e)+"\n")
6176 mymode=mystat[stat.ST_MODE]
6177 # handy variables; mydest is the target object on the live filesystems;
6178 # mysrc is the source object in the temporary install dir
6180 mydmode=os.lstat(mydest)[stat.ST_MODE]
6181 except SystemExit, e:
6184 #dest file doesn't exist
# --- symlink case ---------------------------------------------------
6187 if stat.S_ISLNK(mymode):
6188 # we are merging a symbolic link
6189 myabsto=abssymlink(mysrc)
6190 if myabsto[0:len(srcroot)]==srcroot:
6191 myabsto=myabsto[len(srcroot):]
6194 myto=os.readlink(mysrc)
# Strip the ${D} staging prefix from the link target if present.
6195 if self.settings and self.settings["D"]:
6196 if myto.find(self.settings["D"])==0:
6197 myto=myto[len(self.settings["D"]):]
6198 # myrealto contains the path of the real file to which this symlink points.
6199 # we can simply test for existence of this file to see if the target has been merged yet
6200 myrealto=os.path.normpath(os.path.join(destroot,myabsto))
6203 if not stat.S_ISLNK(mydmode):
6204 if stat.S_ISDIR(mydmode):
6205 # directory in the way: we can't merge a symlink over a directory
6206 # we won't merge this, continue with next file...
6208 srctarget = os.path.normpath(os.path.dirname(mysrc)+"/"+myto)
6209 if os.path.exists(mysrc) and stat.S_ISDIR(os.stat(mysrc)[stat.ST_MODE]):
6210 # Kill file blocking installation of symlink to dir #71787
6212 elif self.isprotected(mydest):
6213 # Use md5 of the target in ${D} if it exists...
6214 if os.path.exists(os.path.normpath(srcroot+myabsto)):
6215 mydest = new_protect_filename(myrealdest, newmd5=portage_checksum.perform_md5(srcroot+myabsto))
6217 mydest = new_protect_filename(myrealdest, newmd5=portage_checksum.perform_md5(myabsto))
6219 # if secondhand==None it means we're operating in "force" mode and should not create a second hand.
6220 if (secondhand!=None) and (not os.path.exists(myrealto)):
6221 # either the target directory doesn't exist yet or the target file doesn't exist -- or
6222 # the target is a broken symlink. We will add this file to our "second hand" and merge
6224 secondhand.append(mysrc[len(srcroot):])
6226 # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
6227 mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
6229 print ">>>",mydest,"->",myto
6230 outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
6232 print "!!! Failed to move file."
6233 print "!!!",mydest,"->",myto
# --- directory case -------------------------------------------------
6235 elif stat.S_ISDIR(mymode):
6236 # we are merging a directory
6238 # destination exists
6241 # Save then clear flags on dest.
6242 dflags=bsd_chflags.lgetflags(mydest)
6243 if(bsd_chflags.lchflags(mydest, 0)<0):
6244 writemsg("!!! Couldn't clear flags on '"+mydest+"'.\n")
6246 if not os.access(mydest, os.W_OK):
6247 pkgstuff = pkgsplit(self.pkg)
6248 writemsg("\n!!! Cannot write to '"+mydest+"'.\n")
6249 writemsg("!!! Please check permissions and directories for broken symlinks.\n")
6250 writemsg("!!! You may start the merge process again by using ebuild:\n")
6251 writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
6252 writemsg("!!! And finish by running this: env-update\n\n")
6255 if stat.S_ISLNK(mydmode) or stat.S_ISDIR(mydmode):
6256 # a symlink to an existing directory will work for us; keep it:
6257 writemsg_stdout("--- %s/\n" % mydest)
6259 bsd_chflags.lchflags(mydest, dflags)
6261 # a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
6262 if movefile(mydest,mydest+".backup", mysettings=self.settings) == None:
6264 print "bak",mydest,mydest+".backup"
6265 #now create our directory
6267 sid = selinux.get_sid(mysrc)
6268 selinux.secure_mkdir(mydest,sid)
6272 bsd_chflags.lchflags(mydest, dflags)
# mystat[0]/[4]/[5] are st_mode/st_uid/st_gid of the staged directory.
6273 os.chmod(mydest,mystat[0])
6274 os.chown(mydest,mystat[4],mystat[5])
6275 writemsg_stdout(">>> %s/\n" % mydest)
6277 #destination doesn't exist
6279 sid = selinux.get_sid(mysrc)
6280 selinux.secure_mkdir(mydest,sid)
6283 os.chmod(mydest,mystat[0])
6285 bsd_chflags.lchflags(mydest, bsd_chflags.lgetflags(mysrc))
6286 os.chown(mydest,mystat[4],mystat[5])
6287 writemsg_stdout(">>> %s/\n" % mydest)
6288 outfile.write("dir "+myrealdest+"\n")
6289 # recurse and merge this directory
6290 if self.mergeme(srcroot,destroot,outfile,secondhand,offset+x+"/",cfgfiledict,thismtime):
# --- regular-file case ----------------------------------------------
6292 elif stat.S_ISREG(mymode):
6293 # we are merging a regular file
6294 mymd5=portage_checksum.perform_md5(mysrc,calc_prelink=1)
6295 # calculate config file protection stuff
6296 mydestdir=os.path.dirname(mydest)
6300 # destination file exists
6301 if stat.S_ISDIR(mydmode):
6302 # install of destination is blocked by an existing directory with the same name
6304 writemsg_stdout("!!! %s\n" % mydest)
6305 elif stat.S_ISREG(mydmode) or (stat.S_ISLNK(mydmode) and os.path.exists(mydest) and stat.S_ISREG(os.stat(mydest)[stat.ST_MODE])):
6307 # install of destination is blocked by an existing regular file,
6308 # or by a symlink to an existing regular file;
6309 # now, config file management may come into play.
6310 # we only need to tweak mydest if cfg file management is in play.
6312 # we have a protection path; enable config file management.
6313 destmd5=portage_checksum.perform_md5(mydest,calc_prelink=1)
# cfgfiledict maps a protected path to the md5s already merged there,
# letting repeated identical updates be skipped ("cycled").
6315 if cfgfiledict.has_key(myrealdest):
6316 if destmd5 in cfgfiledict[myrealdest]:
6319 del cfgfiledict[myrealdest]
6322 #file already in place; simply update mtimes of destination
6323 os.utime(mydest,(thismtime,thismtime))
6327 #mymd5!=destmd5 and we've cycled; move mysrc into place as a ._cfg file
6329 cfgfiledict[myrealdest]=[mymd5]
6331 elif cfgfiledict.has_key(myrealdest) and (mymd5 in cfgfiledict[myrealdest]):
6332 #myd5!=destmd5, we haven't cycled, and the file we're merging has been already merged previously
6334 moveme=cfgfiledict["IGNORE"]
6335 cfgprot=cfgfiledict["IGNORE"]
6337 #mymd5!=destmd5, we haven't cycled, and the file we're merging hasn't been merged before
6340 if not cfgfiledict.has_key(myrealdest):
6341 cfgfiledict[myrealdest]=[]
6342 if mymd5 not in cfgfiledict[myrealdest]:
6343 cfgfiledict[myrealdest].append(mymd5)
6344 # only record the last md5
6345 if len(cfgfiledict[myrealdest])>1:
6346 del cfgfiledict[myrealdest][0]
6349 mydest = new_protect_filename(myrealdest, newmd5=mymd5)
6351 # whether config protection or not, we merge the new file the
6352 # same way. Unless moveme=0 (blocking directory)
6354 mymtime=movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)
6360 # We need to touch the destination so that on --update the
6361 # old package won't yank the file with it. (non-cfgprot related)
6362 os.utime(myrealdest,(thismtime,thismtime))
# Darwin static-archive kludge: rewrite each ar member's mtime field so
# the linker does not consider the archive's table of contents stale.
6364 if self.settings["USERLAND"] == "Darwin" and myrealdest[-2:] == ".a":
6366 # XXX kludge, can be killed when portage stops relying on
6367 # md5+mtime, and uses refcounts
6368 # alright, we've fooled w/ mtime on the file; this pisses off static archives
6369 # basically internal mtime != file's mtime, so the linker (falsely) thinks
6370 # the archive is stale, and needs to have it's toc rebuilt.
6372 myf=open(myrealdest,"r+")
6374 # ar mtime field is digits padded with spaces, 12 bytes.
6375 lms=str(thismtime+5).ljust(12)
6378 if magic != "!<arch>\n":
6379 # not an archive (dolib.a from portage.py makes it here fex)
6382 st=os.stat(myrealdest)
6383 while myf.tell() < st.st_size - 12:
6390 # skip uid/gid/mperm
6393 # read the archive member's size
6394 x=long(myf.read(10))
6396 # skip the trailing newlines, and add the potential
6397 # extra padding byte if it's not an even size
6398 myf.seek(x + 2 + (x % 2),1)
6400 # and now we're at the end. yay.
6402 mymd5=portage_checksum.perform_md5(myrealdest,calc_prelink=1)
6403 os.utime(myrealdest,(thismtime,thismtime))
6407 outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
6408 writemsg_stdout("%s %s\n" % (zing,mydest))
# --- fifo / device-node case ----------------------------------------
6410 # we are merging a fifo or device node
6413 # destination doesn't exist
6414 if movefile(mysrc,mydest,newmtime=thismtime,sstat=mystat, mysettings=self.settings)!=None:
6416 if stat.S_ISFIFO(mymode):
6417 # we don't record device nodes in CONTENTS,
6418 # although we do merge them.
6419 outfile.write("fif "+myrealdest+"\n")
6422 writemsg_stdout(zing+" "+mydest+"\n")
def merge(self, mergeroot, inforoot, myroot, myebuild=None, cleanup=0):
    """Merge the staged image *mergeroot* into *myroot*.

    Thin convenience wrapper around treewalk().  Note the argument-order
    swap: treewalk() takes (srcroot, destroot, inforoot, ...), i.e. the
    destination root comes before the info root, unlike this signature.
    """
    return self.treewalk(mergeroot, myroot, inforoot, myebuild,
                         cleanup=cleanup)
def getstring(self, name):
    """Return the contents of DB file *name* with all runs of whitespace
    collapsed to single spaces; return "" when the file does not exist.

    NOTE(review): the extracted original was missing the empty-string
    early return and the file close (elided lines); both are restored.
    The deprecated string.split/string.join module functions are replaced
    with the equivalent str methods.
    """
    filepath = self.dbdir + "/" + name
    if not os.path.exists(filepath):
        return ""
    myfile = open(filepath, "r")
    try:
        # split() with no args splits on any whitespace and drops empties.
        mydata = myfile.read().split()
    finally:
        myfile.close()
    return " ".join(mydata)
def copyfile(self, fname):
    """Copy *fname* into this package's DB directory, keeping its basename."""
    destination = os.path.join(self.dbdir, os.path.basename(fname))
    shutil.copyfile(fname, destination)
def getfile(self, fname):
    """Return the raw contents of DB file *fname*, or "" when absent.

    NOTE(review): the extracted original was missing the empty-string
    early return, the file close, and the final return (elided lines);
    they are restored here.
    """
    filepath = self.dbdir + "/" + fname
    if not os.path.exists(filepath):
        return ""
    myfile = open(filepath, "r")
    try:
        mydata = myfile.read()
    finally:
        myfile.close()
    return mydata
def setfile(self, fname, data):
    """Write *data* verbatim to DB file *fname*, replacing any old contents.

    NOTE(review): the extracted original was missing the write and close
    calls (elided lines); they are restored here.
    """
    myfile = open(self.dbdir + "/" + fname, "w")
    try:
        myfile.write(data)
    finally:
        myfile.close()
def getelements(self, ename):
    """Return DB file *ename* as a flat list of whitespace-separated
    tokens; return [] when the file does not exist.

    NOTE(review): the extracted original was missing the empty-list early
    return, the accumulator, the outer loop header, the close, and the
    final return (elided lines); they are restored here.  The historical
    line[:-1] (strip the final character of each line, normally the
    newline) is kept for behavioral compatibility.
    """
    filepath = self.dbdir + "/" + ename
    if not os.path.exists(filepath):
        return []
    myelement = open(filepath, "r")
    try:
        mylines = myelement.readlines()
    finally:
        myelement.close()
    tokens = []
    for line in mylines:
        tokens.extend(line[:-1].split())
    return tokens
def setelements(self, mylist, ename):
    """Write *mylist* to DB file *ename*, one element per line.

    NOTE(review): the extracted original was missing the loop header and
    the close (elided lines); they are restored here.
    """
    myelement = open(self.dbdir + "/" + ename, "w")
    try:
        for x in mylist:
            myelement.write(x + "\n")
    finally:
        myelement.close()
def isregular(self):
    """Is this a regular package?  A regular package carries a CATEGORY
    file in its DB entry; a dblink can be virtual *and* regular."""
    category_marker = os.path.join(self.dbdir, "CATEGORY")
    return os.path.exists(category_marker)
def cleanup_pkgmerge(mypkg, origdir):
    """Remove the temporary extraction tree used while merging binary
    package *mypkg* and return to *origdir*.

    Relies on the module-global `settings` for PORTAGE_TMPDIR.

    NOTE(review): the extracted original was missing the trailing
    os.chdir(origdir) (elided line) -- without it the *origdir* parameter
    would be dead and the caller would be left inside the tree being
    removed; it is restored here.
    """
    shutil.rmtree(settings["PORTAGE_TMPDIR"] + "/portage-pkg/" + mypkg)
    # A stale saved environment from the build dir would poison later phases.
    envfile = settings["PORTAGE_TMPDIR"] + "/portage/" + mypkg + "/temp/environment"
    if os.path.exists(envfile):
        os.unlink(envfile)
    os.chdir(origdir)
# NOTE(review): elided extract -- interior statements (returns, try/except
# headers, os.chdir/origdir bookkeeping) are missing where the embedded
# original line numbers jump.  Comments annotate visible code only.
6480 def pkgmerge(mytbz2,myroot,mysettings):
6481 """will merge a .tbz2 file, returning a list of runtime dependencies
6482 that must be satisfied, or None if there was a merge error. This
6483 code assumes the package exists."""
6484 if mytbz2[-5:]!=".tbz2":
6485 print "!!! Not a .tbz2 file"
6487 mypkg=os.path.basename(mytbz2)[:-5]
6488 xptbz2=xpak.tbz2(mytbz2)
# CATEGORY comes from the xpak metadata segment appended to the tbz2.
6490 mycat=xptbz2.getfile("CATEGORY")
6492 print "!!! CATEGORY info missing from info chunk, aborting..."
6495 mycatpkg=mycat+"/"+mypkg
# Layout of the temporary extraction tree: bin/ for the image, inf/ for
# the xpak metadata (including the embedded ebuild).
6496 tmploc=mysettings["PORTAGE_TMPDIR"]+"/portage-pkg/"
6497 pkgloc=tmploc+"/"+mypkg+"/bin/"
6498 infloc=tmploc+"/"+mypkg+"/inf/"
6499 myebuild=tmploc+"/"+mypkg+"/inf/"+os.path.basename(mytbz2)[:-4]+"ebuild"
6500 if os.path.exists(tmploc+"/"+mypkg):
6501 shutil.rmtree(tmploc+"/"+mypkg,1)
6504 writemsg_stdout(">>> Extracting info\n")
6505 xptbz2.unpackinfo(infloc)
6506 # run pkg_setup early, so we can bail out early
6507 # (before extracting binaries) if there's a problem
6511 mysettings.configdict["pkg"]["CATEGORY"] = mycat;
6512 a=doebuild(myebuild,"setup",myroot,mysettings,tree="bintree")
6513 writemsg_stdout(">>> Extracting %s\n" % mypkg)
# NOTE(review): mytbz2 is interpolated into a shell pipeline; a path
# containing a single quote would break/abuse this command line.
6514 notok=spawn("bzip2 -dqc -- '"+mytbz2+"' | tar xpf -",mysettings,free=1)
6516 print "!!! Error Extracting",mytbz2
6517 cleanup_pkgmerge(mypkg,origdir)
6520 # the merge takes care of pre/postinst and old instance
6521 # auto-unmerge, virtual/provides updates, etc.
6522 mysettings.load_infodir(infloc)
6523 mylink=dblink(mycat,mypkg,myroot,mysettings,treetype="bintree")
6524 mylink.merge(pkgloc,infloc,myroot,myebuild,cleanup=1)
6526 if not os.path.exists(infloc+"/RDEPEND"):
6529 #get runtime dependencies
6530 a=open(infloc+"/RDEPEND","r")
# Whitespace-normalize RDEPEND into a single space-joined string.
6531 returnme=string.join(string.split(a.read())," ")
6533 cleanup_pkgmerge(mypkg,origdir)
# --- module-level initialization: validate $ROOT and create the standard
# --- temp/state directories under it.
# NOTE(review): elided extract -- sys.exit calls, try headers and the
# normalization of `root` (trailing slash handling implied by root[:-1])
# are missing between visible lines.
6537 if os.environ.has_key("ROOT"):
6538 root=os.environ["ROOT"]
# root[:-1] strips the (presumed) trailing slash before the existence test.
6546 if not os.path.exists(root[:-1]):
6547 writemsg("!!! Error: ROOT "+root+" does not exist. Please correct this.\n")
6548 writemsg("!!! Exiting.\n\n")
6550 elif not os.path.isdir(root[:-1]):
6551 writemsg("!!! Error: ROOT "+root[:-1]+" is not a directory. Please correct this.\n")
6552 writemsg("!!! Exiting.\n\n")
6555 #create tmp and var/tmp if they don't exist; read config
6557 if not os.path.exists(root+"tmp"):
6558 writemsg(">>> "+root+"tmp doesn't exist, creating it...\n")
6559 os.mkdir(root+"tmp",01777)
6560 if not os.path.exists(root+"var/tmp"):
6561 writemsg(">>> "+root+"var/tmp doesn't exist, creating it...\n")
6563 os.mkdir(root+"var",0755)
6564 except (OSError,IOError):
6567 os.mkdir(root+"var/tmp",01777)
6568 except SystemExit, e:
6571 writemsg("portage: couldn't create /var/tmp; exiting.\n")
6573 if not os.path.exists(root+"var/lib/portage"):
6574 writemsg(">>> "+root+"var/lib/portage doesn't exist, creating it...\n")
# Parent dirs are created one level at a time, ignoring already-exists errors.
6576 os.mkdir(root+"var",0755)
6577 except (OSError,IOError):
6580 os.mkdir(root+"var/lib",0755)
6581 except (OSError,IOError):
6584 os.mkdir(root+"var/lib/portage",02750)
6585 except SystemExit, e:
6588 writemsg("portage: couldn't create /var/lib/portage; exiting.\n")
# NOTE(review): elided extract; `profiledir` default assignment and loop
# bodies are missing between visible lines.
6592 #####################################
6593 # Deprecation Checks
# Warn emerge users whose profile symlink points at a deprecated profile
# (the "deprecated" file's first line names the suggested replacement).
6597 if os.path.isdir(PROFILE_PATH):
6598 profiledir = PROFILE_PATH
6599 if "PORTAGE_CALLER" in os.environ and os.environ["PORTAGE_CALLER"] == "emerge" and os.access(DEPRECATED_PROFILE_FILE, os.R_OK):
6600 deprecatedfile = open(DEPRECATED_PROFILE_FILE, "r")
6601 dcontent = deprecatedfile.readlines()
6602 deprecatedfile.close()
6603 newprofile = dcontent[0]
6604 writemsg(red("\n!!! Your current profile is deprecated and not supported anymore.\n"))
6605 writemsg(red("!!! Please upgrade to the following profile if possible:\n"))
6606 writemsg(8*" "+green(newprofile)+"\n")
# Any remaining lines of the "deprecated" file are upgrade instructions.
6607 if len(dcontent) > 1:
6608 writemsg("To upgrade do the following steps:\n")
6609 for myline in dcontent[1:]:
6613 if os.path.exists(USER_VIRTUALS_FILE):
6614 writemsg(red("\n!!! /etc/portage/virtuals is deprecated in favor of\n"))
6615 writemsg(red("!!! /etc/portage/profile/virtuals. Please move it to\n"))
6616 writemsg(red("!!! this new location.\n\n"))
# NOTE(review): elided extract between visible lines.
6619 #####################################
6623 # =============================================================================
6624 # =============================================================================
6625 # -----------------------------------------------------------------------------
6626 # We're going to lock the global config to prevent changes, but we need
6627 # to ensure the global settings are right.
# Build the one global config object used throughout this module.
6628 settings=config(config_profile_path=PROFILE_PATH,config_incrementals=portage_const.INCREMENTALS)
# Record this process as the master and protect the value from resets.
6631 settings["PORTAGE_MASTER_PID"]=str(os.getpid())
6632 settings.backup_changes("PORTAGE_MASTER_PID")
6633 # We are disabling user-specific bashrc files.
6634 settings["BASH_ENV"] = INVALID_ENV_FILE
6635 settings.backup_changes("BASH_ENV")
6637 # gets virtual package settings
def getvirtuals(myroot):
    """Deprecated module-level shim: virtual-package lookups now live on
    the global `settings` config object.  Kept only for old callers."""
    writemsg("--- DEPRECATED call to getvirtual\n")
    virtuals_map = settings.getvirtuals(myroot)
    return virtuals_map
# NOTE(review): elided extract -- the virts_p initialization, the loop
# header over myvkeys, and the root!="/" branch header are missing.
6643 def do_vartree(mysettings):
# Purpose (from visible code): (re)build the module-global virtuals maps
# and the per-root "vartree" entries in the global `db` dict.
6644 global virts,virts_p
6645 virts=mysettings.getvirtuals("/")
6649 myvkeys=virts.keys()
# virts_p indexes virtuals by package name only (the part after the "/").
6651 vkeysplit=x.split("/")
6652 if not virts_p.has_key(vkeysplit[1]):
6653 virts_p[vkeysplit[1]]=virts[x]
6654 db["/"]={"virtuals":virts,"vartree":vartree("/",virts)}
# When root != "/" (elided guard), a second vartree is built for that root.
6656 virts=mysettings.getvirtuals(root)
6657 db[root]={"virtuals":virts,"vartree":vartree(root,virts)}
# NOTE(review): elided extract -- try/except/else headers and several
# assignments are missing between visible lines.
6658 #We need to create the vartree first, then load our settings, and then set up our other trees
6660 usedefaults=settings.use_defs
6662 # XXX: This is a circular fix.
6663 #do_vartree(settings)
6664 #settings.loadVirtuals('/')
6665 do_vartree(settings)
6666 #settings.loadVirtuals('/')
6668 settings.reset() # XXX: Regenerate use after we get a vartree -- GLOBAL
6671 # XXX: Might cause problems with root="/" assumptions
6672 portdb=portdbapi(settings["PORTDIR"])
6675 # -----------------------------------------------------------------------------
6676 # =============================================================================
6677 # =============================================================================
# Detect whether SELinux support should be active for this run.
6680 if 'selinux' in settings["USE"].split(" "):
6683 if hasattr(selinux, "enabled"):
6684 selinux_enabled = selinux.enabled
6688 writemsg(red("!!! SELinux not loaded: ")+str(e)+"\n")
6691 writemsg(red("!!! SELinux module not found.")+" Please verify that it was installed.\n")
6693 if selinux_enabled == 0:
6695 del sys.modules["selinux"]
# Create and fix ownership/permissions of the dep-cache directories
# (skipped inside the sandbox, where writes would be denied/recorded).
6701 cachedirs=[CACHE_PATH]
6703 cachedirs.append(root+CACHE_PATH)
6704 if not os.environ.has_key("SANDBOX_ACTIVE"):
6705 for cachedir in cachedirs:
6706 if not os.path.exists(cachedir):
6707 os.makedirs(cachedir,0755)
6708 writemsg(">>> "+cachedir+" doesn't exist, creating it...\n")
6709 if not os.path.exists(cachedir+"/dep"):
# NOTE(review): mode is decimal 2755 here, not octal 02755 as the chmod
# below uses -- looks like a literal-base bug; confirm before changing.
6710 os.makedirs(cachedir+"/dep",2755)
6711 writemsg(">>> "+cachedir+"/dep doesn't exist, creating it...\n")
6713 os.chown(cachedir,uid,portage_gid)
6714 os.chmod(cachedir,0775)
6718 mystat=os.lstat(cachedir+"/dep")
6719 os.chown(cachedir+"/dep",uid,portage_gid)
6720 os.chmod(cachedir+"/dep",02775)
# Recursive fixup is spawned only when the group was actually wrong.
6721 if mystat[stat.ST_GID]!=portage_gid:
6722 spawn("chown -R "+str(uid)+":"+str(portage_gid)+" "+cachedir+"/dep",settings,free=1)
6723 spawn("chmod -R u+rw,g+rw "+cachedir+"/dep",settings,free=1)
# NOTE(review): elided extract -- the body that actually clears the record
# (between the membership test and the commented-out print) is missing.
6727 def flushmtimedb(record):
# Purpose (from visible code): drop one named record from the module-global
# mtimedb, warning when the record is unknown.
6729 if record in mtimedb.keys():
6731 #print "mtimedb["+record+"] is cleared."
6733 writemsg("Invalid or unset record '"+record+"' in mtimedb.\n")
# --- module-level: load the pickled mtime database, migrating legacy keys
# --- and discarding unknown ones.
# NOTE(review): elided extract -- the mtimedbkeys tuple is only partially
# visible, and try headers / migration bodies are missing.
6735 #grab mtimes for eclasses and upgrades
6739 "version", "starttime",
6742 mtimedbfile=root+"var/cache/edb/mtimedb"
# find_global=None disables resolving arbitrary globals while unpickling
# (restricts what the pickle can instantiate).
6744 mypickle=cPickle.Unpickler(open(mtimedbfile))
6745 mypickle.find_global=None
6746 mtimedb=mypickle.load()
# Legacy key migration: "old" -> "updates"; "cur" handled in elided code.
6747 if mtimedb.has_key("old"):
6748 mtimedb["updates"]=mtimedb["old"]
6750 if mtimedb.has_key("cur"):
6752 except SystemExit, e:
# Fallback when the file is missing/corrupt: start with a fresh structure.
6756 mtimedb={"updates":{},"version":"","starttime":0}
6758 for x in mtimedb.keys():
6759 if x not in mtimedbkeys:
6760 writemsg("Deleting invalid mtimedb key: "+str(x)+"\n")
6763 #,"porttree":portagetree(root,virts),"bintree":binarytree(root,virts)}
6764 features=settings["FEATURES"].split()
6766 do_upgrade_packagesmessage=0
# NOTE(review): elided extract -- loop headers (notably the one binding
# `x` over myxfiles), continue statements, counters and the return value
# are missing between visible lines.
6767 def do_upgrade(mykey):
# Purpose (from visible code): apply one profiles/updates file (*mykey*):
# "move"/"slotmove" directives are replayed against the installed-package
# DB, the binary-package tree, the world file and /etc/portage/package.*.
6768 global do_upgrade_packagesmessage
6770 writemsg(green("Performing Global Updates: ")+bold(mykey)+"\n")
6771 writemsg("(Could take a couple of minutes if you have a lot of binary packages.)\n")
6772 writemsg(" "+bold(".")+"='update pass' "+bold("*")+"='binary update' "+bold("@")+"='/var/db move'\n"+" "+bold("s")+"='/var/db SLOT move' "+bold("S")+"='binary SLOT move' "+bold("p")+"='update /etc/portage/package.*'\n")
6774 #remove stale virtual entries (mappings for packages that no longer exist)
# Snapshot the user's package.* files (including profile/ variants) so the
# directives can be applied to their in-memory contents.
6778 myxfiles = ["package.mask","package.unmask","package.keywords","package.use"]
6779 myxfiles.extend(prefix_array(myxfiles, "profile/"))
6782 if os.path.isdir(USER_CONFIG_PATH+os.path.sep+x):
6783 recursivefiles.extend([x+os.path.sep+y for y in listdir(USER_CONFIG_PATH+os.path.sep+x, filesonly=1, recursive=1)])
6785 recursivefiles.append(x)
6786 myxfiles = recursivefiles
6789 myfile = open(USER_CONFIG_PATH+os.path.sep+x,"r")
6790 file_contents[x] = myfile.readlines()
6793 if file_contents.has_key(x):
6794 del file_contents[x]
6797 worldlist=grabfile("/"+WORLD_FILE)
6798 myupd=grabfile(mykey)
6799 db["/"]["bintree"]=binarytree("/",settings["PKGDIR"],virts)
# Validate and apply each directive line from the updates file.
6800 for myline in myupd:
6801 mysplit=myline.split()
6802 if not len(mysplit):
6804 if mysplit[0]!="move" and mysplit[0]!="slotmove":
6805 writemsg("portage: Update type \""+mysplit[0]+"\" not recognized.\n")
6808 if mysplit[0]=="move" and len(mysplit)!=3:
6809 writemsg("portage: Update command \""+myline+"\" invalid; skipping.\n")
6812 if mysplit[0]=="slotmove" and len(mysplit)!=4:
6813 writemsg("portage: Update command \""+myline+"\" invalid; skipping.\n")
# Progress indicator: one "." per directive processed.
6816 sys.stdout.write(".")
6819 if mysplit[0]=="move":
6820 db["/"]["vartree"].dbapi.move_ent(mysplit)
6821 db["/"]["bintree"].move_ent(mysplit)
6822 #update world entries:
6823 for x in range(0,len(worldlist)):
6824 #update world entries, if any.
6825 worldlist[x]=dep_transform(worldlist[x],mysplit[1],mysplit[2])
6827 #update /etc/portage/packages.*
6828 for x in file_contents:
6829 for mypos in range(0,len(file_contents[x])):
6830 line=file_contents[x][mypos]
6831 if line[0]=="#" or string.strip(line)=="":
6833 key=dep_getkey(line.split()[0])
6835 file_contents[x][mypos]=string.replace(line,mysplit[1],mysplit[2])
6837 sys.stdout.write("p")
6840 elif mysplit[0]=="slotmove":
6841 db["/"]["vartree"].dbapi.move_slot_ent(mysplit)
6842 db["/"]["bintree"].move_slot_ent(mysplit,settings["PORTAGE_TMPDIR"]+"/tbz2")
# Write back every package.* file that was touched, honoring CONFIG_PROTECT.
6844 for x in update_files:
6845 mydblink = dblink('','','/',settings)
6846 if mydblink.isprotected(USER_CONFIG_PATH+os.path.sep+x):
6847 updating_file=new_protect_filename(USER_CONFIG_PATH+os.path.sep+x)[0]
6849 updating_file=USER_CONFIG_PATH+os.path.sep+x
6851 myfile=open(updating_file,"w")
6852 myfile.writelines(file_contents[x])
6857 # We gotta do the brute force updates for these now.
6858 if (settings["PORTAGE_CALLER"] in ["fixpackages"]) or \
6859 ("fixpackages" in features):
6860 db["/"]["bintree"].update_ents(myupd,settings["PORTAGE_TMPDIR"]+"/tbz2")
6862 do_upgrade_packagesmessage = 1
6865 #update our internal mtime since we processed all our directives.
6866 mtimedb["updates"][mykey]=os.stat(mykey)[stat.ST_MTIME]
6867 write_atomic(WORLD_FILE,"\n".join(worldlist))
# NOTE(review): elided extract -- the mymfn assignment, try headers and the
# exception-handler bodies are missing between visible lines.
6870 def commit_mtimedb():
# Purpose (from visible code): atomically pickle the module-global mtimedb
# to disk (path in mymfn, assigned in elided code) and fix its ownership.
6876 mtimedb["version"]=VERSION
6877 f = portage_util.atomic_ofstream(mymfn)
# Protocol -1 = highest available pickle protocol.
6878 cPickle.dump(mtimedb, f, -1)
6880 except SystemExit, e:
6882 except Exception, e:
6887 os.chown(mymfn,uid,portage_gid)
6888 os.chmod(mymfn,0664)
6889 except SystemExit, e:
6891 except Exception, e:
# NOTE(review): the "def portageexit():" line (original 6894) is missing
# from this extract; the statements below are its body.  It flushes portage
# state at interpreter shutdown and is registered as an atexit hook below.
6895 global uid,portage_gid,portdb,db
# Only flush when we have some privilege (secpass nonzero) and are not
# running inside the ebuild sandbox, where writing caches is unwanted.
6896 if secpass and not os.environ.has_key("SANDBOX_ACTIVE"):
6897 close_portdbapi_caches()
# Presumably commit_mtimedb() is also called here on a missing line --
# confirm against the full source.
# Run the cleanup automatically when the interpreter exits.
6900 atexit_register(portageexit)
# ---------------------------------------------------------------------------
# Module-level initialization (lossy extract; numeric prefixes are original
# file line numbers and gaps are lines missing from this view):
#  1. as root, apply any new profiles/updates directives,
#  2. set up the port/binary trees and third-party mirror list,
#  3. sanity-check PORTAGE_TMPDIR, ACCEPT_KEYWORDS and the profile symlink.
# ---------------------------------------------------------------------------
# secpass==2 means full root privileges; skip all of this in the sandbox.
6902 if (secpass==2) and (not os.environ.has_key("SANDBOX_ACTIVE")):
6903 if settings["PORTAGE_CALLER"] in ["emerge","fixpackages"]:
6904 #only do this if we're root and not running repoman/ebuild digest
6905 updpath=os.path.normpath(settings["PORTDIR"]+"///profiles/updates")
# Lazily create the per-updates-file mtime map on first run.
6907 if not mtimedb.has_key("updates"):
6908 mtimedb["updates"]={}
6910 mylist=listdir(updpath,EmptyOnError=1)
# Update files are named like "1Q-2005".  Rewrite to "2005-1Q" so a plain
# sort (presumably on a missing line) orders them chronologically, then
# map back to the original names afterwards.
6912 mylist=[myfile[3:]+"-"+myfile[:2] for myfile in mylist]
6914 mylist=[myfile[5:]+"-"+myfile[:4] for myfile in mylist]
6915 for myfile in mylist:
6916 mykey=updpath+"/"+myfile
# Skip directory entries that are not regular files.
6917 if not os.path.isfile(mykey):
# Reprocess a file when it is new, its mtime changed since last run, or
# we are explicitly running fixpackages.
6919 if (not mtimedb["updates"].has_key(mykey)) or \
6920 (mtimedb["updates"][mykey] != os.stat(mykey)[stat.ST_MTIME]) or \
6921 (settings["PORTAGE_CALLER"] == "fixpackages"):
6924 commit_mtimedb() # This lets us save state for C-c.
6926 #directory doesn't exist
6929 #make sure our internal databases are consistent; recreate our virts and vartree
6930 do_vartree(settings)
# If updates were applied but the tbz2s were not rewritten, tell the user
# to run fixpackages (only worth printing when binary packages exist).
6931 if do_upgrade_packagesmessage and \
6932 listdir(settings["PKGDIR"]+"/All/",EmptyOnError=1):
6933 writemsg("\n\n\n ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the")
6934 writemsg("\n tbz2's in the packages directory. "+bold("Note: This can take a very long time."))
6941 #continue setting up other trees
# Trees for the "/" root and for the configured ROOT (which may differ).
6942 db["/"]["porttree"]=portagetree("/",virts)
6943 db["/"]["bintree"]=binarytree("/",settings["PKGDIR"],virts)
6945 db[root]["porttree"]=portagetree(root,virts)
6946 db[root]["bintree"]=binarytree(root,settings["PKGDIR"],virts)
# Build the thirdpartymirrors map: overlays are inserted at the front so
# they take precedence over PORTDIR when the dict-lists are stacked.
6948 profileroots = [settings["PORTDIR"]+"/profiles/"]
6949 for x in settings["PORTDIR_OVERLAY"].split():
6950 profileroots.insert(0, x+"/profiles/")
6951 thirdparty_lists = [grabdict(os.path.join(x, "thirdpartymirrors")) for x in profileroots]
6952 thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
# PORTAGE_TMPDIR must exist and actually be a directory; warn otherwise.
6954 if not os.path.exists(settings["PORTAGE_TMPDIR"]):
6955 writemsg("portage: the directory specified in your PORTAGE_TMPDIR variable, \""+settings["PORTAGE_TMPDIR"]+",\"\n")
6956 writemsg("does not exist. Please create this directory or correct your PORTAGE_TMPDIR setting.\n")
6958 if not os.path.isdir(settings["PORTAGE_TMPDIR"]):
6959 writemsg("portage: the directory specified in your PORTAGE_TMPDIR variable, \""+settings["PORTAGE_TMPDIR"]+",\"\n")
6960 writemsg("is not a directory. Please correct your PORTAGE_TMPDIR setting.\n")
6963 # COMPATABILITY -- This shouldn't be used.
6964 pkglines = settings.packages
# Validate ACCEPT_KEYWORDS against the known arch list (each arch plus its
# "~arch" unstable variant); "-"-prefixed entries are negations and OK.
6966 groups = settings["ACCEPT_KEYWORDS"].split()
6967 archlist = flatten([[myarch, "~"+myarch] for myarch in settings["PORTAGE_ARCHLIST"].split()])
6969 for group in groups:
# Empty archlist usually means an empty/unsynced portage tree.
6971 writemsg("--- 'profiles/arch.list' is empty or not available. Empty portage tree?\n")
6973 elif (group not in archlist) and group[0]!='-':
6974 writemsg("\n"+red("!!! INVALID ACCEPT_KEYWORDS: ")+str(group)+"\n")
# /etc/make.profile must be a symlink into $PORTDIR/profiles; a plain
# directory there breaks profile resolution for most merges.
6979 if not os.path.islink(PROFILE_PATH) and os.path.exists(settings["PORTDIR"]+"/profiles"):
6980 writemsg(red("\a\n\n!!! "+PROFILE_PATH+" is not a symlink and will probably prevent most merges.\n"))
6981 writemsg(red("!!! It should point into a profile within %s/profiles/\n" % settings["PORTDIR"]))
6982 writemsg(red("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n"))
6985 # ============================================================================
6986 # ============================================================================