# Copyright 2002-2009 Gentoo Foundation; 2008-2009 Various authors (see AUTHORS)
# Distributed under the GPL v2
+ 11 Jan 2009; Andrew Gaffney <agaffney@gentoo.org>
+ modules/catalyst/arch/arm.py, modules/catalyst/arch/mips.py,
+ modules/catalyst/arch/x86.py, modules/catalyst/config.py,
+ modules/catalyst/lock.py, modules/catalyst/support.py,
+ modules/catalyst/target/generic_stage.py, modules/catalyst/target/grp.py,
+ modules/catalyst/target/livecd_stage1.py,
+ modules/catalyst/target/livecd_stage2.py,
+ modules/catalyst/target/netboot.py, modules/catalyst/target/netboot2.py,
+ modules/catalyst/target/snapshot.py, modules/catalyst/target/stage1.py,
+ modules/catalyst/target/stage2.py, modules/catalyst/target/tinderbox.py,
+ targets/stage1/build.py:
+ Kill lots and lots of trailing whitespace in .py files
+
11 Jan 2009; Andrew Gaffney <agaffney@gentoo.org>
modules/catalyst/target/generic_stage.py:
Fix up a few bad replacements from previous commit
self.settings["CHROOT"]="chroot"
self.settings["CFLAGS"]="-O2 -pipe"
self.settings["CXXFLAGS"]="-O1 -pipe"
-
+
class generic_armeb(catalyst.arch.generic_arch):
"Abstract base class for all arm (big endian) builders"
def __init__(self,myspec):
arch_mips4_n32.__init__(self,myspec)
self.settings["HOSTUSE"]=["ip30","n32"]
-__subarch_map = {
+__subarch_map = {
"cobalt" : arch_cobalt,
"cobalt_n32" : arch_cobalt_n32,
"ip27" : arch_ip27,
generic_x86.__init__(self,myspec)
self.settings["CFLAGS"]="-O2 -march=pentium-mmx -pipe"
self.settings["HOSTUSE"]=["mmx"]
-
+
class arch_pentium2(generic_x86):
"Intel Pentium 2 CPU with MMX support"
def __init__(self,myspec):
for x, myline in enumerate(self.lines):
myline = myline.strip()
- # Force the line to be clean
+ # Force the line to be clean
# Remove Comments ( anything following # )
myline = trailing_comment.sub("", myline)
# Look for separator
msearch = myline.find(self.key_value_separator)
-
+
# If separator found assume its a new key
if msearch != -1:
# Split on the first occurrence of the separator, creating two strings in the array mobjs
cur_array += myline.split()
else:
raise CatalystError, "Syntax error: " + x
-
+
# XXX: Do we really still need this "single value is a string" behavior?
if len(cur_array) == 2:
values[cur_array[0]] = cur_array[1]
else:
values[cur_array[0]] = cur_array[1:]
-
+
if not self.empty_values:
for x in values.keys():
# Delete empty key pairs
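For illustration only (not part of this patch): assuming ":" as the key/value separator used by spec files, the parse loop above turns the item1/item2/item3 example documented further down into a dict like the one below; a two-element cur_array collapses to a plain string, anything longer stays an ordered list.

expected_values = {
    "item1": "value1",                           # single value is kept as a string
    "item2": ["foo", "bar", "oni"],              # values after the separator are split()
    "item3": ["meep", "bark", "gleep", "moop"],  # continuation lines keep extending cur_array
}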
self.set_lockdir(lockdir)
self.set_lockfilename(".catalyst_lock")
self.set_lockfile()
-
+
if LockDir.lock_dirs_in_use.count(lockdir)>0:
raise "This directory already associated with a lock object"
else:
LockDir.lock_dirs_in_use.append(lockdir)
-
+
self.hardlock_paths={}
-
+
def delete_lock_from_path_list(self):
def set_gid(self,gid):
if not self.islocked():
# if "DEBUG" in self.settings:
-# print "setting gid to", gid
+# print "setting gid to", gid
self.gid=gid
def set_lockdir(self,lockdir):
print "HARDLOCKING doesnt support shared-read locks"
print "using exclusive write locks"
self.hard_lock()
-
+
def write_lock(self):
if not self.locking_method == "HARDLOCK":
self.fcntl_lock("write")
os.umask(old_mask)
else:
self.myfd = os.open(self.lockfile, os.O_CREAT|os.O_RDWR,0660)
-
+
try:
if locktype == "read":
self.locking_method(self.myfd,fcntl.LOCK_SH|fcntl.LOCK_NB)
else:
self.locked=True
#writemsg("Lockfile obtained\n")
-
-
+
+
def fcntl_unlock(self):
import fcntl
unlinkfile = 1
# open fd closed automatically on them.
#if type(lockfilename) == types.StringType:
# os.close(myfd)
-
+
if (self.myfd != None):
os.close(self.myfd)
self.myfd=None
#mypath = self.normpath(path)
if os.path.isdir(self.lockdir) and os.path.isfile(self.myhardlock):
self.hardlock_paths[self.lockdir]=self.myhardlock
-
+
def remove_hardlock_file_from_cleanup(self):
if self.lockdir in self.hardlock_paths:
del self.hardlock_paths[self.lockdir]
pid = hostpid[-1]
if not filename in mylist:
mylist[filename] = {}
-
+
if not host in mylist[filename]:
mylist[filename][host] = []
mylist[filename][host].append(pid)
if __name__ == "__main__":
def lock_work():
- print
+ print
for i in range(1,6):
print i,time.time()
time.sleep(1)
print "Lock 5 starting"
import time
Lock1=LockDir("/tmp/lock_path")
- Lock1.write_lock()
+ Lock1.write_lock()
print "Lock1 write lock"
-
+
lock_work()
-
- Lock1.unlock()
+
+ Lock1.unlock()
print "Lock1 unlock"
-
+
Lock1.read_lock()
print "Lock1 read lock"
-
+
lock_work()
-
- Lock1.unlock()
+
+ Lock1.unlock()
print "Lock1 unlock"
Lock1.read_lock()
print "Lock1 read lock"
-
+
Lock1.write_lock()
print "Lock1 write lock"
-
+
lock_work()
-
+
Lock1.unlock()
print "Lock1 unlock"
-
+
Lock1.read_lock()
print "Lock1 read lock"
-
+
lock_work()
-
+
Lock1.unlock()
print "Lock1 unlock"
#Lock1.write_lock()
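A minimal usage sketch (an assumption, not part of this patch) of the same pattern the test code above exercises; the stage targets below use it through snapshot_lock_object and chroot_lock.

from catalyst.lock import LockDir      # modules/catalyst/lock.py, per the file list above

lock = LockDir("/tmp/lock_path")       # same path the test above uses
lock.write_lock()                      # exclusive lock (fcntl, or the HARDLOCK fallback chosen at init)
try:
    pass                               # work on the locked directory here
finally:
    lock.unlock()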
if message:
(type,value)=sys.exc_info()[:2]
if value!=None:
- print
+ print
traceback.print_exc(file=sys.stdout)
print
print "!!! catalyst: "+message
print
-
+
class LockInUse(Exception):
def __init__(self, message):
if message:
Can either have a tuple, or a string passed in. If uid/gid/groups/umask specified, it changes
the forked process to said value. If path_lookup is on, a non-absolute command will be converted
to an absolute command, otherwise it returns None.
-
+
selinux_context is the desired context, dependent on selinux being available.
opt_name controls the name the processor goes by.
fd_pipes controls which file descriptor numbers are left open in the forked process; it's a dict
mapping each desired fd number in the child to the current fd it is dup'd from.
-
+
func_call is a boolean for specifying to execute a python function- use spawn_func instead.
raise_signals is questionable. Basically throw an exception if signal'd. No exception is thrown
if raw_input is on.
-
+
logfile overloads the specified fd's to write to a tee process which logs to logfile
returnpid returns the relevant pids (a list, including the logging process if logfile is on).
-
+
non-returnpid calls to spawn will block till the process has exited, returning the exitcode/signal
raw_exit_code controls whether the actual waitpid result is returned, or interpreted."""
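A hedged usage sketch (an assumption, not part of this patch) based on the docstring above; only keyword names the docstring itself describes are used, and the commands and paths are made up.

from catalyst.support import spawn     # modules/catalyst/support.py, per the file list above

# Blocking call: returns the exitcode/signal once the child exits.
ret = spawn("tar cjf /tmp/portage.tar.bz2 portage", opt_name="snapshot")

# returnpid=True hands back the relevant pids instead of blocking, and
# logfile tees the child's output into the named file.
pids = spawn("emerge --sync", returnpid=True, logfile="/tmp/sync.log")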
if raw_exit_code:
return retval
return process_exit_code(retval)
-
+
if fd_pipes == None:
fd_pipes={}
fd_pipes[0] = 0
for x in k:
trg_fd.append(x)
src_fd.append(fd_pipes[x])
-
+
# run through said list dup'ing descriptors so that they won't be waxed
# by other dup calls.
for x in range(0,len(trg_fd)):
os.dup2(src_fd[x], trg_fd[x])
else:
trg_fd=[0,1,2]
-
+
# wax all open descriptors that weren't requested be left open.
for x in range(0,max_fd_limit):
if x not in trg_fd:
meep
bark
gleep moop
-
+
This file would be interpreted as defining three items: item1, item2 and item3. item1 would contain
the string value "value1". Item2 would contain an ordered list [ "foo", "bar", "oni" ]. item3
would contain an ordered list as well: [ "meep", "bark", "gleep", "moop" ]. It's important to note
else:
makeconf={}
return makeconf
-
+
def msg(mymsg,verblevel=1):
if verbosity>=verblevel:
print mymsg
# Removing ending slash
path1 = re.sub("/$","",path1)
path2 = re.sub("/$","",path2)
-
+
if path1 == path2:
return 1
return 0
def addl_arg_parse(myspec,addlargs,requiredspec,validspec):
"helper function to help targets parse additional arguments"
global valid_config_file_values
-
+
for x in addlargs.keys():
if x not in validspec and x not in valid_config_file_values and x not in requiredspec:
raise CatalystError, "Argument \""+x+"\" not recognized."
else:
myspec[x]=addlargs[x]
-
+
for x in requiredspec:
if not x in myspec:
raise CatalystError, "Required argument \""+x+"\" not specified."
-
+
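For illustration (an assumption, not part of this patch), a target could call the helper above like so; an unrecognized key, or a required key still missing from myspec afterwards, raises CatalystError.

from catalyst.support import addl_arg_parse   # hypothetical import path, per the file list above

myspec = {}
addlargs = {"version_stamp": "2009.0", "chost": "i686-pc-linux-gnu"}   # made-up values
addl_arg_parse(myspec, addlargs,
    requiredspec=["version_stamp"],
    validspec=["chost"])
# myspec now carries both keys.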
def touch(myfile):
try:
myf=open(myfile,"w")
else:
""" First clean up any existing target stuff """
# XXX WTF are we removing the old tarball before we start building the
- # XXX new one? If the build fails, you don't want to be left with
+ # XXX new one? If the build fails, you don't want to be left with
# XXX nothing at all
# if os.path.isfile(self.settings["target_path"]):
# cmd("rm -f "+self.settings["target_path"],\
raise CatalystError,\
"ISO volume ID must not exceed 32 characters."
else:
- self.settings["iso_volume_id"]="catalyst "+self.settings["snapshot"]
+ self.settings["iso_volume_id"]="catalyst "+self.settings["snapshot"]
def set_action_sequence(self):
""" Default action sequence for run method """
"""
Check and verify that none of our paths in mypath are mounted. We don't
- want to clean up with things still mounted, and this allows us to check.
+ want to clean up with things still mounted, and this allows us to check.
Returns 1 on ok, 0 on "something is still mounted" case.
"""
"unpack")
if "SEEDCACHE" in self.settings:
- if os.path.isdir(self.settings["source_path"]):
+ if os.path.isdir(self.settings["source_path"]):
""" SEEDCACHE Is a directory, use rsync """
unpack_cmd="rsync -a --delete "+self.settings["source_path"]+\
" "+self.settings["chroot_path"]
""" Autoresume is valid, tarball is valid """
unpack=False
invalid_snapshot=True
-
+
elif os.path.isdir(self.settings["source_path"]) \
and not os.path.exists(self.settings["autoresume_path"]+\
"unpack"):
snapshot_hash=read_from_clst(self.settings["autoresume_path"]+\
"unpack_portage")
- if "SNAPCACHE" in self.settings:
+ if "SNAPCACHE" in self.settings:
snapshot_cache_hash=\
read_from_clst(self.settings["snapshot_cache_path"]+\
"catalyst-hash")
unpack=False
if unpack:
- if "SNAPCACHE" in self.settings:
+ if "SNAPCACHE" in self.settings:
self.snapshot_lock_object.write_lock()
if os.path.exists(destdir):
print cleanup_msg
print "Unpacking portage tree (This can take a long time) ..."
cmd(unpack_cmd,unpack_errmsg,env=self.env)
- if "SNAPCACHE" in self.settings:
+ if "SNAPCACHE" in self.settings:
myf=open(self.settings["snapshot_cache_path"]+"catalyst-hash","w")
myf.write(self.settings["snapshot_path_hash"])
myf.close()
myf.write(self.settings["snapshot_path_hash"])
myf.close()
- if "SNAPCACHE" in self.settings:
+ if "SNAPCACHE" in self.settings:
self.snapshot_lock_object.unlock()
def config_profile_link(self):
def portage_overlay(self):
""" We copy the contents of our overlays to /usr/local/portage """
if "portage_overlay" in self.settings:
- for x in self.settings["portage_overlay"]:
+ for x in self.settings["portage_overlay"]:
if os.path.exists(x):
print "Copying overlay dir " +x
cmd("mkdir -p "+self.settings["chroot_path"]+\
""" Copy over the root_overlay """
if self.settings["spec_prefix"]+"/root_overlay" in self.settings:
for x in self.settings[self.settings["spec_prefix"]+\
- "/root_overlay"]:
+ "/root_overlay"]:
if os.path.exists(x):
print "Copying root_overlay: "+x
cmd("rsync -a "+x+"/ "+\
pass
def bind(self):
- for x in self.mounts:
+ for x in self.mounts:
if not os.path.exists(self.settings["chroot_path"]+x):
os.makedirs(self.settings["chroot_path"]+x,0755)
self.override_cbuild()
self.override_chost()
self.override_cflags()
- self.override_cxxflags()
- self.override_ldflags()
+ self.override_cxxflags()
+ self.override_ldflags()
if "AUTORESUME" in self.settings \
and os.path.exists(self.settings["autoresume_path"]+"chroot_setup"):
print "Resume point detected, skipping chroot_setup operation..."
else:
print "Setting up chroot..."
-
+
#self.makeconf=read_makeconf(self.settings["chroot_path"]+"/etc/make.conf")
-
+
cmd("cp /etc/resolv.conf "+self.settings["chroot_path"]+"/etc",\
"Could not copy resolv.conf into place.",env=self.env)
-
+
""" Copy over the envscript, if applicable """
if "ENVSCRIPT" in self.settings:
if not os.path.exists(self.settings["ENVSCRIPT"]):
and os.path.exists(self.settings["autoresume_path"]+"clean"):
print "Resume point detected, skipping clean operation..."
else:
- for x in self.settings["cleanables"]:
+ for x in self.settings["cleanables"]:
print "Cleaning chroot: "+x+"... "
cmd("rm -rf "+self.settings["destpath"]+x,"Couldn't clean "+\
x,env=self.env)
print x,"not a directory or does not exist, skipping 'empty' operation."
continue
print "Emptying directory",x
- """
+ """
stat the dir, delete the dir, recreate the dir and set
the proper perms and ownership
"""
cmd("/bin/bash "+self.settings["controller_file"]+\
" preclean","preclean script failed.",env=self.env)
touch(self.settings["autoresume_path"]+"preclean")
-
+
except:
self.unbind()
raise CatalystError, "Build failed, could not execute preclean"
except:
self.mount_safety_check()
raise
-
+
self.chroot_lock.unlock()
def unmerge(self):
print "Resume point detected, skipping setup_overlay operation..."
else:
if self.settings["spec_prefix"]+"/overlay" in self.settings:
- for x in self.settings[self.settings["spec_prefix"]+"/overlay"]:
+ for x in self.settings[self.settings["spec_prefix"]+"/overlay"]:
if os.path.exists(x):
cmd("rsync -a "+x+"/ "+\
self.settings["target_path"],\
print "clearing kerncache ..."
self.clear_kerncache()
-# vim: ts=4 sw=4 sta et sts=4 ai
+# vim: ts=4 sw=4 sta et sts=4 ai
def __init__(self,spec,addlargs):
self.required_values=["version_stamp","target","subarch",\
"rel_type","profile","snapshot","source_subpath"]
-
+
self.valid_values=self.required_values[:]
self.valid_values.extend(["grp/use"])
if not "grp" in addlargs:
raise CatalystError,"Required value \"grp\" not specified in spec."
-
+
self.required_values.extend(["grp"])
if type(addlargs["grp"])==types.StringType:
addlargs["grp"]=[addlargs["grp"]]
-
+
if "grp/use" in addlargs:
if type(addlargs["grp/use"])==types.StringType:
addlargs["grp/use"]=[addlargs["grp/use"]]
-
+
for x in addlargs["grp"]:
self.required_values.append("grp/"+x+"/packages")
self.required_values.append("grp/"+x+"/type")
-
+
generic_stage_target.__init__(self,spec,addlargs)
def set_target_path(self):
try:
cmd("/bin/bash "+self.settings["controller_file"]+" run "+self.settings["grp/"+pkgset+"/type"]\
+" "+pkgset+" "+mypackages,env=self.env)
-
+
except CatalystError:
self.unbind()
raise CatalystError,"GRP build aborting due to error."
def set_mounts(self):
self.mounts.append("/tmp/grp")
self.mountmap["/tmp/grp"]=self.settings["target_path"]
-
+
def generate_digests(self):
for pkgset in self.settings["grp"]:
if self.settings["grp/"+pkgset+"/type"] == "pkgset":
destdir=normpath(self.settings["target_path"]+"/"+pkgset+"/All")
- print "Digesting files in the pkgset....."
+ print "Digesting files in the pkgset....."
digests=glob.glob(destdir+'/*.DIGESTS')
for i in digests:
if os.path.exists(i):
self.gen_digest_file(normpath(destdir+"/"+i))
else:
destdir=normpath(self.settings["target_path"]+"/"+pkgset)
- print "Digesting files in the srcset....."
-
+ print "Digesting files in the srcset....."
+
digests=glob.glob(destdir+'/*.DIGESTS')
for i in digests:
if os.path.exists(i):
cmd("rm -rf "+self.settings["target_path"],\
"Could not remove existing directory: "+self.settings["target_path"],env=self.env)
touch(self.settings["autoresume_path"]+"setup_target_path")
-
+
if not os.path.exists(self.settings["target_path"]):
os.makedirs(self.settings["target_path"])
-
-
+
+
def set_target_path(self):
pass
def set_spec_prefix(self):
self.settings["spec_prefix"]="livecd"
-
+
def set_use(self):
generic_stage_target.set_use(self)
if "use" in self.settings:
class livecd_stage2_target(generic_stage_target):
def __init__(self,spec,addlargs):
self.required_values=["boot/kernel"]
-
+
self.valid_values=[]
-
+
self.valid_values.extend(self.required_values)
self.valid_values.extend(["livecd/cdtar","livecd/empty","livecd/rm",\
"livecd/unmerge","livecd/iso","livecd/gk_mainargs","livecd/type",\
"livecd/root_overlay","livecd/users","portage_overlay",\
"livecd/fstype","livecd/fsops","livecd/linuxrc","livecd/bootargs",\
"gamecd/conf","livecd/xdm","livecd/xsession","livecd/volid"])
-
+
generic_stage_target.__init__(self,spec,addlargs)
if not "livecd/type" in self.settings:
self.settings["livecd/type"] = "generic-livecd"
file_locate(self.settings, ["cdtar","controller_file"])
-
+
def set_source_path(self):
self.settings["source_path"]=normpath(self.settings["storedir"]+"/builds/"+self.settings["source_subpath"]+".tar.bz2")
if os.path.isfile(self.settings["source_path"]):
self.settings["source_path"]=normpath(self.settings["storedir"]+"/tmp/"+self.settings["source_subpath"]+"/")
if not os.path.exists(self.settings["source_path"]):
raise CatalystError,"Source Path: "+self.settings["source_path"]+" does not exist."
-
+
def set_spec_prefix(self):
self.settings["spec_prefix"]="livecd"
except:
self.unbind()
raise CatalystError,"Couldn't open "+self.settings["chroot_path"]+"/etc/hotplug/blacklist."
-
+
myf.write("\n#Added by Catalyst:")
for x in self.settings["livecd/modblacklist"]:
myf.write("\n"+x)
myf.close()
-
+
def unpack(self):
unpack=True
display_msg=None
"netboot/packages"
]
self.required_values=[]
-
+
try:
if "netboot/packages" in addlargs:
if type(addlargs["netboot/packages"]) == types.StringType:
loopy=[addlargs["netboot/packages"]]
else:
loopy=addlargs["netboot/packages"]
-
+
# for x in loopy:
# self.required_values.append("netboot/packages/"+x+"/files")
except:
raise CatalystError,"configuration error in netboot/packages."
-
-
-
+
+
+
generic_stage_target.__init__(self,spec,addlargs)
self.set_build_kernel_vars(addlargs)
# Custom Kernel Tarball --- use that instead ...
# unless the user wants specific CFLAGS/CXXFLAGS, let's use -Os
-
+
for envvar in "CFLAGS", "CXXFLAGS":
if not envvar in os.environ and not envvar in addlargs:
self.settings[envvar] = "-Os -pipe"
-
+
def set_root_path(self):
# ROOT= variable for emerges
# except CatalystError:
# self.unbind()
# raise CatalystError,"netboot build aborting due to error."
-
+
def build_busybox(self):
# build busybox
if "netboot/busybox_config" in self.settings:
except CatalystError:
self.unbind()
raise CatalystError,"netboot build aborting due to error."
-
+
def copy_files_to_image(self):
# create image
loopy=[self.settings["netboot/packages"]]
else:
loopy=self.settings["netboot/packages"]
-
+
for x in loopy:
if "netboot/packages/"+x+"/files" in self.settings:
if type(self.settings["netboot/packages/"+x+"/files"]) == types.ListType:
self.valid_values.append("netboot2/packages/"+x+"/files")
except:
raise CatalystError,"configuration error in netboot2/packages."
-
+
generic_stage_target.__init__(self,spec,addlargs)
self.set_build_kernel_vars()
self.settings["merge_path"]=normpath("/tmp/image/")
loopy=[self.settings["netboot2/packages"]]
else:
loopy=self.settings["netboot2/packages"]
-
+
for x in loopy:
if "netboot2/packages/"+x+"/files" in self.settings:
if type(self.settings["netboot2/packages/"+x+"/files"]) == types.ListType:
touch(self.settings["autoresume_path"]+"copy_files_to_image")
- def setup_overlay(self):
+ def setup_overlay(self):
if "AUTORESUME" in self.settings \
and os.path.exists(self.settings["autoresume_path"]+"setup_overlay"):
print "Resume point detected, skipping setup_overlay operation..."
else:
if "netboot2/overlay" in self.settings:
- for x in self.settings["netboot2/overlay"]:
+ for x in self.settings["netboot2/overlay"]:
if os.path.exists(x):
cmd("rsync -a "+x+"/ "+\
self.settings["chroot_path"] + self.settings["merge_path"], "netboot2/overlay: "+x+" copy failed.",env=self.env)
print "netboot2: removing " + x
os.system("rm -rf " + self.settings["chroot_path"] + self.settings["merge_path"] + x)
- def empty(self):
+ def empty(self):
if "AUTORESUME" in self.settings \
and os.path.exists(self.settings["autoresume_path"]+"empty"):
print "Resume point detected, skipping empty operation..."
def __init__(self,myspec,addlargs):
self.required_values=["version_stamp","target"]
self.valid_values=["version_stamp","target"]
-
+
generic_target.__init__(self,myspec,addlargs)
self.settings=myspec
self.settings["target_subpath"]="portage"
x=normpath(self.settings["storedir"]+"/snapshots")
if not os.path.exists(x):
os.makedirs(x)
-
+
def mount_safety_check(self):
pass
-
+
def run(self):
if "PURGEONLY" in self.settings:
self.purge()
self.setup()
print "Creating Portage tree snapshot "+self.settings["version_stamp"]+\
" from "+self.settings["portdir"]+"..."
-
+
mytmp=self.settings["tmp_path"]
if not os.path.exists(mytmp):
os.makedirs(mytmp)
-
+
cmd("rsync -a --delete --exclude /packages/ --exclude /distfiles/ --exclude /local/ --exclude CVS/ --exclude .svn --filter=H_**/files/digest-* "+\
self.settings["portdir"]+"/ "+mytmp+"/portage/","Snapshot failure",env=self.env)
-
+
print "Compressing Portage snapshot tarball..."
cmd("tar cjf "+self.settings["snapshot_path"]+" -C "+mytmp+" portage",\
"Snapshot creation failure",env=self.env)
-
+
self.gen_contents_file(self.settings["snapshot_path"])
self.gen_digest_file(self.settings["snapshot_path"])
self.cleanup()
print "snapshot: complete!"
-
+
def kill_chroot_pids(self):
pass
os.makedirs(myemp,0755)
os.chown(myemp,mystat[ST_UID],mystat[ST_GID])
os.chmod(myemp,mystat[ST_MODE])
-
+
__target_map = {"snapshot":snapshot_target}
self.required_values=[]
self.valid_values=["chost"]
generic_stage_target.__init__(self,spec,addlargs)
-
+
def set_stage_path(self):
self.settings["stage_path"]=normpath(self.settings["chroot_path"]+self.settings["root_path"])
print "stage1 stage path is "+self.settings["stage_path"]
-
+
def set_root_path(self):
# sets the root path, relative to 'chroot_path', of the stage1 root
self.settings["root_path"]=normpath("/tmp/stage1root")
print "stage1 root path is "+self.settings["root_path"]
-
+
def set_cleanables(self):
generic_stage_target.set_cleanables(self)
self.settings["cleanables"].extend(["/usr/share/gettext",\
os.makedirs(self.settings["stage_path"]+"/dev")
if not os.path.isfile(self.settings["stage_path"]+"/proc/.keep"):
try:
- proc_keepfile = open(self.settings["stage_path"]+"/proc/.keep","w")
+ proc_keepfile = open(self.settings["stage_path"]+"/proc/.keep","w")
proc_keepfile.write('')
proc_keepfile.close()
except IOError:
try:
dev_keepfile = open(self.settings["stage_path"]+"/dev/.keep","w")
dev_keepfile.write('')
- dev_keepfile.close()
+ dev_keepfile.close()
except IOError:
print "!!! Failed to create %s" % (self.settings["stage_path"]+"/dev/.keep")
else:
self.required_values=[]
self.valid_values=["chost"]
generic_stage_target.__init__(self,spec,addlargs)
-
+
def set_source_path(self):
if "SEEDCACHE" in self.settings and os.path.isdir(normpath(self.settings["storedir"]+"/tmp/"+self.settings["source_subpath"]+"/tmp/stage1root/")):
self.settings["source_path"]=normpath(self.settings["storedir"]+"/tmp/"+self.settings["source_subpath"]+"/tmp/stage1root/")
def set_cleanables(self):
generic_stage_target.set_cleanables(self)
self.settings["cleanables"].extend(["/etc/portage"])
-
+
def override_chost(self):
if "chost" in self.settings:
self.settings["CHOST"]=list_to_string(self.settings["chost"])
if os.path.exists(self.settings["controller_file"]):
cmd("/bin/bash "+self.settings["controller_file"]+" run "+\
list_bashify(self.settings["tinderbox/packages"]),"run script failed.",env=self.env)
-
+
except CatalystError:
self.unbind()
raise CatalystError,"Tinderbox aborting due to error."
"config_profile_link","setup_confdir","bind","chroot_setup",\
"setup_environment","run_local","preclean","unbind","clean",\
"clear_autoresume"]
-
+
__target_map = {"tinderbox":tinderbox_target}
else:
if "grab_multiple" in dir(portage):
return portage.stack_lists( portage.grab_multiple(file, portage.settings.profiles, portage.grabfile), incremental=1);
- else:
+ else:
return portage.stack_lists( [portage.grabfile_package(os.path.join(x, file)) for x in portage.settings.profiles], incremental=1);
# loaded the stacked packages / packages.build files