class generic:
def __init__(self,myspec):
self.settings=myspec
+
def mount_safety_check(self):
"""
Make sure that no bind mounts exist in chrootdir (to use before
performing a bind mount).
"""
pass
+
def mount_all(self):
"""do all bind mounts"""
pass
+
def umount_all(self):
"""unmount all bind mounts"""
pass
for x, myline in enumerate(self.lines):
myline = myline.strip()
- # Force the line to be clean
+ # Force the line to be clean
# Remove comments (anything following #)
myline = trailing_comment.sub("", myline)
# Look for separator
msearch = myline.find(self.key_value_separator)
-
+
# If a separator is found, assume it's a new key
if msearch != -1:
# Split on the first occurrence of the separator, creating two strings in the array mobjs
cur_array += myline.split()
else:
raise CatalystError, "Syntax error: " + x
-
+
# XXX: Do we really still need this "single value is a string" behavior?
if len(cur_array) == 2:
values[cur_array[0]] = cur_array[1]
else:
values[cur_array[0]] = cur_array[1:]
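# Roughly (illustrative values, assuming the "key: value" spec syntax used
# elsewhere in catalyst, not taken from this hunk):
#   a single-value line yields a plain string, e.g. values["version_stamp"] == "2008.0"
#   a multi-value line yields a list,          e.g. values["grp/use"] == ["foo", "bar", "oni"]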
-
+
if not self.empty_values:
for x in values.keys():
# Delete empty key pairs
def __init__(self, filename=""):
if filename:
self.parse_file(filename)
-
self.delete_lock_from_path_list()
if self.islocked():
self.fcntl_unlock()
+
def __init__(self,lockdir):
self.locked=False
self.myfd=None
self.set_lockdir(lockdir)
self.set_lockfilename(".catalyst_lock")
self.set_lockfile()
-
+
if LockDir.lock_dirs_in_use.count(lockdir)>0:
raise "This directory already associated with a lock object"
else:
LockDir.lock_dirs_in_use.append(lockdir)
-
- self.hardlock_paths={}
-
+ self.hardlock_paths={}
def delete_lock_from_path_list(self):
i=0
def set_gid(self,gid):
if not self.islocked():
# if self.settings.has_key("DEBUG"):
-# print "setting gid to", gid
+# print "setting gid to", gid
self.gid=gid
def set_lockdir(self,lockdir):
print "HARDLOCKING doesnt support shared-read locks"
print "using exclusive write locks"
self.hard_lock()
-
+
def write_lock(self):
if not self.locking_method == "HARDLOCK":
self.fcntl_lock("write")
os.umask(old_mask)
else:
self.myfd = os.open(self.lockfile, os.O_CREAT|os.O_RDWR,0660)
-
+
try:
if locktype == "read":
self.locking_method(self.myfd,fcntl.LOCK_SH|fcntl.LOCK_NB)
else:
self.locked=True
#writemsg("Lockfile obtained\n")
-
-
+
def fcntl_unlock(self):
import fcntl
unlinkfile = 1
# open fd closed automatically on them.
#if type(lockfilename) == types.StringType:
# os.close(myfd)
-
+
if (self.myfd != None):
os.close(self.myfd)
self.myfd=None
#mypath = self.normpath(path)
if os.path.isdir(self.lockdir) and os.path.isfile(self.myhardlock):
self.hardlock_paths[self.lockdir]=self.myhardlock
-
+
def remove_hardlock_file_from_cleanup(self):
if self.hardlock_paths.has_key(self.lockdir):
del self.hardlock_paths[self.lockdir]
newpath = "/"+newpath.lstrip("/")
return newpath
-
def hardlink_is_mine(self,link,lock):
import stat
try:
pid = hostpid[-1]
if not mylist.has_key(filename):
mylist[filename] = {}
-
+
if not mylist[filename].has_key(host):
mylist[filename][host] = []
mylist[filename][host].append(pid)
pass
return results
-
if __name__ == "__main__":
def lock_work():
- print
+ print
for i in range(1,6):
print i,time.time()
time.sleep(1)
print "Lock 5 starting"
import time
Lock1=LockDir("/tmp/lock_path")
- Lock1.write_lock()
+ Lock1.write_lock()
print "Lock1 write lock"
-
+
lock_work()
-
- Lock1.unlock()
+
+ Lock1.unlock()
print "Lock1 unlock"
-
+
Lock1.read_lock()
print "Lock1 read lock"
-
+
lock_work()
-
- Lock1.unlock()
+
+ Lock1.unlock()
print "Lock1 unlock"
Lock1.read_lock()
print "Lock1 read lock"
-
+
Lock1.write_lock()
print "Lock1 write lock"
-
+
lock_work()
-
+
Lock1.unlock()
print "Lock1 unlock"
-
+
Lock1.read_lock()
print "Lock1 read lock"
-
+
lock_work()
-
+
Lock1.unlock()
print "Lock1 unlock"
+
#Lock1.write_lock()
#time.sleep(2)
#Lock1.unlock()
if message:
(type,value)=sys.exc_info()[:2]
if value!=None:
- print
+ print
traceback.print_exc(file=sys.stdout)
print
print "!!! catalyst: "+message
print
-
+
class LockInUse(Exception):
def __init__(self, message):
if message:
def warn(msg):
print "!!! catalyst: "+msg
-
def find_binary(myc):
"""look through the environmental path for an executable file named whatever myc is"""
# this sucks. badly.
return "%s/%s" % (x,myc)
return None
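# A minimal sketch (hypothetical, not part of this patch) of the PATH walk the
# docstring above describes; "find_binary_sketch" is an illustrative name and
# the split-on-":" loop is an assumption, not necessarily catalyst's exact code:
def find_binary_sketch(myc):
	import os
	for x in os.environ["PATH"].split(":"):
		if x and os.path.exists("%s/%s" % (x, myc)):
			return "%s/%s" % (x, myc)
	return None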
-
def spawn_bash(mycommand,env={},debug=False,opt_name=None,**keywords):
"""spawn mycommand as an arguement to bash"""
args=[BASH_BINARY]
#def spawn_get_output(mycommand,spawn_type=spawn,raw_exit_code=False,emulate_gso=True, \
# collect_fds=[1],fd_pipes=None,**keywords):
+
def spawn_get_output(mycommand,raw_exit_code=False,emulate_gso=True, \
collect_fds=[1],fd_pipes=None,**keywords):
"""call spawn, collecting the output to fd's specified in collect_fds list
retval=process_exit_code(retval)
return [retval, mydata]
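# A hypothetical usage sketch (not part of this patch), based only on the
# docstring above and the [retval, mydata] return value; "uname -r" is an
# illustrative command and mydata is assumed to be the captured output lines:
#
#   retval, mydata = spawn_get_output("uname -r")
#   if retval == 0:
#       running_kernel = mydata[0].strip()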
-
# base spawn function
def spawn(mycommand,env={},raw_exit_code=False,opt_name=None,fd_pipes=None,returnpid=False,\
uid=None,gid=None,groups=None,umask=None,logfile=None,path_lookup=True,\
selinux_context=None, raise_signals=False, func_call=False):
- """base fork/execve function.
+ """base fork/execve function.
mycommand is the desired command; if you need a command to execute in a bash/sandbox/fakeroot
environment, use the appropriate spawn call. This is a straight fork/exec code path.
Either a tuple or a string can be passed in. If uid/gid/groups/umask are specified, the forked
process is changed to those values. If path_lookup is on, a non-absolute command will be converted
to an absolute command; otherwise None is returned.
-
+
selinux_context is the desired context, dependent on selinux being available.
opt_name controls the name the process goes by.
fd_pipes controls which file descriptor numbers are left open in the forked process; it's a dict
mapping the current raw fd # to the desired fd # in the child.
-
+
func_call is a boolean specifying that a python function should be executed; use spawn_func instead.
raise_signals is questionable: basically, throw an exception if the process is signaled. No
exception is thrown if raw_input is on.
-
+
logfile overloads the specified fd's to write to a tee process which logs to logfile
returnpid returns the relevant pids (a list, including the logging process if logfile is on).
-
+
non-returnpid calls to spawn will block until the process has exited, returning the exitcode/signal.
raw_exit_code controls whether the actual waitpid result is returned, or interpreted."""
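# A hypothetical usage sketch (not part of this patch), using only the
# parameters documented above; the commands and paths are illustrative:
#
#   retval = spawn("tar cjf /tmp/foo.tbz2 foo", env={"PATH": "/bin:/usr/bin"},
#                  opt_name="packer", logfile="/tmp/packer.log")
#   pids = spawn("ls /tmp", returnpid=True)  # caller is responsible for waiting on these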
-
myc=''
if not func_call:
if type(mycommand)==types.StringType:
if raw_exit_code:
return retval
return process_exit_code(retval)
-
+
if fd_pipes == None:
fd_pipes={}
fd_pipes[0] = 0
for x in k:
trg_fd.append(x)
src_fd.append(fd_pipes[x])
-
+
# run through said list dup'ing descriptors so that they won't be waxed
# by other dup calls.
for x in range(0,len(trg_fd)):
os.dup2(src_fd[x], trg_fd[x])
else:
trg_fd=[0,1,2]
-
+
# wax all open descriptors that weren't requested to be left open.
for x in range(0,max_fd_limit):
if x not in trg_fd:
raise SystemExit()
return (retval & 0xff) << 8 # interrupted by signal
-
def file_locate(settings,filelist,expand=1):
#if expand=1, non-absolute paths will be accepted and
# expanded to os.getcwd()+"/"+localpath if file exists
meep
bark
gleep moop
-
+
This file would be interpreted as defining three items: item1, item2 and item3. item1 would contain
the string value "value1". Item2 would contain an ordered list [ "foo", "bar", "oni" ]. item3
would contain an ordered list as well: [ "meep", "bark", "gleep", "moop" ]. It's important to note
else:
makeconf={}
return makeconf
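# A minimal sketch (assumption, not part of this patch) of the parse result the
# config-file explanation above describes, shown as the resulting values dict:
#
#   {"item1": "value1",
#    "item2": ["foo", "bar", "oni"],
#    "item3": ["meep", "bark", "gleep", "moop"]}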
-
+
def msg(mymsg,verblevel=1):
if verbosity>=verblevel:
print mymsg
# Removing ending slash
path1 = re.sub("/$","",path1)
path2 = re.sub("/$","",path2)
-
+
if path1 == path2:
return 1
return 0
def addl_arg_parse(myspec,addlargs,requiredspec,validspec):
"helper function to help targets parse additional arguments"
global valid_config_file_values
-
+
messages = []
for x in addlargs.keys():
if x not in validspec and x not in valid_config_file_values and x not in requiredspec:
messages.append("Argument \""+x+"\" not recognized.")
else:
myspec[x]=addlargs[x]
-
+
for x in requiredspec:
if not myspec.has_key(x):
messages.append("Required argument \""+x+"\" not specified.")
if messages:
raise CatalystError, '\n\tAlso: '.join(messages)
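# A hypothetical call sketch (not part of this patch); the argument lists are
# illustrative, not catalyst's real required/valid sets:
#
#   addl_arg_parse(myspec, addlargs,
#                  ["version_stamp", "target"],           # requiredspec
#                  ["version_stamp", "target", "chost"])  # validspec
#   # raises CatalystError listing every unrecognized or missing argument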
-
+
def touch(myfile):
try:
myf=open(myfile,"w")
if TrailingSlash:
newpath=newpath+'/'
return newpath
-
else:
""" First clean up any existing target stuff """
# XXX WTF are we removing the old tarball before we start building the
- # XXX new one? If the build fails, you don't want to be left with
+ # XXX new one? If the build fails, you don't want to be left with
# XXX nothing at all
# if os.path.isfile(self.settings["target_path"]):
# cmd("rm -f "+self.settings["target_path"],\
raise CatalystError,\
"ISO volume ID must not exceed 32 characters."
else:
- self.settings["iso_volume_id"]="catalyst "+self.settings["snapshot"]
+ self.settings["iso_volume_id"]="catalyst "+self.settings["snapshot"]
def set_action_sequence(self):
""" Default action sequence for run method """
"""
Check and verify that none of our paths in mypath are mounted. We don't
- want to clean up with things still mounted, and this allows us to check.
+ want to clean up with things still mounted, and this allows us to check.
Returns 1 if OK, 0 in the "something is still mounted" case.
"""
"unpack")
if self.settings.has_key("SEEDCACHE"):
- if os.path.isdir(self.settings["source_path"]):
+ if os.path.isdir(self.settings["source_path"]):
""" SEEDCACHE Is a directory, use rsync """
unpack_cmd="rsync -a --delete "+self.settings["source_path"]+\
" "+self.settings["chroot_path"]
""" Autoresume is valid, tarball is valid """
unpack=False
invalid_snapshot=True
-
+
elif os.path.isdir(self.settings["source_path"]) \
and not os.path.exists(self.settings["autoresume_path"]+\
"unpack"):
snapshot_hash=read_from_clst(self.settings["autoresume_path"]+\
"unpack_portage")
- if self.settings.has_key("SNAPCACHE"):
+ if self.settings.has_key("SNAPCACHE"):
snapshot_cache_hash=\
read_from_clst(self.settings["snapshot_cache_path"]+\
"catalyst-hash")
unpack=False
if unpack:
- if self.settings.has_key("SNAPCACHE"):
+ if self.settings.has_key("SNAPCACHE"):
self.snapshot_lock_object.write_lock()
if os.path.exists(destdir):
print cleanup_msg
print "Unpacking portage tree (This can take a long time) ..."
cmd(unpack_cmd,unpack_errmsg,env=self.env)
- if self.settings.has_key("SNAPCACHE"):
+ if self.settings.has_key("SNAPCACHE"):
myf=open(self.settings["snapshot_cache_path"]+"catalyst-hash","w")
myf.write(self.settings["snapshot_path_hash"])
myf.close()
myf.write(self.settings["snapshot_path_hash"])
myf.close()
- if self.settings.has_key("SNAPCACHE"):
+ if self.settings.has_key("SNAPCACHE"):
self.snapshot_lock_object.unlock()
def config_profile_link(self):
def portage_overlay(self):
""" We copy the contents of our overlays to /usr/local/portage """
if self.settings.has_key("portage_overlay"):
- for x in self.settings["portage_overlay"]:
+ for x in self.settings["portage_overlay"]:
if os.path.exists(x):
print "Copying overlay dir " +x
cmd("mkdir -p "+self.settings["chroot_path"]+\
""" Copy over the root_overlay """
if self.settings.has_key(self.settings["spec_prefix"]+"/root_overlay"):
for x in self.settings[self.settings["spec_prefix"]+\
- "/root_overlay"]:
+ "/root_overlay"]:
if os.path.exists(x):
print "Copying root_overlay: "+x
cmd("rsync -a "+x+"/ "+\
pass
def bind(self):
- for x in self.mounts:
+ for x in self.mounts:
if not os.path.exists(self.settings["chroot_path"]+x):
os.makedirs(self.settings["chroot_path"]+x,0755)
self.override_cbuild()
self.override_chost()
self.override_cflags()
- self.override_cxxflags()
- self.override_ldflags()
+ self.override_cxxflags()
+ self.override_ldflags()
if self.settings.has_key("AUTORESUME") \
and os.path.exists(self.settings["autoresume_path"]+"chroot_setup"):
print "Resume point detected, skipping chroot_setup operation..."
else:
print "Setting up chroot..."
-
+
#self.makeconf=read_makeconf(self.settings["chroot_path"]+"/etc/portage/make.conf")
-
+
cmd("cp /etc/resolv.conf "+self.settings["chroot_path"]+"/etc",\
"Could not copy resolv.conf into place.",env=self.env)
-
+
""" Copy over the envscript, if applicable """
if self.settings.has_key("ENVSCRIPT"):
if not os.path.exists(self.settings["ENVSCRIPT"]):
and os.path.exists(self.settings["autoresume_path"]+"clean"):
print "Resume point detected, skipping clean operation..."
else:
- for x in self.settings["cleanables"]:
+ for x in self.settings["cleanables"]:
print "Cleaning chroot: "+x+"... "
cmd("rm -rf "+self.settings["destpath"]+x,"Couldn't clean "+\
x,env=self.env)
print x,"not a directory or does not exist, skipping 'empty' operation."
continue
print "Emptying directory",x
- """
+ """
stat the dir, delete the dir, recreate the dir and set
the proper perms and ownership
"""
cmd("/bin/bash "+self.settings["controller_file"]+\
" preclean","preclean script failed.",env=self.env)
touch(self.settings["autoresume_path"]+"preclean")
-
+
except:
self.unbind()
raise CatalystError, "Build failed, could not execute preclean"
except:
self.mount_safety_check()
raise
-
+
self.chroot_lock.unlock()
def unmerge(self):
print "Resume point detected, skipping setup_overlay operation..."
else:
if self.settings.has_key(self.settings["spec_prefix"]+"/overlay"):
- for x in self.settings[self.settings["spec_prefix"]+"/overlay"]:
+ for x in self.settings[self.settings["spec_prefix"]+"/overlay"]:
if os.path.exists(x):
cmd("rsync -a "+x+"/ "+\
self.settings["target_path"],\
print "clearing kerncache ..."
self.clear_kerncache()
-# vim: ts=4 sw=4 sta et sts=4 ai
+# vim: ts=4 sw=4 sta et sts=4 ai
def __init__(self,spec,addlargs):
self.required_values=["version_stamp","target","subarch",\
"rel_type","profile","snapshot","source_subpath"]
-
+
self.valid_values=self.required_values[:]
self.valid_values.extend(["grp/use"])
if not addlargs.has_key("grp"):
raise CatalystError,"Required value \"grp\" not specified in spec."
-
+
self.required_values.extend(["grp"])
if type(addlargs["grp"])==types.StringType:
addlargs["grp"]=[addlargs["grp"]]
-
+
if addlargs.has_key("grp/use"):
- if type(addlargs["grp/use"])==types.StringType:
- addlargs["grp/use"]=[addlargs["grp/use"]]
-
+ if type(addlargs["grp/use"])==types.StringType:
+ addlargs["grp/use"]=[addlargs["grp/use"]]
+
for x in addlargs["grp"]:
self.required_values.append("grp/"+x+"/packages")
self.required_values.append("grp/"+x+"/type")
-
+
generic_stage_target.__init__(self,spec,addlargs)
def set_target_path(self):
try:
cmd("/bin/bash "+self.settings["controller_file"]+" run "+self.settings["grp/"+pkgset+"/type"]\
+" "+pkgset+" "+mypackages,env=self.env)
-
+
except CatalystError:
self.unbind()
raise CatalystError,"GRP build aborting due to error."
def set_mounts(self):
self.mounts.append("/tmp/grp")
self.mountmap["/tmp/grp"]=self.settings["target_path"]
-
+
def generate_digests(self):
for pkgset in self.settings["grp"]:
if self.settings["grp/"+pkgset+"/type"] == "pkgset":
destdir=normpath(self.settings["target_path"]+"/"+pkgset+"/All")
- print "Digesting files in the pkgset....."
+ print "Digesting files in the pkgset....."
digests=glob.glob(destdir+'/*.DIGESTS')
for i in digests:
if os.path.exists(i):
self.gen_digest_file(normpath(destdir+"/"+i))
else:
destdir=normpath(self.settings["target_path"]+"/"+pkgset)
- print "Digesting files in the srcset....."
-
+ print "Digesting files in the srcset....."
+
digests=glob.glob(destdir+'/*.DIGESTS')
for i in digests:
if os.path.exists(i):
def set_action_sequence(self):
self.settings["action_sequence"]=["unpack","unpack_snapshot",\
- "config_profile_link","setup_confdir","portage_overlay","bind","chroot_setup",\
- "setup_environment","run_local","unbind",\
- "generate_digests","clear_autoresume"]
+ "config_profile_link","setup_confdir","portage_overlay","bind","chroot_setup",\
+ "setup_environment","run_local","unbind",\
+ "generate_digests","clear_autoresume"]
def register(foo):
foo.update({"grp":grp_target})
"config_profile_link","setup_confdir","portage_overlay",\
"bind","chroot_setup","setup_environment","build_packages",\
"unbind", "clean","clear_autoresume"]
+
def set_target_path(self):
self.settings["target_path"]=normpath(self.settings["storedir"]+"/builds/"+self.settings["target_subpath"])
if self.settings.has_key("AUTORESUME") \
cmd("rm -rf "+self.settings["target_path"],\
"Could not remove existing directory: "+self.settings["target_path"],env=self.env)
touch(self.settings["autoresume_path"]+"setup_target_path")
-
+
if not os.path.exists(self.settings["target_path"]):
os.makedirs(self.settings["target_path"])
-
-
+
def set_target_path(self):
pass
+
def set_spec_prefix(self):
self.settings["spec_prefix"]="livecd"
-
+
def set_use(self):
generic_stage_target.set_use(self)
if self.settings.has_key("use"):
"""
def __init__(self,spec,addlargs):
self.required_values=["boot/kernel"]
-
+
self.valid_values=[]
-
+
self.valid_values.extend(self.required_values)
self.valid_values.extend(["livecd/cdtar","livecd/empty","livecd/rm",\
"livecd/unmerge","livecd/iso","livecd/gk_mainargs","livecd/type",\
"livecd/root_overlay","livecd/users","portage_overlay",\
"livecd/fstype","livecd/fsops","livecd/linuxrc","livecd/bootargs",\
"gamecd/conf","livecd/xdm","livecd/xsession","livecd/volid"])
-
+
generic_stage_target.__init__(self,spec,addlargs)
if not self.settings.has_key("livecd/type"):
self.settings["livecd/type"] = "generic-livecd"
file_locate(self.settings, ["cdtar","controller_file"])
-
+
def set_source_path(self):
self.settings["source_path"]=normpath(self.settings["storedir"]+"/builds/"+self.settings["source_subpath"]+".tar.bz2")
if os.path.isfile(self.settings["source_path"]):
self.settings["source_path"]=normpath(self.settings["storedir"]+"/tmp/"+self.settings["source_subpath"]+"/")
if not os.path.exists(self.settings["source_path"]):
raise CatalystError,"Source Path: "+self.settings["source_path"]+" does not exist."
-
+
def set_spec_prefix(self):
self.settings["spec_prefix"]="livecd"
except:
self.unbind()
raise CatalystError,"Couldn't open "+self.settings["chroot_path"]+"/etc/modprobe.d/blacklist.conf."
-
+
myf.write("\n#Added by Catalyst:")
for x in self.settings["livecd/modblacklist"]:
myf.write("\nblacklist "+x)
myf.close()
-
+
def unpack(self):
unpack=True
display_msg=None
self.valid_values.append("netboot2/packages/"+x+"/files")
except:
raise CatalystError,"configuration error in netboot2/packages."
-
+
generic_stage_target.__init__(self,spec,addlargs)
self.set_build_kernel_vars()
self.settings["merge_path"]=normpath("/tmp/image/")
loopy=[self.settings["netboot2/packages"]]
else:
loopy=self.settings["netboot2/packages"]
-
+
for x in loopy:
if self.settings.has_key("netboot2/packages/"+x+"/files"):
if type(self.settings["netboot2/packages/"+x+"/files"]) == types.ListType:
touch(self.settings["autoresume_path"]+"copy_files_to_image")
- def setup_overlay(self):
+ def setup_overlay(self):
if self.settings.has_key("AUTORESUME") \
and os.path.exists(self.settings["autoresume_path"]+"setup_overlay"):
print "Resume point detected, skipping setup_overlay operation..."
else:
if self.settings.has_key("netboot2/overlay"):
- for x in self.settings["netboot2/overlay"]:
+ for x in self.settings["netboot2/overlay"]:
if os.path.exists(x):
cmd("rsync -a "+x+"/ "+\
self.settings["chroot_path"] + self.settings["merge_path"], "netboot2/overlay: "+x+" copy failed.",env=self.env)
print "netboot2: removing " + x
os.system("rm -rf " + self.settings["chroot_path"] + self.settings["merge_path"] + x)
- def empty(self):
+ def empty(self):
if self.settings.has_key("AUTORESUME") \
and os.path.exists(self.settings["autoresume_path"]+"empty"):
print "Resume point detected, skipping empty operation..."
"netboot/packages"
]
self.required_values=[]
-
+
try:
if addlargs.has_key("netboot/packages"):
if type(addlargs["netboot/packages"]) == types.StringType:
loopy=[addlargs["netboot/packages"]]
else:
loopy=addlargs["netboot/packages"]
-
+
# for x in loopy:
# self.required_values.append("netboot/packages/"+x+"/files")
except:
raise CatalystError,"configuration error in netboot/packages."
-
-
-
generic_stage_target.__init__(self,spec,addlargs)
self.set_build_kernel_vars(addlargs)
# Custom Kernel Tarball --- use that instead ...
# unless the user wants specific CFLAGS/CXXFLAGS, let's use -Os
-
+
for envvar in "CFLAGS", "CXXFLAGS":
if not os.environ.has_key(envvar) and not addlargs.has_key(envvar):
self.settings[envvar] = "-Os -pipe"
-
def set_root_path(self):
# ROOT= variable for emerges
# except CatalystError:
# self.unbind()
# raise CatalystError,"netboot build aborting due to error."
-
+
def build_busybox(self):
# build busybox
if self.settings.has_key("netboot/busybox_config"):
except CatalystError:
self.unbind()
raise CatalystError,"netboot build aborting due to error."
-
def copy_files_to_image(self):
# create image
loopy=[self.settings["netboot/packages"]]
else:
loopy=self.settings["netboot/packages"]
-
+
for x in loopy:
if self.settings.has_key("netboot/packages/"+x+"/files"):
if type(self.settings["netboot/packages/"+x+"/files"]) == types.ListType:
self.unbind()
raise CatalystError,"netboot build aborting due to error."
-
def create_netboot_files(self):
# finish it all up
try:
# end
print "netboot: build finished !"
-
def set_action_sequence(self):
self.settings["action_sequence"]=["unpack","unpack_snapshot",
"config_profile_link","setup_confdir","bind","chroot_setup",\
def __init__(self,myspec,addlargs):
self.required_values=["version_stamp","target"]
self.valid_values=["version_stamp","target"]
-
+
generic_target.__init__(self,myspec,addlargs)
self.settings=myspec
self.settings["target_subpath"]="portage"
x=normpath(self.settings["storedir"]+"/snapshots")
if not os.path.exists(x):
os.makedirs(x)
-
+
def mount_safety_check(self):
pass
-
+
def run(self):
if self.settings.has_key("PURGEONLY"):
self.purge()
self.setup()
print "Creating Portage tree snapshot "+self.settings["version_stamp"]+\
" from "+self.settings["portdir"]+"..."
-
+
mytmp=self.settings["tmp_path"]
if not os.path.exists(mytmp):
os.makedirs(mytmp)
-
+
cmd("rsync -a --delete --exclude /packages/ --exclude /distfiles/ --exclude /local/ --exclude CVS/ --exclude .svn --filter=H_**/files/digest-* "+\
self.settings["portdir"]+"/ "+mytmp+"/portage/","Snapshot failure",env=self.env)
-
+
print "Compressing Portage snapshot tarball..."
cmd("tar cjf "+self.settings["snapshot_path"]+" -C "+mytmp+" portage",\
"Snapshot creation failure",env=self.env)
-
+
self.gen_contents_file(self.settings["snapshot_path"])
self.gen_digest_file(self.settings["snapshot_path"])
self.cleanup()
print "snapshot: complete!"
-
+
def kill_chroot_pids(self):
pass
os.makedirs(myemp,0755)
os.chown(myemp,mystat[ST_UID],mystat[ST_GID])
os.chmod(myemp,mystat[ST_MODE])
-
+
def register(foo):
foo.update({"snapshot":snapshot_target})
return foo
self.valid_values.extend(["update_seed"])
generic_stage_target.__init__(self,spec,addlargs)
self.set_update_seed()
-
+
def set_stage_path(self):
self.settings["stage_path"]=normpath(self.settings["chroot_path"]+self.settings["root_path"])
print "stage1 stage path is "+self.settings["stage_path"]
-
+
def set_root_path(self):
# sets the root path, relative to 'chroot_path', of the stage1 root
self.settings["root_path"]=normpath("/tmp/stage1root")
print "stage1 root path is "+self.settings["root_path"]
-
+
def set_cleanables(self):
generic_stage_target.set_cleanables(self)
self.settings["cleanables"].extend(["/usr/share/gettext",\
os.makedirs(self.settings["stage_path"]+"/dev")
if not os.path.isfile(self.settings["stage_path"]+"/proc/.keep"):
try:
- proc_keepfile = open(self.settings["stage_path"]+"/proc/.keep","w")
+ proc_keepfile = open(self.settings["stage_path"]+"/proc/.keep","w")
proc_keepfile.write('')
proc_keepfile.close()
except IOError:
try:
dev_keepfile = open(self.settings["stage_path"]+"/dev/.keep","w")
dev_keepfile.write('')
- dev_keepfile.close()
+ dev_keepfile.close()
except IOError:
print "!!! Failed to create %s" % (self.settings["stage_path"]+"/dev/.keep")
else:
self.required_values=[]
self.valid_values=["chost"]
generic_stage_target.__init__(self,spec,addlargs)
-
+
def set_source_path(self):
if self.settings.has_key("SEEDCACHE") and os.path.isdir(normpath(self.settings["storedir"]+"/tmp/"+self.settings["source_subpath"]+"/tmp/stage1root/")):
self.settings["source_path"]=normpath(self.settings["storedir"]+"/tmp/"+self.settings["source_subpath"]+"/tmp/stage1root/")
if os.path.exists(self.settings["controller_file"]):
cmd("/bin/bash "+self.settings["controller_file"]+" run "+\
list_bashify(self.settings["tinderbox/packages"]),"run script failed.",env=self.env)
-
+
except CatalystError:
self.unbind()
raise CatalystError,"Tinderbox aborting due to error."
def set_cleanables(self):
self.settings["cleanables"]=["/etc/resolv.conf","/var/tmp/*","/root/*",\
"/usr/portage"]
+
def set_action_sequence(self):
#Default action sequence for run method
self.settings["action_sequence"]=["unpack","unpack_snapshot",\
"config_profile_link","setup_confdir","bind","chroot_setup",\
"setup_environment","run_local","preclean","unbind","clean",\
"clear_autoresume"]
-
+
def register(foo):
foo.update({"tinderbox":tinderbox_target})
return foo