# Copyright 2002-2005 Gentoo Foundation; Distributed under the GPL v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/ChangeLog,v 1.311 2005/07/27 20:30:50 rocket Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/ChangeLog,v 1.312 2005/08/09 14:12:26 rocket Exp $
+
+ 09 Aug 2005; Eric Edgar <rocket@gentoo.org> catalyst,
+ +modules/catalyst_lock.py, modules/catalyst_support.py,
+ modules/embedded_target.py, modules/generic_stage_target.py,
+ modules/grp_target.py, modules/livecd_stage1_target.py,
+ modules/livecd_stage2_target.py, modules/netboot_target.py,
+ modules/snapshot_target.py, modules/stage1_target.py,
+ modules/stage2_target.py, modules/stage4_target.py,
+ targets/support/functions.sh:
+ Add locking support. Simplify the unpack and unpack_snapshot code. Remove
+ the now-redundant dir_setup action. Change --clear_autoresume to
+ --clear-autoresume. Add seedcache support (reuses the output of a previous
+ target run), enabled via options=seedcache. Clean up functions.sh so that
+ paths are no longer printed with extra /'s.
27 Jul 2005; Eric Edgar <rocket@gentoo.org> catalyst,
modules/catalyst_support.py, modules/generic_stage_target.py:
#!/usr/bin/python
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/catalyst,v 1.87 2005/07/27 20:30:50 rocket Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/catalyst,v 1.88 2005/08/09 14:12:26 rocket Exp $
# Maintained in full by:
# Eric Edgar <rocket@gentoo.org>
def usage():
print "Usage catalyst [options] [-C variable=value...] [ -s identifier]"
- print " -a --clear_autoresume clear autoresume flags"
+ print " -a --clear-autoresume clear autoresume flags"
print " -c --config use specified configuration file"
print " -C --cli catalyst commandline (MUST BE LAST OPTION)"
print " -d --debug enable debugging"
if "snapcache" in string.split(conf_values["options"]):
print "Snapshot cache support enabled."
conf_values["SNAPCACHE"]="1"
+
+ if "seedcache" in string.split(conf_values["options"]):
+ print "Seed cache support enabled."
+ conf_values["SEEDCACHE"]="1"
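+	# A sketch of the catalyst.conf line that enables this branch (assuming the
+	# stock key="value" format; list whichever options are actually wanted):
+	#   options="kerncache seedcache snapcache"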
if "kerncache" in string.split(conf_values["options"]):
print "Kernel cache support enabled."
print "Purge support enabled."
conf_values["PURGE"]="1"
- if "clear_autoresume" in string.split(conf_values["options"]):
+ if "clear-autoresume" in string.split(conf_values["options"]):
print "Cleaning autoresume flags support enabled."
conf_values["CLEAR_AUTORESUME"]="1"
-
if myconf.has_key("envscript"):
print "Envscript support enabled."
conf_values["ENVSCRIPT"]=myconf["envscript"]
sys.exit(2)
else:
conf_values["PURGE"]="1"
- if o in ("-a", "--clear_autoresume"):
+ if o in ("-a", "--clear-autoresume"):
if len(sys.argv) < 3:
print "!!! catalyst: please specify one of either -f or -C\n"
usage()
print "\nCatalyst build aborted due to user interrupt ( Ctrl-C )"
print
print "Catalyst aborting...."
- except KeyError:
- print "\nproblem with command line or spec file ( Key Error )"
+ #except KeyError:
+ # print "\nproblem with command line or spec file ( Key Error )"
+ # print "Key: "+str(sys.exc_value)+" was not found"
+ # print "Catalyst aborting...."
+ # sys.exit(2)
+ except UnboundLocalError:
+ print
+ print "UnboundLocalError: "+str(sys.exc_value)+" was not found"
print
print "Catalyst aborting...."
sys.exit(2)
- except UnboundLocalError:
- pass
+ except LockInUse:
+ print "Catalyst aborting...."
+ sys.exit(2)
except:
print "Catalyst aborting...."
raise
--- /dev/null
+#!/usr/bin/python
+import os
+import fcntl
+import errno
+import sys
+import string
+import time
+from catalyst_support import *
+
+def writemsg(mystr):
+ sys.stderr.write(mystr)
+ sys.stderr.flush()
+
+#def normpath(mypath):
+#newpath = os.path.normpath(mypath)
+#if len(newpath) > 1:
+# if newpath[1] == "/":
+# newpath = "/"+newpath.lstrip("/")
+#return newpath
+
+class LockDir:
+ locking_method=fcntl.flock
+ #locking_method="HARDLOCK"
+ die_on_failed_lock=True
+ lock_dirs_in_use=[]
+ def __del__(self):
+ self.clean_my_hardlocks()
+ self.delete_lock_from_path_list()
+ if self.islocked():
+ self.fcntl_unlock()
+ def __init__(self,lockdir):
+ self.locked=False
+ self.myfd=None
+ self.set_gid(250)
+ self.locking_method=LockDir.locking_method
+ self.set_lockdir(lockdir)
+ self.set_lockfilename(".catalyst_lock")
+ self.set_lockfile()
+
+ if LockDir.lock_dirs_in_use.count(lockdir)>0:
+			raise "This directory is already associated with a lock object"
+ else:
+ LockDir.lock_dirs_in_use.append(lockdir)
+
+ self.hardlock_paths={}
+
+ def delete_lock_from_path_list(self):
+ i=0
+ try:
+ if LockDir.lock_dirs_in_use:
+ for x in LockDir.lock_dirs_in_use:
+ if LockDir.lock_dirs_in_use[i] == self.lockdir:
+ del LockDir.lock_dirs_in_use[i]
+ break
+ i=i+1
+ except AttributeError:
+ pass
+
+
+ def islocked(self):
+ if self.locked:
+ return True
+ else:
+ return False
+
+ def set_gid(self,gid):
+ if not self.islocked():
+ #print "setting gid to", gid
+ self.gid=gid
+
+ def set_lockdir(self,lockdir):
+ if not os.path.exists(lockdir):
+ os.mkdir(lockdir)
+ if os.path.isdir(lockdir):
+ if not self.islocked():
+ if lockdir[-1] == "/":
+ lockdir=lockdir[:-1]
+ self.lockdir=normpath(lockdir)
+ #print "setting lockdir to", self.lockdir
+ else:
+ raise "lockdir not a directory",lockdir
+
+ def set_lockfilename(self,lockfilename):
+ if not self.islocked():
+ self.lockfilename=lockfilename
+ #print "setting lockfilename to", self.lockfilename
+
+ def set_lockfile(self):
+ if not self.islocked():
+ self.lockfile=normpath(self.lockdir+self.lockfilename)
+ #print "setting lockfile to", self.lockfile
+
+ def read_lock(self):
+ if not self.locking_method == "HARDLOCK":
+ self.fcntl_lock("read")
+ else:
+			print "HARDLOCKING doesn't support shared-read locks"
+ print "using exclusive write locks"
+ self.hard_lock()
+
+ def write_lock(self):
+ if not self.locking_method == "HARDLOCK":
+ self.fcntl_lock("write")
+ else:
+ self.hard_lock()
+
+ def unlock(self):
+ if not self.locking_method == "HARDLOCK":
+ self.fcntl_unlock()
+ else:
+ self.hard_unlock()
+
+ def fcntl_lock(self,locktype):
+ if self.myfd==None:
+ if not os.path.exists(os.path.dirname(self.lockdir)):
+ raise DirectoryNotFound, os.path.dirname(self.lockdir)
+ if not os.path.exists(self.lockfile):
+ old_mask=os.umask(000)
+ self.myfd = os.open(self.lockfile, os.O_CREAT|os.O_RDWR,0660)
+ try:
+ if os.stat(self.lockfile).st_gid != self.gid:
+ os.chown(self.lockfile,os.getuid(),self.gid)
+ except SystemExit, e:
+ raise
+ except OSError, e:
+ if e[0] == 2: #XXX: No such file or directory
+						return self.fcntl_lock(locktype)
+
+ else:
+ writemsg("Cannot chown a lockfile. This could cause inconvenience later.\n")
+
+ os.umask(old_mask)
+ else:
+ self.myfd = os.open(self.lockfile, os.O_CREAT|os.O_RDWR,0660)
+
+ try:
+ if locktype == "read":
+ self.locking_method(self.myfd,fcntl.LOCK_SH|fcntl.LOCK_NB)
+ else:
+ self.locking_method(self.myfd,fcntl.LOCK_EX|fcntl.LOCK_NB)
+ except IOError, e:
+ if "errno" not in dir(e):
+ raise
+ if e.errno == errno.EAGAIN:
+ if not LockDir.die_on_failed_lock:
+ # resource temp unavailable; eg, someone beat us to the lock.
+ writemsg("waiting for lock on %s\n" % self.lockfile)
+
+ # try for the exclusive or shared lock again.
+ if locktype == "read":
+ self.locking_method(self.myfd,fcntl.LOCK_SH)
+ else:
+ self.locking_method(self.myfd,fcntl.LOCK_EX)
+ else:
+ raise LockInUse,self.lockfile
+ elif e.errno == errno.ENOLCK:
+ pass
+ else:
+ raise
+ if not os.path.exists(self.lockfile):
+ os.close(self.myfd)
+ self.myfd=None
+ #writemsg("lockfile recurse\n")
+ self.fcntl_lock(locktype)
+ else:
+ self.locked=True
+ #writemsg("Lockfile obtained\n")
+
+
+ def fcntl_unlock(self):
+ import fcntl
+ unlinkfile = 1
+ if not os.path.exists(self.lockfile):
+ print "lockfile does not exist '%s'. Skipping Unlock..." % self.lockfile
+ if (self.myfd != None):
+ try:
+					os.close(self.myfd)
+ self.myfd=None
+ except:
+ pass
+ return False
+ if self.myfd == None:
+ return False
+
+ try:
+ if self.myfd == None:
+ self.myfd = os.open(self.lockfile, os.O_WRONLY,0660)
+ unlinkfile = 1
+ self.locking_method(self.myfd,fcntl.LOCK_UN)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ os.close(self.myfd)
+ self.myfd=None
+ raise IOError, "Failed to unlock file '%s'\n" % self.lockfile
+ try:
+ # This sleep call was added to allow other processes that are
+ # waiting for a lock to be able to grab it before it is deleted.
+ # lockfile() already accounts for this situation, however, and
+ # the sleep here adds more time than is saved overall, so am
+ # commenting until it is proved necessary.
+ #time.sleep(0.0001)
+ if unlinkfile:
+ InUse=False
+ try:
+ self.locking_method(self.myfd,fcntl.LOCK_EX|fcntl.LOCK_NB)
+ except:
+ print "Read lock may be in effect. skipping lockfile delete..."
+ InUse=True
+
+ ### We won the lock, so there isn't competition for it.
+ ### We can safely delete the file.
+ ###writemsg("Got the lockfile...\n")
+ ###writemsg("Unlinking...\n")
+ self.locking_method(self.myfd,fcntl.LOCK_UN)
+ if not InUse:
+ os.unlink(self.lockfile)
+ os.close(self.myfd)
+ self.myfd=None
+ #print "Unlinked lockfile..."
+
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ # We really don't care... Someone else has the lock.
+ # So it is their problem now.
+ print "Failed to get lock... someone took it."
+ print str(e)
+
+ # why test lockfilename? because we may have been handed an
+ # fd originally, and the caller might not like having their
+ # open fd closed automatically on them.
+ #if type(lockfilename) == types.StringType:
+ # os.close(myfd)
+
+ if (self.myfd != None):
+ os.close(self.myfd)
+ self.myfd=None
+ self.locked=False
+ time.sleep(.0001)
+
+ def hard_lock(self,max_wait=14400):
+ """Does the NFS, hardlink shuffle to ensure locking on the disk.
+ We create a PRIVATE lockfile, that is just a placeholder on the disk.
+ Then we HARDLINK the real lockfile to that private file.
+		If our file has 2 references, then we have the lock. :)
+		Otherwise we lather, rinse, and repeat.
+ We default to a 4 hour timeout.
+ """
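+		# In outline, the loop below:
+		#   1. creates a private file <lockdir>/.hardlock-<hostname>-<pid>
+		#   2. tries to hard-link it onto the shared lockfile name
+		#   3. stats the private file: a link count of 2 (or matching inodes)
+		#      means the link, and therefore the lock, is ours; otherwise it
+		#      sleeps 3 seconds and retries until max_wait expires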
+
+ self.myhardlock = self.hardlock_name(self.lockdir)
+
+ start_time = time.time()
+ reported_waiting = False
+
+ while(time.time() < (start_time + max_wait)):
+ # We only need it to exist.
+ self.myfd = os.open(self.myhardlock, os.O_CREAT|os.O_RDWR,0660)
+ os.close(self.myfd)
+
+ self.add_hardlock_file_to_cleanup()
+ if not os.path.exists(self.myhardlock):
+ raise FileNotFound, "Created lockfile is missing: %(filename)s" % {"filename":self.myhardlock}
+ try:
+ res = os.link(self.myhardlock, self.lockfile)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ #print "lockfile(): Hardlink: Link failed."
+ #print "Exception: ",e
+ pass
+
+ if self.hardlink_is_mine(self.myhardlock, self.lockfile):
+ # We have the lock.
+ if reported_waiting:
+ print
+ return True
+
+ if reported_waiting:
+ writemsg(".")
+ else:
+ reported_waiting = True
+ print
+ print "Waiting on (hardlink) lockfile: (one '.' per 3 seconds)"
+ print "Lockfile: " + self.lockfile
+ time.sleep(3)
+
+ os.unlink(self.myhardlock)
+ return False
+
+ def hard_unlock(self):
+ try:
+ if os.path.exists(self.myhardlock):
+ os.unlink(self.myhardlock)
+ if os.path.exists(self.lockfile):
+ os.unlink(self.lockfile)
+ except SystemExit, e:
+ raise
+ except:
+ writemsg("Something strange happened to our hardlink locks.\n")
+
+ def add_hardlock_file_to_cleanup(self):
+ #mypath = self.normpath(path)
+ if os.path.isdir(self.lockdir) and os.path.isfile(self.myhardlock):
+ self.hardlock_paths[self.lockdir]=self.myhardlock
+
+ def remove_hardlock_file_from_cleanup(self):
+ if self.hardlock_paths.has_key(self.lockdir):
+ del self.hardlock_paths[self.lockdir]
+ print self.hardlock_paths
+
+ def hardlock_name(self, path):
+ mypath=path+"/.hardlock-"+os.uname()[1]+"-"+str(os.getpid())
+ newpath = os.path.normpath(mypath)
+ if len(newpath) > 1:
+ if newpath[1] == "/":
+ newpath = "/"+newpath.lstrip("/")
+
+ return newpath
+
+
+ def hardlink_is_mine(self,link,lock):
+ import stat
+ try:
+ myhls = os.stat(link)
+ mylfs = os.stat(lock)
+ except SystemExit, e:
+ raise
+ except:
+ myhls = None
+ mylfs = None
+
+ if myhls:
+ if myhls[stat.ST_NLINK] == 2:
+ return True
+ if mylfs:
+ if mylfs[stat.ST_INO] == myhls[stat.ST_INO]:
+ return True
+
+ return False
+
+ def hardlink_active(lock):
+ if not os.path.exists(lock):
+ return False
+ # XXXXXXXXXXXXXXXXXXXXXXXXXX
+
+ def clean_my_hardlocks(self):
+ try:
+ for x in self.hardlock_paths.keys():
+ self.hardlock_cleanup(x)
+ except AttributeError:
+ pass
+
+ def hardlock_cleanup(self,path):
+ mypid = str(os.getpid())
+ myhost = os.uname()[1]
+ mydl = os.listdir(path)
+ results = []
+ mycount = 0
+
+ mylist = {}
+ for x in mydl:
+ filepath=path+"/"+x
+ if os.path.isfile(filepath):
+ parts = filepath.split(".hardlock-")
+ if len(parts) == 2:
+ filename = parts[0]
+ hostpid = parts[1].split("-")
+ host = "-".join(hostpid[:-1])
+ pid = hostpid[-1]
+ if not mylist.has_key(filename):
+ mylist[filename] = {}
+
+ if not mylist[filename].has_key(host):
+ mylist[filename][host] = []
+ mylist[filename][host].append(pid)
+ mycount += 1
+ else:
+ mylist[filename][host].append(pid)
+ mycount += 1
+
+
+ results.append("Found %(count)s locks" % {"count":mycount})
+ for x in mylist.keys():
+ if mylist[x].has_key(myhost):
+ mylockname = self.hardlock_name(x)
+ if self.hardlink_is_mine(mylockname, self.lockfile) or \
+ not os.path.exists(self.lockfile):
+ for y in mylist[x].keys():
+ for z in mylist[x][y]:
+ filename = x+".hardlock-"+y+"-"+z
+ if filename == mylockname:
+ self.hard_unlock()
+ continue
+ try:
+ # We're sweeping through, unlinking everyone's locks.
+ os.unlink(filename)
+ results.append("Unlinked: " + filename)
+ except SystemExit, e:
+ raise
+ except Exception,e:
+ pass
+ try:
+ os.unlink(x)
+ results.append("Unlinked: " + x)
+ os.unlink(mylockname)
+ results.append("Unlinked: " + mylockname)
+ except SystemExit, e:
+ raise
+ except Exception,e:
+ pass
+ else:
+ try:
+ os.unlink(mylockname)
+ results.append("Unlinked: " + mylockname)
+ except SystemExit, e:
+ raise
+ except Exception,e:
+ pass
+ return results
+
+
+if __name__ == "__main__":
+
+ def lock_work():
+ print
+ for i in range(1,6):
+ print i,time.time()
+ time.sleep(1)
+ print
+
+ print "catalyst_lock.py starting"
+ import time
+ Lock1=LockDir("/tmp/lock_path")
+ print "Lock1 write lock"
+ Lock1.write_lock()
+ Lock1.unlock()
+ Lock1.unlock()
+
+ Lock1.write_lock()
+ print "Lock1 write lock"
+ lock_work()
+ Lock1.unlock()
+ Lock1.read_lock()
+ print "Lock1 read lock"
+ lock_work()
+ Lock1.unlock()
+
+ Lock1.read_lock()
+ print "Lock1 read lock"
+ print "Lock1 write lock"
+ Lock1.write_lock()
+ lock_work()
+ Lock1.unlock()
+ Lock1.write_lock()
+ lock_work()
+ Lock1.unlock()
+ #Lock1.write_lock()
+ #time.sleep(2)
+ #Lock1.unlock()
+ ##Lock1.write_lock()
+ #time.sleep(2)
+ #Lock1.unlock()
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/modules/catalyst_support.py,v 1.52 2005/07/27 20:30:50 rocket Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/modules/catalyst_support.py,v 1.53 2005/08/09 14:12:26 rocket Exp $
import sys,string,os,types,re,signal,traceback,md5,time
selinux_capable = False
try:
myf=open(file,"r")
except:
- raise CatalystError, "Could not open file "+file
+ return -1
+ #raise CatalystError, "Could not open file "+file
for line in myf.readlines():
line = string.replace(line, "\n", "") # drop newline
myline = myline + line
valid_config_file_values.append("PURGE")
valid_config_file_values.append("SNAPCACHE")
valid_config_file_values.append("snapshot_cache")
+valid_config_file_values.append("SEEDCACHE")
verbosity=1
print "!!! catalyst: "+message
print
+class LockInUse(Exception):
+ def __init__(self, message):
+ if message:
+ #(type,value)=sys.exc_info()[:2]
+ #if value!=None:
+ #print
+ #kprint traceback.print_exc(file=sys.stdout)
+ print
+ print "!!! catalyst lock file in use: "+message
+ print
+
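+# catalyst_lock.LockDir.fcntl_lock() raises LockInUse when its non-blocking
+# flock() attempt fails while die_on_failed_lock is set; the catalyst main
+# script catches the exception and aborts.
+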
def die(msg=None):
warn(msg)
sys.exit(1)
sys.stdout.flush()
time.sleep(1)
print
+
+def normpath(mypath):
+ TrailingSlash=False
+ if mypath[-1] == "/":
+ TrailingSlash=True
+ newpath = os.path.normpath(mypath)
+ if len(newpath) > 1:
+ if newpath[:2] == "//":
+ newpath = newpath[1:]
+ if TrailingSlash:
+ newpath=newpath+'/'
+ return newpath
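+
+# Illustrative behaviour of normpath (a sketch, not an exhaustive spec):
+#   normpath("//foo/bar")  -> "/foo/bar"   (leading "//" collapsed)
+#   normpath("/foo//bar/") -> "/foo/bar/"  (trailing slash preserved)
+#   normpath("/foo/bar")   -> "/foo/bar"   (no trailing slash added)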
+
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/modules/embedded_target.py,v 1.13 2005/07/05 21:53:41 wolf31o2 Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/modules/embedded_target.py,v 1.14 2005/08/09 14:12:26 rocket Exp $
"""
This class works like a 'stage3'. A stage2 tarball is unpacked, but instead
"unbind","remove","empty","clean","capture","clear_autoresume"]
def set_stage_path(self):
- self.settings["stage_path"]=self.settings["chroot_path"]+"/tmp/mergeroot"
+ self.settings["stage_path"]=normpath(self.settings["chroot_path"]+"/tmp/mergeroot")
print "embedded stage path is "+self.settings["stage_path"]
def set_root_path(self):
- self.settings["root_path"]="/tmp/mergeroot"
+ self.settings["root_path"]=normpath("/tmp/mergeroot")
print "embedded root path is "+self.settings["root_path"]
def set_dest_path(self):
- self.settings["destpath"]=self.settings["chroot_path"]+self.settings["root_path"]
+ self.settings["destpath"]=normpath(self.settings["chroot_path"]+self.settings["root_path"])
def register(foo):
foo.update({"embedded":embedded_target})
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/modules/generic_stage_target.py,v 1.52 2005/07/27 20:30:50 rocket Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/modules/generic_stage_target.py,v 1.53 2005/08/09 14:12:26 rocket Exp $
"""
This class does all of the chroot setup, copying of files, etc. It is
from generic_target import *
from stat import *
import pdb
+import catalyst_lock
class generic_stage_target(generic_target):
def __init__(self,myspec,addlargs):
# set paths
self.set_snapshot_path()
self.set_snapcache_path()
+ self.set_root_path()
self.set_source_path()
self.set_chroot_path()
self.set_autoresume_path()
- self.set_root_path()
self.set_dest_path()
self.set_stage_path()
self.set_target_path()
"-"+self.settings["subarch"]+"-"+self.settings["version_stamp"]
def set_pkgcache_path(self):
- self.settings["pkgcache_path"]=self.settings["storedir"]+"/packages/"+\
- self.settings["target_subpath"]
+ self.settings["pkgcache_path"]=normpath(self.settings["storedir"]+"/packages/"+\
+ self.settings["target_subpath"]+"/")
def set_target_path(self):
- self.settings["target_path"]=self.settings["storedir"]+"/builds/"+self.settings["target_subpath"]+".tar.bz2"
+ self.settings["target_path"]=normpath(self.settings["storedir"]+"/builds/"+\
+ self.settings["target_subpath"]+".tar.bz2")
if self.settings.has_key("AUTORESUME") \
and os.path.exists(self.settings["autoresume_path"]+"setup_target_path"):
print "Resume point detected, skipping target path setup operation..."
if not os.path.exists(self.settings["storedir"]+"/builds/"):
os.makedirs(self.settings["storedir"]+"/builds/")
+
def set_archscript(self):
if self.settings.has_key(self.settings["spec_prefix"]+"/archscript"):
print "\nWarning!!! "
def set_cdtar(self):
if self.settings.has_key(self.settings["spec_prefix"]+"/cdtar"):
- self.settings["cdtar"]=self.settings[self.settings["spec_prefix"]+"/cdtar"]
+ self.settings["cdtar"]=normpath(self.settings[self.settings["spec_prefix"]+"/cdtar"])
del self.settings[self.settings["spec_prefix"]+"/cdtar"]
def set_iso(self):
if self.settings.has_key(self.settings["spec_prefix"]+"/iso"):
- self.settings["iso"]=self.settings[self.settings["spec_prefix"]+"/iso"]
+ self.settings["iso"]=normpath(self.settings[self.settings["spec_prefix"]+"/iso"])
del self.settings[self.settings["spec_prefix"]+"/iso"]
def set_fstype(self):
del self.settings[self.settings["spec_prefix"]+"/fsops"]
def set_source_path(self):
- self.settings["source_path"]=self.settings["storedir"]+"/builds/"+self.settings["source_subpath"]+".tar.bz2"
- if os.path.isfile(self.settings["source_path"]):
- if os.path.exists(self.settings["source_path"]):
- self.settings["source_path_md5sum"]=calc_md5(self.settings["source_path"])
+ if self.settings.has_key("SEEDCACHE") and os.path.isdir(normpath(self.settings["storedir"]+"/tmp/"+self.settings["source_subpath"]+"/")):
+ self.settings["source_path"]=normpath(self.settings["storedir"]+"/tmp/"+self.settings["source_subpath"]+"/")
+ else:
+ self.settings["source_path"]=normpath(self.settings["storedir"]+"/builds/"+self.settings["source_subpath"]+".tar.bz2")
+
+ if os.path.isfile(self.settings["source_path"]):
+ if os.path.exists(self.settings["source_path"]):
+ self.settings["source_path_md5sum"]=calc_md5(self.settings["source_path"])
+ if os.path.isdir(self.settings["source_path"]):
+ print "Source path set to "+self.settings["source_path"]
+			print "\tIf this is not desired, remove this directory or turn off seedcache in the options of catalyst.conf"
+			print "\tThe source path will then be "+normpath(self.settings["storedir"]+"/builds/"+self.settings["source_subpath"]+".tar.bz2\n")
+ else:
+ print "Source path set to "+self.settings["source_path"]
+
def set_dest_path(self):
- self.settings["destpath"]=self.settings["chroot_path"]
+ self.settings["destpath"]=normpath(self.settings["chroot_path"])
def set_cleanables(self):
self.settings["cleanables"]=["/etc/resolv.conf","/var/tmp/*","/tmp/*","/root/*",\
"/usr/portage"]
def set_snapshot_path(self):
- self.settings["snapshot_path"]=self.settings["storedir"]+"/snapshots/portage-"+self.settings["snapshot"]+".tar.bz2"
+ self.settings["snapshot_path"]=normpath(self.settings["storedir"]+"/snapshots/portage-"+self.settings["snapshot"]+".tar.bz2")
if os.path.exists(self.settings["snapshot_path"]):
- self.settings["snapshot_path_md5sum"]=calc_md5(self.settings["snapshot_path"])
+ self.settings["snapshot_path_md5sum"]=calc_md5(self.settings["snapshot_path"])
def set_snapcache_path(self):
if self.settings.has_key("SNAPCACHE"):
- self.settings["snapshot_cache_path"]=self.settings["snapshot_cache"]+"/"+self.settings["snapshot"]+"/"
- print "Caching snapshot to " + self.settings["snapshot_cache_path"]
+ self.settings["snapshot_cache_path"]=normpath(self.settings["snapshot_cache"]+"/"+self.settings["snapshot"]+"/")
+ self.snapcache_lock=catalyst_lock.LockDir(self.settings["snapshot_cache_path"])
+ print "Caching snapshot to " + self.settings["snapshot_cache_path"]
def set_chroot_path(self):
- self.settings["chroot_path"]=self.settings["storedir"]+"/tmp/"+self.settings["target_subpath"]+"/"
+ # Note the trailing slash is very important and things would break without it
+ self.settings["chroot_path"]=normpath(self.settings["storedir"]+"/tmp/"+self.settings["target_subpath"]+"/")
+ self.chroot_lock=catalyst_lock.LockDir(self.settings["chroot_path"])
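+		# The chroot lock is taken (write_lock) at the start of run() and
+		# released once the action sequence completes; see run() below.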
def set_autoresume_path(self):
- self.settings["autoresume_path"]=self.settings["storedir"]+"/tmp/"+self.settings["rel_type"]+"/"+\
- ".autoresume-"+self.settings["target"]+"-"+self.settings["subarch"]+"-"+self.settings["version_stamp"]+"/"
+ self.settings["autoresume_path"]=normpath(self.settings["storedir"]+"/tmp/"+\
+ self.settings["rel_type"]+"/"+".autoresume-"+self.settings["target"]+\
+ "-"+self.settings["subarch"]+"-"+self.settings["version_stamp"]+"/")
print "The autoresume path is " + self.settings["autoresume_path"]
if not os.path.exists(self.settings["autoresume_path"]):
- os.makedirs(self.settings["autoresume_path"],0755)
+ os.makedirs(self.settings["autoresume_path"],0755)
def set_controller_file(self):
- self.settings["controller_file"]=self.settings["sharedir"]+"/targets/"+self.settings["target"]+"/"+self.settings["target"]+"-controller.sh"
+ self.settings["controller_file"]=normpath(self.settings["sharedir"]+"/targets/"+self.settings["target"]+"/"+self.settings["target"]+"-controller.sh")
def set_iso_volume_id(self):
if self.settings.has_key(self.settings["spec_prefix"]+"/volid"):
- self.settings["iso_volume_id"] = string.join(self.settings[self.settings["spec_prefix"]+"/volid"])
- if len(self.settings["iso_volume_id"])>32:
- raise CatalystError,"ISO VOLUME ID: volid must not exceed 32 characters."
+ self.settings["iso_volume_id"] = string.join(self.settings[self.settings["spec_prefix"]+"/volid"])
+ if len(self.settings["iso_volume_id"])>32:
+ raise CatalystError,"ISO VOLUME ID: volid must not exceed 32 characters."
else:
- self.settings["iso_volume_id"] = "catalyst " + self.settings["snapshot"]
+ self.settings["iso_volume_id"] = "catalyst " + self.settings["snapshot"]
def set_action_sequence(self):
#Default action sequence for run method
- self.settings["action_sequence"]=["dir_setup","unpack","unpack_snapshot",\
+ self.settings["action_sequence"]=["unpack","unpack_snapshot",\
"config_profile_link","setup_confdir","bind","chroot_setup",\
"setup_environment","run_local","preclean","unbind","clean","capture","clear_autoresume"]
del self.settings[self.settings["spec_prefix"]+"/use"]
def set_stage_path(self):
- self.settings["stage_path"]=self.settings["chroot_path"]
+ self.settings["stage_path"]=normpath(self.settings["chroot_path"])
def set_mounts(self):
pass
except CatalystError:
raise CatalystError, "Unable to auto-unbind "+x
- def dir_setup(self):
- if self.settings.has_key("AUTORESUME") \
- and os.path.exists(self.settings["autoresume_path"]+"dir_setup"):
- print "Resume point detected, skipping dir_setup operation..."
- else:
- print "Setting up directories..."
- self.mount_safety_check()
-
- self.clear_chroot()
+ def unpack(self):
+ unpack=True
+ invalid_snapshot=False
- if not os.path.exists(self.settings["chroot_path"]+"/tmp"):
- os.makedirs(self.settings["chroot_path"]+"/tmp",1777)
-
- if not os.path.exists(self.settings["chroot_path"]):
- os.makedirs(self.settings["chroot_path"],0755)
+ clst_unpack_md5sum=read_from_clst(self.settings["autoresume_path"]+"unpack")
- if self.settings.has_key("PKGCACHE"):
- if not os.path.exists(self.settings["pkgcache_path"]):
- os.makedirs(self.settings["pkgcache_path"],0755)
-
- touch(self.settings["autoresume_path"]+"dir_setup")
+ if self.settings.has_key("SNAPCACHE"):
+ unpack_cmd="rsync -a --delete "+self.settings["source_path"]+" "+self.settings["chroot_path"]
+ display_msg="\nStarting rsync from "+self.settings["source_path"]+"\nto "+\
+ self.settings["chroot_path"]+" (This may take some time) ...\n"
+ error_msg="Rsync of "+self.settings["source_path"]+" to "+self.settings["chroot_path"]+" failed."
+		else:
+			unpack_cmd="tar xjpf "+self.settings["source_path"]+" -C "+self.settings["chroot_path"]
+			display_msg="\nStarting tar extract from "+self.settings["source_path"]+"\nto "+\
+				self.settings["chroot_path"]+" (This may take some time) ...\n"
+			error_msg="Tarball extraction of "+self.settings["source_path"]+" to "+self.settings["chroot_path"]+" failed."
- def unpack(self):
- if os.path.exists(self.settings["autoresume_path"]+"unpack"):
- clst_unpack_md5sum=read_from_clst(self.settings["autoresume_path"]+"unpack")
+
+ if self.settings.has_key("AUTORESUME") and self.settings.has_key("SNAPCACHE") \
+ and os.path.exists(self.settings["autoresume_path"]+"unpack"):
+ print "Resume point detected, skipping unpack operation..."
+ unpack=False
+ elif self.settings.has_key("AUTORESUME") and self.settings.has_key("source_path_md5sum"):
+ if self.settings["source_path_md5sum"] != clst_unpack_md5sum:
+ invalid_snapshot=True
- if self.settings.has_key("AUTORESUME") \
- and os.path.exists(self.settings["autoresume_path"]+"unpack") \
- and self.settings["source_path_md5sum"] != clst_unpack_md5sum:
- print "InValid Resume point detected, cleaning up ..."
- os.remove(self.settings["autoresume_path"]+"dir_setup")
- os.remove(self.settings["autoresume_path"]+"unpack")
- self.dir_setup()
- if self.settings.has_key("AUTORESUME") \
- and os.path.exists(self.settings["autoresume_path"]+"unpack") \
- and self.settings["source_path_md5sum"] == clst_unpack_md5sum:
- print "Valid Resume point detected, skipping unpack ..."
- else:
- print "Unpacking stage..."
- if not os.path.exists(self.settings["chroot_path"]):
- os.makedirs(self.settings["chroot_path"])
+ if unpack:
+ self.mount_safety_check()
+
+ if invalid_snapshot:
+				print "Invalid Resume point detected, cleaning up ..."
+ #os.remove(self.settings["autoresume_path"]+"dir_setup")
+ os.remove(self.settings["autoresume_path"]+"unpack")
+ self.clear_chroot()
+ #self.dir_setup()
+
+ if not os.path.exists(self.settings["chroot_path"]):
+ os.makedirs(self.settings["chroot_path"])
- cmd("tar xjpf "+self.settings["source_path"]+" -C "+self.settings["chroot_path"],\
- "Error unpacking ")
+ if not os.path.exists(self.settings["chroot_path"]+"/tmp"):
+ os.makedirs(self.settings["chroot_path"]+"/tmp",1777)
+
+ if self.settings.has_key("PKGCACHE"):
+ if not os.path.exists(self.settings["pkgcache_path"]):
+ os.makedirs(self.settings["pkgcache_path"],0755)
+
+ print display_msg
+ print unpack_cmd
+ cmd(unpack_cmd,error_msg)
- if self.settings.has_key("source_path_md5sum"):
- myf=open(self.settings["autoresume_path"]+"unpack","w")
- myf.write(self.settings["source_path_md5sum"])
- myf.close()
+ if self.settings.has_key("source_path_md5sum"):
+ myf=open(self.settings["autoresume_path"]+"unpack","w")
+ myf.write(self.settings["source_path_md5sum"])
+ myf.close()
+ else:
+ touch(self.settings["autoresume_path"]+"unpack")
- def unpack_snapshot(self):
-
- if self.settings.has_key("SNAPCACHE"):
- if os.path.exists(self.settings["snapshot_cache_path"]+"catalyst-md5sum"):
- snapshot_cache_md5sum=read_from_clst(self.settings["snapshot_cache_path"]+"catalyst-md5sum")
- if self.settings["snapshot_path_md5sum"] == snapshot_cache_md5sum:
- print "Valid snapshot cache, skipping unpack of portage tree..."
- else:
- print "Cleaning up invalid cache at "+self.settings["snapshot_cache_path"]
- cmd("rm -rf "+self.settings["snapshot_cache_path"],\
- "Error removing existing snapshot directory.")
- if not os.path.exists(self.settings["snapshot_cache_path"]):
- os.makedirs(self.settings["snapshot_cache_path"],0755)
- print "Unpacking portage tree to snapshot cache ..."
- cmd("tar xjpf "+self.settings["snapshot_path"]+" -C "+\
- self.settings["snapshot_cache_path"],"Error unpacking snapshot")
- myf=open(self.settings["snapshot_cache_path"]+"catalyst-md5sum","w")
- myf.write(self.settings["snapshot_path_md5sum"])
- myf.close()
- else:
- if os.path.exists(self.settings["snapshot_cache_path"]):
- print "Cleaning up existing snapshot cache ..."
- cmd("rm -rf "+self.settings["snapshot_cache_path"],\
- "Error removing existing snapshot directory.")
-
- if not os.path.exists(self.settings["snapshot_cache_path"]):
- os.makedirs(self.settings["snapshot_cache_path"],0755)
- print "Unpacking portage tree to snapshot cache ..."
- cmd("tar xjpf "+self.settings["snapshot_path"]+" -C "+\
- self.settings["snapshot_cache_path"],"Error unpacking snapshot")
- myf=open(self.settings["snapshot_cache_path"]+"catalyst-md5sum","w")
- myf.write(self.settings["snapshot_path_md5sum"])
- myf.close()
- else:
- if os.path.exists(self.settings["autoresume_path"]+"unpack_portage"):
- clst_unpack_portage_md5sum=read_from_clst(self.settings["autoresume_path"]+"unpack_portage")
+ def unpack_snapshot(self):
+ unpack=True
+		snapshot_md5sum=read_from_clst(self.settings["autoresume_path"]+"unpack_portage")
+
+		if self.settings.has_key("SNAPCACHE"):
+			snapshot_cache_md5sum=read_from_clst(self.settings["snapshot_cache_path"]+"catalyst-md5sum")
+			destdir=self.settings["snapshot_cache_path"]
+ unpack_cmd="tar xjpf "+self.settings["snapshot_path"]+" -C "+destdir
+ unpack_errmsg="Error unpacking snapshot"
+ cleanup_msg="Cleaning up invalid snapshot cache at \n\t"+self.settings["snapshot_cache_path"]+" (This can take a long time)..."
+ cleanup_errmsg="Error removing existing snapshot cache directory."
+ self.snapshot_lock_object=self.snapcache_lock
+ if self.settings["snapshot_path_md5sum"] == snapshot_cache_md5sum:
+ print "Valid snapshot cache, skipping unpack of portage tree ..."
+ unpack=False
+
+ else:
+ destdir=normpath(self.settings["chroot_path"]+"/usr/portage")
+ cleanup_errmsg="Error removing existing snapshot directory."
+ cleanup_msg="Cleaning up existing portage tree (This can take a long time) ..."
+ unpack_cmd="tar xjpf "+self.settings["snapshot_path"]+" -C "+self.settings["chroot_path"]+"/usr"
+ unpack_errmsg="Error unpacking snapshot"
+ self.snapshot_lock_object=self.snapshot_lock
+
if self.settings.has_key("AUTORESUME") \
and os.path.exists(self.settings["chroot_path"]+"/usr/portage/") \
and os.path.exists(self.settings["autoresume_path"]+"unpack_portage") \
			and self.settings["snapshot_path_md5sum"] == snapshot_md5sum:
print "Valid Resume point detected, skipping unpack of portage tree..."
- else:
- if os.path.exists(self.settings["chroot_path"]+"/usr/portage"):
- print "Cleaning up existing portage tree ..."
- cmd("rm -rf "+self.settings["chroot_path"]+"/usr/portage",\
- "Error removing existing snapshot directory.")
+ unpack=False
+
+ if unpack:
+ self.snapshot_lock_object.write_lock()
+ if os.path.exists(destdir):
+ print cleanup_msg
+ cleanup_cmd="rm -rf "+destdir
+ cmd(cleanup_cmd,cleanup_errmsg)
+ if not os.path.exists(destdir):
+ os.makedirs(destdir,0755)
+
+ print "Unpacking portage tree (This can take a long time) ..."
+ cmd(unpack_cmd,unpack_errmsg)
+
+ if self.settings.has_key("SNAPCACHE"):
+ myf=open(self.settings["snapshot_cache_path"]+"catalyst-md5sum","w")
+ myf.write(self.settings["snapshot_path_md5sum"])
+ myf.close()
- print "Unpacking portage tree ..."
- cmd("tar xjpf "+self.settings["snapshot_path"]+" -C "+\
- self.settings["chroot_path"]+"/usr","Error unpacking snapshot")
-
+ else:
print "Setting snapshot autoresume point"
myf=open(self.settings["autoresume_path"]+"unpack_portage","w")
myf.write(self.settings["snapshot_path_md5sum"])
myf.close()
+
+ self.snapshot_lock_object.unlock()
def config_profile_link(self):
if self.settings.has_key("AUTORESUME") \
# Always copy over the overlay incase it has changed
if self.settings.has_key(self.settings["spec_prefix"]+"/root_overlay"):
print "Copying root overlay ..."
- cmd("rsync -a "+self.settings[self.settings["spec_prefix"]+"/root_overlay"]+"/* "+\
+ cmd("rsync -a "+self.settings[self.settings["spec_prefix"]+"/root_overlay"]+"/ "+\
self.settings["chroot_path"], self.settings["spec_prefix"]+"/root_overlay copy failed.")
def bind(self):
os.makedirs(self.mountmap[x],0755)
src=self.mountmap[x]
+ if self.settings.has_key("SNAPCACHE") and x == "/usr/portage":
+ self.snapshot_lock_object.read_lock()
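+			# A shared (read) lock lets several targets bind-mount the same
+			# snapshot cache at once, while unpack_snapshot()'s write lock
+			# cannot wipe the cache out from under a running build.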
retval=os.system("mount --bind "+src+" "+self.settings["chroot_path"]+x)
if retval!=0:
self.unbind()
warn("Couldn't umount bind mount: "+mypath+x)
# keep trying to umount the others, to minimize damage if developer makes a mistake
+ if self.settings.has_key("SNAPCACHE") and x == "/usr/portage":
+ self.snapshot_lock_object.unlock()
if ouch:
"""
if any bind mounts really failed, then we need to raise
try:
if os.path.exists(self.settings["controller_file"]):
cmd("/bin/bash "+self.settings["controller_file"]+" clean",\
- "Clean runscript failed.")
+ "Clean failed.")
touch(self.settings["autoresume_path"]+"remove")
except:
self.unbind()
os.environ[varname]=string.join(self.settings[x])
def run(self):
+ self.chroot_lock.write_lock()
+
for x in self.settings["action_sequence"]:
print "Running action sequence: "+x
sys.stdout.flush()
except:
self.unbind()
raise
- #if x == 'chroot_setup':
- # try:
- # self.chroot_setup()
- # except:
- # self.unbind()
- # raise
- #else:
- # apply(getattr(self,x))
+
+ self.chroot_lock.unlock()
def unmerge(self):
if self.settings.has_key("AUTORESUME") \
else:
if self.settings.has_key(self.settings["spec_prefix"]+"/overlay") \
and os.path.exists(self.settings["spec_prefix"]+"/overlay"):
- cmd("rsync -a "+self.settings[self.settings["spec_prefix"]+"/overlay"]+"/* "+\
+ cmd("rsync -a "+self.settings[self.settings["spec_prefix"]+"/overlay"]+"/ "+\
self.settings["target_path"], self.settings["spec_prefix"]+"overlay copy failed.")
touch(self.settings["autoresume_path"]+"setup_overlay")
print "clearing package cache ..."
self.clear_packages()
+#vim: ts=4 sw=4 sta et sts=4 ai
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/modules/grp_target.py,v 1.13 2005/07/19 21:44:14 rocket Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/modules/grp_target.py,v 1.14 2005/08/09 14:12:26 rocket Exp $
"""
The builder class for GRP (Gentoo Reference Platform) builds.
raise CatalystError,"GRP build aborting due to error."
def set_action_sequence(self):
- self.settings["action_sequence"]=["dir_setup","unpack","unpack_snapshot",\
+ self.settings["action_sequence"]=["unpack","unpack_snapshot",\
"config_profile_link","setup_confdir","bind","chroot_setup",\
"setup_environment","run_local","unmerge","unbind",\
"remove","empty","clear_autoresume"]
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/modules/livecd_stage1_target.py,v 1.14 2005/07/05 21:53:41 wolf31o2 Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/modules/livecd_stage1_target.py,v 1.15 2005/08/09 14:12:26 rocket Exp $
"""
Builder class for LiveCD stage1.
generic_stage_target.__init__(self,spec,addlargs)
def set_action_sequence(self):
- self.settings["action_sequence"]=["dir_setup","unpack","unpack_snapshot",\
+ self.settings["action_sequence"]=["unpack","unpack_snapshot",\
"config_profile_link","setup_confdir","portage_overlay",\
"bind","chroot_setup","setup_environment","build_packages",\
"unbind", "clean","clear_autoresume"]
def set_target_path(self):
- self.settings["target_path"]=self.settings["storedir"]+"/builds/"+self.settings["target_subpath"]
+ self.settings["target_path"]=normpath(self.settings["storedir"]+"/builds/"+self.settings["target_subpath"])
if self.settings.has_key("AUTORESUME") \
and os.path.exists(self.settings["autoresume_path"]+"setup_target_path"):
print "Resume point detected, skipping target path setup operation..."
def set_pkgcache_path(self):
if self.settings.has_key("pkgcache_path"):
if type(self.settings["pkgcache_path"]) != types.StringType:
- self.settings["pkgcache_path"]=string.join(self.settings["pkgcache_path"])
+ self.settings["pkgcache_path"]=normpath(string.join(self.settings["pkgcache_path"]))
else:
generic_stage_target.set_pkgcache_path(self)
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/modules/livecd_stage2_target.py,v 1.45 2005/07/05 21:53:41 wolf31o2 Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/modules/livecd_stage2_target.py,v 1.46 2005/08/09 14:12:26 rocket Exp $
"""
Builder class for a LiveCD stage2 build.
file_locate(self.settings, ["cdtar","controller_file"])
def set_source_path(self):
- self.settings["source_path"]=self.settings["storedir"]+"/builds/"+self.settings["source_subpath"]+".tar.bz2"
+ self.settings["source_path"]=normpath(self.settings["storedir"]+"/builds/"+self.settings["source_subpath"]+".tar.bz2")
if os.path.isfile(self.settings["source_path"]):
- if os.path.exists(self.settings["source_path"]):
- self.settings["source_path_md5sum"]=calc_md5(self.settings["source_path"])
+ self.settings["source_path_md5sum"]=calc_md5(self.settings["source_path"])
else:
- self.settings["source_path"]=self.settings["storedir"]+"/tmp/"+self.settings["source_subpath"]
+ self.settings["source_path"]=normpath(self.settings["storedir"]+"/tmp/"+self.settings["source_subpath"])
+ if not os.path.exists(self.settings["source_path"]):
+ raise CatalystError,"Source Path: "+self.settings["source_path"]+" does not exist."
def set_spec_prefix(self):
self.settings["spec_prefix"]="livecd"
def set_target_path(self):
- self.settings["target_path"]=self.settings["storedir"]+"/builds/"+self.settings["target_subpath"]
+ self.settings["target_path"]=normpath(self.settings["storedir"]+"/builds/"+self.settings["target_subpath"])
if self.settings.has_key("AUTORESUME") \
and os.path.exists(self.settings["autoresume_path"]+"setup_target_path"):
print "Resume point detected, skipping target path setup operation..."
if not os.path.exists(self.settings["target_path"]):
os.makedirs(self.settings["target_path"])
- def unpack(self):
- if not os.path.isdir(self.settings["source_path"]):
- generic_stage_target.unpack(self)
- else:
- if self.settings.has_key("AUTORESUME") \
- and os.path.exists(self.settings["autoresume_path"]+"unpack"):
- print "Resume point detected, skipping unpack operation..."
- else:
- if not os.path.exists(self.settings["chroot_path"]):
- os.makedirs(self.settings["chroot_path"])
-
- print "Copying livecd-stage1 result to new livecd-stage2 work directory..."
- cmd("rsync -a --delete "+self.settings["source_path"]+"/* "+self.settings["chroot_path"],\
- "Error copying initial livecd-stage2")
- touch(self.settings["autoresume_path"]+"unpack")
+# def unpack(self):
+# if not os.path.isdir(self.settings["source_path"]):
+# generic_stage_target.unpack(self)
+# else:
+# if self.settings.has_key("AUTORESUME") \
+# and os.path.exists(self.settings["autoresume_path"]+"unpack"):
+# print "Resume point detected, skipping unpack operation..."
+# else:
+# if not os.path.exists(self.settings["chroot_path"]):
+# os.makedirs(self.settings["chroot_path"])
+#
+# print "Copying livecd-stage1 result to new livecd-stage2 work directory..."
+# cmd("rsync -a --delete "+self.settings["source_path"]+"/* "+self.settings["chroot_path"],\
+# "Error copying initial livecd-stage2")
+# touch(self.settings["autoresume_path"]+"unpack")
def run_local(self):
# what modules do we want to blacklist?
myf.close()
def set_action_sequence(self):
- self.settings["action_sequence"]=["dir_setup","unpack","unpack_snapshot",\
+ self.settings["action_sequence"]=["unpack","unpack_snapshot",\
"config_profile_link","setup_confdir","portage_overlay",\
"bind","chroot_setup","setup_environment","run_local",\
"build_kernel","bootloader","preclean","livecd_update",
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/modules/netboot_target.py,v 1.4 2005/07/05 21:53:41 wolf31o2 Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/modules/netboot_target.py,v 1.5 2005/08/09 14:12:26 rocket Exp $
"""
Builder class for a netboot build.
def set_dest_path(self):
#destpath=self.settings["chroot_path"]+self.settings["root_path"]
- destpath=self.settings["chroot_path"]+"/tmp/image"
+ destpath=normpath(self.settings["chroot_path"]+"/tmp/image")
# def build_packages(self):
# # build packages
def set_action_sequence(self):
- self.settings["action_sequence"]=["dir_setup","unpack","unpack_snapshot",
+ self.settings["action_sequence"]=["unpack","unpack_snapshot",
"config_profile_link","setup_confdir","bind","chroot_setup",\
"setup_environment","build_packages","build_busybox",\
"build_kernel","copy_files_to_image",\
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/modules/snapshot_target.py,v 1.11 2005/07/05 21:53:41 wolf31o2 Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/modules/snapshot_target.py,v 1.12 2005/08/09 14:12:26 rocket Exp $
"""
Builder class for snapshots.
self.settings=myspec
self.settings["target_subpath"]="portage"
st=self.settings["storedir"]
- self.settings["snapshot_path"]=st+"/snapshots/portage-"+self.settings["version_stamp"]\
- +".tar.bz2"
- self.settings["tmp_path"]=st+"/tmp/"+self.settings["target_subpath"]
+ self.settings["snapshot_path"]=normpath(st+"/snapshots/portage-"+self.settings["version_stamp"]\
+ +".tar.bz2")
+ self.settings["tmp_path"]=normpath(st+"/tmp/"+self.settings["target_subpath"])
def setup(self):
- x=self.settings["storedir"]+"/snapshots"
+ x=normpath(self.settings["storedir"]+"/snapshots")
if not os.path.exists(x):
os.makedirs(x)
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/modules/stage1_target.py,v 1.9 2005/07/05 21:53:41 wolf31o2 Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/modules/stage1_target.py,v 1.10 2005/08/09 14:12:26 rocket Exp $
"""
Builder class for a stage1 installation tarball build.
generic_stage_target.__init__(self,spec,addlargs)
def set_stage_path(self):
- self.settings["stage_path"]=self.settings["chroot_path"]+self.settings["root_path"]
+ self.settings["stage_path"]=normpath(self.settings["chroot_path"]+self.settings["root_path"])
print "stage1 stage path is "+self.settings["stage_path"]
def set_root_path(self):
# ROOT= variable for emerges
- self.settings["root_path"]="/tmp/stage1root"
+ self.settings["root_path"]=normpath("/tmp/stage1root")
print "stage1 root path is "+self.settings["root_path"]
def set_dest_path(self):
- self.settings["destpath"]=self.settings["chroot_path"]+self.settings["root_path"]
+ self.settings["destpath"]=normpath(self.settings["chroot_path"]+self.settings["root_path"])
def set_cleanables(self):
generic_stage_target.set_cleanables(self)
self.settings["cleanables"].extend(["/usr/share/gettext","/usr/lib/python2.2/test", "/usr/lib/python2.2/encodings","/usr/lib/python2.2/email", "/usr/lib/python2.2/lib-tk","/usr/share/zoneinfo"])
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/modules/stage2_target.py,v 1.4 2005/07/05 21:53:41 wolf31o2 Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/modules/stage2_target.py,v 1.5 2005/08/09 14:12:26 rocket Exp $
"""
Builder class for a stage2 installation tarball build.
self.required_values=[]
self.valid_values=[]
generic_stage_target.__init__(self,spec,addlargs)
+ def set_root_path(self):
+ # ROOT= variable for emerges
+ self.settings["root_path"]=normpath("/tmp/stage1root")
+
+ def set_source_path(self):
+ self.settings["source_path"]=normpath(self.settings["storedir"]+"/tmp/"+self.settings["source_subpath"]+"/"+self.settings["root_path"]+"/")
+
+		# reset the root path so the preclean doesn't fail
+ generic_stage_target.set_root_path(self)
+
+ if os.path.isdir(self.settings["source_path"]):
+ print "\nUsing seed-stage from "+self.settings["source_path"]
+ print "Delete this folder if you wish to use a seed stage tarball instead\n"
+ else:
+ self.settings["source_path"]=normpath(self.settings["storedir"]+"/builds/"+self.settings["source_subpath"]+".tar.bz2")
+ if os.path.isfile(self.settings["source_path"]):
+ if os.path.exists(self.settings["source_path"]):
+ self.settings["source_path_md5sum"]=calc_md5(self.settings["source_path"])
def register(foo):
foo.update({"stage2":stage2_target})
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/modules/stage4_target.py,v 1.8 2005/07/05 21:53:41 wolf31o2 Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/modules/stage4_target.py,v 1.9 2005/08/09 14:12:26 rocket Exp $
"""
Builder class for stage4.
def set_pkgcache_path(self):
if self.settings.has_key("pkgcache_path"):
if type(self.settings["pkgcache_path"]) != types.StringType:
- self.settings["pkgcache_path"]=string.join(self.settings["pkgcache_path"])
+ self.settings["pkgcache_path"]=normpath(string.join(self.settings["pkgcache_path"]))
else:
generic_stage_target.set_pkgcache_path(self)
self.settings["cleanables"]=["/var/tmp/*","/tmp/*"]
def set_action_sequence(self):
- self.settings["action_sequence"]=["dir_setup","unpack","unpack_snapshot",\
+ self.settings["action_sequence"]=["unpack","unpack_snapshot",\
"config_profile_link","setup_confdir","portage_overlay",\
"bind","chroot_setup","setup_environment","build_packages",\
"build_kernel","bootloader","root_overlay","fsscript",
# Distributed under the terms of the GNU General Public License v2
copy_to_chroot(){
local file_name=$(basename ${1})
+ local dest_dir=${clst_chroot_path}${2}
if [ "${2}" != "" ]
then
- echo "copying ${file_name} to ${clst_chroot_path}/${2}"
- cp -a ${1} ${clst_chroot_path}/${2}
- chmod 755 ${clst_chroot_path}/${2}/${file_name}
+ echo "copying ${file_name} to ${dest_dir}"
+ mkdir -p ${dest_dir}
+ cp -a ${1} ${dest_dir}
+ chmod 755 ${dest_dir}/${file_name}
else
echo "copying ${file_name} to ${clst_chroot_path}/tmp"
+		mkdir -p ${clst_chroot_path}/tmp
cp -a ${1} ${clst_chroot_path}/tmp
chmod 755 ${clst_chroot_path}/tmp/${file_name}
fi
}
delete_from_chroot(){
- echo "removing ${clst_chroot_path}/${1} from the chroot"
- rm -f ${clst_chroot_path}/${1}
+ echo "removing ${clst_chroot_path}${1} from the chroot"
+ rm -f ${clst_chroot_path}${1}
}
exec_in_chroot(){
# and executes it.
local file_name=$(basename ${1})
- copy_to_chroot ${1} ${2}/tmp
-
- if [ "${2}" != "" ]
+ local subdir=${2#/}
+
+ if [ "${subdir}" != "" ]
then
- chroot_path=${clst_chroot_path}/${2}
+ copy_to_chroot ${1} ${subdir}/tmp/
+ chroot_path=${clst_chroot_path}${subdir}
+ copy_to_chroot ${clst_sharedir}/targets/support/chroot-functions.sh ${subdir}/tmp/
+ echo "Running ${file_name} in chroot ${chroot_path}"
+ ${clst_CHROOT} ${chroot_path} ${subdir}/tmp/${file_name} || exit 1
else
+ copy_to_chroot ${1} tmp/
chroot_path=${clst_chroot_path}
+ copy_to_chroot ${clst_sharedir}/targets/support/chroot-functions.sh tmp/
+ echo "Running ${file_name} in chroot ${chroot_path}"
+ ${clst_CHROOT} ${chroot_path}/ tmp/${file_name} || exit 1
fi
- mkdir -p ${chroot_path}/tmp
- cp -a ${1} ${chroot_path}/tmp/${file_name}
- chmod 755 ${chroot_path}/tmp/${file_name}
- copy_to_chroot ${clst_sharedir}/targets/support/chroot-functions.sh /${2}/tmp
- echo "Running ${file_name} in chroot ${chroot_path}"
- ${clst_CHROOT} ${chroot_path}/ /tmp/${file_name} || exit 1
-
rm -f ${chroot_path}/tmp/${file_name}
- delete_from_chroot /${2}/tmp/chroot-functions.sh
+ if [ "${subdir}" != "" ]
+ then
+ delete_from_chroot ${subdir}/tmp/${file_name}
+ delete_from_chroot ${subdir}/tmp/chroot-functions.sh
+ else
+ delete_from_chroot tmp/chroot-functions.sh
+ delete_from_chroot tmp/${file_name}
+ fi
}
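
# Usage sketch (hypothetical script paths): the first argument is a script on
# the host; the optional second argument is a subdirectory of the chroot to
# chroot into instead, e.g. stage1's /tmp/stage1root:
#   exec_in_chroot /path/to/some-chroot-script.sh
#   exec_in_chroot /path/to/some-chroot-script.sh /tmp/stage1root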