# Copyright 2002-2005 Gentoo Foundation; Distributed under the GPL v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/ChangeLog,v 1.417 2005/12/02 17:05:56 wolf31o2 Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/ChangeLog,v 1.418 2005/12/02 19:37:01 wolf31o2 Exp $
+
+ 02 Dec 2005; Chris Gianelloni <wolf31o2@gentoo.org> catalyst,
+ modules/catalyst_lock.py, modules/catalyst_support.py,
+ modules/embedded_target.py, modules/generic_stage_target.py,
+ modules/grp_target.py, modules/livecd_stage1_target.py,
+ modules/livecd_stage2_target.py, modules/netboot_target.py,
+ modules/stage1_target.py, modules/stage2_target.py,
+ modules/stage4_target.py, modules/tinderbox_target.py:
+ Reverting my nasty spaces->tabs mess-up and pushing out 2.0_rc4 quickly.
02 Dec 2005; Chris Gianelloni <wolf31o2@gentoo.org> catalyst,
files/catalyst.conf, modules/catalyst_lock.py,
#!/usr/bin/python
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/catalyst,v 1.105 2005/12/02 17:05:56 wolf31o2 Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/catalyst,v 1.106 2005/12/02 19:37:02 wolf31o2 Exp $
# Maintained in full by:
# Eric Edgar <rocket@gentoo.org>
import os,sys,imp,string,getopt
import pdb
__maintainer__="Chris Gianelloni <wolf31o2@gentoo.org>"
-__version__="2.0_rc3"
+__version__="2.0_rc4"
conf_values={}
#!/usr/bin/python
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/modules/catalyst_lock.py,v 1.4 2005/12/02 17:05:56 wolf31o2 Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/modules/catalyst_lock.py,v 1.5 2005/12/02 19:37:02 wolf31o2 Exp $
import os
import fcntl
import errno
from catalyst_support import *
def writemsg(mystr):
- sys.stderr.write(mystr)
- sys.stderr.flush()
+ sys.stderr.write(mystr)
+ sys.stderr.flush()
#def normpath(mypath):
#newpath = os.path.normpath(mypath)
#return newpath
class LockDir:
- locking_method=fcntl.flock
- #locking_method="HARDLOCK"
- die_on_failed_lock=True
- lock_dirs_in_use=[]
- def __del__(self):
+ locking_method=fcntl.flock
+ #locking_method="HARDLOCK"
+ die_on_failed_lock=True
+ lock_dirs_in_use=[]
+ def __del__(self):
self.clean_my_hardlocks()
self.delete_lock_from_path_list()
if self.islocked():
self.set_lockfile()
if LockDir.lock_dirs_in_use.count(lockdir)>0:
- raise "This directory already associated with a lock object"
+ raise "This directory already associated with a lock object"
else:
- LockDir.lock_dirs_in_use.append(lockdir)
+ LockDir.lock_dirs_in_use.append(lockdir)
self.hardlock_paths={}
- def delete_lock_from_path_list(self):
+ def delete_lock_from_path_list(self):
i=0
try:
- if LockDir.lock_dirs_in_use:
+ if LockDir.lock_dirs_in_use:
for x in LockDir.lock_dirs_in_use:
- if LockDir.lock_dirs_in_use[i] == self.lockdir:
+ if LockDir.lock_dirs_in_use[i] == self.lockdir:
del LockDir.lock_dirs_in_use[i]
break
- i=i+1
+ i=i+1
except AttributeError:
- pass
+ pass
-
- def islocked(self):
+
+ def islocked(self):
if self.locked:
- return True
+ return True
else:
- return False
+ return False
- def set_gid(self,gid):
+ def set_gid(self,gid):
if not self.islocked():
- #print "setting gid to", gid
- self.gid=gid
+ #print "setting gid to", gid
+ self.gid=gid
- def set_lockdir(self,lockdir):
+ def set_lockdir(self,lockdir):
if not os.path.exists(lockdir):
- os.makedirs(lockdir)
+ os.makedirs(lockdir)
if os.path.isdir(lockdir):
- if not self.islocked():
- if lockdir[-1] == "/":
+ if not self.islocked():
+ if lockdir[-1] == "/":
lockdir=lockdir[:-1]
self.lockdir=normpath(lockdir)
#print "setting lockdir to", self.lockdir
else:
- raise "lockdir not a directory",lockdir
+ raise "lockdir not a directory",lockdir
- def set_lockfilename(self,lockfilename):
+ def set_lockfilename(self,lockfilename):
if not self.islocked():
- self.lockfilename=lockfilename
- #print "setting lockfilename to", self.lockfilename
+ self.lockfilename=lockfilename
+ #print "setting lockfilename to", self.lockfilename
- def set_lockfile(self):
+ def set_lockfile(self):
if not self.islocked():
- self.lockfile=normpath(self.lockdir+self.lockfilename)
- #print "setting lockfile to", self.lockfile
-
- def read_lock(self):
+ self.lockfile=normpath(self.lockdir+self.lockfilename)
+ #print "setting lockfile to", self.lockfile
+
+ def read_lock(self):
if not self.locking_method == "HARDLOCK":
- self.fcntl_lock("read")
+ self.fcntl_lock("read")
else:
- print "HARDLOCKING doesn't support shared-read locks"
- print "using exclusive write locks"
- self.hard_lock()
-
- def write_lock(self):
+ print "HARDLOCKING doesn't support shared-read locks"
+ print "using exclusive write locks"
+ self.hard_lock()
+
+ def write_lock(self):
if not self.locking_method == "HARDLOCK":
- self.fcntl_lock("write")
+ self.fcntl_lock("write")
else:
- self.hard_lock()
+ self.hard_lock()
- def unlock(self):
+ def unlock(self):
if not self.locking_method == "HARDLOCK":
- self.fcntl_unlock()
+ self.fcntl_unlock()
else:
- self.hard_unlock()
+ self.hard_unlock()
- def fcntl_lock(self,locktype):
+ def fcntl_lock(self,locktype):
if self.myfd==None:
if not os.path.exists(os.path.dirname(self.lockdir)):
raise DirectoryNotFound, os.path.dirname(self.lockdir)
if not os.path.exists(self.lockfile):
- old_mask=os.umask(000)
- self.myfd = os.open(self.lockfile, os.O_CREAT|os.O_RDWR,0660)
- try:
- if os.stat(self.lockfile).st_gid != self.gid:
- os.chown(self.lockfile,os.getuid(),self.gid)
- except SystemExit, e:
- raise
- except OSError, e:
- if e[0] == 2: #XXX: No such file or directory
- return self.fcntl_locking(locktype)
+ old_mask=os.umask(000)
+ self.myfd = os.open(self.lockfile, os.O_CREAT|os.O_RDWR,0660)
+ try:
+ if os.stat(self.lockfile).st_gid != self.gid:
+ os.chown(self.lockfile,os.getuid(),self.gid)
+ except SystemExit, e:
+ raise
+ except OSError, e:
+ if e[0] == 2: #XXX: No such file or directory
+ return self.fcntl_locking(locktype)
- else:
- writemsg("Cannot chown a lockfile. This could cause inconvenience later.\n")
+ else:
+ writemsg("Cannot chown a lockfile. This could cause inconvenience later.\n")
- os.umask(old_mask)
- else:
- self.myfd = os.open(self.lockfile, os.O_CREAT|os.O_RDWR,0660)
+ os.umask(old_mask)
+ else:
+ self.myfd = os.open(self.lockfile, os.O_CREAT|os.O_RDWR,0660)
try:
if locktype == "read":
self.locking_method(self.myfd,fcntl.LOCK_SH|fcntl.LOCK_NB)
else:
self.locking_method(self.myfd,fcntl.LOCK_EX|fcntl.LOCK_NB)
except IOError, e:
- if "errno" not in dir(e):
- raise
- if e.errno == errno.EAGAIN:
+ if "errno" not in dir(e):
+ raise
+ if e.errno == errno.EAGAIN:
if not LockDir.die_on_failed_lock:
- # resource temp unavailable; eg, someone beat us to the lock.
- writemsg("waiting for lock on %s\n" % self.lockfile)
-
- # try for the exclusive or shared lock again.
- if locktype == "read":
- self.locking_method(self.myfd,fcntl.LOCK_SH)
- else:
- self.locking_method(self.myfd,fcntl.LOCK_EX)
+ # resource temp unavailable; eg, someone beat us to the lock.
+ writemsg("waiting for lock on %s\n" % self.lockfile)
+
+ # try for the exclusive or shared lock again.
+ if locktype == "read":
+ self.locking_method(self.myfd,fcntl.LOCK_SH)
+ else:
+ self.locking_method(self.myfd,fcntl.LOCK_EX)
else:
- raise LockInUse,self.lockfile
- elif e.errno == errno.ENOLCK:
- pass
- else:
- raise
+ raise LockInUse,self.lockfile
+ elif e.errno == errno.ENOLCK:
+ pass
+ else:
+ raise
if not os.path.exists(self.lockfile):
- os.close(self.myfd)
- self.myfd=None
- #writemsg("lockfile recurse\n")
- self.fcntl_lock(locktype)
+ os.close(self.myfd)
+ self.myfd=None
+ #writemsg("lockfile recurse\n")
+ self.fcntl_lock(locktype)
else:
- self.locked=True
- #writemsg("Lockfile obtained\n")
-
-
- def fcntl_unlock(self):
+ self.locked=True
+ #writemsg("Lockfile obtained\n")
+
+
+ def fcntl_unlock(self):
import fcntl
- unlinkfile = 1
- if not os.path.exists(self.lockfile):
- print "lockfile does not exist '%s'. Skipping Unlock..." % self.lockfile
- if (self.myfd != None):
- try:
- os.close(self.myfd)
+ unlinkfile = 1
+ if not os.path.exists(self.lockfile):
+ print "lockfile does not exist '%s'. Skipping Unlock..." % self.lockfile
+ if (self.myfd != None):
+ try:
+ os.close(self.myfd)
self.myfd=None
- except:
- pass
- return False
- if self.myfd == None:
- return False
-
- try:
- if self.myfd == None:
- self.myfd = os.open(self.lockfile, os.O_WRONLY,0660)
- unlinkfile = 1
- self.locking_method(self.myfd,fcntl.LOCK_UN)
- except SystemExit, e:
- raise
- except Exception, e:
+ except:
+ pass
+ return False
+ if self.myfd == None:
+ return False
+
+ try:
+ if self.myfd == None:
+ self.myfd = os.open(self.lockfile, os.O_WRONLY,0660)
+ unlinkfile = 1
+ self.locking_method(self.myfd,fcntl.LOCK_UN)
+ except SystemExit, e:
+ raise
+ except Exception, e:
os.close(self.myfd)
self.myfd=None
raise IOError, "Failed to unlock file '%s'\n" % self.lockfile
- try:
- # This sleep call was added to allow other processes that are
- # waiting for a lock to be able to grab it before it is deleted.
- # lockfile() already accounts for this situation, however, and
- # the sleep here adds more time than is saved overall, so I am
- # commenting it out until it is proved necessary.
- #time.sleep(0.0001)
- if unlinkfile:
- InUse=False
- try:
- self.locking_method(self.myfd,fcntl.LOCK_EX|fcntl.LOCK_NB)
- except:
- print "Read lock may be in effect. skipping lockfile delete..."
- InUse=True
-
- ### We won the lock, so there isn't competition for it.
- ### We can safely delete the file.
- ###writemsg("Got the lockfile...\n")
- ###writemsg("Unlinking...\n")
- self.locking_method(self.myfd,fcntl.LOCK_UN)
+ try:
+ # This sleep call was added to allow other processes that are
+ # waiting for a lock to be able to grab it before it is deleted.
+ # lockfile() already accounts for this situation, however, and
+ # the sleep here adds more time than is saved overall, so I am
+ # commenting it out until it is proved necessary.
+ #time.sleep(0.0001)
+ if unlinkfile:
+ InUse=False
+ try:
+ self.locking_method(self.myfd,fcntl.LOCK_EX|fcntl.LOCK_NB)
+ except:
+ print "Read lock may be in effect. skipping lockfile delete..."
+ InUse=True
+
+ ### We won the lock, so there isn't competition for it.
+ ### We can safely delete the file.
+ ###writemsg("Got the lockfile...\n")
+ ###writemsg("Unlinking...\n")
+ self.locking_method(self.myfd,fcntl.LOCK_UN)
if not InUse:
os.unlink(self.lockfile)
os.close(self.myfd)
self.myfd=None
#print "Unlinked lockfile..."
- except SystemExit, e:
- raise
- except Exception, e:
- # We really don't care... Someone else has the lock.
- # So it is their problem now.
- print "Failed to get lock... someone took it."
- print str(e)
-
- # why test lockfilename? because we may have been handed an
- # fd originally, and the caller might not like having their
- # open fd closed automatically on them.
- #if type(lockfilename) == types.StringType:
- # os.close(myfd)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ # We really don't care... Someone else has the lock.
+ # So it is their problem now.
+ print "Failed to get lock... someone took it."
+ print str(e)
+
+ # why test lockfilename? because we may have been handed an
+ # fd originally, and the caller might not like having their
+ # open fd closed automatically on them.
+ #if type(lockfilename) == types.StringType:
+ # os.close(myfd)
- if (self.myfd != None):
- os.close(self.myfd)
- self.myfd=None
+ if (self.myfd != None):
+ os.close(self.myfd)
+ self.myfd=None
self.locked=False
time.sleep(.0001)
- def hard_lock(self,max_wait=14400):
- """Does the NFS hardlink shuffle to ensure locking on the disk.
- We create a PRIVATE lockfile that is just a placeholder on the disk.
- Then we HARDLINK the real lockfile to that private file.
- If our file has 2 references, then we have the lock. :)
- Otherwise we lather, rinse, and repeat.
- We default to a 4 hour timeout.
- """
+ def hard_lock(self,max_wait=14400):
+ """Does the NFS hardlink shuffle to ensure locking on the disk.
+ We create a PRIVATE lockfile that is just a placeholder on the disk.
+ Then we HARDLINK the real lockfile to that private file.
+ If our file has 2 references, then we have the lock. :)
+ Otherwise we lather, rinse, and repeat.
+ We default to a 4 hour timeout.
+ """
- self.myhardlock = self.hardlock_name(self.lockdir)
+ self.myhardlock = self.hardlock_name(self.lockdir)
- start_time = time.time()
- reported_waiting = False
+ start_time = time.time()
+ reported_waiting = False
- while(time.time() < (start_time + max_wait)):
- # We only need it to exist.
- self.myfd = os.open(self.myhardlock, os.O_CREAT|os.O_RDWR,0660)
- os.close(self.myfd)
+ while(time.time() < (start_time + max_wait)):
+ # We only need it to exist.
+ self.myfd = os.open(self.myhardlock, os.O_CREAT|os.O_RDWR,0660)
+ os.close(self.myfd)
self.add_hardlock_file_to_cleanup()
- if not os.path.exists(self.myhardlock):
- raise FileNotFound, "Created lockfile is missing: %(filename)s" % {"filename":self.myhardlock}
- try:
- res = os.link(self.myhardlock, self.lockfile)
- except SystemExit, e:
- raise
- except Exception, e:
- #print "lockfile(): Hardlink: Link failed."
- #print "Exception: ",e
- pass
-
- if self.hardlink_is_mine(self.myhardlock, self.lockfile):
- # We have the lock.
- if reported_waiting:
- print
- return True
-
- if reported_waiting:
- writemsg(".")
- else:
- reported_waiting = True
- print
- print "Waiting on (hardlink) lockfile: (one '.' per 3 seconds)"
- print "Lockfile: " + self.lockfile
- time.sleep(3)
-
- os.unlink(self.myhardlock)
- return False
-
- def hard_unlock(self):
- try:
- if os.path.exists(self.myhardlock):
- os.unlink(self.myhardlock)
- if os.path.exists(self.lockfile):
- os.unlink(self.lockfile)
- except SystemExit, e:
- raise
- except:
- writemsg("Something strange happened to our hardlink locks.\n")
-
- def add_hardlock_file_to_cleanup(self):
+ if not os.path.exists(self.myhardlock):
+ raise FileNotFound, "Created lockfile is missing: %(filename)s" % {"filename":self.myhardlock}
+ try:
+ res = os.link(self.myhardlock, self.lockfile)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ #print "lockfile(): Hardlink: Link failed."
+ #print "Exception: ",e
+ pass
+
+ if self.hardlink_is_mine(self.myhardlock, self.lockfile):
+ # We have the lock.
+ if reported_waiting:
+ print
+ return True
+
+ if reported_waiting:
+ writemsg(".")
+ else:
+ reported_waiting = True
+ print
+ print "Waiting on (hardlink) lockfile: (one '.' per 3 seconds)"
+ print "Lockfile: " + self.lockfile
+ time.sleep(3)
+
+ os.unlink(self.myhardlock)
+ return False
+
+ def hard_unlock(self):
+ try:
+ if os.path.exists(self.myhardlock):
+ os.unlink(self.myhardlock)
+ if os.path.exists(self.lockfile):
+ os.unlink(self.lockfile)
+ except SystemExit, e:
+ raise
+ except:
+ writemsg("Something strange happened to our hardlink locks.\n")
+
+ def add_hardlock_file_to_cleanup(self):
#mypath = self.normpath(path)
if os.path.isdir(self.lockdir) and os.path.isfile(self.myhardlock):
- self.hardlock_paths[self.lockdir]=self.myhardlock
-
- def remove_hardlock_file_from_cleanup(self):
+ self.hardlock_paths[self.lockdir]=self.myhardlock
+
+ def remove_hardlock_file_from_cleanup(self):
if self.hardlock_paths.has_key(self.lockdir):
- del self.hardlock_paths[self.lockdir]
- print self.hardlock_paths
+ del self.hardlock_paths[self.lockdir]
+ print self.hardlock_paths
- def hardlock_name(self, path):
- mypath=path+"/.hardlock-"+os.uname()[1]+"-"+str(os.getpid())
+ def hardlock_name(self, path):
+ mypath=path+"/.hardlock-"+os.uname()[1]+"-"+str(os.getpid())
newpath = os.path.normpath(mypath)
- if len(newpath) > 1:
- if newpath[1] == "/":
+ if len(newpath) > 1:
+ if newpath[1] == "/":
newpath = "/"+newpath.lstrip("/")
return newpath
- def hardlink_is_mine(self,link,lock):
+ def hardlink_is_mine(self,link,lock):
import stat
- try:
- myhls = os.stat(link)
- mylfs = os.stat(lock)
- except SystemExit, e:
- raise
- except:
- myhls = None
- mylfs = None
-
- if myhls:
- if myhls[stat.ST_NLINK] == 2:
- return True
+ try:
+ myhls = os.stat(link)
+ mylfs = os.stat(lock)
+ except SystemExit, e:
+ raise
+ except:
+ myhls = None
+ mylfs = None
+
+ if myhls:
+ if myhls[stat.ST_NLINK] == 2:
+ return True
if mylfs:
if mylfs[stat.ST_INO] == myhls[stat.ST_INO]:
return True
- return False
+ return False
- def hardlink_active(lock):
- if not os.path.exists(lock):
- return False
- # XXXXXXXXXXXXXXXXXXXXXXXXXX
+ def hardlink_active(lock):
+ if not os.path.exists(lock):
+ return False
+ # XXXXXXXXXXXXXXXXXXXXXXXXXX
- def clean_my_hardlocks(self):
+ def clean_my_hardlocks(self):
try:
- for x in self.hardlock_paths.keys():
+ for x in self.hardlock_paths.keys():
self.hardlock_cleanup(x)
except AttributeError:
- pass
+ pass
- def hardlock_cleanup(self,path):
- mypid = str(os.getpid())
- myhost = os.uname()[1]
- mydl = os.listdir(path)
- results = []
- mycount = 0
+ def hardlock_cleanup(self,path):
+ mypid = str(os.getpid())
+ myhost = os.uname()[1]
+ mydl = os.listdir(path)
+ results = []
+ mycount = 0
- mylist = {}
- for x in mydl:
+ mylist = {}
+ for x in mydl:
filepath=path+"/"+x
- if os.path.isfile(filepath):
- parts = filepath.split(".hardlock-")
- if len(parts) == 2:
+ if os.path.isfile(filepath):
+ parts = filepath.split(".hardlock-")
+ if len(parts) == 2:
filename = parts[0]
- hostpid = parts[1].split("-")
- host = "-".join(hostpid[:-1])
- pid = hostpid[-1]
+ hostpid = parts[1].split("-")
+ host = "-".join(hostpid[:-1])
+ pid = hostpid[-1]
if not mylist.has_key(filename):
- mylist[filename] = {}
-
+ mylist[filename] = {}
+
if not mylist[filename].has_key(host):
mylist[filename][host] = []
- mylist[filename][host].append(pid)
- mycount += 1
+ mylist[filename][host].append(pid)
+ mycount += 1
else:
- mylist[filename][host].append(pid)
- mycount += 1
+ mylist[filename][host].append(pid)
+ mycount += 1
- results.append("Found %(count)s locks" % {"count":mycount})
- for x in mylist.keys():
- if mylist[x].has_key(myhost):
- mylockname = self.hardlock_name(x)
- if self.hardlink_is_mine(mylockname, self.lockfile) or \
- not os.path.exists(self.lockfile):
+ results.append("Found %(count)s locks" % {"count":mycount})
+ for x in mylist.keys():
+ if mylist[x].has_key(myhost):
+ mylockname = self.hardlock_name(x)
+ if self.hardlink_is_mine(mylockname, self.lockfile) or \
+ not os.path.exists(self.lockfile):
for y in mylist[x].keys():
- for z in mylist[x][y]:
- filename = x+".hardlock-"+y+"-"+z
- if filename == mylockname:
- self.hard_unlock()
- continue
- try:
- # We're sweeping through, unlinking everyone's locks.
- os.unlink(filename)
- results.append("Unlinked: " + filename)
- except SystemExit, e:
- raise
- except Exception,e:
- pass
+ for z in mylist[x][y]:
+ filename = x+".hardlock-"+y+"-"+z
+ if filename == mylockname:
+ self.hard_unlock()
+ continue
+ try:
+ # We're sweeping through, unlinking everyone's locks.
+ os.unlink(filename)
+ results.append("Unlinked: " + filename)
+ except SystemExit, e:
+ raise
+ except Exception,e:
+ pass
try:
- os.unlink(x)
- results.append("Unlinked: " + x)
- os.unlink(mylockname)
- results.append("Unlinked: " + mylockname)
- except SystemExit, e:
- raise
- except Exception,e:
- pass
- else:
- try:
- os.unlink(mylockname)
- results.append("Unlinked: " + mylockname)
- except SystemExit, e:
- raise
- except Exception,e:
- pass
- return results
+ os.unlink(x)
+ results.append("Unlinked: " + x)
+ os.unlink(mylockname)
+ results.append("Unlinked: " + mylockname)
+ except SystemExit, e:
+ raise
+ except Exception,e:
+ pass
+ else:
+ try:
+ os.unlink(mylockname)
+ results.append("Unlinked: " + mylockname)
+ except SystemExit, e:
+ raise
+ except Exception,e:
+ pass
+ return results
if __name__ == "__main__":
- def lock_work():
+ def lock_work():
print
for i in range(1,6):
- print i,time.time()
- time.sleep(1)
+ print i,time.time()
+ time.sleep(1)
print
- print "catalyst_lock.py starting"
- import time
- Lock1=LockDir("/tmp/lock_path")
- print "Lock1 write lock"
- Lock1.write_lock()
- Lock1.unlock()
- Lock1.unlock()
-
- Lock1.write_lock()
- print "Lock1 write lock"
- lock_work()
- Lock1.unlock()
- Lock1.read_lock()
- print "Lock1 read lock"
- lock_work()
- Lock1.unlock()
-
- Lock1.read_lock()
- print "Lock1 read lock"
- print "Lock1 write lock"
- Lock1.write_lock()
- lock_work()
- Lock1.unlock()
- Lock1.write_lock()
- lock_work()
- Lock1.unlock()
- #Lock1.write_lock()
- #time.sleep(2)
- #Lock1.unlock()
- ##Lock1.write_lock()
- #time.sleep(2)
- #Lock1.unlock()
+ print "catalyst_lock.py starting"
+ import time
+ Lock1=LockDir("/tmp/lock_path")
+ print "Lock1 write lock"
+ Lock1.write_lock()
+ Lock1.unlock()
+ Lock1.unlock()
+
+ Lock1.write_lock()
+ print "Lock1 write lock"
+ lock_work()
+ Lock1.unlock()
+ Lock1.read_lock()
+ print "Lock1 read lock"
+ lock_work()
+ Lock1.unlock()
+
+ Lock1.read_lock()
+ print "Lock1 read lock"
+ print "Lock1 write lock"
+ Lock1.write_lock()
+ lock_work()
+ Lock1.unlock()
+ Lock1.write_lock()
+ lock_work()
+ Lock1.unlock()
+ #Lock1.write_lock()
+ #time.sleep(2)
+ #Lock1.unlock()
+ ##Lock1.write_lock()
+ #time.sleep(2)
+ #Lock1.unlock()
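
LockDir above implements two locking strategies: fcntl.flock() on a shared lockfile, and the NFS-safe hardlink scheme described in the hard_lock() docstring. A minimal stand-alone sketch of that hardlink-count trick follows; it is illustrative only, not catalyst's API, and the /tmp path, function names, and hostname/pid naming are assumptions.

    # Illustrative sketch of the hardlink-count locking trick used by
    # LockDir.hard_lock() above; names and paths here are hypothetical.
    import os, socket, time

    def try_hardlock(lockfile):
        """Return our private link path if we won the lock, else None."""
        private = "%s.hardlock-%s-%d" % (lockfile, socket.gethostname(), os.getpid())
        fd = os.open(private, os.O_CREAT | os.O_RDWR, 0o660)   # private placeholder
        os.close(fd)
        try:
            os.link(private, lockfile)   # atomic; fails if the lock is already held
        except OSError:
            pass
        if os.stat(private).st_nlink == 2:   # both names point at our inode: we own it
            return private
        os.unlink(private)
        return None

    def hard_unlock(private, lockfile):
        for path in (private, lockfile):
            if os.path.exists(path):
                os.unlink(path)

    if __name__ == "__main__":
        token = try_hardlock("/tmp/example.lock")
        if token:
            time.sleep(1)                # critical section
            hard_unlock(token, "/tmp/example.lock")
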
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/modules/catalyst_support.py,v 1.60 2005/12/02 17:05:56 wolf31o2 Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/modules/catalyst_support.py,v 1.61 2005/12/02 19:37:02 wolf31o2 Exp $
import sys,string,os,types,re,signal,traceback,md5,sha,time
selinux_capable = False
#userpriv_capable = (os.getuid() == 0)
#fakeroot_capable = False
-BASH_BINARY = "/bin/bash"
+BASH_BINARY = "/bin/bash"
try:
- import resource
- max_fd_limit=resource.getrlimit(RLIMIT_NOFILE)
+ import resource
+ max_fd_limit=resource.getrlimit(RLIMIT_NOFILE)
except SystemExit, e:
- raise
+ raise
except:
- # hokay, no resource module.
- max_fd_limit=256
+ # hokay, no resource module.
+ max_fd_limit=256
# pids this process knows of.
spawned_pids = []
try:
- import urllib
+ import urllib
except SystemExit, e:
- raise
+ raise
def cleanup(pids,block_exceptions=True):
- """function to go through and reap the list of pids passed to it"""
- global spawned_pids
- if type(pids) == int:
- pids = [pids]
- for x in pids:
- try:
- os.kill(x,signal.SIGTERM)
- if os.waitpid(x,os.WNOHANG)[1] == 0:
- # feisty bugger, still alive.
- os.kill(x,signal.SIGKILL)
- os.waitpid(x,0)
-
- except OSError, oe:
- if block_exceptions:
- pass
- if oe.errno not in (10,3):
- raise oe
- except SystemExit:
- raise
- except Exception:
- if block_exceptions:
- pass
- try: spawned_pids.remove(x)
- except IndexError: pass
+ """function to go through and reap the list of pids passed to it"""
+ global spawned_pids
+ if type(pids) == int:
+ pids = [pids]
+ for x in pids:
+ try:
+ os.kill(x,signal.SIGTERM)
+ if os.waitpid(x,os.WNOHANG)[1] == 0:
+ # feisty bugger, still alive.
+ os.kill(x,signal.SIGKILL)
+ os.waitpid(x,0)
+
+ except OSError, oe:
+ if block_exceptions:
+ pass
+ if oe.errno not in (10,3):
+ raise oe
+ except SystemExit:
+ raise
+ except Exception:
+ if block_exceptions:
+ pass
+ try: spawned_pids.remove(x)
+ except IndexError: pass
# A function to calculate the md5 sum of a file
def calc_md5(file,verbose=False):
- m = md5.new()
- f = open(file, 'r')
- for line in f.readlines():
+ m = md5.new()
+ f = open(file, 'r')
+ for line in f.readlines():
m.update(line)
- f.close()
- md5sum = hexify(m.digest())
- if verbose:
+ f.close()
+ md5sum = hexify(m.digest())
+ if verbose:
print "MD5 (%s) = %s" % (file, md5sum)
- return md5sum
+ return md5sum
# calc_md5
def calc_sha(file,verbose=False):
- m = sha.new()
- f = open(file, 'r')
- for line in f.readlines():
+ m = sha.new()
+ f = open(file, 'r')
+ for line in f.readlines():
m.update(line)
- f.close()
- shaval = hexify(m.digest())
- if verbose:
+ f.close()
+ shaval = hexify(m.digest())
+ if verbose:
print "SHA (%s) = %s" % (file, shaval)
- return shaval
+ return shaval
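
calc_md5() and calc_sha() above hash a file by feeding it line by line into the md5 and sha modules. A minimal modern equivalent, assuming only the standard hashlib module (chunk size and function name are arbitrary choices):

    # Illustrative sketch: file checksumming as in calc_md5()/calc_sha(),
    # using hashlib and binary chunked reads instead of text readlines().
    import hashlib

    def checksum(path, algo="md5", chunk=65536):
        h = hashlib.new(algo)
        with open(path, "rb") as f:
            while True:
                block = f.read(chunk)
                if not block:
                    break
                h.update(block)
        return h.hexdigest()

    # e.g. checksum("/etc/hostname") or checksum("/etc/hostname", "sha1")
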
def read_from_clst(file):
line = ''
myline = ''
try:
myf=open(file,"r")
except:
return -1
#raise CatalystError, "Could not open file "+file
for line in myf.readlines():
- line = string.replace(line, "\n", "") # drop newline
- myline = myline + line
+ line = string.replace(line, "\n", "") # drop newline
+ myline = myline + line
myf.close()
return myline
# read_from_clst
if message:
(type,value)=sys.exc_info()[:2]
if value!=None:
- print
- print traceback.print_exc(file=sys.stdout)
+ print
+ print traceback.print_exc(file=sys.stdout)
print
print "!!! catalyst: "+message
print
if message:
#(type,value)=sys.exc_info()[:2]
#if value!=None:
- #print
- #kprint traceback.print_exc(file=sys.stdout)
+ #print
+ #kprint traceback.print_exc(file=sys.stdout)
print
print "!!! catalyst lock file in use: "+message
print
def find_binary(myc):
- """look through the environmental path for an executable file named whatever myc is"""
- # this sucks. badly.
- p=os.getenv("PATH")
- if p == None:
- return None
- for x in p.split(":"):
- #if it exists, and is executable
- if os.path.exists("%s/%s" % (x,myc)) and os.stat("%s/%s" % (x,myc))[0] & 0x0248:
- return "%s/%s" % (x,myc)
- return None
+ """look through the environmental path for an executable file named whatever myc is"""
+ # this sucks. badly.
+ p=os.getenv("PATH")
+ if p == None:
+ return None
+ for x in p.split(":"):
+ #if it exists, and is executable
+ if os.path.exists("%s/%s" % (x,myc)) and os.stat("%s/%s" % (x,myc))[0] & 0x0248:
+ return "%s/%s" % (x,myc)
+ return None
def spawn_bash(mycommand,env={},debug=False,opt_name=None,**keywords):
"""spawn mycommand as an arguement to bash"""
args=[BASH_BINARY]
if not opt_name:
- opt_name=mycommand.split()[0]
+ opt_name=mycommand.split()[0]
if not env.has_key("BASH_ENV"):
- env["BASH_ENV"] = "/etc/spork/is/not/valid/profile.env"
+ env["BASH_ENV"] = "/etc/spork/is/not/valid/profile.env"
if debug:
- args.append("-x")
+ args.append("-x")
args.append("-c")
args.append(mycommand)
return spawn(args,env=env,opt_name=opt_name,**keywords)
#def spawn_get_output(mycommand,spawn_type=spawn,raw_exit_code=False,emulate_gso=True, \
-# collect_fds=[1],fd_pipes=None,**keywords):
+# collect_fds=[1],fd_pipes=None,**keywords):
def spawn_get_output(mycommand,raw_exit_code=False,emulate_gso=True, \
- collect_fds=[1],fd_pipes=None,**keywords):
- """call spawn, collecting the output to fd's specified in collect_fds list
- emulate_gso is a compatibility hack to emulate commands.getstatusoutput's return, minus the
- requirement it always be a bash call (spawn_type controls the actual spawn call), and minus the
- 'lets let log only stdin and let stderr slide by'.
-
- emulate_gso was deprecated from the day it was added, so convert your code over.
- spawn_type is the passed in function to call- typically spawn_bash, spawn, spawn_sandbox, or spawn_fakeroot"""
- global selinux_capable
- pr,pw=os.pipe()
-
- #if type(spawn_type) not in [types.FunctionType, types.MethodType]:
- # s="spawn_type must be passed a function, not",type(spawn_type),spawn_type
- # raise Exception,s
-
- if fd_pipes==None:
- fd_pipes={}
- fd_pipes[0] = 0
-
- for x in collect_fds:
- fd_pipes[x] = pw
- keywords["returnpid"]=True
-
- mypid=spawn_bash(mycommand,fd_pipes=fd_pipes,**keywords)
- os.close(pw)
- if type(mypid) != types.ListType:
- os.close(pr)
- return [mypid, "%s: No such file or directory" % mycommand.split()[0]]
-
- fd=os.fdopen(pr,"r")
- mydata=fd.readlines()
- fd.close()
- if emulate_gso:
- mydata=string.join(mydata)
- if len(mydata) and mydata[-1] == "\n":
- mydata=mydata[:-1]
- retval=os.waitpid(mypid[0],0)[1]
- cleanup(mypid)
- if raw_exit_code:
- return [retval,mydata]
- retval=process_exit_code(retval)
- return [retval, mydata]
+ collect_fds=[1],fd_pipes=None,**keywords):
+ """call spawn, collecting the output to fd's specified in collect_fds list
+ emulate_gso is a compatibility hack to emulate commands.getstatusoutput's return, minus the
+ requirement it always be a bash call (spawn_type controls the actual spawn call), and minus the
+ 'lets let log only stdin and let stderr slide by'.
+
+ emulate_gso was deprecated from the day it was added, so convert your code over.
+ spawn_type is the passed in function to call- typically spawn_bash, spawn, spawn_sandbox, or spawn_fakeroot"""
+ global selinux_capable
+ pr,pw=os.pipe()
+
+ #if type(spawn_type) not in [types.FunctionType, types.MethodType]:
+ # s="spawn_type must be passed a function, not",type(spawn_type),spawn_type
+ # raise Exception,s
+
+ if fd_pipes==None:
+ fd_pipes={}
+ fd_pipes[0] = 0
+
+ for x in collect_fds:
+ fd_pipes[x] = pw
+ keywords["returnpid"]=True
+
+ mypid=spawn_bash(mycommand,fd_pipes=fd_pipes,**keywords)
+ os.close(pw)
+ if type(mypid) != types.ListType:
+ os.close(pr)
+ return [mypid, "%s: No such file or directory" % mycommand.split()[0]]
+
+ fd=os.fdopen(pr,"r")
+ mydata=fd.readlines()
+ fd.close()
+ if emulate_gso:
+ mydata=string.join(mydata)
+ if len(mydata) and mydata[-1] == "\n":
+ mydata=mydata[:-1]
+ retval=os.waitpid(mypid[0],0)[1]
+ cleanup(mypid)
+ if raw_exit_code:
+ return [retval,mydata]
+ retval=process_exit_code(retval)
+ return [retval, mydata]
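
spawn_get_output() above runs a command through spawn_bash(), redirects the chosen file descriptors into a pipe, and returns the processed exit code together with the captured text. The same end result, sketched with the standard subprocess module rather than catalyst's spawn machinery (command string and function name are illustrative):

    # Illustrative sketch: run a command under bash and collect (status, output),
    # which is what spawn_get_output() provides, via subprocess instead.
    import subprocess

    def get_status_output(command):
        # only stdout is captured (like the default collect_fds=[1]); stderr passes through
        proc = subprocess.Popen(["/bin/bash", "-c", command], stdout=subprocess.PIPE)
        out, _ = proc.communicate()
        return proc.returncode, out.decode(errors="replace")

    # status, text = get_status_output("ls /")
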
# base spawn function
def spawn(mycommand,env={},raw_exit_code=False,opt_name=None,fd_pipes=None,returnpid=False,\
uid=None,gid=None,groups=None,umask=None,logfile=None,path_lookup=True,\
selinux_context=None, raise_signals=False, func_call=False):
- """base fork/execve function.
+ """base fork/execve function.
mycommand is the desired command- if you need a command to execute in a bash/sandbox/fakeroot
environment, use the appropriate spawn call. This is a straight fork/exec code path.
Can either have a tuple, or a string passed in. If uid/gid/groups/umask specified, it changes
return None
myc = find_binary(myc)
if myc == None:
- return None
- mypid=[]
+ return None
+ mypid=[]
if logfile:
pr,pw=os.pipe()
mypid.extend(spawn(('tee','-i','-a',logfile),returnpid=True,fd_pipes={0:pr,1:1,2:2}))
if x not in trg_fd:
try:
os.close(x)
- except SystemExit, e:
- raise
- except:
- pass
-
- # note this order must be preserved- can't change gid/groups if you change uid first.
- if selinux_capable and selinux_context:
- import selinux
- selinux.setexec(selinux_context)
- if gid:
- os.setgid(gid)
- if groups:
- os.setgroups(groups)
- if uid:
- os.setuid(uid)
- if umask:
- os.umask(umask)
-
- try:
- #print "execing", myc, myargs
- if func_call:
- # either use a passed-in func for interpreting the results, or return if no exception.
- # note the passed in list, and dict are expanded.
- if len(mycommand) == 4:
- os._exit(mycommand[3](mycommand[0](*mycommand[1],**mycommand[2])))
- try:
- mycommand[0](*mycommand[1],**mycommand[2])
- except Exception,e:
- print "caught exception",e," in forked func",mycommand[0]
- sys.exit(0)
+ except SystemExit, e:
+ raise
+ except:
+ pass
+
+ # note this order must be preserved- can't change gid/groups if you change uid first.
+ if selinux_capable and selinux_context:
+ import selinux
+ selinux.setexec(selinux_context)
+ if gid:
+ os.setgid(gid)
+ if groups:
+ os.setgroups(groups)
+ if uid:
+ os.setuid(uid)
+ if umask:
+ os.umask(umask)
+
+ try:
+ #print "execing", myc, myargs
+ if func_call:
+ # either use a passed-in func for interpreting the results, or return if no exception.
+ # note the passed in list, and dict are expanded.
+ if len(mycommand) == 4:
+ os._exit(mycommand[3](mycommand[0](*mycommand[1],**mycommand[2])))
+ try:
+ mycommand[0](*mycommand[1],**mycommand[2])
+ except Exception,e:
+ print "caught exception",e," in forked func",mycommand[0]
+ sys.exit(0)
os.execvp(myc,myargs)
- #os.execve(myc,myargs,env)
- except SystemExit, e:
- raise
- except Exception, e:
- if not func_call:
- raise str(e)+":\n "+myc+" "+string.join(myargs)
- print "func call failed"
-
- # If the execve fails, we need to report it, and exit
- # *carefully* --- report error here
- os._exit(1)
- sys.exit(1)
- return # should never get reached
-
- # if we were logging, kill the pipes.
- if logfile:
- os.close(pr)
- os.close(pw)
-
- if returnpid:
- return mypid
-
- # loop through pids (typically one, unless logging), either waiting on their death, or waxing them
- # if the main pid (mycommand) returned badly.
- while len(mypid):
- retval=os.waitpid(mypid[-1],0)[1]
- if retval != 0:
- cleanup(mypid[0:-1],block_exceptions=False)
- # at this point we've killed all other kid pids generated via this call.
- # return now.
- if raw_exit_code:
- return retval
- return process_exit_code(retval,throw_signals=raise_signals)
- else:
- mypid.pop(-1)
- cleanup(mypid)
- return 0
+ #os.execve(myc,myargs,env)
+ except SystemExit, e:
+ raise
+ except Exception, e:
+ if not func_call:
+ raise str(e)+":\n "+myc+" "+string.join(myargs)
+ print "func call failed"
+
+ # If the execve fails, we need to report it, and exit
+ # *carefully* --- report error here
+ os._exit(1)
+ sys.exit(1)
+ return # should never get reached
+
+ # if we were logging, kill the pipes.
+ if logfile:
+ os.close(pr)
+ os.close(pw)
+
+ if returnpid:
+ return mypid
+
+ # loop through pids (typically one, unless logging), either waiting on their death, or waxing them
+ # if the main pid (mycommand) returned badly.
+ while len(mypid):
+ retval=os.waitpid(mypid[-1],0)[1]
+ if retval != 0:
+ cleanup(mypid[0:-1],block_exceptions=False)
+ # at this point we've killed all other kid pids generated via this call.
+ # return now.
+ if raw_exit_code:
+ return retval
+ return process_exit_code(retval,throw_signals=raise_signals)
+ else:
+ mypid.pop(-1)
+ cleanup(mypid)
+ return 0
def cmd(mycmd,myexc=""):
try:
raise
def process_exit_code(retval,throw_signals=False):
- """process a waitpid returned exit code, returning exit code if it exit'd, or the
- signal if it died from signalling
- if throw_signals is on, it raises a SystemExit if the process was signaled.
- This is intended for usage with threads, although at the moment you can't signal individual
- threads in python, only the master thread, so it's a questionable option."""
- if (retval & 0xff)==0:
- return retval >> 8 # return exit code
- else:
- if throw_signals:
- #use systemexit, since portage is stupid about exception catching.
- raise SystemExit()
- return (retval & 0xff) << 8 # interrupted by signal
+ """process a waitpid returned exit code, returning exit code if it exit'd, or the
+ signal if it died from signalling
+ if throw_signals is on, it raises a SystemExit if the process was signaled.
+ This is intended for usage with threads, although at the moment you can't signal individual
+ threads in python, only the master thread, so it's a questionable option."""
+ if (retval & 0xff)==0:
+ return retval >> 8 # return exit code
+ else:
+ if throw_signals:
+ #use systemexit, since portage is stupid about exception catching.
+ raise SystemExit()
+ return (retval & 0xff) << 8 # interrupted by signal
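
process_exit_code() above decodes the raw status word returned by os.waitpid(): a normal exit yields the exit code, while death by signal yields a non-zero value derived from the signal. The standard library has helpers for exactly this decoding; a small sketch, assuming nothing beyond the os module:

    # Illustrative sketch: decoding an os.waitpid() status word, the job done by
    # process_exit_code() above, using the os.WIF* helpers.
    import os

    def decode_wait_status(status):
        if os.WIFEXITED(status):
            return ("exited", os.WEXITSTATUS(status))    # the (retval & 0xff) == 0 branch above
        if os.WIFSIGNALED(status):
            return ("signaled", os.WTERMSIG(status))     # killed by a signal
        return ("stopped", os.WSTOPSIG(status))

    # pid, status = os.waitpid(child_pid, 0)
    # kind, value = decode_wait_status(status)
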
def file_locate(settings,filelist,expand=1):
#filenames such as cdtar are optional, so we don't assume the variable is defined.
pass
else:
- if len(settings[myfile])==0:
- raise CatalystError, "File variable \""+myfile+"\" has a length of zero (not specified.)"
- if settings[myfile][0]=="/":
- if not os.path.exists(settings[myfile]):
- raise CatalystError, "Cannot locate specified "+myfile+": "+settings[myfile]
- elif expand and os.path.exists(os.getcwd()+"/"+settings[myfile]):
- settings[myfile]=os.getcwd()+"/"+settings[myfile]
- else:
- raise CatalystError, "Cannot locate specified "+myfile+": "+settings[myfile]+" (2nd try)"
+ if len(settings[myfile])==0:
+ raise CatalystError, "File variable \""+myfile+"\" has a length of zero (not specified.)"
+ if settings[myfile][0]=="/":
+ if not os.path.exists(settings[myfile]):
+ raise CatalystError, "Cannot locate specified "+myfile+": "+settings[myfile]
+ elif expand and os.path.exists(os.getcwd()+"/"+settings[myfile]):
+ settings[myfile]=os.getcwd()+"/"+settings[myfile]
+ else:
+ raise CatalystError, "Cannot locate specified "+myfile+": "+settings[myfile]+" (2nd try)"
"""
Spec file format:
mobj=pat.match(myline)
pos += 1
if mobj.group(2):
- clean_string = re.sub(r"\"",r"",mobj.group(2))
- mymakeconf[mobj.group(1)]=clean_string
+ clean_string = re.sub(r"\"",r"",mobj.group(2))
+ mymakeconf[mobj.group(1)]=clean_string
return mymakeconf
def read_spec(myspecfile):
def read_makeconf(mymakeconffile):
if os.path.exists(mymakeconffile):
- try:
- myf=open(mymakeconffile,"r")
- mylines=myf.readlines()
- myf.close()
- return parse_makeconf(mylines)
- except:
- raise CatalystError, "Could not open make.conf file "+myspecfile
+ try:
+ myf=open(mymakeconffile,"r")
+ mylines=myf.readlines()
+ myf.close()
+ return parse_makeconf(mylines)
+ except:
+ raise CatalystError, "Could not open make.conf file "+myspecfile
else:
- makeconf={}
- return makeconf
+ makeconf={}
+ return makeconf
def msg(mymsg,verblevel=1):
if verbosity>=verblevel:
raise CatalystError, "Could not touch "+myfile+"."
def countdown(secs=5, doing="Starting"):
- if secs:
+ if secs:
print ">>> Waiting",secs,"seconds before starting..."
print ">>> (Control-C to abort)...\n"+doing+" in: ",
ticks=range(secs)
def normpath(mypath):
TrailingSlash=False
- if mypath[-1] == "/":
- TrailingSlash=True
- newpath = os.path.normpath(mypath)
- if len(newpath) > 1:
- if newpath[:2] == "//":
- newpath = newpath[1:]
+ if mypath[-1] == "/":
+ TrailingSlash=True
+ newpath = os.path.normpath(mypath)
+ if len(newpath) > 1:
+ if newpath[:2] == "//":
+ newpath = newpath[1:]
if TrailingSlash:
- newpath=newpath+'/'
- return newpath
+ newpath=newpath+'/'
+ return newpath
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/modules/embedded_target.py,v 1.16 2005/12/02 17:05:56 wolf31o2 Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/modules/embedded_target.py,v 1.17 2005/12/02 19:37:02 wolf31o2 Exp $
"""
This class works like a 'stage3'. A stage2 tarball is unpacked, but instead
class embedded_target(generic_stage_target):
def __init__(self,spec,addlargs):
- self.required_values=[]
- self.valid_values=[]
- self.valid_values.extend(["embedded/empty","embedded/rm","embedded/unmerge","embedded/fs-prepare","embedded/fs-finish","embedded/mergeroot","embedded/packages","embedded/fs-type","embedded/runscript","boot/kernel"])
+ self.required_values=[]
+ self.valid_values=[]
+ self.valid_values.extend(["embedded/empty","embedded/rm","embedded/unmerge","embedded/fs-prepare","embedded/fs-finish","embedded/mergeroot","embedded/packages","embedded/fs-type","embedded/runscript","boot/kernel"])
self.valid_values.extend(["embedded/use"])
- if addlargs.has_key("embedded/fs-type"):
- self.valid_values.append("embedded/fs-ops")
+ if addlargs.has_key("embedded/fs-type"):
+ self.valid_values.append("embedded/fs-ops")
generic_stage_target.__init__(self,spec,addlargs)
self.set_build_kernel_vars(addlargs)
- def set_action_sequence(self):
+ def set_action_sequence(self):
self.settings["action_sequence"]=["dir_setup","unpack","unpack_snapshot",\
"config_profile_link","setup_confdir",\
"portage_overlay","bind","chroot_setup",\
"bootloader","root_overlay","fsscript","unmerge",\
"unbind","remove","empty","clean","capture","clear_autoresume"]
- def set_stage_path(self):
- self.settings["stage_path"]=normpath(self.settings["chroot_path"]+"/tmp/mergeroot")
+ def set_stage_path(self):
+ self.settings["stage_path"]=normpath(self.settings["chroot_path"]+"/tmp/mergeroot")
print "embedded stage path is "+self.settings["stage_path"]
- def set_root_path(self):
- self.settings["root_path"]=normpath("/tmp/mergeroot")
+ def set_root_path(self):
+ self.settings["root_path"]=normpath("/tmp/mergeroot")
print "embedded root path is "+self.settings["root_path"]
- def set_dest_path(self):
+ def set_dest_path(self):
self.settings["destpath"]=normpath(self.settings["chroot_path"]+self.settings["root_path"])
def register(foo):
- foo.update({"embedded":embedded_target})
- return foo
+ foo.update({"embedded":embedded_target})
+ return foo
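
Each catalyst target module ends with a register() function like the one above: the driver builds a dictionary mapping target names to classes by passing it through every module's register(). A minimal sketch of that plugin pattern, with a stand-in class (the class body here is hypothetical, not the real embedded_target):

    # Illustrative sketch of the name->class registration pattern used by the
    # register() functions in catalyst's target modules.
    targetmap = {}

    class example_target(object):
        def __init__(self, spec, addlargs):
            self.spec = spec
            self.addlargs = addlargs

    def register(foo):
        foo.update({"example": example_target})
        return foo

    register(targetmap)
    target = targetmap["example"]({}, {})    # the driver instantiates targets by name
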
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/modules/generic_stage_target.py,v 1.85 2005/12/02 17:05:56 wolf31o2 Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/modules/generic_stage_target.py,v 1.86 2005/12/02 19:37:02 wolf31o2 Exp $
"""
This class does all of the chroot setup, copying of files, etc. It is
msg("Can't find "+x+".py plugin in "+self.settings["sharedir"]+"/arch/")
# call arch constructor, pass our settings
- try:
- self.arch=self.subarchmap[self.settings["subarch"]](self.settings)
- except:
- print "Invalid subarch: "+self.settings["subarch"]
- print "Choose one of the following:",
- for x in self.subarchmap:
- print x,
+ try:
+ self.arch=self.subarchmap[self.settings["subarch"]](self.settings)
+ except:
+ print "Invalid subarch: "+self.settings["subarch"]
+ print "Choose one of the following:",
+ for x in self.subarchmap:
+ print x,
print
- sys.exit(2)
+ sys.exit(2)
print "Using target:",self.settings["target"]
# self.settings["mainarch"] should now be set by our arch constructor, so we print
if os.environ.has_key("CHOST"):
self.settings["CHOST"] = os.environ["CHOST"]
if self.settings.has_key("chost"):
- self.settings["CHOST"]=list_to_string(self.settings["chost"])
+ self.settings["CHOST"]=list_to_string(self.settings["chost"])
if self.makeconf.has_key("CHOST"):
- print "Using CHOST setting from seed stage"
- self.settings["CHOST"]=self.makeconf["CHOST"]
+ print "Using CHOST setting from seed stage"
+ self.settings["CHOST"]=self.makeconf["CHOST"]
def override_cflags(self):
if os.environ.has_key("CFLAGS"):
- self.settings["CFLAGS"] = os.environ["CFLAGS"]
+ self.settings["CFLAGS"] = os.environ["CFLAGS"]
if self.settings.has_key("cflags"):
- self.settings["CFLAGS"]=list_to_string(self.settings["cflags"])
+ self.settings["CFLAGS"]=list_to_string(self.settings["cflags"])
if self.makeconf.has_key("CFLAGS"):
- print "Using CFLAGS setting from seed stage"
- self.settings["CFLAGS"]=self.makeconf["CFLAGS"]
+ print "Using CFLAGS setting from seed stage"
+ self.settings["CFLAGS"]=self.makeconf["CFLAGS"]
def override_cxxflags(self):
if os.environ.has_key("CXXFLAGS"):
- self.settings["CXXFLAGS"] = os.environ["CXXFLAGS"]
+ self.settings["CXXFLAGS"] = os.environ["CXXFLAGS"]
if self.settings.has_key("cxxflags"):
- self.settings["CXXFLAGS"]=list_to_string(self.settings["cxxflags"])
+ self.settings["CXXFLAGS"]=list_to_string(self.settings["cxxflags"])
if self.makeconf.has_key("CXXFLAGS"):
- print "Using CXXFLAGS setting from seed stage"
- self.settings["CXXFLAGS"]=self.makeconf["CXXFLAGS"]
+ print "Using CXXFLAGS setting from seed stage"
+ self.settings["CXXFLAGS"]=self.makeconf["CXXFLAGS"]
def override_ldflags(self):
if os.environ.has_key("LDFLAGS"):
- self.settings["LDFLAGS"] = os.environ["LDFLAGS"]
+ self.settings["LDFLAGS"] = os.environ["LDFLAGS"]
if self.settings.has_key("ldflags"):
- self.settings["LDFLAGS"]=list_to_string(self.settings["ldflags"])
+ self.settings["LDFLAGS"]=list_to_string(self.settings["ldflags"])
if self.makeconf.has_key("LDFLAGS"):
- print "Using LDFLAGS setting from seed stage"
- self.settings["LDFLAGS"]=self.makeconf["LDFLAGS"]
+ print "Using LDFLAGS setting from seed stage"
+ self.settings["LDFLAGS"]=self.makeconf["LDFLAGS"]
def set_install_mask(self):
if self.settings.has_key("install_mask"):
if os.path.isfile(self.settings["target_path"]):
cmd("rm -f "+self.settings["target_path"],
"Could not remove existing file: "+self.settings["target_path"])
- touch(self.settings["autoresume_path"]+"setup_target_path")
+ touch(self.settings["autoresume_path"]+"setup_target_path")
if not os.path.exists(self.settings["storedir"]+"/builds/"):
os.makedirs(self.settings["storedir"]+"/builds/")
def set_archscript(self):
- if self.settings.has_key(self.settings["spec_prefix"]+"/archscript"):
+ if self.settings.has_key(self.settings["spec_prefix"]+"/archscript"):
print "\nWarning!!! "
print "\t"+self.settings["spec_prefix"]+"/archscript" + " is deprecated and no longer used.\n"
def set_runscript(self):
- if self.settings.has_key(self.settings["spec_prefix"]+"/runscript"):
+ if self.settings.has_key(self.settings["spec_prefix"]+"/runscript"):
print "\nWarning!!! "
print "\t"+self.settings["spec_prefix"]+"/runscript" + " is deprecated and no longer used.\n"
def set_controller_file(self):
self.settings["controller_file"]=normpath(self.settings["sharedir"]+"/targets/"+self.settings["target"]+"/"+self.settings["target"]+"-controller.sh")
def set_iso_volume_id(self):
- if self.settings.has_key(self.settings["spec_prefix"]+"/volid"):
+ if self.settings.has_key(self.settings["spec_prefix"]+"/volid"):
self.settings["iso_volume_id"] = string.join(self.settings[self.settings["spec_prefix"]+"/volid"])
if len(self.settings["iso_volume_id"])>32:
raise CatalystError,"ISO VOLUME ID: volid must not exceed 32 characters."
self.settings["use"]=self.settings[self.settings["spec_prefix"]+"/use"]
del self.settings[self.settings["spec_prefix"]+"/use"]
if self.settings.has_key("use"):
- if type(self.settings["use"])==types.StringType:
+ if type(self.settings["use"])==types.StringType:
self.settings["use"]=self.settings["use"].split()
def set_stage_path(self):
pass
def set_rm(self):
- if self.settings.has_key(self.settings["spec_prefix"]+"/rm"):
+ if self.settings.has_key(self.settings["spec_prefix"]+"/rm"):
if type(self.settings[self.settings["spec_prefix"]+"/rm"])==types.StringType:
- self.settings[self.settings["spec_prefix"]+"/rm"]=self.settings[self.settings["spec_prefix"]+"/rm"].split()
+ self.settings[self.settings["spec_prefix"]+"/rm"]=self.settings[self.settings["spec_prefix"]+"/rm"].split()
def set_root_path(self):
# ROOT= variable for emerges
def set_build_kernel_vars(self,addlargs):
- if addlargs.has_key("boot/kernel"):
+ if addlargs.has_key("boot/kernel"):
if type(addlargs["boot/kernel"]) == types.StringType:
loopy=[addlargs["boot/kernel"]]
else:
loopy=addlargs["boot/kernel"]
-
+
for x in loopy:
self.required_values.append("boot/kernel/"+x+"/sources")
self.required_values.append("boot/kernel/"+x+"/config")
self.valid_values.append("boot/kernel/"+x+"/gk_kernargs")
self.valid_values.append("boot/kernel/"+x+"/gk_action")
self.valid_values.append("boot/kernel/"+x+"/initramfs_overlay")
- if self.settings.has_key("boot/kernel/"+x+"/postconf"):
+ if self.settings.has_key("boot/kernel/"+x+"/postconf"):
print "boot/kernel/"+x+"/postconf is deprecated"
print "\tInternally moving these ebuilds to boot/kernel/"+x+"/packages"
print "\tPlease move them to boot/kernel/"+x+"/packages in your specfile"
for y in loop2:
self.settings["boot/kernel/"+x+"/packages"].append(y)
- if self.settings.has_key(self.settings["spec_prefix"]+"/devmanager"):
+ if self.settings.has_key(self.settings["spec_prefix"]+"/devmanager"):
self.settings["devmanager"]=self.settings[self.settings["spec_prefix"]+"/devmanager"]
del self.settings[self.settings["spec_prefix"]+"/devmanager"]
-
- if self.settings.has_key(self.settings["spec_prefix"]+"/splashtype"):
+
+ if self.settings.has_key(self.settings["spec_prefix"]+"/splashtype"):
self.settings["splashtype"]=self.settings[self.settings["spec_prefix"]+"/splashtype"]
del self.settings[self.settings["spec_prefix"]+"/splashtype"]
-
- if self.settings.has_key(self.settings["spec_prefix"]+"/gk_mainargs"):
+
+ if self.settings.has_key(self.settings["spec_prefix"]+"/gk_mainargs"):
self.settings["gk_mainargs"]=self.settings[self.settings["spec_prefix"]+"/gk_mainargs"]
del self.settings[self.settings["spec_prefix"]+"/gk_mainargs"]
def kill_chroot_pids(self):
- print "Checking for processes running in chroot and killing them."
-
- # Force environment variables to be exported so script can see them
- self.setup_environment()
+ print "Checking for processes running in chroot and killing them."
+
+ # Force environment variables to be exported so script can see them
+ self.setup_environment()
- if os.path.exists(self.settings["sharedir"]+"/targets/support/kill-chroot-pids.sh"):
- cmd("/bin/bash "+self.settings["sharedir"]+"/targets/support/kill-chroot-pids.sh","kill-chroot-pids script failed.")
+ if os.path.exists(self.settings["sharedir"]+"/targets/support/kill-chroot-pids.sh"):
+ cmd("/bin/bash "+self.settings["sharedir"]+"/targets/support/kill-chroot-pids.sh","kill-chroot-pids script failed.")
def mount_safety_check(self):
mypath=self.settings["chroot_path"]
if self.settings.has_key("AUTORESUME"):
- if os.path.isdir(self.settings["source_path"]) and \
- os.path.exists(self.settings["autoresume_path"]+"unpack"):
+ if os.path.isdir(self.settings["source_path"]) and \
+ os.path.exists(self.settings["autoresume_path"]+"unpack"):
unpack=False
- elif self.settings.has_key("source_path_md5sum"):
+ elif self.settings.has_key("source_path_md5sum"):
if self.settings["source_path_md5sum"] != clst_unpack_md5sum:
invalid_snapshot=True
unpack=True
if not unpack:
- print "Resume point detected, skipping unpack operation..."
+ print "Resume point detected, skipping unpack operation..."
if unpack:
self.mount_safety_check()
destdir=self.settings["snapshot_cache_path"]
unpack_cmd="tar xjpf "+self.settings["snapshot_path"]+" -C "+destdir
unpack_errmsg="Error unpacking snapshot"
- cleanup_msg="Cleaning up invalid snapshot cache at \n\t"+self.settings["snapshot_cache_path"]+" (This can take a long time)..."
+ cleanup_msg="Cleaning up invalid snapshot cache at \n\t"+self.settings["snapshot_cache_path"]+" (This can take a long time)..."
cleanup_errmsg="Error removing existing snapshot cache directory."
self.snapshot_lock_object=self.snapcache_lock
unpack_errmsg="Error unpacking snapshot"
if self.settings.has_key("AUTORESUME") \
- and os.path.exists(self.settings["chroot_path"]+"/usr/portage/") \
- and os.path.exists(self.settings["autoresume_path"]+"unpack_portage") \
+ and os.path.exists(self.settings["chroot_path"]+"/usr/portage/") \
+ and os.path.exists(self.settings["autoresume_path"]+"unpack_portage") \
and self.settings["snapshot_path_md5sum"] == snapshot_md5sum:
print "Valid Resume point detected, skipping unpack of portage tree..."
unpack=False
-
+
if unpack:
if self.settings.has_key("SNAPCACHE"):
- self.snapshot_lock_object.write_lock()
- if os.path.exists(destdir):
+ self.snapshot_lock_object.write_lock()
+ if os.path.exists(destdir):
print cleanup_msg
cleanup_cmd="rm -rf "+destdir
cmd(cleanup_cmd,cleanup_errmsg)
- if not os.path.exists(destdir):
+ if not os.path.exists(destdir):
os.makedirs(destdir,0755)
-
+
print "Unpacking portage tree (This can take a long time) ..."
cmd(unpack_cmd,unpack_errmsg)
myf.close()
if self.settings.has_key("SNAPCACHE"):
- self.snapshot_lock_object.unlock()
+ self.snapshot_lock_object.unlock()
def config_profile_link(self):
if self.settings.has_key("AUTORESUME") \
print "Configuring profile link..."
cmd("rm -f "+self.settings["chroot_path"]+"/etc/make.profile",\
"Error zapping profile link")
- cmd("ln -sf ../usr/portage/profiles/"+self.settings["target_profile"]+\
- " "+self.settings["chroot_path"]+"/etc/make.profile","Error creating profile link")
- touch(self.settings["autoresume_path"]+"config_profile_link")
-
+ cmd("ln -sf ../usr/portage/profiles/"+self.settings["target_profile"]+\
+ " "+self.settings["chroot_path"]+"/etc/make.profile","Error creating profile link")
+ touch(self.settings["autoresume_path"]+"config_profile_link")
+
def setup_confdir(self):
if self.settings.has_key("AUTORESUME") \
and os.path.exists(self.settings["autoresume_path"]+"setup_confdir"):
print "Resume point detected, skipping setup_confdir operation..."
else:
- if self.settings.has_key("portage_confdir"):
+ if self.settings.has_key("portage_confdir"):
print "Configuring /etc/portage..."
cmd("rm -rf "+self.settings["chroot_path"]+"/etc/portage","Error zapping /etc/portage")
cmd("cp -R "+self.settings["portage_confdir"]+"/ "+self.settings["chroot_path"]+\
"/etc/portage","Error copying /etc/portage")
- touch(self.settings["autoresume_path"]+"setup_confdir")
+ touch(self.settings["autoresume_path"]+"setup_confdir")
def portage_overlay(self):
- # copy over the portage overlays
- # Always copy over the overlay in case it has changed
- if self.settings.has_key("portage_overlay"):
- if type(self.settings["portage_overlay"])==types.StringType:
+ # copy over the portage overlays
+ # Always copy over the overlay in case it has changed
+ if self.settings.has_key("portage_overlay"):
+ if type(self.settings["portage_overlay"])==types.StringType:
self.settings["portage_overlay"]=[self.settings["portage_overlay"]]
for x in self.settings["portage_overlay"]:
cmd("cp -R "+x+"/* "+self.settings["chroot_path"]+x,"Could not copy portage_overlay")
def root_overlay(self):
- # copy over the root_overlay
- # Always copy over the overlay in case it has changed
+ # copy over the root_overlay
+ # Always copy over the overlay in case it has changed
if self.settings.has_key(self.settings["spec_prefix"]+"/root_overlay"):
- print "Copying root overlay ..."
- cmd("rsync -a "+self.settings[self.settings["spec_prefix"]+"/root_overlay"]+"/ "+\
+ print "Copying root overlay ..."
+ cmd("rsync -a "+self.settings[self.settings["spec_prefix"]+"/root_overlay"]+"/ "+\
self.settings["chroot_path"], self.settings["spec_prefix"]+"/root_overlay copy failed.")
def bind(self):
if retval!=0:
self.unbind()
raise CatalystError,"Couldn't bind mount "+src
-
+
def unbind(self):
ouch=0
retval2=os.system("umount "+mypath+x)
if retval2!=0:
- ouch=1
- warn("Couldn't umount bind mount: "+mypath+x)
- # keep trying to umount the others, to minimize damage if developer makes a mistake
+ ouch=1
+ warn("Couldn't umount bind mount: "+mypath+x)
+ # keep trying to umount the others, to minimize damage if developer makes a mistake
if self.settings.has_key("SNAPCACHE") and x == "/usr/portage":
try:
- # It's possible the snapshot lock object isn't created yet
- # this is because mount safety check calls unbind before the target is fully initialized
- self.snapshot_lock_object.unlock()
+ # It's possible the snapshot lock object isn't created yet
+ # this is because mount safety check calls unbind before the target is fully initialized
+ self.snapshot_lock_object.unlock()
except:
- pass
+ pass
if ouch:
"""
if any bind mounts really failed, then we need to raise
and os.path.exists(self.settings["autoresume_path"]+"chroot_setup"):
print "Resume point detected, skipping chroot_setup operation..."
else:
- print "Setting up chroot..."
-
- self.makeconf=read_makeconf(self.settings["chroot_path"]+"/etc/make.conf")
-
- cmd("cp /etc/resolv.conf "+self.settings["chroot_path"]+"/etc",\
- "Could not copy resolv.conf into place.")
+ print "Setting up chroot..."
+
+ self.makeconf=read_makeconf(self.settings["chroot_path"]+"/etc/make.conf")
+
+ cmd("cp /etc/resolv.conf "+self.settings["chroot_path"]+"/etc",\
+ "Could not copy resolv.conf into place.")
- # copy over the envscript, if applicable
- if self.settings.has_key("ENVSCRIPT"):
- if not os.path.exists(self.settings["ENVSCRIPT"]):
+ # copy over the envscript, if applicable
+ if self.settings.has_key("ENVSCRIPT"):
+ if not os.path.exists(self.settings["ENVSCRIPT"]):
raise CatalystError, "Can't find envscript "+self.settings["ENVSCRIPT"]
- cmd("cp "+self.settings["ENVSCRIPT"]+" "+self.settings["chroot_path"]+"/tmp/envscript",\
- "Could not copy envscript into place.")
-
- # copy over /etc/hosts from the host in case there are any specialties in there
- if os.path.exists("/etc/hosts"):
- cmd("mv "+self.settings["chroot_path"]+"/etc/hosts "+self.settings["chroot_path"]+\
- "/etc/hosts.bck", "Could not backup /etc/hosts")
- cmd("cp /etc/hosts "+self.settings["chroot_path"]+"/etc/hosts", "Could not copy /etc/hosts")
- self.override_chost()
- self.override_cflags()
- self.override_cxxflags()
- self.override_ldflags()
- # modify and write out make.conf (for the chroot)
- cmd("rm -f "+self.settings["chroot_path"]+"/etc/make.conf","Could not remove "+self.settings["chroot_path"]+"/etc/make.conf")
- myf=open(self.settings["chroot_path"]+"/etc/make.conf","w")
- myf.write("# These settings were set by the catalyst build script that automatically built this stage\n")
- myf.write("# Please consult /etc/make.conf.example for a more detailed example\n")
- myf.write('CFLAGS="'+self.settings["CFLAGS"]+'"\n')
- myf.write('CHOST="'+self.settings["CHOST"]+'"\n')
-
- if self.settings.has_key("CXXFLAGS"):
- myf.write('CXXFLAGS="'+self.settings["CXXFLAGS"]+'"\n')
- else:
- myf.write('CXXFLAGS="${CFLAGS}"\n')
-
- if self.settings.has_key("LDFLAGS"):
- myf.write('LDFLAGS="'+self.settings["LDFLAGS"]+'"\n')
-
- # figure out what our USE vars are for building
- myusevars=[]
- if self.settings.has_key("HOSTUSE"):
- myusevars.extend(self.settings["HOSTUSE"])
+ cmd("cp "+self.settings["ENVSCRIPT"]+" "+self.settings["chroot_path"]+"/tmp/envscript",\
+ "Could not copy envscript into place.")
+
+ # copy over /etc/hosts from the host in case there are any specialties in there
+ if os.path.exists("/etc/hosts"):
+ cmd("mv "+self.settings["chroot_path"]+"/etc/hosts "+self.settings["chroot_path"]+\
+ "/etc/hosts.bck", "Could not backup /etc/hosts")
+ cmd("cp /etc/hosts "+self.settings["chroot_path"]+"/etc/hosts", "Could not copy /etc/hosts")
+ self.override_chost()
+ self.override_cflags()
+ self.override_cxxflags()
+ self.override_ldflags()
+ # modify and write out make.conf (for the chroot)
+ cmd("rm -f "+self.settings["chroot_path"]+"/etc/make.conf","Could not remove "+self.settings["chroot_path"]+"/etc/make.conf")
+ myf=open(self.settings["chroot_path"]+"/etc/make.conf","w")
+ myf.write("# These settings were set by the catalyst build script that automatically built this stage\n")
+ myf.write("# Please consult /etc/make.conf.example for a more detailed example\n")
+ myf.write('CFLAGS="'+self.settings["CFLAGS"]+'"\n')
+ myf.write('CHOST="'+self.settings["CHOST"]+'"\n')
+
+ if self.settings.has_key("CXXFLAGS"):
+ myf.write('CXXFLAGS="'+self.settings["CXXFLAGS"]+'"\n')
+ else:
+ myf.write('CXXFLAGS="${CFLAGS}"\n')
+
+ if self.settings.has_key("LDFLAGS"):
+ myf.write('LDFLAGS="'+self.settings["LDFLAGS"]+'"\n')
+
+ # figure out what our USE vars are for building
+ myusevars=[]
+ if self.settings.has_key("HOSTUSE"):
+ myusevars.extend(self.settings["HOSTUSE"])
- if self.settings.has_key("use"):
- myusevars.extend(self.settings["use"])
- myf.write('USE="'+string.join(myusevars)+'"\n')
+ if self.settings.has_key("use"):
+ myusevars.extend(self.settings["use"])
+ myf.write('USE="'+string.join(myusevars)+'"\n')
- # setup the portage overlay
- if self.settings.has_key("portage_overlay"):
+ # setup the portage overlay
+ if self.settings.has_key("portage_overlay"):
if type(self.settings["portage_overlay"])==types.StringType:
                self.settings["portage_overlay"]=[self.settings["portage_overlay"]]
myf.write('PORTDIR_OVERLAY="'+string.join(self.settings["portage_overlay"])+'"\n')
- myf.close()
- touch(self.settings["autoresume_path"]+"chroot_setup")
+ myf.close()
+ touch(self.settings["autoresume_path"]+"chroot_setup")
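
# Editor's note: illustrative sketch only, not part of the patch above.
# chroot_setup() serializes a handful of settings keys into /etc/make.conf:
# CXXFLAGS falls back to ${CFLAGS}, LDFLAGS is optional, and USE is built
# from HOSTUSE plus the spec's "use" list.  The helper name and the sample
# settings dict below are hypothetical.
import string

def write_makeconf_sketch(settings, path):
    myf = open(path, "w")
    myf.write("# example of the make.conf chroot_setup() generates\n")
    myf.write('CFLAGS="' + settings["CFLAGS"] + '"\n')
    myf.write('CHOST="' + settings["CHOST"] + '"\n')
    if "CXXFLAGS" in settings:
        myf.write('CXXFLAGS="' + settings["CXXFLAGS"] + '"\n')
    else:
        myf.write('CXXFLAGS="${CFLAGS}"\n')
    if "LDFLAGS" in settings:
        myf.write('LDFLAGS="' + settings["LDFLAGS"] + '"\n')
    myusevars = []
    myusevars.extend(settings.get("HOSTUSE", []))
    myusevars.extend(settings.get("use", []))
    myf.write('USE="' + string.join(myusevars) + '"\n')
    myf.close()

# write_makeconf_sketch({"CFLAGS": "-O2 -pipe", "CHOST": "i686-pc-linux-gnu",
#     "HOSTUSE": ["mmx", "sse"], "use": ["bindist"]}, "/tmp/make.conf.example")
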
def fsscript(self):
if self.settings.has_key("AUTORESUME") \
and os.path.exists(self.settings["autoresume_path"]+"fsscript"):
print "Resume point detected, skipping fsscript operation..."
else:
- if self.settings.has_key("fsscript"):
+ if self.settings.has_key("fsscript"):
if os.path.exists(self.settings["controller_file"]):
- cmd("/bin/bash "+self.settings["controller_file"]+" fsscript","fsscript script failed.")
+ cmd("/bin/bash "+self.settings["controller_file"]+" fsscript","fsscript script failed.")
touch(self.settings["autoresume_path"]+"fsscript")
def rcupdate(self):
and os.path.exists(self.settings["autoresume_path"]+"rcupdate"):
print "Resume point detected, skipping rcupdate operation..."
else:
- if os.path.exists(self.settings["controller_file"]):
+ if os.path.exists(self.settings["controller_file"]):
cmd("/bin/bash "+self.settings["controller_file"]+" rc-update","rc-update script failed.")
touch(self.settings["autoresume_path"]+"rcupdate")
and os.path.exists(self.settings["autoresume_path"]+"clean"):
print "Resume point detected, skipping clean operation..."
else:
- for x in self.settings["cleanables"]:
- print "Cleaning chroot: "+x+"... "
- cmd("rm -rf "+self.settings["destpath"]+x,"Couldn't clean "+x)
+ for x in self.settings["cleanables"]:
+ print "Cleaning chroot: "+x+"... "
+ cmd("rm -rf "+self.settings["destpath"]+x,"Couldn't clean "+x)
- # put /etc/hosts back into place
- if os.path.exists("/etc/hosts.bck"):
- cmd("mv -f "+self.settings["chroot_path"]+"/etc/hosts.bck "+self.settings["chroot_path"]+\
+ # put /etc/hosts back into place
+ if os.path.exists(self.settings["chroot_path"]+"/etc/hosts.bck"):
+ cmd("mv -f "+self.settings["chroot_path"]+"/etc/hosts.bck "+self.settings["chroot_path"]+\
"/etc/hosts", "Could not replace /etc/hosts")
- if os.path.exists(self.settings["controller_file"]):
+ if os.path.exists(self.settings["controller_file"]):
cmd("/bin/bash "+self.settings["controller_file"]+" clean","clean script failed.")
touch(self.settings["autoresume_path"]+"clean")
def empty(self):
- if self.settings.has_key("AUTORESUME") \
+ if self.settings.has_key("AUTORESUME") \
and os.path.exists(self.settings["autoresume_path"]+"empty"):
print "Resume point detected, skipping empty operation..."
- else:
+ else:
if self.settings.has_key(self.settings["spec_prefix"]+"/empty"):
- if type(self.settings[self.settings["spec_prefix"]+"/empty"])==types.StringType:
+ if type(self.settings[self.settings["spec_prefix"]+"/empty"])==types.StringType:
self.settings[self.settings["spec_prefix"]+"/empty"]=self.settings[self.settings["spec_prefix"]+"/empty"].split()
- for x in self.settings[self.settings["spec_prefix"]+"/empty"]:
+ for x in self.settings[self.settings["spec_prefix"]+"/empty"]:
myemp=self.settings["destpath"]+x
if not os.path.isdir(myemp):
- print x,"not a directory or does not exist, skipping 'empty' operation."
- continue
+ print x,"not a directory or does not exist, skipping 'empty' operation."
+ continue
print "Emptying directory",x
# stat the dir, delete the dir, recreate the dir and set
# the proper perms and ownership
os.makedirs(myemp,0755)
os.chown(myemp,mystat[ST_UID],mystat[ST_GID])
os.chmod(myemp,mystat[ST_MODE])
- touch(self.settings["autoresume_path"]+"empty")
+ touch(self.settings["autoresume_path"]+"empty")
def remove(self):
- if self.settings.has_key("AUTORESUME") \
+ if self.settings.has_key("AUTORESUME") \
and os.path.exists(self.settings["autoresume_path"]+"remove"):
print "Resume point detected, skipping remove operation..."
- else:
+ else:
if self.settings.has_key(self.settings["spec_prefix"]+"/rm"):
- for x in self.settings[self.settings["spec_prefix"]+"/rm"]:
+ for x in self.settings[self.settings["spec_prefix"]+"/rm"]:
# we're going to shell out for all these cleaning operations,
# so we get easy glob handling
print "livecd: removing "+x
os.system("rm -rf "+self.settings["chroot_path"]+x)
try:
- if os.path.exists(self.settings["controller_file"]):
- cmd("/bin/bash "+self.settings["controller_file"]+" clean",\
+ if os.path.exists(self.settings["controller_file"]):
+ cmd("/bin/bash "+self.settings["controller_file"]+" clean",\
"Clean failed.")
- touch(self.settings["autoresume_path"]+"remove")
+ touch(self.settings["autoresume_path"]+"remove")
except:
- self.unbind()
- raise
+ self.unbind()
+ raise
def preclean(self):
- if self.settings.has_key("AUTORESUME") \
+ if self.settings.has_key("AUTORESUME") \
and os.path.exists(self.settings["autoresume_path"]+"preclean"):
print "Resume point detected, skipping preclean operation..."
- else:
+ else:
try:
if os.path.exists(self.settings["controller_file"]):
- cmd("/bin/bash "+self.settings["controller_file"]+" preclean","preclean script failed.")
+ cmd("/bin/bash "+self.settings["controller_file"]+" preclean","preclean script failed.")
touch(self.settings["autoresume_path"]+"preclean")
except:
raise CatalystError, "Build failed, could not execute preclean"
def capture(self):
- if self.settings.has_key("AUTORESUME") \
+ if self.settings.has_key("AUTORESUME") \
and os.path.exists(self.settings["autoresume_path"]+"capture"):
print "Resume point detected, skipping capture operation..."
- else:
+ else:
"""capture target in a tarball"""
mypath=self.settings["target_path"].split("/")
# remove filename from path
touch(self.settings["autoresume_path"]+"capture")
def run_local(self):
- if self.settings.has_key("AUTORESUME") \
+ if self.settings.has_key("AUTORESUME") \
and os.path.exists(self.settings["autoresume_path"]+"run_local"):
print "Resume point detected, skipping run_local operation..."
- else:
+ else:
try:
if os.path.exists(self.settings["controller_file"]):
- cmd("/bin/bash "+self.settings["controller_file"]+" run","run script failed.")
+ cmd("/bin/bash "+self.settings["controller_file"]+" run","run script failed.")
touch(self.settings["autoresume_path"]+"run_local")
except CatalystError:
def run(self):
self.chroot_lock.write_lock()
- # Kill any pids in the chroot
- self.kill_chroot_pids()
+ # Kill any pids in the chroot
+ self.kill_chroot_pids()
- # Check for mounts right away and abort if we cannot unmount them.
- self.mount_safety_check()
+ # Check for mounts right away and abort if we cannot unmount them.
+ self.mount_safety_check()
- if self.settings.has_key("CLEAR_AUTORESUME"):
- self.clear_autoresume()
- if self.settings.has_key("PURGE"):
- self.purge()
+ if self.settings.has_key("CLEAR_AUTORESUME"):
+ self.clear_autoresume()
+ if self.settings.has_key("PURGE"):
+ self.purge()
for x in self.settings["action_sequence"]:
print "Running action sequence: "+x
self.chroot_lock.unlock()
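
# Editor's note: illustrative sketch only, not part of the patch.  run()
# drives a build by walking action_sequence and invoking the method of the
# same name on the target (the elided lines between the loop header and the
# unlock call presumably do a getattr-style dispatch with error handling).
# MiniTarget below is a hypothetical stand-in for generic_stage_target.
class MiniTarget:
    def __init__(self):
        self.action_sequence = ["unpack", "chroot_setup", "unbind"]
    def unpack(self):
        print "unpack step"
    def chroot_setup(self):
        print "chroot_setup step"
    def unbind(self):
        print "unbind step"
    def run(self):
        for x in self.action_sequence:
            print "Running action sequence: " + x
            getattr(self, x)()  # dispatch by name

# MiniTarget().run()
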
- def unmerge(self):
- if self.settings.has_key("AUTORESUME") \
+ def unmerge(self):
+ if self.settings.has_key("AUTORESUME") \
and os.path.exists(self.settings["autoresume_path"]+"unmerge"):
- print "Resume point detected, skipping unmerge operation..."
- else:
+ print "Resume point detected, skipping unmerge operation..."
+ else:
if self.settings.has_key(self.settings["spec_prefix"]+"/unmerge"):
- if type(self.settings[self.settings["spec_prefix"]+"/unmerge"])==types.StringType:
+ if type(self.settings[self.settings["spec_prefix"]+"/unmerge"])==types.StringType:
self.settings[self.settings["spec_prefix"]+"/unmerge"]=[self.settings[self.settings["spec_prefix"]+"/unmerge"]]
- myunmerge=self.settings[self.settings["spec_prefix"]+"/unmerge"][:]
-
- for x in range(0,len(myunmerge)):
- #surround args with quotes for passing to bash,
- #allows things like "<" to remain intact
- myunmerge[x]="'"+myunmerge[x]+"'"
- myunmerge=string.join(myunmerge)
-
- #before cleaning, unmerge stuff:
- try:
+ myunmerge=self.settings[self.settings["spec_prefix"]+"/unmerge"][:]
+
+ for x in range(0,len(myunmerge)):
+ #surround args with quotes for passing to bash,
+ #allows things like "<" to remain intact
+ myunmerge[x]="'"+myunmerge[x]+"'"
+ myunmerge=string.join(myunmerge)
+
+ #before cleaning, unmerge stuff:
+ try:
cmd("/bin/bash "+self.settings["sharedir"]+"/targets/" \
+self.settings["target"]+"/unmerge.sh "+myunmerge,"Unmerge script failed.")
print "unmerge shell script"
- except CatalystError:
+ except CatalystError:
self.unbind()
raise
touch(self.settings["autoresume_path"]+"unmerge")
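
# Editor's note: illustrative sketch only, not part of the patch.  The
# quoting loop in unmerge() above exists so that atoms such as
# "<sys-apps/foo-2" reach unmerge.sh as literal arguments instead of being
# interpreted as shell redirection.  shell_quote_atoms is a hypothetical
# helper showing the same idea (it assumes atoms contain no single quotes).
import string

def shell_quote_atoms(atoms):
    quoted = []
    for atom in atoms:
        quoted.append("'" + atom + "'")  # keep "<", ">" and "*" literal
    return string.join(quoted)

# shell_quote_atoms(["<sys-apps/foo-2", "net-misc/bar"])
# -> "'<sys-apps/foo-2' 'net-misc/bar'"
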
def target_setup(self):
- if self.settings.has_key("AUTORESUME") \
+ if self.settings.has_key("AUTORESUME") \
and os.path.exists(self.settings["autoresume_path"]+"target_setup"):
- print "Resume point detected, skipping target_setup operation..."
- else:
+ print "Resume point detected, skipping target_setup operation..."
+ else:
print "Setting up filesystems per filesystem type"
cmd("/bin/bash "+self.settings["controller_file"]+" target_image_setup "+ self.settings["target_path"],"target_image_setup script failed.")
touch(self.settings["autoresume_path"]+"target_setup")
def setup_overlay(self):
- if self.settings.has_key("AUTORESUME") \
+ if self.settings.has_key("AUTORESUME") \
and os.path.exists(self.settings["autoresume_path"]+"setup_overlay"):
- print "Resume point detected, skipping setup_overlay operation..."
- else:
+ print "Resume point detected, skipping setup_overlay operation..."
+ else:
if self.settings.has_key(self.settings["spec_prefix"]+"/overlay") \
and os.path.exists(self.settings["spec_prefix"]+"/overlay"):
cmd("rsync -a "+self.settings[self.settings["spec_prefix"]+"/overlay"]+"/ "+\
touch(self.settings["autoresume_path"]+"setup_overlay")
def create_iso(self):
- if self.settings.has_key("AUTORESUME") \
+ if self.settings.has_key("AUTORESUME") \
and os.path.exists(self.settings["autoresume_path"]+"create_iso"):
- print "Resume point detected, skipping create_iso operation..."
- else:
+ print "Resume point detected, skipping create_iso operation..."
+ else:
# create the ISO - this is the preferred method (the iso scripts do not always work)
if self.settings.has_key("iso"):
cmd("/bin/bash "+self.settings["controller_file"]+" iso "+\
print "A CD Image will not be created, skipping create-iso.sh..."
- def build_packages(self):
- if self.settings.has_key("AUTORESUME") \
+ def build_packages(self):
+ if self.settings.has_key("AUTORESUME") \
and os.path.exists(self.settings["autoresume_path"]+"build_packages"):
- print "Resume point detected, skipping build_packages operation..."
- else:
+ print "Resume point detected, skipping build_packages operation..."
+ else:
if self.settings.has_key(self.settings["spec_prefix"]+"/packages"):
if self.settings.has_key("AUTORESUME") \
and os.path.exists(self.settings["autoresume_path"]+"build_packages"):
                        raise CatalystError,self.settings["spec_prefix"] + " build aborting due to error."
def build_kernel(self):
- if self.settings.has_key("AUTORESUME") \
+ if self.settings.has_key("AUTORESUME") \
and os.path.exists(self.settings["autoresume_path"]+"build_kernel"):
- print "Resume point detected, skipping build_kernel operation..."
- else:
+ print "Resume point detected, skipping build_kernel operation..."
+ else:
if self.settings.has_key("boot/kernel"):
- try:
+ try:
mynames=self.settings["boot/kernel"]
if type(mynames)==types.StringType:
mynames=[mynames]
for kname in mynames:
if self.settings.has_key("AUTORESUME") \
- and os.path.exists(self.settings["autoresume_path"]+"build_kernel_"+kname):
+ and os.path.exists(self.settings["autoresume_path"]+"build_kernel_"+kname):
print "Resume point detected, skipping build_kernel for "+kname+" operation..."
else:
-
- try:
- if not os.path.exists(self.settings["boot/kernel/"+kname+"/config"]):
- self.unbind()
- raise CatalystError, "Can't find kernel config: " \
- +self.settings["boot/kernel/"+kname+"/config"]
+
+ try:
+ if not os.path.exists(self.settings["boot/kernel/"+kname+"/config"]):
+ self.unbind()
+ raise CatalystError, "Can't find kernel config: " \
+ +self.settings["boot/kernel/"+kname+"/config"]
- except TypeError:
- raise CatalystError, "Required value boot/kernel/config not specified"
+ except TypeError:
+ raise CatalystError, "Required value boot/kernel/config not specified"
- try:
- cmd("cp "+self.settings["boot/kernel/"+kname+"/config"]+" "+ \
- self.settings["chroot_path"]+"/var/tmp/"+kname+".config", \
- "Couldn't copy kernel config: "+self.settings["boot/kernel/"+kname+"/config"])
+ try:
+ cmd("cp "+self.settings["boot/kernel/"+kname+"/config"]+" "+ \
+ self.settings["chroot_path"]+"/var/tmp/"+kname+".config", \
+ "Couldn't copy kernel config: "+self.settings["boot/kernel/"+kname+"/config"])
- except CatalystError:
- self.unbind()
+ except CatalystError:
+ self.unbind()
- # If we need to pass special options to the bootloader
- # for this kernel put them into the environment.
- if self.settings.has_key("boot/kernel/"+kname+"/kernelopts"):
- myopts=self.settings["boot/kernel/"+kname+"/kernelopts"]
+ # If we need to pass special options to the bootloader
+ # for this kernel, put them into the environment.
+ if self.settings.has_key("boot/kernel/"+kname+"/kernelopts"):
+ myopts=self.settings["boot/kernel/"+kname+"/kernelopts"]
- if type(myopts) != types.StringType:
- myopts = string.join(myopts)
- os.putenv(kname+"_kernelopts", myopts)
+ if type(myopts) != types.StringType:
+ myopts = string.join(myopts)
+ os.putenv(kname+"_kernelopts", myopts)
- else:
- os.putenv(kname+"_kernelopts", "")
+ else:
+ os.putenv(kname+"_kernelopts", "")
- if not self.settings.has_key("boot/kernel/"+kname+"/extraversion"):
- self.settings["boot/kernel/"+kname+"/extraversion"]=""
+ if not self.settings.has_key("boot/kernel/"+kname+"/extraversion"):
+ self.settings["boot/kernel/"+kname+"/extraversion"]=""
- os.putenv("clst_kextraversion", self.settings["boot/kernel/"+kname+"/extraversion"])
- if self.settings.has_key("boot/kernel/"+kname+"/initramfs_overlay"):
+ os.putenv("clst_kextraversion", self.settings["boot/kernel/"+kname+"/extraversion"])
+ if self.settings.has_key("boot/kernel/"+kname+"/initramfs_overlay"):
if os.path.exists(self.settings["boot/kernel/"+kname+"/initramfs_overlay"]):
- print "Copying initramfs_overlay dir " +self.settings["boot/kernel/"+kname+"/initramfs_overlay"]
+ print "Copying initramfs_overlay dir " +self.settings["boot/kernel/"+kname+"/initramfs_overlay"]
- cmd("mkdir -p "+self.settings["chroot_path"]+"/tmp/initramfs_overlay/" + \
- self.settings["boot/kernel/"+kname+"/initramfs_overlay"])
+ cmd("mkdir -p "+self.settings["chroot_path"]+"/tmp/initramfs_overlay/" + \
+ self.settings["boot/kernel/"+kname+"/initramfs_overlay"])
- cmd("cp -R "+self.settings["boot/kernel/"+kname+"/initramfs_overlay"]+"/* " + \
- self.settings["chroot_path"] + "/tmp/initramfs_overlay/" + \
- self.settings["boot/kernel/"+kname+"/initramfs_overlay"])
+ cmd("cp -R "+self.settings["boot/kernel/"+kname+"/initramfs_overlay"]+"/* " + \
+ self.settings["chroot_path"] + "/tmp/initramfs_overlay/" + \
+ self.settings["boot/kernel/"+kname+"/initramfs_overlay"])
-
- # execute the script that builds the kernel
- cmd("/bin/bash "+self.settings["controller_file"]+" kernel "+kname,\
+
+ # execute the script that builds the kernel
+ cmd("/bin/bash "+self.settings["controller_file"]+" kernel "+kname,\
"Runscript kernel build failed")
- if self.settings.has_key("boot/kernel/"+kname+"/initramfs_overlay"):
+ if self.settings.has_key("boot/kernel/"+kname+"/initramfs_overlay"):
if os.path.exists(self.settings["chroot_path"]+"/tmp/initramfs_overlay/"):
- print "Cleaning up temporary overlay dir"
- cmd("rm -R "+self.settings["chroot_path"]+"/tmp/initramfs_overlay/")
-
- touch(self.settings["autoresume_path"]+"build_kernel_"+kname)
+ print "Cleaning up temporary overlay dir"
+ cmd("rm -R "+self.settings["chroot_path"]+"/tmp/initramfs_overlay/")
+
+ touch(self.settings["autoresume_path"]+"build_kernel_"+kname)
# execute the script that cleans up the kernel build environment
cmd("/bin/bash "+self.settings["controller_file"]+" post-kmerge ",\
raise CatalystError,"build aborting due to kernel build error."
def bootloader(self):
- if self.settings.has_key("AUTORESUME") \
+ if self.settings.has_key("AUTORESUME") \
and os.path.exists(self.settings["autoresume_path"]+"bootloader"):
- print "Resume point detected, skipping bootloader operation..."
- else:
+ print "Resume point detected, skipping bootloader operation..."
+ else:
try:
cmd("/bin/bash "+self.settings["controller_file"]+" bootloader " + self.settings["target_path"],\
"Bootloader runscript failed.")
self.unbind()
raise CatalystError,"Runscript aborting due to error."
- def livecd_update(self):
- if self.settings.has_key("AUTORESUME") \
+ def livecd_update(self):
+ if self.settings.has_key("AUTORESUME") \
and os.path.exists(self.settings["autoresume_path"]+"livecd_update"):
- print "Resume point detected, skipping build_packages operation..."
- else:
+ print "Resume point detected, skipping build_packages operation..."
+ else:
try:
cmd("/bin/bash "+self.settings["controller_file"]+" livecd-update",\
"livecd-update failed.")
def clear_chroot(self):
myemp=self.settings["chroot_path"]
if os.path.isdir(myemp):
- print "Emptying directory",myemp
- # stat the dir, delete the dir, recreate the dir and set
- # the proper perms and ownership
- mystat=os.stat(myemp)
- #cmd("rm -rf "+myemp, "Could not remove existing file: "+myemp)
- shutil.rmtree(myemp)
- os.makedirs(myemp,0755)
- os.chown(myemp,mystat[ST_UID],mystat[ST_GID])
- os.chmod(myemp,mystat[ST_MODE])
+ print "Emptying directory",myemp
+ # stat the dir, delete the dir, recreate the dir and set
+ # the proper perms and ownership
+ mystat=os.stat(myemp)
+ #cmd("rm -rf "+myemp, "Could not remove existing file: "+myemp)
+ shutil.rmtree(myemp)
+ os.makedirs(myemp,0755)
+ os.chown(myemp,mystat[ST_UID],mystat[ST_GID])
+ os.chmod(myemp,mystat[ST_MODE])
def clear_packages(self):
- if self.settings.has_key("PKGCACHE"):
+ if self.settings.has_key("PKGCACHE"):
print "purging the pkgcache ..."
myemp=self.settings["pkgcache_path"]
if os.path.isdir(myemp):
- print "Emptying directory",myemp
- # stat the dir, delete the dir, recreate the dir and set
- # the proper perms and ownership
- mystat=os.stat(myemp)
- #cmd("rm -rf "+myemp, "Could not remove existing file: "+myemp)
- shutil.rmtree(myemp)
- os.makedirs(myemp,0755)
- os.chown(myemp,mystat[ST_UID],mystat[ST_GID])
- os.chmod(myemp,mystat[ST_MODE])
-
+ print "Emptying directory",myemp
+ # stat the dir, delete the dir, recreate the dir and set
+ # the proper perms and ownership
+ mystat=os.stat(myemp)
+ #cmd("rm -rf "+myemp, "Could not remove existing file: "+myemp)
+ shutil.rmtree(myemp)
+ os.makedirs(myemp,0755)
+ os.chown(myemp,mystat[ST_UID],mystat[ST_GID])
+ os.chmod(myemp,mystat[ST_MODE])
+
def clear_autoresume(self):
# clean resume points since they are no longer needed
if self.settings.has_key("AUTORESUME"):
print "Removing AutoResume Points: ..."
myemp=self.settings["autoresume_path"]
if os.path.isdir(myemp):
- print "Emptying directory",myemp
- # stat the dir, delete the dir, recreate the dir and set
- # the proper perms and ownership
- mystat=os.stat(myemp)
- #cmd("rm -rf "+myemp, "Could not remove existing file: "+myemp)
- shutil.rmtree(myemp)
- os.makedirs(myemp,0755)
+ print "Emptying directory",myemp
+ # stat the dir, delete the dir, recreate the dir and set
+ # the proper perms and ownership
+ mystat=os.stat(myemp)
+ #cmd("rm -rf "+myemp, "Could not remove existing file: "+myemp)
+ shutil.rmtree(myemp)
+ os.makedirs(myemp,0755)
os.chown(myemp,mystat[ST_UID],mystat[ST_GID])
os.chmod(myemp,mystat[ST_MODE])
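
# Editor's note: illustrative sketch only, not part of the patch.
# clear_chroot(), clear_packages() and clear_autoresume() above all repeat
# the same stat / rmtree / makedirs / chown / chmod dance; a hypothetical
# helper capturing that "empty the directory but keep its owner and mode"
# pattern:
import os, shutil
from stat import ST_UID, ST_GID, ST_MODE

def empty_dir_preserving_perms(path):
    if not os.path.isdir(path):
        return
    mystat = os.stat(path)   # remember owner and mode first
    shutil.rmtree(path)      # drop the contents wholesale
    os.makedirs(path, 0755)  # recreate the directory
    os.chown(path, mystat[ST_UID], mystat[ST_GID])
    os.chmod(path, mystat[ST_MODE])
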
- def gen_digest_file(self,file):
- if os.path.exists(file+".digests"):
- os.remove(file+".digests")
- if self.settings.has_key("SHA") or self.settings.has_key("MD5"):
- if os.path.exists(file):
- myf=open(file+".digests","w")
+ def gen_digest_file(self,file):
+ if os.path.exists(file+".digests"):
+ os.remove(file+".digests")
+ if self.settings.has_key("SHA") or self.settings.has_key("MD5"):
+ if os.path.exists(file):
+ myf=open(file+".digests","w")
- if self.settings.has_key("MD5"):
+ if self.settings.has_key("MD5"):
if self.settings.has_key("VERBOSE"):
- md5=calc_md5(file,True)
+ md5=calc_md5(file,True)
else:
- md5=calc_md5(file)
- myf.write("MD5: "+md5+"\n")
+ md5=calc_md5(file)
+ myf.write("MD5: "+md5+"\n")
- if self.settings.has_key("SHA"):
+ if self.settings.has_key("SHA"):
if self.settings.has_key("VERBOSE"):
- sha=calc_sha(file,True)
+ sha=calc_sha(file,True)
else:
- sha=calc_sha(file)
- myf.write("SHA: "+sha+"\n")
+ sha=calc_sha(file)
+ myf.write("SHA: "+sha+"\n")
- myf.close()
+ myf.close()
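
# Editor's note: illustrative sketch only, not part of the patch.
# gen_digest_file() uses calc_md5()/calc_sha() from catalyst_support; the
# stand-alone equivalent below uses hashlib (assuming a Python with hashlib
# available, and assuming SHA here means SHA-1) just to show the shape of
# the ".digests" file written next to each artifact.
import hashlib

def gen_digest_file_sketch(path, do_md5=True, do_sha=True):
    data = open(path, "rb").read()
    out = open(path + ".digests", "w")
    if do_md5:
        out.write("MD5: " + hashlib.md5(data).hexdigest() + "\n")
    if do_sha:
        out.write("SHA: " + hashlib.sha1(data).hexdigest() + "\n")
    out.close()
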
def purge(self):
- countdown(10,"Purging Caches ...")
- if self.settings.has_key("PURGE"):
+ countdown(10,"Purging Caches ...")
+ if self.settings.has_key("PURGE"):
print "clearing autoresume ..."
self.clear_autoresume()
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/modules/grp_target.py,v 1.19 2005/12/02 17:05:56 wolf31o2 Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/modules/grp_target.py,v 1.20 2005/12/02 19:37:02 wolf31o2 Exp $
"""
The builder class for GRP (Gentoo Reference Platform) builds.
generic_stage_target.__init__(self,spec,addlargs)
- def set_target_path(self):
- self.settings["target_path"]=normpath(self.settings["storedir"]+"/builds/"+self.settings["target_subpath"]+"/")
- if self.settings.has_key("AUTORESUME") \
- and os.path.exists(self.settings["autoresume_path"]+"setup_target_path"):
- print "Resume point detected, skipping target path setup operation..."
- else:
- # first clean up any existing target stuff
- #if os.path.isdir(self.settings["target_path"]):
- #cmd("rm -rf "+self.settings["target_path"],
- #"Could not remove existing directory: "+self.settings["target_path"])
- if not os.path.exists(self.settings["target_path"]):
- os.makedirs(self.settings["target_path"])
-
+ def set_target_path(self):
+ self.settings["target_path"]=normpath(self.settings["storedir"]+"/builds/"+self.settings["target_subpath"]+"/")
+ if self.settings.has_key("AUTORESUME") \
+ and os.path.exists(self.settings["autoresume_path"]+"setup_target_path"):
+ print "Resume point detected, skipping target path setup operation..."
+ else:
+ # first clean up any existing target stuff
+ #if os.path.isdir(self.settings["target_path"]):
+ #cmd("rm -rf "+self.settings["target_path"],
+ #"Could not remove existing directory: "+self.settings["target_path"])
+ if not os.path.exists(self.settings["target_path"]):
+ os.makedirs(self.settings["target_path"])
+
touch(self.settings["autoresume_path"]+"setup_target_path")
def run_local(self):
self.unbind()
raise CatalystError,"GRP build aborting due to error."
- def set_pkgcache_path(self):
- if self.settings.has_key("pkgcache_path"):
- if type(self.settings["pkgcache_path"]) != types.StringType:
- self.settings["pkgcache_path"]=normpath(string.join(self.settings["pkgcache_path"]))
- else:
- generic_stage_target.set_pkgcache_path(self)
+ def set_pkgcache_path(self):
+ if self.settings.has_key("pkgcache_path"):
+ if type(self.settings["pkgcache_path"]) != types.StringType:
+ self.settings["pkgcache_path"]=normpath(string.join(self.settings["pkgcache_path"]))
+ else:
+ generic_stage_target.set_pkgcache_path(self)
def set_use(self):
- generic_stage_target.set_use(self)
- if self.settings.has_key("use"):
- self.settings["use"].append("bindist")
- else:
- self.settings["use"]=["bindist"]
+ generic_stage_target.set_use(self)
+ if self.settings.has_key("use"):
+ self.settings["use"].append("bindist")
+ else:
+ self.settings["use"]=["bindist"]
def set_mounts(self):
- self.mounts.append("/tmp/grp")
- self.mountmap["/tmp/grp"]=self.settings["target_path"]
+ self.mounts.append("/tmp/grp")
+ self.mountmap["/tmp/grp"]=self.settings["target_path"]
def generate_digests(self):
for pkgset in self.settings["grp"]:
- if self.settings["grp/"+pkgset+"/type"] == "pkgset":
+ if self.settings["grp/"+pkgset+"/type"] == "pkgset":
destdir=normpath(self.settings["target_path"]+"/"+pkgset+"/All")
print "Digesting files in the pkgset....."
digests=glob.glob(destdir+'/*.digests')
for i in digests:
- if os.path.exists(i):
+ if os.path.exists(i):
os.remove(i)
files=os.listdir(destdir)
#ignore files starting with '.' using list comprehension
files=[filename for filename in files if filename[0] != '.']
for i in files:
- if os.path.isfile(normpath(destdir+"/"+i)):
+ if os.path.isfile(normpath(destdir+"/"+i)):
self.gen_digest_file(normpath(destdir+"/"+i))
- else:
+ else:
destdir=normpath(self.settings["target_path"]+"/"+pkgset)
print "Digesting files in the srcset....."
digests=glob.glob(destdir+'/*.digests')
for i in digests:
- if os.path.exists(i):
+ if os.path.exists(i):
os.remove(i)
files=os.listdir(destdir)
#ignore files starting with '.' using list comprehension
files=[filename for filename in files if filename[0] != '.']
for i in files:
- if os.path.isfile(normpath(destdir+"/"+i)):
+ if os.path.isfile(normpath(destdir+"/"+i)):
self.gen_digest_file(normpath(destdir+"/"+i))
-
+
def set_action_sequence(self):
- self.settings["action_sequence"]=["unpack","unpack_snapshot",\
- "config_profile_link","setup_confdir","bind","chroot_setup",\
- "setup_environment","run_local","unbind",\
- "generate_digests","clear_autoresume"]
+ self.settings["action_sequence"]=["unpack","unpack_snapshot",\
+ "config_profile_link","setup_confdir","bind","chroot_setup",\
+ "setup_environment","run_local","unbind",\
+ "generate_digests","clear_autoresume"]
def register(foo):
foo.update({"grp":grp_target})
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/modules/livecd_stage1_target.py,v 1.21 2005/12/02 17:05:56 wolf31o2 Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/modules/livecd_stage1_target.py,v 1.22 2005/12/02 19:37:02 wolf31o2 Exp $
"""
Builder class for LiveCD stage1.
"config_profile_link","setup_confdir","portage_overlay",\
"bind","chroot_setup","setup_environment","build_packages",\
"unbind", "clean","clear_autoresume"]
- def set_target_path(self):
+ def set_target_path(self):
self.settings["target_path"]=normpath(self.settings["storedir"]+"/builds/"+self.settings["target_subpath"])
if self.settings.has_key("AUTORESUME") \
and os.path.exists(self.settings["autoresume_path"]+"setup_target_path"):
if not os.path.exists(self.settings["target_path"]):
os.makedirs(self.settings["target_path"])
-
+
def set_target_path(self):
pass
def set_spec_prefix(self):
- self.settings["spec_prefix"]="livecd"
+ self.settings["spec_prefix"]="livecd"
def set_use(self):
generic_stage_target.set_use(self)
self.settings["use"].append("livecd")
def set_packages(self):
- generic_stage_target.set_packages(self)
- if self.settings.has_key(self.settings["spec_prefix"]+"/packages"):
+ generic_stage_target.set_packages(self)
+ if self.settings.has_key(self.settings["spec_prefix"]+"/packages"):
if type(self.settings[self.settings["spec_prefix"]+"/packages"]) == types.StringType:
- self.settings[self.settings["spec_prefix"]+"/packages"] = \
+ self.settings[self.settings["spec_prefix"]+"/packages"] = \
self.settings[self.settings["spec_prefix"]+"/packages"].split()
self.settings[self.settings["spec_prefix"]+"/packages"].append("livecd-tools")
def set_pkgcache_path(self):
- if self.settings.has_key("pkgcache_path"):
+ if self.settings.has_key("pkgcache_path"):
if type(self.settings["pkgcache_path"]) != types.StringType:
- self.settings["pkgcache_path"]=normpath(string.join(self.settings["pkgcache_path"]))
- else:
+ self.settings["pkgcache_path"]=normpath(string.join(self.settings["pkgcache_path"]))
+ else:
generic_stage_target.set_pkgcache_path(self)
def register(foo):
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/modules/livecd_stage2_target.py,v 1.52 2005/12/02 17:05:56 wolf31o2 Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/modules/livecd_stage2_target.py,v 1.53 2005/12/02 19:37:02 wolf31o2 Exp $
"""
Builder class for a LiveCD stage2 build.
file_locate(self.settings, ["cdtar","controller_file"])
def set_source_path(self):
- self.settings["source_path"]=normpath(self.settings["storedir"]+"/builds/"+self.settings["source_subpath"]+".tar.bz2")
- if os.path.isfile(self.settings["source_path"]):
+ self.settings["source_path"]=normpath(self.settings["storedir"]+"/builds/"+self.settings["source_subpath"]+".tar.bz2")
+ if os.path.isfile(self.settings["source_path"]):
self.settings["source_path_md5sum"]=calc_md5(self.settings["source_path"])
- else:
+ else:
self.settings["source_path"]=normpath(self.settings["storedir"]+"/tmp/"+self.settings["source_subpath"]+"/")
if not os.path.exists(self.settings["source_path"]):
- raise CatalystError,"Source Path: "+self.settings["source_path"]+" does not exist."
+ raise CatalystError,"Source Path: "+self.settings["source_path"]+" does not exist."
def set_spec_prefix(self):
- self.settings["spec_prefix"]="livecd"
+ self.settings["spec_prefix"]="livecd"
def set_target_path(self):
self.settings["target_path"]=normpath(self.settings["storedir"]+"/builds/"+self.settings["target_subpath"]+"/")
for x in self.settings["livecd/modblacklist"]:
myf.write("\n"+x)
myf.close()
- def unpack(self):
- unpack=True
-
- clst_unpack_md5sum=read_from_clst(self.settings["autoresume_path"]+"unpack")
-
- if os.path.isdir(self.settings["source_path"]):
- unpack_cmd="rsync -a --delete "+self.settings["source_path"]+" "+self.settings["chroot_path"]
- display_msg="\nStarting rsync from "+self.settings["source_path"]+"\nto "+\
- self.settings["chroot_path"]+" (This may take some time) ...\n"
- error_msg="Rsync of "+self.settings["source_path"]+" to "+self.settings["chroot_path"]+" failed."
- invalid_snapshot=False
-
- if self.settings.has_key("AUTORESUME"):
- if os.path.isdir(self.settings["source_path"]) and \
- os.path.exists(self.settings["autoresume_path"]+"unpack"):
- print "Resume point detected, skipping unpack operation..."
- unpack=False
- elif self.settings.has_key("source_path_md5sum"):
- if self.settings["source_path_md5sum"] != clst_unpack_md5sum:
- invalid_snapshot=True
-
- if unpack:
- self.mount_safety_check()
-
- if invalid_snapshot:
- print "No Valid Resume point detected, cleaning up ..."
- #os.remove(self.settings["autoresume_path"]+"dir_setup")
- self.clear_autoresume()
- self.clear_chroot()
- #self.dir_setup()
-
- if not os.path.exists(self.settings["chroot_path"]):
- os.makedirs(self.settings["chroot_path"])
-
- if not os.path.exists(self.settings["chroot_path"]+"/tmp"):
- os.makedirs(self.settings["chroot_path"]+"/tmp",1777)
-
- if self.settings.has_key("PKGCACHE"):
- if not os.path.exists(self.settings["pkgcache_path"]):
- os.makedirs(self.settings["pkgcache_path"],0755)
-
- print display_msg
- cmd(unpack_cmd,error_msg)
-
- if self.settings.has_key("source_path_md5sum"):
- myf=open(self.settings["autoresume_path"]+"unpack","w")
- myf.write(self.settings["source_path_md5sum"])
- myf.close()
- else:
- touch(self.settings["autoresume_path"]+"unpack")
+ def unpack(self):
+ unpack=True
+
+ clst_unpack_md5sum=read_from_clst(self.settings["autoresume_path"]+"unpack")
+
+ if os.path.isdir(self.settings["source_path"]):
+ unpack_cmd="rsync -a --delete "+self.settings["source_path"]+" "+self.settings["chroot_path"]
+ display_msg="\nStarting rsync from "+self.settings["source_path"]+"\nto "+\
+ self.settings["chroot_path"]+" (This may take some time) ...\n"
+ error_msg="Rsync of "+self.settings["source_path"]+" to "+self.settings["chroot_path"]+" failed."
+ invalid_snapshot=False
+
+ if self.settings.has_key("AUTORESUME"):
+ if os.path.isdir(self.settings["source_path"]) and \
+ os.path.exists(self.settings["autoresume_path"]+"unpack"):
+ print "Resume point detected, skipping unpack operation..."
+ unpack=False
+ elif self.settings.has_key("source_path_md5sum"):
+ if self.settings["source_path_md5sum"] != clst_unpack_md5sum:
+ invalid_snapshot=True
+
+ if unpack:
+ self.mount_safety_check()
+
+ if invalid_snapshot:
+ print "No Valid Resume point detected, cleaning up ..."
+ #os.remove(self.settings["autoresume_path"]+"dir_setup")
+ self.clear_autoresume()
+ self.clear_chroot()
+ #self.dir_setup()
+
+ if not os.path.exists(self.settings["chroot_path"]):
+ os.makedirs(self.settings["chroot_path"])
+
+ if not os.path.exists(self.settings["chroot_path"]+"/tmp"):
+ os.makedirs(self.settings["chroot_path"]+"/tmp",1777)
+
+ if self.settings.has_key("PKGCACHE"):
+ if not os.path.exists(self.settings["pkgcache_path"]):
+ os.makedirs(self.settings["pkgcache_path"],0755)
+
+ print display_msg
+ cmd(unpack_cmd,error_msg)
+
+ if self.settings.has_key("source_path_md5sum"):
+ myf=open(self.settings["autoresume_path"]+"unpack","w")
+ myf.write(self.settings["source_path_md5sum"])
+ myf.close()
+ else:
+ touch(self.settings["autoresume_path"]+"unpack")
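
# Editor's note: illustrative sketch only, not part of the patch.  The
# unpack() method above reduces to one decision: skip the unpack when an
# AUTORESUME point exists, but treat a changed source md5sum as an invalid
# snapshot that forces a clean chroot.  unpack_decision is a hypothetical
# condensation of that logic.
def unpack_decision(autoresume, resume_point_exists, recorded_md5, current_md5):
    unpack = True
    invalid_snapshot = False
    if autoresume:
        if resume_point_exists:
            unpack = False           # "Resume point detected, skipping unpack..."
        elif current_md5 is not None and current_md5 != recorded_md5:
            invalid_snapshot = True  # source changed since the resume point was written
    return unpack, invalid_snapshot

# unpack_decision(True, False, "d41d8cd9...", "90015098...") -> (True, True)
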
def set_action_sequence(self):
- self.settings["action_sequence"]=["unpack","unpack_snapshot",\
- "config_profile_link","setup_confdir","portage_overlay",\
- "bind","chroot_setup","setup_environment","run_local",\
- "build_kernel","bootloader","preclean","livecd_update",
- "root_overlay","fsscript","rcupdate","unmerge",\
- "unbind","remove","empty","target_setup",\
- "setup_overlay","create_iso","clear_autoresume"]
+ self.settings["action_sequence"]=["unpack","unpack_snapshot",\
+ "config_profile_link","setup_confdir","portage_overlay",\
+ "bind","chroot_setup","setup_environment","run_local",\
+ "build_kernel","bootloader","preclean","livecd_update",
+ "root_overlay","fsscript","rcupdate","unmerge",\
+ "unbind","remove","empty","target_setup",\
+ "setup_overlay","create_iso","clear_autoresume"]
def register(foo):
foo.update({"livecd-stage2":livecd_stage2_target})
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/modules/netboot_target.py,v 1.7 2005/12/02 17:05:56 wolf31o2 Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/modules/netboot_target.py,v 1.8 2005/12/02 19:37:02 wolf31o2 Exp $
"""
Builder class for a netboot build.
for x in loopy:
if self.settings.has_key("netboot/packages/"+x+"/files"):
- if type(self.settings["netboot/packages/"+x+"/files"]) == types.ListType:
- myfiles.extend(self.settings["netboot/packages/"+x+"/files"])
- else:
- myfiles.append(self.settings["netboot/packages/"+x+"/files"])
+ if type(self.settings["netboot/packages/"+x+"/files"]) == types.ListType:
+ myfiles.extend(self.settings["netboot/packages/"+x+"/files"])
+ else:
+ myfiles.append(self.settings["netboot/packages/"+x+"/files"])
if self.settings.has_key("netboot/extra_files"):
if type(self.settings["netboot/extra_files"]) == types.ListType:
def set_action_sequence(self):
- self.settings["action_sequence"]=["unpack","unpack_snapshot",
- "config_profile_link","setup_confdir","bind","chroot_setup",\
+ self.settings["action_sequence"]=["unpack","unpack_snapshot",
+ "config_profile_link","setup_confdir","bind","chroot_setup",\
"setup_environment","build_packages","build_busybox",\
"build_kernel","copy_files_to_image",\
"clean","create_netboot_files","unbind","clear_autoresume"]
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/modules/stage1_target.py,v 1.13 2005/12/02 17:05:56 wolf31o2 Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/modules/stage1_target.py,v 1.14 2005/12/02 19:37:02 wolf31o2 Exp $
"""
Builder class for a stage1 installation tarball build.
self.settings["stage_path"]=normpath(self.settings["chroot_path"]+self.settings["root_path"])
print "stage1 stage path is "+self.settings["stage_path"]
def set_root_path(self):
- # ROOT= variable for emerges
+ # ROOT= variable for emerges
self.settings["root_path"]=normpath("/tmp/stage1root")
print "stage1 root path is "+self.settings["root_path"]
def set_dest_path(self):
- self.settings["destpath"]=normpath(self.settings["chroot_path"]+self.settings["root_path"])
+ self.settings["destpath"]=normpath(self.settings["chroot_path"]+self.settings["root_path"])
def set_cleanables(self):
generic_stage_target.set_cleanables(self)
self.settings["cleanables"].extend(["/usr/share/gettext","/usr/lib/python2.2/test", "/usr/lib/python2.2/encodings","/usr/lib/python2.2/email", "/usr/lib/python2.2/lib-tk","/usr/share/zoneinfo"])
def override_chost(self):
- if os.environ.has_key("CHOST"):
+ if os.environ.has_key("CHOST"):
self.settings["CHOST"] = os.environ["CHOST"]
- if self.settings.has_key("chost"):
+ if self.settings.has_key("chost"):
self.settings["CHOST"]=list_to_string(self.settings["chost"])
def override_cflags(self):
- if os.environ.has_key("CFLAGS"):
+ if os.environ.has_key("CFLAGS"):
self.settings["CFLAGS"] = os.environ["CFLAGS"]
- if self.settings.has_key("cflags"):
+ if self.settings.has_key("cflags"):
self.settings["CFLAGS"]=list_to_string(self.settings["cflags"])
def override_cxxflags(self):
- if os.environ.has_key("CXXFLAGS"):
+ if os.environ.has_key("CXXFLAGS"):
self.settings["CXXFLAGS"] = os.environ["CXXFLAGS"]
- if self.settings.has_key("cxxflags"):
+ if self.settings.has_key("cxxflags"):
self.settings["CXXFLAGS"]=list_to_string(self.settings["cxxflags"])
- def override_ldflags(self):
- if os.environ.has_key("LDFLAGS"):
+ def override_ldflags(self):
+ if os.environ.has_key("LDFLAGS"):
self.settings["LDFLAGS"] = os.environ["LDFLAGS"]
- if self.settings.has_key("ldflags"):
+ if self.settings.has_key("ldflags"):
self.settings["LDFLAGS"]=list_to_string(self.settings["ldflags"])
def register(foo):
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/modules/stage2_target.py,v 1.6 2005/12/02 17:05:56 wolf31o2 Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/modules/stage2_target.py,v 1.7 2005/12/02 19:37:02 wolf31o2 Exp $
"""
Builder class for a stage2 installation tarball build.
self.valid_values=[]
generic_stage_target.__init__(self,spec,addlargs)
def set_root_path(self):
- # ROOT= variable for emerges
- self.settings["root_path"]=normpath("/tmp/stage1root")
+ # ROOT= variable for emerges
+ self.settings["root_path"]=normpath("/tmp/stage1root")
- def set_source_path(self):
- self.settings["source_path"]=normpath(self.settings["storedir"]+"/tmp/"+self.settings["source_subpath"]+"/"+self.settings["root_path"]+"/")
+ def set_source_path(self):
+ self.settings["source_path"]=normpath(self.settings["storedir"]+"/tmp/"+self.settings["source_subpath"]+"/"+self.settings["root_path"]+"/")
- # reset the root path so the preclean doesnt fail
- generic_stage_target.set_root_path(self)
-
- if os.path.isdir(self.settings["source_path"]):
- print "\nUsing seed-stage from "+self.settings["source_path"]
- print "Delete this folder if you wish to use a seed stage tarball instead\n"
- else:
- self.settings["source_path"]=normpath(self.settings["storedir"]+"/builds/"+self.settings["source_subpath"]+".tar.bz2")
- if os.path.isfile(self.settings["source_path"]):
- if os.path.exists(self.settings["source_path"]):
- self.settings["source_path_md5sum"]=calc_md5(self.settings["source_path"])
+ # reset the root path so the preclean doesn't fail
+ generic_stage_target.set_root_path(self)
+
+ if os.path.isdir(self.settings["source_path"]):
+ print "\nUsing seed-stage from "+self.settings["source_path"]
+ print "Delete this folder if you wish to use a seed stage tarball instead\n"
+ else:
+ self.settings["source_path"]=normpath(self.settings["storedir"]+"/builds/"+self.settings["source_subpath"]+".tar.bz2")
+ if os.path.isfile(self.settings["source_path"]):
+ if os.path.exists(self.settings["source_path"]):
+ self.settings["source_path_md5sum"]=calc_md5(self.settings["source_path"])
def register(foo):
foo.update({"stage2":stage2_target})
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/modules/stage4_target.py,v 1.10 2005/12/02 17:05:56 wolf31o2 Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/modules/stage4_target.py,v 1.11 2005/12/02 19:37:02 wolf31o2 Exp $
"""
Builder class for a stage4 installation tarball build.
self.required_values=[]
self.valid_values=self.required_values[:]
self.valid_values.extend(["stage4/use", "stage4/packages", "stage4/root_overlay", "stage4/fsscript", \
- "stage4/rcadd","stage4/rcdel"])
+ "stage4/rcadd","stage4/rcdel"])
generic_stage_target.__init__(self,spec,addlargs)
def set_pkgcache_path(self):
# Copyright 1999-2005 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo/src/catalyst/modules/tinderbox_target.py,v 1.15 2005/12/02 17:05:56 wolf31o2 Exp $
+# $Header: /var/cvsroot/gentoo/src/catalyst/modules/tinderbox_target.py,v 1.16 2005/12/02 19:37:02 wolf31o2 Exp $
"""
Builder class for the tinderbox target.
self.unbind()
raise CatalystError,"Tinderbox aborting due to error."
- def set_pkgcache_path(self):
- if self.settings.has_key("pkgcache_path"):
- if type(self.settings["pkgcache_path"]) != types.StringType:
- self.settings["pkgcache_path"]=normpath(string.join(self.settings["pkgcache_path"]))
- else:
- generic_stage_target.set_pkgcache_path(self)
+ def set_pkgcache_path(self):
+ if self.settings.has_key("pkgcache_path"):
+ if type(self.settings["pkgcache_path"]) != types.StringType:
+ self.settings["pkgcache_path"]=normpath(string.join(self.settings["pkgcache_path"]))
+ else:
+ generic_stage_target.set_pkgcache_path(self)
def set_cleanables(self):
- self.settings["cleanables"]=["/etc/resolv.conf","/var/tmp/*","/root/*",\
+ self.settings["cleanables"]=["/etc/resolv.conf","/var/tmp/*","/root/*",\
"/usr/portage"]
def set_action_sequence(self):
- #Default action sequence for run method
- self.settings["action_sequence"]=["unpack","unpack_snapshot",\
- "config_profile_link","setup_confdir","bind","chroot_setup",\
- "setup_environment","run_local","preclean","unbind","clean",\
+ #Default action sequence for run method
+ self.settings["action_sequence"]=["unpack","unpack_snapshot",\
+ "config_profile_link","setup_confdir","bind","chroot_setup",\
+ "setup_environment","run_local","preclean","unbind","clean",\
"clear_autoresume"]
def register(foo):