#!/usr/bin/python

import os,sys,imp,string

def die(msg=None):
	if msg:
		print "catalyst: "+msg
	sys.exit(1)

def warn(msg):
	print "catalyst: "+msg

def usage():
	print "usage: meep!"

if len(sys.argv)==1 or sys.argv[1] in ["-h","--help"]:
	usage()
	sys.exit(1)
elif os.getuid()!=0:
	#non-root callers can still get -h and --help to work.
	die("This script requires root privileges to operate.")

"""
Overview of catalyst operation
==============================

* The program starts, and the local machine type is detected.

* Based on this information, catalyst determines what kind of machine types
  it can build for (amd64 and ia64 can build for x86 as well, for example.)
  The appropriate arch plugins are loaded, which contain builder classes for
  each supported sub-arch.

* Command-line arguments are parsed. If specified, a spec file is read.

* These build variables are stored in an internal "spec" object, which will
  be a standard python dictionary. This spec dictionary contains all relevant
  build-related information.

* The spec object is passed to the appropriate subarch builder constructor.
  The subarch builder constructor updates the spec object with variables
  relevant to the sub-arch (pentium4, g3, etc.)

* The spec object is passed to the appropriate target constructor. The target
  constructor updates the spec object to contain data relevant to the
  particular target (stage1, stage3, grp, etc.)

* The full data of the spec object is written to disc, so there is a complete
  record of all variables that will be used to build what we're building.
  This will allow for another person to re-use this information to replicate
  our work (it should be possible to distribute a spec file along with a
  portage snapshot and a starter tarball, and our build can be replicated
  exactly on any machine.) The spec object contains data like CFLAGS, CHOST,
  subarch, mainarch, the profile used to build, and for GRP and LiveCDs the
  complete package build list. This is important to allow work to be
  replicated. It's possible that the stage1/2/3.sh scripts should be
  distributed as well, to allow proper replication of work.

* The build process begins by calling the appropriate method of the builder
  instance. This includes cleanup, setup of chroot, entering the chroot,
  running the appropriate bash build script, checking for error conditions,
  and finishing up.

* The catalyst process is now complete :)
"""
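# As a rough illustration of the "spec" dictionary described above (the keys
# and values below are hypothetical examples, not an authoritative list; the
# real contents are filled in from the spec file and by the subarch and
# target constructors), a stage3 build spec might look something like:
#
#	spec={	"mainarch" : "x86",
#		"subarch"  : "pentium4",
#		"CHOST"    : "i686-pc-linux-gnu",
#		"CFLAGS"   : "-O2 -mcpu=pentium4 -pipe",
#		"target"   : "stage3",
#		"profile"  : "default-x86"
#	}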
targetmap={ "x86" : ["x86"], "amd64" : ["x86","amd64"] } mymachine=os.uname()[4] if not machinemap.has_key(mymachine): print "Unknown machine type:",mymachine sys.exit(1) hostarch=machinemap[mymachine] print "Host architecture:",hostarch print "Supported architectures for targets:",string.join(targetmap[hostarch]) print "Loading plugins:", archmap={} subarchmap={} for x in targetmap[hostarch]: fh=open("arch/"+x+".py") #this next line loads the plugin as a module and assigns it to archmap[x] archmap[x]=imp.load_module(x,fh,"arch/"+x+".py",(".py","r",imp.PY_SOURCE)) #this next line registers all the subarches supported in the plugin archmap[x].register(subarchmap) fh.close() print x, print print "Available subarches:",string.join(subarchmap.keys()) import targets targetmap={} targets.register(targetmap) print "Available targets:",string.join(targetmap.keys())