2 import sys,string,os,types,re,signal,traceback,time
# Capability flags for optional privilege/sandbox features; catalyst flips
# these at runtime when the corresponding support is detected.
selinux_capable = False
#userpriv_capable = (os.getuid() == 0)
#fakeroot_capable = False

BASH_BINARY = "/bin/bash"

# set it to 0 for the soft limit, 1 for the hard limit
DESIRED_RLIMIT = 0
try:
    import resource
    max_fd_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[DESIRED_RLIMIT]
except ImportError:
    # hokay, no resource module: fall back to a conservative guess.
    max_fd_limit = 256

# pids this process knows of.
spawned_pids = []
def cleanup(pids, block_exceptions=True):
    """function to go through and reap the list of pids passed to it

    pids -- a single pid or a list of pids to terminate and reap
    block_exceptions -- when True (default), unexpected errors while
    killing are swallowed instead of propagated.
    Each pid is sent SIGTERM, then SIGKILL if it survived, and is removed
    from the module-level spawned_pids registry.
    """
    global spawned_pids
    if isinstance(pids, int):
        pids = [pids]
    for x in pids:
        try:
            os.kill(x, signal.SIGTERM)
            if os.waitpid(x, os.WNOHANG)[1] == 0:
                # feisty bugger, still alive.
                os.kill(x, signal.SIGKILL)
                os.waitpid(x, 0)
        except OSError as oe:
            # errno 10 == ECHILD, 3 == ESRCH: the process is already gone,
            # which is what we wanted anyway; anything else is a real error.
            if oe.errno not in (10, 3):
                if not block_exceptions:
                    raise
        except SystemExit:
            raise
        except Exception:
            if not block_exceptions:
                raise
        # list.remove() raises ValueError (not IndexError) when the value is
        # absent -- the original caught the wrong exception type, so an
        # untracked pid would have crashed this cleanup path.
        try:
            spawned_pids.remove(x)
        except ValueError:
            pass
# a function to turn a string of non-printable characters into a string of
# hex characters
def hexify(text):
    """Return two lowercase hex digits per character of *text*.

    Only the low byte of each character's ordinal is used, matching the
    original nibble-masking behaviour (e.g. "A" -> "41").
    """
    parts = []
    for ch in text:
        # "%02x" yields the same lowercase digits as indexing
        # string.hexdigits with the high and low nibbles.
        parts.append("%02x" % (ord(ch) & 0xFF))
    return "".join(parts)
def read_from_clst(file):
    """Read an entire .clst file and return its contents as one string.

    Returns -1 (historical sentinel kept for callers) when the file
    cannot be opened.
    """
    try:
        myf = open(file, "r")
    except IOError:
        #raise CatalystError, "Could not open file "+file
        return -1
    try:
        # read() is equivalent to concatenating readlines() (newlines kept)
        # without the quadratic string building.
        return myf.read()
    finally:
        # the original leaked the file handle on every call
        myf.close()
# these should never be touched
required_build_targets = ["generic_target", "generic_stage_target"]

# new build types should be added here
valid_build_targets = ["stage1_target", "stage2_target", "stage3_target",
    "stage4_target", "grp_target", "livecd_stage1_target",
    "livecd_stage2_target", "embedded_target", "tinderbox_target",
    "snapshot_target", "netboot_target", "netboot2_target"]

required_config_file_values = ["storedir", "sharedir", "distdir", "portdir"]

# Every required value is also valid; the optional knobs below replace the
# original long run of one-per-line .append() calls (same values, same order).
valid_config_file_values = required_config_file_values[:]
valid_config_file_values.extend([
    "PKGCACHE", "KERNCACHE", "CCACHE", "DISTCC", "ICECREAM", "ENVSCRIPT",
    "AUTORESUME", "FETCH", "CLEAR_AUTORESUME", "options", "DEBUG",
    "VERBOSE", "PURGE", "PURGEONLY", "SNAPCACHE", "snapshot_cache",
    "hash_function", "digests", "contents", "SEEDCACHE",
])
def list_bashify(mylist):
    """Quote each element for bash and join them with spaces.

    Accepts a single string or a list of strings; each element is wrapped
    in single quotes so shell metacharacters like "<" remain intact.
    NOTE: embedded single quotes are not escaped (original behaviour).
    """
    if isinstance(mylist, str):
        mypack = [mylist]
    else:
        mypack = list(mylist)
    # surround args with quotes for passing to bash,
    # allows things like "<" to remain intact
    return " ".join("'" + word + "'" for word in mypack)
def list_to_string(mylist):
    """Join a list of strings with single spaces.

    A plain string is returned unchanged (the original wrapped it in a
    one-element list before joining, which is the same thing).  The old
    body also carried a copy-pasted "surround with quotes" comment and a
    no-op identity-assignment loop -- no quoting ever happened here.
    """
    if isinstance(mylist, str):
        return mylist
    return " ".join(mylist)
# Fatal-error exception for catalyst: constructing it prints the message
# (and any active traceback) to stdout.
# NOTE(review): this excerpt is a numbered, partial extraction -- interior
# lines (e.g. the `if message:` guard and blank `print`s) are missing
# between the lines below; confirm against the full source.
140 class CatalystError(Exception):
141 def __init__(self, message):
# Grab the in-flight exception info; `type` shadows the builtin here.
143 (type,value)=sys.exc_info()[:2]
# NOTE(review): traceback.print_exc() prints the traceback itself and
# returns None, so `print traceback.print_exc(...)` emits a stray "None".
146 print traceback.print_exc(file=sys.stdout)
148 print "!!! catalyst: "+message
# Raised when another catalyst instance already holds a lock file; unlike
# CatalystError it only prints the message (traceback code is commented out).
151 class LockInUse(Exception):
152 def __init__(self, message):
154 #(type,value)=sys.exc_info()[:2]
# "kprint" below is a typo inside a commented-out line; harmless.
157 #kprint traceback.print_exc(file=sys.stdout)
159 print "!!! catalyst lock file in use: "+message
# NOTE(review): orphaned body line of a warning/die helper -- presumably
# `def warn(msg):` -- whose def line is not visible in this excerpt.
167 print "!!! catalyst: "+msg
def find_binary(myc):
    """look through the environmental path for an executable file named whatever myc is

    Returns the full "dir/name" path of the first match on $PATH, or None
    when $PATH is unset or nothing matches.
    """
    p = os.getenv("PATH")
    if p is None:
        return None
    for x in p.split(":"):
        candidate = "%s/%s" % (x, myc)
        # os.access(X_OK) replaces the old `os.stat(...)[0] & 0x0248` mask,
        # which tested sticky + owner-x + group-x but missed other-execute.
        if os.path.exists(candidate) and os.access(candidate, os.X_OK):
            return candidate
    return None
def spawn_bash(mycommand, env={}, debug=False, opt_name=None, **keywords):
    """spawn mycommand as an arguement to bash (i.e. bash -c mycommand)

    mycommand -- shell command string
    env       -- environment for the child; copied before modification
    debug     -- adds bash -x tracing
    opt_name  -- process name, defaults to the command's first word
    Remaining keywords are passed straight through to spawn().
    """
    args = [BASH_BINARY]
    if not opt_name:
        opt_name = mycommand.split()[0]
    # Copy first: the original mutated the caller's dict -- and, worse, the
    # shared `env={}` default -- when injecting BASH_ENV.
    env = dict(env)
    if "BASH_ENV" not in env:
        # deliberately invalid path so bash never sources a real profile
        env["BASH_ENV"] = "/etc/spork/is/not/valid/profile.env"
    if debug:
        args.append("-x")
    args.append("-c")
    args.append(mycommand)
    return spawn(args, env=env, opt_name=opt_name, **keywords)
194 #def spawn_get_output(mycommand,spawn_type=spawn,raw_exit_code=False,emulate_gso=True, \
195 # collect_fds=[1],fd_pipes=None,**keywords):
# NOTE(review): numbered, partial extraction -- the pipe creation, fd_pipes
# setup and cleanup lines are missing between the lines below; the visible
# flow is: spawn the command with stdout captured, read it, reap, return
# [exit-status, output].
197 def spawn_get_output(mycommand,raw_exit_code=False,emulate_gso=True, \
198 collect_fds=[1],fd_pipes=None,**keywords):
199 """call spawn, collecting the output to fd's specified in collect_fds list
200 emulate_gso is a compatibility hack to emulate commands.getstatusoutput's return, minus the
201 requirement it always be a bash call (spawn_type controls the actual spawn call), and minus the
202 'lets let log only stdin and let stderr slide by'.
204 emulate_gso was deprecated from the day it was added, so convert your code over.
205 spawn_type is the passed in function to call- typically spawn_bash, spawn, spawn_sandbox, or spawn_fakeroot"""
206 global selinux_capable
209 #if type(spawn_type) not in [types.FunctionType, types.MethodType]:
210 # s="spawn_type must be passed a function, not",type(spawn_type),spawn_type
# NOTE(review): collect_fds=[1] is a mutable default, but it is only read
# here, so it is not the classic shared-state bug.
217 for x in collect_fds:
219 keywords["returnpid"]=True
221 mypid=spawn_bash(mycommand,fd_pipes=fd_pipes,**keywords)
# A non-list return from spawn means the fork/exec itself failed.
223 if type(mypid) != types.ListType:
225 return [mypid, "%s: No such file or directory" % mycommand.split()[0]]
228 mydata=fd.readlines()
231 mydata=string.join(mydata)
# drop the trailing newline to mimic commands.getstatusoutput
232 if len(mydata) and mydata[-1] == "\n":
234 retval=os.waitpid(mypid[0],0)[1]
237 return [retval,mydata]
238 retval=process_exit_code(retval)
239 return [retval, mydata]
241 # base spawn function
242 def spawn(mycommand,env={},raw_exit_code=False,opt_name=None,fd_pipes=None,returnpid=False,\
243 uid=None,gid=None,groups=None,umask=None,logfile=None,path_lookup=True,\
244 selinux_context=None, raise_signals=False, func_call=False):
245 """base fork/execve function.
246 mycommand is the desired command- if you need a command to execute in a bash/sandbox/fakeroot
247 environment, use the appropriate spawn call. This is a straight fork/exec code path.
248 Can either have a tuple, or a string passed in. If uid/gid/groups/umask specified, it changes
249 the forked process to said value. If path_lookup is on, a non-absolute command will be converted
250 to an absolute command, otherwise it returns None.
252 selinux_context is the desired context, dependant on selinux being available.
253 opt_name controls the name the processor goes by.
254 fd_pipes controls which file descriptor numbers are left open in the forked process- it's a dict of
255 current fd's raw fd #, desired #.
257 func_call is a boolean for specifying to execute a python function- use spawn_func instead.
258 raise_signals is questionable. Basically throw an exception if signal'd. No exception is thrown
261 logfile overloads the specified fd's to write to a tee process which logs to logfile
262 returnpid returns the relevant pids (a list, including the logging process if logfile is on).
264 non-returnpid calls to spawn will block till the process has exited, returning the exitcode/signal
265 raw_exit_code controls whether the actual waitpid result is returned, or intrepretted."""
269 if type(mycommand)==types.StringType:
270 mycommand=mycommand.split()
272 if not os.access(myc, os.X_OK):
275 myc = find_binary(myc)
281 mypid.extend(spawn(('tee','-i','-a',logfile),returnpid=True,fd_pipes={0:pr,1:1,2:2}))
282 retval=os.waitpid(mypid[-1],os.WNOHANG)[1]
287 return process_exit_code(retval)
296 opt_name = mycommand[0]
298 myargs.extend(mycommand[1:])
300 mypid.append(os.fork())
303 spawned_pids.extend(mypid)
309 # this may look ugly, but basically it moves file descriptors around to ensure no
310 # handles that are needed are accidentally closed during the final dup2 calls.
312 if type(fd_pipes)==types.DictType:
317 #build list of which fds will be where, and where they are at currently
320 src_fd.append(fd_pipes[x])
322 # run through said list dup'ing descriptors so that they won't be waxed
323 # by other dup calls.
324 for x in range(0,len(trg_fd)):
325 if trg_fd[x] == src_fd[x]:
327 if trg_fd[x] in src_fd[x+1:]:
328 new=os.dup2(trg_fd[x],max(src_fd) + 1)
332 src_fd[s.index(trg_fd[x])]=new
333 except SystemExit, e:
338 # transfer the fds to their final pre-exec position.
339 for x in range(0,len(trg_fd)):
340 if trg_fd[x] != src_fd[x]:
341 os.dup2(src_fd[x], trg_fd[x])
345 # wax all open descriptors that weren't requested be left open.
346 for x in range(0,max_fd_limit):
350 except SystemExit, e:
355 # note this order must be preserved- can't change gid/groups if you change uid first.
356 if selinux_capable and selinux_context:
358 selinux.setexec(selinux_context)
371 #print "execing", myc, myargs
373 # either use a passed in func for interpretting the results, or return if no exception.
374 # note the passed in list, and dict are expanded.
375 if len(mycommand) == 4:
376 os._exit(mycommand[3](mycommand[0](*mycommand[1],**mycommand[2])))
378 mycommand[0](*mycommand[1],**mycommand[2])
380 print "caught exception",e," in forked func",mycommand[0]
383 #os.execvp(myc,myargs)
384 os.execve(myc,myargs,env)
385 except SystemExit, e:
389 raise str(e)+":\n "+myc+" "+string.join(myargs)
390 print "func call failed"
392 # If the execve fails, we need to report it, and exit
393 # *carefully* --- report error here
396 return # should never get reached
398 # if we were logging, kill the pipes.
406 # loop through pids (typically one, unless logging), either waiting on their death, or waxing them
407 # if the main pid (mycommand) returned badly.
409 retval=os.waitpid(mypid[-1],0)[1]
411 cleanup(mypid[0:-1],block_exceptions=False)
412 # at this point we've killed all other kid pids generated via this call.
416 return process_exit_code(retval,throw_signals=raise_signals)
def cmd(mycmd, myexc="", env=None):
    """Run *mycmd* through bash via spawn_bash().

    mycmd -- shell command string handed to bash
    myexc -- message for the CatalystError raised on failure
    env   -- environment dict passed through (fresh dict per call; the old
             `env={}` default was a shared mutable)
    Raises CatalystError(myexc) when the command exits nonzero.
    """
    if env is None:
        env = {}
    sys.stdout.flush()  # keep our own output ordered ahead of the child's
    retval = spawn_bash(mycmd, env)
    if retval != 0:
        # call form is valid Python 2 and 3, unlike `raise CatalystError, x`
        raise CatalystError(myexc)
def process_exit_code(retval, throw_signals=False):
    """process a waitpid returned exit code, returning exit code if it exit'd, or the
    signal if it died from signalling
    if throw_signals is on, it raises a SystemExit if the process was signaled.
    This is intended for usage with threads, although at the moment you can't signal individual
    threads in python, only the master thread, so it's a questionable option."""
    if (retval & 0xff) == 0:
        # normal exit: the status lives in the high byte
        return retval >> 8  # return exit code
    if throw_signals:
        # use SystemExit, since portage is stupid about exception catching.
        raise SystemExit(retval)
    # killed by a signal: shift so callers can distinguish it from an exit code
    return (retval & 0xff) << 8  # interrupted by signal
def file_locate(settings, filelist, expand=1):
    """Verify that each file named by the keys in *filelist* exists.

    if expand=1, non-absolute paths will be accepted and expanded to
    os.getcwd()+"/"+localpath if the file exists; the expansion is written
    back into *settings* in place.  Keys absent from *settings* are skipped
    (files such as cdtar are optional).  Raises CatalystError when a
    configured path cannot be located.
    """
    for myfile in filelist:
        if myfile not in settings:
            # filenames such as cdtar are optional, so we don't assume
            # the variable is defined.
            continue
        if len(settings[myfile]) == 0:
            raise CatalystError("File variable \"" + myfile +
                "\" has a length of zero (not specified.)")
        if settings[myfile][0] == "/":
            if not os.path.exists(settings[myfile]):
                raise CatalystError("Cannot locate specified " + myfile +
                    ": " + settings[myfile])
        elif expand and os.path.exists(os.getcwd() + "/" + settings[myfile]):
            settings[myfile] = os.getcwd() + "/" + settings[myfile]
        else:
            raise CatalystError("Cannot locate specified " + myfile +
                ": " + settings[myfile] + " (2nd try)")
465 The spec file format is a very simple and easy-to-use format for storing data. Here's an example
475 This file would be interpreted as defining three items: item1, item2 and item3. item1 would contain
476 the string value "value1". item2 would contain an ordered list [ "foo", "bar", "oni" ]. item3
477 would contain an ordered list as well: [ "meep", "bark", "gleep", "moop" ]. It's important to note
478 that the order of multiple-value items is preserved, but the order in which the items themselves are
479 defined is not preserved. In other words, "foo", "bar", "oni" ordering is preserved but "item1"
480 "item2" "item3" ordering is not, as the item strings are stored in a dictionary (hash).
def parse_makeconf(mylines):
    """Parse simple KEY=value lines (make.conf style) into a dict.

    mylines -- sequence of raw lines (newlines may be present)
    Blank lines, comments and indented lines are skipped; double quotes are
    stripped from values; empty values are not stored.
    """
    mymakeconf = {}
    pat = re.compile("([0-9a-zA-Z_]*)=(.*)")
    # A plain for-loop replaces the original while/pos index bookkeeping.
    for line in mylines:
        if len(line) <= 1:
            # skip blank lines
            continue
        if line[0] in ("#", " ", "\t"):
            # skip indented lines, comments
            continue
        mobj = pat.match(line)
        # Guarding on `mobj` fixes a crash (AttributeError on .group) the
        # original hit for any non-assignment line such as "export FOO=1".
        if mobj and mobj.group(2):
            mymakeconf[mobj.group(1)] = mobj.group(2).replace('"', "")
    return mymakeconf
# NOTE(review): numbered, partial extraction -- the nested try/except
# scaffolding between the return statements below is missing.  The visible
# structure is a parser-preference chain: snakeoil.bash, then
# portage.util.getconfig, then the legacy portage_util name, then the naive
# parse_makeconf() fallback, raising CatalystError if everything fails.
505 def read_makeconf(mymakeconffile):
506 if os.path.exists(mymakeconffile):
509 import snakeoil.bash #import snakeoil.fileutils
510 return snakeoil.bash.read_bash_dict(mymakeconffile, sourcing_command="source")
514 return portage.util.getconfig(mymakeconffile, tolerant=1, allow_sourcing=True)
518 return portage_util.getconfig(mymakeconffile, tolerant=1, allow_sourcing=True)
520 myf=open(mymakeconffile,"r")
521 mylines=myf.readlines()
523 return parse_makeconf(mylines)
525 raise CatalystError, "Could not parse make.conf file "+mymakeconffile
# Print mymsg when the module-level `verbosity` is at least verblevel.
# NOTE(review): the print line of the body is missing from this excerpt.
530 def msg(mymsg,verblevel=1):
531 if verbosity>=verblevel:
def pathcompare(path1, path2):
    """Return 1 when the two paths are textually equal after normalizing
    duplicate and trailing slashes, else 0."""
    # Collapse any run of slashes to one.  The original re.sub("//","/")
    # only halves them, so "///" survived a single pass as "//".
    path1 = re.sub(r"/+", r"/", path1)
    path2 = re.sub(r"/+", r"/", path2)
    # Removing ending slash
    path1 = re.sub("/$", "", path1)
    path2 = re.sub("/$", "", path2)
    if path1 == path2:
        return 1
    return 0
# NOTE(review): fragment of an ismount(path) helper -- its def line and the
# os.popen("mount") call are missing from this excerpt.  It first trusts
# os.path.ismount(), then falls back to scanning `mount` output so bind
# mounts (which os.path.ismount misses) are detected via pathcompare().
547 "enhanced to handle bind mounts"
548 if os.path.ismount(path):
551 mylines=a.readlines()
# mysplit[2] is the mount-point column of a `mount` output line.
555 if pathcompare(path,mysplit[2]):
def addl_arg_parse(myspec, addlargs, requiredspec, validspec):
    """helper function to help targets parse additional arguments

    Copies each recognized key of *addlargs* into *myspec*; unrecognized
    keys and missing required keys are collected and reported in a single
    CatalystError at the end.
    """
    global valid_config_file_values
    messages = []
    for x in addlargs.keys():
        if x not in validspec and x not in valid_config_file_values \
                and x not in requiredspec:
            messages.append("Argument \"" + x + "\" not recognized.")
        else:
            myspec[x] = addlargs[x]
    for x in requiredspec:
        if x not in myspec:
            messages.append("Required argument \"" + x + "\" not specified.")
    if messages:
        # call form is valid Python 2 and 3, unlike `raise CatalystError, x`
        raise CatalystError('\n\tAlso: '.join(messages))
# NOTE(review): orphaned error path of a touch(myfile) helper whose def line
# and open/close body are not visible in this excerpt.
582 raise CatalystError, "Could not touch "+myfile+"."
# Interactive grace period: prints a countdown of `secs` seconds so the user
# can Control-C before `doing` starts.
# NOTE(review): the per-second loop, flush and sleep lines are missing from
# this numbered excerpt.
584 def countdown(secs=5, doing="Starting"):
586 print ">>> Waiting",secs,"seconds before starting..."
# trailing comma keeps the cursor on the countdown line (py2 print)
587 print ">>> (Control-C to abort)...\n"+doing+" in: ",
591 sys.stdout.write(str(sec+1)+" ")
# Normalize a path while remembering whether it had a trailing slash.
# NOTE(review): this numbered excerpt ends mid-function -- the trailing-slash
# restore and return presumably follow past the last visible line.
596 def normpath(mypath):
598 if mypath[-1] == "/":
600 newpath = os.path.normpath(mypath)
# os.path.normpath legitimately keeps a leading "//" (POSIX); strip it here
# so results are uniform single-slash paths.
602 if newpath[:2] == "//":
603 newpath = newpath[1:]