--- /dev/null
+A collection of validation checks, run automatically before allowing
+merges/commits to the main branch. A smaller, faster subset is run
+before allowing commits/merges to any branch.
+
+Since this software is being developed on a cluster, many of the tests
+take advantage of the more powerful environment. You should still be
+able to run the tests on a standard machine, though; it will just take
+more time :p. To allow for the differences in invocation, most tests
+are broken out into two scripts: an X.cl to be run on a cluster and an
+X.sh to be run on a standard machine.
+
+Each test directory contains the script run_test.sh, which gives
+automated testing scripts a common file name to look for. Exporting
+the environment variable ISACLUSTER allows more efficient execution on
+a cluster. This can be accomplished (in Bash) with the command
+
+ $ export ISACLUSTER=1
+
+Note that it is also necessary to `touch' the testing directory after
+you add or remove a test, so that bin/run-test.sh is updated.
+
+The common directory stores code shared among the tests.
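+
+For example, to run a single test suite by hand (the directory name
+here is just an illustration), something like
+
+ $ cd const_rate/
+ $ ./run_test.sh
+
+should work on both clusters and standard machines, since the
+individual test scripts check ISACLUSTER themselves.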
--- /dev/null
+#!/usr/bin/python
+#
+# fit data coming in on stdin to a bell-theory histogram
+# output:
+# <convergence information>
+# a:\t<some number>
+# b:\t<another number>
+# N:\t<number of data points>
+# bin:\t<width of histogram bins>
+#
+# usage: fit_bell
+# for example:
+# $ cat datafile | fit_bell
+
+import sys
+sys.path.append('../common/')
+import fit
+from scipy import exp
+
+class Model (fit.Model) :
+ def printModel(self) :
+ print "bin*N*a*exp(b*x + a/b(1-exp(b*x)))"
+ def printParams(self, xopt) :
+ print "a:\t%g" % xopt[0]
+ print "b:\t%g" % xopt[1]
+ def model(self, x, params) :
+ a = params[0]
+ b = params[1]
+ return self.binwidth*self.N*a*exp(b*x + a/b*(1.-exp(b*x)))
+ def guessInitialParams(self) :
+ print "guessing initial params"
+ init = self.data[0]
+ # find the coords (Xmax,Ymax) of the peak of the histogram
+ Yarr = list(zip(*self.data)[1])
+ Xarr = list(zip(*self.data)[0])
+ Ymax = max(Yarr)
+ Xmax = Xarr[Yarr.index(Ymax)]
+ # two guessing methods for B:
+ if Ymax < 1.3*init[1] :
+ # in the first, we have a high unforced rate, and
+ # the histogram is more-or-less exponential decay.
+ print "guessing with method 1"
+ Aguess = init[1]/self.N/self.binwidth
+ i = 0
+ s = 0
+ while s < self.N/2.0 :
+ s += Yarr[i]
+ i += 1
+ print "half dead by", self.data[i]
+ Aguess = init[1]/self.N/self.binwidth
+ Bguess = 1.0/self.data[i][0]
+ else :
+ # in the second, we have a low unforced rate, and the histogram has
+ # a well defined 'bump' at some likely unfolding force
+ # h(f) = binwidth N a e^{b x + a/b[1-e^(b f)]}
+ # dh/df = h [b - a e^(b f)]
+ # so dh/df = 0 at f=fmax when
+ # b = a e^(b fmax)
+            # hmm, transcendental equation for b
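+            # (one could refine b by fixed-point iterating
+            # b_{n+1} = a e^{b_n Xmax}, but in practice the 1/Xmax
+            # guess below starts fmin_powell close enough to converge)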
+ print "Xmax = ", Xmax
+ Bguess = 1.0/Xmax
+ Aguess = init[1]/self.N/self.binwidth
+ print "guessing ", (Aguess, Bguess)
+ return (Aguess, Bguess)
+
+if __name__ == "__main__" :
+ fit.run(2, Model)
--- /dev/null
+#!/bin/bash
+#
+# Use a single hooke domain with a bell unfolding rate, so unfolding force
+# is proportional to time, and we expect a peaked death histogram.
+#
+# e.g.
+# spring constant k=df/dx=1 \__
+# velocity v=dx/dt=1 / `--> df/dt = kv = 1 --> f = t (+ f0)
+# unfolding rate K= a e^(bf) (frac/s)
+# let f0 = 0
+# let P(f) be the population existing at force f (normed so P(0)=0).
+# the probability of dying (normed # deaths) between f and f+df is given by
+# dD = K P(f) * dt = K P(f) df/kv since dt = df/kv
+# this is what we plot in the normalized histogram with dD in a bin of width
+# df. The continuous analog is the function dD/df = K/kv P(f).
+#
+# dP/dt = - KP
+# dP/P = -K dt = -a e^{bf} df/kv = -z e^{bf} df where z ≡ a/kv
+# ln(P) = -z/b e^{bf} + c
+# P = C e^{-z/b e^{bf}}
+# Applying our f0=0 boundary condition we see that C = P0 e^{z/b}.
+# Plugging P(f) into our formula for dD/df yields
+# dD/df = K/kv P(f) = z e^{bf} P0 e^{z/b} e^{-z/b e^{bf}}
+# dD/df = P0 z e^{bf + z/b(1-e^{bf})}
+#
+# The histogram scaling isn't dD/df, though, it's dD/d(bin)
+# dD/d(bin) = dD/df df/d(bin) = binwidth P0 z e^{bf + z/b(1-e^{bf})}
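+#
+# As a sanity check on the shape: dD/df peaks where b = z e^{bf}, i.e. at
+# f_peak = ln(b/z)/b. For the 'ones' parameters below (z = b = 1) that gives
+# f_peak = 0, a monotonic decay; for the 'protein' parameters it works out
+# to roughly 68 pN, a plausible unfolding force.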
+#
+# test with lots of runs with v = Δx = 1
+# sawsim -v1 -mhooke -a1 -kbell -K1,1 -T7.24296e22 -F1
+# ^- v=1 ^- k=1 ^-a=1 ^-1/kB
+# (kB = 1.3806503e-23 J/K, we use T=1/kB to get the bell K=e^{-f})
+#
+# 200 runs of that takes ~1 second:
+#  time xtimes 200 sawsim -v1 -mhooke -a1 -kbell -K1,1 -T7.24296e22 -F1 > /dev/null
+#
+# usage: bell_rate.sh
+
+# since we're on the cluster, we'll go crazy and get really good statistics ;)
+N=10000
+
+SAWSIM=../../bin/sawsim
+DATA=bell_rate.d
+HIST=bell_rate.hist
+GPFILE=bell_rate.gp
+PNGFILE=bell_rate.png
+
+# set up the theory
+# dD/d(bin) = binwidth P0 z e^{bf + z/b(1-e^{bf})}
+# where z ≡ a/kv, and b = Δx/kBT
+KB=1.3806503e-23 # J/K
+STYLE="protein"
+if [ "$STYLE" == "ones" ]
+then
+ df=1e-1 # histogram bin width
+ DX=1
+ T=7.24296e22 # 1/kB
+ A=1
+ K=1
+ V=1
+else
+ df=1e-12
+ DX=1e-9
+ T=300
+ A=1e-3
+ K=0.05
+ V=1e-6
+fi
+B=`echo "print ($DX/($KB*$T))" | python`
+Z=`echo "print (float($A)/($K*$V))" | python`
+THEORY_FN="$df * $N * $Z * exp($B*x + $Z/$B *(1 - exp($B*x)))"
+
+# run the experiments
+> $DATA
+if [ "$ISACLUSTER" -eq 1 ]
+then
+ qcmd "xtimes $N $SAWSIM -v$V -mhooke -a$K -kbell -K$A,$DX -T$T -F1 | grep -v '^#' >> $DATA"
+else
+ xtimes $N $SAWSIM -v$V -mhooke -a$K -kbell -K$A,$DX -T$T -F1 | grep -v '^#' >> $DATA
+fi
+
+# histogram the data
+cat $DATA | stem_leaf -v-1 -b$df | sed 's/://' > $HIST
+
+# fit the data
+FIT=`cat $HIST | ./fit_bell`
+#echo "$FIT"
+FITZ=`echo "$FIT" | sed -n 's/a:[[:space:]]*//p'`
+FITB=`echo "$FIT" | sed -n 's/b:[[:space:]]*//p'`
+echo -e "a:\t$FITZ\t(expected $Z)"
+echo -e "b:\t$FITB\t(expected $B)"
+
+# generate gnuplot script
+echo "set terminal png" > $GPFILE
+echo "set output '$PNGFILE'" >> $GPFILE
+echo "set logscale y" >> $GPFILE
+echo "set xlabel 'Time'" >> $GPFILE
+echo "set ylabel 'Deaths'" >> $GPFILE
+echo "theory(x) = $THEORY_FN" >> $GPFILE
+echo "fit(x) = $df * $N * $FITZ * exp($FITB*x + $FITZ/$FITB*(1-exp($FITB*x)))" >> $GPFILE
+echo "plot theory(x), '$HIST' with points, fit(x)" >> $GPFILE
+
+gnuplot $GPFILE
--- /dev/null
+# -*- coding: utf-8 -*-
+
+"""Fit histogram data coming in on stdin to a user-defined model. You
+should write a python script sublcassing fit-hist.Model."""
+
+import scipy.optimize, scipy
+import sys
+
+kB=1.3806503e-23 # J/K = m²·kg/(s²·K)
+
+class Model (object) :
+
+ """For minimizing fit error: returns the normalized rms difference
+ between a dataset and a function y(X). In order to use this
+ class, you must sublcass it and overwrite Model.model(x,params).
+ It is recommended that you also overwrite
+ Model.guess_initial_params(), and then not pass initializing
+ params to Model.model(). This helps ensure good automation for
+ unsupervised testing. Developing good heuristics for
+ Model.guessInitialParams() is usually the hardest part writing a
+ theoretical histogram test.
+
+    For bonus points, you can override Model.printModel() and
+    Model.printParams() to give your users an easy-to-understand idea
+    of what's going on. It's better to be verbose and clear.
+ Remember, this is the testing branch. It needs to be solid. Save
+ your wonderful, sneaky hacks for the main code ;).
+
+ """
+ def __init__(self, numParams, data) :
+ """Initialize the Model instance with a set of data, for
+ example: data = [(0,4),(1,3),(2,3),(4,0)]"""
+ self.data = data # an iterable filled with (x,y) pairs
+ # N (# unfolding events) = the sum of the y values (# hits per bin)
+        self.N = sum(zip(*data)[1]) # zip(*data) transposes to [(x0,x1,..),(y0,y1,..)]
+ # assumes constant binwidth
+ self.binwidth = self.data[1][0] - self.data[0][0]
+ self.numParams = numParams
+    def nrms(self, params) :
+        """Returns the mean squared residual between the data and the
+        model for a given set of parameters (monotonic in the rms
+        difference, so minimizing it minimizes the rms error too)"""
+ s = 0.0
+ for x,y in self.data :
+ yResidual = y-self.model(x, params)
+ s += yResidual**2
+ s /= float(len(self.data))
+ return s
+ def fit(self, x0=None) :
+ """Minimize error given an initial guess vector x0, the heart of
+ the class."""
+        if x0 is None :
+ x0 = self.guessInitialParams()
+ xopt = scipy.optimize.fmin_powell(self.nrms, x0)
+ return xopt
+ def printModel(self) :
+ "OVERRIDE: Give the user some idea of what model we're fitting with"
+ print "model string not defined"
+    def printParams(self, params) :
+        "OVERRIDE: Give the user a nicely named format for a parameter vector"
+        for i,p in enumerate(params) :
+            print "param_%d\t=\t%g" % (i,p)
+ def model(self, x, params) :
+ "OVERRIDE: Implement your model"
+ assert len(params) == self.numParams
+ raise Exception, "model method not defined"
+ return 1.0
+ def guessInitialParams(self) :
+ """OVERRIDE: Provide some intelligent heuristic to pick a
+ starting point for minimizing your model"""
+ return [1.0] * self.numParams
+
+def readData(fileObj) :
+    """Read histogram data from a whitespace-separated, two-column
+    ASCII text file into a list of (x,y) pairs"""
+    data = []
+    for line in fileObj :
+        if len(line.strip()) == 0 or line[0] == "#" : continue
+        data.append([float(x) for x in line.split()])
+    return data
+
+def run(numParams, modelClass) :
+    data = readData(sys.stdin)
+ mod = modelClass(numParams, data)
+ popt = mod.fit()
+ mod.printModel()
+ mod.printParams(popt)
+ print "N:\t%g" % mod.N
+ print "bin:\t%g" % mod.binwidth
--- /dev/null
+#!/bin/bash
+#
+# Use a single hooke domain with a constant unfolding rate, so unfolding force
+# is proportional to time, and we expect an exponential population decay.
+#
+# e.g.
+# spring constant k=df/dx \__
+# velocity v=dx/dt / `--> df/dt = kv --> f = kvt (+ f0)
+# unfolding rate K=1 frac/s
+#
+# Population as a function of force/time follows
+# p(t) = exp(-tK) = exp(-FK/kv) = p(F)
+# so histogram of unfolding vs. force p(F) normalized to p(0)=1 should follow
+# -binwidth N0 dp/dF = binwidth N0 pK/kv = binwidth N0 K/kv exp(-FK/kv)
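+#
+# For a concrete check (the first line of params: v = k = K = 1):
+# p(F) = e^{-F}, so with N0 = 10000 runs and binwidth 0.1 the bin at F = 0
+# should hold about 0.1 * 10000 * 1 = 1000 deaths, decaying by a factor of
+# 1/e per unit of force.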
+#
+# test with lots of runs of
+# sawsim -v1 -mhooke -a1 -kconst -K1 -F1
+# which is not a problem, because 200 runs of that takes ~1 second:
+# time xtimes 200 sawsim -v1 -mhooke -a1 -kconst -K1 -F1 > /dev/null
+#
+# usage: const_rate.sh <velocity> <spring_constant> <unfolding_rate>
+
+if [ $# -ne 3 ]
+then
+ echo "usage: const_rate.sh <velocity> <spring_constant> <unfolding_rate>"
+ exit 1
+fi
+
+V=$1
+K=$2
+RATE=$3
+DEBUG=0
+
+# since we're on the cluster, we'll go crazy and get really good statistics ;)
+N=10000
+df=`python -c "print 0.1/$RATE"`
+
+SAWSIM=../../bin/sawsim
+DATA=const_rate_${V}_${K}_$RATE.d
+HIST=const_rate_${V}_${K}_$RATE.hist
+COMPFILE=const_rate_${V}_${K}_$RATE.compare
+GPFILE=const_rate_${V}_${K}_$RATE.gp
+PNGFILE=const_rate_${V}_${K}_$RATE.png
+
+# set up the theory
+# our predicted histogram (see notes above) is
+# hist(F) = binwidth N0 K/kv exp(-FK/kv) = A exp(F/B)
+theoryA=`python -c "print $df*$N*$RATE/float($K*$V)"`
+theoryB=`python -c "print float(-$K*$V)/$RATE"`
+THEORY_FN="$theoryA * exp(x / ($theoryB))"
+
+
+# run the experiments
+> $DATA
+if [ "$ISACLUSTER" -eq 1 ]
+then
+ if [ $DEBUG -eq 1 ]; then
+ echo "qcmd \"xtimes $N $SAWSIM -v$V -mhooke -a$K -kconst -K$RATE -F1 | grep -v '^#' >> $DATA\""
+ fi
+ qcmd "xtimes $N $SAWSIM -v$V -mhooke -a$K -kconst -K$RATE -F1 | grep -v '^#' >> $DATA"
+else
+ if [ $DEBUG -eq 1 ]; then
+ echo "xtimes $N $SAWSIM -v$V -mhooke -a$K -kconst -K$RATE -F1 | grep -v '^#' >> $DATA"
+ fi
+ xtimes $N $SAWSIM -v$V -mhooke -a$K -kconst -K$RATE -F1 | grep -v '^#' >> $DATA
+fi
+
+# histogram the data
+if [ $DEBUG -eq 1 ]; then
+ echo "cat $DATA | stem_leaf -v-1 -b$df | sed 's/://' > $HIST"
+fi
+cat $DATA | stem_leaf -v-1 -b$df | sed 's/://' > $HIST
+
+# fit the data to an exponential decay
+if [ $DEBUG -eq 1 ]; then
+ echo "cat $HIST | ./fit_exponential"
+fi
+FIT=`cat $HIST | ./fit_exponential` || exit 1
+B=`echo "$FIT" | sed -n 's/time constant:[[:space:]]*//p'`
+A=`echo "$FIT" | sed -n 's/initial value:[[:space:]]*//p'`
+echo -e "force scale:\t$B\t(expected $theoryB)" > $COMPFILE
+echo -e "initial value:\t$A\t(expected $theoryA)" >> $COMPFILE
+cat $COMPFILE
+
+# generate gnuplot script
+if [ $DEBUG -eq 1 ]; then
+ echo "generate Gnuplot script"
+fi
+echo "set terminal png" > $GPFILE
+echo "set output '$PNGFILE'" >> $GPFILE
+echo "set logscale y" >> $GPFILE
+echo "set xlabel 'Force'" >> $GPFILE
+echo "set ylabel 'Deaths'" >> $GPFILE
+echo "theory(x) = $THEORY_FN" >> $GPFILE
+echo "fit(x) = $A * exp(x / $B)" >> $GPFILE
+echo "plot theory(x), '$HIST' with points, fit(x)" >> $GPFILE
+
+if [ $DEBUG -eq 1 ]; then
+ echo "Generate PNG"
+fi
+gnuplot $GPFILE
--- /dev/null
+#!/usr/bin/python
+#
+# fit data coming in on stdin to an exponential decay
+# output:
+# <convergence information>
+# initial value:\t<some number>
+# time constant:\t<another number>
+#
+# usage: fit_exponential
+# for example:
+# $ cat datafile | fit_exponential
+
+import sys
+sys.path.append('../common/')
+import fit
+from scipy import exp
+
+class Model (fit.Model) :
+ def printModel(self) :
+ print "a*exp(x/b)"
+ def printParams(self, params) :
+ print "initial value:\t%g" % params[0]
+ print "time constant:\t%g" % params[1]
+ def model(self, x, params) :
+ a = params[0]
+ b = params[1]
+ return a*exp(x/b)
+ def guessInitialParams(self) :
+ init = self.data[0]
+ fin = self.data[-1]
+ slope = (fin[1]-init[1])/(fin[0]-init[0])
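+        # crude heuristic: the end-to-end slope is not literally the
+        # decay constant b, but it is negative for decaying data and
+        # gives fmin_powell a usable starting point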
+ return (init[1],slope)
+
+if __name__ == "__main__" :
+ fit.run(2, Model)
--- /dev/null
+# velocity spring constant unfolding rate
+1 1 1
+2 1 1
+1 2 1
+1 1 2
--- /dev/null
+#!/bin/bash
+#
+# run const_rate.sh with a variety of parameters
+#
+# usage: run_test.sh
+
+while read LINE
+do
+ echo ./const_rate.sh $LINE
+ ./const_rate.sh $LINE
+done < <(grep -v '^#' params)
+
+exit 0
--- /dev/null
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# fit data coming in on stdin to Kramers' k(F) over a cusp.
+# k ≅ ω²/2πγ √(πβE_b) e^(-βE_b)
+# E_b(F) = ½ω²Δx² - FΔx + F²/2ω²
+# fitting gamma, omega, T, and Δx
+# output:
+# <convergence information>
+# gamma:\t<some number>
+# omega:\t<another number>
+# temp:\t<yet another number>
+# delta x:\t<a final number>
+#
+# usage: fit_kramers
+# for example:
+# $ cat datafile | fit_kramers
+
+import sys
+sys.path.append('../common/')
+import fit
+from scipy import pi, sqrt, exp
+
+kB=1.3806503e-23 # J/K = m²·kg/(s²·K)
+
+class Model (fit.Model) :
+ def printModel(self) :
+ print "k ≅ ω²/2πγ √(πβE_b) e^(-βE_b) with E_b(F) = ½ω²Δx² - FΔx - (½+1/ω²)F²"
+ def printParams(self, xopt) :
+ print "gamma:\t%g" % xopt[0]
+ print "omega:\t%g" % xopt[1]
+ print "temp:\t%g" % xopt[2]
+ print "delta x:\t%g" % xopt[3]
+ def model(self, x, params) :
+ F = x
+ gamma = params[0]
+ omega = params[1]
+ T = params[2]
+ dX = params[3]
+        betaEb = (0.5*(omega**2)*(dX**2) - F*dX + F**2/(2.0*omega**2))/(kB*T)
+ return (omega**2/(2*pi*gamma) * sqrt(pi*betaEb) * exp(-betaEb))
+ def guessInitialParams(self) :
+ return [1, 1, 1.0/kB, 1.0]
+
+if __name__ == "__main__" :
+ fit.run(4, Model)
--- /dev/null
+#!/bin/bash
+# -*- coding: utf-8 -*-
+#
+# Test the Kramers integral k(F) implementation on the cusp potential.
+#
+# The test is escape from a cusp potential from Kramers' 1940 paper.
+# He solves the cusp potential in the text immediately after Eq. 17 & Fig. 2
+# We can translate Kramers' notation to Hänggi's more modern notation with
+#   Kramers   Hänggi      Meaning                Why I think so
+# 2πω ω Curvature at minimum Hänggi's footnote to Eq. 1.5
+# η γ Viscosity/damping Compare H. Eq.1.5 with K. Eq. 25
+# Q E_b Energy barrier height K. Fig. 1 (p 291)
+# T k_B·T≡1/β Absolute temperature K: “k_B = 1” (text after Eq. 5)
+# r k Reaction rate seems more reasonable than k ;)
+# With these translations, Kramers' Eq. is
+# k ≅ ω²/2πγ √(πβE_b) e^(-βE_b)
+# for E(x) = ½ω²(|x|-Δx)².
+# The barrier is at x_b=0 with minimums at x_ac=±Δx.
+# The barrier height E_b = E(0) = ½ω²Δx²
+#
+# As a force is applied to tip the potential,
+# E(F,x) = ½ω²(|x|-Δx)²-Fx.
+# Focusing only on the area left of the barrier (x<0), we have
+# E_L(F,x) = ½ω²(-x-Δx)²-Fx = ½ω²(x+Δx)²-Fx
+# E_L'(F,x) = ω²(x+Δx)-F
+# E_L"(F,x) = ω²,
+# just like the F=0 case. Kramers' solution still holds then, but with a
+# different barrier height. The minimum moves to
+# x_a = F/ω² - Δx.
+# so
+#   Eb(F) = E(F,0) - E(F,x_a) = E(0,0) - E(F,x_a)
+#         = ½ω²Δx² - ½ω²[(F/ω² - Δx)+Δx]² + F(F/ω² - Δx)
+#         = ½ω²Δx² - F²/2ω² + F²/ω² - FΔx
+#         = ½ω²Δx² - FΔx + F²/2ω²
+#           ----+----- ^- Bell model
+#                        ^- correction because the minimum x_a moves
+#                           toward the barrier as F increases
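+#
+# Sanity check: at F=0 this reduces to E_b = ½ω²Δx² (4.5 for the ω=3,
+# Δx=1 values used below), and the barrier vanishes at F = ω²Δx, where
+# the minimum merges with the cusp.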
+#
+# Usage: kramers_rate.sh
+
+# Since we're on the cluster, we'll go crazy and get really good statistics ;)
+FMAX=1
+K_UTILS=../../bin/k_model_utils
+DATA=kramers_rate.d
+HIST=kramers_rate.hist
+GPFILE=kramers_rate.gp
+PNGFILE=kramers_rate.png
+
+# set up the physics
+# E(x) = ½ω²(|x|-Δx)²
+OMEGA=3
+GAMMA=100
+KB=1.3806503e-23 # J/K = m²·kg/(s²·K)
+T=`python -c "print (1 / ($KB))"` # pick units where k_B = 1
+DX=1
+EB=`python -c "print (0.5 * (($OMEGA)*($DX))**2)"`
+
+# set up the theory
+E_FN="0.5*($OMEGA)**2 * (abs(x)-($DX))**2"
+BETA=`python -c "print (1.0 / (($KB)*($T)))"`
+# k ≅ ω²/2πγ √(πβE_b) e^(-βE_b)
+BETAEB=`python -c "print (($BETA)*($EB))"`
+THEORY_K="BETAEB(F) = ($BETA)*(($EB) - F*($DX) + F**2/(2*($OMEGA)**2))
+K(F) = ($OMEGA)**2/(2*pi*($GAMMA)) * sqrt(pi*BETAEB(F)) * exp(-BETAEB(F))"
+
+# Get the diffusion coefficient from the Einstein-Smoluchowski relation
+# D = µ_p·k_B·T = k_B·T/γ
+# where µ_p=1/γ is the particle mobility
+# See, for example,
+# http://en.wikipedia.org/wiki/Einstein_relation_(kinetic_theory)
+D=`python -c "print (1/(($BETA)*($GAMMA)))"`
+# pick safe integration bounds
+XMIN=`python -c "print (-2*($DX))"`
+XMAX="$DX"
+# set up the cubic-spline approximation to a sharp cusp… get close anyway…
+SPLINE_PTS=`python -c "from sys import stdout
+dx = (($XMAX)-($XMIN))/100.0
+stdout.write('{')
+for i in range(100) :
+ x = ($XMIN)+i*dx
+ stdout.write('(%g,%g),' % (x, ($E_FN)))
+stdout.write('(%g,%g)}\n' % (($XMAX), ($E_FN)))"`
+
+# Run the simulation
+echo "$K_UTILS -kkramers_integ -K \"$D,$SPLINE_PTS,$XMIN,$XMAX\" -x$XMIN -X$XMAX -T$T -F$FMAX > $DATA"
+$K_UTILS -kkramers_integ -K "$D,$SPLINE_PTS,$XMIN,$XMAX" -x$XMIN -X$XMAX -T$T -F$FMAX > $DATA
+
+# fit the data
+FIT=`cat $DATA | ./fit_kramers`
+FITGAMMA=`echo "$FIT" | sed -n 's/gamma:[[:space:]]*//p'`
+FITOMEGA=`echo "$FIT" | sed -n 's/omega:[[:space:]]*//p'`
+FITT=`echo "$FIT" | sed -n 's/temp:[[:space:]]*//p'`
+FITDX=`echo "$FIT" | sed -n 's/delta x:[[:space:]]*//p'`
+echo -e "gamma:\t$FITGAMMA\t(expected $GAMMA)"
+echo -e "omega:\t$FITOMEGA\t(expected $OMEGA)"
+echo -e "temp:\t$FITT\t(expected $T)"
+echo -e "delta x:\t$FITDX\t(expected $DX)"
+
+FITEB=`python -c "print (0.5 * (($FITOMEGA)*($FITDX))**2)"`
+FITE_FN="0.5*($FITOMEGA)**2 * (abs(x)-($FITDX))**2"
+FITBETA=`python -c "print (1.0 / (($KB)*($FITT)))"`
+FITBETAEB=`python -c "print (($FITBETA)*($FITEB))"`
+FIT_K="FITBETAEB(F) = ($FITBETA)*(($FITEB) - F*($FITDX) + F**2/(2*($FITOMEGA)**2))
+fitK(F) = ($FITOMEGA)**2/(2*pi*($FITGAMMA)) * sqrt(pi*FITBETAEB(F)) * exp(-FITBETAEB(F))"
+
+# generate gnuplot script
+echo "set terminal png" > $GPFILE
+echo "set output '$PNGFILE'" >> $GPFILE
+echo "set logscale y" >> $GPFILE
+echo "set xlabel 'Time'" >> $GPFILE
+echo "set ylabel 'Deaths'" >> $GPFILE
+echo "$THEORY_K" >> $GPFILE
+echo "$FIT_K" >> $GPFILE
+echo "plot K(x), fitK(x), '$DATA' with points" >> $GPFILE
+
+gnuplot $GPFILE
--- /dev/null
+#!/bin/bash
+#
+# Check to make sure the PBS queue is working before going
+# whole hog on the simulations.
+
+if [ "$ISACLUSTER" -eq 1 ]
+then
+ qcmd printenv
+fi
+
+exit 0