bin_is = numpy.floor((data - self.bin_edges[0])/bin_width)
self.counts = []
for i in range(len(self.bin_edges)-1):
- self.counts.append(sum(bin_is == i))
+ self.counts.append(sum(bin_is == i).sum())
self.total = float(len(data)) # some data might be outside the bins
self.mean = data.mean()
self.std_dev = data.std()
'one ends).',
])
parser = OptionParser(usage, epilog=epilog)
+ parser.format_epilog = lambda formatter: epilog+'\n'
for option in sr.optparse_options:
if option.dest == 'param_string':
continue
+++ /dev/null
-A bunch of validation checks. Run automatically before allowing
-merges/commits to the main branch. The smaller, faster subset is run
-before allowing commits/merges to any branch.
-
-Since this software is being developed on a cluster, many of the tests
-take advantage of the more powerful environment. You should still be
-able to run the tests if you're on a standard machine though, it will
-just take more time :p. To allow for the differences in invocation,
-most tests are broken out into two scripts, an X.cl to be run on a
-cluster and an X.sh to be run on a standard machine.
-
-Each test directory contains the script run_test.sh, which gives
-automated testing scripts a common file name to look for. Exporting
-the global variable ISACLUSTER allows more efficient execution on a
-cluster. This can be accomplished (in Bash) with the command
-
- $ export ISACLUSTER=1
-
-Note that it is also necessary to `touch` the testing directory after
-you add or remove a test to update bin/run-test.sh.
-
-The common directory stores code shared among the tests.
+++ /dev/null
-#!/bin/bash
-#
-# Check to make sure the PBS queue is working before going
-# whole hog on the simulations.
-
-if [ "$ISACLUSTER" -eq 1 ]
-then
- qcmd printenv
-fi
-
-exit 0