nodes are free (and therefore ready to receive new jobs).
"""
+import os
from Queue import Queue, Empty
import sys
from threading import Thread
'w.run()',
'w.teardown()',
])
- if worker_pool == None:
- worker_pool = MPI.COMM_WORLD.Get_size()
+ if worker_pool is None:
+ worker_pool = int(os.environ.get('WORKER_POOL',
+ MPI.COMM_WORLD.Get_size()))
comm = MPI.COMM_SELF.Spawn( # locks with mpich2 if no mpd running
sys.executable, args=['-c', spawn_script], maxprocs=worker_pool)
rank = comm.Get_rank()
Process = object
_SKIP = ' # doctest: +SKIP'
+import os
from .. import log
from . import Job
self._receive_queue = Queue()
def _spawn_workers(self, worker_pool=None):
- if worker_pool == None:
- worker_pool = cpu_count() + 1
+ if worker_pool is None:
+ worker_pool = int(os.environ.get('WORKER_POOL', cpu_count() + 1))
self._manager = Manager()
self._workers = []
for i in range(worker_pool):
"""
import copy
+import os
from Queue import Queue, Empty
import threading
Increasing `worker_pool` will only help you get around IO blocking
at the cost of increased time-slicing overhead.
"""
- def __init__(self, worker_pool=2):
+ def __init__(self, worker_pool=None):
super(ThreadManager, self).__init__()
self._blocked = []
self._setup_queues()
self._receive_queue = Queue()
def _spawn_workers(self, worker_pool):
+ if worker_pool is None:
+ worker_pool = int(os.environ.get('WORKER_POOL', 2))
self._workers = []
for i in range(worker_pool):
worker = WorkerThread(spawn_queue=self._spawn_queue,
.. math:: k = df/dx
and a pulling velocity
-
+unfolding
.. math:: v = dx/dt
we have a loading rate
def bell_rate(sawsim_runner, num_domains=1, unfolding_rate=1,
unfolding_distance=1, temperature=1/kB, spring_constant=1,
- velocity=1, N=100):
+ velocity=1, N=200):
loading_rate = float(spring_constant * velocity)
rho = kB * temperature / unfolding_distance
alpha = rho * log(loading_rate / (unfolding_rate * rho))