mpi4py_ is a Python wrapper around the Message Passing Interface (MPI) libraries.
-.. _mpi4py: http://mpi4py.scipy.org/http://mpi4py.scipy.org/
+.. _mpi4py: http://mpi4py.scipy.org/
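
For orientation, a minimal sketch of the mpi4py calls this module
relies on (only standard mpi4py names; run under ``mpiexec``)::

    from mpi4py import MPI

    comm = MPI.COMM_WORLD   # communicator spanning every MPI process
    rank = comm.Get_rank()  # this process's index in the communicator
    size = comm.Get_size()  # total number of processes
    print 'process %d of %d' % (rank, size)
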
The MPIManager data flow is a bit complicated, so I've made a
diagram::
nodes are free (and therefore ready to receive new jobs).
"""
+import os
from Queue import Queue, Empty
import sys
from threading import Thread
try:
from mpi4py import MPI
+ _ENABLED = True
+ _DISABLING_ERROR = None
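+    # _SKIP is appended to each doctest example so the examples only
+    # really execute on the root process (or nowhere at all when
+    # mpi4py is unavailable).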
if MPI.COMM_WORLD.Get_rank() == 0:
_SKIP = ''
else:
_SKIP = ' # doctest: +SKIP'
-except ImportError, MPI_error:
- MPI = None
+except ImportError, _DISABLING_ERROR:
+ _ENABLED = False
_SKIP = ' # doctest: +SKIP'
from .. import log
def MPI_worker_death():
- if MPI == None:
+    if not _ENABLED:
return
if MPI.COMM_WORLD.Get_rank() != 0:
sys.exit(0)
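# Hypothetical usage sketch (the call site is not shown in this
# excerpt): run the doctests on every rank, then reap the workers so
# that only the root process continues:
#
#     import doctest
#     doctest.testmod()
#     MPI_worker_death()  # non-root ranks sys.exit() here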
def _manager_check():
- assert MPI != None, MPI_error
+    if not _ENABLED:
+ raise _DISABLING_ERROR
rank = MPI.COMM_WORLD.Get_rank()
assert rank == 0, (
'process %d should have been killed by an MPI_worker_death() call'
'w.run()',
'w.teardown()',
])
- if worker_pool == None:
- worker_pool = MPI.COMM_WORLD.Get_size()
- comm = MPI.COMM_SELF.Spawn(
+ if worker_pool is None:
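+        # WORKER_POOL is assumed to hold a bare integer; it lets the
+        # environment override the communicator size detected below.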
+ worker_pool = int(os.environ.get('WORKER_POOL',
+ MPI.COMM_WORLD.Get_size()))
+ comm = MPI.COMM_SELF.Spawn( # locks with mpich2 if no mpd running
sys.executable, args=['-c', spawn_script], maxprocs=worker_pool)
rank = comm.Get_rank()
assert rank == 0, rank
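    # A hypothetical sketch of the worker side of this handshake: each
    # spawned interpreter can reach the manager through the parent
    # intercommunicator (the real protocol lives in spawn_script above):
    #
    #     from mpi4py import MPI
    #
    #     comm = MPI.Comm.Get_parent()  # intercommunicator to the manager
    #     rank = comm.Get_rank()        # this worker's rank in the pool
    #     ...
    #     comm.Disconnect()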