diff --git a/IPython/parallel/apps/ipclusterapp.py b/IPython/parallel/apps/ipclusterapp.py
index 0927f04..cca9eed 100755
--- a/IPython/parallel/apps/ipclusterapp.py
+++ b/IPython/parallel/apps/ipclusterapp.py
@@ -255,11 +255,22 @@ class IPClusterEngines(BaseParallelApplication):
         MPIExecEngineSetLauncher : use mpiexec to launch in an MPI environment
         PBSEngineSetLauncher : use PBS (qsub) to submit engines to a batch queue
         SGEEngineSetLauncher : use SGE (qsub) to submit engines to a batch queue
+        LSFEngineSetLauncher : use LSF (bsub) to submit engines to a batch queue
         SSHEngineSetLauncher : use SSH to start the controller
                 Note that SSH does *not* move the connection files
                 around, so you will likely have to do this manually
                 unless the machines are on a shared file system.
         WindowsHPCEngineSetLauncher : use Windows HPC
+
+        If you are using one of IPython's builtin launchers, you can specify just the
+        prefix, e.g.:
+
+        c.IPClusterEngines.engine_launcher_class = 'SSH'
+
+        or:
+
+        ipcluster start --engines 'MPIExec'
+
         """
         )
     daemonize = Bool(False, config=True,
@@ -420,8 +431,19 @@ class IPClusterStart(IPClusterEngines):
         MPIExecControllerLauncher : use mpiexec to launch engines in an MPI universe
         PBSControllerLauncher : use PBS (qsub) to submit engines to a batch queue
         SGEControllerLauncher : use SGE (qsub) to submit engines to a batch queue
+        LSFControllerLauncher : use LSF (bsub) to submit engines to a batch queue
         SSHControllerLauncher : use SSH to start the controller
         WindowsHPCControllerLauncher : use Windows HPC
+
+        If you are using one of IPython's builtin launchers, you can specify just the
+        prefix, e.g.:
+
+        c.IPClusterStart.controller_launcher_class = 'SSH'
+
+        or:
+
+        ipcluster start --controller 'MPIExec'
+
         """
         )
     reset = Bool(False, config=True,
diff --git a/docs/source/parallel/parallel_process.txt b/docs/source/parallel/parallel_process.txt
index 93ab220..b151f61 100644
--- a/docs/source/parallel/parallel_process.txt
+++ b/docs/source/parallel/parallel_process.txt
@@ -141,9 +141,39 @@ Using various batch systems with :command:`ipcluster`
 
 :command:`ipcluster` has a notion of Launchers that can start controllers
 and engines with various remote execution schemes. Currently supported
-models include :command:`ssh`, :command:`mpiexec`, PBS-style (Torque, SGE),
+models include :command:`ssh`, :command:`mpiexec`, PBS-style (Torque, SGE, LSF),
 and Windows HPC Server.
 
+In general, these are configured by the :attr:`IPClusterEngines.engine_launcher_class`
+and :attr:`IPClusterStart.controller_launcher_class` configurables, which can be the
+fully specified object name (e.g. ``'IPython.parallel.apps.launcher.LocalControllerLauncher'``),
+but if you are using IPython's builtin launchers, you can specify just the class name,
+or even just the prefix, e.g.:
+
+.. sourcecode:: python
+
+    c.IPClusterEngines.engine_launcher_class = 'SSH'
+    # equivalent to
+    c.IPClusterEngines.engine_launcher_class = 'SSHEngineSetLauncher'
+    # both of which expand to
+    c.IPClusterEngines.engine_launcher_class = 'IPython.parallel.apps.launcher.SSHEngineSetLauncher'
+
+The shortest form is of particular use on the command line, where all you need to do to
+get an IPython cluster running with engines started with MPI is:
+
+.. sourcecode:: bash
+
+    $> ipcluster start --engines=MPIExec
+
+This assumes that the default MPI configuration is sufficient.
+
+.. note::
+
+    Shortcuts for builtin launcher names were added in 0.12, as was the ``_class`` suffix
+    on the configurable names. If you use the old 0.11 names (e.g. ``engine_launcher``),
+    they will still work, but you will get a deprecation warning that the name has changed.
+
+
 .. note::
 
     The Launchers and configuration are designed in such a way that advanced
@@ -170,7 +200,7 @@ There, instruct ipcluster to use the MPIExec launchers by adding the lines:
 
 .. sourcecode:: python
 
-    c.IPClusterEngines.engine_launcher = 'IPython.parallel.apps.launcher.MPIExecEngineSetLauncher'
+    c.IPClusterEngines.engine_launcher_class = 'MPIExecEngineSetLauncher'
 
 If the default MPI configuration is correct, then you can now start your
 cluster, with::
@@ -185,7 +215,7 @@ If you have a reason to also start the Controller with mpi, you can specify:
 
 .. sourcecode:: python
 
-    c.IPClusterStart.controller_launcher = 'IPython.parallel.apps.launcher.MPIExecControllerLauncher'
+    c.IPClusterStart.controller_launcher_class = 'MPIExecControllerLauncher'
 
 .. note::
 
@@ -226,10 +256,8 @@ and engines:
 
 .. sourcecode:: python
 
-    c.IPClusterStart.controller_launcher = \
-        'IPython.parallel.apps.launcher.PBSControllerLauncher'
-    c.IPClusterEngines.engine_launcher = \
-        'IPython.parallel.apps.launcher.PBSEngineSetLauncher'
+    c.IPClusterStart.controller_launcher_class = 'PBSControllerLauncher'
+    c.IPClusterEngines.engine_launcher_class = 'PBSEngineSetLauncher'
 
 .. note::
 
@@ -355,12 +383,11 @@ To use this mode, select the SSH launchers in :file:`ipcluster_config.py`:
 
 .. sourcecode:: python
 
-    c.IPClusterEngines.engine_launcher = \
-        'IPython.parallel.apps.launcher.SSHEngineSetLauncher'
+    c.IPClusterEngines.engine_launcher_class = 'SSHEngineSetLauncher'
     # and if the Controller is also to be remote:
-    c.IPClusterStart.controller_launcher = \
-        'IPython.parallel.apps.launcher.SSHControllerLauncher'
-
+    c.IPClusterStart.controller_launcher_class = 'SSHControllerLauncher'
+
+
 
 The controller's remote location and configuration can be specified:
diff --git a/docs/source/parallel/parallel_winhpc.txt b/docs/source/parallel/parallel_winhpc.txt
index a19a9eb..6d140ad 100644
--- a/docs/source/parallel/parallel_winhpc.txt
+++ b/docs/source/parallel/parallel_winhpc.txt
@@ -232,10 +232,8 @@ will need to edit the following attributes in the file
 
     # Set these at the top of the file to tell ipcluster to use the
     # Windows HPC job scheduler.
-    c.IPClusterStart.controller_launcher = \
-        'IPython.parallel.apps.launcher.WindowsHPCControllerLauncher'
-    c.IPClusterEngines.engine_launcher = \
-        'IPython.parallel.apps.launcher.WindowsHPCEngineSetLauncher'
+    c.IPClusterStart.controller_launcher_class = 'WindowsHPCControllerLauncher'
+    c.IPClusterEngines.engine_launcher_class = 'WindowsHPCEngineSetLauncher'
 
     # Set these to the host name of the scheduler (head node) of your cluster.
     c.WindowsHPCControllerLauncher.scheduler = 'HEADNODE'
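
For reference, the launcher-name expansion the new documentation describes (full
dotted path, bare class name, or bare prefix) can be sketched in a few lines of
Python. This is only an illustration of the documented rules, not the patch's
actual implementation; expand_launcher_name is a hypothetical helper:

    # Illustrative sketch of the launcher-name expansion documented above.
    # Not IPython's actual implementation; expand_launcher_name is a
    # hypothetical helper written to match the documented rules:
    #   full dotted path -> used as-is
    #   bare class name  -> assumed to live in IPython.parallel.apps.launcher
    #   bare prefix      -> suffixed with '<Kind>Launcher' first

    def expand_launcher_name(name, kind='EngineSet'):
        """Expand a launcher shortcut to a fully qualified object name.

        kind is 'EngineSet' for engine launchers, 'Controller' for
        controller launchers.
        """
        if '.' in name:
            # Already a fully specified dotted object name.
            return name
        if not name.endswith('Launcher'):
            # Bare prefix, e.g. 'SSH' -> 'SSHEngineSetLauncher'.
            name = '%s%sLauncher' % (name, kind)
        # Bare class name: assume IPython's builtin launcher module.
        return 'IPython.parallel.apps.launcher.' + name

    # The equivalent spellings from the docs all expand identically:
    assert expand_launcher_name('SSH') == \
        expand_launcher_name('SSHEngineSetLauncher') == \
        'IPython.parallel.apps.launcher.SSHEngineSetLauncher'
    assert expand_launcher_name('MPIExec', kind='Controller') == \
        'IPython.parallel.apps.launcher.MPIExecControllerLauncher'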