parallel docs, tests, default config updated to newconfig
MinRK
@@ -328,8 +328,8 @@ class ClusterApplication(BaseIPythonApplication):
328 328
329 329 The cluster directory is resolved as follows:
330 330
331 * If the ``--cluster-dir`` option is given, it is used.
332 * If ``--cluster-dir`` is not given, the application directory is
331 * If the ``cluster_dir`` option is given, it is used.
332 * If ``cluster_dir`` is not given, the application directory is
333 333 resolved using the profile name as ``cluster_<profile>``. The search
334 334 path for this directory is then i) the cwd, if it is found there,
335 335 and ii) ipython_dir otherwise.
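The resolution order above can be summarized in Python; a rough sketch (the function name and signature are hypothetical, for illustration only):

.. sourcecode:: python

    import os

    def resolve_cluster_dir(cluster_dir, profile, ipython_dir):
        # 1. an explicit cluster_dir option always wins
        if cluster_dir:
            return cluster_dir
        # 2. otherwise derive the directory name from the profile
        name = 'cluster_%s' % profile
        # 3. search the cwd first, then ipython_dir
        cwd_candidate = os.path.join(os.getcwd(), name)
        if os.path.isdir(cwd_candidate):
            return cwd_candidate
        return os.path.join(ipython_dir, name)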
@@ -46,8 +46,7 @@ from IPython.parallel.apps.clusterdir import (
46 46 default_config_file_name = u'ipcluster_config.py'
47 47
48 48
49 _description = """\
50 Start an IPython cluster for parallel computing.\n\n
49 _description = """Start an IPython cluster for parallel computing.
51 50
52 51 An IPython cluster consists of 1 controller and 1 or more engines.
53 52 This command automates the startup of these processes using a wide
@@ -78,7 +78,8 @@ NO_CLUSTER = 12
78 77 #-----------------------------------------------------------------------------
79 78 # Main application
80 79 #-----------------------------------------------------------------------------
81 start_help = """
80 start_help = """Start an IPython cluster for parallel computing
81
82 82 Start an ipython cluster by its profile name or cluster
83 83 directory. Cluster directories contain configuration, log and
84 84 security related files and are named using the convention
@@ -88,7 +88,8 @@ the cwd or the ipython directory, you can simply refer to it
88 88 using its profile name, 'ipcluster start n=4 profile=<profile>',
89 89 otherwise use the 'cluster_dir' option.
90 90 """
91 stop_help = """
91 stop_help = """Stop a running IPython cluster
92
92 93 Stop a running ipython cluster by its profile name or cluster
93 94 directory. Cluster directories are named using the convention
94 95 'cluster_<profile>'. If your cluster directory is in
@@ -96,7 +97,8 @@ the cwd or the ipython directory, you can simply refer to it
96 97 using its profile name, 'ipcluster stop profile=<profile>', otherwise
97 98 use the 'cluster_dir' option.
98 99 """
99 engines_help = """
100 engines_help = """Start engines connected to an existing IPython cluster
101
100 102 Start one or more engines to connect to an existing Cluster
101 103 by profile name or cluster directory.
102 104 Cluster directories contain configuration, log and
@@ -107,7 +109,8 @@ the cwd or the ipython directory, you can simply refer to it
107 109 using its profile name, 'ipcluster engines n=4 profile=<profile>',
108 110 otherwise use the 'cluster_dir' option.
109 111 """
110 create_help = """
112 create_help = """Create an ipcluster profile by name
113
111 114 Create an ipython cluster directory by its profile name or
112 115 cluster directory path. Cluster directories contain
113 116 configuration, log and security related files and are named
@@ -119,7 +122,9 @@ cluster directory by profile name,
119 122 `ipcluster create profile=mycluster`, which will put the directory
120 123 in `<ipython_dir>/cluster_mycluster`.
121 124 """
122 list_help = """List all available clusters, by cluster directory, that can
125 list_help = """List available cluster profiles
126
127 List all available clusters, by cluster directory, that can
123 128 be found in the current working directory or in the ipython
124 129 directory. Cluster directories are named using the convention
125 130 'cluster_<profile>'.
@@ -79,8 +79,8 @@ The IPython controller provides a gateway between the IPython engines and
79 79 clients. The controller needs to be started before the engines and can be
80 80 configured using command line options or using a cluster directory. Cluster
81 81 directories contain config, log and security files and are usually located in
82 your ipython directory and named as "cluster_<profile>". See the --profile
83 and --cluster-dir options for details.
82 your ipython directory and named as "cluster_<profile>". See the `profile`
83 and `cluster_dir` options for details.
84 84 """
85 85
86 86
@@ -92,14 +92,16 @@ and --cluster-dir options for details.
92 92 flags = {}
93 93 flags.update(base_flags)
94 94 flags.update({
95 'usethreads' : ( {'IPControllerApp' : {'usethreads' : True}},
95 'usethreads' : ( {'IPControllerApp' : {'use_threads' : True}},
96 96 'Use threads instead of processes for the schedulers'),
97 'sqlitedb' : ({'HubFactory' : {'db_class' : 'IPython.parallel.controller.sqlitedb.SQLiteDB'}},
97 'sqlitedb' : ({'HubFactory' : Config({'db_class' : 'IPython.parallel.controller.sqlitedb.SQLiteDB'})},
98 98 'use the SQLiteDB backend'),
99 'mongodb' : ({'HubFactory' : {'db_class' : 'IPython.parallel.controller.mongodb.MongoDB'}},
99 'mongodb' : ({'HubFactory' : Config({'db_class' : 'IPython.parallel.controller.mongodb.MongoDB'})},
100 100 'use the MongoDB backend'),
101 'dictdb' : ({'HubFactory' : {'db_class' : 'IPython.parallel.controller.dictdb.DictDB'}},
101 'dictdb' : ({'HubFactory' : Config({'db_class' : 'IPython.parallel.controller.dictdb.DictDB'})},
102 102 'use the in-memory DictDB backend'),
103 'reuse' : ({'IPControllerApp' : Config({'reuse_files' : True})},
104 'reuse existing json connection files')
103 105 })
104 106
105 107 flags.update()
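The db-backend flags now wrap their settings in ``Config(...)`` rather than plain dicts, presumably so the loader merges them as nested config sections instead of storing a literal dict value. A minimal sketch of the pattern, assuming the 0.11-era config loader:

.. sourcecode:: python

    from IPython.config.loader import Config

    flags = {
        'sqlitedb': (
            # the inner Config merges into HubFactory's section when the
            # flag is given on the command line: ipcontroller --sqlitedb
            {'HubFactory': Config({'db_class': 'IPython.parallel.controller.sqlitedb.SQLiteDB'})},
            'use the SQLiteDB backend',
        ),
    }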
@@ -133,7 +135,7 @@ class IPControllerApp(ClusterApplication):
133 135 help="import statements to be run at startup. Necessary in some environments"
134 136 )
135 137
136 usethreads = Bool(False, config=True,
138 use_threads = Bool(False, config=True,
137 139 help='Use threads instead of processes for the schedulers',
138 140 )
139 141
@@ -141,7 +143,7 @@ class IPControllerApp(ClusterApplication):
141 143 children = List()
142 144 mq_class = Unicode('zmq.devices.ProcessMonitoredQueue')
143 145
144 def _usethreads_changed(self, name, old, new):
146 def _use_threads_changed(self, name, old, new):
145 147 self.mq_class = 'zmq.devices.%sMonitoredQueue'%('Thread' if new else 'Process')
146 148
147 149 aliases = Dict(dict(
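The handler rename is required, not cosmetic: old-style traitlets dispatches change notifications by name, so a trait called ``use_threads`` is only observed by a method named ``_use_threads_changed``. A minimal sketch:

.. sourcecode:: python

    from IPython.utils.traitlets import HasTraits, Bool, Unicode

    class Sketch(HasTraits):
        use_threads = Bool(False)
        mq_class = Unicode('zmq.devices.ProcessMonitoredQueue')

        def _use_threads_changed(self, name, old, new):
            # fires automatically whenever use_threads is assigned
            self.mq_class = 'zmq.devices.%sMonitoredQueue' % ('Thread' if new else 'Process')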
@@ -152,7 +154,7 @@ class IPControllerApp(ClusterApplication):
152 154 reuse_files = 'IPControllerApp.reuse_files',
153 155 secure = 'IPControllerApp.secure',
154 156 ssh = 'IPControllerApp.ssh_server',
155 usethreads = 'IPControllerApp.usethreads',
157 use_threads = 'IPControllerApp.use_threads',
156 158 import_statements = 'IPControllerApp.import_statements',
157 159 location = 'IPControllerApp.location',
158 160
@@ -271,7 +273,7 @@ class IPControllerApp(ClusterApplication):
271 273 mq = import_item(str(self.mq_class))
272 274
273 275 hub = self.factory
274 # maybe_inproc = 'inproc://monitor' if self.usethreads else self.monitor_url
276 # maybe_inproc = 'inproc://monitor' if self.use_threads else self.monitor_url
275 277 # IOPub relay (in a Process)
276 278 q = mq(zmq.PUB, zmq.SUB, zmq.PUB, 'N/A','iopub')
277 279 q.bind_in(hub.client_info['iopub'])
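For context, ``mq_class`` resolves to one of pyzmq's monitored-queue device classes; a sketch of the toggle (string prefixes as in the Python 2-era code above):

.. sourcecode:: python

    import zmq
    from zmq.devices import ProcessMonitoredQueue, ThreadMonitoredQueue

    use_threads = False
    mq = ThreadMonitoredQueue if use_threads else ProcessMonitoredQueue
    # arguments: in_type, out_type, mon_type, in_prefix, out_prefix
    q = mq(zmq.PUB, zmq.SUB, zmq.PUB, 'N/A', 'iopub')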
@@ -46,7 +46,7 @@ from IPython.utils.traitlets import Bool, Unicode, Dict, List
46 46 #: The default config file name for this application
47 47 default_config_file_name = u'ipengine_config.py'
48 48
49 _description = """Start an IPython engine for parallel computing.\n\n
49 _description = """Start an IPython engine for parallel computing.
50 50
51 51 IPython engines run in parallel and perform computations on behalf of a client
52 52 and controller. A controller needs to be started before the engines. The
@@ -36,14 +36,14 @@ from IPython.parallel.apps.logwatcher import LogWatcher
36 36 #: The default config file name for this application
37 37 default_config_file_name = u'iplogger_config.py'
38 38
39 _description = """Start an IPython logger for parallel computing.\n\n
39 _description = """Start an IPython logger for parallel computing.
40 40
41 41 IPython controllers and engines (and your own processes) can broadcast log messages
42 42 by registering a `zmq.log.handlers.PUBHandler` with the `logging` module. The
43 43 logger can be configured using command line options or using a cluster
44 44 directory. Cluster directories contain config, log and security files and are
45 45 usually located in your ipython directory and named as "cluster_<profile>".
46 See the --profile and --cluster-dir options for details.
46 See the `profile` and `cluster_dir` options for details.
47 47 """
48 48
49 49
@@ -141,7 +141,6 @@ class EngineFactory(RegistrationFactory):
141 141 self.kernel.start()
142 142 hb_addrs = [ disambiguate_url(addr, self.location) for addr in hb_addrs ]
143 143 heart = Heart(*map(str, hb_addrs), heart_id=identity)
144 # ioloop.DelayedCallback(heart.start, 1000, self.loop).start()
145 144 heart.start()
146 145
147 146
@@ -48,7 +48,7 @@ class TestProcessLauncher(LocalProcessLauncher):
48 48 def setup():
49 49 cp = TestProcessLauncher()
50 50 cp.cmd_and_args = ipcontroller_cmd_argv + \
51 ['--profile', 'iptest', '--log-level', '99', '-r']
51 ['profile=iptest', 'log_level=50', '--reuse']
52 52 cp.start()
53 53 launchers.append(cp)
54 54 cluster_dir = os.path.join(get_ipython_dir(), 'cluster_iptest')
@@ -70,7 +70,7 @@ def add_engines(n=1, profile='iptest'):
70 70 eps = []
71 71 for i in range(n):
72 72 ep = TestProcessLauncher()
73 ep.cmd_and_args = ipengine_cmd_argv + ['--profile', profile, '--log-level', '99']
73 ep.cmd_and_args = ipengine_cmd_argv + ['profile=%s'%profile, 'log_level=50']
74 74 ep.start()
75 75 launchers.append(ep)
76 76 eps.append(ep)
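The test launchers switch from optparse-style flags to the newconfig ``name=value`` form; side by side, the same launch looks like this (values taken from the hunks above):

.. sourcecode:: python

    old_args = ['--profile', 'iptest', '--log-level', '99', '-r']
    new_args = ['profile=iptest', 'log_level=50', '--reuse']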
@@ -48,11 +48,11 @@ class TestSession(SessionTestCase):
48 48 self.assertTrue(s.unpack is ss.default_unpacker)
49 49 self.assertEquals(s.username, os.environ.get('USER', 'username'))
50 50
51 s = ss.StreamSession(username=None)
51 s = ss.StreamSession()
52 52 self.assertEquals(s.username, os.environ.get('USER', 'username'))
53 53
54 self.assertRaises(TypeError, ss.StreamSession, packer='hi')
55 self.assertRaises(TypeError, ss.StreamSession, unpacker='hi')
54 self.assertRaises(TypeError, ss.StreamSession, pack='hi')
55 self.assertRaises(TypeError, ss.StreamSession, unpack='hi')
56 56 u = str(uuid.uuid4())
57 57 s = ss.StreamSession(username='carrot', session=u)
58 58 self.assertEquals(s.session, u)
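The keyword arguments were renamed from ``packer``/``unpacker`` to ``pack``/``unpack``, and non-callables are now rejected with ``TypeError``. A hedged usage sketch, with ``ss`` being the streamsession module as in the test:

.. sourcecode:: python

    import json

    # pack/unpack must be callables (serializer functions)
    s = ss.StreamSession(pack=json.dumps, unpack=json.loads)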
@@ -195,7 +195,7 @@ simply start a controller and engines on a single host using the
195 195 :command:`ipcluster` command. To start a controller and 4 engines on your
196 196 localhost, just do::
197 197
198 $ ipcluster start -n 4
198 $ ipcluster start n=4
199 199
200 200 More details about starting the IPython controller and engines can be found
201 201 :ref:`here <parallel_process>`
@@ -53,11 +53,11 @@ these things to happen.
53 53 Automatic starting using :command:`mpiexec` and :command:`ipcluster`
54 54 --------------------------------------------------------------------
55 55
56 The easiest approach is to use the `mpiexec` mode of :command:`ipcluster`,
56 The easiest approach is to use the `MPIExec` Launchers in :command:`ipcluster`,
57 57 which will first start a controller and then a set of engines using
58 58 :command:`mpiexec`::
59 59
60 $ ipcluster mpiexec -n 4
60 $ ipcluster start n=4 elauncher=MPIExecEngineSetLauncher
61 61
62 62 This approach is best as interrupting :command:`ipcluster` will automatically
63 63 stop and clean up the controller and engines.
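The ``elauncher=MPIExecEngineSetLauncher`` shorthand should be equivalent to setting the engine launcher in :file:`ipcluster_config.py`; a sketch matching the config line introduced later in this diff:

.. sourcecode:: python

    # in <cluster_dir>/ipcluster_config.py
    c = get_config()
    c.IPClusterEnginesApp.engine_launcher = \
        'IPython.parallel.apps.launcher.MPIExecEngineSetLauncher'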
@@ -68,14 +68,14 @@ Manual starting using :command:`mpiexec`
68 68 If you want to start the IPython engines using the :command:`mpiexec`, just
69 69 do::
70 70
71 $ mpiexec -n 4 ipengine --mpi=mpi4py
71 $ mpiexec -n 4 ipengine mpi=mpi4py
72 72
73 73 This requires that you already have a controller running and that the FURL
74 74 files for the engines are in place. We also have built in support for
75 75 PyTrilinos [PyTrilinos]_, which can be used (assuming it is installed) by
76 76 starting the engines with::
77 77
78 $ mpiexec -n 4 ipengine --mpi=pytrilinos
78 $ mpiexec -n 4 ipengine mpi=pytrilinos
79 79
80 80 Automatic starting using PBS and :command:`ipcluster`
81 81 ------------------------------------------------------
@@ -110,7 +110,7 @@ distributed array. Save the following text in a file called :file:`psum.py`:
110 110
111 111 Now, start an IPython cluster::
112 112
113 $ ipcluster start -p mpi -n 4
113 $ ipcluster start profile=mpi n=4
114 114
115 115 .. note::
116 116
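For reference, a minimal version of the :file:`psum.py` example the surrounding text refers to (a sketch assuming mpi4py and numpy, not the verbatim file from the docs):

.. sourcecode:: python

    # psum.py -- parallel sum of a distributed array via MPI Allreduce
    from mpi4py import MPI
    import numpy as np

    def psum(a):
        locsum = np.sum(a)           # each engine sums its local block
        rcvBuf = np.array(0.0, 'd')  # receive buffer for the global total
        MPI.COMM_WORLD.Allreduce([locsum, MPI.DOUBLE],
                                 [rcvBuf, MPI.DOUBLE],
                                 op=MPI.SUM)
        return rcvBuf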
@@ -19,7 +19,7 @@ To follow along with this tutorial, you will need to start the IPython
19 19 controller and four IPython engines. The simplest way of doing this is to use
20 20 the :command:`ipcluster` command::
21 21
22 $ ipcluster start -n 4
22 $ ipcluster start n=4
23 23
24 24 For more detailed information about starting the controller and engines, see
25 25 our :ref:`introduction <ip1par>` to using IPython for parallel computing.
@@ -57,7 +57,7 @@ controller and engines in the following situations:
57 57
58 58 1. When the controller and engines are all run on localhost. This is useful
59 59 for testing or running on a multicore computer.
60 2. When engines are started using the :command:`mpirun` command that comes
60 2. When engines are started using the :command:`mpiexec` command that comes
61 61 with most MPI [MPI]_ implementations
62 62 3. When engines are started using the PBS [PBS]_ batch system
63 63 (or other `qsub` systems, such as SGE).
@@ -80,9 +80,9 @@ The simplest way to use ipcluster requires no configuration, and will
80 80 launch a controller and a number of engines on the local machine. For instance,
81 81 to start one controller and 4 engines on localhost, just do::
82 82
83 $ ipcluster start -n 4
83 $ ipcluster start n=4
84 84
85 To see other command line options for the local mode, do::
85 To see other command line options, do::
86 86
87 87 $ ipcluster -h
88 88
@@ -92,12 +92,12 @@ Configuring an IPython cluster
92 92
93 93 Cluster configurations are stored as `profiles`. You can create a new profile with::
94 94
95 $ ipcluster create -p myprofile
95 $ ipcluster create profile=myprofile
96 96
97 97 This will create the directory :file:`IPYTHONDIR/cluster_myprofile`, and populate it
98 98 with the default configuration files for the three IPython cluster commands. Once
99 99 you edit those files, you can continue to call ipcluster/ipcontroller/ipengine
100 with no arguments beyond ``-p myprofile``, and any configuration will be maintained.
100 with no arguments beyond ``p=myprofile``, and any configuration will be maintained.
101 101
102 102 There is no limit to the number of profiles you can have, so you can maintain a profile for each
103 103 of your common use cases. The default profile will be used whenever the
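Once a profile exists, a client can attach to it by the same name; a sketch using the parallel client API of this era (treat the exact signature as an assumption):

.. sourcecode:: python

    from IPython.parallel import Client

    # connects using the connection files in cluster_myprofile
    rc = Client(profile='myprofile')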
@@ -112,7 +112,8 @@ Using various batch systems with :command:`ipcluster`
112 112
113 113 :command:`ipcluster` has a notion of Launchers that can start controllers
114 114 and engines with various remote execution schemes. Currently supported
115 models include `mpiexec`, PBS-style (Torque, SGE), and Windows HPC Server.
115 models include :command:`ssh`, :command:`mpiexec`, PBS-style (Torque, SGE),
116 and Windows HPC Server.
116 117
117 118 .. note::
118 119
@@ -132,7 +133,7 @@ The mpiexec/mpirun mode is useful if you:
132 133
133 134 If these are satisfied, you can create a new profile::
134 135
135 $ ipcluster create -p mpi
136 $ ipcluster create profile=mpi
136 137
137 138 and edit the file :file:`IPYTHONDIR/cluster_mpi/ipcluster_config.py`.
138 139
@@ -140,11 +141,11 @@ There, instruct ipcluster to use the MPIExec launchers by adding the lines:
140 141
141 142 .. sourcecode:: python
142 143
143 c.Global.engine_launcher = 'IPython.parallel.apps.launcher.MPIExecEngineSetLauncher'
144 c.IPClusterEnginesApp.engine_launcher = 'IPython.parallel.apps.launcher.MPIExecEngineSetLauncher'
144 145
145 146 If the default MPI configuration is correct, then you can now start your cluster, with::
146 147
147 $ ipcluster start -n 4 -p mpi
148 $ ipcluster start n=4 profile=mpi
148 149
149 150 This does the following:
150 151
@@ -155,7 +156,7 @@ If you have a reason to also start the Controller with mpi, you can specify:
155 156
156 157 .. sourcecode:: python
157 158
158 c.Global.controller_launcher = 'IPython.parallel.apps.launcher.MPIExecControllerLauncher'
159 c.IPClusterStartApp.controller_launcher = 'IPython.parallel.apps.launcher.MPIExecControllerLauncher'
159 160
160 161 .. note::
161 162
@@ -189,7 +190,7 @@ The PBS mode uses the Portable Batch System [PBS]_ to start the engines.
189 190
190 191 As usual, we will start by creating a fresh profile::
191 192
192 $ ipcluster create -p pbs
193 $ ipcluster create profile=pbs
193 194
194 195 And in :file:`ipcluster_config.py`, we will select the PBS launchers for the controller
195 196 and engines:
@@ -213,7 +214,7 @@ to specify your own. Here is a sample PBS script template:
213 214 cd $$PBS_O_WORKDIR
214 215 export PATH=$$HOME/usr/local/bin
215 216 export PYTHONPATH=$$HOME/usr/local/lib/python2.7/site-packages
216 /usr/local/bin/mpiexec -n ${n} ipengine --cluster_dir=${cluster_dir}
217 /usr/local/bin/mpiexec -n ${n} ipengine cluster_dir=${cluster_dir}
217 218
218 219 There are a few important points about this template:
219 220
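The ``$$`` escapes and ``${n}``/``${cluster_dir}`` placeholders suggest these batch templates are Python ``string.Template`` strings; a sketch of the substitution (an assumption about the launcher internals, not confirmed by this diff):

.. sourcecode:: python

    from string import Template

    t = Template("mpiexec -n ${n} ipengine cluster_dir=${cluster_dir}")
    # $$ renders as a literal $; ${n} and ${cluster_dir} are filled in
    print(t.substitute(n=4, cluster_dir='/path/to/cluster_pbs'))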
@@ -251,7 +252,7 @@ The controller template should be similar, but simpler:
251 252 cd $$PBS_O_WORKDIR
252 253 export PATH=$$HOME/usr/local/bin
253 254 export PYTHONPATH=$$HOME/usr/local/lib/python2.7/site-packages
254 ipcontroller --cluster_dir=${cluster_dir}
255 ipcontroller cluster_dir=${cluster_dir}
255 256
256 257
257 258 Once you have created these scripts, save them with names like
@@ -287,7 +288,7 @@ connections on all its interfaces, by adding in :file:`ipcontroller_config`:
287 288
288 289 You can now run the cluster with::
289 290
290 $ ipcluster start -p pbs -n 128
291 $ ipcluster start profile=pbs n=128
291 292
292 293 Additional configuration options can be found in the PBS section of :file:`ipcluster_config`.
293 294
@@ -312,7 +313,7 @@ nodes and :command:`ipcontroller` can be run remotely as well, or on localhost.
312 313
313 314 As usual, we start by creating a clean profile::
314 315
315 $ ipcluster create -p ssh
316 $ ipcluster create profile=ssh
316 317
317 318 To use this mode, select the SSH launchers in :file:`ipcluster_config.py`:
318 319
@@ -334,7 +335,7 @@ The controller's remote location and configuration can be specified:
334 335 # Set the arguments to be passed to ipcontroller
335 336 # note that remotely launched ipcontroller will not get the contents of
336 337 # the local ipcontroller_config.py unless it resides on the *remote host*
337 # in the location specified by the --cluster_dir argument.
338 # in the location specified by the `cluster_dir` argument.
338 339 # c.SSHControllerLauncher.program_args = ['-r', '-ip', '0.0.0.0', '--cluster_dir', '/path/to/cd']
339 340
340 341 .. note::
@@ -351,7 +352,7 @@ on that host.
351 352
352 353 c.SSHEngineSetLauncher.engines = { 'host1.example.com' : 2,
353 354 'host2.example.com' : 5,
354 'host3.example.com' : (1, ['--cluster_dir', '/home/different/location']),
355 'host3.example.com' : (1, ['cluster_dir=/home/different/location']),
355 356 'host4.example.com' : 8 }
356 357
357 358 * The `engines` dict, where the keys are the host we want to run engines on and
@@ -452,10 +453,10 b' you want to unlock the door and enter your house. As with your house, you want'
452 453 to be able to create the key (or JSON file) once, and then simply use it at
453 454 any point in the future.
454 455
455 To do this, the only thing you have to do is specify the `-r` flag, so that
456 To do this, the only thing you have to do is specify the `--reuse` flag, so that
456 457 the connection information in the JSON files remains accurate::
457 458
458 $ ipcontroller -r
459 $ ipcontroller --reuse
459 460
460 461 Then, just copy the JSON files over the first time and you are set. You can
460 461 start and stop the controller and engines as many times as you want in the
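After copying the JSON files, a client can be pointed straight at one; a sketch (the file name follows the ipcontroller-client.json convention, and the path is illustrative):

.. sourcecode:: python

    from IPython.parallel import Client

    rc = Client('/path/to/ipcontroller-client.json')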
@@ -24,7 +24,7 @@ To follow along with this tutorial, you will need to start the IPython
24 24 controller and four IPython engines. The simplest way of doing this is to use
25 25 the :command:`ipcluster` command::
26 26
27 $ ipcluster start -n 4
27 $ ipcluster start n=4
28 28
29 29 For more detailed information about starting the controller and engines, see
30 30 our :ref:`introduction <ip1par>` to using IPython for parallel computing.
@@ -342,17 +342,17 @@ Schedulers
342 342
343 343 There are a variety of valid ways to determine where jobs should be assigned in a
344 344 load-balancing situation. In IPython, we support several standard schemes, and
345 even make it easy to define your own. The scheme can be selected via the ``--scheme``
346 argument to :command:`ipcontroller`, or in the :attr:`HubFactory.scheme` attribute
345 even make it easy to define your own. The scheme can be selected via the ``scheme``
346 argument to :command:`ipcontroller`, or in the :attr:`TaskScheduler.schemename` attribute
347 347 of a controller config object.
348 348
349 349 The built-in routing schemes:
350 350
351 351 To select one of these schemes, simply do::
352 352
353 $ ipcontroller --scheme <schemename>
353 $ ipcontroller scheme=<schemename>
354 354 for instance:
355 $ ipcontroller --scheme lru
355 $ ipcontroller scheme=lru
356 356
357 357 lru: Least Recently Used
358 358
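The same selection can presumably be made in :file:`ipcontroller_config.py` via the attribute named above; a sketch:

.. sourcecode:: python

    c = get_config()
    c.TaskScheduler.schemename = 'lru'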
@@ -162,7 +162,7 @@ cluster using the Windows HPC Server 2008 job scheduler. To make sure that
162 162 to start an IPython cluster on your local host. To do this, open a Windows
163 163 Command Prompt and type the following command::
164 164
165 ipcluster start -n 2
165 ipcluster start n=2
166 166
167 167 You should see a number of messages printed to the screen, ending with
168 168 "IPython cluster: started". The result should look something like the following
@@ -179,11 +179,11 @@ describe how to configure and run an IPython cluster on an actual compute
179 179 cluster running Windows HPC Server 2008. Here is an outline of the needed
180 180 steps:
181 181
182 1. Create a cluster profile using: ``ipcluster create -p mycluster``
182 1. Create a cluster profile using: ``ipcluster create profile=mycluster``
183 183
184 184 2. Edit configuration files in the directory :file:`.ipython\\cluster_mycluster`
185 185
186 3. Start the cluster using: ``ipcluster start -p mycluster -n 32``
186 3. Start the cluster using: ``ipcluster start profile=mycluster n=32``
187 187
188 188 Creating a cluster profile
189 189 --------------------------
@@ -204,7 +204,7 @@ security keys. The naming convention for cluster directories is:
204 204 To create a new cluster profile (named "mycluster") and the associated cluster
205 205 directory, type the following command at the Windows Command Prompt::
206 206
207 ipcluster create -p mycluster
207 ipcluster create profile=mycluster
208 208
209 209 The output of this command is shown in the screenshot below. Notice how
210 210 :command:`ipcluster` prints out the location of the newly created cluster
@@ -257,7 +257,7 @@ Starting the cluster profile
257 257 Once a cluster profile has been configured, starting an IPython cluster using
258 258 the profile is simple::
259 259
260 ipcluster start -p mycluster -n 32
260 ipcluster start profile=mycluster n=32
261 261
262 262 The ``n`` argument tells :command:`ipcluster` how many engines to start (in
263 263 this case 32). Stopping the cluster is as simple as typing Control-C.