rename '--cluster' flag to '--parallel' in ProfileApp...
MinRK
@@ -1,219 +1,219 b''
1 # encoding: utf-8
1 # encoding: utf-8
2 """
2 """
3 An application for managing IPython profiles.
3 An application for managing IPython profiles.
4
4
5 To be invoked as the `ipython profile` subcommand.
5 To be invoked as the `ipython profile` subcommand.
6
6
7 Authors:
7 Authors:
8
8
9 * Min RK
9 * Min RK
10
10
11 """
11 """
12
12
13 #-----------------------------------------------------------------------------
13 #-----------------------------------------------------------------------------
14 # Copyright (C) 2008-2011 The IPython Development Team
14 # Copyright (C) 2008-2011 The IPython Development Team
15 #
15 #
16 # Distributed under the terms of the BSD License. The full license is in
16 # Distributed under the terms of the BSD License. The full license is in
17 # the file COPYING, distributed as part of this software.
17 # the file COPYING, distributed as part of this software.
18 #-----------------------------------------------------------------------------
18 #-----------------------------------------------------------------------------
19
19
20 #-----------------------------------------------------------------------------
20 #-----------------------------------------------------------------------------
21 # Imports
21 # Imports
22 #-----------------------------------------------------------------------------
22 #-----------------------------------------------------------------------------
23
23
24 import logging
24 import logging
25 import os
25 import os
26
26
27 from IPython.config.application import Application, boolean_flag
27 from IPython.config.application import Application, boolean_flag
28 from IPython.core.application import (
28 from IPython.core.application import (
29 BaseIPythonApplication, base_flags, base_aliases
29 BaseIPythonApplication, base_flags, base_aliases
30 )
30 )
31 from IPython.core.profiledir import ProfileDir
31 from IPython.core.profiledir import ProfileDir
32 from IPython.utils.path import get_ipython_dir
32 from IPython.utils.path import get_ipython_dir
33 from IPython.utils.traitlets import Unicode, Bool, Dict
33 from IPython.utils.traitlets import Unicode, Bool, Dict
34
34
35 #-----------------------------------------------------------------------------
35 #-----------------------------------------------------------------------------
36 # Constants
36 # Constants
37 #-----------------------------------------------------------------------------
37 #-----------------------------------------------------------------------------
38
38
39 create_help = """Create an ipcluster profile by name
39 create_help = """Create an IPython profile by name
40
40
41 Create an ipython profile directory by its name or
41 Create an ipython profile directory by its name or
42 profile directory path. Profile directories contain
42 profile directory path. Profile directories contain
43 configuration, log and security related files and are named
43 configuration, log and security related files and are named
44 using the convention 'profile_<name>'. By default they are
44 using the convention 'profile_<name>'. By default they are
45 located in your ipython directory. Once created, you can
45 located in your ipython directory. Once created, you can
46 edit the configuration files in the profile
46 edit the configuration files in the profile
47 directory to configure IPython. Most users will create a
47 directory to configure IPython. Most users will create a
48 cluster directory by profile name,
48 profile directory by name,
49 `ipython profile create myprofile`, which will put the directory
49 `ipython profile create myprofile`, which will put the directory
50 in `<ipython_dir>/profile_myprofile`.
50 in `<ipython_dir>/profile_myprofile`.
51 """
51 """
52 list_help = """List available IPython profiles
52 list_help = """List available IPython profiles
53
53
54 List all available profiles, by profile location, that can
54 List all available profiles, by profile location, that can
55 be found in the current working directory or in the ipython
55 be found in the current working directory or in the ipython
56 directory. Profile directories are named using the convention
56 directory. Profile directories are named using the convention
57 'profile_<profile>'.
57 'profile_<profile>'.
58 """
58 """
59 profile_help = """Manage IPython profiles
59 profile_help = """Manage IPython profiles
60
60
61 Profile directories contain
61 Profile directories contain
62 configuration, log and security related files and are named
62 configuration, log and security related files and are named
63 using the convention 'profile_<name>'. By default they are
63 using the convention 'profile_<name>'. By default they are
64 located in your ipython directory. You can create profiles
64 located in your ipython directory. You can create profiles
65 with `ipython profile create <name>`, or see the profiles you
65 with `ipython profile create <name>`, or see the profiles you
66 already have with `ipython profile list`
66 already have with `ipython profile list`
67
67
68 To get started configuring IPython, simply do:
68 To get started configuring IPython, simply do:
69
69
70 $> ipython profile create
70 $> ipython profile create
71
71
72 and IPython will create the default profile in <ipython_dir>/profile_default,
72 and IPython will create the default profile in <ipython_dir>/profile_default,
73 where you can edit ipython_config.py to start configuring IPython.
73 where you can edit ipython_config.py to start configuring IPython.
74
74
75 """
75 """
76
76
77 #-----------------------------------------------------------------------------
77 #-----------------------------------------------------------------------------
78 # Profile Application Class (for `ipython profile` subcommand)
78 # Profile Application Class (for `ipython profile` subcommand)
79 #-----------------------------------------------------------------------------
79 #-----------------------------------------------------------------------------
80
80
81
81
82
82
83 class ProfileList(Application):
83 class ProfileList(Application):
84 name = u'ipython-profile'
84 name = u'ipython-profile'
85 description = list_help
85 description = list_help
86
86
87 aliases = Dict(dict(
87 aliases = Dict(dict(
88 ipython_dir = 'ProfileList.ipython_dir',
88 ipython_dir = 'ProfileList.ipython_dir',
89 log_level = 'Application.log_level',
89 log_level = 'Application.log_level',
90 ))
90 ))
91 flags = Dict(dict(
91 flags = Dict(dict(
92 debug = ({'Application' : {'log_level' : 0}},
92 debug = ({'Application' : {'log_level' : 0}},
93 "Set log_level to 0, maximizing log output."
93 "Set log_level to 0, maximizing log output."
94 )
94 )
95 ))
95 ))
96 ipython_dir = Unicode(get_ipython_dir(), config=True,
96 ipython_dir = Unicode(get_ipython_dir(), config=True,
97 help="""
97 help="""
98 The name of the IPython directory. This directory is used for logging
98 The name of the IPython directory. This directory is used for logging
99 configuration (through profiles), history storage, etc. The default
99 configuration (through profiles), history storage, etc. The default
100 is usually $HOME/.ipython. This option can also be specified through
100 is usually $HOME/.ipython. This option can also be specified through
101 the environment variable IPYTHON_DIR.
101 the environment variable IPYTHON_DIR.
102 """
102 """
103 )
103 )
104
104
105 def list_profile_dirs(self):
105 def list_profile_dirs(self):
106 # Find the search paths
106 # Find the search paths
107 paths = [os.getcwdu(), self.ipython_dir]
107 paths = [os.getcwdu(), self.ipython_dir]
108
108
109 self.log.warn('Searching for IPython profiles in paths: %r' % paths)
109 self.log.warn('Searching for IPython profiles in paths: %r' % paths)
110 for path in paths:
110 for path in paths:
111 files = os.listdir(path)
111 files = os.listdir(path)
112 for f in files:
112 for f in files:
113 full_path = os.path.join(path, f)
113 full_path = os.path.join(path, f)
114 if os.path.isdir(full_path) and f.startswith('profile_'):
114 if os.path.isdir(full_path) and f.startswith('profile_'):
115 profile = f.split('_',1)[-1]
115 profile = f.split('_',1)[-1]
116 start_cmd = 'ipython profile=%s' % profile
116 start_cmd = 'ipython profile=%s' % profile
117 print start_cmd + " ==> " + full_path
117 print start_cmd + " ==> " + full_path
118
118
119 def start(self):
119 def start(self):
120 self.list_profile_dirs()
120 self.list_profile_dirs()
121
121
122
122
123 create_flags = {}
123 create_flags = {}
124 create_flags.update(base_flags)
124 create_flags.update(base_flags)
125 create_flags.update(boolean_flag('reset', 'ProfileCreate.overwrite',
125 create_flags.update(boolean_flag('reset', 'ProfileCreate.overwrite',
126 "reset config files to defaults", "leave existing config files"))
126 "reset config files to defaults", "leave existing config files"))
127 create_flags.update(boolean_flag('cluster', 'ProfileCreate.cluster',
127 create_flags.update(boolean_flag('parallel', 'ProfileCreate.parallel',
128 "Include parallel computing config files",
128 "Include parallel computing config files",
129 "Don't include parallel computing config files"))
129 "Don't include parallel computing config files"))
130
130
131 class ProfileCreate(BaseIPythonApplication):
131 class ProfileCreate(BaseIPythonApplication):
132 name = u'ipython-profile'
132 name = u'ipython-profile'
133 description = create_help
133 description = create_help
134 auto_create = Bool(True, config=False)
134 auto_create = Bool(True, config=False)
135
135
136 def _copy_config_files_default(self):
136 def _copy_config_files_default(self):
137 return True
137 return True
138
138
139 cluster = Bool(False, config=True,
139 parallel = Bool(False, config=True,
140 help="whether to include parallel computing config files")
140 help="whether to include parallel computing config files")
141 def _cluster_changed(self, name, old, new):
141 def _parallel_changed(self, name, old, new):
142 cluster_files = [ 'ipcontroller_config.py',
142 parallel_files = [ 'ipcontroller_config.py',
143 'ipengine_config.py',
143 'ipengine_config.py',
144 'ipcluster_config.py'
144 'ipcluster_config.py'
145 ]
145 ]
146 if new:
146 if new:
147 for cf in cluster_files:
147 for cf in parallel_files:
148 self.config_files.append(cf)
148 self.config_files.append(cf)
149 else:
149 else:
150 for cf in cluster_files:
150 for cf in parallel_files:
151 if cf in self.config_files:
151 if cf in self.config_files:
152 self.config_files.remove(cf)
152 self.config_files.remove(cf)
153
153
154 def parse_command_line(self, argv):
154 def parse_command_line(self, argv):
155 super(ProfileCreate, self).parse_command_line(argv)
155 super(ProfileCreate, self).parse_command_line(argv)
156 # accept positional arg as profile name
156 # accept positional arg as profile name
157 if self.extra_args:
157 if self.extra_args:
158 self.profile = self.extra_args[0]
158 self.profile = self.extra_args[0]
159
159
160 flags = Dict(create_flags)
160 flags = Dict(create_flags)
161
161
162 aliases = Dict(dict(profile='BaseIPythonApplication.profile'))
162 aliases = Dict(dict(profile='BaseIPythonApplication.profile'))
163
163
164 classes = [ProfileDir]
164 classes = [ProfileDir]
165
165
166 def init_config_files(self):
166 def init_config_files(self):
167 super(ProfileCreate, self).init_config_files()
167 super(ProfileCreate, self).init_config_files()
168 # use local imports, since these classes may import from here
168 # use local imports, since these classes may import from here
169 from IPython.frontend.terminal.ipapp import TerminalIPythonApp
169 from IPython.frontend.terminal.ipapp import TerminalIPythonApp
170 apps = [TerminalIPythonApp]
170 apps = [TerminalIPythonApp]
171 try:
171 try:
172 from IPython.frontend.qt.console.qtconsoleapp import IPythonQtConsoleApp
172 from IPython.frontend.qt.console.qtconsoleapp import IPythonQtConsoleApp
173 except ImportError:
173 except ImportError:
174 pass
174 pass
175 else:
175 else:
176 apps.append(IPythonQtConsoleApp)
176 apps.append(IPythonQtConsoleApp)
177 if self.cluster:
177 if self.parallel:
178 from IPython.parallel.apps.ipcontrollerapp import IPControllerApp
178 from IPython.parallel.apps.ipcontrollerapp import IPControllerApp
179 from IPython.parallel.apps.ipengineapp import IPEngineApp
179 from IPython.parallel.apps.ipengineapp import IPEngineApp
180 from IPython.parallel.apps.ipclusterapp import IPClusterStart
180 from IPython.parallel.apps.ipclusterapp import IPClusterStart
181 from IPython.parallel.apps.iploggerapp import IPLoggerApp
181 from IPython.parallel.apps.iploggerapp import IPLoggerApp
182 apps.extend([
182 apps.extend([
183 IPControllerApp,
183 IPControllerApp,
184 IPEngineApp,
184 IPEngineApp,
185 IPClusterStart,
185 IPClusterStart,
186 IPLoggerApp,
186 IPLoggerApp,
187 ])
187 ])
188 for App in apps:
188 for App in apps:
189 app = App()
189 app = App()
190 app.config.update(self.config)
190 app.config.update(self.config)
191 app.log = self.log
191 app.log = self.log
192 app.overwrite = self.overwrite
192 app.overwrite = self.overwrite
193 app.copy_config_files=True
193 app.copy_config_files=True
194 app.profile = self.profile
194 app.profile = self.profile
195 app.init_profile_dir()
195 app.init_profile_dir()
196 app.init_config_files()
196 app.init_config_files()
197
197
198 def stage_default_config_file(self):
198 def stage_default_config_file(self):
199 pass
199 pass
200
200
201 class ProfileApp(Application):
201 class ProfileApp(Application):
202 name = u'ipython-profile'
202 name = u'ipython-profile'
203 description = profile_help
203 description = profile_help
204
204
205 subcommands = Dict(dict(
205 subcommands = Dict(dict(
206 create = (ProfileCreate, "Create a new profile dir with default config files"),
206 create = (ProfileCreate, "Create a new profile dir with default config files"),
207 list = (ProfileList, "List existing profiles")
207 list = (ProfileList, "List existing profiles")
208 ))
208 ))
209
209
210 def start(self):
210 def start(self):
211 if self.subapp is None:
211 if self.subapp is None:
212 print "No subcommand specified. Must specify one of: %s"%(self.subcommands.keys())
212 print "No subcommand specified. Must specify one of: %s"%(self.subcommands.keys())
213 print
213 print
214 self.print_description()
214 self.print_description()
215 self.print_subcommands()
215 self.print_subcommands()
216 self.exit(1)
216 self.exit(1)
217 else:
217 else:
218 return self.subapp.start()
218 return self.subapp.start()
219
219
@@ -1,504 +1,504 b''
1 .. _parallel_process:
1 .. _parallel_process:
2
2
3 ===========================================
3 ===========================================
4 Starting the IPython controller and engines
4 Starting the IPython controller and engines
5 ===========================================
5 ===========================================
6
6
7 To use IPython for parallel computing, you need to start one instance of
7 To use IPython for parallel computing, you need to start one instance of
8 the controller and one or more instances of the engine. The controller
8 the controller and one or more instances of the engine. The controller
9 and each engine can run on different machines or on the same machine.
9 and each engine can run on different machines or on the same machine.
10 Because of this, there are many different possibilities.
10 Because of this, there are many different possibilities.
11
11
12 Broadly speaking, there are two ways of going about starting a controller and engines:
12 Broadly speaking, there are two ways of going about starting a controller and engines:
13
13
14 * In an automated manner using the :command:`ipcluster` command.
14 * In an automated manner using the :command:`ipcluster` command.
15 * In a more manual way using the :command:`ipcontroller` and
15 * In a more manual way using the :command:`ipcontroller` and
16 :command:`ipengine` commands.
16 :command:`ipengine` commands.
17
17
18 This document describes both of these methods. We recommend that new users
18 This document describes both of these methods. We recommend that new users
19 start with the :command:`ipcluster` command as it simplifies many common usage
19 start with the :command:`ipcluster` command as it simplifies many common usage
20 cases.
20 cases.
21
21
22 General considerations
22 General considerations
23 ======================
23 ======================
24
24
25 Before delving into the details about how you can start a controller and
25 Before delving into the details about how you can start a controller and
26 engines using the various methods, we outline some of the general issues that
26 engines using the various methods, we outline some of the general issues that
27 come up when starting the controller and engines. These things come up no
27 come up when starting the controller and engines. These things come up no
28 matter which method you use to start your IPython cluster.
28 matter which method you use to start your IPython cluster.
29
29
30 Let's say that you want to start the controller on ``host0`` and engines on
30 Let's say that you want to start the controller on ``host0`` and engines on
31 hosts ``host1``-``hostn``. The following steps are then required:
31 hosts ``host1``-``hostn``. The following steps are then required:
32
32
33 1. Start the controller on ``host0`` by running :command:`ipcontroller` on
33 1. Start the controller on ``host0`` by running :command:`ipcontroller` on
34 ``host0``.
34 ``host0``.
35 2. Move the JSON file (:file:`ipcontroller-engine.json`) created by the
35 2. Move the JSON file (:file:`ipcontroller-engine.json`) created by the
36 controller from ``host0`` to hosts ``host1``-``hostn``.
36 controller from ``host0`` to hosts ``host1``-``hostn``.
37 3. Start the engines on hosts ``host1``-``hostn`` by running
37 3. Start the engines on hosts ``host1``-``hostn`` by running
38 :command:`ipengine`. This command has to be told where the JSON file
38 :command:`ipengine`. This command has to be told where the JSON file
39 (:file:`ipcontroller-engine.json`) is located.
39 (:file:`ipcontroller-engine.json`) is located.
40
40
41 At this point, the controller and engines will be connected. By default, the JSON files
41 At this point, the controller and engines will be connected. By default, the JSON files
42 created by the controller are put into the :file:`~/.ipython/cluster_default/security`
42 created by the controller are put into the :file:`~/.ipython/cluster_default/security`
43 directory. If the engines share a filesystem with the controller, step 2 can be skipped as
43 directory. If the engines share a filesystem with the controller, step 2 can be skipped as
44 the engines will automatically look at that location.
44 the engines will automatically look at that location.
45
45
46 The final step required to actually use the running controller from a client is to move
46 The final step required to actually use the running controller from a client is to move
47 the JSON file :file:`ipcontroller-client.json` from ``host0`` to any host where clients
47 the JSON file :file:`ipcontroller-client.json` from ``host0`` to any host where clients
48 will be run. If these files are put into the :file:`~/.ipython/cluster_default/security`
48 will be run. If these files are put into the :file:`~/.ipython/cluster_default/security`
49 directory of the client's host, they will be found automatically. Otherwise, the full path
49 directory of the client's host, they will be found automatically. Otherwise, the full path
50 to them has to be passed to the client's constructor.
50 to them has to be passed to the client's constructor.
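
A minimal sketch of that last step, assuming the :class:`~IPython.parallel.Client` constructor accepts the path to the connection file (the paths here are hypothetical):

.. sourcecode:: python

    from IPython.parallel import Client

    # connection file copied over from host0
    rc = Client('/path/to/ipcontroller-client.json')

    # if the file is in the default security directory, no argument is needed:
    # rc = Client()
    print rc.ids    # ids of the engines currently registered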
51
51
52 Using :command:`ipcluster`
52 Using :command:`ipcluster`
53 ===========================
53 ===========================
54
54
55 The :command:`ipcluster` command provides a simple way of starting a
55 The :command:`ipcluster` command provides a simple way of starting a
56 controller and engines in the following situations:
56 controller and engines in the following situations:
57
57
58 1. When the controller and engines are all run on localhost. This is useful
58 1. When the controller and engines are all run on localhost. This is useful
59 for testing or running on a multicore computer.
59 for testing or running on a multicore computer.
60 2. When engines are started using the :command:`mpiexec` command that comes
60 2. When engines are started using the :command:`mpiexec` command that comes
61 with most MPI [MPI]_ implementations.
61 with most MPI [MPI]_ implementations.
62 3. When engines are started using the PBS [PBS]_ batch system
62 3. When engines are started using the PBS [PBS]_ batch system
63 (or other `qsub` systems, such as SGE).
63 (or other `qsub` systems, such as SGE).
64 4. When the controller is started on localhost and the engines are started on
64 4. When the controller is started on localhost and the engines are started on
65 remote nodes using :command:`ssh`.
65 remote nodes using :command:`ssh`.
66 5. When engines are started using the Windows HPC Server batch system.
66 5. When engines are started using the Windows HPC Server batch system.
67
67
68 .. note::
68 .. note::
69
69
70 Currently :command:`ipcluster` requires that the
70 Currently :command:`ipcluster` requires that the
71 :file:`~/.ipython/profile_<name>/security` directory live on a shared filesystem that is
71 :file:`~/.ipython/profile_<name>/security` directory live on a shared filesystem that is
72 seen by both the controller and engines. If you don't have a shared file
72 seen by both the controller and engines. If you don't have a shared file
73 system you will need to use :command:`ipcontroller` and
73 system you will need to use :command:`ipcontroller` and
74 :command:`ipengine` directly.
74 :command:`ipengine` directly.
75
75
76 Under the hood, :command:`ipcluster` just uses :command:`ipcontroller`
76 Under the hood, :command:`ipcluster` just uses :command:`ipcontroller`
77 and :command:`ipengine` to perform the steps described above.
77 and :command:`ipengine` to perform the steps described above.
78
78
79 The simplest way to use ipcluster requires no configuration, and will
79 The simplest way to use ipcluster requires no configuration, and will
80 launch a controller and a number of engines on the local machine. For instance,
80 launch a controller and a number of engines on the local machine. For instance,
81 to start one controller and 4 engines on localhost, just do::
81 to start one controller and 4 engines on localhost, just do::
82
82
83 $ ipcluster start n=4
83 $ ipcluster start n=4
84
84
85 To see other command line options, do::
85 To see other command line options, do::
86
86
87 $ ipcluster -h
87 $ ipcluster -h
88
88
89
89
90 Configuring an IPython cluster
90 Configuring an IPython cluster
91 ==============================
91 ==============================
92
92
93 Cluster configurations are stored as `profiles`. You can create a new profile with::
93 Cluster configurations are stored as `profiles`. You can create a new profile with::
94
94
95 $ ipython profile create --cluster profile=myprofile
95 $ ipython profile create --parallel profile=myprofile
96
96
97 This will create the directory :file:`IPYTHONDIR/cluster_myprofile`, and populate it
97 This will create the directory :file:`IPYTHONDIR/cluster_myprofile`, and populate it
98 with the default configuration files for the three IPython cluster commands. Once
98 with the default configuration files for the three IPython cluster commands. Once
99 you edit those files, you can continue to call ipcluster/ipcontroller/ipengine
99 you edit those files, you can continue to call ipcluster/ipcontroller/ipengine
100 with no arguments beyond ``p=myprofile``, and any configuration will be maintained.
100 with no arguments beyond ``p=myprofile``, and any configuration will be maintained.
101
101
102 There is no limit to the number of profiles you can have, so you can maintain a profile for each
102 There is no limit to the number of profiles you can have, so you can maintain a profile for each
103 of your common use cases. The default profile will be used whenever the
103 of your common use cases. The default profile will be used whenever the
104 profile argument is not specified, so edit :file:`IPYTHONDIR/cluster_default/*_config.py` to
104 profile argument is not specified, so edit :file:`IPYTHONDIR/cluster_default/*_config.py` to
105 represent your most common use case.
105 represent your most common use case.
106
106
107 The configuration files are loaded with commented-out settings and explanations,
107 The configuration files are loaded with commented-out settings and explanations,
108 which should cover most of the available possibilities.
108 which should cover most of the available possibilities.
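
For orientation, a freshly generated :file:`ipcluster_config.py` looks roughly like the following (a hypothetical excerpt; exact option names vary between IPython versions), with every setting commented out until you enable it:

.. sourcecode:: python

    # Configuration file for ipcluster.
    c = get_config()

    # The number of engines to start, disabled until you pick a value:
    # c.IPClusterEnginesApp.n = 8

    # Which launcher class starts the engines (see the launcher sections below):
    # c.IPClusterEnginesApp.engine_launcher = 'IPython.parallel.apps.launcher.LocalEngineSetLauncher'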
109
109
110 Using various batch systems with :command:`ipcluster`
110 Using various batch systems with :command:`ipcluster`
111 ------------------------------------------------------
111 ------------------------------------------------------
112
112
113 :command:`ipcluster` has a notion of Launchers that can start controllers
113 :command:`ipcluster` has a notion of Launchers that can start controllers
114 and engines with various remote execution schemes. Currently supported
114 and engines with various remote execution schemes. Currently supported
115 models include :command:`ssh`, :command:`mpiexec`, PBS-style (Torque, SGE),
115 models include :command:`ssh`, :command:`mpiexec`, PBS-style (Torque, SGE),
116 and Windows HPC Server.
116 and Windows HPC Server.
117
117
118 .. note::
118 .. note::
119
119
120 The Launchers and configuration are designed in such a way that advanced
120 The Launchers and configuration are designed in such a way that advanced
121 users can subclass and configure them to fit their own systems that we
121 users can subclass and configure them to fit their own systems that we
122 do not yet support (such as Condor).
122 do not yet support (such as Condor).
123
123
124 Using :command:`ipcluster` in mpiexec/mpirun mode
124 Using :command:`ipcluster` in mpiexec/mpirun mode
125 --------------------------------------------------
125 --------------------------------------------------
126
126
127
127
128 The mpiexec/mpirun mode is useful if you:
128 The mpiexec/mpirun mode is useful if you:
129
129
130 1. Have MPI installed.
130 1. Have MPI installed.
131 2. Have systems configured to use the :command:`mpiexec` or
131 2. Have systems configured to use the :command:`mpiexec` or
132 :command:`mpirun` commands to start MPI processes.
132 :command:`mpirun` commands to start MPI processes.
133
133
134 If these are satisfied, you can create a new profile::
134 If these are satisfied, you can create a new profile::
135
135
136 $ ipython profile create --cluster profile=mpi
136 $ ipython profile create --parallel profile=mpi
137
137
138 and edit the file :file:`IPYTHONDIR/cluster_mpi/ipcluster_config.py`.
138 and edit the file :file:`IPYTHONDIR/cluster_mpi/ipcluster_config.py`.
139
139
140 There, instruct ipcluster to use the MPIExec launchers by adding the lines:
140 There, instruct ipcluster to use the MPIExec launchers by adding the lines:
141
141
142 .. sourcecode:: python
142 .. sourcecode:: python
143
143
144 c.IPClusterEnginesApp.engine_launcher = 'IPython.parallel.apps.launcher.MPIExecEngineSetLauncher'
144 c.IPClusterEnginesApp.engine_launcher = 'IPython.parallel.apps.launcher.MPIExecEngineSetLauncher'
145
145
146 If the default MPI configuration is correct, then you can now start your cluster, with::
146 If the default MPI configuration is correct, then you can now start your cluster, with::
147
147
148 $ ipcluster start n=4 profile=mpi
148 $ ipcluster start n=4 profile=mpi
149
149
150 This does the following:
150 This does the following:
151
151
152 1. Starts the IPython controller on current host.
152 1. Starts the IPython controller on current host.
153 2. Uses :command:`mpiexec` to start 4 engines.
153 2. Uses :command:`mpiexec` to start 4 engines.
154
154
155 If you have a reason to also start the Controller with mpi, you can specify:
155 If you have a reason to also start the Controller with mpi, you can specify:
156
156
157 .. sourcecode:: python
157 .. sourcecode:: python
158
158
159 c.IPClusterStartApp.controller_launcher = 'IPython.parallel.apps.launcher.MPIExecControllerLauncher'
159 c.IPClusterStartApp.controller_launcher = 'IPython.parallel.apps.launcher.MPIExecControllerLauncher'
160
160
161 .. note::
161 .. note::
162
162
163 The Controller *will not* be in the same MPI universe as the engines, so there is not
163 The Controller *will not* be in the same MPI universe as the engines, so there is not
164 much reason to do this unless sysadmins demand it.
164 much reason to do this unless sysadmins demand it.
165
165
166 On newer MPI implementations (such as OpenMPI), this will work even if you
166 On newer MPI implementations (such as OpenMPI), this will work even if you
167 don't make any calls to MPI or call :func:`MPI_Init`. However, older MPI
167 don't make any calls to MPI or call :func:`MPI_Init`. However, older MPI
168 implementations actually require each process to call :func:`MPI_Init` upon
168 implementations actually require each process to call :func:`MPI_Init` upon
169 starting. The easiest way of having this done is to install the mpi4py
169 starting. The easiest way of having this done is to install the mpi4py
170 [mpi4py]_ package and then specify the ``c.MPI.use`` option in :file:`ipengine_config.py`:
170 [mpi4py]_ package and then specify the ``c.MPI.use`` option in :file:`ipengine_config.py`:
171
171
172 .. sourcecode:: python
172 .. sourcecode:: python
173
173
174 c.MPI.use = 'mpi4py'
174 c.MPI.use = 'mpi4py'
175
175
176 Unfortunately, even this won't work for some MPI implementations. If you are
176 Unfortunately, even this won't work for some MPI implementations. If you are
177 having problems with this, you will likely have to use a custom Python
177 having problems with this, you will likely have to use a custom Python
178 executable that itself calls :func:`MPI_Init` at the appropriate time.
178 executable that itself calls :func:`MPI_Init` at the appropriate time.
179 Fortunately, mpi4py comes with such a custom Python executable that is easy to
179 Fortunately, mpi4py comes with such a custom Python executable that is easy to
180 install and use. However, this custom Python executable approach will not work
180 install and use. However, this custom Python executable approach will not work
181 with :command:`ipcluster` currently.
181 with :command:`ipcluster` currently.
182
182
183 More details on using MPI with IPython can be found :ref:`here <parallelmpi>`.
183 More details on using MPI with IPython can be found :ref:`here <parallelmpi>`.
184
184
185
185
186 Using :command:`ipcluster` in PBS mode
186 Using :command:`ipcluster` in PBS mode
187 ---------------------------------------
187 ---------------------------------------
188
188
189 The PBS mode uses the Portable Batch System [PBS]_ to start the engines.
189 The PBS mode uses the Portable Batch System [PBS]_ to start the engines.
190
190
191 As usual, we will start by creating a fresh profile::
191 As usual, we will start by creating a fresh profile::
192
192
193 $ ipython profile create --cluster profile=pbs
193 $ ipython profile create --parallel profile=pbs
194
194
195 And in :file:`ipcluster_config.py`, we will select the PBS launchers for the controller
195 And in :file:`ipcluster_config.py`, we will select the PBS launchers for the controller
196 and engines:
196 and engines:
197
197
198 .. sourcecode:: python
198 .. sourcecode:: python
199
199
200 c.Global.controller_launcher = 'IPython.parallel.apps.launcher.PBSControllerLauncher'
200 c.Global.controller_launcher = 'IPython.parallel.apps.launcher.PBSControllerLauncher'
201 c.Global.engine_launcher = 'IPython.parallel.apps.launcher.PBSEngineSetLauncher'
201 c.Global.engine_launcher = 'IPython.parallel.apps.launcher.PBSEngineSetLauncher'
202
202
203 IPython does provide simple default batch templates for PBS and SGE, but you may need
203 IPython does provide simple default batch templates for PBS and SGE, but you may need
204 to specify your own. Here is a sample PBS script template:
204 to specify your own. Here is a sample PBS script template:
205
205
206 .. sourcecode:: bash
206 .. sourcecode:: bash
207
207
208 #PBS -N ipython
208 #PBS -N ipython
209 #PBS -j oe
209 #PBS -j oe
210 #PBS -l walltime=00:10:00
210 #PBS -l walltime=00:10:00
211 #PBS -l nodes={n/4}:ppn=4
211 #PBS -l nodes={n/4}:ppn=4
212 #PBS -q {queue}
212 #PBS -q {queue}
213
213
214 cd $PBS_O_WORKDIR
214 cd $PBS_O_WORKDIR
215 export PATH=$HOME/usr/local/bin
215 export PATH=$HOME/usr/local/bin
216 export PYTHONPATH=$HOME/usr/local/lib/python2.7/site-packages
216 export PYTHONPATH=$HOME/usr/local/lib/python2.7/site-packages
217 /usr/local/bin/mpiexec -n {n} ipengine profile_dir={profile_dir}
217 /usr/local/bin/mpiexec -n {n} ipengine profile_dir={profile_dir}
218
218
219 There are a few important points about this template:
219 There are a few important points about this template:
220
220
221 1. This template will be rendered at runtime using IPython's :class:`EvalFormatter`.
221 1. This template will be rendered at runtime using IPython's :class:`EvalFormatter`.
222 This is simply a subclass of :class:`string.Formatter` that allows simple expressions
222 This is simply a subclass of :class:`string.Formatter` that allows simple expressions
223 on keys.
223 on keys.
224
224
225 2. Instead of putting in the actual number of engines, use the notation
225 2. Instead of putting in the actual number of engines, use the notation
226 ``{n}`` to indicate the number of engines to be started. You can also use
226 ``{n}`` to indicate the number of engines to be started. You can also use
227 expressions like ``{n/4}`` in the template to indicate the number of nodes.
227 expressions like ``{n/4}`` in the template to indicate the number of nodes.
228 There will always be ``{n}`` and ``{profile_dir}`` variables passed to the formatter.
228 There will always be ``{n}`` and ``{profile_dir}`` variables passed to the formatter.
229 These allow the batch system to know how many engines, and where the configuration
229 These allow the batch system to know how many engines, and where the configuration
230 files reside. The same is true for the batch queue, with the template variable
230 files reside. The same is true for the batch queue, with the template variable
231 ``{queue}`` (see the example after this list).
231 ``{queue}`` (see the example after this list).
232
232
233 3. Any options to :command:`ipengine` can be given in the batch script
233 3. Any options to :command:`ipengine` can be given in the batch script
234 template, or in :file:`ipengine_config.py`.
234 template, or in :file:`ipengine_config.py`.
235
235
236 4. Depending on the configuration of your system, you may have to set
236 4. Depending on the configuration of your system, you may have to set
237 environment variables in the script template.
237 environment variables in the script template.
238
238
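To illustrate points 1 and 2, here is a rough sketch of how such a template gets expanded, assuming :class:`EvalFormatter` is importable from ``IPython.utils.text`` (the values shown are hypothetical):

.. sourcecode:: python

    from IPython.utils.text import EvalFormatter

    template = ("#PBS -l nodes={n/4}:ppn=4\n"
                "mpiexec -n {n} ipengine profile_dir={profile_dir}")

    # a plain string.Formatter would reject '{n/4}'; EvalFormatter evaluates it
    print EvalFormatter().format(template, n=16,
                                 profile_dir='/home/you/.ipython/cluster_pbs')
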
239 The controller template should be similar, but simpler:
239 The controller template should be similar, but simpler:
240
240
241 .. sourcecode:: bash
241 .. sourcecode:: bash
242
242
243 #PBS -N ipython
243 #PBS -N ipython
244 #PBS -j oe
244 #PBS -j oe
245 #PBS -l walltime=00:10:00
245 #PBS -l walltime=00:10:00
246 #PBS -l nodes=1:ppn=4
246 #PBS -l nodes=1:ppn=4
247 #PBS -q {queue}
247 #PBS -q {queue}
248
248
249 cd $PBS_O_WORKDIR
249 cd $PBS_O_WORKDIR
250 export PATH=$HOME/usr/local/bin
250 export PATH=$HOME/usr/local/bin
251 export PYTHONPATH=$HOME/usr/local/lib/python2.7/site-packages
251 export PYTHONPATH=$HOME/usr/local/lib/python2.7/site-packages
252 ipcontroller profile_dir={profile_dir}
252 ipcontroller profile_dir={profile_dir}
253
253
254
254
255 Once you have created these scripts, save them with names like
255 Once you have created these scripts, save them with names like
256 :file:`pbs.engine.template`. Now you can load them into the :file:`ipcluster_config` with:
256 :file:`pbs.engine.template`. Now you can load them into the :file:`ipcluster_config` with:
257
257
258 .. sourcecode:: python
258 .. sourcecode:: python
259
259
260 c.PBSEngineSetLauncher.batch_template_file = "pbs.engine.template"
260 c.PBSEngineSetLauncher.batch_template_file = "pbs.engine.template"
261
261
262 c.PBSControllerLauncher.batch_template_file = "pbs.controller.template"
262 c.PBSControllerLauncher.batch_template_file = "pbs.controller.template"
263
263
264
264
265 Alternatively, you can just define the templates as strings inside :file:`ipcluster_config`.
265 Alternatively, you can just define the templates as strings inside :file:`ipcluster_config`.
266
266
267 Whether you are using your own templates or our defaults, the extra configurables available are
267 Whether you are using your own templates or our defaults, the extra configurables available are
268 the number of engines to launch (``{n}``), and the batch system queue to which the jobs are to be
268 the number of engines to launch (``{n}``), and the batch system queue to which the jobs are to be
269 submitted (``{queue}``). Both can be specified in
269 submitted (``{queue}``). Both can be specified in
270 :file:`ipcluster_config`:
270 :file:`ipcluster_config`:
271
271
272 .. sourcecode:: python
272 .. sourcecode:: python
273
273
274 c.PBSLauncher.queue = 'veryshort.q'
274 c.PBSLauncher.queue = 'veryshort.q'
275 c.IPClusterEnginesApp.n = 64
275 c.IPClusterEnginesApp.n = 64
276
276
277 Note that assuming you are running PBS on a multi-node cluster, the Controller's default behavior
277 Note that assuming you are running PBS on a multi-node cluster, the Controller's default behavior
278 of listening only on localhost is likely too restrictive. In this case, also assuming the
278 of listening only on localhost is likely too restrictive. In this case, also assuming the
279 nodes are safely behind a firewall, you can simply instruct the Controller to listen for
279 nodes are safely behind a firewall, you can simply instruct the Controller to listen for
280 connections on all its interfaces, by adding in :file:`ipcontroller_config`:
280 connections on all its interfaces, by adding in :file:`ipcontroller_config`:
281
281
282 .. sourcecode:: python
282 .. sourcecode:: python
283
283
284 c.RegistrationFactory.ip = '*'
284 c.RegistrationFactory.ip = '*'
285
285
286 You can now run the cluster with::
286 You can now run the cluster with::
287
287
288 $ ipcluster start profile=pbs n=128
288 $ ipcluster start profile=pbs n=128
289
289
290 Additional configuration options can be found in the PBS section of :file:`ipcluster_config`.
290 Additional configuration options can be found in the PBS section of :file:`ipcluster_config`.
291
291
292 .. note::
292 .. note::
293
293
294 Due to the flexibility of configuration, the PBS launchers work with simple changes
294 Due to the flexibility of configuration, the PBS launchers work with simple changes
295 to the template for other :command:`qsub`-using systems, such as Sun Grid Engine,
295 to the template for other :command:`qsub`-using systems, such as Sun Grid Engine,
296 and with further configuration in similar batch systems like Condor.
296 and with further configuration in similar batch systems like Condor.
297
297
298
298
299 Using :command:`ipcluster` in SSH mode
299 Using :command:`ipcluster` in SSH mode
300 ---------------------------------------
300 ---------------------------------------
301
301
302
302
303 The SSH mode uses :command:`ssh` to execute :command:`ipengine` on remote
303 The SSH mode uses :command:`ssh` to execute :command:`ipengine` on remote
304 nodes, and :command:`ipcontroller` can be run remotely as well, or on localhost.
304 nodes, and :command:`ipcontroller` can be run remotely as well, or on localhost.
305
305
306 .. note::
306 .. note::
307
307
308 When using this mode it is highly recommended that you have set up SSH keys
308 When using this mode it is highly recommended that you have set up SSH keys
309 and are using ssh-agent [SSH]_ for password-less logins.
309 and are using ssh-agent [SSH]_ for password-less logins.
310
310
311 As usual, we start by creating a clean profile::
311 As usual, we start by creating a clean profile::
312
312
313 $ ipython profile create --cluster profile=ssh
313 $ ipython profile create --parallel profile=ssh
314
314
315 To use this mode, select the SSH launchers in :file:`ipcluster_config.py`:
315 To use this mode, select the SSH launchers in :file:`ipcluster_config.py`:
316
316
317 .. sourcecode:: python
317 .. sourcecode:: python
318
318
319 c.Global.engine_launcher = 'IPython.parallel.apps.launcher.SSHEngineSetLauncher'
319 c.Global.engine_launcher = 'IPython.parallel.apps.launcher.SSHEngineSetLauncher'
320 # and if the Controller is also to be remote:
320 # and if the Controller is also to be remote:
321 c.Global.controller_launcher = 'IPython.parallel.apps.launcher.SSHControllerLauncher'
321 c.Global.controller_launcher = 'IPython.parallel.apps.launcher.SSHControllerLauncher'
322
322
323
323
324 The controller's remote location and configuration can be specified:
324 The controller's remote location and configuration can be specified:
325
325
326 .. sourcecode:: python
326 .. sourcecode:: python
327
327
328 # Set the user and hostname for the controller
328 # Set the user and hostname for the controller
329 # c.SSHControllerLauncher.hostname = 'controller.example.com'
329 # c.SSHControllerLauncher.hostname = 'controller.example.com'
330 # c.SSHControllerLauncher.user = os.environ.get('USER','username')
330 # c.SSHControllerLauncher.user = os.environ.get('USER','username')
331
331
332 # Set the arguments to be passed to ipcontroller
332 # Set the arguments to be passed to ipcontroller
333 # note that remotely launched ipcontroller will not get the contents of
333 # note that remotely launched ipcontroller will not get the contents of
334 # the local ipcontroller_config.py unless it resides on the *remote host*
334 # the local ipcontroller_config.py unless it resides on the *remote host*
335 # in the location specified by the `profile_dir` argument.
335 # in the location specified by the `profile_dir` argument.
336 # c.SSHControllerLauncher.program_args = ['--reuse', 'ip=0.0.0.0', 'profile_dir=/path/to/cd']
336 # c.SSHControllerLauncher.program_args = ['--reuse', 'ip=0.0.0.0', 'profile_dir=/path/to/cd']
337
337
338 .. note::
338 .. note::
339
339
340 SSH mode does not do any file movement, so you will need to distribute configuration
340 SSH mode does not do any file movement, so you will need to distribute configuration
341 files manually. To aid in this, the `reuse_files` flag defaults to True for ssh-launched
341 files manually. To aid in this, the `reuse_files` flag defaults to True for ssh-launched
342 Controllers, so you will only need to do this once, unless you override this flag back
342 Controllers, so you will only need to do this once, unless you override this flag back
343 to False.
343 to False.
344
344
345 Engines are specified in a dictionary, by hostname and the number of engines to be run
345 Engines are specified in a dictionary, by hostname and the number of engines to be run
346 on that host.
346 on that host.
347
347
348 .. sourcecode:: python
348 .. sourcecode:: python
349
349
350 c.SSHEngineSetLauncher.engines = { 'host1.example.com' : 2,
350 c.SSHEngineSetLauncher.engines = { 'host1.example.com' : 2,
351 'host2.example.com' : 5,
351 'host2.example.com' : 5,
352 'host3.example.com' : (1, ['profile_dir=/home/different/location']),
352 'host3.example.com' : (1, ['profile_dir=/home/different/location']),
353 'host4.example.com' : 8 }
353 'host4.example.com' : 8 }
354
354
355 * The `engines` dict, where the keys are the hosts we want to run engines on and
355 * The `engines` dict, where the keys are the hosts we want to run engines on and
356 the values are the number of engines to run on each host.
356 the values are the number of engines to run on each host.
357 * on host3, the value is a tuple, where the number of engines is first, and the arguments
357 * on host3, the value is a tuple, where the number of engines is first, and the arguments
358 to be passed to :command:`ipengine` are the second element.
358 to be passed to :command:`ipengine` are the second element.
359
359
360 For engines without explicitly specified arguments, the default arguments are set in
360 For engines without explicitly specified arguments, the default arguments are set in
361 a single location:
361 a single location:
362
362
363 .. sourcecode:: python
363 .. sourcecode:: python
364
364
365 c.SSHEngineSetLauncher.engine_args = ['profile_dir=/path/to/cluster_ssh']
365 c.SSHEngineSetLauncher.engine_args = ['profile_dir=/path/to/cluster_ssh']
366
366
367 Current limitations of the SSH mode of :command:`ipcluster` are:
367 Current limitations of the SSH mode of :command:`ipcluster` are:
368
368
369 * Untested on Windows. Would require a working :command:`ssh` on Windows.
369 * Untested on Windows. Would require a working :command:`ssh` on Windows.
370 Also, we are using shell scripts to set up and execute commands on remote
370 Also, we are using shell scripts to set up and execute commands on remote
371 hosts.
371 hosts.
372 * No file movement - configuration files must be copied to the remote hosts manually, as noted above.
372 * No file movement - configuration files must be copied to the remote hosts manually, as noted above.
373
373
374 Using the :command:`ipcontroller` and :command:`ipengine` commands
374 Using the :command:`ipcontroller` and :command:`ipengine` commands
375 ====================================================================
375 ====================================================================
376
376
377 It is also possible to use the :command:`ipcontroller` and :command:`ipengine`
377 It is also possible to use the :command:`ipcontroller` and :command:`ipengine`
378 commands to start your controller and engines. This approach gives you full
378 commands to start your controller and engines. This approach gives you full
379 control over all aspects of the startup process.
379 control over all aspects of the startup process.
380
380
381 Starting the controller and engine on your local machine
381 Starting the controller and engine on your local machine
382 --------------------------------------------------------
382 --------------------------------------------------------
383
383
384 To use :command:`ipcontroller` and :command:`ipengine` to start things on your
384 To use :command:`ipcontroller` and :command:`ipengine` to start things on your
385 local machine, do the following.
385 local machine, do the following.
386
386
387 First start the controller::
387 First start the controller::
388
388
389 $ ipcontroller
389 $ ipcontroller
390
390
391 Next, start however many instances of the engine you want using (repeatedly)
391 Next, start however many instances of the engine you want using (repeatedly)
392 the command::
392 the command::
393
393
394 $ ipengine
394 $ ipengine
395
395
396 The engines should start and automatically connect to the controller using the
396 The engines should start and automatically connect to the controller using the
397 JSON files in :file:`~/.ipython/cluster_default/security`. You are now ready to use the
397 JSON files in :file:`~/.ipython/cluster_default/security`. You are now ready to use the
398 controller and engines from IPython.
398 controller and engines from IPython.
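
A quick way to check that everything is connected (a sketch of an interactive session; it assumes the connection files are in the default location for your profile):

.. sourcecode:: python

    import os
    from IPython.parallel import Client

    rc = Client()                        # finds the JSON files automatically
    print rc.ids                         # one id per registered engine
    print rc[:].apply_sync(os.getpid)    # process ids of the engines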
399
399
400 .. warning::
400 .. warning::
401
401
402 The order of the above operations may be important. You *must*
402 The order of the above operations may be important. You *must*
403 start the controller before the engines, unless you are reusing connection
403 start the controller before the engines, unless you are reusing connection
404 information (via `-r`), in which case ordering is not important.
404 information (via `-r`), in which case ordering is not important.
405
405
406 .. note::
406 .. note::
407
407
408 On some platforms (OS X), to put the controller and engine into the
408 On some platforms (OS X), to put the controller and engine into the
409 background you may need to give these commands in the form ``(ipcontroller
409 background you may need to give these commands in the form ``(ipcontroller
410 &)`` and ``(ipengine &)`` (with the parentheses) for them to work
410 &)`` and ``(ipengine &)`` (with the parentheses) for them to work
411 properly.
411 properly.
412
412
413 Starting the controller and engines on different hosts
413 Starting the controller and engines on different hosts
414 ------------------------------------------------------
414 ------------------------------------------------------
415
415
416 When the controller and engines are running on different hosts, things are
416 When the controller and engines are running on different hosts, things are
417 slightly more complicated, but the underlying ideas are the same:
417 slightly more complicated, but the underlying ideas are the same:
418
418
419 1. Start the controller on a host using :command:`ipcontroller`.
419 1. Start the controller on a host using :command:`ipcontroller`.
420 2. Copy :file:`ipcontroller-engine.json` from :file:`~/.ipython/profile_<name>/security` on
420 2. Copy :file:`ipcontroller-engine.json` from :file:`~/.ipython/profile_<name>/security` on
421 the controller's host to the host where the engines will run.
421 the controller's host to the host where the engines will run.
422 3. Use :command:`ipengine` on the engine's hosts to start the engines.
422 3. Use :command:`ipengine` on the engine's hosts to start the engines.
423
423
424 The only thing you have to be careful of is to tell :command:`ipengine` where
424 The only thing you have to be careful of is to tell :command:`ipengine` where
425 the :file:`ipcontroller-engine.json` file is located. There are two ways you
425 the :file:`ipcontroller-engine.json` file is located. There are two ways you
426 can do this:
426 can do this:
427
427
428 * Put :file:`ipcontroller-engine.json` in the :file:`~/.ipython/profile_<name>/security`
428 * Put :file:`ipcontroller-engine.json` in the :file:`~/.ipython/profile_<name>/security`
429 directory on the engine's host, where it will be found automatically.
429 directory on the engine's host, where it will be found automatically.
430 * Call :command:`ipengine` with the ``--file=full_path_to_the_file``
430 * Call :command:`ipengine` with the ``--file=full_path_to_the_file``
431 flag.
431 flag.
432
432
433 The ``--file`` flag works like this::
433 The ``--file`` flag works like this::
434
434
435 $ ipengine --file=/path/to/my/ipcontroller-engine.json
435 $ ipengine --file=/path/to/my/ipcontroller-engine.json
436
436
437 .. note::
437 .. note::
438
438
439 If the controller's and engine's hosts all have a shared file system
439 If the controller's and engine's hosts all have a shared file system
440 (:file:`~/.ipython/profile_<name>/security` is the same on all of them), then things
440 (:file:`~/.ipython/profile_<name>/security` is the same on all of them), then things
441 will just work!
441 will just work!
442
442
443 Make JSON files persistent
443 Make JSON files persistent
444 --------------------------
444 --------------------------
445
445
446 At first glance it may seem that managing the JSON files is a bit
446 At first glance it may seem that managing the JSON files is a bit
447 annoying. Going back to the house and key analogy, copying the JSON around
447 annoying. Going back to the house and key analogy, copying the JSON around
448 each time you start the controller is like having to make a new key every time
448 each time you start the controller is like having to make a new key every time
449 you want to unlock the door and enter your house. As with your house, you want
449 you want to unlock the door and enter your house. As with your house, you want
450 to be able to create the key (or JSON file) once, and then simply use it at
450 to be able to create the key (or JSON file) once, and then simply use it at
451 any point in the future.
451 any point in the future.
452
452
453 To do this, the only thing you have to do is specify the `--reuse` flag, so that
453 To do this, the only thing you have to do is specify the `--reuse` flag, so that
454 the connection information in the JSON files remains accurate::
454 the connection information in the JSON files remains accurate::
455
455
456 $ ipcontroller --reuse
456 $ ipcontroller --reuse
457
457
458 Then, just copy the JSON files over the first time and you are set. You can
458 Then, just copy the JSON files over the first time and you are set. You can
459 start and stop the controller and engines as many times as you want in the
459 start and stop the controller and engines as many times as you want in the
460 future, just make sure to tell the controller to reuse the file.
460 future, just make sure to tell the controller to reuse the file.
461
461
462 .. note::
462 .. note::
463
463
464 You may ask the question: what ports does the controller listen on if you
464 You may ask the question: what ports does the controller listen on if you
465 don't tell it to use specific ones? The default is to use high random port
465 don't tell it to use specific ones? The default is to use high random port
466 numbers. We do this for two reasons: i) to increase security through
466 numbers. We do this for two reasons: i) to increase security through
467 obscurity and ii) to allow multiple controllers on a given host to start and
467 obscurity and ii) to allow multiple controllers on a given host to start and
468 automatically use different ports.
468 automatically use different ports.
469
469
470 Log files
470 Log files
471 ---------
471 ---------
472
472
473 All of the components of IPython have log files associated with them.
473 All of the components of IPython have log files associated with them.
474 These log files can be extremely useful in debugging problems with
474 These log files can be extremely useful in debugging problems with
475 IPython and can be found in the directory :file:`~/.ipython/profile_<name>/log`.
475 IPython and can be found in the directory :file:`~/.ipython/profile_<name>/log`.
476 Sending the log files to us will often help us to debug any problems.
476 Sending the log files to us will often help us to debug any problems.
477
477
478
478
479 Configuring `ipcontroller`
479 Configuring `ipcontroller`
480 ---------------------------
480 ---------------------------
481
481
482 Ports and addresses
482 Ports and addresses
483 *******************
483 *******************
484
484
485
485
486 Database Backend
486 Database Backend
487 ****************
487 ****************
488
488
489
489
490 .. seealso::
490 .. seealso::
491
491
492
492
493
493
494 Configuring `ipengine`
494 Configuring `ipengine`
495 -----------------------
495 -----------------------
496
496
497 .. note::
497 .. note::
498
498
499 TODO
499 TODO
500
500
501
501
502
502
503 .. [PBS] Portable Batch System. http://www.openpbs.org/
503 .. [PBS] Portable Batch System. http://www.openpbs.org/
504 .. [SSH] SSH-Agent http://en.wikipedia.org/wiki/ssh-agent
504 .. [SSH] SSH-Agent http://en.wikipedia.org/wiki/ssh-agent
@@ -1,334 +1,334 b''
1 ============================================
1 ============================================
2 Getting started with Windows HPC Server 2008
2 Getting started with Windows HPC Server 2008
3 ============================================
3 ============================================
4
4
5 .. note::
5 .. note::
6
6
7 Not adapted to zmq yet
7 Not adapted to zmq yet
8
8
9 Introduction
9 Introduction
10 ============
10 ============
11
11
12 The Python programming language is an increasingly popular language for
12 The Python programming language is an increasingly popular language for
13 numerical computing. This is due to a unique combination of factors. First,
13 numerical computing. This is due to a unique combination of factors. First,
14 Python is a high-level and *interactive* language that is well matched to
14 Python is a high-level and *interactive* language that is well matched to
15 interactive numerical work. Second, it is easy (often times trivial) to
15 interactive numerical work. Second, it is easy (often times trivial) to
16 integrate legacy C/C++/Fortran code into Python. Third, a large number of
16 integrate legacy C/C++/Fortran code into Python. Third, a large number of
17 high-quality open source projects provide all the needed building blocks for
17 high-quality open source projects provide all the needed building blocks for
18 numerical computing: numerical arrays (NumPy), algorithms (SciPy), 2D/3D
18 numerical computing: numerical arrays (NumPy), algorithms (SciPy), 2D/3D
19 Visualization (Matplotlib, Mayavi, Chaco), Symbolic Mathematics (Sage, Sympy)
19 Visualization (Matplotlib, Mayavi, Chaco), Symbolic Mathematics (Sage, Sympy)
20 and others.
20 and others.
21
21
22 The IPython project is a core part of this open-source toolchain and is
22 The IPython project is a core part of this open-source toolchain and is
23 focused on creating a comprehensive environment for interactive and
23 focused on creating a comprehensive environment for interactive and
24 exploratory computing in the Python programming language. It enables all of
24 exploratory computing in the Python programming language. It enables all of
25 the above tools to be used interactively and consists of two main components:
25 the above tools to be used interactively and consists of two main components:
26
26
27 * An enhanced interactive Python shell with support for interactive plotting
27 * An enhanced interactive Python shell with support for interactive plotting
28 and visualization.
28 and visualization.
29 * An architecture for interactive parallel computing.
29 * An architecture for interactive parallel computing.
30
30
31 With these components, it is possible to perform all aspects of a parallel
31 With these components, it is possible to perform all aspects of a parallel
32 computation interactively. This type of workflow is particularly relevant in
32 computation interactively. This type of workflow is particularly relevant in
33 scientific and numerical computing where algorithms, code and data are
33 scientific and numerical computing where algorithms, code and data are
34 continually evolving as the user/developer explores a problem. The broad
34 continually evolving as the user/developer explores a problem. The broad
35 trends in computing (commodity clusters, multicore, cloud computing, etc.)
35 trends in computing (commodity clusters, multicore, cloud computing, etc.)
36 make these capabilities of IPython particularly relevant.
36 make these capabilities of IPython particularly relevant.
37
37
38 While IPython is a cross platform tool, it has particularly strong support for
38 While IPython is a cross platform tool, it has particularly strong support for
39 Windows based compute clusters running Windows HPC Server 2008. This document
39 Windows based compute clusters running Windows HPC Server 2008. This document
40 describes how to get started with IPython on Windows HPC Server 2008. The
40 describes how to get started with IPython on Windows HPC Server 2008. The
41 content and emphasis here is practical: installing IPython, configuring
41 content and emphasis here is practical: installing IPython, configuring
42 IPython to use the Windows job scheduler and running example parallel programs
42 IPython to use the Windows job scheduler and running example parallel programs
43 interactively. A more complete description of IPython's parallel computing
43 interactively. A more complete description of IPython's parallel computing
44 capabilities can be found in IPython's online documentation
44 capabilities can be found in IPython's online documentation
45 (http://ipython.scipy.org/moin/Documentation).
45 (http://ipython.scipy.org/moin/Documentation).
46
46
47 Setting up your Windows cluster
47 Setting up your Windows cluster
48 ===============================
48 ===============================
49
49
50 This document assumes that you already have a cluster running Windows
50 This document assumes that you already have a cluster running Windows
51 HPC Server 2008. Here is a broad overview of what is involved with setting up
51 HPC Server 2008. Here is a broad overview of what is involved with setting up
52 such a cluster:
52 such a cluster:
53
53
54 1. Install Windows Server 2008 on the head and compute nodes in the cluster.
54 1. Install Windows Server 2008 on the head and compute nodes in the cluster.
55 2. Set up the network configuration on each host. Each host should have a
55 2. Set up the network configuration on each host. Each host should have a
56 static IP address.
56 static IP address.
57 3. On the head node, activate the "Active Directory Domain Services" role
57 3. On the head node, activate the "Active Directory Domain Services" role
58 and make the head node the domain controller.
58 and make the head node the domain controller.
59 4. Join the compute nodes to the newly created Active Directory (AD) domain.
59 4. Join the compute nodes to the newly created Active Directory (AD) domain.
60 5. Set up user accounts in the domain with shared home directories.
60 5. Set up user accounts in the domain with shared home directories.
61 6. Install the HPC Pack 2008 on the head node to create a cluster.
61 6. Install the HPC Pack 2008 on the head node to create a cluster.
62 7. Install the HPC Pack 2008 on the compute nodes.
62 7. Install the HPC Pack 2008 on the compute nodes.
63
63
64 More details about installing and configuring Windows HPC Server 2008 can be
64 More details about installing and configuring Windows HPC Server 2008 can be
65 found on the Windows HPC Home Page (http://www.microsoft.com/hpc). Regardless
65 found on the Windows HPC Home Page (http://www.microsoft.com/hpc). Regardless
66 of what steps you follow to set up your cluster, the remainder of this
66 of what steps you follow to set up your cluster, the remainder of this
67 document will assume that:
67 document will assume that:
68
68
69 * There are domain users that can log on to the AD domain and submit jobs
69 * There are domain users that can log on to the AD domain and submit jobs
70 to the cluster scheduler.
70 to the cluster scheduler.
71 * These domain users have shared home directories. While shared home
71 * These domain users have shared home directories. While shared home
72 directories are not required to use IPython, they make it much easier to
72 directories are not required to use IPython, they make it much easier to
73 use IPython.
73 use IPython.
74
74
75 Installation of IPython and its dependencies
75 Installation of IPython and its dependencies
76 ============================================
76 ============================================
77
77
78 IPython and all of its dependencies are freely available and open source.
78 IPython and all of its dependencies are freely available and open source.
79 These packages provide a powerful and cost-effective approach to numerical and
79 These packages provide a powerful and cost-effective approach to numerical and
80 scientific computing on Windows. The following dependencies are needed to run
80 scientific computing on Windows. The following dependencies are needed to run
81 IPython on Windows:
81 IPython on Windows:
82
82
83 * Python 2.6 or 2.7 (http://www.python.org)
83 * Python 2.6 or 2.7 (http://www.python.org)
84 * pywin32 (http://sourceforge.net/projects/pywin32/)
84 * pywin32 (http://sourceforge.net/projects/pywin32/)
85 * PyReadline (https://launchpad.net/pyreadline)
85 * PyReadline (https://launchpad.net/pyreadline)
86 * pyzmq (http://github.com/zeromq/pyzmq/downloads)
86 * pyzmq (http://github.com/zeromq/pyzmq/downloads)
87 * IPython (http://ipython.scipy.org)
87 * IPython (http://ipython.scipy.org)
88
88
89 In addition, the following dependencies are needed to run the demos described
89 In addition, the following dependencies are needed to run the demos described
90 in this document.
90 in this document.
91
91
92 * NumPy and SciPy (http://www.scipy.org)
92 * NumPy and SciPy (http://www.scipy.org)
93 * Matplotlib (http://matplotlib.sourceforge.net/)
93 * Matplotlib (http://matplotlib.sourceforge.net/)
94
94
95 The easiest way of obtaining these dependencies is through the Enthought
95 The easiest way of obtaining these dependencies is through the Enthought
96 Python Distribution (EPD) (http://www.enthought.com/products/epd.php). EPD is
96 Python Distribution (EPD) (http://www.enthought.com/products/epd.php). EPD is
97 produced by Enthought, Inc. and contains all of these packages and others in a
97 produced by Enthought, Inc. and contains all of these packages and others in a
98 single installer and is available free for academic users. While it is also
98 single installer and is available free for academic users. While it is also
99 possible to download and install each package individually, this is a tedious
99 possible to download and install each package individually, this is a tedious
100 process. Thus, we highly recommend using EPD to install these packages on
100 process. Thus, we highly recommend using EPD to install these packages on
101 Windows.
101 Windows.
102
102
103 Regardless of how you install the dependencies, here are the steps you will
103 Regardless of how you install the dependencies, here are the steps you will
104 need to follow:
104 need to follow:
105
105
106 1. Install all of the packages listed above, either individually or using EPD
106 1. Install all of the packages listed above, either individually or using EPD
107 on the head node, compute nodes and user workstations.
107 on the head node, compute nodes and user workstations.
108
108
109 2. Make sure that :file:`C:\\Python27` and :file:`C:\\Python27\\Scripts` are
109 2. Make sure that :file:`C:\\Python27` and :file:`C:\\Python27\\Scripts` are
110 in the system :envvar:`%PATH%` variable on each node (a quick check is sketched after this list).
110 in the system :envvar:`%PATH%` variable on each node (a quick check is sketched after this list).
111
111
112 3. Install the latest development version of IPython. This can be done by
112 3. Install the latest development version of IPython. This can be done by
113 downloading the development version from the IPython website
113 downloading the development version from the IPython website
114 (http://ipython.scipy.org) and following the installation instructions.
114 (http://ipython.scipy.org) and following the installation instructions.
115
115
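As a quick sanity check for step 2 above, the following sketch (assuming the default :file:`C:\\Python27` install location) reports whether the required Python directories are actually present on :envvar:`%PATH%`::

    import os

    # Directories that step 2 requires on %PATH% (default install location assumed).
    required = [r"C:\Python27", r"C:\Python27\Scripts"]
    entries = os.environ.get("PATH", "").split(os.pathsep)
    for d in required:
        status = "found" if d in entries else "MISSING"
        print("%s: %s" % (d, status))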
116 Further details about installing IPython or its dependencies can be found in
116 Further details about installing IPython or its dependencies can be found in
117 the online IPython documentation (http://ipython.scipy.org/moin/Documentation).
117 the online IPython documentation (http://ipython.scipy.org/moin/Documentation).
118 Once you are finished with the installation, you can try IPython out by
118 Once you are finished with the installation, you can try IPython out by
119 opening a Windows Command Prompt and typing ``ipython``. This will
119 opening a Windows Command Prompt and typing ``ipython``. This will
120 start IPython's interactive shell and you should see something like the
120 start IPython's interactive shell and you should see something like the
121 following screenshot:
121 following screenshot:
122
122
123 .. image:: ipython_shell.*
123 .. image:: ipython_shell.*
124
124
125 Starting an IPython cluster
125 Starting an IPython cluster
126 ===========================
126 ===========================
127
127
128 To use IPython's parallel computing capabilities, you will need to start an
128 To use IPython's parallel computing capabilities, you will need to start an
129 IPython cluster. An IPython cluster consists of one controller and multiple
129 IPython cluster. An IPython cluster consists of one controller and multiple
130 engines:
130 engines:
131
131
132 IPython controller
132 IPython controller
133 The IPython controller manages the engines and acts as a gateway between
133 The IPython controller manages the engines and acts as a gateway between
134 the engines and the client, which runs in the user's interactive IPython
134 the engines and the client, which runs in the user's interactive IPython
135 session. The controller is started using the :command:`ipcontroller`
135 session. The controller is started using the :command:`ipcontroller`
136 command.
136 command.
137
137
138 IPython engine
138 IPython engine
139 IPython engines run a user's Python code in parallel on the compute nodes.
139 IPython engines run a user's Python code in parallel on the compute nodes.
140 Engines are started using the :command:`ipengine` command.
140 Engines are started using the :command:`ipengine` command.
141
141
142 Once these processes are started, a user can run Python code interactively and
142 Once these processes are started, a user can run Python code interactively and
143 in parallel on the engines from within the IPython shell using an appropriate
143 in parallel on the engines from within the IPython shell using an appropriate
144 client. This includes the ability to interact with, plot and visualize data
144 client. This includes the ability to interact with, plot and visualize data
145 from the engines.
145 from the engines.
146
146
147 IPython has a command line program called :command:`ipcluster` that automates
147 IPython has a command line program called :command:`ipcluster` that automates
148 all aspects of starting the controller and engines on the compute nodes.
148 all aspects of starting the controller and engines on the compute nodes.
149 :command:`ipcluster` has full support for the Windows HPC job scheduler,
149 :command:`ipcluster` has full support for the Windows HPC job scheduler,
150 meaning that :command:`ipcluster` can use this job scheduler to start the
150 meaning that :command:`ipcluster` can use this job scheduler to start the
151 controller and engines. In our experience, the Windows HPC job scheduler is
151 controller and engines. In our experience, the Windows HPC job scheduler is
152 particularly well suited for interactive applications, such as IPython. Once
152 particularly well suited for interactive applications, such as IPython. Once
153 :command:`ipcluster` is configured properly, a user can start an IPython
153 :command:`ipcluster` is configured properly, a user can start an IPython
154 cluster from their local workstation almost instantly, without having to log
154 cluster from their local workstation almost instantly, without having to log
155 on to the head node (as is typically required by Unix based job schedulers).
155 on to the head node (as is typically required by Unix based job schedulers).
156 This enables a user to move seamlessly between serial and parallel
156 This enables a user to move seamlessly between serial and parallel
157 computations.
157 computations.
158
158
159 In this section we show how to use :command:`ipcluster` to start an IPython
159 In this section we show how to use :command:`ipcluster` to start an IPython
160 cluster using the Windows HPC Server 2008 job scheduler. To make sure that
160 cluster using the Windows HPC Server 2008 job scheduler. To make sure that
161 :command:`ipcluster` is installed and working properly, you should first try
161 :command:`ipcluster` is installed and working properly, you should first try
162 to start an IPython cluster on your local host. To do this, open a Windows
162 to start an IPython cluster on your local host. To do this, open a Windows
163 Command Prompt and type the following command::
163 Command Prompt and type the following command::
164
164
165 ipcluster start n=2
165 ipcluster start n=2
166
166
167 You should see a number of messages printed to the screen, ending with
167 You should see a number of messages printed to the screen, ending with
168 "IPython cluster: started". The result should look something like the following
168 "IPython cluster: started". The result should look something like the following
169 screenshot:
169 screenshot:
170
170
171 .. image:: ipcluster_start.*
171 .. image:: ipcluster_start.*
172
172
173 At this point, the controller and two engines are running on your local host.
173 At this point, the controller and two engines are running on your local host.
174 This configuration is useful for testing and for situations where you want to
174 This configuration is useful for testing and for situations where you want to
175 take advantage of multiple cores on your local computer.
175 take advantage of multiple cores on your local computer.
176
176
177 Now that we have confirmed that :command:`ipcluster` is working properly, we
177 Now that we have confirmed that :command:`ipcluster` is working properly, we
178 describe how to configure and run an IPython cluster on an actual compute
178 describe how to configure and run an IPython cluster on an actual compute
179 cluster running Windows HPC Server 2008. Here is an outline of the needed
179 cluster running Windows HPC Server 2008. Here is an outline of the needed
180 steps:
180 steps:
181
181
182 1. Create a cluster profile using: ``ipython profile create --cluster profile=mycluster``
182 1. Create a cluster profile using: ``ipython profile create --parallel profile=mycluster``
183
183
184 2. Edit configuration files in the directory :file:`.ipython\\profile_mycluster`
184 2. Edit configuration files in the directory :file:`.ipython\\profile_mycluster`
185
185
186 3. Start the cluster using: ``ipcluster start profile=mycluster n=32``
186 3. Start the cluster using: ``ipcluster start profile=mycluster n=32``
187
187
188 Creating a cluster profile
188 Creating a cluster profile
189 --------------------------
189 --------------------------
190
190
191 In most cases, you will have to create a cluster profile to use IPython on a
191 In most cases, you will have to create a cluster profile to use IPython on a
192 cluster. A cluster profile is a name (like "mycluster") that is associated
192 cluster. A cluster profile is a name (like "mycluster") that is associated
193 with a particular cluster configuration. The profile name is used by
193 with a particular cluster configuration. The profile name is used by
194 :command:`ipcluster` when working with the cluster.
194 :command:`ipcluster` when working with the cluster.
195
195
196 Associated with each cluster profile is a cluster directory. This cluster
196 Associated with each cluster profile is a cluster directory. This cluster
197 directory is a specially named directory (typically located in the
197 directory is a specially named directory (typically located in the
198 :file:`.ipython` subdirectory of your home directory) that contains the
198 :file:`.ipython` subdirectory of your home directory) that contains the
199 configuration files for a particular cluster profile, as well as log files and
199 configuration files for a particular cluster profile, as well as log files and
200 security keys. The naming convention for cluster directories is:
200 security keys. The naming convention for cluster directories is:
201 :file:`profile_<profile name>`. Thus, the cluster directory for a profile named
201 :file:`profile_<profile name>`. Thus, the cluster directory for a profile named
202 "foo" would be :file:`.ipython\\cluster_foo`.
202 "foo" would be :file:`.ipython\\cluster_foo`.
203
203
204 To create a new cluster profile (named "mycluster") and the associated cluster
204 To create a new cluster profile (named "mycluster") and the associated cluster
205 directory, type the following command at the Windows Command Prompt::
205 directory, type the following command at the Windows Command Prompt::
206
206
207 ipython profile create --cluster profile=mycluster
207 ipython profile create --parallel profile=mycluster
208
208
209 The output of this command is shown in the screenshot below. Notice how
209 The output of this command is shown in the screenshot below. Notice how
210 the ``ipython profile create`` command prints out the location of the newly created cluster
210 the ``ipython profile create`` command prints out the location of the newly created cluster
211 directory.
211 directory.
212
212
213 .. image:: ipcluster_create.*
213 .. image:: ipcluster_create.*
214
214
215 Configuring a cluster profile
215 Configuring a cluster profile
216 -----------------------------
216 -----------------------------
217
217
218 Next, you will need to configure the newly created cluster profile by editing
218 Next, you will need to configure the newly created cluster profile by editing
219 the following configuration files in the cluster directory:
219 the following configuration files in the cluster directory:
220
220
221 * :file:`ipcluster_config.py`
221 * :file:`ipcluster_config.py`
222 * :file:`ipcontroller_config.py`
222 * :file:`ipcontroller_config.py`
223 * :file:`ipengine_config.py`
223 * :file:`ipengine_config.py`
224
224
225 When :command:`ipcluster` is run, these configuration files are used to
225 When :command:`ipcluster` is run, these configuration files are used to
226 determine how the engines and controller will be started. In most cases,
226 determine how the engines and controller will be started. In most cases,
227 you will only have to set a few of the attributes in these files.
227 you will only have to set a few of the attributes in these files.
228
228
229 To configure :command:`ipcluster` to use the Windows HPC job scheduler, you
229 To configure :command:`ipcluster` to use the Windows HPC job scheduler, you
230 will need to edit the following attributes in the file
230 will need to edit the following attributes in the file
231 :file:`ipcluster_config.py`::
231 :file:`ipcluster_config.py`::
232
232
233 # Set these at the top of the file to tell ipcluster to use the
233 # Set these at the top of the file to tell ipcluster to use the
234 # Windows HPC job scheduler.
234 # Windows HPC job scheduler.
235 c.Global.controller_launcher = \
235 c.Global.controller_launcher = \
236 'IPython.parallel.apps.launcher.WindowsHPCControllerLauncher'
236 'IPython.parallel.apps.launcher.WindowsHPCControllerLauncher'
237 c.Global.engine_launcher = \
237 c.Global.engine_launcher = \
238 'IPython.parallel.apps.launcher.WindowsHPCEngineSetLauncher'
238 'IPython.parallel.apps.launcher.WindowsHPCEngineSetLauncher'
239
239
240 # Set these to the host name of the scheduler (head node) of your cluster.
240 # Set these to the host name of the scheduler (head node) of your cluster.
241 c.WindowsHPCControllerLauncher.scheduler = 'HEADNODE'
241 c.WindowsHPCControllerLauncher.scheduler = 'HEADNODE'
242 c.WindowsHPCEngineSetLauncher.scheduler = 'HEADNODE'
242 c.WindowsHPCEngineSetLauncher.scheduler = 'HEADNODE'
243
243
244 There are a number of other configuration attributes that can be set, but
244 There are a number of other configuration attributes that can be set, but
245 in most cases these will be sufficient to get you started.
245 in most cases these will be sufficient to get you started.
246
246
247 .. warning::
247 .. warning::
248 If any of your configuration attributes involve specifying the location
248 If any of your configuration attributes involve specifying the location
249 of shared directories or files, you must make sure that you use UNC paths
249 of shared directories or files, you must make sure that you use UNC paths
250 like :file:`\\\\host\\share`. It is also important that you specify
250 like :file:`\\\\host\\share`. It is also important that you specify
251 these paths using raw Python strings: ``r'\\host\share'`` to make sure
251 these paths using raw Python strings: ``r'\\host\share'`` to make sure
252 that the backslashes are properly escaped.
252 that the backslashes are properly escaped.
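For example (the share name ``ipython_share`` is only illustrative), the raw-string form and the explicitly escaped form of the same UNC path are equivalent::

    # Raw strings avoid having to double every backslash by hand.
    shared = r'\\HEADNODE\ipython_share'      # recommended raw-string form
    escaped = '\\\\HEADNODE\\ipython_share'   # equivalent, but error-prone
    assert shared == escaped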
253
253
254 Starting the cluster profile
254 Starting the cluster profile
255 ----------------------------
255 ----------------------------
256
256
257 Once a cluster profile has been configured, starting an IPython cluster using
257 Once a cluster profile has been configured, starting an IPython cluster using
258 the profile is simple::
258 the profile is simple::
259
259
260 ipcluster start profile=mycluster n=32
260 ipcluster start profile=mycluster n=32
261
261
262 The ``n`` option tells :command:`ipcluster` how many engines to start (in
262 The ``n`` option tells :command:`ipcluster` how many engines to start (in
263 this case 32). Stopping the cluster is as simple as typing Control-C.
263 this case 32). Stopping the cluster is as simple as typing Control-C.
264
264
265 Using the HPC Job Manager
265 Using the HPC Job Manager
266 -------------------------
266 -------------------------
267
267
268 When ``ipcluster start`` is run the first time, :command:`ipcluster` creates
268 When ``ipcluster start`` is run the first time, :command:`ipcluster` creates
269 two XML job description files in the cluster directory:
269 two XML job description files in the cluster directory:
270
270
271 * :file:`ipcontroller_job.xml`
271 * :file:`ipcontroller_job.xml`
272 * :file:`ipengineset_job.xml`
272 * :file:`ipengineset_job.xml`
273
273
274 Once these files have been created, they can be imported into the HPC Job
274 Once these files have been created, they can be imported into the HPC Job
275 Manager application. Then, the controller and engines for that profile can be
275 Manager application. Then, the controller and engines for that profile can be
276 started using the HPC Job Manager directly, without using :command:`ipcluster`.
276 started using the HPC Job Manager directly, without using :command:`ipcluster`.
277 However, anytime the cluster profile is re-configured, ``ipcluster start``
277 However, anytime the cluster profile is re-configured, ``ipcluster start``
278 must be run again to regenerate the XML job description files. The
278 must be run again to regenerate the XML job description files. The
279 following screenshot shows what the HPC Job Manager interface looks like
279 following screenshot shows what the HPC Job Manager interface looks like
280 with a running IPython cluster.
280 with a running IPython cluster.
281
281
282 .. image:: hpc_job_manager.*
282 .. image:: hpc_job_manager.*
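To see whether the generated job descriptions are out of date with respect to the cluster configuration, a minimal sketch (paths and file names assumed for a profile named ``mycluster``; adjust for your setup) is::

    import os

    # Compare the configuration's modification time against the generated XML
    # job descriptions; if the config is newer, ``ipcluster start`` should be
    # re-run to regenerate them.
    profile = os.path.expanduser("~/.ipython/profile_mycluster")
    config_mtime = os.path.getmtime(os.path.join(profile, "ipcluster_config.py"))
    for name in ["ipcontroller_job.xml", "ipengineset_job.xml"]:
        path = os.path.join(profile, name)
        if not os.path.exists(path) or os.path.getmtime(path) < config_mtime:
            print("%s is missing or stale; re-run 'ipcluster start'" % name)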
283
283
284 Performing a simple interactive parallel computation
284 Performing a simple interactive parallel computation
285 ====================================================
285 ====================================================
286
286
287 Once you have started your IPython cluster, you can start to use it. To do
287 Once you have started your IPython cluster, you can start to use it. To do
288 this, open up a new Windows Command Prompt and start up IPython's interactive
288 this, open up a new Windows Command Prompt and start up IPython's interactive
289 shell by typing::
289 shell by typing::
290
290
291 ipython
291 ipython
292
292
293 Then you can create a :class:`MultiEngineClient` instance for your profile and
293 Then you can create a :class:`MultiEngineClient` instance for your profile and
294 use the resulting instance to do a simple interactive parallel computation. In
294 use the resulting instance to do a simple interactive parallel computation. In
295 the code and screenshot that follows, we take a simple Python function and
295 the code and screenshot that follows, we take a simple Python function and
296 apply it to each element of an array of integers in parallel using the
296 apply it to each element of an array of integers in parallel using the
297 :meth:`MultiEngineClient.map` method:
297 :meth:`MultiEngineClient.map` method:
298
298
299 .. sourcecode:: ipython
299 .. sourcecode:: ipython
300
300
301 In [1]: from IPython.parallel import *
301 In [1]: from IPython.parallel import *
302
302
303 In [2]: mec = MultiEngineClient(profile='mycluster')
303 In [2]: mec = MultiEngineClient(profile='mycluster')
304
304
305 In [3]: mec.get_ids()
305 In [3]: mec.get_ids()
306 Out[3]: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
306 Out[3]: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
307
307
308 In [4]: def f(x):
308 In [4]: def f(x):
309 ...: return x**10
309 ...: return x**10
310
310
311 In [5]: mec.map(f, range(15)) # f is applied in parallel
311 In [5]: mec.map(f, range(15)) # f is applied in parallel
312 Out[5]:
312 Out[5]:
313 [0,
313 [0,
314 1,
314 1,
315 1024,
315 1024,
316 59049,
316 59049,
317 1048576,
317 1048576,
318 9765625,
318 9765625,
319 60466176,
319 60466176,
320 282475249,
320 282475249,
321 1073741824,
321 1073741824,
322 3486784401L,
322 3486784401L,
323 10000000000L,
323 10000000000L,
324 25937424601L,
324 25937424601L,
325 61917364224L,
325 61917364224L,
326 137858491849L,
326 137858491849L,
327 289254654976L]
327 289254654976L]
328
328
329 The :meth:`map` method has the same signature as Python's builtin :func:`map`
329 The :meth:`map` method has the same signature as Python's builtin :func:`map`
330 function, but runs the calculation in parallel. More involved examples of using
330 function, but runs the calculation in parallel. More involved examples of using
331 :class:`MultiEngineClient` are provided in the examples that follow.
331 :class:`MultiEngineClient` are provided in the examples that follow.
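For comparison, the same computation with the builtin :func:`map` runs serially in the local process; only where the work executes differs, not the call signature::

    def f(x):
        return x**10

    serial = map(f, range(15))           # runs locally, one call after another
    # parallel = mec.map(f, range(15))   # runs on the engines (needs a running cluster)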
332
332
333 .. image:: mec_simple.*
333 .. image:: mec_simple.*
334
334