@@ -11,8 +11,8 @@ c = get_config()
|
11 | 11 | # - Start as a regular process on localhost. |
|
12 | 12 | # - Start using mpiexec. |
|
13 | 13 | # - Start using the Windows HPC Server 2008 scheduler |
|
14 | # - Start using PBS | |
|
15 | # - Start using SSH | |
|
|
14 | # - Start using PBS/SGE | |
|
15 | # - Start using SSH | |
|
16 | 16 | |
|
17 | 17 | |
|
18 | 18 | # The selected launchers can be configured below. |
@@ -21,15 +21,18 @@ c = get_config()
|
21 | 21 | # - LocalControllerLauncher |
|
22 | 22 | # - MPIExecControllerLauncher |
|
23 | 23 | # - PBSControllerLauncher |
|
24 | # - SGEControllerLauncher | |
|
24 | 25 | # - WindowsHPCControllerLauncher |
|
25 | # c.Global.controller_launcher = 'IPython.zmq.parallel.launcher.LocalControllerLauncher' | |
|
|
26 | # c.Global.controller_launcher = 'IPython.parallel.launcher.LocalControllerLauncher' | |
|
27 | # c.Global.controller_launcher = 'IPython.parallel.launcher.PBSControllerLauncher' | |
|
26 | 28 | |
|
27 | 29 | # Options are: |
|
28 | 30 | # - LocalEngineSetLauncher |
|
29 | 31 | # - MPIExecEngineSetLauncher |
|
30 | 32 | # - PBSEngineSetLauncher |
|
33 | # - SGEEngineSetLauncher | |
|
31 | 34 | # - WindowsHPCEngineSetLauncher |
|
32 | # c.Global.engine_launcher = 'IPython.zmq.parallel.launcher.LocalEngineSetLauncher' | |
|
|
35 | # c.Global.engine_launcher = 'IPython.parallel.launcher.LocalEngineSetLauncher' | |
|
33 | 36 | |
|
34 | 37 | #----------------------------------------------------------------------------- |
|
35 | 38 | # Global configuration |
@@ -68,23 +71,23 @@ c = get_config()
|
68 | 71 | # MPIExec launchers |
|
69 | 72 | #----------------------------------------------------------------------------- |
|
70 | 73 | |
|
71 | # The mpiexec/mpirun command to use in started the controller. | |
|
|
72 | # c.MPIExecControllerLauncher.mpi_cmd = ['mpiexec'] | |
|
|
74 | # The mpiexec/mpirun command to use in both the controller and engines. | |
|
75 | # c.MPIExecLauncher.mpi_cmd = ['mpiexec'] | |
|
73 | 76 | |
|
74 | 77 | # Additional arguments to pass to the actual mpiexec command. |
|
78 | # c.MPIExecLauncher.mpi_args = [] | |
|
79 | ||
|
80 | # The mpiexec/mpirun command and args can be overridden if they should be different | |
|
81 | # for controller and engines. | |
|
82 | # c.MPIExecControllerLauncher.mpi_cmd = ['mpiexec'] | |
|
75 | 83 | # c.MPIExecControllerLauncher.mpi_args = [] |
|
84 | # c.MPIExecEngineSetLauncher.mpi_cmd = ['mpiexec'] | |
|
85 | # c.MPIExecEngineSetLauncher.mpi_args = [] | |
|
76 | 86 | |
|
77 | 87 | # The command line argument to call the controller with. |
|
78 | 88 | # c.MPIExecControllerLauncher.controller_args = \ |
|
79 | 89 | # ['--log-to-file','--log-level', '40'] |
|
80 | 90 | |
|
81 | ||
|
82 | # The mpiexec/mpirun command to use in started the controller. | |
|
83 | # c.MPIExecEngineSetLauncher.mpi_cmd = ['mpiexec'] | |
|
84 | ||
|
85 | # Additional arguments to pass to the actual mpiexec command. | |
|
86 | # c.MPIExecEngineSetLauncher.mpi_args = [] | |
|
87 | ||
|
88 | 91 | # Command line argument passed to the engines. |
|
89 | 92 | # c.MPIExecEngineSetLauncher.engine_args = ['--log-to-file','--log-level', '40'] |
|
90 | 93 | |
@@ -95,51 +98,105 @@ c = get_config()
|
95 | 98 | # SSH launchers |
|
96 | 99 | #----------------------------------------------------------------------------- |
|
97 | 100 | |
|
98 | # Todo | |
|
101 | # ipclusterz can be used to launch controller and engines remotely via ssh. | |
|
102 | # Note that currently ipclusterz does not do any file distribution, so if | |
|
103 | # machines are not on a shared filesystem, config and json files must be | |
|
104 | # distributed. For this reason, the reuse_files defaults to True on an | |
|
105 | # ssh-launched Controller. This flag can be overridden by the program_args | |
|
106 | # attribute of c.SSHControllerLauncher. | |
|
107 | ||
|
108 | # set the ssh cmd for launching remote commands. The default is ['ssh'] | |
|
109 | # c.SSHLauncher.ssh_cmd = ['ssh'] | |
|
110 | ||
|
111 | # set the args to pass to the ssh command. The default is ['tt'] | |
|
112 | # c.SSHLauncher.ssh_args = ['tt'] | |
|
113 | ||
|
114 | # Set the user and hostname for the controller | |
|
115 | # c.SSHControllerLauncher.hostname = 'controller.example.com' | |
|
116 | # c.SSHControllerLauncher.user = os.environ.get('USER','username') | |
|
117 | ||
|
118 | # Set the arguments to be passed to ipcontrollerz | |
|
119 | # note that remotely launched ipcontrollerz will not get the contents of | |
|
120 | # the local ipcontrollerz_config.py unless it resides on the *remote host* | |
|
121 | # in the location specified by the --cluster_dir argument. | |
|
122 | # c.SSHControllerLauncher.program_args = ['-r', '-ip', '0.0.0.0', '--cluster_dir', '/path/to/cd'] | |
|
123 | ||
|
124 | # Set the default args passed to ipenginez for SSH launched engines | |
|
125 | # c.SSHEngineSetLauncher.engine_args = ['--mpi', 'mpi4py'] | |
|
99 | 126 | |
|
127 | # SSH engines are launched as a dict of locations/n-engines. | |
|
128 | # if a value is a tuple instead of an int, it is assumed to be of the form | |
|
129 | # (n, [args]), setting the arguments passed to ipenginez on `host`. | |
|
130 | # otherwise, c.SSHEngineSetLauncher.engine_args will be used as the default. | |
|
131 | ||
|
132 | # In this case, there will be 3 engines at my.example.com, and | |
|
133 | # 2 at you@ipython.scipy.org with a special json connector location. | |
|
134 | # c.SSHEngineSetLauncher.engines = {'my.example.com' : 3, | |
|
135 | # 'you@ipython.scipy.org' : (2, ['-f', '/path/to/ipcontroller-engine.json']), | |
|
136 | # } | |
|
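Taken together, a minimal sketch of an all-SSH cluster assembled from the options above (the hostnames and engine counts are placeholders, not defaults):

# c.Global.controller_launcher = 'IPython.parallel.launcher.SSHControllerLauncher'
# c.Global.engine_launcher = 'IPython.parallel.launcher.SSHEngineSetLauncher'
# c.SSHControllerLauncher.hostname = 'controller.example.com'
# c.SSHEngineSetLauncher.engines = {'my.example.com' : 3}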
100 | 137 | |
|
101 | 138 | #----------------------------------------------------------------------------- |
|
102 | 139 | # Unix batch (PBS) scheduler launchers |
|
103 | 140 | #----------------------------------------------------------------------------- |
|
104 | 141 | |
|
142 | # SGE and PBS are very similar. All configurables in this section called 'PBS*' | |
|
143 | # also exist as 'SGE*'. | |
|
144 | ||
|
105 | 145 | # The command line program to use to submit a PBS job. |
|
106 | # c.PBSControllerLauncher.submit_command = 'qsub' | |
|
|
146 | # c.PBSLauncher.submit_command = ['qsub'] | |
|
107 | 147 | |
|
108 | 148 | # The command line program to use to delete a PBS job. |
|
109 | # c.PBSControllerLauncher.delete_command = 'qdel' | |
|
|
149 | # c.PBSLauncher.delete_command = ['qdel'] | |
|
150 | ||
|
151 | # The PBS queue in which the job should run | |
|
152 | # c.PBSLauncher.queue = 'myqueue' | |
|
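Per the note above that every 'PBS*' configurable also exists as 'SGE*', the SGE spellings follow the same pattern (a sketch; the values are examples):

# c.SGELauncher.submit_command = ['qsub']
# c.SGELauncher.delete_command = ['qdel']
# c.SGELauncher.queue = 'myqueue'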
110 | 153 | |
|
111 | 154 | # A regular expression that takes the output of qsub and finds the job id. |
|
112 | # c.PBSControllerLauncher.job_id_regexp = r'\d+' | |
|
|
155 | # c.PBSLauncher.job_id_regexp = r'\d+' | |
|
156 | ||
|
157 | # If for some reason the Controller and Engines have different options above, they | |
|
158 | # can be set as c.PBSControllerLauncher.<option> etc. | |
|
159 | ||
|
160 | # PBS and SGE have default templates, but you can specify your own, either as strings | |
|
161 | # or from files, as described here: | |
|
113 | 162 | |
|
114 | 163 | # The batch submission script used to start the controller. This is where |
|
115 | # environment variables would be setup, etc. This string is interpolated using | |
|
|
164 | # environment variables would be setup, etc. This string is interpreted using | |
|
116 | 165 | # the Itpl module in IPython.external. Basically, you can use ${n} for the |
|
117 | 166 | # number of engines and ${cluster_dir} for the cluster_dir. |
|
118 | # c.PBSControllerLauncher.batch_template = """ | |
|
|
167 | # c.PBSControllerLauncher.batch_template = """ | |
|
168 | # #PBS -N ipcontroller | |
|
169 | # #PBS -q $queue | |
|
170 | # | |
|
171 | # ipcontrollerz --cluster-dir $cluster_dir | |
|
172 | # """ | |
|
173 | ||
|
174 | # You can also load this template from a file | |
|
175 | # c.PBSControllerLauncher.batch_template_file = u"/path/to/my/template.sh" | |
|
119 | 176 | |
|
120 | 177 | # The name of the instantiated batch script that will actually be used to |
|
121 | 178 | # submit the job. This will be written to the cluster directory. |
|
122 | # c.PBSControllerLauncher.batch_file_name = u'pbs_
|
|
123 | ||
|
124 | ||
|
125 | # The command line program to use to submit a PBS job. | |
|
126 | # c.PBSEngineSetLauncher.submit_command = 'qsub' | |
|
127 | ||
|
128 | # The command line program to use to delete a PBS job. | |
|
129 | # c.PBSEngineSetLauncher.delete_command = 'qdel' | |
|
130 | ||
|
131 | # A regular expression that takes the output of qsub and find the job id. | |
|
132 | # c.PBSEngineSetLauncher.job_id_regexp = r'\d+' | |
|
179 | # c.PBSControllerLauncher.batch_file_name = u'pbs_controller' | |
|
133 | 180 | |
|
134 | 181 | # The batch submission script used to start the engines. This is where |
|
135 | # environment variables would be setup, etc. This string is interpolated using | |
|
|
182 | # environment variables would be setup, etc. This string is interpreted using | |
|
136 | 183 | # the Itpl module in IPython.external. Basically, you can use ${n} for the |
|
137 | 184 | # number of engines and ${cluster_dir} for the cluster_dir. |
|
138 | # c.PBSEngineSetLauncher.batch_template = """ | |
|
|
185 | # c.PBSEngineSetLauncher.batch_template = """ | |
|
186 | # #PBS -N ipengine | |
|
187 | # #PBS -l nprocs=$n | |
|
188 | # | |
|
189 | # ipenginez --cluster-dir $cluster_dir | |
|
190 | # """ | |
|
191 | ||
|
192 | # You can also load this template from a file | |
|
193 | # c.PBSEngineSetLauncher.batch_template_file = u"/path/to/my/template.sh" | |
|
139 | 194 | |
|
140 | 195 | # The name of the instantiated batch script that will actually be used to |
|
141 | 196 | # submit the job. This will be written to the cluster directory. |
|
142 | # c.PBSEngineSetLauncher.batch_file_name = u'pbs_
|
|
197 | # c.PBSEngineSetLauncher.batch_file_name = u'pbs_engines' | |
|
198 | ||
|
199 | ||
|
143 | 200 | |
|
144 | 201 | #----------------------------------------------------------------------------- |
|
145 | 202 | # Windows HPC Server 2008 launcher configuration |
@@ -25,112 +25,156 @@ c = get_config()
|
25 | 25 | # be imported in the controller for pickling to work. |
|
26 | 26 | # c.Global.import_statements = ['import math'] |
|
27 | 27 | |
|
28 | # Reuse the controller's FURL files. If False, FURL files are regenerated | |
|
|
28 | # Reuse the controller's JSON files. If False, JSON files are regenerated | |
|
29 | 29 | # each time the controller is run. If True, they will be reused, *but*, you |
|
30 | 30 | # also must set the network ports by hand. If set, this will override the |
|
31 | 31 | # values set for the client and engine connections below. |
|
32 | # c.Global.reuse_furls = True | |
|
|
32 | # c.Global.reuse_files = True | |
|
33 | 33 | |
|
34 | # Enable SSL encryption on all connections to the controller. If set, this | |
|
35 | # will override the values set for the client and engine connections below. | |
|
34 | # Enable exec_key authentication on all messages. Default is True | |
|
36 | 35 | # c.Global.secure = True |
|
37 | 36 | |
|
38 | 37 | # The working directory for the process. The application will use os.chdir |
|
39 | 38 | # to change to this directory before starting. |
|
40 | 39 | # c.Global.work_dir = os.getcwd() |
|
41 | 40 | |
|
41 | # The log url for logging to an `iploggerz` application. This will override | |
|
42 | # log-to-file. | |
|
43 | # c.Global.log_url = 'tcp://127.0.0.1:20202' | |
|
44 | ||
|
45 | # The specific external IP that is used to disambiguate multi-interface URLs. | |
|
46 | # The default behavior is to guess from external IPs gleaned from `socket`. | |
|
47 | # c.Global.location = '192.168.1.123' | |
|
48 | ||
|
49 | # The ssh server remote clients should use to connect to this controller. | |
|
50 | # It must be a machine that can see the interface specified in client_ip. | |
|
51 | # The default for client_ip is localhost, in which case the sshserver must | |
|
52 | # be an external IP of the controller machine. | |
|
53 | # c.Global.sshserver = 'controller.example.com' | |
|
54 | ||
|
55 | # the url to use for registration. If set, this overrides engine-ip, | |
|
56 | # engine-transport, client-ip, client-transport, and regport. | |
|
57 | # c.RegistrationFactory.url = 'tcp://*:12345' | |
|
58 | ||
|
59 | # the port to use for registration. Clients and Engines both use this | |
|
60 | # port for registration. | |
|
61 | # c.RegistrationFactory.regport = 10101 | |
|
62 | ||
|
42 | 63 | #----------------------------------------------------------------------------- |
|
43 | # Configure the client services | |
|
|
64 | # Configure the Task Scheduler | |
|
44 | 65 | #----------------------------------------------------------------------------- |
|
45 | 66 | |
|
46 | # Basic client service config attributes | |
|
67 | # The routing scheme. 'pure' will use the pure-ZMQ scheduler. Any other | |
|
68 | # value will use a Python scheduler with various routing schemes. | |
|
69 | # python schemes are: lru, weighted, random, twobin. Default is 'weighted'. | |
|
70 | # Note that the pure ZMQ scheduler does not support many features, such as | |
|
71 | # dying engines, dependencies, or engine-subset load-balancing. | |
|
72 | # c.ControllerFactory.scheme = 'pure' | |
|
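To select one of the Python schemes listed above instead of the pure-ZMQ scheduler, e.g. least-recently-used routing (a sketch):

# c.ControllerFactory.scheme = 'lru'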
47 | 73 | |
|
48 | # The network interface the controller will listen on for client connections. | |
|
49 | # This should be an IP address or hostname of the controller's host. The empty | |
|
50 | # string means listen on all interfaces. | |
|
51 | # c.FCClientServiceFactory.ip = '' | |
|
74 | # The pure ZMQ scheduler can limit the number of outstanding tasks per engine | |
|
75 | # by using the ZMQ HWM option. This allows engines with long-running tasks | |
|
76 | # to not steal too many tasks from other engines. The default is 0, which | |
|
77 | # means aggressively distribute messages, never waiting for them to finish. | |
|
78 | # c.ControllerFactory.hwm = 1 | |
|
52 | 79 | |
|
53 | # The TCP/IP port the controller will listen on for client connections. If 0 | |
|
54 | # a random port will be used. If the controller's host has a firewall running | |
|
55 | # it must allow incoming traffic on this port. | |
|
56 | # c.FCClientServiceFactory.port = 0 | |
|
80 | # Whether to use Threads or Processes to start the Schedulers. Threads will | |
|
81 | # use fewer resources, but potentially reduce throughput. Default is to | |
|
82 | # use processes. Note that a Python scheduler will always be in a Process. | |
|
83 | # c.ControllerFactory.usethreads | |
|
57 | 84 | |
|
58 | # The client learns how to connect to the controller by looking at the | |
|
59 | # location field embedded in the FURL. If this field is empty, all network | |
|
60 | # interfaces that the controller is listening on will be listed. To have the | |
|
61 | # client connect on a particular interface, list it here. | |
|
62 | # c.FCClientServiceFactory.location = '' | |
|
85 | #----------------------------------------------------------------------------- | |
|
86 | # Configure the Hub | |
|
87 | #----------------------------------------------------------------------------- | |
|
88 | ||
|
89 | # Which class to use for the db backend. Currently supported are DictDB (the | |
|
90 | # default), and MongoDB. Uncomment this line to enable MongoDB, which will | |
|
91 | # slow down the Hub's responsiveness, but also reduce its memory footprint. | |
|
92 | # c.HubFactory.db_class = 'IPython.parallel.mongodb.MongoDB' | |
|
63 | 93 | |
|
64 | # Use SSL encryption for the client connection. | |
|
65 | # c.FCClientServiceFactory.secure = True | |
|
94 | # The heartbeat ping frequency. This is the frequency (in ms) at which the | |
|
95 | # Hub pings engines for heartbeats. This determines how quickly the Hub | |
|
96 | # will react to engines coming and going. A lower number means faster response | |
|
97 | # time, but more network activity. The default is 100ms. | |
|
98 | # c.HubFactory.ping = 100 | |
|
66 | 99 | |
|
67 | # Reuse the client FURL each time the controller is started. If set, you must | |
|
68 | # also pick a specific network port above (FCClientServiceFactory.port). | |
|
69 | # c.FCClientServiceFactory.reuse_furls = False | |
|
100 | # HubFactory queue port pairs, to set by name: mux, iopub, control, task. Set | |
|
101 | # each as a tuple of length 2 of ints. The default is to find random | |
|
102 | # available ports | |
|
103 | # c.HubFactory.mux = (10102,10112) | |
|
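The other named pairs follow the same shape as mux; the port numbers below are arbitrary examples, not defaults:

# c.HubFactory.iopub = (10103,10113)
# c.HubFactory.control = (10104,10114)
# c.HubFactory.task = (10105,10115)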
70 | 104 | |
|
71 | 105 | #----------------------------------------------------------------------------- |
|
72 | # Configure the engine services | |
|
|
106 | # Configure the client connections | |
|
73 | 107 | #----------------------------------------------------------------------------- |
|
74 | 108 | |
|
75 | # Basic config attributes for the engine services. | |
|
109 | # Basic client connection config attributes | |
|
76 | 110 | |
|
77 | # The network interface the controller will listen on for engine connections. | |
|
|
78 | # This should be an IP address or hostname of the controller's host. The empty | |
|
|
79 | # string means listen on all interfaces. | |
|
|
80 | # c.FCEngineServiceFactory.ip = '' | |
|
111 | # The network interface the controller will listen on for client connections. | |
|
112 | # This should be an IP address or interface on the controller. An asterisk | |
|
113 | # means listen on all interfaces. The transport can be any transport | |
|
114 | # supported by zeromq (tcp,epgm,pgm,ib,ipc): | |
|
115 | # c.HubFactory.client_ip = '*' | |
|
116 | # c.HubFactory.client_transport = 'tcp' | |
|
81 | 117 | |
|
82 | # The TCP/IP port the controller will listen on for engine connections. If 0 | |
|
83 | # a random port will be used. If the controller's host has a firewall running | |
|
84 | # it must allow incoming traffic on this port. | |
|
85 | # c.FCEngineServiceFactory.port = 0 | |
|
118 | # individual client ports to configure by name: query_port, notifier_port | |
|
119 | # c.HubFactory.query_port = 12345 | |
|
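notifier_port, named in the comment above, is set the same way (the value is an arbitrary example):

# c.HubFactory.notifier_port = 12346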
86 | 120 | |
|
87 | # The engine learns how to connect to the controller by looking at the | |
|
88 | # location field embedded in the FURL. If this field is empty, all network | |
|
89 | # interfaces that the controller is listening on will be listed. To have the | |
|
90 | # client connect on a particular interface, list it here. | |
|
91 | # c.FCEngineServiceFactory.location = '' | |
|
121 | #----------------------------------------------------------------------------- | |
|
122 | # Configure the engine connections | |
|
123 | #----------------------------------------------------------------------------- | |
|
92 | 124 | |
|
93 | # Use SSL encryption for the engine connection. | |
|
|
94 | # c.FCEngineServiceFactory.secure = True | |
|
125 | # Basic config attributes for the engine connections. | |
|
95 | 126 | |
|
96 | # Reuse the client FURL each time the controller is started. If set, you must | |
|
97 | # also pick a specific network port above (FCClientServiceFactory.port). | |
|
98 | # c.FCEngineServiceFactory.reuse_furls = False | |
|
127 | # The network interface the controller will listen on for engine connections. | |
|
128 | # This should be an IP address or interface on the controller. An asterisk | |
|
129 | # means listen on all interfaces. The transport can be any transport | |
|
130 | # supported by zeromq (tcp,epgm,pgm,ib,ipc): | |
|
131 | # c.HubFactory.engine_ip = '*' | |
|
132 | # c.HubFactory.engine_transport = 'tcp' | |
|
133 | ||
|
134 | # set the engine heartbeat ports to use: | |
|
135 | # c.HubFactory.hb = (10303,10313) | |
|
99 | 136 | |
|
100 | 137 | #----------------------------------------------------------------------------- |
|
101 | # Developer level configuration attributes | |
|
138 | # Configure the TaskRecord database backend | |
|
102 | 139 | #----------------------------------------------------------------------------- |
|
103 | 140 | |
|
104 | # You shouldn't have to modify anything in this section. These attributes | |
|
105 | # are more for developers who want to change the behavior of the controller | |
|
106 | # at a fundamental level. | |
|
107 | ||
|
108 | # c.FCClientServiceFactory.cert_file = u'ipcontroller-client.pem' | |
|
109 | ||
|
110 | # default_client_interfaces = Config() | |
|
111 | # default_client_interfaces.Task.interface_chain = [ | |
|
112 | # 'IPython.kernel.task.ITaskController', | |
|
113 | # 'IPython.kernel.taskfc.IFCTaskController' | |
|
114 | # ] | |
|
115 | # | |
|
116 | # default_client_interfaces.Task.furl_file = u'ipcontroller-tc.furl' | |
|
117 | # | |
|
118 | # default_client_interfaces.MultiEngine.interface_chain = [ | |
|
119 | # 'IPython.kernel.multiengine.IMultiEngine', | |
|
120 | # 'IPython.kernel.multienginefc.IFCSynchronousMultiEngine' | |
|
121 | # ] | |
|
122 | # | |
|
123 | # default_client_interfaces.MultiEngine.furl_file = u'ipcontroller-mec.furl' | |
|
124 | # | |
|
125 | # c.FCEngineServiceFactory.interfaces = default_client_interfaces | |
|
126 | ||
|
127 | # c.FCEngineServiceFactory.cert_file = u'ipcontroller-engine.pem' | |
|
128 | ||
|
129 | # default_engine_interfaces = Config() | |
|
130 | # default_engine_interfaces.Default.interface_chain = [ | |
|
131 | # 'IPython.kernel.enginefc.IFCControllerBase' | |
|
132 | # ] | |
|
133 | # | |
|
134 | # default_engine_interfaces.Default.furl_file = u'ipcontroller-engine.furl' | |
|
135 | # | |
|
136 | # c.FCEngineServiceFactory.interfaces = default_engine_interfaces | |
|
141 | # For memory/persistence reasons, tasks can be stored out-of-memory in a database. | |
|
142 | # Currently, only sqlite and mongodb are supported as backends, but the interface | |
|
143 | # is fairly simple, so advanced developers could write their own backend. | |
|
144 | ||
|
145 | # ----- in-memory configuration -------- | |
|
146 | # this line restores the default behavior: in-memory storage of all results. | |
|
147 | # c.HubFactory.db_class = 'IPython.parallel.dictdb.DictDB' | |
|
148 | ||
|
149 | # ----- sqlite configuration -------- | |
|
150 | # use this line to activate sqlite: | |
|
151 | # c.HubFactory.db_class = 'IPython.parallel.sqlitedb.SQLiteDB' | |
|
152 | ||
|
153 | # You can specify the name of the db-file. By default, this will be located | |
|
154 | # in the active cluster_dir, e.g. ~/.ipython/clusterz_default/tasks.db | |
|
155 | # c.SQLiteDB.filename = 'tasks.db' | |
|
156 | ||
|
157 | # You can also specify the location of the db-file, if you want it to be somewhere | |
|
158 | # other than the cluster_dir. | |
|
159 | # c.SQLiteDB.location = '/scratch/' | |
|
160 | ||
|
161 | # This will specify the name of the table for the controller to use. The default | |
|
162 | # behavior is to use the session ID of the SessionFactory object (a uuid). Overriding | |
|
163 | # this will result in results persisting for multiple sessions. | |
|
164 | # c.SQLiteDB.table = 'results' | |
|
165 | ||
|
166 | # ----- mongodb configuration -------- | |
|
167 | # use this line to activate mongodb: | |
|
168 | # c.HubFactory.db_class = 'IPython.parallel.mongodb.MongoDB' | |
|
169 | ||
|
170 | # You can specify the args and kwargs pymongo will use when creating the Connection. | |
|
171 | # For more information on what these options might be, see pymongo documentation. | |
|
172 | # c.MongoDB.connection_kwargs = {} | |
|
173 | # c.MongoDB.connection_args = [] | |
|
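For example, a remote mongod's host and port can be passed as positional Connection args (the host and port here are placeholders; see the pymongo docs for the full signature):

# c.MongoDB.connection_args = ['mongodb.example.com', 27017]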
174 | ||
|
175 | # This will specify the name of the mongo database for the controller to use. The default | |
|
176 | # behavior is to use the session ID of the SessionFactory object (a uuid). Overriding | |
|
177 | # this will result in task results persisting through multiple sessions. | |
|
178 | # c.MongoDB.database = 'ipythondb' | |
|
179 | ||
|
180 |
@@ -29,10 +29,10 @@ c = get_config()
|
29 | 29 | # c.Global.connect_delay = 0.1 |
|
30 | 30 | # c.Global.connect_max_tries = 15 |
|
31 | 31 | |
|
32 | # By default, the engine will look for the controller's FURL file in its own | |
|
|
33 | # cluster directory. Sometimes, the FURL file will be elsewhere and this | |
|
|
34 | # attribute can be set to the full path of the FURL file. | |
|
|
35 | # c.Global.furl_file = u'' | |
|
32 | # By default, the engine will look for the controller's JSON file in its own | |
|
33 | # cluster directory. Sometimes, the JSON file will be elsewhere and this | |
|
34 | # attribute can be set to the full path of the JSON file. | |
|
35 | # c.Global.url_file = u'/path/to/my/ipcontroller-engine.json' | |
|
36 | 36 | |
|
37 | 37 | # The working directory for the process. The application will use os.chdir |
|
38 | 38 | # to change to this directory before starting. |
@@ -78,12 +78,7 @@ c = get_config()
|
78 | 78 | |
|
79 | 79 | # You should not have to change these attributes. |
|
80 | 80 | |
|
81 | # c.Global.shell_class = 'IPython.kernel.core.interpreter.Interpreter' | |
|
82 | ||
|
83 | # c.Global.furl_file_name = u'ipcontroller-engine.furl' | |
|
84 | ||
|
85 | ||
|
86 | ||
|
81 | # c.Global.url_file_name = u'ipcontroller-engine.furl' | |
|
87 | 82 | |
|
88 | 83 | |
|
89 | 84 |
@@ -138,8 +138,8 @@ class ClusterDir(Configurable):
|
138 | 138 | |
|
139 | 139 | def copy_all_config_files(self, path=None, overwrite=False): |
|
140 | 140 | """Copy all config files into the active cluster directory.""" |
|
141 | for f in [u'ipcontrollerz_config.py', u'ipenginez_config.py', | |
|
|
142 | u'ipclusterz_config.py']: | |
|
|
141 | for f in [u'ipcontroller_config.py', u'ipengine_config.py', | |
|
142 | u'ipcluster_config.py']: | |
|
143 | 143 | self.copy_config_file(f, path=path, overwrite=overwrite) |
|
144 | 144 | |
|
145 | 145 | @classmethod |
@@ -164,11 +164,11 @@ class ClusterDir(Configurable):
|
164 | 164 | The path (directory) to put the cluster directory in. |
|
165 | 165 | profile : str |
|
166 | 166 | The name of the profile. The name of the cluster directory will |
|
167 | be "clusterz_<profile>". | |
|
|
167 | be "cluster_<profile>". | |
|
168 | 168 | """ |
|
169 | 169 | if not os.path.isdir(path): |
|
170 | 170 | raise ClusterDirError('Directory not found: %s' % path) |
|
171 | cluster_dir = os.path.join(path, u'clusterz_' + profile) | |
|
|
171 | cluster_dir = os.path.join(path, u'cluster_' + profile) | |
|
172 | 172 | return ClusterDir(location=cluster_dir) |
|
173 | 173 | |
|
174 | 174 | @classmethod |
@@ -190,9 +190,9 @@ class ClusterDir(Configurable):
|
190 | 190 | The IPython directory to use. |
|
191 | 191 | profile : unicode or str |
|
192 | 192 | The name of the profile. The name of the cluster directory |
|
193 | will be "clusterz_<profile>". | |
|
|
193 | will be "cluster_<profile>". | |
|
194 | 194 | """ |
|
195 | dirname = u'clusterz_' + profile | |
|
|
195 | dirname = u'cluster_' + profile | |
|
196 | 196 | cluster_dir_paths = os.environ.get('IPCLUSTER_DIR_PATH','') |
|
197 | 197 | if cluster_dir_paths: |
|
198 | 198 | cluster_dir_paths = cluster_dir_paths.split(':') |
@@ -37,7 +37,7 @@ from IPython.parallel.clusterdir import (
|
37 | 37 | #----------------------------------------------------------------------------- |
|
38 | 38 | |
|
39 | 39 | |
|
40 | default_config_file_name = u'ipclusterz_config.py' | |
|
|
40 | default_config_file_name = u'ipcluster_config.py' | |
|
41 | 41 | |
|
42 | 42 | |
|
43 | 43 | _description = """\ |
@@ -47,9 +47,9 @@ An IPython cluster consists of 1 controller and 1 or more engines.
|
47 | 47 | This command automates the startup of these processes using a wide |
|
48 | 48 | range of startup methods (SSH, local processes, PBS, mpiexec, |
|
49 | 49 | Windows HPC Server 2008). To start a cluster with 4 engines on your |
|
50 | local host simply do 'ipclusterz start -n 4'. For more complex usage | |
|
|
51 | you will typically do 'ipclusterz create -p mycluster', then edit | |
|
|
52 | configuration files, followed by 'ipclusterz start -p mycluster -n 4'. | |
|
|
50 | local host simply do 'ipcluster start -n 4'. For more complex usage | |
|
51 | you will typically do 'ipcluster create -p mycluster', then edit | |
|
52 | configuration files, followed by 'ipcluster start -p mycluster -n 4'. | |
|
53 | 53 | """ |
|
54 | 54 | |
|
55 | 55 | |
@@ -108,9 +108,9 @@ class IPClusterAppConfigLoader(ClusterDirConfigLoader):
|
108 | 108 | title='ipcluster subcommands', |
|
109 | 109 | description= |
|
110 | 110 | """ipcluster has a variety of subcommands. The general way of |
|
111 | running ipclusterz is 'ipclusterz <cmd> [options]'. To get help | |
|
|
112 | on a particular subcommand do 'ipclusterz <cmd> -h'.""" | |
|
|
113 | # help="For more help, type 'ipclusterz <cmd> -h'", | |
|
|
111 | running ipcluster is 'ipcluster <cmd> [options]'. To get help | |
|
112 | on a particular subcommand do 'ipcluster <cmd> -h'.""" | |
|
113 | # help="For more help, type 'ipcluster <cmd> -h'", | |
|
114 | 114 | ) |
|
115 | 115 | |
|
116 | 116 | # The "list" subcommand parser |
@@ -123,7 +123,7 @@ class IPClusterAppConfigLoader(ClusterDirConfigLoader):
|
123 | 123 | """List all available clusters, by cluster directory, that can |
|
124 | 124 | be found in the current working directly or in the ipython |
|
125 | 125 | directory. Cluster directories are named using the convention |
|
126 | 'clusterz_<profile>'.""" | |
|
|
126 | 'cluster_<profile>'.""" | |
|
127 | 127 | ) |
|
128 | 128 | |
|
129 | 129 | # The "create" subcommand parser |
@@ -136,13 +136,13 @@ class IPClusterAppConfigLoader(ClusterDirConfigLoader):
|
136 | 136 | """Create an ipython cluster directory by its profile name or |
|
137 | 137 | cluster directory path. Cluster directories contain |
|
138 | 138 | configuration, log and security related files and are named |
|
139 | using the convention 'clusterz_<profile>'. By default they are | |
|
|
139 | using the convention 'cluster_<profile>'. By default they are | |
|
140 | 140 | located in your ipython directory. Once created, you will |
|
141 | 141 | probably need to edit the configuration files in the cluster |
|
142 | 142 | directory to configure your cluster. Most users will create a |
|
143 | 143 | cluster directory by profile name, |
|
144 | 'ipclusterz create -p mycluster', which will put the directory | |
|
|
145 | in '<ipython_dir>/clusterz_mycluster'. | |
|
|
144 | 'ipcluster create -p mycluster', which will put the directory | |
|
145 | in '<ipython_dir>/cluster_mycluster'. | |
|
146 | 146 | """ |
|
147 | 147 | ) |
|
148 | 148 | paa = parser_create.add_argument |
@@ -162,10 +162,10 @@ class IPClusterAppConfigLoader(ClusterDirConfigLoader):
|
162 | 162 | """Start an ipython cluster by its profile name or cluster |
|
163 | 163 | directory. Cluster directories contain configuration, log and |
|
164 | 164 | security related files and are named using the convention |
|
165 | 'clusterz_<profile>' and should be creating using the 'start' | |
|
|
165 | 'cluster_<profile>' and should be created using the 'create' | |
|
166 | 166 | subcommand of 'ipcluster'. If your cluster directory is in |
|
167 | 167 | the cwd or the ipython directory, you can simply refer to it |
|
168 | using its profile name, 'ipclusterz start -n 4 -p <profile>`, | |
|
|
168 | using its profile name, 'ipcluster start -n 4 -p <profile>', | |
|
169 | 169 | otherwise use the '--cluster-dir' option. |
|
170 | 170 | """ |
|
171 | 171 | ) |
@@ -200,9 +200,9 @@ class IPClusterAppConfigLoader(ClusterDirConfigLoader):
|
200 | 200 | description= |
|
201 | 201 | """Stop a running ipython cluster by its profile name or cluster |
|
202 | 202 | directory. Cluster directories are named using the convention |
|
203 | 'clusterz_<profile>'. If your cluster directory is in | |
|
|
203 | 'cluster_<profile>'. If your cluster directory is in | |
|
204 | 204 | the cwd or the ipython directory, you can simply refer to it |
|
205 | using its profile name, 'ipclusterz stop -p <profile>`, otherwise | |
|
|
205 | using its profile name, 'ipcluster stop -p <profile>', otherwise | |
|
206 | 206 | use the '--cluster-dir' option. |
|
207 | 207 | """ |
|
208 | 208 | ) |
@@ -223,10 +223,10 @@ class IPClusterAppConfigLoader(ClusterDirConfigLoader):
|
223 | 223 | by profile name or cluster directory. |
|
224 | 224 | Cluster directories contain configuration, log and |
|
225 | 225 | security related files and are named using the convention |
|
226 | 'clusterz_<profile>' and should be creating using the 'start' | |
|
|
226 | 'cluster_<profile>' and should be created using the 'create' | |
|
227 | 227 | subcommand of 'ipcluster'. If your cluster directory is in |
|
228 | 228 | the cwd or the ipython directory, you can simply refer to it |
|
229 | using its profile name, 'ipclusterz engines -n 4 -p <profile>`, | |
|
|
229 | using its profile name, 'ipcluster engines -n 4 -p <profile>', | |
|
230 | 230 | otherwise use the '--cluster-dir' option. |
|
231 | 231 | """ |
|
232 | 232 | ) |
@@ -249,7 +249,7 @@ class IPClusterAppConfigLoader(ClusterDirConfigLoader):
|
249 | 249 | |
|
250 | 250 | class IPClusterApp(ApplicationWithClusterDir): |
|
251 | 251 | |
|
252 | name = u'ipclusterz' | |
|
|
252 | name = u'ipcluster' | |
|
253 | 253 | description = _description |
|
254 | 254 | usage = None |
|
255 | 255 | command_line_loader = IPClusterAppConfigLoader |
@@ -286,8 +286,8 @@ class IPClusterApp(ApplicationWithClusterDir):
|
286 | 286 | except ClusterDirError: |
|
287 | 287 | raise ClusterDirError( |
|
288 | 288 | "Could not find a cluster directory. A cluster dir must " |
|
289 | "be created before running 'ipclusterz start'. Do " | |
|
|
290 | "'ipclusterz create -h' or 'ipclusterz list -h' for more " | |
|
|
289 | "be created before running 'ipcluster start'. Do " | |
|
290 | "'ipcluster create -h' or 'ipcluster list -h' for more " | |
|
291 | 291 | "information about creating and listing cluster dirs." |
|
292 | 292 | ) |
|
293 | 293 | elif subcommand=='engines': |
@@ -297,8 +297,8 @@ class IPClusterApp(ApplicationWithClusterDir):
|
297 | 297 | except ClusterDirError: |
|
298 | 298 | raise ClusterDirError( |
|
299 | 299 | "Could not find a cluster directory. A cluster dir must " |
|
300 | "be created before running 'ipclusterz start'. Do " | |
|
|
301 | "'ipclusterz create -h' or 'ipclusterz list -h' for more " | |
|
|
300 | "be created before running 'ipcluster start'. Do " | |
|
301 | "'ipcluster create -h' or 'ipcluster list -h' for more " | |
|
302 | 302 | "information about creating and listing cluster dirs." |
|
303 | 303 | ) |
|
304 | 304 | |
@@ -322,9 +322,9 @@ class IPClusterApp(ApplicationWithClusterDir):
|
322 | 322 | files = os.listdir(path) |
|
323 | 323 | for f in files: |
|
324 | 324 | full_path = os.path.join(path, f) |
|
325 | if os.path.isdir(full_path) and f.startswith('clusterz_'): | |
|
|
325 | if os.path.isdir(full_path) and f.startswith('cluster_'): | |
|
326 | 326 | profile = full_path.split('_')[-1] |
|
327 | start_cmd = 'ipclusterz start -p %s -n 4' % profile | |
|
|
327 | start_cmd = 'ipcluster start -p %s -n 4' % profile | |
|
328 | 328 | print start_cmd + " ==> " + full_path |
|
329 | 329 | |
|
330 | 330 | def pre_construct(self): |
@@ -498,7 +498,7 @@ class IPClusterApp(ApplicationWithClusterDir):
|
498 | 498 | else: |
|
499 | 499 | self.log.critical( |
|
500 | 500 | 'Cluster is already running with [pid=%s]. ' |
|
501 | 'use "ipclusterz stop" to stop the cluster.' % pid | |
|
|
501 | 'use "ipcluster stop" to stop the cluster.' % pid | |
|
502 | 502 | ) |
|
503 | 503 | # Here I exit with an unusual exit status that other processes |
|
504 | 504 | # can watch for to learn how I exited. |
@@ -506,7 +506,7 @@ class IPClusterApp(ApplicationWithClusterDir):
|
506 | 506 | |
|
507 | 507 | # Now log and daemonize |
|
508 | 508 | self.log.info( |
|
509 | 'Starting ipclusterz with [daemon=%r]' % config.Global.daemonize | |
|
|
509 | 'Starting ipcluster with [daemon=%r]' % config.Global.daemonize | |
|
510 | 510 | ) |
|
511 | 511 | # TODO: Get daemonize working on Windows or as a Windows Server. |
|
512 | 512 | if config.Global.daemonize: |
@@ -48,7 +48,7 @@ from IPython.utils.traitlets import Instance, Unicode
|
48 | 48 | |
|
49 | 49 | |
|
50 | 50 | #: The default config file name for this application |
|
51 | default_config_file_name = u'ipcontrollerz_config.py' | |
|
|
51 | default_config_file_name = u'ipcontroller_config.py' | |
|
52 | 52 | |
|
53 | 53 | |
|
54 | 54 | _description = """Start the IPython controller for parallel computing. |
@@ -57,7 +57,7 @@ The IPython controller provides a gateway between the IPython engines and
|
57 | 57 | clients. The controller needs to be started before the engines and can be |
|
58 | 58 | configured using command line options or using a cluster directory. Cluster |
|
59 | 59 | directories contain config, log and security files and are usually located in |
|
60 | your ipython directory and named as "clusterz_<profile>". See the --profile | |
|
|
60 | your ipython directory and named as "cluster_<profile>". See the --profile | |
|
61 | 61 | and --cluster-dir options for details. |
|
62 | 62 | """ |
|
63 | 63 | |
@@ -251,7 +251,7 @@ class IPControllerAppConfigLoader(ClusterDirConfigLoader):
|
251 | 251 | |
|
252 | 252 | class IPControllerApp(ApplicationWithClusterDir): |
|
253 | 253 | |
|
254 | name = u'ipcontrollerz' | |
|
|
254 | name = u'ipcontroller' | |
|
255 | 255 | description = _description |
|
256 | 256 | command_line_loader = IPControllerAppConfigLoader |
|
257 | 257 | default_config_file_name = default_config_file_name |
@@ -40,7 +40,7 @@ from IPython.utils.importstring import import_item
|
40 | 40 | #----------------------------------------------------------------------------- |
|
41 | 41 | |
|
42 | 42 | #: The default config file name for this application |
|
43 | default_config_file_name = u'ipenginez_config.py' | |
|
|
43 | default_config_file_name = u'ipengine_config.py' | |
|
44 | 44 | |
|
45 | 45 | |
|
46 | 46 | mpi4py_init = """from mpi4py import MPI as mpi |
@@ -64,7 +64,7 @@ IPython engines run in parallel and perform computations on behalf of a client
|
64 | 64 | and controller. A controller needs to be started before the engines. The |
|
65 | 65 | engine can be configured using command line options or using a cluster |
|
66 | 66 | directory. Cluster directories contain config, log and security files and are |
|
67 | usually located in your ipython directory and named as "clusterz_<profile>". | |
|
|
67 | usually located in your ipython directory and named as "cluster_<profile>". | |
|
68 | 68 | See the --profile and --cluster-dir options for details. |
|
69 | 69 | """ |
|
70 | 70 | |
@@ -124,7 +124,7 @@ class IPEngineAppConfigLoader(ClusterDirConfigLoader):
|
124 | 124 | |
|
125 | 125 | class IPEngineApp(ApplicationWithClusterDir): |
|
126 | 126 | |
|
127 | name = u'ipenginez' | |
|
|
127 | name = u'ipengine' | |
|
128 | 128 | description = _description |
|
129 | 129 | command_line_loader = IPEngineAppConfigLoader |
|
130 | 130 | default_config_file_name = default_config_file_name |
@@ -39,7 +39,7 @@ IPython controllers and engines (and your own processes) can broadcast log messa
|
39 | 39 | by registering a `zmq.log.handlers.PUBHandler` with the `logging` module. The |
|
40 | 40 | logger can be configured using command line options or using a cluster |
|
41 | 41 | directory. Cluster directories contain config, log and security files and are |
|
42 | usually located in your ipython directory and named as "clusterz_<profile>". | |
|
|
42 | usually located in your ipython directory and named as "cluster_<profile>". | |
|
43 | 43 | See the --profile and --cluster-dir options for details. |
|
44 | 44 | """ |
|
45 | 45 |
@@ -63,15 +63,15 @@ except ImportError:
|
63 | 63 | #----------------------------------------------------------------------------- |
|
64 | 64 | |
|
65 | 65 | |
|
66 | ipclusterz_cmd_argv = pycmd2argv(get_ipython_module_path( | |
|
|
66 | ipcluster_cmd_argv = pycmd2argv(get_ipython_module_path( | |
|
67 | 67 | 'IPython.parallel.ipclusterapp' |
|
68 | 68 | )) |
|
69 | 69 | |
|
70 | ipenginez_cmd_argv = pycmd2argv(get_ipython_module_path( | |
|
|
70 | ipengine_cmd_argv = pycmd2argv(get_ipython_module_path( | |
|
71 | 71 | 'IPython.parallel.ipengineapp' |
|
72 | 72 | )) |
|
73 | 73 | |
|
74 | ipcontrollerz_cmd_argv = pycmd2argv(get_ipython_module_path( | |
|
|
74 | ipcontroller_cmd_argv = pycmd2argv(get_ipython_module_path( | |
|
75 | 75 | 'IPython.parallel.ipcontrollerapp' |
|
76 | 76 | )) |
|
77 | 77 | |
@@ -304,7 +304,7 @@ class LocalProcessLauncher(BaseLauncher):
|
304 | 304 | class LocalControllerLauncher(LocalProcessLauncher): |
|
305 | 305 | """Launch a controller as a regular external process.""" |
|
306 | 306 | |
|
307 | controller_cmd = List(ipcontrollerz_cmd_argv, config=True) | |
|
|
307 | controller_cmd = List(ipcontroller_cmd_argv, config=True) | |
|
308 | 308 | # Command line arguments to ipcontroller. |
|
309 | 309 | controller_args = List(['--log-to-file','--log-level', str(logging.INFO)], config=True) |
|
310 | 310 | |
@@ -322,7 +322,7 @@ class LocalControllerLauncher(LocalProcessLauncher):
|
322 | 322 | class LocalEngineLauncher(LocalProcessLauncher): |
|
323 | 323 | """Launch a single engine as a regular externall process.""" |
|
324 | 324 | |
|
325 | engine_cmd = List(ipenginez_cmd_argv, config=True) | |
|
|
325 | engine_cmd = List(ipengine_cmd_argv, config=True) | |
|
326 | 326 | # Command line arguments for ipengine. |
|
327 | 327 | engine_args = List( |
|
328 | 328 | ['--log-to-file','--log-level', str(logging.INFO)], config=True |
@@ -443,7 +443,7 @@ class MPIExecLauncher(LocalProcessLauncher):
|
443 | 443 | class MPIExecControllerLauncher(MPIExecLauncher): |
|
444 | 444 | """Launch a controller using mpiexec.""" |
|
445 | 445 | |
|
446 | controller_cmd = List(ipcontrollerz_cmd_argv, config=True) | |
|
|
446 | controller_cmd = List(ipcontroller_cmd_argv, config=True) | |
|
447 | 447 | # Command line arguments to ipcontroller. |
|
448 | 448 | controller_args = List(['--log-to-file','--log-level', str(logging.INFO)], config=True) |
|
449 | 449 | n = Int(1, config=False) |
@@ -462,7 +462,7 @@ class MPIExecControllerLauncher(MPIExecLauncher):
|
462 | 462 | |
|
463 | 463 | class MPIExecEngineSetLauncher(MPIExecLauncher): |
|
464 | 464 | |
|
465 | program = List(ipenginez_cmd_argv, config=True) | |
|
|
465 | program = List(ipengine_cmd_argv, config=True) | |
|
466 | 466 | # Command line arguments for ipengine. |
|
467 | 467 | program_args = List( |
|
468 | 468 | ['--log-to-file','--log-level', str(logging.INFO)], config=True |
@@ -531,13 +531,13 @@ class SSHLauncher(LocalProcessLauncher):
|
531 | 531 | |
|
532 | 532 | class SSHControllerLauncher(SSHLauncher): |
|
533 | 533 | |
|
534 | program = List(ipcontrollerz_cmd_argv, config=True) | |
|
|
534 | program = List(ipcontroller_cmd_argv, config=True) | |
|
535 | 535 | # Command line arguments to ipcontroller. |
|
536 | 536 | program_args = List(['-r', '--log-to-file','--log-level', str(logging.INFO)], config=True) |
|
537 | 537 | |
|
538 | 538 | |
|
539 | 539 | class SSHEngineLauncher(SSHLauncher): |
|
540 | program = List(ipenginez_cmd_argv, config=True) | |
|
|
540 | program = List(ipengine_cmd_argv, config=True) | |
|
541 | 541 | # Command line arguments for ipengine. |
|
542 | 542 | program_args = List( |
|
543 | 543 | ['--log-to-file','--log-level', str(logging.INFO)], config=True |
@@ -883,9 +883,9 @@ class PBSControllerLauncher(PBSLauncher):
|
883 | 883 | batch_file_name = CUnicode(u'pbs_controller', config=True) |
|
884 | 884 | default_template= CUnicode("""#!/bin/sh |
|
885 | 885 | #PBS -V |
|
886 | #PBS -N ipcontrollerz | |
|
|
886 | #PBS -N ipcontroller | |
|
887 | 887 | %s --log-to-file --cluster-dir $cluster_dir |
|
888 | """%(' '.join(ipcontrollerz_cmd_argv))) | |
|
|
888 | """%(' '.join(ipcontroller_cmd_argv))) | |
|
889 | 889 | |
|
890 | 890 | def start(self, cluster_dir): |
|
891 | 891 | """Start the controller by profile or cluster_dir.""" |
@@ -898,9 +898,9 @@ class PBSEngineSetLauncher(PBSLauncher):
|
898 | 898 | batch_file_name = CUnicode(u'pbs_engines', config=True) |
|
899 | 899 | default_template= CUnicode(u"""#!/bin/sh |
|
900 | 900 | #PBS -V |
|
901 | #PBS -N ipenginez | |
|
|
901 | #PBS -N ipengine | |
|
902 | 902 | %s --cluster-dir $cluster_dir |
|
903 | """%(' '.join(ipenginez_cmd_argv))) | |
|
|
903 | """%(' '.join(ipengine_cmd_argv))) | |
|
904 | 904 | |
|
905 | 905 | def start(self, n, cluster_dir): |
|
906 | 906 | """Start n engines by profile or cluster_dir.""" |
@@ -922,9 +922,9 @@ class SGEControllerLauncher(SGELauncher):
|
922 | 922 | batch_file_name = CUnicode(u'sge_controller', config=True) |
|
923 | 923 | default_template= CUnicode(u"""#$$ -V |
|
924 | 924 | #$$ -S /bin/sh |
|
925 | #$$ -N ipcontrollerz | |
|
|
925 | #$$ -N ipcontroller | |
|
926 | 926 | %s --log-to-file --cluster-dir $cluster_dir |
|
927 | """%(' '.join(ipcontrollerz_cmd_argv))) | |
|
|
927 | """%(' '.join(ipcontroller_cmd_argv))) | |
|
928 | 928 | |
|
929 | 929 | def start(self, cluster_dir): |
|
930 | 930 | """Start the controller by profile or cluster_dir.""" |
@@ -936,9 +936,9 @@ class SGEEngineSetLauncher(SGELauncher):
|
936 | 936 | batch_file_name = CUnicode(u'sge_engines', config=True) |
|
937 | 937 | default_template = CUnicode("""#$$ -V |
|
938 | 938 | #$$ -S /bin/sh |
|
939 | #$$ -N ipenginez | |
|
|
939 | #$$ -N ipengine | |
|
940 | 940 | %s --cluster-dir $cluster_dir |
|
941 | """%(' '.join(ipenginez_cmd_argv))) | |
|
|
941 | """%(' '.join(ipengine_cmd_argv))) | |
|
942 | 942 | |
|
943 | 943 | def start(self, n, cluster_dir): |
|
944 | 944 | """Start n engines by profile or cluster_dir.""" |
@@ -954,7 +954,7 @@ class SGEEngineSetLauncher(SGELauncher):
|
954 | 954 | class IPClusterLauncher(LocalProcessLauncher): |
|
955 | 955 | """Launch the ipcluster program in an external process.""" |
|
956 | 956 | |
|
957 | ipcluster_cmd = List(ipclusterz_cmd_argv, config=True) | |
|
|
957 | ipcluster_cmd = List(ipcluster_cmd_argv, config=True) | |
|
958 | 958 | # Command line arguments to pass to ipcluster. |
|
959 | 959 | ipcluster_args = List( |
|
960 | 960 | ['--clean-logs', '--log-to-file', '--log-level', str(logging.INFO)], config=True) |
@@ -1,5 +1,5 @@
|
1 | 1 | #!/usr/bin/env python |
|
2 | """A simple logger object that consolidates messages incoming from ipclusterz processes.""" | |
|
|
2 | """A simple logger object that consolidates messages incoming from ipcluster processes.""" | |
|
3 | 3 | |
|
4 | 4 | #----------------------------------------------------------------------------- |
|
5 | 5 | # Copyright (C) 2011 The IPython Development Team |
|
1 | NO CONTENT: file renamed from IPython/parallel/scripts/ipclusterz to IPython/parallel/scripts/ipcluster |
|
1 | NO CONTENT: file renamed from IPython/parallel/scripts/ipcontrollerz to IPython/parallel/scripts/ipcontroller |
|
1 | NO CONTENT: file renamed from IPython/parallel/scripts/ipenginez to IPython/parallel/scripts/ipengine |
|
1 | NO CONTENT: file renamed from IPython/parallel/scripts/iploggerz to IPython/parallel/scripts/iplogger |
@@ -23,7 +23,7 @@ blackhole = tempfile.TemporaryFile()
|
23 | 23 | # nose setup/teardown |
|
24 | 24 | |
|
25 | 25 | def setup(): |
|
26 | cp = Popen('ipcontrollerz --profile iptest -r --log-level 10 --log-to-file'.split(), stdout=blackhole, stderr=STDOUT) | |
|
|
26 | cp = Popen('ipcontroller --profile iptest -r --log-level 10 --log-to-file'.split(), stdout=blackhole, stderr=STDOUT) | |
|
27 | 27 | processes.append(cp) |
|
28 | 28 | time.sleep(.5) |
|
29 | 29 | add_engines(1) |
@@ -38,7 +38,7 @@ def add_engines(n=1, profile='iptest'):
|
38 | 38 | base = len(rc) |
|
39 | 39 | eps = [] |
|
40 | 40 | for i in range(n): |
|
41 | ep = Popen(['ipenginez']+ ['--profile', profile, '--log-level', '10', '--log-to-file'], stdout=blackhole, stderr=STDOUT) | |
|
|
41 | ep = Popen(['ipengine']+ ['--profile', profile, '--log-level', '10', '--log-to-file'], stdout=blackhole, stderr=STDOUT) | |
|
42 | 42 | # ep.start() |
|
43 | 43 | processes.append(ep) |
|
44 | 44 | eps.append(ep) |
@@ -13,7 +13,7 @@
|
13 | 13 | |
|
14 | 14 | from unittest import TestCase |
|
15 | 15 | |
|
16 | from IPython.testing.
|
|
16 | from IPython.testing.decorators import parametric | |
|
17 | 17 | from IPython.utils import newserialized as ns |
|
18 | 18 | from IPython.utils.pickleutil import can, uncan, CannedObject, CannedFunction |
|
19 | 19 | from IPython.parallel.tests.clienttest import skip_without |
@@ -185,6 +185,7 @@ def make_exclude():
|
185 | 185 | |
|
186 | 186 | if not have['zmq']: |
|
187 | 187 | exclusions.append(ipjoin('zmq')) |
|
188 | exclusions.append(ipjoin('parallel')) | |
|
188 | 189 | |
|
189 | 190 | # This is needed for the reg-exp to match on win32 in the ipdoctest plugin. |
|
190 | 191 | if sys.platform == 'win32': |
@@ -19,8 +19,7 @@ Contents
|
19 | 19 | whatsnew/index.txt |
|
20 | 20 | install/index.txt |
|
21 | 21 | interactive/index.txt |
|
22 |
|
|
23 | parallelz/index.txt | |
|
22 | parallel/index.txt | |
|
24 | 23 | config/index.txt |
|
25 | 24 | development/index.txt |
|
26 | 25 | api/index.txt |
@@ -9,16 +9,16 @@ install all of its dependencies.
|
9 | 9 | |
|
10 | 10 | |
|
11 | 11 | Please let us know if you have problems installing IPython or any of its |
|
12 | dependencies. Officially, IPython requires Python version 2.5 or greater. However, we | |
|
|
13 | have *not* yet started to port IPython to Python 3.0. | |
|
12 | dependencies. Officially, IPython requires Python version 2.6 or 2.7. There | |
|
13 | is an experimental port of IPython for Python3 `on GitHub | |
|
14 | <https://github.com/ipython/ipython-py3k>`_. | |
|
14 | 15 | |
|
15 | 16 | .. warning:: |
|
16 | 17 | |
|
17 | Officially, IPython supports Python versions 2.5 and 2.6. | |
|
|
18 | Officially, IPython supports Python versions 2.6 and 2.7. | |
|
18 | 19 | |
|
19 | IPython 0.10 has only been well tested with Python 2.5 and 2.6. Parts of | |
|
20 | it may work with Python 2.4, but we do not officially support Python 2.4 | |
|
21 | anymore. If you need to use 2.4, you can still run IPython 0.9. | |
|
20 | IPython 0.11 has a hard syntax dependency on 2.6, and will no longer work | |
|
21 | on Python <= 2.5. | |
|
22 | 22 | |
|
23 | 23 | Some of the installation approaches use the :mod:`setuptools` package and its |
|
24 | 24 | :command:`easy_install` command line program. In many scenarios, this provides |
@@ -38,9 +38,9 @@ optional dependencies:
|
38 | 38 | |
|
39 | 39 | .. code-block:: bash |
|
40 | 40 | |
|
41 | $ easy_install ipython[kernel,security,test] | |
|
|
41 | $ easy_install ipython[zmq,test] | |
|
42 | 42 | |
|
43 | This will get Twisted, zope.interface and Foolscap, which are needed for | |
|
|
43 | This will get pyzmq, which is needed for | |
|
44 | 44 | IPython's parallel computing features as well as the nose package, which will |
|
45 | 45 | enable you to run IPython's test suite. |
|
46 | 46 | |
@@ -221,8 +221,7 @@ On Windows, you will need the PyReadline module. PyReadline is a separate,
|
221 | 221 | Windows only implementation of readline that uses native Windows calls through |
|
222 | 222 | :mod:`ctypes`. The easiest way of installing PyReadline is you use the binary |
|
223 | 223 | installer available `here <http://ipython.scipy.org/dist/>`_. The :mod:`ctypes` |
|
224 | module, which comes with Python 2.5 and greater, is required by PyReadline. It | |
|
|
225 | is available for Python 2.4 at http://python.net/crew/theller/ctypes. | |
|
224 | module, which comes with Python 2.5 and greater, is required by PyReadline. | |
|
226 | 225 | |
|
227 | 226 | nose |
|
228 | 227 | ---- |
@@ -267,91 +266,30 @@ The `pexpect <http://www.noah.org/wiki/Pexpect>`_ package is used in IPython's
|
267 | 266 | |
|
268 | 267 | Windows users are out of luck as pexpect does not run there. |
|
269 | 268 | |
|
270 | Dependencies for IPython.kernel (parallel computing) | |
|
|
271 | ==================================================== | |
|
269 | Dependencies for IPython.parallel (parallel computing) | |
|
270 | ====================================================== | |
|
272 | 271 | |
|
273 | The IPython kernel provides a nice architecture for parallel computing. The | |
|
274 | main focus of this architecture is on interactive parallel computing. These | |
|
275 | features require a number of additional packages: | |
|
272 | :mod:`IPython.kernel` has been replaced by :mod:`IPython.parallel`, | |
|
273 | which uses ZeroMQ for all communication. | |
|
276 | 274 | |
|
277 | * zope.interface (yep, we use interfaces) | |
|
278 | * Twisted (asynchronous networking framework) | |
|
279 | * Foolscap (a nice, secure network protocol) | |
|
280 | * pyOpenSSL (security for network connections) | |
|
275 | IPython.parallel provides a nice architecture for parallel computing. The | |
|
276 | main focus of this architecture is on interactive parallel computing. These | |
|
277 | features require just one package: pyzmq. See the next section for pyzmq | |
|
278 | details. | |
|
281 | 279 | |
|
282 | 280 | On a Unix style platform (including OS X), if you want to use |
|
283 | 281 | :mod:`setuptools`, you can just do: |
|
284 | 282 | |
|
285 | 283 | .. code-block:: bash |
|
286 | 284 | |
|
287 | $ easy_install ipython[kernel,security,test] | |
|
|
288 | $ easy_install ipython[security] # pyOpenSSL | |
|
289 | ||
|
290 | zope.interface and Twisted | |
|
291 | -------------------------- | |
|
292 | ||
|
293 | Twisted [Twisted]_ and zope.interface [ZopeInterface]_ are used for networking | |
|
294 | related things. On Unix style platforms (including OS X), the simplest way of | |
|
295 | getting the these is to use :command:`easy_install`: | |
|
296 | ||
|
297 | .. code-block:: bash | |
|
298 | ||
|
299 | $ easy_install zope.interface | |
|
300 | $ easy_install Twisted | |
|
285 | $ easy_install ipython[zmq] # will include pyzmq | |
|
301 | 286 | |
|
302 | Of course, you can also download the source tarballs from the Twisted website | |
|
303 | [Twisted]_ and the | |
|
304 | `zope.interface page at PyPI <http://pypi.python.org/pypi/zope.interface>`_ | |
|
305 | and do the usual ``python setup.py install`` if you prefer. | |
|
287 | Security in IPython.parallel is provided by SSH tunnels. By default, Linux | |
|
288 | and OSX clients will use the shell ssh command, but on Windows, we also | |
|
289 | support tunneling with paramiko [paramiko]_. | |
|
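For example, a client outside the cluster's network can open such a tunnel by passing the controller's ssh server to the client constructor (the hostname is a placeholder; a sketch, not the full Client signature):

.. code-block:: python

    from IPython.parallel import Client
    # all connections to the controller are tunnelled through sshserver
    rc = Client(sshserver='user@controller.example.com')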
306 | 290 | |
|
307 | Windows is a bit different. For zope.interface and Twisted, simply get the | |
|
308 | latest binary ``.exe`` installer from the Twisted website. This installer | |
|
309 | includes both zope.interface and Twisted and should just work. | |
|
310 | ||
|
311 | Foolscap | |
|
312 | -------- | |
|
313 | ||
|
314 | Foolscap [Foolscap]_ uses Twisted to provide a very nice secure RPC protocol that we use to implement our parallel computing features. | |
|
315 | ||
|
316 | On all platforms a simple: | |
|
317 | ||
|
318 | .. code-block:: bash | |
|
319 | ||
|
320 | $ easy_install foolscap | |
|
321 | ||
|
322 | should work. You can also download the source tarballs from the `Foolscap | |
|
323 | website <http://foolscap.lothar.com/trac>`_ and do ``python setup.py install`` | |
|
324 | if you prefer. | |
|
325 | ||
|
326 | pyOpenSSL | |
|
327 | --------- | |
|
328 | ||
|
329 | IPython does not work with version 0.7 of pyOpenSSL [pyOpenSSL]_. It is known | |
|
330 | to work with version 0.6 and will likely work with the more recent 0.8 and 0.9 | |
|
331 | versions. There are a couple of options for getting this: | |
|
332 | ||
|
333 | 1. Most Linux distributions have packages for pyOpenSSL. | |
|
334 | 2. The built-in Python 2.5 on OS X 10.5 already has it installed. | |
|
335 | 3. There are source tarballs on the pyOpenSSL website. On Unix-like | |
|
336 | platforms, these can be built using ``python seutp.py install``. | |
|
337 | 4. There is also a binary ``.exe`` Windows installer on the | |
|
338 | `pyOpenSSL website <http://pyopenssl.sourceforge.net/>`_. | |
|
339 | ||
|
340 | Dependencies for IPython.frontend (the IPython GUI) | |
|
341 | =================================================== | |
|
342 | ||
|
343 | wxPython | |
|
344 | -------- | |
|
345 | ||
|
346 | Starting with IPython 0.9, IPython has a new :mod:`IPython.frontend` package | |
|
347 | that has a nice wxPython based IPython GUI. As you would expect, this GUI | |
|
348 | requires wxPython. Most Linux distributions have wxPython packages available | |
|
349 | and the built-in Python on OS X comes with wxPython preinstalled. For Windows, | |
|
350 | a binary installer is available on the `wxPython website | |
|
351 | <http://www.wxpython.org/>`_. | |
|
352 | ||
|
353 | Dependencies for IPython.zmq (new parallel) | |
|
354 | =========================================== | |
|
291 | Dependencies for IPython.zmq | |
|
292 | ============================ | |
|
355 | 293 | |
|
356 | 294 | pyzmq |
|
357 | 295 | ----- |
@@ -359,9 +297,11 @@ pyzmq
|
359 | 297 | IPython 0.11 introduced some new functionality, including a two-process |
|
360 | 298 | execution model using ZeroMQ for communication [ZeroMQ]_. The Python bindings |
|
361 | 299 | to ZeroMQ are found in the pyzmq project, which is easy_install-able once you |
|
362 | have ZeroMQ installed. :mod:`IPython.kernel` is also in the process of being | |
|
363 | replaced by :mod:`IPython.zmq.parallel`, which uses ZeroMQ for all | |
|
364 | communication. | |
|
300 | have ZeroMQ installed (or even if you don't). | |
|
301 | ||
|
302 | IPython.zmq depends on pyzmq >= 2.0.10.1, but IPython.parallel requires the more | |
|
303 | recent 2.1.4, which also has binary releases for OS X and Windows that do not | |
|
304 | require prior installation of libzmq. | |
|
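A quick way to check which versions are installed is pyzmq's version helpers (a sketch):

.. code-block:: python

    import zmq
    # pyzmq and libzmq versions, respectively
    print zmq.pyzmq_version(), zmq.zmq_version()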
365 | 305 | |
|
366 | 306 | Dependencies for ipython-qtconsole (new GUI) |
|
367 | 307 | ============================================ |
@@ -377,11 +317,12 @@ which can be installed from the
|
377 | 317 | pygments |
|
378 | 318 | -------- |
|
379 | 319 | |
|
380 | The syntax-highlighting in ``ipython-qtconsole`` is done with the pygments project, which is easy_install-able. | |
|
|
320 | The syntax-highlighting in ``ipython-qtconsole`` is done with the pygments project, | |
|
321 | which is easy_install-able. | |
|
381 | 322 | |
|
382 | 323 | .. [Twisted] Twisted matrix. http://twistedmatrix.org |
|
383 | 324 | .. [ZopeInterface] http://pypi.python.org/pypi/zope.interface |
|
384 | 325 | .. [Foolscap] Foolscap network protocol. http://foolscap.lothar.com/trac |
|
385 | 326 | .. [pyOpenSSL] pyOpenSSL. http://pyopenssl.sourceforge.net |
|
386 | 327 | .. [ZeroMQ] ZeroMQ. http://www.zeromq.org |
|
387 | ||
|
328 | .. [paramiko] paramiko. https://github.com/robey/paramiko |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/asian_call.pdf to docs/source/parallel/asian_call.pdf |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/asian_call.png to docs/source/parallel/asian_call.png |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/asian_put.pdf to docs/source/parallel/asian_put.pdf |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/asian_put.png to docs/source/parallel/asian_put.png |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/dag_dependencies.txt to docs/source/parallel/dag_dependencies.txt |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/dagdeps.pdf to docs/source/parallel/dagdeps.pdf |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/dagdeps.png to docs/source/parallel/dagdeps.png |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/hpc_job_manager.pdf to docs/source/parallel/hpc_job_manager.pdf |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/hpc_job_manager.png to docs/source/parallel/hpc_job_manager.png |
@@ -4,9 +4,19 b'' | |||
|
4 | 4 | Using IPython for parallel computing |
|
5 | 5 | ==================================== |
|
6 | 6 | |
|
7 | The twisted-based :mod:`IPython.kernel` has been removed, in favor of | |
|
8 | the new 0MQ-based :mod:`IPython.parallel`, whose merge into master is imminent. | |
|
7 | .. toctree:: | |
|
8 | :maxdepth: 2 | |
|
9 | ||
|
10 | parallel_intro.txt | |
|
11 | parallel_process.txt | |
|
12 | parallel_multiengine.txt | |
|
13 | parallel_task.txt | |
|
14 | parallel_mpi.txt | |
|
15 | parallel_security.txt | |
|
16 | parallel_winhpc.txt | |
|
17 | parallel_demos.txt | |
|
18 | dag_dependencies.txt | |
|
19 | parallel_details.txt | |
|
20 | parallel_transition.txt | |
|
21 | ||
|
9 | 22 | |
|
10 | Until that code is merged, it can be found in the `newparallel branch | |
|
11 | <https://github.com/ipython/ipython/tree/newparallel>`_, and its draft documentation can be | |
|
12 | found `here <http://minrk.github.com/ipython-doc/newparallel>`_. No newline at end of file |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/ipcluster_create.pdf to docs/source/parallel/ipcluster_create.pdf |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/ipcluster_create.png to docs/source/parallel/ipcluster_create.png |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/ipcluster_start.pdf to docs/source/parallel/ipcluster_start.pdf |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/ipcluster_start.png to docs/source/parallel/ipcluster_start.png |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/ipython_shell.pdf to docs/source/parallel/ipython_shell.pdf |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/ipython_shell.png to docs/source/parallel/ipython_shell.png |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/mec_simple.pdf to docs/source/parallel/mec_simple.pdf |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/mec_simple.png to docs/source/parallel/mec_simple.png |
@@ -110,7 +110,7 b' results. The code to run this calculation in parallel is contained in' | |||
|
110 | 110 | :file:`docs/examples/newparallel/parallelpi.py`. This code can be run in parallel |
|
111 | 111 | using IPython by following these steps: |
|
112 | 112 | |
|
113 | 1. Use :command:`ipclusterz` to start 15 engines. We used an 8 core (2 quad

113 | 1. Use :command:`ipcluster` to start 15 engines. We used an 8 core (2 quad | |
|
114 | 114 | core CPUs) cluster with hyperthreading enabled which makes the 8 cores |
|
115 | 115 | looks like 16 (1 controller + 15 engines) in the OS. However, the maximum |
|
116 | 116 | speedup we can observe is still only 8x. |
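For orientation, the client-side pattern that :file:`parallelpi.py` builds on looks roughly like this (a sketch, not the actual example code):

.. sourcecode:: python

    from IPython.parallel import Client

    rc = Client()   # connect to the cluster started with ipcluster
    view = rc[:]    # a DirectView spanning all engines
    # run a task on every engine and collect one result per engine:
    ar = view.apply_async(sum, range(10))
    print(ar.get())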
@@ -230,7 +230,7 b' plot using Matplotlib.' | |||
|
230 | 230 | .. literalinclude:: ../../examples/newparallel/mcdriver.py |
|
231 | 231 | :language: python |
|
232 | 232 | |
|
233 | To use this code, start an IPython cluster using :command:`ipclusterz`, open

233 | To use this code, start an IPython cluster using :command:`ipcluster`, open | |
|
234 | 234 | IPython in the pylab mode with the file :file:`mcdriver.py` in your current |
|
235 | 235 | working directory and then type: |
|
236 | 236 |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/parallel_details.txt to docs/source/parallel/parallel_details.txt |
@@ -156,7 +156,7 b' To connect and authenticate to the controller an engine or client needs' | |||
|
156 | 156 | some information that the controller has stored in a JSON file. |
|
157 | 157 | Thus, the JSON files need to be copied to a location where |
|
158 | 158 | the clients and engines can find them. Typically, this is the |
|
159 | :file:`~/.ipython/clusterz_default/security` directory on the host where the

159 | :file:`~/.ipython/cluster_default/security` directory on the host where the | |
|
160 | 160 | client/engine is running (which could be a different host than the controller). |
|
161 | 161 | Once the JSON files are copied over, everything should work fine. |
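Once they are copied, a minimal check that a client really reaches the controller (assuming the default profile described here) is:

.. sourcecode:: python

    from IPython.parallel import Client
    # With no arguments, the JSON file is read from the default
    # ~/.ipython/cluster_default/security directory.
    rc = Client()
    print(rc.ids)  # the ids of the engines currently registered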
|
162 | 162 | |
@@ -192,10 +192,10 b' Getting Started' | |||
|
192 | 192 | To use IPython for parallel computing, you need to start one instance of the |
|
193 | 193 | controller and one or more instances of the engine. Initially, it is best to |
|
194 | 194 | simply start a controller and engines on a single host using the |
|
195 | :command:`ipclusterz` command. To start a controller and 4 engines on your

195 | :command:`ipcluster` command. To start a controller and 4 engines on your | |
|
196 | 196 | localhost, just do:: |
|
197 | 197 | |
|
198 | $ ipclusterz start -n 4

198 | $ ipcluster start -n 4 | |
|
199 | 199 | |
|
200 | 200 | More details about starting the IPython controller and engines can be found |
|
201 | 201 | :ref:`here <parallel_process>`.
@@ -218,7 +218,7 b' everything is working correctly, try the following commands:' | |||
|
218 | 218 | |
|
219 | 219 | |
|
220 | 220 | When a client is created with no arguments, the client tries to find the corresponding JSON file |
|
221 | in the local `~/.ipython/clusterz_default/security` directory. Or if you specified a profile,

221 | in the local `~/.ipython/cluster_default/security` directory. Or if you specified a profile, | |
|
222 | 222 | you can use that with the Client. This should cover most cases: |
|
223 | 223 | |
|
224 | 224 | .. sourcecode:: ipython |
@@ -50,16 +50,16 b' To use code that calls MPI, there are typically two things that MPI requires.' | |||
|
50 | 50 | There are a couple of ways that you can start the IPython engines and get |
|
51 | 51 | these things to happen. |
|
52 | 52 | |
|
53 | Automatic starting using :command:`mpiexec` and :command:`ipclusterz`

53 | Automatic starting using :command:`mpiexec` and :command:`ipcluster` | |
|
54 | 54 | -------------------------------------------------------------------- |
|
55 | 55 | |
|
56 | The easiest approach is to use the `mpiexec` mode of :command:`ipclusterz`,

56 | The easiest approach is to use the `mpiexec` mode of :command:`ipcluster`, | |
|
57 | 57 | which will first start a controller and then a set of engines using |
|
58 | 58 | :command:`mpiexec`:: |
|
59 | 59 | |
|
60 | $ ipclusterz mpiexec -n 4

60 | $ ipcluster mpiexec -n 4 | |
|
61 | 61 | |
|
62 | This approach is best as interrupting :command:`ipclusterz` will automatically

62 | This approach is best as interrupting :command:`ipcluster` will automatically | |
|
63 | 63 | stop and clean up the controller and engines. |
|
64 | 64 | |
|
65 | 65 | Manual starting using :command:`mpiexec` |
@@ -68,20 +68,20 b' Manual starting using :command:`mpiexec`' | |||
|
68 | 68 | If you want to start the IPython engines using the :command:`mpiexec`, just |
|
69 | 69 | do:: |
|
70 | 70 | |
|
71 | $ mpiexec -n 4 ipenginez --mpi=mpi4py

71 | $ mpiexec -n 4 ipengine --mpi=mpi4py | |
|
72 | 72 | |
|
73 | 73 | This requires that you already have a controller running and that the FURL |
|
74 | 74 | files for the engines are in place. We also have built in support for |
|
75 | 75 | PyTrilinos [PyTrilinos]_, which can be used (assuming it is installed) by
|
76 | 76 | starting the engines with:: |
|
77 | 77 | |
|
78 | $ mpiexec -n 4 ipenginez --mpi=pytrilinos

78 | $ mpiexec -n 4 ipengine --mpi=pytrilinos | |
|
79 | 79 | |
|
80 | Automatic starting using PBS and :command:`ipclusterz`

80 | Automatic starting using PBS and :command:`ipcluster` | |
|
81 | 81 | ------------------------------------------------------ |
|
82 | 82 | |
|
83 | The :command:`ipclusterz` command also has built-in integration with PBS. For

84 | more information on this approach, see our documentation on :ref:`ipclusterz

83 | The :command:`ipcluster` command also has built-in integration with PBS. For | |
|
84 | more information on this approach, see our documentation on :ref:`ipcluster | |
|
85 | 85 | <parallel_process>`. |
|
86 | 86 | |
|
87 | 87 | Actually using MPI |
@@ -110,7 +110,7 b' distributed array. Save the following text in a file called :file:`psum.py`:' | |||
|
110 | 110 | |
|
111 | 111 | Now, start an IPython cluster:: |
|
112 | 112 | |
|
113 | $ ipclusterz start -p mpi -n 4

113 | $ ipcluster start -p mpi -n 4 | |
|
114 | 114 | |
|
115 | 115 | .. note:: |
|
116 | 116 |
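The :file:`psum.py` named in this hunk's header is essentially an mpi4py Allreduce; a sketch consistent with that description (details assumed, not copied from the file):

.. sourcecode:: python

    # psum.py -- parallel sum of a distributed array
    from mpi4py import MPI
    import numpy as np

    def psum(a):
        s = np.array(np.sum(a), 'd')   # local partial sum
        total = np.array(0.0, 'd')
        MPI.COMM_WORLD.Allreduce([s, MPI.DOUBLE], [total, MPI.DOUBLE],
                                 op=MPI.SUM)
        return total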
@@ -17,9 +17,9 b' Starting the IPython controller and engines' | |||
|
17 | 17 | |
|
18 | 18 | To follow along with this tutorial, you will need to start the IPython |
|
19 | 19 | controller and four IPython engines. The simplest way of doing this is to use |
|
20 | the :command:`ipclusterz` command::

20 | the :command:`ipcluster` command:: | |
|
21 | 21 | |
|
22 | $ ipclusterz start -n 4

22 | $ ipcluster start -n 4 | |
|
23 | 23 | |
|
24 | 24 | For more detailed information about starting the controller and engines, see |
|
25 | 25 | our :ref:`introduction <ip1par>` to using IPython for parallel computing. |
@@ -37,7 +37,7 b' module and then create a :class:`.Client` instance:' | |||
|
37 | 37 | In [2]: rc = Client() |
|
38 | 38 | |
|
39 | 39 | This form assumes that the default connection information (stored in |
|
40 | :file:`ipcontroller-client.json` found in :file:`IPYTHON_DIR/clusterz_default/security`) is

40 | :file:`ipcontroller-client.json` found in :file:`IPYTHON_DIR/cluster_default/security`) is | |
|
41 | 41 | accurate. If the controller was started on a remote machine, you must copy that connection |
|
42 | 42 | file to the client machine, or enter its contents as arguments to the Client constructor: |
|
43 | 43 |
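If the file cannot be placed in the default directory, its path (here illustrative) can be handed to the constructor instead:

.. sourcecode:: python

    from IPython.parallel import Client
    rc = Client('/path/to/ipcontroller-client.json')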
|
1 | NO CONTENT: file renamed from docs/source/parallelz/parallel_pi.pdf to docs/source/parallel/parallel_pi.pdf |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/parallel_pi.png to docs/source/parallel/parallel_pi.png |
@@ -11,12 +11,12 b' Because of this, there are many different possibilities.' | |||
|
11 | 11 | |
|
12 | 12 | Broadly speaking, there are two ways of going about starting a controller and engines: |
|
13 | 13 | |
|
14 | * In an automated manner using the :command:`ipclusterz` command.

15 | * In a more manual way using the :command:`ipcontrollerz` and

16 | :command:`ipenginez` commands.

14 | * In an automated manner using the :command:`ipcluster` command. | |
|
15 | * In a more manual way using the :command:`ipcontroller` and | |
|
16 | :command:`ipengine` commands. | |
|
17 | 17 | |
|
18 | 18 | This document describes both of these methods. We recommend that new users |
|
19 | start with the :command:`ipclusterz` command as it simplifies many common usage

19 | start with the :command:`ipcluster` command as it simplifies many common usage | |
|
20 | 20 | cases. |
|
21 | 21 | |
|
22 | 22 | General considerations |
@@ -30,29 +30,29 b' matter which method you use to start your IPython cluster.' | |||
|
30 | 30 | Let's say that you want to start the controller on ``host0`` and engines on |
|
31 | 31 | hosts ``host1``-``hostn``. The following steps are then required: |
|
32 | 32 | |
|
33 | 1. Start the controller on ``host0`` by running :command:`ipcontrollerz` on

33 | 1. Start the controller on ``host0`` by running :command:`ipcontroller` on | |
|
34 | 34 | ``host0``. |
|
35 | 35 | 2. Move the JSON file (:file:`ipcontroller-engine.json`) created by the |
|
36 | 36 | controller from ``host0`` to hosts ``host1``-``hostn``. |
|
37 | 37 | 3. Start the engines on hosts ``host1``-``hostn`` by running |
|
38 | :command:`ipenginez`. This command has to be told where the JSON file

38 | :command:`ipengine`. This command has to be told where the JSON file | |
|
39 | 39 | (:file:`ipcontroller-engine.json`) is located. |
|
40 | 40 | |
|
41 | 41 | At this point, the controller and engines will be connected. By default, the JSON files |
|
42 | created by the controller are put into the :file:`~/.ipython/clusterz_default/security`

42 | created by the controller are put into the :file:`~/.ipython/cluster_default/security` | |
|
43 | 43 | directory. If the engines share a filesystem with the controller, step 2 can be skipped as |
|
44 | 44 | the engines will automatically look at that location. |
|
45 | 45 | |
|
46 | 46 | The final step required to actually use the running controller from a client is to move |
|
47 | 47 | the JSON file :file:`ipcontroller-client.json` from ``host0`` to any host where clients |
|
48 | will be run. If these files are put into the :file:`~/.ipython/clusterz_default/security`

48 | will be run. If these files are put into the :file:`~/.ipython/cluster_default/security`
|
49 | 49 | directory of the client's host, they will be found automatically. Otherwise, the full path |
|
50 | 50 | to them has to be passed to the client's constructor. |
|
51 | 51 | |
|
52 | Using :command:`ipclusterz`

52 | Using :command:`ipcluster` | |
|
53 | 53 | =========================== |
|
54 | 54 | |
|
55 | The :command:`ipclusterz` command provides a simple way of starting a

55 | The :command:`ipcluster` command provides a simple way of starting a | |
|
56 | 56 | controller and engines in the following situations: |
|
57 | 57 | |
|
58 | 58 | 1. When the controller and engines are all run on localhost. This is useful |
@@ -67,24 +67,24 b' controller and engines in the following situations:' | |||
|
67 | 67 | |
|
68 | 68 | .. note:: |
|
69 | 69 | |
|
70 | Currently :command:`ipclusterz` requires that the

70 | Currently :command:`ipcluster` requires that the | |
|
71 | 71 | :file:`~/.ipython/cluster_<profile>/security` directory live on a shared filesystem that is |
|
72 | 72 | seen by both the controller and engines. If you don't have a shared file |
|
73 | system you will need to use :command:`ipcontrollerz` and

74 | :command:`ipenginez` directly.

73 | system you will need to use :command:`ipcontroller` and | |
|
74 | :command:`ipengine` directly. | |
|
75 | 75 | |
|
76 | Under the hood, :command:`ipclusterz` just uses :command:`ipcontrollerz`

77 | and :command:`ipenginez` to perform the steps described above.

76 | Under the hood, :command:`ipcluster` just uses :command:`ipcontroller` | |
|
77 | and :command:`ipengine` to perform the steps described above. | |
|
78 | 78 | |
|
79 | The simplest way to use ipclusterz requires no configuration, and will

79 | The simplest way to use ipcluster requires no configuration, and will | |
|
80 | 80 | launch a controller and a number of engines on the local machine. For instance, |
|
81 | 81 | to start one controller and 4 engines on localhost, just do:: |
|
82 | 82 | |
|
83 | $ ipclusterz start -n 4

83 | $ ipcluster start -n 4 | |
|
84 | 84 | |
|
85 | 85 | To see other command line options for the local mode, do:: |
|
86 | 86 | |
|
87 | $ ipclusterz -h

87 | $ ipcluster -h | |
|
88 | 88 | |
|
89 | 89 | |
|
90 | 90 | Configuring an IPython cluster |
@@ -92,25 +92,25 b' Configuring an IPython cluster' | |||
|
92 | 92 | |
|
93 | 93 | Cluster configurations are stored as `profiles`. You can create a new profile with:: |
|
94 | 94 | |
|
95 | $ ipclusterz create -p myprofile

95 | $ ipcluster create -p myprofile | |
|
96 | 96 | |
|
97 | This will create the directory :file:`IPYTHONDIR/clusterz_myprofile`, and populate it

97 | This will create the directory :file:`IPYTHONDIR/cluster_myprofile`, and populate it | |
|
98 | 98 | with the default configuration files for the three IPython cluster commands. Once |
|
99 | you edit those files, you can continue to call ipclusterz/ipcontrollerz/ipenginez

99 | you edit those files, you can continue to call ipcluster/ipcontroller/ipengine | |
|
100 | 100 | with no arguments beyond ``-p myprofile``, and any configuration will be maintained. |
|
101 | 101 | |
|
102 | 102 | There is no limit to the number of profiles you can have, so you can maintain a profile for each |
|
103 | 103 | of your common use cases. The default profile will be used whenever the |
|
104 | profile argument is not specified, so edit :file:`IPYTHONDIR/clusterz_default/*_config.py` to

104 | profile argument is not specified, so edit :file:`IPYTHONDIR/cluster_default/*_config.py` to | |
|
105 | 105 | represent your most common use case. |
|
106 | 106 | |
|
107 | 107 | The configuration files are loaded with commented-out settings and explanations, |
|
108 | 108 | which should cover most of the available possibilities. |
|
109 | 109 | |
|
110 | Using various batch systems with :command:`ipclusterz`

110 | Using various batch systems with :command:`ipcluster` | |
|
111 | 111 | ------------------------------------------------------ |
|
112 | 112 | |
|
113 | :command:`ipclusterz` has a notion of Launchers that can start controllers

113 | :command:`ipcluster` has a notion of Launchers that can start controllers | |
|
114 | 114 | and engines with various remote execution schemes. Currently supported |
|
115 | 115 | models include `mpiexec`, PBS-style (Torque, SGE), and Windows HPC Server. |
|
116 | 116 | |
@@ -120,7 +120,7 b' models include `mpiexec`, PBS-style (Torque, SGE), and Windows HPC Server.' | |||
|
120 | 120 | users can subclass and configure them to fit their own system that we |
|
121 | 121 | have not yet supported (such as Condor).
|
122 | 122 | |
|
123 | Using :command:`ipclusterz` in mpiexec/mpirun mode

123 | Using :command:`ipcluster` in mpiexec/mpirun mode | |
|
124 | 124 | -------------------------------------------------- |
|
125 | 125 | |
|
126 | 126 | |
@@ -132,11 +132,11 b' The mpiexec/mpirun mode is useful if you:' | |||
|
132 | 132 | |
|
133 | 133 | If these are satisfied, you can create a new profile:: |
|
134 | 134 | |
|
135 | $ ipclusterz create -p mpi

135 | $ ipcluster create -p mpi | |
|
136 | 136 | |
|
137 | and edit the file :file:`IPYTHONDIR/clusterz_mpi/ipclusterz_config.py`.

137 | and edit the file :file:`IPYTHONDIR/cluster_mpi/ipcluster_config.py`. | |
|
138 | 138 | |
|
139 | There, instruct ipclusterz to use the MPIExec launchers by adding the lines:

139 | There, instruct ipcluster to use the MPIExec launchers by adding the lines: | |
|
140 | 140 | |
|
141 | 141 | .. sourcecode:: python |
|
142 | 142 | |
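The body of the sourcecode block falls outside this hunk; judging from the launcher names used throughout this changeset, the added lines are presumably:

.. sourcecode:: python

    c.Global.controller_launcher = \
        'IPython.parallel.launcher.MPIExecControllerLauncher'
    c.Global.engine_launcher = \
        'IPython.parallel.launcher.MPIExecEngineSetLauncher'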
@@ -144,7 +144,7 b' There, instruct ipclusterz to use the MPIExec launchers by adding the lines:' | |||
|
144 | 144 | |
|
145 | 145 | If the default MPI configuration is correct, then you can now start your cluster, with:: |
|
146 | 146 | |
|
147 | $ ipclusterz start -n 4 -p mpi

147 | $ ipcluster start -n 4 -p mpi | |
|
148 | 148 | |
|
149 | 149 | This does the following: |
|
150 | 150 | |
@@ -166,7 +166,7 b' On newer MPI implementations (such as OpenMPI), this will work even if you' | |||
|
166 | 166 | don't make any calls to MPI or call :func:`MPI_Init`. However, older MPI |
|
167 | 167 | implementations actually require each process to call :func:`MPI_Init` upon |
|
168 | 168 | starting. The easiest way of having this done is to install the mpi4py |
|
169 | [mpi4py]_ package and then specify the ``c.MPI.use`` option in :file:`ipenginez_config.py`:

169 | [mpi4py]_ package and then specify the ``c.MPI.use`` option in :file:`ipengine_config.py`: | |
|
170 | 170 | |
|
171 | 171 | .. sourcecode:: python |
|
172 | 172 | |
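The block itself is cut off by the hunk; the setting being described is presumably the one-liner:

.. sourcecode:: python

    c.MPI.use = 'mpi4py'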
@@ -177,21 +177,21 b' having problems with this, you will likely have to use a custom Python' | |||
|
177 | 177 | executable that itself calls :func:`MPI_Init` at the appropriate time. |
|
178 | 178 | Fortunately, mpi4py comes with such a custom Python executable that is easy to |
|
179 | 179 | install and use. However, this custom Python executable approach will not work |
|
180 | with :command:`ipclusterz` currently.

180 | with :command:`ipcluster` currently. | |
|
181 | 181 | |
|
182 | 182 | More details on using MPI with IPython can be found :ref:`here <parallelmpi>`. |
|
183 | 183 | |
|
184 | 184 | |
|
185 | Using :command:`ipclusterz` in PBS mode

185 | Using :command:`ipcluster` in PBS mode | |
|
186 | 186 | --------------------------------------- |
|
187 | 187 | |
|
188 | 188 | The PBS mode uses the Portable Batch System [PBS]_ to start the engines. |
|
189 | 189 | |
|
190 | 190 | As usual, we will start by creating a fresh profile:: |
|
191 | 191 | |
|
192 | $ ipclusterz create -p pbs

192 | $ ipcluster create -p pbs | |
|
193 | 193 | |
|
194 | And in :file:`ipclusterz_config.py`, we will select the PBS launchers for the controller

194 | And in :file:`ipcluster_config.py`, we will select the PBS launchers for the controller | |
|
195 | 195 | and engines: |
|
196 | 196 | |
|
197 | 197 | .. sourcecode:: python |
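The launcher lines themselves lie outside this hunk; by analogy with the other launcher settings in this changeset, they would read:

.. sourcecode:: python

    c.Global.controller_launcher = \
        'IPython.parallel.launcher.PBSControllerLauncher'
    c.Global.engine_launcher = \
        'IPython.parallel.launcher.PBSEngineSetLauncher'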
@@ -213,7 +213,7 b' to specify your own. Here is a sample PBS script template:' | |||
|
213 | 213 | cd $$PBS_O_WORKDIR |
|
214 | 214 | export PATH=$$HOME/usr/local/bin |
|
215 | 215 | export PYTHONPATH=$$HOME/usr/local/lib/python2.7/site-packages |
|
216 | /usr/local/bin/mpiexec -n ${n} ipenginez --cluster_dir=${cluster_dir}

216 | /usr/local/bin/mpiexec -n ${n} ipengine --cluster_dir=${cluster_dir} | |
|
217 | 217 | |
|
218 | 218 | There are a few important points about this template: |
|
219 | 219 | |
@@ -232,8 +232,8 b' There are a few important points about this template:' | |||
|
232 | 232 | environment variables in the template, or in SGE, where the config lines start |
|
233 | 233 | with ``#$``, which will have to be ``#$$``. |
|
234 | 234 | |
|
235 | 4. Any options to :command:`ipenginez` can be given in the batch script

236 | template, or in :file:`ipenginez_config.py`.

235 | 4. Any options to :command:`ipengine` can be given in the batch script | |
|
236 | template, or in :file:`ipengine_config.py`. | |
|
237 | 237 | |
|
238 | 238 | 5. Depending on the configuration of your system, you may have to set
|
239 | 239 | environment variables in the script template. |
@@ -251,11 +251,11 b' The controller template should be similar, but simpler:' | |||
|
251 | 251 | cd $$PBS_O_WORKDIR |
|
252 | 252 | export PATH=$$HOME/usr/local/bin |
|
253 | 253 | export PYTHONPATH=$$HOME/usr/local/lib/python2.7/site-packages |
|
254 | ipcontrollerz --cluster_dir=${cluster_dir}

254 | ipcontroller --cluster_dir=${cluster_dir} | |
|
255 | 255 | |
|
256 | 256 | |
|
257 | 257 | Once you have created these scripts, save them with names like |
|
258 | :file:`pbs.engine.template`. Now you can load them into the :file:`ipclusterz_config` with:

258 | :file:`pbs.engine.template`. Now you can load them into the :file:`ipcluster_config` with: | |
|
259 | 259 | |
|
260 | 260 | .. sourcecode:: python |
|
261 | 261 | |
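The block is truncated here; by analogy with the controller-side line visible in the next hunk, the engine template would be loaded with something like:

.. sourcecode:: python

    c.PBSEngineSetLauncher.batch_template_file = "pbs.engine.template"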
@@ -264,12 +264,12 b' Once you have created these scripts, save them with names like' | |||
|
264 | 264 | c.PBSControllerLauncher.batch_template_file = "pbs.controller.template" |
|
265 | 265 | |
|
266 | 266 | |
|
267 | Alternately, you can just define the templates as strings inside :file:`ipclusterz_config`.

267 | Alternately, you can just define the templates as strings inside :file:`ipcluster_config`. | |
|
268 | 268 | |
|
269 | 269 | Whether you are using your own templates or our defaults, the extra configurables available are |
|
270 | 270 | the number of engines to launch (``$n``), and the batch system queue to which the jobs are to be

271 | 271 | submitted (``$queue``). These are configurables, and can be specified in
|
272 | :file:`ipclusterz_config`:

272 | :file:`ipcluster_config`: | |
|
273 | 273 | |
|
274 | 274 | .. sourcecode:: python |
|
275 | 275 | |
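As a hedged illustration (the exact trait names should be checked against the PBS section of the generated :file:`ipcluster_config.py`):

.. sourcecode:: python

    # trait name assumed -- verify against your generated config file:
    c.PBSLauncher.queue = 'veryshort.q'  # fills the $queue template variable

``$n`` is normally supplied on the command line, as in the ``-n 128`` example below.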
@@ -279,7 +279,7 b' submitted (``$queue``)). These are configurables, and can be specified in' | |||
|
279 | 279 | Note that assuming you are running PBS on a multi-node cluster, the Controller's default behavior |
|
280 | 280 | of listening only on localhost is likely too restrictive. In this case, also assuming the |
|
281 | 281 | nodes are safely behind a firewall, you can simply instruct the Controller to listen for |
|
282 | connections on all its interfaces, by adding in :file:`ipcontrollerz_config`:

282 | connections on all its interfaces, by adding in :file:`ipcontroller_config`: | |
|
283 | 283 | |
|
284 | 284 | .. sourcecode:: python |
|
285 | 285 | |
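A sketch of that setting, assuming the HubFactory object mentioned later in this changeset is the right home for it:

.. sourcecode:: python

    # assumption -- verify against your generated ipcontroller_config.py:
    c.HubFactory.ip = '0.0.0.0'  # listen on all interfaces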
@@ -287,9 +287,9 b' connections on all its interfaces, by adding in :file:`ipcontrollerz_config`:' | |||
|
287 | 287 | |
|
288 | 288 | You can now run the cluster with:: |
|
289 | 289 | |
|
290 | $ ipclusterz start -p pbs -n 128

290 | $ ipcluster start -p pbs -n 128 | |
|
291 | 291 | |
|
292 | Additional configuration options can be found in the PBS section of :file:`ipclusterz_config`.

292 | Additional configuration options can be found in the PBS section of :file:`ipcluster_config`. | |
|
293 | 293 | |
|
294 | 294 | .. note:: |
|
295 | 295 | |
@@ -298,12 +298,12 b' Additional configuration options can be found in the PBS section of :file:`ipclu' | |||
|
298 | 298 | and with further configuration in similar batch systems like Condor. |
|
299 | 299 | |
|
300 | 300 | |
|
301 | Using :command:`ipclusterz` in SSH mode

301 | Using :command:`ipcluster` in SSH mode | |
|
302 | 302 | --------------------------------------- |
|
303 | 303 | |
|
304 | 304 | |
|
305 | The SSH mode uses :command:`ssh` to execute :command:`ipenginez` on remote

306 | nodes and :command:`ipcontrollerz` can be run remotely as well, or on localhost.

305 | The SSH mode uses :command:`ssh` to execute :command:`ipengine` on remote | |
|
306 | nodes and :command:`ipcontroller` can be run remotely as well, or on localhost. | |
|
307 | 307 | |
|
308 | 308 | .. note:: |
|
309 | 309 | |
@@ -312,9 +312,9 b' nodes and :command:`ipcontrollerz` can be run remotely as well, or on localhost.' | |||
|
312 | 312 | |
|
313 | 313 | As usual, we start by creating a clean profile:: |
|
314 | 314 | |
|
315 | $ ipclusterz create -p ssh

315 | $ ipcluster create -p ssh | |
|
316 | 316 | |
|
317 | To use this mode, select the SSH launchers in :file:`ipclusterz_config.py`:

317 | To use this mode, select the SSH launchers in :file:`ipcluster_config.py`: | |
|
318 | 318 | |
|
319 | 319 | .. sourcecode:: python |
|
320 | 320 | |
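The selection itself falls outside the hunk; following the naming pattern of the other launchers, it would be:

.. sourcecode:: python

    c.Global.controller_launcher = \
        'IPython.parallel.launcher.SSHControllerLauncher'
    c.Global.engine_launcher = \
        'IPython.parallel.launcher.SSHEngineSetLauncher'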
@@ -331,9 +331,9 b" The controller's remote location and configuration can be specified:" | |||
|
331 | 331 | # c.SSHControllerLauncher.hostname = 'controller.example.com' |
|
332 | 332 | # c.SSHControllerLauncher.user = os.environ.get('USER','username') |
|
333 | 333 | |
|
334 | # Set the arguments to be passed to ipcontrollerz

335 | # note that remotely launched ipcontrollerz will not get the contents of

336 | # the local ipcontrollerz_config.py unless it resides on the *remote host*

334 | # Set the arguments to be passed to ipcontroller | |
|
335 | # note that remotely launched ipcontroller will not get the contents of | |
|
336 | # the local ipcontroller_config.py unless it resides on the *remote host* | |
|
337 | 337 | # in the location specified by the --cluster_dir argument. |
|
338 | 338 | # c.SSHControllerLauncher.program_args = ['-r', '-ip', '0.0.0.0', '--cluster_dir', '/path/to/cd'] |
|
339 | 339 | |
@@ -357,46 +357,46 b' on that host.' | |||
|
357 | 357 | * The `engines` dict, where the keys are the host we want to run engines on and |
|
358 | 358 | the value is the number of engines to run on that host. |
|
359 | 359 | * on host3, the value is a tuple, where the number of engines is first, and the arguments |
|
360 | to be passed to :command:`ipenginez` are the second element.

360 | to be passed to :command:`ipengine` are the second element. | |
|
361 | 361 | |
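A sketch of the `engines` dict just described (hostnames and counts are illustrative):

.. sourcecode:: python

    c.SSHEngineSetLauncher.engines = {
        'host1': 2,
        'host2': 5,
        'host3': (1, ['--cluster_dir', '/home/user/cluster_ssh']),
    }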
|
362 | 362 | For engines without explicitly specified arguments, the default arguments are set in |
|
363 | 363 | a single location: |
|
364 | 364 | |
|
365 | 365 | .. sourcecode:: python |
|
366 | 366 | |
|
367 | c.SSHEngineSetLauncher.engine_args = ['--cluster_dir', '/path/to/clusterz_ssh']

367 | c.SSHEngineSetLauncher.engine_args = ['--cluster_dir', '/path/to/cluster_ssh'] | |
|
368 | 368 | |
|
369 | Current limitations of the SSH mode of :command:`ipclusterz` are:

369 | Current limitations of the SSH mode of :command:`ipcluster` are: | |
|
370 | 370 | |
|
371 | 371 | * Untested on Windows. Would require a working :command:`ssh` on Windows. |
|
372 | 372 | Also, we are using shell scripts to setup and execute commands on remote |
|
373 | 373 | hosts. |
|
374 | 374 | * No file movement - |
|
375 | 375 | |
|
376 | Using the :command:`ipcontrollerz` and :command:`ipenginez` commands

376 | Using the :command:`ipcontroller` and :command:`ipengine` commands | |
|
377 | 377 | ==================================================================== |
|
378 | 378 | |
|
379 | It is also possible to use the :command:`ipcontrollerz` and :command:`ipenginez`

379 | It is also possible to use the :command:`ipcontroller` and :command:`ipengine` | |
|
380 | 380 | commands to start your controller and engines. This approach gives you full |
|
381 | 381 | control over all aspects of the startup process. |
|
382 | 382 | |
|
383 | 383 | Starting the controller and engine on your local machine |
|
384 | 384 | -------------------------------------------------------- |
|
385 | 385 | |
|
386 | To use :command:`ipcontrollerz` and :command:`ipenginez` to start things on your

386 | To use :command:`ipcontroller` and :command:`ipengine` to start things on your | |
|
387 | 387 | local machine, do the following. |
|
388 | 388 | |
|
389 | 389 | First start the controller:: |
|
390 | 390 | |
|
391 | $ ipcontrollerz

391 | $ ipcontroller | |
|
392 | 392 | |
|
393 | 393 | Next, start however many instances of the engine you want using (repeatedly) |
|
394 | 394 | the command:: |
|
395 | 395 | |
|
396 | $ ipenginez

396 | $ ipengine | |
|
397 | 397 | |
|
398 | 398 | The engines should start and automatically connect to the controller using the |
|
399 | JSON files in :file:`~/.ipython/clusterz_default/security`. You are now ready to use the

399 | JSON files in :file:`~/.ipython/cluster_default/security`. You are now ready to use the | |
|
400 | 400 | controller and engines from IPython. |
|
401 | 401 | |
|
402 | 402 | .. warning:: |
@@ -418,18 +418,18 b' Starting the controller and engines on different hosts' | |||
|
418 | 418 | When the controller and engines are running on different hosts, things are |
|
419 | 419 | slightly more complicated, but the underlying ideas are the same: |
|
420 | 420 | |
|
421 | 1. Start the controller on a host using :command:`ipcontrollerz`.

421 | 1. Start the controller on a host using :command:`ipcontroller`. | |
|
422 | 422 | 2. Copy :file:`ipcontroller-engine.json` from :file:`~/.ipython/cluster_<profile>/security` on |
|
423 | 423 | the controller's host to the host where the engines will run. |
|
424 | 3. Use :command:`ipenginez` on the engine's hosts to start the engines.

424 | 3. Use :command:`ipengine` on the engine's hosts to start the engines. | |
|
425 | 425 | |
|
426 | The only thing you have to be careful of is to tell :command:`ipenginez` where

426 | The only thing you have to be careful of is to tell :command:`ipengine` where | |
|
427 | 427 | the :file:`ipcontroller-engine.json` file is located. There are two ways you |
|
428 | 428 | can do this: |
|
429 | 429 | |
|
430 | 430 | * Put :file:`ipcontroller-engine.json` in the :file:`~/.ipython/cluster_<profile>/security` |
|
431 | 431 | directory on the engine's host, where it will be found automatically. |
|
432 | * Call :command:`ipenginez` with the ``--file=full_path_to_the_file``

432 | * Call :command:`ipengine` with the ``--file=full_path_to_the_file`` | |
|
433 | 433 | flag. |
|
434 | 434 | |
|
435 | 435 | The ``--file`` flag works like this:: |
@@ -455,7 +455,7 b' any point in the future.' | |||
|
455 | 455 | To do this, the only thing you have to do is specify the `-r` flag, so that |
|
456 | 456 | the connection information in the JSON files remains accurate:: |
|
457 | 457 | |
|
458 | $ ipcontrollerz -r

458 | $ ipcontroller -r | |
|
459 | 459 | |
|
460 | 460 | Then, just copy the JSON files over the first time and you are set. You can |
|
461 | 461 | start and stop the controller and engines as many times as you want in the
@@ -478,7 +478,7 b' IPython and can be found in the directory :file:`~/.ipython/cluster_<profile>/lo' | |||
|
478 | 478 | Sending the log files to us will often help us to debug any problems. |
|
479 | 479 | |
|
480 | 480 | |
|
481 | Configuring `ipcontrollerz`

481 | Configuring `ipcontroller` | |
|
482 | 482 | --------------------------- |
|
483 | 483 | |
|
484 | 484 | Ports and addresses |
@@ -493,7 +493,7 b' Database Backend' | |||
|
493 | 493 | |
|
494 | 494 | |
|
495 | 495 | |
|
496 | Configuring `ipenginez`

496 | Configuring `ipengine` | |
|
497 | 497 | ----------------------- |
|
498 | 498 | |
|
499 | 499 | .. note:: |
@@ -130,7 +130,7 b' way.' | |||
|
130 | 130 | There is exactly one key per cluster - it must be the same everywhere. Typically, the |
|
131 | 131 | controller creates this key, and stores it in the private connection files |
|
132 | 132 | `ipython-{engine|client}.json`. These files are typically stored in the |
|
133 | `~/.ipython/clusterz_<profile>/security` directory, and are maintained as readable only by

133 | `~/.ipython/cluster_<profile>/security` directory, and are maintained as readable only by | |
|
134 | 134 | the owner, just as is common practice with a user's keys in their `.ssh` directory. |
|
135 | 135 | |
|
136 | 136 | .. warning:: |
@@ -22,9 +22,9 b' Starting the IPython controller and engines' | |||
|
22 | 22 | |
|
23 | 23 | To follow along with this tutorial, you will need to start the IPython |
|
24 | 24 | controller and four IPython engines. The simplest way of doing this is to use |
|
25 | the :command:`ipclusterz` command::

25 | the :command:`ipcluster` command:: | |
|
26 | 26 | |
|
27 | $ ipclusterz start -n 4

27 | $ ipcluster start -n 4 | |
|
28 | 28 | |
|
29 | 29 | For more detailed information about starting the controller and engines, see |
|
30 | 30 | our :ref:`introduction <ip1par>` to using IPython for parallel computing. |
@@ -321,16 +321,16 b' Schedulers' | |||
|
321 | 321 | There are a variety of valid ways to determine where jobs should be assigned in a |
|
322 | 322 | load-balancing situation. In IPython, we support several standard schemes, and |
|
323 | 323 | even make it easy to define your own. The scheme can be selected via the ``--scheme`` |
|
324 | argument to :command:`ipcontrollerz`, or in the :attr:`HubFactory.scheme` attribute

324 | argument to :command:`ipcontroller`, or in the :attr:`HubFactory.scheme` attribute | |
|
325 | 325 | of a controller config object. |
|
326 | 326 | |
|
327 | 327 | The built-in routing schemes: |
|
328 | 328 | |
|
329 | 329 | To select one of these schemes, simply do:: |
|
330 | 330 | |
|
331 | $ ipcontrollerz --scheme <schemename>

331 | $ ipcontroller --scheme <schemename> | |
|
332 | 332 | for instance: |
|
333 | $ ipcontrollerz --scheme lru

333 | $ ipcontroller --scheme lru | |
|
334 | 334 | |
|
335 | 335 | lru: Least Recently Used |
|
336 | 336 |
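Equivalently, the scheme can be fixed in the controller's config file via the :attr:`HubFactory.scheme` attribute mentioned above:

.. sourcecode:: python

    c.HubFactory.scheme = 'lru'  # same effect as --scheme lru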
|
1 | NO CONTENT: file renamed from docs/source/parallelz/parallel_transition.txt to docs/source/parallel/parallel_transition.txt |
@@ -144,25 +144,25 b' in parallel on the engines from within the IPython shell using an appropriate' | |||
|
144 | 144 | client. This includes the ability to interact with, plot and visualize data |
|
145 | 145 | from the engines. |
|
146 | 146 | |
|
147 | IPython has a command line program called :command:`ipclusterz` that automates

147 | IPython has a command line program called :command:`ipcluster` that automates | |
|
148 | 148 | all aspects of starting the controller and engines on the compute nodes. |
|
149 | :command:`ipclusterz` has full support for the Windows HPC job scheduler,

150 | meaning that :command:`ipclusterz` can use this job scheduler to start the

149 | :command:`ipcluster` has full support for the Windows HPC job scheduler, | |
|
150 | meaning that :command:`ipcluster` can use this job scheduler to start the | |
|
151 | 151 | controller and engines. In our experience, the Windows HPC job scheduler is |
|
152 | 152 | particularly well suited for interactive applications, such as IPython. Once |
|
153 | :command:`ipclusterz` is configured properly, a user can start an IPython

153 | :command:`ipcluster` is configured properly, a user can start an IPython | |
|
154 | 154 | cluster from their local workstation almost instantly, without having to log |
|
155 | 155 | on to the head node (as is typically required by Unix based job schedulers). |
|
156 | 156 | This enables a user to move seamlessly between serial and parallel |
|
157 | 157 | computations. |
|
158 | 158 | |
|
159 | In this section we show how to use :command:`ipclusterz` to start an IPython

159 | In this section we show how to use :command:`ipcluster` to start an IPython | |
|
160 | 160 | cluster using the Windows HPC Server 2008 job scheduler. To make sure that |
|
161 | :command:`ipclusterz` is installed and working properly, you should first try

161 | :command:`ipcluster` is installed and working properly, you should first try | |
|
162 | 162 | to start an IPython cluster on your local host. To do this, open a Windows |
|
163 | 163 | Command Prompt and type the following command:: |
|
164 | 164 | |
|
165 | ipclusterz start -n 2

165 | ipcluster start -n 2 | |
|
166 | 166 | |
|
167 | 167 | You should see a number of messages printed to the screen, ending with |
|
168 | 168 | "IPython cluster: started". The result should look something like the following |
@@ -174,12 +174,12 b' At this point, the controller and two engines are running on your local host.' | |||
|
174 | 174 | This configuration is useful for testing and for situations where you want to |
|
175 | 175 | take advantage of multiple cores on your local computer. |
|
176 | 176 | |
|
177 | Now that we have confirmed that :command:`ipclusterz` is working properly, we

177 | Now that we have confirmed that :command:`ipcluster` is working properly, we | |
|
178 | 178 | describe how to configure and run an IPython cluster on an actual compute |
|
179 | 179 | cluster running Windows HPC Server 2008. Here is an outline of the needed |
|
180 | 180 | steps: |
|
181 | 181 | |
|
182 | 1. Create a cluster profile using: ``ipclusterz create -p mycluster``

182 | 1. Create a cluster profile using: ``ipcluster create -p mycluster`` | |
|
183 | 183 | |
|
184 | 184 | 2. Edit configuration files in the directory :file:`.ipython\\cluster_mycluster` |
|
185 | 185 | |
@@ -191,7 +191,7 b' Creating a cluster profile' | |||
|
191 | 191 | In most cases, you will have to create a cluster profile to use IPython on a |
|
192 | 192 | cluster. A cluster profile is a name (like "mycluster") that is associated |
|
193 | 193 | with a particular cluster configuration. The profile name is used by |
|
194 | :command:`ipclusterz` when working with the cluster.

194 | :command:`ipcluster` when working with the cluster. | |
|
195 | 195 | |
|
196 | 196 | Associated with each cluster profile is a cluster directory. This cluster |
|
197 | 197 | directory is a specially named directory (typically located in the |
@@ -204,10 +204,10 b' security keys. The naming convention for cluster directories is:' | |||
|
204 | 204 | To create a new cluster profile (named "mycluster") and the associated cluster |
|
205 | 205 | directory, type the following command at the Windows Command Prompt:: |
|
206 | 206 | |
|
207 | ipclusterz create -p mycluster

207 | ipcluster create -p mycluster | |
|
208 | 208 | |
|
209 | 209 | The output of this command is shown in the screenshot below. Notice how |
|
210 | :command:`ipclusterz` prints out the location of the newly created cluster

210 | :command:`ipcluster` prints out the location of the newly created cluster | |
|
211 | 211 | directory. |
|
212 | 212 | |
|
213 | 213 | .. image:: ipcluster_create.* |
@@ -218,19 +218,19 b' Configuring a cluster profile' | |||
|
218 | 218 | Next, you will need to configure the newly created cluster profile by editing |
|
219 | 219 | the following configuration files in the cluster directory: |
|
220 | 220 | |
|
221 | * :file:`ipclusterz_config.py`

221 | * :file:`ipcluster_config.py` | |
|
222 | 222 | * :file:`ipcontroller_config.py` |
|
223 | 223 | * :file:`ipengine_config.py` |
|
224 | 224 | |
|
225 | When :command:`ipclusterz` is run, these configuration files are used to

225 | When :command:`ipcluster` is run, these configuration files are used to | |
|
226 | 226 | determine how the engines and controller will be started. In most cases, |
|
227 | 227 | you will only have to set a few of the attributes in these files. |
|
228 | 228 | |
|
229 | To configure :command:`ipclusterz` to use the Windows HPC job scheduler, you

229 | To configure :command:`ipcluster` to use the Windows HPC job scheduler, you | |
|
230 | 230 | will need to edit the following attributes in the file |
|
231 | :file:`ipclusterz_config.py`::

231 | :file:`ipcluster_config.py`:: | |
|
232 | 232 | |
|
233 | # Set these at the top of the file to tell ipclusterz to use the

233 | # Set these at the top of the file to tell ipcluster to use the | |
|
234 | 234 | # Windows HPC job scheduler. |
|
235 | 235 | c.Global.controller_launcher = \ |
|
236 | 236 | 'IPython.parallel.launcher.WindowsHPCControllerLauncher' |
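The matching engine-side line is cut off by this hunk; by analogy with the controller setting above, it would be:

.. sourcecode:: python

    c.Global.engine_launcher = \
        'IPython.parallel.launcher.WindowsHPCEngineSetLauncher'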
@@ -257,15 +257,15 b' Starting the cluster profile' | |||
|
257 | 257 | Once a cluster profile has been configured, starting an IPython cluster using |
|
258 | 258 | the profile is simple:: |
|
259 | 259 | |
|
260 | ipclusterz start -p mycluster -n 32

260 | ipcluster start -p mycluster -n 32 | |
|
261 | 261 | |
|
262 | The ``-n`` option tells :command:`ipclusterz` how many engines to start (in

262 | The ``-n`` option tells :command:`ipcluster` how many engines to start (in | |
|
263 | 263 | this case 32). Stopping the cluster is as simple as typing Control-C. |
|
264 | 264 | |
|
265 | 265 | Using the HPC Job Manager |
|
266 | 266 | ------------------------- |
|
267 | 267 | |
|
268 | When ``ipclusterz start`` is run the first time, :command:`ipclusterz` creates

268 | When ``ipcluster start`` is run the first time, :command:`ipcluster` creates | |
|
269 | 269 | two XML job description files in the cluster directory: |
|
270 | 270 | |
|
271 | 271 | * :file:`ipcontroller_job.xml` |
@@ -273,8 +273,8 b' two XML job description files in the cluster directory:' | |||
|
273 | 273 | |
|
274 | 274 | Once these files have been created, they can be imported into the HPC Job |
|
275 | 275 | Manager application. Then, the controller and engines for that profile can be |
|
276 | started using the HPC Job Manager directly, without using :command:`ipclusterz`.

277 | However, anytime the cluster profile is re-configured, ``ipclusterz start``

276 | started using the HPC Job Manager directly, without using :command:`ipcluster`. | |
|
277 | However, anytime the cluster profile is re-configured, ``ipcluster start`` | |
|
278 | 278 | must be run again to regenerate the XML job description files. The |
|
279 | 279 | following screenshot shows what the HPC Job Manager interface looks like |
|
280 | 280 | with a running IPython cluster. |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/simpledag.pdf to docs/source/parallel/simpledag.pdf |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/simpledag.png to docs/source/parallel/simpledag.png |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/single_digits.pdf to docs/source/parallel/single_digits.pdf |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/single_digits.png to docs/source/parallel/single_digits.png |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/two_digit_counts.pdf to docs/source/parallel/two_digit_counts.pdf |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/two_digit_counts.png to docs/source/parallel/two_digit_counts.png |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/winhpc_index.txt to docs/source/parallel/winhpc_index.txt |
@@ -215,16 +215,16 b" if 'setuptools' in sys.modules:" | |||
|
215 | 215 | 'ipython = IPython.frontend.terminal.ipapp:launch_new_instance', |
|
216 | 216 | 'ipython-qtconsole = IPython.frontend.qt.console.ipythonqt:main', |
|
217 | 217 | 'pycolor = IPython.utils.PyColorize:main', |
|
218 | 'ipcontrollerz = IPython.zmq.parallel.ipcontrollerapp:launch_new_instance',

219 | 'ipenginez = IPython.zmq.parallel.ipengineapp:launch_new_instance',

220 | 'iploggerz = IPython.zmq.parallel.iploggerapp:launch_new_instance',

221 | 'ipclusterz = IPython.zmq.parallel.ipclusterapp:launch_new_instance',

218 | 'ipcontroller = IPython.parallel.ipcontrollerapp:launch_new_instance', | |
|
219 | 'ipengine = IPython.parallel.ipengineapp:launch_new_instance', | |
|
220 | 'iplogger = IPython.parallel.iploggerapp:launch_new_instance', | |
|
221 | 'ipcluster = IPython.parallel.ipclusterapp:launch_new_instance', | |
|
222 | 222 | 'iptest = IPython.testing.iptest:main', |
|
223 | 223 | 'irunner = IPython.lib.irunner:main' |
|
224 | 224 | ] |
|
225 | 225 | } |
|
226 | 226 | setup_args['extras_require'] = dict( |
|
227 | zmq = 'pyzmq>=2.0.10', | |
|
227 | zmq = 'pyzmq>=2.0.10.1', | |
|
228 | 228 | doc='Sphinx>=0.3', |
|
229 | 229 | test='nose>=0.10.1', |
|
230 | 230 | security='pyOpenSSL>=0.6' |
@@ -127,6 +127,7 b' def find_packages():' | |||
|
127 | 127 | add_package(packages, 'frontend.qt.console', tests=True) |
|
128 | 128 | add_package(packages, 'frontend.terminal', tests=True) |
|
129 | 129 | add_package(packages, 'lib', tests=True) |
|
130 | add_package(packages, 'parallel', tests=True) | |
|
130 | 131 | add_package(packages, 'quarantine', tests=True) |
|
131 | 132 | add_package(packages, 'scripts') |
|
132 | 133 | add_package(packages, 'testing', tests=True) |
@@ -134,7 +135,6 b' def find_packages():' | |||
|
134 | 135 | add_package(packages, 'utils', tests=True) |
|
135 | 136 | add_package(packages, 'zmq') |
|
136 | 137 | add_package(packages, 'zmq.pylab') |
|
137 | add_package(packages, 'parallel') | |
|
138 | 138 | return packages |
|
139 | 139 | |
|
140 | 140 | #--------------------------------------------------------------------------- |
@@ -265,10 +265,10 b' def find_scripts():' | |||
|
265 | 265 | parallel_scripts = pjoin('IPython','parallel','scripts') |
|
266 | 266 | main_scripts = pjoin('IPython','scripts') |
|
267 | 267 | scripts = [ |
|
268 | pjoin(parallel_scripts, 'ipenginez'),

269 | pjoin(parallel_scripts, 'ipcontrollerz'),

270 | pjoin(parallel_scripts, 'ipclusterz'),

271 | pjoin(parallel_scripts, 'iploggerz'),

268 | pjoin(parallel_scripts, 'ipengine'), | |
|
269 | pjoin(parallel_scripts, 'ipcontroller'), | |
|
270 | pjoin(parallel_scripts, 'ipcluster'), | |
|
271 | pjoin(parallel_scripts, 'iplogger'), | |
|
272 | 272 | pjoin(main_scripts, 'ipython'), |
|
273 | 273 | pjoin(main_scripts, 'ipython-qtconsole'), |
|
274 | 274 | pjoin(main_scripts, 'pycolor'), |
|
1 | NO CONTENT: file was removed |
|
1 | NO CONTENT: file was removed |
|
1 | NO CONTENT: file was removed |
|
1 | NO CONTENT: file was removed |