@@ -1,184 +1,241 @@
 import os
 
 c = get_config()
 
 #-----------------------------------------------------------------------------
 # Select which launchers to use
 #-----------------------------------------------------------------------------
 
 # This allows you to control what method is used to start the controller
 # and engines. The following methods are currently supported:
 # - Start as a regular process on localhost.
 # - Start using mpiexec.
 # - Start using the Windows HPC Server 2008 scheduler
-# - Start using PBS
+# - Start using PBS/SGE
 # - Start using SSH
 
 
 # The selected launchers can be configured below.
 
 # Options are:
 # - LocalControllerLauncher
 # - MPIExecControllerLauncher
 # - PBSControllerLauncher
+# - SGEControllerLauncher
 # - WindowsHPCControllerLauncher
-# c.Global.controller_launcher = 'IPython.
+# c.Global.controller_launcher = 'IPython.parallel.launcher.LocalControllerLauncher'
+# c.Global.controller_launcher = 'IPython.parallel.launcher.PBSControllerLauncher'
 
 # Options are:
 # - LocalEngineSetLauncher
 # - MPIExecEngineSetLauncher
 # - PBSEngineSetLauncher
+# - SGEEngineSetLauncher
 # - WindowsHPCEngineSetLauncher
-# c.Global.engine_launcher = 'IPython.
+# c.Global.engine_launcher = 'IPython.parallel.launcher.LocalEngineSetLauncher'
 
 #-----------------------------------------------------------------------------
 # Global configuration
 #-----------------------------------------------------------------------------
 
 # The default number of engines that will be started. This is overridden by
 # the -n command line option: "ipcluster start -n 4"
 # c.Global.n = 2
 
 # Log to a file in cluster_dir/log, otherwise just log to sys.stdout.
 # c.Global.log_to_file = False
 
 # Remove old logs from cluster_dir/log before starting.
 # c.Global.clean_logs = True
 
 # The working directory for the process. The application will use os.chdir
 # to change to this directory before starting.
 # c.Global.work_dir = os.getcwd()
 
 
 #-----------------------------------------------------------------------------
 # Local process launchers
 #-----------------------------------------------------------------------------
 
 # The command line arguments to call the controller with.
 # c.LocalControllerLauncher.controller_args = \
 #     ['--log-to-file','--log-level', '40']
 
 # The working directory for the engines
 # c.LocalEngineSetLauncher.work_dir = u''
 
 # Command line argument passed to the engines.
 # c.LocalEngineSetLauncher.engine_args = ['--log-to-file','--log-level', '40']
 
 #-----------------------------------------------------------------------------
 # MPIExec launchers
 #-----------------------------------------------------------------------------
 
-# The mpiexec/mpirun command to use in
-# c.MPIExec
+# The mpiexec/mpirun command to use in both the controller and engines.
+# c.MPIExecLauncher.mpi_cmd = ['mpiexec']
 
 # Additional arguments to pass to the actual mpiexec command.
+# c.MPIExecLauncher.mpi_args = []
+
+# The mpiexec/mpirun command and args can be overridden if they should be
+# different for the controller and engines.
+# c.MPIExecControllerLauncher.mpi_cmd = ['mpiexec']
 # c.MPIExecControllerLauncher.mpi_args = []
+# c.MPIExecEngineSetLauncher.mpi_cmd = ['mpiexec']
+# c.MPIExecEngineSetLauncher.mpi_args = []
 
 # The command line argument to call the controller with.
 # c.MPIExecControllerLauncher.controller_args = \
 #     ['--log-to-file','--log-level', '40']
 
-
-# The mpiexec/mpirun command to use in started the controller.
-# c.MPIExecEngineSetLauncher.mpi_cmd = ['mpiexec']
-
-# Additional arguments to pass to the actual mpiexec command.
-# c.MPIExecEngineSetLauncher.mpi_args = []
-
 # Command line argument passed to the engines.
 # c.MPIExecEngineSetLauncher.engine_args = ['--log-to-file','--log-level', '40']
 
 # The default number of engines to start if not given elsewhere.
 # c.MPIExecEngineSetLauncher.n = 1
 
 #-----------------------------------------------------------------------------
 # SSH launchers
 #-----------------------------------------------------------------------------
 
-# Todo
+# ipclusterz can be used to launch controller and engines remotely via ssh.
+# Note that currently ipclusterz does not do any file distribution, so if
+# machines are not on a shared filesystem, config and json files must be
+# distributed. For this reason, reuse_files defaults to True on an
+# ssh-launched Controller. This flag can be overridden via the program_args
+# attribute of c.SSHControllerLauncher.
+
+# Set the ssh cmd for launching remote commands. The default is ['ssh'].
+# c.SSHLauncher.ssh_cmd = ['ssh']
+
+# Set the args passed to the ssh cmd for launching remote commands.
+# c.SSHLauncher.ssh_args = ['tt']
+
+# Set the user and hostname for the controller
+# c.SSHControllerLauncher.hostname = 'controller.example.com'
+# c.SSHControllerLauncher.user = os.environ.get('USER','username')
+
+# Set the arguments to be passed to ipcontrollerz. Note that remotely
+# launched ipcontrollerz will not get the contents of the local
+# ipcontrollerz_config.py unless it resides on the *remote host* in the
+# location specified by the --cluster_dir argument.
+# c.SSHControllerLauncher.program_args = ['-r', '-ip', '0.0.0.0', '--cluster_dir', '/path/to/cd']
+
+# Set the default args passed to ipenginez for SSH launched engines
+# c.SSHEngineSetLauncher.engine_args = ['--mpi', 'mpi4py']
 
+# SSH engines are launched as a dict of locations/n-engines.
+# If a value is a tuple instead of an int, it is assumed to be of the form
+# (n, [args]), setting the arguments passed to ipenginez on `host`;
+# otherwise, c.SSHEngineSetLauncher.engine_args will be used as the default.
+
+# In this case, there will be 3 engines at my.example.com, and
+# 2 at you@ipython.scipy.org with a special json connector location.
+# c.SSHEngineSetLauncher.engines = {'my.example.com' : 3,
+#                                   'you@ipython.scipy.org' : (2, ['-f', '/path/to/ipcontroller-engine.json'])
+#                                   }
 
 #-----------------------------------------------------------------------------
 # Unix batch (PBS) scheduler launchers
 #-----------------------------------------------------------------------------
 
+# SGE and PBS are very similar. All configurables in this section named
+# 'PBS*' also exist as 'SGE*'.
+
 # The command line program to use to submit a PBS job.
-# c.PBS
+# c.PBSLauncher.submit_command = ['qsub']
 
 # The command line program to use to delete a PBS job.
-# c.PBS
+# c.PBSLauncher.delete_command = ['qdel']
+
+# The PBS queue in which the job should run
+# c.PBSLauncher.queue = 'myqueue'
 
 # A regular expression that takes the output of qsub and finds the job id.
-# c.PBS
+# c.PBSLauncher.job_id_regexp = r'\d+'
+
+# If for some reason the Controller and Engines need different options above,
+# they can be set as c.PBSControllerLauncher.<option>, etc.
+
+# PBS and SGE have default templates, but you can specify your own, either
+# as strings or from files, as described here:
 
 # The batch submission script used to start the controller. This is where
-# environment variables would be setup, etc. This string is interp
+# environment variables would be set up, etc. This string is interpreted
 # using the Itpl module in IPython.external. Basically, you can use ${n} for
 # the number of engines and ${cluster_dir} for the cluster_dir.
 # c.PBSControllerLauncher.batch_template = """
+# #PBS -N ipcontroller
+# #PBS -q $queue
+#
+# ipcontrollerz --cluster-dir $cluster_dir
+# """
+
+# You can also load this template from a file
+# c.PBSControllerLauncher.batch_template_file = u"/path/to/my/template.sh"
 
 # The name of the instantiated batch script that will actually be used to
 # submit the job. This will be written to the cluster directory.
-# c.PBSControllerLauncher.batch_file_name = u'pbs_
+# c.PBSControllerLauncher.batch_file_name = u'pbs_controller'
 
-
-# The command line program to use to submit a PBS job.
-# c.PBSEngineSetLauncher.submit_command = 'qsub'
-
-# The command line program to use to delete a PBS job.
-# c.PBSEngineSetLauncher.delete_command = 'qdel'
-
-# A regular expression that takes the output of qsub and find the job id.
-# c.PBSEngineSetLauncher.job_id_regexp = r'\d+'
-
 # The batch submission script used to start the engines. This is where
-# environment variables would be setup, etc. This string is interp
+# environment variables would be set up, etc. This string is interpreted
 # using the Itpl module in IPython.external. Basically, you can use ${n} for
 # the number of engines and ${cluster_dir} for the cluster_dir.
 # c.PBSEngineSetLauncher.batch_template = """
+# #PBS -N ipengine
+# #PBS -l nprocs=$n
+#
+# ipenginez --cluster-dir $cluster_dir
+# """
+
+# You can also load this template from a file
+# c.PBSEngineSetLauncher.batch_template_file = u"/path/to/my/template.sh"
 
 # The name of the instantiated batch script that will actually be used to
 # submit the job. This will be written to the cluster directory.
-# c.PBSEngineSetLauncher.batch_file_name = u'pbs_
+# c.PBSEngineSetLauncher.batch_file_name = u'pbs_engines'
 
 
 #-----------------------------------------------------------------------------
 # Windows HPC Server 2008 launcher configuration
 #-----------------------------------------------------------------------------
 
 # c.IPControllerJob.job_name = 'IPController'
 # c.IPControllerJob.is_exclusive = False
 # c.IPControllerJob.username = r'USERDOMAIN\USERNAME'
 # c.IPControllerJob.priority = 'Highest'
 # c.IPControllerJob.requested_nodes = ''
 # c.IPControllerJob.project = 'MyProject'
 
 # c.IPControllerTask.task_name = 'IPController'
 # c.IPControllerTask.controller_cmd = [u'ipcontroller.exe']
 # c.IPControllerTask.controller_args = ['--log-to-file', '--log-level', '40']
 # c.IPControllerTask.environment_variables = {}
 
 # c.WindowsHPCControllerLauncher.scheduler = 'HEADNODE'
 # c.WindowsHPCControllerLauncher.job_file_name = u'ipcontroller_job.xml'
 
 
 # c.IPEngineSetJob.job_name = 'IPEngineSet'
 # c.IPEngineSetJob.is_exclusive = False
 # c.IPEngineSetJob.username = r'USERDOMAIN\USERNAME'
 # c.IPEngineSetJob.priority = 'Highest'
 # c.IPEngineSetJob.requested_nodes = ''
 # c.IPEngineSetJob.project = 'MyProject'
 
 # c.IPEngineTask.task_name = 'IPEngine'
 # c.IPEngineTask.engine_cmd = [u'ipengine.exe']
 # c.IPEngineTask.engine_args = ['--log-to-file', '--log-level', '40']
 # c.IPEngineTask.environment_variables = {}
 
 # c.WindowsHPCEngineSetLauncher.scheduler = 'HEADNODE'
 # c.WindowsHPCEngineSetLauncher.job_file_name = u'ipengineset_job.xml'
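
For orientation, here is a minimal sketch of how the launcher options in the ipcluster config above combine for a PBS/SGE site. The queue name, engine count, and template body are illustrative assumptions, not shipped defaults:

c.Global.controller_launcher = 'IPython.parallel.launcher.PBSControllerLauncher'
c.Global.engine_launcher = 'IPython.parallel.launcher.PBSEngineSetLauncher'
c.Global.n = 8                    # assumption: eight engines for this site
c.PBSLauncher.queue = 'batch'     # assumption: site-specific queue name
# The engine template interpolates $n and $cluster_dir as described above.
c.PBSEngineSetLauncher.batch_template = """
#PBS -N ipengine
#PBS -l nprocs=$n

ipenginez --cluster-dir $cluster_dir
"""
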
@@ -1,136 +1,180 @@
 from IPython.config.loader import Config
 
 c = get_config()
 
 #-----------------------------------------------------------------------------
 # Global configuration
 #-----------------------------------------------------------------------------
 
 # Basic Global config attributes
 
 # Start up messages are logged to stdout using the logging module.
 # These all happen before the twisted reactor is started and are
 # useful for debugging purposes. Can be (10=DEBUG, 20=INFO, 30=WARN, 40=ERROR)
 # and smaller is more verbose.
 # c.Global.log_level = 20
 
 # Log to a file in cluster_dir/log, otherwise just log to sys.stdout.
 # c.Global.log_to_file = False
 
 # Remove old logs from cluster_dir/log before starting.
 # c.Global.clean_logs = True
 
 # A list of Python statements that will be run before starting the
 # controller. This is provided because occasionally certain things need to
 # be imported in the controller for pickling to work.
 # c.Global.import_statements = ['import math']
 
-# Reuse the controller's
+# Reuse the controller's JSON files. If False, JSON files are regenerated
 # each time the controller is run. If True, they will be reused, *but*, you
 # also must set the network ports by hand. If set, this will override the
 # values set for the client and engine connections below.
-# c.Global.reuse_f
+# c.Global.reuse_files = True
 
-# Enable SSL encryption on all connections to the controller. If set, this
-# will override the values set for the client and engine connections below.
+# Enable exec_key authentication on all messages. Default is True.
 # c.Global.secure = True
 
 # The working directory for the process. The application will use os.chdir
 # to change to this directory before starting.
 # c.Global.work_dir = os.getcwd()
 
+# The log url for logging to an `iploggerz` application. This will override
+# log-to-file.
+# c.Global.log_url = 'tcp://127.0.0.1:20202'
+
+# The specific external IP that is used to disambiguate multi-interface URLs.
+# The default behavior is to guess from external IPs gleaned from `socket`.
+# c.Global.location = '192.168.1.123'
+
+# The ssh server remote clients should use to connect to this controller.
+# It must be a machine that can see the interface specified in client_ip.
+# The default for client_ip is localhost, in which case the sshserver must
+# be an external IP of the controller machine.
+# c.Global.sshserver = 'controller.example.com'
+
+# The url to use for registration. If set, this overrides engine-ip,
+# engine-transport, client-ip, client-transport, and regport.
+# c.RegistrationFactory.url = 'tcp://*:12345'
+
+# The port to use for registration. Clients and Engines both use this
+# port for registration.
+# c.RegistrationFactory.regport = 10101
+
 #-----------------------------------------------------------------------------
-# Configure the cl
+# Configure the Task Scheduler
 #-----------------------------------------------------------------------------
 
-# Basic client service config attributes
+# The routing scheme. 'pure' will use the pure-ZMQ scheduler. Any other
+# value will use a Python scheduler with various routing schemes.
+# Python schemes are: lru, weighted, random, twobin. Default is 'weighted'.
+# Note that the pure ZMQ scheduler does not support many features, such as
+# dying engines, dependencies, or engine-subset load-balancing.
+# c.ControllerFactory.scheme = 'pure'
 
-# The network interface the controller will listen on for client connections.
-# This should be an IP address or hostname of the controller's host. The empty
-# string means listen on all interfaces.
-# c.FCClientServiceFactory.ip = ''
+# The pure ZMQ scheduler can limit the number of outstanding tasks per engine
+# by using the ZMQ HWM option. This allows engines with long-running tasks
+# to not steal too many tasks from other engines. The default is 0, which
+# means aggressively distribute messages, never waiting for them to finish.
+# c.ControllerFactory.hwm = 1
 
-# The TCP/IP port the controller will listen on for client connections. If 0
-# a random port will be used. If the controller's host has a firewall running
-# it must allow incoming traffic on this port.
-# c.FCClientServiceFactory.port = 0
+# Whether to use Threads or Processes to start the Schedulers. Threads will
+# use less resources, but potentially reduce throughput. Default is to
+# use processes. Note that a Python scheduler will always be in a Process.
+# c.ControllerFactory.usethreads
 
-# The client learns how to connect to the controller by looking at the
-# location field embedded in the FURL. If this field is empty, all network
-# interfaces that the controller is listening on will be listed. To have the
-# client connect on a particular interface, list it here.
-# c.FCClientServiceFactory.location = ''
+#-----------------------------------------------------------------------------
+# Configure the Hub
+#-----------------------------------------------------------------------------
+
+# Which class to use for the db backend. Currently supported are DictDB (the
+# default), and MongoDB. Uncomment this line to enable MongoDB, which will
+# slow down the Hub's responsiveness, but also reduce its memory footprint.
+# c.HubFactory.db_class = 'IPython.parallel.mongodb.MongoDB'
 
-# Use SSL encryption for the client connection.
-# c.FCClientServiceFactory.secure = True
+# The heartbeat ping frequency. This is the frequency (in ms) at which the
+# Hub pings engines for heartbeats. This determines how quickly the Hub
+# will react to engines coming and going. A lower number means faster response
+# time, but more network activity. The default is 100ms.
+# c.HubFactory.ping = 100
 
-# Reuse the client FURL each time the controller is started. If set, you must
-# also pick a specific network port above (FCClientServiceFactory.port).
-# c.FCClientServiceFactory.reuse_furls = False
+# HubFactory queue port pairs, to set by name: mux, iopub, control, task. Set
+# each as a tuple of length 2 of ints. The default is to find random
+# available ports.
+# c.HubFactory.mux = (10102,10112)
 
 #-----------------------------------------------------------------------------
-# Configure the
+# Configure the client connections
 #-----------------------------------------------------------------------------
 
-# Basic config attributes for the engine services.
+# Basic client connection config attributes
 
-# The network interface the controller will listen on for
-# This should be an IP address or
-#
-# c.FCEngineServiceFactory.ip = ''
+# The network interface the controller will listen on for client connections.
+# This should be an IP address or interface on the controller. An asterisk
+# means listen on all interfaces. The transport can be any transport
+# supported by zeromq (tcp,epgm,pgm,ib,ipc):
+# c.HubFactory.client_ip = '*'
+# c.HubFactory.client_transport = 'tcp'
 
-# The TCP/IP port the controller will listen on for engine connections. If 0
-# a random port will be used. If the controller's host has a firewall running
-# it must allow incoming traffic on this port.
-# c.FCEngineServiceFactory.port = 0
+# Individual client ports to configure by name: query_port, notifier_port
+# c.HubFactory.query_port = 12345
 
-# The engine learns how to connect to the controller by looking at the
-# location field embedded in the FURL. If this field is empty, all network
-# interfaces that the controller is listening on will be listed. To have the
-# client connect on a particular interface, list it here.
-# c.FCEngineServiceFactory.location = ''
+#-----------------------------------------------------------------------------
+# Configure the engine connections
+#-----------------------------------------------------------------------------
 
-#
-# c.FCEngineServiceFactory.secure = True
+# Basic config attributes for the engine connections.
 
-# Reuse the client FURL each time the controller is started. If set, you must
-# also pick a specific network port above (FCClientServiceFactory.port).
-# c.FCEngineServiceFactory.reuse_furls = False
+# The network interface the controller will listen on for engine connections.
+# This should be an IP address or interface on the controller. An asterisk
+# means listen on all interfaces. The transport can be any transport
+# supported by zeromq (tcp,epgm,pgm,ib,ipc):
+# c.HubFactory.engine_ip = '*'
+# c.HubFactory.engine_transport = 'tcp'
+
+# Set the engine heartbeat ports to use:
+# c.HubFactory.hb = (10303,10313)
 
 #-----------------------------------------------------------------------------
-# Developer level configuration attributes
+# Configure the TaskRecord database backend
 #-----------------------------------------------------------------------------
 
-# You shouldn't have to modify anything in this section. These attributes
-# are more for developers who want to change the behavior of the controller
-# at a fundamental level.
-
-# c.FCClientServiceFactory.cert_file = u'ipcontroller-client.pem'
-
-# default_client_interfaces = Config()
-# default_client_interfaces.Task.interface_chain = [
-#     'IPython.kernel.task.ITaskController',
-#     'IPython.kernel.taskfc.IFCTaskController'
-# ]
-#
-# default_client_interfaces.Task.furl_file = u'ipcontroller-tc.furl'
-#
-# default_client_interfaces.MultiEngine.interface_chain = [
-#     'IPython.kernel.multiengine.IMultiEngine',
-#     'IPython.kernel.multienginefc.IFCSynchronousMultiEngine'
-# ]
-#
-# default_client_interfaces.MultiEngine.furl_file = u'ipcontroller-mec.furl'
-#
-# c.FCEngineServiceFactory.interfaces = default_client_interfaces
-
-# c.FCEngineServiceFactory.cert_file = u'ipcontroller-engine.pem'
-
-# default_engine_interfaces = Config()
-# default_engine_interfaces.Default.interface_chain = [
-#     'IPython.kernel.enginefc.IFCControllerBase'
-# ]
-#
-# default_engine_interfaces.Default.furl_file = u'ipcontroller-engine.furl'
-#
-# c.FCEngineServiceFactory.interfaces = default_engine_interfaces
+# For memory/persistence reasons, tasks can be stored out-of-memory in a
+# database. Currently, only sqlite and mongodb are supported as backends, but
+# the interface is fairly simple, so advanced developers could write their
+# own backend.
+
+# ----- in-memory configuration --------
+# This line restores the default behavior: in-memory storage of all results.
+# c.HubFactory.db_class = 'IPython.parallel.dictdb.DictDB'
+
+# ----- sqlite configuration --------
+# Use this line to activate sqlite:
+# c.HubFactory.db_class = 'IPython.parallel.sqlitedb.SQLiteDB'
+
+# You can specify the name of the db-file. By default, this will be located
+# in the active cluster_dir, e.g. ~/.ipython/clusterz_default/tasks.db
+# c.SQLiteDB.filename = 'tasks.db'
+
+# You can also specify the location of the db-file, if you want it to be
+# somewhere other than the cluster_dir.
+# c.SQLiteDB.location = '/scratch/'
+
+# This will specify the name of the table for the controller to use. The
+# default behavior is to use the session ID of the SessionFactory object
+# (a uuid). Overriding this will result in results persisting for multiple
+# sessions.
+# c.SQLiteDB.table = 'results'
+
+# ----- mongodb configuration --------
+# Use this line to activate mongodb:
+# c.HubFactory.db_class = 'IPython.parallel.mongodb.MongoDB'
+
+# You can specify the args and kwargs pymongo will use when creating the
+# Connection. For more information on what these options might be, see the
+# pymongo documentation.
+# c.MongoDB.connection_kwargs = {}
+# c.MongoDB.connection_args = []
+
+# This will specify the name of the mongo database for the controller to use.
+# The default behavior is to use the session ID of the SessionFactory object
+# (a uuid). Overriding this will result in task results persisting through
+# multiple sessions.
+# c.MongoDB.database = 'ipythondb'
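
As a consolidated sketch of the controller options above: the following pins the registration, queue, and heartbeat ports (as reuse_files requires) and switches task storage to SQLite. The specific port numbers and filename are illustrative assumptions:

c.Global.reuse_files = True
c.RegistrationFactory.regport = 10101   # assumption: any free port works
c.HubFactory.mux = (10102, 10112)       # assumption: explicit queue port pair
c.HubFactory.hb = (10303, 10313)        # assumption: explicit heartbeat ports
c.HubFactory.db_class = 'IPython.parallel.sqlitedb.SQLiteDB'
c.SQLiteDB.filename = 'tasks.db'        # assumption: file lands in cluster_dir
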
@@ -1,90 +1,85 @@
 c = get_config()
 
 #-----------------------------------------------------------------------------
 # Global configuration
 #-----------------------------------------------------------------------------
 
 # Start up messages are logged to stdout using the logging module.
 # These all happen before the twisted reactor is started and are
 # useful for debugging purposes. Can be (10=DEBUG, 20=INFO, 30=WARN, 40=ERROR)
 # and smaller is more verbose.
 # c.Global.log_level = 20
 
 # Log to a file in cluster_dir/log, otherwise just log to sys.stdout.
 # c.Global.log_to_file = False
 
 # Remove old logs from cluster_dir/log before starting.
 # c.Global.clean_logs = True
 
 # A list of strings that will be executed in the user's namespace on the
 # engine before it connects to the controller.
 # c.Global.exec_lines = ['import numpy']
 
 # The engine will try to connect to the controller multiple times, to allow
 # the controller time to start up and write its FURL file. These parameters
 # control the number of retries (connect_max_tries) and the initial delay
 # (connect_delay) between attempts. The actual delay between attempts gets
 # longer each time by a factor of 1.5 (delay[i] = 1.5*delay[i-1]).
 # c.Global.connect_delay = 0.1
 # c.Global.connect_max_tries = 15
 
-# By default, the engine will look for the controller's
-# cluster directory. Sometimes, the
-# attribute can be set to the full path of the
-# c.Global.furl_file = u''
+# By default, the engine will look for the controller's JSON file in its own
+# cluster directory. Sometimes, the JSON file will be elsewhere and this
+# attribute can be set to the full path of the JSON file.
+# c.Global.url_file = u'/path/to/my/ipcontroller-engine.json'
 
 # The working directory for the process. The application will use os.chdir
 # to change to this directory before starting.
 # c.Global.work_dir = os.getcwd()
 
 #-----------------------------------------------------------------------------
 # MPI configuration
 #-----------------------------------------------------------------------------
 
 # Upon starting, the engine can be configured to call MPI_Init. This section
 # configures that.
 
 # Select which MPI section to execute to set up MPI. The value of this
 # attribute must match the name of another attribute in the MPI config
 # section (mpi4py, pytrilinos, etc.). This can also be set by the --mpi
 # command line option.
 # c.MPI.use = ''
 
 # Initialize MPI using mpi4py. To use this, set c.MPI.use = 'mpi4py' or pass
 # --mpi=mpi4py at the command line.
 # c.MPI.mpi4py = """from mpi4py import MPI as mpi
 # mpi.size = mpi.COMM_WORLD.Get_size()
 # mpi.rank = mpi.COMM_WORLD.Get_rank()
 # """
 
 # Initialize MPI using pytrilinos. To use this, set c.MPI.use = 'pytrilinos'
 # or pass --mpi=pytrilinos at the command line.
 # c.MPI.pytrilinos = """from PyTrilinos import Epetra
 # class SimpleStruct:
 #     pass
 # mpi = SimpleStruct()
 # mpi.rank = 0
 # mpi.size = 0
 # """
 
 #-----------------------------------------------------------------------------
 # Developer level configuration attributes
 #-----------------------------------------------------------------------------
 
 # You shouldn't have to modify anything in this section. These attributes
 # are more for developers who want to change the behavior of the engine
 # at a fundamental level.
 
-# c.Global.shell_class = 'IPython.kernel.core.interpreter.Interpreter'
-
-# c.Global.furl_file_name = u'ipcontroller-engine.furl'
+# c.Global.url_file_name = u'ipcontroller-engine.furl'
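
A minimal sketch of an engine config built from the options above, for an mpi4py engine that must read its connection info from an explicit path (e.g. when there is no shared filesystem); the path and exec_lines are illustrative assumptions:

c.Global.exec_lines = ['import numpy']   # assumption: engines need numpy
c.MPI.use = 'mpi4py'                     # run the mpi4py init section above
# assumption: wherever the controller's JSON file was copied to on this host
c.Global.url_file = u'/home/me/clusterz_default/security/ipcontroller-engine.json'
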
@@ -1,537 +1,537 b'' | |||||
1 | #!/usr/bin/env python |
|
1 | #!/usr/bin/env python | |
2 | # encoding: utf-8 |
|
2 | # encoding: utf-8 | |
3 | """ |
|
3 | """ | |
4 | The IPython cluster directory |
|
4 | The IPython cluster directory | |
5 | """ |
|
5 | """ | |
6 |
|
6 | |||
7 | #----------------------------------------------------------------------------- |
|
7 | #----------------------------------------------------------------------------- | |
8 | # Copyright (C) 2008-2009 The IPython Development Team |
|
8 | # Copyright (C) 2008-2009 The IPython Development Team | |
9 | # |
|
9 | # | |
10 | # Distributed under the terms of the BSD License. The full license is in |
|
10 | # Distributed under the terms of the BSD License. The full license is in | |
11 | # the file COPYING, distributed as part of this software. |
|
11 | # the file COPYING, distributed as part of this software. | |
12 | #----------------------------------------------------------------------------- |
|
12 | #----------------------------------------------------------------------------- | |
13 |
|
13 | |||
14 | #----------------------------------------------------------------------------- |
|
14 | #----------------------------------------------------------------------------- | |
15 | # Imports |
|
15 | # Imports | |
16 | #----------------------------------------------------------------------------- |
|
16 | #----------------------------------------------------------------------------- | |
17 |
|
17 | |||
18 | from __future__ import with_statement |
|
18 | from __future__ import with_statement | |
19 |
|
19 | |||
20 | import os |
|
20 | import os | |
21 | import logging |
|
21 | import logging | |
22 | import re |
|
22 | import re | |
23 | import shutil |
|
23 | import shutil | |
24 | import sys |
|
24 | import sys | |
25 |
|
25 | |||
26 | from IPython.config.loader import PyFileConfigLoader |
|
26 | from IPython.config.loader import PyFileConfigLoader | |
27 | from IPython.config.configurable import Configurable |
|
27 | from IPython.config.configurable import Configurable | |
28 | from IPython.core.application import Application, BaseAppConfigLoader |
|
28 | from IPython.core.application import Application, BaseAppConfigLoader | |
29 | from IPython.core.crashhandler import CrashHandler |
|
29 | from IPython.core.crashhandler import CrashHandler | |
30 | from IPython.core import release |
|
30 | from IPython.core import release | |
31 | from IPython.utils.path import ( |
|
31 | from IPython.utils.path import ( | |
32 | get_ipython_package_dir, |
|
32 | get_ipython_package_dir, | |
33 | expand_path |
|
33 | expand_path | |
34 | ) |
|
34 | ) | |
35 | from IPython.utils.traitlets import Unicode |
|
35 | from IPython.utils.traitlets import Unicode | |
36 |
|
36 | |||
37 | #----------------------------------------------------------------------------- |
|
37 | #----------------------------------------------------------------------------- | |
38 | # Module errors |
|
38 | # Module errors | |
39 | #----------------------------------------------------------------------------- |
|
39 | #----------------------------------------------------------------------------- | |
40 |
|
40 | |||
41 | class ClusterDirError(Exception): |
|
41 | class ClusterDirError(Exception): | |
42 | pass |
|
42 | pass | |
43 |
|
43 | |||
44 |
|
44 | |||
45 | class PIDFileError(Exception): |
|
45 | class PIDFileError(Exception): | |
46 | pass |
|
46 | pass | |
47 |
|
47 | |||
48 |
|
48 | |||
49 | #----------------------------------------------------------------------------- |
|
49 | #----------------------------------------------------------------------------- | |
50 | # Class for managing cluster directories |
|
50 | # Class for managing cluster directories | |
51 | #----------------------------------------------------------------------------- |
|
51 | #----------------------------------------------------------------------------- | |
52 |
|
52 | |||
53 | class ClusterDir(Configurable): |
|
53 | class ClusterDir(Configurable): | |
54 | """An object to manage the cluster directory and its resources. |
|
54 | """An object to manage the cluster directory and its resources. | |
55 |
|
55 | |||
56 | The cluster directory is used by :command:`ipengine`, |
|
56 | The cluster directory is used by :command:`ipengine`, | |
57 | :command:`ipcontroller` and :command:`ipclsuter` to manage the |
|
57 | :command:`ipcontroller` and :command:`ipclsuter` to manage the | |
58 | configuration, logging and security of these applications. |
|
58 | configuration, logging and security of these applications. | |
59 |
|
59 | |||
60 | This object knows how to find, create and manage these directories. This |
|
60 | This object knows how to find, create and manage these directories. This | |
61 | should be used by any code that want's to handle cluster directories. |
|
61 | should be used by any code that want's to handle cluster directories. | |
62 | """ |
|
62 | """ | |
63 |
|
63 | |||
64 | security_dir_name = Unicode('security') |
|
64 | security_dir_name = Unicode('security') | |
65 | log_dir_name = Unicode('log') |
|
65 | log_dir_name = Unicode('log') | |
66 | pid_dir_name = Unicode('pid') |
|
66 | pid_dir_name = Unicode('pid') | |
67 | security_dir = Unicode(u'') |
|
67 | security_dir = Unicode(u'') | |
68 | log_dir = Unicode(u'') |
|
68 | log_dir = Unicode(u'') | |
69 | pid_dir = Unicode(u'') |
|
69 | pid_dir = Unicode(u'') | |
70 | location = Unicode(u'') |
|
70 | location = Unicode(u'') | |
71 |
|
71 | |||
72 | def __init__(self, location=u''): |
|
72 | def __init__(self, location=u''): | |
73 | super(ClusterDir, self).__init__(location=location) |
|
73 | super(ClusterDir, self).__init__(location=location) | |
74 |
|
74 | |||
75 | def _location_changed(self, name, old, new): |
|
75 | def _location_changed(self, name, old, new): | |
76 | if not os.path.isdir(new): |
|
76 | if not os.path.isdir(new): | |
77 | os.makedirs(new) |
|
77 | os.makedirs(new) | |
78 | self.security_dir = os.path.join(new, self.security_dir_name) |
|
78 | self.security_dir = os.path.join(new, self.security_dir_name) | |
79 | self.log_dir = os.path.join(new, self.log_dir_name) |
|
79 | self.log_dir = os.path.join(new, self.log_dir_name) | |
80 | self.pid_dir = os.path.join(new, self.pid_dir_name) |
|
80 | self.pid_dir = os.path.join(new, self.pid_dir_name) | |
81 | self.check_dirs() |
|
81 | self.check_dirs() | |
82 |
|
82 | |||
83 | def _log_dir_changed(self, name, old, new): |
|
83 | def _log_dir_changed(self, name, old, new): | |
84 | self.check_log_dir() |
|
84 | self.check_log_dir() | |
85 |
|
85 | |||
86 | def check_log_dir(self): |
|
86 | def check_log_dir(self): | |
87 | if not os.path.isdir(self.log_dir): |
|
87 | if not os.path.isdir(self.log_dir): | |
88 | os.mkdir(self.log_dir) |
|
88 | os.mkdir(self.log_dir) | |
89 |
|
89 | |||
90 | def _security_dir_changed(self, name, old, new): |
|
90 | def _security_dir_changed(self, name, old, new): | |
91 | self.check_security_dir() |
|
91 | self.check_security_dir() | |
92 |
|
92 | |||
93 | def check_security_dir(self): |
|
93 | def check_security_dir(self): | |
94 | if not os.path.isdir(self.security_dir): |
|
94 | if not os.path.isdir(self.security_dir): | |
95 | os.mkdir(self.security_dir, 0700) |
|
95 | os.mkdir(self.security_dir, 0700) | |
96 | os.chmod(self.security_dir, 0700) |
|
96 | os.chmod(self.security_dir, 0700) | |
97 |
|
97 | |||
98 | def _pid_dir_changed(self, name, old, new): |
|
98 | def _pid_dir_changed(self, name, old, new): | |
99 | self.check_pid_dir() |
|
99 | self.check_pid_dir() | |
100 |
|
100 | |||
101 | def check_pid_dir(self): |
|
101 | def check_pid_dir(self): | |
102 | if not os.path.isdir(self.pid_dir): |
|
102 | if not os.path.isdir(self.pid_dir): | |
103 | os.mkdir(self.pid_dir, 0700) |
|
103 | os.mkdir(self.pid_dir, 0700) | |
104 | os.chmod(self.pid_dir, 0700) |
|
104 | os.chmod(self.pid_dir, 0700) | |
105 |
|
105 | |||
106 | def check_dirs(self): |
|
106 | def check_dirs(self): | |
107 | self.check_security_dir() |
|
107 | self.check_security_dir() | |
108 | self.check_log_dir() |
|
108 | self.check_log_dir() | |
109 | self.check_pid_dir() |
|
109 | self.check_pid_dir() | |
110 |
|
110 | |||
111 | def load_config_file(self, filename): |
|
111 | def load_config_file(self, filename): | |
112 | """Load a config file from the top level of the cluster dir. |
|
112 | """Load a config file from the top level of the cluster dir. | |
113 |
|
113 | |||
114 | Parameters |
|
114 | Parameters | |
115 | ---------- |
|
115 | ---------- | |
116 | filename : unicode or str |
|
116 | filename : unicode or str | |
117 | The filename only of the config file that must be located in |
|
117 | The filename only of the config file that must be located in | |
118 | the top-level of the cluster directory. |
|
118 | the top-level of the cluster directory. | |
119 | """ |
|
119 | """ | |
120 | loader = PyFileConfigLoader(filename, self.location) |
|
120 | loader = PyFileConfigLoader(filename, self.location) | |
121 | return loader.load_config() |
|
121 | return loader.load_config() | |
122 |
|
122 | |||
123 | def copy_config_file(self, config_file, path=None, overwrite=False): |
|
123 | def copy_config_file(self, config_file, path=None, overwrite=False): | |
124 | """Copy a default config file into the active cluster directory. |
|
124 | """Copy a default config file into the active cluster directory. | |
125 |
|
125 | |||
126 | Default configuration files are kept in :mod:`IPython.config.default`. |
|
126 | Default configuration files are kept in :mod:`IPython.config.default`. | |
127 | This function moves these from that location to the working cluster |
|
127 | This function moves these from that location to the working cluster | |
128 | directory. |
|
128 | directory. | |
129 | """ |
|
129 | """ | |
130 | if path is None: |
|
130 | if path is None: | |
131 | import IPython.config.default |
|
131 | import IPython.config.default | |
132 | path = IPython.config.default.__file__.split(os.path.sep)[:-1] |
|
132 | path = IPython.config.default.__file__.split(os.path.sep)[:-1] | |
133 | path = os.path.sep.join(path) |
|
133 | path = os.path.sep.join(path) | |
134 | src = os.path.join(path, config_file) |
|
134 | src = os.path.join(path, config_file) | |
135 | dst = os.path.join(self.location, config_file) |
|
135 | dst = os.path.join(self.location, config_file) | |
136 | if not os.path.isfile(dst) or overwrite: |
|
136 | if not os.path.isfile(dst) or overwrite: | |
137 | shutil.copy(src, dst) |
|
137 | shutil.copy(src, dst) | |
138 |
|
138 | |||
139 | def copy_all_config_files(self, path=None, overwrite=False): |
|
139 | def copy_all_config_files(self, path=None, overwrite=False): | |
140 | """Copy all config files into the active cluster directory.""" |
|
140 | """Copy all config files into the active cluster directory.""" | |
141 |
for f in [u'ipcontroller |
|
141 | for f in [u'ipcontroller_config.py', u'ipengine_config.py', | |
142 |
u'ipcluster |
|
142 | u'ipcluster_config.py']: | |
143 | self.copy_config_file(f, path=path, overwrite=overwrite) |
|
143 | self.copy_config_file(f, path=path, overwrite=overwrite) | |
144 |
|
144 | |||
145 | @classmethod |
|
145 | @classmethod | |
146 | def create_cluster_dir(csl, cluster_dir): |
|
146 | def create_cluster_dir(csl, cluster_dir): | |
147 | """Create a new cluster directory given a full path. |
|
147 | """Create a new cluster directory given a full path. | |
148 |
|
148 | |||
149 | Parameters |
|
149 | Parameters | |
150 | ---------- |
|
150 | ---------- | |
151 | cluster_dir : str |
|
151 | cluster_dir : str | |
152 | The full path to the cluster directory. If it does exist, it will |
|
152 | The full path to the cluster directory. If it does exist, it will | |
153 | be used. If not, it will be created. |
|
153 | be used. If not, it will be created. | |
154 | """ |
|
154 | """ | |
155 | return ClusterDir(location=cluster_dir) |
|
155 | return ClusterDir(location=cluster_dir) | |
156 |
|
156 | |||
157 | @classmethod |
|
157 | @classmethod | |
158 | def create_cluster_dir_by_profile(cls, path, profile=u'default'): |
|
158 | def create_cluster_dir_by_profile(cls, path, profile=u'default'): | |
159 | """Create a cluster dir by profile name and path. |
|
159 | """Create a cluster dir by profile name and path. | |
160 |
|
160 | |||
161 | Parameters |
|
161 | Parameters | |
162 | ---------- |
|
162 | ---------- | |
163 | path : str |
|
163 | path : str | |
164 | The path (directory) to put the cluster directory in. |
|
164 | The path (directory) to put the cluster directory in. | |
165 | profile : str |
|
165 | profile : str | |
166 | The name of the profile. The name of the cluster directory will |
|
166 | The name of the profile. The name of the cluster directory will | |
167 | be "clusterz_<profile>". |
|
167 | be "cluster_<profile>". | |
168 | """ |
|
168 | """ | |
169 | if not os.path.isdir(path): |
|
169 | if not os.path.isdir(path): | |
170 | raise ClusterDirError('Directory not found: %s' % path) |
|
170 | raise ClusterDirError('Directory not found: %s' % path) | |
171 | cluster_dir = os.path.join(path, u'clusterz_' + profile) |
|
171 | cluster_dir = os.path.join(path, u'cluster_' + profile) | |
172 | return ClusterDir(location=cluster_dir) |
|
172 | return ClusterDir(location=cluster_dir) | |
173 |
|
173 | |||
174 | @classmethod |
|
174 | @classmethod | |
175 | def find_cluster_dir_by_profile(cls, ipython_dir, profile=u'default'): |
|
175 | def find_cluster_dir_by_profile(cls, ipython_dir, profile=u'default'): | |
176 | """Find an existing cluster dir by profile name, return its ClusterDir. |
|
176 | """Find an existing cluster dir by profile name, return its ClusterDir. | |
177 |
|
177 | |||
178 | This searches through a sequence of paths for a cluster dir. If it |
|
178 | This searches through a sequence of paths for a cluster dir. If it | |
179 | is not found, a :class:`ClusterDirError` exception will be raised. |
|
179 | is not found, a :class:`ClusterDirError` exception will be raised. | |
180 |
|
180 | |||
181 | The search path algorithm is: |
|
181 | The search path algorithm is: | |
182 | 1. ``os.getcwd()`` |
|
182 | 1. ``os.getcwd()`` | |
183 | 2. ``ipython_dir`` |
|
183 | 2. ``ipython_dir`` | |
184 | 3. The directories found in the ":" separated |
|
184 | 3. The directories found in the ":" separated | |
185 | :env:`IPCLUSTER_DIR_PATH` environment variable. |
|
185 | :env:`IPCLUSTER_DIR_PATH` environment variable. | |
186 |
|
186 | |||
187 | Parameters |
|
187 | Parameters | |
188 | ---------- |
|
188 | ---------- | |
189 | ipython_dir : unicode or str |
|
189 | ipython_dir : unicode or str | |
190 | The IPython directory to use. |
|
190 | The IPython directory to use. | |
191 | profile : unicode or str |
|
191 | profile : unicode or str | |
192 | The name of the profile. The name of the cluster directory |
|
192 | The name of the profile. The name of the cluster directory | |
193 | will be "clusterz_<profile>". |
|
193 | will be "cluster_<profile>". | |
194 | """ |
|
194 | """ | |
195 | dirname = u'clusterz_' + profile |
|
195 | dirname = u'cluster_' + profile | |
196 | cluster_dir_paths = os.environ.get('IPCLUSTER_DIR_PATH','') |
|
196 | cluster_dir_paths = os.environ.get('IPCLUSTER_DIR_PATH','') | |
197 | if cluster_dir_paths: |
|
197 | if cluster_dir_paths: | |
198 | cluster_dir_paths = cluster_dir_paths.split(':') |
|
198 | cluster_dir_paths = cluster_dir_paths.split(':') | |
199 | else: |
|
199 | else: | |
200 | cluster_dir_paths = [] |
|
200 | cluster_dir_paths = [] | |
201 | paths = [os.getcwd(), ipython_dir] + cluster_dir_paths |
|
201 | paths = [os.getcwd(), ipython_dir] + cluster_dir_paths | |
202 | for p in paths: |
|
202 | for p in paths: | |
203 | cluster_dir = os.path.join(p, dirname) |
|
203 | cluster_dir = os.path.join(p, dirname) | |
204 | if os.path.isdir(cluster_dir): |
|
204 | if os.path.isdir(cluster_dir): | |
205 | return ClusterDir(location=cluster_dir) |
|
205 | return ClusterDir(location=cluster_dir) | |
206 | else: |
|
206 | else: | |
207 | raise ClusterDirError('Cluster directory not found in paths: %s' % dirname) |
|
207 | raise ClusterDirError('Cluster directory not found in paths: %s' % dirname) | |
208 |
|
208 | |||
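
(The search order implemented by find_cluster_dir_by_profile, condensed into a sketch; candidate_cluster_dirs is an illustrative name, and the only assumption is the ':'-separated IPCLUSTER_DIR_PATH documented above:)

import os

def candidate_cluster_dirs(ipython_dir, profile=u'default'):
    dirname = u'cluster_' + profile
    extra = os.environ.get('IPCLUSTER_DIR_PATH', '')
    paths = [os.getcwd(), ipython_dir] + (extra.split(':') if extra else [])
    # the first candidate that is a directory wins; if none is,
    # find_cluster_dir_by_profile raises ClusterDirError
    return [os.path.join(p, dirname) for p in paths]
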
209 | @classmethod |
|
209 | @classmethod | |
210 | def find_cluster_dir(cls, cluster_dir): |
|
210 | def find_cluster_dir(cls, cluster_dir): | |
211 | """Find/create a cluster dir and return its ClusterDir. |
|
211 | """Find/create a cluster dir and return its ClusterDir. | |
212 |
|
212 | |||
213 | This will create the cluster directory if it doesn't exist. |
|
213 | This will create the cluster directory if it doesn't exist. | |
214 |
|
214 | |||
215 | Parameters |
|
215 | Parameters | |
216 | ---------- |
|
216 | ---------- | |
217 | cluster_dir : unicode or str |
|
217 | cluster_dir : unicode or str | |
218 | The path of the cluster directory. This is expanded using |
|
218 | The path of the cluster directory. This is expanded using | |
219 | :func:`IPython.utils.genutils.expand_path`. |
|
219 | :func:`IPython.utils.genutils.expand_path`. | |
220 | """ |
|
220 | """ | |
221 | cluster_dir = expand_path(cluster_dir) |
|
221 | cluster_dir = expand_path(cluster_dir) | |
222 | if not os.path.isdir(cluster_dir): |
|
222 | if not os.path.isdir(cluster_dir): | |
223 | raise ClusterDirError('Cluster directory not found: %s' % cluster_dir) |
|
223 | raise ClusterDirError('Cluster directory not found: %s' % cluster_dir) | |
224 | return ClusterDir(location=cluster_dir) |
|
224 | return ClusterDir(location=cluster_dir) | |
225 |
|
225 | |||
226 |
|
226 | |||
227 | #----------------------------------------------------------------------------- |
|
227 | #----------------------------------------------------------------------------- | |
228 | # Command line options |
|
228 | # Command line options | |
229 | #----------------------------------------------------------------------------- |
|
229 | #----------------------------------------------------------------------------- | |
230 |
|
230 | |||
231 | class ClusterDirConfigLoader(BaseAppConfigLoader): |
|
231 | class ClusterDirConfigLoader(BaseAppConfigLoader): | |
232 |
|
232 | |||
233 | def _add_cluster_profile(self, parser): |
|
233 | def _add_cluster_profile(self, parser): | |
234 | paa = parser.add_argument |
|
234 | paa = parser.add_argument | |
235 | paa('-p', '--profile', |
|
235 | paa('-p', '--profile', | |
236 | dest='Global.profile',type=unicode, |
|
236 | dest='Global.profile',type=unicode, | |
237 | help= |
|
237 | help= | |
238 | """The string name of the profile to be used. This determines the name |
|
238 | """The string name of the profile to be used. This determines the name | |
239 | of the cluster dir as: cluster_<profile>. The default profile is named |
|
239 | of the cluster dir as: cluster_<profile>. The default profile is named | |
240 | 'default'. The cluster directory is resolved this way if the |
|
240 | 'default'. The cluster directory is resolved this way if the | |
241 | --cluster-dir option is not used.""", |
|
241 | --cluster-dir option is not used.""", | |
242 | metavar='Global.profile') |
|
242 | metavar='Global.profile') | |
243 |
|
243 | |||
244 | def _add_cluster_dir(self, parser): |
|
244 | def _add_cluster_dir(self, parser): | |
245 | paa = parser.add_argument |
|
245 | paa = parser.add_argument | |
246 | paa('--cluster-dir', |
|
246 | paa('--cluster-dir', | |
247 | dest='Global.cluster_dir',type=unicode, |
|
247 | dest='Global.cluster_dir',type=unicode, | |
248 | help="""Set the cluster dir. This overrides the logic used by the |
|
248 | help="""Set the cluster dir. This overrides the logic used by the | |
249 | --profile option.""", |
|
249 | --profile option.""", | |
250 | metavar='Global.cluster_dir') |
|
250 | metavar='Global.cluster_dir') | |
251 |
|
251 | |||
252 | def _add_work_dir(self, parser): |
|
252 | def _add_work_dir(self, parser): | |
253 | paa = parser.add_argument |
|
253 | paa = parser.add_argument | |
254 | paa('--work-dir', |
|
254 | paa('--work-dir', | |
255 | dest='Global.work_dir',type=unicode, |
|
255 | dest='Global.work_dir',type=unicode, | |
256 | help='Set the working dir for the process.', |
|
256 | help='Set the working dir for the process.', | |
257 | metavar='Global.work_dir') |
|
257 | metavar='Global.work_dir') | |
258 |
|
258 | |||
259 | def _add_clean_logs(self, parser): |
|
259 | def _add_clean_logs(self, parser): | |
260 | paa = parser.add_argument |
|
260 | paa = parser.add_argument | |
261 | paa('--clean-logs', |
|
261 | paa('--clean-logs', | |
262 | dest='Global.clean_logs', action='store_true', |
|
262 | dest='Global.clean_logs', action='store_true', | |
263 | help='Delete old log files before starting.') |
|
263 | help='Delete old log files before starting.') | |
264 |
|
264 | |||
265 | def _add_no_clean_logs(self, parser): |
|
265 | def _add_no_clean_logs(self, parser): | |
266 | paa = parser.add_argument |
|
266 | paa = parser.add_argument | |
267 | paa('--no-clean-logs', |
|
267 | paa('--no-clean-logs', | |
268 | dest='Global.clean_logs', action='store_false', |
|
268 | dest='Global.clean_logs', action='store_false', | |
269 | help="Don't delete old log files before starting.") |
|
269 | help="Don't delete old log files before starting.") | |
270 |
|
270 | |||
271 | def _add_arguments(self): |
|
271 | def _add_arguments(self): | |
272 | super(ClusterDirConfigLoader, self)._add_arguments() |
|
272 | super(ClusterDirConfigLoader, self)._add_arguments() | |
273 | self._add_cluster_profile(self.parser) |
|
273 | self._add_cluster_profile(self.parser) | |
274 | self._add_cluster_dir(self.parser) |
|
274 | self._add_cluster_dir(self.parser) | |
275 | self._add_work_dir(self.parser) |
|
275 | self._add_work_dir(self.parser) | |
276 | self._add_clean_logs(self.parser) |
|
276 | self._add_clean_logs(self.parser) | |
277 | self._add_no_clean_logs(self.parser) |
|
277 | self._add_no_clean_logs(self.parser) | |
278 |
|
278 | |||
279 |
|
279 | |||
280 | #----------------------------------------------------------------------------- |
|
280 | #----------------------------------------------------------------------------- | |
281 | # Crash handler for this application |
|
281 | # Crash handler for this application | |
282 | #----------------------------------------------------------------------------- |
|
282 | #----------------------------------------------------------------------------- | |
283 |
|
283 | |||
284 |
|
284 | |||
285 | _message_template = """\ |
|
285 | _message_template = """\ | |
286 | Oops, $self.app_name crashed. We do our best to make it stable, but... |
|
286 | Oops, $self.app_name crashed. We do our best to make it stable, but... | |
287 |
|
287 | |||
288 | A crash report was automatically generated with the following information: |
|
288 | A crash report was automatically generated with the following information: | |
289 | - A verbatim copy of the crash traceback. |
|
289 | - A verbatim copy of the crash traceback. | |
290 | - Data on your current $self.app_name configuration. |
|
290 | - Data on your current $self.app_name configuration. | |
291 |
|
291 | |||
292 | It was left in the file named: |
|
292 | It was left in the file named: | |
293 | \t'$self.crash_report_fname' |
|
293 | \t'$self.crash_report_fname' | |
294 | If you can email this file to the developers, the information in it will help |
|
294 | If you can email this file to the developers, the information in it will help | |
295 | them in understanding and correcting the problem. |
|
295 | them in understanding and correcting the problem. | |
296 |
|
296 | |||
297 | You can mail it to: $self.contact_name at $self.contact_email |
|
297 | You can mail it to: $self.contact_name at $self.contact_email | |
298 | with the subject '$self.app_name Crash Report'. |
|
298 | with the subject '$self.app_name Crash Report'. | |
299 |
|
299 | |||
300 | If you want to do it now, the following command will work (under Unix): |
|
300 | If you want to do it now, the following command will work (under Unix): | |
301 | mail -s '$self.app_name Crash Report' $self.contact_email < $self.crash_report_fname |
|
301 | mail -s '$self.app_name Crash Report' $self.contact_email < $self.crash_report_fname | |
302 |
|
302 | |||
303 | To ensure accurate tracking of this issue, please file a report about it at: |
|
303 | To ensure accurate tracking of this issue, please file a report about it at: | |
304 | $self.bug_tracker |
|
304 | $self.bug_tracker | |
305 | """ |
|
305 | """ | |
306 |
|
306 | |||
307 | class ClusterDirCrashHandler(CrashHandler): |
|
307 | class ClusterDirCrashHandler(CrashHandler): | |
308 | """sys.excepthook for IPython itself, leaves a detailed report on disk.""" |
|
308 | """sys.excepthook for IPython itself, leaves a detailed report on disk.""" | |
309 |
|
309 | |||
310 | message_template = _message_template |
|
310 | message_template = _message_template | |
311 |
|
311 | |||
312 | def __init__(self, app): |
|
312 | def __init__(self, app): | |
313 | contact_name = release.authors['Brian'][0] |
|
313 | contact_name = release.authors['Brian'][0] | |
314 | contact_email = release.authors['Brian'][1] |
|
314 | contact_email = release.authors['Brian'][1] | |
315 | bug_tracker = 'http://github.com/ipython/ipython/issues' |
|
315 | bug_tracker = 'http://github.com/ipython/ipython/issues' | |
316 | super(ClusterDirCrashHandler,self).__init__( |
|
316 | super(ClusterDirCrashHandler,self).__init__( | |
317 | app, contact_name, contact_email, bug_tracker |
|
317 | app, contact_name, contact_email, bug_tracker | |
318 | ) |
|
318 | ) | |
319 |
|
319 | |||
320 |
|
320 | |||
321 | #----------------------------------------------------------------------------- |
|
321 | #----------------------------------------------------------------------------- | |
322 | # Main application |
|
322 | # Main application | |
323 | #----------------------------------------------------------------------------- |
|
323 | #----------------------------------------------------------------------------- | |
324 |
|
324 | |||
325 | class ApplicationWithClusterDir(Application): |
|
325 | class ApplicationWithClusterDir(Application): | |
326 | """An application that puts everything into a cluster directory. |
|
326 | """An application that puts everything into a cluster directory. | |
327 |
|
327 | |||
328 | Instead of looking for things in the ipython_dir, this type of application |
|
328 | Instead of looking for things in the ipython_dir, this type of application | |
329 | will use its own private directory called the "cluster directory" |
|
329 | will use its own private directory called the "cluster directory" | |
330 | for things like config files, log files, etc. |
|
330 | for things like config files, log files, etc. | |
331 |
|
331 | |||
332 | The cluster directory is resolved as follows: |
|
332 | The cluster directory is resolved as follows: | |
333 |
|
333 | |||
334 | * If the ``--cluster-dir`` option is given, it is used. |
|
334 | * If the ``--cluster-dir`` option is given, it is used. | |
335 | * If ``--cluster-dir`` is not given, the application directory is |
|
335 | * If ``--cluster-dir`` is not given, the application directory is | |
336 | resolved using the profile name as ``cluster_<profile>``. The search |
|
336 | resolved using the profile name as ``cluster_<profile>``. The search | |
337 | path for this directory is then i) the cwd, if it is found there, |
|
337 | path for this directory is then i) the cwd, if it is found there, | |
338 | and ii) the ipython_dir otherwise. |
|
338 | and ii) the ipython_dir otherwise. | |
339 |
|
339 | |||
340 | The config file for the application is to be put in the cluster |
|
340 | The config file for the application is to be put in the cluster | |
341 | dir and named the value of the ``config_file_name`` class attribute. |
|
341 | dir and named the value of the ``config_file_name`` class attribute. | |
342 | """ |
|
342 | """ | |
343 |
|
343 | |||
344 | command_line_loader = ClusterDirConfigLoader |
|
344 | command_line_loader = ClusterDirConfigLoader | |
345 | crash_handler_class = ClusterDirCrashHandler |
|
345 | crash_handler_class = ClusterDirCrashHandler | |
346 | auto_create_cluster_dir = True |
|
346 | auto_create_cluster_dir = True | |
347 | # temporarily override default_log_level to INFO |
|
347 | # temporarily override default_log_level to INFO | |
348 | default_log_level = logging.INFO |
|
348 | default_log_level = logging.INFO | |
349 |
|
349 | |||
350 | def create_default_config(self): |
|
350 | def create_default_config(self): | |
351 | super(ApplicationWithClusterDir, self).create_default_config() |
|
351 | super(ApplicationWithClusterDir, self).create_default_config() | |
352 | self.default_config.Global.profile = u'default' |
|
352 | self.default_config.Global.profile = u'default' | |
353 | self.default_config.Global.cluster_dir = u'' |
|
353 | self.default_config.Global.cluster_dir = u'' | |
354 | self.default_config.Global.work_dir = os.getcwd() |
|
354 | self.default_config.Global.work_dir = os.getcwd() | |
355 | self.default_config.Global.log_to_file = False |
|
355 | self.default_config.Global.log_to_file = False | |
356 | self.default_config.Global.log_url = None |
|
356 | self.default_config.Global.log_url = None | |
357 | self.default_config.Global.clean_logs = False |
|
357 | self.default_config.Global.clean_logs = False | |
358 |
|
358 | |||
359 | def find_resources(self): |
|
359 | def find_resources(self): | |
360 | """This resolves the cluster directory. |
|
360 | """This resolves the cluster directory. | |
361 |
|
361 | |||
362 | This tries to find the cluster directory and if successful, it will |
|
362 | This tries to find the cluster directory and if successful, it will | |
363 | have done: |
|
363 | have done: | |
364 | * Sets ``self.cluster_dir_obj`` to the :class:`ClusterDir` object for |
|
364 | * Sets ``self.cluster_dir_obj`` to the :class:`ClusterDir` object for | |
365 | the application. |
|
365 | the application. | |
366 | * Sets ``self.cluster_dir`` attribute of the application and config |
|
366 | * Sets ``self.cluster_dir`` attribute of the application and config | |
367 | objects. |
|
367 | objects. | |
368 |
|
368 | |||
369 | The algorithm used for this is as follows: |
|
369 | The algorithm used for this is as follows: | |
370 | 1. Try ``Global.cluster_dir``. |
|
370 | 1. Try ``Global.cluster_dir``. | |
371 | 2. Try using ``Global.profile``. |
|
371 | 2. Try using ``Global.profile``. | |
372 | 3. If both of these fail and ``self.auto_create_cluster_dir`` is |
|
372 | 3. If both of these fail and ``self.auto_create_cluster_dir`` is | |
373 | ``True``, then create the new cluster dir in the IPython directory. |
|
373 | ``True``, then create the new cluster dir in the IPython directory. | |
374 | 4. If all fails, then raise :class:`ClusterDirError`. |
|
374 | 4. If all fails, then raise :class:`ClusterDirError`. | |
375 | """ |
|
375 | """ | |
376 |
|
376 | |||
377 | try: |
|
377 | try: | |
378 | cluster_dir = self.command_line_config.Global.cluster_dir |
|
378 | cluster_dir = self.command_line_config.Global.cluster_dir | |
379 | except AttributeError: |
|
379 | except AttributeError: | |
380 | cluster_dir = self.default_config.Global.cluster_dir |
|
380 | cluster_dir = self.default_config.Global.cluster_dir | |
381 | cluster_dir = expand_path(cluster_dir) |
|
381 | cluster_dir = expand_path(cluster_dir) | |
382 | try: |
|
382 | try: | |
383 | self.cluster_dir_obj = ClusterDir.find_cluster_dir(cluster_dir) |
|
383 | self.cluster_dir_obj = ClusterDir.find_cluster_dir(cluster_dir) | |
384 | except ClusterDirError: |
|
384 | except ClusterDirError: | |
385 | pass |
|
385 | pass | |
386 | else: |
|
386 | else: | |
387 | self.log.info('Using existing cluster dir: %s' % \ |
|
387 | self.log.info('Using existing cluster dir: %s' % \ | |
388 | self.cluster_dir_obj.location |
|
388 | self.cluster_dir_obj.location | |
389 | ) |
|
389 | ) | |
390 | self.finish_cluster_dir() |
|
390 | self.finish_cluster_dir() | |
391 | return |
|
391 | return | |
392 |
|
392 | |||
393 | try: |
|
393 | try: | |
394 | self.profile = self.command_line_config.Global.profile |
|
394 | self.profile = self.command_line_config.Global.profile | |
395 | except AttributeError: |
|
395 | except AttributeError: | |
396 | self.profile = self.default_config.Global.profile |
|
396 | self.profile = self.default_config.Global.profile | |
397 | try: |
|
397 | try: | |
398 | self.cluster_dir_obj = ClusterDir.find_cluster_dir_by_profile( |
|
398 | self.cluster_dir_obj = ClusterDir.find_cluster_dir_by_profile( | |
399 | self.ipython_dir, self.profile) |
|
399 | self.ipython_dir, self.profile) | |
400 | except ClusterDirError: |
|
400 | except ClusterDirError: | |
401 | pass |
|
401 | pass | |
402 | else: |
|
402 | else: | |
403 | self.log.info('Using existing cluster dir: %s' % \ |
|
403 | self.log.info('Using existing cluster dir: %s' % \ | |
404 | self.cluster_dir_obj.location |
|
404 | self.cluster_dir_obj.location | |
405 | ) |
|
405 | ) | |
406 | self.finish_cluster_dir() |
|
406 | self.finish_cluster_dir() | |
407 | return |
|
407 | return | |
408 |
|
408 | |||
409 | if self.auto_create_cluster_dir: |
|
409 | if self.auto_create_cluster_dir: | |
410 | self.cluster_dir_obj = ClusterDir.create_cluster_dir_by_profile( |
|
410 | self.cluster_dir_obj = ClusterDir.create_cluster_dir_by_profile( | |
411 | self.ipython_dir, self.profile |
|
411 | self.ipython_dir, self.profile | |
412 | ) |
|
412 | ) | |
413 | self.log.info('Creating new cluster dir: %s' % \ |
|
413 | self.log.info('Creating new cluster dir: %s' % \ | |
414 | self.cluster_dir_obj.location |
|
414 | self.cluster_dir_obj.location | |
415 | ) |
|
415 | ) | |
416 | self.finish_cluster_dir() |
|
416 | self.finish_cluster_dir() | |
417 | else: |
|
417 | else: | |
418 | raise ClusterDirError('Could not find a valid cluster directory.') |
|
418 | raise ClusterDirError('Could not find a valid cluster directory.') | |
419 |
|
419 | |||
420 | def finish_cluster_dir(self): |
|
420 | def finish_cluster_dir(self): | |
421 | # Set the cluster directory |
|
421 | # Set the cluster directory | |
422 | self.cluster_dir = self.cluster_dir_obj.location |
|
422 | self.cluster_dir = self.cluster_dir_obj.location | |
423 |
|
423 | |||
424 | # These have to be set because they could be different from the one |
|
424 | # These have to be set because they could be different from the one | |
425 | # that we just computed. Because command line has the highest |
|
425 | # that we just computed. Because command line has the highest | |
426 | # priority, this will always end up in the master_config. |
|
426 | # priority, this will always end up in the master_config. | |
427 | self.default_config.Global.cluster_dir = self.cluster_dir |
|
427 | self.default_config.Global.cluster_dir = self.cluster_dir | |
428 | self.command_line_config.Global.cluster_dir = self.cluster_dir |
|
428 | self.command_line_config.Global.cluster_dir = self.cluster_dir | |
429 |
|
429 | |||
430 | def find_config_file_name(self): |
|
430 | def find_config_file_name(self): | |
431 | """Find the config file name for this application.""" |
|
431 | """Find the config file name for this application.""" | |
432 | # For this type of Application it should be set as a class attribute. |
|
432 | # For this type of Application it should be set as a class attribute. | |
433 | if not hasattr(self, 'default_config_file_name'): |
|
433 | if not hasattr(self, 'default_config_file_name'): | |
434 | self.log.critical("No config filename found") |
|
434 | self.log.critical("No config filename found") | |
435 | else: |
|
435 | else: | |
436 | self.config_file_name = self.default_config_file_name |
|
436 | self.config_file_name = self.default_config_file_name | |
437 |
|
437 | |||
438 | def find_config_file_paths(self): |
|
438 | def find_config_file_paths(self): | |
439 | # Set the search path to the cluster directory. We should NOT |
|
439 | # Set the search path to the cluster directory. We should NOT | |
440 | # include IPython.config.default here as the default config files |
|
440 | # include IPython.config.default here as the default config files | |
441 | # are ALWAYS automatically copied to the cluster directory. |
|
441 | # are ALWAYS automatically copied to the cluster directory. | |
442 | conf_dir = os.path.join(get_ipython_package_dir(), 'config', 'default') |
|
442 | conf_dir = os.path.join(get_ipython_package_dir(), 'config', 'default') | |
443 | self.config_file_paths = (self.cluster_dir,) |
|
443 | self.config_file_paths = (self.cluster_dir,) | |
444 |
|
444 | |||
445 | def pre_construct(self): |
|
445 | def pre_construct(self): | |
446 | # The log and security dirs were set earlier, but here we put them |
|
446 | # The log and security dirs were set earlier, but here we put them | |
447 | # into the config and log them. |
|
447 | # into the config and log them. | |
448 | config = self.master_config |
|
448 | config = self.master_config | |
449 | sdir = self.cluster_dir_obj.security_dir |
|
449 | sdir = self.cluster_dir_obj.security_dir | |
450 | self.security_dir = config.Global.security_dir = sdir |
|
450 | self.security_dir = config.Global.security_dir = sdir | |
451 | ldir = self.cluster_dir_obj.log_dir |
|
451 | ldir = self.cluster_dir_obj.log_dir | |
452 | self.log_dir = config.Global.log_dir = ldir |
|
452 | self.log_dir = config.Global.log_dir = ldir | |
453 | pdir = self.cluster_dir_obj.pid_dir |
|
453 | pdir = self.cluster_dir_obj.pid_dir | |
454 | self.pid_dir = config.Global.pid_dir = pdir |
|
454 | self.pid_dir = config.Global.pid_dir = pdir | |
455 | self.log.info("Cluster directory set to: %s" % self.cluster_dir) |
|
455 | self.log.info("Cluster directory set to: %s" % self.cluster_dir) | |
456 | config.Global.work_dir = unicode(expand_path(config.Global.work_dir)) |
|
456 | config.Global.work_dir = unicode(expand_path(config.Global.work_dir)) | |
457 | # Change to the working directory. We do this just before construct |
|
457 | # Change to the working directory. We do this just before construct | |
458 | # is called so all the components there have the right working dir. |
|
458 | # is called so all the components there have the right working dir. | |
459 | self.to_work_dir() |
|
459 | self.to_work_dir() | |
460 |
|
460 | |||
461 | def to_work_dir(self): |
|
461 | def to_work_dir(self): | |
462 | wd = self.master_config.Global.work_dir |
|
462 | wd = self.master_config.Global.work_dir | |
463 | if unicode(wd) != unicode(os.getcwd()): |
|
463 | if unicode(wd) != unicode(os.getcwd()): | |
464 | os.chdir(wd) |
|
464 | os.chdir(wd) | |
465 | self.log.info("Changing to working dir: %s" % wd) |
|
465 | self.log.info("Changing to working dir: %s" % wd) | |
466 |
|
466 | |||
467 | def start_logging(self): |
|
467 | def start_logging(self): | |
468 | # Remove old log files |
|
468 | # Remove old log files | |
469 | if self.master_config.Global.clean_logs: |
|
469 | if self.master_config.Global.clean_logs: | |
470 | log_dir = self.master_config.Global.log_dir |
|
470 | log_dir = self.master_config.Global.log_dir | |
471 | for f in os.listdir(log_dir): |
|
471 | for f in os.listdir(log_dir): | |
472 | if re.match(r'%s-\d+\.(log|err|out)'%self.name,f): |
|
472 | if re.match(r'%s-\d+\.(log|err|out)'%self.name,f): | |
473 | # if f.startswith(self.name + u'-') and f.endswith('.log'): |
|
473 | # if f.startswith(self.name + u'-') and f.endswith('.log'): | |
474 | os.remove(os.path.join(log_dir, f)) |
|
474 | os.remove(os.path.join(log_dir, f)) | |
475 | # Start logging to the new log file |
|
475 | # Start logging to the new log file | |
476 | if self.master_config.Global.log_to_file: |
|
476 | if self.master_config.Global.log_to_file: | |
477 | log_filename = self.name + u'-' + str(os.getpid()) + u'.log' |
|
477 | log_filename = self.name + u'-' + str(os.getpid()) + u'.log' | |
478 | logfile = os.path.join(self.log_dir, log_filename) |
|
478 | logfile = os.path.join(self.log_dir, log_filename) | |
479 | open_log_file = open(logfile, 'w') |
|
479 | open_log_file = open(logfile, 'w') | |
480 | elif self.master_config.Global.log_url: |
|
480 | elif self.master_config.Global.log_url: | |
481 | open_log_file = None |
|
481 | open_log_file = None | |
482 | else: |
|
482 | else: | |
483 | open_log_file = sys.stdout |
|
483 | open_log_file = sys.stdout | |
484 | if open_log_file is not None: |
|
484 | if open_log_file is not None: | |
485 | self.log.removeHandler(self._log_handler) |
|
485 | self.log.removeHandler(self._log_handler) | |
486 | self._log_handler = logging.StreamHandler(open_log_file) |
|
486 | self._log_handler = logging.StreamHandler(open_log_file) | |
487 | self._log_formatter = logging.Formatter("[%(name)s] %(message)s") |
|
487 | self._log_formatter = logging.Formatter("[%(name)s] %(message)s") | |
488 | self._log_handler.setFormatter(self._log_formatter) |
|
488 | self._log_handler.setFormatter(self._log_formatter) | |
489 | self.log.addHandler(self._log_handler) |
|
489 | self.log.addHandler(self._log_handler) | |
490 | # log.startLogging(open_log_file) |
|
490 | # log.startLogging(open_log_file) | |
491 |
|
491 | |||
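
(The cleanup regex in start_logging matches per-pid log names such as 'ipcluster-12345.log'. A quick standalone check, with made-up file names:)

import re

pattern = re.compile(r'ipcluster-\d+\.(log|err|out)')
for f in ['ipcluster-12345.log', 'ipcluster-99.err', 'notes.txt']:
    print f, bool(pattern.match(f))
# ipcluster-12345.log True
# ipcluster-99.err True
# notes.txt False
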
492 | def write_pid_file(self, overwrite=False): |
|
492 | def write_pid_file(self, overwrite=False): | |
493 | """Create a .pid file in the pid_dir with my pid. |
|
493 | """Create a .pid file in the pid_dir with my pid. | |
494 |
|
494 | |||
495 | This must be called after pre_construct, which sets `self.pid_dir`. |
|
495 | This must be called after pre_construct, which sets `self.pid_dir`. | |
496 | This raises :exc:`PIDFileError` if the pid file exists already. |
|
496 | This raises :exc:`PIDFileError` if the pid file exists already. | |
497 | """ |
|
497 | """ | |
498 | pid_file = os.path.join(self.pid_dir, self.name + u'.pid') |
|
498 | pid_file = os.path.join(self.pid_dir, self.name + u'.pid') | |
499 | if os.path.isfile(pid_file): |
|
499 | if os.path.isfile(pid_file): | |
500 | pid = self.get_pid_from_file() |
|
500 | pid = self.get_pid_from_file() | |
501 | if not overwrite: |
|
501 | if not overwrite: | |
502 | raise PIDFileError( |
|
502 | raise PIDFileError( | |
503 | 'The pid file [%s] already exists. \nThis could mean that this ' |
|
503 | 'The pid file [%s] already exists. \nThis could mean that this ' | |
504 | 'server is already running with [pid=%s].' % (pid_file, pid) |
|
504 | 'server is already running with [pid=%s].' % (pid_file, pid) | |
505 | ) |
|
505 | ) | |
506 | with open(pid_file, 'w') as f: |
|
506 | with open(pid_file, 'w') as f: | |
507 | self.log.info("Creating pid file: %s" % pid_file) |
|
507 | self.log.info("Creating pid file: %s" % pid_file) | |
508 | f.write(repr(os.getpid())+'\n') |
|
508 | f.write(repr(os.getpid())+'\n') | |
509 |
|
509 | |||
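
(write_pid_file above and remove_pid_file/get_pid_from_file below amount to the following round trip; a sketch with '/tmp' standing in for pid_dir:)

import os

pid_file = os.path.join('/tmp', 'ipcluster.pid')

with open(pid_file, 'w') as f:          # write_pid_file
    f.write(repr(os.getpid()) + '\n')

with open(pid_file, 'r') as f:          # get_pid_from_file
    pid = int(f.read().strip())

os.remove(pid_file)                     # remove_pid_file
assert pid == os.getpid()
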
510 | def remove_pid_file(self): |
|
510 | def remove_pid_file(self): | |
511 | """Remove the pid file. |
|
511 | """Remove the pid file. | |
512 |
|
512 | |||
513 | This should be called at shutdown by registering a callback with |
|
513 | This should be called at shutdown by registering a callback with | |
514 | :func:`reactor.addSystemEventTrigger`. This needs to return |
|
514 | :func:`reactor.addSystemEventTrigger`. This needs to return | |
515 | ``None``. |
|
515 | ``None``. | |
516 | """ |
|
516 | """ | |
517 | pid_file = os.path.join(self.pid_dir, self.name + u'.pid') |
|
517 | pid_file = os.path.join(self.pid_dir, self.name + u'.pid') | |
518 | if os.path.isfile(pid_file): |
|
518 | if os.path.isfile(pid_file): | |
519 | try: |
|
519 | try: | |
520 | self.log.info("Removing pid file: %s" % pid_file) |
|
520 | self.log.info("Removing pid file: %s" % pid_file) | |
521 | os.remove(pid_file) |
|
521 | os.remove(pid_file) | |
522 | except: |
|
522 | except: | |
523 | self.log.warn("Error removing the pid file: %s" % pid_file) |
|
523 | self.log.warn("Error removing the pid file: %s" % pid_file) | |
524 |
|
524 | |||
525 | def get_pid_from_file(self): |
|
525 | def get_pid_from_file(self): | |
526 | """Get the pid from the pid file. |
|
526 | """Get the pid from the pid file. | |
527 |
|
527 | |||
528 | If the pid file doesn't exist a :exc:`PIDFileError` is raised. |
|
528 | If the pid file doesn't exist a :exc:`PIDFileError` is raised. | |
529 | """ |
|
529 | """ | |
530 | pid_file = os.path.join(self.pid_dir, self.name + u'.pid') |
|
530 | pid_file = os.path.join(self.pid_dir, self.name + u'.pid') | |
531 | if os.path.isfile(pid_file): |
|
531 | if os.path.isfile(pid_file): | |
532 | with open(pid_file, 'r') as f: |
|
532 | with open(pid_file, 'r') as f: | |
533 | pid = int(f.read().strip()) |
|
533 | pid = int(f.read().strip()) | |
534 | return pid |
|
534 | return pid | |
535 | else: |
|
535 | else: | |
536 | raise PIDFileError('pid file not found: %s' % pid_file) |
|
536 | raise PIDFileError('pid file not found: %s' % pid_file) | |
537 |
|
537 |
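
(Before the ipclusterapp.py hunk: the resolution algorithm spelled out in find_resources' docstring condenses to one function over the ClusterDir classmethods above. A sketch only; resolve_cluster_dir is an illustrative name, not part of the source:)

def resolve_cluster_dir(cluster_dir, ipython_dir, profile, auto_create=True):
    # 1. An explicit --cluster-dir is tried first.
    if cluster_dir:
        try:
            return ClusterDir.find_cluster_dir(expand_path(cluster_dir))
        except ClusterDirError:
            pass
    # 2. Otherwise search cwd, ipython_dir and IPCLUSTER_DIR_PATH
    #    for cluster_<profile>.
    try:
        return ClusterDir.find_cluster_dir_by_profile(ipython_dir, profile)
    except ClusterDirError:
        pass
    # 3. Fall back to creating the directory, mirroring
    #    auto_create_cluster_dir, or give up.
    if auto_create:
        return ClusterDir.create_cluster_dir_by_profile(ipython_dir, profile)
    raise ClusterDirError('Could not find a valid cluster directory.')
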
@@ -1,592 +1,592 b'' | |||||
1 | #!/usr/bin/env python |
|
1 | #!/usr/bin/env python | |
2 | # encoding: utf-8 |
|
2 | # encoding: utf-8 | |
3 | """ |
|
3 | """ | |
4 | The ipcluster application. |
|
4 | The ipcluster application. | |
5 | """ |
|
5 | """ | |
6 |
|
6 | |||
7 | #----------------------------------------------------------------------------- |
|
7 | #----------------------------------------------------------------------------- | |
8 | # Copyright (C) 2008-2009 The IPython Development Team |
|
8 | # Copyright (C) 2008-2009 The IPython Development Team | |
9 | # |
|
9 | # | |
10 | # Distributed under the terms of the BSD License. The full license is in |
|
10 | # Distributed under the terms of the BSD License. The full license is in | |
11 | # the file COPYING, distributed as part of this software. |
|
11 | # the file COPYING, distributed as part of this software. | |
12 | #----------------------------------------------------------------------------- |
|
12 | #----------------------------------------------------------------------------- | |
13 |
|
13 | |||
14 | #----------------------------------------------------------------------------- |
|
14 | #----------------------------------------------------------------------------- | |
15 | # Imports |
|
15 | # Imports | |
16 | #----------------------------------------------------------------------------- |
|
16 | #----------------------------------------------------------------------------- | |
17 |
|
17 | |||
18 | import errno |
|
18 | import errno | |
19 | import logging |
|
19 | import logging | |
20 | import os |
|
20 | import os | |
21 | import re |
|
21 | import re | |
22 | import signal |
|
22 | import signal | |
23 |
|
23 | |||
24 | import zmq |
|
24 | import zmq | |
25 | from zmq.eventloop import ioloop |
|
25 | from zmq.eventloop import ioloop | |
26 |
|
26 | |||
27 | from IPython.external.argparse import ArgumentParser, SUPPRESS |
|
27 | from IPython.external.argparse import ArgumentParser, SUPPRESS | |
28 | from IPython.utils.importstring import import_item |
|
28 | from IPython.utils.importstring import import_item | |
29 | from IPython.parallel.clusterdir import ( |
|
29 | from IPython.parallel.clusterdir import ( | |
30 | ApplicationWithClusterDir, ClusterDirConfigLoader, |
|
30 | ApplicationWithClusterDir, ClusterDirConfigLoader, | |
31 | ClusterDirError, PIDFileError |
|
31 | ClusterDirError, PIDFileError | |
32 | ) |
|
32 | ) | |
33 |
|
33 | |||
34 |
|
34 | |||
35 | #----------------------------------------------------------------------------- |
|
35 | #----------------------------------------------------------------------------- | |
36 | # Module level variables |
|
36 | # Module level variables | |
37 | #----------------------------------------------------------------------------- |
|
37 | #----------------------------------------------------------------------------- | |
38 |
|
38 | |||
39 |
|
39 | |||
40 | default_config_file_name = u'ipclusterz_config.py' |
|
40 | default_config_file_name = u'ipcluster_config.py' | |
41 |
|
41 | |||
42 |
|
42 | |||
43 | _description = """\ |
|
43 | _description = """\ | |
44 | Start an IPython cluster for parallel computing.\n\n |
|
44 | Start an IPython cluster for parallel computing.\n\n | |
45 |
|
45 | |||
46 | An IPython cluster consists of 1 controller and 1 or more engines. |
|
46 | An IPython cluster consists of 1 controller and 1 or more engines. | |
47 | This command automates the startup of these processes using a wide |
|
47 | This command automates the startup of these processes using a wide | |
48 | range of startup methods (SSH, local processes, PBS, mpiexec, |
|
48 | range of startup methods (SSH, local processes, PBS, mpiexec, | |
49 | Windows HPC Server 2008). To start a cluster with 4 engines on your |
|
49 | Windows HPC Server 2008). To start a cluster with 4 engines on your | |
50 | local host simply do 'ipclusterz start -n 4'. For more complex usage |
|
50 | local host simply do 'ipcluster start -n 4'. For more complex usage | |
51 | you will typically do 'ipclusterz create -p mycluster', then edit |
|
51 | you will typically do 'ipcluster create -p mycluster', then edit | |
52 | configuration files, followed by 'ipclusterz start -p mycluster -n 4'. |
|
52 | configuration files, followed by 'ipcluster start -p mycluster -n 4'. | |
53 | """ |
|
53 | """ | |
54 |
|
54 | |||
55 |
|
55 | |||
56 | # Exit codes for ipcluster |
|
56 | # Exit codes for ipcluster | |
57 |
|
57 | |||
58 | # This will be the exit code if the ipcluster appears to be running because |
|
58 | # This will be the exit code if the ipcluster appears to be running because | |
59 | # a .pid file exists |
|
59 | # a .pid file exists | |
60 | ALREADY_STARTED = 10 |
|
60 | ALREADY_STARTED = 10 | |
61 |
|
61 | |||
62 |
|
62 | |||
63 | # This will be the exit code if ipcluster stop is run, but there is no .pid |
|
63 | # This will be the exit code if ipcluster stop is run, but there is no .pid | |
64 | # file to be found. |
|
64 | # file to be found. | |
65 | ALREADY_STOPPED = 11 |
|
65 | ALREADY_STOPPED = 11 | |
66 |
|
66 | |||
67 | # This will be the exit code if ipcluster engines is run, but there is no .pid |
|
67 | # This will be the exit code if ipcluster engines is run, but there is no .pid | |
68 | # file to be found. |
|
68 | # file to be found. | |
69 | NO_CLUSTER = 12 |
|
69 | NO_CLUSTER = 12 | |
70 |
|
70 | |||
71 |
|
71 | |||
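
(How a supervising script might branch on these exit codes; the subprocess invocation is illustrative, not from the source:)

import subprocess

rc = subprocess.call(['ipcluster', 'start', '-n', '4', '--daemon'])
if rc == ALREADY_STARTED:
    print 'a .pid file exists; the cluster appears to be running already'
elif rc != 0:
    print 'ipcluster start failed with exit code %i' % rc
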
72 | #----------------------------------------------------------------------------- |
|
72 | #----------------------------------------------------------------------------- | |
73 | # Command line options |
|
73 | # Command line options | |
74 | #----------------------------------------------------------------------------- |
|
74 | #----------------------------------------------------------------------------- | |
75 |
|
75 | |||
76 |
|
76 | |||
77 | class IPClusterAppConfigLoader(ClusterDirConfigLoader): |
|
77 | class IPClusterAppConfigLoader(ClusterDirConfigLoader): | |
78 |
|
78 | |||
79 | def _add_arguments(self): |
|
79 | def _add_arguments(self): | |
80 | # Don't call ClusterDirConfigLoader._add_arguments as we don't want |
|
80 | # Don't call ClusterDirConfigLoader._add_arguments as we don't want | |
81 | # its defaults on self.parser. Instead, we will put those |
|
81 | # its defaults on self.parser. Instead, we will put those | |
82 | # default options on our subparsers. |
|
82 | # default options on our subparsers. | |
83 |
|
83 | |||
84 | # This has all the common options that all subcommands use |
|
84 | # This has all the common options that all subcommands use | |
85 | parent_parser1 = ArgumentParser( |
|
85 | parent_parser1 = ArgumentParser( | |
86 | add_help=False, |
|
86 | add_help=False, | |
87 | argument_default=SUPPRESS |
|
87 | argument_default=SUPPRESS | |
88 | ) |
|
88 | ) | |
89 | self._add_ipython_dir(parent_parser1) |
|
89 | self._add_ipython_dir(parent_parser1) | |
90 | self._add_log_level(parent_parser1) |
|
90 | self._add_log_level(parent_parser1) | |
91 |
|
91 | |||
92 | # This has all the common options that other subcommands use |
|
92 | # This has all the common options that other subcommands use | |
93 | parent_parser2 = ArgumentParser( |
|
93 | parent_parser2 = ArgumentParser( | |
94 | add_help=False, |
|
94 | add_help=False, | |
95 | argument_default=SUPPRESS |
|
95 | argument_default=SUPPRESS | |
96 | ) |
|
96 | ) | |
97 | self._add_cluster_profile(parent_parser2) |
|
97 | self._add_cluster_profile(parent_parser2) | |
98 | self._add_cluster_dir(parent_parser2) |
|
98 | self._add_cluster_dir(parent_parser2) | |
99 | self._add_work_dir(parent_parser2) |
|
99 | self._add_work_dir(parent_parser2) | |
100 | paa = parent_parser2.add_argument |
|
100 | paa = parent_parser2.add_argument | |
101 | paa('--log-to-file', |
|
101 | paa('--log-to-file', | |
102 | action='store_true', dest='Global.log_to_file', |
|
102 | action='store_true', dest='Global.log_to_file', | |
103 | help='Log to a file in the log directory (default is stdout)') |
|
103 | help='Log to a file in the log directory (default is stdout)') | |
104 |
|
104 | |||
105 | # Create the object used to create the subparsers. |
|
105 | # Create the object used to create the subparsers. | |
106 | subparsers = self.parser.add_subparsers( |
|
106 | subparsers = self.parser.add_subparsers( | |
107 | dest='Global.subcommand', |
|
107 | dest='Global.subcommand', | |
108 | title='ipcluster subcommands', |
|
108 | title='ipcluster subcommands', | |
109 | description= |
|
109 | description= | |
110 | """ipcluster has a variety of subcommands. The general way of |
|
110 | """ipcluster has a variety of subcommands. The general way of | |
111 | running ipcluster is 'ipclusterz <cmd> [options]'. To get help |
|
111 | running ipcluster is 'ipcluster <cmd> [options]'. To get help | |
112 | on a particular subcommand do 'ipclusterz <cmd> -h'.""" |
|
112 | on a particular subcommand do 'ipcluster <cmd> -h'.""" | |
113 | # help="For more help, type 'ipclusterz <cmd> -h'", |
|
113 | # help="For more help, type 'ipcluster <cmd> -h'", | |
114 | ) |
|
114 | ) | |
115 |
|
115 | |||
116 | # The "list" subcommand parser |
|
116 | # The "list" subcommand parser | |
117 | parser_list = subparsers.add_parser( |
|
117 | parser_list = subparsers.add_parser( | |
118 | 'list', |
|
118 | 'list', | |
119 | parents=[parent_parser1], |
|
119 | parents=[parent_parser1], | |
120 | argument_default=SUPPRESS, |
|
120 | argument_default=SUPPRESS, | |
121 | help="List all clusters in cwd and ipython_dir.", |
|
121 | help="List all clusters in cwd and ipython_dir.", | |
122 | description= |
|
122 | description= | |
123 | """List all available clusters, by cluster directory, that can |
|
123 | """List all available clusters, by cluster directory, that can | |
124 | be found in the current working directory or in the ipython |
|
124 | be found in the current working directory or in the ipython | |
125 | directory. Cluster directories are named using the convention |
|
125 | directory. Cluster directories are named using the convention | |
126 | 'clusterz_<profile>'.""" |
|
126 | 'cluster_<profile>'.""" | |
127 | ) |
|
127 | ) | |
128 |
|
128 | |||
129 | # The "create" subcommand parser |
|
129 | # The "create" subcommand parser | |
130 | parser_create = subparsers.add_parser( |
|
130 | parser_create = subparsers.add_parser( | |
131 | 'create', |
|
131 | 'create', | |
132 | parents=[parent_parser1, parent_parser2], |
|
132 | parents=[parent_parser1, parent_parser2], | |
133 | argument_default=SUPPRESS, |
|
133 | argument_default=SUPPRESS, | |
134 | help="Create a new cluster directory.", |
|
134 | help="Create a new cluster directory.", | |
135 | description= |
|
135 | description= | |
136 | """Create an ipython cluster directory by its profile name or |
|
136 | """Create an ipython cluster directory by its profile name or | |
137 | cluster directory path. Cluster directories contain |
|
137 | cluster directory path. Cluster directories contain | |
138 | configuration, log and security related files and are named |
|
138 | configuration, log and security related files and are named | |
139 | using the convention 'clusterz_<profile>'. By default they are |
|
139 | using the convention 'cluster_<profile>'. By default they are | |
140 | located in your ipython directory. Once created, you will |
|
140 | located in your ipython directory. Once created, you will | |
141 | probably need to edit the configuration files in the cluster |
|
141 | probably need to edit the configuration files in the cluster | |
142 | directory to configure your cluster. Most users will create a |
|
142 | directory to configure your cluster. Most users will create a | |
143 | cluster directory by profile name, |
|
143 | cluster directory by profile name, | |
144 | 'ipclusterz create -p mycluster', which will put the directory |
|
144 | 'ipcluster create -p mycluster', which will put the directory | |
145 | in '<ipython_dir>/clusterz_mycluster'. |
|
145 | in '<ipython_dir>/cluster_mycluster'. | |
146 | """ |
|
146 | """ | |
147 | ) |
|
147 | ) | |
148 | paa = parser_create.add_argument |
|
148 | paa = parser_create.add_argument | |
149 | paa('--reset-config', |
|
149 | paa('--reset-config', | |
150 | dest='Global.reset_config', action='store_true', |
|
150 | dest='Global.reset_config', action='store_true', | |
151 | help= |
|
151 | help= | |
152 | """Recopy the default config files to the cluster directory. |
|
152 | """Recopy the default config files to the cluster directory. | |
153 | You will lose any modifications you have made to these files.""") |
|
153 | You will lose any modifications you have made to these files.""") | |
154 |
|
154 | |||
155 | # The "start" subcommand parser |
|
155 | # The "start" subcommand parser | |
156 | parser_start = subparsers.add_parser( |
|
156 | parser_start = subparsers.add_parser( | |
157 | 'start', |
|
157 | 'start', | |
158 | parents=[parent_parser1, parent_parser2], |
|
158 | parents=[parent_parser1, parent_parser2], | |
159 | argument_default=SUPPRESS, |
|
159 | argument_default=SUPPRESS, | |
160 | help="Start a cluster.", |
|
160 | help="Start a cluster.", | |
161 | description= |
|
161 | description= | |
162 | """Start an ipython cluster by its profile name or cluster |
|
162 | """Start an ipython cluster by its profile name or cluster | |
163 | directory. Cluster directories contain configuration, log and |
|
163 | directory. Cluster directories contain configuration, log and | |
164 | security related files and are named using the convention |
|
164 | security related files and are named using the convention | |
165 | 'clusterz_<profile>' and should be created using the 'create' |
|
165 | 'cluster_<profile>' and should be created using the 'create' |
166 | subcommand of 'ipcluster'. If your cluster directory is in |
|
166 | subcommand of 'ipcluster'. If your cluster directory is in | |
167 | the cwd or the ipython directory, you can simply refer to it |
|
167 | the cwd or the ipython directory, you can simply refer to it | |
168 | using its profile name, 'ipclusterz start -n 4 -p <profile>`, |
|
168 | using its profile name, 'ipcluster start -n 4 -p <profile>`, | |
169 | otherwise use the '--cluster-dir' option. |
|
169 | otherwise use the '--cluster-dir' option. | |
170 | """ |
|
170 | """ | |
171 | ) |
|
171 | ) | |
172 |
|
172 | |||
173 | paa = parser_start.add_argument |
|
173 | paa = parser_start.add_argument | |
174 | paa('-n', '--number', |
|
174 | paa('-n', '--number', | |
175 | type=int, dest='Global.n', |
|
175 | type=int, dest='Global.n', | |
176 | help='The number of engines to start.', |
|
176 | help='The number of engines to start.', | |
177 | metavar='Global.n') |
|
177 | metavar='Global.n') | |
178 | paa('--clean-logs', |
|
178 | paa('--clean-logs', | |
179 | dest='Global.clean_logs', action='store_true', |
|
179 | dest='Global.clean_logs', action='store_true', | |
180 | help='Delete old log files before starting.') |
|
180 | help='Delete old log files before starting.') | |
181 | paa('--no-clean-logs', |
|
181 | paa('--no-clean-logs', | |
182 | dest='Global.clean_logs', action='store_false', |
|
182 | dest='Global.clean_logs', action='store_false', | |
183 | help="Don't delete old log files before starting.") |
|
183 | help="Don't delete old log files before starting.") | |
184 | paa('--daemon', |
|
184 | paa('--daemon', | |
185 | dest='Global.daemonize', action='store_true', |
|
185 | dest='Global.daemonize', action='store_true', | |
186 | help='Daemonize the ipcluster program. This implies --log-to-file') |
|
186 | help='Daemonize the ipcluster program. This implies --log-to-file') | |
187 | paa('--no-daemon', |
|
187 | paa('--no-daemon', | |
188 | dest='Global.daemonize', action='store_false', |
|
188 | dest='Global.daemonize', action='store_false', | |
189 | help="Don't daemonize the ipcluster program.") |
|
189 | help="Don't daemonize the ipcluster program.") | |
190 | paa('--delay', |
|
190 | paa('--delay', | |
191 | type=float, dest='Global.delay', |
|
191 | type=float, dest='Global.delay', | |
192 | help="Specify the delay (in seconds) between starting the controller and starting the engine(s).") |
|
192 | help="Specify the delay (in seconds) between starting the controller and starting the engine(s).") | |
193 |
|
193 | |||
194 | # The "stop" subcommand parser |
|
194 | # The "stop" subcommand parser | |
195 | parser_stop = subparsers.add_parser( |
|
195 | parser_stop = subparsers.add_parser( | |
196 | 'stop', |
|
196 | 'stop', | |
197 | parents=[parent_parser1, parent_parser2], |
|
197 | parents=[parent_parser1, parent_parser2], | |
198 | argument_default=SUPPRESS, |
|
198 | argument_default=SUPPRESS, | |
199 | help="Stop a running cluster.", |
|
199 | help="Stop a running cluster.", | |
200 | description= |
|
200 | description= | |
201 | """Stop a running ipython cluster by its profile name or cluster |
|
201 | """Stop a running ipython cluster by its profile name or cluster | |
202 | directory. Cluster directories are named using the convention |
|
202 | directory. Cluster directories are named using the convention | |
203 | 'clusterz_<profile>'. If your cluster directory is in |
|
203 | 'cluster_<profile>'. If your cluster directory is in | |
204 | the cwd or the ipython directory, you can simply refer to it |
|
204 | the cwd or the ipython directory, you can simply refer to it | |
205 | using its profile name, 'ipclusterz stop -p <profile>`, otherwise |
|
205 | using its profile name, 'ipcluster stop -p <profile>`, otherwise | |
206 | use the '--cluster-dir' option. |
|
206 | use the '--cluster-dir' option. | |
207 | """ |
|
207 | """ | |
208 | ) |
|
208 | ) | |
209 | paa = parser_stop.add_argument |
|
209 | paa = parser_stop.add_argument | |
210 | paa('--signal', |
|
210 | paa('--signal', | |
211 | dest='Global.signal', type=int, |
|
211 | dest='Global.signal', type=int, | |
212 | help="The signal number to use in stopping the cluster (default=2).", |
|
212 | help="The signal number to use in stopping the cluster (default=2).", | |
213 | metavar="Global.signal") |
|
213 | metavar="Global.signal") | |
214 |
|
214 | |||
215 | # the "engines" subcommand parser |
|
215 | # the "engines" subcommand parser | |
216 | parser_engines = subparsers.add_parser( |
|
216 | parser_engines = subparsers.add_parser( | |
217 | 'engines', |
|
217 | 'engines', | |
218 | parents=[parent_parser1, parent_parser2], |
|
218 | parents=[parent_parser1, parent_parser2], | |
219 | argument_default=SUPPRESS, |
|
219 | argument_default=SUPPRESS, | |
220 | help="Attach some engines to an existing controller or cluster.", |
|
220 | help="Attach some engines to an existing controller or cluster.", | |
221 | description= |
|
221 | description= | |
222 | """Start one or more engines to connect to an existing Cluster |
|
222 | """Start one or more engines to connect to an existing Cluster | |
223 | by profile name or cluster directory. |
|
223 | by profile name or cluster directory. | |
224 | Cluster directories contain configuration, log and |
|
224 | Cluster directories contain configuration, log and | |
225 | security related files and are named using the convention |
|
225 | security related files and are named using the convention | |
226 | 'clusterz_<profile>' and should be created using the 'create' |
|
226 | 'cluster_<profile>' and should be created using the 'create' |
227 | subcommand of 'ipcluster'. If your cluster directory is in |
|
227 | subcommand of 'ipcluster'. If your cluster directory is in | |
228 | the cwd or the ipython directory, you can simply refer to it |
|
228 | the cwd or the ipython directory, you can simply refer to it | |
229 |
using its profile name, 'ipcluster |
|
229 | using its profile name, 'ipcluster engines -n 4 -p <profile>`, | |
230 | otherwise use the '--cluster-dir' option. |
|
230 | otherwise use the '--cluster-dir' option. | |
231 | """ |
|
231 | """ | |
232 | ) |
|
232 | ) | |
233 | paa = parser_engines.add_argument |
|
233 | paa = parser_engines.add_argument | |
234 | paa('-n', '--number', |
|
234 | paa('-n', '--number', | |
235 | type=int, dest='Global.n', |
|
235 | type=int, dest='Global.n', | |
236 | help='The number of engines to start.', |
|
236 | help='The number of engines to start.', | |
237 | metavar='Global.n') |
|
237 | metavar='Global.n') | |
238 | paa('--daemon', |
|
238 | paa('--daemon', | |
239 | dest='Global.daemonize', action='store_true', |
|
239 | dest='Global.daemonize', action='store_true', | |
240 | help='Daemonize the ipcluster program. This implies --log-to-file') |
|
240 | help='Daemonize the ipcluster program. This implies --log-to-file') | |
241 | paa('--no-daemon', |
|
241 | paa('--no-daemon', | |
242 | dest='Global.daemonize', action='store_false', |
|
242 | dest='Global.daemonize', action='store_false', | |
243 | help="Don't daemonize the ipcluster program.") |
|
243 | help="Don't daemonize the ipcluster program.") | |
244 |
|
244 | |||
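
(The parent-parser pattern used throughout _add_arguments, reduced to a runnable toy; stdlib argparse behaves the same as IPython.external.argparse here, and 'demo'/'--verbose' are made-up names:)

from argparse import ArgumentParser, SUPPRESS

common = ArgumentParser(add_help=False, argument_default=SUPPRESS)
common.add_argument('--verbose', action='store_true')

parser = ArgumentParser(prog='demo')
subparsers = parser.add_subparsers(dest='subcommand')
subparsers.add_parser('start', parents=[common])
subparsers.add_parser('stop', parents=[common])

print parser.parse_args(['start', '--verbose'])
# Namespace(subcommand='start', verbose=True)
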
245 | #----------------------------------------------------------------------------- |
|
245 | #----------------------------------------------------------------------------- | |
246 | # Main application |
|
246 | # Main application | |
247 | #----------------------------------------------------------------------------- |
|
247 | #----------------------------------------------------------------------------- | |
248 |
|
248 | |||
249 |
|
249 | |||
250 | class IPClusterApp(ApplicationWithClusterDir): |
|
250 | class IPClusterApp(ApplicationWithClusterDir): | |
251 |
|
251 | |||
252 | name = u'ipclusterz' |
|
252 | name = u'ipcluster' | |
253 | description = _description |
|
253 | description = _description | |
254 | usage = None |
|
254 | usage = None | |
255 | command_line_loader = IPClusterAppConfigLoader |
|
255 | command_line_loader = IPClusterAppConfigLoader | |
256 | default_config_file_name = default_config_file_name |
|
256 | default_config_file_name = default_config_file_name | |
257 | default_log_level = logging.INFO |
|
257 | default_log_level = logging.INFO | |
258 | auto_create_cluster_dir = False |
|
258 | auto_create_cluster_dir = False | |
259 |
|
259 | |||
260 | def create_default_config(self): |
|
260 | def create_default_config(self): | |
261 | super(IPClusterApp, self).create_default_config() |
|
261 | super(IPClusterApp, self).create_default_config() | |
262 | self.default_config.Global.controller_launcher = \ |
|
262 | self.default_config.Global.controller_launcher = \ | |
263 | 'IPython.parallel.launcher.LocalControllerLauncher' |
|
263 | 'IPython.parallel.launcher.LocalControllerLauncher' | |
264 | self.default_config.Global.engine_launcher = \ |
|
264 | self.default_config.Global.engine_launcher = \ | |
265 | 'IPython.parallel.launcher.LocalEngineSetLauncher' |
|
265 | 'IPython.parallel.launcher.LocalEngineSetLauncher' | |
266 | self.default_config.Global.n = 2 |
|
266 | self.default_config.Global.n = 2 | |
267 | self.default_config.Global.delay = 2 |
|
267 | self.default_config.Global.delay = 2 | |
268 | self.default_config.Global.reset_config = False |
|
268 | self.default_config.Global.reset_config = False | |
269 | self.default_config.Global.clean_logs = True |
|
269 | self.default_config.Global.clean_logs = True | |
270 | self.default_config.Global.signal = signal.SIGINT |
|
270 | self.default_config.Global.signal = signal.SIGINT | |
271 | self.default_config.Global.daemonize = False |
|
271 | self.default_config.Global.daemonize = False | |
272 |
|
272 | |||
273 | def find_resources(self): |
|
273 | def find_resources(self): | |
274 | subcommand = self.command_line_config.Global.subcommand |
|
274 | subcommand = self.command_line_config.Global.subcommand | |
275 | if subcommand=='list': |
|
275 | if subcommand=='list': | |
276 | self.list_cluster_dirs() |
|
276 | self.list_cluster_dirs() | |
277 | # Exit immediately because there is nothing left to do. |
|
277 | # Exit immediately because there is nothing left to do. | |
278 | self.exit() |
|
278 | self.exit() | |
279 | elif subcommand=='create': |
|
279 | elif subcommand=='create': | |
280 | self.auto_create_cluster_dir = True |
|
280 | self.auto_create_cluster_dir = True | |
281 | super(IPClusterApp, self).find_resources() |
|
281 | super(IPClusterApp, self).find_resources() | |
282 | elif subcommand=='start' or subcommand=='stop': |
|
282 | elif subcommand=='start' or subcommand=='stop': | |
283 | self.auto_create_cluster_dir = True |
|
283 | self.auto_create_cluster_dir = True | |
284 | try: |
|
284 | try: | |
285 | super(IPClusterApp, self).find_resources() |
|
285 | super(IPClusterApp, self).find_resources() | |
286 | except ClusterDirError: |
|
286 | except ClusterDirError: | |
287 | raise ClusterDirError( |
|
287 | raise ClusterDirError( | |
288 | "Could not find a cluster directory. A cluster dir must " |
|
288 | "Could not find a cluster directory. A cluster dir must " | |
289 | "be created before running 'ipclusterz start'. Do " |
|
289 | "be created before running 'ipcluster start'. Do " | |
290 | "'ipclusterz create -h' or 'ipclusterz list -h' for more " |
|
290 | "'ipcluster create -h' or 'ipcluster list -h' for more " | |
291 | "information about creating and listing cluster dirs." |
|
291 | "information about creating and listing cluster dirs." | |
292 | ) |
|
292 | ) | |
293 | elif subcommand=='engines': |
|
293 | elif subcommand=='engines': | |
294 | self.auto_create_cluster_dir = False |
|
294 | self.auto_create_cluster_dir = False | |
295 | try: |
|
295 | try: | |
296 | super(IPClusterApp, self).find_resources() |
|
296 | super(IPClusterApp, self).find_resources() | |
297 | except ClusterDirError: |
|
297 | except ClusterDirError: | |
298 | raise ClusterDirError( |
|
298 | raise ClusterDirError( | |
299 | "Could not find a cluster directory. A cluster dir must " |
|
299 | "Could not find a cluster directory. A cluster dir must " | |
300 | "be created before running 'ipclusterz start'. Do " |
|
300 | "be created before running 'ipcluster start'. Do " | |
301 | "'ipclusterz create -h' or 'ipclusterz list -h' for more " |
|
301 | "'ipcluster create -h' or 'ipcluster list -h' for more " | |
302 | "information about creating and listing cluster dirs." |
|
302 | "information about creating and listing cluster dirs." | |
303 | ) |
|
303 | ) | |
304 |
|
304 | |||
305 | def list_cluster_dirs(self): |
|
305 | def list_cluster_dirs(self): | |
306 | # Find the search paths |
|
306 | # Find the search paths | |
307 | cluster_dir_paths = os.environ.get('IPCLUSTER_DIR_PATH','') |
|
307 | cluster_dir_paths = os.environ.get('IPCLUSTER_DIR_PATH','') | |
308 | if cluster_dir_paths: |
|
308 | if cluster_dir_paths: | |
309 | cluster_dir_paths = cluster_dir_paths.split(':') |
|
309 | cluster_dir_paths = cluster_dir_paths.split(':') | |
310 | else: |
|
310 | else: | |
311 | cluster_dir_paths = [] |
|
311 | cluster_dir_paths = [] | |
312 | try: |
|
312 | try: | |
313 | ipython_dir = self.command_line_config.Global.ipython_dir |
|
313 | ipython_dir = self.command_line_config.Global.ipython_dir | |
314 | except AttributeError: |
|
314 | except AttributeError: | |
315 | ipython_dir = self.default_config.Global.ipython_dir |
|
315 | ipython_dir = self.default_config.Global.ipython_dir | |
316 | paths = [os.getcwd(), ipython_dir] + \ |
|
316 | paths = [os.getcwd(), ipython_dir] + \ | |
317 | cluster_dir_paths |
|
317 | cluster_dir_paths | |
318 | paths = list(set(paths)) |
|
318 | paths = list(set(paths)) | |
319 |
|
319 | |||
320 | self.log.info('Searching for cluster dirs in paths: %r' % paths) |
|
320 | self.log.info('Searching for cluster dirs in paths: %r' % paths) | |
321 | for path in paths: |
|
321 | for path in paths: | |
322 | files = os.listdir(path) |
|
322 | files = os.listdir(path) | |
323 | for f in files: |
|
323 | for f in files: | |
324 | full_path = os.path.join(path, f) |
|
324 | full_path = os.path.join(path, f) | |
325 | if os.path.isdir(full_path) and f.startswith('clusterz_'): |
|
325 | if os.path.isdir(full_path) and f.startswith('cluster_'): | |
326 | profile = full_path.split('_')[-1] |
|
326 | profile = full_path.split('_')[-1] | |
327 | start_cmd = 'ipclusterz start -p %s -n 4' % profile |
|
327 | start_cmd = 'ipcluster start -p %s -n 4' % profile | |
328 | print start_cmd + " ==> " + full_path |
|
328 | print start_cmd + " ==> " + full_path | |
329 |
|
329 | |||
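A note on the lookup above: list_cluster_dirs merges the current working
directory, the ipython directory, and any colon-separated roots taken from
the IPCLUSTER_DIR_PATH environment variable, then reports every
cluster_<profile> directory it finds. A minimal stdlib-only sketch of the
same search (the helper names here are illustrative, not part of the
codebase):

    import os

    def cluster_dir_search_paths(ipython_dir):
        # Colon-separated extra roots, as in list_cluster_dirs above.
        extra = os.environ.get('IPCLUSTER_DIR_PATH', '')
        paths = [os.getcwd(), ipython_dir] + (extra.split(':') if extra else [])
        return list(set(paths))  # de-duplicate; order is not preserved

    def find_cluster_dirs(ipython_dir):
        # Yield (profile, path) for every cluster_<profile> dir found.
        for path in cluster_dir_search_paths(ipython_dir):
            if not os.path.isdir(path):
                continue  # skip stale entries instead of raising
            for f in os.listdir(path):
                full_path = os.path.join(path, f)
                if os.path.isdir(full_path) and f.startswith('cluster_'):
                    yield f.split('_', 1)[-1], full_path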
330 | def pre_construct(self): |
|
330 | def pre_construct(self): | |
331 | # IPClusterApp.pre_construct() is where we cd to the working directory. |
|
331 | # IPClusterApp.pre_construct() is where we cd to the working directory. | |
332 | super(IPClusterApp, self).pre_construct() |
|
332 | super(IPClusterApp, self).pre_construct() | |
333 | config = self.master_config |
|
333 | config = self.master_config | |
334 | try: |
|
334 | try: | |
335 | daemon = config.Global.daemonize |
|
335 | daemon = config.Global.daemonize | |
336 | if daemon: |
|
336 | if daemon: | |
337 | config.Global.log_to_file = True |
|
337 | config.Global.log_to_file = True | |
338 | except AttributeError: |
|
338 | except AttributeError: | |
339 | pass |
|
339 | pass | |
340 |
|
340 | |||
341 | def construct(self): |
|
341 | def construct(self): | |
342 | config = self.master_config |
|
342 | config = self.master_config | |
343 | subcmd = config.Global.subcommand |
|
343 | subcmd = config.Global.subcommand | |
344 | reset = config.Global.reset_config |
|
344 | reset = config.Global.reset_config | |
345 | if subcmd == 'list': |
|
345 | if subcmd == 'list': | |
346 | return |
|
346 | return | |
347 | if subcmd == 'create': |
|
347 | if subcmd == 'create': | |
348 | self.log.info('Copying default config files to cluster directory ' |
|
348 | self.log.info('Copying default config files to cluster directory ' | |
349 | '[overwrite=%r]' % (reset,)) |
|
349 | '[overwrite=%r]' % (reset,)) | |
350 | self.cluster_dir_obj.copy_all_config_files(overwrite=reset) |
|
350 | self.cluster_dir_obj.copy_all_config_files(overwrite=reset) | |
351 | if subcmd =='start': |
|
351 | if subcmd =='start': | |
352 | self.cluster_dir_obj.copy_all_config_files(overwrite=False) |
|
352 | self.cluster_dir_obj.copy_all_config_files(overwrite=False) | |
353 | self.start_logging() |
|
353 | self.start_logging() | |
354 | self.loop = ioloop.IOLoop.instance() |
|
354 | self.loop = ioloop.IOLoop.instance() | |
355 | # reactor.callWhenRunning(self.start_launchers) |
|
355 | # reactor.callWhenRunning(self.start_launchers) | |
356 | dc = ioloop.DelayedCallback(self.start_launchers, 0, self.loop) |
|
356 | dc = ioloop.DelayedCallback(self.start_launchers, 0, self.loop) | |
357 | dc.start() |
|
357 | dc.start() | |
358 | if subcmd == 'engines': |
|
358 | if subcmd == 'engines': | |
359 | self.start_logging() |
|
359 | self.start_logging() | |
360 | self.loop = ioloop.IOLoop.instance() |
|
360 | self.loop = ioloop.IOLoop.instance() | |
361 | # reactor.callWhenRunning(self.start_launchers) |
|
361 | # reactor.callWhenRunning(self.start_launchers) | |
362 | engine_only = lambda : self.start_launchers(controller=False) |
|
362 | engine_only = lambda : self.start_launchers(controller=False) | |
363 | dc = ioloop.DelayedCallback(engine_only, 0, self.loop) |
|
363 | dc = ioloop.DelayedCallback(engine_only, 0, self.loop) | |
364 | dc.start() |
|
364 | dc.start() | |
365 |
|
365 | |||
366 | def start_launchers(self, controller=True): |
|
366 | def start_launchers(self, controller=True): | |
367 | config = self.master_config |
|
367 | config = self.master_config | |
368 |
|
368 | |||
369 | # Create the launchers. In both cases, we set the work_dir of |
|
369 | # Create the launchers. In both cases, we set the work_dir of | |
370 | # the launcher to the cluster_dir. This is where the launcher's |
|
370 | # the launcher to the cluster_dir. This is where the launcher's | |
371 | # subprocesses will be launched. It is not where the controller |
|
371 | # subprocesses will be launched. It is not where the controller | |
372 | # and engine will be launched. |
|
372 | # and engine will be launched. | |
373 | if controller: |
|
373 | if controller: | |
374 | cl_class = import_item(config.Global.controller_launcher) |
|
374 | cl_class = import_item(config.Global.controller_launcher) | |
375 | self.controller_launcher = cl_class( |
|
375 | self.controller_launcher = cl_class( | |
376 | work_dir=self.cluster_dir, config=config, |
|
376 | work_dir=self.cluster_dir, config=config, | |
377 | logname=self.log.name |
|
377 | logname=self.log.name | |
378 | ) |
|
378 | ) | |
379 | # Set up observers for stopping. If the controller dies, shut |
|
379 | # Set up observers for stopping. If the controller dies, shut | |
380 | # everything down as that will be completely fatal for the engines. |
|
380 | # everything down as that will be completely fatal for the engines. | |
381 | self.controller_launcher.on_stop(self.stop_launchers) |
|
381 | self.controller_launcher.on_stop(self.stop_launchers) | |
382 | # But, we don't monitor the stopping of engines. An engine dying |
|
382 | # But, we don't monitor the stopping of engines. An engine dying | |
383 | # is just fine and in principle a user could start a new engine. |
|
383 | # is just fine and in principle a user could start a new engine. | |
384 | # Also, if we did monitor engine stopping, it is difficult to |
|
384 | # Also, if we did monitor engine stopping, it is difficult to | |
385 | # know what to do when only some engines die. Currently, the |
|
385 | # know what to do when only some engines die. Currently, the | |
386 | # observing of engine stopping is inconsistent. Some launchers |
|
386 | # observing of engine stopping is inconsistent. Some launchers | |
387 | # might trigger on a single engine stopping, others wait until |
|
387 | # might trigger on a single engine stopping, others wait until | |
388 | # all stop. TODO: think more about how to handle this. |
|
388 | # all stop. TODO: think more about how to handle this. | |
389 | else: |
|
389 | else: | |
390 | self.controller_launcher = None |
|
390 | self.controller_launcher = None | |
391 |
|
391 | |||
392 | el_class = import_item(config.Global.engine_launcher) |
|
392 | el_class = import_item(config.Global.engine_launcher) | |
393 | self.engine_launcher = el_class( |
|
393 | self.engine_launcher = el_class( | |
394 | work_dir=self.cluster_dir, config=config, logname=self.log.name |
|
394 | work_dir=self.cluster_dir, config=config, logname=self.log.name | |
395 | ) |
|
395 | ) | |
396 |
|
396 | |||
397 | # Setup signals |
|
397 | # Setup signals | |
398 | signal.signal(signal.SIGINT, self.sigint_handler) |
|
398 | signal.signal(signal.SIGINT, self.sigint_handler) | |
399 |
|
399 | |||
400 | # Start the controller and engines |
|
400 | # Start the controller and engines | |
401 | self._stopping = False # Make sure stop_launchers is not called 2x. |
|
401 | self._stopping = False # Make sure stop_launchers is not called 2x. | |
402 | if controller: |
|
402 | if controller: | |
403 | self.start_controller() |
|
403 | self.start_controller() | |
404 | dc = ioloop.DelayedCallback(self.start_engines, 1000*config.Global.delay*controller, self.loop) |
|
404 | dc = ioloop.DelayedCallback(self.start_engines, 1000*config.Global.delay*controller, self.loop) | |
405 | dc.start() |
|
405 | dc.start() | |
406 | self.startup_message() |
|
406 | self.startup_message() | |
407 |
|
407 | |||
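The delayed callback above is what staggers engine start-up:
start_engines is scheduled config.Global.delay seconds after the
controller, and because the controller flag is a boolean (an int in
Python), multiplying by it collapses the delay to zero when only engines
are launched. A stdlib-only sketch of the same idea, outside any event
loop (the DelayedCallback above takes milliseconds, hence the 1000*):

    import threading

    def schedule_engine_start(start_engines, delay_seconds, controller=True):
        # delay_seconds * controller is the full delay when a controller
        # was started first, and 0 when launching engines only.
        timer = threading.Timer(delay_seconds * controller, start_engines)
        timer.start()
        return timer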
408 | def startup_message(self, r=None): |
|
408 | def startup_message(self, r=None): | |
409 | self.log.info("IPython cluster: started") |
|
409 | self.log.info("IPython cluster: started") | |
410 | return r |
|
410 | return r | |
411 |
|
411 | |||
412 | def start_controller(self, r=None): |
|
412 | def start_controller(self, r=None): | |
413 | # self.log.info("In start_controller") |
|
413 | # self.log.info("In start_controller") | |
414 | config = self.master_config |
|
414 | config = self.master_config | |
415 | d = self.controller_launcher.start( |
|
415 | d = self.controller_launcher.start( | |
416 | cluster_dir=config.Global.cluster_dir |
|
416 | cluster_dir=config.Global.cluster_dir | |
417 | ) |
|
417 | ) | |
418 | return d |
|
418 | return d | |
419 |
|
419 | |||
420 | def start_engines(self, r=None): |
|
420 | def start_engines(self, r=None): | |
421 | # self.log.info("In start_engines") |
|
421 | # self.log.info("In start_engines") | |
422 | config = self.master_config |
|
422 | config = self.master_config | |
423 |
|
423 | |||
424 | d = self.engine_launcher.start( |
|
424 | d = self.engine_launcher.start( | |
425 | config.Global.n, |
|
425 | config.Global.n, | |
426 | cluster_dir=config.Global.cluster_dir |
|
426 | cluster_dir=config.Global.cluster_dir | |
427 | ) |
|
427 | ) | |
428 | return d |
|
428 | return d | |
429 |
|
429 | |||
430 | def stop_controller(self, r=None): |
|
430 | def stop_controller(self, r=None): | |
431 | # self.log.info("In stop_controller") |
|
431 | # self.log.info("In stop_controller") | |
432 | if self.controller_launcher and self.controller_launcher.running: |
|
432 | if self.controller_launcher and self.controller_launcher.running: | |
433 | return self.controller_launcher.stop() |
|
433 | return self.controller_launcher.stop() | |
434 |
|
434 | |||
435 | def stop_engines(self, r=None): |
|
435 | def stop_engines(self, r=None): | |
436 | # self.log.info("In stop_engines") |
|
436 | # self.log.info("In stop_engines") | |
437 | if self.engine_launcher.running: |
|
437 | if self.engine_launcher.running: | |
438 | d = self.engine_launcher.stop() |
|
438 | d = self.engine_launcher.stop() | |
439 | # d.addErrback(self.log_err) |
|
439 | # d.addErrback(self.log_err) | |
440 | return d |
|
440 | return d | |
441 | else: |
|
441 | else: | |
442 | return None |
|
442 | return None | |
443 |
|
443 | |||
444 | def log_err(self, f): |
|
444 | def log_err(self, f): | |
445 | self.log.error(f.getTraceback()) |
|
445 | self.log.error(f.getTraceback()) | |
446 | return None |
|
446 | return None | |
447 |
|
447 | |||
448 | def stop_launchers(self, r=None): |
|
448 | def stop_launchers(self, r=None): | |
449 | if not self._stopping: |
|
449 | if not self._stopping: | |
450 | self._stopping = True |
|
450 | self._stopping = True | |
451 | # if isinstance(r, failure.Failure): |
|
451 | # if isinstance(r, failure.Failure): | |
452 | # self.log.error('Unexpected error in ipcluster:') |
|
452 | # self.log.error('Unexpected error in ipcluster:') | |
453 | # self.log.info(r.getTraceback()) |
|
453 | # self.log.info(r.getTraceback()) | |
454 | self.log.error("IPython cluster: stopping") |
|
454 | self.log.error("IPython cluster: stopping") | |
455 | # These return deferreds. We are not doing anything with them |
|
455 | # These return deferreds. We are not doing anything with them | |
456 | # but we are holding refs to them as a reminder that they |
|
456 | # but we are holding refs to them as a reminder that they | |
457 | # do return deferreds. |
|
457 | # do return deferreds. | |
458 | d1 = self.stop_engines() |
|
458 | d1 = self.stop_engines() | |
459 | d2 = self.stop_controller() |
|
459 | d2 = self.stop_controller() | |
460 | # Wait a few seconds to let things shut down. |
|
460 | # Wait a few seconds to let things shut down. | |
461 | dc = ioloop.DelayedCallback(self.loop.stop, 4000, self.loop) |
|
461 | dc = ioloop.DelayedCallback(self.loop.stop, 4000, self.loop) | |
462 | dc.start() |
|
462 | dc.start() | |
463 | # reactor.callLater(4.0, reactor.stop) |
|
463 | # reactor.callLater(4.0, reactor.stop) | |
464 |
|
464 | |||
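Two details of the shutdown path above are worth calling out: the
_stopping flag makes stop_launchers idempotent (it can be reached from
both the controller's on_stop callback and the SIGINT handler), and the
loop is only stopped after a four-second grace period so subprocesses can
exit cleanly. A hedged, event-loop-free sketch of the same pattern:

    import threading

    class LauncherShutdown(object):
        def __init__(self, stop_loop):
            self._stopping = False
            self._stop_loop = stop_loop  # stands in for self.loop.stop

        def stop_launchers(self):
            if self._stopping:
                return               # already shutting down; do nothing
            self._stopping = True
            # ... stop engines, then the controller, as above ...
            threading.Timer(4.0, self._stop_loop).start()  # grace period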
465 | def sigint_handler(self, signum, frame): |
|
465 | def sigint_handler(self, signum, frame): | |
466 | self.stop_launchers() |
|
466 | self.stop_launchers() | |
467 |
|
467 | |||
468 | def start_logging(self): |
|
468 | def start_logging(self): | |
469 | # Remove old log files of the controller and engine |
|
469 | # Remove old log files of the controller and engine | |
470 | if self.master_config.Global.clean_logs: |
|
470 | if self.master_config.Global.clean_logs: | |
471 | log_dir = self.master_config.Global.log_dir |
|
471 | log_dir = self.master_config.Global.log_dir | |
472 | for f in os.listdir(log_dir): |
|
472 | for f in os.listdir(log_dir): | |
473 | if re.match(r'ip(engine|controller)z-\d+\.(log|err|out)',f): |
|
473 | if re.match(r'ip(engine|controller)z-\d+\.(log|err|out)',f): | |
474 | os.remove(os.path.join(log_dir, f)) |
|
474 | os.remove(os.path.join(log_dir, f)) | |
475 | # This will remove old log files for ipcluster itself |
|
475 | # This will remove old log files for ipcluster itself | |
476 | super(IPClusterApp, self).start_logging() |
|
476 | super(IPClusterApp, self).start_logging() | |
477 |
|
477 | |||
478 | def start_app(self): |
|
478 | def start_app(self): | |
479 | """Start the application, depending on what subcommand is used.""" |
|
479 | """Start the application, depending on what subcommand is used.""" | |
480 | subcmd = self.master_config.Global.subcommand |
|
480 | subcmd = self.master_config.Global.subcommand | |
481 | if subcmd=='create' or subcmd=='list': |
|
481 | if subcmd=='create' or subcmd=='list': | |
482 | return |
|
482 | return | |
483 | elif subcmd=='start': |
|
483 | elif subcmd=='start': | |
484 | self.start_app_start() |
|
484 | self.start_app_start() | |
485 | elif subcmd=='stop': |
|
485 | elif subcmd=='stop': | |
486 | self.start_app_stop() |
|
486 | self.start_app_stop() | |
487 | elif subcmd=='engines': |
|
487 | elif subcmd=='engines': | |
488 | self.start_app_engines() |
|
488 | self.start_app_engines() | |
489 |
|
489 | |||
490 | def start_app_start(self): |
|
490 | def start_app_start(self): | |
491 | """Start the app for the start subcommand.""" |
|
491 | """Start the app for the start subcommand.""" | |
492 | config = self.master_config |
|
492 | config = self.master_config | |
493 | # First see if the cluster is already running |
|
493 | # First see if the cluster is already running | |
494 | try: |
|
494 | try: | |
495 | pid = self.get_pid_from_file() |
|
495 | pid = self.get_pid_from_file() | |
496 | except PIDFileError: |
|
496 | except PIDFileError: | |
497 | pass |
|
497 | pass | |
498 | else: |
|
498 | else: | |
499 | self.log.critical( |
|
499 | self.log.critical( | |
500 | 'Cluster is already running with [pid=%s]. ' |
|
500 | 'Cluster is already running with [pid=%s]. ' | |
501 | 'use "ipclusterz stop" to stop the cluster.' % pid |
|
501 | 'use "ipcluster stop" to stop the cluster.' % pid | |
502 | ) |
|
502 | ) | |
503 | # Here I exit with an unusual exit status that other processes |
|
503 | # Here I exit with an unusual exit status that other processes | |
504 | # can watch for to learn how I exited. |
|
504 | # can watch for to learn how I exited. | |
505 | self.exit(ALREADY_STARTED) |
|
505 | self.exit(ALREADY_STARTED) | |
506 |
|
506 | |||
507 | # Now log and daemonize |
|
507 | # Now log and daemonize | |
508 | self.log.info( |
|
508 | self.log.info( | |
509 | 'Starting ipclusterz with [daemon=%r]' % config.Global.daemonize |
|
509 | 'Starting ipcluster with [daemon=%r]' % config.Global.daemonize | |
510 | ) |
|
510 | ) | |
511 | # TODO: Get daemonize working on Windows or as a Windows Server. |
|
511 | # TODO: Get daemonize working on Windows or as a Windows Server. | |
512 | if config.Global.daemonize: |
|
512 | if config.Global.daemonize: | |
513 | if os.name=='posix': |
|
513 | if os.name=='posix': | |
514 | from twisted.scripts._twistd_unix import daemonize |
|
514 | from twisted.scripts._twistd_unix import daemonize | |
515 | daemonize() |
|
515 | daemonize() | |
516 |
|
516 | |||
517 | # Now write the new pid file AFTER our new forked pid is active. |
|
517 | # Now write the new pid file AFTER our new forked pid is active. | |
518 | self.write_pid_file() |
|
518 | self.write_pid_file() | |
519 | try: |
|
519 | try: | |
520 | self.loop.start() |
|
520 | self.loop.start() | |
521 | except KeyboardInterrupt: |
|
521 | except KeyboardInterrupt: | |
522 | pass |
|
522 | pass | |
523 | except zmq.ZMQError as e: |
|
523 | except zmq.ZMQError as e: | |
524 | if e.errno == errno.EINTR: |
|
524 | if e.errno == errno.EINTR: | |
525 | pass |
|
525 | pass | |
526 | else: |
|
526 | else: | |
527 | raise |
|
527 | raise | |
528 | self.remove_pid_file() |
|
528 | self.remove_pid_file() | |
529 |
|
529 | |||
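The pid-file dance above is the whole "is it already running?" check: a
readable pid file is taken as proof of a live cluster, and the process
exits with the distinctive ALREADY_STARTED status so wrapping scripts can
tell this apart from other failures. A sketch of that guard with
hypothetical stand-ins for the real helpers (get_pid_from_file and
PIDFileError live elsewhere in the codebase; the constant's value here is
illustrative):

    import os

    ALREADY_STARTED = 10  # illustrative value only

    class PIDFileError(Exception):
        pass

    def get_pid_from_file(pid_file):
        # Hypothetical stand-in for the real helper.
        if not os.path.isfile(pid_file):
            raise PIDFileError(pid_file)
        with open(pid_file) as f:
            return int(f.read().strip())

    def ensure_not_running(pid_file):
        try:
            get_pid_from_file(pid_file)
        except PIDFileError:
            return                         # no pid file: safe to start
        raise SystemExit(ALREADY_STARTED)  # a cluster is already running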
530 | def start_app_engines(self): |
|
530 | def start_app_engines(self): | |
531 | """Start the app for the start subcommand.""" |
|
531 | """Start the app for the start subcommand.""" | |
532 | config = self.master_config |
|
532 | config = self.master_config | |
533 | # First see if the cluster is already running |
|
533 | # First see if the cluster is already running | |
534 |
|
534 | |||
535 | # Now log and daemonize |
|
535 | # Now log and daemonize | |
536 | self.log.info( |
|
536 | self.log.info( | |
537 | 'Starting engines with [daemon=%r]' % config.Global.daemonize |
|
537 | 'Starting engines with [daemon=%r]' % config.Global.daemonize | |
538 | ) |
|
538 | ) | |
539 | # TODO: Get daemonize working on Windows or as a Windows Server. |
|
539 | # TODO: Get daemonize working on Windows or as a Windows Server. | |
540 | if config.Global.daemonize: |
|
540 | if config.Global.daemonize: | |
541 | if os.name=='posix': |
|
541 | if os.name=='posix': | |
542 | from twisted.scripts._twistd_unix import daemonize |
|
542 | from twisted.scripts._twistd_unix import daemonize | |
543 | daemonize() |
|
543 | daemonize() | |
544 |
|
544 | |||
545 | # Now write the new pid file AFTER our new forked pid is active. |
|
545 | # Now write the new pid file AFTER our new forked pid is active. | |
546 | # self.write_pid_file() |
|
546 | # self.write_pid_file() | |
547 | try: |
|
547 | try: | |
548 | self.loop.start() |
|
548 | self.loop.start() | |
549 | except KeyboardInterrupt: |
|
549 | except KeyboardInterrupt: | |
550 | pass |
|
550 | pass | |
551 | except zmq.ZMQError as e: |
|
551 | except zmq.ZMQError as e: | |
552 | if e.errno == errno.EINTR: |
|
552 | if e.errno == errno.EINTR: | |
553 | pass |
|
553 | pass | |
554 | else: |
|
554 | else: | |
555 | raise |
|
555 | raise | |
556 | # self.remove_pid_file() |
|
556 | # self.remove_pid_file() | |
557 |
|
557 | |||
558 | def start_app_stop(self): |
|
558 | def start_app_stop(self): | |
559 | """Start the app for the stop subcommand.""" |
|
559 | """Start the app for the stop subcommand.""" | |
560 | config = self.master_config |
|
560 | config = self.master_config | |
561 | try: |
|
561 | try: | |
562 | pid = self.get_pid_from_file() |
|
562 | pid = self.get_pid_from_file() | |
563 | except PIDFileError: |
|
563 | except PIDFileError: | |
564 | self.log.critical( |
|
564 | self.log.critical( | |
565 | 'Problem reading pid file, cluster is probably not running.' |
|
565 | 'Problem reading pid file, cluster is probably not running.' | |
566 | ) |
|
566 | ) | |
567 | # Here I exit with an unusual exit status that other processes |
|
567 | # Here I exit with an unusual exit status that other processes | |
568 | # can watch for to learn how I exited. |
|
568 | # can watch for to learn how I exited. | |
569 | self.exit(ALREADY_STOPPED) |
|
569 | self.exit(ALREADY_STOPPED) | |
570 | else: |
|
570 | else: | |
571 | if os.name=='posix': |
|
571 | if os.name=='posix': | |
572 | sig = config.Global.signal |
|
572 | sig = config.Global.signal | |
573 | self.log.info( |
|
573 | self.log.info( | |
574 | "Stopping cluster [pid=%r] with [signal=%r]" % (pid, sig) |
|
574 | "Stopping cluster [pid=%r] with [signal=%r]" % (pid, sig) | |
575 | ) |
|
575 | ) | |
576 | os.kill(pid, sig) |
|
576 | os.kill(pid, sig) | |
577 | elif os.name=='nt': |
|
577 | elif os.name=='nt': | |
578 | # As of right now, we don't support daemonize on Windows, so |
|
578 | # As of right now, we don't support daemonize on Windows, so | |
579 | # stop will not do anything. Minimally, it should clean up the |
|
579 | # stop will not do anything. Minimally, it should clean up the | |
580 | # old .pid files. |
|
580 | # old .pid files. | |
581 | self.remove_pid_file() |
|
581 | self.remove_pid_file() | |
582 |
|
582 | |||
583 |
|
583 | |||
584 | def launch_new_instance(): |
|
584 | def launch_new_instance(): | |
585 | """Create and run the IPython cluster.""" |
|
585 | """Create and run the IPython cluster.""" | |
586 | app = IPClusterApp() |
|
586 | app = IPClusterApp() | |
587 | app.start() |
|
587 | app.start() | |
588 |
|
588 | |||
589 |
|
589 | |||
590 | if __name__ == '__main__': |
|
590 | if __name__ == '__main__': | |
591 | launch_new_instance() |
|
591 | launch_new_instance() | |
592 |
|
592 |
@@ -1,431 +1,431 b'' | |||||
1 | #!/usr/bin/env python |
|
1 | #!/usr/bin/env python | |
2 | # encoding: utf-8 |
|
2 | # encoding: utf-8 | |
3 | """ |
|
3 | """ | |
4 | The IPython controller application. |
|
4 | The IPython controller application. | |
5 | """ |
|
5 | """ | |
6 |
|
6 | |||
7 | #----------------------------------------------------------------------------- |
|
7 | #----------------------------------------------------------------------------- | |
8 | # Copyright (C) 2008-2009 The IPython Development Team |
|
8 | # Copyright (C) 2008-2009 The IPython Development Team | |
9 | # |
|
9 | # | |
10 | # Distributed under the terms of the BSD License. The full license is in |
|
10 | # Distributed under the terms of the BSD License. The full license is in | |
11 | # the file COPYING, distributed as part of this software. |
|
11 | # the file COPYING, distributed as part of this software. | |
12 | #----------------------------------------------------------------------------- |
|
12 | #----------------------------------------------------------------------------- | |
13 |
|
13 | |||
14 | #----------------------------------------------------------------------------- |
|
14 | #----------------------------------------------------------------------------- | |
15 | # Imports |
|
15 | # Imports | |
16 | #----------------------------------------------------------------------------- |
|
16 | #----------------------------------------------------------------------------- | |
17 |
|
17 | |||
18 | from __future__ import with_statement |
|
18 | from __future__ import with_statement | |
19 |
|
19 | |||
20 | import copy |
|
20 | import copy | |
21 | import os |
|
21 | import os | |
22 | import logging |
|
22 | import logging | |
23 | import socket |
|
23 | import socket | |
24 | import stat |
|
24 | import stat | |
25 | import sys |
|
25 | import sys | |
26 | import uuid |
|
26 | import uuid | |
27 |
|
27 | |||
28 | import zmq |
|
28 | import zmq | |
29 | from zmq.log.handlers import PUBHandler |
|
29 | from zmq.log.handlers import PUBHandler | |
30 | from zmq.utils import jsonapi as json |
|
30 | from zmq.utils import jsonapi as json | |
31 |
|
31 | |||
32 | from IPython.config.loader import Config |
|
32 | from IPython.config.loader import Config | |
33 | from IPython.parallel import factory |
|
33 | from IPython.parallel import factory | |
34 | from IPython.parallel.controller import ControllerFactory |
|
34 | from IPython.parallel.controller import ControllerFactory | |
35 | from IPython.parallel.clusterdir import ( |
|
35 | from IPython.parallel.clusterdir import ( | |
36 | ApplicationWithClusterDir, |
|
36 | ApplicationWithClusterDir, | |
37 | ClusterDirConfigLoader |
|
37 | ClusterDirConfigLoader | |
38 | ) |
|
38 | ) | |
39 | from IPython.parallel.util import disambiguate_ip_address, split_url |
|
39 | from IPython.parallel.util import disambiguate_ip_address, split_url | |
40 | # from IPython.kernel.fcutil import FCServiceFactory, FURLError |
|
40 | # from IPython.kernel.fcutil import FCServiceFactory, FURLError | |
41 | from IPython.utils.traitlets import Instance, Unicode |
|
41 | from IPython.utils.traitlets import Instance, Unicode | |
42 |
|
42 | |||
43 |
|
43 | |||
44 |
|
44 | |||
45 | #----------------------------------------------------------------------------- |
|
45 | #----------------------------------------------------------------------------- | |
46 | # Module level variables |
|
46 | # Module level variables | |
47 | #----------------------------------------------------------------------------- |
|
47 | #----------------------------------------------------------------------------- | |
48 |
|
48 | |||
49 |
|
49 | |||
50 | #: The default config file name for this application |
|
50 | #: The default config file name for this application | |
51 | default_config_file_name = u'ipcontrollerz_config.py' |
|
51 | default_config_file_name = u'ipcontroller_config.py' | |
52 |
|
52 | |||
53 |
|
53 | |||
54 | _description = """Start the IPython controller for parallel computing. |
|
54 | _description = """Start the IPython controller for parallel computing. | |
55 |
|
55 | |||
56 | The IPython controller provides a gateway between the IPython engines and |
|
56 | The IPython controller provides a gateway between the IPython engines and | |
57 | clients. The controller needs to be started before the engines and can be |
|
57 | clients. The controller needs to be started before the engines and can be | |
58 | configured using command line options or using a cluster directory. Cluster |
|
58 | configured using command line options or using a cluster directory. Cluster | |
59 | directories contain config, log and security files and are usually located in |
|
59 | directories contain config, log and security files and are usually located in | |
60 | your ipython directory and named as "clusterz_<profile>". See the --profile |
|
60 | your ipython directory and named as "cluster_<profile>". See the --profile | |
61 | and --cluster-dir options for details. |
|
61 | and --cluster-dir options for details. | |
62 | """ |
|
62 | """ | |
63 |
|
63 | |||
64 | #----------------------------------------------------------------------------- |
|
64 | #----------------------------------------------------------------------------- | |
65 | # Default interfaces |
|
65 | # Default interfaces | |
66 | #----------------------------------------------------------------------------- |
|
66 | #----------------------------------------------------------------------------- | |
67 |
|
67 | |||
68 | # The default client interfaces for FCClientServiceFactory.interfaces |
|
68 | # The default client interfaces for FCClientServiceFactory.interfaces | |
69 | default_client_interfaces = Config() |
|
69 | default_client_interfaces = Config() | |
70 | default_client_interfaces.Default.url_file = 'ipcontroller-client.url' |
|
70 | default_client_interfaces.Default.url_file = 'ipcontroller-client.url' | |
71 |
|
71 | |||
72 | # Make this a dict we can pass to Config.__init__ for the default |
|
72 | # Make this a dict we can pass to Config.__init__ for the default | |
73 | default_client_interfaces = dict(copy.deepcopy(default_client_interfaces.items())) |
|
73 | default_client_interfaces = dict(copy.deepcopy(default_client_interfaces.items())) | |
74 |
|
74 | |||
75 |
|
75 | |||
76 |
|
76 | |||
77 | # The default engine interfaces for FCEngineServiceFactory.interfaces |
|
77 | # The default engine interfaces for FCEngineServiceFactory.interfaces | |
78 | default_engine_interfaces = Config() |
|
78 | default_engine_interfaces = Config() | |
79 | default_engine_interfaces.Default.url_file = u'ipcontroller-engine.url' |
|
79 | default_engine_interfaces.Default.url_file = u'ipcontroller-engine.url' | |
80 |
|
80 | |||
81 | # Make this a dict we can pass to Config.__init__ for the default |
|
81 | # Make this a dict we can pass to Config.__init__ for the default | |
82 | default_engine_interfaces = dict(copy.deepcopy(default_engine_interfaces.items())) |
|
82 | default_engine_interfaces = dict(copy.deepcopy(default_engine_interfaces.items())) | |
83 |
|
83 | |||
84 |
|
84 | |||
85 | #----------------------------------------------------------------------------- |
|
85 | #----------------------------------------------------------------------------- | |
86 | # Service factories |
|
86 | # Service factories | |
87 | #----------------------------------------------------------------------------- |
|
87 | #----------------------------------------------------------------------------- | |
88 |
|
88 | |||
89 | # |
|
89 | # | |
90 | # class FCClientServiceFactory(FCServiceFactory): |
|
90 | # class FCClientServiceFactory(FCServiceFactory): | |
91 | # """A Foolscap implementation of the client services.""" |
|
91 | # """A Foolscap implementation of the client services.""" | |
92 | # |
|
92 | # | |
93 | # cert_file = Unicode(u'ipcontroller-client.pem', config=True) |
|
93 | # cert_file = Unicode(u'ipcontroller-client.pem', config=True) | |
94 | # interfaces = Instance(klass=Config, kw=default_client_interfaces, |
|
94 | # interfaces = Instance(klass=Config, kw=default_client_interfaces, | |
95 | # allow_none=False, config=True) |
|
95 | # allow_none=False, config=True) | |
96 | # |
|
96 | # | |
97 | # |
|
97 | # | |
98 | # class FCEngineServiceFactory(FCServiceFactory): |
|
98 | # class FCEngineServiceFactory(FCServiceFactory): | |
99 | # """A Foolscap implementation of the engine services.""" |
|
99 | # """A Foolscap implementation of the engine services.""" | |
100 | # |
|
100 | # | |
101 | # cert_file = Unicode(u'ipcontroller-engine.pem', config=True) |
|
101 | # cert_file = Unicode(u'ipcontroller-engine.pem', config=True) | |
102 | # interfaces = Instance(klass=dict, kw=default_engine_interfaces, |
|
102 | # interfaces = Instance(klass=dict, kw=default_engine_interfaces, | |
103 | # allow_none=False, config=True) |
|
103 | # allow_none=False, config=True) | |
104 | # |
|
104 | # | |
105 |
|
105 | |||
106 | #----------------------------------------------------------------------------- |
|
106 | #----------------------------------------------------------------------------- | |
107 | # Command line options |
|
107 | # Command line options | |
108 | #----------------------------------------------------------------------------- |
|
108 | #----------------------------------------------------------------------------- | |
109 |
|
109 | |||
110 |
|
110 | |||
111 | class IPControllerAppConfigLoader(ClusterDirConfigLoader): |
|
111 | class IPControllerAppConfigLoader(ClusterDirConfigLoader): | |
112 |
|
112 | |||
113 | def _add_arguments(self): |
|
113 | def _add_arguments(self): | |
114 | super(IPControllerAppConfigLoader, self)._add_arguments() |
|
114 | super(IPControllerAppConfigLoader, self)._add_arguments() | |
115 | paa = self.parser.add_argument |
|
115 | paa = self.parser.add_argument | |
116 |
|
116 | |||
117 | ## Hub Config: |
|
117 | ## Hub Config: | |
118 | paa('--mongodb', |
|
118 | paa('--mongodb', | |
119 | dest='HubFactory.db_class', action='store_const', |
|
119 | dest='HubFactory.db_class', action='store_const', | |
120 | const='IPython.parallel.mongodb.MongoDB', |
|
120 | const='IPython.parallel.mongodb.MongoDB', | |
121 | help='Use MongoDB for task storage [default: in-memory]') |
|
121 | help='Use MongoDB for task storage [default: in-memory]') | |
122 | paa('--sqlite', |
|
122 | paa('--sqlite', | |
123 | dest='HubFactory.db_class', action='store_const', |
|
123 | dest='HubFactory.db_class', action='store_const', | |
124 | const='IPython.parallel.sqlitedb.SQLiteDB', |
|
124 | const='IPython.parallel.sqlitedb.SQLiteDB', | |
125 | help='Use SQLite3 for DB task storage [default: in-memory]') |
|
125 | help='Use SQLite3 for DB task storage [default: in-memory]') | |
126 | paa('--hb', |
|
126 | paa('--hb', | |
127 | type=int, dest='HubFactory.hb', nargs=2, |
|
127 | type=int, dest='HubFactory.hb', nargs=2, | |
128 | help='The (2) ports the Hub\'s Heartmonitor will use for the heartbeat ' |
|
128 | help='The (2) ports the Hub\'s Heartmonitor will use for the heartbeat ' | |
129 | 'connections [default: random]', |
|
129 | 'connections [default: random]', | |
130 | metavar='Hub.hb_ports') |
|
130 | metavar='Hub.hb_ports') | |
131 | paa('--ping', |
|
131 | paa('--ping', | |
132 | type=int, dest='HubFactory.ping', |
|
132 | type=int, dest='HubFactory.ping', | |
133 | help='The frequency at which the Hub pings the engines for heartbeats ' |
|
133 | help='The frequency at which the Hub pings the engines for heartbeats ' | |
134 | ' (in ms) [default: 100]', |
|
134 | ' (in ms) [default: 100]', | |
135 | metavar='Hub.ping') |
|
135 | metavar='Hub.ping') | |
136 |
|
136 | |||
137 | # Client config |
|
137 | # Client config | |
138 | paa('--client-ip', |
|
138 | paa('--client-ip', | |
139 | type=str, dest='HubFactory.client_ip', |
|
139 | type=str, dest='HubFactory.client_ip', | |
140 | help='The IP address or hostname the Hub will listen on for ' |
|
140 | help='The IP address or hostname the Hub will listen on for ' | |
141 | 'client connections. Both engine-ip and client-ip can be set simultaneously ' |
|
141 | 'client connections. Both engine-ip and client-ip can be set simultaneously ' | |
142 | 'via --ip [default: loopback]', |
|
142 | 'via --ip [default: loopback]', | |
143 | metavar='Hub.client_ip') |
|
143 | metavar='Hub.client_ip') | |
144 | paa('--client-transport', |
|
144 | paa('--client-transport', | |
145 | type=str, dest='HubFactory.client_transport', |
|
145 | type=str, dest='HubFactory.client_transport', | |
146 | help='The ZeroMQ transport the Hub will use for ' |
|
146 | help='The ZeroMQ transport the Hub will use for ' | |
147 | 'client connections. Both engine-transport and client-transport can be set simultaneously ' |
|
147 | 'client connections. Both engine-transport and client-transport can be set simultaneously ' | |
148 | 'via --transport [default: tcp]', |
|
148 | 'via --transport [default: tcp]', | |
149 | metavar='Hub.client_transport') |
|
149 | metavar='Hub.client_transport') | |
150 | paa('--query', |
|
150 | paa('--query', | |
151 | type=int, dest='HubFactory.query_port', |
|
151 | type=int, dest='HubFactory.query_port', | |
152 | help='The port on which the Hub XREP socket will listen for result queries from clients [default: random]', |
|
152 | help='The port on which the Hub XREP socket will listen for result queries from clients [default: random]', | |
153 | metavar='Hub.query_port') |
|
153 | metavar='Hub.query_port') | |
154 | paa('--notifier', |
|
154 | paa('--notifier', | |
155 | type=int, dest='HubFactory.notifier_port', |
|
155 | type=int, dest='HubFactory.notifier_port', | |
156 | help='The port on which the Hub PUB socket will listen for notification connections [default: random]', |
|
156 | help='The port on which the Hub PUB socket will listen for notification connections [default: random]', | |
157 | metavar='Hub.notifier_port') |
|
157 | metavar='Hub.notifier_port') | |
158 |
|
158 | |||
159 | # Engine config |
|
159 | # Engine config | |
160 | paa('--engine-ip', |
|
160 | paa('--engine-ip', | |
161 | type=str, dest='HubFactory.engine_ip', |
|
161 | type=str, dest='HubFactory.engine_ip', | |
162 | help='The IP address or hostname the Hub will listen on for ' |
|
162 | help='The IP address or hostname the Hub will listen on for ' | |
163 | 'engine connections. This applies to the Hub and its schedulers. ' |
|
163 | 'engine connections. This applies to the Hub and its schedulers. ' | |
164 | 'Both engine-ip and client-ip can be set simultaneously ' |
|
164 | 'Both engine-ip and client-ip can be set simultaneously ' | |
165 | 'via --ip [default: loopback]', |
|
165 | 'via --ip [default: loopback]', | |
166 | metavar='Hub.engine_ip') |
|
166 | metavar='Hub.engine_ip') | |
167 | paa('--engine-transport', |
|
167 | paa('--engine-transport', | |
168 | type=str, dest='HubFactory.engine_transport', |
|
168 | type=str, dest='HubFactory.engine_transport', | |
169 | help='The ZeroMQ transport the Hub will use for ' |
|
169 | help='The ZeroMQ transport the Hub will use for ' | |
170 | 'engine connections. Both engine-transport and client-transport can be set simultaneously ' |
|
170 | 'engine connections. Both engine-transport and client-transport can be set simultaneously ' | |
171 | 'via --transport [default: tcp]', |
|
171 | 'via --transport [default: tcp]', | |
172 | metavar='Hub.engine_transport') |
|
172 | metavar='Hub.engine_transport') | |
173 |
|
173 | |||
174 | # Scheduler config |
|
174 | # Scheduler config | |
175 | paa('--mux', |
|
175 | paa('--mux', | |
176 | type=int, dest='ControllerFactory.mux', nargs=2, |
|
176 | type=int, dest='ControllerFactory.mux', nargs=2, | |
177 | help='The (2) ports the MUX scheduler will listen on for client,engine ' |
|
177 | help='The (2) ports the MUX scheduler will listen on for client,engine ' | |
178 | 'connections, respectively [default: random]', |
|
178 | 'connections, respectively [default: random]', | |
179 | metavar='Scheduler.mux_ports') |
|
179 | metavar='Scheduler.mux_ports') | |
180 | paa('--task', |
|
180 | paa('--task', | |
181 | type=int, dest='ControllerFactory.task', nargs=2, |
|
181 | type=int, dest='ControllerFactory.task', nargs=2, | |
182 | help='The (2) ports the Task scheduler will listen on for client,engine ' |
|
182 | help='The (2) ports the Task scheduler will listen on for client,engine ' | |
183 | 'connections, respectively [default: random]', |
|
183 | 'connections, respectively [default: random]', | |
184 | metavar='Scheduler.task_ports') |
|
184 | metavar='Scheduler.task_ports') | |
185 | paa('--control', |
|
185 | paa('--control', | |
186 | type=int, dest='ControllerFactory.control', nargs=2, |
|
186 | type=int, dest='ControllerFactory.control', nargs=2, | |
187 | help='The (2) ports the Control scheduler will listen on for client,engine ' |
|
187 | help='The (2) ports the Control scheduler will listen on for client,engine ' | |
188 | 'connections, respectively [default: random]', |
|
188 | 'connections, respectively [default: random]', | |
189 | metavar='Scheduler.control_ports') |
|
189 | metavar='Scheduler.control_ports') | |
190 | paa('--iopub', |
|
190 | paa('--iopub', | |
191 | type=int, dest='ControllerFactory.iopub', nargs=2, |
|
191 | type=int, dest='ControllerFactory.iopub', nargs=2, | |
192 | help='The (2) ports the IOPub scheduler will listen on for client,engine ' |
|
192 | help='The (2) ports the IOPub scheduler will listen on for client,engine ' | |
193 | 'connections, respectively [default: random]', |
|
193 | 'connections, respectively [default: random]', | |
194 | metavar='Scheduler.iopub_ports') |
|
194 | metavar='Scheduler.iopub_ports') | |
195 |
|
195 | |||
196 | paa('--scheme', |
|
196 | paa('--scheme', | |
197 | type=str, dest='HubFactory.scheme', |
|
197 | type=str, dest='HubFactory.scheme', | |
198 | choices = ['pure', 'lru', 'plainrandom', 'weighted', 'twobin','leastload'], |
|
198 | choices = ['pure', 'lru', 'plainrandom', 'weighted', 'twobin','leastload'], | |
199 | help='select the task scheduler scheme [default: Python LRU]', |
|
199 | help='select the task scheduler scheme [default: Python LRU]', | |
200 | metavar='Scheduler.scheme') |
|
200 | metavar='Scheduler.scheme') | |
201 | paa('--usethreads', |
|
201 | paa('--usethreads', | |
202 | dest='ControllerFactory.usethreads', action="store_true", |
|
202 | dest='ControllerFactory.usethreads', action="store_true", | |
203 | help='Use threads instead of processes for the schedulers', |
|
203 | help='Use threads instead of processes for the schedulers', | |
204 | ) |
|
204 | ) | |
205 | paa('--hwm', |
|
205 | paa('--hwm', | |
206 | dest='ControllerFactory.hwm', type=int, |
|
206 | dest='ControllerFactory.hwm', type=int, | |
207 | help='specify the High Water Mark (HWM) for the downstream ' |
|
207 | help='specify the High Water Mark (HWM) for the downstream ' | |
208 | 'socket in the pure ZMQ scheduler. This is the maximum number ' |
|
208 | 'socket in the pure ZMQ scheduler. This is the maximum number ' | |
209 | 'of allowed outstanding tasks on each engine.', |
|
209 | 'of allowed outstanding tasks on each engine.', | |
210 | ) |
|
210 | ) | |
211 |
|
211 | |||
212 | ## Global config |
|
212 | ## Global config | |
213 | paa('--log-to-file', |
|
213 | paa('--log-to-file', | |
214 | action='store_true', dest='Global.log_to_file', |
|
214 | action='store_true', dest='Global.log_to_file', | |
215 | help='Log to a file in the log directory (default is stdout)') |
|
215 | help='Log to a file in the log directory (default is stdout)') | |
216 | paa('--log-url', |
|
216 | paa('--log-url', | |
217 | type=str, dest='Global.log_url', |
|
217 | type=str, dest='Global.log_url', | |
218 | help='Broadcast logs to an iploggerz process [default: disabled]') |
|
218 | help='Broadcast logs to an iploggerz process [default: disabled]') | |
219 | paa('-r','--reuse-files', |
|
219 | paa('-r','--reuse-files', | |
220 | action='store_true', dest='Global.reuse_files', |
|
220 | action='store_true', dest='Global.reuse_files', | |
221 | help='Try to reuse existing json connection files.') |
|
221 | help='Try to reuse existing json connection files.') | |
222 | paa('--no-secure', |
|
222 | paa('--no-secure', | |
223 | action='store_false', dest='Global.secure', |
|
223 | action='store_false', dest='Global.secure', | |
224 | help='Turn off execution keys (default).') |
|
224 | help='Turn off execution keys (default).') | |
225 | paa('--secure', |
|
225 | paa('--secure', | |
226 | action='store_true', dest='Global.secure', |
|
226 | action='store_true', dest='Global.secure', | |
227 | help='Turn on execution keys.') |
|
227 | help='Turn on execution keys.') | |
228 | paa('--execkey', |
|
228 | paa('--execkey', | |
229 | type=str, dest='Global.exec_key', |
|
229 | type=str, dest='Global.exec_key', | |
230 | help='path to a file containing an execution key.', |
|
230 | help='path to a file containing an execution key.', | |
231 | metavar='keyfile') |
|
231 | metavar='keyfile') | |
232 | paa('--ssh', |
|
232 | paa('--ssh', | |
233 | type=str, dest='Global.sshserver', |
|
233 | type=str, dest='Global.sshserver', | |
234 | help='ssh url for clients to use when connecting to the Controller ' |
|
234 | help='ssh url for clients to use when connecting to the Controller ' | |
235 | 'processes. It should be of the form: [user@]server[:port]. The ' |
|
235 | 'processes. It should be of the form: [user@]server[:port]. The ' | |
236 | 'Controller\'s listening addresses must be accessible from the ssh server', |
|
236 | 'Controller\'s listening addresses must be accessible from the ssh server', | |
237 | metavar='Global.sshserver') |
|
237 | metavar='Global.sshserver') | |
238 | paa('--location', |
|
238 | paa('--location', | |
239 | type=str, dest='Global.location', |
|
239 | type=str, dest='Global.location', | |
240 | help="The external IP or domain name of this machine, used for disambiguating " |
|
240 | help="The external IP or domain name of this machine, used for disambiguating " | |
241 | "engine and client connections.", |
|
241 | "engine and client connections.", | |
242 | metavar='Global.location') |
|
242 | metavar='Global.location') | |
243 | factory.add_session_arguments(self.parser) |
|
243 | factory.add_session_arguments(self.parser) | |
244 | factory.add_registration_arguments(self.parser) |
|
244 | factory.add_registration_arguments(self.parser) | |
245 |
|
245 | |||
246 |
|
246 | |||
247 | #----------------------------------------------------------------------------- |
|
247 | #----------------------------------------------------------------------------- | |
248 | # The main application |
|
248 | # The main application | |
249 | #----------------------------------------------------------------------------- |
|
249 | #----------------------------------------------------------------------------- | |
250 |
|
250 | |||
251 |
|
251 | |||
252 | class IPControllerApp(ApplicationWithClusterDir): |
|
252 | class IPControllerApp(ApplicationWithClusterDir): | |
253 |
|
253 | |||
254 | name = u'ipcontrollerz' |
|
254 | name = u'ipcontroller' | |
255 | description = _description |
|
255 | description = _description | |
256 | command_line_loader = IPControllerAppConfigLoader |
|
256 | command_line_loader = IPControllerAppConfigLoader | |
257 | default_config_file_name = default_config_file_name |
|
257 | default_config_file_name = default_config_file_name | |
258 | auto_create_cluster_dir = True |
|
258 | auto_create_cluster_dir = True | |
259 |
|
259 | |||
260 |
|
260 | |||
261 | def create_default_config(self): |
|
261 | def create_default_config(self): | |
262 | super(IPControllerApp, self).create_default_config() |
|
262 | super(IPControllerApp, self).create_default_config() | |
263 | # Don't set defaults for Global.secure or Global.reuse_furls |
|
263 | # Don't set defaults for Global.secure or Global.reuse_furls | |
264 | # as those are set in a component. |
|
264 | # as those are set in a component. | |
265 | self.default_config.Global.import_statements = [] |
|
265 | self.default_config.Global.import_statements = [] | |
266 | self.default_config.Global.clean_logs = True |
|
266 | self.default_config.Global.clean_logs = True | |
267 | self.default_config.Global.secure = True |
|
267 | self.default_config.Global.secure = True | |
268 | self.default_config.Global.reuse_files = False |
|
268 | self.default_config.Global.reuse_files = False | |
269 | self.default_config.Global.exec_key = "exec_key.key" |
|
269 | self.default_config.Global.exec_key = "exec_key.key" | |
270 | self.default_config.Global.sshserver = None |
|
270 | self.default_config.Global.sshserver = None | |
271 | self.default_config.Global.location = None |
|
271 | self.default_config.Global.location = None | |
272 |
|
272 | |||
273 | def pre_construct(self): |
|
273 | def pre_construct(self): | |
274 | super(IPControllerApp, self).pre_construct() |
|
274 | super(IPControllerApp, self).pre_construct() | |
275 | c = self.master_config |
|
275 | c = self.master_config | |
276 | # The defaults for these are set in FCClientServiceFactory and |
|
276 | # The defaults for these are set in FCClientServiceFactory and | |
277 | # FCEngineServiceFactory, so we only set them here if the global |
|
277 | # FCEngineServiceFactory, so we only set them here if the global | |
278 | # options have been set to override the class level defaults. |
|
278 | # options have been set to override the class level defaults. | |
279 |
|
279 | |||
280 | # if hasattr(c.Global, 'reuse_furls'): |
|
280 | # if hasattr(c.Global, 'reuse_furls'): | |
281 | # c.FCClientServiceFactory.reuse_furls = c.Global.reuse_furls |
|
281 | # c.FCClientServiceFactory.reuse_furls = c.Global.reuse_furls | |
282 | # c.FCEngineServiceFactory.reuse_furls = c.Global.reuse_furls |
|
282 | # c.FCEngineServiceFactory.reuse_furls = c.Global.reuse_furls | |
283 | # del c.Global.reuse_furls |
|
283 | # del c.Global.reuse_furls | |
284 | # if hasattr(c.Global, 'secure'): |
|
284 | # if hasattr(c.Global, 'secure'): | |
285 | # c.FCClientServiceFactory.secure = c.Global.secure |
|
285 | # c.FCClientServiceFactory.secure = c.Global.secure | |
286 | # c.FCEngineServiceFactory.secure = c.Global.secure |
|
286 | # c.FCEngineServiceFactory.secure = c.Global.secure | |
287 | # del c.Global.secure |
|
287 | # del c.Global.secure | |
288 |
|
288 | |||
289 | def save_connection_dict(self, fname, cdict): |
|
289 | def save_connection_dict(self, fname, cdict): | |
290 | """save a connection dict to json file.""" |
|
290 | """save a connection dict to json file.""" | |
291 | c = self.master_config |
|
291 | c = self.master_config | |
292 | url = cdict['url'] |
|
292 | url = cdict['url'] | |
293 | location = cdict['location'] |
|
293 | location = cdict['location'] | |
294 | if not location: |
|
294 | if not location: | |
295 | try: |
|
295 | try: | |
296 | proto,ip,port = split_url(url) |
|
296 | proto,ip,port = split_url(url) | |
297 | except AssertionError: |
|
297 | except AssertionError: | |
298 | pass |
|
298 | pass | |
299 | else: |
|
299 | else: | |
300 | location = socket.gethostbyname_ex(socket.gethostname())[2][-1] |
|
300 | location = socket.gethostbyname_ex(socket.gethostname())[2][-1] | |
301 | cdict['location'] = location |
|
301 | cdict['location'] = location | |
302 | fname = os.path.join(c.Global.security_dir, fname) |
|
302 | fname = os.path.join(c.Global.security_dir, fname) | |
303 | with open(fname, 'w') as f: |
|
303 | with open(fname, 'w') as f: | |
304 | f.write(json.dumps(cdict, indent=2)) |
|
304 | f.write(json.dumps(cdict, indent=2)) | |
305 | os.chmod(fname, stat.S_IRUSR|stat.S_IWUSR) |
|
305 | os.chmod(fname, stat.S_IRUSR|stat.S_IWUSR) | |
306 |
|
306 | |||
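save_connection_dict above writes the four connection fields as indent=2
JSON and then chmods the file to owner read/write only. A standalone
sketch of the file it produces, with illustrative values:

    import json, os, stat

    cdict = {
        'exec_key': 'c5a36348-...-example',  # illustrative key
        'ssh': None,
        'url': 'tcp://127.0.0.1:10101',
        'location': '10.0.0.5',
    }
    fname = 'ipcontroller-client.json'
    with open(fname, 'w') as f:
        f.write(json.dumps(cdict, indent=2))
    os.chmod(fname, stat.S_IRUSR | stat.S_IWUSR)  # 0600: owner only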
307 | def load_config_from_json(self): |
|
307 | def load_config_from_json(self): | |
308 | """load config from existing json connector files.""" |
|
308 | """load config from existing json connector files.""" | |
309 | c = self.master_config |
|
309 | c = self.master_config | |
310 | # load from engine config |
|
310 | # load from engine config | |
311 | with open(os.path.join(c.Global.security_dir, 'ipcontroller-engine.json')) as f: |
|
311 | with open(os.path.join(c.Global.security_dir, 'ipcontroller-engine.json')) as f: | |
312 | cfg = json.loads(f.read()) |
|
312 | cfg = json.loads(f.read()) | |
313 | key = c.SessionFactory.exec_key = cfg['exec_key'] |
|
313 | key = c.SessionFactory.exec_key = cfg['exec_key'] | |
314 | xport,addr = cfg['url'].split('://') |
|
314 | xport,addr = cfg['url'].split('://') | |
315 | c.HubFactory.engine_transport = xport |
|
315 | c.HubFactory.engine_transport = xport | |
316 | ip,ports = addr.split(':') |
|
316 | ip,ports = addr.split(':') | |
317 | c.HubFactory.engine_ip = ip |
|
317 | c.HubFactory.engine_ip = ip | |
318 | c.HubFactory.regport = int(ports) |
|
318 | c.HubFactory.regport = int(ports) | |
319 | c.Global.location = cfg['location'] |
|
319 | c.Global.location = cfg['location'] | |
320 |
|
320 | |||
321 | # load client config |
|
321 | # load client config | |
322 | with open(os.path.join(c.Global.security_dir, 'ipcontroller-client.json')) as f: |
|
322 | with open(os.path.join(c.Global.security_dir, 'ipcontroller-client.json')) as f: | |
323 | cfg = json.loads(f.read()) |
|
323 | cfg = json.loads(f.read()) | |
324 | assert key == cfg['exec_key'], "exec_key mismatch between engine and client keys" |
|
324 | assert key == cfg['exec_key'], "exec_key mismatch between engine and client keys" | |
325 | xport,addr = cfg['url'].split('://') |
|
325 | xport,addr = cfg['url'].split('://') | |
326 | c.HubFactory.client_transport = xport |
|
326 | c.HubFactory.client_transport = xport | |
327 | ip,ports = addr.split(':') |
|
327 | ip,ports = addr.split(':') | |
328 | c.HubFactory.client_ip = ip |
|
328 | c.HubFactory.client_ip = ip | |
329 | c.Global.sshserver = cfg['ssh'] |
|
329 | c.Global.sshserver = cfg['ssh'] | |
330 | assert int(ports) == c.HubFactory.regport, "regport mismatch" |
|
330 | assert int(ports) == c.HubFactory.regport, "regport mismatch" | |
331 |
|
331 | |||
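The reloading above simply inverts that format: the stored url string is
split back into transport, address, and registration port. A quick sketch
of the round-trip:

    url = 'tcp://127.0.0.1:10101'
    xport, addr = url.split('://')  # -> 'tcp', '127.0.0.1:10101'
    ip, ports = addr.split(':')     # -> '127.0.0.1', '10101'
    assert (xport, ip, int(ports)) == ('tcp', '127.0.0.1', 10101)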
332 | def construct(self): |
|
332 | def construct(self): | |
333 | # This is the working dir by now. |
|
333 | # This is the working dir by now. | |
334 | sys.path.insert(0, '') |
|
334 | sys.path.insert(0, '') | |
335 | c = self.master_config |
|
335 | c = self.master_config | |
336 |
|
336 | |||
337 | self.import_statements() |
|
337 | self.import_statements() | |
338 | reusing = c.Global.reuse_files |
|
338 | reusing = c.Global.reuse_files | |
339 | if reusing: |
|
339 | if reusing: | |
340 | try: |
|
340 | try: | |
341 | self.load_config_from_json() |
|
341 | self.load_config_from_json() | |
342 | except (AssertionError,IOError): |
|
342 | except (AssertionError,IOError): | |
343 | reusing=False |
|
343 | reusing=False | |
344 | # check again, because reusing may have failed: |
|
344 | # check again, because reusing may have failed: | |
345 | if reusing: |
|
345 | if reusing: | |
346 | pass |
|
346 | pass | |
347 | elif c.Global.secure: |
|
347 | elif c.Global.secure: | |
348 | keyfile = os.path.join(c.Global.security_dir, c.Global.exec_key) |
|
348 | keyfile = os.path.join(c.Global.security_dir, c.Global.exec_key) | |
349 | key = str(uuid.uuid4()) |
|
349 | key = str(uuid.uuid4()) | |
350 | with open(keyfile, 'w') as f: |
|
350 | with open(keyfile, 'w') as f: | |
351 | f.write(key) |
|
351 | f.write(key) | |
352 | os.chmod(keyfile, stat.S_IRUSR|stat.S_IWUSR) |
|
352 | os.chmod(keyfile, stat.S_IRUSR|stat.S_IWUSR) | |
353 | c.SessionFactory.exec_key = key |
|
353 | c.SessionFactory.exec_key = key | |
354 | else: |
|
354 | else: | |
355 | c.SessionFactory.exec_key = '' |
|
355 | c.SessionFactory.exec_key = '' | |
356 | key = None |
|
356 | key = None | |
357 |
|
357 | |||
358 | try: |
|
358 | try: | |
359 | self.factory = ControllerFactory(config=c, logname=self.log.name) |
|
359 | self.factory = ControllerFactory(config=c, logname=self.log.name) | |
360 | self.start_logging() |
|
360 | self.start_logging() | |
361 | self.factory.construct() |
|
361 | self.factory.construct() | |
362 | except: |
|
362 | except: | |
363 | self.log.error("Couldn't construct the Controller", exc_info=True) |
|
363 | self.log.error("Couldn't construct the Controller", exc_info=True) | |
364 | self.exit(1) |
|
364 | self.exit(1) | |
365 |
|
365 | |||
366 | if not reusing: |
|
366 | if not reusing: | |
367 | # save to new json config files |
|
367 | # save to new json config files | |
368 | f = self.factory |
|
368 | f = self.factory | |
369 | cdict = {'exec_key' : key, |
|
369 | cdict = {'exec_key' : key, | |
370 | 'ssh' : c.Global.sshserver, |
|
370 | 'ssh' : c.Global.sshserver, | |
371 | 'url' : "%s://%s:%s"%(f.client_transport, f.client_ip, f.regport), |
|
371 | 'url' : "%s://%s:%s"%(f.client_transport, f.client_ip, f.regport), | |
372 | 'location' : c.Global.location |
|
372 | 'location' : c.Global.location | |
373 | } |
|
373 | } | |
374 | self.save_connection_dict('ipcontroller-client.json', cdict) |
|
374 | self.save_connection_dict('ipcontroller-client.json', cdict) | |
375 | edict = cdict |
|
375 | edict = cdict | |
376 | edict['url']="%s://%s:%s"%((f.client_transport, f.client_ip, f.regport)) |
|
376 | edict['url']="%s://%s:%s"%((f.client_transport, f.client_ip, f.regport)) | |
377 | self.save_connection_dict('ipcontroller-engine.json', edict) |
|
377 | self.save_connection_dict('ipcontroller-engine.json', edict) | |
378 |
|
378 | |||
379 |
|
379 | |||
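When no existing files are being reused and security is on, construct()
mints a fresh execution key: a random UUID written to a file that only the
owner can read. A standalone sketch of that step (the file name matches
the Global.exec_key default above):

    import os, stat, uuid

    keyfile = 'exec_key.key'
    key = str(uuid.uuid4())
    with open(keyfile, 'w') as f:
        f.write(key)
    os.chmod(keyfile, stat.S_IRUSR | stat.S_IWUSR)  # owner-only, 0600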
380 | def save_urls(self): |
|
380 | def save_urls(self): | |
381 | """save the registration urls to files.""" |
|
381 | """save the registration urls to files.""" | |
382 | c = self.master_config |
|
382 | c = self.master_config | |
383 |
|
383 | |||
384 | sec_dir = c.Global.security_dir |
|
384 | sec_dir = c.Global.security_dir | |
385 | cf = self.factory |
|
385 | cf = self.factory | |
386 |
|
386 | |||
387 | with open(os.path.join(sec_dir, 'ipcontroller-engine.url'), 'w') as f: |
|
387 | with open(os.path.join(sec_dir, 'ipcontroller-engine.url'), 'w') as f: | |
388 | f.write("%s://%s:%s"%(cf.engine_transport, cf.engine_ip, cf.regport)) |
|
388 | f.write("%s://%s:%s"%(cf.engine_transport, cf.engine_ip, cf.regport)) | |
389 |
|
389 | |||
390 | with open(os.path.join(sec_dir, 'ipcontroller-client.url'), 'w') as f: |
|
390 | with open(os.path.join(sec_dir, 'ipcontroller-client.url'), 'w') as f: | |
391 | f.write("%s://%s:%s"%(cf.client_transport, cf.client_ip, cf.regport)) |
|
391 | f.write("%s://%s:%s"%(cf.client_transport, cf.client_ip, cf.regport)) | |
392 |
|
392 | |||
393 |
|
393 | |||
394 | def import_statements(self): |
|
394 | def import_statements(self): | |
395 | statements = self.master_config.Global.import_statements |
|
395 | statements = self.master_config.Global.import_statements | |
396 | for s in statements: |
|
396 | for s in statements: | |
397 | try: |
|
397 | try: | |
398 | self.log.info("Executing statement: '%s'" % s) 
|
398 | self.log.info("Executing statement: '%s'" % s) | 
399 | exec s in globals(), locals() |
|
399 | exec s in globals(), locals() | |
400 | except: |
|
400 | except: | |
401 | self.log.error("Error running statement: %s" % s) 
|
401 | self.log.error("Error running statement: %s" % s) | 
402 |
|
402 | |||
403 | def start_logging(self): |
|
403 | def start_logging(self): | |
404 | super(IPControllerApp, self).start_logging() |
|
404 | super(IPControllerApp, self).start_logging() | |
405 | if self.master_config.Global.log_url: |
|
405 | if self.master_config.Global.log_url: | |
406 | context = self.factory.context |
|
406 | context = self.factory.context | |
407 | lsock = context.socket(zmq.PUB) |
|
407 | lsock = context.socket(zmq.PUB) | |
408 | lsock.connect(self.master_config.Global.log_url) |
|
408 | lsock.connect(self.master_config.Global.log_url) | |
409 | handler = PUBHandler(lsock) |
|
409 | handler = PUBHandler(lsock) | |
410 | handler.root_topic = 'controller' |
|
410 | handler.root_topic = 'controller' | |
411 | handler.setLevel(self.log_level) |
|
411 | handler.setLevel(self.log_level) | |
412 | self.log.addHandler(handler) |
|
412 | self.log.addHandler(handler) | |
413 | # |
|
413 | # | |
414 | def start_app(self): |
|
414 | def start_app(self): | |
415 | # Start the subprocesses: |
|
415 | # Start the subprocesses: | |
416 | self.factory.start() |
|
416 | self.factory.start() | |
417 | self.write_pid_file(overwrite=True) |
|
417 | self.write_pid_file(overwrite=True) | |
418 | try: |
|
418 | try: | |
419 | self.factory.loop.start() |
|
419 | self.factory.loop.start() | |
420 | except KeyboardInterrupt: |
|
420 | except KeyboardInterrupt: | |
421 | self.log.critical("Interrupted, Exiting...\n") |
|
421 | self.log.critical("Interrupted, Exiting...\n") | |
422 |
|
422 | |||
423 |
|
423 | |||
424 | def launch_new_instance(): |
|
424 | def launch_new_instance(): | |
425 | """Create and run the IPython controller""" |
|
425 | """Create and run the IPython controller""" | |
426 | app = IPControllerApp() |
|
426 | app = IPControllerApp() | |
427 | app.start() |
|
427 | app.start() | |
428 |
|
428 | |||
429 |
|
429 | |||
430 | if __name__ == '__main__': |
|
430 | if __name__ == '__main__': | |
431 | launch_new_instance() |
|
431 | launch_new_instance() |
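
A note on the construct() block above: `edict = cdict` binds a second name to the same dict, so the later `edict['url'] = ...` also rewrites cdict -- harmless here only because ipcontroller-client.json is saved first. The engine url is also rebuilt from f.client_transport/f.client_ip, which reads like a copy of the client url. A defensive sketch (Python 2, values are illustrative placeholders, not the shipped API):

    import json

    cdict = {'exec_key': None, 'ssh': '', 'location': 'localhost',
             'url': 'tcp://127.0.0.1:10101'}      # client-facing url
    edict = dict(cdict)                           # real copy: no aliasing
    edict['url'] = 'tcp://10.0.0.5:10101'         # engine-facing url
    print json.dumps(cdict)                       # client dict is unchanged
    print json.dumps(edict)
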
@@ -1,294 +1,294 b'' | |||||
1 | #!/usr/bin/env python |
|
1 | #!/usr/bin/env python | |
2 | # encoding: utf-8 |
|
2 | # encoding: utf-8 | |
3 | """ |
|
3 | """ | |
4 | The IPython engine application |
|
4 | The IPython engine application | |
5 | """ |
|
5 | """ | |
6 |
|
6 | |||
7 | #----------------------------------------------------------------------------- |
|
7 | #----------------------------------------------------------------------------- | |
8 | # Copyright (C) 2008-2009 The IPython Development Team |
|
8 | # Copyright (C) 2008-2009 The IPython Development Team | |
9 | # |
|
9 | # | |
10 | # Distributed under the terms of the BSD License. The full license is in |
|
10 | # Distributed under the terms of the BSD License. The full license is in | |
11 | # the file COPYING, distributed as part of this software. |
|
11 | # the file COPYING, distributed as part of this software. | |
12 | #----------------------------------------------------------------------------- |
|
12 | #----------------------------------------------------------------------------- | |
13 |
|
13 | |||
14 | #----------------------------------------------------------------------------- |
|
14 | #----------------------------------------------------------------------------- | |
15 | # Imports |
|
15 | # Imports | |
16 | #----------------------------------------------------------------------------- |
|
16 | #----------------------------------------------------------------------------- | |
17 |
|
17 | |||
18 | import json |
|
18 | import json | |
19 | import os |
|
19 | import os | |
20 | import sys |
|
20 | import sys | |
21 |
|
21 | |||
22 | import zmq |
|
22 | import zmq | |
23 | from zmq.eventloop import ioloop |
|
23 | from zmq.eventloop import ioloop | |
24 |
|
24 | |||
25 | from IPython.parallel.clusterdir import ( |
|
25 | from IPython.parallel.clusterdir import ( | |
26 | ApplicationWithClusterDir, |
|
26 | ApplicationWithClusterDir, | |
27 | ClusterDirConfigLoader |
|
27 | ClusterDirConfigLoader | |
28 | ) |
|
28 | ) | |
29 | from IPython.zmq.log import EnginePUBHandler |
|
29 | from IPython.zmq.log import EnginePUBHandler | |
30 |
|
30 | |||
31 | from IPython.parallel import factory |
|
31 | from IPython.parallel import factory | |
32 | from IPython.parallel.engine import EngineFactory |
|
32 | from IPython.parallel.engine import EngineFactory | |
33 | from IPython.parallel.streamkernel import Kernel |
|
33 | from IPython.parallel.streamkernel import Kernel | |
34 | from IPython.parallel.util import disambiguate_url |
|
34 | from IPython.parallel.util import disambiguate_url | |
35 | from IPython.utils.importstring import import_item |
|
35 | from IPython.utils.importstring import import_item | |
36 |
|
36 | |||
37 |
|
37 | |||
38 | #----------------------------------------------------------------------------- |
|
38 | #----------------------------------------------------------------------------- | |
39 | # Module level variables |
|
39 | # Module level variables | |
40 | #----------------------------------------------------------------------------- |
|
40 | #----------------------------------------------------------------------------- | |
41 |
|
41 | |||
42 | #: The default config file name for this application |
|
42 | #: The default config file name for this application | |
43 |default_config_file_name = u'ipengine |
|
43 | default_config_file_name = u'ipengine_config.py' | 
44 |
|
44 | |||
45 |
|
45 | |||
46 | mpi4py_init = """from mpi4py import MPI as mpi |
|
46 | mpi4py_init = """from mpi4py import MPI as mpi | |
47 | mpi.size = mpi.COMM_WORLD.Get_size() |
|
47 | mpi.size = mpi.COMM_WORLD.Get_size() | |
48 | mpi.rank = mpi.COMM_WORLD.Get_rank() |
|
48 | mpi.rank = mpi.COMM_WORLD.Get_rank() | |
49 | """ |
|
49 | """ | |
50 |
|
50 | |||
51 |
|
51 | |||
52 | pytrilinos_init = """from PyTrilinos import Epetra |
|
52 | pytrilinos_init = """from PyTrilinos import Epetra | |
53 | class SimpleStruct: |
|
53 | class SimpleStruct: | |
54 | pass |
|
54 | pass | |
55 | mpi = SimpleStruct() |
|
55 | mpi = SimpleStruct() | |
56 | mpi.rank = 0 |
|
56 | mpi.rank = 0 | |
57 | mpi.size = 0 |
|
57 | mpi.size = 0 | |
58 | """ |
|
58 | """ | |
59 |
|
59 | |||
60 |
|
60 | |||
61 | _description = """Start an IPython engine for parallel computing.\n\n |
|
61 | _description = """Start an IPython engine for parallel computing.\n\n | |
62 |
|
62 | |||
63 | IPython engines run in parallel and perform computations on behalf of a client |
|
63 | IPython engines run in parallel and perform computations on behalf of a client | |
64 | and controller. A controller needs to be started before the engines. The |
|
64 | and controller. A controller needs to be started before the engines. The | |
65 | engine can be configured using command line options or using a cluster |
|
65 | engine can be configured using command line options or using a cluster | |
66 | directory. Cluster directories contain config, log and security files and are |
|
66 | directory. Cluster directories contain config, log and security files and are | |
67 |usually located in your ipython directory and named as "cluster |
|
67 | usually located in your ipython directory and named as "cluster_<profile>". | 
68 | See the --profile and --cluster-dir options for details. |
|
68 | See the --profile and --cluster-dir options for details. | |
69 | """ |
|
69 | """ | |
70 |
|
70 | |||
71 | #----------------------------------------------------------------------------- |
|
71 | #----------------------------------------------------------------------------- | |
72 | # Command line options |
|
72 | # Command line options | |
73 | #----------------------------------------------------------------------------- |
|
73 | #----------------------------------------------------------------------------- | |
74 |
|
74 | |||
75 |
|
75 | |||
76 | class IPEngineAppConfigLoader(ClusterDirConfigLoader): |
|
76 | class IPEngineAppConfigLoader(ClusterDirConfigLoader): | |
77 |
|
77 | |||
78 | def _add_arguments(self): |
|
78 | def _add_arguments(self): | |
79 | super(IPEngineAppConfigLoader, self)._add_arguments() |
|
79 | super(IPEngineAppConfigLoader, self)._add_arguments() | |
80 | paa = self.parser.add_argument |
|
80 | paa = self.parser.add_argument | |
81 | # Controller config |
|
81 | # Controller config | |
82 | paa('--file', '-f', |
|
82 | paa('--file', '-f', | |
83 | type=unicode, dest='Global.url_file', |
|
83 | type=unicode, dest='Global.url_file', | |
84 | help='The full location of the file containing the connection information for the ' 
|
84 | help='The full location of the file containing the connection information for the ' | 
85 | 'controller. If this is not given, the file must be in the ' |
|
85 | 'controller. If this is not given, the file must be in the ' | |
86 | 'security directory of the cluster directory. This location is ' |
|
86 | 'security directory of the cluster directory. This location is ' | |
87 | 'resolved using the --profile and --app-dir options.', |
|
87 | 'resolved using the --profile and --app-dir options.', | |
88 | metavar='Global.url_file') |
|
88 | metavar='Global.url_file') | |
89 | # MPI |
|
89 | # MPI | |
90 | paa('--mpi', |
|
90 | paa('--mpi', | |
91 | type=str, dest='MPI.use', |
|
91 | type=str, dest='MPI.use', | |
92 | help='How to enable MPI (mpi4py, pytrilinos, or empty string to disable).', |
|
92 | help='How to enable MPI (mpi4py, pytrilinos, or empty string to disable).', | |
93 | metavar='MPI.use') |
|
93 | metavar='MPI.use') | |
94 | # Global config |
|
94 | # Global config | |
95 | paa('--log-to-file', |
|
95 | paa('--log-to-file', | |
96 | action='store_true', dest='Global.log_to_file', |
|
96 | action='store_true', dest='Global.log_to_file', | |
97 | help='Log to a file in the log directory (default is stdout)') |
|
97 | help='Log to a file in the log directory (default is stdout)') | |
98 | paa('--log-url', |
|
98 | paa('--log-url', | |
99 | dest='Global.log_url', |
|
99 | dest='Global.log_url', | |
100 | help="url of ZMQ logger, as started with iploggerz") |
|
100 | help="url of ZMQ logger, as started with iploggerz") | |
101 | # paa('--execkey', |
|
101 | # paa('--execkey', | |
102 | # type=str, dest='Global.exec_key', |
|
102 | # type=str, dest='Global.exec_key', | |
103 | # help='path to a file containing an execution key.', |
|
103 | # help='path to a file containing an execution key.', | |
104 | # metavar='keyfile') |
|
104 | # metavar='keyfile') | |
105 | # paa('--no-secure', |
|
105 | # paa('--no-secure', | |
106 | # action='store_false', dest='Global.secure', |
|
106 | # action='store_false', dest='Global.secure', | |
107 | # help='Turn off execution keys.') |
|
107 | # help='Turn off execution keys.') | |
108 | # paa('--secure', |
|
108 | # paa('--secure', | |
109 | # action='store_true', dest='Global.secure', |
|
109 | # action='store_true', dest='Global.secure', | |
110 | # help='Turn on execution keys (default).') |
|
110 | # help='Turn on execution keys (default).') | |
111 | # init command |
|
111 | # init command | |
112 | paa('-c', |
|
112 | paa('-c', | |
113 | type=str, dest='Global.extra_exec_lines', |
|
113 | type=str, dest='Global.extra_exec_lines', | |
114 | help='specify a command to be run at startup') |
|
114 | help='specify a command to be run at startup') | |
115 |
|
115 | |||
116 | factory.add_session_arguments(self.parser) |
|
116 | factory.add_session_arguments(self.parser) | |
117 | factory.add_registration_arguments(self.parser) |
|
117 | factory.add_registration_arguments(self.parser) | |
118 |
|
118 | |||
119 |
|
119 | |||
120 | #----------------------------------------------------------------------------- |
|
120 | #----------------------------------------------------------------------------- | |
121 | # Main application |
|
121 | # Main application | |
122 | #----------------------------------------------------------------------------- |
|
122 | #----------------------------------------------------------------------------- | |
123 |
|
123 | |||
124 |
|
124 | |||
125 | class IPEngineApp(ApplicationWithClusterDir): |
|
125 | class IPEngineApp(ApplicationWithClusterDir): | |
126 |
|
126 | |||
127 |name = u'ipengine |
|
127 | name = u'ipengine' | 
128 | description = _description |
|
128 | description = _description | |
129 | command_line_loader = IPEngineAppConfigLoader |
|
129 | command_line_loader = IPEngineAppConfigLoader | |
130 | default_config_file_name = default_config_file_name |
|
130 | default_config_file_name = default_config_file_name | |
131 | auto_create_cluster_dir = True |
|
131 | auto_create_cluster_dir = True | |
132 |
|
132 | |||
133 | def create_default_config(self): |
|
133 | def create_default_config(self): | |
134 | super(IPEngineApp, self).create_default_config() |
|
134 | super(IPEngineApp, self).create_default_config() | |
135 |
|
135 | |||
136 | # The engine should not clean logs as we don't want to remove the |
|
136 | # The engine should not clean logs as we don't want to remove the | |
137 | # active log files of other running engines. |
|
137 | # active log files of other running engines. | |
138 | self.default_config.Global.clean_logs = False |
|
138 | self.default_config.Global.clean_logs = False | |
139 | self.default_config.Global.secure = True |
|
139 | self.default_config.Global.secure = True | |
140 |
|
140 | |||
141 | # Global config attributes |
|
141 | # Global config attributes | |
142 | self.default_config.Global.exec_lines = [] |
|
142 | self.default_config.Global.exec_lines = [] | |
143 | self.default_config.Global.extra_exec_lines = '' |
|
143 | self.default_config.Global.extra_exec_lines = '' | |
144 |
|
144 | |||
145 | # Configuration related to the controller |
|
145 | # Configuration related to the controller | |
146 | # This must match the filename (path not included) that the controller |
|
146 | # This must match the filename (path not included) that the controller | |
147 | # used for the FURL file. |
|
147 | # used for the FURL file. | |
148 | self.default_config.Global.url_file = u'' |
|
148 | self.default_config.Global.url_file = u'' | |
149 | self.default_config.Global.url_file_name = u'ipcontroller-engine.json' |
|
149 | self.default_config.Global.url_file_name = u'ipcontroller-engine.json' | |
150 | # If given, this is the actual location of the controller's FURL file. |
|
150 | # If given, this is the actual location of the controller's FURL file. | |
151 | # If not, this is computed using the profile, app_dir and furl_file_name |
|
151 | # If not, this is computed using the profile, app_dir and furl_file_name | |
152 | # self.default_config.Global.key_file_name = u'exec_key.key' |
|
152 | # self.default_config.Global.key_file_name = u'exec_key.key' | |
153 | # self.default_config.Global.key_file = u'' |
|
153 | # self.default_config.Global.key_file = u'' | |
154 |
|
154 | |||
155 | # MPI related config attributes |
|
155 | # MPI related config attributes | |
156 | self.default_config.MPI.use = '' |
|
156 | self.default_config.MPI.use = '' | |
157 | self.default_config.MPI.mpi4py = mpi4py_init |
|
157 | self.default_config.MPI.mpi4py = mpi4py_init | |
158 | self.default_config.MPI.pytrilinos = pytrilinos_init |
|
158 | self.default_config.MPI.pytrilinos = pytrilinos_init | |
159 |
|
159 | |||
160 | def post_load_command_line_config(self): |
|
160 | def post_load_command_line_config(self): | |
161 | pass |
|
161 | pass | |
162 |
|
162 | |||
163 | def pre_construct(self): |
|
163 | def pre_construct(self): | |
164 | super(IPEngineApp, self).pre_construct() |
|
164 | super(IPEngineApp, self).pre_construct() | |
165 | # self.find_cont_url_file() |
|
165 | # self.find_cont_url_file() | |
166 | self.find_url_file() |
|
166 | self.find_url_file() | |
167 | if self.master_config.Global.extra_exec_lines: |
|
167 | if self.master_config.Global.extra_exec_lines: | |
168 | self.master_config.Global.exec_lines.append(self.master_config.Global.extra_exec_lines) |
|
168 | self.master_config.Global.exec_lines.append(self.master_config.Global.extra_exec_lines) | |
169 |
|
169 | |||
170 | # def find_key_file(self): |
|
170 | # def find_key_file(self): | |
171 | # """Set the key file. |
|
171 | # """Set the key file. | |
172 | # |
|
172 | # | |
173 | # Here we don't try to actually see if it exists or is valid as that 
|
173 | # Here we don't try to actually see if it exists or is valid as that | 
174 | # is handled by the connection logic. 
|
174 | # is handled by the connection logic. | 
175 | # """ |
|
175 | # """ | |
176 | # config = self.master_config |
|
176 | # config = self.master_config | |
177 | # # Find the actual controller key file |
|
177 | # # Find the actual controller key file | |
178 | # if not config.Global.key_file: |
|
178 | # if not config.Global.key_file: | |
179 | # try_this = os.path.join( |
|
179 | # try_this = os.path.join( | |
180 | # config.Global.cluster_dir, |
|
180 | # config.Global.cluster_dir, | |
181 | # config.Global.security_dir, |
|
181 | # config.Global.security_dir, | |
182 | # config.Global.key_file_name |
|
182 | # config.Global.key_file_name | |
183 | # ) |
|
183 | # ) | |
184 | # config.Global.key_file = try_this |
|
184 | # config.Global.key_file = try_this | |
185 |
|
185 | |||
186 | def find_url_file(self): |
|
186 | def find_url_file(self): | |
187 | """Set the key file. |
|
187 | """Set the key file. | |
188 |
|
188 | |||
189 | Here we don't try to actually see if it exists or is valid as that 
|
189 | Here we don't try to actually see if it exists or is valid as that | 
190 | is handled by the connection logic. 
|
190 | is handled by the connection logic. | 
191 | """ |
|
191 | """ | |
192 | config = self.master_config |
|
192 | config = self.master_config | |
193 | # Find the actual controller key file |
|
193 | # Find the actual controller key file | |
194 | if not config.Global.url_file: |
|
194 | if not config.Global.url_file: | |
195 | try_this = os.path.join( |
|
195 | try_this = os.path.join( | |
196 | config.Global.cluster_dir, |
|
196 | config.Global.cluster_dir, | |
197 | config.Global.security_dir, |
|
197 | config.Global.security_dir, | |
198 | config.Global.url_file_name |
|
198 | config.Global.url_file_name | |
199 | ) |
|
199 | ) | |
200 | config.Global.url_file = try_this |
|
200 | config.Global.url_file = try_this | |
201 |
|
201 | |||
202 | def construct(self): |
|
202 | def construct(self): | |
203 | # This is the working dir by now. |
|
203 | # This is the working dir by now. | |
204 | sys.path.insert(0, '') |
|
204 | sys.path.insert(0, '') | |
205 | config = self.master_config |
|
205 | config = self.master_config | |
206 | # if os.path.exists(config.Global.key_file) and config.Global.secure: |
|
206 | # if os.path.exists(config.Global.key_file) and config.Global.secure: | |
207 | # config.SessionFactory.exec_key = config.Global.key_file |
|
207 | # config.SessionFactory.exec_key = config.Global.key_file | |
208 | if os.path.exists(config.Global.url_file): |
|
208 | if os.path.exists(config.Global.url_file): | |
209 | with open(config.Global.url_file) as f: |
|
209 | with open(config.Global.url_file) as f: | |
210 | d = json.loads(f.read()) |
|
210 | d = json.loads(f.read()) | |
211 | for k,v in d.iteritems(): |
|
211 | for k,v in d.iteritems(): | |
212 | if isinstance(v, unicode): |
|
212 | if isinstance(v, unicode): | |
213 | d[k] = v.encode() |
|
213 | d[k] = v.encode() | |
214 | if d['exec_key']: |
|
214 | if d['exec_key']: | |
215 | config.SessionFactory.exec_key = d['exec_key'] |
|
215 | config.SessionFactory.exec_key = d['exec_key'] | |
216 | d['url'] = disambiguate_url(d['url'], d['location']) |
|
216 | d['url'] = disambiguate_url(d['url'], d['location']) | |
217 | config.RegistrationFactory.url=d['url'] |
|
217 | config.RegistrationFactory.url=d['url'] | |
218 | config.EngineFactory.location = d['location'] |
|
218 | config.EngineFactory.location = d['location'] | |
219 |
|
219 | |||
220 |
|
220 | |||
221 |
|
221 | |||
222 | config.Kernel.exec_lines = config.Global.exec_lines |
|
222 | config.Kernel.exec_lines = config.Global.exec_lines | |
223 |
|
223 | |||
224 | self.start_mpi() |
|
224 | self.start_mpi() | |
225 |
|
225 | |||
226 | # Create the underlying shell class and EngineService |
|
226 | # Create the underlying shell class and EngineService | |
227 | # shell_class = import_item(self.master_config.Global.shell_class) |
|
227 | # shell_class = import_item(self.master_config.Global.shell_class) | |
228 | try: |
|
228 | try: | |
229 | self.engine = EngineFactory(config=config, logname=self.log.name) |
|
229 | self.engine = EngineFactory(config=config, logname=self.log.name) | |
230 | except: |
|
230 | except: | |
231 | self.log.error("Couldn't start the Engine", exc_info=True) |
|
231 | self.log.error("Couldn't start the Engine", exc_info=True) | |
232 | self.exit(1) |
|
232 | self.exit(1) | |
233 |
|
233 | |||
234 | self.start_logging() |
|
234 | self.start_logging() | |
235 |
|
235 | |||
236 | # Create the service hierarchy |
|
236 | # Create the service hierarchy | |
237 | # self.main_service = service.MultiService() |
|
237 | # self.main_service = service.MultiService() | |
238 | # self.engine_service.setServiceParent(self.main_service) |
|
238 | # self.engine_service.setServiceParent(self.main_service) | |
239 | # self.tub_service = Tub() |
|
239 | # self.tub_service = Tub() | |
240 | # self.tub_service.setServiceParent(self.main_service) |
|
240 | # self.tub_service.setServiceParent(self.main_service) | |
241 | # # This needs to be called before the connection is initiated |
|
241 | # # This needs to be called before the connection is initiated | |
242 | # self.main_service.startService() |
|
242 | # self.main_service.startService() | |
243 |
|
243 | |||
244 | # This initiates the connection to the controller and calls |
|
244 | # This initiates the connection to the controller and calls | |
245 | # register_engine to tell the controller we are ready to do work |
|
245 | # register_engine to tell the controller we are ready to do work | |
246 | # self.engine_connector = EngineConnector(self.tub_service) |
|
246 | # self.engine_connector = EngineConnector(self.tub_service) | |
247 |
|
247 | |||
248 | # self.log.info("Using furl file: %s" % self.master_config.Global.furl_file) |
|
248 | # self.log.info("Using furl file: %s" % self.master_config.Global.furl_file) | |
249 |
|
249 | |||
250 | # reactor.callWhenRunning(self.call_connect) |
|
250 | # reactor.callWhenRunning(self.call_connect) | |
251 |
|
251 | |||
252 |
|
252 | |||
253 | def start_logging(self): |
|
253 | def start_logging(self): | |
254 | super(IPEngineApp, self).start_logging() |
|
254 | super(IPEngineApp, self).start_logging() | |
255 | if self.master_config.Global.log_url: |
|
255 | if self.master_config.Global.log_url: | |
256 | context = self.engine.context |
|
256 | context = self.engine.context | |
257 | lsock = context.socket(zmq.PUB) |
|
257 | lsock = context.socket(zmq.PUB) | |
258 | lsock.connect(self.master_config.Global.log_url) |
|
258 | lsock.connect(self.master_config.Global.log_url) | |
259 | handler = EnginePUBHandler(self.engine, lsock) |
|
259 | handler = EnginePUBHandler(self.engine, lsock) | |
260 | handler.setLevel(self.log_level) |
|
260 | handler.setLevel(self.log_level) | |
261 | self.log.addHandler(handler) |
|
261 | self.log.addHandler(handler) | |
262 |
|
262 | |||
263 | def start_mpi(self): |
|
263 | def start_mpi(self): | |
264 | global mpi |
|
264 | global mpi | |
265 | mpikey = self.master_config.MPI.use |
|
265 | mpikey = self.master_config.MPI.use | |
266 | mpi_import_statement = self.master_config.MPI.get(mpikey, None) |
|
266 | mpi_import_statement = self.master_config.MPI.get(mpikey, None) | |
267 | if mpi_import_statement is not None: |
|
267 | if mpi_import_statement is not None: | |
268 | try: |
|
268 | try: | |
269 | self.log.info("Initializing MPI:") |
|
269 | self.log.info("Initializing MPI:") | |
270 | self.log.info(mpi_import_statement) |
|
270 | self.log.info(mpi_import_statement) | |
271 | exec mpi_import_statement in globals() |
|
271 | exec mpi_import_statement in globals() | |
272 | except: |
|
272 | except: | |
273 | mpi = None |
|
273 | mpi = None | |
274 | else: |
|
274 | else: | |
275 | mpi = None |
|
275 | mpi = None | |
276 |
|
276 | |||
277 |
|
277 | |||
278 | def start_app(self): |
|
278 | def start_app(self): | |
279 | self.engine.start() |
|
279 | self.engine.start() | |
280 | try: |
|
280 | try: | |
281 | self.engine.loop.start() |
|
281 | self.engine.loop.start() | |
282 | except KeyboardInterrupt: |
|
282 | except KeyboardInterrupt: | |
283 | self.log.critical("Engine Interrupted, shutting down...\n") |
|
283 | self.log.critical("Engine Interrupted, shutting down...\n") | |
284 |
|
284 | |||
285 |
|
285 | |||
286 | def launch_new_instance(): |
|
286 | def launch_new_instance(): | |
287 | """Create and run the IPython controller""" |
|
287 | """Create and run the IPython controller""" | |
288 | app = IPEngineApp() |
|
288 | app = IPEngineApp() | |
289 | app.start() |
|
289 | app.start() | |
290 |
|
290 | |||
291 |
|
291 | |||
292 | if __name__ == '__main__': |
|
292 | if __name__ == '__main__': | |
293 | launch_new_instance() |
|
293 | launch_new_instance() | |
294 |
|
294 |
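
For orientation, IPEngineApp.construct() above consumes the ipcontroller-engine.json file written by the controller. A standalone sketch of that read path (the helper name is ours, not the shipped API; Python 2 idioms to match the diff):

    import json

    def read_engine_connection(path):
        """Load the {exec_key, ssh, url, location} dict the controller wrote."""
        with open(path) as f:
            d = json.loads(f.read())
        for k, v in d.iteritems():
            if isinstance(v, unicode):
                d[k] = v.encode()        # zmq wants byte strings here
        return d

MPI support from the same file is switched on with the --mpi option (e.g. --mpi=mpi4py), which exec's the matching init snippet before the engine starts.
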
@@ -1,132 +1,132 b'' | |||||
1 | #!/usr/bin/env python |
|
1 | #!/usr/bin/env python | |
2 | # encoding: utf-8 |
|
2 | # encoding: utf-8 | |
3 | """ |
|
3 | """ | |
4 | A simple IPython logger application |
|
4 | A simple IPython logger application | |
5 | """ |
|
5 | """ | |
6 |
|
6 | |||
7 | #----------------------------------------------------------------------------- |
|
7 | #----------------------------------------------------------------------------- | |
8 | # Copyright (C) 2011 The IPython Development Team |
|
8 | # Copyright (C) 2011 The IPython Development Team | |
9 | # |
|
9 | # | |
10 | # Distributed under the terms of the BSD License. The full license is in |
|
10 | # Distributed under the terms of the BSD License. The full license is in | |
11 | # the file COPYING, distributed as part of this software. |
|
11 | # the file COPYING, distributed as part of this software. | |
12 | #----------------------------------------------------------------------------- |
|
12 | #----------------------------------------------------------------------------- | |
13 |
|
13 | |||
14 | #----------------------------------------------------------------------------- |
|
14 | #----------------------------------------------------------------------------- | |
15 | # Imports |
|
15 | # Imports | |
16 | #----------------------------------------------------------------------------- |
|
16 | #----------------------------------------------------------------------------- | |
17 |
|
17 | |||
18 | import os |
|
18 | import os | |
19 | import sys |
|
19 | import sys | |
20 |
|
20 | |||
21 | import zmq |
|
21 | import zmq | |
22 |
|
22 | |||
23 | from IPython.parallel.clusterdir import ( |
|
23 | from IPython.parallel.clusterdir import ( | |
24 | ApplicationWithClusterDir, |
|
24 | ApplicationWithClusterDir, | |
25 | ClusterDirConfigLoader |
|
25 | ClusterDirConfigLoader | |
26 | ) |
|
26 | ) | |
27 | from .logwatcher import LogWatcher |
|
27 | from .logwatcher import LogWatcher | |
28 |
|
28 | |||
29 | #----------------------------------------------------------------------------- |
|
29 | #----------------------------------------------------------------------------- | |
30 | # Module level variables |
|
30 | # Module level variables | |
31 | #----------------------------------------------------------------------------- |
|
31 | #----------------------------------------------------------------------------- | |
32 |
|
32 | |||
33 | #: The default config file name for this application |
|
33 | #: The default config file name for this application | |
34 | default_config_file_name = u'iplogger_config.py' |
|
34 | default_config_file_name = u'iplogger_config.py' | |
35 |
|
35 | |||
36 | _description = """Start an IPython logger for parallel computing.\n\n |
|
36 | _description = """Start an IPython logger for parallel computing.\n\n | |
37 |
|
37 | |||
38 | IPython controllers and engines (and your own processes) can broadcast log messages |
|
38 | IPython controllers and engines (and your own processes) can broadcast log messages | |
39 | by registering a `zmq.log.handlers.PUBHandler` with the `logging` module. The |
|
39 | by registering a `zmq.log.handlers.PUBHandler` with the `logging` module. The | |
40 | logger can be configured using command line options or using a cluster |
|
40 | logger can be configured using command line options or using a cluster | |
41 | directory. Cluster directories contain config, log and security files and are |
|
41 | directory. Cluster directories contain config, log and security files and are | |
42 |usually located in your ipython directory and named as "cluster |
|
42 | usually located in your ipython directory and named as "cluster_<profile>". | 
43 | See the --profile and --cluster-dir options for details. |
|
43 | See the --profile and --cluster-dir options for details. | |
44 | """ |
|
44 | """ | |
45 |
|
45 | |||
46 | #----------------------------------------------------------------------------- |
|
46 | #----------------------------------------------------------------------------- | |
47 | # Command line options |
|
47 | # Command line options | |
48 | #----------------------------------------------------------------------------- |
|
48 | #----------------------------------------------------------------------------- | |
49 |
|
49 | |||
50 |
|
50 | |||
51 | class IPLoggerAppConfigLoader(ClusterDirConfigLoader): |
|
51 | class IPLoggerAppConfigLoader(ClusterDirConfigLoader): | |
52 |
|
52 | |||
53 | def _add_arguments(self): |
|
53 | def _add_arguments(self): | |
54 | super(IPLoggerAppConfigLoader, self)._add_arguments() |
|
54 | super(IPLoggerAppConfigLoader, self)._add_arguments() | |
55 | paa = self.parser.add_argument |
|
55 | paa = self.parser.add_argument | |
56 | # Controller config |
|
56 | # Controller config | |
57 | paa('--url', |
|
57 | paa('--url', | |
58 | type=str, dest='LogWatcher.url', |
|
58 | type=str, dest='LogWatcher.url', | |
59 | help='The url the LogWatcher will listen on', |
|
59 | help='The url the LogWatcher will listen on', | |
60 | ) |
|
60 | ) | |
61 | # Topics 
|
61 | # Topics | 
62 | paa('--topics', |
|
62 | paa('--topics', | |
63 | type=str, dest='LogWatcher.topics', nargs='+', |
|
63 | type=str, dest='LogWatcher.topics', nargs='+', | |
64 | help='What topics to subscribe to', |
|
64 | help='What topics to subscribe to', | |
65 | metavar='topics') |
|
65 | metavar='topics') | |
66 | # Global config |
|
66 | # Global config | |
67 | paa('--log-to-file', |
|
67 | paa('--log-to-file', | |
68 | action='store_true', dest='Global.log_to_file', |
|
68 | action='store_true', dest='Global.log_to_file', | |
69 | help='Log to a file in the log directory (default is stdout)') |
|
69 | help='Log to a file in the log directory (default is stdout)') | |
70 |
|
70 | |||
71 |
|
71 | |||
72 | #----------------------------------------------------------------------------- |
|
72 | #----------------------------------------------------------------------------- | |
73 | # Main application |
|
73 | # Main application | |
74 | #----------------------------------------------------------------------------- |
|
74 | #----------------------------------------------------------------------------- | |
75 |
|
75 | |||
76 |
|
76 | |||
77 | class IPLoggerApp(ApplicationWithClusterDir): |
|
77 | class IPLoggerApp(ApplicationWithClusterDir): | |
78 |
|
78 | |||
79 | name = u'iploggerz' |
|
79 | name = u'iploggerz' | |
80 | description = _description |
|
80 | description = _description | |
81 | command_line_loader = IPLoggerAppConfigLoader |
|
81 | command_line_loader = IPLoggerAppConfigLoader | |
82 | default_config_file_name = default_config_file_name |
|
82 | default_config_file_name = default_config_file_name | |
83 | auto_create_cluster_dir = True |
|
83 | auto_create_cluster_dir = True | |
84 |
|
84 | |||
85 | def create_default_config(self): |
|
85 | def create_default_config(self): | |
86 | super(IPLoggerApp, self).create_default_config() |
|
86 | super(IPLoggerApp, self).create_default_config() | |
87 |
|
87 | |||
88 | # The logger should not clean logs as we don't want to remove the 
|
88 | # The logger should not clean logs as we don't want to remove the | 
89 | # active log files of other running engines. |
|
89 | # active log files of other running engines. | |
90 | self.default_config.Global.clean_logs = False |
|
90 | self.default_config.Global.clean_logs = False | |
91 |
|
91 | |||
92 | # If given, this is the actual location of the logger's URL file. |
|
92 | # If given, this is the actual location of the logger's URL file. | |
93 | # If not, this is computed using the profile, app_dir and furl_file_name |
|
93 | # If not, this is computed using the profile, app_dir and furl_file_name | |
94 | self.default_config.Global.url_file_name = u'iplogger.url' |
|
94 | self.default_config.Global.url_file_name = u'iplogger.url' | |
95 | self.default_config.Global.url_file = u'' |
|
95 | self.default_config.Global.url_file = u'' | |
96 |
|
96 | |||
97 | def post_load_command_line_config(self): |
|
97 | def post_load_command_line_config(self): | |
98 | pass |
|
98 | pass | |
99 |
|
99 | |||
100 | def pre_construct(self): |
|
100 | def pre_construct(self): | |
101 | super(IPLoggerApp, self).pre_construct() |
|
101 | super(IPLoggerApp, self).pre_construct() | |
102 |
|
102 | |||
103 | def construct(self): |
|
103 | def construct(self): | |
104 | # This is the working dir by now. |
|
104 | # This is the working dir by now. | |
105 | sys.path.insert(0, '') |
|
105 | sys.path.insert(0, '') | |
106 |
|
106 | |||
107 | self.start_logging() |
|
107 | self.start_logging() | |
108 |
|
108 | |||
109 | try: |
|
109 | try: | |
110 | self.watcher = LogWatcher(config=self.master_config, logname=self.log.name) |
|
110 | self.watcher = LogWatcher(config=self.master_config, logname=self.log.name) | |
111 | except: |
|
111 | except: | |
112 | self.log.error("Couldn't start the LogWatcher", exc_info=True) |
|
112 | self.log.error("Couldn't start the LogWatcher", exc_info=True) | |
113 | self.exit(1) |
|
113 | self.exit(1) | |
114 |
|
114 | |||
115 |
|
115 | |||
116 | def start_app(self): |
|
116 | def start_app(self): | |
117 | try: |
|
117 | try: | |
118 | self.watcher.start() |
|
118 | self.watcher.start() | |
119 | self.watcher.loop.start() |
|
119 | self.watcher.loop.start() | |
120 | except KeyboardInterrupt: |
|
120 | except KeyboardInterrupt: | |
121 | self.log.critical("Logging Interrupted, shutting down...\n") |
|
121 | self.log.critical("Logging Interrupted, shutting down...\n") | |
122 |
|
122 | |||
123 |
|
123 | |||
124 | def launch_new_instance(): |
|
124 | def launch_new_instance(): | |
125 | """Create and run the IPython LogWatcher""" |
|
125 | """Create and run the IPython LogWatcher""" | |
126 | app = IPLoggerApp() |
|
126 | app = IPLoggerApp() | |
127 | app.start() |
|
127 | app.start() | |
128 |
|
128 | |||
129 |
|
129 | |||
130 | if __name__ == '__main__': |
|
130 | if __name__ == '__main__': | |
131 | launch_new_instance() |
|
131 | launch_new_instance() | |
132 |
|
132 |
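
The _description above notes that any process can feed this logger by registering a zmq PUBHandler with the logging module. A minimal producer sketch (the url is a placeholder for whatever --url the LogWatcher listens on):

    import logging
    import zmq
    from zmq.log.handlers import PUBHandler

    sock = zmq.Context.instance().socket(zmq.PUB)
    sock.connect('tcp://127.0.0.1:20202')    # assumed LogWatcher url

    handler = PUBHandler(sock)
    handler.root_topic = 'myapp'             # matches a --topics entry
    logging.getLogger().addHandler(handler)
    logging.getLogger().setLevel(logging.INFO)
    logging.info("hello from an external process")
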
@@ -1,971 +1,971 b'' | |||||
1 | #!/usr/bin/env python |
|
1 | #!/usr/bin/env python | |
2 | # encoding: utf-8 |
|
2 | # encoding: utf-8 | |
3 | """ |
|
3 | """ | |
4 | Facilities for launching IPython processes asynchronously. |
|
4 | Facilities for launching IPython processes asynchronously. | |
5 | """ |
|
5 | """ | |
6 |
|
6 | |||
7 | #----------------------------------------------------------------------------- |
|
7 | #----------------------------------------------------------------------------- | |
8 | # Copyright (C) 2008-2009 The IPython Development Team |
|
8 | # Copyright (C) 2008-2009 The IPython Development Team | |
9 | # |
|
9 | # | |
10 | # Distributed under the terms of the BSD License. The full license is in |
|
10 | # Distributed under the terms of the BSD License. The full license is in | |
11 | # the file COPYING, distributed as part of this software. |
|
11 | # the file COPYING, distributed as part of this software. | |
12 | #----------------------------------------------------------------------------- |
|
12 | #----------------------------------------------------------------------------- | |
13 |
|
13 | |||
14 | #----------------------------------------------------------------------------- |
|
14 | #----------------------------------------------------------------------------- | |
15 | # Imports |
|
15 | # Imports | |
16 | #----------------------------------------------------------------------------- |
|
16 | #----------------------------------------------------------------------------- | |
17 |
|
17 | |||
18 | import copy |
|
18 | import copy | |
19 | import logging |
|
19 | import logging | |
20 | import os |
|
20 | import os | |
21 | import re |
|
21 | import re | |
22 | import stat |
|
22 | import stat | |
23 |
|
23 | |||
24 | from signal import SIGINT, SIGTERM |
|
24 | from signal import SIGINT, SIGTERM | |
25 | try: |
|
25 | try: | |
26 | from signal import SIGKILL |
|
26 | from signal import SIGKILL | |
27 | except ImportError: |
|
27 | except ImportError: | |
28 | SIGKILL=SIGTERM |
|
28 | SIGKILL=SIGTERM | |
29 |
|
29 | |||
30 | from subprocess import Popen, PIPE, STDOUT |
|
30 | from subprocess import Popen, PIPE, STDOUT | |
31 | try: |
|
31 | try: | |
32 | from subprocess import check_output |
|
32 | from subprocess import check_output | |
33 | except ImportError: |
|
33 | except ImportError: | |
34 | # pre-2.7, define check_output with Popen |
|
34 | # pre-2.7, define check_output with Popen | |
35 | def check_output(*args, **kwargs): |
|
35 | def check_output(*args, **kwargs): | |
36 | kwargs.update(dict(stdout=PIPE)) |
|
36 | kwargs.update(dict(stdout=PIPE)) | |
37 | p = Popen(*args, **kwargs) |
|
37 | p = Popen(*args, **kwargs) | |
38 | out,err = p.communicate() |
|
38 | out,err = p.communicate() | |
39 | return out |
|
39 | return out | |
40 |
|
40 | |||
41 | from zmq.eventloop import ioloop |
|
41 | from zmq.eventloop import ioloop | |
42 |
|
42 | |||
43 | from IPython.external import Itpl |
|
43 | from IPython.external import Itpl | |
44 | # from IPython.config.configurable import Configurable |
|
44 | # from IPython.config.configurable import Configurable | |
45 | from IPython.utils.traitlets import Any, Str, Int, List, Unicode, Dict, Instance, CUnicode |
|
45 | from IPython.utils.traitlets import Any, Str, Int, List, Unicode, Dict, Instance, CUnicode | |
46 | from IPython.utils.path import get_ipython_module_path |
|
46 | from IPython.utils.path import get_ipython_module_path | |
47 | from IPython.utils.process import find_cmd, pycmd2argv, FindCmdError |
|
47 | from IPython.utils.process import find_cmd, pycmd2argv, FindCmdError | |
48 |
|
48 | |||
49 | from .factory import LoggingFactory |
|
49 | from .factory import LoggingFactory | |
50 |
|
50 | |||
51 | # load winhpcjob only on Windows |
|
51 | # load winhpcjob only on Windows | |
52 | try: |
|
52 | try: | |
53 | from .winhpcjob import ( |
|
53 | from .winhpcjob import ( | |
54 | IPControllerTask, IPEngineTask, |
|
54 | IPControllerTask, IPEngineTask, | |
55 | IPControllerJob, IPEngineSetJob |
|
55 | IPControllerJob, IPEngineSetJob | |
56 | ) |
|
56 | ) | |
57 | except ImportError: |
|
57 | except ImportError: | |
58 | pass |
|
58 | pass | |
59 |
|
59 | |||
60 |
|
60 | |||
61 | #----------------------------------------------------------------------------- |
|
61 | #----------------------------------------------------------------------------- | |
62 | # Paths to the kernel apps |
|
62 | # Paths to the kernel apps | |
63 | #----------------------------------------------------------------------------- |
|
63 | #----------------------------------------------------------------------------- | |
64 |
|
64 | |||
65 |
|
65 | |||
66 |ipcluster |
|
66 | ipcluster_cmd_argv = pycmd2argv(get_ipython_module_path( | 
67 | 'IPython.parallel.ipclusterapp' |
|
67 | 'IPython.parallel.ipclusterapp' | |
68 | )) |
|
68 | )) | |
69 |
|
69 | |||
70 |ipengine |
|
70 | ipengine_cmd_argv = pycmd2argv(get_ipython_module_path( | 
71 | 'IPython.parallel.ipengineapp' |
|
71 | 'IPython.parallel.ipengineapp' | |
72 | )) |
|
72 | )) | |
73 |
|
73 | |||
74 |ipcontroller |
|
74 | ipcontroller_cmd_argv = pycmd2argv(get_ipython_module_path( | 
75 | 'IPython.parallel.ipcontrollerapp' |
|
75 | 'IPython.parallel.ipcontrollerapp' | |
76 | )) |
|
76 | )) | |
77 |
|
77 | |||
78 | #----------------------------------------------------------------------------- |
|
78 | #----------------------------------------------------------------------------- | |
79 | # Base launchers and errors |
|
79 | # Base launchers and errors | |
80 | #----------------------------------------------------------------------------- |
|
80 | #----------------------------------------------------------------------------- | |
81 |
|
81 | |||
82 |
|
82 | |||
83 | class LauncherError(Exception): |
|
83 | class LauncherError(Exception): | |
84 | pass |
|
84 | pass | |
85 |
|
85 | |||
86 |
|
86 | |||
87 | class ProcessStateError(LauncherError): |
|
87 | class ProcessStateError(LauncherError): | |
88 | pass |
|
88 | pass | |
89 |
|
89 | |||
90 |
|
90 | |||
91 | class UnknownStatus(LauncherError): |
|
91 | class UnknownStatus(LauncherError): | |
92 | pass |
|
92 | pass | |
93 |
|
93 | |||
94 |
|
94 | |||
95 | class BaseLauncher(LoggingFactory): |
|
95 | class BaseLauncher(LoggingFactory): | |
96 | """An asbtraction for starting, stopping and signaling a process.""" |
|
96 | """An asbtraction for starting, stopping and signaling a process.""" | |
97 |
|
97 | |||
98 | # In all of the launchers, the work_dir is where child processes will be |
|
98 | # In all of the launchers, the work_dir is where child processes will be | |
99 | # run. This will usually be the cluster_dir, but may not be. Any work_dir 
|
99 | # run. This will usually be the cluster_dir, but may not be. Any work_dir | 
100 | # passed into the __init__ method will override the config value. |
|
100 | # passed into the __init__ method will override the config value. | |
101 | # This should not be used to set the work_dir for the actual engine |
|
101 | # This should not be used to set the work_dir for the actual engine | |
102 | # and controller. Instead, use their own config files or the |
|
102 | # and controller. Instead, use their own config files or the | |
103 | # controller_args, engine_args attributes of the launchers to add |
|
103 | # controller_args, engine_args attributes of the launchers to add | |
104 | # the --work-dir option. |
|
104 | # the --work-dir option. | |
105 | work_dir = Unicode(u'.') |
|
105 | work_dir = Unicode(u'.') | |
106 | loop = Instance('zmq.eventloop.ioloop.IOLoop') |
|
106 | loop = Instance('zmq.eventloop.ioloop.IOLoop') | |
107 |
|
107 | |||
108 | start_data = Any() |
|
108 | start_data = Any() | |
109 | stop_data = Any() |
|
109 | stop_data = Any() | |
110 |
|
110 | |||
111 | def _loop_default(self): |
|
111 | def _loop_default(self): | |
112 | return ioloop.IOLoop.instance() |
|
112 | return ioloop.IOLoop.instance() | |
113 |
|
113 | |||
114 | def __init__(self, work_dir=u'.', config=None, **kwargs): |
|
114 | def __init__(self, work_dir=u'.', config=None, **kwargs): | |
115 | super(BaseLauncher, self).__init__(work_dir=work_dir, config=config, **kwargs) |
|
115 | super(BaseLauncher, self).__init__(work_dir=work_dir, config=config, **kwargs) | |
116 | self.state = 'before' # can be before, running, after |
|
116 | self.state = 'before' # can be before, running, after | |
117 | self.stop_callbacks = [] |
|
117 | self.stop_callbacks = [] | |
118 | self.start_data = None |
|
118 | self.start_data = None | |
119 | self.stop_data = None |
|
119 | self.stop_data = None | |
120 |
|
120 | |||
121 | @property |
|
121 | @property | |
122 | def args(self): |
|
122 | def args(self): | |
123 | """A list of cmd and args that will be used to start the process. |
|
123 | """A list of cmd and args that will be used to start the process. | |
124 |
|
124 | |||
125 | This is what is passed to :func:`spawnProcess` and the first element |
|
125 | This is what is passed to :func:`spawnProcess` and the first element | |
126 | will be the process name. |
|
126 | will be the process name. | |
127 | """ |
|
127 | """ | |
128 | return self.find_args() |
|
128 | return self.find_args() | |
129 |
|
129 | |||
130 | def find_args(self): |
|
130 | def find_args(self): | |
131 | """The ``.args`` property calls this to find the args list. |
|
131 | """The ``.args`` property calls this to find the args list. | |
132 |
|
132 | |||
133 | Subclasses should implement this to construct the cmd and args. 
|
133 | Subclasses should implement this to construct the cmd and args. | 
134 | """ |
|
134 | """ | |
135 | raise NotImplementedError('find_args must be implemented in a subclass') |
|
135 | raise NotImplementedError('find_args must be implemented in a subclass') | |
136 |
|
136 | |||
137 | @property |
|
137 | @property | |
138 | def arg_str(self): |
|
138 | def arg_str(self): | |
139 | """The string form of the program arguments.""" |
|
139 | """The string form of the program arguments.""" | |
140 | return ' '.join(self.args) |
|
140 | return ' '.join(self.args) | |
141 |
|
141 | |||
142 | @property |
|
142 | @property | |
143 | def running(self): |
|
143 | def running(self): | |
144 | """Am I running.""" |
|
144 | """Am I running.""" | |
145 | if self.state == 'running': |
|
145 | if self.state == 'running': | |
146 | return True |
|
146 | return True | |
147 | else: |
|
147 | else: | |
148 | return False |
|
148 | return False | |
149 |
|
149 | |||
150 | def start(self): |
|
150 | def start(self): | |
151 | """Start the process. |
|
151 | """Start the process. | |
152 |
|
152 | |||
153 | This must return a deferred that fires with information about the |
|
153 | This must return a deferred that fires with information about the | |
154 | process starting (like a pid, job id, etc.). |
|
154 | process starting (like a pid, job id, etc.). | |
155 | """ |
|
155 | """ | |
156 | raise NotImplementedError('start must be implemented in a subclass') |
|
156 | raise NotImplementedError('start must be implemented in a subclass') | |
157 |
|
157 | |||
158 | def stop(self): |
|
158 | def stop(self): | |
159 | """Stop the process and notify observers of stopping. |
|
159 | """Stop the process and notify observers of stopping. | |
160 |
|
160 | |||
161 | This must return a deferred that fires with information about the |
|
161 | This must return a deferred that fires with information about the | |
162 | processing stopping, like errors that occur while the process is |
|
162 | processing stopping, like errors that occur while the process is | |
163 | attempting to be shut down. This deferred won't fire when the process |
|
163 | attempting to be shut down. This deferred won't fire when the process | |
164 | actually stops. To observe the actual process stopping, see |
|
164 | actually stops. To observe the actual process stopping, see | |
165 | :func:`on_stop`. 
|
165 | :func:`on_stop`. | 
166 | """ |
|
166 | """ | |
167 | raise NotImplementedError('stop must be implemented in a subclass') |
|
167 | raise NotImplementedError('stop must be implemented in a subclass') | |
168 |
|
168 | |||
169 | def on_stop(self, f): |
|
169 | def on_stop(self, f): | |
170 | """Get a deferred that will fire when the process stops. |
|
170 | """Get a deferred that will fire when the process stops. | |
171 |
|
171 | |||
172 | The deferred will fire with data that contains information about |
|
172 | The deferred will fire with data that contains information about | |
173 | the exit status of the process. |
|
173 | the exit status of the process. | |
174 | """ |
|
174 | """ | |
175 | if self.state=='after': |
|
175 | if self.state=='after': | |
176 | return f(self.stop_data) |
|
176 | return f(self.stop_data) | |
177 | else: |
|
177 | else: | |
178 | self.stop_callbacks.append(f) |
|
178 | self.stop_callbacks.append(f) | |
179 |
|
179 | |||
180 | def notify_start(self, data): |
|
180 | def notify_start(self, data): | |
181 | """Call this to trigger startup actions. |
|
181 | """Call this to trigger startup actions. | |
182 |
|
182 | |||
183 | This logs the process startup and sets the state to 'running'. It is |
|
183 | This logs the process startup and sets the state to 'running'. It is | |
184 | a pass-through so it can be used as a callback. |
|
184 | a pass-through so it can be used as a callback. | |
185 | """ |
|
185 | """ | |
186 |
|
186 | |||
187 | self.log.info('Process %r started: %r' % (self.args[0], data)) |
|
187 | self.log.info('Process %r started: %r' % (self.args[0], data)) | |
188 | self.start_data = data |
|
188 | self.start_data = data | |
189 | self.state = 'running' |
|
189 | self.state = 'running' | |
190 | return data |
|
190 | return data | |
191 |
|
191 | |||
192 | def notify_stop(self, data): |
|
192 | def notify_stop(self, data): | |
193 | """Call this to trigger process stop actions. |
|
193 | """Call this to trigger process stop actions. | |
194 |
|
194 | |||
195 | This logs the process stopping and sets the state to 'after'. Call |
|
195 | This logs the process stopping and sets the state to 'after'. Call | |
196 | this to trigger all the callbacks registered via :func:`on_stop`.""" 
|
196 | this to trigger all the callbacks registered via :func:`on_stop`.""" | 
197 |
|
197 | |||
198 | self.log.info('Process %r stopped: %r' % (self.args[0], data)) |
|
198 | self.log.info('Process %r stopped: %r' % (self.args[0], data)) | |
199 | self.stop_data = data |
|
199 | self.stop_data = data | |
200 | self.state = 'after' |
|
200 | self.state = 'after' | |
201 | for i in range(len(self.stop_callbacks)): |
|
201 | for i in range(len(self.stop_callbacks)): | |
202 | d = self.stop_callbacks.pop() |
|
202 | d = self.stop_callbacks.pop() | |
203 | d(data) |
|
203 | d(data) | |
204 | return data |
|
204 | return data | |
205 |
|
205 | |||
206 | def signal(self, sig): |
|
206 | def signal(self, sig): | |
207 | """Signal the process. |
|
207 | """Signal the process. | |
208 |
|
208 | |||
209 | Return a semi-meaningless deferred after signaling the process. |
|
209 | Return a semi-meaningless deferred after signaling the process. | |
210 |
|
210 | |||
211 | Parameters |
|
211 | Parameters | |
212 | ---------- |
|
212 | ---------- | |
213 | sig : str or int |
|
213 | sig : str or int | |
214 | 'KILL', 'INT', etc., or any signal number |
|
214 | 'KILL', 'INT', etc., or any signal number | |
215 | """ |
|
215 | """ | |
216 | raise NotImplementedError('signal must be implemented in a subclass') |
|
216 | raise NotImplementedError('signal must be implemented in a subclass') | |
217 |
|
217 | |||
218 |
|
218 | |||
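
To make the BaseLauncher contract concrete: a subclass supplies find_args()/start()/stop() and reports state changes through notify_start()/notify_stop(), which fire anything registered with on_stop(). A toy sketch assuming the BaseLauncher above (EchoLauncher is illustrative only):

    class EchoLauncher(BaseLauncher):

        def find_args(self):
            return ['echo', 'hello']     # first element is the process name

        def start(self):
            # a real launcher spawns self.args here and records a pid/job id
            return self.notify_start({'pid': 0})

        def stop(self):
            # flips state to 'after' and fires the on_stop() callbacks
            return self.notify_stop({'exit_code': 0, 'pid': 0})
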
219 | #----------------------------------------------------------------------------- |
|
219 | #----------------------------------------------------------------------------- | |
220 | # Local process launchers |
|
220 | # Local process launchers | |
221 | #----------------------------------------------------------------------------- |
|
221 | #----------------------------------------------------------------------------- | |
222 |
|
222 | |||
223 |
|
223 | |||
224 | class LocalProcessLauncher(BaseLauncher): |
|
224 | class LocalProcessLauncher(BaseLauncher): | |
225 | """Start and stop an external process in an asynchronous manner. |
|
225 | """Start and stop an external process in an asynchronous manner. | |
226 |
|
226 | |||
227 | This will launch the external process with a working directory of |
|
227 | This will launch the external process with a working directory of | |
228 | ``self.work_dir``. |
|
228 | ``self.work_dir``. | |
229 | """ |
|
229 | """ | |
230 |
|
230 | |||
231 | # This is used to construct self.args, which is passed to 
|
231 | # This is used to construct self.args, which is passed to | 
232 | # spawnProcess. |
|
232 | # spawnProcess. | |
233 | cmd_and_args = List([]) |
|
233 | cmd_and_args = List([]) | |
234 | poll_frequency = Int(100) # in ms |
|
234 | poll_frequency = Int(100) # in ms | |
235 |
|
235 | |||
236 | def __init__(self, work_dir=u'.', config=None, **kwargs): |
|
236 | def __init__(self, work_dir=u'.', config=None, **kwargs): | |
237 | super(LocalProcessLauncher, self).__init__( |
|
237 | super(LocalProcessLauncher, self).__init__( | |
238 | work_dir=work_dir, config=config, **kwargs |
|
238 | work_dir=work_dir, config=config, **kwargs | |
239 | ) |
|
239 | ) | |
240 | self.process = None |
|
240 | self.process = None | |
241 | self.start_deferred = None |
|
241 | self.start_deferred = None | |
242 | self.poller = None |
|
242 | self.poller = None | |
243 |
|
243 | |||
244 | def find_args(self): |
|
244 | def find_args(self): | |
245 | return self.cmd_and_args |
|
245 | return self.cmd_and_args | |
246 |
|
246 | |||
247 | def start(self): |
|
247 | def start(self): | |
248 | if self.state == 'before': |
|
248 | if self.state == 'before': | |
249 | self.process = Popen(self.args, |
|
249 | self.process = Popen(self.args, | |
250 | stdout=PIPE,stderr=PIPE,stdin=PIPE, |
|
250 | stdout=PIPE,stderr=PIPE,stdin=PIPE, | |
251 | env=os.environ, |
|
251 | env=os.environ, | |
252 | cwd=self.work_dir |
|
252 | cwd=self.work_dir | |
253 | ) |
|
253 | ) | |
254 |
|
254 | |||
255 | self.loop.add_handler(self.process.stdout.fileno(), self.handle_stdout, self.loop.READ) |
|
255 | self.loop.add_handler(self.process.stdout.fileno(), self.handle_stdout, self.loop.READ) | |
256 | self.loop.add_handler(self.process.stderr.fileno(), self.handle_stderr, self.loop.READ) |
|
256 | self.loop.add_handler(self.process.stderr.fileno(), self.handle_stderr, self.loop.READ) | |
257 | self.poller = ioloop.PeriodicCallback(self.poll, self.poll_frequency, self.loop) |
|
257 | self.poller = ioloop.PeriodicCallback(self.poll, self.poll_frequency, self.loop) | |
258 | self.poller.start() |
|
258 | self.poller.start() | |
259 | self.notify_start(self.process.pid) |
|
259 | self.notify_start(self.process.pid) | |
260 | else: |
|
260 | else: | |
261 | s = 'The process was already started and has state: %r' % self.state |
|
261 | s = 'The process was already started and has state: %r' % self.state | |
262 | raise ProcessStateError(s) |
|
262 | raise ProcessStateError(s) | |
263 |
|
263 | |||
264 | def stop(self): |
|
264 | def stop(self): | |
265 | return self.interrupt_then_kill() |
|
265 | return self.interrupt_then_kill() | |
266 |
|
266 | |||
267 | def signal(self, sig): |
|
267 | def signal(self, sig): | |
268 | if self.state == 'running': |
|
268 | if self.state == 'running': | |
269 | self.process.send_signal(sig) |
|
269 | self.process.send_signal(sig) | |
270 |
|
270 | |||
271 | def interrupt_then_kill(self, delay=2.0): |
|
271 | def interrupt_then_kill(self, delay=2.0): | |
272 | """Send INT, wait a delay and then send KILL.""" |
|
272 | """Send INT, wait a delay and then send KILL.""" | |
273 | self.signal(SIGINT) |
|
273 | self.signal(SIGINT) | |
274 | self.killer = ioloop.DelayedCallback(lambda : self.signal(SIGKILL), delay*1000, self.loop) |
|
274 | self.killer = ioloop.DelayedCallback(lambda : self.signal(SIGKILL), delay*1000, self.loop) | |
275 | self.killer.start() |
|
275 | self.killer.start() | |
276 |
|
276 | |||
277 | # callbacks, etc: |
|
277 | # callbacks, etc: | |
278 |
|
278 | |||
279 | def handle_stdout(self, fd, events): |
|
279 | def handle_stdout(self, fd, events): | |
280 | line = self.process.stdout.readline() |
|
280 | line = self.process.stdout.readline() | |
281 | # a stopped process will be readable but return empty strings |
|
281 | # a stopped process will be readable but return empty strings | |
282 | if line: |
|
282 | if line: | |
283 | self.log.info(line[:-1]) |
|
283 | self.log.info(line[:-1]) | |
284 | else: |
|
284 | else: | |
285 | self.poll() |
|
285 | self.poll() | |
286 |
|
286 | |||
287 | def handle_stderr(self, fd, events): |
|
287 | def handle_stderr(self, fd, events): | |
288 | line = self.process.stderr.readline() |
|
288 | line = self.process.stderr.readline() | |
289 | # a stopped process will be readable but return empty strings |
|
289 | # a stopped process will be readable but return empty strings | |
290 | if line: |
|
290 | if line: | |
291 | self.log.error(line[:-1]) |
|
291 | self.log.error(line[:-1]) | |
292 | else: |
|
292 | else: | |
293 | self.poll() |
|
293 | self.poll() | |
294 |
|
294 | |||
295 | def poll(self): |
|
295 | def poll(self): | |
296 | status = self.process.poll() |
|
296 | status = self.process.poll() | |
297 | if status is not None: |
|
297 | if status is not None: | |
298 | self.poller.stop() |
|
298 | self.poller.stop() | |
299 | self.loop.remove_handler(self.process.stdout.fileno()) |
|
299 | self.loop.remove_handler(self.process.stdout.fileno()) | |
300 | self.loop.remove_handler(self.process.stderr.fileno()) |
|
300 | self.loop.remove_handler(self.process.stderr.fileno()) | |
301 | self.notify_stop(dict(exit_code=status, pid=self.process.pid)) |
|
301 | self.notify_stop(dict(exit_code=status, pid=self.process.pid)) | |
302 | return status |
|
302 | return status | |
303 |
|
303 | |||
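A minimal usage sketch of LocalProcessLauncher follows (hypothetical command and callback; it assumes the zmq ioloop the launcher holds as self.loop is running, since stdout/stderr forwarding and exit polling are driven by that loop):

    launcher = LocalProcessLauncher(work_dir=u'/tmp')
    launcher.cmd_and_args = ['sleep', '10']   # hypothetical command
    def report(data):                         # data carries exit_code and pid
        print data
    launcher.on_stop(report)                  # on_stop, as used elsewhere in this file
    launcher.start()       # spawns the process and registers the fd handlers
    launcher.loop.start()  # output logging and poll() run on the loop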
304 | class LocalControllerLauncher(LocalProcessLauncher): |
|
304 | class LocalControllerLauncher(LocalProcessLauncher): | |
305 | """Launch a controller as a regular external process.""" |
|
305 | """Launch a controller as a regular external process.""" | |
306 |
|
306 | |||
307 | controller_cmd = List(ipcontroller_cmd_argv, config=True) |
|
307 | controller_cmd = List(ipcontroller_cmd_argv, config=True) | |
308 | # Command line arguments to ipcontroller. |
|
308 | # Command line arguments to ipcontroller. | |
309 | controller_args = List(['--log-to-file','--log-level', str(logging.INFO)], config=True) |
|
309 | controller_args = List(['--log-to-file','--log-level', str(logging.INFO)], config=True) | |
310 |
|
310 | |||
311 | def find_args(self): |
|
311 | def find_args(self): | |
312 | return self.controller_cmd + self.controller_args |
|
312 | return self.controller_cmd + self.controller_args | |
313 |
|
313 | |||
314 | def start(self, cluster_dir): |
|
314 | def start(self, cluster_dir): | |
315 | """Start the controller by cluster_dir.""" |
|
315 | """Start the controller by cluster_dir.""" | |
316 | self.controller_args.extend(['--cluster-dir', cluster_dir]) |
|
316 | self.controller_args.extend(['--cluster-dir', cluster_dir]) | |
317 | self.cluster_dir = unicode(cluster_dir) |
|
317 | self.cluster_dir = unicode(cluster_dir) | |
318 | self.log.info("Starting LocalControllerLauncher: %r" % self.args) |
|
318 | self.log.info("Starting LocalControllerLauncher: %r" % self.args) | |
319 | return super(LocalControllerLauncher, self).start() |
|
319 | return super(LocalControllerLauncher, self).start() | |
320 |
|
320 | |||
321 |
|
321 | |||
322 | class LocalEngineLauncher(LocalProcessLauncher): |
|
322 | class LocalEngineLauncher(LocalProcessLauncher): | |
323 | """Launch a single engine as a regular externall process.""" |
|
323 | """Launch a single engine as a regular externall process.""" | |
324 |
|
324 | |||
325 | engine_cmd = List(ipengine_cmd_argv, config=True) |
|
325 | engine_cmd = List(ipengine_cmd_argv, config=True) | |
326 | # Command line arguments for ipengine. |
|
326 | # Command line arguments for ipengine. | |
327 | engine_args = List( |
|
327 | engine_args = List( | |
328 | ['--log-to-file','--log-level', str(logging.INFO)], config=True |
|
328 | ['--log-to-file','--log-level', str(logging.INFO)], config=True | |
329 | ) |
|
329 | ) | |
330 |
|
330 | |||
331 | def find_args(self): |
|
331 | def find_args(self): | |
332 | return self.engine_cmd + self.engine_args |
|
332 | return self.engine_cmd + self.engine_args | |
333 |
|
333 | |||
334 | def start(self, cluster_dir): |
|
334 | def start(self, cluster_dir): | |
335 | """Start the engine by cluster_dir.""" |
|
335 | """Start the engine by cluster_dir.""" | |
336 | self.engine_args.extend(['--cluster-dir', cluster_dir]) |
|
336 | self.engine_args.extend(['--cluster-dir', cluster_dir]) | |
337 | self.cluster_dir = unicode(cluster_dir) |
|
337 | self.cluster_dir = unicode(cluster_dir) | |
338 | return super(LocalEngineLauncher, self).start() |
|
338 | return super(LocalEngineLauncher, self).start() | |
339 |
|
339 | |||
340 |
|
340 | |||
341 | class LocalEngineSetLauncher(BaseLauncher): |
|
341 | class LocalEngineSetLauncher(BaseLauncher): | |
342 | """Launch a set of engines as regular external processes.""" |
|
342 | """Launch a set of engines as regular external processes.""" | |
343 |
|
343 | |||
344 | # Command line arguments for ipengine. |
|
344 | # Command line arguments for ipengine. | |
345 | engine_args = List( |
|
345 | engine_args = List( | |
346 | ['--log-to-file','--log-level', str(logging.INFO)], config=True |
|
346 | ['--log-to-file','--log-level', str(logging.INFO)], config=True | |
347 | ) |
|
347 | ) | |
348 | # launcher class |
|
348 | # launcher class | |
349 | launcher_class = LocalEngineLauncher |
|
349 | launcher_class = LocalEngineLauncher | |
350 |
|
350 | |||
351 | launchers = Dict() |
|
351 | launchers = Dict() | |
352 | stop_data = Dict() |
|
352 | stop_data = Dict() | |
353 |
|
353 | |||
354 | def __init__(self, work_dir=u'.', config=None, **kwargs): |
|
354 | def __init__(self, work_dir=u'.', config=None, **kwargs): | |
355 | super(LocalEngineSetLauncher, self).__init__( |
|
355 | super(LocalEngineSetLauncher, self).__init__( | |
356 | work_dir=work_dir, config=config, **kwargs |
|
356 | work_dir=work_dir, config=config, **kwargs | |
357 | ) |
|
357 | ) | |
358 | self.stop_data = {} |
|
358 | self.stop_data = {} | |
359 |
|
359 | |||
360 | def start(self, n, cluster_dir): |
|
360 | def start(self, n, cluster_dir): | |
361 | """Start n engines by profile or cluster_dir.""" |
|
361 | """Start n engines by profile or cluster_dir.""" | |
362 | self.cluster_dir = unicode(cluster_dir) |
|
362 | self.cluster_dir = unicode(cluster_dir) | |
363 | dlist = [] |
|
363 | dlist = [] | |
364 | for i in range(n): |
|
364 | for i in range(n): | |
365 | el = self.launcher_class(work_dir=self.work_dir, config=self.config, logname=self.log.name) |
|
365 | el = self.launcher_class(work_dir=self.work_dir, config=self.config, logname=self.log.name) | |
366 | # Copy the engine args over to each engine launcher. |
|
366 | # Copy the engine args over to each engine launcher. | |
367 | el.engine_args = copy.deepcopy(self.engine_args) |
|
367 | el.engine_args = copy.deepcopy(self.engine_args) | |
368 | el.on_stop(self._notice_engine_stopped) |
|
368 | el.on_stop(self._notice_engine_stopped) | |
369 | d = el.start(cluster_dir) |
|
369 | d = el.start(cluster_dir) | |
370 | if i==0: |
|
370 | if i==0: | |
371 | self.log.info("Starting LocalEngineSetLauncher: %r" % el.args) |
|
371 | self.log.info("Starting LocalEngineSetLauncher: %r" % el.args) | |
372 | self.launchers[i] = el |
|
372 | self.launchers[i] = el | |
373 | dlist.append(d) |
|
373 | dlist.append(d) | |
374 | self.notify_start(dlist) |
|
374 | self.notify_start(dlist) | |
375 | # The consumeErrors here could be dangerous |
|
375 | # The consumeErrors here could be dangerous | |
376 | # dfinal = gatherBoth(dlist, consumeErrors=True) |
|
376 | # dfinal = gatherBoth(dlist, consumeErrors=True) | |
377 | # dfinal.addCallback(self.notify_start) |
|
377 | # dfinal.addCallback(self.notify_start) | |
378 | return dlist |
|
378 | return dlist | |
379 |
|
379 | |||
380 | def find_args(self): |
|
380 | def find_args(self): | |
381 | return ['engine set'] |
|
381 | return ['engine set'] | |
382 |
|
382 | |||
383 | def signal(self, sig): |
|
383 | def signal(self, sig): | |
384 | dlist = [] |
|
384 | dlist = [] | |
385 | for el in self.launchers.itervalues(): |
|
385 | for el in self.launchers.itervalues(): | |
386 | d = el.signal(sig) |
|
386 | d = el.signal(sig) | |
387 | dlist.append(d) |
|
387 | dlist.append(d) | |
388 | # dfinal = gatherBoth(dlist, consumeErrors=True) |
|
388 | # dfinal = gatherBoth(dlist, consumeErrors=True) | |
389 | return dlist |
|
389 | return dlist | |
390 |
|
390 | |||
391 | def interrupt_then_kill(self, delay=1.0): |
|
391 | def interrupt_then_kill(self, delay=1.0): | |
392 | dlist = [] |
|
392 | dlist = [] | |
393 | for el in self.launchers.itervalues(): |
|
393 | for el in self.launchers.itervalues(): | |
394 | d = el.interrupt_then_kill(delay) |
|
394 | d = el.interrupt_then_kill(delay) | |
395 | dlist.append(d) |
|
395 | dlist.append(d) | |
396 | # dfinal = gatherBoth(dlist, consumeErrors=True) |
|
396 | # dfinal = gatherBoth(dlist, consumeErrors=True) | |
397 | return dlist |
|
397 | return dlist | |
398 |
|
398 | |||
399 | def stop(self): |
|
399 | def stop(self): | |
400 | return self.interrupt_then_kill() |
|
400 | return self.interrupt_then_kill() | |
401 |
|
401 | |||
402 | def _notice_engine_stopped(self, data): |
|
402 | def _notice_engine_stopped(self, data): | |
403 | pid = data['pid'] |
|
403 | pid = data['pid'] | |
404 | for idx,el in self.launchers.iteritems(): |
|
404 | for idx,el in self.launchers.iteritems(): | |
405 | if el.process.pid == pid: |
|
405 | if el.process.pid == pid: | |
406 | break |
|
406 | break | |
407 | self.launchers.pop(idx) |
|
407 | self.launchers.pop(idx) | |
408 | self.stop_data[idx] = data |
|
408 | self.stop_data[idx] = data | |
409 | if not self.launchers: |
|
409 | if not self.launchers: | |
410 | self.notify_stop(self.stop_data) |
|
410 | self.notify_stop(self.stop_data) | |
411 |
|
411 | |||
412 |
|
412 | |||
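Since engine_args is a configurable trait, a cluster configuration file can override it; a hypothetical example:

    # in ipcluster_config.py (hypothetical values):
    c.LocalEngineSetLauncher.engine_args = ['--log-to-file', '--log-level', '10']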
413 | #----------------------------------------------------------------------------- |
|
413 | #----------------------------------------------------------------------------- | |
414 | # MPIExec launchers |
|
414 | # MPIExec launchers | |
415 | #----------------------------------------------------------------------------- |
|
415 | #----------------------------------------------------------------------------- | |
416 |
|
416 | |||
417 |
|
417 | |||
418 | class MPIExecLauncher(LocalProcessLauncher): |
|
418 | class MPIExecLauncher(LocalProcessLauncher): | |
419 | """Launch an external process using mpiexec.""" |
|
419 | """Launch an external process using mpiexec.""" | |
420 |
|
420 | |||
421 | # The mpiexec command to use in starting the process. |
|
421 | # The mpiexec command to use in starting the process. | |
422 | mpi_cmd = List(['mpiexec'], config=True) |
|
422 | mpi_cmd = List(['mpiexec'], config=True) | |
423 | # The command line arguments to pass to mpiexec. |
|
423 | # The command line arguments to pass to mpiexec. | |
424 | mpi_args = List([], config=True) |
|
424 | mpi_args = List([], config=True) | |
425 | # The program to start using mpiexec. |
|
425 | # The program to start using mpiexec. | |
426 | program = List(['date'], config=True) |
|
426 | program = List(['date'], config=True) | |
427 | # The command line arguments to the program. |
|
427 | # The command line arguments to the program. | |
428 | program_args = List([], config=True) |
|
428 | program_args = List([], config=True) | |
429 | # The number of instances of the program to start. |
|
429 | # The number of instances of the program to start. | |
430 | n = Int(1, config=True) |
|
430 | n = Int(1, config=True) | |
431 |
|
431 | |||
432 | def find_args(self): |
|
432 | def find_args(self): | |
433 | """Build self.args using all the fields.""" |
|
433 | """Build self.args using all the fields.""" | |
434 | return self.mpi_cmd + ['-n', str(self.n)] + self.mpi_args + \ |
|
434 | return self.mpi_cmd + ['-n', str(self.n)] + self.mpi_args + \ | |
435 | self.program + self.program_args |
|
435 | self.program + self.program_args | |
436 |
|
436 | |||
437 | def start(self, n): |
|
437 | def start(self, n): | |
438 | """Start n instances of the program using mpiexec.""" |
|
438 | """Start n instances of the program using mpiexec.""" | |
439 | self.n = n |
|
439 | self.n = n | |
440 | return super(MPIExecLauncher, self).start() |
|
440 | return super(MPIExecLauncher, self).start() | |
441 |
|
441 | |||
442 |
|
442 | |||
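For reference, find_args assembles the full argv from the traits above; a sketch with hypothetical settings:

    launcher = MPIExecLauncher(work_dir=u'.')
    launcher.program = ['hostname']           # hypothetical program
    launcher.mpi_args = ['--host', 'n1,n2']   # hypothetical mpiexec options
    launcher.n = 4
    launcher.find_args()
    # -> ['mpiexec', '-n', '4', '--host', 'n1,n2', 'hostname']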
443 | class MPIExecControllerLauncher(MPIExecLauncher): |
|
443 | class MPIExecControllerLauncher(MPIExecLauncher): | |
444 | """Launch a controller using mpiexec.""" |
|
444 | """Launch a controller using mpiexec.""" | |
445 |
|
445 | |||
446 | controller_cmd = List(ipcontroller_cmd_argv, config=True) |
|
446 | controller_cmd = List(ipcontroller_cmd_argv, config=True) | |
447 | # Command line arguments to ipcontroller. |
|
447 | # Command line arguments to ipcontroller. | |
448 | controller_args = List(['--log-to-file','--log-level', str(logging.INFO)], config=True) |
|
448 | controller_args = List(['--log-to-file','--log-level', str(logging.INFO)], config=True) | |
449 | n = Int(1, config=False) |
|
449 | n = Int(1, config=False) | |
450 |
|
450 | |||
451 | def start(self, cluster_dir): |
|
451 | def start(self, cluster_dir): | |
452 | """Start the controller by cluster_dir.""" |
|
452 | """Start the controller by cluster_dir.""" | |
453 | self.controller_args.extend(['--cluster-dir', cluster_dir]) |
|
453 | self.controller_args.extend(['--cluster-dir', cluster_dir]) | |
454 | self.cluster_dir = unicode(cluster_dir) |
|
454 | self.cluster_dir = unicode(cluster_dir) | |
455 | self.log.info("Starting MPIExecControllerLauncher: %r" % self.args) |
|
455 | self.log.info("Starting MPIExecControllerLauncher: %r" % self.args) | |
456 | return super(MPIExecControllerLauncher, self).start(1) |
|
456 | return super(MPIExecControllerLauncher, self).start(1) | |
457 |
|
457 | |||
458 | def find_args(self): |
|
458 | def find_args(self): | |
459 | return self.mpi_cmd + ['-n', str(self.n)] + self.mpi_args + \ |
|
459 | return self.mpi_cmd + ['-n', str(self.n)] + self.mpi_args + \ | |
460 | self.controller_cmd + self.controller_args |
|
460 | self.controller_cmd + self.controller_args | |
461 |
|
461 | |||
462 |
|
462 | |||
463 | class MPIExecEngineSetLauncher(MPIExecLauncher): |
|
463 | class MPIExecEngineSetLauncher(MPIExecLauncher): | |
464 |
|
464 | |||
465 | program = List(ipengine_cmd_argv, config=True) |
|
465 | program = List(ipengine_cmd_argv, config=True) | |
466 | # Command line arguments for ipengine. |
|
466 | # Command line arguments for ipengine. | |
467 | program_args = List( |
|
467 | program_args = List( | |
468 | ['--log-to-file','--log-level', str(logging.INFO)], config=True |
|
468 | ['--log-to-file','--log-level', str(logging.INFO)], config=True | |
469 | ) |
|
469 | ) | |
470 | n = Int(1, config=True) |
|
470 | n = Int(1, config=True) | |
471 |
|
471 | |||
472 | def start(self, n, cluster_dir): |
|
472 | def start(self, n, cluster_dir): | |
473 | """Start n engines by profile or cluster_dir.""" |
|
473 | """Start n engines by profile or cluster_dir.""" | |
474 | self.program_args.extend(['--cluster-dir', cluster_dir]) |
|
474 | self.program_args.extend(['--cluster-dir', cluster_dir]) | |
475 | self.cluster_dir = unicode(cluster_dir) |
|
475 | self.cluster_dir = unicode(cluster_dir) | |
476 | self.n = n |
|
476 | self.n = n | |
477 | self.log.info('Starting MPIExecEngineSetLauncher: %r' % self.args) |
|
477 | self.log.info('Starting MPIExecEngineSetLauncher: %r' % self.args) | |
478 | return super(MPIExecEngineSetLauncher, self).start(n) |
|
478 | return super(MPIExecEngineSetLauncher, self).start(n) | |
479 |
|
479 | |||
480 | #----------------------------------------------------------------------------- |
|
480 | #----------------------------------------------------------------------------- | |
481 | # SSH launchers |
|
481 | # SSH launchers | |
482 | #----------------------------------------------------------------------------- |
|
482 | #----------------------------------------------------------------------------- | |
483 |
|
483 | |||
484 | # TODO: Get SSH Launcher working again. |
|
484 | # TODO: Get SSH Launcher working again. | |
485 |
|
485 | |||
486 | class SSHLauncher(LocalProcessLauncher): |
|
486 | class SSHLauncher(LocalProcessLauncher): | |
487 | """A minimal launcher for ssh. |
|
487 | """A minimal launcher for ssh. | |
488 |
|
488 | |||
489 | To be useful this will probably have to be extended to use the ``sshx`` |
|
489 | To be useful this will probably have to be extended to use the ``sshx`` | |
490 | idea for environment variables. There could be other things this needs |
|
490 | idea for environment variables. There could be other things this needs | |
491 | as well. |
|
491 | as well. | |
492 | """ |
|
492 | """ | |
493 |
|
493 | |||
494 | ssh_cmd = List(['ssh'], config=True) |
|
494 | ssh_cmd = List(['ssh'], config=True) | |
495 | ssh_args = List(['-tt'], config=True) |
|
495 | ssh_args = List(['-tt'], config=True) | |
496 | program = List(['date'], config=True) |
|
496 | program = List(['date'], config=True) | |
497 | program_args = List([], config=True) |
|
497 | program_args = List([], config=True) | |
498 | hostname = CUnicode('', config=True) |
|
498 | hostname = CUnicode('', config=True) | |
499 | user = CUnicode('', config=True) |
|
499 | user = CUnicode('', config=True) | |
500 | location = CUnicode('') |
|
500 | location = CUnicode('') | |
501 |
|
501 | |||
502 | def _hostname_changed(self, name, old, new): |
|
502 | def _hostname_changed(self, name, old, new): | |
503 | if self.user: |
|
503 | if self.user: | |
504 | self.location = u'%s@%s' % (self.user, new) |
|
504 | self.location = u'%s@%s' % (self.user, new) | |
505 | else: |
|
505 | else: | |
506 | self.location = new |
|
506 | self.location = new | |
507 |
|
507 | |||
508 | def _user_changed(self, name, old, new): |
|
508 | def _user_changed(self, name, old, new): | |
509 | self.location = u'%s@%s' % (new, self.hostname) |
|
509 | self.location = u'%s@%s' % (new, self.hostname) | |
510 |
|
510 | |||
511 | def find_args(self): |
|
511 | def find_args(self): | |
512 | return self.ssh_cmd + self.ssh_args + [self.location] + \ |
|
512 | return self.ssh_cmd + self.ssh_args + [self.location] + \ | |
513 | self.program + self.program_args |
|
513 | self.program + self.program_args | |
514 |
|
514 | |||
515 | def start(self, cluster_dir, hostname=None, user=None): |
|
515 | def start(self, cluster_dir, hostname=None, user=None): | |
516 | self.cluster_dir = unicode(cluster_dir) |
|
516 | self.cluster_dir = unicode(cluster_dir) | |
517 | if hostname is not None: |
|
517 | if hostname is not None: | |
518 | self.hostname = hostname |
|
518 | self.hostname = hostname | |
519 | if user is not None: |
|
519 | if user is not None: | |
520 | self.user = user |
|
520 | self.user = user | |
521 |
|
521 | |||
522 | return super(SSHLauncher, self).start() |
|
522 | return super(SSHLauncher, self).start() | |
523 |
|
523 | |||
524 | def signal(self, sig): |
|
524 | def signal(self, sig): | |
525 | if self.state == 'running': |
|
525 | if self.state == 'running': | |
526 | # send escaped ssh connection-closer |
|
526 | # send escaped ssh connection-closer | |
527 | self.process.stdin.write('~.') |
|
527 | self.process.stdin.write('~.') | |
528 | self.process.stdin.flush() |
|
528 | self.process.stdin.flush() | |
529 |
|
529 | |||
530 |
|
530 | |||
531 |
|
531 | |||
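For reference, find_args composes the remote invocation from the traits above; a sketch with hypothetical values (user is assigned before hostname so the change handlers leave location fully populated):

    launcher = SSHLauncher(work_dir=u'.')
    launcher.user = u'alice'                  # hypothetical
    launcher.hostname = u'node1.example.com'  # hypothetical
    launcher.find_args()
    # -> ['ssh', '-tt', u'alice@node1.example.com', 'date']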
532 | class SSHControllerLauncher(SSHLauncher): |
|
532 | class SSHControllerLauncher(SSHLauncher): | |
533 |
|
533 | |||
534 | program = List(ipcontroller_cmd_argv, config=True) |
|
534 | program = List(ipcontroller_cmd_argv, config=True) | |
535 | # Command line arguments to ipcontroller. |
|
535 | # Command line arguments to ipcontroller. | |
536 | program_args = List(['-r', '--log-to-file','--log-level', str(logging.INFO)], config=True) |
|
536 | program_args = List(['-r', '--log-to-file','--log-level', str(logging.INFO)], config=True) | |
537 |
|
537 | |||
538 |
|
538 | |||
539 | class SSHEngineLauncher(SSHLauncher): |
|
539 | class SSHEngineLauncher(SSHLauncher): | |
540 | program = List(ipengine_cmd_argv, config=True) |
|
540 | program = List(ipengine_cmd_argv, config=True) | |
541 | # Command line arguments for ipengine. |
|
541 | # Command line arguments for ipengine. | |
542 | program_args = List( |
|
542 | program_args = List( | |
543 | ['--log-to-file','--log-level', str(logging.INFO)], config=True |
|
543 | ['--log-to-file','--log-level', str(logging.INFO)], config=True | |
544 | ) |
|
544 | ) | |
545 |
|
545 | |||
546 | class SSHEngineSetLauncher(LocalEngineSetLauncher): |
|
546 | class SSHEngineSetLauncher(LocalEngineSetLauncher): | |
547 | launcher_class = SSHEngineLauncher |
|
547 | launcher_class = SSHEngineLauncher | |
548 | engines = Dict(config=True) |
|
548 | engines = Dict(config=True) | |
549 |
|
549 | |||
550 | def start(self, n, cluster_dir): |
|
550 | def start(self, n, cluster_dir): | |
551 | """Start engines by profile or cluster_dir. |
|
551 | """Start engines by profile or cluster_dir. | |
552 | `n` is ignored, and the `engines` config property is used instead. |
|
552 | `n` is ignored, and the `engines` config property is used instead. | |
553 | """ |
|
553 | """ | |
554 |
|
554 | |||
555 | self.cluster_dir = unicode(cluster_dir) |
|
555 | self.cluster_dir = unicode(cluster_dir) | |
556 | dlist = [] |
|
556 | dlist = [] | |
557 | for host, n in self.engines.iteritems(): |
|
557 | for host, n in self.engines.iteritems(): | |
558 | if isinstance(n, (tuple, list)): |
|
558 | if isinstance(n, (tuple, list)): | |
559 | n, args = n |
|
559 | n, args = n | |
560 | else: |
|
560 | else: | |
561 | args = copy.deepcopy(self.engine_args) |
|
561 | args = copy.deepcopy(self.engine_args) | |
562 |
|
562 | |||
563 | if '@' in host: |
|
563 | if '@' in host: | |
564 | user,host = host.split('@',1) |
|
564 | user,host = host.split('@',1) | |
565 | else: |
|
565 | else: | |
566 | user=None |
|
566 | user=None | |
567 | for i in range(n): |
|
567 | for i in range(n): | |
568 | el = self.launcher_class(work_dir=self.work_dir, config=self.config, logname=self.log.name) |
|
568 | el = self.launcher_class(work_dir=self.work_dir, config=self.config, logname=self.log.name) | |
569 |
|
569 | |||
570 | # Copy the engine args over to each engine launcher. |
|
570 | # Copy the engine args over to each engine launcher. | |
572 | el.program_args = args |
|
572 | el.program_args = args | |
573 | el.on_stop(self._notice_engine_stopped) |
|
573 | el.on_stop(self._notice_engine_stopped) | |
574 | d = el.start(cluster_dir, user=user, hostname=host) |
|
574 | d = el.start(cluster_dir, user=user, hostname=host) | |
575 | if i==0: |
|
575 | if i==0: | |
576 | self.log.info("Starting SSHEngineSetLauncher: %r" % el.args) |
|
576 | self.log.info("Starting SSHEngineSetLauncher: %r" % el.args) | |
577 | self.launchers[host+str(i)] = el |
|
577 | self.launchers[host+str(i)] = el | |
578 | dlist.append(d) |
|
578 | dlist.append(d) | |
579 | self.notify_start(dlist) |
|
579 | self.notify_start(dlist) | |
580 | return dlist |
|
580 | return dlist | |
581 |
|
581 | |||
582 |
|
582 | |||
583 |
|
583 | |||
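As the branch at the top of start shows, the engines dict maps a host (optionally user@host) either to an engine count or to a (count, args) pair; a hypothetical configuration:

    # in ipcluster_config.py (hypothetical hosts):
    c.SSHEngineSetLauncher.engines = {
        'host1.example.com' : 2,  # two engines with the default engine_args
        'alice@host2.example.com' : (4, ['--log-level', '20']),  # per-host args
    }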
584 | #----------------------------------------------------------------------------- |
|
584 | #----------------------------------------------------------------------------- | |
585 | # Windows HPC Server 2008 scheduler launchers |
|
585 | # Windows HPC Server 2008 scheduler launchers | |
586 | #----------------------------------------------------------------------------- |
|
586 | #----------------------------------------------------------------------------- | |
587 |
|
587 | |||
588 |
|
588 | |||
589 | # This is only used on Windows. |
|
589 | # This is only used on Windows. | |
590 | def find_job_cmd(): |
|
590 | def find_job_cmd(): | |
591 | if os.name=='nt': |
|
591 | if os.name=='nt': | |
592 | try: |
|
592 | try: | |
593 | return find_cmd('job') |
|
593 | return find_cmd('job') | |
594 | except FindCmdError: |
|
594 | except FindCmdError: | |
595 | return 'job' |
|
595 | return 'job' | |
596 | else: |
|
596 | else: | |
597 | return 'job' |
|
597 | return 'job' | |
598 |
|
598 | |||
599 |
|
599 | |||
600 | class WindowsHPCLauncher(BaseLauncher): |
|
600 | class WindowsHPCLauncher(BaseLauncher): | |
601 |
|
601 | |||
602 | # A regular expression used to get the job id from the output of the |
|
602 | # A regular expression used to get the job id from the output of the | |
603 | # submit_command. |
|
603 | # submit_command. | |
604 | job_id_regexp = Str(r'\d+', config=True) |
|
604 | job_id_regexp = Str(r'\d+', config=True) | |
605 | # The filename of the instantiated job script. |
|
605 | # The filename of the instantiated job script. | |
606 | job_file_name = CUnicode(u'ipython_job.xml', config=True) |
|
606 | job_file_name = CUnicode(u'ipython_job.xml', config=True) | |
607 | # The full path to the instantiated job script. This gets made dynamically |
|
607 | # The full path to the instantiated job script. This gets made dynamically | |
608 | # by combining the work_dir with the job_file_name. |
|
608 | # by combining the work_dir with the job_file_name. | |
609 | job_file = CUnicode(u'') |
|
609 | job_file = CUnicode(u'') | |
610 | # The hostname of the scheduler to submit the job to |
|
610 | # The hostname of the scheduler to submit the job to | |
611 | scheduler = CUnicode('', config=True) |
|
611 | scheduler = CUnicode('', config=True) | |
612 | job_cmd = CUnicode(find_job_cmd(), config=True) |
|
612 | job_cmd = CUnicode(find_job_cmd(), config=True) | |
613 |
|
613 | |||
614 | def __init__(self, work_dir=u'.', config=None, **kwargs): |
|
614 | def __init__(self, work_dir=u'.', config=None, **kwargs): | |
615 | super(WindowsHPCLauncher, self).__init__( |
|
615 | super(WindowsHPCLauncher, self).__init__( | |
616 | work_dir=work_dir, config=config, **kwargs |
|
616 | work_dir=work_dir, config=config, **kwargs | |
617 | ) |
|
617 | ) | |
618 |
|
618 | |||
619 | @property |
|
619 | @property | |
620 | def job_file(self): |
|
620 | def job_file(self): | |
621 | return os.path.join(self.work_dir, self.job_file_name) |
|
621 | return os.path.join(self.work_dir, self.job_file_name) | |
622 |
|
622 | |||
623 | def write_job_file(self, n): |
|
623 | def write_job_file(self, n): | |
624 | raise NotImplementedError("Implement write_job_file in a subclass.") |
|
624 | raise NotImplementedError("Implement write_job_file in a subclass.") | |
625 |
|
625 | |||
626 | def find_args(self): |
|
626 | def find_args(self): | |
627 | return [u'job.exe'] |
|
627 | return [u'job.exe'] | |
628 |
|
628 | |||
629 | def parse_job_id(self, output): |
|
629 | def parse_job_id(self, output): | |
630 | """Take the output of the submit command and return the job id.""" |
|
630 | """Take the output of the submit command and return the job id.""" | |
631 | m = re.search(self.job_id_regexp, output) |
|
631 | m = re.search(self.job_id_regexp, output) | |
632 | if m is not None: |
|
632 | if m is not None: | |
633 | job_id = m.group() |
|
633 | job_id = m.group() | |
634 | else: |
|
634 | else: | |
635 | raise LauncherError("Job id couldn't be determined: %s" % output) |
|
635 | raise LauncherError("Job id couldn't be determined: %s" % output) | |
636 | self.job_id = job_id |
|
636 | self.job_id = job_id | |
637 | self.log.info('Job started with job id: %r' % job_id) |
|
637 | self.log.info('Job started with job id: %r' % job_id) | |
638 | return job_id |
|
638 | return job_id | |
639 |
|
639 | |||
640 | def start(self, n): |
|
640 | def start(self, n): | |
641 | """Start n copies of the process using the Win HPC job scheduler.""" |
|
641 | """Start n copies of the process using the Win HPC job scheduler.""" | |
642 | self.write_job_file(n) |
|
642 | self.write_job_file(n) | |
643 | args = [ |
|
643 | args = [ | |
644 | 'submit', |
|
644 | 'submit', | |
645 | '/jobfile:%s' % self.job_file, |
|
645 | '/jobfile:%s' % self.job_file, | |
646 | '/scheduler:%s' % self.scheduler |
|
646 | '/scheduler:%s' % self.scheduler | |
647 | ] |
|
647 | ] | |
648 | self.log.info("Starting Win HPC Job: %s" % (self.job_cmd + ' ' + ' '.join(args),)) |
|
648 | self.log.info("Starting Win HPC Job: %s" % (self.job_cmd + ' ' + ' '.join(args),)) | |
649 | # Twisted will raise DeprecationWarnings if we try to pass unicode to this |
|
649 | # Twisted will raise DeprecationWarnings if we try to pass unicode to this | |
650 | output = check_output([self.job_cmd]+args, |
|
650 | output = check_output([self.job_cmd]+args, | |
651 | env=os.environ, |
|
651 | env=os.environ, | |
652 | cwd=self.work_dir, |
|
652 | cwd=self.work_dir, | |
653 | stderr=STDOUT |
|
653 | stderr=STDOUT | |
654 | ) |
|
654 | ) | |
655 | job_id = self.parse_job_id(output) |
|
655 | job_id = self.parse_job_id(output) | |
656 | self.notify_start(job_id) |
|
656 | self.notify_start(job_id) | |
657 | return job_id |
|
657 | return job_id | |
658 |
|
658 | |||
659 | def stop(self): |
|
659 | def stop(self): | |
660 | args = [ |
|
660 | args = [ | |
661 | 'cancel', |
|
661 | 'cancel', | |
662 | self.job_id, |
|
662 | self.job_id, | |
663 | '/scheduler:%s' % self.scheduler |
|
663 | '/scheduler:%s' % self.scheduler | |
664 | ] |
|
664 | ] | |
665 | self.log.info("Stopping Win HPC Job: %s" % (self.job_cmd + ' ' + ' '.join(args),)) |
|
665 | self.log.info("Stopping Win HPC Job: %s" % (self.job_cmd + ' ' + ' '.join(args),)) | |
666 | try: |
|
666 | try: | |
667 | output = check_output([self.job_cmd]+args, |
|
667 | output = check_output([self.job_cmd]+args, | |
668 | env=os.environ, |
|
668 | env=os.environ, | |
669 | cwd=self.work_dir, |
|
669 | cwd=self.work_dir, | |
670 | stderr=STDOUT |
|
670 | stderr=STDOUT | |
671 | ) |
|
671 | ) | |
672 | except: |
|
672 | except: | |
673 | output = 'The job already appears to be stopped: %r' % self.job_id |
|
673 | output = 'The job already appears to be stopped: %r' % self.job_id | |
674 | self.notify_stop(dict(job_id=self.job_id, output=output)) # Pass the output of the kill cmd |
|
674 | self.notify_stop(dict(job_id=self.job_id, output=output)) # Pass the output of the kill cmd | |
675 | return output |
|
675 | return output | |
676 |
|
676 | |||
677 |
|
677 | |||
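For reference, start and stop shell out to the scheduler's job command; with a hypothetical head node and job id, the two invocations assembled above look like:

    job submit /jobfile:C:\cluster\ipython_job.xml /scheduler:HEADNODE
    job cancel 1234 /scheduler:HEADNODE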
678 | class WindowsHPCControllerLauncher(WindowsHPCLauncher): |
|
678 | class WindowsHPCControllerLauncher(WindowsHPCLauncher): | |
679 |
|
679 | |||
680 | job_file_name = CUnicode(u'ipcontroller_job.xml', config=True) |
|
680 | job_file_name = CUnicode(u'ipcontroller_job.xml', config=True) | |
681 | extra_args = List([], config=False) |
|
681 | extra_args = List([], config=False) | |
682 |
|
682 | |||
683 | def write_job_file(self, n): |
|
683 | def write_job_file(self, n): | |
684 | job = IPControllerJob(config=self.config) |
|
684 | job = IPControllerJob(config=self.config) | |
685 |
|
685 | |||
686 | t = IPControllerTask(config=self.config) |
|
686 | t = IPControllerTask(config=self.config) | |
687 | # The task's work directory is *not* the actual work directory of |
|
687 | # The task's work directory is *not* the actual work directory of | |
688 | # the controller. It is used as the base path for the stdout/stderr |
|
688 | # the controller. It is used as the base path for the stdout/stderr | |
689 | # files that the scheduler redirects to. |
|
689 | # files that the scheduler redirects to. | |
690 | t.work_directory = self.cluster_dir |
|
690 | t.work_directory = self.cluster_dir | |
691 | # Add the --cluster-dir argument passed in from self.start(). |
|
691 | # Add the --cluster-dir argument passed in from self.start(). | |
692 | t.controller_args.extend(self.extra_args) |
|
692 | t.controller_args.extend(self.extra_args) | |
693 | job.add_task(t) |
|
693 | job.add_task(t) | |
694 |
|
694 | |||
695 | self.log.info("Writing job description file: %s" % self.job_file) |
|
695 | self.log.info("Writing job description file: %s" % self.job_file) | |
696 | job.write(self.job_file) |
|
696 | job.write(self.job_file) | |
697 |
|
697 | |||
698 | @property |
|
698 | @property | |
699 | def job_file(self): |
|
699 | def job_file(self): | |
700 | return os.path.join(self.cluster_dir, self.job_file_name) |
|
700 | return os.path.join(self.cluster_dir, self.job_file_name) | |
701 |
|
701 | |||
702 | def start(self, cluster_dir): |
|
702 | def start(self, cluster_dir): | |
703 | """Start the controller by cluster_dir.""" |
|
703 | """Start the controller by cluster_dir.""" | |
704 | self.extra_args = ['--cluster-dir', cluster_dir] |
|
704 | self.extra_args = ['--cluster-dir', cluster_dir] | |
705 | self.cluster_dir = unicode(cluster_dir) |
|
705 | self.cluster_dir = unicode(cluster_dir) | |
706 | return super(WindowsHPCControllerLauncher, self).start(1) |
|
706 | return super(WindowsHPCControllerLauncher, self).start(1) | |
707 |
|
707 | |||
708 |
|
708 | |||
709 | class WindowsHPCEngineSetLauncher(WindowsHPCLauncher): |
|
709 | class WindowsHPCEngineSetLauncher(WindowsHPCLauncher): | |
710 |
|
710 | |||
711 | job_file_name = CUnicode(u'ipengineset_job.xml', config=True) |
|
711 | job_file_name = CUnicode(u'ipengineset_job.xml', config=True) | |
712 | extra_args = List([], config=False) |
|
712 | extra_args = List([], config=False) | |
713 |
|
713 | |||
714 | def write_job_file(self, n): |
|
714 | def write_job_file(self, n): | |
715 | job = IPEngineSetJob(config=self.config) |
|
715 | job = IPEngineSetJob(config=self.config) | |
716 |
|
716 | |||
717 | for i in range(n): |
|
717 | for i in range(n): | |
718 | t = IPEngineTask(config=self.config) |
|
718 | t = IPEngineTask(config=self.config) | |
719 | # The task's work directory is *not* the actual work directory of |
|
719 | # The task's work directory is *not* the actual work directory of | |
720 | # the engine. It is used as the base path for the stdout/stderr |
|
720 | # the engine. It is used as the base path for the stdout/stderr | |
721 | # files that the scheduler redirects to. |
|
721 | # files that the scheduler redirects to. | |
722 | t.work_directory = self.cluster_dir |
|
722 | t.work_directory = self.cluster_dir | |
723 | # Add the --cluster-dir argument passed in from self.start(). |
|
723 | # Add the --cluster-dir argument passed in from self.start(). | |
724 | t.engine_args.extend(self.extra_args) |
|
724 | t.engine_args.extend(self.extra_args) | |
725 | job.add_task(t) |
|
725 | job.add_task(t) | |
726 |
|
726 | |||
727 | self.log.info("Writing job description file: %s" % self.job_file) |
|
727 | self.log.info("Writing job description file: %s" % self.job_file) | |
728 | job.write(self.job_file) |
|
728 | job.write(self.job_file) | |
729 |
|
729 | |||
730 | @property |
|
730 | @property | |
731 | def job_file(self): |
|
731 | def job_file(self): | |
732 | return os.path.join(self.cluster_dir, self.job_file_name) |
|
732 | return os.path.join(self.cluster_dir, self.job_file_name) | |
733 |
|
733 | |||
734 | def start(self, n, cluster_dir): |
|
734 | def start(self, n, cluster_dir): | |
735 | """Start the controller by cluster_dir.""" |
|
735 | """Start the controller by cluster_dir.""" | |
736 | self.extra_args = ['--cluster-dir', cluster_dir] |
|
736 | self.extra_args = ['--cluster-dir', cluster_dir] | |
737 | self.cluster_dir = unicode(cluster_dir) |
|
737 | self.cluster_dir = unicode(cluster_dir) | |
738 | return super(WindowsHPCEngineSetLauncher, self).start(n) |
|
738 | return super(WindowsHPCEngineSetLauncher, self).start(n) | |
739 |
|
739 | |||
740 |
|
740 | |||
741 | #----------------------------------------------------------------------------- |
|
741 | #----------------------------------------------------------------------------- | |
742 | # Batch (PBS) system launchers |
|
742 | # Batch (PBS) system launchers | |
743 | #----------------------------------------------------------------------------- |
|
743 | #----------------------------------------------------------------------------- | |
744 |
|
744 | |||
745 | class BatchSystemLauncher(BaseLauncher): |
|
745 | class BatchSystemLauncher(BaseLauncher): | |
746 | """Launch an external process using a batch system. |
|
746 | """Launch an external process using a batch system. | |
747 |
|
747 | |||
748 | This class is designed to work with UNIX batch systems like PBS, LSF, |
|
748 | This class is designed to work with UNIX batch systems like PBS, LSF, | |
749 | GridEngine, etc. The overall model is that there are different commands |
|
749 | GridEngine, etc. The overall model is that there are different commands | |
750 | like qsub, qdel, etc. that handle the starting and stopping of the process. |
|
750 | like qsub, qdel, etc. that handle the starting and stopping of the process. | |
751 |
|
751 | |||
752 | This class also has the notion of a batch script. The ``batch_template`` |
|
752 | This class also has the notion of a batch script. The ``batch_template`` | |
753 | attribute can be set to a string that is a template for the batch script. |
|
753 | attribute can be set to a string that is a template for the batch script. | |
754 | This template is instantiated using Itpl. Thus the template can use |
|
754 | This template is instantiated using Itpl. Thus the template can use | |
755 | ${n} for the number of instances. Subclasses can add additional variables |
|
755 | ${n} for the number of instances. Subclasses can add additional variables | |
756 | to the template dict. |
|
756 | to the template dict. | |
757 | """ |
|
757 | """ | |
758 |
|
758 | |||
759 | # Subclasses must fill these in. See PBSEngineSet |
|
759 | # Subclasses must fill these in. See PBSEngineSet | |
760 | # The name of the command line program used to submit jobs. |
|
760 | # The name of the command line program used to submit jobs. | |
761 | submit_command = List([''], config=True) |
|
761 | submit_command = List([''], config=True) | |
762 | # The name of the command line program used to delete jobs. |
|
762 | # The name of the command line program used to delete jobs. | |
763 | delete_command = List([''], config=True) |
|
763 | delete_command = List([''], config=True) | |
764 | # A regular expression used to get the job id from the output of the |
|
764 | # A regular expression used to get the job id from the output of the | |
765 | # submit_command. |
|
765 | # submit_command. | |
766 | job_id_regexp = CUnicode('', config=True) |
|
766 | job_id_regexp = CUnicode('', config=True) | |
767 | # The string that is the batch script template itself. |
|
767 | # The string that is the batch script template itself. | |
768 | batch_template = CUnicode('', config=True) |
|
768 | batch_template = CUnicode('', config=True) | |
769 | # The file that contains the batch template |
|
769 | # The file that contains the batch template | |
770 | batch_template_file = CUnicode(u'', config=True) |
|
770 | batch_template_file = CUnicode(u'', config=True) | |
771 | # The filename of the instantiated batch script. |
|
771 | # The filename of the instantiated batch script. | |
772 | batch_file_name = CUnicode(u'batch_script', config=True) |
|
772 | batch_file_name = CUnicode(u'batch_script', config=True) | |
773 | # The PBS Queue |
|
773 | # The PBS Queue | |
774 | queue = CUnicode(u'', config=True) |
|
774 | queue = CUnicode(u'', config=True) | |
775 |
|
775 | |||
776 | # not configurable, override in subclasses |
|
776 | # not configurable, override in subclasses | |
777 | # PBS Job Array regex |
|
777 | # PBS Job Array regex | |
778 | job_array_regexp = CUnicode('') |
|
778 | job_array_regexp = CUnicode('') | |
779 | job_array_template = CUnicode('') |
|
779 | job_array_template = CUnicode('') | |
780 | # PBS Queue regex |
|
780 | # PBS Queue regex | |
781 | queue_regexp = CUnicode('') |
|
781 | queue_regexp = CUnicode('') | |
782 | queue_template = CUnicode('') |
|
782 | queue_template = CUnicode('') | |
783 | # The default batch template, override in subclasses |
|
783 | # The default batch template, override in subclasses | |
784 | default_template = CUnicode('') |
|
784 | default_template = CUnicode('') | |
785 | # The full path to the instantiated batch script. |
|
785 | # The full path to the instantiated batch script. | |
786 | batch_file = CUnicode(u'') |
|
786 | batch_file = CUnicode(u'') | |
787 | # the format dict used with batch_template: |
|
787 | # the format dict used with batch_template: | |
788 | context = Dict() |
|
788 | context = Dict() | |
789 |
|
789 | |||
790 |
|
790 | |||
791 | def find_args(self): |
|
791 | def find_args(self): | |
792 | return self.submit_command + [self.batch_file] |
|
792 | return self.submit_command + [self.batch_file] | |
793 |
|
793 | |||
794 | def __init__(self, work_dir=u'.', config=None, **kwargs): |
|
794 | def __init__(self, work_dir=u'.', config=None, **kwargs): | |
795 | super(BatchSystemLauncher, self).__init__( |
|
795 | super(BatchSystemLauncher, self).__init__( | |
796 | work_dir=work_dir, config=config, **kwargs |
|
796 | work_dir=work_dir, config=config, **kwargs | |
797 | ) |
|
797 | ) | |
798 | self.batch_file = os.path.join(self.work_dir, self.batch_file_name) |
|
798 | self.batch_file = os.path.join(self.work_dir, self.batch_file_name) | |
799 |
|
799 | |||
800 | def parse_job_id(self, output): |
|
800 | def parse_job_id(self, output): | |
801 | """Take the output of the submit command and return the job id.""" |
|
801 | """Take the output of the submit command and return the job id.""" | |
802 | m = re.search(self.job_id_regexp, output) |
|
802 | m = re.search(self.job_id_regexp, output) | |
803 | if m is not None: |
|
803 | if m is not None: | |
804 | job_id = m.group() |
|
804 | job_id = m.group() | |
805 | else: |
|
805 | else: | |
806 | raise LauncherError("Job id couldn't be determined: %s" % output) |
|
806 | raise LauncherError("Job id couldn't be determined: %s" % output) | |
807 | self.job_id = job_id |
|
807 | self.job_id = job_id | |
808 | self.log.info('Job submitted with job id: %r' % job_id) |
|
808 | self.log.info('Job submitted with job id: %r' % job_id) | |
809 | return job_id |
|
809 | return job_id | |
810 |
|
810 | |||
811 | def write_batch_script(self, n): |
|
811 | def write_batch_script(self, n): | |
812 | """Instantiate and write the batch script to the work_dir.""" |
|
812 | """Instantiate and write the batch script to the work_dir.""" | |
813 | self.context['n'] = n |
|
813 | self.context['n'] = n | |
814 | self.context['queue'] = self.queue |
|
814 | self.context['queue'] = self.queue | |
815 | self.log.debug("Batch script context: %r" % self.context) |
|
815 | self.log.debug("Batch script context: %r" % self.context) | |
816 | # first priority is batch_template if set |
|
816 | # first priority is batch_template if set | |
817 | if self.batch_template_file and not self.batch_template: |
|
817 | if self.batch_template_file and not self.batch_template: | |
818 | # second priority is batch_template_file |
|
818 | # second priority is batch_template_file | |
819 | with open(self.batch_template_file) as f: |
|
819 | with open(self.batch_template_file) as f: | |
820 | self.batch_template = f.read() |
|
820 | self.batch_template = f.read() | |
821 | if not self.batch_template: |
|
821 | if not self.batch_template: | |
822 | # third (last) priority is default_template |
|
822 | # third (last) priority is default_template | |
823 | self.batch_template = self.default_template |
|
823 | self.batch_template = self.default_template | |
824 |
|
824 | |||
825 | regex = re.compile(self.job_array_regexp) |
|
825 | regex = re.compile(self.job_array_regexp) | |
826 | # print regex.search(self.batch_template) |
|
826 | # print regex.search(self.batch_template) | |
827 | if not regex.search(self.batch_template): |
|
827 | if not regex.search(self.batch_template): | |
828 | self.log.info("adding job array settings to batch script") |
|
828 | self.log.info("adding job array settings to batch script") | |
829 | firstline, rest = self.batch_template.split('\n',1) |
|
829 | firstline, rest = self.batch_template.split('\n',1) | |
830 | self.batch_template = u'\n'.join([firstline, self.job_array_template, rest]) |
|
830 | self.batch_template = u'\n'.join([firstline, self.job_array_template, rest]) | |
831 |
|
831 | |||
832 | regex = re.compile(self.queue_regexp) |
|
832 | regex = re.compile(self.queue_regexp) | |
833 | # print regex.search(self.batch_template) |
|
833 | # print regex.search(self.batch_template) | |
834 | if self.queue and not regex.search(self.batch_template): |
|
834 | if self.queue and not regex.search(self.batch_template): | |
835 | self.log.info("adding PBS queue settings to batch script") |
|
835 | self.log.info("adding PBS queue settings to batch script") | |
836 | firstline, rest = self.batch_template.split('\n',1) |
|
836 | firstline, rest = self.batch_template.split('\n',1) | |
837 | self.batch_template = u'\n'.join([firstline, self.queue_template, rest]) |
|
837 | self.batch_template = u'\n'.join([firstline, self.queue_template, rest]) | |
838 |
|
838 | |||
839 | script_as_string = Itpl.itplns(self.batch_template, self.context) |
|
839 | script_as_string = Itpl.itplns(self.batch_template, self.context) | |
840 | self.log.info('Writing instantiated batch script: %s' % self.batch_file) |
|
840 | self.log.info('Writing instantiated batch script: %s' % self.batch_file) | |
841 |
|
841 | |||
842 | with open(self.batch_file, 'w') as f: |
|
842 | with open(self.batch_file, 'w') as f: | |
843 | f.write(script_as_string) |
|
843 | f.write(script_as_string) | |
844 | os.chmod(self.batch_file, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) |
|
844 | os.chmod(self.batch_file, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) | |
845 |
|
845 | |||
846 | def start(self, n, cluster_dir): |
|
846 | def start(self, n, cluster_dir): | |
847 | """Start n copies of the process using a batch system.""" |
|
847 | """Start n copies of the process using a batch system.""" | |
848 | # Here we save profile and cluster_dir in the context so they |
|
848 | # Here we save profile and cluster_dir in the context so they | |
849 | # can be used in the batch script template as ${profile} and |
|
849 | # can be used in the batch script template as ${profile} and | |
850 | # ${cluster_dir} |
|
850 | # ${cluster_dir} | |
851 | self.context['cluster_dir'] = cluster_dir |
|
851 | self.context['cluster_dir'] = cluster_dir | |
852 | self.cluster_dir = unicode(cluster_dir) |
|
852 | self.cluster_dir = unicode(cluster_dir) | |
853 | self.write_batch_script(n) |
|
853 | self.write_batch_script(n) | |
854 | output = check_output(self.args, env=os.environ) |
|
854 | output = check_output(self.args, env=os.environ) | |
855 |
|
855 | |||
856 | job_id = self.parse_job_id(output) |
|
856 | job_id = self.parse_job_id(output) | |
857 | self.notify_start(job_id) |
|
857 | self.notify_start(job_id) | |
858 | return job_id |
|
858 | return job_id | |
859 |
|
859 | |||
860 | def stop(self): |
|
860 | def stop(self): | |
861 | output = check_output(self.delete_command+[self.job_id], env=os.environ) |
|
861 | output = check_output(self.delete_command+[self.job_id], env=os.environ) | |
862 | self.notify_stop(dict(job_id=self.job_id, output=output)) # Pass the output of the kill cmd |
|
862 | self.notify_stop(dict(job_id=self.job_id, output=output)) # Pass the output of the kill cmd | |
863 | return output |
|
863 | return output | |
864 |
|
864 | |||
865 |
|
865 | |||
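For reference, the instantiation step in write_batch_script is plain Itpl interpolation over the context dict; a hypothetical template and context give:

    context = dict(n=8, queue=u'', cluster_dir=u'/home/me/.ipython/cluster_default')
    Itpl.itplns(u'#PBS -t 1-$n\ncd ${cluster_dir}', context)
    # -> u'#PBS -t 1-8\ncd /home/me/.ipython/cluster_default'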
866 | class PBSLauncher(BatchSystemLauncher): |
|
866 | class PBSLauncher(BatchSystemLauncher): | |
867 | """A BatchSystemLauncher subclass for PBS.""" |
|
867 | """A BatchSystemLauncher subclass for PBS.""" | |
868 |
|
868 | |||
869 | submit_command = List(['qsub'], config=True) |
|
869 | submit_command = List(['qsub'], config=True) | |
870 | delete_command = List(['qdel'], config=True) |
|
870 | delete_command = List(['qdel'], config=True) | |
871 | job_id_regexp = CUnicode(r'\d+', config=True) |
|
871 | job_id_regexp = CUnicode(r'\d+', config=True) | |
872 |
|
872 | |||
873 | batch_file = CUnicode(u'') |
|
873 | batch_file = CUnicode(u'') | |
874 | job_array_regexp = CUnicode('#PBS\W+-t\W+[\w\d\-\$]+') |
|
874 | job_array_regexp = CUnicode('#PBS\W+-t\W+[\w\d\-\$]+') | |
875 | job_array_template = CUnicode('#PBS -t 1-$n') |
|
875 | job_array_template = CUnicode('#PBS -t 1-$n') | |
876 | queue_regexp = CUnicode('#PBS\W+-q\W+\$?\w+') |
|
876 | queue_regexp = CUnicode('#PBS\W+-q\W+\$?\w+') | |
877 | queue_template = CUnicode('#PBS -q $queue') |
|
877 | queue_template = CUnicode('#PBS -q $queue') | |
878 |
|
878 | |||
879 |
|
879 | |||
880 | class PBSControllerLauncher(PBSLauncher): |
|
880 | class PBSControllerLauncher(PBSLauncher): | |
881 | """Launch a controller using PBS.""" |
|
881 | """Launch a controller using PBS.""" | |
882 |
|
882 | |||
883 | batch_file_name = CUnicode(u'pbs_controller', config=True) |
|
883 | batch_file_name = CUnicode(u'pbs_controller', config=True) | |
884 | default_template = CUnicode("""#!/bin/sh |
|
884 | default_template = CUnicode("""#!/bin/sh | |
885 | #PBS -V |
|
885 | #PBS -V | |
886 | #PBS -N ipcontroller |
|
886 | #PBS -N ipcontroller | |
887 | %s --log-to-file --cluster-dir $cluster_dir |
|
887 | %s --log-to-file --cluster-dir $cluster_dir | |
888 | """%(' '.join(ipcontroller_cmd_argv))) |
|
888 | """%(' '.join(ipcontroller_cmd_argv))) | |
889 |
|
889 | |||
890 | def start(self, cluster_dir): |
|
890 | def start(self, cluster_dir): | |
891 | """Start the controller by profile or cluster_dir.""" |
|
891 | """Start the controller by profile or cluster_dir.""" | |
892 | self.log.info("Starting PBSControllerLauncher: %r" % self.args) |
|
892 | self.log.info("Starting PBSControllerLauncher: %r" % self.args) | |
893 | return super(PBSControllerLauncher, self).start(1, cluster_dir) |
|
893 | return super(PBSControllerLauncher, self).start(1, cluster_dir) | |
894 |
|
894 | |||
895 |
|
895 | |||
896 | class PBSEngineSetLauncher(PBSLauncher): |
|
896 | class PBSEngineSetLauncher(PBSLauncher): | |
897 | """Launch Engines using PBS""" |
|
897 | """Launch Engines using PBS""" | |
898 | batch_file_name = CUnicode(u'pbs_engines', config=True) |
|
898 | batch_file_name = CUnicode(u'pbs_engines', config=True) | |
899 | default_template = CUnicode(u"""#!/bin/sh |
|
899 | default_template = CUnicode(u"""#!/bin/sh | |
900 | #PBS -V |
|
900 | #PBS -V | |
901 | #PBS -N ipengine |
|
901 | #PBS -N ipengine | |
902 | %s --cluster-dir $cluster_dir |
|
902 | %s --cluster-dir $cluster_dir | |
903 | """%(' '.join(ipengine_cmd_argv))) |
|
903 | """%(' '.join(ipengine_cmd_argv))) | |
904 |
|
904 | |||
905 | def start(self, n, cluster_dir): |
|
905 | def start(self, n, cluster_dir): | |
906 | """Start n engines by profile or cluster_dir.""" |
|
906 | """Start n engines by profile or cluster_dir.""" | |
907 | self.log.info('Starting %i engines with PBSEngineSetLauncher: %r' % (n, self.args)) |
|
907 | self.log.info('Starting %i engines with PBSEngineSetLauncher: %r' % (n, self.args)) | |
908 | return super(PBSEngineSetLauncher, self).start(n, cluster_dir) |
|
908 | return super(PBSEngineSetLauncher, self).start(n, cluster_dir) | |
909 |
|
909 | |||
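For reference, qsub typically echoes an id such as 12345.pbshead on submission; with job_id_regexp = r'\d+', parse_job_id reduces that to the numeric part (using the re module this file already relies on):

    re.search(r'\d+', '12345.pbshead.example.com').group()  # -> '12345' (hypothetical output)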
910 | #SGE is very similar to PBS |
|
910 | #SGE is very similar to PBS | |
911 |
|
911 | |||
912 | class SGELauncher(PBSLauncher): |
|
912 | class SGELauncher(PBSLauncher): | |
913 | """Sun GridEngine is a PBS clone with slightly different syntax""" |
|
913 | """Sun GridEngine is a PBS clone with slightly different syntax""" | |
914 | job_array_regexp = CUnicode('#$$\W+-t\W+[\w\d\-\$]+') |
|
914 | job_array_regexp = CUnicode('#$$\W+-t\W+[\w\d\-\$]+') | |
915 | job_array_template = CUnicode('#$$ -t 1-$n') |
|
915 | job_array_template = CUnicode('#$$ -t 1-$n') | |
916 | queue_regexp = CUnicode('#$$\W+-q\W+\$?\w+') |
|
916 | queue_regexp = CUnicode('#$$\W+-q\W+\$?\w+') | |
917 | queue_template = CUnicode('#$$ -q $queue') |
|
917 | queue_template = CUnicode('#$$ -q $queue') | |
918 |
|
918 | |||
919 | class SGEControllerLauncher(SGELauncher): |
|
919 | class SGEControllerLauncher(SGELauncher): | |
920 | """Launch a controller using SGE.""" |
|
920 | """Launch a controller using SGE.""" | |
921 |
|
921 | |||
922 | batch_file_name = CUnicode(u'sge_controller', config=True) |
|
922 | batch_file_name = CUnicode(u'sge_controller', config=True) | |
923 | default_template = CUnicode(u"""#$$ -V |
|
923 | default_template = CUnicode(u"""#$$ -V | |
924 | #$$ -S /bin/sh |
|
924 | #$$ -S /bin/sh | |
925 | #$$ -N ipcontroller |
|
925 | #$$ -N ipcontroller | |
926 | %s --log-to-file --cluster-dir $cluster_dir |
|
926 | %s --log-to-file --cluster-dir $cluster_dir | |
927 | """%(' '.join(ipcontroller_cmd_argv))) |
|
927 | """%(' '.join(ipcontroller_cmd_argv))) | |
928 |
|
928 | |||
929 | def start(self, cluster_dir): |
|
929 | def start(self, cluster_dir): | |
930 | """Start the controller by profile or cluster_dir.""" |
|
930 | """Start the controller by profile or cluster_dir.""" | |
931 | self.log.info("Starting PBSControllerLauncher: %r" % self.args) |
|
931 | self.log.info("Starting PBSControllerLauncher: %r" % self.args) | |
932 | return super(PBSControllerLauncher, self).start(1, cluster_dir) |
|
932 | return super(PBSControllerLauncher, self).start(1, cluster_dir) | |
933 |
|
933 | |||
934 | class SGEEngineSetLauncher(SGELauncher): |
|
934 | class SGEEngineSetLauncher(SGELauncher): | |
935 | """Launch Engines with SGE""" |
|
935 | """Launch Engines with SGE""" | |
936 | batch_file_name = CUnicode(u'sge_engines', config=True) |
|
936 | batch_file_name = CUnicode(u'sge_engines', config=True) | |
937 | default_template = CUnicode("""#$$ -V |
|
937 | default_template = CUnicode("""#$$ -V | |
938 | #$$ -S /bin/sh |
|
938 | #$$ -S /bin/sh | |
939 | #$$ -N ipengine |
|
939 | #$$ -N ipengine | |
940 | %s --cluster-dir $cluster_dir |
|
940 | %s --cluster-dir $cluster_dir | |
941 | """%(' '.join(ipengine_cmd_argv))) |
|
941 | """%(' '.join(ipengine_cmd_argv))) | |
942 |
|
942 | |||
943 | def start(self, n, cluster_dir): |
|
943 | def start(self, n, cluster_dir): | |
944 | """Start n engines by profile or cluster_dir.""" |
|
944 | """Start n engines by profile or cluster_dir.""" | |
945 | self.log.info('Starting %i engines with SGEEngineSetLauncher: %r' % (n, self.args)) |
|
945 | self.log.info('Starting %i engines with SGEEngineSetLauncher: %r' % (n, self.args)) | |
946 | return super(SGEEngineSetLauncher, self).start(n, cluster_dir) |
|
946 | return super(SGEEngineSetLauncher, self).start(n, cluster_dir) | |
947 |
|
947 | |||
948 |
|
948 | |||
949 | #----------------------------------------------------------------------------- |
|
949 | #----------------------------------------------------------------------------- | |
950 | # A launcher for ipcluster itself! |
|
950 | # A launcher for ipcluster itself! | |
951 | #----------------------------------------------------------------------------- |
|
951 | #----------------------------------------------------------------------------- | |
952 |
|
952 | |||
953 |
|
953 | |||
954 | class IPClusterLauncher(LocalProcessLauncher): |
|
954 | class IPClusterLauncher(LocalProcessLauncher): | |
955 | """Launch the ipcluster program in an external process.""" |
|
955 | """Launch the ipcluster program in an external process.""" | |
956 |
|
956 | |||
957 | ipcluster_cmd = List(ipcluster_cmd_argv, config=True) |
|
957 | ipcluster_cmd = List(ipcluster_cmd_argv, config=True) | |
958 | # Command line arguments to pass to ipcluster. |
|
958 | # Command line arguments to pass to ipcluster. | |
959 | ipcluster_args = List( |
|
959 | ipcluster_args = List( | |
960 | ['--clean-logs', '--log-to-file', '--log-level', str(logging.INFO)], config=True) |
|
960 | ['--clean-logs', '--log-to-file', '--log-level', str(logging.INFO)], config=True) | |
961 | ipcluster_subcommand = Str('start') |
|
961 | ipcluster_subcommand = Str('start') | |
962 | ipcluster_n = Int(2) |
|
962 | ipcluster_n = Int(2) | |
963 |
|
963 | |||
964 | def find_args(self): |
|
964 | def find_args(self): | |
965 | return self.ipcluster_cmd + [self.ipcluster_subcommand] + \ |
|
965 | return self.ipcluster_cmd + [self.ipcluster_subcommand] + \ | |
966 | ['-n', repr(self.ipcluster_n)] + self.ipcluster_args |
|
966 | ['-n', repr(self.ipcluster_n)] + self.ipcluster_args | |
967 |
|
967 | |||
968 | def start(self): |
|
968 | def start(self): | |
969 | self.log.info("Starting ipcluster: %r" % self.args) |
|
969 | self.log.info("Starting ipcluster: %r" % self.args) | |
970 | return super(IPClusterLauncher, self).start() |
|
970 | return super(IPClusterLauncher, self).start() | |
971 |
|
971 |
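# With the defaults above, and assuming ipcluster_cmd_argv resolves to just
# ['ipcluster'] (it may instead expand to an interpreter-plus-script argv),
# find_args() would yield:
#     ['ipcluster', 'start', '-n', '2',
#      '--clean-logs', '--log-to-file', '--log-level', '20']
# since repr(2) == '2' and str(logging.INFO) == '20'.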
@@ -1,98 +1,98 b'' | |||||
1 | #!/usr/bin/env python |
|
1 | #!/usr/bin/env python | |
2 | """A simple logger object that consolidates messages incoming from ipclusterz processes.""" |
|
2 | """A simple logger object that consolidates messages incoming from ipcluster processes.""" | |
3 |
|
3 | |||
4 | #----------------------------------------------------------------------------- |
|
4 | #----------------------------------------------------------------------------- | |
5 | # Copyright (C) 2011 The IPython Development Team |
|
5 | # Copyright (C) 2011 The IPython Development Team | |
6 | # |
|
6 | # | |
7 | # Distributed under the terms of the BSD License. The full license is in |
|
7 | # Distributed under the terms of the BSD License. The full license is in | |
8 | # the file COPYING, distributed as part of this software. |
|
8 | # the file COPYING, distributed as part of this software. | |
9 | #----------------------------------------------------------------------------- |
|
9 | #----------------------------------------------------------------------------- | |
10 |
|
10 | |||
11 | #----------------------------------------------------------------------------- |
|
11 | #----------------------------------------------------------------------------- | |
12 | # Imports |
|
12 | # Imports | |
13 | #----------------------------------------------------------------------------- |
|
13 | #----------------------------------------------------------------------------- | |
14 |
|
14 | |||
15 |
|
15 | |||
16 | import logging |
|
16 | import logging | |
17 | import sys |
|
17 | import sys | |
18 |
|
18 | |||
19 | import zmq |
|
19 | import zmq | |
20 | from zmq.eventloop import ioloop, zmqstream |
|
20 | from zmq.eventloop import ioloop, zmqstream | |
21 |
|
21 | |||
22 | from IPython.utils.traitlets import Int, Str, Instance, List |
|
22 | from IPython.utils.traitlets import Int, Str, Instance, List | |
23 |
|
23 | |||
24 | from .factory import LoggingFactory |
|
24 | from .factory import LoggingFactory | |
25 |
|
25 | |||
26 | #----------------------------------------------------------------------------- |
|
26 | #----------------------------------------------------------------------------- | |
27 | # Classes |
|
27 | # Classes | |
28 | #----------------------------------------------------------------------------- |
|
28 | #----------------------------------------------------------------------------- | |
29 |
|
29 | |||
30 |
|
30 | |||
31 | class LogWatcher(LoggingFactory): |
|
31 | class LogWatcher(LoggingFactory): | |
32 | """A simple class that receives messages on a SUB socket, as published |
|
32 | """A simple class that receives messages on a SUB socket, as published | |
33 | by subclasses of `zmq.log.handlers.PUBHandler`, and logs them itself. |
|
33 | by subclasses of `zmq.log.handlers.PUBHandler`, and logs them itself. | |
34 |
|
34 | |||
35 | This can subscribe to multiple topics, but defaults to all topics. |
|
35 | This can subscribe to multiple topics, but defaults to all topics. | |
36 | """ |
|
36 | """ | |
37 | # configurables |
|
37 | # configurables | |
38 | topics = List([''], config=True) |
|
38 | topics = List([''], config=True) | |
39 | url = Str('tcp://127.0.0.1:20202', config=True) |
|
39 | url = Str('tcp://127.0.0.1:20202', config=True) | |
40 |
|
40 | |||
41 | # internals |
|
41 | # internals | |
42 | context = Instance(zmq.Context, (), {}) |
|
42 | context = Instance(zmq.Context, (), {}) | |
43 | stream = Instance('zmq.eventloop.zmqstream.ZMQStream') |
|
43 | stream = Instance('zmq.eventloop.zmqstream.ZMQStream') | |
44 | loop = Instance('zmq.eventloop.ioloop.IOLoop') |
|
44 | loop = Instance('zmq.eventloop.ioloop.IOLoop') | |
45 | def _loop_default(self): |
|
45 | def _loop_default(self): | |
46 | return ioloop.IOLoop.instance() |
|
46 | return ioloop.IOLoop.instance() | |
47 |
|
47 | |||
48 | def __init__(self, **kwargs): |
|
48 | def __init__(self, **kwargs): | |
49 | super(LogWatcher, self).__init__(**kwargs) |
|
49 | super(LogWatcher, self).__init__(**kwargs) | |
50 | s = self.context.socket(zmq.SUB) |
|
50 | s = self.context.socket(zmq.SUB) | |
51 | s.bind(self.url) |
|
51 | s.bind(self.url) | |
52 | self.stream = zmqstream.ZMQStream(s, self.loop) |
|
52 | self.stream = zmqstream.ZMQStream(s, self.loop) | |
53 | self.subscribe() |
|
53 | self.subscribe() | |
54 | self.on_trait_change(self.subscribe, 'topics') |
|
54 | self.on_trait_change(self.subscribe, 'topics') | |
55 |
|
55 | |||
56 | def start(self): |
|
56 | def start(self): | |
57 | self.stream.on_recv(self.log_message) |
|
57 | self.stream.on_recv(self.log_message) | |
58 |
|
58 | |||
59 | def stop(self): |
|
59 | def stop(self): | |
60 | self.stream.stop_on_recv() |
|
60 | self.stream.stop_on_recv() | |
61 |
|
61 | |||
62 | def subscribe(self): |
|
62 | def subscribe(self): | |
63 | """Update our SUB socket's subscriptions.""" |
|
63 | """Update our SUB socket's subscriptions.""" | |
64 | self.stream.setsockopt(zmq.UNSUBSCRIBE, '') |
|
64 | self.stream.setsockopt(zmq.UNSUBSCRIBE, '') | |
65 | for topic in self.topics: |
|
65 | for topic in self.topics: | |
66 | self.log.debug("Subscribing to: %r"%topic) |
|
66 | self.log.debug("Subscribing to: %r"%topic) | |
67 | self.stream.setsockopt(zmq.SUBSCRIBE, topic) |
|
67 | self.stream.setsockopt(zmq.SUBSCRIBE, topic) | |
68 |
|
68 | |||
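# Note: ZMQ subscriptions are prefix filters, so the default topics=['']
# above matches every message; subscribing to e.g. 'engine' would match
# 'engine.0.INFO' but not 'controller.INFO'.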
69 | def _extract_level(self, topic_str): |
|
69 | def _extract_level(self, topic_str): | |
70 | """Turn 'engine.0.INFO.extra' into (logging.INFO, 'engine.0.extra')""" |
|
70 | """Turn 'engine.0.INFO.extra' into (logging.INFO, 'engine.0.extra')""" | |
71 | topics = topic_str.split('.') |
|
71 | topics = topic_str.split('.') | |
72 | for idx,t in enumerate(topics): |
|
72 | for idx,t in enumerate(topics): | |
73 | level = getattr(logging, t, None) |
|
73 | level = getattr(logging, t, None) | |
74 | if level is not None: |
|
74 | if level is not None: | |
75 | break |
|
75 | break | |
76 |
|
76 | |||
77 | if level is None: |
|
77 | if level is None: | |
78 | level = logging.INFO |
|
78 | level = logging.INFO | |
79 | else: |
|
79 | else: | |
80 | topics.pop(idx) |
|
80 | topics.pop(idx) | |
81 |
|
81 | |||
82 | return level, '.'.join(topics) |
|
82 | return level, '.'.join(topics) | |
83 |
|
83 | |||
84 |
|
84 | |||
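# Worked examples (illustrative topic strings):
#   _extract_level('engine.0.INFO.extra')  -> (logging.INFO, 'engine.0.extra')
#   _extract_level('controller.heartbeat') -> (logging.INFO, 'controller.heartbeat')
# The first component naming a logging level is stripped out; INFO is
# assumed when no level is embedded in the topic.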
85 | def log_message(self, raw): |
|
85 | def log_message(self, raw): | |
86 | """receive and parse a message, then log it.""" |
|
86 | """receive and parse a message, then log it.""" | |
87 | if len(raw) != 2 or '.' not in raw[0]: |
|
87 | if len(raw) != 2 or '.' not in raw[0]: | |
88 | self.log.error("Invalid log message: %s"%raw) |
|
88 | self.log.error("Invalid log message: %s"%raw) | |
89 | return |
|
89 | return | |
90 | else: |
|
90 | else: | |
91 | topic, msg = raw |
|
91 | topic, msg = raw | |
92 | # strip the trailing newline, since log messages always include one: |
|
92 | # strip the trailing newline, since log messages always include one: | |
93 | topic,level_name = topic.rsplit('.',1) |
|
93 | topic,level_name = topic.rsplit('.',1) | |
94 | level,topic = self._extract_level(topic) |
|
94 | level,topic = self._extract_level(topic) | |
95 | if msg[-1] == '\n': |
|
95 | if msg[-1] == '\n': | |
96 | msg = msg[:-1] |
|
96 | msg = msg[:-1] | |
97 | logging.log(level, "[%s] %s" % (topic, msg)) |
|
97 | logging.log(level, "[%s] %s" % (topic, msg)) | |
98 |
|
98 |
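The publishing side that feeds this watcher is a `zmq.log.handlers.PUBHandler`,
per the class docstring; a hedged sketch of the wiring (the url matches the
LogWatcher default above, but the root_topic and logger name are illustrative):

import logging
import zmq
from zmq.log.handlers import PUBHandler

ctx = zmq.Context()
pub = ctx.socket(zmq.PUB)
pub.connect('tcp://127.0.0.1:20202')  # LogWatcher binds this url and SUBs

handler = PUBHandler(pub)
handler.root_topic = 'engine.0'       # topic prefix for every record

logger = logging.getLogger('sketch')
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.info('hello')  # sent as ['engine.0.INFO', 'hello\n']; note that PUB
                      # may drop messages sent before the connection settles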
1 | NO CONTENT: file renamed from IPython/parallel/scripts/ipclusterz to IPython/parallel/scripts/ipcluster |
|
NO CONTENT: file renamed from IPython/parallel/scripts/ipclusterz to IPython/parallel/scripts/ipcluster |
1 | NO CONTENT: file renamed from IPython/parallel/scripts/ipcontrollerz to IPython/parallel/scripts/ipcontroller |
|
NO CONTENT: file renamed from IPython/parallel/scripts/ipcontrollerz to IPython/parallel/scripts/ipcontroller |
1 | NO CONTENT: file renamed from IPython/parallel/scripts/ipenginez to IPython/parallel/scripts/ipengine |
|
NO CONTENT: file renamed from IPython/parallel/scripts/ipenginez to IPython/parallel/scripts/ipengine |
1 | NO CONTENT: file renamed from IPython/parallel/scripts/iploggerz to IPython/parallel/scripts/iplogger |
|
NO CONTENT: file renamed from IPython/parallel/scripts/iploggerz to IPython/parallel/scripts/iplogger |
@@ -1,69 +1,69 b'' | |||||
1 | """toplevel setup/teardown for parallel tests.""" |
|
1 | """toplevel setup/teardown for parallel tests.""" | |
2 |
|
2 | |||
3 | #------------------------------------------------------------------------------- |
|
3 | #------------------------------------------------------------------------------- | |
4 | # Copyright (C) 2011 The IPython Development Team |
|
4 | # Copyright (C) 2011 The IPython Development Team | |
5 | # |
|
5 | # | |
6 | # Distributed under the terms of the BSD License. The full license is in |
|
6 | # Distributed under the terms of the BSD License. The full license is in | |
7 | # the file COPYING, distributed as part of this software. |
|
7 | # the file COPYING, distributed as part of this software. | |
8 | #------------------------------------------------------------------------------- |
|
8 | #------------------------------------------------------------------------------- | |
9 |
|
9 | |||
10 | #------------------------------------------------------------------------------- |
|
10 | #------------------------------------------------------------------------------- | |
11 | # Imports |
|
11 | # Imports | |
12 | #------------------------------------------------------------------------------- |
|
12 | #------------------------------------------------------------------------------- | |
13 |
|
13 | |||
14 | import tempfile |
|
14 | import tempfile | |
15 | import time |
|
15 | import time | |
16 | from subprocess import Popen, PIPE, STDOUT |
|
16 | from subprocess import Popen, PIPE, STDOUT | |
17 |
|
17 | |||
18 | from IPython.parallel import client |
|
18 | from IPython.parallel import client | |
19 |
|
19 | |||
20 | processes = [] |
|
20 | processes = [] | |
21 | blackhole = tempfile.TemporaryFile() |
|
21 | blackhole = tempfile.TemporaryFile() | |
22 |
|
22 | |||
23 | # nose setup/teardown |
|
23 | # nose setup/teardown | |
24 |
|
24 | |||
25 | def setup(): |
|
25 | def setup(): | |
26 | cp = Popen('ipcontrollerz --profile iptest -r --log-level 10 --log-to-file'.split(), stdout=blackhole, stderr=STDOUT) |
|
26 | cp = Popen('ipcontroller --profile iptest -r --log-level 10 --log-to-file'.split(), stdout=blackhole, stderr=STDOUT) | |
27 | processes.append(cp) |
|
27 | processes.append(cp) | |
28 | time.sleep(.5) |
|
28 | time.sleep(.5) | |
29 | add_engines(1) |
|
29 | add_engines(1) | |
30 | c = client.Client(profile='iptest') |
|
30 | c = client.Client(profile='iptest') | |
31 | while not c.ids: |
|
31 | while not c.ids: | |
32 | time.sleep(.1) |
|
32 | time.sleep(.1) | |
33 | c.spin() |
|
33 | c.spin() | |
34 | c.close() |
|
34 | c.close() | |
35 |
|
35 | |||
36 | def add_engines(n=1, profile='iptest'): |
|
36 | def add_engines(n=1, profile='iptest'): | |
37 | rc = client.Client(profile=profile) |
|
37 | rc = client.Client(profile=profile) | |
38 | base = len(rc) |
|
38 | base = len(rc) | |
39 | eps = [] |
|
39 | eps = [] | |
40 | for i in range(n): |
|
40 | for i in range(n): | |
41 | ep = Popen(['ipenginez']+ ['--profile', profile, '--log-level', '10', '--log-to-file'], stdout=blackhole, stderr=STDOUT) |
|
41 | ep = Popen(['ipengine']+ ['--profile', profile, '--log-level', '10', '--log-to-file'], stdout=blackhole, stderr=STDOUT) | |
42 | # ep.start() |
|
42 | # ep.start() | |
43 | processes.append(ep) |
|
43 | processes.append(ep) | |
44 | eps.append(ep) |
|
44 | eps.append(ep) | |
45 | while len(rc) < base+n: |
|
45 | while len(rc) < base+n: | |
46 | time.sleep(.1) |
|
46 | time.sleep(.1) | |
47 | rc.spin() |
|
47 | rc.spin() | |
48 | rc.close() |
|
48 | rc.close() | |
49 | return eps |
|
49 | return eps | |
50 |
|
50 | |||
51 | def teardown(): |
|
51 | def teardown(): | |
52 | time.sleep(1) |
|
52 | time.sleep(1) | |
53 | while processes: |
|
53 | while processes: | |
54 | p = processes.pop() |
|
54 | p = processes.pop() | |
55 | if p.poll() is None: |
|
55 | if p.poll() is None: | |
56 | try: |
|
56 | try: | |
57 | p.terminate() |
|
57 | p.terminate() | |
58 | except Exception, e: |
|
58 | except Exception, e: | |
59 | print e |
|
59 | print e | |
60 | pass |
|
60 | pass | |
61 | if p.poll() is None: |
|
61 | if p.poll() is None: | |
62 | time.sleep(.25) |
|
62 | time.sleep(.25) | |
63 | if p.poll() is None: |
|
63 | if p.poll() is None: | |
64 | try: |
|
64 | try: | |
65 | print 'killing' |
|
65 | print 'killing' | |
66 | p.kill() |
|
66 | p.kill() | |
67 | except: |
|
67 | except: | |
68 | print "couldn't shutdown process: ", p |
|
68 | print "couldn't shutdown process: ", p | |
69 |
|
69 |
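The terminate-then-kill escalation in teardown() above is a reusable pattern;
a minimal sketch (the helper name is ours, not from the test code):

import time

def stop_process(p, grace=0.25):
    """Politely stop a subprocess.Popen, killing it if it lingers."""
    if p.poll() is None:    # still running: ask nicely first
        try:
            p.terminate()
        except OSError:
            pass
    if p.poll() is None:    # give it a moment to exit
        time.sleep(grace)
    if p.poll() is None:    # still alive: force it
        p.kill()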
@@ -1,108 +1,108 b'' | |||||
1 | """test serialization with newserialized""" |
|
1 | """test serialization with newserialized""" | |
2 |
|
2 | |||
3 | #------------------------------------------------------------------------------- |
|
3 | #------------------------------------------------------------------------------- | |
4 | # Copyright (C) 2011 The IPython Development Team |
|
4 | # Copyright (C) 2011 The IPython Development Team | |
5 | # |
|
5 | # | |
6 | # Distributed under the terms of the BSD License. The full license is in |
|
6 | # Distributed under the terms of the BSD License. The full license is in | |
7 | # the file COPYING, distributed as part of this software. |
|
7 | # the file COPYING, distributed as part of this software. | |
8 | #------------------------------------------------------------------------------- |
|
8 | #------------------------------------------------------------------------------- | |
9 |
|
9 | |||
10 | #------------------------------------------------------------------------------- |
|
10 | #------------------------------------------------------------------------------- | |
11 | # Imports |
|
11 | # Imports | |
12 | #------------------------------------------------------------------------------- |
|
12 | #------------------------------------------------------------------------------- | |
13 |
|
13 | |||
14 | from unittest import TestCase |
|
14 | from unittest import TestCase | |
15 |
|
15 | |||
16 | from IPython.testing.parametric import parametric |
|
16 | from IPython.testing.decorators import parametric | |
17 | from IPython.utils import newserialized as ns |
|
17 | from IPython.utils import newserialized as ns | |
18 | from IPython.utils.pickleutil import can, uncan, CannedObject, CannedFunction |
|
18 | from IPython.utils.pickleutil import can, uncan, CannedObject, CannedFunction | |
19 | from IPython.parallel.tests.clienttest import skip_without |
|
19 | from IPython.parallel.tests.clienttest import skip_without | |
20 |
|
20 | |||
21 |
|
21 | |||
22 | class CanningTestCase(TestCase): |
|
22 | class CanningTestCase(TestCase): | |
23 | def test_canning(self): |
|
23 | def test_canning(self): | |
24 | d = dict(a=5,b=6) |
|
24 | d = dict(a=5,b=6) | |
25 | cd = can(d) |
|
25 | cd = can(d) | |
26 | self.assertTrue(isinstance(cd, dict)) |
|
26 | self.assertTrue(isinstance(cd, dict)) | |
27 |
|
27 | |||
28 | def test_canned_function(self): |
|
28 | def test_canned_function(self): | |
29 | f = lambda : 7 |
|
29 | f = lambda : 7 | |
30 | cf = can(f) |
|
30 | cf = can(f) | |
31 | self.assertTrue(isinstance(cf, CannedFunction)) |
|
31 | self.assertTrue(isinstance(cf, CannedFunction)) | |
32 |
|
32 | |||
33 | @parametric |
|
33 | @parametric | |
34 | def test_can_roundtrip(cls): |
|
34 | def test_can_roundtrip(cls): | |
35 | objs = [ |
|
35 | objs = [ | |
36 | dict(), |
|
36 | dict(), | |
37 | set(), |
|
37 | set(), | |
38 | list(), |
|
38 | list(), | |
39 | ['a',1,['a',1],u'e'], |
|
39 | ['a',1,['a',1],u'e'], | |
40 | ] |
|
40 | ] | |
41 | return map(cls.run_roundtrip, objs) |
|
41 | return map(cls.run_roundtrip, objs) | |
42 |
|
42 | |||
43 | @classmethod |
|
43 | @classmethod | |
44 | def run_roundtrip(self, obj): |
|
44 | def run_roundtrip(self, obj): | |
45 | o = uncan(can(obj)) |
|
45 | o = uncan(can(obj)) | |
46 | assert o == obj, "failed assertion: %r == %r"%(o,obj) |
|
46 | assert o == obj, "failed assertion: %r == %r"%(o,obj) | |
47 |
|
47 | |||
48 | def test_serialized_interfaces(self): |
|
48 | def test_serialized_interfaces(self): | |
49 |
|
49 | |||
50 | us = {'a':10, 'b':range(10)} |
|
50 | us = {'a':10, 'b':range(10)} | |
51 | s = ns.serialize(us) |
|
51 | s = ns.serialize(us) | |
52 | uus = ns.unserialize(s) |
|
52 | uus = ns.unserialize(s) | |
53 | self.assertTrue(isinstance(s, ns.SerializeIt)) |
|
53 | self.assertTrue(isinstance(s, ns.SerializeIt)) | |
54 | self.assertEquals(uus, us) |
|
54 | self.assertEquals(uus, us) | |
55 |
|
55 | |||
56 | def test_pickle_serialized(self): |
|
56 | def test_pickle_serialized(self): | |
57 | obj = {'a':1.45345, 'b':'asdfsdf', 'c':10000L} |
|
57 | obj = {'a':1.45345, 'b':'asdfsdf', 'c':10000L} | |
58 | original = ns.UnSerialized(obj) |
|
58 | original = ns.UnSerialized(obj) | |
59 | originalSer = ns.SerializeIt(original) |
|
59 | originalSer = ns.SerializeIt(original) | |
60 | firstData = originalSer.getData() |
|
60 | firstData = originalSer.getData() | |
61 | firstTD = originalSer.getTypeDescriptor() |
|
61 | firstTD = originalSer.getTypeDescriptor() | |
62 | firstMD = originalSer.getMetadata() |
|
62 | firstMD = originalSer.getMetadata() | |
63 | self.assertEquals(firstTD, 'pickle') |
|
63 | self.assertEquals(firstTD, 'pickle') | |
64 | self.assertEquals(firstMD, {}) |
|
64 | self.assertEquals(firstMD, {}) | |
65 | unSerialized = ns.UnSerializeIt(originalSer) |
|
65 | unSerialized = ns.UnSerializeIt(originalSer) | |
66 | secondObj = unSerialized.getObject() |
|
66 | secondObj = unSerialized.getObject() | |
67 | for k, v in secondObj.iteritems(): |
|
67 | for k, v in secondObj.iteritems(): | |
68 | self.assertEquals(obj[k], v) |
|
68 | self.assertEquals(obj[k], v) | |
69 | secondSer = ns.SerializeIt(ns.UnSerialized(secondObj)) |
|
69 | secondSer = ns.SerializeIt(ns.UnSerialized(secondObj)) | |
70 | self.assertEquals(firstData, secondSer.getData()) |
|
70 | self.assertEquals(firstData, secondSer.getData()) | |
71 | self.assertEquals(firstTD, secondSer.getTypeDescriptor() ) |
|
71 | self.assertEquals(firstTD, secondSer.getTypeDescriptor() ) | |
72 | self.assertEquals(firstMD, secondSer.getMetadata()) |
|
72 | self.assertEquals(firstMD, secondSer.getMetadata()) | |
73 |
|
73 | |||
74 | @skip_without('numpy') |
|
74 | @skip_without('numpy') | |
75 | def test_ndarray_serialized(self): |
|
75 | def test_ndarray_serialized(self): | |
76 | import numpy |
|
76 | import numpy | |
77 | a = numpy.linspace(0.0, 1.0, 1000) |
|
77 | a = numpy.linspace(0.0, 1.0, 1000) | |
78 | unSer1 = ns.UnSerialized(a) |
|
78 | unSer1 = ns.UnSerialized(a) | |
79 | ser1 = ns.SerializeIt(unSer1) |
|
79 | ser1 = ns.SerializeIt(unSer1) | |
80 | td = ser1.getTypeDescriptor() |
|
80 | td = ser1.getTypeDescriptor() | |
81 | self.assertEquals(td, 'ndarray') |
|
81 | self.assertEquals(td, 'ndarray') | |
82 | md = ser1.getMetadata() |
|
82 | md = ser1.getMetadata() | |
83 | self.assertEquals(md['shape'], a.shape) |
|
83 | self.assertEquals(md['shape'], a.shape) | |
84 | self.assertEquals(md['dtype'], a.dtype.str) |
|
84 | self.assertEquals(md['dtype'], a.dtype.str) | |
85 | buff = ser1.getData() |
|
85 | buff = ser1.getData() | |
86 | self.assertEquals(buff, numpy.getbuffer(a)) |
|
86 | self.assertEquals(buff, numpy.getbuffer(a)) | |
87 | s = ns.Serialized(buff, td, md) |
|
87 | s = ns.Serialized(buff, td, md) | |
88 | final = ns.unserialize(s) |
|
88 | final = ns.unserialize(s) | |
89 | self.assertEquals(numpy.getbuffer(a), numpy.getbuffer(final)) |
|
89 | self.assertEquals(numpy.getbuffer(a), numpy.getbuffer(final)) | |
90 | self.assertTrue((a==final).all()) |
|
90 | self.assertTrue((a==final).all()) | |
91 | self.assertEquals(a.dtype.str, final.dtype.str) |
|
91 | self.assertEquals(a.dtype.str, final.dtype.str) | |
92 | self.assertEquals(a.shape, final.shape) |
|
92 | self.assertEquals(a.shape, final.shape) | |
93 | # test non-copying: |
|
93 | # test non-copying: | |
94 | a[2] = 1e9 |
|
94 | a[2] = 1e9 | |
95 | self.assertTrue((a==final).all()) |
|
95 | self.assertTrue((a==final).all()) | |
96 |
|
96 | |||
97 | def test_uncan_function_globals(self): |
|
97 | def test_uncan_function_globals(self): | |
98 | """test that uncanning a module function restores it into its module""" |
|
98 | """test that uncanning a module function restores it into its module""" | |
99 | from re import search |
|
99 | from re import search | |
100 | cf = can(search) |
|
100 | cf = can(search) | |
101 | csearch = uncan(cf) |
|
101 | csearch = uncan(cf) | |
102 | self.assertEqual(csearch.__module__, search.__module__) |
|
102 | self.assertEqual(csearch.__module__, search.__module__) | |
103 | self.assertNotEqual(csearch('asd', 'asdf'), None) |
|
103 | self.assertNotEqual(csearch('asd', 'asdf'), None) | |
104 | csearch = uncan(cf, dict(a=5)) |
|
104 | csearch = uncan(cf, dict(a=5)) | |
105 | self.assertEqual(csearch.__module__, search.__module__) |
|
105 | self.assertEqual(csearch.__module__, search.__module__) | |
106 | self.assertNotEqual(csearch('asd', 'asdf'), None) |
|
106 | self.assertNotEqual(csearch('asd', 'asdf'), None) | |
107 |
|
107 | |||
108 | No newline at end of file |
|
108 |
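A short usage sketch of the round-trip API these tests exercise (the printed
value is what the assertions above imply for a plain dict):

from IPython.utils import newserialized as ns

obj = {'a': 10, 'b': range(10)}
s = ns.serialize(obj)          # returns a SerializeIt wrapper
print s.getTypeDescriptor()    # 'pickle' for plain Python objects
restored = ns.unserialize(s)
assert restored == obj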
@@ -1,419 +1,420 b'' | |||||
1 | # -*- coding: utf-8 -*- |
|
1 | # -*- coding: utf-8 -*- | |
2 | """IPython Test Suite Runner. |
|
2 | """IPython Test Suite Runner. | |
3 |
|
3 | |||
4 | This module provides a main entry point to a user script to test IPython |
|
4 | This module provides a main entry point to a user script to test IPython | |
5 | itself from the command line. There are two ways of running this script: |
|
5 | itself from the command line. There are two ways of running this script: | |
6 |
|
6 | |||
7 | 1. With the syntax `iptest all`. This runs our entire test suite by |
|
7 | 1. With the syntax `iptest all`. This runs our entire test suite by | |
8 | calling this script (with different arguments) recursively. This |
|
8 | calling this script (with different arguments) recursively. This | |
9 | causes modules and packages to be tested in different processes, using nose |
|
9 | causes modules and packages to be tested in different processes, using nose | |
10 | or trial where appropriate. |
|
10 | or trial where appropriate. | |
11 | 2. With the regular nose syntax, like `iptest -vvs IPython`. In this form |
|
11 | 2. With the regular nose syntax, like `iptest -vvs IPython`. In this form | |
12 | the script simply calls nose, but with special command line flags and |
|
12 | the script simply calls nose, but with special command line flags and | |
13 | plugins loaded. |
|
13 | plugins loaded. | |
14 |
|
14 | |||
15 | """ |
|
15 | """ | |
16 |
|
16 | |||
17 | #----------------------------------------------------------------------------- |
|
17 | #----------------------------------------------------------------------------- | |
18 | # Copyright (C) 2009 The IPython Development Team |
|
18 | # Copyright (C) 2009 The IPython Development Team | |
19 | # |
|
19 | # | |
20 | # Distributed under the terms of the BSD License. The full license is in |
|
20 | # Distributed under the terms of the BSD License. The full license is in | |
21 | # the file COPYING, distributed as part of this software. |
|
21 | # the file COPYING, distributed as part of this software. | |
22 | #----------------------------------------------------------------------------- |
|
22 | #----------------------------------------------------------------------------- | |
23 |
|
23 | |||
24 | #----------------------------------------------------------------------------- |
|
24 | #----------------------------------------------------------------------------- | |
25 | # Imports |
|
25 | # Imports | |
26 | #----------------------------------------------------------------------------- |
|
26 | #----------------------------------------------------------------------------- | |
27 |
|
27 | |||
28 | # Stdlib |
|
28 | # Stdlib | |
29 | import os |
|
29 | import os | |
30 | import os.path as path |
|
30 | import os.path as path | |
31 | import signal |
|
31 | import signal | |
32 | import sys |
|
32 | import sys | |
33 | import subprocess |
|
33 | import subprocess | |
34 | import tempfile |
|
34 | import tempfile | |
35 | import time |
|
35 | import time | |
36 | import warnings |
|
36 | import warnings | |
37 |
|
37 | |||
38 | # Note: monkeypatch! |
|
38 | # Note: monkeypatch! | |
39 | # We need to monkeypatch a small problem in nose itself first, before importing |
|
39 | # We need to monkeypatch a small problem in nose itself first, before importing | |
40 | # it for actual use. This should get into nose upstream, but its release cycle |
|
40 | # it for actual use. This should get into nose upstream, but its release cycle | |
41 | # is slow and we need it for our parametric tests to work correctly. |
|
41 | # is slow and we need it for our parametric tests to work correctly. | |
42 | from IPython.testing import nosepatch |
|
42 | from IPython.testing import nosepatch | |
43 | # Now, proceed to import nose itself |
|
43 | # Now, proceed to import nose itself | |
44 | import nose.plugins.builtin |
|
44 | import nose.plugins.builtin | |
45 | from nose.core import TestProgram |
|
45 | from nose.core import TestProgram | |
46 |
|
46 | |||
47 | # Our own imports |
|
47 | # Our own imports | |
48 | from IPython.utils.path import get_ipython_module_path |
|
48 | from IPython.utils.path import get_ipython_module_path | |
49 | from IPython.utils.process import find_cmd, pycmd2argv |
|
49 | from IPython.utils.process import find_cmd, pycmd2argv | |
50 | from IPython.utils.sysinfo import sys_info |
|
50 | from IPython.utils.sysinfo import sys_info | |
51 |
|
51 | |||
52 | from IPython.testing import globalipapp |
|
52 | from IPython.testing import globalipapp | |
53 | from IPython.testing.plugin.ipdoctest import IPythonDoctest |
|
53 | from IPython.testing.plugin.ipdoctest import IPythonDoctest | |
54 | from IPython.external.decorators import KnownFailure |
|
54 | from IPython.external.decorators import KnownFailure | |
55 |
|
55 | |||
56 | pjoin = path.join |
|
56 | pjoin = path.join | |
57 |
|
57 | |||
58 |
|
58 | |||
59 | #----------------------------------------------------------------------------- |
|
59 | #----------------------------------------------------------------------------- | |
60 | # Globals |
|
60 | # Globals | |
61 | #----------------------------------------------------------------------------- |
|
61 | #----------------------------------------------------------------------------- | |
62 |
|
62 | |||
63 |
|
63 | |||
64 | #----------------------------------------------------------------------------- |
|
64 | #----------------------------------------------------------------------------- | |
65 | # Warnings control |
|
65 | # Warnings control | |
66 | #----------------------------------------------------------------------------- |
|
66 | #----------------------------------------------------------------------------- | |
67 |
|
67 | |||
68 | # Twisted generates annoying warnings with Python 2.6, as does any other code |
|
68 | # Twisted generates annoying warnings with Python 2.6, as does any other code | |
69 | # that imports 'sets' as of today |
|
69 | # that imports 'sets' as of today | |
70 | warnings.filterwarnings('ignore', 'the sets module is deprecated', |
|
70 | warnings.filterwarnings('ignore', 'the sets module is deprecated', | |
71 | DeprecationWarning ) |
|
71 | DeprecationWarning ) | |
72 |
|
72 | |||
73 | # This one also comes from Twisted |
|
73 | # This one also comes from Twisted | |
74 | warnings.filterwarnings('ignore', 'the sha module is deprecated', |
|
74 | warnings.filterwarnings('ignore', 'the sha module is deprecated', | |
75 | DeprecationWarning) |
|
75 | DeprecationWarning) | |
76 |
|
76 | |||
77 | # Wx on Fedora11 spits these out |
|
77 | # Wx on Fedora11 spits these out | |
78 | warnings.filterwarnings('ignore', 'wxPython/wxWidgets release number mismatch', |
|
78 | warnings.filterwarnings('ignore', 'wxPython/wxWidgets release number mismatch', | |
79 | UserWarning) |
|
79 | UserWarning) | |
80 |
|
80 | |||
81 | #----------------------------------------------------------------------------- |
|
81 | #----------------------------------------------------------------------------- | |
82 | # Logic for skipping doctests |
|
82 | # Logic for skipping doctests | |
83 | #----------------------------------------------------------------------------- |
|
83 | #----------------------------------------------------------------------------- | |
84 |
|
84 | |||
85 | def test_for(mod, min_version=None): |
|
85 | def test_for(mod, min_version=None): | |
86 | """Test to see if mod is importable.""" |
|
86 | """Test to see if mod is importable.""" | |
87 | try: |
|
87 | try: | |
88 | __import__(mod) |
|
88 | __import__(mod) | |
89 | except (ImportError, RuntimeError): |
|
89 | except (ImportError, RuntimeError): | |
90 | # GTK raises a RuntimeError if it can't be initialized, even if it's |
|
90 | # GTK raises a RuntimeError if it can't be initialized, even if it's | |
91 | # importable. |
|
91 | # importable. | |
92 | return False |
|
92 | return False | |
93 | else: |
|
93 | else: | |
94 | if min_version: |
|
94 | if min_version: | |
95 | return sys.modules[mod].__version__ >= min_version |
|
95 | return sys.modules[mod].__version__ >= min_version | |
96 | else: |
|
96 | else: | |
97 | return True |
|
97 | return True | |
98 |
|
98 | |||
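# Usage note: the calls below populate ``have``; min_version is a plain
# string comparison, so e.g. '2.10.0' would compare *less than* '2.9.0' --
# adequate for the versions checked here, but not a general version test.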
99 | # Global dict where we can store information on what we have and what we don't |
|
99 | # Global dict where we can store information on what we have and what we don't | |
100 | # have available at test run time |
|
100 | # have available at test run time | |
101 | have = {} |
|
101 | have = {} | |
102 |
|
102 | |||
103 | have['curses'] = test_for('_curses') |
|
103 | have['curses'] = test_for('_curses') | |
104 | have['wx'] = test_for('wx') |
|
104 | have['wx'] = test_for('wx') | |
105 | have['wx.aui'] = test_for('wx.aui') |
|
105 | have['wx.aui'] = test_for('wx.aui') | |
106 | have['pexpect'] = test_for('pexpect') |
|
106 | have['pexpect'] = test_for('pexpect') | |
107 | have['zmq'] = test_for('zmq', '2.0.10') |
|
107 | have['zmq'] = test_for('zmq', '2.0.10') | |
108 |
|
108 | |||
109 | #----------------------------------------------------------------------------- |
|
109 | #----------------------------------------------------------------------------- | |
110 | # Functions and classes |
|
110 | # Functions and classes | |
111 | #----------------------------------------------------------------------------- |
|
111 | #----------------------------------------------------------------------------- | |
112 |
|
112 | |||
113 | def report(): |
|
113 | def report(): | |
114 | """Return a string with a summary report of test-related variables.""" |
|
114 | """Return a string with a summary report of test-related variables.""" | |
115 |
|
115 | |||
116 | out = [ sys_info(), '\n'] |
|
116 | out = [ sys_info(), '\n'] | |
117 |
|
117 | |||
118 | avail = [] |
|
118 | avail = [] | |
119 | not_avail = [] |
|
119 | not_avail = [] | |
120 |
|
120 | |||
121 | for k, is_avail in have.items(): |
|
121 | for k, is_avail in have.items(): | |
122 | if is_avail: |
|
122 | if is_avail: | |
123 | avail.append(k) |
|
123 | avail.append(k) | |
124 | else: |
|
124 | else: | |
125 | not_avail.append(k) |
|
125 | not_avail.append(k) | |
126 |
|
126 | |||
127 | if avail: |
|
127 | if avail: | |
128 | out.append('\nTools and libraries available at test time:\n') |
|
128 | out.append('\nTools and libraries available at test time:\n') | |
129 | avail.sort() |
|
129 | avail.sort() | |
130 | out.append(' ' + ' '.join(avail)+'\n') |
|
130 | out.append(' ' + ' '.join(avail)+'\n') | |
131 |
|
131 | |||
132 | if not_avail: |
|
132 | if not_avail: | |
133 | out.append('\nTools and libraries NOT available at test time:\n') |
|
133 | out.append('\nTools and libraries NOT available at test time:\n') | |
134 | not_avail.sort() |
|
134 | not_avail.sort() | |
135 | out.append(' ' + ' '.join(not_avail)+'\n') |
|
135 | out.append(' ' + ' '.join(not_avail)+'\n') | |
136 |
|
136 | |||
137 | return ''.join(out) |
|
137 | return ''.join(out) | |
138 |
|
138 | |||
139 |
|
139 | |||
140 | def make_exclude(): |
|
140 | def make_exclude(): | |
141 | """Make patterns of modules and packages to exclude from testing. |
|
141 | """Make patterns of modules and packages to exclude from testing. | |
142 |
|
142 | |||
143 | For the IPythonDoctest plugin, we need to exclude certain patterns that |
|
143 | For the IPythonDoctest plugin, we need to exclude certain patterns that | |
144 | cause testing problems. We should strive to minimize the number of |
|
144 | cause testing problems. We should strive to minimize the number of | |
145 | skipped modules, since this means untested code. |
|
145 | skipped modules, since this means untested code. | |
146 |
|
146 | |||
147 | These modules and packages will NOT get scanned by nose at all for tests. |
|
147 | These modules and packages will NOT get scanned by nose at all for tests. | |
148 | """ |
|
148 | """ | |
149 | # Simple utility to make IPython paths more readable; we need a lot of |
|
149 | # Simple utility to make IPython paths more readable; we need a lot of | |
150 | # these below |
|
150 | # these below | |
151 | ipjoin = lambda *paths: pjoin('IPython', *paths) |
|
151 | ipjoin = lambda *paths: pjoin('IPython', *paths) | |
152 |
|
152 | |||
153 | exclusions = [ipjoin('external'), |
|
153 | exclusions = [ipjoin('external'), | |
154 | pjoin('IPython_doctest_plugin'), |
|
154 | pjoin('IPython_doctest_plugin'), | |
155 | ipjoin('quarantine'), |
|
155 | ipjoin('quarantine'), | |
156 | ipjoin('deathrow'), |
|
156 | ipjoin('deathrow'), | |
157 | ipjoin('testing', 'attic'), |
|
157 | ipjoin('testing', 'attic'), | |
158 | # This guy is probably attic material |
|
158 | # This guy is probably attic material | |
159 | ipjoin('testing', 'mkdoctests'), |
|
159 | ipjoin('testing', 'mkdoctests'), | |
160 | # Testing inputhook will need a lot of thought, to figure out |
|
160 | # Testing inputhook will need a lot of thought, to figure out | |
161 | # how to have tests that don't lock up with the gui event |
|
161 | # how to have tests that don't lock up with the gui event | |
162 | # loops in the picture |
|
162 | # loops in the picture | |
163 | ipjoin('lib', 'inputhook'), |
|
163 | ipjoin('lib', 'inputhook'), | |
164 | # Config files aren't really importable stand-alone |
|
164 | # Config files aren't really importable stand-alone | |
165 | ipjoin('config', 'default'), |
|
165 | ipjoin('config', 'default'), | |
166 | ipjoin('config', 'profile'), |
|
166 | ipjoin('config', 'profile'), | |
167 | ] |
|
167 | ] | |
168 |
|
168 | |||
169 | if not have['wx']: |
|
169 | if not have['wx']: | |
170 | exclusions.append(ipjoin('lib', 'inputhookwx')) |
|
170 | exclusions.append(ipjoin('lib', 'inputhookwx')) | |
171 |
|
171 | |||
172 | # We do this unconditionally, so that the test suite doesn't import |
|
172 | # We do this unconditionally, so that the test suite doesn't import | |
173 | # gtk, changing the default encoding and masking some unicode bugs. |
|
173 | # gtk, changing the default encoding and masking some unicode bugs. | |
174 | exclusions.append(ipjoin('lib', 'inputhookgtk')) |
|
174 | exclusions.append(ipjoin('lib', 'inputhookgtk')) | |
175 |
|
175 | |||
176 | # These have to be skipped on win32 because they use echo, rm, cd, etc. |
|
176 | # These have to be skipped on win32 because they use echo, rm, cd, etc. | |
177 | # See ticket https://bugs.launchpad.net/bugs/366982 |
|
177 | # See ticket https://bugs.launchpad.net/bugs/366982 | |
178 | if sys.platform == 'win32': |
|
178 | if sys.platform == 'win32': | |
179 | exclusions.append(ipjoin('testing', 'plugin', 'test_exampleip')) |
|
179 | exclusions.append(ipjoin('testing', 'plugin', 'test_exampleip')) | |
180 | exclusions.append(ipjoin('testing', 'plugin', 'dtexample')) |
|
180 | exclusions.append(ipjoin('testing', 'plugin', 'dtexample')) | |
181 |
|
181 | |||
182 | if not have['pexpect']: |
|
182 | if not have['pexpect']: | |
183 | exclusions.extend([ipjoin('scripts', 'irunner'), |
|
183 | exclusions.extend([ipjoin('scripts', 'irunner'), | |
184 | ipjoin('lib', 'irunner')]) |
|
184 | ipjoin('lib', 'irunner')]) | |
185 |
|
185 | |||
186 | if not have['zmq']: |
|
186 | if not have['zmq']: | |
187 | exclusions.append(ipjoin('zmq')) |
|
187 | exclusions.append(ipjoin('zmq')) | |
|
188 | exclusions.append(ipjoin('parallel')) | |||
188 |
|
189 | |||
189 | # This is needed for the reg-exp to match on win32 in the ipdoctest plugin. |
|
190 | # This is needed for the reg-exp to match on win32 in the ipdoctest plugin. | |
190 | if sys.platform == 'win32': |
|
191 | if sys.platform == 'win32': | |
191 | exclusions = [s.replace('\\','\\\\') for s in exclusions] |
|
192 | exclusions = [s.replace('\\','\\\\') for s in exclusions] | |
192 |
|
193 | |||
193 | return exclusions |
|
194 | return exclusions | |
194 |
|
195 | |||
195 |
|
196 | |||
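# For reference: ipjoin('parallel') == 'IPython/parallel' (on posix), so the
# exclusion added above skips the whole IPython.parallel package when pyzmq
# is missing, since the package depends on zmq throughout.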
196 | class IPTester(object): |
|
197 | class IPTester(object): | |
197 | """Call that calls iptest or trial in a subprocess. |
|
198 | """Call that calls iptest or trial in a subprocess. | |
198 | """ |
|
199 | """ | |
199 | #: string, name of test runner that will be called |
|
200 | #: string, name of test runner that will be called | |
200 | runner = None |
|
201 | runner = None | |
201 | #: list, parameters for test runner |
|
202 | #: list, parameters for test runner | |
202 | params = None |
|
203 | params = None | |
203 | #: list, arguments of system call to be made to call test runner |
|
204 | #: list, arguments of system call to be made to call test runner | |
204 | call_args = None |
|
205 | call_args = None | |
205 | #: list, process ids of subprocesses we start (for cleanup) |
|
206 | #: list, process ids of subprocesses we start (for cleanup) | |
206 | pids = None |
|
207 | pids = None | |
207 |
|
208 | |||
208 | def __init__(self, runner='iptest', params=None): |
|
209 | def __init__(self, runner='iptest', params=None): | |
209 | """Create new test runner.""" |
|
210 | """Create new test runner.""" | |
210 | p = os.path |
|
211 | p = os.path | |
211 | if runner == 'iptest': |
|
212 | if runner == 'iptest': | |
212 | iptest_app = get_ipython_module_path('IPython.testing.iptest') |
|
213 | iptest_app = get_ipython_module_path('IPython.testing.iptest') | |
213 | self.runner = pycmd2argv(iptest_app) + sys.argv[1:] |
|
214 | self.runner = pycmd2argv(iptest_app) + sys.argv[1:] | |
214 | else: |
|
215 | else: | |
215 | raise Exception('Not a valid test runner: %s' % repr(runner)) |
|
216 | raise Exception('Not a valid test runner: %s' % repr(runner)) | |
216 | if params is None: |
|
217 | if params is None: | |
217 | params = [] |
|
218 | params = [] | |
218 | if isinstance(params, str): |
|
219 | if isinstance(params, str): | |
219 | params = [params] |
|
220 | params = [params] | |
220 | self.params = params |
|
221 | self.params = params | |
221 |
|
222 | |||
222 | # Assemble call |
|
223 | # Assemble call | |
223 | self.call_args = self.runner+self.params |
|
224 | self.call_args = self.runner+self.params | |
224 |
|
225 | |||
225 | # Store pids of anything we start to clean up on deletion, if possible |
|
226 | # Store pids of anything we start to clean up on deletion, if possible | |
226 | # (on posix only, since win32 has no os.kill) |
|
227 | # (on posix only, since win32 has no os.kill) | |
227 | self.pids = [] |
|
228 | self.pids = [] | |
228 |
|
229 | |||
229 | if sys.platform == 'win32': |
|
230 | if sys.platform == 'win32': | |
230 | def _run_cmd(self): |
|
231 | def _run_cmd(self): | |
231 | # On Windows, use os.system instead of subprocess.call, because I |
|
232 | # On Windows, use os.system instead of subprocess.call, because I | |
232 | # was having problems with subprocess and I just don't know enough |
|
233 | # was having problems with subprocess and I just don't know enough | |
233 | # about win32 to debug this reliably. Os.system may be the 'old |
|
234 | # about win32 to debug this reliably. Os.system may be the 'old | |
234 | # fashioned' way to do it, but it works just fine. If someone |
|
235 | # fashioned' way to do it, but it works just fine. If someone | |
235 | # later can clean this up that's fine, as long as the tests run |
|
236 | # later can clean this up that's fine, as long as the tests run | |
236 | # reliably in win32. |
|
237 | # reliably in win32. | |
237 | # What types of problems are you having? They may be related to |
|
238 | # What types of problems are you having? They may be related to | |
238 | # running Python in unbuffered mode. BG. |
|
239 | # running Python in unbuffered mode. BG. | |
239 | return os.system(' '.join(self.call_args)) |
|
240 | return os.system(' '.join(self.call_args)) | |
240 | else: |
|
241 | else: | |
241 | def _run_cmd(self): |
|
242 | def _run_cmd(self): | |
242 | # print >> sys.stderr, '*** CMD:', ' '.join(self.call_args) # dbg |
|
243 | # print >> sys.stderr, '*** CMD:', ' '.join(self.call_args) # dbg | |
243 | subp = subprocess.Popen(self.call_args) |
|
244 | subp = subprocess.Popen(self.call_args) | |
244 | self.pids.append(subp.pid) |
|
245 | self.pids.append(subp.pid) | |
245 | # If this fails, the pid will be left in self.pids and cleaned up |
|
246 | # If this fails, the pid will be left in self.pids and cleaned up | |
246 | # later, but if the wait call succeeds, then we can clear the |
|
247 | # later, but if the wait call succeeds, then we can clear the | |
247 | # stored pid. |
|
248 | # stored pid. | |
248 | retcode = subp.wait() |
|
249 | retcode = subp.wait() | |
249 | self.pids.pop() |
|
250 | self.pids.pop() | |
250 | return retcode |
|
251 | return retcode | |
251 |
|
252 | |||
252 | def run(self): |
|
253 | def run(self): | |
253 | """Run the stored commands""" |
|
254 | """Run the stored commands""" | |
254 | try: |
|
255 | try: | |
255 | return self._run_cmd() |
|
256 | return self._run_cmd() | |
256 | except: |
|
257 | except: | |
257 | import traceback |
|
258 | import traceback | |
258 | traceback.print_exc() |
|
259 | traceback.print_exc() | |
259 | return 1 # signal failure |
|
260 | return 1 # signal failure | |
260 |
|
261 | |||
261 | def __del__(self): |
|
262 | def __del__(self): | |
262 | """Cleanup on exit by killing any leftover processes.""" |
|
263 | """Cleanup on exit by killing any leftover processes.""" | |
263 |
|
264 | |||
264 | if not hasattr(os, 'kill'): |
|
265 | if not hasattr(os, 'kill'): | |
265 | return |
|
266 | return | |
266 |
|
267 | |||
267 | for pid in self.pids: |
|
268 | for pid in self.pids: | |
268 | try: |
|
269 | try: | |
269 | print 'Cleaning stale PID:', pid |
|
270 | print 'Cleaning stale PID:', pid | |
270 | os.kill(pid, signal.SIGKILL) |
|
271 | os.kill(pid, signal.SIGKILL) | |
271 | except OSError: |
|
272 | except OSError: | |
272 | # This is just a best effort, if we fail or the process was |
|
273 | # This is just a best effort, if we fail or the process was | |
273 | # really gone, ignore it. |
|
274 | # really gone, ignore it. | |
274 | pass |
|
275 | pass | |
275 |
|
276 | |||
276 |
|
277 | |||
277 | def make_runners(): |
|
278 | def make_runners(): | |
278 | """Define the top-level packages that need to be tested. |
|
279 | """Define the top-level packages that need to be tested. | |
279 | """ |
|
280 | """ | |
280 |
|
281 | |||
281 | # Packages to be tested via nose, that only depend on the stdlib |
|
282 | # Packages to be tested via nose, that only depend on the stdlib | |
282 | nose_pkg_names = ['config', 'core', 'extensions', 'frontend', 'lib', |
|
283 | nose_pkg_names = ['config', 'core', 'extensions', 'frontend', 'lib', | |
283 | 'scripts', 'testing', 'utils' ] |
|
284 | 'scripts', 'testing', 'utils' ] | |
284 |
|
285 | |||
285 | # For debugging this code, only load quick stuff |
|
286 | # For debugging this code, only load quick stuff | |
286 | #nose_pkg_names = ['core', 'extensions'] # dbg |
|
287 | #nose_pkg_names = ['core', 'extensions'] # dbg | |
287 |
|
288 | |||
288 | # Make fully qualified package names by prepending 'IPython.' to our name lists |
|
289 | # Make fully qualified package names by prepending 'IPython.' to our name lists | |
289 | nose_packages = ['IPython.%s' % m for m in nose_pkg_names ] |
|
290 | nose_packages = ['IPython.%s' % m for m in nose_pkg_names ] | |
290 |
|
291 | |||
291 | # Make runners |
|
292 | # Make runners | |
292 | runners = [ (v, IPTester('iptest', params=v)) for v in nose_packages ] |
|
293 | runners = [ (v, IPTester('iptest', params=v)) for v in nose_packages ] | |
293 |
|
294 | |||
294 | return runners |
|
295 | return runners | |
295 |
|
296 | |||
296 |
|
297 | |||
297 | def run_iptest(): |
|
298 | def run_iptest(): | |
298 | """Run the IPython test suite using nose. |
|
299 | """Run the IPython test suite using nose. | |
299 |
|
300 | |||
300 | This function is called when this script is **not** called with the form |
|
301 | This function is called when this script is **not** called with the form | |
301 | `iptest all`. It simply calls nose with appropriate command line flags |
|
302 | `iptest all`. It simply calls nose with appropriate command line flags | |
302 | and accepts all of the standard nose arguments. |
|
303 | and accepts all of the standard nose arguments. | |
303 | """ |
|
304 | """ | |
304 |
|
305 | |||
305 | warnings.filterwarnings('ignore', |
|
306 | warnings.filterwarnings('ignore', | |
306 | 'This will be removed soon. Use IPython.testing.util instead') |
|
307 | 'This will be removed soon. Use IPython.testing.util instead') | |
307 |
|
308 | |||
308 | argv = sys.argv + [ '--detailed-errors', # extra info in tracebacks |
|
309 | argv = sys.argv + [ '--detailed-errors', # extra info in tracebacks | |
309 |
|
310 | |||
310 | # Loading ipdoctest causes problems with Twisted, but |
|
311 | # Loading ipdoctest causes problems with Twisted, but | |
311 | # our test suite runner now separates things and runs |
|
312 | # our test suite runner now separates things and runs | |
312 | # all Twisted tests with trial. |
|
313 | # all Twisted tests with trial. | |
313 | '--with-ipdoctest', |
|
314 | '--with-ipdoctest', | |
314 | '--ipdoctest-tests','--ipdoctest-extension=txt', |
|
315 | '--ipdoctest-tests','--ipdoctest-extension=txt', | |
315 |
|
316 | |||
316 | # We add --exe because of setuptools' imbecility (it |
|
317 | # We add --exe because of setuptools' imbecility (it | |
317 | # blindly does chmod +x on ALL files). Nose does the |
|
318 | # blindly does chmod +x on ALL files). Nose does the | |
318 | # right thing and it tries to avoid executables, |
|
319 | # right thing and it tries to avoid executables, | |
319 | # setuptools unfortunately forces our hand here. This |
|
320 | # setuptools unfortunately forces our hand here. This | |
320 | # has been discussed on the distutils list and the |
|
321 | # has been discussed on the distutils list and the | |
321 | # setuptools devs refuse to fix this problem! |
|
322 | # setuptools devs refuse to fix this problem! | |
322 | '--exe', |
|
323 | '--exe', | |
323 | ] |
|
324 | ] | |
324 |
|
325 | |||
325 | if nose.__version__ >= '0.11': |
|
326 | if nose.__version__ >= '0.11': | |
326 | # I don't fully understand why we need this one, but depending on what |
|
327 | # I don't fully understand why we need this one, but depending on what | |
327 | # directory the test suite is run from, if we don't give it, 0 tests |
|
328 | # directory the test suite is run from, if we don't give it, 0 tests | |
328 | # get run. Specifically, if the test suite is run from the source dir |
|
329 | # get run. Specifically, if the test suite is run from the source dir | |
329 | # with an argument (like 'iptest.py IPython.core'), 0 tests are run, |
|
330 | # with an argument (like 'iptest.py IPython.core'), 0 tests are run, | |
330 | # even if the same call done in this directory works fine). It appears |
|
331 | # even if the same call done in this directory works fine). It appears | |
331 | # that if the requested package is in the current dir, nose bails early |
|
332 | # that if the requested package is in the current dir, nose bails early | |
332 | # by default. Since it's otherwise harmless, leave it in by default |
|
333 | # by default. Since it's otherwise harmless, leave it in by default | |
333 | # for nose >= 0.11, though unfortunately nose 0.10 doesn't support it. |
|
334 | # for nose >= 0.11, though unfortunately nose 0.10 doesn't support it. | |
334 | argv.append('--traverse-namespace') |
|
335 | argv.append('--traverse-namespace') | |
335 |
|
336 | |||
336 | # Construct list of plugins, omitting the existing doctest plugin, which |
|
337 | # Construct list of plugins, omitting the existing doctest plugin, which | |
337 | # ours replaces (and extends). |
|
338 | # ours replaces (and extends). | |
338 | plugins = [IPythonDoctest(make_exclude()), KnownFailure()] |
|
339 | plugins = [IPythonDoctest(make_exclude()), KnownFailure()] | |
339 | for p in nose.plugins.builtin.plugins: |
|
340 | for p in nose.plugins.builtin.plugins: | |
340 | plug = p() |
|
341 | plug = p() | |
341 | if plug.name == 'doctest': |
|
342 | if plug.name == 'doctest': | |
342 | continue |
|
343 | continue | |
343 | plugins.append(plug) |
|
344 | plugins.append(plug) | |
344 |
|
345 | |||
345 | # We need a global ipython running in this process |
|
346 | # We need a global ipython running in this process | |
346 | globalipapp.start_ipython() |
|
347 | globalipapp.start_ipython() | |
347 | # Now nose can run |
|
348 | # Now nose can run | |
348 | TestProgram(argv=argv, plugins=plugins) |
|
349 | TestProgram(argv=argv, plugins=plugins) | |
349 |
|
350 | |||
350 |
|
351 | |||
351 | def run_iptestall(): |
|
352 | def run_iptestall(): | |
352 | """Run the entire IPython test suite by calling nose and trial. |
|
353 | """Run the entire IPython test suite by calling nose and trial. | |
353 |
|
354 | |||
354 | This function constructs :class:`IPTester` instances for all IPython |
|
355 | This function constructs :class:`IPTester` instances for all IPython | |
355 | modules and packages and then runs each of them. This causes the modules |
|
356 | modules and packages and then runs each of them. This causes the modules | |
356 | and packages of IPython to be tested each in their own subprocess using |
|
357 | and packages of IPython to be tested each in their own subprocess using | |
357 | nose or twisted.trial appropriately. |
|
358 | nose or twisted.trial appropriately. | |
358 | """ |
|
359 | """ | |
359 |
|
360 | |||
360 | runners = make_runners() |
|
361 | runners = make_runners() | |
361 |
|
362 | |||
362 | # Run the test runners in a temporary dir so we can nuke it when finished |
|
363 | # Run the test runners in a temporary dir so we can nuke it when finished | |
363 | # to clean up any junk files left over by accident. This also makes it |
|
364 | # to clean up any junk files left over by accident. This also makes it | |
364 | # robust against being run in non-writeable directories by mistake, as the |
|
365 | # robust against being run in non-writeable directories by mistake, as the | |
365 | # temp dir will always be user-writeable. |
|
366 | # temp dir will always be user-writeable. | |
366 | curdir = os.getcwd() |
|
367 | curdir = os.getcwd() | |
367 | testdir = tempfile.gettempdir() |
|
368 | testdir = tempfile.gettempdir() | |
368 | os.chdir(testdir) |
|
369 | os.chdir(testdir) | |
369 |
|
370 | |||
370 | # Run all test runners, tracking execution time |
|
371 | # Run all test runners, tracking execution time | |
371 | failed = [] |
|
372 | failed = [] | |
372 | t_start = time.time() |
|
373 | t_start = time.time() | |
373 | try: |
|
374 | try: | |
374 | for (name, runner) in runners: |
|
375 | for (name, runner) in runners: | |
375 | print '*'*70 |
|
376 | print '*'*70 | |
376 | print 'IPython test group:',name |
|
377 | print 'IPython test group:',name | |
377 | res = runner.run() |
|
378 | res = runner.run() | |
378 | if res: |
|
379 | if res: | |
379 | failed.append( (name, runner) ) |
|
380 | failed.append( (name, runner) ) | |
380 | finally: |
|
381 | finally: | |
381 | os.chdir(curdir) |
|
382 | os.chdir(curdir) | |
382 | t_end = time.time() |
|
383 | t_end = time.time() | |
383 | t_tests = t_end - t_start |
|
384 | t_tests = t_end - t_start | |
384 | nrunners = len(runners) |
|
385 | nrunners = len(runners) | |
385 | nfail = len(failed) |
|
386 | nfail = len(failed) | |
386 | # summarize results |
|
387 | # summarize results | |
387 |
|
388 | |||
388 | print '*'*70 |
|
389 | print '*'*70 | |
389 | print 'Test suite completed for system with the following information:' |
|
390 | print 'Test suite completed for system with the following information:' | |
390 | print report() |
|
391 | print report() | |
391 | print 'Ran %s test groups in %.3fs' % (nrunners, t_tests) |
|
392 | print 'Ran %s test groups in %.3fs' % (nrunners, t_tests) | |
392 |
|
393 | |||
393 | print 'Status:' |
|
394 | print 'Status:' | |
394 | if not failed: |
|
395 | if not failed: | |
395 | print 'OK' |
|
396 | print 'OK' | |
396 | else: |
|
397 | else: | |
397 | # If anything went wrong, point out what command to rerun manually to |
|
398 | # If anything went wrong, point out what command to rerun manually to | |
398 | # see the actual errors and individual summary |
|
399 | # see the actual errors and individual summary | |
399 | print 'ERROR - %s out of %s test groups failed.' % (nfail, nrunners) |
|
400 | print 'ERROR - %s out of %s test groups failed.' % (nfail, nrunners) | |
400 | for name, failed_runner in failed: |
|
401 | for name, failed_runner in failed: | |
401 | print '-'*40 |
|
402 | print '-'*40 | |
402 | print 'Runner failed:',name |
|
403 | print 'Runner failed:',name | |
403 | print 'You may wish to rerun this one individually, with:' |
|
404 | print 'You may wish to rerun this one individually, with:' | |
404 | print ' '.join(failed_runner.call_args) |
|
405 | print ' '.join(failed_runner.call_args) | |
405 |
|
406 | |||
406 |
|
407 | |||
407 |
|
408 | |||
408 | def main(): |
|
409 | def main(): | |
409 | for arg in sys.argv[1:]: |
|
410 | for arg in sys.argv[1:]: | |
410 | if arg.startswith('IPython'): |
|
411 | if arg.startswith('IPython'): | |
411 | # This is in-process |
|
412 | # This is in-process | |
412 | run_iptest() |
|
413 | run_iptest() | |
413 | else: |
|
414 | else: | |
414 | # This starts subprocesses |
|
415 | # This starts subprocesses | |
415 | run_iptestall() |
|
416 | run_iptestall() | |
416 |
|
417 | |||
417 |
|
418 | |||
418 | if __name__ == '__main__': |
|
419 | if __name__ == '__main__': | |
419 | main() |
|
420 | main() |
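# Invocation summary (from main() above):
#   $ iptest all              # no 'IPython...' argument: run_iptestall(),
#                             # one subprocess per test group
#   $ iptest IPython.core     # argument starts with 'IPython': run_iptest(),
#                             # nose in this process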
@@ -1,34 +1,33 b'' | |||||
1 | ===================== |
|
1 | ===================== | |
2 | IPython Documentation |
|
2 | IPython Documentation | |
3 | ===================== |
|
3 | ===================== | |
4 |
|
4 | |||
5 | .. htmlonly:: |
|
5 | .. htmlonly:: | |
6 |
|
6 | |||
7 | :Release: |release| |
|
7 | :Release: |release| | |
8 | :Date: |today| |
|
8 | :Date: |today| | |
9 |
|
9 | |||
10 | Welcome to the official IPython documentation. |
|
10 | Welcome to the official IPython documentation. | |
11 |
|
11 | |||
12 | Contents |
|
12 | Contents | |
13 | ======== |
|
13 | ======== | |
14 |
|
14 | |||
15 | .. toctree:: |
|
15 | .. toctree:: | |
16 | :maxdepth: 1 |
|
16 | :maxdepth: 1 | |
17 |
|
17 | |||
18 | overview.txt |
|
18 | overview.txt | |
19 | whatsnew/index.txt |
|
19 | whatsnew/index.txt | |
20 | install/index.txt |
|
20 | install/index.txt | |
21 | interactive/index.txt |
|
21 | interactive/index.txt | |
22 |
|
|
22 | parallel/index.txt | |
23 | parallelz/index.txt |
|
|||
24 | config/index.txt |
|
23 | config/index.txt | |
25 | development/index.txt |
|
24 | development/index.txt | |
26 | api/index.txt |
|
25 | api/index.txt | |
27 | faq.txt |
|
26 | faq.txt | |
28 | about/index.txt |
|
27 | about/index.txt | |
29 |
|
28 | |||
30 | .. htmlonly:: |
|
29 | .. htmlonly:: | |
31 | * :ref:`genindex` |
|
30 | * :ref:`genindex` | |
32 | * :ref:`modindex` |
|
31 | * :ref:`modindex` | |
33 | * :ref:`search` |
|
32 | * :ref:`search` | |
34 |
|
33 |
@@ -1,387 +1,328 b'' | |||||
1 | Overview |
|
1 | Overview | |
2 | ======== |
|
2 | ======== | |
3 |
|
3 | |||
4 | This document describes the steps required to install IPython. IPython is |
|
4 | This document describes the steps required to install IPython. IPython is | |
5 | organized into a number of subpackages, each of which has its own dependencies. |
|
5 | organized into a number of subpackages, each of which has its own dependencies. | |
6 | All of the subpackages come with IPython, so you don't need to download and |
|
6 | All of the subpackages come with IPython, so you don't need to download and | |
7 | install them separately. However, to use a given subpackage, you will need to |
|
7 | install them separately. However, to use a given subpackage, you will need to | |
8 | install all of its dependencies. |
|
8 | install all of its dependencies. | |
9 |
|
9 | |||
10 |
|
10 | |||
11 | Please let us know if you have problems installing IPython or any of its |
|
11 | Please let us know if you have problems installing IPython or any of its | |
12 | dependencies. Officially, IPython requires Python version 2. |
|
12 | dependencies. Officially, IPython requires Python version 2.6 or 2.7. There | |
13 | have *not* yet started to port IPython to Python 3.0. |
|
13 | is an experimental port of IPython for Python3 `on GitHub | |
|
14 | <https://github.com/ipython/ipython-py3k>`_ | |||
14 |
|
15 | |||
15 | .. warning:: |
|
16 | .. warning:: | |
16 |
|
17 | |||
17 | Officially, IPython supports Python versions 2. |
|
18 | Officially, IPython supports Python versions 2.6 and 2.7. | |
18 |
|
19 | |||
19 | IPython 0.10 has only been well tested with Python 2.5 and 2.6. Parts of |
|
20 | IPython 0.11 has a hard syntax dependency on 2.6, and will no longer work | |
20 | it may work with Python 2.4, but we do not officially support Python 2.4 |
|
21 | on Python <= 2.5. | |
21 | anymore. If you need to use 2.4, you can still run IPython 0.9. |
|
|||
22 |
|
22 | |||
23 | Some of the installation approaches use the :mod:`setuptools` package and its |
|
23 | Some of the installation approaches use the :mod:`setuptools` package and its | |
24 | :command:`easy_install` command line program. In many scenarios, this provides |
|
24 | :command:`easy_install` command line program. In many scenarios, this provides | |
25 | the simplest method of installing IPython and its dependencies. It is not |
|
25 | the simplest method of installing IPython and its dependencies. It is not | |
26 | required though. More information about :mod:`setuptools` can be found on its |
|
26 | required though. More information about :mod:`setuptools` can be found on its | |
27 | website. |
|
27 | website. | |
28 |
|
28 | |||
29 | More general information about installing Python packages can be found in |
|
29 | More general information about installing Python packages can be found in | |
30 | Python's documentation at http://www.python.org/doc/. |
|
30 | Python's documentation at http://www.python.org/doc/. | |
31 |
|
31 | |||
32 | Quickstart |
|
32 | Quickstart | |
33 | ========== |
|
33 | ========== | |
34 |
|
34 | |||
35 | If you have :mod:`setuptools` installed and you are on OS X or Linux (not |
|
35 | If you have :mod:`setuptools` installed and you are on OS X or Linux (not | |
36 | Windows), the following will download and install IPython *and* the main |
|
36 | Windows), the following will download and install IPython *and* the main | |
37 | optional dependencies: |
|
37 | optional dependencies: | |
38 |
|
38 | |||
39 | .. code-block:: bash |
|
39 | .. code-block:: bash | |
40 |
|
40 | |||
41 | $ easy_install ipython[ |
|
41 | $ easy_install ipython[zmq,test] | |
42 |
|
42 | |||
43 | This will get |
|
43 | This will get pyzmq, which is needed for | |
44 | IPython's parallel computing features as well as the nose package, which will |
|
44 | IPython's parallel computing features as well as the nose package, which will | |
45 | enable you to run IPython's test suite. |
|
45 | enable you to run IPython's test suite. | |
46 |
|
46 | |||
47 | .. warning:: |
|
47 | .. warning:: | |
48 |
|
48 | |||
49 | IPython's test system is being refactored and currently the |
|
49 | IPython's test system is being refactored and currently the | |
50 | :command:`iptest` shown below does not work. More details about the |
|
50 | :command:`iptest` shown below does not work. More details about the | |
51 | testing situation can be found :ref:`here <testing>` |
|
51 | testing situation can be found :ref:`here <testing>` | |
52 |
|
52 | |||
53 | To run IPython's test suite, use the :command:`iptest` command: |
|
53 | To run IPython's test suite, use the :command:`iptest` command: | |
54 |
|
54 | |||
55 | .. code-block:: bash |
|
55 | .. code-block:: bash | |
56 |
|
56 | |||
57 | $ iptest |
|
57 | $ iptest | |
58 |
|
58 | |||
59 | Read on for more specific details and instructions for Windows. |
|
59 | Read on for more specific details and instructions for Windows. | |
60 |
|
60 | |||
61 | Installing IPython itself |
|
61 | Installing IPython itself | |
62 | ========================= |
|
62 | ========================= | |
63 |
|
63 | |||
64 | Given a properly built Python, the basic interactive IPython shell will work |
|
64 | Given a properly built Python, the basic interactive IPython shell will work | |
65 | with no external dependencies. However, some Python distributions |
|
65 | with no external dependencies. However, some Python distributions | |
66 | (particularly on Windows and OS X), don't come with a working :mod:`readline` |
|
66 | (particularly on Windows and OS X), don't come with a working :mod:`readline` | |
67 | module. The IPython shell will work without :mod:`readline`, but will lack |
|
67 | module. The IPython shell will work without :mod:`readline`, but will lack | |
68 | many features that users depend on, such as tab completion and command line |
|
68 | many features that users depend on, such as tab completion and command line | |
69 | editing. See below for details of how to make sure you have a working |
|
69 | editing. See below for details of how to make sure you have a working | |
70 | :mod:`readline`. |
|
70 | :mod:`readline`. | |
71 |
|
71 | |||
72 | Installation using easy_install |
|
72 | Installation using easy_install | |
73 | ------------------------------- |
|
73 | ------------------------------- | |
74 |
|
74 | |||
75 | If you have :mod:`setuptools` installed, the easiest way of getting IPython is |
|
75 | If you have :mod:`setuptools` installed, the easiest way of getting IPython is | |
76 | to simply use :command:`easy_install`: |
|
76 | to simply use :command:`easy_install`: | |
77 |
|
77 | |||
78 | .. code-block:: bash |
|
78 | .. code-block:: bash | |
79 |
|
79 | |||
80 | $ easy_install ipython |
|
80 | $ easy_install ipython | |
81 |
|
81 | |||
82 | That's it. |
|
82 | That's it. | |
83 |
|
83 | |||
84 | Installation from source |
|
84 | Installation from source | |
85 | ------------------------ |
|
85 | ------------------------ | |
86 |
|
86 | |||
87 | If you don't want to use :command:`easy_install`, or don't have it installed, |
|
87 | If you don't want to use :command:`easy_install`, or don't have it installed, | |
88 | just grab the latest stable build of IPython from `here |
|
88 | just grab the latest stable build of IPython from `here | |
89 | <http://ipython.scipy.org/dist/>`_. Then do the following: |
|
89 | <http://ipython.scipy.org/dist/>`_. Then do the following: | |
90 |
|
90 | |||
91 | .. code-block:: bash |
|
91 | .. code-block:: bash | |
92 |
|
92 | |||
93 | $ tar -xzf ipython.tar.gz |
|
93 | $ tar -xzf ipython.tar.gz | |
94 | $ cd ipython |
|
94 | $ cd ipython | |
95 | $ python setup.py install |
|
95 | $ python setup.py install | |
96 |
|
96 | |||
97 | If you are installing to a location (like ``/usr/local``) that requires higher |
|
97 | If you are installing to a location (like ``/usr/local``) that requires higher | |
98 | permissions, you may need to run the last command with :command:`sudo`. |
|
98 | permissions, you may need to run the last command with :command:`sudo`. | |
99 |
|
99 | |||
100 | Windows |
|
100 | Windows | |
101 | ------- |
|
101 | ------- | |
102 |
|
102 | |||
103 | There are a few caveats for Windows users. The main issue is that a basic |
|
103 | There are a few caveats for Windows users. The main issue is that a basic | |
104 | ``python setup.py install`` approach won't create ``.bat`` files or Start Menu |
|
104 | ``python setup.py install`` approach won't create ``.bat`` files or Start Menu | |
105 | shortcuts, which most users want. To get an installation with these, you can |
|
105 | shortcuts, which most users want. To get an installation with these, you can | |
106 | use any of the following alternatives: |
|
106 | use any of the following alternatives: | |
107 |
|
107 | |||
108 | 1. Install using :command:`easy_install`. |
|
108 | 1. Install using :command:`easy_install`. | |
109 |
|
109 | |||
110 | 2. Install using our binary ``.exe`` Windows installer, which can be found |
|
110 | 2. Install using our binary ``.exe`` Windows installer, which can be found | |
111 | `here <http://ipython.scipy.org/dist/>`_ |
|
111 | `here <http://ipython.scipy.org/dist/>`_ | |
112 |
|
112 | |||
113 | 3. Install from source, but using :mod:`setuptools` (``python setupegg.py |
|
113 | 3. Install from source, but using :mod:`setuptools` (``python setupegg.py | |
114 | install``). |
|
114 | install``). | |
115 |
|
115 | |||
116 | IPython by default runs in a terminal window, but the normal terminal |
|
116 | IPython by default runs in a terminal window, but the normal terminal | |
117 | application supplied by Microsoft Windows is very primitive. You may want to |
|
117 | application supplied by Microsoft Windows is very primitive. You may want to | |
118 | download the excellent and free Console_ application instead, which is a far |
|
118 | download the excellent and free Console_ application instead, which is a far | |
119 | superior tool. You can even configure Console to give you by default an |
|
119 | superior tool. You can even configure Console to give you by default an | |
120 | IPython tab, which is very convenient to create new IPython sessions directly |
|
120 | IPython tab, which is very convenient to create new IPython sessions directly | |
121 | from the working terminal. |
|
121 | from the working terminal. | |
122 |
|
122 | |||
123 | .. _Console: http://sourceforge.net/projects/console |
|
123 | .. _Console: http://sourceforge.net/projects/console | |
124 |
|
124 | |||
125 | Note for Windows 64 bit users: you may have difficulties with the stock |
|
125 | Note for Windows 64 bit users: you may have difficulties with the stock | |
126 | installer on 64 bit systems; in this case (since we currently do not have 64 |
|
126 | installer on 64 bit systems; in this case (since we currently do not have 64 | |
127 | bit builds of the Windows installer) your best bet is to install from source |
|
127 | bit builds of the Windows installer) your best bet is to install from source | |
128 | with the setuptools method indicated in #3 above. See `this bug report`_ for |
|
128 | with the setuptools method indicated in #3 above. See `this bug report`_ for | |
129 | further details. |
|
129 | further details. | |
130 |
|
130 | |||
131 | .. _this bug report: https://bugs.launchpad.net/ipython/+bug/382214 |
|
131 | .. _this bug report: https://bugs.launchpad.net/ipython/+bug/382214 | |
132 |
|
132 | |||
133 |
|
133 | |||
134 | Installing the development version |
|
134 | Installing the development version | |
135 | ---------------------------------- |
|
135 | ---------------------------------- | |
136 |
|
136 | |||
137 | It is also possible to install the development version of IPython from our |
|
137 | It is also possible to install the development version of IPython from our | |
138 | `Bazaar <http://bazaar-vcs.org/>`_ source code repository. To do this you will |
|
138 | `Bazaar <http://bazaar-vcs.org/>`_ source code repository. To do this you will | |
139 | need to have Bazaar installed on your system. Then just do: |
|
139 | need to have Bazaar installed on your system. Then just do: | |
140 |
|
140 | |||
141 | .. code-block:: bash |
|
141 | .. code-block:: bash | |
142 |
|
142 | |||
143 | $ bzr branch lp:ipython |
|
143 | $ bzr branch lp:ipython | |
144 | $ cd ipython |
|
144 | $ cd ipython | |
145 | $ python setup.py install |
|
145 | $ python setup.py install | |
146 |
|
146 | |||
147 | Again, this last step on Windows won't create ``.bat`` files or Start Menu |
|
147 | Again, this last step on Windows won't create ``.bat`` files or Start Menu | |
148 | shortcuts, so you will have to use one of the other approaches listed above. |
|
148 | shortcuts, so you will have to use one of the other approaches listed above. | |
149 |
|
149 | |||
150 | Some users want to be able to follow the development branch as it changes. If |
|
150 | Some users want to be able to follow the development branch as it changes. If | |
151 | you have :mod:`setuptools` installed, this is easy. Simply replace the last |
|
151 | you have :mod:`setuptools` installed, this is easy. Simply replace the last | |
152 | step by: |
|
152 | step by: | |
153 |
|
153 | |||
154 | .. code-block:: bash |
|
154 | .. code-block:: bash | |
155 |
|
155 | |||
156 | $ python setupegg.py develop |
|
156 | $ python setupegg.py develop | |
157 |
|
157 | |||
158 | This creates links in the right places and installs the command line script to |
|
158 | This creates links in the right places and installs the command line script to | |
159 | the appropriate places. Then, if you want to update your IPython at any time, |
|
159 | the appropriate places. Then, if you want to update your IPython at any time, | |
160 | just do: |
|
160 | just do: | |
161 |
|
161 | |||
162 | .. code-block:: bash |
|
162 | .. code-block:: bash | |
163 |
|
163 | |||
164 | $ bzr pull |
|
164 | $ bzr pull | |
165 |
|
165 | |||
166 | Basic optional dependencies |
|
166 | Basic optional dependencies | |
167 | =========================== |
|
167 | =========================== | |
168 |
|
168 | |||
169 | There are a number of basic optional dependencies that most users will want to |
|
169 | There are a number of basic optional dependencies that most users will want to | |
170 | get. These are: |
|
170 | get. These are: | |
171 |
|
171 | |||
172 | * readline (for command line editing, tab completion, etc.) |
|
172 | * readline (for command line editing, tab completion, etc.) | |
173 | * nose (to run the IPython test suite) |
|
173 | * nose (to run the IPython test suite) | |
174 | * pexpect (to use things like irunner) |
|
174 | * pexpect (to use things like irunner) | |
175 |
|
175 | |||
176 | If you are comfortable installing these things yourself, have at it, otherwise |
|
176 | If you are comfortable installing these things yourself, have at it, otherwise | |
177 | read on for more details. |
|
177 | read on for more details. | |
178 |
|
178 | |||
179 | readline |
|
179 | readline | |
180 | -------- |
|
180 | -------- | |
181 |
|
181 | |||
182 | In principle, all Python distributions should come with a working |
|
182 | In principle, all Python distributions should come with a working | |
183 | :mod:`readline` module. But, reality is not quite that simple. There are two |
|
183 | :mod:`readline` module. But, reality is not quite that simple. There are two | |
184 | common situations where you won't have a working :mod:`readline` module: |
|
184 | common situations where you won't have a working :mod:`readline` module: | |
185 |
|
185 | |||
186 | * If you are using the built-in Python on Mac OS X. |
|
186 | * If you are using the built-in Python on Mac OS X. | |
187 |
|
187 | |||
188 | * If you are running Windows, which doesn't have a :mod:`readline` module. |
|
188 | * If you are running Windows, which doesn't have a :mod:`readline` module. | |
189 |
|
189 | |||
190 |
|
190 | |||
191 | On OS X, the built-in Python does not have :mod:`readline` because of |
|
191 | On OS X, the built-in Python does not have :mod:`readline` because of | |
192 | license issues. Starting with OS X 10.5 (Leopard), Apple's built-in Python has |
|
192 | license issues. Starting with OS X 10.5 (Leopard), Apple's built-in Python has | |
193 | a BSD-licensed not-quite-compatible readline replacement. As of IPython 0.9, |
|
193 | a BSD-licensed not-quite-compatible readline replacement. As of IPython 0.9, | |
194 | many of the issues related to the differences between readline and libedit seem |
|
194 | many of the issues related to the differences between readline and libedit seem | |
195 | to have been resolved. While you may find libedit sufficient, we have |
|
195 | to have been resolved. While you may find libedit sufficient, we have | |
196 | occasional reports of bugs with it and several developers who use OS X as their |
|
196 | occasional reports of bugs with it and several developers who use OS X as their | |
197 | main environment consider libedit unacceptable for productive, regular use with |
|
197 | main environment consider libedit unacceptable for productive, regular use with | |
198 | IPython. |
|
198 | IPython. | |
199 |
|
199 | |||
200 | Therefore, we *strongly* recommend that on OS X you get the full |
|
200 | Therefore, we *strongly* recommend that on OS X you get the full | |
201 | :mod:`readline` module. We will *not* consider completion/history problems to |
|
201 | :mod:`readline` module. We will *not* consider completion/history problems to | |
202 | be bugs for IPython if you are using libedit. |
|
202 | be bugs for IPython if you are using libedit. | |
203 |
|
203 | |||
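A quick way to check which readline you actually have is to inspect the module docstring, which mentions libedit in the emulated case; a sketch relying on that convention:

.. code-block:: python

    import readline

    # Apple's libedit-based replacement identifies itself in the docstring
    if readline.__doc__ and 'libedit' in readline.__doc__:
        print('libedit emulation: expect completion/history quirks')
    else:
        print('GNU readline')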
204 | To get a working :mod:`readline` module, just do (with :mod:`setuptools` |
|
204 | To get a working :mod:`readline` module, just do (with :mod:`setuptools` | |
205 | installed): |
|
205 | installed): | |
206 |
|
206 | |||
207 | .. code-block:: bash |
|
207 | .. code-block:: bash | |
208 |
|
208 | |||
209 | $ easy_install readline |
|
209 | $ easy_install readline | |
210 |
|
210 | |||
211 | .. note:: |
|
211 | .. note:: | |
212 |
|
212 | |||
213 | Other Python distributions on OS X (such as fink, MacPorts and the official |
|
213 | Other Python distributions on OS X (such as fink, MacPorts and the official | |
214 | python.org binaries) already have readline installed so you likely don't |
|
214 | python.org binaries) already have readline installed so you likely don't | |
215 | have to do this step. |
|
215 | have to do this step. | |
216 |
|
216 | |||
217 | If needed, the readline egg can be built and installed from source (see the |
|
217 | If needed, the readline egg can be built and installed from source (see the | |
218 | wiki page at http://ipython.scipy.org/moin/InstallationOSXLeopard). |
|
218 | wiki page at http://ipython.scipy.org/moin/InstallationOSXLeopard). | |
219 |
|
219 | |||
220 | On Windows, you will need the PyReadline module. PyReadline is a separate, |
|
220 | On Windows, you will need the PyReadline module. PyReadline is a separate, | |
221 | Windows only implementation of readline that uses native Windows calls through |
|
221 | Windows only implementation of readline that uses native Windows calls through | |
222 | :mod:`ctypes`. The easiest way of installing PyReadline is to use the binary |
|
222 | :mod:`ctypes`. The easiest way of installing PyReadline is to use the binary | |
223 | installer available `here <http://ipython.scipy.org/dist/>`_. The :mod:`ctypes` |
|
223 | installer available `here <http://ipython.scipy.org/dist/>`_. The :mod:`ctypes` | |
224 | module, which comes with Python 2.5 and greater, is required by PyReadline. |
|
224 | module, which comes with Python 2.5 and greater, is required by PyReadline. | |
225 | is available for Python 2.4 at http://python.net/crew/theller/ctypes. |
|
|||
226 |
|
225 | |||
227 | nose |
|
226 | nose | |
228 | ---- |
|
227 | ---- | |
229 |
|
228 | |||
230 | To run the IPython test suite you will need the :mod:`nose` package. Nose |
|
229 | To run the IPython test suite you will need the :mod:`nose` package. Nose | |
231 | provides a great way of sniffing out and running all of the IPython tests. The |
|
230 | provides a great way of sniffing out and running all of the IPython tests. The | |
232 | simplest way of getting nose is to use :command:`easy_install`: |
|
231 | simplest way of getting nose is to use :command:`easy_install`: | |
233 |
|
232 | |||
234 | .. code-block:: bash |
|
233 | .. code-block:: bash | |
235 |
|
234 | |||
236 | $ easy_install nose |
|
235 | $ easy_install nose | |
237 |
|
236 | |||
238 | Another way of getting this is to do: |
|
237 | Another way of getting this is to do: | |
239 |
|
238 | |||
240 | .. code-block:: bash |
|
239 | .. code-block:: bash | |
241 |
|
240 | |||
242 | $ easy_install ipython[test] |
|
241 | $ easy_install ipython[test] | |
243 |
|
242 | |||
244 | For more installation options, see the `nose website |
|
243 | For more installation options, see the `nose website | |
245 | <http://somethingaboutorange.com/mrl/projects/nose/>`_. |
|
244 | <http://somethingaboutorange.com/mrl/projects/nose/>`_. | |
246 |
|
245 | |||
247 | .. warning:: |
|
246 | .. warning:: | |
248 |
|
247 | |||
249 | As described above, the :command:`iptest` command currently doesn't work. |
|
248 | As described above, the :command:`iptest` command currently doesn't work. | |
250 |
|
249 | |||
251 | Once you have nose installed, you can run IPython's test suite using the |
|
250 | Once you have nose installed, you can run IPython's test suite using the | |
252 | iptest command: |
|
251 | iptest command: | |
253 |
|
252 | |||
254 | .. code-block:: bash |
|
253 | .. code-block:: bash | |
255 |
|
254 | |||
256 | $ iptest |
|
255 | $ iptest | |
257 |
|
256 | |||
258 | pexpect |
|
257 | pexpect | |
259 | ------- |
|
258 | ------- | |
260 |
|
259 | |||
261 | The `pexpect <http://www.noah.org/wiki/Pexpect>`_ package is used in IPython's |
|
260 | The `pexpect <http://www.noah.org/wiki/Pexpect>`_ package is used in IPython's | |
262 | :command:`irunner` script. On Unix platforms (including OS X), just do: |
|
261 | :command:`irunner` script. On Unix platforms (including OS X), just do: | |
263 |
|
262 | |||
264 | .. code-block:: bash |
|
263 | .. code-block:: bash | |
265 |
|
264 | |||
266 | $ easy_install pexpect |
|
265 | $ easy_install pexpect | |
267 |
|
266 | |||
268 | Windows users are out of luck as pexpect does not run there. |
|
267 | Windows users are out of luck as pexpect does not run there. | |
269 |
|
268 | |||
270 | Dependencies for IPython. |
|
269 | Dependencies for IPython.parallel (parallel computing) | |
271 | ==================================================== |
|
270 | ====================================================== | |
272 |
|
271 | |||
273 | The IPython kernel provides a nice architecture for parallel computing. The |
|
272 | :mod:`IPython.kernel` has been replaced by :mod:`IPython.parallel`, | |
274 | main focus of this architecture is on interactive parallel computing. These |
|
273 | which uses ZeroMQ for all communication. | |
275 | features require a number of additional packages: |
|
|||
276 |
|
274 | |||
277 | * zope.interface (yep, we use interfaces) |
|
275 | IPython.parallel provides a nice architecture for parallel computing. The | |
278 | * Twisted (asynchronous networking framework) |
|
276 | main focus of this architecture is on interactive parallel computing. These | |
279 | * Foolscap (a nice, secure network protocol) |
|
277 | features require just one package: pyzmq. See the next section for pyzmq | |
280 | * pyOpenSSL (security for network connections) |
|
278 | details. | |
281 |
|
279 | |||
282 | On a Unix style platform (including OS X), if you want to use |
|
280 | On a Unix style platform (including OS X), if you want to use | |
283 | :mod:`setuptools`, you can just do: |
|
281 | :mod:`setuptools`, you can just do: | |
284 |
|
282 | |||
285 | .. code-block:: bash |
|
283 | .. code-block:: bash | |
286 |
|
284 | |||
287 | $ easy_install ipython[ |
|
285 | $ easy_install ipython[zmq] # will include pyzmq | |
288 | $ easy_install ipython[security] # pyOpenSSL |
|
|||
289 |
|
||||
290 | zope.interface and Twisted |
|
|||
291 | -------------------------- |
|
|||
292 |
|
||||
293 | Twisted [Twisted]_ and zope.interface [ZopeInterface]_ are used for networking |
|
|||
294 | related things. On Unix style platforms (including OS X), the simplest way of |
|
|||
295 | getting these is to use :command:`easy_install`: |
|
|||
296 |
|
||||
297 | .. code-block:: bash |
|
|||
298 |
|
||||
299 | $ easy_install zope.interface |
|
|||
300 | $ easy_install Twisted |
|
|||
301 |
|
286 | |||
302 | Of course, you can also download the source tarballs from the Twisted website |
|
287 | Security in IPython.parallel is provided by SSH tunnels. By default, Linux | |
303 | [Twisted]_ and the |
|
288 | and OSX clients will use the shell ssh command, but on Windows, we also | |
304 | `zope.interface page at PyPI <http://pypi.python.org/pypi/zope.interface>`_ |
|
289 | support tunneling with paramiko [paramiko]_. | |
305 | and do the usual ``python setup.py install`` if you prefer. |
|
|||
306 |
|
290 | |||
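As a rough illustration of the SSH tunneling described above (the host is a placeholder, not a documented default), a client can request a tunnel when it connects:

.. code-block:: python

    from IPython.parallel import Client

    # tunnel client traffic to a controller behind a hypothetical login node;
    # on Windows this goes through paramiko instead of the ssh command
    c = Client(sshserver='user@login.example.com')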
307 | Windows is a bit different. For zope.interface and Twisted, simply get the |
|
291 | Dependencies for IPython.zmq | |
308 | latest binary ``.exe`` installer from the Twisted website. This installer |
|
292 | ============================ | |
309 | includes both zope.interface and Twisted and should just work. |
|
|||
310 |
|
||||
311 | Foolscap |
|
|||
312 | -------- |
|
|||
313 |
|
||||
314 | Foolscap [Foolscap]_ uses Twisted to provide a very nice secure RPC protocol that we use to implement our parallel computing features. |
|
|||
315 |
|
||||
316 | On all platforms a simple: |
|
|||
317 |
|
||||
318 | .. code-block:: bash |
|
|||
319 |
|
||||
320 | $ easy_install foolscap |
|
|||
321 |
|
||||
322 | should work. You can also download the source tarballs from the `Foolscap |
|
|||
323 | website <http://foolscap.lothar.com/trac>`_ and do ``python setup.py install`` |
|
|||
324 | if you prefer. |
|
|||
325 |
|
||||
326 | pyOpenSSL |
|
|||
327 | --------- |
|
|||
328 |
|
||||
329 | IPython does not work with version 0.7 of pyOpenSSL [pyOpenSSL]_. It is known |
|
|||
330 | to work with version 0.6 and will likely work with the more recent 0.8 and 0.9 |
|
|||
331 | versions. There are a couple of options for getting this: |
|
|||
332 |
|
||||
333 | 1. Most Linux distributions have packages for pyOpenSSL. |
|
|||
334 | 2. The built-in Python 2.5 on OS X 10.5 already has it installed. |
|
|||
335 | 3. There are source tarballs on the pyOpenSSL website. On Unix-like |
|
|||
336 | platforms, these can be built using ``python setup.py install``. |
|
|||
337 | 4. There is also a binary ``.exe`` Windows installer on the |
|
|||
338 | `pyOpenSSL website <http://pyopenssl.sourceforge.net/>`_. |
|
|||
339 |
|
||||
340 | Dependencies for IPython.frontend (the IPython GUI) |
|
|||
341 | =================================================== |
|
|||
342 |
|
||||
343 | wxPython |
|
|||
344 | -------- |
|
|||
345 |
|
||||
346 | Starting with IPython 0.9, IPython has a new :mod:`IPython.frontend` package |
|
|||
347 | that has a nice wxPython based IPython GUI. As you would expect, this GUI |
|
|||
348 | requires wxPython. Most Linux distributions have wxPython packages available |
|
|||
349 | and the built-in Python on OS X comes with wxPython preinstalled. For Windows, |
|
|||
350 | a binary installer is available on the `wxPython website |
|
|||
351 | <http://www.wxpython.org/>`_. |
|
|||
352 |
|
||||
353 | Dependencies for IPython.zmq (new parallel) |
|
|||
354 | =========================================== |
|
|||
355 |
|
293 | |||
356 | pyzmq |
|
294 | pyzmq | |
357 | ----- |
|
295 | ----- | |
358 |
|
296 | |||
359 | IPython 0.11 introduced some new functionality, including a two-process |
|
297 | IPython 0.11 introduced some new functionality, including a two-process | |
360 | execution model using ZeroMQ for communication [ZeroMQ]_. The Python bindings |
|
298 | execution model using ZeroMQ for communication [ZeroMQ]_. The Python bindings | |
361 | to ZeroMQ are found in the pyzmq project, which is easy_install-able once you |
|
299 | to ZeroMQ are found in the pyzmq project, which is easy_install-able once you | |
362 | have ZeroMQ installed. :mod:`IPython.kernel` is also in the process of being |
|
300 | have ZeroMQ installed (or even if you don't). | |
363 | replaced by :mod:`IPython.zmq.parallel`, which uses ZeroMQ for all |
|
301 | ||
364 | communication. |
|
302 | IPython.zmq depends on pyzmq >= 2.0.10.1, but IPython.parallel requires the more | |
|
303 | recent 2.1.4, which also has binary releases for OSX and Windows that do not |||
|
304 | require prior installation of libzmq. | |||
365 |
|
305 | |||
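A minimal check that an installed pyzmq meets these requirements (version numbers as stated above):

.. code-block:: python

    import zmq

    # pyzmq reports its own version separately from the libzmq it wraps
    print(zmq.pyzmq_version())  # want >= 2.0.10.1 (>= 2.1.4 for IPython.parallel)
    print(zmq.zmq_version())    # version of the underlying libzmq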
366 | Dependencies for ipython-qtconsole (new GUI) |
|
306 | Dependencies for ipython-qtconsole (new GUI) | |
367 | ============================================ |
|
307 | ============================================ | |
368 |
|
308 | |||
369 | PyQt |
|
309 | PyQt | |
370 | ---- |
|
310 | ---- | |
371 |
|
311 | |||
372 | Also with 0.11, a new GUI was added using the work in :mod:`IPython.zmq`, |
|
312 | Also with 0.11, a new GUI was added using the work in :mod:`IPython.zmq`, | |
373 | which can be launched with ``ipython-qtconsole``. The GUI is built on PyQt , |
|
313 | which can be launched with ``ipython-qtconsole``. The GUI is built on PyQt , | |
374 | which can be installed from the |
|
314 | which can be installed from the | |
375 | `PyQt website <http://www.riverbankcomputing.co.uk/>`_. |
|
315 | `PyQt website <http://www.riverbankcomputing.co.uk/>`_. | |
376 |
|
316 | |||
377 | pygments |
|
317 | pygments | |
378 | -------- |
|
318 | -------- | |
379 |
|
319 | |||
380 | The syntax-highlighting in ``ipython-qtconsole`` is done with the pygments project, |
|
320 | The syntax-highlighting in ``ipython-qtconsole`` is done with the pygments project, | |
|
321 | which is easy_install-able. | |||
381 |
|
322 | |||
382 | .. [Twisted] Twisted matrix. http://twistedmatrix.org |
|
323 | .. [Twisted] Twisted matrix. http://twistedmatrix.org | |
383 | .. [ZopeInterface] http://pypi.python.org/pypi/zope.interface |
|
324 | .. [ZopeInterface] http://pypi.python.org/pypi/zope.interface | |
384 | .. [Foolscap] Foolscap network protocol. http://foolscap.lothar.com/trac |
|
325 | .. [Foolscap] Foolscap network protocol. http://foolscap.lothar.com/trac | |
385 | .. [pyOpenSSL] pyOpenSSL. http://pyopenssl.sourceforge.net |
|
326 | .. [pyOpenSSL] pyOpenSSL. http://pyopenssl.sourceforge.net | |
386 | .. [ZeroMQ] ZeroMQ. http://www.zeromq.org |
|
327 | .. [ZeroMQ] ZeroMQ. http://www.zeromq.org | |
387 |
|
328 | .. [paramiko] paramiko. https://github.com/robey/paramiko |
1 | NO CONTENT: file renamed from docs/source/parallelz/asian_call.pdf to docs/source/parallel/asian_call.pdf |
|
NO CONTENT: file renamed from docs/source/parallelz/asian_call.pdf to docs/source/parallel/asian_call.pdf |
1 | NO CONTENT: file renamed from docs/source/parallelz/asian_call.png to docs/source/parallel/asian_call.png |
|
NO CONTENT: file renamed from docs/source/parallelz/asian_call.png to docs/source/parallel/asian_call.png |
1 | NO CONTENT: file renamed from docs/source/parallelz/asian_put.pdf to docs/source/parallel/asian_put.pdf |
|
NO CONTENT: file renamed from docs/source/parallelz/asian_put.pdf to docs/source/parallel/asian_put.pdf |
1 | NO CONTENT: file renamed from docs/source/parallelz/asian_put.png to docs/source/parallel/asian_put.png |
|
NO CONTENT: file renamed from docs/source/parallelz/asian_put.png to docs/source/parallel/asian_put.png |
1 | NO CONTENT: file renamed from docs/source/parallelz/dag_dependencies.txt to docs/source/parallel/dag_dependencies.txt |
|
NO CONTENT: file renamed from docs/source/parallelz/dag_dependencies.txt to docs/source/parallel/dag_dependencies.txt |
1 | NO CONTENT: file renamed from docs/source/parallelz/dagdeps.pdf to docs/source/parallel/dagdeps.pdf |
|
NO CONTENT: file renamed from docs/source/parallelz/dagdeps.pdf to docs/source/parallel/dagdeps.pdf |
1 | NO CONTENT: file renamed from docs/source/parallelz/dagdeps.png to docs/source/parallel/dagdeps.png |
|
NO CONTENT: file renamed from docs/source/parallelz/dagdeps.png to docs/source/parallel/dagdeps.png |
1 | NO CONTENT: file renamed from docs/source/parallelz/hpc_job_manager.pdf to docs/source/parallel/hpc_job_manager.pdf |
|
NO CONTENT: file renamed from docs/source/parallelz/hpc_job_manager.pdf to docs/source/parallel/hpc_job_manager.pdf |
1 | NO CONTENT: file renamed from docs/source/parallelz/hpc_job_manager.png to docs/source/parallel/hpc_job_manager.png |
|
NO CONTENT: file renamed from docs/source/parallelz/hpc_job_manager.png to docs/source/parallel/hpc_job_manager.png |
@@ -1,12 +1,22 b'' | |||||
1 | .. _parallel_index: |
|
1 | .. _parallel_index: | |
2 |
|
2 | |||
3 | ==================================== |
|
3 | ==================================== | |
4 | Using IPython for parallel computing |
|
4 | Using IPython for parallel computing | |
5 | ==================================== |
|
5 | ==================================== | |
6 |
|
6 | |||
7 | The twisted-based :mod:`IPython.kernel` has been removed, in favor of |
|
7 | .. toctree:: | |
8 | the new 0MQ-based :mod:`IPython.parallel`, whose merge into master is imminent. |
|
8 | :maxdepth: 2 | |
|
9 | ||||
|
10 | parallel_intro.txt | |||
|
11 | parallel_process.txt | |||
|
12 | parallel_multiengine.txt | |||
|
13 | parallel_task.txt | |||
|
14 | parallel_mpi.txt | |||
|
15 | parallel_security.txt | |||
|
16 | parallel_winhpc.txt | |||
|
17 | parallel_demos.txt | |||
|
18 | dag_dependencies.txt | |||
|
19 | parallel_details.txt | |||
|
20 | parallel_transition.txt | |||
|
21 | ||||
9 |
|
22 | |||
10 | Until that code is merged, it can be found in the `newparallel branch |
|
|||
11 | <https://github.com/ipython/ipython/tree/newparallel>`_, and its draft documentation can be |
|
|||
12 | found `here <http://minrk.github.com/ipython-doc/newparallel>`_. No newline at end of file |
|
1 | NO CONTENT: file renamed from docs/source/parallelz/ipcluster_create.pdf to docs/source/parallel/ipcluster_create.pdf |
|
NO CONTENT: file renamed from docs/source/parallelz/ipcluster_create.pdf to docs/source/parallel/ipcluster_create.pdf |
1 | NO CONTENT: file renamed from docs/source/parallelz/ipcluster_create.png to docs/source/parallel/ipcluster_create.png |
|
NO CONTENT: file renamed from docs/source/parallelz/ipcluster_create.png to docs/source/parallel/ipcluster_create.png |
1 | NO CONTENT: file renamed from docs/source/parallelz/ipcluster_start.pdf to docs/source/parallel/ipcluster_start.pdf |
|
NO CONTENT: file renamed from docs/source/parallelz/ipcluster_start.pdf to docs/source/parallel/ipcluster_start.pdf |
1 | NO CONTENT: file renamed from docs/source/parallelz/ipcluster_start.png to docs/source/parallel/ipcluster_start.png |
|
NO CONTENT: file renamed from docs/source/parallelz/ipcluster_start.png to docs/source/parallel/ipcluster_start.png |
1 | NO CONTENT: file renamed from docs/source/parallelz/ipython_shell.pdf to docs/source/parallel/ipython_shell.pdf |
|
NO CONTENT: file renamed from docs/source/parallelz/ipython_shell.pdf to docs/source/parallel/ipython_shell.pdf |
1 | NO CONTENT: file renamed from docs/source/parallelz/ipython_shell.png to docs/source/parallel/ipython_shell.png |
|
NO CONTENT: file renamed from docs/source/parallelz/ipython_shell.png to docs/source/parallel/ipython_shell.png |
1 | NO CONTENT: file renamed from docs/source/parallelz/mec_simple.pdf to docs/source/parallel/mec_simple.pdf |
|
NO CONTENT: file renamed from docs/source/parallelz/mec_simple.pdf to docs/source/parallel/mec_simple.pdf |
1 | NO CONTENT: file renamed from docs/source/parallelz/mec_simple.png to docs/source/parallel/mec_simple.png |
|
NO CONTENT: file renamed from docs/source/parallelz/mec_simple.png to docs/source/parallel/mec_simple.png |
@@ -1,284 +1,284 b'' | |||||
1 | ================= |
|
1 | ================= | |
2 | Parallel examples |
|
2 | Parallel examples | |
3 | ================= |
|
3 | ================= | |
4 |
|
4 | |||
5 | .. note:: |
|
5 | .. note:: | |
6 |
|
6 | |||
7 | Performance numbers from ``IPython.kernel``, not newparallel. |
|
7 | Performance numbers from ``IPython.kernel``, not newparallel. | |
8 |
|
8 | |||
9 | In this section we describe two more involved examples of using an IPython |
|
9 | In this section we describe two more involved examples of using an IPython | |
10 | cluster to perform a parallel computation. In these examples, we will be using |
|
10 | cluster to perform a parallel computation. In these examples, we will be using | |
11 | IPython's "pylab" mode, which enables interactive plotting using the |
|
11 | IPython's "pylab" mode, which enables interactive plotting using the | |
12 | Matplotlib package. IPython can be started in this mode by typing:: |
|
12 | Matplotlib package. IPython can be started in this mode by typing:: | |
13 |
|
13 | |||
14 | ipython --pylab |
|
14 | ipython --pylab | |
15 |
|
15 | |||
16 | at the system command line. |
|
16 | at the system command line. | |
17 |
|
17 | |||
18 | 150 million digits of pi |
|
18 | 150 million digits of pi | |
19 | ======================== |
|
19 | ======================== | |
20 |
|
20 | |||
21 | In this example we would like to study the distribution of digits in the |
|
21 | In this example we would like to study the distribution of digits in the | |
22 | number pi (in base 10). While it is not known if pi is a normal number (a |
|
22 | number pi (in base 10). While it is not known if pi is a normal number (a | |
23 | number is normal in base 10 if 0-9 occur with equal likelihood) numerical |
|
23 | number is normal in base 10 if 0-9 occur with equal likelihood) numerical | |
24 | investigations suggest that it is. We will begin with a serial calculation on |
|
24 | investigations suggest that it is. We will begin with a serial calculation on | |
25 | 10,000 digits of pi and then perform a parallel calculation involving 150 |
|
25 | 10,000 digits of pi and then perform a parallel calculation involving 150 | |
26 | million digits. |
|
26 | million digits. | |
27 |
|
27 | |||
28 | In both the serial and parallel calculation we will be using functions defined |
|
28 | In both the serial and parallel calculation we will be using functions defined | |
29 | in the :file:`pidigits.py` file, which is available in the |
|
29 | in the :file:`pidigits.py` file, which is available in the | |
30 | :file:`docs/examples/newparallel` directory of the IPython source distribution. |
|
30 | :file:`docs/examples/newparallel` directory of the IPython source distribution. | |
31 | These functions provide basic facilities for working with the digits of pi and |
|
31 | These functions provide basic facilities for working with the digits of pi and | |
32 | can be loaded into IPython by putting :file:`pidigits.py` in your current |
|
32 | can be loaded into IPython by putting :file:`pidigits.py` in your current | |
33 | working directory and then doing: |
|
33 | working directory and then doing: | |
34 |
|
34 | |||
35 | .. sourcecode:: ipython |
|
35 | .. sourcecode:: ipython | |
36 |
|
36 | |||
37 | In [1]: run pidigits.py |
|
37 | In [1]: run pidigits.py | |
38 |
|
38 | |||
39 | Serial calculation |
|
39 | Serial calculation | |
40 | ------------------ |
|
40 | ------------------ | |
41 |
|
41 | |||
42 | For the serial calculation, we will use `SymPy <http://www.sympy.org>`_ to |
|
42 | For the serial calculation, we will use `SymPy <http://www.sympy.org>`_ to | |
43 | calculate 10,000 digits of pi and then look at the frequencies of the digits |
|
43 | calculate 10,000 digits of pi and then look at the frequencies of the digits | |
44 | 0-9. Out of 10,000 digits, we expect each digit to occur 1,000 times. While |
|
44 | 0-9. Out of 10,000 digits, we expect each digit to occur 1,000 times. While | |
45 | SymPy is capable of calculating many more digits of pi, our purpose here is to |
|
45 | SymPy is capable of calculating many more digits of pi, our purpose here is to | |
46 | set the stage for the much larger parallel calculation. |
|
46 | set the stage for the much larger parallel calculation. | |
47 |
|
47 | |||
48 | In this example, we use two functions from :file:`pidigits.py`: |
|
48 | In this example, we use two functions from :file:`pidigits.py`: | |
49 | :func:`one_digit_freqs` (which calculates how many times each digit occurs) |
|
49 | :func:`one_digit_freqs` (which calculates how many times each digit occurs) | |
50 | and :func:`plot_one_digit_freqs` (which uses Matplotlib to plot the result). |
|
50 | and :func:`plot_one_digit_freqs` (which uses Matplotlib to plot the result). | |
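(For readers without the file at hand, the counting step amounts to something like the following sketch; the shipped function may differ in detail.)

.. code-block:: python

    def one_digit_freqs(digits):
        # tally how many times each digit 0-9 occurs in an iterable of
        # digit characters, e.g. the digits of pi as a string
        freqs = [0] * 10
        for d in digits:
            freqs[int(d)] += 1
        return freqs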
51 | Here is an interactive IPython session that uses these functions with |
|
51 | Here is an interactive IPython session that uses these functions with | |
52 | SymPy: |
|
52 | SymPy: | |
53 |
|
53 | |||
54 | .. sourcecode:: ipython |
|
54 | .. sourcecode:: ipython | |
55 |
|
55 | |||
56 | In [7]: import sympy |
|
56 | In [7]: import sympy | |
57 |
|
57 | |||
58 | In [8]: pi = sympy.pi.evalf(40) |
|
58 | In [8]: pi = sympy.pi.evalf(40) | |
59 |
|
59 | |||
60 | In [9]: pi |
|
60 | In [9]: pi | |
61 | Out[9]: 3.141592653589793238462643383279502884197 |
|
61 | Out[9]: 3.141592653589793238462643383279502884197 | |
62 |
|
62 | |||
63 | In [10]: pi = sympy.pi.evalf(10000) |
|
63 | In [10]: pi = sympy.pi.evalf(10000) | |
64 |
|
64 | |||
65 | In [11]: digits = (d for d in str(pi)[2:]) # create a sequence of digits |
|
65 | In [11]: digits = (d for d in str(pi)[2:]) # create a sequence of digits | |
66 |
|
66 | |||
67 | In [12]: run pidigits.py # load one_digit_freqs/plot_one_digit_freqs |
|
67 | In [12]: run pidigits.py # load one_digit_freqs/plot_one_digit_freqs | |
68 |
|
68 | |||
69 | In [13]: freqs = one_digit_freqs(digits) |
|
69 | In [13]: freqs = one_digit_freqs(digits) | |
70 |
|
70 | |||
71 | In [14]: plot_one_digit_freqs(freqs) |
|
71 | In [14]: plot_one_digit_freqs(freqs) | |
72 | Out[14]: [<matplotlib.lines.Line2D object at 0x18a55290>] |
|
72 | Out[14]: [<matplotlib.lines.Line2D object at 0x18a55290>] | |
73 |
|
73 | |||
74 | The resulting plot of the single digit counts shows that each digit occurs |
|
74 | The resulting plot of the single digit counts shows that each digit occurs | |
75 | approximately 1,000 times, but that with only 10,000 digits the |
|
75 | approximately 1,000 times, but that with only 10,000 digits the | |
76 | statistical fluctuations are still rather large: |
|
76 | statistical fluctuations are still rather large: | |
77 |
|
77 | |||
78 | .. image:: single_digits.* |
|
78 | .. image:: single_digits.* | |
79 |
|
79 | |||
80 | It is clear that to reduce the relative fluctuations in the counts, we need |
|
80 | It is clear that to reduce the relative fluctuations in the counts, we need | |
81 | to look at many more digits of pi. That brings us to the parallel calculation. |
|
81 | to look at many more digits of pi. That brings us to the parallel calculation. | |
82 |
|
82 | |||
83 | Parallel calculation |
|
83 | Parallel calculation | |
84 | -------------------- |
|
84 | -------------------- | |
85 |
|
85 | |||
86 | Calculating many digits of pi is a challenging computational problem in itself. |
|
86 | Calculating many digits of pi is a challenging computational problem in itself. | |
87 | Because we want to focus on the distribution of digits in this example, we |
|
87 | Because we want to focus on the distribution of digits in this example, we | |
88 | will use pre-computed digits of pi from the website of Professor Yasumasa |
|
88 | will use pre-computed digits of pi from the website of Professor Yasumasa | |
89 | Kanada at the University of Tokyo (http://www.super-computing.org). These |
|
89 | Kanada at the University of Tokyo (http://www.super-computing.org). These | |
90 | digits come in a set of text files (ftp://pi.super-computing.org/.2/pi200m/) |
|
90 | digits come in a set of text files (ftp://pi.super-computing.org/.2/pi200m/) | |
91 | that each have 10 million digits of pi. |
|
91 | that each have 10 million digits of pi. | |
92 |
|
92 | |||
93 | For the parallel calculation, we have copied these files to the local hard |
|
93 | For the parallel calculation, we have copied these files to the local hard | |
94 | drives of the compute nodes. A total of 15 of these files will be used, for a |
|
94 | drives of the compute nodes. A total of 15 of these files will be used, for a | |
95 | total of 150 million digits of pi. To make things a little more interesting we |
|
95 | total of 150 million digits of pi. To make things a little more interesting we | |
96 | will calculate the frequencies of all two-digit sequences (00-99) and then plot |
|
96 | will calculate the frequencies of all two-digit sequences (00-99) and then plot | |
97 | the result using a 2D matrix in Matplotlib. |
|
97 | the result using a 2D matrix in Matplotlib. | |
98 |
|
98 | |||
99 | The overall idea of the calculation is simple: each IPython engine will |
|
99 | The overall idea of the calculation is simple: each IPython engine will | |
100 | compute the two digit counts for the digits in a single file. Then in a final |
|
100 | compute the two digit counts for the digits in a single file. Then in a final | |
101 | step the counts from each engine will be added up. To perform this |
|
101 | step the counts from each engine will be added up. To perform this | |
102 | calculation, we will need two top-level functions from :file:`pidigits.py`: |
|
102 | calculation, we will need two top-level functions from :file:`pidigits.py`: | |
103 |
|
103 | |||
104 | .. literalinclude:: ../../examples/newparallel/pidigits.py |
|
104 | .. literalinclude:: ../../examples/newparallel/pidigits.py | |
105 | :language: python |
|
105 | :language: python | |
106 | :lines: 41-56 |
|
106 | :lines: 41-56 | |
107 |
|
107 | |||
108 | We will also use the :func:`plot_two_digit_freqs` function to plot the |
|
108 | We will also use the :func:`plot_two_digit_freqs` function to plot the | |
109 | results. The code to run this calculation in parallel is contained in |
|
109 | results. The code to run this calculation in parallel is contained in | |
110 | :file:`docs/examples/newparallel/parallelpi.py`. This code can be run in parallel |
|
110 | :file:`docs/examples/newparallel/parallelpi.py`. This code can be run in parallel | |
111 | using IPython by following these steps: |
|
111 | using IPython by following these steps: | |
112 |
|
112 | |||
113 | 1. Use :command:`ipcluster |
|
113 | 1. Use :command:`ipcluster` to start 15 engines. We used an 8 core (2 quad | |
114 | core CPUs) cluster with hyperthreading enabled which makes the 8 cores |
|
114 | core CPUs) cluster with hyperthreading enabled which makes the 8 cores | |
115 | look like 16 (1 controller + 15 engines) in the OS. However, the maximum |
|
115 | look like 16 (1 controller + 15 engines) in the OS. However, the maximum | |
116 | speedup we can observe is still only 8x (see the sample command after this list). |
|
116 | speedup we can observe is still only 8x (see the sample command after this list). | |
117 | 2. With the file :file:`parallelpi.py` in your current working directory, open |
|
117 | 2. With the file :file:`parallelpi.py` in your current working directory, open | |
118 | up IPython in pylab mode and type ``run parallelpi.py``. This will download |
|
118 | up IPython in pylab mode and type ``run parallelpi.py``. This will download | |
119 | the pi files via ftp the first time you run it, if they are not |
|
119 | the pi files via ftp the first time you run it, if they are not | |
120 | present in the Engines' working directory. |
|
120 | present in the Engines' working directory. | |
121 |
|
121 | |||
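The command referenced in step 1 looks roughly like this (assuming the ``-n`` engine-count option of :command:`ipcluster`; profile setup, if any, is elided):

.. code-block:: bash

    $ ipcluster start -n 15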
122 | When run on our 8 core cluster, we observe a speedup of 7.7x. This is slightly |
|
122 | When run on our 8 core cluster, we observe a speedup of 7.7x. This is slightly | |
123 | less than linear scaling (8x) because the controller is also running on one of |
|
123 | less than linear scaling (8x) because the controller is also running on one of | |
124 | the cores. |
|
124 | the cores. | |
125 |
|
125 | |||
126 | To emphasize the interactive nature of IPython, we now show how the |
|
126 | To emphasize the interactive nature of IPython, we now show how the | |
127 | calculation can also be run by simply typing the commands from |
|
127 | calculation can also be run by simply typing the commands from | |
128 | :file:`parallelpi.py` interactively into IPython: |
|
128 | :file:`parallelpi.py` interactively into IPython: | |
129 |
|
129 | |||
130 | .. sourcecode:: ipython |
|
130 | .. sourcecode:: ipython | |
131 |
|
131 | |||
132 | In [1]: from IPython.parallel import Client |
|
132 | In [1]: from IPython.parallel import Client | |
133 |
|
133 | |||
134 | # The Client allows us to use the engines interactively. |
|
134 | # The Client allows us to use the engines interactively. | |
135 | # We simply pass Client the name of the cluster profile we |
|
135 | # We simply pass Client the name of the cluster profile we | |
136 | # are using. |
|
136 | # are using. | |
137 | In [2]: c = Client(profile='mycluster') |
|
137 | In [2]: c = Client(profile='mycluster') | |
138 | In [3]: view = c.load_balanced_view() |
|
138 | In [3]: view = c.load_balanced_view() | |
139 |
|
139 | |||
140 | In [3]: c.ids |
|
140 | In [3]: c.ids | |
141 | Out[3]: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14] |
|
141 | Out[3]: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14] | |
142 |
|
142 | |||
143 | In [4]: run pidigits.py |
|
143 | In [4]: run pidigits.py | |
144 |
|
144 | |||
145 | In [5]: filestring = 'pi200m.ascii.%(i)02dof20' |
|
145 | In [5]: filestring = 'pi200m.ascii.%(i)02dof20' | |
146 |
|
146 | |||
147 | # Create the list of files to process. |
|
147 | # Create the list of files to process. | |
148 | In [6]: files = [filestring % {'i':i} for i in range(1,16)] |
|
148 | In [6]: files = [filestring % {'i':i} for i in range(1,16)] | |
149 |
|
149 | |||
150 | In [7]: files |
|
150 | In [7]: files | |
151 | Out[7]: |
|
151 | Out[7]: | |
152 | ['pi200m.ascii.01of20', |
|
152 | ['pi200m.ascii.01of20', | |
153 | 'pi200m.ascii.02of20', |
|
153 | 'pi200m.ascii.02of20', | |
154 | 'pi200m.ascii.03of20', |
|
154 | 'pi200m.ascii.03of20', | |
155 | 'pi200m.ascii.04of20', |
|
155 | 'pi200m.ascii.04of20', | |
156 | 'pi200m.ascii.05of20', |
|
156 | 'pi200m.ascii.05of20', | |
157 | 'pi200m.ascii.06of20', |
|
157 | 'pi200m.ascii.06of20', | |
158 | 'pi200m.ascii.07of20', |
|
158 | 'pi200m.ascii.07of20', | |
159 | 'pi200m.ascii.08of20', |
|
159 | 'pi200m.ascii.08of20', | |
160 | 'pi200m.ascii.09of20', |
|
160 | 'pi200m.ascii.09of20', | |
161 | 'pi200m.ascii.10of20', |
|
161 | 'pi200m.ascii.10of20', | |
162 | 'pi200m.ascii.11of20', |
|
162 | 'pi200m.ascii.11of20', | |
163 | 'pi200m.ascii.12of20', |
|
163 | 'pi200m.ascii.12of20', | |
164 | 'pi200m.ascii.13of20', |
|
164 | 'pi200m.ascii.13of20', | |
165 | 'pi200m.ascii.14of20', |
|
165 | 'pi200m.ascii.14of20', | |
166 | 'pi200m.ascii.15of20'] |
|
166 | 'pi200m.ascii.15of20'] | |
167 |
|
167 | |||
168 | # download the data files if they don't already exist: |
|
168 | # download the data files if they don't already exist: | |
169 | In [8]: view.map(fetch_pi_file, files) |
|
169 | In [8]: view.map(fetch_pi_file, files) | |
170 |
|
170 | |||
171 | # This is the parallel calculation using the LoadBalancedView.map method |
|
171 | # This is the parallel calculation using the LoadBalancedView.map method | |
172 | # which applies compute_two_digit_freqs to each file in files in parallel. |
|
172 | # which applies compute_two_digit_freqs to each file in files in parallel. | |
173 | In [9]: freqs_all = view.map(compute_two_digit_freqs, files) |
|
173 | In [9]: freqs_all = view.map(compute_two_digit_freqs, files) | |
174 |
|
174 | |||
175 | # Add up the frequencies from each engine. |
|
175 | # Add up the frequencies from each engine. | |
176 | In [10]: freqs = reduce_freqs(freqs_all) |
|
176 | In [10]: freqs = reduce_freqs(freqs_all) | |
177 |
|
177 | |||
178 | In [11]: plot_two_digit_freqs(freqs) |
|
178 | In [11]: plot_two_digit_freqs(freqs) | |
179 | Out[11]: <matplotlib.image.AxesImage object at 0x18beb110> |
|
179 | Out[11]: <matplotlib.image.AxesImage object at 0x18beb110> | |
180 |
|
180 | |||
181 | In [12]: plt.title('2 digit counts of 150m digits of pi') |
|
181 | In [12]: plt.title('2 digit counts of 150m digits of pi') | |
182 | Out[12]: <matplotlib.text.Text object at 0x18d1f9b0> |
|
182 | Out[12]: <matplotlib.text.Text object at 0x18d1f9b0> | |
183 |
|
183 | |||
184 | The resulting plot generated by Matplotlib is shown below. The colors indicate |
|
184 | The resulting plot generated by Matplotlib is shown below. The colors indicate | |
185 | which two digit sequences are more (red) or less (blue) likely to occur in the |
|
185 | which two digit sequences are more (red) or less (blue) likely to occur in the | |
186 | first 150 million digits of pi. We clearly see that the sequence "41" is |
|
186 | first 150 million digits of pi. We clearly see that the sequence "41" is | |
187 | most likely and that "06" and "07" are least likely. Further analysis would |
|
187 | most likely and that "06" and "07" are least likely. Further analysis would | |
188 | show that the relative size of the statistical fluctuations has decreased |
|
188 | show that the relative size of the statistical fluctuations has decreased | |
189 | compared to the 10,000 digit calculation. |
|
189 | compared to the 10,000 digit calculation. | |
190 |
|
190 | |||
191 | .. image:: two_digit_counts.* |
|
191 | .. image:: two_digit_counts.* | |
192 |
|
192 | |||
193 |
|
193 | |||
194 | Parallel options pricing |
|
194 | Parallel options pricing | |
195 | ======================== |
|
195 | ======================== | |
196 |
|
196 | |||
197 | An option is a financial contract that gives the buyer of the contract the |
|
197 | An option is a financial contract that gives the buyer of the contract the | |
198 | right to buy (a "call") or sell (a "put") a secondary asset (a stock for |
|
198 | right to buy (a "call") or sell (a "put") a secondary asset (a stock for | |
199 | example) at a particular date in the future (the expiration date) for a |
|
199 | example) at a particular date in the future (the expiration date) for a | |
200 | pre-agreed upon price (the strike price). For this right, the buyer pays the |
|
200 | pre-agreed upon price (the strike price). For this right, the buyer pays the | |
201 | seller a premium (the option price). There are a wide variety of flavors of |
|
201 | seller a premium (the option price). There are a wide variety of flavors of | |
202 | options (American, European, Asian, etc.) that are useful for different |
|
202 | options (American, European, Asian, etc.) that are useful for different | |
203 | purposes: hedging against risk, speculation, etc. |
|
203 | purposes: hedging against risk, speculation, etc. | |
204 |
|
204 | |||
205 | Much of modern finance is driven by the need to price these contracts |
|
205 | Much of modern finance is driven by the need to price these contracts | |
206 | accurately based on what is known about the properties (such as volatility) of |
|
206 | accurately based on what is known about the properties (such as volatility) of | |
207 | the underlying asset. One method of pricing options is to use a Monte Carlo |
|
207 | the underlying asset. One method of pricing options is to use a Monte Carlo | |
208 | simulation of the underlying asset price. In this example we use this approach |
|
208 | simulation of the underlying asset price. In this example we use this approach | |
209 | to price both European and Asian (path dependent) options for various strike |
|
209 | to price both European and Asian (path dependent) options for various strike | |
210 | prices and volatilities. |
|
210 | prices and volatilities. | |
211 |
|
211 | |||
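The Monte Carlo idea itself fits in a few lines. Here is an illustrative sketch for a European call under risk-neutral geometric Brownian motion; it is not the code from :file:`mcpricer.py`:

.. code-block:: python

    import numpy as np

    def mc_european_call(S0, K, sigma, r, T, n_paths=100000):
        # simulate terminal asset prices under risk-neutral GBM dynamics
        Z = np.random.standard_normal(n_paths)
        ST = S0 * np.exp((r - 0.5 * sigma ** 2) * T + sigma * np.sqrt(T) * Z)
        payoff = np.maximum(ST - K, 0.0)        # call payoff at expiration
        return np.exp(-r * T) * payoff.mean()   # discounted expected payoff

    print(mc_european_call(S0=100.0, K=100.0, sigma=0.25, r=0.05, T=1.0))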
212 | The code for this example can be found in the :file:`docs/examples/newparallel` |
|
212 | The code for this example can be found in the :file:`docs/examples/newparallel` | |
213 | directory of the IPython source. The function :func:`price_options` in |
|
213 | directory of the IPython source. The function :func:`price_options` in | |
214 | :file:`mcpricer.py` implements the basic Monte Carlo pricing algorithm using |
|
214 | :file:`mcpricer.py` implements the basic Monte Carlo pricing algorithm using | |
215 | the NumPy package and is shown here: |
|
215 | the NumPy package and is shown here: | |
216 |
|
216 | |||
217 | .. literalinclude:: ../../examples/newparallel/mcpricer.py |
|
217 | .. literalinclude:: ../../examples/newparallel/mcpricer.py | |
218 | :language: python |
|
218 | :language: python | |
219 |
|
219 | |||
220 | To run this code in parallel, we will use IPython's :class:`LoadBalancedView` class, |
|
220 | To run this code in parallel, we will use IPython's :class:`LoadBalancedView` class, | |
221 | which distributes work to the engines using dynamic load balancing. This |
|
221 | which distributes work to the engines using dynamic load balancing. This | |
222 | view is created from the :class:`Client` class shown in |
|
222 | view is created from the :class:`Client` class shown in | |
223 | the previous example. The parallel calculation using :class:`LoadBalancedView` can |
|
223 | the previous example. The parallel calculation using :class:`LoadBalancedView` can | |
224 | be found in the file :file:`mcdriver.py`. The code in this file creates a |
|
224 | be found in the file :file:`mcdriver.py`. The code in this file creates a | |
225 | :class:`Client` instance and a :class:`LoadBalancedView`, and then submits a |
|
225 | :class:`Client` instance and a :class:`LoadBalancedView`, and then submits a | |
226 | set of tasks using :meth:`LoadBalancedView.apply_async` that calculate the |
|
226 | set of tasks using :meth:`LoadBalancedView.apply_async` that calculate the | |
227 | option prices for different volatilities and strike prices. The results are |
|
227 | option prices for different volatilities and strike prices. The results are | |
228 | then plotted as a 2D contour plot using Matplotlib. |
|
228 | then plotted as a 2D contour plot using Matplotlib. | |
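Schematically, the submission pattern amounts to the following condensed sketch; it is not a drop-in script. The exact argument list of :func:`price_options` differs in the example file, and ``K_vals`` and ``sigma_vals`` stand for the arrays of strike prices and volatilities used below.

.. sourcecode:: python

    from IPython.parallel import Client

    rc = Client()
    view = rc.load_balanced_view()
    # stand-ins for the example's parameter grids
    K_vals = [15.0, 20.0, 25.0]
    sigma_vals = [0.1, 0.2, 0.3]
    # one task per (strike, volatility) pair; the scheduler balances the load
    async_results = [view.apply_async(price_options, K, sigma)
                     for K in K_vals for sigma in sigma_vals]
    # each get() blocks until that particular task has finished
    prices = [ar.get() for ar in async_results]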
229 |
|
229 | |||
230 | .. literalinclude:: ../../examples/newparallel/mcdriver.py |
|
230 | .. literalinclude:: ../../examples/newparallel/mcdriver.py | |
231 | :language: python |
|
231 | :language: python | |
232 |
|
232 | |||
233 | To use this code, start an IPython cluster using :command:`ipcluster |
|
233 | To use this code, start an IPython cluster using :command:`ipcluster`, open |
234 | IPython in pylab mode with the file :file:`mcdriver.py` in your current |
|
234 | IPython in pylab mode with the file :file:`mcdriver.py` in your current | |
235 | working directory and then type: |
|
235 | working directory and then type: | |
236 |
|
236 | |||
237 | .. sourcecode:: ipython |
|
237 | .. sourcecode:: ipython | |
238 |
|
238 | |||
239 | In [7]: run mcdriver.py |
|
239 | In [7]: run mcdriver.py | |
240 | Submitted tasks: [0, 1, 2, ...] |
|
240 | Submitted tasks: [0, 1, 2, ...] | |
241 |
|
241 | |||
242 | Once all the tasks have finished, the results can be plotted using the |
|
242 | Once all the tasks have finished, the results can be plotted using the | |
243 | :func:`plot_options` function. Here we make contour plots of the Asian |
|
243 | :func:`plot_options` function. Here we make contour plots of the Asian | |
244 | call and Asian put options as a function of the volatility and strike price: |
|
244 | call and Asian put options as a function of the volatility and strike price: | |
245 |
|
245 | |||
246 | .. sourcecode:: ipython |
|
246 | .. sourcecode:: ipython | |
247 |
|
247 | |||
248 | In [8]: plot_options(sigma_vals, K_vals, prices['acall']) |
|
248 | In [8]: plot_options(sigma_vals, K_vals, prices['acall']) | |
249 |
|
249 | |||
250 | In [9]: plt.figure() |
|
250 | In [9]: plt.figure() | |
251 | Out[9]: <matplotlib.figure.Figure object at 0x18c178d0> |
|
251 | Out[9]: <matplotlib.figure.Figure object at 0x18c178d0> | |
252 |
|
252 | |||
253 | In [10]: plot_options(sigma_vals, K_vals, prices['aput']) |
|
253 | In [10]: plot_options(sigma_vals, K_vals, prices['aput']) | |
254 |
|
254 | |||
255 | These results are shown in the two figures below. On an 8-core cluster the |
|
255 | These results are shown in the two figures below. On an 8-core cluster the | |
256 | entire calculation (10 strike prices, 10 volatilities, 100,000 paths for each) |
|
256 | entire calculation (10 strike prices, 10 volatilities, 100,000 paths for each) | |
257 | took 30 seconds in parallel, giving a speedup of 7.7x, which is comparable |
|
257 | took 30 seconds in parallel, giving a speedup of 7.7x, which is comparable | |
258 | to the speedup observed in our previous example. |
|
258 | to the speedup observed in our previous example. | |
259 |
|
259 | |||
260 | .. image:: asian_call.* |
|
260 | .. image:: asian_call.* | |
261 |
|
261 | |||
262 | .. image:: asian_put.* |
|
262 | .. image:: asian_put.* | |
263 |
|
263 | |||
264 | Conclusion |
|
264 | Conclusion | |
265 | ========== |
|
265 | ========== | |
266 |
|
266 | |||
267 | To conclude these examples, we summarize the key features of IPython's |
|
267 | To conclude these examples, we summarize the key features of IPython's | |
268 | parallel architecture that have been demonstrated: |
|
268 | parallel architecture that have been demonstrated: | |
269 |
|
269 | |||
270 | * Serial code can often be parallelized with only a few extra lines of code. |
|
270 | * Serial code can often be parallelized with only a few extra lines of code. | |
271 | We have used the :class:`DirectView` and :class:`LoadBalancedView` classes |
|
271 | We have used the :class:`DirectView` and :class:`LoadBalancedView` classes | |
272 | for this purpose. |
|
272 | for this purpose. | |
273 | * The resulting parallel code can be run without ever leaving IPython's |
|
273 | * The resulting parallel code can be run without ever leaving IPython's | |
274 | interactive shell. |
|
274 | interactive shell. | |
275 | * Any data computed in parallel can be explored interactively through |
|
275 | * Any data computed in parallel can be explored interactively through | |
276 | visualization or further numerical calculations. |
|
276 | visualization or further numerical calculations. | |
277 | * We have run these examples on a cluster running Windows HPC Server 2008. |
|
277 | * We have run these examples on a cluster running Windows HPC Server 2008. | |
278 | IPython's built-in support for the Windows HPC job scheduler makes it |
|
278 | IPython's built-in support for the Windows HPC job scheduler makes it | |
279 | easy to get started with IPython's parallel capabilities. |
|
279 | easy to get started with IPython's parallel capabilities. | |
280 |
|
280 | |||
281 | .. note:: |
|
281 | .. note:: | |
282 |
|
282 | |||
283 | The newparallel code has never been run on Windows HPC Server, so the last |
|
283 | The newparallel code has never been run on Windows HPC Server, so the last | |
284 | conclusion is untested. |
|
284 | conclusion is untested. |
NO CONTENT: file renamed from docs/source/parallelz/parallel_details.txt to docs/source/parallel/parallel_details.txt
@@ -1,253 +1,253 b'' | |||||
1 | .. _ip1par: |
|
1 | .. _ip1par: | |
2 |
|
2 | |||
3 | ============================ |
|
3 | ============================ | |
4 | Overview and getting started |
|
4 | Overview and getting started | |
5 | ============================ |
|
5 | ============================ | |
6 |
|
6 | |||
7 | Introduction |
|
7 | Introduction | |
8 | ============ |
|
8 | ============ | |
9 |
|
9 | |||
10 | This section gives an overview of IPython's sophisticated and powerful |
|
10 | This section gives an overview of IPython's sophisticated and powerful | |
11 | architecture for parallel and distributed computing. This architecture |
|
11 | architecture for parallel and distributed computing. This architecture | |
12 | abstracts out parallelism in a very general way, which enables IPython to |
|
12 | abstracts out parallelism in a very general way, which enables IPython to | |
13 | support many different styles of parallelism including: |
|
13 | support many different styles of parallelism including: | |
14 |
|
14 | |||
15 | * Single program, multiple data (SPMD) parallelism. |
|
15 | * Single program, multiple data (SPMD) parallelism. | |
16 | * Multiple program, multiple data (MPMD) parallelism. |
|
16 | * Multiple program, multiple data (MPMD) parallelism. | |
17 | * Message passing using MPI. |
|
17 | * Message passing using MPI. | |
18 | * Task farming. |
|
18 | * Task farming. | |
19 | * Data parallelism. |
|
19 | * Data parallelism. | |
20 | * Combinations of these approaches. |
|
20 | * Combinations of these approaches. | |
21 | * Custom user defined approaches. |
|
21 | * Custom user defined approaches. | |
22 |
|
22 | |||
23 | Most importantly, IPython enables all types of parallel applications to |
|
23 | Most importantly, IPython enables all types of parallel applications to | |
24 | be developed, executed, debugged and monitored *interactively*. Hence, |
|
24 | be developed, executed, debugged and monitored *interactively*. Hence, | |
25 | the ``I`` in IPython. The following are some example use cases for IPython: |
|
25 | the ``I`` in IPython. The following are some example use cases for IPython: | |
26 |
|
26 | |||
27 | * Quickly parallelize algorithms that are embarrassingly parallel |
|
27 | * Quickly parallelize algorithms that are embarrassingly parallel | |
28 | using a number of simple approaches. Many simple things can be |
|
28 | using a number of simple approaches. Many simple things can be | |
29 | parallelized interactively in one or two lines of code. |
|
29 | parallelized interactively in one or two lines of code. | |
30 |
|
30 | |||
31 | * Steer traditional MPI applications on a supercomputer from an |
|
31 | * Steer traditional MPI applications on a supercomputer from an | |
32 | IPython session on your laptop. |
|
32 | IPython session on your laptop. | |
33 |
|
33 | |||
34 | * Analyze and visualize large datasets (that could be remote and/or |
|
34 | * Analyze and visualize large datasets (that could be remote and/or | |
35 | distributed) interactively using IPython and tools like |
|
35 | distributed) interactively using IPython and tools like | |
36 | matplotlib/TVTK. |
|
36 | matplotlib/TVTK. | |
37 |
|
37 | |||
38 | * Develop, test and debug new parallel algorithms |
|
38 | * Develop, test and debug new parallel algorithms | |
39 | (that may use MPI) interactively. |
|
39 | (that may use MPI) interactively. | |
40 |
|
40 | |||
41 | * Tie together multiple MPI jobs running on different systems into |
|
41 | * Tie together multiple MPI jobs running on different systems into | |
42 | one giant distributed and parallel system. |
|
42 | one giant distributed and parallel system. | |
43 |
|
43 | |||
44 | * Start a parallel job on your cluster and then have a remote |
|
44 | * Start a parallel job on your cluster and then have a remote | |
45 | collaborator connect to it and pull back data into their |
|
45 | collaborator connect to it and pull back data into their | |
46 | local IPython session for plotting and analysis. |
|
46 | local IPython session for plotting and analysis. | |
47 |
|
47 | |||
48 | * Run a set of tasks on a set of CPUs using dynamic load balancing. |
|
48 | * Run a set of tasks on a set of CPUs using dynamic load balancing. | |
49 |
|
49 | |||
50 | Architecture overview |
|
50 | Architecture overview | |
51 | ===================== |
|
51 | ===================== | |
52 |
|
52 | |||
53 | The IPython architecture consists of four components: |
|
53 | The IPython architecture consists of four components: | |
54 |
|
54 | |||
55 | * The IPython engine. |
|
55 | * The IPython engine. | |
56 | * The IPython hub. |
|
56 | * The IPython hub. | |
57 | * The IPython schedulers. |
|
57 | * The IPython schedulers. | |
58 | * The controller client. |
|
58 | * The controller client. | |
59 |
|
59 | |||
60 | These components live in the :mod:`IPython.parallel` package and are |
|
60 | These components live in the :mod:`IPython.parallel` package and are | |
61 | installed with IPython. They do, however, have additional dependencies |
|
61 | installed with IPython. They do, however, have additional dependencies | |
62 | that must be installed. For more information, see our |
|
62 | that must be installed. For more information, see our | |
63 | :ref:`installation documentation <install_index>`. |
|
63 | :ref:`installation documentation <install_index>`. | |
64 |
|
64 | |||
65 | .. TODO: include zmq in install_index |
|
65 | .. TODO: include zmq in install_index | |
66 |
|
66 | |||
67 | IPython engine |
|
67 | IPython engine | |
68 | --------------- |
|
68 | --------------- | |
69 |
|
69 | |||
70 | The IPython engine is a Python instance that takes Python commands over a |
|
70 | The IPython engine is a Python instance that takes Python commands over a | |
71 | network connection. Eventually, the IPython engine will be a full IPython |
|
71 | network connection. Eventually, the IPython engine will be a full IPython | |
72 | interpreter, but for now, it is a regular Python interpreter. The engine |
|
72 | interpreter, but for now, it is a regular Python interpreter. The engine | |
73 | can also handle incoming and outgoing Python objects sent over a network |
|
73 | can also handle incoming and outgoing Python objects sent over a network | |
74 | connection. When multiple engines are started, parallel and distributed |
|
74 | connection. When multiple engines are started, parallel and distributed | |
75 | computing becomes possible. An important feature of an IPython engine is |
|
75 | computing becomes possible. An important feature of an IPython engine is | |
76 | that it blocks while user code is being executed. Read on for how the |
|
76 | that it blocks while user code is being executed. Read on for how the | |
77 | IPython controller solves this problem to expose a clean asynchronous API |
|
77 | IPython controller solves this problem to expose a clean asynchronous API | |
78 | to the user. |
|
78 | to the user. | |
79 |
|
79 | |||
80 | IPython controller |
|
80 | IPython controller | |
81 | ------------------ |
|
81 | ------------------ | |
82 |
|
82 | |||
83 | The IPython controller processes provide an interface for working with a set of engines. |
|
83 | The IPython controller processes provide an interface for working with a set of engines. | |
84 | At a general level, the controller is a collection of processes to which IPython engines |
|
84 | At a general level, the controller is a collection of processes to which IPython engines | |
85 | and clients can connect. The controller is composed of a :class:`Hub` and a collection of |
|
85 | and clients can connect. The controller is composed of a :class:`Hub` and a collection of | |
86 | :class:`Schedulers`. These Schedulers are typically run in separate processes on the |
|
86 | :class:`Schedulers`. These Schedulers are typically run in separate processes on the | |
87 | same machine as the Hub, but they can be run anywhere, from local threads to remote machines. |
|
87 | same machine as the Hub, but they can be run anywhere, from local threads to remote machines. | |
88 |
|
88 | |||
89 | The controller also provides a single point of contact for users who wish to |
|
89 | The controller also provides a single point of contact for users who wish to | |
90 | utilize the engines connected to the controller. There are different ways of |
|
90 | utilize the engines connected to the controller. There are different ways of | |
91 | working with a controller. In IPython, all of these models are implemented via |
|
91 | working with a controller. In IPython, all of these models are implemented via | |
92 | the client's :meth:`.View.apply` method, with various arguments, or |
|
92 | the client's :meth:`.View.apply` method, with various arguments, or | |
93 | constructing :class:`.View` objects to represent subsets of engines. The two |
|
93 | constructing :class:`.View` objects to represent subsets of engines. The two | |
94 | primary models for interacting with engines are: |
|
94 | primary models for interacting with engines are: | |
95 |
|
95 | |||
96 | * A **Direct** interface, where engines are addressed explicitly. |
|
96 | * A **Direct** interface, where engines are addressed explicitly. | |
97 | * A **LoadBalanced** interface, where the Scheduler is trusted with assigning work to |
|
97 | * A **LoadBalanced** interface, where the Scheduler is trusted with assigning work to | |
98 | appropriate engines. |
|
98 | appropriate engines. | |
99 |
|
99 | |||
100 | Advanced users can readily extend the View models to enable other |
|
100 | Advanced users can readily extend the View models to enable other | |
101 | styles of parallelism. |
|
101 | styles of parallelism. | |
102 |
|
102 | |||
103 | .. note:: |
|
103 | .. note:: | |
104 |
|
104 | |||
105 | A single controller and set of engines can be used with multiple models |
|
105 | A single controller and set of engines can be used with multiple models | |
106 | simultaneously. This opens the door for lots of interesting things. |
|
106 | simultaneously. This opens the door for lots of interesting things. | |
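As a small sketch of what using both models against one cluster can look like (assuming a running cluster, started as described later in this document):

.. sourcecode:: python

    from IPython.parallel import Client

    rc = Client()
    dview = rc[:]                      # Direct: engines addressed explicitly
    lview = rc.load_balanced_view()    # LoadBalanced: the Scheduler assigns work

    dview.execute('import numpy')                # runs on every engine
    ar = lview.apply_async(lambda x: x ** 2, 7)  # runs on one available engine
    result = ar.get()                            # 49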
107 |
|
107 | |||
108 |
|
108 | |||
109 | The Hub |
|
109 | The Hub | |
110 | ******* |
|
110 | ******* | |
111 |
|
111 | |||
112 | The center of an IPython cluster is the Hub. This is the process that keeps |
|
112 | The center of an IPython cluster is the Hub. This is the process that keeps | |
113 | track of engine connections, schedulers, clients, as well as all task requests and |
|
113 | track of engine connections, schedulers, clients, as well as all task requests and | |
114 | results. The primary role of the Hub is to facilitate queries of the cluster state, and |
|
114 | results. The primary role of the Hub is to facilitate queries of the cluster state, and | |
115 | minimize the necessary information required to establish the many connections involved in |
|
115 | minimize the necessary information required to establish the many connections involved in | |
116 | connecting new clients and engines. |
|
116 | connecting new clients and engines. | |
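For example, a connected client can ask the Hub about the cluster state at any time. A short sketch (the exact output depends on your cluster):

.. sourcecode:: python

    from IPython.parallel import Client

    rc = Client()
    print(rc.ids)             # engine registrations, tracked by the Hub
    print(rc.queue_status())  # per-engine queue depths, also answered by the Hub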
117 |
|
117 | |||
118 |
|
118 | |||
119 | Schedulers |
|
119 | Schedulers | |
120 | ********** |
|
120 | ********** | |
121 |
|
121 | |||
122 | All actions that can be performed on the engine go through a Scheduler. While the engines |
|
122 | All actions that can be performed on the engine go through a Scheduler. While the engines | |
123 | themselves block when user code is run, the schedulers hide that from the user to provide |
|
123 | themselves block when user code is run, the schedulers hide that from the user to provide | |
124 | a fully asynchronous interface to a set of engines. |
|
124 | a fully asynchronous interface to a set of engines. | |
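Concretely, the asynchrony looks like this from the client's side (a sketch, assuming a connected :class:`Client` ``rc``):

.. sourcecode:: python

    import time

    view = rc[:]
    ar = view.apply_async(time.sleep, 5)  # returns at once; engines stay busy
    print(ar.ready())                     # False while the engines are working
    ar.wait()                             # block until every engine is done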
125 |
|
125 | |||
126 |
|
126 | |||
127 | IPython client and views |
|
127 | IPython client and views | |
128 | ------------------------ |
|
128 | ------------------------ | |
129 |
|
129 | |||
130 | There is one primary object, the :class:`~.parallel.Client`, for connecting to a cluster. |
|
130 | There is one primary object, the :class:`~.parallel.Client`, for connecting to a cluster. | |
131 | For each execution model, there is a corresponding :class:`~.parallel.view.View`. These views |
|
131 | For each execution model, there is a corresponding :class:`~.parallel.view.View`. These views | |
132 | allow users to interact with a set of engines through the interface. Here are the two default |
|
132 | allow users to interact with a set of engines through the interface. Here are the two default | |
133 | views: |
|
133 | views: | |
134 |
|
134 | |||
135 | * The :class:`DirectView` class for explicit addressing. |
|
135 | * The :class:`DirectView` class for explicit addressing. | |
136 | * The :class:`LoadBalancedView` class for destination-agnostic scheduling. |
|
136 | * The :class:`LoadBalancedView` class for destination-agnostic scheduling. | |
137 |
|
137 | |||
138 | Security |
|
138 | Security | |
139 | -------- |
|
139 | -------- | |
140 |
|
140 | |||
141 | IPython uses ZeroMQ for networking, which has provided many advantages, but |
|
141 | IPython uses ZeroMQ for networking, which has provided many advantages, but | |
142 | one of the setbacks is its utter lack of security [ZeroMQ]_. By default, no IPython |
|
142 | one of the setbacks is its utter lack of security [ZeroMQ]_. By default, no IPython | |
143 | connections are encrypted, but open ports only listen on localhost. The only |
|
143 | connections are encrypted, but open ports only listen on localhost. The only | |
144 | source of security for IPython is via SSH tunnels. IPython supports both shell |
|
144 | source of security for IPython is via SSH tunnels. IPython supports both shell | |
145 | (`openssh`) and `paramiko` based tunnels for connections. There is a key necessary |
|
145 | (`openssh`) and `paramiko` based tunnels for connections. There is a key necessary | |
146 | to submit requests, but due to the lack of encryption, it does not provide |
|
146 | to submit requests, but due to the lack of encryption, it does not provide | |
147 | significant security if loopback traffic is compromised. |
|
147 | significant security if loopback traffic is compromised. | |
148 |
|
148 | |||
149 | In our architecture, the controller is the only process that listens on |
|
149 | In our architecture, the controller is the only process that listens on | |
150 | network ports, and is thus the main point of vulnerability. The standard model |
|
150 | network ports, and is thus the main point of vulnerability. The standard model | |
151 | for secure connections is to designate that the controller listen on |
|
151 | for secure connections is to designate that the controller listen on | |
152 | localhost, and use ssh-tunnels to connect clients and/or |
|
152 | localhost, and use ssh-tunnels to connect clients and/or | |
153 | engines. |
|
153 | engines. | |
154 |
|
154 | |||
155 | To connect and authenticate to the controller, an engine or client needs |
|
155 | To connect and authenticate to the controller, an engine or client needs | |
156 | some information that the controller has stored in a JSON file. |
|
156 | some information that the controller has stored in a JSON file. | |
157 | Thus, the JSON files need to be copied to a location where |
|
157 | Thus, the JSON files need to be copied to a location where | |
158 | the clients and engines can find them. Typically, this is the |
|
158 | the clients and engines can find them. Typically, this is the | |
159 | :file:`~/.ipython/cluster |
|
159 | :file:`~/.ipython/cluster_default/security` directory on the host where the |
160 | client/engine is running (which could be a different host than the controller). |
|
160 | client/engine is running (which could be a different host than the controller). | |
161 | Once the JSON files are copied over, everything should work fine. |
|
161 | Once the JSON files are copied over, everything should work fine. | |
162 |
|
162 | |||
163 | Currently, there are two JSON files that the controller creates: |
|
163 | Currently, there are two JSON files that the controller creates: | |
164 |
|
164 | |||
165 | ipcontroller-engine.json |
|
165 | ipcontroller-engine.json | |
166 | This JSON file has the information necessary for an engine to connect |
|
166 | This JSON file has the information necessary for an engine to connect | |
167 | to a controller. |
|
167 | to a controller. | |
168 |
|
168 | |||
169 | ipcontroller-client.json |
|
169 | ipcontroller-client.json | |
170 | The client's connection information. This may not differ from the engine's, |
|
170 | The client's connection information. This may not differ from the engine's, | |
171 | but since the controller may listen on different ports for clients and |
|
171 | but since the controller may listen on different ports for clients and | |
172 | engines, it is stored separately. |
|
172 | engines, it is stored separately. | |
173 |
|
173 | |||
174 | More details of how these JSON files are used are given below. |
|
174 | More details of how these JSON files are used are given below. | |
175 |
|
175 | |||
176 | A detailed description of the security model and its implementation in IPython |
|
176 | A detailed description of the security model and its implementation in IPython | |
177 | can be found :ref:`here <parallelsecurity>`. |
|
177 | can be found :ref:`here <parallelsecurity>`. | |
178 |
|
178 | |||
179 | .. warning:: |
|
179 | .. warning:: | |
180 |
|
180 | |||
181 | Even at its most secure, the Controller listens on ports on localhost, and |
|
181 | Even at its most secure, the Controller listens on ports on localhost, and | |
182 | every time you make a tunnel, you open a localhost port on the connecting |
|
182 | every time you make a tunnel, you open a localhost port on the connecting | |
183 | machine that points to the Controller. If localhost on the Controller's |
|
183 | machine that points to the Controller. If localhost on the Controller's | |
184 | machine, or the machine of any client or engine, is untrusted, then your |
|
184 | machine, or the machine of any client or engine, is untrusted, then your | |
185 | Controller is insecure. There is no way around this with ZeroMQ. |
|
185 | Controller is insecure. There is no way around this with ZeroMQ. | |
186 |
|
186 | |||
187 |
|
187 | |||
188 |
|
188 | |||
189 | Getting Started |
|
189 | Getting Started | |
190 | =============== |
|
190 | =============== | |
191 |
|
191 | |||
192 | To use IPython for parallel computing, you need to start one instance of the |
|
192 | To use IPython for parallel computing, you need to start one instance of the | |
193 | controller and one or more instances of the engine. Initially, it is best to |
|
193 | controller and one or more instances of the engine. Initially, it is best to | |
194 | simply start a controller and engines on a single host using the |
|
194 | simply start a controller and engines on a single host using the | |
195 | :command:`ipcluster |
|
195 | :command:`ipcluster` command. To start a controller and 4 engines on your |
196 | localhost, just do:: |
|
196 | localhost, just do:: | |
197 |
|
197 | |||
198 | $ ipcluster |
|
198 | $ ipcluster start -n 4 |
199 |
|
199 | |||
200 | More details about starting the IPython controller and engines can be found |
|
200 | More details about starting the IPython controller and engines can be found | |
201 | :ref:`here <parallel_process>`. |
|
201 | :ref:`here <parallel_process>`. | |
202 |
|
202 | |||
203 | Once you have started the IPython controller and one or more engines, you |
|
203 | Once you have started the IPython controller and one or more engines, you | |
204 | are ready to use the engines to do something useful. To make sure |
|
204 | are ready to use the engines to do something useful. To make sure | |
205 | everything is working correctly, try the following commands: |
|
205 | everything is working correctly, try the following commands: | |
206 |
|
206 | |||
207 | .. sourcecode:: ipython |
|
207 | .. sourcecode:: ipython | |
208 |
|
208 | |||
209 | In [1]: from IPython.parallel import Client |
|
209 | In [1]: from IPython.parallel import Client | |
210 |
|
210 | |||
211 | In [2]: c = Client() |
|
211 | In [2]: c = Client() | |
212 |
|
212 | |||
213 | In [4]: c.ids |
|
213 | In [4]: c.ids | |
214 | Out[4]: set([0, 1, 2, 3]) |
|
214 | Out[4]: set([0, 1, 2, 3]) | |
215 |
|
215 | |||
216 | In [5]: c[:].apply_sync(lambda : "Hello, World") |
|
216 | In [5]: c[:].apply_sync(lambda : "Hello, World") | |
217 | Out[5]: [ 'Hello, World', 'Hello, World', 'Hello, World', 'Hello, World' ] |
|
217 | Out[5]: [ 'Hello, World', 'Hello, World', 'Hello, World', 'Hello, World' ] | |
218 |
|
218 | |||
219 |
|
219 | |||
220 | When a client is created with no arguments, the client tries to find the corresponding JSON file |
|
220 | When a client is created with no arguments, the client tries to find the corresponding JSON file | |
221 | in the local `~/.ipython/cluster |
|
221 | in the local `~/.ipython/cluster_default/security` directory. If you specified a profile, |
222 | you can use it with the Client. This should cover most cases: |
|
222 | you can use it with the Client. This should cover most cases: | |
223 |
|
223 | |||
224 | .. sourcecode:: ipython |
|
224 | .. sourcecode:: ipython | |
225 |
|
225 | |||
226 | In [2]: c = Client(profile='myprofile') |
|
226 | In [2]: c = Client(profile='myprofile') | |
227 |
|
227 | |||
228 | If you have put the JSON file in a different location or it has a different name, create the |
|
228 | If you have put the JSON file in a different location or it has a different name, create the | |
229 | client like this: |
|
229 | client like this: | |
230 |
|
230 | |||
231 | .. sourcecode:: ipython |
|
231 | .. sourcecode:: ipython | |
232 |
|
232 | |||
233 | In [2]: c = Client('/path/to/my/ipcontroller-client.json') |
|
233 | In [2]: c = Client('/path/to/my/ipcontroller-client.json') | |
234 |
|
234 | |||
235 | Remember, a client needs to be able to see the Hub's ports to connect. So if the Hub is on a |
|
235 | Remember, a client needs to be able to see the Hub's ports to connect. So if the Hub is on a | |
236 | different machine, you may need to use an SSH server to tunnel access to that machine. |
|
236 | different machine, you may need to use an SSH server to tunnel access to that machine. | |
237 | In that case, you would connect to it with: |
|
237 | In that case, you would connect to it with: | |
238 |
|
238 | |||
239 | .. sourcecode:: ipython |
|
239 | .. sourcecode:: ipython | |
240 |
|
240 | |||
241 | In [2]: c = Client(sshserver='myhub.example.com') |
|
241 | In [2]: c = Client(sshserver='myhub.example.com') | |
242 |
|
242 | |||
243 | Where ``myhub.example.com`` is the URL or IP address of the machine on |
|
243 | Where ``myhub.example.com`` is the URL or IP address of the machine on | |
244 | which the Hub process is running (or another machine that has direct access to the Hub's ports). |
|
244 | which the Hub process is running (or another machine that has direct access to the Hub's ports). | |
245 |
|
245 | |||
246 | The SSH server may already be specified in :file:`ipcontroller-client.json`, if the controller was |
|
246 | The SSH server may already be specified in :file:`ipcontroller-client.json`, if the controller was | |
247 | so instructed at launch time. |
|
247 | so instructed at launch time. | |
248 |
|
248 | |||
249 | You are now ready to learn more about the :ref:`Direct |
|
249 | You are now ready to learn more about the :ref:`Direct | |
250 | <parallel_multiengine>` and :ref:`LoadBalanced <parallel_task>` interfaces to the |
|
250 | <parallel_multiengine>` and :ref:`LoadBalanced <parallel_task>` interfaces to the | |
251 | controller. |
|
251 | controller. | |
252 |
|
252 | |||
253 | .. [ZeroMQ] ZeroMQ. http://www.zeromq.org |
|
253 | .. [ZeroMQ] ZeroMQ. http://www.zeromq.org |
@@ -1,156 +1,156 b'' | |||||
1 | .. _parallelmpi: |
|
1 | .. _parallelmpi: | |
2 |
|
2 | |||
3 | ======================= |
|
3 | ======================= | |
4 | Using MPI with IPython |
|
4 | Using MPI with IPython | |
5 | ======================= |
|
5 | ======================= | |
6 |
|
6 | |||
7 | .. note:: |
|
7 | .. note:: | |
8 |
|
8 | |||
9 | Not adapted to zmq yet |
|
9 | Not adapted to zmq yet | |
10 | This is also out of date with respect to :command:`ipcluster` in general |
|
10 | This is also out of date with respect to :command:`ipcluster` in general | |
11 |
|
11 | |||
12 | Often, a parallel algorithm will require moving data between the engines. One |
|
12 | Often, a parallel algorithm will require moving data between the engines. One | |
13 | way of accomplishing this is by doing a pull and then a push using the |
|
13 | way of accomplishing this is by doing a pull and then a push using the | |
14 | multiengine client. However, this will be slow as all the data has to go |
|
14 | multiengine client. However, this will be slow as all the data has to go | |
15 | through the controller to the client and then back through the controller, to |
|
15 | through the controller to the client and then back through the controller, to | |
16 | its final destination. |
|
16 | its final destination. | |
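For illustration, the client-mediated pattern amounts to something like this sketch (assuming a :class:`DirectView` ``dview`` over the engines and a variable ``a`` on engine 0; every byte of ``a`` makes two round trips through the controller):

.. sourcecode:: python

    # pull 'a' from engine 0 to the client, then push it out to engine 1
    a = dview.pull('a', targets=0, block=True)
    dview.push({'a': a}, targets=1, block=True)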
17 |
|
17 | |||
18 | A much better way of moving data between engines is to use a message passing |
|
18 | A much better way of moving data between engines is to use a message passing | |
19 | library, such as the Message Passing Interface (MPI) [MPI]_. IPython's |
|
19 | library, such as the Message Passing Interface (MPI) [MPI]_. IPython's | |
20 | parallel computing architecture has been designed from the ground up to |
|
20 | parallel computing architecture has been designed from the ground up to | |
21 | integrate with MPI. This document describes how to use MPI with IPython. |
|
21 | integrate with MPI. This document describes how to use MPI with IPython. | |
22 |
|
22 | |||
23 | Additional installation requirements |
|
23 | Additional installation requirements | |
24 | ==================================== |
|
24 | ==================================== | |
25 |
|
25 | |||
26 | If you want to use MPI with IPython, you will need to install: |
|
26 | If you want to use MPI with IPython, you will need to install: | |
27 |
|
27 | |||
28 | * A standard MPI implementation such as OpenMPI [OpenMPI]_ or MPICH. |
|
28 | * A standard MPI implementation such as OpenMPI [OpenMPI]_ or MPICH. | |
29 | * The mpi4py [mpi4py]_ package. |
|
29 | * The mpi4py [mpi4py]_ package. | |
30 |
|
30 | |||
31 | .. note:: |
|
31 | .. note:: | |
32 |
|
32 | |||
33 | The mpi4py package is not a strict requirement. However, you need to |
|
33 | The mpi4py package is not a strict requirement. However, you need to | |
34 | have *some* way of calling MPI from Python. You also need some way of |
|
34 | have *some* way of calling MPI from Python. You also need some way of | |
35 | making sure that :func:`MPI_Init` is called when the IPython engines start |
|
35 | making sure that :func:`MPI_Init` is called when the IPython engines start | |
36 | up. There are a number of ways of doing this and a good number of |
|
36 | up. There are a number of ways of doing this and a good number of | |
37 | associated subtleties. We highly recommend just using mpi4py as it |
|
37 | associated subtleties. We highly recommend just using mpi4py as it | |
38 | takes care of most of these problems. If you want to do something |
|
38 | takes care of most of these problems. If you want to do something | |
39 | different, let us know and we can help you get started. |
|
39 | different, let us know and we can help you get started. | |
40 |
|
40 | |||
41 | Starting the engines with MPI enabled |
|
41 | Starting the engines with MPI enabled | |
42 | ===================================== |
|
42 | ===================================== | |
43 |
|
43 | |||
44 | To use code that calls MPI, there are typically two things that MPI requires. |
|
44 | To use code that calls MPI, there are typically two things that MPI requires. | |
45 |
|
45 | |||
46 | 1. The process that wants to call MPI must be started using |
|
46 | 1. The process that wants to call MPI must be started using | |
47 | :command:`mpiexec` or a batch system (like PBS) that has MPI support. |
|
47 | :command:`mpiexec` or a batch system (like PBS) that has MPI support. | |
48 | 2. Once the process starts, it must call :func:`MPI_Init`. |
|
48 | 2. Once the process starts, it must call :func:`MPI_Init`. | |
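For reference, mpi4py takes care of the second requirement automatically: importing it calls :func:`MPI_Init` by default. A minimal check that can be run in any MPI-launched process:

.. sourcecode:: python

    from mpi4py import MPI  # MPI_Init is called here, on import

    comm = MPI.COMM_WORLD
    print("rank %i of %i" % (comm.Get_rank(), comm.Get_size()))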
49 |
|
49 | |||
50 | There are a couple of ways that you can start the IPython engines and get |
|
50 | There are a couple of ways that you can start the IPython engines and get | |
51 | these things to happen. |
|
51 | these things to happen. | |
52 |
|
52 | |||
53 | Automatic starting using :command:`mpiexec` and :command:`ipcluster |
|
53 | Automatic starting using :command:`mpiexec` and :command:`ipcluster` |
54 | -------------------------------------------------------------------- |
|
54 | -------------------------------------------------------------------- | |
55 |
|
55 | |||
56 | The easiest approach is to use the `mpiexec` mode of :command:`ipcluster |
|
56 | The easiest approach is to use the `mpiexec` mode of :command:`ipcluster`, |
57 | which will first start a controller and then a set of engines using |
|
57 | which will first start a controller and then a set of engines using | |
58 | :command:`mpiexec`:: |
|
58 | :command:`mpiexec`:: | |
59 |
|
59 | |||
60 | $ ipcluster |
|
60 | $ ipcluster mpiexec -n 4 |
61 |
|
61 | |||
62 | This approach is best as interrupting :command:`ipcluster |
|
62 | This approach is best because interrupting :command:`ipcluster` will automatically |
63 | stop and clean up the controller and engines. |
|
63 | stop and clean up the controller and engines. | |
64 |
|
64 | |||
65 | Manual starting using :command:`mpiexec` |
|
65 | Manual starting using :command:`mpiexec` | |
66 | ---------------------------------------- |
|
66 | ---------------------------------------- | |
67 |
|
67 | |||
68 | If you want to start the IPython engines using :command:`mpiexec`, just |
|
68 | If you want to start the IPython engines using :command:`mpiexec`, just | |
69 | do:: |
|
69 | do:: | |
70 |
|
70 | |||
71 | $ mpiexec -n 4 ipengine |
|
71 | $ mpiexec -n 4 ipengine --mpi=mpi4py |
72 |
|
72 | |||
73 | This requires that you already have a controller running and that the |
|
73 | This requires that you already have a controller running and that the | |
74 | engines' connection files are in place. We also have built-in support for |
|
74 | engines' connection files are in place. We also have built-in support for | |
75 | PyTrilinos [PyTrilinos]_, which can be used (assuming it is installed) by |
|
75 | PyTrilinos [PyTrilinos]_, which can be used (assuming it is installed) by | |
76 | starting the engines with:: |
|
76 | starting the engines with:: | |
77 |
|
77 | |||
78 |
$ mpiexec -n 4 ipengine |
|
78 | $ mpiexec -n 4 ipengine --mpi=pytrilinos | |
79 |
|
79 | |||
80 | Automatic starting using PBS and :command:`ipcluster |
|
80 | Automatic starting using PBS and :command:`ipcluster` |
81 | ------------------------------------------------------ |
|
81 | ------------------------------------------------------ | |
82 |
|
82 | |||
83 | The :command:`ipcluster |
|
83 | The :command:`ipcluster` command also has built-in integration with PBS. For |
84 | more information on this approach, see our documentation on :ref:`ipcluster |
|
84 | more information on this approach, see our documentation on :ref:`ipcluster | |
85 | <parallel_process>`. |
|
85 | <parallel_process>`. | |
86 |
|
86 | |||
87 | Actually using MPI |
|
87 | Actually using MPI | |
88 | ================== |
|
88 | ================== | |
89 |
|
89 | |||
90 | Once the engines are running with MPI enabled, you are ready to go. You can |
|
90 | Once the engines are running with MPI enabled, you are ready to go. You can | |
91 | now call any code that uses MPI in the IPython engines. And, all of this can |
|
91 | now call any code that uses MPI in the IPython engines. And, all of this can | |
92 | be done interactively. Here we show a simple example that uses mpi4py |
|
92 | be done interactively. Here we show a simple example that uses mpi4py | |
93 | [mpi4py]_ version 1.1.0 or later. |
|
93 | [mpi4py]_ version 1.1.0 or later. | |
94 |
|
94 | |||
95 | First, let's define a simple function that uses MPI to calculate the sum of a |
|
95 | First, let's define a simple function that uses MPI to calculate the sum of a | |
96 | distributed array. Save the following text in a file called :file:`psum.py`: |
|
96 | distributed array. Save the following text in a file called :file:`psum.py`: | |
97 |
|
97 | |||
98 | .. sourcecode:: python |
|
98 | .. sourcecode:: python | |
99 |
|
99 | |||
100 | from mpi4py import MPI |
|
100 | from mpi4py import MPI | |
101 | import numpy as np |
|
101 | import numpy as np | |
102 |
|
102 | |||
103 | def psum(a): |
|
103 | def psum(a): | |
104 | s = np.sum(a) |
|
104 | s = np.sum(a) | |
105 | rcvBuf = np.array(0.0,'d') |
|
105 | rcvBuf = np.array(0.0,'d') | |
106 | MPI.COMM_WORLD.Allreduce([s, MPI.DOUBLE], |
|
106 | MPI.COMM_WORLD.Allreduce([s, MPI.DOUBLE], | |
107 | [rcvBuf, MPI.DOUBLE], |
|
107 | [rcvBuf, MPI.DOUBLE], | |
108 | op=MPI.SUM) |
|
108 | op=MPI.SUM) | |
109 | return rcvBuf |
|
109 | return rcvBuf | |
110 |
|
110 | |||
111 | Now, start an IPython cluster:: |
|
111 | Now, start an IPython cluster:: | |
112 |
|
112 | |||
113 | $ ipcluster |
|
113 | $ ipcluster start -p mpi -n 4 |
114 |
|
114 | |||
115 | .. note:: |
|
115 | .. note:: | |
116 |
|
116 | |||
117 | It is assumed here that the mpi profile has been set up, as described :ref:`here |
|
117 | It is assumed here that the mpi profile has been set up, as described :ref:`here | |
118 | <parallel_process>`. |
|
118 | <parallel_process>`. | |
119 |
|
119 | |||
120 | Finally, connect to the cluster and use this function interactively. In this |
|
120 | Finally, connect to the cluster and use this function interactively. In this | |
121 | case, we create a random array on each engine and sum up all the random arrays |
|
121 | case, we create a random array on each engine and sum up all the random arrays | |
122 | using our :func:`psum` function: |
|
122 | using our :func:`psum` function: | |
123 |
|
123 | |||
124 | .. sourcecode:: ipython |
|
124 | .. sourcecode:: ipython | |
125 |
|
125 | |||
126 | In [1]: from IPython.parallel import Client |
|
126 | In [1]: from IPython.parallel import Client | |
127 |
|
127 | |||
128 | In [2]: %load_ext parallelmagic |
|
128 | In [2]: %load_ext parallelmagic | |
129 |
|
129 | |||
130 | In [3]: c = Client(profile='mpi') |
|
130 | In [3]: c = Client(profile='mpi') | |
131 |
|
131 | |||
132 | In [4]: view = c[:] |
|
132 | In [4]: view = c[:] | |
133 |
|
133 | |||
134 | In [5]: view.activate() |
|
134 | In [5]: view.activate() | |
135 |
|
135 | |||
136 | # run the contents of the file on each engine: |
|
136 | # run the contents of the file on each engine: | |
137 | In [6]: view.run('psum.py') |
|
137 | In [6]: view.run('psum.py') | |
138 |
|
138 | |||
139 | In [7]: px a = np.random.rand(100) |
|
139 | In [7]: px a = np.random.rand(100) | |
140 | Parallel execution on engines: [0,1,2,3] |
|
140 | Parallel execution on engines: [0,1,2,3] | |
141 |
|
141 | |||
142 | In [8]: px s = psum(a) |
|
142 | In [8]: px s = psum(a) | |
143 | Parallel execution on engines: [0,1,2,3] |
|
143 | Parallel execution on engines: [0,1,2,3] | |
144 |
|
144 | |||
145 | In [9]: view['s'] |
|
145 | In [9]: view['s'] | |
146 | Out[9]: [187.451545803,187.451545803,187.451545803,187.451545803] |
|
146 | Out[9]: [187.451545803,187.451545803,187.451545803,187.451545803] | |
147 |
|
147 | |||
148 | Any Python code that makes calls to MPI can be used in this manner, including |
|
148 | Any Python code that makes calls to MPI can be used in this manner, including | |
149 | compiled C, C++ and Fortran libraries that have been exposed to Python. |
|
149 | compiled C, C++ and Fortran libraries that have been exposed to Python. | |
150 |
|
150 | |||
151 | .. [MPI] Message Passing Interface. http://www-unix.mcs.anl.gov/mpi/ |
|
151 | .. [MPI] Message Passing Interface. http://www-unix.mcs.anl.gov/mpi/ | |
152 | .. [mpi4py] MPI for Python. mpi4py: http://mpi4py.scipy.org/ |
|
152 | .. [mpi4py] MPI for Python. mpi4py: http://mpi4py.scipy.org/ | |
153 | .. [OpenMPI] Open MPI. http://www.open-mpi.org/ |
|
153 | .. [OpenMPI] Open MPI. http://www.open-mpi.org/ | |
154 | .. [PyTrilinos] PyTrilinos. http://trilinos.sandia.gov/packages/pytrilinos/ |
|
154 | .. [PyTrilinos] PyTrilinos. http://trilinos.sandia.gov/packages/pytrilinos/ | |
155 |
|
155 | |||
156 |
|
156 |
@@ -1,843 +1,843 b'' | |||||
1 | .. _parallel_multiengine: |
|
1 | .. _parallel_multiengine: | |
2 |
|
2 | |||
3 | ========================== |
|
3 | ========================== | |
4 | IPython's Direct interface |
|
4 | IPython's Direct interface | |
5 | ========================== |
|
5 | ========================== | |
6 |
|
6 | |||
7 | The direct, or multiengine, interface represents one possible way of working with a set of |
|
7 | The direct, or multiengine, interface represents one possible way of working with a set of | |
8 | IPython engines. The basic idea behind the multiengine interface is that the |
|
8 | IPython engines. The basic idea behind the multiengine interface is that the | |
9 | capabilities of each engine are directly and explicitly exposed to the user. |
|
9 | capabilities of each engine are directly and explicitly exposed to the user. | |
10 | Thus, in the multiengine interface, each engine is given an id that is used to |
|
10 | Thus, in the multiengine interface, each engine is given an id that is used to | |
11 | identify the engine and give it work to do. This interface is very intuitive, |
|
11 | identify the engine and give it work to do. This interface is very intuitive, | |
12 | is designed with interactive usage in mind, and is the best place for |
|
12 | is designed with interactive usage in mind, and is the best place for | |
13 | new users of IPython to begin. |
|
13 | new users of IPython to begin. | |
14 |
|
14 | |||
15 | Starting the IPython controller and engines |
|
15 | Starting the IPython controller and engines | |
16 | =========================================== |
|
16 | =========================================== | |
17 |
|
17 | |||
18 | To follow along with this tutorial, you will need to start the IPython |
|
18 | To follow along with this tutorial, you will need to start the IPython | |
19 | controller and four IPython engines. The simplest way of doing this is to use |
|
19 | controller and four IPython engines. The simplest way of doing this is to use | |
20 | the :command:`ipcluster |
|
20 | the :command:`ipcluster` command:: |
21 |
|
21 | |||
22 | $ ipcluster |
|
22 | $ ipcluster start -n 4 |
23 |
|
23 | |||
24 | For more detailed information about starting the controller and engines, see |
|
24 | For more detailed information about starting the controller and engines, see | |
25 | our :ref:`introduction <ip1par>` to using IPython for parallel computing. |
|
25 | our :ref:`introduction <ip1par>` to using IPython for parallel computing. | |
26 |
|
26 | |||
27 | Creating a ``Client`` instance |
|
27 | Creating a ``Client`` instance | |
28 | ============================== |
|
28 | ============================== | |
29 |
|
29 | |||
30 | The first step is to import the :mod:`IPython.parallel` |
|
30 | The first step is to import the :mod:`IPython.parallel` | |
31 | module and then create a :class:`.Client` instance: |
|
31 | module and then create a :class:`.Client` instance: | |
32 |
|
32 | |||
33 | .. sourcecode:: ipython |
|
33 | .. sourcecode:: ipython | |
34 |
|
34 | |||
35 | In [1]: from IPython.parallel import Client |
|
35 | In [1]: from IPython.parallel import Client | |
36 |
|
36 | |||
37 | In [2]: rc = Client() |
|
37 | In [2]: rc = Client() | |
38 |
|
38 | |||
39 | This form assumes that the default connection information (stored in |
|
39 | This form assumes that the default connection information (stored in | |
40 | :file:`ipcontroller-client.json` found in :file:`IPYTHON_DIR/cluster |
|
40 | :file:`ipcontroller-client.json` found in :file:`IPYTHON_DIR/cluster_default/security`) is |
41 | accurate. If the controller was started on a remote machine, you must copy that connection |
|
41 | accurate. If the controller was started on a remote machine, you must copy that connection | |
42 | file to the client machine, or enter its contents as arguments to the Client constructor: |
|
42 | file to the client machine, or enter its contents as arguments to the Client constructor: | |
43 |
|
43 | |||
44 | .. sourcecode:: ipython |
|
44 | .. sourcecode:: ipython | |
45 |
|
45 | |||
46 | # If you have copied the json connector file from the controller: |
|
46 | # If you have copied the json connector file from the controller: | |
47 | In [2]: rc = Client('/path/to/ipcontroller-client.json') |
|
47 | In [2]: rc = Client('/path/to/ipcontroller-client.json') | |
48 | # or to connect with a specific profile you have set up: |
|
48 | # or to connect with a specific profile you have set up: | |
49 | In [3]: rc = Client(profile='mpi') |
|
49 | In [3]: rc = Client(profile='mpi') | |
50 |
|
50 | |||
51 |
|
51 | |||
52 | To make sure there are engines connected to the controller, users can get a list |
|
52 | To make sure there are engines connected to the controller, users can get a list | |
53 | of engine ids: |
|
53 | of engine ids: | |
54 |
|
54 | |||
55 | .. sourcecode:: ipython |
|
55 | .. sourcecode:: ipython | |
56 |
|
56 | |||
57 | In [3]: rc.ids |
|
57 | In [3]: rc.ids | |
58 | Out[3]: [0, 1, 2, 3] |
|
58 | Out[3]: [0, 1, 2, 3] | |
59 |
|
59 | |||
60 | Here we see that there are four engines ready to do work for us. |
|
60 | Here we see that there are four engines ready to do work for us. | |
61 |
|
61 | |||
62 | For direct execution, we will make use of a :class:`DirectView` object, which can be |
|
62 | For direct execution, we will make use of a :class:`DirectView` object, which can be | |
63 | constructed via list-access to the client: |
|
63 | constructed via list-access to the client: | |
64 |
|
64 | |||
65 | .. sourcecode:: ipython |
|
65 | .. sourcecode:: ipython | |
66 |
|
66 | |||
67 | In [4]: dview = rc[:] # use all engines |
|
67 | In [4]: dview = rc[:] # use all engines | |
68 |
|
68 | |||
69 | .. seealso:: |
|
69 | .. seealso:: | |
70 |
|
70 | |||
71 | For more information, see the in-depth explanation of :ref:`Views <parallel_details>`. |
|
71 | For more information, see the in-depth explanation of :ref:`Views <parallel_details>`. | |
72 |
|
72 | |||
73 |
|
73 | |||
74 | Quick and easy parallelism |
|
74 | Quick and easy parallelism | |
75 | ========================== |
|
75 | ========================== | |
76 |
|
76 | |||
77 | In many cases, you simply want to apply a Python function to a sequence of |
|
77 | In many cases, you simply want to apply a Python function to a sequence of | |
78 | objects, but *in parallel*. The client interface provides a simple way |
|
78 | objects, but *in parallel*. The client interface provides a simple way | |
79 | of accomplishing this: using the DirectView's :meth:`~DirectView.map` method. |
|
79 | of accomplishing this: using the DirectView's :meth:`~DirectView.map` method. | |
80 |
|
80 | |||
81 | Parallel map |
|
81 | Parallel map | |
82 | ------------ |
|
82 | ------------ | |
83 |
|
83 | |||
84 | Python's builtin :func:`map` function allows a function to be applied to a |
|
84 | Python's builtin :func:`map` function allows a function to be applied to a | |
85 | sequence element-by-element. This type of code is typically trivial to |
|
85 | sequence element-by-element. This type of code is typically trivial to | |
86 | parallelize. In fact, since IPython's interface is all about functions anyway, |
|
86 | parallelize. In fact, since IPython's interface is all about functions anyway, | |
87 | you can just use the builtin :func:`map` with a :class:`RemoteFunction`, or a |
|
87 | you can just use the builtin :func:`map` with a :class:`RemoteFunction`, or a | |
88 | DirectView's :meth:`map` method: |
|
88 | DirectView's :meth:`map` method: | |
89 |
|
89 | |||
90 | .. sourcecode:: ipython |
|
90 | .. sourcecode:: ipython | |
91 |
|
91 | |||
92 | In [62]: serial_result = map(lambda x:x**10, range(32)) |
|
92 | In [62]: serial_result = map(lambda x:x**10, range(32)) | |
93 |
|
93 | |||
94 | In [63]: parallel_result = dview.map_sync(lambda x: x**10, range(32)) |
|
94 | In [63]: parallel_result = dview.map_sync(lambda x: x**10, range(32)) | |
95 |
|
95 | |||
96 | In [67]: serial_result==parallel_result |
|
96 | In [67]: serial_result==parallel_result | |
97 | Out[67]: True |
|
97 | Out[67]: True | |
98 |
|
98 | |||
99 |
|
99 | |||
100 | .. note:: |
|
100 | .. note:: | |
101 |
|
101 | |||
102 | The :class:`DirectView`'s version of :meth:`map` does |
|
102 | The :class:`DirectView`'s version of :meth:`map` does | |
103 | not do dynamic load balancing. For a load balanced version, use a |
|
103 | not do dynamic load balancing. For a load balanced version, use a | |
104 | :class:`LoadBalancedView`. |
|
104 | :class:`LoadBalancedView`. | |
105 |
|
105 | |||
106 | .. seealso:: |
|
106 | .. seealso:: | |
107 |
|
107 | |||
108 | :meth:`map` is implemented via :class:`ParallelFunction`. |
|
108 | :meth:`map` is implemented via :class:`ParallelFunction`. | |
109 |
|
109 | |||
110 | Remote function decorators |
|
110 | Remote function decorators | |
111 | -------------------------- |
|
111 | -------------------------- | |
112 |
|
112 | |||
113 | Remote functions are just like normal functions, but when they are called, |
|
113 | Remote functions are just like normal functions, but when they are called, | |
114 | they execute on one or more engines, rather than locally. IPython provides |
|
114 | they execute on one or more engines, rather than locally. IPython provides | |
115 | two decorators: |
|
115 | two decorators: | |
116 |
|
116 | |||
117 | .. sourcecode:: ipython |
|
117 | .. sourcecode:: ipython | |
118 |
|
118 | |||
119 | In [10]: @dview.remote(block=True) |
|
119 | In [10]: @dview.remote(block=True) | |
120 | ...: def getpid(): |
|
120 | ...: def getpid(): | |
121 | ...: import os |
|
121 | ...: import os | |
122 | ...: return os.getpid() |
|
122 | ...: return os.getpid() | |
123 | ...: |
|
123 | ...: | |
124 |
|
124 | |||
125 | In [11]: getpid() |
|
125 | In [11]: getpid() | |
126 | Out[11]: [12345, 12346, 12347, 12348] |
|
126 | Out[11]: [12345, 12346, 12347, 12348] | |
127 |
|
127 | |||
128 | The ``@parallel`` decorator creates parallel functions that break up element-wise |
|
128 | The ``@parallel`` decorator creates parallel functions that break up element-wise | |
129 | operations and distribute them, reconstructing the result. |
|
129 | operations and distribute them, reconstructing the result. | |
130 |
|
130 | |||
131 | .. sourcecode:: ipython |
|
131 | .. sourcecode:: ipython | |
132 |
|
132 | |||
133 | In [12]: import numpy as np |
|
133 | In [12]: import numpy as np | |
134 |
|
134 | |||
135 | In [13]: A = np.random.random((64,48)) |
|
135 | In [13]: A = np.random.random((64,48)) | |
136 |
|
136 | |||
137 | In [14]: @dview.parallel(block=True) |
|
137 | In [14]: @dview.parallel(block=True) | |
138 | ...: def pmul(A,B): |
|
138 | ...: def pmul(A,B): | |
139 | ...: return A*B |
|
139 | ...: return A*B | |
140 |
|
140 | |||
141 | In [15]: C_local = A*A |
|
141 | In [15]: C_local = A*A | |
142 |
|
142 | |||
143 | In [16]: C_remote = pmul(A,A) |
|
143 | In [16]: C_remote = pmul(A,A) | |
144 |
|
144 | |||
145 | In [17]: (C_local == C_remote).all() |
|
145 | In [17]: (C_local == C_remote).all() | |
146 | Out[17]: True |
|
146 | Out[17]: True | |
147 |
|
147 | |||
148 | .. seealso:: |
|
148 | .. seealso:: | |
149 |
|
149 | |||
150 | See the docstrings for the :func:`parallel` and :func:`remote` decorators for |
|
150 | See the docstrings for the :func:`parallel` and :func:`remote` decorators for | |
151 | options. |
|
151 | options. | |
152 |
|
152 | |||
153 | Calling Python functions |
|
153 | Calling Python functions | |
154 | ======================== |
|
154 | ======================== | |
155 |
|
155 | |||
156 | The most basic type of operation that can be performed on the engines is to |
|
156 | The most basic type of operation that can be performed on the engines is to | |
157 | execute Python code or call Python functions. Executing Python code can be |
|
157 | execute Python code or call Python functions. Executing Python code can be | |
158 | done in blocking or non-blocking mode (non-blocking is default) using the |
|
158 | done in blocking or non-blocking mode (non-blocking is default) using the | |
159 | :meth:`.View.execute` method, and calling functions can be done via the |
|
159 | :meth:`.View.execute` method, and calling functions can be done via the | |
160 | :meth:`.View.apply` method. |
|
160 | :meth:`.View.apply` method. | |
161 |
|
161 | |||
162 | apply |
|
162 | apply | |
163 | ----- |
|
163 | ----- | |
164 |
|
164 | |||
165 | The main method for doing remote execution (in fact, all methods that |
|
165 | The main method for doing remote execution (in fact, all methods that | |
166 | communicate with the engines are built on top of it) is :meth:`View.apply`. |
|
166 | communicate with the engines are built on top of it) is :meth:`View.apply`. | |
167 |
|
167 | |||
168 | We strive to provide the cleanest interface we can, so `apply` has the following |
|
168 | We strive to provide the cleanest interface we can, so `apply` has the following | |
169 | signature: |
|
169 | signature: | |
170 |
|
170 | |||
171 | .. sourcecode:: python |
|
171 | .. sourcecode:: python | |
172 |
|
172 | |||
173 | view.apply(f, *args, **kwargs) |
|
173 | view.apply(f, *args, **kwargs) | |
174 |
|
174 | |||
175 | There are various ways to call functions with IPython, and these flags are set as |
|
175 | There are various ways to call functions with IPython, and these flags are set as | |
176 | attributes of the View. The ``DirectView`` has just two of these flags: |
|
176 | attributes of the View. The ``DirectView`` has just two of these flags: | |
177 |
|
177 | |||
178 | dv.block : bool |
|
178 | dv.block : bool | |
179 | whether to wait for the result, or return an :class:`AsyncResult` object |
|
179 | whether to wait for the result, or return an :class:`AsyncResult` object | |
180 | immediately |
|
180 | immediately | |
181 | dv.track : bool |
|
181 | dv.track : bool | |
182 | whether to instruct pyzmq to track when the message has been sent. |
|
182 | whether to instruct pyzmq to track when the message has been sent. | |
183 | This is primarily useful for non-copying sends of numpy arrays that you plan to |
|
183 | This is primarily useful for non-copying sends of numpy arrays that you plan to | |
184 | edit in-place. You need to know when it becomes safe to edit the buffer |
|
184 | edit in-place. You need to know when it becomes safe to edit the buffer | |
185 | without corrupting the message. |
|
185 | without corrupting the message. | |
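
A sketch of how ``track`` might be used for such a send (this assumes numpy
is available on the engines, and the ``sent`` attribute queried at the end is
an assumption about the :class:`AsyncResult` wrapper around pyzmq's message
tracker):

.. sourcecode:: ipython

    In [8]: import numpy

    In [9]: A = numpy.random.rand(1024, 1024)

    In [10]: dv.track = True

    In [11]: ar = dv.apply_async(numpy.linalg.norm, A)

    # only edit A in-place once the send has completed:
    In [12]: ar.sent
    Out[12]: True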


Creating a view is simple: index-access on a client creates a :class:`.DirectView`.

.. sourcecode:: ipython

    In [4]: view = rc[1:3]
    Out[4]: <DirectView [1, 2]>

    In [5]: view.apply<tab>
    view.apply               view.apply_async         view.apply_sync

For convenience, you can set block temporarily for a single call with the extra sync/async methods.
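
For example, :meth:`apply_async` returns immediately even when the view's
default is blocking (a small sketch continuing the session above):

.. sourcecode:: ipython

    In [6]: view.block = True

    # not affected by view.block:
    In [7]: ar = view.apply_async(lambda : 'hi')

    In [8]: ar.get()
    Out[8]: ['hi', 'hi']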

Blocking execution
------------------

In blocking mode, the :class:`.DirectView` object (called ``dview`` in
these examples) submits the command to the controller, which places the
command in the engines' queues for execution.  The :meth:`apply` call then
blocks until the engines are done executing the command:

.. sourcecode:: ipython

    In [2]: dview = rc[:] # A DirectView of all engines
    In [3]: dview.block=True
    In [4]: dview['a'] = 5

    In [5]: dview['b'] = 10

    In [6]: dview.apply(lambda x: a+b+x, 27)
    Out[6]: [42, 42, 42, 42]

You can also select blocking execution on a call-by-call basis with the :meth:`apply_sync`
method:

.. sourcecode:: ipython

    In [7]: dview.block=False

    In [8]: dview.apply_sync(lambda x: a+b+x, 27)
    Out[8]: [42, 42, 42, 42]

Python commands can be executed as strings on specific engines by using a View's ``execute``
method:

.. sourcecode:: ipython

    In [6]: rc[::2].execute('c=a+b')

    In [7]: rc[1::2].execute('c=a-b')

    In [8]: dview['c'] # shorthand for dview.pull('c', block=True)
    Out[8]: [15, -5, 15, -5]


Non-blocking execution
----------------------

In non-blocking mode, :meth:`apply` submits the command to be executed and
then returns an :class:`AsyncResult` object immediately.  The
:class:`AsyncResult` object gives you a way of getting a result at a later
time through its :meth:`get` method.

.. Note::

    The :class:`AsyncResult` object provides a superset of the interface in
    :py:class:`multiprocessing.pool.AsyncResult`.  See the
    `official Python documentation <http://docs.python.org/library/multiprocessing#multiprocessing.pool.AsyncResult>`_
    for more.


This allows you to quickly submit long running commands without blocking your
local Python/IPython session:

.. sourcecode:: ipython

    # define our function
    In [6]: def wait(t):
       ...:     import time
       ...:     tic = time.time()
       ...:     time.sleep(t)
       ...:     return time.time()-tic

    # In non-blocking mode
    In [7]: ar = dview.apply_async(wait, 2)

    # Now block for the result
    In [8]: ar.get()
    Out[8]: [2.0006198883056641, 1.9997570514678955, 1.9996809959411621, 2.0003249645233154]

    # Again in non-blocking mode
    In [9]: ar = dview.apply_async(wait, 10)

    # Poll to see if the result is ready
    In [10]: ar.ready()
    Out[10]: False

    # ask for the result, but wait a maximum of 1 second:
    In [45]: ar.get(1)
    ---------------------------------------------------------------------------
    TimeoutError                              Traceback (most recent call last)
    /home/you/<ipython-input-45-7cd858bbb8e0> in <module>()
    ----> 1 ar.get(1)

    /path/to/site-packages/IPython/parallel/asyncresult.pyc in get(self, timeout)
         62                 raise self._exception
         63             else:
    ---> 64                 raise error.TimeoutError("Result not ready.")
         65
         66     def ready(self):

    TimeoutError: Result not ready.

.. Note::

    Note the import inside the function.  This is a common model, to ensure
    that the appropriate modules are imported where the task is run.  You can
    also manually import modules into the engine(s) namespace(s) via
    ``view.execute('import numpy')``.

Often, it is desirable to wait until a set of :class:`AsyncResult` objects
are done.  For this, there is the method :meth:`wait`.  This method takes a
tuple of :class:`AsyncResult` objects (or `msg_ids` or indices to the client's History),
and blocks until all of the associated results are ready:

.. sourcecode:: ipython

    In [72]: dview.block=False

    # A trivial list of AsyncResults objects
    In [73]: pr_list = [dview.apply_async(wait, 3) for i in range(10)]

    # Wait until all of them are done
    In [74]: dview.wait(pr_list)

    # Then, their results are ready using get() or the `.r` attribute
    In [75]: pr_list[0].get()
    Out[75]: [2.9982571601867676, 2.9982588291168213, 2.9987530708312988, 2.9990990161895752]

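Each individual :class:`AsyncResult` also has its own :meth:`wait` method,
part of the :class:`multiprocessing.pool.AsyncResult` interface noted above.
A quick sketch:

.. sourcecode:: ipython

    In [76]: ar = dview.apply_async(wait, 3)

    # block for at most one second:
    In [77]: ar.wait(1)

    In [78]: ar.ready()
    Out[78]: False

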
The ``block`` and ``targets`` keyword arguments and attributes
--------------------------------------------------------------

Most DirectView methods (excluding :meth:`apply` and :meth:`map`) accept ``block`` and
``targets`` as keyword arguments.  As we have seen above, these keyword arguments control the
blocking mode and which engines the command is applied to.  The :class:`View` class also has
:attr:`block` and :attr:`targets` attributes that control the default behavior when the keyword
arguments are not provided.  Thus the following logic is used for :attr:`block` and :attr:`targets`:

* If no keyword argument is provided, the instance attributes are used.
* Keyword arguments, if provided, override the instance attributes for
  the duration of a single call.

The following examples demonstrate how to use the instance attributes:

.. sourcecode:: ipython

    In [16]: dview.targets = [0,2]

    In [17]: dview.block = False

    In [18]: ar = dview.apply(lambda : 10)

    In [19]: ar.get()
    Out[19]: [10, 10]

    In [20]: dview.targets = dview.client.ids # all engines (4)

    In [21]: dview.block = True

    In [22]: dview.apply(lambda : 42)
    Out[22]: [42, 42, 42, 42]

The :attr:`block` and :attr:`targets` instance attributes of the
:class:`.DirectView` also determine the behavior of the parallel magic commands.

Parallel magic commands
-----------------------

.. warning::

    The magics have not been changed to work with the zeromq system.  The
    magics do work, but *do not* print stdout like they used to in IPython.kernel.

We provide a few IPython magic commands (``%px``, ``%autopx`` and ``%result``)
that make it more pleasant to execute Python commands on the engines
interactively.  These are simply shortcuts to :meth:`execute` and
:meth:`get_result` of the :class:`DirectView`.  The ``%px`` magic executes a single
Python command on the engines specified by the :attr:`targets` attribute of the
:class:`DirectView` instance:

.. sourcecode:: ipython

    # load the parallel magic extension:
    In [21]: %load_ext parallelmagic

    # Create a DirectView for all targets
    In [22]: dv = rc[:]

    # Make this DirectView active for parallel magic commands
    In [23]: dv.activate()

    In [24]: dv.block=True

    In [25]: import numpy

    In [26]: %px import numpy
    Parallel execution on engines: [0, 1, 2, 3]

    In [27]: %px a = numpy.random.rand(2,2)
    Parallel execution on engines: [0, 1, 2, 3]

    In [28]: %px ev = numpy.linalg.eigvals(a)
    Parallel execution on engines: [0, 1, 2, 3]

    In [28]: dv['ev']
    Out[28]: [ array([ 1.09522024, -0.09645227]),
               array([ 1.21435496, -0.35546712]),
               array([ 0.72180653,  0.07133042]),
               array([  1.46384341e+00,   1.04353244e-04])
             ]

The ``%result`` magic gets the most recent result, or takes an argument
specifying the index of the result to be requested.  It is simply a shortcut to the
:meth:`get_result` method:

.. sourcecode:: ipython

    In [29]: dv.apply_async(lambda : ev)

    In [30]: %result
    Out[30]: [ [ 1.28167017  0.14197338],
               [-0.14093616  1.27877273],
               [-0.37023573  1.06779409],
               [ 0.83664764 -0.25602658] ]

The ``%autopx`` magic switches to a mode where everything you type is executed
on the engines given by the :attr:`targets` attribute:

.. sourcecode:: ipython

    In [30]: dv.block=False

    In [31]: %autopx
    Auto Parallel Enabled
    Type %autopx to disable

    In [32]: max_evals = []
    <IPython.parallel.asyncresult.AsyncResult object at 0x17b8a70>

    In [33]: for i in range(100):
       ....:     a = numpy.random.rand(10,10)
       ....:     a = a+a.transpose()
       ....:     evals = numpy.linalg.eigvals(a)
       ....:     max_evals.append(evals[0].real)
       ....:
       ....:
    <IPython.parallel.asyncresult.AsyncResult object at 0x17af8f0>

    In [34]: %autopx
    Auto Parallel Disabled

    In [35]: dv.block=True

    In [36]: %px ans = "Average max eigenvalue is: %f"%(sum(max_evals)/len(max_evals))
    Parallel execution on engines: [0, 1, 2, 3]

    In [37]: dv['ans']
    Out[37]: [ 'Average max eigenvalue is: 10.1387247332',
               'Average max eigenvalue is: 10.2076902286',
               'Average max eigenvalue is: 10.1891484655',
               'Average max eigenvalue is: 10.1158837784',]


Moving Python objects around
============================

In addition to calling functions and executing code on engines, you can
transfer Python objects between your IPython session and the engines.  In
IPython, these operations are called :meth:`push` (sending an object to the
engines) and :meth:`pull` (getting an object from the engines).

Basic push and pull
-------------------

Here are some examples of how you use :meth:`push` and :meth:`pull`:

.. sourcecode:: ipython

    In [38]: dview.push(dict(a=1.03234,b=3453))
    Out[38]: [None,None,None,None]

    In [39]: dview.pull('a')
    Out[39]: [ 1.03234, 1.03234, 1.03234, 1.03234]

    In [40]: dview.pull('b', targets=0)
    Out[40]: 3453

    In [41]: dview.pull(('a','b'))
    Out[41]: [ [1.03234, 3453], [1.03234, 3453], [1.03234, 3453], [1.03234, 3453] ]

    In [43]: dview.push(dict(c='speed'))
    Out[43]: [None,None,None,None]

In non-blocking mode :meth:`push` and :meth:`pull` also return
:class:`AsyncResult` objects:

.. sourcecode:: ipython

    In [48]: ar = dview.pull('a', block=False)

    In [49]: ar.get()
    Out[49]: [1.03234, 1.03234, 1.03234, 1.03234]


Dictionary interface
--------------------

Since a Python namespace is just a :class:`dict`, :class:`DirectView` objects provide
dictionary-style access by key and methods such as :meth:`get` and
:meth:`update` for convenience.  This makes the remote namespaces of the engines
appear as a local dictionary.  Underneath, these methods call :meth:`apply`:

.. sourcecode:: ipython

    In [51]: dview['a']=['foo','bar']

    In [52]: dview['a']
    Out[52]: [ ['foo', 'bar'], ['foo', 'bar'], ['foo', 'bar'], ['foo', 'bar'] ]

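The dict-like :meth:`get` and :meth:`update` methods mentioned above work the
same way (a quick sketch, assuming the same four-engine ``dview``):

.. sourcecode:: ipython

    In [53]: dview.update(dict(b=5))

    In [54]: dview.get('b')
    Out[54]: [5, 5, 5, 5]
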
Scatter and gather
------------------

Sometimes it is useful to partition a sequence and push the partitions to
different engines.  In MPI language, this is known as scatter/gather and we
follow that terminology.  However, it is important to remember that in
IPython's :class:`Client` class, :meth:`scatter` is from the
interactive IPython session to the engines and :meth:`gather` is from the
engines back to the interactive IPython session.  For scatter/gather operations
between engines, MPI should be used:

.. sourcecode:: ipython

    In [58]: dview.scatter('a',range(16))
    Out[58]: [None,None,None,None]

    In [59]: dview['a']
    Out[59]: [ [0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15] ]

    In [60]: dview.gather('a')
    Out[60]: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]

Other things to look at
=======================

How to do parallel list comprehensions
--------------------------------------

In many cases list comprehensions are nicer than using the map function.  While
we don't have fully parallel list comprehensions, it is simple to get the
basic effect using :meth:`scatter` and :meth:`gather`:

.. sourcecode:: ipython

    In [66]: dview.scatter('x',range(64))

    In [67]: %px y = [i**10 for i in x]
    Parallel execution on engines: [0, 1, 2, 3]
    Out[67]:

    In [68]: y = dview.gather('y')

    In [69]: print y
    [0, 1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824,...]

Remote imports
--------------

Sometimes you will want to import packages both in your interactive session
and on your remote engines.  This can be done with the :class:`ContextManager`
created by a DirectView's :meth:`sync_imports` method:

.. sourcecode:: ipython

    In [69]: with dview.sync_imports():
       ...:     import numpy
    importing numpy on engine(s)

Any imports made inside the block will also be performed on the view's engines.
sync_imports also takes a `local` boolean flag that defaults to True, which specifies
whether the local imports should also be performed.  However, support for `local=False`
has not been implemented, so only packages that can be imported locally will work
this way.

You can also specify imports via the ``@require`` decorator.  This is a decorator
designed for use in Dependencies, but can be used to handle remote imports as well.
Modules or module names passed to ``@require`` will be imported before the decorated
function is called.  If they cannot be imported, the decorated function will never
execute, and will fail with an UnmetDependencyError.

.. sourcecode:: ipython

    In [69]: from IPython.parallel import require

    In [70]: @require('re')
       ...: def findall(pat, x):
       ...:     # re is guaranteed to be available
       ...:     return re.findall(pat, x)

    # you can also pass modules themselves, that you already have locally:
    In [71]: @require(time)
       ...: def wait(t):
       ...:     time.sleep(t)
       ...:     return t
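
Calling a decorated function is then no different from any other ``apply``
call (a sketch, assuming the four-engine ``dview`` from earlier):

.. sourcecode:: ipython

    # time is imported on each engine before wait runs:
    In [72]: dview.apply_sync(wait, 1)
    Out[72]: [1, 1, 1, 1]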


Parallel exceptions
-------------------

In the multiengine interface, parallel commands can raise Python exceptions,
just like serial commands.  But it is a little subtle, because a single
parallel command can actually raise multiple exceptions (one for each engine
the command was run on).  To express this idea, we have a
:exc:`CompositeError` exception class that will be raised in most cases.  The
:exc:`CompositeError` class is a special type of exception that wraps one or
more other types of exceptions.  Here is how it works:

.. sourcecode:: ipython

    In [76]: dview.block=True

    In [77]: dview.execute('1/0')
    ---------------------------------------------------------------------------
    CompositeError                            Traceback (most recent call last)
    /home/you/<ipython-input-10-15c2c22dec39> in <module>()
    ----> 1 dview.execute('1/0', block=True)

    /path/to/site-packages/IPython/parallel/view.py in execute(self, code, block)
        460             default: self.block
        461         """
    --> 462         return self.apply_with_flags(util._execute, args=(code,), block=block)
        463
        464     def run(self, filename, block=None):

    /home/you/<string> in apply_with_flags(self, f, args, kwargs, block, track)

    /path/to/site-packages/IPython/parallel/view.py in sync_results(f, self, *args, **kwargs)
         46 def sync_results(f, self, *args, **kwargs):
         47     """sync relevant results from self.client to our results attribute."""
    ---> 48     ret = f(self, *args, **kwargs)
         49     delta = self.outstanding.difference(self.client.outstanding)
         50     completed = self.outstanding.intersection(delta)

    /home/you/<string> in apply_with_flags(self, f, args, kwargs, block, track)

    /path/to/site-packages/IPython/parallel/view.py in save_ids(f, self, *args, **kwargs)
         35     n_previous = len(self.client.history)
         36     try:
    ---> 37         ret = f(self, *args, **kwargs)
         38     finally:
         39         nmsgs = len(self.client.history) - n_previous

    /path/to/site-packages/IPython/parallel/view.py in apply_with_flags(self, f, args, kwargs, block, track)
        398         if block:
        399             try:
    --> 400                 return ar.get()
        401             except KeyboardInterrupt:
        402                 pass

    /path/to/site-packages/IPython/parallel/asyncresult.pyc in get(self, timeout)
         87                 return self._result
         88             else:
    ---> 89                 raise self._exception
         90         else:
         91             raise error.TimeoutError("Result not ready.")

    CompositeError: one or more exceptions from call to method: _execute
    [0:apply]: ZeroDivisionError: integer division or modulo by zero
    [1:apply]: ZeroDivisionError: integer division or modulo by zero
    [2:apply]: ZeroDivisionError: integer division or modulo by zero
    [3:apply]: ZeroDivisionError: integer division or modulo by zero


Notice how the error message printed when :exc:`CompositeError` is raised has
information about the individual exceptions that were raised on each engine.
If you want, you can even raise one of these original exceptions:

.. sourcecode:: ipython

    In [80]: try:
       ....:     dview.execute('1/0')
       ....: except client.CompositeError, e:
       ....:     e.raise_exception()
       ....:
       ....:
    ---------------------------------------------------------------------------
    ZeroDivisionError                         Traceback (most recent call last)

    /ipython1-client-r3021/docs/examples/<ipython console> in <module>()

    /ipython1-client-r3021/ipython1/kernel/error.pyc in raise_exception(self, excid)
        156             raise IndexError("an exception with index %i does not exist"%excid)
        157         else:
    --> 158             raise et, ev, etb
        159
        160 def collect_exceptions(rlist, method):

    ZeroDivisionError: integer division or modulo by zero

If you are working in IPython, you can simply type ``%debug`` after one of
these :exc:`CompositeError` exceptions is raised, and inspect the exception
instance:

.. sourcecode:: ipython

    In [81]: dview.execute('1/0')
    ---------------------------------------------------------------------------
    CompositeError                            Traceback (most recent call last)
    /home/you/<ipython-input-10-15c2c22dec39> in <module>()
    ----> 1 dview.execute('1/0', block=True)

    /path/to/site-packages/IPython/parallel/view.py in execute(self, code, block)
        460             default: self.block
        461         """
    --> 462         return self.apply_with_flags(util._execute, args=(code,), block=block)
        463
        464     def run(self, filename, block=None):

    /home/you/<string> in apply_with_flags(self, f, args, kwargs, block, track)

    /path/to/site-packages/IPython/parallel/view.py in sync_results(f, self, *args, **kwargs)
         46 def sync_results(f, self, *args, **kwargs):
         47     """sync relevant results from self.client to our results attribute."""
    ---> 48     ret = f(self, *args, **kwargs)
         49     delta = self.outstanding.difference(self.client.outstanding)
         50     completed = self.outstanding.intersection(delta)

    /home/you/<string> in apply_with_flags(self, f, args, kwargs, block, track)

    /path/to/site-packages/IPython/parallel/view.py in save_ids(f, self, *args, **kwargs)
         35     n_previous = len(self.client.history)
         36     try:
    ---> 37         ret = f(self, *args, **kwargs)
         38     finally:
         39         nmsgs = len(self.client.history) - n_previous

    /path/to/site-packages/IPython/parallel/view.py in apply_with_flags(self, f, args, kwargs, block, track)
        398         if block:
        399             try:
    --> 400                 return ar.get()
        401             except KeyboardInterrupt:
        402                 pass

    /path/to/site-packages/IPython/parallel/asyncresult.pyc in get(self, timeout)
         87                 return self._result
         88             else:
    ---> 89                 raise self._exception
         90         else:
         91             raise error.TimeoutError("Result not ready.")

    CompositeError: one or more exceptions from call to method: _execute
    [0:apply]: ZeroDivisionError: integer division or modulo by zero
    [1:apply]: ZeroDivisionError: integer division or modulo by zero
    [2:apply]: ZeroDivisionError: integer division or modulo by zero
    [3:apply]: ZeroDivisionError: integer division or modulo by zero

    In [82]: %debug
    > /path/to/site-packages/IPython/parallel/asyncresult.py(80)get()
         79                 else:
    ---> 80                     raise self._exception
         81         else:


    # With the debugger running, e is the exception instance.  We can tab complete
    # on it and see the extra methods that are available.
    ipdb> e.
    e.__class__         e.__getitem__       e.__new__           e.__setstate__      e.args
    e.__delattr__       e.__getslice__      e.__reduce__        e.__str__           e.elist
    e.__dict__          e.__hash__          e.__reduce_ex__     e.__weakref__       e.message
    e.__doc__           e.__init__          e.__repr__          e._get_engine_str   e.print_tracebacks
    e.__getattribute__  e.__module__        e.__setattr__       e._get_traceback    e.raise_exception
    ipdb> e.print_tracebacks()
    [0:apply]:
    Traceback (most recent call last):
      File "/path/to/site-packages/IPython/parallel/streamkernel.py", line 332, in apply_request
        exec code in working, working
      File "<string>", line 1, in <module>
      File "/path/to/site-packages/IPython/parallel/client.py", line 69, in _execute
        exec code in globals()
      File "<string>", line 1, in <module>
    ZeroDivisionError: integer division or modulo by zero


    [1:apply]:
    Traceback (most recent call last):
      File "/path/to/site-packages/IPython/parallel/streamkernel.py", line 332, in apply_request
        exec code in working, working
      File "<string>", line 1, in <module>
      File "/path/to/site-packages/IPython/parallel/client.py", line 69, in _execute
        exec code in globals()
      File "<string>", line 1, in <module>
    ZeroDivisionError: integer division or modulo by zero


    [2:apply]:
    Traceback (most recent call last):
      File "/path/to/site-packages/IPython/parallel/streamkernel.py", line 332, in apply_request
        exec code in working, working
      File "<string>", line 1, in <module>
      File "/path/to/site-packages/IPython/parallel/client.py", line 69, in _execute
        exec code in globals()
      File "<string>", line 1, in <module>
    ZeroDivisionError: integer division or modulo by zero


    [3:apply]:
    Traceback (most recent call last):
      File "/path/to/site-packages/IPython/parallel/streamkernel.py", line 332, in apply_request
        exec code in working, working
      File "<string>", line 1, in <module>
      File "/path/to/site-packages/IPython/parallel/client.py", line 69, in _execute
        exec code in globals()
      File "<string>", line 1, in <module>
    ZeroDivisionError: integer division or modulo by zero


.. note::

    TODO: The above tracebacks are not up to date


All of this same error handling magic even works in non-blocking mode:

.. sourcecode:: ipython

    In [83]: dview.block=False

    In [84]: ar = dview.execute('1/0')

    In [85]: ar.get()
    ---------------------------------------------------------------------------
    CompositeError                            Traceback (most recent call last)
    /Users/minrk/<ipython-input-3-8531eb3d26fb> in <module>()
    ----> 1 ar.get()

    /path/to/site-packages/IPython/parallel/asyncresult.pyc in get(self, timeout)
         78             return self._result
         79         else:
    ---> 80             raise self._exception
         81     else:
         82         raise error.TimeoutError("Result not ready.")

    CompositeError: one or more exceptions from call to method: _execute
    [0:apply]: ZeroDivisionError: integer division or modulo by zero
    [1:apply]: ZeroDivisionError: integer division or modulo by zero
    [2:apply]: ZeroDivisionError: integer division or modulo by zero
    [3:apply]: ZeroDivisionError: integer division or modulo by zero

NO CONTENT: file renamed from docs/source/parallelz/parallel_pi.pdf to docs/source/parallel/parallel_pi.pdf

NO CONTENT: file renamed from docs/source/parallelz/parallel_pi.png to docs/source/parallel/parallel_pi.png

@@ -1,506 +1,506 b''

.. _parallel_process:

===========================================
Starting the IPython controller and engines
===========================================

To use IPython for parallel computing, you need to start one instance of
the controller and one or more instances of the engine. The controller
and each engine can run on different machines or on the same machine.
Because of this, there are many different possibilities.

Broadly speaking, there are two ways of going about starting a controller and engines:

* In an automated manner using the :command:`ipcluster` command.
* In a more manual way using the :command:`ipcontroller` and
  :command:`ipengine` commands.

This document describes both of these methods. We recommend that new users
start with the :command:`ipcluster` command as it simplifies many common usage
cases.

General considerations
======================

Before delving into the details about how you can start a controller and
engines using the various methods, we outline some of the general issues that
come up when starting the controller and engines. These things come up no
matter which method you use to start your IPython cluster.

Let's say that you want to start the controller on ``host0`` and engines on
hosts ``host1``-``hostn``. The following steps are then required:

1. Start the controller on ``host0`` by running :command:`ipcontroller` on
   ``host0``.
2. Move the JSON file (:file:`ipcontroller-engine.json`) created by the
   controller from ``host0`` to hosts ``host1``-``hostn``.
3. Start the engines on hosts ``host1``-``hostn`` by running
   :command:`ipengine`.  This command has to be told where the JSON file
   (:file:`ipcontroller-engine.json`) is located.

At this point, the controller and engines will be connected. By default, the JSON files
created by the controller are put into the :file:`~/.ipython/cluster_default/security`
directory. If the engines share a filesystem with the controller, step 2 can be skipped as
the engines will automatically look at that location.

The final step required to actually use the running controller from a client is to move
the JSON file :file:`ipcontroller-client.json` from ``host0`` to any host where clients
|
47 | the JSON file :file:`ipcontroller-client.json` from ``host0`` to any host where clients | |
48 |
will be run. If these file are put into the :file:`~/.ipython/cluster |
|
48 | will be run. If these file are put into the :file:`~/.ipython/cluster_default/security` | |
49 | directory of the client's host, they will be found automatically. Otherwise, the full path |
|
49 | directory of the client's host, they will be found automatically. Otherwise, the full path | |
50 | to them has to be passed to the client's constructor. |
|
50 | to them has to be passed to the client's constructor. | |
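
Passing the path explicitly is straightforward; a minimal sketch (the path
below is illustrative):

.. sourcecode:: python

    from IPython.parallel import Client

    # point the client at the JSON file copied over from host0
    rc = Client('/path/to/ipcontroller-client.json')
    print rc.ids  # the ids of the engines that have registered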

Using :command:`ipcluster`
===========================

The :command:`ipcluster` command provides a simple way of starting a
controller and engines in the following situations:

1. When the controller and engines are all run on localhost. This is useful
   for testing or running on a multicore computer.
2. When engines are started using the :command:`mpirun` command that comes
   with most MPI [MPI]_ implementations.
3. When engines are started using the PBS [PBS]_ batch system
   (or other `qsub` systems, such as SGE).
4. When the controller is started on localhost and the engines are started on
   remote nodes using :command:`ssh`.
5. When engines are started using the Windows HPC Server batch system.

.. note::

    Currently :command:`ipcluster` requires that the
    :file:`~/.ipython/cluster_<profile>/security` directory live on a shared filesystem that is
    seen by both the controller and engines. If you don't have a shared file
    system you will need to use :command:`ipcontroller` and
    :command:`ipengine` directly.

Under the hood, :command:`ipcluster` just uses :command:`ipcontroller`
and :command:`ipengine` to perform the steps described above.

The simplest way to use ipcluster requires no configuration, and will
launch a controller and a number of engines on the local machine. For instance,
to start one controller and 4 engines on localhost, just do::

    $ ipcluster start -n 4

To see other command line options for the local mode, do::

    $ ipcluster -h

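
Once the cluster is up, a quick way to confirm that the engines are running is
to connect from an interactive session; a minimal sketch, assuming the default
profile:

.. sourcecode:: python

    from IPython.parallel import Client

    rc = Client()   # finds the JSON files for the default profile
    print rc.ids    # e.g. [0, 1, 2, 3] for the four engines started above
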
Configuring an IPython cluster
==============================

Cluster configurations are stored as `profiles`. You can create a new profile with::

    $ ipcluster create -p myprofile

This will create the directory :file:`IPYTHONDIR/cluster_myprofile`, and populate it
with the default configuration files for the three IPython cluster commands. Once
you edit those files, you can continue to call ipcluster/ipcontroller/ipengine
with no arguments beyond ``-p myprofile``, and any configuration will be maintained.

There is no limit to the number of profiles you can have, so you can maintain a profile for each
of your common use cases. The default profile will be used whenever the
profile argument is not specified, so edit :file:`IPYTHONDIR/cluster_default/*_config.py` to
represent your most common use case.

The configuration files are loaded with commented-out settings and explanations,
which should cover most of the available possibilities.

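
For instance, after creating the profile above, the new directory contains one
configuration file per command (listing shown for illustration; exact contents
may vary between versions)::

    $ ls ~/.ipython/cluster_myprofile/
    ipcluster_config.py  ipcontroller_config.py  ipengine_config.py
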
Using various batch systems with :command:`ipcluster`
------------------------------------------------------

:command:`ipcluster` has a notion of Launchers that can start controllers
and engines with various remote execution schemes. Currently supported
models include `mpiexec`, PBS-style (Torque, SGE), and Windows HPC Server.

.. note::

    The Launchers and configuration are designed in such a way that advanced
    users can subclass and configure them to fit their own systems that we
    have not yet supported (such as Condor).

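
Selecting such a custom Launcher is then just a matter of pointing the
configuration at its import path; a hypothetical sketch (the class name below
is illustrative, not a class that ships with IPython):

.. sourcecode:: python

    # in ipcluster_config.py
    c.Global.engine_launcher = 'mypackage.launchers.CondorEngineSetLauncher'
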
Using :command:`ipcluster` in mpiexec/mpirun mode
--------------------------------------------------

The mpiexec/mpirun mode is useful if you:

1. Have MPI installed.
2. Have systems configured to use the :command:`mpiexec` or
   :command:`mpirun` commands to start MPI processes.

If these are satisfied, you can create a new profile::

    $ ipcluster create -p mpi

and edit the file :file:`IPYTHONDIR/cluster_mpi/ipcluster_config.py`.

There, instruct ipcluster to use the MPIExec launchers by adding the lines:

.. sourcecode:: python

    c.Global.engine_launcher = 'IPython.parallel.launcher.MPIExecEngineSetLauncher'

If the default MPI configuration is correct, then you can now start your cluster, with::

    $ ipcluster start -n 4 -p mpi

This does the following:

1. Starts the IPython controller on the current host.
2. Uses :command:`mpiexec` to start 4 engines.

If you have a reason to also start the Controller with mpi, you can specify:

.. sourcecode:: python

    c.Global.controller_launcher = 'IPython.parallel.launcher.MPIExecControllerLauncher'

.. note::

    The Controller *will not* be in the same MPI universe as the engines, so there is not
    much reason to do this unless sysadmins demand it.

On newer MPI implementations (such as OpenMPI), this will work even if you
don't make any calls to MPI or call :func:`MPI_Init`. However, older MPI
implementations actually require each process to call :func:`MPI_Init` upon
starting. The easiest way of having this done is to install the mpi4py
[mpi4py]_ package and then specify the ``c.MPI.use`` option in :file:`ipengine_config.py`:

.. sourcecode:: python

    c.MPI.use = 'mpi4py'

Unfortunately, even this won't work for some MPI implementations. If you are
having problems with this, you will likely have to use a custom Python
executable that itself calls :func:`MPI_Init` at the appropriate time.
Fortunately, mpi4py comes with such a custom Python executable that is easy to
install and use. However, this custom Python executable approach will not work
with :command:`ipcluster` currently.

More details on using MPI with IPython can be found :ref:`here <parallelmpi>`.

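
Once the MPI-backed cluster is running, the engines share an MPI universe and
can be driven from a client; a minimal sketch, assuming the ``mpi`` profile
created above and mpi4py available on the engines:

.. sourcecode:: python

    from IPython.parallel import Client

    rc = Client(profile='mpi')

    def mpi_rank():
        # runs on each engine; reports its rank in the shared MPI universe
        from mpi4py import MPI
        return MPI.COMM_WORLD.Get_rank()

    print rc[:].apply_sync(mpi_rank)   # e.g. [0, 1, 2, 3]
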
Using :command:`ipcluster` in PBS mode
---------------------------------------

The PBS mode uses the Portable Batch System [PBS]_ to start the engines.

As usual, we will start by creating a fresh profile::

    $ ipcluster create -p pbs

And in :file:`ipcluster_config.py`, we will select the PBS launchers for the controller
and engines:

.. sourcecode:: python

    c.Global.controller_launcher = 'IPython.parallel.launcher.PBSControllerLauncher'
    c.Global.engine_launcher = 'IPython.parallel.launcher.PBSEngineSetLauncher'

IPython does provide simple default batch templates for PBS and SGE, but you may need
to specify your own. Here is a sample PBS script template:

.. sourcecode:: bash

    #PBS -N ipython
    #PBS -j oe
    #PBS -l walltime=00:10:00
    #PBS -l nodes=${n/4}:ppn=4
    #PBS -q $queue

    cd $$PBS_O_WORKDIR
    export PATH=$$HOME/usr/local/bin
    export PYTHONPATH=$$HOME/usr/local/lib/python2.7/site-packages
    /usr/local/bin/mpiexec -n ${n} ipengine --cluster_dir=${cluster_dir}

There are a few important points about this template:

1. This template will be rendered at runtime using IPython's :mod:`Itpl`
   template engine.

2. Instead of putting in the actual number of engines, use the notation
   ``${n}`` to indicate the number of engines to be started. You can also use
   expressions like ``${n/4}`` in the template to indicate the number of
   nodes. There will always be ``${n}`` and ``${cluster_dir}`` variables passed to the template.
   These let the batch system know how many engines to start, and where the configuration
   files reside. The same is true for the batch queue, with the template variable ``$queue``.

3. Because ``$`` is a special character used by the template engine, you must
   escape any ``$`` by using ``$$``. This is important when referring to
   environment variables in the template, or in SGE, where the config lines start
   with ``#$``, which will have to be ``#$$`` (an SGE example is sketched below,
   after this list).

4. Any options to :command:`ipengine` can be given in the batch script
   template, or in :file:`ipengine_config.py`.

5. Depending on the configuration of your system, you may have to set
   environment variables in the script template.

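
As an illustration of point 3, here is a hypothetical SGE engine template
adapted from the PBS one above (the queue and parallel environment names are
site-specific assumptions, not defaults):

.. sourcecode:: bash

    #$$ -N ipython
    #$$ -j y
    #$$ -l h_rt=00:10:00
    #$$ -q $queue
    #$$ -pe mpi ${n}

    cd $$SGE_O_WORKDIR
    mpiexec -n ${n} ipengine --cluster_dir=${cluster_dir}
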
The controller template should be similar, but simpler:

.. sourcecode:: bash

    #PBS -N ipython
    #PBS -j oe
    #PBS -l walltime=00:10:00
    #PBS -l nodes=1:ppn=4
    #PBS -q $queue

    cd $$PBS_O_WORKDIR
    export PATH=$$HOME/usr/local/bin
    export PYTHONPATH=$$HOME/usr/local/lib/python2.7/site-packages
    ipcontroller --cluster_dir=${cluster_dir}


Once you have created these scripts, save them with names like
:file:`pbs.engine.template`. Now you can load them into :file:`ipcluster_config` with:

.. sourcecode:: python

    c.PBSEngineSetLauncher.batch_template_file = "pbs.engine.template"

    c.PBSControllerLauncher.batch_template_file = "pbs.controller.template"


Alternately, you can just define the templates as strings inside :file:`ipcluster_config`.

Whether you are using your own templates or our defaults, the extra configurables available are
the number of engines to launch (``$n``) and the batch system queue to which the jobs are to be
submitted (``$queue``). These are configurables, and can be specified in
:file:`ipcluster_config`:

.. sourcecode:: python

    c.PBSLauncher.queue = 'veryshort.q'
    c.PBSEngineSetLauncher.n = 64

Note that assuming you are running PBS on a multi-node cluster, the Controller's default behavior
of listening only on localhost is likely too restrictive. In this case, also assuming the
nodes are safely behind a firewall, you can simply instruct the Controller to listen for
connections on all its interfaces, by adding in :file:`ipcontroller_config`:

.. sourcecode:: python

    c.RegistrationFactory.ip = '*'

You can now run the cluster with::

    $ ipcluster start -p pbs -n 128

Additional configuration options can be found in the PBS section of :file:`ipcluster_config`.

.. note::

    Due to the flexibility of configuration, the PBS launchers work with simple changes
    to the template for other :command:`qsub`-using systems, such as Sun Grid Engine,
    and with further configuration in similar batch systems like Condor.


Using :command:`ipcluster` in SSH mode
---------------------------------------

The SSH mode uses :command:`ssh` to execute :command:`ipengine` on remote
nodes, and :command:`ipcontroller` can be run remotely as well, or on localhost.

.. note::

    When using this mode it is highly recommended that you have set up SSH keys
    and are using ssh-agent [SSH]_ for password-less logins.

As usual, we start by creating a clean profile::

    $ ipcluster create -p ssh

To use this mode, select the SSH launchers in :file:`ipcluster_config.py`:

.. sourcecode:: python

    c.Global.engine_launcher = 'IPython.parallel.launcher.SSHEngineSetLauncher'
    # and if the Controller is also to be remote:
    c.Global.controller_launcher = 'IPython.parallel.launcher.SSHControllerLauncher'


The controller's remote location and configuration can be specified:

.. sourcecode:: python

    # Set the user and hostname for the controller
    # c.SSHControllerLauncher.hostname = 'controller.example.com'
    # c.SSHControllerLauncher.user = os.environ.get('USER','username')

    # Set the arguments to be passed to ipcontroller
    # note that remotely launched ipcontroller will not get the contents of
    # the local ipcontroller_config.py unless it resides on the *remote host*
    # in the location specified by the --cluster_dir argument.
    # c.SSHControllerLauncher.program_args = ['-r', '-ip', '0.0.0.0', '--cluster_dir', '/path/to/cd']

.. note::

    SSH mode does not do any file movement, so you will need to distribute configuration
    files manually. To aid in this, the `reuse_files` flag defaults to True for ssh-launched
    Controllers, so you will only need to do this once, unless you override this flag back
    to False.

Engines are specified in a dictionary, by hostname and the number of engines to be run
on that host.

.. sourcecode:: python

    c.SSHEngineSetLauncher.engines = { 'host1.example.com' : 2,
                                       'host2.example.com' : 5,
                                       'host3.example.com' : (1, ['--cluster_dir', '/home/different/location']),
                                       'host4.example.com' : 8 }

* In the `engines` dict, the keys are the hosts we want to run engines on and
  the values are the number of engines to run on each host.
* On host3, the value is a tuple: the number of engines comes first, and the
  arguments to be passed to :command:`ipengine` are the second element.

For engines without explicitly specified arguments, the default arguments are set in
a single location:

.. sourcecode:: python

    c.SSHEngineSetLauncher.engine_args = ['--cluster_dir', '/path/to/cluster_ssh']

Current limitations of the SSH mode of :command:`ipcluster` are:

* Untested on Windows. Would require a working :command:`ssh` on Windows.
  Also, we are using shell scripts to setup and execute commands on remote
  hosts.
* No file movement, so configuration and connection files must be distributed
  to the remote hosts manually, as noted above.

Using the :command:`ipcontroller` and :command:`ipengine` commands
====================================================================

It is also possible to use the :command:`ipcontroller` and :command:`ipengine`
commands to start your controller and engines. This approach gives you full
control over all aspects of the startup process.

Starting the controller and engine on your local machine
--------------------------------------------------------

To use :command:`ipcontroller` and :command:`ipengine` to start things on your
local machine, do the following.

First start the controller::

    $ ipcontroller

Next, start however many instances of the engine you want using (repeatedly)
the command::

    $ ipengine

The engines should start and automatically connect to the controller using the
JSON files in :file:`~/.ipython/cluster_default/security`. You are now ready to use the
controller and engines from IPython.

.. warning::

    The order of the above operations may be important. You *must*
    start the controller before the engines, unless you are reusing connection
    information (via `-r`), in which case ordering is not important.

.. note::

    On some platforms (OS X), to put the controller and engine into the
    background you may need to give these commands in the form ``(ipcontroller
    &)`` and ``(ipengine &)`` (with the parentheses) for them to work
    properly.

Starting the controller and engines on different hosts
------------------------------------------------------

When the controller and engines are running on different hosts, things are
slightly more complicated, but the underlying ideas are the same:

1. Start the controller on a host using :command:`ipcontroller`.
2. Copy :file:`ipcontroller-engine.json` from :file:`~/.ipython/cluster_<profile>/security` on
   the controller's host to the host where the engines will run.
3. Use :command:`ipengine` on the engines' hosts to start the engines.

The only thing you have to be careful of is to tell :command:`ipengine` where
the :file:`ipcontroller-engine.json` file is located. There are two ways you
can do this:

* Put :file:`ipcontroller-engine.json` in the :file:`~/.ipython/cluster_<profile>/security`
  directory on the engine's host, where it will be found automatically.
* Call :command:`ipengine` with the ``--file=full_path_to_the_file``
  flag.

The ``--file`` flag works like this::

    $ ipengine --file=/path/to/my/ipcontroller-engine.json

.. note::

    If the controller's and engine's hosts all have a shared file system
    (:file:`~/.ipython/cluster_<profile>/security` is the same on all of them), then things
    will just work!

Make JSON files persistent
--------------------------

At first glance it may seem that managing the JSON files is a bit
annoying. Going back to the house and key analogy, copying the JSON around
each time you start the controller is like having to make a new key every time
you want to unlock the door and enter your house. As with your house, you want
to be able to create the key (or JSON file) once, and then simply use it at
any point in the future.

To do this, the only thing you have to do is specify the `-r` flag, so that
the connection information in the JSON files remains accurate::

    $ ipcontroller -r

Then, just copy the JSON files over the first time and you are set. You can
start and stop the controller and engines as many times as you want in the
future, just make sure to tell the controller to reuse the file.

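
A typical reuse workflow, with an illustrative engine host, looks like::

    $ ipcontroller -r     # first start; writes reusable connection files
    $ scp ~/.ipython/cluster_default/security/ipcontroller-engine.json \
          host1:.ipython/cluster_default/security/
    $ ipcontroller -r     # later restarts reuse the same information
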
.. note::

    You may ask the question: what ports does the controller listen on if you
    don't tell it to use specific ones? The default is to use high random port
    numbers. We do this for two reasons: i) to increase security through
    obscurity and ii) to allow multiple controllers on a given host to start and
    automatically use different ports.

Log files
---------

All of the components of IPython have log files associated with them.
These log files can be extremely useful in debugging problems with
IPython and can be found in the directory :file:`~/.ipython/cluster_<profile>/log`.
Sending the log files to us will often help us to debug any problems.


Configuring `ipcontroller`
---------------------------

Ports and addresses
*******************


Database Backend
****************


.. seealso::



Configuring `ipengine`
-----------------------

.. note::

    TODO



.. [PBS] Portable Batch System. http://www.openpbs.org/
.. [SSH] SSH-Agent http://en.wikipedia.org/wiki/ssh-agent
@@ -1,324 +1,324 b'' | |||||
.. _parallelsecurity:

===========================
Security details of IPython
===========================

.. note::

    This section is not thorough, and IPython.zmq needs a thorough security
    audit.

IPython's :mod:`IPython.zmq` package exposes the full power of the
Python interpreter over a TCP/IP network for the purposes of parallel
computing. This feature brings up the important question of IPython's security
model. This document gives details about this model and how it is implemented
in IPython's architecture.

Process and network topology
============================

To enable parallel computing, IPython has a number of different processes that
run. These processes are discussed at length in the IPython documentation and
are summarized here:

* The IPython *engine*. This process is a full blown Python
  interpreter in which user code is executed. Multiple
  engines are started to make parallel computing possible.
* The IPython *hub*. This process monitors a set of
  engines and schedulers, and keeps track of the state of the processes. It listens
  for registration connections from engines and clients, and for monitoring
  connections from schedulers.
* The IPython *schedulers*. This is a set of processes that relay commands and results
  between clients and engines. They are typically on the same machine as the controller,
  and listen for connections from engines and clients, but connect to the Hub.
* The IPython *client*. This process is typically an
  interactive Python process that is used to coordinate the
  engines to get a parallel computation done.

Collectively, these processes are called the IPython *kernel*, and the hub and schedulers
together are referred to as the *controller*.

.. note::

    Are these really still referred to as the Kernel? It doesn't seem so to me. 'cluster'
    seems more accurate.

    -MinRK

These processes communicate over any transport supported by ZeroMQ (tcp, pgm,
infiniband, ipc) with a well defined topology. The IPython hub and schedulers listen on sockets. Upon
starting, an engine connects to a hub and registers itself, which then informs the engine
of the connection information for the schedulers, and the engine then connects to the
schedulers. These engine/hub and engine/scheduler connections persist for the
lifetime of each engine.

The IPython client also connects to the controller processes using a number of socket
connections. As of this writing, this is one socket per scheduler (4), and 3 connections to the
hub for a total of 7. These connections persist for the lifetime of the client only.

A given IPython controller and set of engines typically has a relatively
short lifetime. Typically this lifetime corresponds to the duration of a single parallel
simulation performed by a single user. Finally, the hub, schedulers, engines, and client
processes typically execute with the permissions of that same user. More specifically, the
controller and engines are *not* executed as root or with any other superuser permissions.

Application logic
=================

When running the IPython kernel to perform a parallel computation, a user
utilizes the IPython client to send Python commands and data through the
IPython schedulers to the IPython engines, where those commands are executed
and the data processed. The design of IPython ensures that the client is the
only access point for the capabilities of the engines. That is, the only way
of addressing the engines is through a client.

A user can utilize the client to instruct the IPython engines to execute
arbitrary Python commands. These Python commands can include calls to the
system shell, access the filesystem, etc., as required by the user's
application code. From this perspective, when a user runs an IPython engine on
a host, that engine has the same capabilities and permissions as the user
themselves (as if they were logged onto the engine's host with a terminal).

Secure network connections
==========================

Overview
--------

ZeroMQ provides exactly no security. For this reason, users of IPython must be very
careful in managing connections, because an open TCP/IP socket presents access to
arbitrary execution as the user on the engine machines. As a result, the default behavior
of controller processes is to only listen for clients on the loopback interface, and the
client must establish SSH tunnels to connect to the controller processes.

.. warning::

    If the controller's loopback interface is untrusted, then IPython should be considered
    vulnerable, and this extends to the loopback of all connected clients, which have
    opened a loopback port that is redirected to the controller's loopback port.


SSH
---

Since ZeroMQ provides no security, SSH tunnels are the primary source of secure
connections. A connector file, such as `ipcontroller-client.json`, will contain
information for connecting to the controller, possibly including the address of an
ssh-server through which the client is to tunnel. The Client object then creates tunnels
using either [OpenSSH]_ or [Paramiko]_, depending on the platform. If users do not wish to
use OpenSSH or Paramiko, or the tunneling utilities are insufficient, then they may
construct the tunnels themselves, and simply connect clients and engines as if the
controller were on loopback on the connecting machine.

.. note::

    There is not currently tunneling available for engines.

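
For example, a client on an untrusted network might tunnel through a gateway
host; a minimal sketch (the hostname and path are illustrative):

.. sourcecode:: python

    from IPython.parallel import Client

    # the JSON file still describes the controller; the tunnels through
    # the ssh server are set up automatically
    rc = Client('/path/to/ipcontroller-client.json',
                sshserver='user@gateway.example.com')
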
118 | Authentication |
|
118 | Authentication | |
119 | -------------- |
|
119 | -------------- | |
120 |
|
120 | |||
121 | To protect users of shared machines, an execution key is used to authenticate all messages. |
|
121 | To protect users of shared machines, an execution key is used to authenticate all messages. | |
122 |
|
122 | |||
123 | The Session object that handles the message protocol uses a unique key to verify valid |
|
123 | The Session object that handles the message protocol uses a unique key to verify valid | |
124 | messages. This can be any value specified by the user, but the default behavior is a |
|
124 | messages. This can be any value specified by the user, but the default behavior is a | |
125 | pseudo-random 128-bit number, as generated by `uuid.uuid4()`. This key is checked on every |
|
125 | pseudo-random 128-bit number, as generated by `uuid.uuid4()`. This key is checked on every | |
126 | message everywhere it is unpacked (Controller, Engine, and Client) to ensure that it came |
|
126 | message everywhere it is unpacked (Controller, Engine, and Client) to ensure that it came | |
127 | from an authentic user, and no messages that do not contain this key are acted upon in any |
|
127 | from an authentic user, and no messages that do not contain this key are acted upon in any | |
128 | way. |
|
128 | way. | |
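
For reference, the default key is simply a UUID; a sketch of the equivalent call:

.. sourcecode:: ipython

    In [1]: from uuid import uuid4

    In [2]: key = str(uuid4())  # pseudo-random 128-bit value, as used by default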
129 |
|
129 | |||
130 | There is exactly one key per cluster - it must be the same everywhere. Typically, the |
|
130 | There is exactly one key per cluster - it must be the same everywhere. Typically, the | |
131 | controller creates this key, and stores it in the private connection files |
|
131 | controller creates this key, and stores it in the private connection files | |
132 | `ipcontroller-{engine|client}.json`. These files are typically stored in the |
|
132 | `ipcontroller-{engine|client}.json`. These files are typically stored in the | |
133 | `~/.ipython/cluster_<profile>/security` directory, and are maintained as readable only by |
|
133 | `~/.ipython/cluster_<profile>/security` directory, and are maintained as readable only by | |
134 | the owner, just as is common practice with a user's keys in their `.ssh` directory. |
|
134 | the owner, just as is common practice with a user's keys in their `.ssh` directory. | |
135 |
|
135 | |||
136 | .. warning:: |
|
136 | .. warning:: | |
137 |
|
137 | |||
138 | It is important to note that the key authentication, as emphasized by the use of |
|
138 | It is important to note that the key authentication, as emphasized by the use of | |
139 | a uuid rather than a key generated with a cryptographic library, provides a |
|
139 | a uuid rather than a key generated with a cryptographic library, provides a | |
140 | defense against *accidental* messages more than it does against malicious attacks. |
|
140 | defense against *accidental* messages more than it does against malicious attacks. | |
141 | If loopback is compromised, it would be trivial for an attacker to intercept messages |
|
141 | If loopback is compromised, it would be trivial for an attacker to intercept messages | |
142 | and deduce the key, as there is no encryption. |
|
142 | and deduce the key, as there is no encryption. | |
143 |
|
143 | |||
144 |
|
144 | |||
145 |
|
145 | |||
146 | Specific security vulnerabilities |
|
146 | Specific security vulnerabilities | |
147 | ================================= |
|
147 | ================================= | |
148 |
|
148 | |||
149 | There are a number of potential security vulnerabilities present in IPython's |
|
149 | There are a number of potential security vulnerabilities present in IPython's | |
150 | architecture. In this section we discuss those vulnerabilities and detail how |
|
150 | architecture. In this section we discuss those vulnerabilities and detail how | |
151 | the security architecture described above prevents them from being exploited. |
|
151 | the security architecture described above prevents them from being exploited. | |
152 |
|
152 | |||
153 | Unauthorized clients |
|
153 | Unauthorized clients | |
154 | -------------------- |
|
154 | -------------------- | |
155 |
|
155 | |||
156 | The IPython client can instruct the IPython engines to execute arbitrary |
|
156 | The IPython client can instruct the IPython engines to execute arbitrary | |
157 | Python code with the permissions of the user who started the engines. If an |
|
157 | Python code with the permissions of the user who started the engines. If an | |
158 | attacker were able to connect their own hostile IPython client to the IPython |
|
158 | attacker were able to connect their own hostile IPython client to the IPython | |
159 | controller, they could instruct the engines to execute code. |
|
159 | controller, they could instruct the engines to execute code. | |
160 |
|
160 | |||
161 |
|
161 | |||
162 | On the first level, this attack is prevented by requiring access to the controller's |
|
162 | On the first level, this attack is prevented by requiring access to the controller's | |
163 | ports, which are recommended to only be open on loopback if the controller is on an |
|
163 | ports, which are recommended to only be open on loopback if the controller is on an | |
164 | untrusted local network. If the attacker does have access to the Controller's ports, then |
|
164 | untrusted local network. If the attacker does have access to the Controller's ports, then | |
165 | the attack is prevented by the capabilities-based client authentication of the execution |
|
165 | the attack is prevented by the capabilities-based client authentication of the execution | |
166 | key. The relevant authentication information is encoded into the JSON file that clients |
|
166 | key. The relevant authentication information is encoded into the JSON file that clients | |
167 | must present to gain access to the IPython controller. By limiting the distribution of |
|
167 | must present to gain access to the IPython controller. By limiting the distribution of | |
168 | those keys, a user can grant access to only authorized persons, just as with SSH keys. |
|
168 | those keys, a user can grant access to only authorized persons, just as with SSH keys. | |
169 |
|
169 | |||
170 | It is highly unlikely that an execution key could be guessed by an attacker |
|
170 | It is highly unlikely that an execution key could be guessed by an attacker | |
171 | in a brute-force guessing attack. A given instance of the IPython controller |
|
171 | in a brute-force guessing attack. A given instance of the IPython controller | |
172 | only runs for a relatively short amount of time (on the order of hours). Thus |
|
172 | only runs for a relatively short amount of time (on the order of hours). Thus | |
173 | an attacker would have only a limited amount of time to test a search space of |
|
173 | an attacker would have only a limited amount of time to test a search space of | |
174 | size 2**128. |
|
174 | size 2**128. | |
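
To put that search space in perspective, a back-of-the-envelope illustration
(the guess rate is an assumption, chosen generously):

.. sourcecode:: ipython

    # average years to brute-force a 128-bit key at 10**9 guesses per second
    In [1]: years = 2**127 / 1e9 / (3600. * 24 * 365)

    In [2]: round(years / 1e21, 1)  # in units of 10**21 years
    Out[2]: 5.4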
175 |
|
175 | |||
176 | .. warning:: |
|
176 | .. warning:: | |
177 |
|
177 | |||
178 | If the attacker has gained enough access to intercept loopback connections on |
|
178 | If the attacker has gained enough access to intercept loopback connections on | |
179 | *either* the controller or client, then the key is easily deduced from network |
|
179 | *either* the controller or client, then the key is easily deduced from network | |
180 | traffic. |
|
180 | traffic. | |
181 |
|
181 | |||
182 |
|
182 | |||
183 | Unauthorized engines |
|
183 | Unauthorized engines | |
184 | -------------------- |
|
184 | -------------------- | |
185 |
|
185 | |||
186 | If an attacker were able to connect a hostile engine to a user's controller, |
|
186 | If an attacker were able to connect a hostile engine to a user's controller, | |
187 | the user might unknowingly send sensitive code or data to the hostile engine. |
|
187 | the user might unknowingly send sensitive code or data to the hostile engine. | |
188 | This attacker's engine would then have full access to that code and data. |
|
188 | This attacker's engine would then have full access to that code and data. | |
189 |
|
189 | |||
190 | This type of attack is prevented in the same way as the unauthorized client |
|
190 | This type of attack is prevented in the same way as the unauthorized client | |
191 | attack, through the use of the capabilities-based authentication scheme. |
|
191 | attack, through the use of the capabilities-based authentication scheme. | |
192 |
|
192 | |||
193 | Unauthorized controllers |
|
193 | Unauthorized controllers | |
194 | ------------------------ |
|
194 | ------------------------ | |
195 |
|
195 | |||
196 | It is also possible that an attacker could try to convince a user's IPython |
|
196 | It is also possible that an attacker could try to convince a user's IPython | |
197 | client or engine to connect to a hostile IPython controller. That controller |
|
197 | client or engine to connect to a hostile IPython controller. That controller | |
198 | would then have full access to the code and data sent between the IPython |
|
198 | would then have full access to the code and data sent between the IPython | |
199 | client and the IPython engines. |
|
199 | client and the IPython engines. | |
200 |
|
200 | |||
201 | Again, this attack is prevented through the capabilities in a connection file, which |
|
201 | Again, this attack is prevented through the capabilities in a connection file, which | |
202 | ensure that a client or engine connects to the correct controller. It is also important to |
|
202 | ensure that a client or engine connects to the correct controller. It is also important to | |
203 | note that the connection files encode the IP address and port that the controller is |
|
203 | note that the connection files encode the IP address and port that the controller is | |
204 | listening on, so there is little chance of mistakenly connecting to a controller running |
|
204 | listening on, so there is little chance of mistakenly connecting to a controller running | |
205 | on a different IP address and port. |
|
205 | on a different IP address and port. | |
206 |
|
206 | |||
207 | When starting an engine or client, a user must specify the key to use |
|
207 | When starting an engine or client, a user must specify the key to use | |
208 | for that connection. Thus, in order to introduce a hostile controller, the |
|
208 | for that connection. Thus, in order to introduce a hostile controller, the | |
209 | attacker must convince the user to use the key associated with the |
|
209 | attacker must convince the user to use the key associated with the | |
210 | hostile controller. As long as a user is diligent in only using keys from |
|
210 | hostile controller. As long as a user is diligent in only using keys from | |
211 | trusted sources, this attack is not possible. |
|
211 | trusted sources, this attack is not possible. | |
212 |
|
212 | |||
213 | .. note:: |
|
213 | .. note:: | |
214 |
|
214 | |||
215 | This analysis may be wrong; an unauthorized controller may be easier to fake than described here. |
|
215 | This analysis may be wrong; an unauthorized controller may be easier to fake than described here. | |
216 |
|
216 | |||
217 | Other security measures |
|
217 | Other security measures | |
218 | ======================= |
|
218 | ======================= | |
219 |
|
219 | |||
220 | A number of other measures are taken to further limit the security risks |
|
220 | A number of other measures are taken to further limit the security risks | |
221 | involved in running the IPython kernel. |
|
221 | involved in running the IPython kernel. | |
222 |
|
222 | |||
223 | First, by default, the IPython controller listens on random port numbers. |
|
223 | First, by default, the IPython controller listens on random port numbers. | |
224 | While this can be overridden by the user, in the default configuration, an |
|
224 | While this can be overridden by the user, in the default configuration, an | |
225 | attacker would have to do a port scan to even find a controller to attack. |
|
225 | attacker would have to do a port scan to even find a controller to attack. | |
226 | When coupled with the relatively short running time of a typical controller |
|
226 | When coupled with the relatively short running time of a typical controller | |
227 | (on the order of hours), an attacker would have to work extremely hard and |
|
227 | (on the order of hours), an attacker would have to work extremely hard and | |
228 | extremely *fast* to even find a running controller to attack. |
|
228 | extremely *fast* to even find a running controller to attack. | |
229 |
|
229 | |||
230 | Second, much of the time, especially when run on supercomputers or clusters, |
|
230 | Second, much of the time, especially when run on supercomputers or clusters, | |
231 | the controller is running behind a firewall. Thus, for engines or clients to |
|
231 | the controller is running behind a firewall. Thus, for engines or clients to | |
232 | connect to the controller: |
|
232 | connect to the controller: | |
233 |
|
233 | |||
234 | * The different processes have to all be behind the firewall. |
|
234 | * The different processes have to all be behind the firewall. | |
235 |
|
235 | |||
236 | or: |
|
236 | or: | |
237 |
|
237 | |||
238 | * The user has to use SSH port forwarding to tunnel the |
|
238 | * The user has to use SSH port forwarding to tunnel the | |
239 | connections through the firewall. |
|
239 | connections through the firewall. | |
240 |
|
240 | |||
241 | In either case, an attacker is presented with additional barriers that prevent |
|
241 | In either case, an attacker is presented with additional barriers that prevent | |
242 | attacking or even probing the system. |
|
242 | attacking or even probing the system. | |
243 |
|
243 | |||
244 | Summary |
|
244 | Summary | |
245 | ======= |
|
245 | ======= | |
246 |
|
246 | |||
247 | IPython's architecture has been carefully designed with security in mind. The |
|
247 | IPython's architecture has been carefully designed with security in mind. The | |
248 | capabilities-based authentication model, in conjunction with SSH-tunneled |
|
248 | capabilities-based authentication model, in conjunction with SSH-tunneled | |
249 | TCP/IP channels, addresses the core potential vulnerabilities in the system, |
|
249 | TCP/IP channels, addresses the core potential vulnerabilities in the system, | |
250 | while still enabling users to use the system in open networks. |
|
250 | while still enabling users to use the system in open networks. | |
251 |
|
251 | |||
252 | Other questions |
|
252 | Other questions | |
253 | =============== |
|
253 | =============== | |
254 |
|
254 | |||
255 | .. note:: |
|
255 | .. note:: | |
256 |
|
256 | |||
257 | This does not apply to ZMQ, but I am sure there will be questions. |
|
257 | This does not apply to ZMQ, but I am sure there will be questions. | |
258 |
|
258 | |||
259 | About keys |
|
259 | About keys | |
260 | ---------- |
|
260 | ---------- | |
261 |
|
261 | |||
262 | Can you clarify the roles of the certificate and its keys versus the FURL, |
|
262 | Can you clarify the roles of the certificate and its keys versus the FURL, | |
263 | which is also called a key? |
|
263 | which is also called a key? | |
264 |
|
264 | |||
265 | The certificate created by IPython processes is a standard public key x509 |
|
265 | The certificate created by IPython processes is a standard public key x509 | |
266 | certificate, which is used by the SSL handshake protocol to set up an encrypted |
|
266 | certificate, which is used by the SSL handshake protocol to set up an encrypted | |
267 | channel between the controller and the IPython engine or client. The public |
|
267 | channel between the controller and the IPython engine or client. The public | |
268 | and private keys associated with this certificate are used only by the SSL |
|
268 | and private keys associated with this certificate are used only by the SSL | |
269 | handshake protocol in setting up this encrypted channel. |
|
269 | handshake protocol in setting up this encrypted channel. | |
270 |
|
270 | |||
271 | The FURL serves a completely different and independent purpose from the |
|
271 | The FURL serves a completely different and independent purpose from the | |
272 | key pair associated with the certificate. When we refer to a FURL as a |
|
272 | key pair associated with the certificate. When we refer to a FURL as a | |
273 | key, we are using the word "key" in the capabilities-based security model |
|
273 | key, we are using the word "key" in the capabilities-based security model | |
274 | sense. This has nothing to do with "key" in the public/private key sense used |
|
274 | sense. This has nothing to do with "key" in the public/private key sense used | |
275 | in the SSL protocol. |
|
275 | in the SSL protocol. | |
276 |
|
276 | |||
277 | That said, the FURL is used as a cryptographic key to grant |
|
277 | That said, the FURL is used as a cryptographic key to grant | |
278 | IPython engines and clients access to particular capabilities that the |
|
278 | IPython engines and clients access to particular capabilities that the | |
279 | controller offers. |
|
279 | controller offers. | |
280 |
|
280 | |||
281 | Self signed certificates |
|
281 | Self signed certificates | |
282 | ------------------------ |
|
282 | ------------------------ | |
283 |
|
283 | |||
284 | Is the controller creating a self-signed certificate? Is this created per |
|
284 | Is the controller creating a self-signed certificate? Is this created per | |
285 | instance/session, as a one-time setup, or each time the controller is started? |
|
285 | instance/session, as a one-time setup, or each time the controller is started? | |
286 |
|
286 | |||
287 | The Foolscap network protocol, which handles the SSL protocol details, creates |
|
287 | The Foolscap network protocol, which handles the SSL protocol details, creates | |
288 | a self-signed x509 certificate using OpenSSL for each IPython process. The |
|
288 | a self-signed x509 certificate using OpenSSL for each IPython process. The | |
289 | lifetime of the certificate is handled differently for the IPython controller |
|
289 | lifetime of the certificate is handled differently for the IPython controller | |
290 | and the engines/client. |
|
290 | and the engines/client. | |
291 |
|
291 | |||
292 | For the IPython engines and client, the certificate is only held in memory for |
|
292 | For the IPython engines and client, the certificate is only held in memory for | |
293 | the lifetime of its process. It is never written to disk. |
|
293 | the lifetime of its process. It is never written to disk. | |
294 |
|
294 | |||
295 | For the controller, the certificate can be created anew each time the |
|
295 | For the controller, the certificate can be created anew each time the | |
296 | controller starts or it can be created once and reused each time the |
|
296 | controller starts or it can be created once and reused each time the | |
297 | controller starts. If at any point, the certificate is deleted, a new one is |
|
297 | controller starts. If at any point, the certificate is deleted, a new one is | |
298 | created the next time the controller starts. |
|
298 | created the next time the controller starts. | |
299 |
|
299 | |||
300 | SSL private key |
|
300 | SSL private key | |
301 | --------------- |
|
301 | --------------- | |
302 |
|
302 | |||
303 | How is the private key (associated with the certificate) distributed? |
|
303 | How is the private key (associated with the certificate) distributed? | |
304 |
|
304 | |||
305 | In the usual implementation of the SSL protocol, the private key is never |
|
305 | In the usual implementation of the SSL protocol, the private key is never | |
306 | distributed. We always follow this standard. |
|
306 | distributed. We always follow this standard. | |
307 |
|
307 | |||
308 | SSL versus Foolscap authentication |
|
308 | SSL versus Foolscap authentication | |
309 | ---------------------------------- |
|
309 | ---------------------------------- | |
310 |
|
310 | |||
311 | Many SSL connections only perform one-sided authentication (the server to the |
|
311 | Many SSL connections only perform one-sided authentication (the server to the | |
312 | client). How is the client authentication in IPython's system related to SSL |
|
312 | client). How is the client authentication in IPython's system related to SSL | |
313 | authentication? |
|
313 | authentication? | |
314 |
|
314 | |||
315 | We perform a two-way SSL handshake in which both parties request and verify |
|
315 | We perform a two-way SSL handshake in which both parties request and verify | |
316 | the certificate of their peer. This mutual authentication is handled by the |
|
316 | the certificate of their peer. This mutual authentication is handled by the | |
317 | SSL handshake and is separate and independent from the additional |
|
317 | SSL handshake and is separate and independent from the additional | |
318 | authentication steps that the CLIENT and SERVER perform after an encrypted |
|
318 | authentication steps that the CLIENT and SERVER perform after an encrypted | |
319 | channel is established. |
|
319 | channel is established. | |
320 |
|
320 | |||
321 | .. [RFC5246] <http://tools.ietf.org/html/rfc5246> |
|
321 | .. [RFC5246] <http://tools.ietf.org/html/rfc5246> | |
322 |
|
322 | |||
323 | .. [OpenSSH] <http://www.openssh.com/> |
|
323 | .. [OpenSSH] <http://www.openssh.com/> | |
324 | .. [Paramiko] <http://www.lag.net/paramiko/> |
|
324 | .. [Paramiko] <http://www.lag.net/paramiko/> |
@@ -1,418 +1,418 b'' | |||||
1 | .. _parallel_task: |
|
1 | .. _parallel_task: | |
2 |
|
2 | |||
3 | ========================== |
|
3 | ========================== | |
4 | The IPython task interface |
|
4 | The IPython task interface | |
5 | ========================== |
|
5 | ========================== | |
6 |
|
6 | |||
7 | The task interface to the cluster presents the engines as a fault-tolerant, |
|
7 | The task interface to the cluster presents the engines as a fault-tolerant, | |
8 | dynamically load-balanced system of workers. Unlike the multiengine interface, in |
|
8 | dynamically load-balanced system of workers. Unlike the multiengine interface, in | |
9 | the task interface the user has no direct access to individual engines. By |
|
9 | the task interface the user has no direct access to individual engines. By | |
10 | allowing the IPython scheduler to assign work, this interface is simultaneously |
|
10 | allowing the IPython scheduler to assign work, this interface is simultaneously | |
11 | simpler and more powerful. |
|
11 | simpler and more powerful. | |
12 |
|
12 | |||
13 | Best of all, the user can use both of these interfaces running at the same time |
|
13 | Best of all, the user can use both of these interfaces running at the same time | |
14 | to take advantage of their respective strengths. When the user can break up |
|
14 | to take advantage of their respective strengths. When the user can break up | |
15 | their work into segments that do not depend on previous execution, the |
|
15 | their work into segments that do not depend on previous execution, the | |
16 | task interface is ideal. But it also has more power and flexibility, allowing |
|
16 | task interface is ideal. But it also has more power and flexibility, allowing | |
17 | the user to guide the distribution of jobs, without having to assign tasks to |
|
17 | the user to guide the distribution of jobs, without having to assign tasks to | |
18 | engines explicitly. |
|
18 | engines explicitly. | |
19 |
|
19 | |||
20 | Starting the IPython controller and engines |
|
20 | Starting the IPython controller and engines | |
21 | =========================================== |
|
21 | =========================================== | |
22 |
|
22 | |||
23 | To follow along with this tutorial, you will need to start the IPython |
|
23 | To follow along with this tutorial, you will need to start the IPython | |
24 | controller and four IPython engines. The simplest way of doing this is to use |
|
24 | controller and four IPython engines. The simplest way of doing this is to use | |
25 | the :command:`ipcluster` command:: |
|
25 | the :command:`ipcluster` command:: | |
26 |
|
26 | |||
27 | $ ipcluster start -n 4 |
|
27 | $ ipcluster start -n 4 | |
28 |
|
28 | |||
29 | For more detailed information about starting the controller and engines, see |
|
29 | For more detailed information about starting the controller and engines, see | |
30 | our :ref:`introduction <ip1par>` to using IPython for parallel computing. |
|
30 | our :ref:`introduction <ip1par>` to using IPython for parallel computing. | |
31 |
|
31 | |||
32 | Creating a ``Client`` instance |
|
32 | Creating a ``Client`` instance | |
33 | ============================== |
|
33 | ============================== | |
34 |
|
34 | |||
35 | The first step is to import the :mod:`IPython.parallel` |
|
35 | The first step is to import the :mod:`IPython.parallel` | |
36 | module and then create a :class:`.Client` instance, and we will also be using |
|
36 | module and then create a :class:`.Client` instance, and we will also be using | |
37 | a :class:`LoadBalancedView`, here called `lview`: |
|
37 | a :class:`LoadBalancedView`, here called `lview`: | |
38 |
|
38 | |||
39 | .. sourcecode:: ipython |
|
39 | .. sourcecode:: ipython | |
40 |
|
40 | |||
41 | In [1]: from IPython.parallel import Client |
|
41 | In [1]: from IPython.parallel import Client | |
42 |
|
42 | |||
43 | In [2]: rc = Client() |
|
43 | In [2]: rc = Client() | |
44 |
|
44 | |||
45 |
|
45 | |||
46 | This form assumes that the controller was started on localhost with default |
|
46 | This form assumes that the controller was started on localhost with default | |
47 | configuration. If not, the location of the controller must be given as an |
|
47 | configuration. If not, the location of the controller must be given as an | |
48 | argument to the constructor: |
|
48 | argument to the constructor: | |
49 |
|
49 | |||
50 | .. sourcecode:: ipython |
|
50 | .. sourcecode:: ipython | |
51 |
|
51 | |||
52 | # for a visible LAN controller listening on an external port: |
|
52 | # for a visible LAN controller listening on an external port: | |
53 | In [2]: rc = Client('tcp://192.168.1.16:10101') |
|
53 | In [2]: rc = Client('tcp://192.168.1.16:10101') | |
54 | # or to connect with a specific profile you have set up: |
|
54 | # or to connect with a specific profile you have set up: | |
55 | In [3]: rc = Client(profile='mpi') |
|
55 | In [3]: rc = Client(profile='mpi') | |
56 |
|
56 | |||
57 | For load-balanced execution, we will make use of a :class:`LoadBalancedView` object, which can |
|
57 | For load-balanced execution, we will make use of a :class:`LoadBalancedView` object, which can | |
58 | be constructed via the client's :meth:`load_balanced_view` method: |
|
58 | be constructed via the client's :meth:`load_balanced_view` method: | |
59 |
|
59 | |||
60 | .. sourcecode:: ipython |
|
60 | .. sourcecode:: ipython | |
61 |
|
61 | |||
62 | In [4]: lview = rc.load_balanced_view() # default load-balanced view |
|
62 | In [4]: lview = rc.load_balanced_view() # default load-balanced view | |
63 |
|
63 | |||
64 | .. seealso:: |
|
64 | .. seealso:: | |
65 |
|
65 | |||
66 | For more information, see the in-depth explanation of :ref:`Views <parallel_details>`. |
|
66 | For more information, see the in-depth explanation of :ref:`Views <parallel_details>`. | |
67 |
|
67 | |||
68 |
|
68 | |||
69 | Quick and easy parallelism |
|
69 | Quick and easy parallelism | |
70 | ========================== |
|
70 | ========================== | |
71 |
|
71 | |||
72 | In many cases, you simply want to apply a Python function to a sequence of |
|
72 | In many cases, you simply want to apply a Python function to a sequence of | |
73 | objects, but *in parallel*. Like the multiengine interface, these can be |
|
73 | objects, but *in parallel*. Like the multiengine interface, these can be | |
74 | implemented via the task interface. The exact same tools can perform these |
|
74 | implemented via the task interface. The exact same tools can perform these | |
75 | actions in load-balanced ways as well as multiplexed ways: a parallel version |
|
75 | actions in load-balanced ways as well as multiplexed ways: a parallel version | |
76 | of :func:`map` and the :func:`@parallel` function decorator. If one specifies the |
|
76 | of :func:`map` and the :func:`@parallel` function decorator. If one specifies the | |
77 | argument `balanced=True`, then they are dynamically load balanced. Thus, if the |
|
77 | argument `balanced=True`, then they are dynamically load balanced. Thus, if the | |
78 | execution time per item varies significantly, you should use the versions in |
|
78 | execution time per item varies significantly, you should use the versions in | |
79 | the task interface. |
|
79 | the task interface. | |
80 |
|
80 | |||
81 | Parallel map |
|
81 | Parallel map | |
82 | ------------ |
|
82 | ------------ | |
83 |
|
83 | |||
84 | To load-balance :meth:`map`, simply use a LoadBalancedView: |
|
84 | To load-balance :meth:`map`, simply use a LoadBalancedView: | |
85 |
|
85 | |||
86 | .. sourcecode:: ipython |
|
86 | .. sourcecode:: ipython | |
87 |
|
87 | |||
88 | In [62]: lview.block = True |
|
88 | In [62]: lview.block = True | |
89 |
|
89 | |||
90 | In [63]: serial_result = map(lambda x:x**10, range(32)) |
|
90 | In [63]: serial_result = map(lambda x:x**10, range(32)) | |
91 |
|
91 | |||
92 | In [64]: parallel_result = lview.map(lambda x:x**10, range(32)) |
|
92 | In [64]: parallel_result = lview.map(lambda x:x**10, range(32)) | |
93 |
|
93 | |||
94 | In [65]: serial_result==parallel_result |
|
94 | In [65]: serial_result==parallel_result | |
95 | Out[65]: True |
|
95 | Out[65]: True | |
96 |
|
96 | |||
97 | Parallel function decorator |
|
97 | Parallel function decorator | |
98 | --------------------------- |
|
98 | --------------------------- | |
99 |
|
99 | |||
100 | Parallel functions are just like normal functions, but they can be called on |
|
100 | Parallel functions are just like normal functions, but they can be called on | |
101 | sequences and *in parallel*. The multiengine interface provides a decorator |
|
101 | sequences and *in parallel*. The multiengine interface provides a decorator | |
102 | that turns any Python function into a parallel function: |
|
102 | that turns any Python function into a parallel function: | |
103 |
|
103 | |||
104 | .. sourcecode:: ipython |
|
104 | .. sourcecode:: ipython | |
105 |
|
105 | |||
106 | In [10]: @lview.parallel() |
|
106 | In [10]: @lview.parallel() | |
107 | ....: def f(x): |
|
107 | ....: def f(x): | |
108 | ....: return 10.0*x**4 |
|
108 | ....: return 10.0*x**4 | |
109 | ....: |
|
109 | ....: | |
110 |
|
110 | |||
111 | In [11]: f.map(range(32)) # this is done in parallel |
|
111 | In [11]: f.map(range(32)) # this is done in parallel | |
112 | Out[11]: [0.0,10.0,160.0,...] |
|
112 | Out[11]: [0.0,10.0,160.0,...] | |
113 |
|
113 | |||
114 | .. _parallel_dependencies: |
|
114 | .. _parallel_dependencies: | |
115 |
|
115 | |||
116 | Dependencies |
|
116 | Dependencies | |
117 | ============ |
|
117 | ============ | |
118 |
|
118 | |||
119 | Often, pure atomic load-balancing is too primitive for your work. In these cases, you |
|
119 | Often, pure atomic load-balancing is too primitive for your work. In these cases, you | |
120 | may want to associate some kind of `Dependency` that describes when, where, or whether |
|
120 | may want to associate some kind of `Dependency` that describes when, where, or whether | |
121 | a task can be run. In IPython, we provide two types of dependencies: |
|
121 | a task can be run. In IPython, we provide two types of dependencies: | |
122 | `Functional Dependencies`_ and `Graph Dependencies`_. |
|
122 | `Functional Dependencies`_ and `Graph Dependencies`_. | |
123 |
|
123 | |||
124 | .. note:: |
|
124 | .. note:: | |
125 |
|
125 | |||
126 | It is important to note that the pure ZeroMQ scheduler does not support dependencies, |
|
126 | It is important to note that the pure ZeroMQ scheduler does not support dependencies, | |
127 | and you will see errors or warnings if you try to use dependencies with the pure |
|
127 | and you will see errors or warnings if you try to use dependencies with the pure | |
128 | scheduler. |
|
128 | scheduler. | |
129 |
|
129 | |||
130 | Functional Dependencies |
|
130 | Functional Dependencies | |
131 | ----------------------- |
|
131 | ----------------------- | |
132 |
|
132 | |||
133 | Functional dependencies are used to determine whether a given engine is capable of running |
|
133 | Functional dependencies are used to determine whether a given engine is capable of running | |
134 | a particular task. This is implemented via a special :class:`Exception` class, |
|
134 | a particular task. This is implemented via a special :class:`Exception` class, | |
135 | :class:`UnmetDependency`, found in `IPython.parallel.error`. Its use is very simple: |
|
135 | :class:`UnmetDependency`, found in `IPython.parallel.error`. Its use is very simple: | |
136 | if a task fails with an UnmetDependency exception, then the scheduler, instead of relaying |
|
136 | if a task fails with an UnmetDependency exception, then the scheduler, instead of relaying | |
137 | the error up to the client like any other error, catches the error, and submits the task |
|
137 | the error up to the client like any other error, catches the error, and submits the task | |
138 | to a different engine. This will repeat indefinitely, and a task will never be submitted |
|
138 | to a different engine. This will repeat indefinitely, and a task will never be submitted | |
139 | to a given engine a second time. |
|
139 | to a given engine a second time. | |
140 |
|
140 | |||
141 | You can manually raise the :class:`UnmetDependency` yourself, but IPython has provided |
|
141 | You can manually raise the :class:`UnmetDependency` yourself, but IPython has provided | |
142 | some decorators for facilitating this behavior. |
|
142 | some decorators for facilitating this behavior. | |
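
For instance, a task might raise it when a required resource is missing. A sketch,
where :func:`has_gpu` and :func:`run_on_gpu` are hypothetical helpers:

.. sourcecode:: ipython

    In [8]: from IPython.parallel.error import UnmetDependency

    In [9]: def gpu_task():
       ...:     if not has_gpu():          # hypothetical capability check
       ...:         raise UnmetDependency  # scheduler will retry elsewhere
       ...:     return run_on_gpu()        # hypothetical workload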
143 |
|
143 | |||
144 | There are two decorators and a class used for functional dependencies: |
|
144 | There are two decorators and a class used for functional dependencies: | |
145 |
|
145 | |||
146 | .. sourcecode:: ipython |
|
146 | .. sourcecode:: ipython | |
147 |
|
147 | |||
148 | In [9]: from IPython.parallel.dependency import depend, require, dependent |
|
148 | In [9]: from IPython.parallel.dependency import depend, require, dependent | |
149 |
|
149 | |||
150 | @require |
|
150 | @require | |
151 | ******** |
|
151 | ******** | |
152 |
|
152 | |||
153 | The simplest sort of dependency is requiring that a Python module is available. The |
|
153 | The simplest sort of dependency is requiring that a Python module is available. The | |
154 | ``@require`` decorator lets you define a function that will only run on engines where names |
|
154 | ``@require`` decorator lets you define a function that will only run on engines where names | |
155 | you specify are importable: |
|
155 | you specify are importable: | |
156 |
|
156 | |||
157 | .. sourcecode:: ipython |
|
157 | .. sourcecode:: ipython | |
158 |
|
158 | |||
159 | In [10]: @require('numpy', 'zmq') |
|
159 | In [10]: @require('numpy', 'zmq') | |
160 | ...: def myfunc(): |
|
160 | ...: def myfunc(): | |
161 | ...: return dostuff() |
|
161 | ...: return dostuff() | |
162 |
|
162 | |||
163 | Now, any time you apply :func:`myfunc`, the task will only run on a machine that has |
|
163 | Now, any time you apply :func:`myfunc`, the task will only run on a machine that has | |
164 | numpy and pyzmq available, and when :func:`myfunc` is called, numpy and zmq will be imported. |
|
164 | numpy and pyzmq available, and when :func:`myfunc` is called, numpy and zmq will be imported. | |
165 |
|
165 | |||
166 | @depend |
|
166 | @depend | |
167 | ******* |
|
167 | ******* | |
168 |
|
168 | |||
169 | The ``@depend`` decorator lets you decorate any function with any *other* function to |
|
169 | The ``@depend`` decorator lets you decorate any function with any *other* function to | |
170 | evaluate the dependency. The dependency function will be called at the start of the task, |
|
170 | evaluate the dependency. The dependency function will be called at the start of the task, | |
171 | and if it returns ``False``, then the dependency will be considered unmet, and the task |
|
171 | and if it returns ``False``, then the dependency will be considered unmet, and the task | |
172 | will be assigned to another engine. If the dependency returns *anything other than |
|
172 | will be assigned to another engine. If the dependency returns *anything other than | |
173 | ``False``*, the rest of the task will continue. |
|
173 | ``False``*, the rest of the task will continue. | |
174 |
|
174 | |||
175 | .. sourcecode:: ipython |
|
175 | .. sourcecode:: ipython | |
176 |
|
176 | |||
177 | In [10]: def platform_specific(plat): |
|
177 | In [10]: def platform_specific(plat): | |
178 | ...: import sys |
|
178 | ...: import sys | |
179 | ...: return sys.platform == plat |
|
179 | ...: return sys.platform == plat | |
180 |
|
180 | |||
181 | In [11]: @depend(platform_specific, 'darwin') |
|
181 | In [11]: @depend(platform_specific, 'darwin') | |
182 | ...: def mactask(): |
|
182 | ...: def mactask(): | |
183 | ...: do_mac_stuff() |
|
183 | ...: do_mac_stuff() | |
184 |
|
184 | |||
185 | In [12]: @depend(platform_specific, 'nt') |
|
185 | In [12]: @depend(platform_specific, 'nt') | |
186 | ...: def wintask(): |
|
186 | ...: def wintask(): | |
187 | ...: do_windows_stuff() |
|
187 | ...: do_windows_stuff() | |
188 |
|
188 | |||
189 | In this case, any time you apply ``mactask``, it will only run on an OSX machine. |
|
189 | In this case, any time you apply ``mactask``, it will only run on an OSX machine. | |
190 | ``@depend`` is just like ``apply``, in that it has a ``@depend(f,*args,**kwargs)`` |
|
190 | ``@depend`` is just like ``apply``, in that it has a ``@depend(f,*args,**kwargs)`` | |
191 | signature. |
|
191 | signature. | |
192 |
|
192 | |||
193 | dependents |
|
193 | dependents | |
194 | ********** |
|
194 | ********** | |
195 |
|
195 | |||
196 | You don't have to use the decorators on your tasks. If, for instance, you want |
|
196 | You don't have to use the decorators on your tasks. If, for instance, you want | |
197 | to run tasks with a single function but varying dependencies, you can directly construct |
|
197 | to run tasks with a single function but varying dependencies, you can directly construct | |
198 | the :class:`dependent` object that the decorators use: |
|
198 | the :class:`dependent` object that the decorators use: | |
199 |
|
199 | |||
200 | .. sourcecode:: ipython |
|
200 | .. sourcecode:: ipython | |
201 |
|
201 | |||
202 | In [13]: def mytask(*args): |
|
202 | In [13]: def mytask(*args): | |
203 | ...: dostuff() |
|
203 | ...: dostuff() | |
204 |
|
204 | |||
205 | In [14]: mactask = dependent(mytask, platform_specific, 'darwin') |
|
205 | In [14]: mactask = dependent(mytask, platform_specific, 'darwin') | |
206 | # this is the same as decorating the declaration of mytask with @depend |
|
206 | # this is the same as decorating the declaration of mytask with @depend | |
207 | # but you can do it again: |
|
207 | # but you can do it again: | |
208 |
|
208 | |||
209 | In [15]: wintask = dependent(mytask, platform_specific, 'nt') |
|
209 | In [15]: wintask = dependent(mytask, platform_specific, 'nt') | |
210 |
|
210 | |||
211 | # in general: |
|
211 | # in general: | |
212 | In [16]: t = dependent(f, g, *dargs, **dkwargs) |
|
212 | In [16]: t = dependent(f, g, *dargs, **dkwargs) | |
213 |
|
213 | |||
214 | # is equivalent to: |
|
214 | # is equivalent to: | |
215 | In [17]: @depend(g, *dargs, **dkwargs) |
|
215 | In [17]: @depend(g, *dargs, **dkwargs) | |
216 | ...: def t(a,b,c): |
|
216 | ...: def t(a,b,c): | |
217 | ...: # contents of f |
|
217 | ...: # contents of f | |
218 |
|
218 | |||
219 | Graph Dependencies |
|
219 | Graph Dependencies | |
220 | ------------------ |
|
220 | ------------------ | |
221 |
|
221 | |||
222 | Sometimes you want to restrict the time and/or location to run a given task as a function |
|
222 | Sometimes you want to restrict the time and/or location to run a given task as a function | |
223 | of the time and/or location of other tasks. This is implemented via a subclass of |
|
223 | of the time and/or location of other tasks. This is implemented via a subclass of | |
224 | :class:`set`, called a :class:`Dependency`. A Dependency is just a set of `msg_ids` |
|
224 | :class:`set`, called a :class:`Dependency`. A Dependency is just a set of `msg_ids` | |
225 | corresponding to tasks, and a few attributes to guide how to decide when the Dependency |
|
225 | corresponding to tasks, and a few attributes to guide how to decide when the Dependency | |
226 | has been met. |
|
226 | has been met. | |
227 |
|
227 | |||
228 | The switches we provide for interpreting whether a given dependency set has been met: |
|
228 | The switches we provide for interpreting whether a given dependency set has been met: | |
229 |
|
229 | |||
230 | any|all |
|
230 | any|all | |
231 | Whether the dependency is considered met if *any* of the dependencies are done, or |
|
231 | Whether the dependency is considered met if *any* of the dependencies are done, or | |
232 | only after *all* of them have finished. This is set by a Dependency's :attr:`all` |
|
232 | only after *all* of them have finished. This is set by a Dependency's :attr:`all` | |
233 | boolean attribute, which defaults to ``True``. |
|
233 | boolean attribute, which defaults to ``True``. | |
234 |
|
234 | |||
235 | success [default: True] |
|
235 | success [default: True] | |
236 | Whether to consider tasks that succeeded as fulfilling dependencies. |
|
236 | Whether to consider tasks that succeeded as fulfilling dependencies. | |
237 |
|
237 | |||
238 | failure [default: False] |
|
238 | failure [default: False] | |
239 | Whether to consider tasks that failed as fulfilling dependencies. |
|
239 | Whether to consider tasks that failed as fulfilling dependencies. | |
240 | Using `failure=True,success=False` is useful for setting up cleanup tasks to be run |
|
240 | Using `failure=True,success=False` is useful for setting up cleanup tasks to be run | |
241 | only when tasks have failed. |
|
241 | only when tasks have failed. | |
242 |
|
242 | |||
243 | Sometimes you want to run a task after another, but only if that task succeeded. In this case, |
|
243 | Sometimes you want to run a task after another, but only if that task succeeded. In this case, | |
244 | ``success`` should be ``True`` and ``failure`` should be ``False``. However sometimes you may |
|
244 | ``success`` should be ``True`` and ``failure`` should be ``False``. However sometimes you may | |
245 | not care whether the task succeeds, and always want the second task to run, in which case you |
|
245 | not care whether the task succeeds, and always want the second task to run, in which case you | |
246 | should use `success=failure=True`. The default behavior is to only use successes. |
|
246 | should use `success=failure=True`. The default behavior is to only use successes. | |
247 |
|
247 | |||
248 | There are other switches for interpretation that are made at the *task* level. These are |
|
248 | There are other switches for interpretation that are made at the *task* level. These are | |
249 | specified via keyword arguments to the client's :meth:`apply` method. |
|
249 | specified via keyword arguments to the client's :meth:`apply` method. | |
250 |
|
250 | |||
251 | after,follow |
|
251 | after,follow | |
252 | You may want to run a task *after* a given set of dependencies have been run and/or |
|
252 | You may want to run a task *after* a given set of dependencies have been run and/or | |
253 | run it *where* another set of dependencies are met. To support this, every task has an |
|
253 | run it *where* another set of dependencies are met. To support this, every task has an | |
254 | `after` dependency to restrict time, and a `follow` dependency to restrict |
|
254 | `after` dependency to restrict time, and a `follow` dependency to restrict | |
255 | destination. |
|
255 | destination. | |
256 |
|
256 | |||
257 | timeout |
|
257 | timeout | |
258 | You may also want to set a time-limit for how long the scheduler should wait before a |
|
258 | You may also want to set a time-limit for how long the scheduler should wait before a | |
259 | task's dependencies are met. This is done via a `timeout`, which defaults to 0, which |
|
259 | task's dependencies are met. This is done via a `timeout`, which defaults to 0, which | |
260 | indicates that the task should never timeout. If the timeout is reached, and the |
|
260 | indicates that the task should never timeout. If the timeout is reached, and the | |
261 | scheduler still hasn't been able to assign the task to an engine, the task will fail |
|
261 | scheduler still hasn't been able to assign the task to an engine, the task will fail | |
262 | with a :class:`DependencyTimeout`. |
|
262 | with a :class:`DependencyTimeout`. | |
263 |
|
263 | |||
264 | .. note:: |
|
264 | .. note:: | |
265 |
|
265 | |||
266 | Dependencies only work within the task scheduler. You cannot instruct a load-balanced |
|
266 | Dependencies only work within the task scheduler. You cannot instruct a load-balanced | |
267 | task to run after a job submitted via the MUX interface. |
|
267 | task to run after a job submitted via the MUX interface. | |
268 |
|
268 | |||
269 | The simplest form of Dependencies is with `all=True,success=True,failure=False`. In these cases, |
|
269 | The simplest form of Dependencies is with `all=True,success=True,failure=False`. In these cases, | |
270 | you can skip using Dependency objects, and just pass msg_ids or AsyncResult objects as the |
|
270 | you can skip using Dependency objects, and just pass msg_ids or AsyncResult objects as the | |
271 | `follow` and `after` keywords to :meth:`client.apply`: |
|
271 | `follow` and `after` keywords to :meth:`client.apply`: | |
272 |
|
272 | |||
273 | .. sourcecode:: ipython |
|
273 | .. sourcecode:: ipython | |
274 |
|
274 | |||
275 | In [14]: client.block=False |
|
275 | In [14]: client.block=False | |
276 |
|
276 | |||
277 | In [15]: ar = lview.apply(f, args, kwargs) |
|
277 | In [15]: ar = lview.apply(f, args, kwargs) | |
278 |
|
278 | |||
279 | In [16]: ar2 = lview.apply(f2) |
|
279 | In [16]: ar2 = lview.apply(f2) | |
280 |
|
280 | |||
281 | In [17]: ar3 = lview.apply_with_flags(f3, after=[ar,ar2]) |
|
281 | In [17]: ar3 = lview.apply_with_flags(f3, after=[ar,ar2]) | |
282 |
|
282 | |||
283 | In [17]: ar4 = lview.apply_with_flags(f3, follow=[ar], timeout=2.5) |
|
283 | In [17]: ar4 = lview.apply_with_flags(f3, follow=[ar], timeout=2.5) | |
284 |
|
284 | |||
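For non-default switches, you can construct the :class:`Dependency` explicitly.
A sketch (``cleanup`` is a hypothetical function to run once either task has
finished, successfully or not):

.. sourcecode:: ipython

    In [18]: from IPython.parallel.dependency import Dependency

    # met when *any* listed task finishes, whether it succeeded or failed
    In [19]: dep = Dependency([ar.msg_ids[0], ar2.msg_ids[0]],
        ...:                  all=False, success=True, failure=True)

    In [20]: ar5 = lview.apply_with_flags(cleanup, after=dep)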
285 |
|
285 | |||
286 | .. seealso:: |
|
286 | .. seealso:: | |
287 |
|
287 | |||
288 | Some parallel workloads can be described as a `Directed Acyclic Graph |
|
288 | Some parallel workloads can be described as a `Directed Acyclic Graph | |
289 | <http://en.wikipedia.org/wiki/Directed_acyclic_graph>`_, or DAG. See :ref:`DAG |
|
289 | <http://en.wikipedia.org/wiki/Directed_acyclic_graph>`_, or DAG. See :ref:`DAG | |
290 | Dependencies <dag_dependencies>` for an example demonstrating how to map a NetworkX DAG |
|
290 | Dependencies <dag_dependencies>` for an example demonstrating how to map a NetworkX DAG | |
291 | onto task dependencies. |
|
291 | onto task dependencies. | |
292 |
|
292 | |||
293 |
|
293 | |||
294 |
|
294 | |||
295 | Impossible Dependencies |
|
295 | Impossible Dependencies | |
296 | *********************** |
|
296 | *********************** | |
297 |
|
297 | |||
298 | The schedulers do perform some analysis on graph dependencies to determine whether they |
|
298 | The schedulers do perform some analysis on graph dependencies to determine whether they | |
299 | can ever be met. If the scheduler does discover that a dependency cannot be |
|
299 | can ever be met. If the scheduler does discover that a dependency cannot be | |
300 | met, then the task will fail with an :class:`ImpossibleDependency` error. This way, if the |
|
300 | met, then the task will fail with an :class:`ImpossibleDependency` error. This way, if the | |
301 | scheduler realizes that a task can never be run, the task won't sit indefinitely in the |
|
301 | scheduler realizes that a task can never be run, the task won't sit indefinitely in the | |
302 | scheduler clogging the pipeline. |
|
302 | scheduler clogging the pipeline. | |
303 |
|
303 | |||
304 | The basic cases that are checked: |
|
304 | The basic cases that are checked: | |
305 |
|
305 | |||
306 | * depending on nonexistent messages |
|
306 | * depending on nonexistent messages | |
307 | * `follow` dependencies were run on more than one machine and `all=True` |
|
307 | * `follow` dependencies were run on more than one machine and `all=True` | |
308 | * any dependencies failed and `all=True,success=True,failure=False` |
|
308 | * any dependencies failed and `all=True,success=True,failure=False` | |
309 | * all dependencies failed and `all=False,success=True,failure=False` |
|
309 | * all dependencies failed and `all=False,success=True,failure=False` | |
310 |
|
310 | |||
311 | .. warning:: |
|
311 | .. warning:: | |
312 |
|
312 | |||
313 | This analysis has not been proven rigorous, so it is still possible for tasks |
|
313 | This analysis has not been proven rigorous, so it is still possible for tasks | |
314 | to become impossible to run in obscure situations; a timeout may be a good choice. |
|
314 | to become impossible to run in obscure situations; a timeout may be a good choice. | |
315 |
|
315 | |||
316 | .. _parallel_schedulers: |
|
316 | .. _parallel_schedulers: | |
317 |
|
317 | |||
318 | Schedulers |
|
318 | Schedulers | |
319 | ========== |
|
319 | ========== | |
320 |
|
320 | |||
321 | There are a variety of valid ways to determine where jobs should be assigned in a |
|
321 | There are a variety of valid ways to determine where jobs should be assigned in a | |
322 | load-balancing situation. In IPython, we support several standard schemes, and |
|
322 | load-balancing situation. In IPython, we support several standard schemes, and | |
323 | even make it easy to define your own. The scheme can be selected via the ``--scheme`` |
|
323 | even make it easy to define your own. The scheme can be selected via the ``--scheme`` | |
324 | argument to :command:`ipcontroller`, or in the :attr:`HubFactory.scheme` attribute |
|
324 | argument to :command:`ipcontroller`, or in the :attr:`HubFactory.scheme` attribute | |
325 | of a controller config object. |
|
325 | of a controller config object. | |
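
For example, in a controller configuration file, using the attribute named above
(a sketch, following the standard config-file conventions)::

    c = get_config()
    c.HubFactory.scheme = 'weighted'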
326 |
|
326 | |||
327 | The built-in routing schemes: |
|
327 | The built-in routing schemes: | |
328 |
|
328 | |||
329 | To select one of these schemes, simply do:: |
|
329 | To select one of these schemes, simply do:: | |
330 |
|
330 | |||
331 | $ ipcontroller --scheme <schemename> |
|
331 | $ ipcontroller --scheme <schemename> | |
332 | for instance:: |
|
332 | for instance:: | |
333 | $ ipcontroller --scheme lru |
|
333 | $ ipcontroller --scheme lru | |
334 |
|
334 | |||
335 | lru: Least Recently Used |
|
335 | lru: Least Recently Used | |
336 |
|
336 | |||
337 | Always assign work to the least-recently-used engine. A close relative of |
|
337 | Always assign work to the least-recently-used engine. A close relative of | |
338 | round-robin, it will be fair with respect to the number of tasks, but agnostic |
|
338 | round-robin, it will be fair with respect to the number of tasks, but agnostic | |
339 | with respect to the runtime of each task. |
|
339 | with respect to the runtime of each task. | |
340 |
|
340 | |||
341 | plainrandom: Plain Random |
|
341 | plainrandom: Plain Random | |
342 |
|
342 | |||
343 | Randomly picks an engine on which to run. |
|
343 | Randomly picks an engine on which to run. | |
344 |
|
344 | |||
345 | twobin: Two-Bin Random |
|
345 | twobin: Two-Bin Random | |
346 |
|
346 | |||
347 | **Requires numpy** |
|
347 | **Requires numpy** | |
348 |
|
348 | |||
349 | Pick two engines at random, and use the LRU of the two. This is known to be better |
|
349 | Pick two engines at random, and use the LRU of the two. This is known to be better | |
350 | than plain random in many cases, but requires a small amount of computation. |
|
350 | than plain random in many cases, but requires a small amount of computation. | |
351 |
|
351 | |||
352 | leastload: Least Load |
|
352 | leastload: Least Load | |
353 |
|
353 | |||
354 | **This is the default scheme** |
|
354 | **This is the default scheme** | |
355 |
|
355 | |||
356 | Always assign tasks to the engine with the fewest outstanding tasks (LRU breaks ties). |
|
356 | Always assign tasks to the engine with the fewest outstanding tasks (LRU breaks ties). | |
357 |
|
357 | |||
358 | weighted: Weighted Two-Bin Random |
|
358 | weighted: Weighted Two-Bin Random | |
359 |
|
359 | |||
360 | **Requires numpy** |
|
360 | **Requires numpy** | |
361 |
|
361 | |||
362 | Pick two engines at random using the number of outstanding tasks as inverse weights, |
|
362 | Pick two engines at random using the number of outstanding tasks as inverse weights, | |
363 | and use the one with the lower load. |
|
363 | and use the one with the lower load. | |
364 |
|
364 | |||
365 |
|
365 | |||
366 | Pure ZMQ Scheduler |
|
366 | Pure ZMQ Scheduler | |
367 | ------------------ |
|
367 | ------------------ | |
368 |
|
368 | |||
369 | For maximum throughput, the 'pure' scheme is not Python at all, but a C-level |
|
369 | For maximum throughput, the 'pure' scheme is not Python at all, but a C-level | |
370 | :class:`MonitoredQueue` from PyZMQ, which uses a ZeroMQ ``XREQ`` socket to perform all |
|
370 | :class:`MonitoredQueue` from PyZMQ, which uses a ZeroMQ ``XREQ`` socket to perform all | |
371 | load-balancing. This scheduler does not support any of the advanced features of the Python |
|
371 | load-balancing. This scheduler does not support any of the advanced features of the Python | |
372 | :class:`.Scheduler`. |
|
372 | :class:`.Scheduler`. | |
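
To select it, pass the scheme name to the controller just as with the Python schemes::

    $ ipcontroller --scheme pure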
373 |
|
373 | |||
374 | Disabled features when using the ZMQ Scheduler: |
|
374 | Disabled features when using the ZMQ Scheduler: | |
375 |
|
375 | |||
376 | * Engine unregistration |
|
376 | * Engine unregistration | |
377 | Task farming will be disabled if an engine unregisters. |
|
377 | Task farming will be disabled if an engine unregisters. | |
378 | Further, if an engine is unregistered during computation, the scheduler may not recover. |
|
378 | Further, if an engine is unregistered during computation, the scheduler may not recover. | |
379 | * Dependencies |
|
379 | * Dependencies | |
380 | Since there is no Python logic inside the Scheduler, routing decisions cannot be made |
|
380 | Since there is no Python logic inside the Scheduler, routing decisions cannot be made | |
381 | based on message content. |
|
381 | based on message content. | |
382 | * Early destination notification |
|
382 | * Early destination notification | |
383 | The Python schedulers know which engine gets which task, and notify the Hub. This |
|
383 | The Python schedulers know which engine gets which task, and notify the Hub. This | |
384 | allows graceful handling of Engines coming and going. There is no way to know |
|
384 | allows graceful handling of Engines coming and going. There is no way to know | |
385 | where ZeroMQ messages have gone, so there is no way to know what tasks are on which |
|
385 | where ZeroMQ messages have gone, so there is no way to know what tasks are on which | |
386 | engine until they *finish*. This makes recovery from engine shutdown very difficult. |
|
386 | engine until they *finish*. This makes recovery from engine shutdown very difficult. | |
387 |
|
387 | |||
388 |
|
388 | |||
389 | .. note:: |
|
389 | .. note:: | |
390 |
|
390 | |||
391 | TODO: performance comparisons |
|
391 | TODO: performance comparisons | |
392 |
|
392 | |||
393 |
|
393 | |||
394 | More details |
|
394 | More details | |
395 | ============ |
|
395 | ============ | |
396 |
|
396 | |||
397 | The :class:`LoadBalancedView` has many more powerful features that allow quite a bit |
|
397 | The :class:`LoadBalancedView` has many more powerful features that allow quite a bit | |
398 | of flexibility in how tasks are defined and run. The next places to look are |
|
398 | of flexibility in how tasks are defined and run. The next places to look are | |
399 | in the following classes: |
|
399 | in the following classes: | |
400 |
|
400 | |||
401 | * :class:`IPython.parallel.view.LoadBalancedView` |
|
401 | * :class:`IPython.parallel.view.LoadBalancedView` | |
402 | * :class:`IPython.parallel.asyncresult.AsyncResult` |
|
402 | * :class:`IPython.parallel.asyncresult.AsyncResult` | |
403 | * :meth:`IPython.parallel.view.LoadBalancedView.apply` |
|
403 | * :meth:`IPython.parallel.view.LoadBalancedView.apply` | |
404 | * :mod:`IPython.parallel.dependency` |
|
404 | * :mod:`IPython.parallel.dependency` | |
405 |
|
405 | |||
406 | The following is an overview of how to use these classes together: |
|
406 | The following is an overview of how to use these classes together: | |
407 |
|
407 | |||
408 | 1. Create a :class:`Client` and :class:`LoadBalancedView` |
|
408 | 1. Create a :class:`Client` and :class:`LoadBalancedView` | |
409 | 2. Define some functions to be run as tasks |
|
409 | 2. Define some functions to be run as tasks | |
410 | 3. Submit your tasks using the :meth:`apply` method of your
|
410 | 3. Submit your tasks using the :meth:`apply` method of your | 
411 | :class:`LoadBalancedView` instance. |
|
411 | :class:`LoadBalancedView` instance. | |
412 | 4. Use :meth:`Client.get_result` to retrieve the results of the
|
412 | 4. Use :meth:`Client.get_result` to retrieve the results of the | 
413 | tasks, or use the :meth:`AsyncResult.get` method of the results to wait
|
413 | tasks, or use the :meth:`AsyncResult.get` method of the results to wait | 
414 | for and then receive them.
|
414 | for and then receive them. | 
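
As a compact illustration of these four steps, here is a minimal sketch; it
assumes a cluster is already running, and that the view is non-blocking so
that :meth:`apply` returns an :class:`AsyncResult`::

    from IPython.parallel import Client

    client = Client()                    # step 1: connect and make a view
    view = client.load_balanced_view()

    def slow_square(x):                  # step 2: define a task function
        import time
        time.sleep(1)
        return x * x

    ar = view.apply(slow_square, 7)      # step 3: submit the task
    print ar.get()                       # step 4: wait for the result (49)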
415 |
|
415 | |||
416 | .. seealso:: |
|
416 | .. seealso:: | |
417 |
|
417 | |||
418 | A demo of :ref:`DAG Dependencies <dag_dependencies>` with NetworkX and IPython. |
|
418 | A demo of :ref:`DAG Dependencies <dag_dependencies>` with NetworkX and IPython. |
1 | NO CONTENT: file renamed from docs/source/parallelz/parallel_transition.txt to docs/source/parallel/parallel_transition.txt |
|
NO CONTENT: file renamed from docs/source/parallelz/parallel_transition.txt to docs/source/parallel/parallel_transition.txt |
@@ -1,334 +1,334 b'' | |||||
1 | ============================================ |
|
1 | ============================================ | |
2 | Getting started with Windows HPC Server 2008 |
|
2 | Getting started with Windows HPC Server 2008 | |
3 | ============================================ |
|
3 | ============================================ | |
4 |
|
4 | |||
5 | .. note:: |
|
5 | .. note:: | |
6 |
|
6 | |||
7 | Not adapted to zmq yet |
|
7 | Not adapted to zmq yet | |
8 |
|
8 | |||
9 | Introduction |
|
9 | Introduction | |
10 | ============ |
|
10 | ============ | |
11 |
|
11 | |||
12 | The Python programming language is an increasingly popular language for |
|
12 | The Python programming language is an increasingly popular language for | |
13 | numerical computing. This is due to a unique combination of factors. First, |
|
13 | numerical computing. This is due to a unique combination of factors. First, | |
14 | Python is a high-level and *interactive* language that is well matched to |
|
14 | Python is a high-level and *interactive* language that is well matched to | |
15 | interactive numerical work. Second, it is easy (often trivial) to
|
15 | interactive numerical work. Second, it is easy (often trivial) to | 
16 | integrate legacy C/C++/Fortran code into Python. Third, a large number of |
|
16 | integrate legacy C/C++/Fortran code into Python. Third, a large number of | |
17 | high-quality open source projects provide all the needed building blocks for |
|
17 | high-quality open source projects provide all the needed building blocks for | |
18 | numerical computing: numerical arrays (NumPy), algorithms (SciPy), 2D/3D |
|
18 | numerical computing: numerical arrays (NumPy), algorithms (SciPy), 2D/3D | |
19 | Visualization (Matplotlib, Mayavi, Chaco), Symbolic Mathematics (Sage, Sympy) |
|
19 | Visualization (Matplotlib, Mayavi, Chaco), Symbolic Mathematics (Sage, Sympy) | |
20 | and others. |
|
20 | and others. | |
21 |
|
21 | |||
22 | The IPython project is a core part of this open-source toolchain and is |
|
22 | The IPython project is a core part of this open-source toolchain and is | |
23 | focused on creating a comprehensive environment for interactive and |
|
23 | focused on creating a comprehensive environment for interactive and | |
24 | exploratory computing in the Python programming language. It enables all of |
|
24 | exploratory computing in the Python programming language. It enables all of | |
25 | the above tools to be used interactively and consists of two main components: |
|
25 | the above tools to be used interactively and consists of two main components: | |
26 |
|
26 | |||
27 | * An enhanced interactive Python shell with support for interactive plotting |
|
27 | * An enhanced interactive Python shell with support for interactive plotting | |
28 | and visualization. |
|
28 | and visualization. | |
29 | * An architecture for interactive parallel computing. |
|
29 | * An architecture for interactive parallel computing. | |
30 |
|
30 | |||
31 | With these components, it is possible to perform all aspects of a parallel |
|
31 | With these components, it is possible to perform all aspects of a parallel | |
32 | computation interactively. This type of workflow is particularly relevant in |
|
32 | computation interactively. This type of workflow is particularly relevant in | |
33 | scientific and numerical computing where algorithms, code and data are |
|
33 | scientific and numerical computing where algorithms, code and data are | |
34 | continually evolving as the user/developer explores a problem. The broad |
|
34 | continually evolving as the user/developer explores a problem. The broad | |
35 | trends in computing (commodity clusters, multicore, cloud computing, etc.)
|
35 | trends in computing (commodity clusters, multicore, cloud computing, etc.) | 
36 | make these capabilities of IPython particularly relevant. |
|
36 | make these capabilities of IPython particularly relevant. | |
37 |
|
37 | |||
38 | While IPython is a cross platform tool, it has particularly strong support for |
|
38 | While IPython is a cross platform tool, it has particularly strong support for | |
39 | Windows based compute clusters running Windows HPC Server 2008. This document |
|
39 | Windows based compute clusters running Windows HPC Server 2008. This document | |
40 | describes how to get started with IPython on Windows HPC Server 2008. The |
|
40 | describes how to get started with IPython on Windows HPC Server 2008. The | |
41 | content and emphasis here is practical: installing IPython, configuring |
|
41 | content and emphasis here is practical: installing IPython, configuring | |
42 | IPython to use the Windows job scheduler and running example parallel programs |
|
42 | IPython to use the Windows job scheduler and running example parallel programs | |
43 | interactively. A more complete description of IPython's parallel computing |
|
43 | interactively. A more complete description of IPython's parallel computing | |
44 | capabilities can be found in IPython's online documentation |
|
44 | capabilities can be found in IPython's online documentation | |
45 | (http://ipython.scipy.org/moin/Documentation). |
|
45 | (http://ipython.scipy.org/moin/Documentation). | |
46 |
|
46 | |||
47 | Setting up your Windows cluster |
|
47 | Setting up your Windows cluster | |
48 | =============================== |
|
48 | =============================== | |
49 |
|
49 | |||
50 | This document assumes that you already have a cluster running Windows |
|
50 | This document assumes that you already have a cluster running Windows | |
51 | HPC Server 2008. Here is a broad overview of what is involved with setting up |
|
51 | HPC Server 2008. Here is a broad overview of what is involved with setting up | |
52 | such a cluster: |
|
52 | such a cluster: | |
53 |
|
53 | |||
54 | 1. Install Windows Server 2008 on the head and compute nodes in the cluster. |
|
54 | 1. Install Windows Server 2008 on the head and compute nodes in the cluster. | |
55 | 2. Set up the network configuration on each host. Each host should have a
|
55 | 2. Set up the network configuration on each host. Each host should have a | 
56 | static IP address. |
|
56 | static IP address. | |
57 | 3. On the head node, activate the "Active Directory Domain Services" role |
|
57 | 3. On the head node, activate the "Active Directory Domain Services" role | |
58 | and make the head node the domain controller. |
|
58 | and make the head node the domain controller. | |
59 | 4. Join the compute nodes to the newly created Active Directory (AD) domain. |
|
59 | 4. Join the compute nodes to the newly created Active Directory (AD) domain. | |
60 | 5. Set up user accounts in the domain with shared home directories.
|
60 | 5. Set up user accounts in the domain with shared home directories. | 
61 | 6. Install the HPC Pack 2008 on the head node to create a cluster. |
|
61 | 6. Install the HPC Pack 2008 on the head node to create a cluster. | |
62 | 7. Install the HPC Pack 2008 on the compute nodes. |
|
62 | 7. Install the HPC Pack 2008 on the compute nodes. | |
63 |
|
63 | |||
64 | More details about installing and configuring Windows HPC Server 2008 can be |
|
64 | More details about installing and configuring Windows HPC Server 2008 can be | |
65 | found on the Windows HPC Home Page (http://www.microsoft.com/hpc). Regardless |
|
65 | found on the Windows HPC Home Page (http://www.microsoft.com/hpc). Regardless | |
66 | of what steps you follow to set up your cluster, the remainder of this |
|
66 | of what steps you follow to set up your cluster, the remainder of this | |
67 | document will assume that: |
|
67 | document will assume that: | |
68 |
|
68 | |||
69 | * There are domain users that can log on to the AD domain and submit jobs |
|
69 | * There are domain users that can log on to the AD domain and submit jobs | |
70 | to the cluster scheduler. |
|
70 | to the cluster scheduler. | |
71 | * These domain users have shared home directories. While shared home |
|
71 | * These domain users have shared home directories. While shared home | |
72 | directories are not required, they make IPython much easier to
|
72 | directories are not required, they make IPython much easier to | 
73 | use.
|
73 | use. | 
74 |
|
74 | |||
75 | Installation of IPython and its dependencies |
|
75 | Installation of IPython and its dependencies | |
76 | ============================================ |
|
76 | ============================================ | |
77 |
|
77 | |||
78 | IPython and all of its dependencies are freely available and open source. |
|
78 | IPython and all of its dependencies are freely available and open source. | |
79 | These packages provide a powerful and cost-effective approach to numerical and |
|
79 | These packages provide a powerful and cost-effective approach to numerical and | |
80 | scientific computing on Windows. The following dependencies are needed to run |
|
80 | scientific computing on Windows. The following dependencies are needed to run | |
81 | IPython on Windows: |
|
81 | IPython on Windows: | |
82 |
|
82 | |||
83 | * Python 2.6 or 2.7 (http://www.python.org) |
|
83 | * Python 2.6 or 2.7 (http://www.python.org) | |
84 | * pywin32 (http://sourceforge.net/projects/pywin32/) |
|
84 | * pywin32 (http://sourceforge.net/projects/pywin32/) | |
85 | * PyReadline (https://launchpad.net/pyreadline) |
|
85 | * PyReadline (https://launchpad.net/pyreadline) | |
86 | * pyzmq (http://github.com/zeromq/pyzmq/downloads) |
|
86 | * pyzmq (http://github.com/zeromq/pyzmq/downloads) | |
87 | * IPython (http://ipython.scipy.org) |
|
87 | * IPython (http://ipython.scipy.org) | |
88 |
|
88 | |||
89 | In addition, the following dependencies are needed to run the demos described |
|
89 | In addition, the following dependencies are needed to run the demos described | |
90 | in this document. |
|
90 | in this document. | |
91 |
|
91 | |||
92 | * NumPy and SciPy (http://www.scipy.org) |
|
92 | * NumPy and SciPy (http://www.scipy.org) | |
93 | * Matplotlib (http://matplotlib.sourceforge.net/) |
|
93 | * Matplotlib (http://matplotlib.sourceforge.net/) | |
94 |
|
94 | |||
95 | The easiest way of obtaining these dependencies is through the Enthought |
|
95 | The easiest way of obtaining these dependencies is through the Enthought | |
96 | Python Distribution (EPD) (http://www.enthought.com/products/epd.php). EPD is |
|
96 | Python Distribution (EPD) (http://www.enthought.com/products/epd.php). EPD is | |
97 | produced by Enthought, Inc. and contains all of these packages and others in a |
|
97 | produced by Enthought, Inc. and contains all of these packages and others in a | |
98 | single installer and is available free for academic users. While it is also |
|
98 | single installer and is available free for academic users. While it is also | |
99 | possible to download and install each package individually, this is a tedious |
|
99 | possible to download and install each package individually, this is a tedious | |
100 | process. Thus, we highly recommend using EPD to install these packages on |
|
100 | process. Thus, we highly recommend using EPD to install these packages on | |
101 | Windows. |
|
101 | Windows. | |
102 |
|
102 | |||
103 | Regardless of how you install the dependencies, here are the steps you will |
|
103 | Regardless of how you install the dependencies, here are the steps you will | |
104 | need to follow: |
|
104 | need to follow: | |
105 |
|
105 | |||
106 | 1. Install all of the packages listed above, either individually or using EPD |
|
106 | 1. Install all of the packages listed above, either individually or using EPD | |
107 | on the head node, compute nodes and user workstations. |
|
107 | on the head node, compute nodes and user workstations. | |
108 |
|
108 | |||
109 | 2. Make sure that :file:`C:\\Python27` and :file:`C:\\Python27\\Scripts` are |
|
109 | 2. Make sure that :file:`C:\\Python27` and :file:`C:\\Python27\\Scripts` are | |
110 | in the system :envvar:`%PATH%` variable on each node. |
|
110 | in the system :envvar:`%PATH%` variable on each node. | |
111 |
|
111 | |||
112 | 3. Install the latest development version of IPython. This can be done by |
|
112 | 3. Install the latest development version of IPython. This can be done by | |
113 | downloading the development version from the IPython website
|
113 | downloading the development version from the IPython website | 
114 | (http://ipython.scipy.org) and following the installation instructions. |
|
114 | (http://ipython.scipy.org) and following the installation instructions. | |
115 |
|
115 | |||
116 | Further details about installing IPython or its dependencies can be found in |
|
116 | Further details about installing IPython or its dependencies can be found in | |
117 | the online IPython documentation (http://ipython.scipy.org/moin/Documentation) |
|
117 | the online IPython documentation (http://ipython.scipy.org/moin/Documentation) | |
118 | Once you are finished with the installation, you can try IPython out by |
|
118 | Once you are finished with the installation, you can try IPython out by | |
119 | opening a Windows Command Prompt and typing ``ipython``. This will |
|
119 | opening a Windows Command Prompt and typing ``ipython``. This will | |
120 | start IPython's interactive shell and you should see something like the |
|
120 | start IPython's interactive shell and you should see something like the | |
121 | following screenshot: |
|
121 | following screenshot: | |
122 |
|
122 | |||
123 | .. image:: ipython_shell.* |
|
123 | .. image:: ipython_shell.* | |
124 |
|
124 | |||
125 | Starting an IPython cluster |
|
125 | Starting an IPython cluster | |
126 | =========================== |
|
126 | =========================== | |
127 |
|
127 | |||
128 | To use IPython's parallel computing capabilities, you will need to start an |
|
128 | To use IPython's parallel computing capabilities, you will need to start an | |
129 | IPython cluster. An IPython cluster consists of one controller and multiple |
|
129 | IPython cluster. An IPython cluster consists of one controller and multiple | |
130 | engines: |
|
130 | engines: | |
131 |
|
131 | |||
132 | IPython controller |
|
132 | IPython controller | |
133 | The IPython controller manages the engines and acts as a gateway between |
|
133 | The IPython controller manages the engines and acts as a gateway between | |
134 | the engines and the client, which runs in the user's interactive IPython |
|
134 | the engines and the client, which runs in the user's interactive IPython | |
135 | session. The controller is started using the :command:`ipcontroller` |
|
135 | session. The controller is started using the :command:`ipcontroller` | |
136 | command. |
|
136 | command. | |
137 |
|
137 | |||
138 | IPython engine |
|
138 | IPython engine | |
139 | IPython engines run a user's Python code in parallel on the compute nodes. |
|
139 | IPython engines run a user's Python code in parallel on the compute nodes. | |
140 | Engines are started using the :command:`ipengine` command.
|
140 | Engines are started using the :command:`ipengine` command. | 
141 |
|
141 | |||
142 | Once these processes are started, a user can run Python code interactively and |
|
142 | Once these processes are started, a user can run Python code interactively and | |
143 | in parallel on the engines from within the IPython shell using an appropriate |
|
143 | in parallel on the engines from within the IPython shell using an appropriate | |
144 | client. This includes the ability to interact with, plot and visualize data |
|
144 | client. This includes the ability to interact with, plot and visualize data | |
145 | from the engines. |
|
145 | from the engines. | |
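
For reference, the effect of :command:`ipcluster` can be approximated by
starting the pieces by hand, for example in separate Command Prompts::

    ipcontroller        # start the controller first
    ipengine            # then start one ipengine per desired engine
    ipengine

In practice :command:`ipcluster` is preferred, since it also handles the job
scheduler integration described below.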
146 |
|
146 | |||
147 |
IPython has a command line program called :command:`ipcluster |
|
147 | IPython has a command line program called :command:`ipcluster` that automates | |
148 | all aspects of starting the controller and engines on the compute nodes. |
|
148 | all aspects of starting the controller and engines on the compute nodes. | |
149 |
:command:`ipcluster |
|
149 | :command:`ipcluster` has full support for the Windows HPC job scheduler, | |
150 |
meaning that :command:`ipcluster |
|
150 | meaning that :command:`ipcluster` can use this job scheduler to start the | |
151 | controller and engines. In our experience, the Windows HPC job scheduler is |
|
151 | controller and engines. In our experience, the Windows HPC job scheduler is | |
152 | particularly well suited for interactive applications, such as IPython. Once |
|
152 | particularly well suited for interactive applications, such as IPython. Once | |
153 |
:command:`ipcluster |
|
153 | :command:`ipcluster` is configured properly, a user can start an IPython | |
154 | cluster from their local workstation almost instantly, without having to log |
|
154 | cluster from their local workstation almost instantly, without having to log | |
155 | on to the head node (as is typically required by Unix based job schedulers). |
|
155 | on to the head node (as is typically required by Unix based job schedulers). | |
156 | This enables a user to move seamlessly between serial and parallel |
|
156 | This enables a user to move seamlessly between serial and parallel | |
157 | computations. |
|
157 | computations. | |
158 |
|
158 | |||
159 |
In this section we show how to use :command:`ipcluster |
|
159 | In this section we show how to use :command:`ipcluster` to start an IPython | |
160 | cluster using the Windows HPC Server 2008 job scheduler. To make sure that |
|
160 | cluster using the Windows HPC Server 2008 job scheduler. To make sure that | |
161 |
:command:`ipcluster |
|
161 | :command:`ipcluster` is installed and working properly, you should first try | |
162 | to start an IPython cluster on your local host. To do this, open a Windows |
|
162 | to start an IPython cluster on your local host. To do this, open a Windows | |
163 | Command Prompt and type the following command:: |
|
163 | Command Prompt and type the following command:: | |
164 |
|
164 | |||
165 |
ipcluster |
|
165 | ipcluster start -n 2 | |
166 |
|
166 | |||
167 | You should see a number of messages printed to the screen, ending with |
|
167 | You should see a number of messages printed to the screen, ending with | |
168 | "IPython cluster: started". The result should look something like the following |
|
168 | "IPython cluster: started". The result should look something like the following | |
169 | screenshot: |
|
169 | screenshot: | |
170 |
|
170 | |||
171 | .. image:: ipcluster_start.* |
|
171 | .. image:: ipcluster_start.* | |
172 |
|
172 | |||
173 | At this point, the controller and two engines are running on your local host. |
|
173 | At this point, the controller and two engines are running on your local host. | |
174 | This configuration is useful for testing and for situations where you want to |
|
174 | This configuration is useful for testing and for situations where you want to | |
175 | take advantage of multiple cores on your local computer. |
|
175 | take advantage of multiple cores on your local computer. | |
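
As a quick check that the two local engines are usable, the following sketch
connects a client and lists the engine ids; it assumes the default profile
and uses the :class:`MultiEngineClient` API shown later in this document::

    from IPython.parallel import MultiEngineClient

    mec = MultiEngineClient()   # connect to the local test cluster
    print mec.get_ids()         # expect two ids, e.g. [0, 1]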
176 |
|
176 | |||
177 |
Now that we have confirmed that :command:`ipcluster |
|
177 | Now that we have confirmed that :command:`ipcluster` is working properly, we | |
178 | describe how to configure and run an IPython cluster on an actual compute |
|
178 | describe how to configure and run an IPython cluster on an actual compute | |
179 | cluster running Windows HPC Server 2008. Here is an outline of the needed |
|
179 | cluster running Windows HPC Server 2008. Here is an outline of the needed | |
180 | steps: |
|
180 | steps: | |
181 |
|
181 | |||
182 |
1. Create a cluster profile using: ``ipcluster |
|
182 | 1. Create a cluster profile using: ``ipcluster create -p mycluster`` | |
183 |
|
183 | |||
184 | 2. Edit configuration files in the directory :file:`.ipython\\cluster_mycluster` |
|
184 | 2. Edit configuration files in the directory :file:`.ipython\\cluster_mycluster` | |
185 |
|
185 | |||
186 | 3. Start the cluster using: ``ipcluster start -p mycluster -n 32``
|
186 | 3. Start the cluster using: ``ipcluster start -p mycluster -n 32`` | 
187 |
|
187 | |||
188 | Creating a cluster profile |
|
188 | Creating a cluster profile | |
189 | -------------------------- |
|
189 | -------------------------- | |
190 |
|
190 | |||
191 | In most cases, you will have to create a cluster profile to use IPython on a |
|
191 | In most cases, you will have to create a cluster profile to use IPython on a | |
192 | cluster. A cluster profile is a name (like "mycluster") that is associated |
|
192 | cluster. A cluster profile is a name (like "mycluster") that is associated | |
193 | with a particular cluster configuration. The profile name is used by |
|
193 | with a particular cluster configuration. The profile name is used by | |
194 |
:command:`ipcluster |
|
194 | :command:`ipcluster` when working with the cluster. | |
195 |
|
195 | |||
196 | Associated with each cluster profile is a cluster directory. This cluster |
|
196 | Associated with each cluster profile is a cluster directory. This cluster | |
197 | directory is a specially named directory (typically located in the |
|
197 | directory is a specially named directory (typically located in the | |
198 | :file:`.ipython` subdirectory of your home directory) that contains the |
|
198 | :file:`.ipython` subdirectory of your home directory) that contains the | |
199 | configuration files for a particular cluster profile, as well as log files and |
|
199 | configuration files for a particular cluster profile, as well as log files and | |
200 | security keys. The naming convention for cluster directories is: |
|
200 | security keys. The naming convention for cluster directories is: | |
201 | :file:`cluster_<profile name>`. Thus, the cluster directory for a profile named |
|
201 | :file:`cluster_<profile name>`. Thus, the cluster directory for a profile named | |
202 | "foo" would be :file:`.ipython\\cluster_foo`. |
|
202 | "foo" would be :file:`.ipython\\cluster_foo`. | |
203 |
|
203 | |||
204 | To create a new cluster profile (named "mycluster") and the associated cluster |
|
204 | To create a new cluster profile (named "mycluster") and the associated cluster | |
205 | directory, type the following command at the Windows Command Prompt:: |
|
205 | directory, type the following command at the Windows Command Prompt:: | |
206 |
|
206 | |||
207 |
ipcluster |
|
207 | ipcluster create -p mycluster | |
208 |
|
208 | |||
209 | The output of this command is shown in the screenshot below. Notice how |
|
209 | The output of this command is shown in the screenshot below. Notice how | |
210 |
:command:`ipcluster |
|
210 | :command:`ipcluster` prints out the location of the newly created cluster | |
211 | directory. |
|
211 | directory. | |
212 |
|
212 | |||
213 | .. image:: ipcluster_create.* |
|
213 | .. image:: ipcluster_create.* | |
214 |
|
214 | |||
215 | Configuring a cluster profile |
|
215 | Configuring a cluster profile | |
216 | ----------------------------- |
|
216 | ----------------------------- | |
217 |
|
217 | |||
218 | Next, you will need to configure the newly created cluster profile by editing |
|
218 | Next, you will need to configure the newly created cluster profile by editing | |
219 | the following configuration files in the cluster directory: |
|
219 | the following configuration files in the cluster directory: | |
220 |
|
220 | |||
221 |
* :file:`ipcluster |
|
221 | * :file:`ipcluster_config.py` | |
222 | * :file:`ipcontroller_config.py` |
|
222 | * :file:`ipcontroller_config.py` | |
223 | * :file:`ipengine_config.py` |
|
223 | * :file:`ipengine_config.py` | |
224 |
|
224 | |||
225 |
When :command:`ipcluster |
|
225 | When :command:`ipcluster` is run, these configuration files are used to | |
226 | determine how the engines and controller will be started. In most cases, |
|
226 | determine how the engines and controller will be started. In most cases, | |
227 | you will only have to set a few of the attributes in these files. |
|
227 | you will only have to set a few of the attributes in these files. | |
228 |
|
228 | |||
229 |
To configure :command:`ipcluster |
|
229 | To configure :command:`ipcluster` to use the Windows HPC job scheduler, you | |
230 | will need to edit the following attributes in the file |
|
230 | will need to edit the following attributes in the file | |
231 |
:file:`ipcluster |
|
231 | :file:`ipcluster_config.py`:: | |
232 |
|
232 | |||
233 |
# Set these at the top of the file to tell ipcluster |
|
233 | # Set these at the top of the file to tell ipcluster to use the | |
234 | # Windows HPC job scheduler. |
|
234 | # Windows HPC job scheduler. | |
235 | c.Global.controller_launcher = \ |
|
235 | c.Global.controller_launcher = \ | |
236 | 'IPython.parallel.launcher.WindowsHPCControllerLauncher' |
|
236 | 'IPython.parallel.launcher.WindowsHPCControllerLauncher' | |
237 | c.Global.engine_launcher = \ |
|
237 | c.Global.engine_launcher = \ | |
238 | 'IPython.parallel.launcher.WindowsHPCEngineSetLauncher' |
|
238 | 'IPython.parallel.launcher.WindowsHPCEngineSetLauncher' | |
239 |
|
239 | |||
240 | # Set these to the host name of the scheduler (head node) of your cluster. |
|
240 | # Set these to the host name of the scheduler (head node) of your cluster. | |
241 | c.WindowsHPCControllerLauncher.scheduler = 'HEADNODE' |
|
241 | c.WindowsHPCControllerLauncher.scheduler = 'HEADNODE' | |
242 | c.WindowsHPCEngineSetLauncher.scheduler = 'HEADNODE' |
|
242 | c.WindowsHPCEngineSetLauncher.scheduler = 'HEADNODE' | |
243 |
|
243 | |||
244 | There are a number of other configuration attributes that can be set, but |
|
244 | There are a number of other configuration attributes that can be set, but | |
245 | in most cases these will be sufficient to get you started. |
|
245 | in most cases these will be sufficient to get you started. | |
246 |
|
246 | |||
247 | .. warning:: |
|
247 | .. warning:: | |
248 | If any of your configuration attributes involve specifying the location |
|
248 | If any of your configuration attributes involve specifying the location | |
249 | of shared directories or files, you must make sure that you use UNC paths |
|
249 | of shared directories or files, you must make sure that you use UNC paths | |
250 | like :file:`\\\\host\\share`. It is also important that you specify |
|
250 | like :file:`\\\\host\\share`. It is also important that you specify | |
251 | these paths using raw Python strings: ``r'\\host\share'`` to make sure |
|
251 | these paths using raw Python strings: ``r'\\host\share'`` to make sure | |
252 | that the backslashes are properly escaped. |
|
252 | that the backslashes are properly escaped. | |
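
For example, a hypothetical shared-directory attribute would be written as a
raw string holding a UNC path (the attribute name ``work_dir`` below is a
placeholder for illustration, not a documented option)::

    # Illustrative only: the raw string keeps the backslashes intact, and
    # the UNC path is visible from every node.  'work_dir' is a made-up
    # attribute name; consult your launcher's actual options.
    c.WindowsHPCEngineSetLauncher.work_dir = r'\\HEADNODE\ipython\cluster_mycluster'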
253 |
|
253 | |||
254 | Starting the cluster profile |
|
254 | Starting the cluster profile | |
255 | ---------------------------- |
|
255 | ---------------------------- | |
256 |
|
256 | |||
257 | Once a cluster profile has been configured, starting an IPython cluster using |
|
257 | Once a cluster profile has been configured, starting an IPython cluster using | |
258 | the profile is simple:: |
|
258 | the profile is simple:: | |
259 |
|
259 | |||
260 |
ipcluster |
|
260 | ipcluster start -p mycluster -n 32 | |
261 |
|
261 | |||
262 |
The ``-n`` option tells :command:`ipcluster |
|
262 | The ``-n`` option tells :command:`ipcluster` how many engines to start (in | |
263 | this case 32). Stopping the cluster is as simple as typing Control-C. |
|
263 | this case 32). Stopping the cluster is as simple as typing Control-C. | |
264 |
|
264 | |||
265 | Using the HPC Job Manager |
|
265 | Using the HPC Job Manager | |
266 | ------------------------- |
|
266 | ------------------------- | |
267 |
|
267 | |||
268 |
When ``ipcluster |
|
268 | When ``ipcluster start`` is run the first time, :command:`ipcluster` creates | |
269 | two XML job description files in the cluster directory: |
|
269 | two XML job description files in the cluster directory: | |
270 |
|
270 | |||
271 | * :file:`ipcontroller_job.xml` |
|
271 | * :file:`ipcontroller_job.xml` | |
272 | * :file:`ipengineset_job.xml` |
|
272 | * :file:`ipengineset_job.xml` | |
273 |
|
273 | |||
274 | Once these files have been created, they can be imported into the HPC Job |
|
274 | Once these files have been created, they can be imported into the HPC Job | |
275 | Manager application. Then, the controller and engines for that profile can be |
|
275 | Manager application. Then, the controller and engines for that profile can be | |
276 |
started using the HPC Job Manager directly, without using :command:`ipcluster |
|
276 | started using the HPC Job Manager directly, without using :command:`ipcluster`. | |
277 |
However, anytime the cluster profile is re-configured, ``ipcluster |
|
277 | However, anytime the cluster profile is re-configured, ``ipcluster start`` | |
278 | must be run again to regenerate the XML job description files. The |
|
278 | must be run again to regenerate the XML job description files. The | |
279 | following screenshot shows what the HPC Job Manager interface looks like |
|
279 | following screenshot shows what the HPC Job Manager interface looks like | |
280 | with a running IPython cluster. |
|
280 | with a running IPython cluster. | |
281 |
|
281 | |||
282 | .. image:: hpc_job_manager.* |
|
282 | .. image:: hpc_job_manager.* | |
283 |
|
283 | |||
284 | Performing a simple interactive parallel computation |
|
284 | Performing a simple interactive parallel computation | |
285 | ==================================================== |
|
285 | ==================================================== | |
286 |
|
286 | |||
287 | Once you have started your IPython cluster, you can start to use it. To do |
|
287 | Once you have started your IPython cluster, you can start to use it. To do | |
288 | this, open a new Windows Command Prompt and start IPython's interactive
|
288 | this, open a new Windows Command Prompt and start IPython's interactive | 
289 | shell by typing:: |
|
289 | shell by typing:: | |
290 |
|
290 | |||
291 | ipython |
|
291 | ipython | |
292 |
|
292 | |||
293 | Then you can create a :class:`MultiEngineClient` instance for your profile and |
|
293 | Then you can create a :class:`MultiEngineClient` instance for your profile and | |
294 | use the resulting instance to do a simple interactive parallel computation. In |
|
294 | use the resulting instance to do a simple interactive parallel computation. In | |
295 | the code and screenshot that follows, we take a simple Python function and |
|
295 | the code and screenshot that follows, we take a simple Python function and | |
296 | apply it to each element of an array of integers in parallel using the |
|
296 | apply it to each element of an array of integers in parallel using the | |
297 | :meth:`MultiEngineClient.map` method: |
|
297 | :meth:`MultiEngineClient.map` method: | |
298 |
|
298 | |||
299 | .. sourcecode:: ipython |
|
299 | .. sourcecode:: ipython | |
300 |
|
300 | |||
301 | In [1]: from IPython.parallel import * |
|
301 | In [1]: from IPython.parallel import * | |
302 |
|
302 | |||
303 | In [2]: mec = MultiEngineClient(profile='mycluster')
|
303 | In [2]: mec = MultiEngineClient(profile='mycluster') | 
304 |
|
304 | |||
305 | In [3]: mec.get_ids() |
|
305 | In [3]: mec.get_ids() | |
306 | Out[3]: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
|
306 | Out[3]: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14] | 
307 |
|
307 | |||
308 | In [4]: def f(x): |
|
308 | In [4]: def f(x): | |
309 | ...: return x**10 |
|
309 | ...: return x**10 | |
310 |
|
310 | |||
311 | In [5]: mec.map(f, range(15)) # f is applied in parallel |
|
311 | In [5]: mec.map(f, range(15)) # f is applied in parallel | |
312 | Out[5]: |
|
312 | Out[5]: | |
313 | [0, |
|
313 | [0, | |
314 | 1, |
|
314 | 1, | |
315 | 1024, |
|
315 | 1024, | |
316 | 59049, |
|
316 | 59049, | |
317 | 1048576, |
|
317 | 1048576, | |
318 | 9765625, |
|
318 | 9765625, | |
319 | 60466176, |
|
319 | 60466176, | |
320 | 282475249, |
|
320 | 282475249, | |
321 | 1073741824, |
|
321 | 1073741824, | |
322 | 3486784401L, |
|
322 | 3486784401L, | |
323 | 10000000000L, |
|
323 | 10000000000L, | |
324 | 25937424601L, |
|
324 | 25937424601L, | |
325 | 61917364224L, |
|
325 | 61917364224L, | |
326 | 137858491849L, |
|
326 | 137858491849L, | |
327 | 289254654976L] |
|
327 | 289254654976L] | |
328 |
|
328 | |||
329 | The :meth:`map` method has the same signature as Python's builtin :func:`map` |
|
329 | The :meth:`map` method has the same signature as Python's builtin :func:`map` | |
330 | function, but runs the calculation in parallel. More involved examples of using |
|
330 | function, but runs the calculation in parallel. More involved examples of using | |
331 | :class:`MultiEngineClient` are provided in the examples that follow. |
|
331 | :class:`MultiEngineClient` are provided in the examples that follow. | |
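
Because the signatures match, the parallel result can be checked directly
against the serial builtin; a small sketch, reusing ``mec`` and ``f`` from
the session above::

    # The parallel map returns the same values as the serial one; only the
    # place of computation differs (the engines instead of this process).
    assert mec.map(f, range(15)) == map(f, range(15))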
332 |
|
332 | |||
333 | .. image:: mec_simple.* |
|
333 | .. image:: mec_simple.* | |
334 |
|
334 |
1 | NO CONTENT: file renamed from docs/source/parallelz/simpledag.pdf to docs/source/parallel/simpledag.pdf |
|
NO CONTENT: file renamed from docs/source/parallelz/simpledag.pdf to docs/source/parallel/simpledag.pdf |
1 | NO CONTENT: file renamed from docs/source/parallelz/simpledag.png to docs/source/parallel/simpledag.png |
|
NO CONTENT: file renamed from docs/source/parallelz/simpledag.png to docs/source/parallel/simpledag.png |
1 | NO CONTENT: file renamed from docs/source/parallelz/single_digits.pdf to docs/source/parallel/single_digits.pdf |
|
NO CONTENT: file renamed from docs/source/parallelz/single_digits.pdf to docs/source/parallel/single_digits.pdf |
1 | NO CONTENT: file renamed from docs/source/parallelz/single_digits.png to docs/source/parallel/single_digits.png |
|
NO CONTENT: file renamed from docs/source/parallelz/single_digits.png to docs/source/parallel/single_digits.png |
1 | NO CONTENT: file renamed from docs/source/parallelz/two_digit_counts.pdf to docs/source/parallel/two_digit_counts.pdf |
|
NO CONTENT: file renamed from docs/source/parallelz/two_digit_counts.pdf to docs/source/parallel/two_digit_counts.pdf |
1 | NO CONTENT: file renamed from docs/source/parallelz/two_digit_counts.png to docs/source/parallel/two_digit_counts.png |
|
NO CONTENT: file renamed from docs/source/parallelz/two_digit_counts.png to docs/source/parallel/two_digit_counts.png |
1 | NO CONTENT: file renamed from docs/source/parallelz/winhpc_index.txt to docs/source/parallel/winhpc_index.txt |
|
NO CONTENT: file renamed from docs/source/parallelz/winhpc_index.txt to docs/source/parallel/winhpc_index.txt |
@@ -1,252 +1,252 b'' | |||||
1 | #!/usr/bin/env python |
|
1 | #!/usr/bin/env python | |
2 | # -*- coding: utf-8 -*- |
|
2 | # -*- coding: utf-8 -*- | |
3 | """Setup script for IPython. |
|
3 | """Setup script for IPython. | |
4 |
|
4 | |||
5 | Under Posix environments it works like a typical setup.py script. |
|
5 | Under Posix environments it works like a typical setup.py script. | |
6 | Under Windows, the command sdist is not supported, since IPython |
|
6 | Under Windows, the command sdist is not supported, since IPython | |
7 | requires utilities which are not available under Windows.""" |
|
7 | requires utilities which are not available under Windows.""" | |
8 |
|
8 | |||
9 | #----------------------------------------------------------------------------- |
|
9 | #----------------------------------------------------------------------------- | |
10 | # Copyright (c) 2008-2010, IPython Development Team. |
|
10 | # Copyright (c) 2008-2010, IPython Development Team. | |
11 | # Copyright (c) 2001-2007, Fernando Perez <fernando.perez@colorado.edu> |
|
11 | # Copyright (c) 2001-2007, Fernando Perez <fernando.perez@colorado.edu> | |
12 | # Copyright (c) 2001, Janko Hauser <jhauser@zscout.de> |
|
12 | # Copyright (c) 2001, Janko Hauser <jhauser@zscout.de> | |
13 | # Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu> |
|
13 | # Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu> | |
14 | # |
|
14 | # | |
15 | # Distributed under the terms of the Modified BSD License. |
|
15 | # Distributed under the terms of the Modified BSD License. | |
16 | # |
|
16 | # | |
17 | # The full license is in the file COPYING.txt, distributed with this software. |
|
17 | # The full license is in the file COPYING.txt, distributed with this software. | |
18 | #----------------------------------------------------------------------------- |
|
18 | #----------------------------------------------------------------------------- | |
19 |
|
19 | |||
20 | #----------------------------------------------------------------------------- |
|
20 | #----------------------------------------------------------------------------- | |
21 | # Minimal Python version sanity check |
|
21 | # Minimal Python version sanity check | |
22 | #----------------------------------------------------------------------------- |
|
22 | #----------------------------------------------------------------------------- | |
23 |
|
23 | |||
24 | import sys |
|
24 | import sys | |
25 |
|
25 | |||
26 | # This check is also made in IPython/__init__, don't forget to update both when |
|
26 | # This check is also made in IPython/__init__, don't forget to update both when | |
27 | # changing Python version requirements. |
|
27 | # changing Python version requirements. | |
28 | if sys.version[0:3] < '2.6': |
|
28 | if sys.version[0:3] < '2.6': | |
29 | error = """\ |
|
29 | error = """\ | |
30 | ERROR: 'IPython requires Python Version 2.6 or above.' |
|
30 | ERROR: 'IPython requires Python Version 2.6 or above.' | |
31 | Exiting.""" |
|
31 | Exiting.""" | |
32 | print >> sys.stderr, error |
|
32 | print >> sys.stderr, error | |
33 | sys.exit(1) |
|
33 | sys.exit(1) | |
34 |
|
34 | |||
35 | # At least we're on the python version we need, move on. |
|
35 | # At least we're on the python version we need, move on. | |
36 |
|
36 | |||
37 | #------------------------------------------------------------------------------- |
|
37 | #------------------------------------------------------------------------------- | |
38 | # Imports |
|
38 | # Imports | |
39 | #------------------------------------------------------------------------------- |
|
39 | #------------------------------------------------------------------------------- | |
40 |
|
40 | |||
41 | # Stdlib imports |
|
41 | # Stdlib imports | |
42 | import os |
|
42 | import os | |
43 | import shutil |
|
43 | import shutil | |
44 |
|
44 | |||
45 | from glob import glob |
|
45 | from glob import glob | |
46 |
|
46 | |||
47 | # BEFORE importing distutils, remove MANIFEST. distutils doesn't properly |
|
47 | # BEFORE importing distutils, remove MANIFEST. distutils doesn't properly | |
48 | # update it when the contents of directories change. |
|
48 | # update it when the contents of directories change. | |
49 | if os.path.exists('MANIFEST'): os.remove('MANIFEST') |
|
49 | if os.path.exists('MANIFEST'): os.remove('MANIFEST') | |
50 |
|
50 | |||
51 | from distutils.core import setup |
|
51 | from distutils.core import setup | |
52 |
|
52 | |||
53 | # Our own imports |
|
53 | # Our own imports | |
54 | from IPython.utils.path import target_update |
|
54 | from IPython.utils.path import target_update | |
55 |
|
55 | |||
56 | from setupbase import ( |
|
56 | from setupbase import ( | |
57 | setup_args, |
|
57 | setup_args, | |
58 | find_packages, |
|
58 | find_packages, | |
59 | find_package_data, |
|
59 | find_package_data, | |
60 | find_scripts, |
|
60 | find_scripts, | |
61 | find_data_files, |
|
61 | find_data_files, | |
62 | check_for_dependencies, |
|
62 | check_for_dependencies, | |
63 | record_commit_info, |
|
63 | record_commit_info, | |
64 | ) |
|
64 | ) | |
65 |
|
65 | |||
66 | isfile = os.path.isfile |
|
66 | isfile = os.path.isfile | |
67 | pjoin = os.path.join |
|
67 | pjoin = os.path.join | |
68 |
|
68 | |||
69 | #----------------------------------------------------------------------------- |
|
69 | #----------------------------------------------------------------------------- | |
70 | # Function definitions |
|
70 | # Function definitions | |
71 | #----------------------------------------------------------------------------- |
|
71 | #----------------------------------------------------------------------------- | |
72 |
|
72 | |||
73 | def cleanup(): |
|
73 | def cleanup(): | |
74 | """Clean up the junk left around by the build process""" |
|
74 | """Clean up the junk left around by the build process""" | |
75 | if "develop" not in sys.argv: |
|
75 | if "develop" not in sys.argv: | |
76 | try: |
|
76 | try: | |
77 | shutil.rmtree('ipython.egg-info') |
|
77 | shutil.rmtree('ipython.egg-info') | |
78 | except: |
|
78 | except: | |
79 | try: |
|
79 | try: | |
80 | os.unlink('ipython.egg-info') |
|
80 | os.unlink('ipython.egg-info') | |
81 | except: |
|
81 | except: | |
82 | pass |
|
82 | pass | |
83 |
|
83 | |||
84 | #------------------------------------------------------------------------------- |
|
84 | #------------------------------------------------------------------------------- | |
85 | # Handle OS specific things |
|
85 | # Handle OS specific things | |
86 | #------------------------------------------------------------------------------- |
|
86 | #------------------------------------------------------------------------------- | |
87 |
|
87 | |||
88 | if os.name == 'posix': |
|
88 | if os.name == 'posix': | |
89 | os_name = 'posix' |
|
89 | os_name = 'posix' | |
90 | elif os.name in ['nt','dos']: |
|
90 | elif os.name in ['nt','dos']: | |
91 | os_name = 'windows' |
|
91 | os_name = 'windows' | |
92 | else: |
|
92 | else: | |
93 | print 'Unsupported operating system:',os.name |
|
93 | print 'Unsupported operating system:',os.name | |
94 | sys.exit(1) |
|
94 | sys.exit(1) | |
95 |
|
95 | |||
96 | # Under Windows, 'sdist' has not been supported. Now that the docs build with |
|
96 | # Under Windows, 'sdist' has not been supported. Now that the docs build with | |
97 | # Sphinx it might work, but let's not turn it on until someone confirms that it |
|
97 | # Sphinx it might work, but let's not turn it on until someone confirms that it | |
98 | # actually works. |
|
98 | # actually works. | |
99 | if os_name == 'windows' and 'sdist' in sys.argv: |
|
99 | if os_name == 'windows' and 'sdist' in sys.argv: | |
100 | print 'The sdist command is not available under Windows. Exiting.' |
|
100 | print 'The sdist command is not available under Windows. Exiting.' | |
101 | sys.exit(1) |
|
101 | sys.exit(1) | |
102 |
|
102 | |||
103 | #------------------------------------------------------------------------------- |
|
103 | #------------------------------------------------------------------------------- | |
104 | # Things related to the IPython documentation |
|
104 | # Things related to the IPython documentation | |
105 | #------------------------------------------------------------------------------- |
|
105 | #------------------------------------------------------------------------------- | |
106 |
|
106 | |||
107 | # update the manuals when building a source dist |
|
107 | # update the manuals when building a source dist | |
108 | if len(sys.argv) >= 2 and sys.argv[1] in ('sdist','bdist_rpm'): |
|
108 | if len(sys.argv) >= 2 and sys.argv[1] in ('sdist','bdist_rpm'): | |
109 | import textwrap |
|
109 | import textwrap | |
110 |
|
110 | |||
111 | # List of things to be updated. Each entry is a triplet of args for |
|
111 | # List of things to be updated. Each entry is a triplet of args for | |
112 | # target_update() |
|
112 | # target_update() | |
113 | to_update = [ |
|
113 | to_update = [ | |
114 | # FIXME - Disabled for now: we need to redo an automatic way |
|
114 | # FIXME - Disabled for now: we need to redo an automatic way | |
115 | # of generating the magic info inside the rst. |
|
115 | # of generating the magic info inside the rst. | |
116 | #('docs/magic.tex', |
|
116 | #('docs/magic.tex', | |
117 | #['IPython/Magic.py'], |
|
117 | #['IPython/Magic.py'], | |
118 | #"cd doc && ./update_magic.sh" ), |
|
118 | #"cd doc && ./update_magic.sh" ), | |
119 |
|
119 | |||
120 | ('docs/man/ipcluster.1.gz', |
|
120 | ('docs/man/ipcluster.1.gz', | |
121 | ['docs/man/ipcluster.1'], |
|
121 | ['docs/man/ipcluster.1'], | |
122 | 'cd docs/man && gzip -9c ipcluster.1 > ipcluster.1.gz'), |
|
122 | 'cd docs/man && gzip -9c ipcluster.1 > ipcluster.1.gz'), | |
123 |
|
123 | |||
124 | ('docs/man/ipcontroller.1.gz', |
|
124 | ('docs/man/ipcontroller.1.gz', | |
125 | ['docs/man/ipcontroller.1'], |
|
125 | ['docs/man/ipcontroller.1'], | |
126 | 'cd docs/man && gzip -9c ipcontroller.1 > ipcontroller.1.gz'), |
|
126 | 'cd docs/man && gzip -9c ipcontroller.1 > ipcontroller.1.gz'), | |
127 |
|
127 | |||
128 | ('docs/man/ipengine.1.gz', |
|
128 | ('docs/man/ipengine.1.gz', | |
129 | ['docs/man/ipengine.1'], |
|
129 | ['docs/man/ipengine.1'], | |
130 | 'cd docs/man && gzip -9c ipengine.1 > ipengine.1.gz'), |
|
130 | 'cd docs/man && gzip -9c ipengine.1 > ipengine.1.gz'), | |
131 |
|
131 | |||
132 | ('docs/man/ipython.1.gz', |
|
132 | ('docs/man/ipython.1.gz', | |
133 | ['docs/man/ipython.1'], |
|
133 | ['docs/man/ipython.1'], | |
134 | 'cd docs/man && gzip -9c ipython.1 > ipython.1.gz'), |
|
134 | 'cd docs/man && gzip -9c ipython.1 > ipython.1.gz'), | |
135 |
|
135 | |||
136 | ('docs/man/ipython-wx.1.gz', |
|
136 | ('docs/man/ipython-wx.1.gz', | |
137 | ['docs/man/ipython-wx.1'], |
|
137 | ['docs/man/ipython-wx.1'], | |
138 | 'cd docs/man && gzip -9c ipython-wx.1 > ipython-wx.1.gz'), |
|
138 | 'cd docs/man && gzip -9c ipython-wx.1 > ipython-wx.1.gz'), | |
139 |
|
139 | |||
140 | ('docs/man/ipythonx.1.gz', |
|
140 | ('docs/man/ipythonx.1.gz', | |
141 | ['docs/man/ipythonx.1'], |
|
141 | ['docs/man/ipythonx.1'], | |
142 | 'cd docs/man && gzip -9c ipythonx.1 > ipythonx.1.gz'), |
|
142 | 'cd docs/man && gzip -9c ipythonx.1 > ipythonx.1.gz'), | |
143 |
|
143 | |||
144 | ('docs/man/irunner.1.gz', |
|
144 | ('docs/man/irunner.1.gz', | |
145 | ['docs/man/irunner.1'], |
|
145 | ['docs/man/irunner.1'], | |
146 | 'cd docs/man && gzip -9c irunner.1 > irunner.1.gz'), |
|
146 | 'cd docs/man && gzip -9c irunner.1 > irunner.1.gz'), | |
147 |
|
147 | |||
148 | ('docs/man/pycolor.1.gz', |
|
148 | ('docs/man/pycolor.1.gz', | |
149 | ['docs/man/pycolor.1'], |
|
149 | ['docs/man/pycolor.1'], | |
150 | 'cd docs/man && gzip -9c pycolor.1 > pycolor.1.gz'), |
|
150 | 'cd docs/man && gzip -9c pycolor.1 > pycolor.1.gz'), | |
151 | ] |
|
151 | ] | |
152 |
|
152 | |||
153 | # Only build the docs if sphinx is present |
|
153 | # Only build the docs if sphinx is present | |
154 | try: |
|
154 | try: | |
155 | import sphinx |
|
155 | import sphinx | |
156 | except ImportError: |
|
156 | except ImportError: | |
157 | pass |
|
157 | pass | |
158 | else: |
|
158 | else: | |
159 | # The Makefile calls the do_sphinx scripts to build html and pdf, so |
|
159 | # The Makefile calls the do_sphinx scripts to build html and pdf, so | |
160 | # just one target is enough to cover all manual generation |
|
160 | # just one target is enough to cover all manual generation | |
161 |
|
161 | |||
162 | # First, compute all the dependencies that can force us to rebuild the |
|
162 | # First, compute all the dependencies that can force us to rebuild the | |
163 | # docs. Start with the main release file that contains metadata |
|
163 | # docs. Start with the main release file that contains metadata | |
164 | docdeps = ['IPython/core/release.py'] |
|
164 | docdeps = ['IPython/core/release.py'] | |
165 | # Include all the reST sources
|
165 | # Include all the reST sources | 
166 | pjoin = os.path.join |
|
166 | pjoin = os.path.join | |
167 | for dirpath,dirnames,filenames in os.walk('docs/source'): |
|
167 | for dirpath,dirnames,filenames in os.walk('docs/source'): | |
168 | if dirpath in ['_static','_templates']: |
|
168 | if dirpath in ['_static','_templates']: | |
169 | continue |
|
169 | continue | |
170 | docdeps += [ pjoin(dirpath,f) for f in filenames |
|
170 | docdeps += [ pjoin(dirpath,f) for f in filenames | |
171 | if f.endswith('.txt') ] |
|
171 | if f.endswith('.txt') ] | |
172 | # and the examples |
|
172 | # and the examples | |
173 | for dirpath,dirnames,filenames in os.walk('docs/example'): |
|
173 | for dirpath,dirnames,filenames in os.walk('docs/example'): | |
174 | docdeps += [ pjoin(dirpath,f) for f in filenames |
|
174 | docdeps += [ pjoin(dirpath,f) for f in filenames | |
175 | if not f.endswith('~') ] |
|
175 | if not f.endswith('~') ] | |
176 | # then, make them all dependencies for the main PDF (the html will get |
|
176 | # then, make them all dependencies for the main PDF (the html will get | |
177 | # auto-generated as well). |
|
177 | # auto-generated as well). | |
178 | to_update.append( |
|
178 | to_update.append( | |
179 | ('docs/dist/ipython.pdf', |
|
179 | ('docs/dist/ipython.pdf', | |
180 | docdeps, |
|
180 | docdeps, | |
181 | "cd docs && make dist") |
|
181 | "cd docs && make dist") | |
182 | ) |
|
182 | ) | |
183 |
|
183 | |||
184 | [ target_update(*t) for t in to_update ] |
|
184 | [ target_update(*t) for t in to_update ] | |
185 |
|
185 | |||
186 | #--------------------------------------------------------------------------- |
|
186 | #--------------------------------------------------------------------------- | |
187 | # Find all the packages, package data, scripts and data_files |
|
187 | # Find all the packages, package data, scripts and data_files | |
188 | #--------------------------------------------------------------------------- |
|
188 | #--------------------------------------------------------------------------- | |
189 |
|
189 | |||
190 | packages = find_packages() |
|
190 | packages = find_packages() | |
191 | package_data = find_package_data() |
|
191 | package_data = find_package_data() | |
192 | scripts = find_scripts() |
|
192 | scripts = find_scripts() | |
193 | data_files = find_data_files() |
|
193 | data_files = find_data_files() | |
194 |
|
194 | |||
195 | #--------------------------------------------------------------------------- |
|
195 | #--------------------------------------------------------------------------- | |
196 | # Handle dependencies and setuptools specific things |
|
196 | # Handle dependencies and setuptools specific things | |
197 | #--------------------------------------------------------------------------- |
|
197 | #--------------------------------------------------------------------------- | |
198 |
|
198 | |||
199 | # For some commands, use setuptools. Note that we do NOT list install here! |
|
199 | # For some commands, use setuptools. Note that we do NOT list install here! | |
200 | # If you want a setuptools-enhanced install, just run 'setupegg.py install' |
|
200 | # If you want a setuptools-enhanced install, just run 'setupegg.py install' | |
201 | if len(set(('develop', 'sdist', 'release', 'bdist_egg', 'bdist_rpm', |
|
201 | if len(set(('develop', 'sdist', 'release', 'bdist_egg', 'bdist_rpm', | |
202 | 'bdist', 'bdist_dumb', 'bdist_wininst', 'install_egg_info', |
|
202 | 'bdist', 'bdist_dumb', 'bdist_wininst', 'install_egg_info', | |
203 | 'build_sphinx', 'egg_info', 'easy_install', 'upload', |
|
203 | 'build_sphinx', 'egg_info', 'easy_install', 'upload', | |
204 | )).intersection(sys.argv)) > 0: |
|
204 | )).intersection(sys.argv)) > 0: | |
205 | import setuptools |
|
205 | import setuptools | |
206 |
|
206 | |||
207 | # This dict is used for passing extra arguments that are setuptools |
|
207 | # This dict is used for passing extra arguments that are setuptools | |
208 | # specific to setup |
|
208 | # specific to setup | |
209 | setuptools_extra_args = {} |
|
209 | setuptools_extra_args = {} | |
210 |
|
210 | |||
211 | if 'setuptools' in sys.modules: |
|
211 | if 'setuptools' in sys.modules: | |
212 | setuptools_extra_args['zip_safe'] = False |
|
212 | setuptools_extra_args['zip_safe'] = False | |
213 | setuptools_extra_args['entry_points'] = { |
|
213 | setuptools_extra_args['entry_points'] = { | |
214 | 'console_scripts': [ |
|
214 | 'console_scripts': [ | |
215 | 'ipython = IPython.frontend.terminal.ipapp:launch_new_instance', |
|
215 | 'ipython = IPython.frontend.terminal.ipapp:launch_new_instance', | |
216 | 'ipython-qtconsole = IPython.frontend.qt.console.ipythonqt:main', |
|
216 | 'ipython-qtconsole = IPython.frontend.qt.console.ipythonqt:main', | |
217 | 'pycolor = IPython.utils.PyColorize:main', |
|
217 | 'pycolor = IPython.utils.PyColorize:main', | |
218 |
'ipcontroller |
|
218 | 'ipcontroller = IPython.parallel.ipcontrollerapp:launch_new_instance', | |
219 |
'ipengine |
|
219 | 'ipengine = IPython.parallel.ipengineapp:launch_new_instance', | |
220 |
'iplogger |
|
220 | 'iplogger = IPython.parallel.iploggerapp:launch_new_instance', | |
221 |
'ipcluster |
|
221 | 'ipcluster = IPython.parallel.ipclusterapp:launch_new_instance', | |
222 | 'iptest = IPython.testing.iptest:main', |
|
222 | 'iptest = IPython.testing.iptest:main', | |
223 | 'irunner = IPython.lib.irunner:main' |
|
223 | 'irunner = IPython.lib.irunner:main' | |
224 | ] |
|
224 | ] | |
225 | } |
|
225 | } | |
226 | setup_args['extras_require'] = dict( |
|
226 | setup_args['extras_require'] = dict( | |
227 | zmq = 'pyzmq>=2.0.10', |
|
227 | zmq = 'pyzmq>=2.0.10.1', | |
228 | doc='Sphinx>=0.3', |
|
228 | doc='Sphinx>=0.3', | |
229 | test='nose>=0.10.1', |
|
229 | test='nose>=0.10.1', | |
230 | security='pyOpenSSL>=0.6' |
|
230 | security='pyOpenSSL>=0.6' | |
231 | ) |
|
231 | ) | |
232 | else: |
|
232 | else: | |
233 | # If we are running without setuptools, call this function which will |
|
233 | # If we are running without setuptools, call this function which will | |
234 | # check for dependencies and inform the user what is needed. This is
|
234 | # check for dependencies and inform the user what is needed. This is | 
235 | # just to make life easy for users. |
|
235 | # just to make life easy for users. | |
236 | check_for_dependencies() |
|
236 | check_for_dependencies() | |
237 |
|
237 | |||
238 | #--------------------------------------------------------------------------- |
|
238 | #--------------------------------------------------------------------------- | |
239 | # Do the actual setup now |
|
239 | # Do the actual setup now | |
240 | #--------------------------------------------------------------------------- |
|
240 | #--------------------------------------------------------------------------- | |
241 |
|
241 | |||
242 | setup_args['cmdclass'] = {'build_py': record_commit_info('IPython')} |
|
242 | setup_args['cmdclass'] = {'build_py': record_commit_info('IPython')} | |
243 | setup_args['packages'] = packages |
|
243 | setup_args['packages'] = packages | |
244 | setup_args['package_data'] = package_data |
|
244 | setup_args['package_data'] = package_data | |
245 | setup_args['scripts'] = scripts |
|
245 | setup_args['scripts'] = scripts | |
246 | setup_args['data_files'] = data_files |
|
246 | setup_args['data_files'] = data_files | |
247 | setup_args.update(setuptools_extra_args) |
|
247 | setup_args.update(setuptools_extra_args) | |
248 |
|
248 | |||
249 |
|
249 | |||
250 | if __name__ == '__main__': |
|
250 | if __name__ == '__main__': | |
251 | setup(**setup_args) |
|
251 | setup(**setup_args) | |
252 | cleanup() |
|
252 | cleanup() |
@@ -1,380 +1,380 b'' | |||||
1 | # encoding: utf-8 |
|
1 | # encoding: utf-8 | |
2 | """ |
|
2 | """ | |
3 | This module defines the things that are used in setup.py for building IPython |
|
3 | This module defines the things that are used in setup.py for building IPython | |
4 |
|
4 | |||
5 | This includes: |
|
5 | This includes: | |
6 |
|
6 | |||
7 | * The basic arguments to setup |
|
7 | * The basic arguments to setup | |
8 | * Functions for finding things like packages, package data, etc. |
|
8 | * Functions for finding things like packages, package data, etc. | |
9 | * A function for checking dependencies. |
|
9 | * A function for checking dependencies. | |
10 | """ |
|
10 | """ | |
11 | from __future__ import print_function |
|
11 | from __future__ import print_function | |
12 |
|
12 | |||
13 | #------------------------------------------------------------------------------- |
|
13 | #------------------------------------------------------------------------------- | |
14 | # Copyright (C) 2008 The IPython Development Team |
|
14 | # Copyright (C) 2008 The IPython Development Team | |
15 | # |
|
15 | # | |
16 | # Distributed under the terms of the BSD License. The full license is in |
|
16 | # Distributed under the terms of the BSD License. The full license is in | |
17 | # the file COPYING, distributed as part of this software. |
|
17 | # the file COPYING, distributed as part of this software. | |
18 | #------------------------------------------------------------------------------- |
|
18 | #------------------------------------------------------------------------------- | |
19 |
|
19 | |||
20 | #------------------------------------------------------------------------------- |
|
20 | #------------------------------------------------------------------------------- | |
21 | # Imports |
|
21 | # Imports | |
22 | #------------------------------------------------------------------------------- |
|
22 | #------------------------------------------------------------------------------- | |
23 | import os |
|
23 | import os | |
24 | import sys |
|
24 | import sys | |
25 |
|
25 | |||
26 | from ConfigParser import ConfigParser |
|
26 | from ConfigParser import ConfigParser | |
27 | from distutils.command.build_py import build_py |
|
27 | from distutils.command.build_py import build_py | |
28 | from glob import glob |
|
28 | from glob import glob | |
29 |
|
29 | |||
30 | from setupext import install_data_ext |
|
30 | from setupext import install_data_ext | |
31 |
|
31 | |||
32 | #------------------------------------------------------------------------------- |
|
32 | #------------------------------------------------------------------------------- | |
33 | # Useful globals and utility functions |
|
33 | # Useful globals and utility functions | |
34 | #------------------------------------------------------------------------------- |
|
34 | #------------------------------------------------------------------------------- | |
35 |
|
35 | |||
36 | # A few handy globals |
|
36 | # A few handy globals | |
37 | isfile = os.path.isfile |
|
37 | isfile = os.path.isfile | |
38 | pjoin = os.path.join |
|
38 | pjoin = os.path.join | |
39 |
|
39 | |||
40 | def oscmd(s): |
|
40 | def oscmd(s): | |
41 | print(">", s) |
|
41 | print(">", s) | |
42 | os.system(s) |
|
42 | os.system(s) | |
43 |
|
43 | |||
44 | # A little utility we'll need below, since glob() does NOT allow you to do |
|
44 | # A little utility we'll need below, since glob() does NOT allow you to do | |
45 | # exclusion on multiple endings! |
|
45 | # exclusion on multiple endings! | |
46 | def file_doesnt_endwith(test,endings): |
|
46 | def file_doesnt_endwith(test,endings): | |
47 | """Return true if test is a file and its name does NOT end with any |
|
47 | """Return true if test is a file and its name does NOT end with any | |
48 | of the strings listed in endings.""" |
|
48 | of the strings listed in endings.""" | |
49 | if not isfile(test): |
|
49 | if not isfile(test): | |
50 | return False |
|
50 | return False | |
51 | for e in endings: |
|
51 | for e in endings: | |
52 | if test.endswith(e): |
|
52 | if test.endswith(e): | |
53 | return False |
|
53 | return False | |
54 | return True |
|
54 | return True | |
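
``file_doesnt_endwith`` is meant as a post-filter over ``glob()`` results; a hypothetical call (not taken from this file) would look like::

    from glob import glob
    from os.path import join as pjoin

    # Keep regular files under docs/, dropping editor backups and bytecode
    keepers = [f for f in glob(pjoin('docs', '*'))
               if file_doesnt_endwith(f, ('~', '.pyc', '.pyo'))]
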
55 |
|
55 | |||
56 | #--------------------------------------------------------------------------- |
|
56 | #--------------------------------------------------------------------------- | |
57 | # Basic project information |
|
57 | # Basic project information | |
58 | #--------------------------------------------------------------------------- |
|
58 | #--------------------------------------------------------------------------- | |
59 |
|
59 | |||
60 | # release.py contains version, authors, license, url, keywords, etc. |
|
60 | # release.py contains version, authors, license, url, keywords, etc. | |
61 | execfile(pjoin('IPython','core','release.py')) |
|
61 | execfile(pjoin('IPython','core','release.py')) | |
62 |
|
62 | |||
63 | # Create a dict with the basic information |
|
63 | # Create a dict with the basic information | |
64 | # This dict is eventually passed to setup after additional keys are added. |
|
64 | # This dict is eventually passed to setup after additional keys are added. | |
65 | setup_args = dict( |
|
65 | setup_args = dict( | |
66 | name = name, |
|
66 | name = name, | |
67 | version = version, |
|
67 | version = version, | |
68 | description = description, |
|
68 | description = description, | |
69 | long_description = long_description, |
|
69 | long_description = long_description, | |
70 | author = author, |
|
70 | author = author, | |
71 | author_email = author_email, |
|
71 | author_email = author_email, | |
72 | url = url, |
|
72 | url = url, | |
73 | download_url = download_url, |
|
73 | download_url = download_url, | |
74 | license = license, |
|
74 | license = license, | |
75 | platforms = platforms, |
|
75 | platforms = platforms, | |
76 | keywords = keywords, |
|
76 | keywords = keywords, | |
77 | cmdclass = {'install_data': install_data_ext}, |
|
77 | cmdclass = {'install_data': install_data_ext}, | |
78 | ) |
|
78 | ) | |
79 |
|
79 | |||
80 |
|
80 | |||
81 | #--------------------------------------------------------------------------- |
|
81 | #--------------------------------------------------------------------------- | |
82 | # Find packages |
|
82 | # Find packages | |
83 | #--------------------------------------------------------------------------- |
|
83 | #--------------------------------------------------------------------------- | |
84 |
|
84 | |||
85 | def add_package(packages,pname,config=False,tests=False,scripts=False, |
|
85 | def add_package(packages,pname,config=False,tests=False,scripts=False, | |
86 | others=None): |
|
86 | others=None): | |
87 | """ |
|
87 | """ | |
88 | Add a package to the list of packages, including certain subpackages. |
|
88 | Add a package to the list of packages, including certain subpackages. | |
89 | """ |
|
89 | """ | |
90 | packages.append('.'.join(['IPython',pname])) |
|
90 | packages.append('.'.join(['IPython',pname])) | |
91 | if config: |
|
91 | if config: | |
92 | packages.append('.'.join(['IPython',pname,'config'])) |
|
92 | packages.append('.'.join(['IPython',pname,'config'])) | |
93 | if tests: |
|
93 | if tests: | |
94 | packages.append('.'.join(['IPython',pname,'tests'])) |
|
94 | packages.append('.'.join(['IPython',pname,'tests'])) | |
95 | if scripts: |
|
95 | if scripts: | |
96 | packages.append('.'.join(['IPython',pname,'scripts'])) |
|
96 | packages.append('.'.join(['IPython',pname,'scripts'])) | |
97 | if others is not None: |
|
97 | if others is not None: | |
98 | for o in others: |
|
98 | for o in others: | |
99 | packages.append('.'.join(['IPython',pname,o])) |
|
99 | packages.append('.'.join(['IPython',pname,o])) | |
100 |
|
100 | |||
101 | def find_packages(): |
|
101 | def find_packages(): | |
102 | """ |
|
102 | """ | |
103 | Find all of IPython's packages. |
|
103 | Find all of IPython's packages. | |
104 | """ |
|
104 | """ | |
105 | packages = ['IPython'] |
|
105 | packages = ['IPython'] | |
106 | add_package(packages, 'config', tests=True, others=['default','profile']) |
|
106 | add_package(packages, 'config', tests=True, others=['default','profile']) | |
107 | add_package(packages, 'core', tests=True) |
|
107 | add_package(packages, 'core', tests=True) | |
108 | add_package(packages, 'deathrow', tests=True) |
|
108 | add_package(packages, 'deathrow', tests=True) | |
109 | add_package(packages, 'extensions') |
|
109 | add_package(packages, 'extensions') | |
110 | add_package(packages, 'external') |
|
110 | add_package(packages, 'external') | |
111 | add_package(packages, 'external.argparse') |
|
111 | add_package(packages, 'external.argparse') | |
112 | add_package(packages, 'external.configobj') |
|
112 | add_package(packages, 'external.configobj') | |
113 | add_package(packages, 'external.decorator') |
|
113 | add_package(packages, 'external.decorator') | |
114 | add_package(packages, 'external.decorators') |
|
114 | add_package(packages, 'external.decorators') | |
115 | add_package(packages, 'external.guid') |
|
115 | add_package(packages, 'external.guid') | |
116 | add_package(packages, 'external.Itpl') |
|
116 | add_package(packages, 'external.Itpl') | |
117 | add_package(packages, 'external.mglob') |
|
117 | add_package(packages, 'external.mglob') | |
118 | add_package(packages, 'external.path') |
|
118 | add_package(packages, 'external.path') | |
119 | add_package(packages, 'external.pexpect') |
|
119 | add_package(packages, 'external.pexpect') | |
120 | add_package(packages, 'external.pyparsing') |
|
120 | add_package(packages, 'external.pyparsing') | |
121 | add_package(packages, 'external.simplegeneric') |
|
121 | add_package(packages, 'external.simplegeneric') | |
122 | add_package(packages, 'external.ssh') |
|
122 | add_package(packages, 'external.ssh') | |
123 | add_package(packages, 'external.validate') |
|
123 | add_package(packages, 'external.validate') | |
124 | add_package(packages, 'kernel') |
|
124 | add_package(packages, 'kernel') | |
125 | add_package(packages, 'frontend') |
|
125 | add_package(packages, 'frontend') | |
126 | add_package(packages, 'frontend.qt') |
|
126 | add_package(packages, 'frontend.qt') | |
127 | add_package(packages, 'frontend.qt.console', tests=True) |
|
127 | add_package(packages, 'frontend.qt.console', tests=True) | |
128 | add_package(packages, 'frontend.terminal', tests=True) |
|
128 | add_package(packages, 'frontend.terminal', tests=True) | |
129 | add_package(packages, 'lib', tests=True) |
|
129 | add_package(packages, 'lib', tests=True) | |
|
130 | add_package(packages, 'parallel', tests=True) | |||
130 | add_package(packages, 'quarantine', tests=True) |
|
131 | add_package(packages, 'quarantine', tests=True) | |
131 | add_package(packages, 'scripts') |
|
132 | add_package(packages, 'scripts') | |
132 | add_package(packages, 'testing', tests=True) |
|
133 | add_package(packages, 'testing', tests=True) | |
133 | add_package(packages, 'testing.plugin', tests=False) |
|
134 | add_package(packages, 'testing.plugin', tests=False) | |
134 | add_package(packages, 'utils', tests=True) |
|
135 | add_package(packages, 'utils', tests=True) | |
135 | add_package(packages, 'zmq') |
|
136 | add_package(packages, 'zmq') | |
136 | add_package(packages, 'zmq.pylab') |
|
137 | add_package(packages, 'zmq.pylab') | |
137 | add_package(packages, 'parallel') |
|
|||
138 | return packages |
|
138 | return packages | |
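
For reference, each ``add_package`` call expands into one or more dotted package names; the new ``parallel`` line above contributes::

    >>> pkgs = []
    >>> add_package(pkgs, 'parallel', tests=True)
    >>> pkgs
    ['IPython.parallel', 'IPython.parallel.tests']
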
139 |
|
139 | |||
140 | #--------------------------------------------------------------------------- |
|
140 | #--------------------------------------------------------------------------- | |
141 | # Find package data |
|
141 | # Find package data | |
142 | #--------------------------------------------------------------------------- |
|
142 | #--------------------------------------------------------------------------- | |
143 |
|
143 | |||
144 | def find_package_data(): |
|
144 | def find_package_data(): | |
145 | """ |
|
145 | """ | |
146 | Find IPython's package_data. |
|
146 | Find IPython's package_data. | |
147 | """ |
|
147 | """ | |
148 | # This is not enough for these things to appear in an sdist. |
|
148 | # This is not enough for these things to appear in an sdist. | |
149 | # We need to muck with the MANIFEST to get this to work |
|
149 | # We need to muck with the MANIFEST to get this to work | |
150 | package_data = { |
|
150 | package_data = { | |
151 | 'IPython.config.userconfig' : ['*'], |
|
151 | 'IPython.config.userconfig' : ['*'], | |
152 | 'IPython.testing' : ['*.txt'] |
|
152 | 'IPython.testing' : ['*.txt'] | |
153 | } |
|
153 | } | |
154 | return package_data |
|
154 | return package_data | |
155 |
|
155 | |||
156 |
|
156 | |||
157 | #--------------------------------------------------------------------------- |
|
157 | #--------------------------------------------------------------------------- | |
158 | # Find data files |
|
158 | # Find data files | |
159 | #--------------------------------------------------------------------------- |
|
159 | #--------------------------------------------------------------------------- | |
160 |
|
160 | |||
161 | def make_dir_struct(tag,base,out_base): |
|
161 | def make_dir_struct(tag,base,out_base): | |
162 | """Make the directory structure of all files below a starting dir. |
|
162 | """Make the directory structure of all files below a starting dir. | |
163 |
|
163 | |||
164 | This is just a convenience routine to help build a nested directory |
|
164 | This is just a convenience routine to help build a nested directory | |
165 | hierarchy because distutils is too stupid to do this by itself. |
|
165 | hierarchy because distutils is too stupid to do this by itself. | |
166 |
|
166 | |||
167 | XXX - this needs a proper docstring! |
|
167 | XXX - this needs a proper docstring! | |
168 | """ |
|
168 | """ | |
169 |
|
169 | |||
170 | # we'll use these a lot below |
|
170 | # we'll use these a lot below | |
171 | lbase = len(base) |
|
171 | lbase = len(base) | |
172 | pathsep = os.path.sep |
|
172 | pathsep = os.path.sep | |
173 | lpathsep = len(pathsep) |
|
173 | lpathsep = len(pathsep) | |
174 |
|
174 | |||
175 | out = [] |
|
175 | out = [] | |
176 | for (dirpath,dirnames,filenames) in os.walk(base): |
|
176 | for (dirpath,dirnames,filenames) in os.walk(base): | |
177 | # we need to strip out the dirpath from the base to map it to the |
|
177 | # we need to strip out the dirpath from the base to map it to the | |
178 | # output (installation) path. This requires possibly stripping the |
|
178 | # output (installation) path. This requires possibly stripping the | |
179 | # path separator, because otherwise pjoin will not work correctly |
|
179 | # path separator, because otherwise pjoin will not work correctly | |
180 | # (pjoin('foo/','/bar') returns '/bar'). |
|
180 | # (pjoin('foo/','/bar') returns '/bar'). | |
181 |
|
181 | |||
182 | dp_eff = dirpath[lbase:] |
|
182 | dp_eff = dirpath[lbase:] | |
183 | if dp_eff.startswith(pathsep): |
|
183 | if dp_eff.startswith(pathsep): | |
184 | dp_eff = dp_eff[lpathsep:] |
|
184 | dp_eff = dp_eff[lpathsep:] | |
185 | # The output path must be anchored at the out_base marker |
|
185 | # The output path must be anchored at the out_base marker | |
186 | out_path = pjoin(out_base,dp_eff) |
|
186 | out_path = pjoin(out_base,dp_eff) | |
187 | # Now we can generate the final filenames. Since os.walk only produces |
|
187 | # Now we can generate the final filenames. Since os.walk only produces | |
188 | # filenames, we must join back with the dirpath to get full valid file |
|
188 | # filenames, we must join back with the dirpath to get full valid file | |
189 | # paths: |
|
189 | # paths: | |
190 | pfiles = [pjoin(dirpath,f) for f in filenames] |
|
190 | pfiles = [pjoin(dirpath,f) for f in filenames] | |
191 | # Finally, generate the entry we need, which is a pair of (output
|
191 | # Finally, generate the entry we need, which is a pair of (output | |
192 | # path, files) for use as a data_files parameter in install_data. |
|
192 | # path, files) for use as a data_files parameter in install_data. | |
193 | out.append((out_path, pfiles)) |
|
193 | out.append((out_path, pfiles)) | |
194 |
|
194 | |||
195 | return out |
|
195 | return out | |
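
As a concrete illustration (hypothetical file name, posix separators): a tree containing ``docs/examples/core/demo.py``, run through ``make_dir_struct('data', pjoin('docs','examples'), pjoin('share','doc','ipython','examples'))``, yields an entry of the form::

    ('share/doc/ipython/examples/core', ['docs/examples/core/demo.py'])
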
196 |
|
196 | |||
197 |
|
197 | |||
198 | def find_data_files(): |
|
198 | def find_data_files(): | |
199 | """ |
|
199 | """ | |
200 | Find IPython's data_files. |
|
200 | Find IPython's data_files. | |
201 |
|
201 | |||
202 | Most of these are docs. |
|
202 | Most of these are docs. | |
203 | """ |
|
203 | """ | |
204 |
|
204 | |||
205 | docdirbase = pjoin('share', 'doc', 'ipython') |
|
205 | docdirbase = pjoin('share', 'doc', 'ipython') | |
206 | manpagebase = pjoin('share', 'man', 'man1') |
|
206 | manpagebase = pjoin('share', 'man', 'man1') | |
207 |
|
207 | |||
208 | # Simple file lists can be made by hand |
|
208 | # Simple file lists can be made by hand | |
209 | manpages = filter(isfile, glob(pjoin('docs','man','*.1.gz'))) |
|
209 | manpages = filter(isfile, glob(pjoin('docs','man','*.1.gz'))) | |
210 | igridhelpfiles = filter(isfile, |
|
210 | igridhelpfiles = filter(isfile, | |
211 | glob(pjoin('IPython','extensions','igrid_help.*'))) |
|
211 | glob(pjoin('IPython','extensions','igrid_help.*'))) | |
212 |
|
212 | |||
213 | # For nested structures, use the utility above |
|
213 | # For nested structures, use the utility above | |
214 | example_files = make_dir_struct( |
|
214 | example_files = make_dir_struct( | |
215 | 'data', |
|
215 | 'data', | |
216 | pjoin('docs','examples'), |
|
216 | pjoin('docs','examples'), | |
217 | pjoin(docdirbase,'examples') |
|
217 | pjoin(docdirbase,'examples') | |
218 | ) |
|
218 | ) | |
219 | manual_files = make_dir_struct( |
|
219 | manual_files = make_dir_struct( | |
220 | 'data', |
|
220 | 'data', | |
221 | pjoin('docs','dist'), |
|
221 | pjoin('docs','dist'), | |
222 | pjoin(docdirbase,'manual') |
|
222 | pjoin(docdirbase,'manual') | |
223 | ) |
|
223 | ) | |
224 |
|
224 | |||
225 | # And assemble the entire output list |
|
225 | # And assemble the entire output list | |
226 | data_files = [ (manpagebase, manpages), |
|
226 | data_files = [ (manpagebase, manpages), | |
227 | (pjoin(docdirbase, 'extensions'), igridhelpfiles), |
|
227 | (pjoin(docdirbase, 'extensions'), igridhelpfiles), | |
228 | ] + manual_files + example_files |
|
228 | ] + manual_files + example_files | |
229 |
|
229 | |||
230 | return data_files |
|
230 | return data_files | |
231 |
|
231 | |||
232 |
|
232 | |||
233 | def make_man_update_target(manpage): |
|
233 | def make_man_update_target(manpage): | |
234 | """Return a target_update-compliant tuple for the given manpage. |
|
234 | """Return a target_update-compliant tuple for the given manpage. | |
235 |
|
235 | |||
236 | Parameters |
|
236 | Parameters | |
237 | ---------- |
|
237 | ---------- | |
238 | manpage : string |
|
238 | manpage : string | |
239 | Name of the manpage, must include the section number (trailing number). |
|
239 | Name of the manpage, must include the section number (trailing number). | |
240 |
|
240 | |||
241 | Example |
|
241 | Example | |
242 | ------- |
|
242 | ------- | |
243 |
|
243 | |||
244 | >>> make_man_update_target('ipython.1') #doctest: +NORMALIZE_WHITESPACE |
|
244 | >>> make_man_update_target('ipython.1') #doctest: +NORMALIZE_WHITESPACE | |
245 | ('docs/man/ipython.1.gz', |
|
245 | ('docs/man/ipython.1.gz', | |
246 | ['docs/man/ipython.1'], |
|
246 | ['docs/man/ipython.1'], | |
247 | 'cd docs/man && gzip -9c ipython.1 > ipython.1.gz') |
|
247 | 'cd docs/man && gzip -9c ipython.1 > ipython.1.gz') | |
248 | """ |
|
248 | """ | |
249 | man_dir = pjoin('docs', 'man') |
|
249 | man_dir = pjoin('docs', 'man') | |
250 | manpage_gz = manpage + '.gz' |
|
250 | manpage_gz = manpage + '.gz' | |
251 | manpath = pjoin(man_dir, manpage) |
|
251 | manpath = pjoin(man_dir, manpage) | |
252 | manpath_gz = pjoin(man_dir, manpage_gz) |
|
252 | manpath_gz = pjoin(man_dir, manpage_gz) | |
253 | gz_cmd = ( "cd %(man_dir)s && gzip -9c %(manpage)s > %(manpage_gz)s" % |
|
253 | gz_cmd = ( "cd %(man_dir)s && gzip -9c %(manpage)s > %(manpage_gz)s" % | |
254 | locals() ) |
|
254 | locals() ) | |
255 | return (manpath_gz, [manpath], gz_cmd) |
|
255 | return (manpath_gz, [manpath], gz_cmd) | |
256 |
|
256 | |||
257 | #--------------------------------------------------------------------------- |
|
257 | #--------------------------------------------------------------------------- | |
258 | # Find scripts |
|
258 | # Find scripts | |
259 | #--------------------------------------------------------------------------- |
|
259 | #--------------------------------------------------------------------------- | |
260 |
|
260 | |||
261 | def find_scripts(): |
|
261 | def find_scripts(): | |
262 | """ |
|
262 | """ | |
263 | Find IPython's scripts. |
|
263 | Find IPython's scripts. | |
264 | """ |
|
264 | """ | |
265 | parallel_scripts = pjoin('IPython','parallel','scripts') |
|
265 | parallel_scripts = pjoin('IPython','parallel','scripts') | |
266 | main_scripts = pjoin('IPython','scripts') |
|
266 | main_scripts = pjoin('IPython','scripts') | |
267 | scripts = [ |
|
267 | scripts = [ | |
268 | pjoin(parallel_scripts, 'ipengine |
|
268 | pjoin(parallel_scripts, 'ipengine'), | |
269 | pjoin(parallel_scripts, 'ipcontroller |
|
269 | pjoin(parallel_scripts, 'ipcontroller'), | |
270 | pjoin(parallel_scripts, 'ipcluster |
|
270 | pjoin(parallel_scripts, 'ipcluster'), | |
271 | pjoin(parallel_scripts, 'iplogger |
|
271 | pjoin(parallel_scripts, 'iplogger'), | |
272 | pjoin(main_scripts, 'ipython'), |
|
272 | pjoin(main_scripts, 'ipython'), | |
273 | pjoin(main_scripts, 'ipython-qtconsole'), |
|
273 | pjoin(main_scripts, 'ipython-qtconsole'), | |
274 | pjoin(main_scripts, 'pycolor'), |
|
274 | pjoin(main_scripts, 'pycolor'), | |
275 | pjoin(main_scripts, 'irunner'), |
|
275 | pjoin(main_scripts, 'irunner'), | |
276 | pjoin(main_scripts, 'iptest') |
|
276 | pjoin(main_scripts, 'iptest') | |
277 | ] |
|
277 | ] | |
278 |
|
278 | |||
279 | # Script to be run by the windows binary installer after the default setup |
|
279 | # Script to be run by the windows binary installer after the default setup | |
280 | # routine, to add shortcuts and similar windows-only things. Windows |
|
280 | # routine, to add shortcuts and similar windows-only things. Windows | |
281 | # post-install scripts MUST reside in the scripts/ dir, otherwise distutils |
|
281 | # post-install scripts MUST reside in the scripts/ dir, otherwise distutils | |
282 | # doesn't find them. |
|
282 | # doesn't find them. | |
283 | if 'bdist_wininst' in sys.argv: |
|
283 | if 'bdist_wininst' in sys.argv: | |
284 | if len(sys.argv) > 2 and \ |
|
284 | if len(sys.argv) > 2 and \ | |
285 | ('sdist' in sys.argv or 'bdist_rpm' in sys.argv): |
|
285 | ('sdist' in sys.argv or 'bdist_rpm' in sys.argv): | |
286 | print("ERROR: bdist_wininst must be run alone. Exiting.", |
|
286 | print("ERROR: bdist_wininst must be run alone. Exiting.", | |
287 | file=sys.stderr) |
|
287 | file=sys.stderr) | |
288 | sys.exit(1) |
|
288 | sys.exit(1) | |
289 | scripts.append(pjoin('scripts','ipython_win_post_install.py')) |
|
289 | scripts.append(pjoin('scripts','ipython_win_post_install.py')) | |
290 |
|
290 | |||
291 | return scripts |
|
291 | return scripts | |
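
The ``bdist_wininst`` branch above only takes effect when the Windows installer is actually being built; distutils' ``--install-script`` option is what makes the appended script run after installation, e.g. (a typical invocation, not quoted from this repository)::

    python setup.py bdist_wininst --install-script=ipython_win_post_install.py
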
292 |
|
292 | |||
293 | #--------------------------------------------------------------------------- |
|
293 | #--------------------------------------------------------------------------- | |
294 | # Verify all dependencies |
|
294 | # Verify all dependencies | |
295 | #--------------------------------------------------------------------------- |
|
295 | #--------------------------------------------------------------------------- | |
296 |
|
296 | |||
297 | def check_for_dependencies(): |
|
297 | def check_for_dependencies(): | |
298 | """Check for IPython's dependencies. |
|
298 | """Check for IPython's dependencies. | |
299 |
|
299 | |||
300 | This function should NOT be called if running under setuptools! |
|
300 | This function should NOT be called if running under setuptools! | |
301 | """ |
|
301 | """ | |
302 | from setupext.setupext import ( |
|
302 | from setupext.setupext import ( | |
303 | print_line, print_raw, print_status, |
|
303 | print_line, print_raw, print_status, | |
304 | check_for_sphinx, check_for_pygments, |
|
304 | check_for_sphinx, check_for_pygments, | |
305 | check_for_nose, check_for_pexpect, |
|
305 | check_for_nose, check_for_pexpect, | |
306 | check_for_pyzmq |
|
306 | check_for_pyzmq | |
307 | ) |
|
307 | ) | |
308 | print_line() |
|
308 | print_line() | |
309 | print_raw("BUILDING IPYTHON") |
|
309 | print_raw("BUILDING IPYTHON") | |
310 | print_status('python', sys.version) |
|
310 | print_status('python', sys.version) | |
311 | print_status('platform', sys.platform) |
|
311 | print_status('platform', sys.platform) | |
312 | if sys.platform == 'win32': |
|
312 | if sys.platform == 'win32': | |
313 | print_status('Windows version', sys.getwindowsversion()) |
|
313 | print_status('Windows version', sys.getwindowsversion()) | |
314 |
|
314 | |||
315 | print_raw("") |
|
315 | print_raw("") | |
316 | print_raw("OPTIONAL DEPENDENCIES") |
|
316 | print_raw("OPTIONAL DEPENDENCIES") | |
317 |
|
317 | |||
318 | check_for_sphinx() |
|
318 | check_for_sphinx() | |
319 | check_for_pygments() |
|
319 | check_for_pygments() | |
320 | check_for_nose() |
|
320 | check_for_nose() | |
321 | check_for_pexpect() |
|
321 | check_for_pexpect() | |
322 | check_for_pyzmq() |
|
322 | check_for_pyzmq() | |
323 |
|
323 | |||
324 |
|
324 | |||
325 | def record_commit_info(pkg_dir, build_cmd=build_py): |
|
325 | def record_commit_info(pkg_dir, build_cmd=build_py): | |
326 | """ Return extended build command class for recording commit |
|
326 | """ Return extended build command class for recording commit | |
327 |
|
327 | |||
328 | The extended command tries to run git to find the current commit, getting |
|
328 | The extended command tries to run git to find the current commit, getting | |
329 | the empty string if it fails. It then writes the commit hash into a file |
|
329 | the empty string if it fails. It then writes the commit hash into a file | |
330 | in the `pkg_dir` path, named ``.git_commit_info.ini``. |
|
330 | in the `pkg_dir` path, named ``.git_commit_info.ini``. | |
331 |
|
331 | |||
332 | In due course this information can be used by the package after it is |
|
332 | In due course this information can be used by the package after it is | |
333 | installed, to tell you what commit it was installed from if known. |
|
333 | installed, to tell you what commit it was installed from if known. | |
334 |
|
334 | |||
335 | To make use of this system, you need a package with a .git_commit_info.ini |
|
335 | To make use of this system, you need a package with a .git_commit_info.ini | |
336 | file - e.g. ``myproject/.git_commit_info.ini`` - that might well look like |
|
336 | file - e.g. ``myproject/.git_commit_info.ini`` - that might well look like | |
337 | this:: |
|
337 | this:: | |
338 |
|
338 | |||
339 | # This is an ini file that may contain information about the code state |
|
339 | # This is an ini file that may contain information about the code state | |
340 | [commit hash] |
|
340 | [commit hash] | |
341 | # The line below may contain a valid hash if it has been substituted |
|
341 | # The line below may contain a valid hash if it has been substituted | |
342 | # during 'git archive' |
|
342 | # during 'git archive' | |
343 | archive_subst_hash=$Format:%h$ |
|
343 | archive_subst_hash=$Format:%h$ | |
344 | # This line may be modified by the install process |
|
344 | # This line may be modified by the install process | |
345 | install_hash= |
|
345 | install_hash= | |
346 |
|
346 | |||
347 | The .git_commit_info file above is also designed to be used with git |
|
347 | The .git_commit_info file above is also designed to be used with git | |
348 | substitution - so you probably also want a ``.gitattributes`` file in the |
|
348 | substitution - so you probably also want a ``.gitattributes`` file in the | |
349 | root directory of your working tree that contains something like this:: |
|
349 | root directory of your working tree that contains something like this:: | |
350 |
|
350 | |||
351 | myproject/.git_commit_info.ini export-subst |
|
351 | myproject/.git_commit_info.ini export-subst | |
352 |
|
352 | |||
353 | That will cause the ``.git_commit_info.ini`` file to get filled in by ``git |
|
353 | That will cause the ``.git_commit_info.ini`` file to get filled in by ``git | |
354 | archive`` - useful in case someone makes such an archive - for example |
|
354 | archive`` - useful in case someone makes such an archive - for example | |
355 | via the github 'download source' button. |
|
355 | via the github 'download source' button. | |
356 |
|
356 | |||
357 | Although all the above will work as is, you might consider having something |
|
357 | Although all the above will work as is, you might consider having something | |
358 | like a ``get_info()`` function in your package to display the commit |
|
358 | like a ``get_info()`` function in your package to display the commit | |
359 | information at the terminal. See the ``pkg_info.py`` module in the nipy |
|
359 | information at the terminal. See the ``pkg_info.py`` module in the nipy | |
360 | package for an example. |
|
360 | package for an example. | |
361 | """ |
|
361 | """ | |
362 | class MyBuildPy(build_cmd): |
|
362 | class MyBuildPy(build_cmd): | |
363 | ''' Subclass to write commit data into installation tree ''' |
|
363 | ''' Subclass to write commit data into installation tree ''' | |
364 | def run(self): |
|
364 | def run(self): | |
365 | build_py.run(self) |
|
365 | build_py.run(self) | |
366 | import subprocess |
|
366 | import subprocess | |
367 | proc = subprocess.Popen('git rev-parse --short HEAD', |
|
367 | proc = subprocess.Popen('git rev-parse --short HEAD', | |
368 | stdout=subprocess.PIPE, |
|
368 | stdout=subprocess.PIPE, | |
369 | stderr=subprocess.PIPE, |
|
369 | stderr=subprocess.PIPE, | |
370 | shell=True) |
|
370 | shell=True) | |
371 | repo_commit, _ = proc.communicate() |
|
371 | repo_commit, _ = proc.communicate() | |
372 | # We write the installation commit even if it's empty |
|
372 | # We write the installation commit even if it's empty | |
373 | cfg_parser = ConfigParser() |
|
373 | cfg_parser = ConfigParser() | |
374 | cfg_parser.read(pjoin(pkg_dir, '.git_commit_info.ini')) |
|
374 | cfg_parser.read(pjoin(pkg_dir, '.git_commit_info.ini')) | |
375 | cfg_parser.set('commit hash', 'install_hash', repo_commit) |
|
375 | cfg_parser.set('commit hash', 'install_hash', repo_commit) | |
376 | out_pth = pjoin(self.build_lib, pkg_dir, '.git_commit_info.ini') |
|
376 | out_pth = pjoin(self.build_lib, pkg_dir, '.git_commit_info.ini') | |
377 | out_file = open(out_pth, 'wt') |
|
377 | out_file = open(out_pth, 'wt') | |
378 | cfg_parser.write(out_file) |
|
378 | cfg_parser.write(out_file) | |
379 | out_file.close() |
|
379 | out_file.close() | |
380 | return MyBuildPy |
|
380 | return MyBuildPy |
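
Once installed, the recorded information can be read back with the same ``ConfigParser`` machinery; a minimal sketch of the kind of ``get_info()`` helper suggested above (function name and fallback order are illustrative, modeled on nipy's ``pkg_info.py``, not part of this changeset)::

    from ConfigParser import ConfigParser
    from os.path import join as pjoin

    def get_commit_hash(pkg_dir):
        """Return the best available commit hash for an installed package."""
        cfg = ConfigParser()
        cfg.read(pjoin(pkg_dir, '.git_commit_info.ini'))
        # Prefer a hash substituted by 'git archive'; if the placeholder
        # '$Format:%h$' is still there, fall back to the hash written at
        # install time (which may be the empty string).
        archive_hash = cfg.get('commit hash', 'archive_subst_hash')
        if not archive_hash.startswith('$Format'):
            return archive_hash
        return cfg.get('commit hash', 'install_hash')
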
NO CONTENT: file was removed
NO CONTENT: file was removed
NO CONTENT: file was removed
NO CONTENT: file was removed