@@ -1,1033 +1,1041 @@
|
1 | 1 | #!/usr/bin/env python |
|
2 | 2 | # encoding: utf-8 |
|
3 | 3 | |
|
4 | 4 | """Start an IPython cluster = (controller + engines).""" |
|
5 | 5 | |
|
6 | 6 | #----------------------------------------------------------------------------- |
|
7 | 7 | # Copyright (C) 2008 The IPython Development Team |
|
8 | 8 | # |
|
9 | 9 | # Distributed under the terms of the BSD License. The full license is in |
|
10 | 10 | # the file COPYING, distributed as part of this software. |
|
11 | 11 | #----------------------------------------------------------------------------- |
|
12 | 12 | |
|
13 | 13 | #----------------------------------------------------------------------------- |
|
14 | 14 | # Imports |
|
15 | 15 | #----------------------------------------------------------------------------- |
|
16 | 16 | |
|
17 | 17 | import os |
|
18 | 18 | import re |
|
19 | 19 | import sys |
|
20 | 20 | import signal |
|
21 | 21 | import stat |
|
22 | 22 | import tempfile |
|
23 | 23 | pjoin = os.path.join |
|
24 | 24 | |
|
25 | 25 | from twisted.internet import reactor, defer |
|
26 | 26 | from twisted.internet.protocol import ProcessProtocol |
|
27 | 27 | from twisted.internet.error import ProcessDone, ProcessTerminated |
|
28 | 28 | from twisted.internet.utils import getProcessOutput |
|
29 | 29 | from twisted.python import failure, log |
|
30 | 30 | |
|
31 | 31 | from IPython.external import argparse |
|
32 | 32 | from IPython.external import Itpl |
|
33 | 33 | from IPython.genutils import ( |
|
34 | 34 | get_ipython_dir, |
|
35 | 35 | get_log_dir, |
|
36 | 36 | get_security_dir, |
|
37 | 37 | num_cpus |
|
38 | 38 | ) |
|
39 | 39 | from IPython.kernel.fcutil import have_crypto |
|
40 | 40 | |
|
41 | 41 | # Create various ipython directories if they don't exist. |
|
42 | 42 | # This must be done before IPython.kernel.config is imported. |
|
43 | 43 | from IPython.iplib import user_setup |
|
44 | 44 | if os.name == 'posix': |
|
45 | 45 | rc_suffix = '' |
|
46 | 46 | else: |
|
47 | 47 | rc_suffix = '.ini' |
|
48 | 48 | user_setup(get_ipython_dir(), rc_suffix, mode='install', interactive=False) |
|
49 | 49 | get_log_dir() |
|
50 | 50 | get_security_dir() |
|
51 | 51 | |
|
52 | 52 | from IPython.kernel.config import config_manager as kernel_config_manager |
|
53 | 53 | from IPython.kernel.error import SecurityError, FileTimeoutError |
|
54 | 54 | from IPython.kernel.fcutil import have_crypto |
|
55 | 55 | from IPython.kernel.twistedutil import gatherBoth, wait_for_file |
|
56 | 56 | from IPython.kernel.util import printer |
|
57 | 57 | |
|
58 | 58 | #----------------------------------------------------------------------------- |
|
59 | 59 | # General process handling code |
|
60 | 60 | #----------------------------------------------------------------------------- |
|
61 | 61 | |
|
62 | 62 | |
|
63 | 63 | class ProcessStateError(Exception): |
|
64 | 64 | pass |
|
65 | 65 | |
|
66 | 66 | class UnknownStatus(Exception): |
|
67 | 67 | pass |
|
68 | 68 | |
|
69 | 69 | class LauncherProcessProtocol(ProcessProtocol): |
|
70 | 70 | """ |
|
71 | 71 | A ProcessProtocol to go with the ProcessLauncher. |
|
72 | 72 | """ |
|
73 | 73 | def __init__(self, process_launcher): |
|
74 | 74 | self.process_launcher = process_launcher |
|
75 | 75 | |
|
76 | 76 | def connectionMade(self): |
|
77 | 77 | self.process_launcher.fire_start_deferred(self.transport.pid) |
|
78 | 78 | |
|
79 | 79 | def processEnded(self, status): |
|
80 | 80 | value = status.value |
|
81 | 81 | if isinstance(value, ProcessDone): |
|
82 | 82 | self.process_launcher.fire_stop_deferred(0) |
|
83 | 83 | elif isinstance(value, ProcessTerminated): |
|
84 | 84 | self.process_launcher.fire_stop_deferred( |
|
85 | 85 | {'exit_code':value.exitCode, |
|
86 | 86 | 'signal':value.signal, |
|
87 | 87 | 'status':value.status |
|
88 | 88 | } |
|
89 | 89 | ) |
|
90 | 90 | else: |
|
91 | 91 | raise UnknownStatus("unknown exit status, this is probably a bug in Twisted") |
|
92 | 92 | |
|
93 | 93 | def outReceived(self, data): |
|
94 | 94 | log.msg(data) |
|
95 | 95 | |
|
96 | 96 | def errReceived(self, data): |
|
97 | 97 | log.err(data) |
|
98 | 98 | |
|
99 | 99 | class ProcessLauncher(object): |
|
100 | 100 | """ |
|
101 | 101 | Start and stop an external process in an asynchronous manner. |
|
102 | 102 | |
|
103 | 103 | Currently this uses deferreds to notify other parties of process state |
|
104 | 104 | changes. This is an awkward design and should be moved to using |
|
105 | 105 | a formal NotificationCenter. |
|
106 | 106 | """ |
|
107 | 107 | def __init__(self, cmd_and_args): |
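| | # reactor.spawnProcess expects the executable and the full argv list |

| | # (including the program name itself) as separate arguments. |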
|
108 | 108 | self.cmd = cmd_and_args[0] |
|
109 | 109 | self.args = cmd_and_args |
|
110 | 110 | self._reset() |
|
111 | 111 | |
|
112 | 112 | def _reset(self): |
|
113 | 113 | self.process_protocol = None |
|
114 | 114 | self.pid = None |
|
115 | 115 | self.start_deferred = None |
|
116 | 116 | self.stop_deferreds = [] |
|
117 | 117 | self.state = 'before' # before, running, or after |
|
118 | 118 | |
|
119 | 119 | @property |
|
120 | 120 | def running(self): |
|
121 | 121 | if self.state == 'running': |
|
122 | 122 | return True |
|
123 | 123 | else: |
|
124 | 124 | return False |
|
125 | 125 | |
|
126 | 126 | def fire_start_deferred(self, pid): |
|
127 | 127 | self.pid = pid |
|
128 | 128 | self.state = 'running' |
|
129 | 129 | log.msg('Process %r has started with pid=%i' % (self.args, pid)) |
|
130 | 130 | self.start_deferred.callback(pid) |
|
131 | 131 | |
|
132 | 132 | def start(self): |
|
133 | 133 | if self.state == 'before': |
|
134 | 134 | self.process_protocol = LauncherProcessProtocol(self) |
|
135 | 135 | self.start_deferred = defer.Deferred() |
|
136 | 136 | self.process_transport = reactor.spawnProcess( |
|
137 | 137 | self.process_protocol, |
|
138 | 138 | self.cmd, |
|
139 | 139 | self.args, |
|
140 | 140 | env=os.environ |
|
141 | 141 | ) |
|
142 | 142 | return self.start_deferred |
|
143 | 143 | else: |
|
144 | 144 | s = 'the process has already been started and has state: %r' % \ |
|
145 | 145 | self.state |
|
146 | 146 | return defer.fail(ProcessStateError(s)) |
|
147 | 147 | |
|
148 | 148 | def get_stop_deferred(self): |
|
149 | 149 | if self.state == 'running' or self.state == 'before': |
|
150 | 150 | d = defer.Deferred() |
|
151 | 151 | self.stop_deferreds.append(d) |
|
152 | 152 | return d |
|
153 | 153 | else: |
|
154 | 154 | s = 'this process is already complete' |
|
155 | 155 | return defer.fail(ProcessStateError(s)) |
|
156 | 156 | |
|
157 | 157 | def fire_stop_deferred(self, exit_code): |
|
158 | 158 | log.msg('Process %r has stopped with %r' % (self.args, exit_code)) |
|
159 | 159 | self.state = 'after' |
|
160 | 160 | for d in self.stop_deferreds: |
|
161 | 161 | d.callback(exit_code) |
|
162 | 162 | |
|
163 | 163 | def signal(self, sig): |
|
164 | 164 | """ |
|
165 | 165 | Send a signal to the process. |
|
166 | 166 | |
|
167 | 167 | The argument sig can be a signal name ('KILL', 'INT', etc.) or any signal number. |
|
168 | 168 | """ |
|
169 | 169 | if self.state == 'running': |
|
170 | 170 | self.process_transport.signalProcess(sig) |
|
171 | 171 | |
|
172 | 172 | # def __del__(self): |
|
173 | 173 | # self.signal('KILL') |
|
174 | 174 | |
|
175 | 175 | def interrupt_then_kill(self, delay=1.0): |
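| | # Ask politely with SIGINT first, then escalate to SIGKILL if the |

| | # process is still alive after `delay` seconds. |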
|
176 | 176 | self.signal('INT') |
|
177 | 177 | reactor.callLater(delay, self.signal, 'KILL') |
|
178 | 178 | |
|
179 | 179 | |
|
180 | 180 | #----------------------------------------------------------------------------- |
|
181 | 181 | # Code for launching controller and engines |
|
182 | 182 | #----------------------------------------------------------------------------- |
|
183 | 183 | |
|
184 | 184 | |
|
185 | 185 | class ControllerLauncher(ProcessLauncher): |
|
186 | 186 | |
|
187 | 187 | def __init__(self, extra_args=None): |
|
188 | 188 | if sys.platform == 'win32': |
|
189 | 189 | # This logic is needed because the ipcontroller script doesn't |
|
190 | 190 | # always get installed in the same way or in the same location. |
|
191 | 191 | from IPython.kernel.scripts import ipcontroller |
|
192 | 192 | script_location = ipcontroller.__file__.replace('.pyc', '.py') |
|
193 | 193 | # The -u option here turns on unbuffered output, which is required |
|
194 | 194 | # on Win32 to prevent weird conflicts and problems with Twisted. |
|
195 | 195 | # Also, use sys.executable to make sure we are picking up the |
|
196 | 196 | # right python exe. |
|
197 | 197 | args = [sys.executable, '-u', script_location] |
|
198 | 198 | else: |
|
199 | 199 | args = ['ipcontroller'] |
|
200 | 200 | self.extra_args = extra_args |
|
201 | 201 | if extra_args is not None: |
|
202 | 202 | args.extend(extra_args) |
|
203 | 203 | |
|
204 | 204 | ProcessLauncher.__init__(self, args) |
|
205 | 205 | |
|
206 | 206 | |
|
207 | 207 | class EngineLauncher(ProcessLauncher): |
|
208 | 208 | |
|
209 | 209 | def __init__(self, extra_args=None): |
|
210 | 210 | if sys.platform == 'win32': |
|
211 | 211 | # This logic is needed because the ipengine script doesn't |
|
212 | 212 | # always get installed in the same way or in the same location. |
|
213 | 213 | from IPython.kernel.scripts import ipengine |
|
214 | 214 | script_location = ipengine.__file__.replace('.pyc', '.py') |
|
215 | 215 | # The -u option here turns on unbuffered output, which is required |
|
216 | 216 | # on Win32 to prevent weird conflicts and problems with Twisted. |
|
217 | 217 | # Also, use sys.executable to make sure we are picking up the |
|
218 | 218 | # right python exe. |
|
219 | 219 | args = [sys.executable, '-u', script_location] |
|
220 | 220 | else: |
|
221 | 221 | args = ['ipengine'] |
|
222 | 222 | self.extra_args = extra_args |
|
223 | 223 | if extra_args is not None: |
|
224 | 224 | args.extend(extra_args) |
|
225 | 225 | |
|
226 | 226 | ProcessLauncher.__init__(self, args) |
|
227 | 227 | |
|
228 | 228 | |
|
229 | 229 | class LocalEngineSet(object): |
|
230 | 230 | |
|
231 | 231 | def __init__(self, extra_args=None): |
|
232 | 232 | self.extra_args = extra_args |
|
233 | 233 | self.launchers = [] |
|
234 | 234 | |
|
235 | 235 | def start(self, n): |
|
236 | 236 | dlist = [] |
|
237 | 237 | for i in range(n): |
|
238 | 238 | print "starting engine:", i |
|
239 | 239 | el = EngineLauncher(extra_args=self.extra_args) |
|
240 | 240 | d = el.start() |
|
241 | 241 | self.launchers.append(el) |
|
242 | 242 | dlist.append(d) |
|
243 | 243 | dfinal = gatherBoth(dlist, consumeErrors=True) |
|
244 | 244 | dfinal.addCallback(self._handle_start) |
|
245 | 245 | return dfinal |
|
246 | 246 | |
|
247 | 247 | def _handle_start(self, r): |
|
248 | 248 | log.msg('Engines started with pids: %r' % r) |
|
249 | 249 | return r |
|
250 | 250 | |
|
251 | 251 | def _handle_stop(self, r): |
|
252 | 252 | log.msg('Engines stopped with: %r' % r) |
|
253 | 253 | return r |
|
254 | 254 | |
|
255 | 255 | def signal(self, sig): |
|
256 | 256 | dlist = [] |
|
257 | 257 | for el in self.launchers: |
|
258 | 258 | d = el.get_stop_deferred() |
|
259 | 259 | dlist.append(d) |
|
260 | 260 | el.signal(sig) |
|
261 | 261 | dfinal = gatherBoth(dlist, consumeErrors=True) |
|
262 | 262 | dfinal.addCallback(self._handle_stop) |
|
263 | 263 | return dfinal |
|
264 | 264 | |
|
265 | 265 | def interrupt_then_kill(self, delay=1.0): |
|
266 | 266 | dlist = [] |
|
267 | 267 | for el in self.launchers: |
|
268 | 268 | d = el.get_stop_deferred() |
|
269 | 269 | dlist.append(d) |
|
270 | 270 | el.interrupt_then_kill(delay) |
|
271 | 271 | dfinal = gatherBoth(dlist, consumeErrors=True) |
|
272 | 272 | dfinal.addCallback(self._handle_stop) |
|
273 | 273 | return dfinal |
|
274 | 274 | |
|
275 | 275 | class BatchEngineSet(object): |
|
276 | 276 | |
|
277 | 277 | # Subclasses must fill these in. See PBSEngineSet/SGEEngineSet |
|
278 | 278 | name = '' |
|
279 | 279 | submit_command = '' |
|
280 | 280 | delete_command = '' |
|
281 | 281 | job_id_regexp = '' |
|
282 | 282 | job_array_regexp = '' |
|
283 | 283 | job_array_template = '' |
|
284 | 284 | queue_regexp = '' |
|
285 | 285 | queue_template = '' |
|
286 | 286 | default_template = '' |
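| | # The *_template strings are filled in with the engine count (%d) or |

| | # the queue name (%s) before being written to the submission script. |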
|
287 | 287 | |
|
288 | 288 | def __init__(self, template_file, queue, **kwargs): |
|
289 | 289 | self.template_file = template_file |
|
290 | 290 | self.queue = queue |
|
291 | 291 | |
|
292 | 292 | def parse_job_id(self, output): |
|
293 | 293 | m = re.search(self.job_id_regexp, output) |
|
294 | 294 | if m is not None: |
|
295 | 295 | job_id = m.group() |
|
296 | 296 | else: |
|
297 | 297 | raise Exception("job id couldn't be determined: %s" % output) |
|
298 | 298 | self.job_id = job_id |
|
299 | 299 | log.msg('Job started with job id: %r' % job_id) |
|
300 | 300 | return job_id |
|
301 | 301 | |
|
302 | 302 | def handle_error(self, f): |
|
303 | 303 | f.printTraceback() |
|
304 | 304 | f.raiseException() |
|
305 | 305 | |
|
306 | 306 | def start(self, n): |
|
307 | 307 | log.msg("starting %d engines" % n) |
|
308 | 308 | self._temp_file = tempfile.NamedTemporaryFile() |
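| | # Make the generated submission script readable, writable and |

| | # executable by the owner only. |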
|
309 | 309 | os.chmod(self._temp_file.name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) |
|
310 | 310 | if self.template_file: |
|
311 | 311 | log.msg("Using %s script %s" % (self.name, self.template_file)) |
|
312 | 312 | contents = open(self.template_file, 'r').read() |
|
313 | 313 | new_script = contents |
|
314 | 314 | regex = re.compile(self.job_array_regexp) |
|
315 | 315 | if not regex.search(contents): |
|
316 | 316 | log.msg("adding job array settings to %s script" % self.name) |
|
317 | 317 | new_script = self.job_array_template % n +'\n' + new_script |
|
318 | 318 | log.msg('queue regexp: %s' % self.queue_regexp) |
|
319 | 319 | regex = re.compile(self.queue_regexp) |
|
320 | 320 | log.msg('queue match: %r' % regex.search(contents)) |
|
321 | 321 | if self.queue and not regex.search(contents): |
|
322 | 322 | log.msg("adding queue settings to %s script" % self.name) |
|
323 | 323 | new_script = self.queue_template % self.queue + '\n' + new_script |
|
324 | 324 | if new_script != contents: |
|
325 | 325 | self._temp_file.write(new_script) |
|
326 | 326 | self.template_file = self._temp_file.name |
|
327 | 327 | else: |
|
328 | 328 | default_script = self.default_template % n |
|
329 | 329 | if self.queue: |
|
330 | 330 | default_script = self.queue_template % self.queue + \ |
|
331 | 331 | '\n' + default_script |
|
332 | 332 | log.msg("using default ipengine %s script: \n%s" % |
|
333 | 333 | (self.name, default_script)) |
|
334 | 334 | self._temp_file.file.write(default_script) |
|
335 | 335 | self.template_file = self._temp_file.name |
|
336 | 336 | self._temp_file.file.close() |
|
337 | 337 | d = getProcessOutput(self.submit_command, |
|
338 | 338 | [self.template_file], |
|
339 | 339 | env=os.environ) |
|
340 | 340 | d.addCallback(self.parse_job_id) |
|
341 | 341 | d.addErrback(self.handle_error) |
|
342 | 342 | return d |
|
343 | 343 | |
|
344 | 344 | def kill(self): |
|
345 | 345 | d = getProcessOutput(self.delete_command, |
|
346 | 346 | [self.job_id],env=os.environ) |
|
347 | 347 | return d |
|
348 | 348 | |
|
349 | 349 | class PBSEngineSet(BatchEngineSet): |
|
350 | 350 | |
|
351 | 351 | name = 'PBS' |
|
352 | 352 | submit_command = 'qsub' |
|
353 | 353 | delete_command = 'qdel' |
|
354 | 354 | job_id_regexp = '\d+' |
|
355 | 355 | job_array_regexp = '#PBS[ \t]+-t[ \t]+\d+' |
|
356 | 356 | job_array_template = '#PBS -t 1-%d' |
|
357 | 357 | queue_regexp = '#PBS[ \t]+-q[ \t]+\w+' |
|
358 | 358 | queue_template = '#PBS -q %s' |
|
359 | 359 | default_template="""#!/bin/sh |
|
360 | 360 | #PBS -V |
|
361 | 361 | #PBS -t 1-%d |
|
362 | 362 | #PBS -N ipengine |
|
363 | 363 | eid=$(($PBS_ARRAYID - 1)) |
|
364 | 364 | ipengine --logfile=ipengine${eid}.log |
|
365 | 365 | """ |
|
366 | 366 | |
|
367 | 367 | class SGEEngineSet(PBSEngineSet): |
|
368 | 368 | |
|
369 | 369 | name = 'SGE' |
|
370 | 370 | job_array_regexp = '#\$[ \t]+-t[ \t]+\d+' |
|
371 | 371 | job_array_template = '#$ -t 1-%d' |
|
372 | 372 | queue_regexp = '#\$[ \t]+-q[ \t]+\w+' |
|
373 | 373 | queue_template = '#$ -q %s' |
|
374 | 374 | default_template="""#$ -V |
|
375 | 375 | #$ -S /bin/sh |
|
376 | 376 | #$ -t 1-%d |
|
377 | 377 | #$ -N ipengine |
|
378 | 378 | eid=$(($SGE_TASK_ID - 1)) |
|
379 | 379 | ipengine --logfile=ipengine${eid}.log |
|
380 | 380 | """ |
|
381 | 381 | |
|
382 | 382 | class LSFEngineSet(PBSEngineSet): |
|
383 | 383 | |
|
384 | 384 | name = 'LSF' |
|
385 | 385 | submit_command = 'bsub' |
|
386 | 386 | delete_command = 'bkill' |
|
387 | 387 | job_array_regexp = '#BSUB[ \t]+-J[ \t]+\w+\[\d+-\d+\]' |
|
388 | 388 | job_array_template = '#BSUB -J ipengine[1-%d]' |
|
389 | 389 | queue_regexp = '#BSUB[ \t]+-q[ \t]+\w+' |
|
390 | 390 | queue_template = '#BSUB -q %s' |
|
391 | 391 | default_template="""#!/bin/sh |
|
392 | 392 | #BSUB -J ipengine[1-%d] |
|
393 | 393 | eid=$(($LSB_JOBINDEX - 1)) |
|
394 | 394 | ipengine --logfile=ipengine${eid}.log |
|
395 | 395 | """ |
|
396 | 396 | bsub_wrapper="""#!/bin/sh |
|
397 | 397 | bsub < $1 |
|
398 | 398 | """ |
|
399 | 399 | |
|
400 | 400 | def __init__(self, template_file, queue, **kwargs): |
|
401 | 401 | self._bsub_wrapper = self._make_bsub_wrapper() |
|
402 | 402 | self.submit_command = self._bsub_wrapper.name |
|
403 | 403 | PBSEngineSet.__init__(self,template_file, queue, **kwargs) |
|
404 | 404 | |
|
405 | 405 | def _make_bsub_wrapper(self): |
|
406 | 406 | bsub_wrapper = tempfile.NamedTemporaryFile() |
|
407 | 407 | bsub_wrapper.write(self.bsub_wrapper) |
|
408 | 408 | bsub_wrapper.file.close() |
|
409 | 409 | os.chmod(bsub_wrapper.name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) |
|
410 | 410 | return bsub_wrapper |
|
411 | 411 | |
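| | # The sshx helper script runs the command passed to it in the |

| | # background, silences its output, and echoes the PID ($!). |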
|
412 | 412 | sshx_template_prefix="""#!/bin/sh |
|
413 | 413 | """ |
|
414 | 414 | sshx_template_suffix=""""$@" &> /dev/null & |
|
415 | 415 | echo $! |
|
416 | 416 | """ |
|
417 | 417 | |
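| | # The '[i]pengine' pattern keeps grep from matching its own entry |

| | # in the ps output. |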
|
418 | 418 | engine_killer_template="""#!/bin/sh |
|
419 | 419 | ps -fu `whoami` | grep '[i]pengine' | awk '{print $2}' | xargs kill -TERM |
|
420 | 420 | """ |
|
421 | 421 | |
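| # Escape parentheses and quote values containing spaces so that the |

| # generated 'export KEY=VALUE' lines survive remote shell parsing. |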
|
422 | def escape_strings(val): | |
|
423 | val = val.replace('(','\(') | |
|
424 | val = val.replace(')','\)') | |
|
425 | if ' ' in val: | |
|
426 | val = '"%s"'%val | |
|
427 | return val | |
|
428 | ||
|
422 | 429 | class SSHEngineSet(object): |
|
423 | 430 | sshx_template_prefix=sshx_template_prefix |
|
424 | 431 | sshx_template_suffix=sshx_template_suffix |
|
425 | 432 | engine_killer_template=engine_killer_template |
|
426 | 433 | |
|
427 | 434 | def __init__(self, engine_hosts, sshx=None, copyenvs=None, ipengine="ipengine"): |
|
428 | 435 | """Start a controller on localhost and engines using ssh. |
|
429 | 436 | |
|
430 | 437 | The engine_hosts argument is a dict with hostnames as keys and |
|
431 | 438 | the number of engines (int) as values. sshx is the name of a local |

432 | 439 | file that will be used to run remote commands. This file is used |

433 | 440 | to set up the environment properly. |
|
434 | 441 | """ |
|
435 | 442 | |
|
436 | 443 | self.temp_dir = tempfile.gettempdir() |
|
437 | 444 | if sshx is not None: |
|
438 | 445 | self.sshx = sshx |
|
439 | 446 | else: |
|
440 | 447 | # Write the sshx.sh file locally from our template. |
|
441 | 448 | self.sshx = os.path.join( |
|
442 | 449 | self.temp_dir, |
|
443 | 450 | '%s-main-sshx.sh' % os.environ['USER'] |
|
444 | 451 | ) |
|
445 | 452 | f = open(self.sshx, 'w') |
|
446 | 453 | f.writelines(self.sshx_template_prefix) |
|
447 | 454 | if copyenvs: |
|
448 | for key, val in os.environ.items(): | |
|
449 | f.writelines('export %s=%s\n'%(key,val)) | |
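| # Iterate in sorted order so the generated script is deterministic, |

| # and escape each value before writing the export line. |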
|
455 | for key, val in sorted(os.environ.items()): | |
|
456 | newval = escape_strings(val) | |
|
457 | f.writelines('export %s=%s\n'%(key,newval)) | |
|
450 | 458 | f.writelines(self.sshx_template_suffix) |
|
451 | 459 | f.close() |
|
452 | 460 | self.engine_command = ipengine |
|
453 | 461 | self.engine_hosts = engine_hosts |
|
454 | 462 | # Write the engine killer script file locally from our template. |
|
455 | 463 | self.engine_killer = os.path.join( |
|
456 | 464 | self.temp_dir, |
|
457 | 465 | '%s-local-engine_killer.sh' % os.environ['USER'] |
|
458 | 466 | ) |
|
459 | 467 | f = open(self.engine_killer, 'w') |
|
460 | 468 | f.writelines(self.engine_killer_template) |
|
461 | 469 | f.close() |
|
462 | 470 | |
|
463 | 471 | def start(self, send_furl=False): |
|
464 | 472 | dlist = [] |
|
465 | 473 | for host in self.engine_hosts.keys(): |
|
466 | 474 | count = self.engine_hosts[host] |
|
467 | 475 | d = self._start(host, count, send_furl) |
|
468 | 476 | dlist.append(d) |
|
469 | 477 | return gatherBoth(dlist, consumeErrors=True) |
|
470 | 478 | |
|
471 | 479 | def _start(self, hostname, count=1, send_furl=False): |
|
472 | 480 | if send_furl: |
|
473 | 481 | d = self._scp_furl(hostname) |
|
474 | 482 | else: |
|
475 | 483 | d = defer.succeed(None) |
|
476 | 484 | d.addCallback(lambda r: self._scp_sshx(hostname)) |
|
477 | 485 | d.addCallback(lambda r: self._ssh_engine(hostname, count)) |
|
478 | 486 | return d |
|
479 | 487 | |
|
480 | 488 | def _scp_furl(self, hostname): |
|
481 | 489 | scp_cmd = "scp ~/.ipython/security/ipcontroller-engine.furl %s:.ipython/security/" % (hostname) |
|
482 | 490 | cmd_list = scp_cmd.split() |
|
483 | 491 | cmd_list[1] = os.path.expanduser(cmd_list[1]) |
|
484 | 492 | log.msg('Copying furl file: %s' % scp_cmd) |
|
485 | 493 | d = getProcessOutput(cmd_list[0], cmd_list[1:], env=os.environ) |
|
486 | 494 | return d |
|
487 | 495 | |
|
488 | 496 | def _scp_sshx(self, hostname): |
|
489 | 497 | scp_cmd = "scp %s %s:%s/%s-sshx.sh" % ( |
|
490 | 498 | self.sshx, hostname, |
|
491 | 499 | self.temp_dir, os.environ['USER'] |
|
492 | 500 | ) |
|
493 | 501 | |
|
494 | 502 | log.msg("Copying sshx: %s" % scp_cmd) |
|
495 | 503 | sshx_scp = scp_cmd.split() |
|
496 | 504 | d = getProcessOutput(sshx_scp[0], sshx_scp[1:], env=os.environ) |
|
497 | 505 | return d |
|
498 | 506 | |
|
499 | 507 | def _ssh_engine(self, hostname, count): |
|
500 | 508 | exec_engine = "ssh %s sh %s/%s-sshx.sh %s" % ( |
|
501 | 509 | hostname, self.temp_dir, |
|
502 | 510 | os.environ['USER'], self.engine_command |
|
503 | 511 | ) |
|
504 | 512 | cmds = exec_engine.split() |
|
505 | 513 | dlist = [] |
|
506 | 514 | log.msg("about to start engines...") |
|
507 | 515 | for i in range(count): |
|
508 | 516 | log.msg('Starting engines: %s' % exec_engine) |
|
509 | 517 | d = getProcessOutput(cmds[0], cmds[1:], env=os.environ) |
|
510 | 518 | dlist.append(d) |
|
511 | 519 | return gatherBoth(dlist, consumeErrors=True) |
|
512 | 520 | |
|
513 | 521 | def kill(self): |
|
514 | 522 | dlist = [] |
|
515 | 523 | for host in self.engine_hosts.keys(): |
|
516 | 524 | d = self._killall(host) |
|
517 | 525 | dlist.append(d) |
|
518 | 526 | return gatherBoth(dlist, consumeErrors=True) |
|
519 | 527 | |
|
520 | 528 | def _killall(self, hostname): |
|
521 | 529 | d = self._scp_engine_killer(hostname) |
|
522 | 530 | d.addCallback(lambda r: self._ssh_kill(hostname)) |
|
523 | 531 | # d.addErrback(self._exec_err) |
|
524 | 532 | return d |
|
525 | 533 | |
|
526 | 534 | def _scp_engine_killer(self, hostname): |
|
527 | 535 | scp_cmd = "scp %s %s:%s/%s-engine_killer.sh" % ( |
|
528 | 536 | self.engine_killer, |
|
529 | 537 | hostname, |
|
530 | 538 | self.temp_dir, |
|
531 | 539 | os.environ['USER'] |
|
532 | 540 | ) |
|
533 | 541 | cmds = scp_cmd.split() |
|
534 | 542 | log.msg('Copying engine_killer: %s' % scp_cmd) |
|
535 | 543 | d = getProcessOutput(cmds[0], cmds[1:], env=os.environ) |
|
536 | 544 | return d |
|
537 | 545 | |
|
538 | 546 | def _ssh_kill(self, hostname): |
|
539 | 547 | kill_cmd = "ssh %s sh %s/%s-engine_killer.sh" % ( |
|
540 | 548 | hostname, |
|
541 | 549 | self.temp_dir, |
|
542 | 550 | os.environ['USER'] |
|
543 | 551 | ) |
|
544 | 552 | log.msg('Killing engine: %s' % kill_cmd) |
|
545 | 553 | kill_cmd = kill_cmd.split() |
|
546 | 554 | d = getProcessOutput(kill_cmd[0], kill_cmd[1:], env=os.environ) |
|
547 | 555 | return d |
|
548 | 556 | |
|
549 | 557 | def _exec_err(self, r): |
|
550 | 558 | log.msg(r) |
|
551 | 559 | |
|
552 | 560 | #----------------------------------------------------------------------------- |
|
553 | 561 | # Main functions for the different types of clusters |
|
554 | 562 | #----------------------------------------------------------------------------- |
|
555 | 563 | |
|
556 | 564 | # TODO: |
|
557 | 565 | # The logic in this code should be moved into classes like LocalCluster, |

558 | 566 | # MpirunCluster, PBSCluster, etc. This would remove a lot of the duplication. |
|
560 | 568 | # the appropriate class and call a 'start' method. |
|
561 | 569 | |
|
562 | 570 | |
|
563 | 571 | def check_security(args, cont_args): |
|
564 | 572 | """Check to see if we should run with SSL support.""" |
|
565 | 573 | if (not args.x or not args.y) and not have_crypto: |
|
566 | 574 | log.err(""" |
|
567 | 575 | OpenSSL/pyOpenSSL is not available, so we can't run in secure mode. |
|
568 | 576 | Try running ipcluster with the -xy flags: ipcluster local -xy -n 4""") |
|
569 | 577 | reactor.stop() |
|
570 | 578 | return False |
|
571 | 579 | if args.x: |
|
572 | 580 | cont_args.append('-x') |
|
573 | 581 | if args.y: |
|
574 | 582 | cont_args.append('-y') |
|
575 | 583 | return True |
|
576 | 584 | |
|
577 | 585 | |
|
578 | 586 | def check_reuse(args, cont_args): |
|
579 | 587 | """Check to see if we should try to reuse FURL files.""" |
|
580 | 588 | if args.r: |
|
581 | 589 | cont_args.append('-r') |
|
582 | 590 | if args.client_port == 0 or args.engine_port == 0: |
|
583 | 591 | log.err(""" |
|
584 | 592 | To reuse FURL files, you must also set the client and engine ports using |
|
585 | 593 | the --client-port and --engine-port options.""") |
|
586 | 594 | reactor.stop() |
|
587 | 595 | return False |
|
588 | 596 | cont_args.append('--client-port=%i' % args.client_port) |
|
589 | 597 | cont_args.append('--engine-port=%i' % args.engine_port) |
|
590 | 598 | return True |
|
591 | 599 | |
|
592 | 600 | |
|
593 | 601 | def _err_and_stop(f): |
|
594 | 602 | """Errback to log a failure and halt the reactor on a fatal error.""" |
|
595 | 603 | log.err(f) |
|
596 | 604 | reactor.stop() |
|
597 | 605 | |
|
598 | 606 | |
|
599 | 607 | def _delay_start(cont_pid, start_engines, furl_file, reuse): |
|
600 | 608 | """Wait for the controller to create its FURL file, then start the engines.""" |
|
601 | 609 | if not reuse: |
|
602 | 610 | if os.path.isfile(furl_file): |
|
603 | 611 | os.unlink(furl_file) |
|
604 | 612 | log.msg('Waiting for controller to finish starting...') |
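| | # Poll for the FURL file every 0.2s, giving up after 50 tries (~10s). |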
|
605 | 613 | d = wait_for_file(furl_file, delay=0.2, max_tries=50) |
|
606 | 614 | d.addCallback(lambda _: log.msg('Controller started')) |
|
607 | 615 | d.addCallback(lambda _: start_engines(cont_pid)) |
|
608 | 616 | return d |
|
609 | 617 | |
|
610 | 618 | |
|
611 | 619 | def main_local(args): |
|
612 | 620 | cont_args = [] |
|
613 | 621 | cont_args.append('--logfile=%s' % pjoin(args.logdir,'ipcontroller')) |
|
614 | 622 | |
|
615 | 623 | # Check security settings before proceeding |
|
616 | 624 | if not check_security(args, cont_args): |
|
617 | 625 | return |
|
618 | 626 | |
|
619 | 627 | # See if we are reusing FURL files |
|
620 | 628 | if not check_reuse(args, cont_args): |
|
621 | 629 | return |
|
622 | 630 | |
|
623 | 631 | cl = ControllerLauncher(extra_args=cont_args) |
|
624 | 632 | dstart = cl.start() |
|
625 | 633 | def start_engines(cont_pid): |
|
626 | 634 | engine_args = [] |
|
627 | 635 | engine_args.append('--logfile=%s' % \ |
|
628 | 636 | pjoin(args.logdir,'ipengine%s-' % cont_pid)) |
|
629 | 637 | eset = LocalEngineSet(extra_args=engine_args) |
|
630 | 638 | def shutdown(signum, frame): |
|
631 | 639 | log.msg('Stopping local cluster') |
|
632 | 640 | # We are still playing with the times here, but these seem |
|
633 | 641 | # to be reliable in allowing everything to exit cleanly. |
|
634 | 642 | eset.interrupt_then_kill(0.5) |
|
635 | 643 | cl.interrupt_then_kill(0.5) |
|
636 | 644 | reactor.callLater(1.0, reactor.stop) |
|
637 | 645 | signal.signal(signal.SIGINT,shutdown) |
|
638 | 646 | d = eset.start(args.n) |
|
639 | 647 | return d |
|
640 | 648 | config = kernel_config_manager.get_config_obj() |
|
641 | 649 | furl_file = config['controller']['engine_furl_file'] |
|
642 | 650 | dstart.addCallback(_delay_start, start_engines, furl_file, args.r) |
|
643 | 651 | dstart.addErrback(_err_and_stop) |
|
644 | 652 | |
|
645 | 653 | |
|
646 | 654 | def main_mpi(args): |
|
647 | 655 | cont_args = [] |
|
648 | 656 | cont_args.append('--logfile=%s' % pjoin(args.logdir,'ipcontroller')) |
|
649 | 657 | |
|
650 | 658 | # Check security settings before proceeding |
|
651 | 659 | if not check_security(args, cont_args): |
|
652 | 660 | return |
|
653 | 661 | |
|
654 | 662 | # See if we are reusing FURL files |
|
655 | 663 | if not check_reuse(args, cont_args): |
|
656 | 664 | return |
|
657 | 665 | |
|
658 | 666 | cl = ControllerLauncher(extra_args=cont_args) |
|
659 | 667 | dstart = cl.start() |
|
660 | 668 | def start_engines(cont_pid): |
|
661 | 669 | raw_args = [args.cmd] |
|
662 | 670 | raw_args.extend(['-n',str(args.n)]) |
|
663 | 671 | raw_args.append('ipengine') |
|
664 | 672 | raw_args.append('-l') |
|
665 | 673 | raw_args.append(pjoin(args.logdir,'ipengine%s-' % cont_pid)) |
|
666 | 674 | if args.mpi: |
|
667 | 675 | raw_args.append('--mpi=%s' % args.mpi) |
|
668 | 676 | eset = ProcessLauncher(raw_args) |
|
669 | 677 | def shutdown(signum, frame): |
|
670 | 678 | log.msg('Stopping local cluster') |
|
671 | 679 | # We are still playing with the times here, but these seem |
|
672 | 680 | # to be reliable in allowing everything to exit cleanly. |
|
673 | 681 | eset.interrupt_then_kill(1.0) |
|
674 | 682 | cl.interrupt_then_kill(1.0) |
|
675 | 683 | reactor.callLater(2.0, reactor.stop) |
|
676 | 684 | signal.signal(signal.SIGINT,shutdown) |
|
677 | 685 | d = eset.start() |
|
678 | 686 | return d |
|
679 | 687 | config = kernel_config_manager.get_config_obj() |
|
680 | 688 | furl_file = config['controller']['engine_furl_file'] |
|
681 | 689 | dstart.addCallback(_delay_start, start_engines, furl_file, args.r) |
|
682 | 690 | dstart.addErrback(_err_and_stop) |
|
683 | 691 | |
|
684 | 692 | |
|
685 | 693 | def main_pbs(args): |
|
686 | 694 | cont_args = [] |
|
687 | 695 | cont_args.append('--logfile=%s' % pjoin(args.logdir,'ipcontroller')) |
|
688 | 696 | |
|
689 | 697 | # Check security settings before proceeding |
|
690 | 698 | if not check_security(args, cont_args): |
|
691 | 699 | return |
|
692 | 700 | |
|
693 | 701 | # See if we are reusing FURL files |
|
694 | 702 | if not check_reuse(args, cont_args): |
|
695 | 703 | return |
|
696 | 704 | |
|
697 | 705 | if args.pbsscript and not os.path.isfile(args.pbsscript): |
|
698 | 706 | log.err('PBS script does not exist: %s' % args.pbsscript) |
|
699 | 707 | return |
|
700 | 708 | |
|
701 | 709 | cl = ControllerLauncher(extra_args=cont_args) |
|
702 | 710 | dstart = cl.start() |
|
703 | 711 | def start_engines(r): |
|
704 | 712 | pbs_set = PBSEngineSet(args.pbsscript, args.pbsqueue) |
|
705 | 713 | def shutdown(signum, frame): |
|
706 | 714 | log.msg('Stopping PBS cluster') |
|
707 | 715 | d = pbs_set.kill() |
|
708 | 716 | d.addBoth(lambda _: cl.interrupt_then_kill(1.0)) |
|
709 | 717 | d.addBoth(lambda _: reactor.callLater(2.0, reactor.stop)) |
|
710 | 718 | signal.signal(signal.SIGINT,shutdown) |
|
711 | 719 | d = pbs_set.start(args.n) |
|
712 | 720 | return d |
|
713 | 721 | config = kernel_config_manager.get_config_obj() |
|
714 | 722 | furl_file = config['controller']['engine_furl_file'] |
|
715 | 723 | dstart.addCallback(_delay_start, start_engines, furl_file, args.r) |
|
716 | 724 | dstart.addErrback(_err_and_stop) |
|
717 | 725 | |
|
718 | 726 | def main_sge(args): |
|
719 | 727 | cont_args = [] |
|
720 | 728 | cont_args.append('--logfile=%s' % pjoin(args.logdir,'ipcontroller')) |
|
721 | 729 | |
|
722 | 730 | # Check security settings before proceeding |
|
723 | 731 | if not check_security(args, cont_args): |
|
724 | 732 | return |
|
725 | 733 | |
|
726 | 734 | # See if we are reusing FURL files |
|
727 | 735 | if not check_reuse(args, cont_args): |
|
728 | 736 | return |
|
729 | 737 | |
|
730 | 738 | if args.sgescript and not os.path.isfile(args.sgescript): |
|
731 | 739 | log.err('SGE script does not exist: %s' % args.sgescript) |
|
732 | 740 | return |
|
733 | 741 | |
|
734 | 742 | cl = ControllerLauncher(extra_args=cont_args) |
|
735 | 743 | dstart = cl.start() |
|
736 | 744 | def start_engines(r): |
|
737 | 745 | sge_set = SGEEngineSet(args.sgescript, args.sgequeue) |
|
738 | 746 | def shutdown(signum, frame): |
|
739 | 747 | log.msg('Stopping sge cluster') |
|
740 | 748 | d = sge_set.kill() |
|
741 | 749 | d.addBoth(lambda _: cl.interrupt_then_kill(1.0)) |
|
742 | 750 | d.addBoth(lambda _: reactor.callLater(2.0, reactor.stop)) |
|
743 | 751 | signal.signal(signal.SIGINT,shutdown) |
|
744 | 752 | d = sge_set.start(args.n) |
|
745 | 753 | return d |
|
746 | 754 | config = kernel_config_manager.get_config_obj() |
|
747 | 755 | furl_file = config['controller']['engine_furl_file'] |
|
748 | 756 | dstart.addCallback(_delay_start, start_engines, furl_file, args.r) |
|
749 | 757 | dstart.addErrback(_err_and_stop) |
|
750 | 758 | |
|
751 | 759 | def main_lsf(args): |
|
752 | 760 | cont_args = [] |
|
753 | 761 | cont_args.append('--logfile=%s' % pjoin(args.logdir,'ipcontroller')) |
|
754 | 762 | |
|
755 | 763 | # Check security settings before proceeding |
|
756 | 764 | if not check_security(args, cont_args): |
|
757 | 765 | return |
|
758 | 766 | |
|
759 | 767 | # See if we are reusing FURL files |
|
760 | 768 | if not check_reuse(args, cont_args): |
|
761 | 769 | return |
|
762 | 770 | |
|
763 | 771 | if args.lsfscript and not os.path.isfile(args.lsfscript): |
|
764 | 772 | log.err('LSF script does not exist: %s' % args.lsfscript) |
|
765 | 773 | return |
|
766 | 774 | |
|
767 | 775 | cl = ControllerLauncher(extra_args=cont_args) |
|
768 | 776 | dstart = cl.start() |
|
769 | 777 | def start_engines(r): |
|
770 | 778 | lsf_set = LSFEngineSet(args.lsfscript, args.lsfqueue) |
|
771 | 779 | def shutdown(signum, frame): |
|
772 | 780 | log.msg('Stopping LSF cluster') |
|
773 | 781 | d = lsf_set.kill() |
|
774 | 782 | d.addBoth(lambda _: cl.interrupt_then_kill(1.0)) |
|
775 | 783 | d.addBoth(lambda _: reactor.callLater(2.0, reactor.stop)) |
|
776 | 784 | signal.signal(signal.SIGINT,shutdown) |
|
777 | 785 | d = lsf_set.start(args.n) |
|
778 | 786 | return d |
|
779 | 787 | config = kernel_config_manager.get_config_obj() |
|
780 | 788 | furl_file = config['controller']['engine_furl_file'] |
|
781 | 789 | dstart.addCallback(_delay_start, start_engines, furl_file, args.r) |
|
782 | 790 | dstart.addErrback(_err_and_stop) |
|
783 | 791 | |
|
784 | 792 | |
|
785 | 793 | def main_ssh(args): |
|
786 | 794 | """Start a controller on localhost and engines using ssh. |
|
787 | 795 | |
|
788 | 796 | Your clusterfile should look like:: |
|
789 | 797 | |
|
790 | 798 | send_furl = False # set to True to copy the engine FURL file to each host |
|
791 | 799 | engines = { |
|
792 | 800 | 'engine_host1' : engine_count, |
|
793 | 801 | 'engine_host2' : engine_count2 |
|
794 | 802 | } |
|
795 | 803 | """ |
|
796 | 804 | clusterfile = {} |
|
797 | 805 | execfile(args.clusterfile, clusterfile) |
|
798 | 806 | if not clusterfile.has_key('send_furl'): |
|
799 | 807 | clusterfile['send_furl'] = False |
|
800 | 808 | |
|
801 | 809 | cont_args = [] |
|
802 | 810 | cont_args.append('--logfile=%s' % pjoin(args.logdir,'ipcontroller')) |
|
803 | 811 | |
|
804 | 812 | # Check security settings before proceeding |
|
805 | 813 | if not check_security(args, cont_args): |
|
806 | 814 | return |
|
807 | 815 | |
|
808 | 816 | # See if we are reusing FURL files |
|
809 | 817 | if not check_reuse(args, cont_args): |
|
810 | 818 | return |
|
811 | 819 | |
|
812 | 820 | cl = ControllerLauncher(extra_args=cont_args) |
|
813 | 821 | dstart = cl.start() |
|
814 | 822 | def start_engines(cont_pid): |
|
815 | 823 | ssh_set = SSHEngineSet(clusterfile['engines'], sshx=args.sshx, |
|
816 | 824 | copyenvs=args.copyenvs) |
|
817 | 825 | def shutdown(signum, frame): |
|
818 | 826 | d = ssh_set.kill() |
|
819 | 827 | cl.interrupt_then_kill(1.0) |
|
820 | 828 | reactor.callLater(2.0, reactor.stop) |
|
821 | 829 | signal.signal(signal.SIGINT,shutdown) |
|
822 | 830 | d = ssh_set.start(clusterfile['send_furl']) |
|
823 | 831 | return d |
|
824 | 832 | config = kernel_config_manager.get_config_obj() |
|
825 | 833 | furl_file = config['controller']['engine_furl_file'] |
|
826 | 834 | dstart.addCallback(_delay_start, start_engines, furl_file, args.r) |
|
827 | 835 | dstart.addErrback(_err_and_stop) |
|
828 | 836 | |
|
829 | 837 | |
|
830 | 838 | def get_args(): |
|
831 | 839 | base_parser = argparse.ArgumentParser(add_help=False) |
|
832 | 840 | base_parser.add_argument( |
|
833 | 841 | '-r', |
|
834 | 842 | action='store_true', |
|
835 | 843 | dest='r', |
|
836 | 844 | help='try to reuse FURL files. Use with --client-port and --engine-port' |
|
837 | 845 | ) |
|
838 | 846 | base_parser.add_argument( |
|
839 | 847 | '--client-port', |
|
840 | 848 | type=int, |
|
841 | 849 | dest='client_port', |
|
842 | 850 | help='the port the controller will listen on for client connections', |
|
843 | 851 | default=0 |
|
844 | 852 | ) |
|
845 | 853 | base_parser.add_argument( |
|
846 | 854 | '--engine-port', |
|
847 | 855 | type=int, |
|
848 | 856 | dest='engine_port', |
|
849 | 857 | help='the port the controller will listen on for engine connections', |
|
850 | 858 | default=0 |
|
851 | 859 | ) |
|
852 | 860 | base_parser.add_argument( |
|
853 | 861 | '-x', |
|
854 | 862 | action='store_true', |
|
855 | 863 | dest='x', |
|
856 | 864 | help='turn off client security' |
|
857 | 865 | ) |
|
858 | 866 | base_parser.add_argument( |
|
859 | 867 | '-y', |
|
860 | 868 | action='store_true', |
|
861 | 869 | dest='y', |
|
862 | 870 | help='turn off engine security' |
|
863 | 871 | ) |
|
864 | 872 | base_parser.add_argument( |
|
865 | 873 | "--logdir", |
|
866 | 874 | type=str, |
|
867 | 875 | dest="logdir", |
|
868 | 876 | help="directory to put log files (default=$IPYTHONDIR/log)", |
|
869 | 877 | default=pjoin(get_ipython_dir(),'log') |
|
870 | 878 | ) |
|
871 | 879 | base_parser.add_argument( |
|
872 | 880 | "-n", |
|
873 | 881 | "--num", |
|
874 | 882 | type=int, |
|
875 | 883 | dest="n", |
|
876 | 884 | default=2, |
|
877 | 885 | help="the number of engines to start" |
|
878 | 886 | ) |
|
879 | 887 | |
|
880 | 888 | parser = argparse.ArgumentParser( |
|
881 | 889 | description='IPython cluster startup. This starts a controller and\ |
|
882 | 890 | engines using various approaches. Use the IPYTHONDIR environment\ |
|
883 | 891 | variable to change your IPython directory from the default of\ |
|
884 | 892 | .ipython or _ipython. The log and security subdirectories of your\ |
|
885 | 893 | IPython directory will be used by this script for log files and\ |
|
886 | 894 | security files.' |
|
887 | 895 | ) |
|
888 | 896 | subparsers = parser.add_subparsers( |
|
889 | 897 | help='available cluster types. For help, do "ipcluster TYPE --help"') |
|
890 | 898 | |
|
891 | 899 | parser_local = subparsers.add_parser( |
|
892 | 900 | 'local', |
|
893 | 901 | help='run a local cluster', |
|
894 | 902 | parents=[base_parser] |
|
895 | 903 | ) |
|
896 | 904 | parser_local.set_defaults(func=main_local) |
|
897 | 905 | |
|
898 | 906 | parser_mpirun = subparsers.add_parser( |
|
899 | 907 | 'mpirun', |
|
900 | 908 | help='run a cluster using mpirun (mpiexec also works)', |
|
901 | 909 | parents=[base_parser] |
|
902 | 910 | ) |
|
903 | 911 | parser_mpirun.add_argument( |
|
904 | 912 | "--mpi", |
|
905 | 913 | type=str, |
|
906 | 914 | dest="mpi", # Don't put a default here to allow no MPI support |
|
907 | 915 | help="how to call MPI_Init (default=mpi4py)" |
|
908 | 916 | ) |
|
909 | 917 | parser_mpirun.set_defaults(func=main_mpi, cmd='mpirun') |
|
910 | 918 | |
|
911 | 919 | parser_mpiexec = subparsers.add_parser( |
|
912 | 920 | 'mpiexec', |
|
913 | 921 | help='run a cluster using mpiexec (mpirun also works)', |
|
914 | 922 | parents=[base_parser] |
|
915 | 923 | ) |
|
916 | 924 | parser_mpiexec.add_argument( |
|
917 | 925 | "--mpi", |
|
918 | 926 | type=str, |
|
919 | 927 | dest="mpi", # Don't put a default here to allow no MPI support |
|
920 | 928 | help="how to call MPI_Init (default=mpi4py)" |
|
921 | 929 | ) |
|
922 | 930 | parser_mpiexec.set_defaults(func=main_mpi, cmd='mpiexec') |
|
923 | 931 | |
|
924 | 932 | parser_pbs = subparsers.add_parser( |
|
925 | 933 | 'pbs', |
|
926 | 934 | help='run a pbs cluster', |
|
927 | 935 | parents=[base_parser] |
|
928 | 936 | ) |
|
929 | 937 | parser_pbs.add_argument( |
|
930 | 938 | '-s', |
|
931 | 939 | '--pbs-script', |
|
932 | 940 | type=str, |
|
933 | 941 | dest='pbsscript', |
|
934 | 942 | help='PBS script template', |
|
935 | 943 | default='' |
|
936 | 944 | ) |
|
937 | 945 | parser_pbs.add_argument( |
|
938 | 946 | '-q', |
|
939 | 947 | '--queue', |
|
940 | 948 | type=str, |
|
941 | 949 | dest='pbsqueue', |
|
942 | 950 | help='PBS queue to use when starting the engines', |
|
943 | 951 | default=None, |
|
944 | 952 | ) |
|
945 | 953 | parser_pbs.set_defaults(func=main_pbs) |
|
946 | 954 | |
|
947 | 955 | parser_sge = subparsers.add_parser( |
|
948 | 956 | 'sge', |
|
949 | 957 | help='run an sge cluster', |
|
950 | 958 | parents=[base_parser] |
|
951 | 959 | ) |
|
952 | 960 | parser_sge.add_argument( |
|
953 | 961 | '-s', |
|
954 | 962 | '--sge-script', |
|
955 | 963 | type=str, |
|
956 | 964 | dest='sgescript', |
|
957 | 965 | help='SGE script template', |
|
958 | 966 | default='' # SGEEngineSet will create one if not specified |
|
959 | 967 | ) |
|
960 | 968 | parser_sge.add_argument( |
|
961 | 969 | '-q', |
|
962 | 970 | '--queue', |
|
963 | 971 | type=str, |
|
964 | 972 | dest='sgequeue', |
|
965 | 973 | help='SGE queue to use when starting the engines', |
|
966 | 974 | default=None, |
|
967 | 975 | ) |
|
968 | 976 | parser_sge.set_defaults(func=main_sge) |
|
969 | 977 | |
|
970 | 978 | parser_lsf = subparsers.add_parser( |
|
971 | 979 | 'lsf', |
|
972 | 980 | help='run an lsf cluster', |
|
973 | 981 | parents=[base_parser] |
|
974 | 982 | ) |
|
975 | 983 | |
|
976 | 984 | parser_lsf.add_argument( |
|
977 | 985 | '-s', |
|
978 | 986 | '--lsf-script', |
|
979 | 987 | type=str, |
|
980 | 988 | dest='lsfscript', |
|
981 | 989 | help='LSF script template', |
|
982 | 990 | default='' # LSFEngineSet will create one if not specified |
|
983 | 991 | ) |
|
984 | 992 | |
|
985 | 993 | parser_lsf.add_argument( |
|
986 | 994 | '-q', |
|
987 | 995 | '--queue', |
|
988 | 996 | type=str, |
|
989 | 997 | dest='lsfqueue', |
|
990 | 998 | help='LSF queue to use when starting the engines', |
|
991 | 999 | default=None, |
|
992 | 1000 | ) |
|
993 | 1001 | parser_lsf.set_defaults(func=main_lsf) |
|
994 | 1002 | |
|
995 | 1003 | parser_ssh = subparsers.add_parser( |
|
996 | 1004 | 'ssh', |
|
997 | 1005 | help='run a cluster using ssh; you should have ssh keys set up', |
|
998 | 1006 | parents=[base_parser] |
|
999 | 1007 | ) |
|
1000 | 1008 | parser_ssh.add_argument( |
|
1001 | 1009 | '-e', |
|
1002 | 1010 | '--copyenvs', |
|
1003 | 1011 | action='store_true', |
|
1004 | 1012 | dest='copyenvs', |
|
1005 | 1013 | help='Copy current shell environment to remote location', |
|
1006 | 1014 | default=False, |
|
1007 | 1015 | ) |
|
1008 | 1016 | parser_ssh.add_argument( |
|
1009 | 1017 | '--clusterfile', |
|
1010 | 1018 | type=str, |
|
1011 | 1019 | dest='clusterfile', |
|
1012 | 1020 | help='python file describing the cluster', |
|
1013 | 1021 | default='clusterfile.py', |
|
1014 | 1022 | ) |
|
1015 | 1023 | parser_ssh.add_argument( |
|
1016 | 1024 | '--sshx', |
|
1017 | 1025 | type=str, |
|
1018 | 1026 | dest='sshx', |
|
1019 | 1027 | help='sshx launcher helper' |
|
1020 | 1028 | ) |
|
1021 | 1029 | parser_ssh.set_defaults(func=main_ssh) |
|
1022 | 1030 | |
|
1023 | 1031 | args = parser.parse_args() |
|
1024 | 1032 | return args |
|
1025 | 1033 | |
|
1026 | 1034 | def main(): |
|
1027 | 1035 | args = get_args() |
|
1028 | 1036 | reactor.callWhenRunning(args.func, args) |
|
1029 | 1037 | log.startLogging(sys.stdout) |
|
1030 | 1038 | reactor.run() |
|
1031 | 1039 | |
|
1032 | 1040 | if __name__ == '__main__': |
|
1033 | 1041 | main() |