@@ -1,393 +1,393 @@

1 | 1 | """ A minimal application base mixin for all ZMQ based IPython frontends. |
|
2 | 2 | |
|
3 | 3 | This is not a complete console app, as subprocesses will not be able to receive |
|
4 | 4 | input, there is no real readline support, among other limitations. This is a |
|
5 | 5 | refactoring of what used to be the IPython/qt/console/qtconsoleapp.py |
|
6 | 6 | |
|
7 | 7 | Authors: |
|
8 | 8 | |
|
9 | 9 | * Evan Patterson |
|
10 | 10 | * Min RK |
|
11 | 11 | * Erik Tollerud |
|
12 | 12 | * Fernando Perez |
|
13 | 13 | * Bussonnier Matthias |
|
14 | 14 | * Thomas Kluyver |
|
15 | 15 | * Paul Ivanov |
|
16 | 16 | |
|
17 | 17 | """ |
|
18 | 18 | |
|
19 | 19 | #----------------------------------------------------------------------------- |
|
20 | 20 | # Imports |
|
21 | 21 | #----------------------------------------------------------------------------- |
|
22 | 22 | |
|
23 | 23 | # stdlib imports |
|
24 | 24 | import atexit |
|
25 | 25 | import json |
|
26 | 26 | import os |
|
27 | 27 | import signal |
|
28 | 28 | import sys |
|
29 | 29 | import uuid |
|
30 | 30 | |
|
31 | 31 | |
|
32 | 32 | # Local imports |
|
33 | 33 | from IPython.config.application import boolean_flag |
|
34 | 34 | from IPython.core.profiledir import ProfileDir |
|
35 | 35 | from IPython.kernel.blocking import BlockingKernelClient |
|
36 | 36 | from IPython.kernel import KernelManager |
|
37 | 37 | from IPython.kernel import tunnel_to_kernel, find_connection_file, swallow_argv |
|
38 | 38 | from IPython.utils.path import filefind |
|
39 | 39 | from IPython.utils.py3compat import str_to_bytes |
|
40 | 40 | from IPython.utils.traitlets import ( |
|
41 | 41 | Dict, List, Unicode, CUnicode, Int, CBool, Any |
|
42 | 42 | ) |
|
43 | 43 | from IPython.kernel.zmq.kernelapp import ( |
|
44 | 44 | kernel_flags, |
|
45 | 45 | kernel_aliases, |
|
46 | 46 | IPKernelApp |
|
47 | 47 | ) |
|
48 | 48 | from IPython.kernel.zmq.session import Session, default_secure |
|
49 | 49 | from IPython.kernel.zmq.zmqshell import ZMQInteractiveShell |
|
50 | 50 | from IPython.kernel.connect import ConnectionFileMixin |
|
51 | 51 | |
|
52 | 52 | #----------------------------------------------------------------------------- |
|
53 | 53 | # Network Constants |
|
54 | 54 | #----------------------------------------------------------------------------- |
|
55 | 55 | |
|
56 |    | from IPython.utils.localinterfaces import LOCALHOST |

   | 56 | from IPython.utils.localinterfaces import localhost |
|
57 | 57 | |
|
58 | 58 | #----------------------------------------------------------------------------- |
|
59 | 59 | # Globals |
|
60 | 60 | #----------------------------------------------------------------------------- |
|
61 | 61 | |
|
62 | 62 | |
|
63 | 63 | #----------------------------------------------------------------------------- |
|
64 | 64 | # Aliases and Flags |
|
65 | 65 | #----------------------------------------------------------------------------- |
|
66 | 66 | |
|
67 | 67 | flags = dict(kernel_flags) |
|
68 | 68 | |
|
69 | 69 | # the flags that are specific to the frontend |
|
70 | 70 | # these must be scrubbed before being passed to the kernel, |
|
71 | 71 | # or it will raise an error on unrecognized flags |
|
72 | 72 | app_flags = { |
|
73 | 73 | 'existing' : ({'IPythonConsoleApp' : {'existing' : 'kernel*.json'}}, |
|
74 | 74 | "Connect to an existing kernel. If no argument specified, guess most recent"), |
|
75 | 75 | } |
|
76 | 76 | app_flags.update(boolean_flag( |
|
77 | 77 | 'confirm-exit', 'IPythonConsoleApp.confirm_exit', |
|
78 | 78 | """Set to display confirmation dialog on exit. You can always use 'exit' or 'quit', |
|
79 | 79 | to force a direct exit without any confirmation. |
|
80 | 80 | """, |
|
81 | 81 | """Don't prompt the user when exiting. This will terminate the kernel |
|
82 | 82 | if it is owned by the frontend, and leave it alive if it is external. |
|
83 | 83 | """ |
|
84 | 84 | )) |
|
85 | 85 | flags.update(app_flags) |
|
86 | 86 | |
|
87 | 87 | aliases = dict(kernel_aliases) |
|
88 | 88 | |
|
89 | 89 | # also scrub aliases from the frontend |
|
90 | 90 | app_aliases = dict( |
|
91 | 91 | ip = 'IPythonConsoleApp.ip', |
|
92 | 92 | transport = 'IPythonConsoleApp.transport', |
|
93 | 93 | hb = 'IPythonConsoleApp.hb_port', |
|
94 | 94 | shell = 'IPythonConsoleApp.shell_port', |
|
95 | 95 | iopub = 'IPythonConsoleApp.iopub_port', |
|
96 | 96 | stdin = 'IPythonConsoleApp.stdin_port', |
|
97 | 97 | existing = 'IPythonConsoleApp.existing', |
|
98 | 98 | f = 'IPythonConsoleApp.connection_file', |
|
99 | 99 | |
|
100 | 100 | |
|
101 | 101 | ssh = 'IPythonConsoleApp.sshserver', |
|
102 | 102 | ) |
|
103 | 103 | aliases.update(app_aliases) |
|
104 | 104 | |
|
105 | 105 | #----------------------------------------------------------------------------- |
|
106 | 106 | # Classes |
|
107 | 107 | #----------------------------------------------------------------------------- |
|
108 | 108 | |
|
109 | 109 | #----------------------------------------------------------------------------- |
|
110 | 110 | # IPythonConsole |
|
111 | 111 | #----------------------------------------------------------------------------- |
|
112 | 112 | |
|
113 | 113 | classes = [IPKernelApp, ZMQInteractiveShell, KernelManager, ProfileDir, Session] |
|
114 | 114 | |
|
115 | 115 | try: |
|
116 | 116 | from IPython.kernel.zmq.pylab.backend_inline import InlineBackend |
|
117 | 117 | except ImportError: |
|
118 | 118 | pass |
|
119 | 119 | else: |
|
120 | 120 | classes.append(InlineBackend) |
|
121 | 121 | |
|
122 | 122 | class IPythonConsoleApp(ConnectionFileMixin): |
|
123 | 123 | name = 'ipython-console-mixin' |
|
124 | 124 | |
|
125 | 125 | description = """ |
|
126 | 126 | The IPython Mixin Console. |
|
127 | 127 | |
|
128 | 128 | This class contains the common portions of console client (QtConsole, |
|
129 | 129 | ZMQ-based terminal console, etc). It is not a full console, in that |
|
130 | 130 | launched terminal subprocesses will not be able to accept input. |
|
131 | 131 | |
|
132 | 132 | The Console using this mixin supports various extra features beyond |
|
133 | 133 | the single-process Terminal IPython shell, such as connecting to |
|
134 | 134 | an existing kernel, via: |
|
135 | 135 | |
|
136 | 136 | ipython <appname> --existing |
|
137 | 137 | |
|
138 | 138 | as well as tunnel via SSH |
|
139 | 139 | |
|
140 | 140 | """ |
|
141 | 141 | |
|
142 | 142 | classes = classes |
|
143 | 143 | flags = Dict(flags) |
|
144 | 144 | aliases = Dict(aliases) |
|
145 | 145 | kernel_manager_class = KernelManager |
|
146 | 146 | kernel_client_class = BlockingKernelClient |
|
147 | 147 | |
|
148 | 148 | kernel_argv = List(Unicode) |
|
149 | 149 | # frontend flags&aliases to be stripped when building kernel_argv |
|
150 | 150 | frontend_flags = Any(app_flags) |
|
151 | 151 | frontend_aliases = Any(app_aliases) |
|
152 | 152 | |
|
153 | 153 | # create requested profiles by default, if they don't exist: |
|
154 | 154 | auto_create = CBool(True) |
|
155 | 155 | # connection info: |
|
156 | 156 | |
|
157 | 157 | sshserver = Unicode('', config=True, |
|
158 | 158 | help="""The SSH server to use to connect to the kernel.""") |
|
159 | 159 | sshkey = Unicode('', config=True, |
|
160 | 160 | help="""Path to the ssh key to use for logging in to the ssh server.""") |
|
161 | 161 | |
|
162 | 162 | hb_port = Int(0, config=True, |
|
163 | 163 | help="set the heartbeat port [default: random]") |
|
164 | 164 | shell_port = Int(0, config=True, |
|
165 | 165 | help="set the shell (ROUTER) port [default: random]") |
|
166 | 166 | iopub_port = Int(0, config=True, |
|
167 | 167 | help="set the iopub (PUB) port [default: random]") |
|
168 | 168 | stdin_port = Int(0, config=True, |
|
169 | 169 | help="set the stdin (DEALER) port [default: random]") |
|
170 | 170 | connection_file = Unicode('', config=True, |
|
171 | 171 | help="""JSON file in which to store connection info [default: kernel-<pid>.json] |
|
172 | 172 | |
|
173 | 173 | This file will contain the IP, ports, and authentication key needed to connect |
|
174 | 174 | clients to this kernel. By default, this file will be created in the security-dir |
|
175 | 175 | of the current profile, but can be specified by absolute path. |
|
176 | 176 | """) |
|
177 | 177 | def _connection_file_default(self): |
|
178 | 178 | return 'kernel-%i.json' % os.getpid() |
|
179 | 179 | |
|
180 | 180 | existing = CUnicode('', config=True, |
|
181 | 181 | help="""Connect to an already running kernel""") |
|
182 | 182 | |
|
183 | 183 | confirm_exit = CBool(True, config=True, |
|
184 | 184 | help=""" |
|
185 | 185 | Set to display confirmation dialog on exit. You can always use 'exit' or 'quit', |
|
186 | 186 | to force a direct exit without any confirmation.""", |
|
187 | 187 | ) |
|
188 | 188 | |
|
189 | 189 | |
|
190 | 190 | def build_kernel_argv(self, argv=None): |
|
191 | 191 | """build argv to be passed to kernel subprocess""" |
|
192 | 192 | if argv is None: |
|
193 | 193 | argv = sys.argv[1:] |
|
194 | 194 | self.kernel_argv = swallow_argv(argv, self.frontend_aliases, self.frontend_flags) |
|
195 | 195 | # kernel should inherit default config file from frontend |
|
196 | 196 | self.kernel_argv.append("--IPKernelApp.parent_appname='%s'" % self.name) |
|
197 | 197 | |
|
198 | 198 | def init_connection_file(self): |
|
199 | 199 | """find the connection file, and load the info if found. |
|
200 | 200 | |
|
201 | 201 | The current working directory and the current profile's security |
|
202 | 202 | directory will be searched for the file if it is not given by |
|
203 | 203 | absolute path. |
|
204 | 204 | |
|
205 | 205 | When attempting to connect to an existing kernel and the `--existing` |
|
206 | 206 | argument does not match an existing file, it will be interpreted as a |
|
207 | 207 | fileglob, and the matching file in the current profile's security dir |
|
208 | 208 | with the latest access time will be used. |
|
209 | 209 | |
|
210 | 210 | After this method is called, self.connection_file contains the *full path* |
|
211 | 211 | to the connection file, never just its name. |
|
212 | 212 | """ |
|
213 | 213 | if self.existing: |
|
214 | 214 | try: |
|
215 | 215 | cf = find_connection_file(self.existing) |
|
216 | 216 | except Exception: |
|
217 | 217 | self.log.critical("Could not find existing kernel connection file %s", self.existing) |
|
218 | 218 | self.exit(1) |
|
219 | 219 | self.log.info("Connecting to existing kernel: %s" % cf) |
|
220 | 220 | self.connection_file = cf |
|
221 | 221 | else: |
|
222 | 222 | # not existing, check if we are going to write the file |
|
223 | 223 | # and ensure that self.connection_file is a full path, not just the shortname |
|
224 | 224 | try: |
|
225 | 225 | cf = find_connection_file(self.connection_file) |
|
226 | 226 | except Exception: |
|
227 | 227 | # file might not exist |
|
228 | 228 | if self.connection_file == os.path.basename(self.connection_file): |
|
229 | 229 | # just shortname, put it in security dir |
|
230 | 230 | cf = os.path.join(self.profile_dir.security_dir, self.connection_file) |
|
231 | 231 | else: |
|
232 | 232 | cf = self.connection_file |
|
233 | 233 | self.connection_file = cf |
|
234 | 234 | |
|
235 | 235 | # should load_connection_file only be used for existing? |
|
236 | 236 | # as it is now, this allows reusing ports if an existing |
|
237 | 237 | # file is requested |
|
238 | 238 | try: |
|
239 | 239 | self.load_connection_file() |
|
240 | 240 | except Exception: |
|
241 | 241 | self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True) |
|
242 | 242 | self.exit(1) |
|
243 | 243 | |
|
244 | 244 | def load_connection_file(self): |
|
245 | 245 | """load ip/port/hmac config from JSON connection file""" |
|
246 | 246 | # this is identical to IPKernelApp.load_connection_file |
|
247 | 247 | # perhaps it can be centralized somewhere? |
|
248 | 248 | try: |
|
249 | 249 | fname = filefind(self.connection_file, ['.', self.profile_dir.security_dir]) |
|
250 | 250 | except IOError: |
|
251 | 251 | self.log.debug("Connection File not found: %s", self.connection_file) |
|
252 | 252 | return |
|
253 | 253 | self.log.debug(u"Loading connection file %s", fname) |
|
254 | 254 | with open(fname) as f: |
|
255 | 255 | cfg = json.load(f) |
|
256 | 256 | self.transport = cfg.get('transport', 'tcp') |
|
257 |     | self.ip = cfg.get('ip', LOCALHOST) |

    | 257 | self.ip = cfg.get('ip', localhost()) |
|
258 | 258 | |
|
259 | 259 | for channel in ('hb', 'shell', 'iopub', 'stdin', 'control'): |
|
260 | 260 | name = channel + '_port' |
|
261 | 261 | if getattr(self, name) == 0 and name in cfg: |
|
262 | 262 | # not overridden by config or cl_args |
|
263 | 263 | setattr(self, name, cfg[name]) |
|
264 | 264 | if 'key' in cfg: |
|
265 | 265 | self.config.Session.key = str_to_bytes(cfg['key']) |
|
266 | 266 | if 'signature_scheme' in cfg: |
|
267 | 267 | self.config.Session.signature_scheme = cfg['signature_scheme'] |
|
268 | 268 | |
|
269 | 269 | def init_ssh(self): |
|
270 | 270 | """set up ssh tunnels, if needed.""" |
|
271 | 271 | if not self.existing or (not self.sshserver and not self.sshkey): |
|
272 | 272 | return |
|
273 | 273 | self.load_connection_file() |
|
274 | 274 | |
|
275 | 275 | transport = self.transport |
|
276 | 276 | ip = self.ip |
|
277 | 277 | |
|
278 | 278 | if transport != 'tcp': |
|
279 | 279 | self.log.error("Can only use ssh tunnels with TCP sockets, not %s", transport) |
|
280 | 280 | sys.exit(-1) |
|
281 | 281 | |
|
282 | 282 | if self.sshkey and not self.sshserver: |
|
283 | 283 | # specifying just the key implies that we are connecting directly |
|
284 | 284 | self.sshserver = ip |
|
285 |     | ip = LOCALHOST |

    | 285 | ip = localhost() |
|
286 | 286 | |
|
287 | 287 | # build connection dict for tunnels: |
|
288 | 288 | info = dict(ip=ip, |
|
289 | 289 | shell_port=self.shell_port, |
|
290 | 290 | iopub_port=self.iopub_port, |
|
291 | 291 | stdin_port=self.stdin_port, |
|
292 | 292 | hb_port=self.hb_port |
|
293 | 293 | ) |
|
294 | 294 | |
|
295 | 295 | self.log.info("Forwarding connections to %s via %s"%(ip, self.sshserver)) |
|
296 | 296 | |
|
297 | 297 | # tunnels return a new set of ports, which will be on localhost: |
|
298 |     | self.ip = LOCALHOST |

    | 298 | self.ip = localhost() |
|
299 | 299 | try: |
|
300 | 300 | newports = tunnel_to_kernel(info, self.sshserver, self.sshkey) |
|
301 | 301 | except: |
|
302 | 302 | # even catch KeyboardInterrupt |
|
303 | 303 | self.log.error("Could not setup tunnels", exc_info=True) |
|
304 | 304 | self.exit(1) |
|
305 | 305 | |
|
306 | 306 | self.shell_port, self.iopub_port, self.stdin_port, self.hb_port = newports |
|
307 | 307 | |
|
308 | 308 | cf = self.connection_file |
|
309 | 309 | base,ext = os.path.splitext(cf) |
|
310 | 310 | base = os.path.basename(base) |
|
311 | 311 | self.connection_file = os.path.basename(base)+'-ssh'+ext |
|
312 | 312 | self.log.critical("To connect another client via this tunnel, use:") |
|
313 | 313 | self.log.critical("--existing %s" % self.connection_file) |
|
314 | 314 | |
|
315 | 315 | def _new_connection_file(self): |
|
316 | 316 | cf = '' |
|
317 | 317 | while not cf: |
|
318 | 318 | # we don't need a 128b id to distinguish kernels, use more readable |
|
319 | 319 | # 48b node segment (12 hex chars). Users running more than 32k simultaneous |
|
320 | 320 | # kernels can subclass. |
|
321 | 321 | ident = str(uuid.uuid4()).split('-')[-1] |
|
322 | 322 | cf = os.path.join(self.profile_dir.security_dir, 'kernel-%s.json' % ident) |
|
323 | 323 | # only keep if it's actually new. Protect against unlikely collision |
|
324 | 324 | # in 48b random search space |
|
325 | 325 | cf = cf if not os.path.exists(cf) else '' |
|
326 | 326 | return cf |
|
327 | 327 | |
|
328 | 328 | def init_kernel_manager(self): |
|
329 | 329 | # Don't let Qt or ZMQ swallow KeyboardInterrupts. |
|
330 | 330 | if self.existing: |
|
331 | 331 | self.kernel_manager = None |
|
332 | 332 | return |
|
333 | 333 | signal.signal(signal.SIGINT, signal.SIG_DFL) |
|
334 | 334 | |
|
335 | 335 | # Create a KernelManager and start a kernel. |
|
336 | 336 | self.kernel_manager = self.kernel_manager_class( |
|
337 | 337 | ip=self.ip, |
|
338 | 338 | transport=self.transport, |
|
339 | 339 | shell_port=self.shell_port, |
|
340 | 340 | iopub_port=self.iopub_port, |
|
341 | 341 | stdin_port=self.stdin_port, |
|
342 | 342 | hb_port=self.hb_port, |
|
343 | 343 | connection_file=self.connection_file, |
|
344 | 344 | parent=self, |
|
345 | 345 | ) |
|
346 | 346 | self.kernel_manager.client_factory = self.kernel_client_class |
|
347 | 347 | self.kernel_manager.start_kernel(extra_arguments=self.kernel_argv) |
|
348 | 348 | atexit.register(self.kernel_manager.cleanup_ipc_files) |
|
349 | 349 | |
|
350 | 350 | if self.sshserver: |
|
351 | 351 | # ssh, write new connection file |
|
352 | 352 | self.kernel_manager.write_connection_file() |
|
353 | 353 | |
|
354 | 354 | # in case KM defaults / ssh writing changes things: |
|
355 | 355 | km = self.kernel_manager |
|
356 | 356 | self.shell_port=km.shell_port |
|
357 | 357 | self.iopub_port=km.iopub_port |
|
358 | 358 | self.stdin_port=km.stdin_port |
|
359 | 359 | self.hb_port=km.hb_port |
|
360 | 360 | self.connection_file = km.connection_file |
|
361 | 361 | |
|
362 | 362 | atexit.register(self.kernel_manager.cleanup_connection_file) |
|
363 | 363 | |
|
364 | 364 | def init_kernel_client(self): |
|
365 | 365 | if self.kernel_manager is not None: |
|
366 | 366 | self.kernel_client = self.kernel_manager.client() |
|
367 | 367 | else: |
|
368 | 368 | self.kernel_client = self.kernel_client_class( |
|
369 | 369 | ip=self.ip, |
|
370 | 370 | transport=self.transport, |
|
371 | 371 | shell_port=self.shell_port, |
|
372 | 372 | iopub_port=self.iopub_port, |
|
373 | 373 | stdin_port=self.stdin_port, |
|
374 | 374 | hb_port=self.hb_port, |
|
375 | 375 | connection_file=self.connection_file, |
|
376 | 376 | parent=self, |
|
377 | 377 | ) |
|
378 | 378 | |
|
379 | 379 | self.kernel_client.start_channels() |
|
380 | 380 | |
|
381 | 381 | |
|
382 | 382 | |
|
383 | 383 | def initialize(self, argv=None): |
|
384 | 384 | """ |
|
385 | 385 | Classes which mix this class in should call: |
|
386 | 386 | IPythonConsoleApp.initialize(self,argv) |
|
387 | 387 | """ |
|
388 | 388 | self.init_connection_file() |
|
389 | 389 | default_secure(self.config) |
|
390 | 390 | self.init_ssh() |
|
391 | 391 | self.init_kernel_manager() |
|
392 | 392 | self.init_kernel_client() |
|
393 | 393 |
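
The hunk above replaces the module-level LOCALHOST constant with calls to a localhost() function, so the loopback address is resolved on first use instead of at import time. Below is a minimal sketch of that lazy-lookup pattern, assuming a simple cached resolver; the real IPython.utils.localinterfaces implementation enumerates network interfaces and may differ in detail:

import socket

_localhost = None

def localhost():
    """Return the loopback address, resolved once on first call."""
    global _localhost
    if _localhost is None:
        try:
            # use whatever 'localhost' resolves to on this machine
            _localhost = socket.gethostbyname('localhost')
        except socket.error:
            # fall back to the conventional IPv4 loopback address
            _localhost = '127.0.0.1'
    return _localhost

Deferring the lookup means a slow or failing interface query no longer runs as a side effect of importing the module, and the returned value can reflect the machine's state at call time.
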
@@ -1,726 +1,728 @@
|
1 | 1 | # coding: utf-8 |
|
2 | 2 | """A tornado based IPython notebook server. |
|
3 | 3 | |
|
4 | 4 | Authors: |
|
5 | 5 | |
|
6 | 6 | * Brian Granger |
|
7 | 7 | """ |
|
8 | 8 | #----------------------------------------------------------------------------- |
|
9 | 9 | # Copyright (C) 2013 The IPython Development Team |
|
10 | 10 | # |
|
11 | 11 | # Distributed under the terms of the BSD License. The full license is in |
|
12 | 12 | # the file COPYING, distributed as part of this software. |
|
13 | 13 | #----------------------------------------------------------------------------- |
|
14 | 14 | |
|
15 | 15 | #----------------------------------------------------------------------------- |
|
16 | 16 | # Imports |
|
17 | 17 | #----------------------------------------------------------------------------- |
|
18 | 18 | |
|
19 | 19 | # stdlib |
|
20 | 20 | import errno |
|
21 | 21 | import logging |
|
22 | 22 | import os |
|
23 | 23 | import random |
|
24 | 24 | import select |
|
25 | 25 | import signal |
|
26 | 26 | import socket |
|
27 | 27 | import sys |
|
28 | 28 | import threading |
|
29 | 29 | import time |
|
30 | 30 | import webbrowser |
|
31 | 31 | |
|
32 | 32 | |
|
33 | 33 | # Third party |
|
34 | 34 | # check for pyzmq 2.1.11 |
|
35 | 35 | from IPython.utils.zmqrelated import check_for_zmq |
|
36 | 36 | check_for_zmq('2.1.11', 'IPython.html') |
|
37 | 37 | |
|
38 | 38 | from jinja2 import Environment, FileSystemLoader |
|
39 | 39 | |
|
40 | 40 | # Install the pyzmq ioloop. This has to be done before anything else from |
|
41 | 41 | # tornado is imported. |
|
42 | 42 | from zmq.eventloop import ioloop |
|
43 | 43 | ioloop.install() |
|
44 | 44 | |
|
45 | 45 | # check for tornado 2.1.0 |
|
46 | 46 | msg = "The IPython Notebook requires tornado >= 2.1.0" |
|
47 | 47 | try: |
|
48 | 48 | import tornado |
|
49 | 49 | except ImportError: |
|
50 | 50 | raise ImportError(msg) |
|
51 | 51 | try: |
|
52 | 52 | version_info = tornado.version_info |
|
53 | 53 | except AttributeError: |
|
54 | 54 | raise ImportError(msg + ", but you have < 1.1.0") |
|
55 | 55 | if version_info < (2,1,0): |
|
56 | 56 | raise ImportError(msg + ", but you have %s" % tornado.version) |
|
57 | 57 | |
|
58 | 58 | from tornado import httpserver |
|
59 | 59 | from tornado import web |
|
60 | 60 | |
|
61 | 61 | # Our own libraries |
|
62 | 62 | from IPython.html import DEFAULT_STATIC_FILES_PATH |
|
63 | 63 | |
|
64 | 64 | from .services.kernels.kernelmanager import MappingKernelManager |
|
65 | 65 | from .services.notebooks.nbmanager import NotebookManager |
|
66 | 66 | from .services.notebooks.filenbmanager import FileNotebookManager |
|
67 | 67 | from .services.clusters.clustermanager import ClusterManager |
|
68 | 68 | |
|
69 | 69 | from .base.handlers import AuthenticatedFileHandler, FileFindHandler |
|
70 | 70 | |
|
71 | 71 | from IPython.config.application import catch_config_error, boolean_flag |
|
72 | 72 | from IPython.core.application import BaseIPythonApplication |
|
73 | 73 | from IPython.consoleapp import IPythonConsoleApp |
|
74 | 74 | from IPython.kernel import swallow_argv |
|
75 | 75 | from IPython.kernel.zmq.session import default_secure |
|
76 | 76 | from IPython.kernel.zmq.kernelapp import ( |
|
77 | 77 | kernel_flags, |
|
78 | 78 | kernel_aliases, |
|
79 | 79 | ) |
|
80 | 80 | from IPython.utils.importstring import import_item |
|
81 |    | from IPython.utils.localinterfaces import LOCALHOST |

   | 81 | from IPython.utils.localinterfaces import localhost |
|
82 | 82 | from IPython.utils import submodule |
|
83 | 83 | from IPython.utils.traitlets import ( |
|
84 | 84 | Dict, Unicode, Integer, List, Bool, Bytes, |
|
85 | 85 | DottedObjectName |
|
86 | 86 | ) |
|
87 | 87 | from IPython.utils import py3compat |
|
88 | 88 | from IPython.utils.path import filefind |
|
89 | 89 | |
|
90 | 90 | from .utils import url_path_join |
|
91 | 91 | |
|
92 | 92 | #----------------------------------------------------------------------------- |
|
93 | 93 | # Module globals |
|
94 | 94 | #----------------------------------------------------------------------------- |
|
95 | 95 | |
|
96 | 96 | _examples = """ |
|
97 | 97 | ipython notebook # start the notebook |
|
98 | 98 | ipython notebook --profile=sympy # use the sympy profile |
|
99 | 99 | ipython notebook --certfile=mycert.pem # use SSL/TLS certificate |
|
100 | 100 | """ |
|
101 | 101 | |
|
102 | 102 | #----------------------------------------------------------------------------- |
|
103 | 103 | # Helper functions |
|
104 | 104 | #----------------------------------------------------------------------------- |
|
105 | 105 | |
|
106 | 106 | def random_ports(port, n): |
|
107 | 107 | """Generate a list of n random ports near the given port. |
|
108 | 108 | |
|
109 | 109 | The first 5 ports will be sequential, and the remaining n-5 will be |
|
110 | 110 | randomly selected in the range [port-2*n, port+2*n]. |
|
111 | 111 | """ |
|
112 | 112 | for i in range(min(5, n)): |
|
113 | 113 | yield port + i |
|
114 | 114 | for i in range(n-5): |
|
115 | 115 | yield port + random.randint(-2*n, 2*n) |
|
116 | 116 | |
|
117 | 117 | def load_handlers(name): |
|
118 | 118 | """Load the (URL pattern, handler) tuples for each component.""" |
|
119 | 119 | name = 'IPython.html.' + name |
|
120 | 120 | mod = __import__(name, fromlist=['default_handlers']) |
|
121 | 121 | return mod.default_handlers |
|
122 | 122 | |
|
123 | 123 | #----------------------------------------------------------------------------- |
|
124 | 124 | # The Tornado web application |
|
125 | 125 | #----------------------------------------------------------------------------- |
|
126 | 126 | |
|
127 | 127 | class NotebookWebApplication(web.Application): |
|
128 | 128 | |
|
129 | 129 | def __init__(self, ipython_app, kernel_manager, notebook_manager, |
|
130 | 130 | cluster_manager, log, |
|
131 | 131 | base_project_url, settings_overrides): |
|
132 | 132 | |
|
133 | 133 | settings = self.init_settings( |
|
134 | 134 | ipython_app, kernel_manager, notebook_manager, cluster_manager, |
|
135 | 135 | log, base_project_url, settings_overrides) |
|
136 | 136 | handlers = self.init_handlers(settings) |
|
137 | 137 | |
|
138 | 138 | super(NotebookWebApplication, self).__init__(handlers, **settings) |
|
139 | 139 | |
|
140 | 140 | def init_settings(self, ipython_app, kernel_manager, notebook_manager, |
|
141 | 141 | cluster_manager, log, |
|
142 | 142 | base_project_url, settings_overrides): |
|
143 | 143 | # Python < 2.6.5 doesn't accept unicode keys in f(**kwargs), and |
|
144 | 144 | # base_project_url will always be unicode, which will in turn |
|
145 | 145 | # make the patterns unicode, and ultimately result in unicode |
|
146 | 146 | # keys in kwargs to handler._execute(**kwargs) in tornado. |
|
147 | 147 | # This enforces that base_project_url be ascii in that situation. |
|
148 | 148 | # |
|
149 | 149 | # Note that the URLs these patterns check against are escaped, |
|
150 | 150 | # and thus guaranteed to be ASCII: 'hΓ©llo' is really 'h%C3%A9llo'. |
|
151 | 151 | base_project_url = py3compat.unicode_to_str(base_project_url, 'ascii') |
|
152 | 152 | template_path = settings_overrides.get("template_path", os.path.join(os.path.dirname(__file__), "templates")) |
|
153 | 153 | settings = dict( |
|
154 | 154 | # basics |
|
155 | 155 | base_project_url=base_project_url, |
|
156 | 156 | base_kernel_url=ipython_app.base_kernel_url, |
|
157 | 157 | template_path=template_path, |
|
158 | 158 | static_path=ipython_app.static_file_path, |
|
159 | 159 | static_handler_class = FileFindHandler, |
|
160 | 160 | static_url_prefix = url_path_join(base_project_url,'/static/'), |
|
161 | 161 | |
|
162 | 162 | # authentication |
|
163 | 163 | cookie_secret=ipython_app.cookie_secret, |
|
164 | 164 | login_url=url_path_join(base_project_url,'/login'), |
|
165 | 165 | password=ipython_app.password, |
|
166 | 166 | |
|
167 | 167 | # managers |
|
168 | 168 | kernel_manager=kernel_manager, |
|
169 | 169 | notebook_manager=notebook_manager, |
|
170 | 170 | cluster_manager=cluster_manager, |
|
171 | 171 | |
|
172 | 172 | # IPython stuff |
|
173 | 173 | mathjax_url=ipython_app.mathjax_url, |
|
174 | 174 | config=ipython_app.config, |
|
175 | 175 | use_less=ipython_app.use_less, |
|
176 | 176 | jinja2_env=Environment(loader=FileSystemLoader(template_path)), |
|
177 | 177 | ) |
|
178 | 178 | |
|
179 | 179 | # allow custom overrides for the tornado web app. |
|
180 | 180 | settings.update(settings_overrides) |
|
181 | 181 | return settings |
|
182 | 182 | |
|
183 | 183 | def init_handlers(self, settings): |
|
184 | 184 | # Load the (URL pattern, handler) tuples for each component. |
|
185 | 185 | handlers = [] |
|
186 | 186 | handlers.extend(load_handlers('base.handlers')) |
|
187 | 187 | handlers.extend(load_handlers('tree.handlers')) |
|
188 | 188 | handlers.extend(load_handlers('auth.login')) |
|
189 | 189 | handlers.extend(load_handlers('auth.logout')) |
|
190 | 190 | handlers.extend(load_handlers('notebook.handlers')) |
|
191 | 191 | handlers.extend(load_handlers('services.kernels.handlers')) |
|
192 | 192 | handlers.extend(load_handlers('services.notebooks.handlers')) |
|
193 | 193 | handlers.extend(load_handlers('services.clusters.handlers')) |
|
194 | 194 | handlers.extend([ |
|
195 | 195 | (r"/files/(.*)", AuthenticatedFileHandler, {'path' : settings['notebook_manager'].notebook_dir}), |
|
196 | 196 | ]) |
|
197 | 197 | # prepend base_project_url onto the patterns that we match |
|
198 | 198 | new_handlers = [] |
|
199 | 199 | for handler in handlers: |
|
200 | 200 | pattern = url_path_join(settings['base_project_url'], handler[0]) |
|
201 | 201 | new_handler = tuple([pattern] + list(handler[1:])) |
|
202 | 202 | new_handlers.append(new_handler) |
|
203 | 203 | return new_handlers |
|
204 | 204 | |
|
205 | 205 | |
|
206 | 206 | |
|
207 | 207 | #----------------------------------------------------------------------------- |
|
208 | 208 | # Aliases and Flags |
|
209 | 209 | #----------------------------------------------------------------------------- |
|
210 | 210 | |
|
211 | 211 | flags = dict(kernel_flags) |
|
212 | 212 | flags['no-browser']=( |
|
213 | 213 | {'NotebookApp' : {'open_browser' : False}}, |
|
214 | 214 | "Don't open the notebook in a browser after startup." |
|
215 | 215 | ) |
|
216 | 216 | flags['no-mathjax']=( |
|
217 | 217 | {'NotebookApp' : {'enable_mathjax' : False}}, |
|
218 | 218 | """Disable MathJax |
|
219 | 219 | |
|
220 | 220 | MathJax is the javascript library IPython uses to render math/LaTeX. It is |
|
221 | 221 | very large, so you may want to disable it if you have a slow internet |
|
222 | 222 | connection, or for offline use of the notebook. |
|
223 | 223 | |
|
224 | 224 | When disabled, equations etc. will appear as their untransformed TeX source. |
|
225 | 225 | """ |
|
226 | 226 | ) |
|
227 | 227 | |
|
228 | 228 | # Add notebook manager flags |
|
229 | 229 | flags.update(boolean_flag('script', 'FileNotebookManager.save_script', |
|
230 | 230 | 'Auto-save a .py script every time the .ipynb notebook is saved', |
|
231 | 231 | 'Do not auto-save .py scripts for every notebook')) |
|
232 | 232 | |
|
233 | 233 | # the flags that are specific to the frontend |
|
234 | 234 | # these must be scrubbed before being passed to the kernel, |
|
235 | 235 | # or it will raise an error on unrecognized flags |
|
236 | 236 | notebook_flags = ['no-browser', 'no-mathjax', 'script', 'no-script'] |
|
237 | 237 | |
|
238 | 238 | aliases = dict(kernel_aliases) |
|
239 | 239 | |
|
240 | 240 | aliases.update({ |
|
241 | 241 | 'ip': 'NotebookApp.ip', |
|
242 | 242 | 'port': 'NotebookApp.port', |
|
243 | 243 | 'port-retries': 'NotebookApp.port_retries', |
|
244 | 244 | 'transport': 'KernelManager.transport', |
|
245 | 245 | 'keyfile': 'NotebookApp.keyfile', |
|
246 | 246 | 'certfile': 'NotebookApp.certfile', |
|
247 | 247 | 'notebook-dir': 'NotebookManager.notebook_dir', |
|
248 | 248 | 'browser': 'NotebookApp.browser', |
|
249 | 249 | }) |
|
250 | 250 | |
|
251 | 251 | # remove ipkernel flags that are singletons, and don't make sense in |
|
252 | 252 | # multi-kernel environment: |
|
253 | 253 | aliases.pop('f', None) |
|
254 | 254 | |
|
255 | 255 | notebook_aliases = [u'port', u'port-retries', u'ip', u'keyfile', u'certfile', |
|
256 | 256 | u'notebook-dir', u'profile', u'profile-dir'] |
|
257 | 257 | |
|
258 | 258 | #----------------------------------------------------------------------------- |
|
259 | 259 | # NotebookApp |
|
260 | 260 | #----------------------------------------------------------------------------- |
|
261 | 261 | |
|
262 | 262 | class NotebookApp(BaseIPythonApplication): |
|
263 | 263 | |
|
264 | 264 | name = 'ipython-notebook' |
|
265 | 265 | |
|
266 | 266 | description = """ |
|
267 | 267 | The IPython HTML Notebook. |
|
268 | 268 | |
|
269 | 269 | This launches a Tornado based HTML Notebook Server that serves up an |
|
270 | 270 | HTML5/Javascript Notebook client. |
|
271 | 271 | """ |
|
272 | 272 | examples = _examples |
|
273 | 273 | |
|
274 | 274 | classes = IPythonConsoleApp.classes + [MappingKernelManager, NotebookManager, |
|
275 | 275 | FileNotebookManager] |
|
276 | 276 | flags = Dict(flags) |
|
277 | 277 | aliases = Dict(aliases) |
|
278 | 278 | |
|
279 | 279 | kernel_argv = List(Unicode) |
|
280 | 280 | |
|
281 | 281 | def _log_level_default(self): |
|
282 | 282 | return logging.INFO |
|
283 | 283 | |
|
284 | 284 | def _log_format_default(self): |
|
285 | 285 | """override default log format to include time""" |
|
286 | 286 | return u"%(asctime)s.%(msecs).03d [%(name)s]%(highlevel)s %(message)s" |
|
287 | 287 | |
|
288 | 288 | # create requested profiles by default, if they don't exist: |
|
289 | 289 | auto_create = Bool(True) |
|
290 | 290 | |
|
291 | 291 | # file to be opened in the notebook server |
|
292 | 292 | file_to_run = Unicode('') |
|
293 | 293 | |
|
294 | 294 | # Network related information. |
|
295 | 295 | |
|
296 |     | ip = Unicode(LOCALHOST, config=True, |

    | 296 | ip = Unicode(config=True, |
|
297 | 297 | help="The IP address the notebook server will listen on." |
|
298 | 298 | ) |
|
    | 299 | def _ip_default(self): |

    | 300 | return localhost() |
|
299 | 301 | |
|
300 | 302 | def _ip_changed(self, name, old, new): |
|
301 | 303 | if new == u'*': self.ip = u'' |
|
302 | 304 | |
|
303 | 305 | port = Integer(8888, config=True, |
|
304 | 306 | help="The port the notebook server will listen on." |
|
305 | 307 | ) |
|
306 | 308 | port_retries = Integer(50, config=True, |
|
307 | 309 | help="The number of additional ports to try if the specified port is not available." |
|
308 | 310 | ) |
|
309 | 311 | |
|
310 | 312 | certfile = Unicode(u'', config=True, |
|
311 | 313 | help="""The full path to an SSL/TLS certificate file.""" |
|
312 | 314 | ) |
|
313 | 315 | |
|
314 | 316 | keyfile = Unicode(u'', config=True, |
|
315 | 317 | help="""The full path to a private key file for usage with SSL/TLS.""" |
|
316 | 318 | ) |
|
317 | 319 | |
|
318 | 320 | cookie_secret = Bytes(b'', config=True, |
|
319 | 321 | help="""The random bytes used to secure cookies. |
|
320 | 322 | By default this is a new random number every time you start the Notebook. |
|
321 | 323 | Set it to a value in a config file to enable logins to persist across server sessions. |
|
322 | 324 | |
|
323 | 325 | Note: Cookie secrets should be kept private, do not share config files with |
|
324 | 326 | cookie_secret stored in plaintext (you can read the value from a file). |
|
325 | 327 | """ |
|
326 | 328 | ) |
|
327 | 329 | def _cookie_secret_default(self): |
|
328 | 330 | return os.urandom(1024) |
|
329 | 331 | |
|
330 | 332 | password = Unicode(u'', config=True, |
|
331 | 333 | help="""Hashed password to use for web authentication. |
|
332 | 334 | |
|
333 | 335 | To generate, type in a python/IPython shell: |
|
334 | 336 | |
|
335 | 337 | from IPython.lib import passwd; passwd() |
|
336 | 338 | |
|
337 | 339 | The string should be of the form type:salt:hashed-password. |
|
338 | 340 | """ |
|
339 | 341 | ) |
|
340 | 342 | |
|
341 | 343 | open_browser = Bool(True, config=True, |
|
342 | 344 | help="""Whether to open in a browser after starting. |
|
343 | 345 | The specific browser used is platform dependent and |
|
344 | 346 | determined by the python standard library `webbrowser` |
|
345 | 347 | module, unless it is overridden using the --browser |
|
346 | 348 | (NotebookApp.browser) configuration option. |
|
347 | 349 | """) |
|
348 | 350 | |
|
349 | 351 | browser = Unicode(u'', config=True, |
|
350 | 352 | help="""Specify what command to use to invoke a web |
|
351 | 353 | browser when opening the notebook. If not specified, the |
|
352 | 354 | default browser will be determined by the `webbrowser` |
|
353 | 355 | standard library module, which allows setting of the |
|
354 | 356 | BROWSER environment variable to override it. |
|
355 | 357 | """) |
|
356 | 358 | |
|
357 | 359 | use_less = Bool(False, config=True, |
|
358 | 360 | help="""Whether to use browser-side less-css parsing |
|
359 | 361 | instead of compiled css version in templates that allows |
|
360 | 362 | it. This is mainly convenient when working on the less |
|
361 | 363 | file to avoid a build step, or if the user wants to override |
|
362 | 364 | some of the less variables without having to recompile |
|
363 | 365 | everything. |
|
364 | 366 | |
|
365 | 367 | You will need to install the less.js component in the static directory |
|
366 | 368 | either in the source tree or in your profile folder. |
|
367 | 369 | """) |
|
368 | 370 | |
|
369 | 371 | webapp_settings = Dict(config=True, |
|
370 | 372 | help="Supply overrides for the tornado.web.Application that the " |
|
371 | 373 | "IPython notebook uses.") |
|
372 | 374 | |
|
373 | 375 | enable_mathjax = Bool(True, config=True, |
|
374 | 376 | help="""Whether to enable MathJax for typesetting math/TeX |
|
375 | 377 | |
|
376 | 378 | MathJax is the javascript library IPython uses to render math/LaTeX. It is |
|
377 | 379 | very large, so you may want to disable it if you have a slow internet |
|
378 | 380 | connection, or for offline use of the notebook. |
|
379 | 381 | |
|
380 | 382 | When disabled, equations etc. will appear as their untransformed TeX source. |
|
381 | 383 | """ |
|
382 | 384 | ) |
|
383 | 385 | def _enable_mathjax_changed(self, name, old, new): |
|
384 | 386 | """set mathjax url to empty if mathjax is disabled""" |
|
385 | 387 | if not new: |
|
386 | 388 | self.mathjax_url = u'' |
|
387 | 389 | |
|
388 | 390 | base_project_url = Unicode('/', config=True, |
|
389 | 391 | help='''The base URL for the notebook server. |
|
390 | 392 | |
|
391 | 393 | Leading and trailing slashes can be omitted, |
|
392 | 394 | and will automatically be added. |
|
393 | 395 | ''') |
|
394 | 396 | def _base_project_url_changed(self, name, old, new): |
|
395 | 397 | if not new.startswith('/'): |
|
396 | 398 | self.base_project_url = '/'+new |
|
397 | 399 | elif not new.endswith('/'): |
|
398 | 400 | self.base_project_url = new+'/' |
|
399 | 401 | |
|
400 | 402 | base_kernel_url = Unicode('/', config=True, |
|
401 | 403 | help='''The base URL for the kernel server |
|
402 | 404 | |
|
403 | 405 | Leading and trailing slashes can be omitted, |
|
404 | 406 | and will automatically be added. |
|
405 | 407 | ''') |
|
406 | 408 | def _base_kernel_url_changed(self, name, old, new): |
|
407 | 409 | if not new.startswith('/'): |
|
408 | 410 | self.base_kernel_url = '/'+new |
|
409 | 411 | elif not new.endswith('/'): |
|
410 | 412 | self.base_kernel_url = new+'/' |
|
411 | 413 | |
|
412 | 414 | websocket_url = Unicode("", config=True, |
|
413 | 415 | help="""The base URL for the websocket server, |
|
414 | 416 | if it differs from the HTTP server (hint: it almost certainly doesn't). |
|
415 | 417 | |
|
416 | 418 | Should be in the form of an HTTP origin: ws[s]://hostname[:port] |
|
417 | 419 | """ |
|
418 | 420 | ) |
|
419 | 421 | |
|
420 | 422 | extra_static_paths = List(Unicode, config=True, |
|
421 | 423 | help="""Extra paths to search for serving static files. |
|
422 | 424 | |
|
423 | 425 | This allows adding javascript/css to be available from the notebook server machine, |
|
424 | 426 | or overriding individual files in the IPython""" |
|
425 | 427 | ) |
|
426 | 428 | def _extra_static_paths_default(self): |
|
427 | 429 | return [os.path.join(self.profile_dir.location, 'static')] |
|
428 | 430 | |
|
429 | 431 | @property |
|
430 | 432 | def static_file_path(self): |
|
431 | 433 | """return extra paths + the default location""" |
|
432 | 434 | return self.extra_static_paths + [DEFAULT_STATIC_FILES_PATH] |
|
433 | 435 | |
|
434 | 436 | mathjax_url = Unicode("", config=True, |
|
435 | 437 | help="""The url for MathJax.js.""" |
|
436 | 438 | ) |
|
437 | 439 | def _mathjax_url_default(self): |
|
438 | 440 | if not self.enable_mathjax: |
|
439 | 441 | return u'' |
|
440 | 442 | static_url_prefix = self.webapp_settings.get("static_url_prefix", |
|
441 | 443 | url_path_join(self.base_project_url, "static") |
|
442 | 444 | ) |
|
443 | 445 | try: |
|
444 | 446 | mathjax = filefind(os.path.join('mathjax', 'MathJax.js'), self.static_file_path) |
|
445 | 447 | except IOError: |
|
446 | 448 | if self.certfile: |
|
447 | 449 | # HTTPS: load from Rackspace CDN, because SSL certificate requires it |
|
448 | 450 | base = u"https://c328740.ssl.cf1.rackcdn.com" |
|
449 | 451 | else: |
|
450 | 452 | base = u"http://cdn.mathjax.org" |
|
451 | 453 | |
|
452 | 454 | url = base + u"/mathjax/latest/MathJax.js" |
|
453 | 455 | self.log.info("Using MathJax from CDN: %s", url) |
|
454 | 456 | return url |
|
455 | 457 | else: |
|
456 | 458 | self.log.info("Using local MathJax from %s" % mathjax) |
|
457 | 459 | return url_path_join(static_url_prefix, u"mathjax/MathJax.js") |
|
458 | 460 | |
|
459 | 461 | def _mathjax_url_changed(self, name, old, new): |
|
460 | 462 | if new and not self.enable_mathjax: |
|
461 | 463 | # enable_mathjax=False overrides mathjax_url |
|
462 | 464 | self.mathjax_url = u'' |
|
463 | 465 | else: |
|
464 | 466 | self.log.info("Using MathJax: %s", new) |
|
465 | 467 | |
|
466 | 468 | notebook_manager_class = DottedObjectName('IPython.html.services.notebooks.filenbmanager.FileNotebookManager', |
|
467 | 469 | config=True, |
|
468 | 470 | help='The notebook manager class to use.') |
|
469 | 471 | |
|
470 | 472 | trust_xheaders = Bool(False, config=True, |
|
471 | 473 | help=("Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-For headers" |
|
472 | 474 | " sent by the upstream reverse proxy. Necessary if the proxy handles SSL") |
|
473 | 475 | ) |
|
474 | 476 | |
|
475 | 477 | def parse_command_line(self, argv=None): |
|
476 | 478 | super(NotebookApp, self).parse_command_line(argv) |
|
477 | 479 | |
|
478 | 480 | if self.extra_args: |
|
479 | 481 | f = os.path.abspath(self.extra_args[0]) |
|
480 | 482 | if os.path.isdir(f): |
|
481 | 483 | nbdir = f |
|
482 | 484 | else: |
|
483 | 485 | self.file_to_run = f |
|
484 | 486 | nbdir = os.path.dirname(f) |
|
485 | 487 | self.config.NotebookManager.notebook_dir = nbdir |
|
486 | 488 | |
|
487 | 489 | def init_kernel_argv(self): |
|
488 | 490 | """construct the kernel arguments""" |
|
489 | 491 | # Scrub frontend-specific flags |
|
490 | 492 | self.kernel_argv = swallow_argv(self.argv, notebook_aliases, notebook_flags) |
|
491 | 493 | # Kernel should inherit default config file from frontend |
|
492 | 494 | self.kernel_argv.append("--IPKernelApp.parent_appname='%s'" % self.name) |
|
493 | 495 | # Kernel should get *absolute* path to profile directory |
|
494 | 496 | self.kernel_argv.extend(["--profile-dir", self.profile_dir.location]) |
|
495 | 497 | |
|
496 | 498 | def init_configurables(self): |
|
497 | 499 | # force Session default to be secure |
|
498 | 500 | default_secure(self.config) |
|
499 | 501 | self.kernel_manager = MappingKernelManager( |
|
500 | 502 | parent=self, log=self.log, kernel_argv=self.kernel_argv, |
|
501 | 503 | connection_dir = self.profile_dir.security_dir, |
|
502 | 504 | ) |
|
503 | 505 | kls = import_item(self.notebook_manager_class) |
|
504 | 506 | self.notebook_manager = kls(parent=self, log=self.log) |
|
505 | 507 | self.notebook_manager.load_notebook_names() |
|
506 | 508 | self.cluster_manager = ClusterManager(parent=self, log=self.log) |
|
507 | 509 | self.cluster_manager.update_profiles() |
|
508 | 510 | |
|
509 | 511 | def init_logging(self): |
|
510 | 512 | # This prevents double log messages because tornado uses a root logger that |

511 | 513 | # self.log is a child of. The logging module dispatches log messages to a log |

512 | 514 | # and all of its ancestors until propagate is set to False. |
|
513 | 515 | self.log.propagate = False |
|
514 | 516 | |
|
515 | 517 | # hook up tornado 3's loggers to our app handlers |
|
516 | 518 | for name in ('access', 'application', 'general'): |
|
517 | 519 | logging.getLogger('tornado.%s' % name).handlers = self.log.handlers |
|
518 | 520 | |
|
519 | 521 | def init_webapp(self): |
|
520 | 522 | """initialize tornado webapp and httpserver""" |
|
521 | 523 | self.web_app = NotebookWebApplication( |
|
522 | 524 | self, self.kernel_manager, self.notebook_manager, |
|
523 | 525 | self.cluster_manager, self.log, |
|
524 | 526 | self.base_project_url, self.webapp_settings |
|
525 | 527 | ) |
|
526 | 528 | if self.certfile: |
|
527 | 529 | ssl_options = dict(certfile=self.certfile) |
|
528 | 530 | if self.keyfile: |
|
529 | 531 | ssl_options['keyfile'] = self.keyfile |
|
530 | 532 | else: |
|
531 | 533 | ssl_options = None |
|
532 | 534 | self.web_app.password = self.password |
|
533 | 535 | self.http_server = httpserver.HTTPServer(self.web_app, ssl_options=ssl_options, |
|
534 | 536 | xheaders=self.trust_xheaders) |
|
535 | 537 | if not self.ip: |
|
536 | 538 | warning = "WARNING: The notebook server is listening on all IP addresses" |
|
537 | 539 | if ssl_options is None: |
|
538 | 540 | self.log.critical(warning + " and not using encryption. This " |
|
539 | 541 | "is not recommended.") |
|
540 | 542 | if not self.password: |
|
541 | 543 | self.log.critical(warning + " and not using authentication. " |
|
542 | 544 | "This is highly insecure and not recommended.") |
|
543 | 545 | success = None |
|
544 | 546 | for port in random_ports(self.port, self.port_retries+1): |
|
545 | 547 | try: |
|
546 | 548 | self.http_server.listen(port, self.ip) |
|
547 | 549 | except socket.error as e: |
|
548 | 550 | # XXX: remove the e.errno == -9 block when we require |
|
549 | 551 | # tornado >= 3.0 |
|
550 | 552 | if e.errno == -9 and tornado.version_info[0] < 3: |
|
551 | 553 | # The flags passed to socket.getaddrinfo from |
|
552 | 554 | # tornado.netutils.bind_sockets can cause "gaierror: |
|
553 | 555 | # [Errno -9] Address family for hostname not supported" |
|
554 | 556 | # when the interface is not associated, for example. |
|
555 | 557 | # Changing the flags to exclude socket.AI_ADDRCONFIG does |
|
556 | 558 | # not cause this error, but the only way to do this is to |
|
557 | 559 | # monkeypatch socket to remove the AI_ADDRCONFIG attribute |
|
558 | 560 | saved_AI_ADDRCONFIG = socket.AI_ADDRCONFIG |
|
559 | 561 | self.log.warn('Monkeypatching socket to fix tornado bug') |
|
560 | 562 | del(socket.AI_ADDRCONFIG) |
|
561 | 563 | try: |
|
562 | 564 | # retry the tornado call without AI_ADDRCONFIG flags |
|
563 | 565 | self.http_server.listen(port, self.ip) |
|
564 | 566 | except socket.error as e2: |
|
565 | 567 | e = e2 |
|
566 | 568 | else: |
|
567 | 569 | self.port = port |
|
568 | 570 | success = True |
|
569 | 571 | break |
|
570 | 572 | # restore the monkeypatch |
|
571 | 573 | socket.AI_ADDRCONFIG = saved_AI_ADDRCONFIG |
|
572 | 574 | if e.errno != errno.EADDRINUSE: |
|
573 | 575 | raise |
|
574 | 576 | self.log.info('The port %i is already in use, trying another random port.' % port) |
|
575 | 577 | else: |
|
576 | 578 | self.port = port |
|
577 | 579 | success = True |
|
578 | 580 | break |
|
579 | 581 | if not success: |
|
580 | 582 | self.log.critical('ERROR: the notebook server could not be started because ' |
|
581 | 583 | 'no available port could be found.') |
|
582 | 584 | self.exit(1) |
|
583 | 585 | |
|
584 | 586 | def init_signal(self): |
|
585 | 587 | if not sys.platform.startswith('win'): |
|
586 | 588 | signal.signal(signal.SIGINT, self._handle_sigint) |
|
587 | 589 | signal.signal(signal.SIGTERM, self._signal_stop) |
|
588 | 590 | if hasattr(signal, 'SIGUSR1'): |
|
589 | 591 | # Windows doesn't support SIGUSR1 |
|
590 | 592 | signal.signal(signal.SIGUSR1, self._signal_info) |
|
591 | 593 | if hasattr(signal, 'SIGINFO'): |
|
592 | 594 | # only on BSD-based systems |
|
593 | 595 | signal.signal(signal.SIGINFO, self._signal_info) |
|
594 | 596 | |
|
595 | 597 | def _handle_sigint(self, sig, frame): |
|
596 | 598 | """SIGINT handler spawns confirmation dialog""" |
|
597 | 599 | # register more forceful signal handler for ^C^C case |
|
598 | 600 | signal.signal(signal.SIGINT, self._signal_stop) |
|
599 | 601 | # request confirmation dialog in bg thread, to avoid |
|
600 | 602 | # blocking the App |
|
601 | 603 | thread = threading.Thread(target=self._confirm_exit) |
|
602 | 604 | thread.daemon = True |
|
603 | 605 | thread.start() |
|
604 | 606 | |
|
605 | 607 | def _restore_sigint_handler(self): |
|
606 | 608 | """callback for restoring original SIGINT handler""" |
|
607 | 609 | signal.signal(signal.SIGINT, self._handle_sigint) |
|
608 | 610 | |
|
609 | 611 | def _confirm_exit(self): |
|
610 | 612 | """confirm shutdown on ^C |
|
611 | 613 | |
|
612 | 614 | A second ^C, or answering 'y' within 5s will cause shutdown, |
|
613 | 615 | otherwise original SIGINT handler will be restored. |
|
614 | 616 | |
|
615 | 617 | This doesn't work on Windows. |
|
616 | 618 | """ |
|
617 | 619 | # FIXME: remove this delay when pyzmq dependency is >= 2.1.11 |
|
618 | 620 | time.sleep(0.1) |
|
619 | 621 | info = self.log.info |
|
620 | 622 | info('interrupted') |
|
621 | 623 | print self.notebook_info() |
|
622 | 624 | sys.stdout.write("Shutdown this notebook server (y/[n])? ") |
|
623 | 625 | sys.stdout.flush() |
|
624 | 626 | r,w,x = select.select([sys.stdin], [], [], 5) |
|
625 | 627 | if r: |
|
626 | 628 | line = sys.stdin.readline() |
|
627 | 629 | if line.lower().startswith('y'): |
|
628 | 630 | self.log.critical("Shutdown confirmed") |
|
629 | 631 | ioloop.IOLoop.instance().stop() |
|
630 | 632 | return |
|
631 | 633 | else: |
|
632 | 634 | print "No answer for 5s:", |
|
633 | 635 | print "resuming operation..." |
|
634 | 636 | # no answer, or answer is no: |
|
635 | 637 | # set it back to original SIGINT handler |
|
636 | 638 | # use IOLoop.add_callback because signal.signal must be called |
|
637 | 639 | # from main thread |
|
638 | 640 | ioloop.IOLoop.instance().add_callback(self._restore_sigint_handler) |
|
639 | 641 | |
|
640 | 642 | def _signal_stop(self, sig, frame): |
|
641 | 643 | self.log.critical("received signal %s, stopping", sig) |
|
642 | 644 | ioloop.IOLoop.instance().stop() |
|
643 | 645 | |
|
644 | 646 | def _signal_info(self, sig, frame): |
|
645 | 647 | print self.notebook_info() |
|
646 | 648 | |
|
647 | 649 | def init_components(self): |
|
648 | 650 | """Check the components submodule, and warn if it's unclean""" |
|
649 | 651 | status = submodule.check_submodule_status() |
|
650 | 652 | if status == 'missing': |
|
651 | 653 | self.log.warn("components submodule missing, running `git submodule update`") |
|
652 | 654 | submodule.update_submodules(submodule.ipython_parent()) |
|
653 | 655 | elif status == 'unclean': |
|
654 | 656 | self.log.warn("components submodule unclean, you may see 404s on static/components") |
|
655 | 657 | self.log.warn("run `setup.py submodule` or `git submodule update` to update") |
|
656 | 658 | |
|
657 | 659 | |
|
658 | 660 | @catch_config_error |
|
659 | 661 | def initialize(self, argv=None): |
|
660 | 662 | self.init_logging() |
|
661 | 663 | super(NotebookApp, self).initialize(argv) |
|
662 | 664 | self.init_kernel_argv() |
|
663 | 665 | self.init_configurables() |
|
664 | 666 | self.init_components() |
|
665 | 667 | self.init_webapp() |
|
666 | 668 | self.init_signal() |
|
667 | 669 | |
|
668 | 670 | def cleanup_kernels(self): |
|
669 | 671 | """Shutdown all kernels. |
|
670 | 672 | |
|
671 | 673 | The kernels will shutdown themselves when this process no longer exists, |
|
672 | 674 | but explicit shutdown allows the KernelManagers to cleanup the connection files. |
|
673 | 675 | """ |
|
674 | 676 | self.log.info('Shutting down kernels') |
|
675 | 677 | self.kernel_manager.shutdown_all() |
|
676 | 678 | |
|
677 | 679 | def notebook_info(self): |
|
678 | 680 | "Return the current working directory and the server url information" |
|
679 | 681 | mgr_info = self.notebook_manager.info_string() + "\n" |
|
680 | 682 | return mgr_info +"The IPython Notebook is running at: %s" % self._url |
|
681 | 683 | |
|
682 | 684 | def start(self): |
|
683 | 685 | """ Start the IPython Notebook server app, after initialization |
|
684 | 686 | |
|
685 | 687 | This method takes no arguments so all configuration and initialization |
|
686 | 688 | must be done prior to calling this method.""" |
|
687 | 689 | ip = self.ip if self.ip else '[all ip addresses on your system]' |
|
688 | 690 | proto = 'https' if self.certfile else 'http' |
|
689 | 691 | info = self.log.info |
|
690 | 692 | self._url = "%s://%s:%i%s" % (proto, ip, self.port, |
|
691 | 693 | self.base_project_url) |
|
692 | 694 | for line in self.notebook_info().split("\n"): |
|
693 | 695 | info(line) |
|
694 | 696 | info("Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).") |
|
695 | 697 | |
|
696 | 698 | if self.open_browser or self.file_to_run: |
|
697 |     | ip = self.ip or LOCALHOST |

    | 699 | ip = self.ip or localhost() |
|
698 | 700 | try: |
|
699 | 701 | browser = webbrowser.get(self.browser or None) |
|
700 | 702 | except webbrowser.Error as e: |
|
701 | 703 | self.log.warn('No web browser found: %s.' % e) |
|
702 | 704 | browser = None |
|
703 | 705 | |
|
704 | 706 | if self.file_to_run: |
|
705 | 707 | name, _ = os.path.splitext(os.path.basename(self.file_to_run)) |
|
706 | 708 | url = self.notebook_manager.rev_mapping.get(name, '') |
|
707 | 709 | else: |
|
708 | 710 | url = '' |
|
709 | 711 | if browser: |
|
710 | 712 | b = lambda : browser.open("%s://%s:%i%s%s" % (proto, ip, |
|
711 | 713 | self.port, self.base_project_url, url), new=2) |
|
712 | 714 | threading.Thread(target=b).start() |
|
713 | 715 | try: |
|
714 | 716 | ioloop.IOLoop.instance().start() |
|
715 | 717 | except KeyboardInterrupt: |
|
716 | 718 | info("Interrupted...") |
|
717 | 719 | finally: |
|
718 | 720 | self.cleanup_kernels() |
|
719 | 721 | |
|
720 | 722 | |
|
721 | 723 | #----------------------------------------------------------------------------- |
|
722 | 724 | # Main entry point |
|
723 | 725 | #----------------------------------------------------------------------------- |
|
724 | 726 | |
|
725 | 727 | launch_new_instance = NotebookApp.launch_instance |
|
726 | 728 |
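
In the notebookapp hunk above, the ip trait loses its static LOCALHOST default and gains a _ip_default() method instead, so the default is computed when the trait is first read. The following is a hedged sketch of that traitlets dynamic-default idiom; Server is a hypothetical class for illustration, and the hard-coded address stands in for the localhost() call used in the diff:

from IPython.utils.traitlets import HasTraits, Unicode

class Server(HasTraits):
    # no default value baked into the declaration
    ip = Unicode(help="The IP address the server will listen on.")

    def _ip_default(self):
        # the _<name>_default convention: evaluated lazily on first
        # access, not at class-definition (import) time
        return '127.0.0.1'  # stand-in; the diff returns localhost()

s = Server()
print(s.ip)  # '127.0.0.1', resolved only now

This keeps configurability intact (an explicitly configured ip still wins) while avoiding any network-interface lookup unless the default is actually needed.
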
@@ -1,556 +1,558 @@
|
1 | 1 | """Utilities for connecting to kernels |
|
2 | 2 | |
|
3 | 3 | Authors: |
|
4 | 4 | |
|
5 | 5 | * Min Ragan-Kelley |
|
6 | 6 | |
|
7 | 7 | """ |
|
8 | 8 | |
|
9 | 9 | #----------------------------------------------------------------------------- |
|
10 | 10 | # Copyright (C) 2013 The IPython Development Team |
|
11 | 11 | # |
|
12 | 12 | # Distributed under the terms of the BSD License. The full license is in |
|
13 | 13 | # the file COPYING, distributed as part of this software. |
|
14 | 14 | #----------------------------------------------------------------------------- |
|
15 | 15 | |
|
16 | 16 | #----------------------------------------------------------------------------- |
|
17 | 17 | # Imports |
|
18 | 18 | #----------------------------------------------------------------------------- |
|
19 | 19 | |
|
20 | 20 | from __future__ import absolute_import |
|
21 | 21 | |
|
22 | 22 | import glob |
|
23 | 23 | import json |
|
24 | 24 | import os |
|
25 | 25 | import socket |
|
26 | 26 | import sys |
|
27 | 27 | from getpass import getpass |
|
28 | 28 | from subprocess import Popen, PIPE |
|
29 | 29 | import tempfile |
|
30 | 30 | |
|
31 | 31 | import zmq |
|
32 | 32 | |
|
33 | 33 | # external imports |
|
34 | 34 | from IPython.external.ssh import tunnel |
|
35 | 35 | |
|
36 | 36 | # IPython imports |
|
37 | 37 | from IPython.config import Configurable |
|
38 | 38 | from IPython.core.profiledir import ProfileDir |
|
39 |    | from IPython.utils.localinterfaces import LOCALHOST |

   | 39 | from IPython.utils.localinterfaces import localhost |
|
40 | 40 | from IPython.utils.path import filefind, get_ipython_dir |
|
41 | 41 | from IPython.utils.py3compat import str_to_bytes, bytes_to_str |
|
42 | 42 | from IPython.utils.traitlets import ( |
|
43 | 43 | Bool, Integer, Unicode, CaselessStrEnum, |
|
44 | 44 | ) |
|
45 | 45 | |
|
46 | 46 | |
|
47 | 47 | #----------------------------------------------------------------------------- |
|
48 | 48 | # Working with Connection Files |
|
49 | 49 | #----------------------------------------------------------------------------- |
|
50 | 50 | |
|
51 | 51 | def write_connection_file(fname=None, shell_port=0, iopub_port=0, stdin_port=0, hb_port=0, |
|
52 |    | control_port=0, ip=LOCALHOST, key=b'', transport='tcp', |

   | 52 | control_port=0, ip='', key=b'', transport='tcp', |
|
53 | 53 | signature_scheme='hmac-sha256', |
|
54 | 54 | ): |
|
55 | 55 | """Generates a JSON config file, including the selection of random ports. |
|
56 | 56 | |
|
57 | 57 | Parameters |
|
58 | 58 | ---------- |
|
59 | 59 | |
|
60 | 60 | fname : unicode |
|
61 | 61 | The path to the file to write |
|
62 | 62 | |
|
63 | 63 | shell_port : int, optional |
|
64 | 64 | The port to use for ROUTER (shell) channel. |
|
65 | 65 | |
|
66 | 66 | iopub_port : int, optional |
|
67 | 67 | The port to use for the SUB channel. |
|
68 | 68 | |
|
69 | 69 | stdin_port : int, optional |
|
70 | 70 | The port to use for the ROUTER (raw input) channel. |
|
71 | 71 | |
|
72 | 72 | control_port : int, optional |
|
73 | 73 | The port to use for the ROUTER (control) channel. |
|
74 | 74 | |
|
75 | 75 | hb_port : int, optional |
|
76 | 76 | The port to use for the heartbeat REP channel. |
|
77 | 77 | |
|
78 | 78 | ip : str, optional |
|
79 | 79 | The ip address the kernel will bind to. |
|
80 | 80 | |
|
81 | 81 | key : str, optional |
|
82 | 82 | The Session key used for message authentication. |
|
83 | 83 | |
|
84 | 84 | signature_scheme : str, optional |
|
85 | 85 | The scheme used for message authentication. |
|
86 | 86 | This has the form 'digest-hash', where 'digest' |
|
87 | 87 | is the scheme used for digests, and 'hash' is the name of the hash function |
|
88 | 88 | used by the digest scheme. |
|
89 | 89 | Currently, 'hmac' is the only supported digest scheme, |
|
90 | 90 | and 'sha256' is the default hash function. |
|
91 | 91 | |
|
92 | 92 | """ |
|
93 | if not ip: | |
|
94 | ip = localhost() | |
|
93 | 95 | # default to temporary connection file |
|
94 | 96 | if not fname: |
|
95 | 97 | fname = tempfile.mktemp('.json') |
|
96 | 98 | |
|
97 | 99 | # Find open ports as necessary. |
|
98 | 100 | |
|
99 | 101 | ports = [] |
|
100 | 102 | ports_needed = int(shell_port <= 0) + \ |
|
101 | 103 | int(iopub_port <= 0) + \ |
|
102 | 104 | int(stdin_port <= 0) + \ |
|
103 | 105 | int(control_port <= 0) + \ |
|
104 | 106 | int(hb_port <= 0) |
|
105 | 107 | if transport == 'tcp': |
|
106 | 108 | for i in range(ports_needed): |
|
107 | 109 | sock = socket.socket() |
|
108 | 110 | sock.bind(('', 0)) |
|
109 | 111 | ports.append(sock) |
|
110 | 112 | for i, sock in enumerate(ports): |
|
111 | 113 | port = sock.getsockname()[1] |
|
112 | 114 | sock.close() |
|
113 | 115 | ports[i] = port |
|
114 | 116 | else: |
|
115 | 117 | N = 1 |
|
116 | 118 | for i in range(ports_needed): |
|
117 | 119 | while os.path.exists("%s-%s" % (ip, str(N))): |
|
118 | 120 | N += 1 |
|
119 | 121 | ports.append(N) |
|
120 | 122 | N += 1 |
|
121 | 123 | if shell_port <= 0: |
|
122 | 124 | shell_port = ports.pop(0) |
|
123 | 125 | if iopub_port <= 0: |
|
124 | 126 | iopub_port = ports.pop(0) |
|
125 | 127 | if stdin_port <= 0: |
|
126 | 128 | stdin_port = ports.pop(0) |
|
127 | 129 | if control_port <= 0: |
|
128 | 130 | control_port = ports.pop(0) |
|
129 | 131 | if hb_port <= 0: |
|
130 | 132 | hb_port = ports.pop(0) |
|
131 | 133 | |
|
132 | 134 | cfg = dict( shell_port=shell_port, |
|
133 | 135 | iopub_port=iopub_port, |
|
134 | 136 | stdin_port=stdin_port, |
|
135 | 137 | control_port=control_port, |
|
136 | 138 | hb_port=hb_port, |
|
137 | 139 | ) |
|
138 | 140 | cfg['ip'] = ip |
|
139 | 141 | cfg['key'] = bytes_to_str(key) |
|
140 | 142 | cfg['transport'] = transport |
|
141 | 143 | cfg['signature_scheme'] = signature_scheme |
|
142 | 144 | |
|
143 | 145 | with open(fname, 'w') as f: |
|
144 | 146 | f.write(json.dumps(cfg, indent=2)) |
|
145 | 147 | |
|
146 | 148 | return fname, cfg |
|
147 | 149 | |
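A minimal usage sketch of write_connection_file() as patched above: leaving ip empty (the new default) defers the localhost lookup to call time instead of a module-level constant. The key value and printed paths are illustrative only.

    from IPython.kernel.connect import write_connection_file

    fname, cfg = write_connection_file(key=b'secret')   # ip='' -> localhost()
    print(fname)                  # e.g. /tmp/tmpXXXXXX.json
    print(cfg['ip'])              # typically '127.0.0.1'
    assert cfg['shell_port'] > 0  # random free ports were chosen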
|
148 | 150 | |
|
149 | 151 | def get_connection_file(app=None): |
|
150 | 152 | """Return the path to the connection file of an app |
|
151 | 153 | |
|
152 | 154 | Parameters |
|
153 | 155 | ---------- |
|
154 | 156 | app : IPKernelApp instance [optional] |
|
155 | 157 | If unspecified, the currently running app will be used |
|
156 | 158 | """ |
|
157 | 159 | if app is None: |
|
158 | 160 | from IPython.kernel.zmq.kernelapp import IPKernelApp |
|
159 | 161 | if not IPKernelApp.initialized(): |
|
160 | 162 | raise RuntimeError("app not specified, and not in a running Kernel") |
|
161 | 163 | |
|
162 | 164 | app = IPKernelApp.instance() |
|
163 | 165 | return filefind(app.connection_file, ['.', app.profile_dir.security_dir]) |
|
164 | 166 | |
|
165 | 167 | |
|
166 | 168 | def find_connection_file(filename, profile=None): |
|
167 | 169 | """find a connection file, and return its absolute path. |
|
168 | 170 | |
|
169 | 171 | The current working directory and the profile's security |
|
170 | 172 | directory will be searched for the file if it is not given by |
|
171 | 173 | absolute path. |
|
172 | 174 | |
|
173 | 175 | If profile is unspecified, then the current running application's |
|
174 | 176 | profile will be used, or 'default', if not run from IPython. |
|
175 | 177 | |
|
176 | 178 | If the argument does not match an existing file, it will be interpreted as a |
|
177 | 179 | fileglob, and the matching file in the profile's security dir with |
|
178 | 180 | the latest access time will be used. |
|
179 | 181 | |
|
180 | 182 | Parameters |
|
181 | 183 | ---------- |
|
182 | 184 | filename : str |
|
183 | 185 | The connection file or fileglob to search for. |
|
184 | 186 | profile : str [optional] |
|
185 | 187 | The name of the profile to use when searching for the connection file, |
|
186 | 188 | if different from the current IPython session or 'default'. |
|
187 | 189 | |
|
188 | 190 | Returns |
|
189 | 191 | ------- |
|
190 | 192 | str : The absolute path of the connection file. |
|
191 | 193 | """ |
|
192 | 194 | from IPython.core.application import BaseIPythonApplication as IPApp |
|
193 | 195 | try: |
|
194 | 196 | # quick check for absolute path, before going through logic |
|
195 | 197 | return filefind(filename) |
|
196 | 198 | except IOError: |
|
197 | 199 | pass |
|
198 | 200 | |
|
199 | 201 | if profile is None: |
|
200 | 202 | # profile unspecified, check if running from an IPython app |
|
201 | 203 | if IPApp.initialized(): |
|
202 | 204 | app = IPApp.instance() |
|
203 | 205 | profile_dir = app.profile_dir |
|
204 | 206 | else: |
|
205 | 207 | # not running in IPython, use default profile |
|
206 | 208 | profile_dir = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), 'default') |
|
207 | 209 | else: |
|
208 | 210 | # find profiledir by profile name: |
|
209 | 211 | profile_dir = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), profile) |
|
210 | 212 | security_dir = profile_dir.security_dir |
|
211 | 213 | |
|
212 | 214 | try: |
|
213 | 215 | # first, try explicit name |
|
214 | 216 | return filefind(filename, ['.', security_dir]) |
|
215 | 217 | except IOError: |
|
216 | 218 | pass |
|
217 | 219 | |
|
218 | 220 | # not found by full name |
|
219 | 221 | |
|
220 | 222 | if '*' in filename: |
|
221 | 223 | # given as a glob already |
|
222 | 224 | pat = filename |
|
223 | 225 | else: |
|
224 | 226 | # accept any substring match |
|
225 | 227 | pat = '*%s*' % filename |
|
226 | 228 | matches = glob.glob( os.path.join(security_dir, pat) ) |
|
227 | 229 | if not matches: |
|
228 | 230 | raise IOError("Could not find %r in %r" % (filename, security_dir)) |
|
229 | 231 | elif len(matches) == 1: |
|
230 | 232 | return matches[0] |
|
231 | 233 | else: |
|
232 | 234 | # get most recent match, by access time: |
|
233 | 235 | return sorted(matches, key=lambda f: os.stat(f).st_atime)[-1] |
|
234 | 236 | |
|
235 | 237 | |
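A sketch of the lookup order documented above; the kernel id is hypothetical. An exact path wins first, then any substring match in the profile's security dir, with the most recently accessed match chosen last.

    from IPython.kernel.connect import find_connection_file

    path = find_connection_file('kernel-1234')        # searched as '*kernel-1234*'
    path = find_connection_file('kernel-*.json', profile='default')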
|
236 | 238 | def get_connection_info(connection_file=None, unpack=False, profile=None): |
|
237 | 239 | """Return the connection information for the current Kernel. |
|
238 | 240 | |
|
239 | 241 | Parameters |
|
240 | 242 | ---------- |
|
241 | 243 | connection_file : str [optional] |
|
242 | 244 | The connection file to be used. Can be given by absolute path, or |
|
243 | 245 | IPython will search in the security directory of a given profile. |
|
244 | 246 | If run from IPython, the profile of the running app is used. |
|
245 | 247 | |
|
246 | 248 | If unspecified, the connection file for the currently running |
|
247 | 249 | IPython Kernel will be used, which is only allowed from inside a kernel. |
|
248 | 250 | unpack : bool [default: False] |
|
249 | 251 | if True, return the unpacked dict, otherwise just the string contents |
|
250 | 252 | of the file. |
|
251 | 253 | profile : str [optional] |
|
252 | 254 | The name of the profile to use when searching for the connection file, |
|
253 | 255 | if different from the current IPython session or 'default'. |
|
254 | 256 | |
|
255 | 257 | |
|
256 | 258 | Returns |
|
257 | 259 | ------- |
|
258 | 260 | The connection dictionary of the current kernel, as string or dict, |
|
259 | 261 | depending on `unpack`. |
|
260 | 262 | """ |
|
261 | 263 | if connection_file is None: |
|
262 | 264 | # get connection file from current kernel |
|
263 | 265 | cf = get_connection_file() |
|
264 | 266 | else: |
|
265 | 267 | # connection file specified, allow shortnames: |
|
266 | 268 | cf = find_connection_file(connection_file, profile=profile) |
|
267 | 269 | |
|
268 | 270 | with open(cf) as f: |
|
269 | 271 | info = f.read() |
|
270 | 272 | |
|
271 | 273 | if unpack: |
|
272 | 274 | info = json.loads(info) |
|
273 | 275 | # ensure key is bytes: |
|
274 | 276 | info['key'] = str_to_bytes(info.get('key', '')) |
|
275 | 277 | return info |
|
276 | 278 | |
|
277 | 279 | |
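A sketch of the unpack behaviour, assuming a connection file by this hypothetical name can be found:

    from IPython.kernel.connect import get_connection_info

    info = get_connection_info('kernel-1234.json', unpack=True)
    assert isinstance(info['key'], bytes)   # key is coerced back to bytes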
|
278 | 280 | def connect_qtconsole(connection_file=None, argv=None, profile=None): |
|
279 | 281 | """Connect a qtconsole to the current kernel. |
|
280 | 282 | |
|
281 | 283 | This is useful for connecting a second qtconsole to a kernel, or to a |
|
282 | 284 | local notebook. |
|
283 | 285 | |
|
284 | 286 | Parameters |
|
285 | 287 | ---------- |
|
286 | 288 | connection_file : str [optional] |
|
287 | 289 | The connection file to be used. Can be given by absolute path, or |
|
288 | 290 | IPython will search in the security directory of a given profile. |
|
289 | 291 | If run from IPython, the profile of the running app is used. |
|
290 | 292 | |
|
291 | 293 | If unspecified, the connection file for the currently running |
|
292 | 294 | IPython Kernel will be used, which is only allowed from inside a kernel. |
|
293 | 295 | argv : list [optional] |
|
294 | 296 | Any extra args to be passed to the console. |
|
295 | 297 | profile : str [optional] |
|
296 | 298 | The name of the profile to use when searching for the connection file, |
|
297 | 299 | if different from the current IPython session or 'default'. |
|
298 | 300 | |
|
299 | 301 | |
|
300 | 302 | Returns |
|
301 | 303 | ------- |
|
302 | 304 | subprocess.Popen instance running the qtconsole frontend |
|
303 | 305 | """ |
|
304 | 306 | argv = [] if argv is None else argv |
|
305 | 307 | |
|
306 | 308 | if connection_file is None: |
|
307 | 309 | # get connection file from current kernel |
|
308 | 310 | cf = get_connection_file() |
|
309 | 311 | else: |
|
310 | 312 | cf = find_connection_file(connection_file, profile=profile) |
|
311 | 313 | |
|
312 | 314 | cmd = ';'.join([ |
|
313 | 315 | "from IPython.qt.console import qtconsoleapp", |
|
314 | 316 | "qtconsoleapp.main()" |
|
315 | 317 | ]) |
|
316 | 318 | |
|
317 | 319 | return Popen([sys.executable, '-c', cmd, '--existing', cf] + argv, |
|
318 | 320 | stdout=PIPE, stderr=PIPE, close_fds=(sys.platform != 'win32'), |
|
319 | 321 | ) |
|
320 | 322 | |
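The intended call pattern, for reference; from inside a running kernel no arguments are needed, and the returned Popen handle can be polled or terminated. The file name is a placeholder.

    from IPython.kernel.connect import connect_qtconsole

    proc = connect_qtconsole()                    # current kernel's file
    proc = connect_qtconsole('kernel-1234.json')  # or an explicit file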
|
321 | 323 | |
|
322 | 324 | def tunnel_to_kernel(connection_info, sshserver, sshkey=None): |
|
323 | 325 | """tunnel connections to a kernel via ssh |
|
324 | 326 | |
|
325 | 327 | This will open four SSH tunnels from localhost on this machine to the |
|
326 | 328 | ports associated with the kernel. They can be direct |

327 | 329 | localhost-to-localhost tunnels, or, if an intermediate server is |

328 | 330 | necessary, the kernel must be listening on a public IP. |
|
329 | 331 | |
|
330 | 332 | Parameters |
|
331 | 333 | ---------- |
|
332 | 334 | connection_info : dict or str (path) |
|
333 | 335 | Either a connection dict, or the path to a JSON connection file |
|
334 | 336 | sshserver : str |
|
335 | 337 | The ssh server to use to tunnel to the kernel. Can be a full |
|
336 | 338 | `user@server:port` string. ssh config aliases are respected. |
|
337 | 339 | sshkey : str [optional] |
|
338 | 340 | Path to file containing ssh key to use for authentication. |
|
339 | 341 | Only necessary if your ssh config does not already associate |
|
340 | 342 | a keyfile with the host. |
|
341 | 343 | |
|
342 | 344 | Returns |
|
343 | 345 | ------- |
|
344 | 346 | |
|
345 | 347 | (shell, iopub, stdin, hb) : ints |
|
346 | 348 | The four ports on localhost that have been forwarded to the kernel. |
|
347 | 349 | """ |
|
348 | 350 | if isinstance(connection_info, basestring): |
|
349 | 351 | # it's a path, unpack it |
|
350 | 352 | with open(connection_info) as f: |
|
351 | 353 | connection_info = json.loads(f.read()) |
|
352 | 354 | |
|
353 | 355 | cf = connection_info |
|
354 | 356 | |
|
355 | 357 | lports = tunnel.select_random_ports(4) |
|
356 | 358 | rports = cf['shell_port'], cf['iopub_port'], cf['stdin_port'], cf['hb_port'] |
|
357 | 359 | |
|
358 | 360 | remote_ip = cf['ip'] |
|
359 | 361 | |
|
360 | 362 | if tunnel.try_passwordless_ssh(sshserver, sshkey): |
|
361 | 363 | password=False |
|
362 | 364 | else: |
|
363 | 365 | password = getpass("SSH Password for %s: "%sshserver) |
|
364 | 366 | |
|
365 | 367 | for lp,rp in zip(lports, rports): |
|
366 | 368 | tunnel.ssh_tunnel(lp, rp, sshserver, remote_ip, sshkey, password) |
|
367 | 369 | |
|
368 | 370 | return tuple(lports) |
|
369 | 371 | |
|
370 | 372 | |
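A sketch of the tunnelling call; the server and connection file names are placeholders. After it returns, a local client can reach the remote kernel through the four forwarded localhost ports.

    from IPython.kernel.connect import tunnel_to_kernel

    shell, iopub, stdin, hb = tunnel_to_kernel(
        'kernel-1234.json', 'user@login.example.com')
    # e.g. connect the shell channel to tcp://127.0.0.1:<shell>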
|
371 | 373 | #----------------------------------------------------------------------------- |
|
372 | 374 | # Mixin for classes that work with connection files |
|
373 | 375 | #----------------------------------------------------------------------------- |
|
374 | 376 | |
|
375 | 377 | channel_socket_types = { |
|
376 | 378 | 'hb' : zmq.REQ, |
|
377 | 379 | 'shell' : zmq.DEALER, |
|
378 | 380 | 'iopub' : zmq.SUB, |
|
379 | 381 | 'stdin' : zmq.DEALER, |
|
380 | 382 | 'control': zmq.DEALER, |
|
381 | 383 | } |
|
382 | 384 | |
|
383 | 385 | port_names = [ "%s_port" % channel for channel in ('shell', 'stdin', 'iopub', 'hb', 'control')] |
|
384 | 386 | |
|
385 | 387 | class ConnectionFileMixin(Configurable): |
|
386 | 388 | """Mixin for configurable classes that work with connection files""" |
|
387 | 389 | |
|
388 | 390 | # The addresses for the communication channels |
|
389 | 391 | connection_file = Unicode('') |
|
390 | 392 | _connection_file_written = Bool(False) |
|
391 | 393 | |
|
392 | 394 | transport = CaselessStrEnum(['tcp', 'ipc'], default_value='tcp', config=True) |
|
393 | 395 | |
|
394 | ip = Unicode(LOCALHOST, config=True, | |

396 | ip = Unicode(config=True, | |
|
395 | 397 | help="""Set the kernel\'s IP address [default localhost]. |
|
396 | 398 | If the IP address is something other than localhost, then |
|
397 | 399 | consoles on other machines will be able to connect |
|
398 | 400 | to the Kernel, so be careful!""" |
|
399 | 401 | ) |
|
400 | 402 | |
|
401 | 403 | def _ip_default(self): |
|
402 | 404 | if self.transport == 'ipc': |
|
403 | 405 | if self.connection_file: |
|
404 | 406 | return os.path.splitext(self.connection_file)[0] + '-ipc' |
|
405 | 407 | else: |
|
406 | 408 | return 'kernel-ipc' |
|
407 | 409 | else: |
|
408 | return LOCALHOST | |

410 | return localhost() | |
|
409 | 411 | |
|
410 | 412 | def _ip_changed(self, name, old, new): |
|
411 | 413 | if new == '*': |
|
412 | 414 | self.ip = '0.0.0.0' |
|
413 | 415 | |
|
414 | 416 | # protected traits |
|
415 | 417 | |
|
416 | 418 | shell_port = Integer(0) |
|
417 | 419 | iopub_port = Integer(0) |
|
418 | 420 | stdin_port = Integer(0) |
|
419 | 421 | control_port = Integer(0) |
|
420 | 422 | hb_port = Integer(0) |
|
421 | 423 | |
|
422 | 424 | @property |
|
423 | 425 | def ports(self): |
|
424 | 426 | return [ getattr(self, name) for name in port_names ] |
|
425 | 427 | |
|
426 | 428 | #-------------------------------------------------------------------------- |
|
427 | 429 | # Connection and ipc file management |
|
428 | 430 | #-------------------------------------------------------------------------- |
|
429 | 431 | |
|
430 | 432 | def get_connection_info(self): |
|
431 | 433 | """return the connection info as a dict""" |
|
432 | 434 | return dict( |
|
433 | 435 | transport=self.transport, |
|
434 | 436 | ip=self.ip, |
|
435 | 437 | shell_port=self.shell_port, |
|
436 | 438 | iopub_port=self.iopub_port, |
|
437 | 439 | stdin_port=self.stdin_port, |
|
438 | 440 | hb_port=self.hb_port, |
|
439 | 441 | control_port=self.control_port, |
|
440 | 442 | signature_scheme=self.session.signature_scheme, |
|
441 | 443 | key=self.session.key, |
|
442 | 444 | ) |
|
443 | 445 | |
|
444 | 446 | def cleanup_connection_file(self): |
|
445 | 447 | """Cleanup connection file *if we wrote it* |
|
446 | 448 | |
|
447 | 449 | Will not raise if the connection file was already removed somehow. |
|
448 | 450 | """ |
|
449 | 451 | if self._connection_file_written: |
|
450 | 452 | # cleanup connection files on full shutdown of kernel we started |
|
451 | 453 | self._connection_file_written = False |
|
452 | 454 | try: |
|
453 | 455 | os.remove(self.connection_file) |
|
454 | 456 | except (IOError, OSError, AttributeError): |
|
455 | 457 | pass |
|
456 | 458 | |
|
457 | 459 | def cleanup_ipc_files(self): |
|
458 | 460 | """Cleanup ipc files if we wrote them.""" |
|
459 | 461 | if self.transport != 'ipc': |
|
460 | 462 | return |
|
461 | 463 | for port in self.ports: |
|
462 | 464 | ipcfile = "%s-%i" % (self.ip, port) |
|
463 | 465 | try: |
|
464 | 466 | os.remove(ipcfile) |
|
465 | 467 | except (IOError, OSError): |
|
466 | 468 | pass |
|
467 | 469 | |
|
468 | 470 | def write_connection_file(self): |
|
469 | 471 | """Write connection info to JSON dict in self.connection_file.""" |
|
470 | 472 | if self._connection_file_written: |
|
471 | 473 | return |
|
472 | 474 | |
|
473 | 475 | self.connection_file, cfg = write_connection_file(self.connection_file, |
|
474 | 476 | transport=self.transport, ip=self.ip, key=self.session.key, |
|
475 | 477 | stdin_port=self.stdin_port, iopub_port=self.iopub_port, |
|
476 | 478 | shell_port=self.shell_port, hb_port=self.hb_port, |
|
477 | 479 | control_port=self.control_port, |
|
478 | 480 | signature_scheme=self.session.signature_scheme, |
|
479 | 481 | ) |
|
480 | 482 | # write_connection_file also sets default ports: |
|
481 | 483 | for name in port_names: |
|
482 | 484 | setattr(self, name, cfg[name]) |
|
483 | 485 | |
|
484 | 486 | self._connection_file_written = True |
|
485 | 487 | |
|
486 | 488 | def load_connection_file(self): |
|
487 | 489 | """Load connection info from JSON dict in self.connection_file.""" |
|
488 | 490 | with open(self.connection_file) as f: |
|
489 | 491 | cfg = json.loads(f.read()) |
|
490 | 492 | |
|
491 | 493 | self.transport = cfg.get('transport', 'tcp') |
|
492 | 494 | self.ip = cfg['ip'] |
|
493 | 495 | for name in port_names: |
|
494 | 496 | setattr(self, name, cfg[name]) |
|
495 | 497 | if 'key' in cfg: |
|
496 | 498 | self.session.key = str_to_bytes(cfg['key']) |
|
497 | 499 | if cfg.get('signature_scheme'): |
|
498 | 500 | self.session.signature_scheme = cfg['signature_scheme'] |
|
499 | 501 | |
|
500 | 502 | #-------------------------------------------------------------------------- |
|
501 | 503 | # Creating connected sockets |
|
502 | 504 | #-------------------------------------------------------------------------- |
|
503 | 505 | |
|
504 | 506 | def _make_url(self, channel): |
|
505 | 507 | """Make a ZeroMQ URL for a given channel.""" |
|
506 | 508 | transport = self.transport |
|
507 | 509 | ip = self.ip |
|
508 | 510 | port = getattr(self, '%s_port' % channel) |
|
509 | 511 | |
|
510 | 512 | if transport == 'tcp': |
|
511 | 513 | return "tcp://%s:%i" % (ip, port) |
|
512 | 514 | else: |
|
513 | 515 | return "%s://%s-%s" % (transport, ip, port) |
|
514 | 516 | |
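For reference, the two URL shapes _make_url produces (values illustrative): a real TCP port for tcp, a file-name suffix for ipc.

    # transport='tcp', ip='127.0.0.1', shell_port=53001:
    #     _make_url('shell') -> 'tcp://127.0.0.1:53001'
    # transport='ipc', ip='kernel-ipc', shell_port=1:
    #     _make_url('shell') -> 'ipc://kernel-ipc-1'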
|
515 | 517 | def _create_connected_socket(self, channel, identity=None): |
|
516 | 518 | """Create a zmq Socket and connect it to the kernel.""" |
|
517 | 519 | url = self._make_url(channel) |
|
518 | 520 | socket_type = channel_socket_types[channel] |
|
519 | 521 | self.log.info("Connecting to: %s" % url) |
|
520 | 522 | sock = self.context.socket(socket_type) |
|
521 | 523 | if identity: |
|
522 | 524 | sock.identity = identity |
|
523 | 525 | sock.connect(url) |
|
524 | 526 | return sock |
|
525 | 527 | |
|
526 | 528 | def connect_iopub(self, identity=None): |
|
527 | 529 | """return zmq Socket connected to the IOPub channel""" |
|
528 | 530 | sock = self._create_connected_socket('iopub', identity=identity) |
|
529 | 531 | sock.setsockopt(zmq.SUBSCRIBE, b'') |
|
530 | 532 | return sock |
|
531 | 533 | |
|
532 | 534 | def connect_shell(self, identity=None): |
|
533 | 535 | """return zmq Socket connected to the Shell channel""" |
|
534 | 536 | return self._create_connected_socket('shell', identity=identity) |
|
535 | 537 | |
|
536 | 538 | def connect_stdin(self, identity=None): |
|
537 | 539 | """return zmq Socket connected to the StdIn channel""" |
|
538 | 540 | return self._create_connected_socket('stdin', identity=identity) |
|
539 | 541 | |
|
540 | 542 | def connect_hb(self, identity=None): |
|
541 | 543 | """return zmq Socket connected to the Heartbeat channel""" |
|
542 | 544 | return self._create_connected_socket('hb', identity=identity) |
|
543 | 545 | |
|
544 | 546 | def connect_control(self, identity=None): |
|
545 | 547 | """return zmq Socket connected to the Control channel""" |
|
546 | 548 | return self._create_connected_socket('control', identity=identity) |
|
547 | 549 | |
|
548 | 550 | |
|
549 | 551 | __all__ = [ |
|
550 | 552 | 'write_connection_file', |
|
551 | 553 | 'get_connection_file', |
|
552 | 554 | 'find_connection_file', |
|
553 | 555 | 'get_connection_info', |
|
554 | 556 | 'connect_qtconsole', |
|
555 | 557 | 'tunnel_to_kernel', |
|
556 | 558 | ] |
@@ -1,379 +1,378 b'' | |||
|
1 | """Base class to manage a running kernel | |
|
2 | """ | |
|
1 | """Base class to manage a running kernel""" | |
|
3 | 2 | |
|
4 | 3 | #----------------------------------------------------------------------------- |
|
5 | 4 | # Copyright (C) 2013 The IPython Development Team |
|
6 | 5 | # |
|
7 | 6 | # Distributed under the terms of the BSD License. The full license is in |
|
8 | 7 | # the file COPYING, distributed as part of this software. |
|
9 | 8 | #----------------------------------------------------------------------------- |
|
10 | 9 | |
|
11 | 10 | #----------------------------------------------------------------------------- |
|
12 | 11 | # Imports |
|
13 | 12 | #----------------------------------------------------------------------------- |
|
14 | 13 | |
|
15 | 14 | from __future__ import absolute_import |
|
16 | 15 | |
|
17 | 16 | # Standard library imports |
|
18 | 17 | import signal |
|
19 | 18 | import sys |
|
20 | 19 | import time |
|
21 | 20 | |
|
22 | 21 | import zmq |
|
23 | 22 | |
|
24 | 23 | # Local imports |
|
25 | 24 | from IPython.config.configurable import LoggingConfigurable |
|
26 | 25 | from IPython.utils.importstring import import_item |
|
27 | from IPython.utils.localinterfaces import LOCAL_IPS | |

26 | from IPython.utils.localinterfaces import is_local_ip, local_ips | |
|
28 | 27 | from IPython.utils.traitlets import ( |
|
29 | 28 | Any, Instance, Unicode, List, Bool, Type, DottedObjectName |
|
30 | 29 | ) |
|
31 | 30 | from IPython.kernel import ( |
|
32 | 31 | make_ipkernel_cmd, |
|
33 | 32 | launch_kernel, |
|
34 | 33 | ) |
|
35 | 34 | from .connect import ConnectionFileMixin |
|
36 | 35 | from .zmq.session import Session |
|
37 | 36 | from .managerabc import ( |
|
38 | 37 | KernelManagerABC |
|
39 | 38 | ) |
|
40 | 39 | |
|
41 | 40 | #----------------------------------------------------------------------------- |
|
42 | 41 | # Main kernel manager class |
|
43 | 42 | #----------------------------------------------------------------------------- |
|
44 | 43 | |
|
45 | 44 | class KernelManager(LoggingConfigurable, ConnectionFileMixin): |
|
46 | 45 | """Manages a single kernel in a subprocess on this host. |
|
47 | 46 | |
|
48 | 47 | This version starts kernels with Popen. |
|
49 | 48 | """ |
|
50 | 49 | |
|
51 | 50 | # The PyZMQ Context to use for communication with the kernel. |
|
52 | 51 | context = Instance(zmq.Context) |
|
53 | 52 | def _context_default(self): |
|
54 | 53 | return zmq.Context.instance() |
|
55 | 54 | |
|
56 | 55 | # The Session to use for communication with the kernel. |
|
57 | 56 | session = Instance(Session) |
|
58 | 57 | def _session_default(self): |
|
59 | 58 | return Session(parent=self) |
|
60 | 59 | |
|
61 | 60 | # the class to create with our `client` method |
|
62 | 61 | client_class = DottedObjectName('IPython.kernel.blocking.BlockingKernelClient') |
|
63 | 62 | client_factory = Type() |
|
64 | 63 | def _client_class_changed(self, name, old, new): |
|
65 | 64 | self.client_factory = import_item(str(new)) |
|
66 | 65 | |
|
67 | 66 | # The kernel process with which the KernelManager is communicating. |
|
68 | 67 | # generally a Popen instance |
|
69 | 68 | kernel = Any() |
|
70 | 69 | |
|
71 | 70 | kernel_cmd = List(Unicode, config=True, |
|
72 | 71 | help="""The Popen Command to launch the kernel. |
|
73 | 72 | Override this if you have a custom |
|
74 | 73 | """ |
|
75 | 74 | ) |
|
76 | 75 | |
|
77 | 76 | def _kernel_cmd_changed(self, name, old, new): |
|
78 | 77 | self.ipython_kernel = False |
|
79 | 78 | |
|
80 | 79 | ipython_kernel = Bool(True) |
|
81 | 80 | |
|
82 | 81 | # Protected traits |
|
83 | 82 | _launch_args = Any() |
|
84 | 83 | _control_socket = Any() |
|
85 | 84 | |
|
86 | 85 | _restarter = Any() |
|
87 | 86 | |
|
88 | 87 | autorestart = Bool(False, config=True, |
|
89 | 88 | help="""Should we autorestart the kernel if it dies.""" |
|
90 | 89 | ) |
|
91 | 90 | |
|
92 | 91 | def __del__(self): |
|
93 | 92 | self._close_control_socket() |
|
94 | 93 | self.cleanup_connection_file() |
|
95 | 94 | |
|
96 | 95 | #-------------------------------------------------------------------------- |
|
97 | 96 | # Kernel restarter |
|
98 | 97 | #-------------------------------------------------------------------------- |
|
99 | 98 | |
|
100 | 99 | def start_restarter(self): |
|
101 | 100 | pass |
|
102 | 101 | |
|
103 | 102 | def stop_restarter(self): |
|
104 | 103 | pass |
|
105 | 104 | |
|
106 | 105 | def add_restart_callback(self, callback, event='restart'): |
|
107 | 106 | """register a callback to be called when a kernel is restarted""" |
|
108 | 107 | if self._restarter is None: |
|
109 | 108 | return |
|
110 | 109 | self._restarter.add_callback(callback, event) |
|
111 | 110 | |
|
112 | 111 | def remove_restart_callback(self, callback, event='restart'): |
|
113 | 112 | """unregister a callback to be called when a kernel is restarted""" |
|
114 | 113 | if self._restarter is None: |
|
115 | 114 | return |
|
116 | 115 | self._restarter.remove_callback(callback, event) |
|
117 | 116 | |
|
118 | 117 | #-------------------------------------------------------------------------- |
|
119 | 118 | # create a Client connected to our Kernel |
|
120 | 119 | #-------------------------------------------------------------------------- |
|
121 | 120 | |
|
122 | 121 | def client(self, **kwargs): |
|
123 | 122 | """Create a client configured to connect to our kernel""" |
|
124 | 123 | if self.client_factory is None: |
|
125 | 124 | self.client_factory = import_item(self.client_class) |
|
126 | 125 | |
|
127 | 126 | kw = {} |
|
128 | 127 | kw.update(self.get_connection_info()) |
|
129 | 128 | kw.update(dict( |
|
130 | 129 | connection_file=self.connection_file, |
|
131 | 130 | session=self.session, |
|
132 | 131 | parent=self, |
|
133 | 132 | )) |
|
134 | 133 | |
|
135 | 134 | # add kwargs last, for manual overrides |
|
136 | 135 | kw.update(kwargs) |
|
137 | 136 | return self.client_factory(**kw) |
|
138 | 137 | |
|
139 | 138 | #-------------------------------------------------------------------------- |
|
140 | 139 | # Kernel management |
|
141 | 140 | #-------------------------------------------------------------------------- |
|
142 | 141 | |
|
143 | 142 | def format_kernel_cmd(self, **kw): |
|
144 | 143 | """format templated args (e.g. {connection_file})""" |
|
145 | 144 | if self.kernel_cmd: |
|
146 | 145 | cmd = self.kernel_cmd |
|
147 | 146 | else: |
|
148 | 147 | cmd = make_ipkernel_cmd( |
|
149 | 148 | 'from IPython.kernel.zmq.kernelapp import main; main()', |
|
150 | 149 | **kw |
|
151 | 150 | ) |
|
152 | 151 | ns = dict(connection_file=self.connection_file) |
|
153 | 152 | ns.update(self._launch_args) |
|
154 | 153 | return [ c.format(**ns) for c in cmd ] |
|
155 | 154 | |
|
156 | 155 | def _launch_kernel(self, kernel_cmd, **kw): |
|
157 | 156 | """actually launch the kernel |
|
158 | 157 | |
|
159 | 158 | override in a subclass to launch kernel subprocesses differently |
|
160 | 159 | """ |
|
161 | 160 | return launch_kernel(kernel_cmd, **kw) |
|
162 | 161 | |
|
163 | 162 | # Control socket used for polite kernel shutdown |
|
164 | 163 | |
|
165 | 164 | def _connect_control_socket(self): |
|
166 | 165 | if self._control_socket is None: |
|
167 | 166 | self._control_socket = self.connect_control() |
|
168 | 167 | self._control_socket.linger = 100 |
|
169 | 168 | |
|
170 | 169 | def _close_control_socket(self): |
|
171 | 170 | if self._control_socket is None: |
|
172 | 171 | return |
|
173 | 172 | self._control_socket.close() |
|
174 | 173 | self._control_socket = None |
|
175 | 174 | |
|
176 | 175 | def start_kernel(self, **kw): |
|
177 | 176 | """Starts a kernel on this host in a separate process. |
|
178 | 177 | |
|
179 | 178 | If random ports (port=0) are being used, this method must be called |
|
180 | 179 | before the channels are created. |
|
181 | 180 | |
|
182 | 181 | Parameters: |
|
183 | 182 | ----------- |
|
184 | 183 | **kw : optional |
|
185 | 184 | keyword arguments that are passed down to build the kernel_cmd |
|
186 | 185 | and to launch the kernel (e.g. Popen kwargs). |
|
187 | 186 | """ |
|
188 | if self.transport == 'tcp' and self.ip not in LOCAL_IPS: | |

187 | if self.transport == 'tcp' and not is_local_ip(self.ip): | |
|
189 | 188 | raise RuntimeError("Can only launch a kernel on a local interface. " |
|
190 | 189 | "Make sure that the '*_address' attributes are " |
|
191 | 190 | "configured properly. " |
|
192 | "Currently valid addresses are: %s"%LOCAL_IPS | |

191 | "Currently valid addresses are: %s" % local_ips() | |
|
193 | 192 | ) |
|
194 | 193 | |
|
195 | 194 | # write connection file / get default ports |
|
196 | 195 | self.write_connection_file() |
|
197 | 196 | |
|
198 | 197 | # save kwargs for use in restart |
|
199 | 198 | self._launch_args = kw.copy() |
|
200 | 199 | # build the Popen cmd |
|
201 | 200 | kernel_cmd = self.format_kernel_cmd(**kw) |
|
202 | 201 | # launch the kernel subprocess |
|
203 | 202 | self.kernel = self._launch_kernel(kernel_cmd, |
|
204 | 203 | ipython_kernel=self.ipython_kernel, |
|
205 | 204 | **kw) |
|
206 | 205 | self.start_restarter() |
|
207 | 206 | self._connect_control_socket() |
|
208 | 207 | |
|
209 | 208 | def _send_shutdown_request(self, restart=False): |
|
210 | 209 | """TODO: send a shutdown request via control channel""" |
|
211 | 210 | content = dict(restart=restart) |
|
212 | 211 | msg = self.session.msg("shutdown_request", content=content) |
|
213 | 212 | self.session.send(self._control_socket, msg) |
|
214 | 213 | |
|
215 | 214 | def shutdown_kernel(self, now=False, restart=False): |
|
216 | 215 | """Attempts to stop the kernel process cleanly. |

217 | 216 | |

218 | 217 | This attempts to shut down the kernel cleanly by: |
|
219 | 218 | |
|
220 | 219 | 1. Sending it a shutdown message over the shell channel. |
|
221 | 220 | 2. If that fails, the kernel is shutdown forcibly by sending it |
|
222 | 221 | a signal. |
|
223 | 222 | |
|
224 | 223 | Parameters |

225 | 224 | ---------- |
|
226 | 225 | now : bool |
|
227 | 226 | Should the kernel be forcibly killed *now*. This skips the |
|
228 | 227 | first, nice shutdown attempt. |
|
229 | 228 | restart: bool |
|
230 | 229 | Will this kernel be restarted after it is shut down. When this |
|
231 | 230 | is True, connection files will not be cleaned up. |
|
232 | 231 | """ |
|
233 | 232 | # Stop monitoring for restarting while we shutdown. |
|
234 | 233 | self.stop_restarter() |
|
235 | 234 | |
|
236 | 235 | # FIXME: Shutdown does not work on Windows due to ZMQ errors! |
|
237 | 236 | if sys.platform == 'win32': |
|
238 | 237 | self._kill_kernel() |
|
239 | 238 | return |
|
240 | 239 | |
|
241 | 240 | if now: |
|
242 | 241 | if self.has_kernel: |
|
243 | 242 | self._kill_kernel() |
|
244 | 243 | else: |
|
245 | 244 | # Don't send any additional kernel kill messages immediately, to give |
|
246 | 245 | # the kernel a chance to properly execute shutdown actions. Wait for at |
|
247 | 246 | # most 1s, checking every 0.1s. |
|
248 | 247 | self._send_shutdown_request(restart=restart) |
|
249 | 248 | for i in range(10): |
|
250 | 249 | if self.is_alive(): |
|
251 | 250 | time.sleep(0.1) |
|
252 | 251 | else: |
|
253 | 252 | break |
|
254 | 253 | else: |
|
255 | 254 | # OK, we've waited long enough. |
|
256 | 255 | if self.has_kernel: |
|
257 | 256 | self._kill_kernel() |
|
258 | 257 | |
|
259 | 258 | if not restart: |
|
260 | 259 | self.cleanup_connection_file() |
|
261 | 260 | self.cleanup_ipc_files() |
|
262 | 261 | else: |
|
263 | 262 | self.cleanup_ipc_files() |
|
264 | 263 | |
|
265 | 264 | def restart_kernel(self, now=False, **kw): |
|
266 | 265 | """Restarts a kernel with the arguments that were used to launch it. |
|
267 | 266 | |
|
268 | 267 | If the old kernel was launched with random ports, the same ports will be |
|
269 | 268 | used for the new kernel. The same connection file is used again. |
|
270 | 269 | |
|
271 | 270 | Parameters |
|
272 | 271 | ---------- |
|
273 | 272 | now : bool, optional |
|
274 | 273 | If True, the kernel is forcefully restarted *immediately*, without |
|
275 | 274 | having a chance to do any cleanup action. Otherwise the kernel is |
|
276 | 275 | given 1s to clean up before a forceful restart is issued. |
|
277 | 276 | |
|
278 | 277 | In all cases the kernel is restarted, the only difference is whether |
|
279 | 278 | it is given a chance to perform a clean shutdown or not. |
|
280 | 279 | |
|
281 | 280 | **kw : optional |
|
282 | 281 | Any options specified here will overwrite those used to launch the |
|
283 | 282 | kernel. |
|
284 | 283 | """ |
|
285 | 284 | if self._launch_args is None: |
|
286 | 285 | raise RuntimeError("Cannot restart the kernel. " |
|
287 | 286 | "No previous call to 'start_kernel'.") |
|
288 | 287 | else: |
|
289 | 288 | # Stop currently running kernel. |
|
290 | 289 | self.shutdown_kernel(now=now, restart=True) |
|
291 | 290 | |
|
292 | 291 | # Start new kernel. |
|
293 | 292 | self._launch_args.update(kw) |
|
294 | 293 | self.start_kernel(**self._launch_args) |
|
295 | 294 | |
|
296 | 295 | # FIXME: Messages get dropped in Windows due to probable ZMQ bug |
|
297 | 296 | # unless there is some delay here. |
|
298 | 297 | if sys.platform == 'win32': |
|
299 | 298 | time.sleep(0.2) |
|
300 | 299 | |
|
301 | 300 | @property |
|
302 | 301 | def has_kernel(self): |
|
303 | 302 | """Has a kernel been started that we are managing.""" |
|
304 | 303 | return self.kernel is not None |
|
305 | 304 | |
|
306 | 305 | def _kill_kernel(self): |
|
307 | 306 | """Kill the running kernel. |
|
308 | 307 | |
|
309 | 308 | This is a private method, callers should use shutdown_kernel(now=True). |
|
310 | 309 | """ |
|
311 | 310 | if self.has_kernel: |
|
312 | 311 | |
|
313 | 312 | # Signal the kernel to terminate (sends SIGKILL on Unix and calls |
|
314 | 313 | # TerminateProcess() on Win32). |
|
315 | 314 | try: |
|
316 | 315 | self.kernel.kill() |
|
317 | 316 | except OSError as e: |
|
318 | 317 | # In Windows, we will get an Access Denied error if the process |
|
319 | 318 | # has already terminated. Ignore it. |
|
320 | 319 | if sys.platform == 'win32': |
|
321 | 320 | if e.winerror != 5: |
|
322 | 321 | raise |
|
323 | 322 | # On Unix, we may get an ESRCH error if the process has already |
|
324 | 323 | # terminated. Ignore it. |
|
325 | 324 | else: |
|
326 | 325 | from errno import ESRCH |
|
327 | 326 | if e.errno != ESRCH: |
|
328 | 327 | raise |
|
329 | 328 | |
|
330 | 329 | # Block until the kernel terminates. |
|
331 | 330 | self.kernel.wait() |
|
332 | 331 | self.kernel = None |
|
333 | 332 | else: |
|
334 | 333 | raise RuntimeError("Cannot kill kernel. No kernel is running!") |
|
335 | 334 | |
|
336 | 335 | def interrupt_kernel(self): |
|
337 | 336 | """Interrupts the kernel by sending it a signal. |
|
338 | 337 | |
|
339 | 338 | Unlike ``signal_kernel``, this operation is well supported on all |
|
340 | 339 | platforms. |
|
341 | 340 | """ |
|
342 | 341 | if self.has_kernel: |
|
343 | 342 | if sys.platform == 'win32': |
|
344 | 343 | from .zmq.parentpoller import ParentPollerWindows as Poller |
|
345 | 344 | Poller.send_interrupt(self.kernel.win32_interrupt_event) |
|
346 | 345 | else: |
|
347 | 346 | self.kernel.send_signal(signal.SIGINT) |
|
348 | 347 | else: |
|
349 | 348 | raise RuntimeError("Cannot interrupt kernel. No kernel is running!") |
|
350 | 349 | |
|
351 | 350 | def signal_kernel(self, signum): |
|
352 | 351 | """Sends a signal to the kernel. |
|
353 | 352 | |
|
354 | 353 | Note that since only SIGTERM is supported on Windows, this function is |
|
355 | 354 | only useful on Unix systems. |
|
356 | 355 | """ |
|
357 | 356 | if self.has_kernel: |
|
358 | 357 | self.kernel.send_signal(signum) |
|
359 | 358 | else: |
|
360 | 359 | raise RuntimeError("Cannot signal kernel. No kernel is running!") |
|
361 | 360 | |
|
362 | 361 | def is_alive(self): |
|
363 | 362 | """Is the kernel process still running?""" |
|
364 | 363 | if self.has_kernel: |
|
365 | 364 | if self.kernel.poll() is None: |
|
366 | 365 | return True |
|
367 | 366 | else: |
|
368 | 367 | return False |
|
369 | 368 | else: |
|
370 | 369 | # we don't have a kernel |
|
371 | 370 | return False |
|
372 | 371 | |
|
373 | 372 | |
|
374 | 373 | #----------------------------------------------------------------------------- |
|
375 | 374 | # ABC Registration |
|
376 | 375 | #----------------------------------------------------------------------------- |
|
377 | 376 | |
|
378 | 377 | KernelManagerABC.register(KernelManager) |
|
379 | 378 |
@@ -1,78 +1,78 b'' | |||
|
1 | 1 | """Tests for the notebook kernel and session manager.""" |
|
2 | 2 | |
|
3 | 3 | from subprocess import PIPE |
|
4 | 4 | import time |
|
5 | 5 | from unittest import TestCase |
|
6 | 6 | |
|
7 | 7 | from IPython.testing import decorators as dec |
|
8 | 8 | |
|
9 | 9 | from IPython.config.loader import Config |
|
10 | from IPython.utils.localinterfaces import LOCALHOST | |

10 | from IPython.utils.localinterfaces import localhost | |
|
11 | 11 | from IPython.kernel import KernelManager |
|
12 | 12 | from IPython.kernel.multikernelmanager import MultiKernelManager |
|
13 | 13 | |
|
14 | 14 | class TestKernelManager(TestCase): |
|
15 | 15 | |
|
16 | 16 | def _get_tcp_km(self): |
|
17 | 17 | c = Config() |
|
18 | 18 | km = MultiKernelManager(config=c) |
|
19 | 19 | return km |
|
20 | 20 | |
|
21 | 21 | def _get_ipc_km(self): |
|
22 | 22 | c = Config() |
|
23 | 23 | c.KernelManager.transport = 'ipc' |
|
24 | 24 | c.KernelManager.ip = 'test' |
|
25 | 25 | km = MultiKernelManager(config=c) |
|
26 | 26 | return km |
|
27 | 27 | |
|
28 | 28 | def _run_lifecycle(self, km): |
|
29 | 29 | kid = km.start_kernel(stdout=PIPE, stderr=PIPE) |
|
30 | 30 | self.assertTrue(km.is_alive(kid)) |
|
31 | 31 | self.assertTrue(kid in km) |
|
32 | 32 | self.assertTrue(kid in km.list_kernel_ids()) |
|
33 | 33 | self.assertEqual(len(km),1) |
|
34 | 34 | km.restart_kernel(kid, now=True) |
|
35 | 35 | self.assertTrue(km.is_alive(kid)) |
|
36 | 36 | self.assertTrue(kid in km.list_kernel_ids()) |
|
37 | 37 | km.interrupt_kernel(kid) |
|
38 | 38 | k = km.get_kernel(kid) |
|
39 | 39 | self.assertTrue(isinstance(k, KernelManager)) |
|
40 | 40 | km.shutdown_kernel(kid, now=True) |
|
41 | 41 | self.assertTrue(not kid in km) |
|
42 | 42 | |
|
43 | 43 | def _run_cinfo(self, km, transport, ip): |
|
44 | 44 | kid = km.start_kernel(stdout=PIPE, stderr=PIPE) |
|
45 | 45 | k = km.get_kernel(kid) |
|
46 | 46 | cinfo = km.get_connection_info(kid) |
|
47 | 47 | self.assertEqual(transport, cinfo['transport']) |
|
48 | 48 | self.assertEqual(ip, cinfo['ip']) |
|
49 | 49 | self.assertTrue('stdin_port' in cinfo) |
|
50 | 50 | self.assertTrue('iopub_port' in cinfo) |
|
51 | 51 | stream = km.connect_iopub(kid) |
|
52 | 52 | stream.close() |
|
53 | 53 | self.assertTrue('shell_port' in cinfo) |
|
54 | 54 | stream = km.connect_shell(kid) |
|
55 | 55 | stream.close() |
|
56 | 56 | self.assertTrue('hb_port' in cinfo) |
|
57 | 57 | stream = km.connect_hb(kid) |
|
58 | 58 | stream.close() |
|
59 | 59 | km.shutdown_kernel(kid, now=True) |
|
60 | 60 | |
|
61 | 61 | def test_tcp_lifecycle(self): |
|
62 | 62 | km = self._get_tcp_km() |
|
63 | 63 | self._run_lifecycle(km) |
|
64 | 64 | |
|
65 | 65 | def test_tcp_cinfo(self): |
|
66 | 66 | km = self._get_tcp_km() |
|
67 | self._run_cinfo(km, 'tcp', LOCALHOST) | |

67 | self._run_cinfo(km, 'tcp', localhost()) | |
|
68 | 68 | |
|
69 | 69 | @dec.skip_win32 |
|
70 | 70 | def test_ipc_lifecycle(self): |
|
71 | 71 | km = self._get_ipc_km() |
|
72 | 72 | self._run_lifecycle(km) |
|
73 | 73 | |
|
74 | 74 | @dec.skip_win32 |
|
75 | 75 | def test_ipc_cinfo(self): |
|
76 | 76 | km = self._get_ipc_km() |
|
77 | 77 | self._run_cinfo(km, 'ipc', 'test') |
|
78 | 78 |
@@ -1,65 +1,67 b'' | |||
|
1 | 1 | """The client and server for a basic ping-pong style heartbeat. |
|
2 | 2 | """ |
|
3 | 3 | |
|
4 | 4 | #----------------------------------------------------------------------------- |
|
5 | 5 | # Copyright (C) 2008-2011 The IPython Development Team |
|
6 | 6 | # |
|
7 | 7 | # Distributed under the terms of the BSD License. The full license is in |
|
8 | 8 | # the file COPYING, distributed as part of this software. |
|
9 | 9 | #----------------------------------------------------------------------------- |
|
10 | 10 | |
|
11 | 11 | #----------------------------------------------------------------------------- |
|
12 | 12 | # Imports |
|
13 | 13 | #----------------------------------------------------------------------------- |
|
14 | 14 | |
|
15 | 15 | import errno |
|
16 | 16 | import os |
|
17 | 17 | import socket |
|
18 | 18 | from threading import Thread |
|
19 | 19 | |
|
20 | 20 | import zmq |
|
21 | 21 | |
|
22 | from IPython.utils.localinterfaces import LOCALHOST | |

22 | from IPython.utils.localinterfaces import localhost | |
|
23 | 23 | |
|
24 | 24 | #----------------------------------------------------------------------------- |
|
25 | 25 | # Code |
|
26 | 26 | #----------------------------------------------------------------------------- |
|
27 | 27 | |
|
28 | 28 | |
|
29 | 29 | class Heartbeat(Thread): |
|
30 | 30 | "A simple ping-pong style heartbeat that runs in a thread." |
|
31 | 31 | |
|
32 | def __init__(self, context, addr=('tcp', LOCALHOST, 0)): | |

32 | def __init__(self, context, addr=None): | |
|
33 | if addr is None: | |
|
34 | addr = ('tcp', localhost(), 0) | |
|
33 | 35 | Thread.__init__(self) |
|
34 | 36 | self.context = context |
|
35 | 37 | self.transport, self.ip, self.port = addr |
|
36 | 38 | if self.port == 0: |
|
37 | 39 | if addr[0] == 'tcp': |
|
38 | 40 | s = socket.socket() |
|
39 | 41 | # '*' means all interfaces to 0MQ, which is '' to socket.socket |
|
40 | 42 | s.bind(('' if self.ip == '*' else self.ip, 0)) |
|
41 | 43 | self.port = s.getsockname()[1] |
|
42 | 44 | s.close() |
|
43 | 45 | elif addr[0] == 'ipc': |
|
44 | 46 | self.port = 1 |
|
45 | 47 | while os.path.exists("%s-%s" % (self.ip, self.port)): |
|
46 | 48 | self.port = self.port + 1 |
|
47 | 49 | else: |
|
48 | 50 | raise ValueError("Unrecognized zmq transport: %s" % addr[0]) |
|
49 | 51 | self.addr = (self.ip, self.port) |
|
50 | 52 | self.daemon = True |
|
51 | 53 | |
|
52 | 54 | def run(self): |
|
53 | 55 | self.socket = self.context.socket(zmq.REP) |
|
54 | 56 | c = ':' if self.transport == 'tcp' else '-' |
|
55 | 57 | self.socket.bind('%s://%s' % (self.transport, self.ip) + c + str(self.port)) |
|
56 | 58 | while True: |
|
57 | 59 | try: |
|
58 | 60 | zmq.device(zmq.FORWARDER, self.socket, self.socket) |
|
59 | 61 | except zmq.ZMQError as e: |
|
60 | 62 | if e.errno == errno.EINTR: |
|
61 | 63 | continue |
|
62 | 64 | else: |
|
63 | 65 | raise |
|
64 | 66 | else: |
|
65 | 67 | break |
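A sketch of the client half of this ping-pong: the FORWARDER device echoes whatever bytes arrive, so liveness is simply "the echo came back in time". The port is a placeholder.

    import zmq

    ctx = zmq.Context.instance()
    req = ctx.socket(zmq.REQ)
    req.connect('tcp://127.0.0.1:5555')   # placeholder heartbeat port
    req.send(b'ping')
    if req.poll(timeout=1000):            # wait up to 1s for the echo
        assert req.recv() == b'ping'      # the heart is beating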
@@ -1,472 +1,473 b'' | |||
|
1 | 1 | """An Application for launching a kernel |
|
2 | 2 | |
|
3 | 3 | Authors |
|
4 | 4 | ------- |
|
5 | 5 | * MinRK |
|
6 | 6 | """ |
|
7 | 7 | #----------------------------------------------------------------------------- |
|
8 | 8 | # Copyright (C) 2011 The IPython Development Team |
|
9 | 9 | # |
|
10 | 10 | # Distributed under the terms of the BSD License. The full license is in |
|
11 | 11 | # the file COPYING.txt, distributed as part of this software. |
|
12 | 12 | #----------------------------------------------------------------------------- |
|
13 | 13 | |
|
14 | 14 | #----------------------------------------------------------------------------- |
|
15 | 15 | # Imports |
|
16 | 16 | #----------------------------------------------------------------------------- |
|
17 | 17 | |
|
18 | 18 | from __future__ import print_function |
|
19 | 19 | |
|
20 | 20 | # Standard library imports |
|
21 | 21 | import atexit |
|
22 | 22 | import json |
|
23 | 23 | import os |
|
24 | 24 | import sys |
|
25 | 25 | import signal |
|
26 | 26 | |
|
27 | 27 | # System library imports |
|
28 | 28 | import zmq |
|
29 | 29 | from zmq.eventloop import ioloop |
|
30 | 30 | from zmq.eventloop.zmqstream import ZMQStream |
|
31 | 31 | |
|
32 | 32 | # IPython imports |
|
33 | 33 | from IPython.core.ultratb import FormattedTB |
|
34 | 34 | from IPython.core.application import ( |
|
35 | 35 | BaseIPythonApplication, base_flags, base_aliases, catch_config_error |
|
36 | 36 | ) |
|
37 | 37 | from IPython.core.profiledir import ProfileDir |
|
38 | 38 | from IPython.core.shellapp import ( |
|
39 | 39 | InteractiveShellApp, shell_flags, shell_aliases |
|
40 | 40 | ) |
|
41 | 41 | from IPython.utils import io |
|
42 | from IPython.utils.localinterfaces import LOCALHOST | |

42 | from IPython.utils.localinterfaces import localhost | |
|
43 | 43 | from IPython.utils.path import filefind |
|
44 | 44 | from IPython.utils.py3compat import str_to_bytes |
|
45 | 45 | from IPython.utils.traitlets import ( |
|
46 | 46 | Any, Instance, Dict, Unicode, Integer, Bool, CaselessStrEnum, |
|
47 | 47 | DottedObjectName, |
|
48 | 48 | ) |
|
49 | 49 | from IPython.utils.importstring import import_item |
|
50 | 50 | from IPython.kernel import write_connection_file |
|
51 | 51 | |
|
52 | 52 | # local imports |
|
53 | 53 | from heartbeat import Heartbeat |
|
54 | 54 | from ipkernel import Kernel |
|
55 | 55 | from parentpoller import ParentPollerUnix, ParentPollerWindows |
|
56 | 56 | from session import ( |
|
57 | 57 | Session, session_flags, session_aliases, default_secure, |
|
58 | 58 | ) |
|
59 | 59 | from zmqshell import ZMQInteractiveShell |
|
60 | 60 | |
|
61 | 61 | #----------------------------------------------------------------------------- |
|
62 | 62 | # Flags and Aliases |
|
63 | 63 | #----------------------------------------------------------------------------- |
|
64 | 64 | |
|
65 | 65 | kernel_aliases = dict(base_aliases) |
|
66 | 66 | kernel_aliases.update({ |
|
67 | 67 | 'ip' : 'IPKernelApp.ip', |
|
68 | 68 | 'hb' : 'IPKernelApp.hb_port', |
|
69 | 69 | 'shell' : 'IPKernelApp.shell_port', |
|
70 | 70 | 'iopub' : 'IPKernelApp.iopub_port', |
|
71 | 71 | 'stdin' : 'IPKernelApp.stdin_port', |
|
72 | 72 | 'control' : 'IPKernelApp.control_port', |
|
73 | 73 | 'f' : 'IPKernelApp.connection_file', |
|
74 | 74 | 'parent': 'IPKernelApp.parent_handle', |
|
75 | 75 | 'transport': 'IPKernelApp.transport', |
|
76 | 76 | }) |
|
77 | 77 | if sys.platform.startswith('win'): |
|
78 | 78 | kernel_aliases['interrupt'] = 'IPKernelApp.interrupt' |
|
79 | 79 | |
|
80 | 80 | kernel_flags = dict(base_flags) |
|
81 | 81 | kernel_flags.update({ |
|
82 | 82 | 'no-stdout' : ( |
|
83 | 83 | {'IPKernelApp' : {'no_stdout' : True}}, |
|
84 | 84 | "redirect stdout to the null device"), |
|
85 | 85 | 'no-stderr' : ( |
|
86 | 86 | {'IPKernelApp' : {'no_stderr' : True}}, |
|
87 | 87 | "redirect stderr to the null device"), |
|
88 | 88 | 'pylab' : ( |
|
89 | 89 | {'IPKernelApp' : {'pylab' : 'auto'}}, |
|
90 | 90 | """Pre-load matplotlib and numpy for interactive use with |
|
91 | 91 | the default matplotlib backend."""), |
|
92 | 92 | }) |
|
93 | 93 | |
|
94 | 94 | # inherit flags&aliases for any IPython shell apps |
|
95 | 95 | kernel_aliases.update(shell_aliases) |
|
96 | 96 | kernel_flags.update(shell_flags) |
|
97 | 97 | |
|
98 | 98 | # inherit flags&aliases for Sessions |
|
99 | 99 | kernel_aliases.update(session_aliases) |
|
100 | 100 | kernel_flags.update(session_flags) |
|
101 | 101 | |
|
102 | 102 | _ctrl_c_message = """\ |
|
103 | 103 | NOTE: When using the `ipython kernel` entry point, Ctrl-C will not work. |
|
104 | 104 | |
|
105 | 105 | To exit, you will have to explicitly quit this process, by either sending |
|
106 | 106 | "quit" from a client, or using Ctrl-\\ in UNIX-like environments. |
|
107 | 107 | |
|
108 | 108 | To read more about this, see https://github.com/ipython/ipython/issues/2049 |
|
109 | 109 | |
|
110 | 110 | """ |
|
111 | 111 | |
|
112 | 112 | #----------------------------------------------------------------------------- |
|
113 | 113 | # Application class for starting an IPython Kernel |
|
114 | 114 | #----------------------------------------------------------------------------- |
|
115 | 115 | |
|
116 | 116 | class IPKernelApp(BaseIPythonApplication, InteractiveShellApp): |
|
117 | 117 | name='ipkernel' |
|
118 | 118 | aliases = Dict(kernel_aliases) |
|
119 | 119 | flags = Dict(kernel_flags) |
|
120 | 120 | classes = [Kernel, ZMQInteractiveShell, ProfileDir, Session] |
|
121 | 121 | # the kernel class, as an importstring |
|
122 | 122 | kernel_class = DottedObjectName('IPython.kernel.zmq.ipkernel.Kernel', config=True, |
|
123 | 123 | help="""The Kernel subclass to be used. |
|
124 | 124 | |
|
125 | 125 | This should allow easy re-use of the IPKernelApp entry point |
|
126 | 126 | to configure and launch kernels other than IPython's own. |
|
127 | 127 | """) |
|
128 | 128 | kernel = Any() |
|
129 | 129 | poller = Any() # don't restrict this even though current pollers are all Threads |
|
130 | 130 | heartbeat = Instance(Heartbeat) |
|
131 | 131 | session = Instance('IPython.kernel.zmq.session.Session') |
|
132 | 132 | ports = Dict() |
|
133 | 133 | |
|
134 | 134 | # ipkernel doesn't get its own config file |
|
135 | 135 | def _config_file_name_default(self): |
|
136 | 136 | return 'ipython_config.py' |
|
137 | 137 | |
|
138 | 138 | # inherit config file name from parent: |
|
139 | 139 | parent_appname = Unicode(config=True) |
|
140 | 140 | def _parent_appname_changed(self, name, old, new): |
|
141 | 141 | if self.config_file_specified: |
|
142 | 142 | # it was manually specified, ignore |
|
143 | 143 | return |
|
144 | 144 | self.config_file_name = new.replace('-','_') + u'_config.py' |
|
145 | 145 | # don't let this count as specifying the config file |
|
146 | 146 | self.config_file_specified.remove(self.config_file_name) |
|
147 | 147 | |
|
148 | 148 | # connection info: |
|
149 | 149 | transport = CaselessStrEnum(['tcp', 'ipc'], default_value='tcp', config=True) |
|
150 | 150 | ip = Unicode(config=True, |
|
151 | 151 | help="Set the IP or interface on which the kernel will listen.") |
|
152 | 152 | def _ip_default(self): |
|
153 | 153 | if self.transport == 'ipc': |
|
154 | 154 | if self.connection_file: |
|
155 | 155 | return os.path.splitext(self.abs_connection_file)[0] + '-ipc' |
|
156 | 156 | else: |
|
157 | 157 | return 'kernel-ipc' |
|
158 | 158 | else: |
|
159 | return LOCALHOST | |

159 | return localhost() | |
|
160 | ||
|
160 | 161 | hb_port = Integer(0, config=True, help="set the heartbeat port [default: random]") |
|
161 | 162 | shell_port = Integer(0, config=True, help="set the shell (ROUTER) port [default: random]") |
|
162 | 163 | iopub_port = Integer(0, config=True, help="set the iopub (PUB) port [default: random]") |
|
163 | 164 | stdin_port = Integer(0, config=True, help="set the stdin (ROUTER) port [default: random]") |
|
164 | 165 | control_port = Integer(0, config=True, help="set the control (ROUTER) port [default: random]") |
|
165 | 166 | connection_file = Unicode('', config=True, |
|
166 | 167 | help="""JSON file in which to store connection info [default: kernel-<pid>.json] |
|
167 | 168 | |
|
168 | 169 | This file will contain the IP, ports, and authentication key needed to connect |
|
169 | 170 | clients to this kernel. By default, this file will be created in the security dir |
|
170 | 171 | of the current profile, but can be specified by absolute path. |
|
171 | 172 | """) |
|
172 | 173 | @property |
|
173 | 174 | def abs_connection_file(self): |
|
174 | 175 | if os.path.basename(self.connection_file) == self.connection_file: |
|
175 | 176 | return os.path.join(self.profile_dir.security_dir, self.connection_file) |
|
176 | 177 | else: |
|
177 | 178 | return self.connection_file |
|
178 | 179 | |
|
179 | 180 | |
|
180 | 181 | # streams, etc. |
|
181 | 182 | no_stdout = Bool(False, config=True, help="redirect stdout to the null device") |
|
182 | 183 | no_stderr = Bool(False, config=True, help="redirect stderr to the null device") |
|
183 | 184 | outstream_class = DottedObjectName('IPython.kernel.zmq.iostream.OutStream', |
|
184 | 185 | config=True, help="The importstring for the OutStream factory") |
|
185 | 186 | displayhook_class = DottedObjectName('IPython.kernel.zmq.displayhook.ZMQDisplayHook', |
|
186 | 187 | config=True, help="The importstring for the DisplayHook factory") |
|
187 | 188 | |
|
188 | 189 | # polling |
|
189 | 190 | parent_handle = Integer(0, config=True, |
|
190 | 191 | help="""kill this process if its parent dies. On Windows, the argument |
|
191 | 192 | specifies the HANDLE of the parent process, otherwise it is simply boolean. |
|
192 | 193 | """) |
|
193 | 194 | interrupt = Integer(0, config=True, |
|
194 | 195 | help="""ONLY USED ON WINDOWS |
|
195 | 196 | Interrupt this process when the parent is signaled. |
|
196 | 197 | """) |
|
197 | 198 | |
|
198 | 199 | def init_crash_handler(self): |
|
199 | 200 | # Install minimal exception handling |
|
200 | 201 | sys.excepthook = FormattedTB(mode='Verbose', color_scheme='NoColor', |
|
201 | 202 | ostream=sys.__stdout__) |
|
202 | 203 | |
|
203 | 204 | def init_poller(self): |
|
204 | 205 | if sys.platform == 'win32': |
|
205 | 206 | if self.interrupt or self.parent_handle: |
|
206 | 207 | self.poller = ParentPollerWindows(self.interrupt, self.parent_handle) |
|
207 | 208 | elif self.parent_handle: |
|
208 | 209 | self.poller = ParentPollerUnix() |
|
209 | 210 | |
|
210 | 211 | def _bind_socket(self, s, port): |
|
211 | 212 | iface = '%s://%s' % (self.transport, self.ip) |
|
212 | 213 | if self.transport == 'tcp': |
|
213 | 214 | if port <= 0: |
|
214 | 215 | port = s.bind_to_random_port(iface) |
|
215 | 216 | else: |
|
216 | 217 | s.bind("tcp://%s:%i" % (self.ip, port)) |
|
217 | 218 | elif self.transport == 'ipc': |
|
218 | 219 | if port <= 0: |
|
219 | 220 | port = 1 |
|
220 | 221 | path = "%s-%i" % (self.ip, port) |
|
221 | 222 | while os.path.exists(path): |
|
222 | 223 | port = port + 1 |
|
223 | 224 | path = "%s-%i" % (self.ip, port) |
|
224 | 225 | else: |
|
225 | 226 | path = "%s-%i" % (self.ip, port) |
|
226 | 227 | s.bind("ipc://%s" % path) |
|
227 | 228 | return port |
|
228 | 229 | |
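For the tcp branch with port <= 0, the work is a one-liner in pyzmq; a standalone sketch of just that branch (socket type and interface illustrative):

    import zmq

    ctx = zmq.Context.instance()
    s = ctx.socket(zmq.ROUTER)
    port = s.bind_to_random_port('tcp://127.0.0.1')  # what port<=0 triggers
    print(port)                                      # OS-assigned free port
    s.close()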
|
229 | 230 | def load_connection_file(self): |
|
230 | 231 | """load ip/port/hmac config from JSON connection file""" |
|
231 | 232 | try: |
|
232 | 233 | fname = filefind(self.connection_file, ['.', self.profile_dir.security_dir]) |
|
233 | 234 | except IOError: |
|
234 | 235 | self.log.debug("Connection file not found: %s", self.connection_file) |
|
235 | 236 | # This means I own it, so I will clean it up: |
|
236 | 237 | atexit.register(self.cleanup_connection_file) |
|
237 | 238 | return |
|
238 | 239 | self.log.debug(u"Loading connection file %s", fname) |
|
239 | 240 | with open(fname) as f: |
|
240 | 241 | s = f.read() |
|
241 | 242 | cfg = json.loads(s) |
|
242 | 243 | self.transport = cfg.get('transport', self.transport) |
|
243 | 244 | if self.ip == self._ip_default() and 'ip' in cfg: |
|
244 | 245 | # not overridden by config or cl_args |
|
245 | 246 | self.ip = cfg['ip'] |
|
246 | 247 | for channel in ('hb', 'shell', 'iopub', 'stdin', 'control'): |
|
247 | 248 | name = channel + '_port' |
|
248 | 249 | if getattr(self, name) == 0 and name in cfg: |
|
249 | 250 | # not overridden by config or cl_args |
|
250 | 251 | setattr(self, name, cfg[name]) |
|
251 | 252 | if 'key' in cfg: |
|
252 | 253 | self.config.Session.key = str_to_bytes(cfg['key']) |
|
253 | 254 | |
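The precedence rule above in short: only traits still at their defaults are filled in from the file, so explicit CLI or config values always win. A hypothetical invocation:

    # ipython kernel -f kernel-1234.json --shell=55555
    # shell_port stays 55555 even if kernel-1234.json says otherwise.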
|
254 | 255 | def write_connection_file(self): |
|
255 | 256 | """write connection info to JSON file""" |
|
256 | 257 | cf = self.abs_connection_file |
|
257 | 258 | self.log.debug("Writing connection file: %s", cf) |
|
258 | 259 | write_connection_file(cf, ip=self.ip, key=self.session.key, transport=self.transport, |
|
259 | 260 | shell_port=self.shell_port, stdin_port=self.stdin_port, hb_port=self.hb_port, |
|
260 | 261 | iopub_port=self.iopub_port, control_port=self.control_port) |
|
261 | 262 | |
|
262 | 263 | def cleanup_connection_file(self): |
|
263 | 264 | cf = self.abs_connection_file |
|
264 | 265 | self.log.debug("Cleaning up connection file: %s", cf) |
|
265 | 266 | try: |
|
266 | 267 | os.remove(cf) |
|
267 | 268 | except (IOError, OSError): |
|
268 | 269 | pass |
|
269 | 270 | |
|
270 | 271 | self.cleanup_ipc_files() |
|
271 | 272 | |
|
272 | 273 | def cleanup_ipc_files(self): |
|
273 | 274 | """cleanup ipc files if we wrote them""" |
|
274 | 275 | if self.transport != 'ipc': |
|
275 | 276 | return |
|
276 | 277 | for port in (self.shell_port, self.iopub_port, self.stdin_port, self.hb_port, self.control_port): |
|
277 | 278 | ipcfile = "%s-%i" % (self.ip, port) |
|
278 | 279 | try: |
|
279 | 280 | os.remove(ipcfile) |
|
280 | 281 | except (IOError, OSError): |
|
281 | 282 | pass |
|
282 | 283 | |
|
283 | 284 | def init_connection_file(self): |
|
284 | 285 | if not self.connection_file: |
|
285 | 286 | self.connection_file = "kernel-%s.json"%os.getpid() |
|
286 | 287 | try: |
|
287 | 288 | self.load_connection_file() |
|
288 | 289 | except Exception: |
|
289 | 290 | self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True) |
|
290 | 291 | self.exit(1) |
|
291 | 292 | |
|
292 | 293 | def init_sockets(self): |
|
293 | 294 | # Create a context, a session, and the kernel sockets. |
|
294 | 295 | self.log.info("Starting the kernel at pid: %i", os.getpid()) |
|
295 | 296 | context = zmq.Context.instance() |
|
296 | 297 | # Uncomment this to try closing the context. |
|
297 | 298 | # atexit.register(context.term) |
|
298 | 299 | |
|
299 | 300 | self.shell_socket = context.socket(zmq.ROUTER) |
|
300 | 301 | self.shell_port = self._bind_socket(self.shell_socket, self.shell_port) |
|
301 | 302 | self.log.debug("shell ROUTER Channel on port: %i" % self.shell_port) |
|
302 | 303 | |
|
303 | 304 | self.iopub_socket = context.socket(zmq.PUB) |
|
304 | 305 | self.iopub_port = self._bind_socket(self.iopub_socket, self.iopub_port) |
|
305 | 306 | self.log.debug("iopub PUB Channel on port: %i" % self.iopub_port) |
|
306 | 307 | |
|
307 | 308 | self.stdin_socket = context.socket(zmq.ROUTER) |
|
308 | 309 | self.stdin_port = self._bind_socket(self.stdin_socket, self.stdin_port) |
|
309 | 310 | self.log.debug("stdin ROUTER Channel on port: %i" % self.stdin_port) |
|
310 | 311 | |
|
311 | 312 | self.control_socket = context.socket(zmq.ROUTER) |
|
312 | 313 | self.control_port = self._bind_socket(self.control_socket, self.control_port) |
|
313 | 314 | self.log.debug("control ROUTER Channel on port: %i" % self.control_port) |
|
314 | 315 | |
|
315 | 316 | def init_heartbeat(self): |
|
316 | 317 | """start the heart beating""" |
|
317 | 318 | # heartbeat doesn't share context, because it mustn't be blocked |
|
318 | 319 | # by the GIL, which is accessed by libzmq when freeing zero-copy messages |
|
319 | 320 | hb_ctx = zmq.Context() |
|
320 | 321 | self.heartbeat = Heartbeat(hb_ctx, (self.transport, self.ip, self.hb_port)) |
|
321 | 322 | self.hb_port = self.heartbeat.port |
|
322 | 323 | self.log.debug("Heartbeat REP Channel on port: %i" % self.hb_port) |
|
323 | 324 | self.heartbeat.start() |
|
324 | 325 | |
|
325 | 326 | def log_connection_info(self): |
|
326 | 327 | """display connection info, and store ports""" |
|
327 | 328 | basename = os.path.basename(self.connection_file) |
|
328 | 329 | if basename == self.connection_file or \ |
|
329 | 330 | os.path.dirname(self.connection_file) == self.profile_dir.security_dir: |
|
330 | 331 | # use shortname |
|
331 | 332 | tail = basename |
|
332 | 333 | if self.profile != 'default': |
|
333 | 334 | tail += " --profile %s" % self.profile |
|
334 | 335 | else: |
|
335 | 336 | tail = self.connection_file |
|
336 | 337 | lines = [ |
|
337 | 338 | "To connect another client to this kernel, use:", |
|
338 | 339 | " --existing %s" % tail, |
|
339 | 340 | ] |
|
340 | 341 | # log connection info |
|
341 | 342 | # info-level, so often not shown. |
|
342 | 343 | # frontends should use the %connect_info magic |
|
343 | 344 | # to see the connection info |
|
344 | 345 | for line in lines: |
|
345 | 346 | self.log.info(line) |
|
346 | 347 | # also raw print to the terminal if no parent_handle (`ipython kernel`) |
|
347 | 348 | if not self.parent_handle: |
|
348 | 349 | io.rprint(_ctrl_c_message) |
|
349 | 350 | for line in lines: |
|
350 | 351 | io.rprint(line) |
|
351 | 352 | |
|
352 | 353 | self.ports = dict(shell=self.shell_port, iopub=self.iopub_port, |
|
353 | 354 | stdin=self.stdin_port, hb=self.hb_port, |
|
354 | 355 | control=self.control_port) |
|
355 | 356 | |
|
356 | 357 | def init_session(self): |
|
357 | 358 | """create our session object""" |
|
358 | 359 | default_secure(self.config) |
|
359 | 360 | self.session = Session(parent=self, username=u'kernel') |
|
360 | 361 | |
|
361 | 362 | def init_blackhole(self): |
|
362 | 363 | """redirects stdout/stderr to devnull if necessary""" |
|
363 | 364 | if self.no_stdout or self.no_stderr: |
|
364 | 365 | blackhole = open(os.devnull, 'w') |
|
365 | 366 | if self.no_stdout: |
|
366 | 367 | sys.stdout = sys.__stdout__ = blackhole |
|
367 | 368 | if self.no_stderr: |
|
368 | 369 | sys.stderr = sys.__stderr__ = blackhole |
|
369 | 370 | |
|
370 | 371 | def init_io(self): |
|
371 | 372 | """Redirect input streams and set a display hook.""" |
|
372 | 373 | if self.outstream_class: |
|
373 | 374 | outstream_factory = import_item(str(self.outstream_class)) |
|
374 | 375 | sys.stdout = outstream_factory(self.session, self.iopub_socket, u'stdout') |
|
375 | 376 | sys.stderr = outstream_factory(self.session, self.iopub_socket, u'stderr') |
|
376 | 377 | if self.displayhook_class: |
|
377 | 378 | displayhook_factory = import_item(str(self.displayhook_class)) |
|
378 | 379 | sys.displayhook = displayhook_factory(self.session, self.iopub_socket) |
|
379 | 380 | |
|
380 | 381 | def init_signal(self): |
|
381 | 382 | signal.signal(signal.SIGINT, signal.SIG_IGN) |
|
382 | 383 | |
|
383 | 384 | def init_kernel(self): |
|
384 | 385 | """Create the Kernel object itself""" |
|
385 | 386 | shell_stream = ZMQStream(self.shell_socket) |
|
386 | 387 | control_stream = ZMQStream(self.control_socket) |
|
387 | 388 | |
|
388 | 389 | kernel_factory = import_item(str(self.kernel_class)) |
|
389 | 390 | |
|
390 | 391 | kernel = kernel_factory(parent=self, session=self.session, |
|
391 | 392 | shell_streams=[shell_stream, control_stream], |
|
392 | 393 | iopub_socket=self.iopub_socket, |
|
393 | 394 | stdin_socket=self.stdin_socket, |
|
394 | 395 | log=self.log, |
|
395 | 396 | profile_dir=self.profile_dir, |
|
396 | 397 | user_ns=self.user_ns, |
|
397 | 398 | ) |
|
398 | 399 | kernel.record_ports(self.ports) |
|
399 | 400 | self.kernel = kernel |
|
400 | 401 | |
|
401 | 402 | def init_gui_pylab(self): |
|
402 | 403 | """Enable GUI event loop integration, taking pylab into account.""" |
|
403 | 404 | |
|
404 | 405 | # Provide a wrapper for :meth:`InteractiveShellApp.init_gui_pylab` |
|
405 | 406 | # to ensure that any exception is printed straight to stderr. |
|
406 | 407 | # Normally _showtraceback associates the reply with an execution, |
|
407 | 408 | # which means frontends will never draw it, as this exception |
|
408 | 409 | # is not associated with any execute request. |
|
409 | 410 | |
|
410 | 411 | shell = self.shell |
|
411 | 412 | _showtraceback = shell._showtraceback |
|
412 | 413 | try: |
|
413 | 414 | # replace pyerr-sending traceback with stderr |
|
414 | 415 | def print_tb(etype, evalue, stb): |
|
415 | 416 | print ("GUI event loop or pylab initialization failed", |
|
416 | 417 | file=io.stderr) |
|
417 | 418 | print (shell.InteractiveTB.stb2text(stb), file=io.stderr) |
|
418 | 419 | shell._showtraceback = print_tb |
|
419 | 420 | InteractiveShellApp.init_gui_pylab(self) |
|
420 | 421 | finally: |
|
421 | 422 | shell._showtraceback = _showtraceback |
|
422 | 423 | |
|
423 | 424 | def init_shell(self): |
|
424 | 425 | self.shell = self.kernel.shell |
|
425 | 426 | self.shell.configurables.append(self) |
|
426 | 427 | |
|
427 | 428 | @catch_config_error |
|
428 | 429 | def initialize(self, argv=None): |
|
429 | 430 | super(IPKernelApp, self).initialize(argv) |
|
430 | 431 | self.init_blackhole() |
|
431 | 432 | self.init_connection_file() |
|
432 | 433 | self.init_session() |
|
433 | 434 | self.init_poller() |
|
434 | 435 | self.init_sockets() |
|
435 | 436 | self.init_heartbeat() |
|
436 | 437 | # writing/displaying connection info must be *after* init_sockets/heartbeat |
|
437 | 438 | self.log_connection_info() |
|
438 | 439 | self.write_connection_file() |
|
439 | 440 | self.init_io() |
|
440 | 441 | self.init_signal() |
|
441 | 442 | self.init_kernel() |
|
442 | 443 | # shell init steps |
|
443 | 444 | self.init_path() |
|
444 | 445 | self.init_shell() |
|
445 | 446 | self.init_gui_pylab() |
|
446 | 447 | self.init_extensions() |
|
447 | 448 | self.init_code() |
|
448 | 449 | # flush stdout/stderr, so that anything written to these streams during |
|
449 | 450 | # initialization does not get associated with the first execution request |
|
450 | 451 | sys.stdout.flush() |
|
451 | 452 | sys.stderr.flush() |
|
452 | 453 | |
|
453 | 454 | def start(self): |
|
454 | 455 | if self.poller is not None: |
|
455 | 456 | self.poller.start() |
|
456 | 457 | self.kernel.start() |
|
457 | 458 | try: |
|
458 | 459 | ioloop.IOLoop.instance().start() |
|
459 | 460 | except KeyboardInterrupt: |
|
460 | 461 | pass |
|
461 | 462 | |
|
462 | 463 | launch_new_instance = IPKernelApp.launch_instance |
|
463 | 464 | |
|
464 | 465 | def main(): |
|
465 | 466 | """Run an IPKernel as an application""" |
|
466 | 467 | app = IPKernelApp.instance() |
|
467 | 468 | app.initialize() |
|
468 | 469 | app.start() |
|
469 | 470 | |
|
470 | 471 | |
|
471 | 472 | if __name__ == '__main__': |
|
472 | 473 | main() |
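
The connection file this app writes is plain JSON, so any frontend can recover the kernel's ports from it. A minimal sketch of reading one back (not part of this module; the pid-based filename is hypothetical, following the kernel-<pid>.json default in init_connection_file() above):

    import json

    # read the file IPKernelApp.write_connection_file() produced
    with open('kernel-12345.json') as f:
        cfg = json.load(f)

    # these keys match the write_connection_file() call above
    shell_url = "%s://%s:%i" % (cfg['transport'], cfg['ip'], cfg['shell_port'])
    print(shell_url)
    print(cfg['hb_port'], cfg['iopub_port'], cfg['stdin_port'], cfg['control_port'])
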
@@ -1,547 +1,547 b'' | |||
|
1 | 1 | #!/usr/bin/env python |
|
2 | 2 | # encoding: utf-8 |
|
3 | 3 | """ |
|
4 | 4 | The IPython controller application. |
|
5 | 5 | |
|
6 | 6 | Authors: |
|
7 | 7 | |
|
8 | 8 | * Brian Granger |
|
9 | 9 | * MinRK |
|
10 | 10 | |
|
11 | 11 | """ |
|
12 | 12 | |
|
13 | 13 | #----------------------------------------------------------------------------- |
|
14 | # Copyright (C) 2008 |

14 | # Copyright (C) 2008 The IPython Development Team | |
|
15 | 15 | # |
|
16 | 16 | # Distributed under the terms of the BSD License. The full license is in |
|
17 | 17 | # the file COPYING, distributed as part of this software. |
|
18 | 18 | #----------------------------------------------------------------------------- |
|
19 | 19 | |
|
20 | 20 | #----------------------------------------------------------------------------- |
|
21 | 21 | # Imports |
|
22 | 22 | #----------------------------------------------------------------------------- |
|
23 | 23 | |
|
24 | 24 | from __future__ import with_statement |
|
25 | 25 | |
|
26 | 26 | import json |
|
27 | 27 | import os |
|
28 | 28 | import stat |
|
29 | 29 | import sys |
|
30 | 30 | |
|
31 | 31 | from multiprocessing import Process |
|
32 | 32 | from signal import signal, SIGINT, SIGABRT, SIGTERM |
|
33 | 33 | |
|
34 | 34 | import zmq |
|
35 | 35 | from zmq.devices import ProcessMonitoredQueue |
|
36 | 36 | from zmq.log.handlers import PUBHandler |
|
37 | 37 | |
|
38 | 38 | from IPython.core.profiledir import ProfileDir |
|
39 | 39 | |
|
40 | 40 | from IPython.parallel.apps.baseapp import ( |
|
41 | 41 | BaseParallelApplication, |
|
42 | 42 | base_aliases, |
|
43 | 43 | base_flags, |
|
44 | 44 | catch_config_error, |
|
45 | 45 | ) |
|
46 | 46 | from IPython.utils.importstring import import_item |
|
47 | from IPython.utils.localinterfaces import LOCALHOST, PUBLIC_IPS |

47 | from IPython.utils.localinterfaces import localhost, public_ips | |
|
48 | 48 | from IPython.utils.traitlets import Instance, Unicode, Bool, List, Dict, TraitError |
|
49 | 49 | |
|
50 | 50 | from IPython.kernel.zmq.session import ( |
|
51 | 51 | Session, session_aliases, session_flags, default_secure |
|
52 | 52 | ) |
|
53 | 53 | |
|
54 | 54 | from IPython.parallel.controller.heartmonitor import HeartMonitor |
|
55 | 55 | from IPython.parallel.controller.hub import HubFactory |
|
56 | 56 | from IPython.parallel.controller.scheduler import TaskScheduler,launch_scheduler |
|
57 | 57 | from IPython.parallel.controller.dictdb import DictDB |
|
58 | 58 | |
|
59 | 59 | from IPython.parallel.util import split_url, disambiguate_url, set_hwm |
|
60 | 60 | |
|
61 | 61 | # conditional import of SQLiteDB / MongoDB backend class |
|
62 | 62 | real_dbs = [] |
|
63 | 63 | |
|
64 | 64 | try: |
|
65 | 65 | from IPython.parallel.controller.sqlitedb import SQLiteDB |
|
66 | 66 | except ImportError: |
|
67 | 67 | pass |
|
68 | 68 | else: |
|
69 | 69 | real_dbs.append(SQLiteDB) |
|
70 | 70 | |
|
71 | 71 | try: |
|
72 | 72 | from IPython.parallel.controller.mongodb import MongoDB |
|
73 | 73 | except ImportError: |
|
74 | 74 | pass |
|
75 | 75 | else: |
|
76 | 76 | real_dbs.append(MongoDB) |
|
77 | 77 | |
|
78 | 78 | |
|
79 | 79 | |
|
80 | 80 | #----------------------------------------------------------------------------- |
|
81 | 81 | # Module level variables |
|
82 | 82 | #----------------------------------------------------------------------------- |
|
83 | 83 | |
|
84 | 84 | |
|
85 | 85 | _description = """Start the IPython controller for parallel computing. |
|
86 | 86 | |
|
87 | 87 | The IPython controller provides a gateway between the IPython engines and |
|
88 | 88 | clients. The controller needs to be started before the engines and can be |
|
89 | 89 | configured using command line options or using a cluster directory. Cluster |
|
90 | 90 | directories contain config, log and security files and are usually located in |
|
91 | 91 | your ipython directory and named as "profile_name". See the `profile` |
|
92 | 92 | and `profile-dir` options for details. |
|
93 | 93 | """ |
|
94 | 94 | |
|
95 | 95 | _examples = """ |
|
96 | 96 | ipcontroller --ip=192.168.0.1 --port=1000 # listen on ip, port for engines |
|
97 | 97 | ipcontroller --scheme=pure # use the pure zeromq scheduler |
|
98 | 98 | """ |
|
99 | 99 | |
|
100 | 100 | |
|
101 | 101 | #----------------------------------------------------------------------------- |
|
102 | 102 | # The main application |
|
103 | 103 | #----------------------------------------------------------------------------- |
|
104 | 104 | flags = {} |
|
105 | 105 | flags.update(base_flags) |
|
106 | 106 | flags.update({ |
|
107 | 107 | 'usethreads' : ( {'IPControllerApp' : {'use_threads' : True}}, |
|
108 | 108 | 'Use threads instead of processes for the schedulers'), |
|
109 | 109 | 'sqlitedb' : ({'HubFactory' : {'db_class' : 'IPython.parallel.controller.sqlitedb.SQLiteDB'}}, |
|
110 | 110 | 'use the SQLiteDB backend'), |
|
111 | 111 | 'mongodb' : ({'HubFactory' : {'db_class' : 'IPython.parallel.controller.mongodb.MongoDB'}}, |
|
112 | 112 | 'use the MongoDB backend'), |
|
113 | 113 | 'dictdb' : ({'HubFactory' : {'db_class' : 'IPython.parallel.controller.dictdb.DictDB'}}, |
|
114 | 114 | 'use the in-memory DictDB backend'), |
|
115 | 115 | 'nodb' : ({'HubFactory' : {'db_class' : 'IPython.parallel.controller.dictdb.NoDB'}}, |
|
116 | 116 | """use dummy DB backend, which doesn't store any information. |
|
117 | 117 | |
|
118 | 118 | This is the default as of IPython 0.13. |
|
119 | 119 | |
|
120 | 120 | To enable delayed or repeated retrieval of results from the Hub, |
|
121 | 121 | select one of the true db backends. |
|
122 | 122 | """), |
|
123 | 123 | 'reuse' : ({'IPControllerApp' : {'reuse_files' : True}}, |
|
124 | 124 | 'reuse existing json connection files'), |
|
125 | 125 | 'restore' : ({'IPControllerApp' : {'restore_engines' : True, 'reuse_files' : True}}, |
|
126 | 126 | 'Attempt to restore engines from a JSON file. ' |
|
127 | 127 | 'For use when resuming a crashed controller'), |
|
128 | 128 | }) |
|
129 | 129 | |
|
130 | 130 | flags.update(session_flags) |
|
131 | 131 | |
|
132 | 132 | aliases = dict( |
|
133 | 133 | ssh = 'IPControllerApp.ssh_server', |
|
134 | 134 | enginessh = 'IPControllerApp.engine_ssh_server', |
|
135 | 135 | location = 'IPControllerApp.location', |
|
136 | 136 | |
|
137 | 137 | url = 'HubFactory.url', |
|
138 | 138 | ip = 'HubFactory.ip', |
|
139 | 139 | transport = 'HubFactory.transport', |
|
140 | 140 | port = 'HubFactory.regport', |
|
141 | 141 | |
|
142 | 142 | ping = 'HeartMonitor.period', |
|
143 | 143 | |
|
144 | 144 | scheme = 'TaskScheduler.scheme_name', |
|
145 | 145 | hwm = 'TaskScheduler.hwm', |
|
146 | 146 | ) |
|
147 | 147 | aliases.update(base_aliases) |
|
148 | 148 | aliases.update(session_aliases) |
|
149 | 149 | |
|
150 | 150 | class IPControllerApp(BaseParallelApplication): |
|
151 | 151 | |
|
152 | 152 | name = u'ipcontroller' |
|
153 | 153 | description = _description |
|
154 | 154 | examples = _examples |
|
155 | 155 | classes = [ProfileDir, Session, HubFactory, TaskScheduler, HeartMonitor, DictDB] + real_dbs |
|
156 | 156 | |
|
157 | 157 | # change default to True |
|
158 | 158 | auto_create = Bool(True, config=True, |
|
159 | 159 | help="""Whether to create profile dir if it doesn't exist.""") |
|
160 | 160 | |
|
161 | 161 | reuse_files = Bool(False, config=True, |
|
162 | 162 | help="""Whether to reuse existing json connection files. |
|
163 | 163 | If False, connection files will be removed on a clean exit. |
|
164 | 164 | """ |
|
165 | 165 | ) |
|
166 | 166 | restore_engines = Bool(False, config=True, |
|
167 | 167 | help="""Reload engine state from JSON file |
|
168 | 168 | """ |
|
169 | 169 | ) |
|
170 | 170 | ssh_server = Unicode(u'', config=True, |
|
171 | 171 | help="""ssh url for clients to use when connecting to the Controller |
|
172 | 172 | processes. It should be of the form: [user@]server[:port]. The |
|
173 | 173 | Controller's listening addresses must be accessible from the ssh server""", |
|
174 | 174 | ) |
|
175 | 175 | engine_ssh_server = Unicode(u'', config=True, |
|
176 | 176 | help="""ssh url for engines to use when connecting to the Controller |
|
177 | 177 | processes. It should be of the form: [user@]server[:port]. The |
|
178 | 178 | Controller's listening addresses must be accessible from the ssh server""", |
|
179 | 179 | ) |
|
180 | 180 | location = Unicode(u'', config=True, |
|
181 | 181 | help="""The external IP or domain name of the Controller, used for disambiguating |
|
182 | 182 | engine and client connections.""", |
|
183 | 183 | ) |
|
184 | 184 | import_statements = List([], config=True, |
|
185 | 185 | help="import statements to be run at startup. Necessary in some environments" |
|
186 | 186 | ) |
|
187 | 187 | |
|
188 | 188 | use_threads = Bool(False, config=True, |
|
189 | 189 | help='Use threads instead of processes for the schedulers', |
|
190 | 190 | ) |
|
191 | 191 | |
|
192 | 192 | engine_json_file = Unicode('ipcontroller-engine.json', config=True, |
|
193 | 193 | help="JSON filename where engine connection info will be stored.") |
|
194 | 194 | client_json_file = Unicode('ipcontroller-client.json', config=True, |
|
195 | 195 | help="JSON filename where client connection info will be stored.") |
|
196 | 196 | |
|
197 | 197 | def _cluster_id_changed(self, name, old, new): |
|
198 | 198 | super(IPControllerApp, self)._cluster_id_changed(name, old, new) |
|
199 | 199 | self.engine_json_file = "%s-engine.json" % self.name |
|
200 | 200 | self.client_json_file = "%s-client.json" % self.name |
|
201 | 201 | |
|
202 | 202 | |
|
203 | 203 | # internal |
|
204 | 204 | children = List() |
|
205 | 205 | mq_class = Unicode('zmq.devices.ProcessMonitoredQueue') |
|
206 | 206 | |
|
207 | 207 | def _use_threads_changed(self, name, old, new): |
|
208 | 208 | self.mq_class = 'zmq.devices.%sMonitoredQueue'%('Thread' if new else 'Process') |
|
209 | 209 | |
|
210 | 210 | write_connection_files = Bool(True, |
|
211 | 211 | help="""Whether to write connection files to disk. |
|
212 | 212 | True in all cases other than runs with `reuse_files=True` *after the first* |
|
213 | 213 | """ |
|
214 | 214 | ) |
|
215 | 215 | |
|
216 | 216 | aliases = Dict(aliases) |
|
217 | 217 | flags = Dict(flags) |
|
218 | 218 | |
|
219 | 219 | |
|
220 | 220 | def save_connection_dict(self, fname, cdict): |
|
221 | 221 | """save a connection dict to json file.""" |
|
222 | 222 | c = self.config |
|
223 | 223 | url = cdict['registration'] |
|
224 | 224 | location = cdict['location'] |
|
225 | 225 | |
|
226 | 226 | if not location: |
|
227 | if PUBLIC_IPS: |

228 | location = PUBLIC_IPS[-1] |

227 | if public_ips(): | |

228 | location = public_ips()[-1] | |
|
229 | 229 | else: |
|
230 | 230 | self.log.warn("Could not identify this machine's IP, assuming %s." |
|
231 | 231 | " You may need to specify '--location=<external_ip_address>' to help" |
|
232 | " IPython decide when to connect via loopback." % LOCALHOST ) |

233 | location = LOCALHOST |

232 | " IPython decide when to connect via loopback." % localhost() ) | |

233 | location = localhost() | |
|
234 | 234 | cdict['location'] = location |
|
235 | 235 | fname = os.path.join(self.profile_dir.security_dir, fname) |
|
236 | 236 | self.log.info("writing connection info to %s", fname) |
|
237 | 237 | with open(fname, 'w') as f: |
|
238 | 238 | f.write(json.dumps(cdict, indent=2)) |
|
239 | 239 | os.chmod(fname, stat.S_IRUSR|stat.S_IWUSR) |
|
240 | 240 | |
|
241 | 241 | def load_config_from_json(self): |
|
242 | 242 | """load config from existing json connector files.""" |
|
243 | 243 | c = self.config |
|
244 | 244 | self.log.debug("loading config from JSON") |
|
245 | 245 | |
|
246 | 246 | # load engine config |
|
247 | 247 | |
|
248 | 248 | fname = os.path.join(self.profile_dir.security_dir, self.engine_json_file) |
|
249 | 249 | self.log.info("loading connection info from %s", fname) |
|
250 | 250 | with open(fname) as f: |
|
251 | 251 | ecfg = json.loads(f.read()) |
|
252 | 252 | |
|
253 | 253 | # json gives unicode, Session.key wants bytes |
|
254 | 254 | c.Session.key = ecfg['key'].encode('ascii') |
|
255 | 255 | |
|
256 | 256 | xport,ip = ecfg['interface'].split('://') |
|
257 | 257 | |
|
258 | 258 | c.HubFactory.engine_ip = ip |
|
259 | 259 | c.HubFactory.engine_transport = xport |
|
260 | 260 | |
|
261 | 261 | self.location = ecfg['location'] |
|
262 | 262 | if not self.engine_ssh_server: |
|
263 | 263 | self.engine_ssh_server = ecfg['ssh'] |
|
264 | 264 | |
|
265 | 265 | # load client config |
|
266 | 266 | |
|
267 | 267 | fname = os.path.join(self.profile_dir.security_dir, self.client_json_file) |
|
268 | 268 | self.log.info("loading connection info from %s", fname) |
|
269 | 269 | with open(fname) as f: |
|
270 | 270 | ccfg = json.loads(f.read()) |
|
271 | 271 | |
|
272 | 272 | for key in ('key', 'registration', 'pack', 'unpack', 'signature_scheme'): |
|
273 | 273 | assert ccfg[key] == ecfg[key], "mismatch between engine and client info: %r" % key |
|
274 | 274 | |
|
275 | 275 | xport,addr = ccfg['interface'].split('://') |
|
276 | 276 | |
|
277 | 277 | c.HubFactory.client_transport = xport |
|
278 | 278 | c.HubFactory.client_ip = addr |
|
279 | 279 | if not self.ssh_server: |
|
280 | 280 | self.ssh_server = ccfg['ssh'] |
|
281 | 281 | |
|
282 | 282 | # load port config: |
|
283 | 283 | c.HubFactory.regport = ecfg['registration'] |
|
284 | 284 | c.HubFactory.hb = (ecfg['hb_ping'], ecfg['hb_pong']) |
|
285 | 285 | c.HubFactory.control = (ccfg['control'], ecfg['control']) |
|
286 | 286 | c.HubFactory.mux = (ccfg['mux'], ecfg['mux']) |
|
287 | 287 | c.HubFactory.task = (ccfg['task'], ecfg['task']) |
|
288 | 288 | c.HubFactory.iopub = (ccfg['iopub'], ecfg['iopub']) |
|
289 | 289 | c.HubFactory.notifier_port = ccfg['notification'] |
|
290 | 290 | |
|
291 | 291 | def cleanup_connection_files(self): |
|
292 | 292 | if self.reuse_files: |
|
293 | 293 | self.log.debug("leaving JSON connection files for reuse") |
|
294 | 294 | return |
|
295 | 295 | self.log.debug("cleaning up JSON connection files") |
|
296 | 296 | for f in (self.client_json_file, self.engine_json_file): |
|
297 | 297 | f = os.path.join(self.profile_dir.security_dir, f) |
|
298 | 298 | try: |
|
299 | 299 | os.remove(f) |
|
300 | 300 | except Exception as e: |
|
301 | 301 | self.log.error("Failed to cleanup connection file: %s", e) |
|
302 | 302 | else: |
|
303 | 303 | self.log.debug(u"removed %s", f) |
|
304 | 304 | |
|
305 | 305 | def load_secondary_config(self): |
|
306 | 306 | """secondary config, loading from JSON and setting defaults""" |
|
307 | 307 | if self.reuse_files: |
|
308 | 308 | try: |
|
309 | 309 | self.load_config_from_json() |
|
310 | 310 | except (AssertionError,IOError) as e: |
|
311 | 311 | self.log.error("Could not load config from JSON: %s" % e) |
|
312 | 312 | else: |
|
313 | 313 | # successfully loaded config from JSON, and reuse=True |
|
314 | 314 | # no need to write back the same file |
|
315 | 315 | self.write_connection_files = False |
|
316 | 316 | |
|
317 | 317 | # switch Session.key default to secure |
|
318 | 318 | default_secure(self.config) |
|
319 | 319 | self.log.debug("Config changed") |
|
320 | 320 | self.log.debug(repr(self.config)) |
|
321 | 321 | |
|
322 | 322 | def init_hub(self): |
|
323 | 323 | c = self.config |
|
324 | 324 | |
|
325 | 325 | self.do_import_statements() |
|
326 | 326 | |
|
327 | 327 | try: |
|
328 | 328 | self.factory = HubFactory(config=c, log=self.log) |
|
329 | 329 | # self.start_logging() |
|
330 | 330 | self.factory.init_hub() |
|
331 | 331 | except TraitError: |
|
332 | 332 | raise |
|
333 | 333 | except Exception: |
|
334 | 334 | self.log.error("Couldn't construct the Controller", exc_info=True) |
|
335 | 335 | self.exit(1) |
|
336 | 336 | |
|
337 | 337 | if self.write_connection_files: |
|
338 | 338 | # save to new json config files |
|
339 | 339 | f = self.factory |
|
340 | 340 | base = { |
|
341 | 341 | 'key' : f.session.key.decode('ascii'), |
|
342 | 342 | 'location' : self.location, |
|
343 | 343 | 'pack' : f.session.packer, |
|
344 | 344 | 'unpack' : f.session.unpacker, |
|
345 | 345 | 'signature_scheme' : f.session.signature_scheme, |
|
346 | 346 | } |
|
347 | 347 | |
|
348 | 348 | cdict = {'ssh' : self.ssh_server} |
|
349 | 349 | cdict.update(f.client_info) |
|
350 | 350 | cdict.update(base) |
|
351 | 351 | self.save_connection_dict(self.client_json_file, cdict) |
|
352 | 352 | |
|
353 | 353 | edict = {'ssh' : self.engine_ssh_server} |
|
354 | 354 | edict.update(f.engine_info) |
|
355 | 355 | edict.update(base) |
|
356 | 356 | self.save_connection_dict(self.engine_json_file, edict) |
|
357 | 357 | |
|
358 | 358 | fname = "engines%s.json" % self.cluster_id |
|
359 | 359 | self.factory.hub.engine_state_file = os.path.join(self.profile_dir.log_dir, fname) |
|
360 | 360 | if self.restore_engines: |
|
361 | 361 | self.factory.hub._load_engine_state() |
|
362 | 362 | |
|
363 | 363 | def init_schedulers(self): |
|
364 | 364 | children = self.children |
|
365 | 365 | mq = import_item(str(self.mq_class)) |
|
366 | 366 | |
|
367 | 367 | f = self.factory |
|
368 | 368 | ident = f.session.bsession |
|
369 | 369 | # disambiguate url, in case of * |
|
370 | 370 | monitor_url = disambiguate_url(f.monitor_url) |
|
371 | 371 | # maybe_inproc = 'inproc://monitor' if self.use_threads else monitor_url |
|
372 | 372 | # IOPub relay (in a Process) |
|
373 | 373 | q = mq(zmq.PUB, zmq.SUB, zmq.PUB, b'N/A',b'iopub') |
|
374 | 374 | q.bind_in(f.client_url('iopub')) |
|
375 | 375 | q.setsockopt_in(zmq.IDENTITY, ident + b"_iopub") |
|
376 | 376 | q.bind_out(f.engine_url('iopub')) |
|
377 | 377 | q.setsockopt_out(zmq.SUBSCRIBE, b'') |
|
378 | 378 | q.connect_mon(monitor_url) |
|
379 | 379 | q.daemon=True |
|
380 | 380 | children.append(q) |
|
381 | 381 | |
|
382 | 382 | # Multiplexer Queue (in a Process) |
|
383 | 383 | q = mq(zmq.ROUTER, zmq.ROUTER, zmq.PUB, b'in', b'out') |
|
384 | 384 | |
|
385 | 385 | q.bind_in(f.client_url('mux')) |
|
386 | 386 | q.setsockopt_in(zmq.IDENTITY, b'mux_in') |
|
387 | 387 | q.bind_out(f.engine_url('mux')) |
|
388 | 388 | q.setsockopt_out(zmq.IDENTITY, b'mux_out') |
|
389 | 389 | q.connect_mon(monitor_url) |
|
390 | 390 | q.daemon=True |
|
391 | 391 | children.append(q) |
|
392 | 392 | |
|
393 | 393 | # Control Queue (in a Process) |
|
394 | 394 | q = mq(zmq.ROUTER, zmq.ROUTER, zmq.PUB, b'incontrol', b'outcontrol') |
|
395 | 395 | q.bind_in(f.client_url('control')) |
|
396 | 396 | q.setsockopt_in(zmq.IDENTITY, b'control_in') |
|
397 | 397 | q.bind_out(f.engine_url('control')) |
|
398 | 398 | q.setsockopt_out(zmq.IDENTITY, b'control_out') |
|
399 | 399 | q.connect_mon(monitor_url) |
|
400 | 400 | q.daemon=True |
|
401 | 401 | children.append(q) |
|
402 | 402 | try: |
|
403 | 403 | scheme = self.config.TaskScheduler.scheme_name |
|
404 | 404 | except AttributeError: |
|
405 | 405 | scheme = TaskScheduler.scheme_name.get_default_value() |
|
406 | 406 | # Task Queue (in a Process) |
|
407 | 407 | if scheme == 'pure': |
|
408 | 408 | self.log.warn("task::using pure DEALER Task scheduler") |
|
409 | 409 | q = mq(zmq.ROUTER, zmq.DEALER, zmq.PUB, b'intask', b'outtask') |
|
410 | 410 | # q.setsockopt_out(zmq.HWM, hub.hwm) |
|
411 | 411 | q.bind_in(f.client_url('task')) |
|
412 | 412 | q.setsockopt_in(zmq.IDENTITY, b'task_in') |
|
413 | 413 | q.bind_out(f.engine_url('task')) |
|
414 | 414 | q.setsockopt_out(zmq.IDENTITY, b'task_out') |
|
415 | 415 | q.connect_mon(monitor_url) |
|
416 | 416 | q.daemon=True |
|
417 | 417 | children.append(q) |
|
418 | 418 | elif scheme == 'none': |
|
419 | 419 | self.log.warn("task::using no Task scheduler") |
|
420 | 420 | |
|
421 | 421 | else: |
|
422 | 422 | self.log.info("task::using Python %s Task scheduler"%scheme) |
|
423 | 423 | sargs = (f.client_url('task'), f.engine_url('task'), |
|
424 | 424 | monitor_url, disambiguate_url(f.client_url('notification')), |
|
425 | 425 | disambiguate_url(f.client_url('registration')), |
|
426 | 426 | ) |
|
427 | 427 | kwargs = dict(logname='scheduler', loglevel=self.log_level, |
|
428 | 428 | log_url = self.log_url, config=dict(self.config)) |
|
429 | 429 | if 'Process' in self.mq_class: |
|
430 | 430 | # run the Python scheduler in a Process |
|
431 | 431 | q = Process(target=launch_scheduler, args=sargs, kwargs=kwargs) |
|
432 | 432 | q.daemon=True |
|
433 | 433 | children.append(q) |
|
434 | 434 | else: |
|
435 | 435 | # single-threaded Controller |
|
436 | 436 | kwargs['in_thread'] = True |
|
437 | 437 | launch_scheduler(*sargs, **kwargs) |
|
438 | 438 | |
|
439 | 439 | # set unlimited HWM for all relay devices |
|
440 | 440 | if hasattr(zmq, 'SNDHWM'): |
|
441 | 441 | q = children[0] |
|
442 | 442 | q.setsockopt_in(zmq.RCVHWM, 0) |
|
443 | 443 | q.setsockopt_out(zmq.SNDHWM, 0) |
|
444 | 444 | |
|
445 | 445 | for q in children[1:]: |
|
446 | 446 | if not hasattr(q, 'setsockopt_in'): |
|
447 | 447 | continue |
|
448 | 448 | q.setsockopt_in(zmq.SNDHWM, 0) |
|
449 | 449 | q.setsockopt_in(zmq.RCVHWM, 0) |
|
450 | 450 | q.setsockopt_out(zmq.SNDHWM, 0) |
|
451 | 451 | q.setsockopt_out(zmq.RCVHWM, 0) |
|
452 | 452 | q.setsockopt_mon(zmq.SNDHWM, 0) |
|
453 | 453 | |
|
454 | 454 | |
|
455 | 455 | def terminate_children(self): |
|
456 | 456 | child_procs = [] |
|
457 | 457 | for child in self.children: |
|
458 | 458 | if isinstance(child, ProcessMonitoredQueue): |
|
459 | 459 | child_procs.append(child.launcher) |
|
460 | 460 | elif isinstance(child, Process): |
|
461 | 461 | child_procs.append(child) |
|
462 | 462 | if child_procs: |
|
463 | 463 | self.log.critical("terminating children...") |
|
464 | 464 | for child in child_procs: |
|
465 | 465 | try: |
|
466 | 466 | child.terminate() |
|
467 | 467 | except OSError: |
|
468 | 468 | # already dead |
|
469 | 469 | pass |
|
470 | 470 | |
|
471 | 471 | def handle_signal(self, sig, frame): |
|
472 | 472 | self.log.critical("Received signal %i, shutting down", sig) |
|
473 | 473 | self.terminate_children() |
|
474 | 474 | self.loop.stop() |
|
475 | 475 | |
|
476 | 476 | def init_signal(self): |
|
477 | 477 | for sig in (SIGINT, SIGABRT, SIGTERM): |
|
478 | 478 | signal(sig, self.handle_signal) |
|
479 | 479 | |
|
480 | 480 | def do_import_statements(self): |
|
481 | 481 | statements = self.import_statements |
|
482 | 482 | for s in statements: |
|
483 | 483 | try: |
|
484 | 484 | self.log.info("Executing statement: '%s'" % s) |
|
485 | 485 | exec s in globals(), locals() |
|
486 | 486 | except: |
|
487 | 487 | self.log.error("Error running statement: %s" % s) |
|
488 | 488 | |
|
489 | 489 | def forward_logging(self): |
|
490 | 490 | if self.log_url: |
|
491 | 491 | self.log.info("Forwarding logging to %s"%self.log_url) |
|
492 | 492 | context = zmq.Context.instance() |
|
493 | 493 | lsock = context.socket(zmq.PUB) |
|
494 | 494 | lsock.connect(self.log_url) |
|
495 | 495 | handler = PUBHandler(lsock) |
|
496 | 496 | handler.root_topic = 'controller' |
|
497 | 497 | handler.setLevel(self.log_level) |
|
498 | 498 | self.log.addHandler(handler) |
|
499 | 499 | |
|
500 | 500 | @catch_config_error |
|
501 | 501 | def initialize(self, argv=None): |
|
502 | 502 | super(IPControllerApp, self).initialize(argv) |
|
503 | 503 | self.forward_logging() |
|
504 | 504 | self.load_secondary_config() |
|
505 | 505 | self.init_hub() |
|
506 | 506 | self.init_schedulers() |
|
507 | 507 | |
|
508 | 508 | def start(self): |
|
509 | 509 | # Start the subprocesses: |
|
510 | 510 | self.factory.start() |
|
511 | 511 | # children must be started before signals are setup, |
|
512 | 512 | # otherwise signal-handling will fire multiple times |
|
513 | 513 | for child in self.children: |
|
514 | 514 | child.start() |
|
515 | 515 | self.init_signal() |
|
516 | 516 | |
|
517 | 517 | self.write_pid_file(overwrite=True) |
|
518 | 518 | |
|
519 | 519 | try: |
|
520 | 520 | self.factory.loop.start() |
|
521 | 521 | except KeyboardInterrupt: |
|
522 | 522 | self.log.critical("Interrupted, Exiting...\n") |
|
523 | 523 | finally: |
|
524 | 524 | self.cleanup_connection_files() |
|
525 | 525 | |
|
526 | 526 | |
|
527 | 527 | def launch_new_instance(*args, **kwargs): |
|
528 | 528 | """Create and run the IPython controller""" |
|
529 | 529 | if sys.platform == 'win32': |
|
530 | 530 | # make sure we don't get called from a multiprocessing subprocess |
|
531 | 531 | # this can result in infinite Controllers being started on Windows |
|
532 | 532 | # which doesn't have a proper fork, so multiprocessing is wonky |
|
533 | 533 | |
|
534 | 534 | # this only comes up when IPython has been installed using vanilla |
|
535 | 535 | # setuptools, and *not* distribute. |
|
536 | 536 | import multiprocessing |
|
537 | 537 | p = multiprocessing.current_process() |
|
538 | 538 | # the main process has name 'MainProcess' |
|
539 | 539 | # subprocesses will have names like 'Process-1' |
|
540 | 540 | if p.name != 'MainProcess': |
|
541 | 541 | # we are a subprocess, don't start another Controller! |
|
542 | 542 | return |
|
543 | 543 | return IPControllerApp.launch_instance(*args, **kwargs) |
|
544 | 544 | |
|
545 | 545 | |
|
546 | 546 | if __name__ == '__main__': |
|
547 | 547 | launch_new_instance() |
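
The recurring edit in this changeset swaps the eagerly computed constants of IPython.utils.localinterfaces (LOCALHOST, PUBLIC_IPS, LOCAL_IPS) for functions evaluated when called, so the interface lookup can happen on first use rather than at import. A rough sketch of the three calls as they appear in these files (return values are illustrative, not guaranteed):

    from IPython.utils.localinterfaces import localhost, public_ips, is_local_ip

    localhost()               # e.g. '127.0.0.1', computed on first call
    public_ips()              # list of this machine's externally visible IPs
    is_local_ip('10.0.0.5')   # membership test standing in for `addr in LOCAL_IPS`
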
@@ -1,115 +1,117 b'' | |||
|
1 | 1 | """ |
|
2 | 2 | A simple logger object that consolidates messages incoming from ipcluster processes. |
|
3 | 3 | |
|
4 | 4 | Authors: |
|
5 | 5 | |
|
6 | 6 | * MinRK |
|
7 | 7 | |
|
8 | 8 | """ |
|
9 | 9 | |
|
10 | 10 | #----------------------------------------------------------------------------- |
|
11 | 11 | # Copyright (C) 2011 The IPython Development Team |
|
12 | 12 | # |
|
13 | 13 | # Distributed under the terms of the BSD License. The full license is in |
|
14 | 14 | # the file COPYING, distributed as part of this software. |
|
15 | 15 | #----------------------------------------------------------------------------- |
|
16 | 16 | |
|
17 | 17 | #----------------------------------------------------------------------------- |
|
18 | 18 | # Imports |
|
19 | 19 | #----------------------------------------------------------------------------- |
|
20 | 20 | |
|
21 | 21 | |
|
22 | 22 | import logging |
|
23 | 23 | import sys |
|
24 | 24 | |
|
25 | 25 | import zmq |
|
26 | 26 | from zmq.eventloop import ioloop, zmqstream |
|
27 | 27 | |
|
28 | 28 | from IPython.config.configurable import LoggingConfigurable |
|
29 | from IPython.utils.localinterfaces import LOCALHOST |

29 | from IPython.utils.localinterfaces import localhost | |
|
30 | 30 | from IPython.utils.traitlets import Int, Unicode, Instance, List |
|
31 | 31 | |
|
32 | 32 | #----------------------------------------------------------------------------- |
|
33 | 33 | # Classes |
|
34 | 34 | #----------------------------------------------------------------------------- |
|
35 | 35 | |
|
36 | 36 | |
|
37 | 37 | class LogWatcher(LoggingConfigurable): |
|
38 | 38 | """A simple class that receives messages on a SUB socket, as published |
|
39 | 39 | by subclasses of `zmq.log.handlers.PUBHandler`, and logs them itself. |
|
40 | 40 | |
|
41 | 41 | This can subscribe to multiple topics, but defaults to all topics. |
|
42 | 42 | """ |
|
43 | 43 | |
|
44 | 44 | # configurables |
|
45 | 45 | topics = List([''], config=True, |
|
46 | 46 | help="The ZMQ topics to subscribe to. Default is to subscribe to all messages") |
|
47 | url = Unicode('tcp://%s:20202' % LOCALHOST, config=True, |

47 | url = Unicode(config=True, | |
|
48 | 48 | help="ZMQ url on which to listen for log messages") |
|
49 | def _url_default(self): | |
|
50 | return 'tcp://%s:20202' % localhost() | |
|
49 | 51 | |
|
50 | 52 | # internals |
|
51 | 53 | stream = Instance('zmq.eventloop.zmqstream.ZMQStream') |
|
52 | 54 | |
|
53 | 55 | context = Instance(zmq.Context) |
|
54 | 56 | def _context_default(self): |
|
55 | 57 | return zmq.Context.instance() |
|
56 | 58 | |
|
57 | 59 | loop = Instance(zmq.eventloop.ioloop.IOLoop) |
|
58 | 60 | def _loop_default(self): |
|
59 | 61 | return ioloop.IOLoop.instance() |
|
60 | 62 | |
|
61 | 63 | def __init__(self, **kwargs): |
|
62 | 64 | super(LogWatcher, self).__init__(**kwargs) |
|
63 | 65 | s = self.context.socket(zmq.SUB) |
|
64 | 66 | s.bind(self.url) |
|
65 | 67 | self.stream = zmqstream.ZMQStream(s, self.loop) |
|
66 | 68 | self.subscribe() |
|
67 | 69 | self.on_trait_change(self.subscribe, 'topics') |
|
68 | 70 | |
|
69 | 71 | def start(self): |
|
70 | 72 | self.stream.on_recv(self.log_message) |
|
71 | 73 | |
|
72 | 74 | def stop(self): |
|
73 | 75 | self.stream.stop_on_recv() |
|
74 | 76 | |
|
75 | 77 | def subscribe(self): |
|
76 | 78 | """Update our SUB socket's subscriptions.""" |
|
77 | 79 | self.stream.setsockopt(zmq.UNSUBSCRIBE, '') |
|
78 | 80 | if '' in self.topics: |
|
79 | 81 | self.log.debug("Subscribing to: everything") |
|
80 | 82 | self.stream.setsockopt(zmq.SUBSCRIBE, '') |
|
81 | 83 | else: |
|
82 | 84 | for topic in self.topics: |
|
83 | 85 | self.log.debug("Subscribing to: %r"%(topic)) |
|
84 | 86 | self.stream.setsockopt(zmq.SUBSCRIBE, topic) |
|
85 | 87 | |
|
86 | 88 | def _extract_level(self, topic_str): |
|
87 | 89 | """Turn 'engine.0.INFO.extra' into (logging.INFO, 'engine.0.extra')""" |
|
88 | 90 | topics = topic_str.split('.') |
|
89 | 91 | for idx,t in enumerate(topics): |
|
90 | 92 | level = getattr(logging, t, None) |
|
91 | 93 | if level is not None: |
|
92 | 94 | break |
|
93 | 95 | |
|
94 | 96 | if level is None: |
|
95 | 97 | level = logging.INFO |
|
96 | 98 | else: |
|
97 | 99 | topics.pop(idx) |
|
98 | 100 | |
|
99 | 101 | return level, '.'.join(topics) |
|
100 | 102 | |
|
101 | 103 | |
|
102 | 104 | def log_message(self, raw): |
|
103 | 105 | """receive and parse a message, then log it.""" |
|
104 | 106 | if len(raw) != 2 or '.' not in raw[0]: |
|
105 | 107 | self.log.error("Invalid log message: %s"%raw) |
|
106 | 108 | return |
|
107 | 109 | else: |
|
108 | 110 | topic, msg = raw |
|
109 | 111 | # don't newline, since log messages always newline: |
|
110 | 112 | topic,level_name = topic.rsplit('.',1) |
|
111 | 113 | level,topic = self._extract_level(topic) |
|
112 | 114 | if msg[-1] == '\n': |
|
113 | 115 | msg = msg[:-1] |
|
114 | 116 | self.log.log(level, "[%s] %s" % (topic, msg)) |
|
115 | 117 |
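
A LogWatcher only consumes; something must publish to it. A minimal companion sketch using the PUBHandler already imported in ipcontrollerapp.py above, assuming a watcher running on its default url (the _url_default above, with localhost() spelled out):

    import logging

    import zmq
    from zmq.log.handlers import PUBHandler

    sock = zmq.Context.instance().socket(zmq.PUB)
    sock.connect('tcp://127.0.0.1:20202')

    handler = PUBHandler(sock)
    handler.root_topic = 'engine.0'    # arrives with topics like 'engine.0.INFO'

    log = logging.getLogger('demo')
    log.addHandler(handler)
    log.setLevel(logging.INFO)
    log.info('hello')   # _extract_level() above recovers (logging.INFO, 'engine.0')
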
@@ -1,1858 +1,1858 b'' | |||
|
1 | 1 | """A semi-synchronous Client for the ZMQ cluster |
|
2 | 2 | |
|
3 | 3 | Authors: |
|
4 | 4 | |
|
5 | 5 | * MinRK |
|
6 | 6 | """ |
|
7 | 7 | #----------------------------------------------------------------------------- |
|
8 | 8 | # Copyright (C) 2010-2011 The IPython Development Team |
|
9 | 9 | # |
|
10 | 10 | # Distributed under the terms of the BSD License. The full license is in |
|
11 | 11 | # the file COPYING, distributed as part of this software. |
|
12 | 12 | #----------------------------------------------------------------------------- |
|
13 | 13 | |
|
14 | 14 | #----------------------------------------------------------------------------- |
|
15 | 15 | # Imports |
|
16 | 16 | #----------------------------------------------------------------------------- |
|
17 | 17 | |
|
18 | 18 | import os |
|
19 | 19 | import json |
|
20 | 20 | import sys |
|
21 | 21 | from threading import Thread, Event |
|
22 | 22 | import time |
|
23 | 23 | import warnings |
|
24 | 24 | from datetime import datetime |
|
25 | 25 | from getpass import getpass |
|
26 | 26 | from pprint import pprint |
|
27 | 27 | |
|
28 | 28 | pjoin = os.path.join |
|
29 | 29 | |
|
30 | 30 | import zmq |
|
31 | 31 | # from zmq.eventloop import ioloop, zmqstream |
|
32 | 32 | |
|
33 | 33 | from IPython.config.configurable import MultipleInstanceError |
|
34 | 34 | from IPython.core.application import BaseIPythonApplication |
|
35 | 35 | from IPython.core.profiledir import ProfileDir, ProfileDirError |
|
36 | 36 | |
|
37 | 37 | from IPython.utils.capture import RichOutput |
|
38 | 38 | from IPython.utils.coloransi import TermColors |
|
39 | 39 | from IPython.utils.jsonutil import rekey |
|
40 | from IPython.utils.localinterfaces import LOCALHOST, LOCAL_IPS |

40 | from IPython.utils.localinterfaces import localhost, is_local_ip | |
|
41 | 41 | from IPython.utils.path import get_ipython_dir |
|
42 | 42 | from IPython.utils.py3compat import cast_bytes |
|
43 | 43 | from IPython.utils.traitlets import (HasTraits, Integer, Instance, Unicode, |
|
44 | 44 | Dict, List, Bool, Set, Any) |
|
45 | 45 | from IPython.external.decorator import decorator |
|
46 | 46 | from IPython.external.ssh import tunnel |
|
47 | 47 | |
|
48 | 48 | from IPython.parallel import Reference |
|
49 | 49 | from IPython.parallel import error |
|
50 | 50 | from IPython.parallel import util |
|
51 | 51 | |
|
52 | 52 | from IPython.kernel.zmq.session import Session, Message |
|
53 | 53 | from IPython.kernel.zmq import serialize |
|
54 | 54 | |
|
55 | 55 | from .asyncresult import AsyncResult, AsyncHubResult |
|
56 | 56 | from .view import DirectView, LoadBalancedView |
|
57 | 57 | |
|
58 | 58 | if sys.version_info[0] >= 3: |
|
59 | 59 | # xrange is used in a couple 'isinstance' tests in py2 |
|
60 | 60 | # should be just 'range' in 3k |
|
61 | 61 | xrange = range |
|
62 | 62 | |
|
63 | 63 | #-------------------------------------------------------------------------- |
|
64 | 64 | # Decorators for Client methods |
|
65 | 65 | #-------------------------------------------------------------------------- |
|
66 | 66 | |
|
67 | 67 | @decorator |
|
68 | 68 | def spin_first(f, self, *args, **kwargs): |
|
69 | 69 | """Call spin() to sync state prior to calling the method.""" |
|
70 | 70 | self.spin() |
|
71 | 71 | return f(self, *args, **kwargs) |
|
72 | 72 | |
|
73 | 73 | |
|
74 | 74 | #-------------------------------------------------------------------------- |
|
75 | 75 | # Classes |
|
76 | 76 | #-------------------------------------------------------------------------- |
|
77 | 77 | |
|
78 | 78 | |
|
79 | 79 | class ExecuteReply(RichOutput): |
|
80 | 80 | """wrapper for finished Execute results""" |
|
81 | 81 | def __init__(self, msg_id, content, metadata): |
|
82 | 82 | self.msg_id = msg_id |
|
83 | 83 | self._content = content |
|
84 | 84 | self.execution_count = content['execution_count'] |
|
85 | 85 | self.metadata = metadata |
|
86 | 86 | |
|
87 | 87 | # RichOutput overrides |
|
88 | 88 | |
|
89 | 89 | @property |
|
90 | 90 | def source(self): |
|
91 | 91 | pyout = self.metadata['pyout'] |
|
92 | 92 | if pyout: |
|
93 | 93 | return pyout.get('source', '') |
|
94 | 94 | |
|
95 | 95 | @property |
|
96 | 96 | def data(self): |
|
97 | 97 | pyout = self.metadata['pyout'] |
|
98 | 98 | if pyout: |
|
99 | 99 | return pyout.get('data', {}) |
|
100 | 100 | |
|
101 | 101 | @property |
|
102 | 102 | def _metadata(self): |
|
103 | 103 | pyout = self.metadata['pyout'] |
|
104 | 104 | if pyout: |
|
105 | 105 | return pyout.get('metadata', {}) |
|
106 | 106 | |
|
107 | 107 | def display(self): |
|
108 | 108 | from IPython.display import publish_display_data |
|
109 | 109 | publish_display_data(self.source, self.data, self.metadata) |
|
110 | 110 | |
|
111 | 111 | def _repr_mime_(self, mime): |
|
112 | 112 | if mime not in self.data: |
|
113 | 113 | return |
|
114 | 114 | data = self.data[mime] |
|
115 | 115 | if mime in self._metadata: |
|
116 | 116 | return data, self._metadata[mime] |
|
117 | 117 | else: |
|
118 | 118 | return data |
|
119 | 119 | |
|
120 | 120 | def __getitem__(self, key): |
|
121 | 121 | return self.metadata[key] |
|
122 | 122 | |
|
123 | 123 | def __getattr__(self, key): |
|
124 | 124 | if key not in self.metadata: |
|
125 | 125 | raise AttributeError(key) |
|
126 | 126 | return self.metadata[key] |
|
127 | 127 | |
|
128 | 128 | def __repr__(self): |
|
129 | 129 | pyout = self.metadata['pyout'] or {'data':{}} |
|
130 | 130 | text_out = pyout['data'].get('text/plain', '') |
|
131 | 131 | if len(text_out) > 32: |
|
132 | 132 | text_out = text_out[:29] + '...' |
|
133 | 133 | |
|
134 | 134 | return "<ExecuteReply[%i]: %s>" % (self.execution_count, text_out) |
|
135 | 135 | |
|
136 | 136 | def _repr_pretty_(self, p, cycle): |
|
137 | 137 | pyout = self.metadata['pyout'] or {'data':{}} |
|
138 | 138 | text_out = pyout['data'].get('text/plain', '') |
|
139 | 139 | |
|
140 | 140 | if not text_out: |
|
141 | 141 | return |
|
142 | 142 | |
|
143 | 143 | try: |
|
144 | 144 | ip = get_ipython() |
|
145 | 145 | except NameError: |
|
146 | 146 | colors = "NoColor" |
|
147 | 147 | else: |
|
148 | 148 | colors = ip.colors |
|
149 | 149 | |
|
150 | 150 | if colors == "NoColor": |
|
151 | 151 | out = normal = "" |
|
152 | 152 | else: |
|
153 | 153 | out = TermColors.Red |
|
154 | 154 | normal = TermColors.Normal |
|
155 | 155 | |
|
156 | 156 | if '\n' in text_out and not text_out.startswith('\n'): |
|
157 | 157 | # add newline for multiline reprs |
|
158 | 158 | text_out = '\n' + text_out |
|
159 | 159 | |
|
160 | 160 | p.text( |
|
161 | 161 | out + u'Out[%i:%i]: ' % ( |
|
162 | 162 | self.metadata['engine_id'], self.execution_count |
|
163 | 163 | ) + normal + text_out |
|
164 | 164 | ) |
|
165 | 165 | |
|
166 | 166 | |
|
167 | 167 | class Metadata(dict): |
|
168 | 168 | """Subclass of dict for initializing metadata values. |
|
169 | 169 | |
|
170 | 170 | Attribute access works on keys. |
|
171 | 171 | |
|
172 | 172 | These objects have a strict set of keys - an error will be raised if you try |
|
173 | 173 | to add new keys. |
|
174 | 174 | """ |
|
175 | 175 | def __init__(self, *args, **kwargs): |
|
176 | 176 | dict.__init__(self) |
|
177 | 177 | md = {'msg_id' : None, |
|
178 | 178 | 'submitted' : None, |
|
179 | 179 | 'started' : None, |
|
180 | 180 | 'completed' : None, |
|
181 | 181 | 'received' : None, |
|
182 | 182 | 'engine_uuid' : None, |
|
183 | 183 | 'engine_id' : None, |
|
184 | 184 | 'follow' : None, |
|
185 | 185 | 'after' : None, |
|
186 | 186 | 'status' : None, |
|
187 | 187 | |
|
188 | 188 | 'pyin' : None, |
|
189 | 189 | 'pyout' : None, |
|
190 | 190 | 'pyerr' : None, |
|
191 | 191 | 'stdout' : '', |
|
192 | 192 | 'stderr' : '', |
|
193 | 193 | 'outputs' : [], |
|
194 | 194 | 'data': {}, |
|
195 | 195 | 'outputs_ready' : False, |
|
196 | 196 | } |
|
197 | 197 | self.update(md) |
|
198 | 198 | self.update(dict(*args, **kwargs)) |
|
199 | 199 | |
|
200 | 200 | def __getattr__(self, key): |
|
201 | 201 | """getattr aliased to getitem""" |
|
202 | 202 | if key in self.iterkeys(): |
|
203 | 203 | return self[key] |
|
204 | 204 | else: |
|
205 | 205 | raise AttributeError(key) |
|
206 | 206 | |
|
207 | 207 | def __setattr__(self, key, value): |
|
208 | 208 | """setattr aliased to setitem, with strict""" |
|
209 | 209 | if key in self.iterkeys(): |
|
210 | 210 | self[key] = value |
|
211 | 211 | else: |
|
212 | 212 | raise AttributeError(key) |
|
213 | 213 | |
|
214 | 214 | def __setitem__(self, key, value): |
|
215 | 215 | """strict static key enforcement""" |
|
216 | 216 | if key in self.iterkeys(): |
|
217 | 217 | dict.__setitem__(self, key, value) |
|
218 | 218 | else: |
|
219 | 219 | raise KeyError(key) |
|
220 | 220 | |
|
221 | 221 | |
|
222 | 222 | class Client(HasTraits): |
|
223 | 223 | """A semi-synchronous client to the IPython ZMQ cluster |
|
224 | 224 | |
|
225 | 225 | Parameters |
|
226 | 226 | ---------- |
|
227 | 227 | |
|
228 | 228 | url_file : str/unicode; path to ipcontroller-client.json |
|
229 | 229 | This JSON file should contain all the information needed to connect to a cluster, |
|
230 | 230 | and is likely the only argument needed. |
|
231 | 231 | Connection information for the Hub's registration. If a json connector |
|
232 | 232 | file is given, then likely no further configuration is necessary. |
|
233 | 233 | [Default: use profile] |
|
234 | 234 | profile : bytes |
|
235 | 235 | The name of the Cluster profile to be used to find connector information. |
|
236 | 236 | If run from an IPython application, the default profile will be the same |
|
237 | 237 | as the running application, otherwise it will be 'default'. |
|
238 | 238 | cluster_id : str |
|
239 | 239 | String id to be added to runtime files, to prevent name collisions when using |
|
240 | 240 | multiple clusters with a single profile simultaneously. |
|
241 | 241 | When set, will look for files named like: 'ipcontroller-<cluster_id>-client.json' |
|
242 | 242 | Since this is text inserted into filenames, typical recommendations apply: |
|
243 | 243 | Simple character strings are ideal, and spaces are not recommended (but |
|
244 | 244 | should generally work) |
|
245 | 245 | context : zmq.Context |
|
246 | 246 | Pass an existing zmq.Context instance, otherwise the client will create its own. |
|
247 | 247 | debug : bool |
|
248 | 248 | flag for lots of message printing for debug purposes |
|
249 | 249 | timeout : int/float |
|
250 | 250 | time (in seconds) to wait for connection replies from the Hub |
|
251 | 251 | [Default: 10] |
|
252 | 252 | |
|
253 | 253 | #-------------- session related args ---------------- |
|
254 | 254 | |
|
255 | 255 | config : Config object |
|
256 | 256 | If specified, this will be relayed to the Session for configuration |
|
257 | 257 | username : str |
|
258 | 258 | set username for the session object |
|
259 | 259 | |
|
260 | 260 | #-------------- ssh related args ---------------- |
|
261 | 261 | # These are args for configuring the ssh tunnel to be used |
|
262 | 262 | # credentials are used to forward connections over ssh to the Controller |
|
263 | 263 | # Note that the ip given in `addr` needs to be relative to sshserver |
|
264 | 264 | # The most basic case is to leave addr as pointing to localhost (127.0.0.1), |
|
265 | 265 | # and set sshserver as the same machine the Controller is on. However, |
|
266 | 266 | # the only requirement is that sshserver is able to see the Controller |
|
267 | 267 | # (i.e. is within the same trusted network). |
|
268 | 268 | |
|
269 | 269 | sshserver : str |
|
270 | 270 | A string of the form passed to ssh, i.e. 'server.tld' or 'user@server.tld:port' |
|
271 | 271 | If keyfile or password is specified, and this is not, it will default to |
|
272 | 272 | the ip given in addr. |
|
273 | 273 | sshkey : str; path to ssh private key file |
|
274 | 274 | This specifies a key to be used in ssh login, default None. |
|
275 | 275 | Regular default ssh keys will be used without specifying this argument. |
|
276 | 276 | password : str |
|
277 | 277 | Your ssh password to sshserver. Note that if this is left None, |
|
278 | 278 | you will be prompted for it if passwordless key based login is unavailable. |
|
279 | 279 | paramiko : bool |
|
280 | 280 | flag for whether to use paramiko instead of shell ssh for tunneling. |
|
281 | 281 | [default: True on win32, False otherwise] |
|
282 | 282 | |
|
283 | 283 | |
|
284 | 284 | Attributes |
|
285 | 285 | ---------- |
|
286 | 286 | |
|
287 | 287 | ids : list of int engine IDs |
|
288 | 288 | requesting the ids attribute always synchronizes |
|
289 | 289 | the registration state. To request ids without synchronization, |
|
290 | 290 | use semi-private _ids attributes. |
|
291 | 291 | |
|
292 | 292 | history : list of msg_ids |
|
293 | 293 | a list of msg_ids, keeping track of all the execution |
|
294 | 294 | messages you have submitted in order. |
|
295 | 295 | |
|
296 | 296 | outstanding : set of msg_ids |
|
297 | 297 | a set of msg_ids that have been submitted, but whose |
|
298 | 298 | results have not yet been received. |
|
299 | 299 | |
|
300 | 300 | results : dict |
|
301 | 301 | a dict of all our results, keyed by msg_id |
|
302 | 302 | |
|
303 | 303 | block : bool |
|
304 | 304 | determines default behavior when block not specified |
|
305 | 305 | in execution methods |
|
306 | 306 | |
|
307 | 307 | Methods |
|
308 | 308 | ------- |
|
309 | 309 | |
|
310 | 310 | spin |
|
311 | 311 | flushes incoming results and registration state changes |
|
312 | 312 | control methods spin, and requesting `ids` also keeps state up to date |
|
313 | 313 | |
|
314 | 314 | wait |
|
315 | 315 | wait on one or more msg_ids |
|
316 | 316 | |
|
317 | 317 | execution methods |
|
318 | 318 | apply |
|
319 | 319 | legacy: execute, run |
|
320 | 320 | |
|
321 | 321 | data movement |
|
322 | 322 | push, pull, scatter, gather |
|
323 | 323 | |
|
324 | 324 | query methods |
|
325 | 325 | queue_status, get_result, purge, result_status |
|
326 | 326 | |
|
327 | 327 | control methods |
|
328 | 328 | abort, shutdown |
|
329 | 329 | |
|
330 | 330 | """ |
|
331 | 331 | |
|
332 | 332 | |
|
333 | 333 | block = Bool(False) |
|
334 | 334 | outstanding = Set() |
|
335 | 335 | results = Instance('collections.defaultdict', (dict,)) |
|
336 | 336 | metadata = Instance('collections.defaultdict', (Metadata,)) |
|
337 | 337 | history = List() |
|
338 | 338 | debug = Bool(False) |
|
339 | 339 | _spin_thread = Any() |
|
340 | 340 | _stop_spinning = Any() |
|
341 | 341 | |
|
342 | 342 | profile=Unicode() |
|
343 | 343 | def _profile_default(self): |
|
344 | 344 | if BaseIPythonApplication.initialized(): |
|
345 | 345 | # an IPython app *might* be running, try to get its profile |
|
346 | 346 | try: |
|
347 | 347 | return BaseIPythonApplication.instance().profile |
|
348 | 348 | except (AttributeError, MultipleInstanceError): |
|
349 | 349 | # could be a *different* subclass of config.Application, |
|
350 | 350 | # which would raise one of these two errors. |
|
351 | 351 | return u'default' |
|
352 | 352 | else: |
|
353 | 353 | return u'default' |
|
354 | 354 | |
|
355 | 355 | |
|
356 | 356 | _outstanding_dict = Instance('collections.defaultdict', (set,)) |
|
357 | 357 | _ids = List() |
|
358 | 358 | _connected=Bool(False) |
|
359 | 359 | _ssh=Bool(False) |
|
360 | 360 | _context = Instance('zmq.Context') |
|
361 | 361 | _config = Dict() |
|
362 | 362 | _engines=Instance(util.ReverseDict, (), {}) |
|
363 | 363 | # _hub_socket=Instance('zmq.Socket') |
|
364 | 364 | _query_socket=Instance('zmq.Socket') |
|
365 | 365 | _control_socket=Instance('zmq.Socket') |
|
366 | 366 | _iopub_socket=Instance('zmq.Socket') |
|
367 | 367 | _notification_socket=Instance('zmq.Socket') |
|
368 | 368 | _mux_socket=Instance('zmq.Socket') |
|
369 | 369 | _task_socket=Instance('zmq.Socket') |
|
370 | 370 | _task_scheme=Unicode() |
|
371 | 371 | _closed = False |
|
372 | 372 | _ignored_control_replies=Integer(0) |
|
373 | 373 | _ignored_hub_replies=Integer(0) |
|
374 | 374 | |
|
375 | 375 | def __new__(self, *args, **kw): |
|
376 | 376 | # don't raise on positional args |
|
377 | 377 | return HasTraits.__new__(self, **kw) |
|
378 | 378 | |
|
379 | 379 | def __init__(self, url_file=None, profile=None, profile_dir=None, ipython_dir=None, |
|
380 | 380 | context=None, debug=False, |
|
381 | 381 | sshserver=None, sshkey=None, password=None, paramiko=None, |
|
382 | 382 | timeout=10, cluster_id=None, **extra_args |
|
383 | 383 | ): |
|
384 | 384 | if profile: |
|
385 | 385 | super(Client, self).__init__(debug=debug, profile=profile) |
|
386 | 386 | else: |
|
387 | 387 | super(Client, self).__init__(debug=debug) |
|
388 | 388 | if context is None: |
|
389 | 389 | context = zmq.Context.instance() |
|
390 | 390 | self._context = context |
|
391 | 391 | self._stop_spinning = Event() |
|
392 | 392 | |
|
393 | 393 | if 'url_or_file' in extra_args: |
|
394 | 394 | url_file = extra_args['url_or_file'] |
|
395 | 395 | warnings.warn("url_or_file arg no longer supported, use url_file", DeprecationWarning) |
|
396 | 396 | |
|
397 | 397 | if url_file and util.is_url(url_file): |
|
398 | 398 | raise ValueError("single urls cannot be specified, url-files must be used.") |
|
399 | 399 | |
|
400 | 400 | self._setup_profile_dir(self.profile, profile_dir, ipython_dir) |
|
401 | 401 | |
|
402 | 402 | if self._cd is not None: |
|
403 | 403 | if url_file is None: |
|
404 | 404 | if not cluster_id: |
|
405 | 405 | client_json = 'ipcontroller-client.json' |
|
406 | 406 | else: |
|
407 | 407 | client_json = 'ipcontroller-%s-client.json' % cluster_id |
|
408 | 408 | url_file = pjoin(self._cd.security_dir, client_json) |
|
409 | 409 | if url_file is None: |
|
410 | 410 | raise ValueError( |
|
411 | 411 | "I can't find enough information to connect to a hub!" |
|
412 | 412 | " Please specify at least one of url_file or profile." |
|
413 | 413 | ) |
|
414 | 414 | |
|
415 | 415 | with open(url_file) as f: |
|
416 | 416 | cfg = json.load(f) |
|
417 | 417 | |
|
418 | 418 | self._task_scheme = cfg['task_scheme'] |
|
419 | 419 | |
|
420 | 420 | # sync defaults from args, json: |
|
421 | 421 | if sshserver: |
|
422 | 422 | cfg['ssh'] = sshserver |
|
423 | 423 | |
|
424 | 424 | location = cfg.setdefault('location', None) |
|
425 | 425 | |
|
426 | 426 | proto,addr = cfg['interface'].split('://') |
|
427 | 427 | addr = util.disambiguate_ip_address(addr, location) |
|
428 | 428 | cfg['interface'] = "%s://%s" % (proto, addr) |
|
429 | 429 | |
|
430 | 430 | # turn interface,port into full urls: |
|
431 | 431 | for key in ('control', 'task', 'mux', 'iopub', 'notification', 'registration'): |
|
432 | 432 | cfg[key] = cfg['interface'] + ':%i' % cfg[key] |
|
433 | 433 | |
|
434 | 434 | url = cfg['registration'] |
|
435 | 435 | |
|
436 |  if location is not None and addr == LOCALHOST: | 

436 | if location is not None and addr == localhost(): | 
|
437 | 437 | # location specified, and connection is expected to be local |
|
438 |  if location not in LOCAL_IPS and not sshserver: | 

438 | if not is_local_ip(location) and not sshserver: | 
|
439 | 439 | # load ssh from JSON *only* if the controller is not on |
|
440 | 440 | # this machine |
|
441 | 441 | sshserver=cfg['ssh'] |
|
442 |  if location not in LOCAL_IPS and not sshserver: | 

442 | if not is_local_ip(location) and not sshserver: | 
|
443 | 443 | # warn if no ssh specified, but SSH is probably needed |
|
444 | 444 | # This is only a warning, because the most likely cause |
|
445 | 445 | # is a local Controller on a laptop whose IP is dynamic |
|
446 | 446 | warnings.warn(""" |
|
447 | 447 | Controller appears to be listening on localhost, but not on this machine. |
|
448 | 448 | If this is true, you should specify Client(...,sshserver='you@%s') |
|
449 | 449 | or instruct your controller to listen on an external IP."""%location, |
|
450 | 450 | RuntimeWarning) |
|
451 | 451 | elif not sshserver: |
|
452 | 452 | # otherwise sync with cfg |
|
453 | 453 | sshserver = cfg['ssh'] |
|
454 | 454 | |
|
455 | 455 | self._config = cfg |
|
456 | 456 | |
|
457 | 457 | self._ssh = bool(sshserver or sshkey or password) |
|
458 | 458 | if self._ssh and sshserver is None: |
|
459 | 459 | # default to ssh via localhost |
|
460 | 460 | sshserver = addr |
|
461 | 461 | if self._ssh and password is None: |
|
462 | 462 | if tunnel.try_passwordless_ssh(sshserver, sshkey, paramiko): |
|
463 | 463 | password=False |
|
464 | 464 | else: |
|
465 | 465 | password = getpass("SSH Password for %s: "%sshserver) |
|
466 | 466 | ssh_kwargs = dict(keyfile=sshkey, password=password, paramiko=paramiko) |
|
467 | 467 | |
|
468 | 468 | # configure and construct the session |
|
469 | 469 | try: |
|
470 | 470 | extra_args['packer'] = cfg['pack'] |
|
471 | 471 | extra_args['unpacker'] = cfg['unpack'] |
|
472 | 472 | extra_args['key'] = cast_bytes(cfg['key']) |
|
473 | 473 | extra_args['signature_scheme'] = cfg['signature_scheme'] |
|
474 | 474 | except KeyError as exc: |
|
475 | 475 | msg = '\n'.join([ |
|
476 | 476 | "Connection file is invalid (missing '{}'), possibly from an old version of IPython.", |
|
477 | 477 | "If you are reusing connection files, remove them and start ipcontroller again." |
|
478 | 478 | ]) |
|
479 | 479 | raise ValueError(msg.format(exc.message)) |
|
480 | 480 | |
|
481 | 481 | self.session = Session(**extra_args) |
|
482 | 482 | |
|
483 | 483 | self._query_socket = self._context.socket(zmq.DEALER) |
|
484 | 484 | |
|
485 | 485 | if self._ssh: |
|
486 | 486 | tunnel.tunnel_connection(self._query_socket, cfg['registration'], sshserver, **ssh_kwargs) |
|
487 | 487 | else: |
|
488 | 488 | self._query_socket.connect(cfg['registration']) |
|
489 | 489 | |
|
490 | 490 | self.session.debug = self.debug |
|
491 | 491 | |
|
492 | 492 | self._notification_handlers = {'registration_notification' : self._register_engine, |
|
493 | 493 | 'unregistration_notification' : self._unregister_engine, |
|
494 | 494 | 'shutdown_notification' : lambda msg: self.close(), |
|
495 | 495 | } |
|
496 | 496 | self._queue_handlers = {'execute_reply' : self._handle_execute_reply, |
|
497 | 497 | 'apply_reply' : self._handle_apply_reply} |
|
498 | 498 | |
|
499 | 499 | try: |
|
500 | 500 | self._connect(sshserver, ssh_kwargs, timeout) |
|
501 | 501 | except: |
|
502 | 502 | self.close(linger=0) |
|
503 | 503 | raise |
|
504 | 504 | |
|
505 | 505 | # last step: setup magics, if we are in IPython: |
|
506 | 506 | |
|
507 | 507 | try: |
|
508 | 508 | ip = get_ipython() |
|
509 | 509 | except NameError: |
|
510 | 510 | return |
|
511 | 511 | else: |
|
512 | 512 | if 'px' not in ip.magics_manager.magics: |
|
513 | 513 | # in IPython but we are the first Client. |
|
514 | 514 | # activate a default view for parallel magics. |
|
515 | 515 | self.activate() |
|
516 | 516 | |
|
517 | 517 | def __del__(self): |
|
518 | 518 | """cleanup sockets, but _not_ context.""" |
|
519 | 519 | self.close() |
|
520 | 520 | |
|
521 | 521 | def _setup_profile_dir(self, profile, profile_dir, ipython_dir): |
|
522 | 522 | if ipython_dir is None: |
|
523 | 523 | ipython_dir = get_ipython_dir() |
|
524 | 524 | if profile_dir is not None: |
|
525 | 525 | try: |
|
526 | 526 | self._cd = ProfileDir.find_profile_dir(profile_dir) |
|
527 | 527 | return |
|
528 | 528 | except ProfileDirError: |
|
529 | 529 | pass |
|
530 | 530 | elif profile is not None: |
|
531 | 531 | try: |
|
532 | 532 | self._cd = ProfileDir.find_profile_dir_by_name( |
|
533 | 533 | ipython_dir, profile) |
|
534 | 534 | return |
|
535 | 535 | except ProfileDirError: |
|
536 | 536 | pass |
|
537 | 537 | self._cd = None |
|
538 | 538 | |
|
539 | 539 | def _update_engines(self, engines): |
|
540 | 540 | """Update our engines dict and _ids from a dict of the form: {id:uuid}.""" |
|
541 | 541 | for k,v in engines.iteritems(): |
|
542 | 542 | eid = int(k) |
|
543 | 543 | if eid not in self._engines: |
|
544 | 544 | self._ids.append(eid) |
|
545 | 545 | self._engines[eid] = v |
|
546 | 546 | self._ids = sorted(self._ids) |
|
547 | 547 | if sorted(self._engines.keys()) != range(len(self._engines)) and \ |
|
548 | 548 | self._task_scheme == 'pure' and self._task_socket: |
|
549 | 549 | self._stop_scheduling_tasks() |
|
550 | 550 | |
|
551 | 551 | def _stop_scheduling_tasks(self): |
|
552 | 552 | """Stop scheduling tasks because an engine has been unregistered |
|
553 | 553 | from a pure ZMQ scheduler. |
|
554 | 554 | """ |
|
555 | 555 | self._task_socket.close() |
|
556 | 556 | self._task_socket = None |
|
557 | 557 | msg = "An engine has been unregistered, and we are using pure " +\ |
|
558 | 558 | "ZMQ task scheduling. Task farming will be disabled." |
|
559 | 559 | if self.outstanding: |
|
560 | 560 | msg += " If you were running tasks when this happened, " +\ |
|
561 | 561 | "some `outstanding` msg_ids may never resolve." |
|
562 | 562 | warnings.warn(msg, RuntimeWarning) |
|
563 | 563 | |
|
564 | 564 | def _build_targets(self, targets): |
|
565 | 565 | """Turn valid target IDs or 'all' into two lists: |
|
566 | 566 | (int_ids, uuids). |
|
567 | 567 | """ |
|
568 | 568 | if not self._ids: |
|
569 | 569 | # flush notification socket if no engines yet, just in case |
|
570 | 570 | if not self.ids: |
|
571 | 571 | raise error.NoEnginesRegistered("Can't build targets without any engines") |
|
572 | 572 | |
|
573 | 573 | if targets is None: |
|
574 | 574 | targets = self._ids |
|
575 | 575 | elif isinstance(targets, basestring): |
|
576 | 576 | if targets.lower() == 'all': |
|
577 | 577 | targets = self._ids |
|
578 | 578 | else: |
|
579 | 579 | raise TypeError("%r not valid str target, must be 'all'"%(targets)) |
|
580 | 580 | elif isinstance(targets, int): |
|
581 | 581 | if targets < 0: |
|
582 | 582 | targets = self.ids[targets] |
|
583 | 583 | if targets not in self._ids: |
|
584 | 584 | raise IndexError("No such engine: %i"%targets) |
|
585 | 585 | targets = [targets] |
|
586 | 586 | |
|
587 | 587 | if isinstance(targets, slice): |
|
588 | 588 | indices = range(len(self._ids))[targets] |
|
589 | 589 | ids = self.ids |
|
590 | 590 | targets = [ ids[i] for i in indices ] |
|
591 | 591 | |
|
592 | 592 | if not isinstance(targets, (tuple, list, xrange)): |
|
593 | 593 | raise TypeError("targets by int/slice/collection of ints only, not %s"%(type(targets))) |
|
594 | 594 | |
|
595 | 595 | return [cast_bytes(self._engines[t]) for t in targets], list(targets) |
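
The accepted `targets` forms, as a usage sketch (assumes a running ipcluster with engines 0-3 registered and a connected client; `rc` and the ids shown are illustrative):

    from IPython.parallel import Client
    rc = Client()
    rc._build_targets('all')           # every registered engine
    rc._build_targets(0)               # ([b'<uuid-of-engine-0>'], [0])
    rc._build_targets([0, 2])          # explicit list of engine ids
    rc._build_targets(slice(0, 4, 2))  # slice over the current id list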
|
596 | 596 | |
|
597 | 597 | def _connect(self, sshserver, ssh_kwargs, timeout): |
|
598 | 598 | """setup all our socket connections to the cluster. This is called from |
|
599 | 599 | __init__.""" |
|
600 | 600 | |
|
601 | 601 | # Maybe allow reconnecting? |
|
602 | 602 | if self._connected: |
|
603 | 603 | return |
|
604 | 604 | self._connected=True |
|
605 | 605 | |
|
606 | 606 | def connect_socket(s, url): |
|
607 | 607 | if self._ssh: |
|
608 | 608 | return tunnel.tunnel_connection(s, url, sshserver, **ssh_kwargs) |
|
609 | 609 | else: |
|
610 | 610 | return s.connect(url) |
|
611 | 611 | |
|
612 | 612 | self.session.send(self._query_socket, 'connection_request') |
|
613 | 613 | # use Poller because zmq.select has wrong units in pyzmq 2.1.7 |
|
614 | 614 | poller = zmq.Poller() |
|
615 | 615 | poller.register(self._query_socket, zmq.POLLIN) |
|
616 | 616 | # poll expects milliseconds, timeout is seconds |
|
617 | 617 | evts = poller.poll(timeout*1000) |
|
618 | 618 | if not evts: |
|
619 | 619 | raise error.TimeoutError("Hub connection request timed out") |
|
620 | 620 | idents,msg = self.session.recv(self._query_socket,mode=0) |
|
621 | 621 | if self.debug: |
|
622 | 622 | pprint(msg) |
|
623 | 623 | content = msg['content'] |
|
624 | 624 | # self._config['registration'] = dict(content) |
|
625 | 625 | cfg = self._config |
|
626 | 626 | if content['status'] == 'ok': |
|
627 | 627 | self._mux_socket = self._context.socket(zmq.DEALER) |
|
628 | 628 | connect_socket(self._mux_socket, cfg['mux']) |
|
629 | 629 | |
|
630 | 630 | self._task_socket = self._context.socket(zmq.DEALER) |
|
631 | 631 | connect_socket(self._task_socket, cfg['task']) |
|
632 | 632 | |
|
633 | 633 | self._notification_socket = self._context.socket(zmq.SUB) |
|
634 | 634 | self._notification_socket.setsockopt(zmq.SUBSCRIBE, b'') |
|
635 | 635 | connect_socket(self._notification_socket, cfg['notification']) |
|
636 | 636 | |
|
637 | 637 | self._control_socket = self._context.socket(zmq.DEALER) |
|
638 | 638 | connect_socket(self._control_socket, cfg['control']) |
|
639 | 639 | |
|
640 | 640 | self._iopub_socket = self._context.socket(zmq.SUB) |
|
641 | 641 | self._iopub_socket.setsockopt(zmq.SUBSCRIBE, b'') |
|
642 | 642 | connect_socket(self._iopub_socket, cfg['iopub']) |
|
643 | 643 | |
|
644 | 644 | self._update_engines(dict(content['engines'])) |
|
645 | 645 | else: |
|
646 | 646 | self._connected = False |
|
647 | 647 | raise Exception("Failed to connect!") |
|
648 | 648 | |
|
649 | 649 | #-------------------------------------------------------------------------- |
|
650 | 650 | # handlers and callbacks for incoming messages |
|
651 | 651 | #-------------------------------------------------------------------------- |
|
652 | 652 | |
|
653 | 653 | def _unwrap_exception(self, content): |
|
654 | 654 | """unwrap exception, and remap engine_id to int.""" |
|
655 | 655 | e = error.unwrap_exception(content) |
|
656 | 656 | # print e.traceback |
|
657 | 657 | if e.engine_info: |
|
658 | 658 | e_uuid = e.engine_info['engine_uuid'] |
|
659 | 659 | eid = self._engines[e_uuid] |
|
660 | 660 | e.engine_info['engine_id'] = eid |
|
661 | 661 | return e |
|
662 | 662 | |
|
663 | 663 | def _extract_metadata(self, msg): |
|
664 | 664 | header = msg['header'] |
|
665 | 665 | parent = msg['parent_header'] |
|
666 | 666 | msg_meta = msg['metadata'] |
|
667 | 667 | content = msg['content'] |
|
668 | 668 | md = {'msg_id' : parent['msg_id'], |
|
669 | 669 | 'received' : datetime.now(), |
|
670 | 670 | 'engine_uuid' : msg_meta.get('engine', None), |
|
671 | 671 | 'follow' : msg_meta.get('follow', []), |
|
672 | 672 | 'after' : msg_meta.get('after', []), |
|
673 | 673 | 'status' : content['status'], |
|
674 | 674 | } |
|
675 | 675 | |
|
676 | 676 | if md['engine_uuid'] is not None: |
|
677 | 677 | md['engine_id'] = self._engines.get(md['engine_uuid'], None) |
|
678 | 678 | |
|
679 | 679 | if 'date' in parent: |
|
680 | 680 | md['submitted'] = parent['date'] |
|
681 | 681 | if 'started' in msg_meta: |
|
682 | 682 | md['started'] = msg_meta['started'] |
|
683 | 683 | if 'date' in header: |
|
684 | 684 | md['completed'] = header['date'] |
|
685 | 685 | return md |
|
686 | 686 | |
|
687 | 687 | def _register_engine(self, msg): |
|
688 | 688 | """Register a new engine, and update our connection info.""" |
|
689 | 689 | content = msg['content'] |
|
690 | 690 | eid = content['id'] |
|
691 | 691 | d = {eid : content['uuid']} |
|
692 | 692 | self._update_engines(d) |
|
693 | 693 | |
|
694 | 694 | def _unregister_engine(self, msg): |
|
695 | 695 | """Unregister an engine that has died.""" |
|
696 | 696 | content = msg['content'] |
|
697 | 697 | eid = int(content['id']) |
|
698 | 698 | if eid in self._ids: |
|
699 | 699 | self._ids.remove(eid) |
|
700 | 700 | uuid = self._engines.pop(eid) |
|
701 | 701 | |
|
702 | 702 | self._handle_stranded_msgs(eid, uuid) |
|
703 | 703 | |
|
704 | 704 | if self._task_socket and self._task_scheme == 'pure': |
|
705 | 705 | self._stop_scheduling_tasks() |
|
706 | 706 | |
|
707 | 707 | def _handle_stranded_msgs(self, eid, uuid): |
|
708 | 708 | """Handle messages known to be on an engine when the engine unregisters. |
|
709 | 709 | |
|
710 | 710 | It is possible that this will fire prematurely - that is, an engine will |
|
711 | 711 | go down after completing a result, and the client will be notified |
|
712 | 712 | of the unregistration and later receive the successful result. |
|
713 | 713 | """ |
|
714 | 714 | |
|
715 | 715 | outstanding = self._outstanding_dict[uuid] |
|
716 | 716 | |
|
717 | 717 | for msg_id in list(outstanding): |
|
718 | 718 | if msg_id in self.results: |
|
719 | 719 | # we already have this result 
|
720 | 720 | continue |
|
721 | 721 | try: |
|
722 | 722 | raise error.EngineError("Engine %r died while running task %r"%(eid, msg_id)) |
|
723 | 723 | except: |
|
724 | 724 | content = error.wrap_exception() |
|
725 | 725 | # build a fake message: |
|
726 | 726 | msg = self.session.msg('apply_reply', content=content) |
|
727 | 727 | msg['parent_header']['msg_id'] = msg_id |
|
728 | 728 | msg['metadata']['engine'] = uuid |
|
729 | 729 | self._handle_apply_reply(msg) |
|
730 | 730 | |
|
731 | 731 | def _handle_execute_reply(self, msg): |
|
732 | 732 | """Save the reply to an execute_request into our results. |
|
733 | 733 | |
|
734 | 734 | execute messages are never actually used. apply is used instead. |
|
735 | 735 | """ |
|
736 | 736 | |
|
737 | 737 | parent = msg['parent_header'] |
|
738 | 738 | msg_id = parent['msg_id'] |
|
739 | 739 | if msg_id not in self.outstanding: |
|
740 | 740 | if msg_id in self.history: |
|
741 | 741 | print ("got stale result: %s"%msg_id) |
|
742 | 742 | else: |
|
743 | 743 | print ("got unknown result: %s"%msg_id) |
|
744 | 744 | else: |
|
745 | 745 | self.outstanding.remove(msg_id) |
|
746 | 746 | |
|
747 | 747 | content = msg['content'] |
|
748 | 748 | header = msg['header'] |
|
749 | 749 | |
|
750 | 750 | # construct metadata: |
|
751 | 751 | md = self.metadata[msg_id] |
|
752 | 752 | md.update(self._extract_metadata(msg)) |
|
753 | 753 | # is this redundant? |
|
754 | 754 | self.metadata[msg_id] = md |
|
755 | 755 | |
|
756 | 756 | e_outstanding = self._outstanding_dict[md['engine_uuid']] |
|
757 | 757 | if msg_id in e_outstanding: |
|
758 | 758 | e_outstanding.remove(msg_id) |
|
759 | 759 | |
|
760 | 760 | # construct result: |
|
761 | 761 | if content['status'] == 'ok': |
|
762 | 762 | self.results[msg_id] = ExecuteReply(msg_id, content, md) |
|
763 | 763 | elif content['status'] == 'aborted': |
|
764 | 764 | self.results[msg_id] = error.TaskAborted(msg_id) |
|
765 | 765 | elif content['status'] == 'resubmitted': |
|
766 | 766 | # TODO: handle resubmission |
|
767 | 767 | pass |
|
768 | 768 | else: |
|
769 | 769 | self.results[msg_id] = self._unwrap_exception(content) |
|
770 | 770 | |
|
771 | 771 | def _handle_apply_reply(self, msg): |
|
772 | 772 | """Save the reply to an apply_request into our results.""" |
|
773 | 773 | parent = msg['parent_header'] |
|
774 | 774 | msg_id = parent['msg_id'] |
|
775 | 775 | if msg_id not in self.outstanding: |
|
776 | 776 | if msg_id in self.history: |
|
777 | 777 | print ("got stale result: %s"%msg_id) |
|
778 | 778 | print self.results[msg_id] |
|
779 | 779 | print msg |
|
780 | 780 | else: |
|
781 | 781 | print ("got unknown result: %s"%msg_id) |
|
782 | 782 | else: |
|
783 | 783 | self.outstanding.remove(msg_id) |
|
784 | 784 | content = msg['content'] |
|
785 | 785 | header = msg['header'] |
|
786 | 786 | |
|
787 | 787 | # construct metadata: |
|
788 | 788 | md = self.metadata[msg_id] |
|
789 | 789 | md.update(self._extract_metadata(msg)) |
|
790 | 790 | # is this redundant? |
|
791 | 791 | self.metadata[msg_id] = md |
|
792 | 792 | |
|
793 | 793 | e_outstanding = self._outstanding_dict[md['engine_uuid']] |
|
794 | 794 | if msg_id in e_outstanding: |
|
795 | 795 | e_outstanding.remove(msg_id) |
|
796 | 796 | |
|
797 | 797 | # construct result: |
|
798 | 798 | if content['status'] == 'ok': |
|
799 | 799 | self.results[msg_id] = serialize.unserialize_object(msg['buffers'])[0] |
|
800 | 800 | elif content['status'] == 'aborted': |
|
801 | 801 | self.results[msg_id] = error.TaskAborted(msg_id) |
|
802 | 802 | elif content['status'] == 'resubmitted': |
|
803 | 803 | # TODO: handle resubmission |
|
804 | 804 | pass |
|
805 | 805 | else: |
|
806 | 806 | self.results[msg_id] = self._unwrap_exception(content) |
|
807 | 807 | |
|
808 | 808 | def _flush_notifications(self): |
|
809 | 809 | """Flush notifications of engine registrations waiting |
|
810 | 810 | in ZMQ queue.""" |
|
811 | 811 | idents,msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK) |
|
812 | 812 | while msg is not None: |
|
813 | 813 | if self.debug: |
|
814 | 814 | pprint(msg) |
|
815 | 815 | msg_type = msg['header']['msg_type'] |
|
816 | 816 | handler = self._notification_handlers.get(msg_type, None) |
|
817 | 817 | if handler is None: |
|
818 | 818 | raise Exception("Unhandled message type: %s" % msg_type) |
|
819 | 819 | else: |
|
820 | 820 | handler(msg) |
|
821 | 821 | idents,msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK) |
|
822 | 822 | |
|
823 | 823 | def _flush_results(self, sock): |
|
824 | 824 | """Flush task or queue results waiting in ZMQ queue.""" |
|
825 | 825 | idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK) |
|
826 | 826 | while msg is not None: |
|
827 | 827 | if self.debug: |
|
828 | 828 | pprint(msg) |
|
829 | 829 | msg_type = msg['header']['msg_type'] |
|
830 | 830 | handler = self._queue_handlers.get(msg_type, None) |
|
831 | 831 | if handler is None: |
|
832 | 832 | raise Exception("Unhandled message type: %s" % msg_type) |
|
833 | 833 | else: |
|
834 | 834 | handler(msg) |
|
835 | 835 | idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK) |
|
836 | 836 | |
|
837 | 837 | def _flush_control(self, sock): |
|
838 | 838 | """Flush replies from the control channel waiting |
|
839 | 839 | in the ZMQ queue. |
|
840 | 840 | |
|
841 | 841 | Currently: ignore them.""" |
|
842 | 842 | if self._ignored_control_replies <= 0: |
|
843 | 843 | return |
|
844 | 844 | idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK) |
|
845 | 845 | while msg is not None: |
|
846 | 846 | self._ignored_control_replies -= 1 |
|
847 | 847 | if self.debug: |
|
848 | 848 | pprint(msg) |
|
849 | 849 | idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK) |
|
850 | 850 | |
|
851 | 851 | def _flush_ignored_control(self): |
|
852 | 852 | """flush ignored control replies""" |
|
853 | 853 | while self._ignored_control_replies > 0: |
|
854 | 854 | self.session.recv(self._control_socket) |
|
855 | 855 | self._ignored_control_replies -= 1 |
|
856 | 856 | |
|
857 | 857 | def _flush_ignored_hub_replies(self): |
|
858 | 858 | ident,msg = self.session.recv(self._query_socket, mode=zmq.NOBLOCK) |
|
859 | 859 | while msg is not None: |
|
860 | 860 | ident,msg = self.session.recv(self._query_socket, mode=zmq.NOBLOCK) |
|
861 | 861 | |
|
862 | 862 | def _flush_iopub(self, sock): |
|
863 | 863 | """Flush replies from the iopub channel waiting |
|
864 | 864 | in the ZMQ queue. |
|
865 | 865 | """ |
|
866 | 866 | idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK) |
|
867 | 867 | while msg is not None: |
|
868 | 868 | if self.debug: |
|
869 | 869 | pprint(msg) |
|
870 | 870 | parent = msg['parent_header'] |
|
871 | 871 | # ignore IOPub messages with no parent. |
|
872 | 872 | # Caused by print statements or warnings from before the first execution. |
|
873 | 873 | if not parent: |
|
874 | 874 | idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK) |
|
875 | 875 | continue |
|
876 | 876 | msg_id = parent['msg_id'] |
|
877 | 877 | content = msg['content'] |
|
878 | 878 | header = msg['header'] |
|
879 | 879 | msg_type = msg['header']['msg_type'] |
|
880 | 880 | |
|
881 | 881 | # init metadata: |
|
882 | 882 | md = self.metadata[msg_id] |
|
883 | 883 | |
|
884 | 884 | if msg_type == 'stream': |
|
885 | 885 | name = content['name'] |
|
886 | 886 | s = md[name] or '' |
|
887 | 887 | md[name] = s + content['data'] |
|
888 | 888 | elif msg_type == 'pyerr': |
|
889 | 889 | md.update({'pyerr' : self._unwrap_exception(content)}) |
|
890 | 890 | elif msg_type == 'pyin': |
|
891 | 891 | md.update({'pyin' : content['code']}) |
|
892 | 892 | elif msg_type == 'display_data': |
|
893 | 893 | md['outputs'].append(content) |
|
894 | 894 | elif msg_type == 'pyout': |
|
895 | 895 | md['pyout'] = content |
|
896 | 896 | elif msg_type == 'data_message': |
|
897 | 897 | data, remainder = serialize.unserialize_object(msg['buffers']) |
|
898 | 898 | md['data'].update(data) |
|
899 | 899 | elif msg_type == 'status': |
|
900 | 900 | # idle message comes after all outputs |
|
901 | 901 | if content['execution_state'] == 'idle': |
|
902 | 902 | md['outputs_ready'] = True |
|
903 | 903 | else: |
|
904 | 904 | # unhandled msg_type: ignore 
|
905 | 905 | pass |
|
906 | 906 | |
|
907 | 907 | # redundant? 
|
908 | 908 | self.metadata[msg_id] = md |
|
909 | 909 | |
|
910 | 910 | idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK) |
|
911 | 911 | |
|
912 | 912 | #-------------------------------------------------------------------------- |
|
913 | 913 | # len, getitem |
|
914 | 914 | #-------------------------------------------------------------------------- |
|
915 | 915 | |
|
916 | 916 | def __len__(self): |
|
917 | 917 | """len(client) returns # of engines.""" |
|
918 | 918 | return len(self.ids) |
|
919 | 919 | |
|
920 | 920 | def __getitem__(self, key): |
|
921 | 921 | """index access returns DirectView multiplexer objects |
|
922 | 922 | |
|
923 | 923 | Must be int, slice, or list/tuple/xrange of ints""" |
|
924 | 924 | if not isinstance(key, (int, slice, tuple, list, xrange)): |
|
925 | 925 | raise TypeError("key by int/slice/iterable of ints only, not %s"%(type(key))) |
|
926 | 926 | else: |
|
927 | 927 | return self.direct_view(key) |
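
Index access is the usual way to hand out DirectViews; a minimal sketch, assuming the connected `rc = Client()` setup above:

    dv_all  = rc[:]     # all engines registered right now
    dv_one  = rc[0]     # just engine 0
    dv_even = rc[::2]   # every other engine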
|
928 | 928 | |
|
929 | 929 | #-------------------------------------------------------------------------- |
|
930 | 930 | # Begin public methods |
|
931 | 931 | #-------------------------------------------------------------------------- |
|
932 | 932 | |
|
933 | 933 | @property |
|
934 | 934 | def ids(self): |
|
935 | 935 | """Always up-to-date ids property.""" |
|
936 | 936 | self._flush_notifications() |
|
937 | 937 | # always copy: |
|
938 | 938 | return list(self._ids) |
|
939 | 939 | |
|
940 | 940 | def activate(self, targets='all', suffix=''): |
|
941 | 941 | """Create a DirectView and register it with IPython magics |
|
942 | 942 | |
|
943 | 943 | Defines the magics `%px, %autopx, %pxresult, %%px` |
|
944 | 944 | |
|
945 | 945 | Parameters |
|
946 | 946 | ---------- |
|
947 | 947 | |
|
948 | 948 | targets: int, list of ints, or 'all' |
|
949 | 949 | The engines on which the view's magics will run |
|
950 | 950 | suffix: str [default: ''] |
|
951 | 951 | The suffix, if any, for the magics. This allows you to have |
|
952 | 952 | multiple views associated with parallel magics at the same time. |
|
953 | 953 | |
|
954 | 954 | e.g. ``rc.activate(targets=0, suffix='0')`` will give you |
|
955 | 955 | the magics ``%px0``, ``%pxresult0``, etc. for running magics just |
|
956 | 956 | on engine 0. |
|
957 | 957 | """ |
|
958 | 958 | view = self.direct_view(targets) |
|
959 | 959 | view.block = True |
|
960 | 960 | view.activate(suffix) |
|
961 | 961 | return view |
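
A sketch of the suffix mechanism from an interactive session (assumes at least two engines; the magic names follow the docstring above):

    rc.activate()                       # %px / %%px now target all engines
    rc.activate(targets=0, suffix='0')  # %px0 targets only engine 0
    # In [1]: %px import os
    # In [2]: %px0 print(os.getpid())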
|
962 | 962 | |
|
963 | 963 | def close(self, linger=None): |
|
964 | 964 | """Close my zmq Sockets |
|
965 | 965 | |
|
966 | 966 | If `linger` is specified, it is passed as the zmq LINGER socket option, 

967 | 967 | which bounds how long (in milliseconds) pending messages are kept before being discarded. 
|
968 | 968 | """ |
|
969 | 969 | if self._closed: |
|
970 | 970 | return |
|
971 | 971 | self.stop_spin_thread() |
|
972 | 972 | snames = [ trait for trait in self.trait_names() if trait.endswith("socket") ] |
|
973 | 973 | for name in snames: |
|
974 | 974 | socket = getattr(self, name) |
|
975 | 975 | if socket is not None and not socket.closed: |
|
976 | 976 | if linger is not None: |
|
977 | 977 | socket.close(linger=linger) |
|
978 | 978 | else: |
|
979 | 979 | socket.close() |
|
980 | 980 | self._closed = True |
|
981 | 981 | |
|
982 | 982 | def _spin_every(self, interval=1): |
|
983 | 983 | """target func for use in spin_thread""" |
|
984 | 984 | while True: |
|
985 | 985 | if self._stop_spinning.is_set(): |
|
986 | 986 | return |
|
987 | 987 | time.sleep(interval) |
|
988 | 988 | self.spin() |
|
989 | 989 | |
|
990 | 990 | def spin_thread(self, interval=1): |
|
991 | 991 | """call Client.spin() in a background thread on some regular interval |
|
992 | 992 | |
|
993 | 993 | This helps ensure that messages don't pile up too much in the zmq queue |
|
994 | 994 | while you are working on other things, or just leaving an idle terminal. |
|
995 | 995 | |
|
996 | 996 | It also helps limit potential padding of the `received` timestamp |
|
997 | 997 | on AsyncResult objects, used for timings. |
|
998 | 998 | |
|
999 | 999 | Parameters |
|
1000 | 1000 | ---------- |
|
1001 | 1001 | |
|
1002 | 1002 | interval : float, optional |
|
1003 | 1003 | The interval on which to spin the client in the background thread |
|
1004 | 1004 | (simply passed to time.sleep). |
|
1005 | 1005 | |
|
1006 | 1006 | Notes |
|
1007 | 1007 | ----- |
|
1008 | 1008 | |
|
1009 | 1009 | For precision timing, you may want to use this method to put a bound |
|
1010 | 1010 | on the jitter (in seconds) in `received` timestamps used |
|
1011 | 1011 | in AsyncResult.wall_time. |
|
1012 | 1012 | |
|
1013 | 1013 | """ |
|
1014 | 1014 | if self._spin_thread is not None: |
|
1015 | 1015 | self.stop_spin_thread() |
|
1016 | 1016 | self._stop_spinning.clear() |
|
1017 | 1017 | self._spin_thread = Thread(target=self._spin_every, args=(interval,)) |
|
1018 | 1018 | self._spin_thread.daemon = True |
|
1019 | 1019 | self._spin_thread.start() |
|
1020 | 1020 | |
|
1021 | 1021 | def stop_spin_thread(self): |
|
1022 | 1022 | """stop background spin_thread, if any""" |
|
1023 | 1023 | if self._spin_thread is not None: |
|
1024 | 1024 | self._stop_spinning.set() |
|
1025 | 1025 | self._spin_thread.join() |
|
1026 | 1026 | self._spin_thread = None |
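
A sketch of bounding `received`-timestamp jitter with the background spinner (assumes a connected `rc`; `wall_time` is the AsyncResult timing property mentioned in the docstring):

    rc.spin_thread(interval=0.1)           # flush queues ~10x per second
    ar = rc[:].apply_async(lambda: 2 + 2)
    ar.wait()
    print(ar.wall_time)                    # jitter now bounded by ~0.1s
    rc.stop_spin_thread()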
|
1027 | 1027 | |
|
1028 | 1028 | def spin(self): |
|
1029 | 1029 | """Flush any registration notifications and execution results |
|
1030 | 1030 | waiting in the ZMQ queue. |
|
1031 | 1031 | """ |
|
1032 | 1032 | if self._notification_socket: |
|
1033 | 1033 | self._flush_notifications() |
|
1034 | 1034 | if self._iopub_socket: |
|
1035 | 1035 | self._flush_iopub(self._iopub_socket) |
|
1036 | 1036 | if self._mux_socket: |
|
1037 | 1037 | self._flush_results(self._mux_socket) |
|
1038 | 1038 | if self._task_socket: |
|
1039 | 1039 | self._flush_results(self._task_socket) |
|
1040 | 1040 | if self._control_socket: |
|
1041 | 1041 | self._flush_control(self._control_socket) |
|
1042 | 1042 | if self._query_socket: |
|
1043 | 1043 | self._flush_ignored_hub_replies() |
|
1044 | 1044 | |
|
1045 | 1045 | def wait(self, jobs=None, timeout=-1): |
|
1046 | 1046 | """waits on one or more `jobs`, for up to `timeout` seconds. |
|
1047 | 1047 | |
|
1048 | 1048 | Parameters |
|
1049 | 1049 | ---------- |
|
1050 | 1050 | |
|
1051 | 1051 | jobs : int, str, or list of ints and/or strs, or one or more AsyncResult objects |
|
1052 | 1052 | ints are indices to self.history |
|
1053 | 1053 | strs are msg_ids |
|
1054 | 1054 | default: wait on all outstanding messages |
|
1055 | 1055 | timeout : float |
|
1056 | 1056 | a time in seconds, after which to give up. |
|
1057 | 1057 | default is -1, which means no timeout |
|
1058 | 1058 | |
|
1059 | 1059 | Returns |
|
1060 | 1060 | ------- |
|
1061 | 1061 | |
|
1062 | 1062 | True : when all msg_ids are done |
|
1063 | 1063 | False : timeout reached, some msg_ids still outstanding |
|
1064 | 1064 | """ |
|
1065 | 1065 | tic = time.time() |
|
1066 | 1066 | if jobs is None: |
|
1067 | 1067 | theids = self.outstanding |
|
1068 | 1068 | else: |
|
1069 | 1069 | if isinstance(jobs, (int, basestring, AsyncResult)): |
|
1070 | 1070 | jobs = [jobs] |
|
1071 | 1071 | theids = set() |
|
1072 | 1072 | for job in jobs: |
|
1073 | 1073 | if isinstance(job, int): |
|
1074 | 1074 | # index access |
|
1075 | 1075 | job = self.history[job] |
|
1076 | 1076 | elif isinstance(job, AsyncResult): |
|
1077 | 1077 | map(theids.add, job.msg_ids) |
|
1078 | 1078 | continue |
|
1079 | 1079 | theids.add(job) |
|
1080 | 1080 | if not theids.intersection(self.outstanding): |
|
1081 | 1081 | return True |
|
1082 | 1082 | self.spin() |
|
1083 | 1083 | while theids.intersection(self.outstanding): |
|
1084 | 1084 | if timeout >= 0 and ( time.time()-tic ) > timeout: |
|
1085 | 1085 | break |
|
1086 | 1086 | time.sleep(1e-3) |
|
1087 | 1087 | self.spin() |
|
1088 | 1088 | return len(theids.intersection(self.outstanding)) == 0 |
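
A usage sketch (assumes a connected `rc` and `import time`):

    ar = rc[:].apply_async(time.sleep, 1)
    rc.wait(ar, timeout=5)  # True once every msg_id in `ar` has completed
    rc.wait()               # block until *all* outstanding messages are done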
|
1089 | 1089 | |
|
1090 | 1090 | #-------------------------------------------------------------------------- |
|
1091 | 1091 | # Control methods |
|
1092 | 1092 | #-------------------------------------------------------------------------- |
|
1093 | 1093 | |
|
1094 | 1094 | @spin_first |
|
1095 | 1095 | def clear(self, targets=None, block=None): |
|
1096 | 1096 | """Clear the namespace in target(s).""" |
|
1097 | 1097 | block = self.block if block is None else block |
|
1098 | 1098 | targets = self._build_targets(targets)[0] |
|
1099 | 1099 | for t in targets: |
|
1100 | 1100 | self.session.send(self._control_socket, 'clear_request', content={}, ident=t) |
|
1101 | 1101 | error = False |
|
1102 | 1102 | if block: |
|
1103 | 1103 | self._flush_ignored_control() |
|
1104 | 1104 | for i in range(len(targets)): |
|
1105 | 1105 | idents,msg = self.session.recv(self._control_socket,0) |
|
1106 | 1106 | if self.debug: |
|
1107 | 1107 | pprint(msg) |
|
1108 | 1108 | if msg['content']['status'] != 'ok': |
|
1109 | 1109 | error = self._unwrap_exception(msg['content']) |
|
1110 | 1110 | else: |
|
1111 | 1111 | self._ignored_control_replies += len(targets) |
|
1112 | 1112 | if error: |
|
1113 | 1113 | raise error |
|
1114 | 1114 | |
|
1115 | 1115 | |
|
1116 | 1116 | @spin_first |
|
1117 | 1117 | def abort(self, jobs=None, targets=None, block=None): |
|
1118 | 1118 | """Abort specific jobs from the execution queues of target(s). |
|
1119 | 1119 | |
|
1120 | 1120 | This is a mechanism to prevent jobs that have already been submitted |
|
1121 | 1121 | from executing. |
|
1122 | 1122 | |
|
1123 | 1123 | Parameters |
|
1124 | 1124 | ---------- |
|
1125 | 1125 | |
|
1126 | 1126 | jobs : msg_id, list of msg_ids, or AsyncResult |
|
1127 | 1127 | The jobs to be aborted |
|
1128 | 1128 | |
|
1129 | 1129 | If unspecified/None: abort all outstanding jobs. |
|
1130 | 1130 | |
|
1131 | 1131 | """ |
|
1132 | 1132 | block = self.block if block is None else block |
|
1133 | 1133 | jobs = jobs if jobs is not None else list(self.outstanding) |
|
1134 | 1134 | targets = self._build_targets(targets)[0] |
|
1135 | 1135 | |
|
1136 | 1136 | msg_ids = [] |
|
1137 | 1137 | if isinstance(jobs, (basestring,AsyncResult)): |
|
1138 | 1138 | jobs = [jobs] |
|
1139 | 1139 | bad_ids = filter(lambda obj: not isinstance(obj, (basestring, AsyncResult)), jobs) |
|
1140 | 1140 | if bad_ids: |
|
1141 | 1141 | raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0]) |
|
1142 | 1142 | for j in jobs: |
|
1143 | 1143 | if isinstance(j, AsyncResult): |
|
1144 | 1144 | msg_ids.extend(j.msg_ids) |
|
1145 | 1145 | else: |
|
1146 | 1146 | msg_ids.append(j) |
|
1147 | 1147 | content = dict(msg_ids=msg_ids) |
|
1148 | 1148 | for t in targets: |
|
1149 | 1149 | self.session.send(self._control_socket, 'abort_request', |
|
1150 | 1150 | content=content, ident=t) |
|
1151 | 1151 | error = False |
|
1152 | 1152 | if block: |
|
1153 | 1153 | self._flush_ignored_control() |
|
1154 | 1154 | for i in range(len(targets)): |
|
1155 | 1155 | idents,msg = self.session.recv(self._control_socket,0) |
|
1156 | 1156 | if self.debug: |
|
1157 | 1157 | pprint(msg) |
|
1158 | 1158 | if msg['content']['status'] != 'ok': |
|
1159 | 1159 | error = self._unwrap_exception(msg['content']) |
|
1160 | 1160 | else: |
|
1161 | 1161 | self._ignored_control_replies += len(targets) |
|
1162 | 1162 | if error: |
|
1163 | 1163 | raise error |
|
1164 | 1164 | |
|
1165 | 1165 | @spin_first |
|
1166 | 1166 | def shutdown(self, targets='all', restart=False, hub=False, block=None): |
|
1167 | 1167 | """Terminates one or more engine processes, optionally including the hub. |
|
1168 | 1168 | |
|
1169 | 1169 | Parameters |
|
1170 | 1170 | ---------- |
|
1171 | 1171 | |
|
1172 | 1172 | targets: list of ints or 'all' [default: all] |
|
1173 | 1173 | Which engines to shutdown. |
|
1174 | 1174 | hub: bool [default: False] |
|
1175 | 1175 | Whether to include the Hub. hub=True implies targets='all'. |
|
1176 | 1176 | block: bool [default: self.block] |
|
1177 | 1177 | Whether to wait for clean shutdown replies or not. |
|
1178 | 1178 | restart: bool [default: False] |
|
1179 | 1179 | NOT IMPLEMENTED |
|
1180 | 1180 | whether to restart engines after shutting them down. |
|
1181 | 1181 | """ |
|
1182 | 1182 | from IPython.parallel.error import NoEnginesRegistered |
|
1183 | 1183 | if restart: |
|
1184 | 1184 | raise NotImplementedError("Engine restart is not yet implemented") |
|
1185 | 1185 | |
|
1186 | 1186 | block = self.block if block is None else block |
|
1187 | 1187 | if hub: |
|
1188 | 1188 | targets = 'all' |
|
1189 | 1189 | try: |
|
1190 | 1190 | targets = self._build_targets(targets)[0] |
|
1191 | 1191 | except NoEnginesRegistered: |
|
1192 | 1192 | targets = [] |
|
1193 | 1193 | for t in targets: |
|
1194 | 1194 | self.session.send(self._control_socket, 'shutdown_request', |
|
1195 | 1195 | content={'restart':restart},ident=t) |
|
1196 | 1196 | error = False |
|
1197 | 1197 | if block or hub: |
|
1198 | 1198 | self._flush_ignored_control() |
|
1199 | 1199 | for i in range(len(targets)): |
|
1200 | 1200 | idents,msg = self.session.recv(self._control_socket, 0) |
|
1201 | 1201 | if self.debug: |
|
1202 | 1202 | pprint(msg) |
|
1203 | 1203 | if msg['content']['status'] != 'ok': |
|
1204 | 1204 | error = self._unwrap_exception(msg['content']) |
|
1205 | 1205 | else: |
|
1206 | 1206 | self._ignored_control_replies += len(targets) |
|
1207 | 1207 | |
|
1208 | 1208 | if hub: |
|
1209 | 1209 | time.sleep(0.25) |
|
1210 | 1210 | self.session.send(self._query_socket, 'shutdown_request') |
|
1211 | 1211 | idents,msg = self.session.recv(self._query_socket, 0) |
|
1212 | 1212 | if self.debug: |
|
1213 | 1213 | pprint(msg) |
|
1214 | 1214 | if msg['content']['status'] != 'ok': |
|
1215 | 1215 | error = self._unwrap_exception(msg['content']) |
|
1216 | 1216 | |
|
1217 | 1217 | if error: |
|
1218 | 1218 | raise error |
|
1219 | 1219 | |
|
1220 | 1220 | #-------------------------------------------------------------------------- |
|
1221 | 1221 | # Execution related methods |
|
1222 | 1222 | #-------------------------------------------------------------------------- |
|
1223 | 1223 | |
|
1224 | 1224 | def _maybe_raise(self, result): |
|
1225 | 1225 | """wrapper for maybe raising an exception if apply failed.""" |
|
1226 | 1226 | if isinstance(result, error.RemoteError): |
|
1227 | 1227 | raise result |
|
1228 | 1228 | |
|
1229 | 1229 | return result |
|
1230 | 1230 | |
|
1231 | 1231 | def send_apply_request(self, socket, f, args=None, kwargs=None, metadata=None, track=False, |
|
1232 | 1232 | ident=None): |
|
1233 | 1233 | """construct and send an apply message via a socket. |
|
1234 | 1234 | |
|
1235 | 1235 | This is the principal method with which all engine execution is performed by views. |
|
1236 | 1236 | """ |
|
1237 | 1237 | |
|
1238 | 1238 | if self._closed: |
|
1239 | 1239 | raise RuntimeError("Client cannot be used after its sockets have been closed") |
|
1240 | 1240 | |
|
1241 | 1241 | # defaults: |
|
1242 | 1242 | args = args if args is not None else [] |
|
1243 | 1243 | kwargs = kwargs if kwargs is not None else {} |
|
1244 | 1244 | metadata = metadata if metadata is not None else {} |
|
1245 | 1245 | |
|
1246 | 1246 | # validate arguments |
|
1247 | 1247 | if not callable(f) and not isinstance(f, Reference): |
|
1248 | 1248 | raise TypeError("f must be callable, not %s"%type(f)) |
|
1249 | 1249 | if not isinstance(args, (tuple, list)): |
|
1250 | 1250 | raise TypeError("args must be tuple or list, not %s"%type(args)) |
|
1251 | 1251 | if not isinstance(kwargs, dict): |
|
1252 | 1252 | raise TypeError("kwargs must be dict, not %s"%type(kwargs)) |
|
1253 | 1253 | if not isinstance(metadata, dict): |
|
1254 | 1254 | raise TypeError("metadata must be dict, not %s"%type(metadata)) |
|
1255 | 1255 | |
|
1256 | 1256 | bufs = serialize.pack_apply_message(f, args, kwargs, |
|
1257 | 1257 | buffer_threshold=self.session.buffer_threshold, |
|
1258 | 1258 | item_threshold=self.session.item_threshold, |
|
1259 | 1259 | ) |
|
1260 | 1260 | |
|
1261 | 1261 | msg = self.session.send(socket, "apply_request", buffers=bufs, ident=ident, |
|
1262 | 1262 | metadata=metadata, track=track) |
|
1263 | 1263 | |
|
1264 | 1264 | msg_id = msg['header']['msg_id'] |
|
1265 | 1265 | self.outstanding.add(msg_id) |
|
1266 | 1266 | if ident: |
|
1267 | 1267 | # possibly routed to a specific engine |
|
1268 | 1268 | if isinstance(ident, list): |
|
1269 | 1269 | ident = ident[-1] |
|
1270 | 1270 | if ident in self._engines.values(): |
|
1271 | 1271 | # save for later, in case of engine death |
|
1272 | 1272 | self._outstanding_dict[ident].add(msg_id) |
|
1273 | 1273 | self.history.append(msg_id) |
|
1274 | 1274 | self.metadata[msg_id]['submitted'] = datetime.now() |
|
1275 | 1275 | |
|
1276 | 1276 | return msg |
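
Callers normally reach this through a view rather than directly; roughly (a sketch, assuming a connected `rc`):

    dv = rc[:]
    ar = dv.apply_async(pow, 2, 10)  # routed via send_apply_request on the mux socket
    ar.get()                         # [1024, 1024, ...], one result per engine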
|
1277 | 1277 | |
|
1278 | 1278 | def send_execute_request(self, socket, code, silent=True, metadata=None, ident=None): |
|
1279 | 1279 | """construct and send an execute request via a socket. |
|
1280 | 1280 | |
|
1281 | 1281 | """ |
|
1282 | 1282 | |
|
1283 | 1283 | if self._closed: |
|
1284 | 1284 | raise RuntimeError("Client cannot be used after its sockets have been closed") |
|
1285 | 1285 | |
|
1286 | 1286 | # defaults: |
|
1287 | 1287 | metadata = metadata if metadata is not None else {} |
|
1288 | 1288 | |
|
1289 | 1289 | # validate arguments |
|
1290 | 1290 | if not isinstance(code, basestring): |
|
1291 | 1291 | raise TypeError("code must be text, not %s" % type(code)) |
|
1292 | 1292 | if not isinstance(metadata, dict): |
|
1293 | 1293 | raise TypeError("metadata must be dict, not %s" % type(metadata)) |
|
1294 | 1294 | |
|
1295 | 1295 | content = dict(code=code, silent=bool(silent), user_variables=[], user_expressions={}) |
|
1296 | 1296 | |
|
1297 | 1297 | |
|
1298 | 1298 | msg = self.session.send(socket, "execute_request", content=content, ident=ident, |
|
1299 | 1299 | metadata=metadata) |
|
1300 | 1300 | |
|
1301 | 1301 | msg_id = msg['header']['msg_id'] |
|
1302 | 1302 | self.outstanding.add(msg_id) |
|
1303 | 1303 | if ident: |
|
1304 | 1304 | # possibly routed to a specific engine |
|
1305 | 1305 | if isinstance(ident, list): |
|
1306 | 1306 | ident = ident[-1] |
|
1307 | 1307 | if ident in self._engines.values(): |
|
1308 | 1308 | # save for later, in case of engine death |
|
1309 | 1309 | self._outstanding_dict[ident].add(msg_id) |
|
1310 | 1310 | self.history.append(msg_id) |
|
1311 | 1311 | self.metadata[msg_id]['submitted'] = datetime.now() |
|
1312 | 1312 | |
|
1313 | 1313 | return msg |
|
1314 | 1314 | |
|
1315 | 1315 | #-------------------------------------------------------------------------- |
|
1316 | 1316 | # construct a View object |
|
1317 | 1317 | #-------------------------------------------------------------------------- |
|
1318 | 1318 | |
|
1319 | 1319 | def load_balanced_view(self, targets=None): |
|
1320 | 1320 | """construct a DirectView object. |
|
1321 | 1321 | |
|
1322 | 1322 | If no arguments are specified, create a LoadBalancedView |
|
1323 | 1323 | using all engines. |
|
1324 | 1324 | |
|
1325 | 1325 | Parameters |
|
1326 | 1326 | ---------- |
|
1327 | 1327 | |
|
1328 | 1328 | targets: list,slice,int,etc. [default: use all engines] |
|
1329 | 1329 | The subset of engines across which to load-balance |
|
1330 | 1330 | """ |
|
1331 | 1331 | if targets == 'all': |
|
1332 | 1332 | targets = None |
|
1333 | 1333 | if targets is not None: |
|
1334 | 1334 | targets = self._build_targets(targets)[1] |
|
1335 | 1335 | return LoadBalancedView(client=self, socket=self._task_socket, targets=targets) |
|
1336 | 1336 | |
|
1337 | 1337 | def direct_view(self, targets='all'): |
|
1338 | 1338 | """construct a DirectView object. |
|
1339 | 1339 | |
|
1340 | 1340 | If no targets are specified, create a DirectView using all engines. |
|
1341 | 1341 | |
|
1342 | 1342 | rc.direct_view('all') is distinguished from rc[:] in that 'all' will |
|
1343 | 1343 | evaluate the target engines at each execution, whereas rc[:] will connect to |
|
1344 | 1344 | all *current* engines, and that list will not change. |
|
1345 | 1345 | |
|
1346 | 1346 | That is, 'all' will always use all engines, whereas rc[:] will not use |
|
1347 | 1347 | engines added after the DirectView is constructed. |
|
1348 | 1348 | |
|
1349 | 1349 | Parameters |
|
1350 | 1350 | ---------- |
|
1351 | 1351 | |
|
1352 | 1352 | targets: list,slice,int,etc. [default: use all engines] |
|
1353 | 1353 | The engines to use for the View |
|
1354 | 1354 | """ |
|
1355 | 1355 | single = isinstance(targets, int) |
|
1356 | 1356 | # allow 'all' to be lazily evaluated at each execution |
|
1357 | 1357 | if targets != 'all': |
|
1358 | 1358 | targets = self._build_targets(targets)[1] |
|
1359 | 1359 | if single: |
|
1360 | 1360 | targets = targets[0] |
|
1361 | 1361 | return DirectView(client=self, socket=self._mux_socket, targets=targets) |
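
The lazy-'all' distinction in practice (a sketch; engines added after construction only participate in the lazy view's runs):

    v_lazy  = rc.direct_view('all')  # re-resolves the engine list at each execution
    v_fixed = rc[:]                  # pinned to the engines registered right now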
|
1362 | 1362 | |
|
1363 | 1363 | #-------------------------------------------------------------------------- |
|
1364 | 1364 | # Query methods |
|
1365 | 1365 | #-------------------------------------------------------------------------- |
|
1366 | 1366 | |
|
1367 | 1367 | @spin_first |
|
1368 | 1368 | def get_result(self, indices_or_msg_ids=None, block=None): |
|
1369 | 1369 | """Retrieve a result by msg_id or history index, wrapped in an AsyncResult object. |
|
1370 | 1370 | |
|
1371 | 1371 | If the client already has the results, no request to the Hub will be made. |
|
1372 | 1372 | |
|
1373 | 1373 | This is a convenient way to construct AsyncResult objects, which are wrappers |
|
1374 | 1374 | that include metadata about execution, and allow for awaiting results that |
|
1375 | 1375 | were not submitted by this Client. |
|
1376 | 1376 | |
|
1377 | 1377 | It can also be a convenient way to retrieve the metadata associated with |
|
1378 | 1378 | blocking execution, since it always retrieves the metadata along with the result. 
|
1379 | 1379 | |
|
1380 | 1380 | Examples |
|
1381 | 1381 | -------- |
|
1382 | 1382 | :: |
|
1383 | 1383 | |
|
1384 | 1384 | In [10]: r = client.get_result(-1)  # AsyncResult for the most recent task 
|
1385 | 1385 | |
|
1386 | 1386 | Parameters |
|
1387 | 1387 | ---------- |
|
1388 | 1388 | |
|
1389 | 1389 | indices_or_msg_ids : integer history index, str msg_id, or list of either |
|
1390 | 1390 | The history indices or msg_ids of the results to be retrieved 
|
1391 | 1391 | |
|
1392 | 1392 | block : bool |
|
1393 | 1393 | Whether to wait for the result to be done |
|
1394 | 1394 | |
|
1395 | 1395 | Returns |
|
1396 | 1396 | ------- |
|
1397 | 1397 | |
|
1398 | 1398 | AsyncResult |
|
1399 | 1399 | A single AsyncResult object will always be returned. |
|
1400 | 1400 | |
|
1401 | 1401 | AsyncHubResult |
|
1402 | 1402 | A subclass of AsyncResult that retrieves results from the Hub |
|
1403 | 1403 | |
|
1404 | 1404 | """ |
|
1405 | 1405 | block = self.block if block is None else block |
|
1406 | 1406 | if indices_or_msg_ids is None: |
|
1407 | 1407 | indices_or_msg_ids = -1 |
|
1408 | 1408 | |
|
1409 | 1409 | single_result = False |
|
1410 | 1410 | if not isinstance(indices_or_msg_ids, (list,tuple)): |
|
1411 | 1411 | indices_or_msg_ids = [indices_or_msg_ids] |
|
1412 | 1412 | single_result = True |
|
1413 | 1413 | |
|
1414 | 1414 | theids = [] |
|
1415 | 1415 | for id in indices_or_msg_ids: |
|
1416 | 1416 | if isinstance(id, int): |
|
1417 | 1417 | id = self.history[id] |
|
1418 | 1418 | if not isinstance(id, basestring): |
|
1419 | 1419 | raise TypeError("indices must be str or int, not %r"%id) |
|
1420 | 1420 | theids.append(id) |
|
1421 | 1421 | |
|
1422 | 1422 | local_ids = filter(lambda msg_id: msg_id in self.outstanding or msg_id in self.results, theids) |
|
1423 | 1423 | remote_ids = filter(lambda msg_id: msg_id not in local_ids, theids) |
|
1424 | 1424 | |
|
1425 | 1425 | # given a single msg_id initially, get_result should return the result itself, 
|
1426 | 1426 | # not a length-one list |
|
1427 | 1427 | if single_result: |
|
1428 | 1428 | theids = theids[0] |
|
1429 | 1429 | |
|
1430 | 1430 | if remote_ids: |
|
1431 | 1431 | ar = AsyncHubResult(self, msg_ids=theids) |
|
1432 | 1432 | else: |
|
1433 | 1433 | ar = AsyncResult(self, msg_ids=theids) |
|
1434 | 1434 | |
|
1435 | 1435 | if block: |
|
1436 | 1436 | ar.wait() |
|
1437 | 1437 | |
|
1438 | 1438 | return ar |
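
A sketch of reconstructing results by msg_id, possibly from a different Client than the one that submitted them (assumes `import os` and a connected `rc`):

    ar = rc[:].apply_async(os.getpid)
    msg_ids = ar.msg_ids                      # plain strings, safe to store
    ar2 = rc.get_result(msg_ids, block=True)  # AsyncHubResult if not cached locally
    print(ar2.get())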
|
1439 | 1439 | |
|
1440 | 1440 | @spin_first |
|
1441 | 1441 | def resubmit(self, indices_or_msg_ids=None, metadata=None, block=None): |
|
1442 | 1442 | """Resubmit one or more tasks. |
|
1443 | 1443 | |
|
1444 | 1444 | in-flight tasks may not be resubmitted. |
|
1445 | 1445 | |
|
1446 | 1446 | Parameters |
|
1447 | 1447 | ---------- |
|
1448 | 1448 | |
|
1449 | 1449 | indices_or_msg_ids : integer history index, str msg_id, or list of either |
|
1450 | 1450 | The history indices or msg_ids of the tasks to be resubmitted 
|
1451 | 1451 | |
|
1452 | 1452 | block : bool |
|
1453 | 1453 | Whether to wait for the result to be done |
|
1454 | 1454 | |
|
1455 | 1455 | Returns |
|
1456 | 1456 | ------- |
|
1457 | 1457 | |
|
1458 | 1458 | AsyncHubResult |
|
1459 | 1459 | A subclass of AsyncResult that retrieves results from the Hub |
|
1460 | 1460 | |
|
1461 | 1461 | """ |
|
1462 | 1462 | block = self.block if block is None else block |
|
1463 | 1463 | if indices_or_msg_ids is None: |
|
1464 | 1464 | indices_or_msg_ids = -1 |
|
1465 | 1465 | |
|
1466 | 1466 | if not isinstance(indices_or_msg_ids, (list,tuple)): |
|
1467 | 1467 | indices_or_msg_ids = [indices_or_msg_ids] |
|
1468 | 1468 | |
|
1469 | 1469 | theids = [] |
|
1470 | 1470 | for id in indices_or_msg_ids: |
|
1471 | 1471 | if isinstance(id, int): |
|
1472 | 1472 | id = self.history[id] |
|
1473 | 1473 | if not isinstance(id, basestring): |
|
1474 | 1474 | raise TypeError("indices must be str or int, not %r"%id) |
|
1475 | 1475 | theids.append(id) |
|
1476 | 1476 | |
|
1477 | 1477 | content = dict(msg_ids = theids) |
|
1478 | 1478 | |
|
1479 | 1479 | self.session.send(self._query_socket, 'resubmit_request', content) |
|
1480 | 1480 | |
|
1481 | 1481 | zmq.select([self._query_socket], [], []) |
|
1482 | 1482 | idents,msg = self.session.recv(self._query_socket, zmq.NOBLOCK) |
|
1483 | 1483 | if self.debug: |
|
1484 | 1484 | pprint(msg) |
|
1485 | 1485 | content = msg['content'] |
|
1486 | 1486 | if content['status'] != 'ok': |
|
1487 | 1487 | raise self._unwrap_exception(content) |
|
1488 | 1488 | mapping = content['resubmitted'] |
|
1489 | 1489 | new_ids = [ mapping[msg_id] for msg_id in theids ] |
|
1490 | 1490 | |
|
1491 | 1491 | ar = AsyncHubResult(self, msg_ids=new_ids) |
|
1492 | 1492 | |
|
1493 | 1493 | if block: |
|
1494 | 1494 | ar.wait() |
|
1495 | 1495 | |
|
1496 | 1496 | return ar |
|
1497 | 1497 | |
|
1498 | 1498 | @spin_first |
|
1499 | 1499 | def result_status(self, msg_ids, status_only=True): |
|
1500 | 1500 | """Check on the status of the result(s) of the apply request with `msg_ids`. |
|
1501 | 1501 | |
|
1502 | 1502 | If status_only is False, then the actual results will be retrieved, else |
|
1503 | 1503 | only the status of the results will be checked. |
|
1504 | 1504 | |
|
1505 | 1505 | Parameters |
|
1506 | 1506 | ---------- |
|
1507 | 1507 | |
|
1508 | 1508 | msg_ids : list of msg_ids |
|
1509 | 1509 | if int: |
|
1510 | 1510 | Passed as index to self.history for convenience. |
|
1511 | 1511 | status_only : bool (default: True) |
|
1512 | 1512 | if False: |
|
1513 | 1513 | Retrieve the actual results of completed tasks. |
|
1514 | 1514 | |
|
1515 | 1515 | Returns |
|
1516 | 1516 | ------- |
|
1517 | 1517 | |
|
1518 | 1518 | results : dict |
|
1519 | 1519 | There will always be the keys 'pending' and 'completed', which will |
|
1520 | 1520 | be lists of msg_ids that are incomplete or complete. If `status_only` |
|
1521 | 1521 | is False, then completed results will be keyed by their `msg_id`. |
|
1522 | 1522 | """ |
|
1523 | 1523 | if not isinstance(msg_ids, (list,tuple)): |
|
1524 | 1524 | msg_ids = [msg_ids] |
|
1525 | 1525 | |
|
1526 | 1526 | theids = [] |
|
1527 | 1527 | for msg_id in msg_ids: |
|
1528 | 1528 | if isinstance(msg_id, int): |
|
1529 | 1529 | msg_id = self.history[msg_id] |
|
1530 | 1530 | if not isinstance(msg_id, basestring): |
|
1531 | 1531 | raise TypeError("msg_ids must be str, not %r"%msg_id) |
|
1532 | 1532 | theids.append(msg_id) |
|
1533 | 1533 | |
|
1534 | 1534 | completed = [] |
|
1535 | 1535 | local_results = {} |
|
1536 | 1536 | |
|
1537 | 1537 | # comment this block out to temporarily disable local shortcut: |
|
1538 | 1538 | for msg_id in theids: |
|
1539 | 1539 | if msg_id in self.results: |
|
1540 | 1540 | completed.append(msg_id) |
|
1541 | 1541 | local_results[msg_id] = self.results[msg_id] |
|
1542 | 1542 | theids.remove(msg_id) |
|
1543 | 1543 | |
|
1544 | 1544 | if theids: # some not locally cached |
|
1545 | 1545 | content = dict(msg_ids=theids, status_only=status_only) |
|
1546 | 1546 | msg = self.session.send(self._query_socket, "result_request", content=content) |
|
1547 | 1547 | zmq.select([self._query_socket], [], []) |
|
1548 | 1548 | idents,msg = self.session.recv(self._query_socket, zmq.NOBLOCK) |
|
1549 | 1549 | if self.debug: |
|
1550 | 1550 | pprint(msg) |
|
1551 | 1551 | content = msg['content'] |
|
1552 | 1552 | if content['status'] != 'ok': |
|
1553 | 1553 | raise self._unwrap_exception(content) |
|
1554 | 1554 | buffers = msg['buffers'] |
|
1555 | 1555 | else: |
|
1556 | 1556 | content = dict(completed=[],pending=[]) |
|
1557 | 1557 | |
|
1558 | 1558 | content['completed'].extend(completed) |
|
1559 | 1559 | |
|
1560 | 1560 | if status_only: |
|
1561 | 1561 | return content |
|
1562 | 1562 | |
|
1563 | 1563 | failures = [] |
|
1564 | 1564 | # load cached results into result: |
|
1565 | 1565 | content.update(local_results) |
|
1566 | 1566 | |
|
1567 | 1567 | # update cache with results: |
|
1568 | 1568 | for msg_id in sorted(theids): |
|
1569 | 1569 | if msg_id in content['completed']: |
|
1570 | 1570 | rec = content[msg_id] |
|
1571 | 1571 | parent = rec['header'] |
|
1572 | 1572 | header = rec['result_header'] |
|
1573 | 1573 | rcontent = rec['result_content'] |
|
1574 | 1574 | iodict = rec['io'] |
|
1575 | 1575 | if isinstance(rcontent, str): |
|
1576 | 1576 | rcontent = self.session.unpack(rcontent) |
|
1577 | 1577 | |
|
1578 | 1578 | md = self.metadata[msg_id] |
|
1579 | 1579 | md_msg = dict( |
|
1580 | 1580 | content=rcontent, |
|
1581 | 1581 | parent_header=parent, |
|
1582 | 1582 | header=header, |
|
1583 | 1583 | metadata=rec['result_metadata'], |
|
1584 | 1584 | ) |
|
1585 | 1585 | md.update(self._extract_metadata(md_msg)) |
|
1586 | 1586 | if rec.get('received'): |
|
1587 | 1587 | md['received'] = rec['received'] |
|
1588 | 1588 | md.update(iodict) |
|
1589 | 1589 | |
|
1590 | 1590 | if rcontent['status'] == 'ok': |
|
1591 | 1591 | if header['msg_type'] == 'apply_reply': |
|
1592 | 1592 | res,buffers = serialize.unserialize_object(buffers) |
|
1593 | 1593 | elif header['msg_type'] == 'execute_reply': |
|
1594 | 1594 | res = ExecuteReply(msg_id, rcontent, md) |
|
1595 | 1595 | else: |
|
1596 | 1596 | raise KeyError("unhandled msg type: %r" % header['msg_type']) |
|
1597 | 1597 | else: |
|
1598 | 1598 | res = self._unwrap_exception(rcontent) |
|
1599 | 1599 | failures.append(res) |
|
1600 | 1600 | |
|
1601 | 1601 | self.results[msg_id] = res |
|
1602 | 1602 | content[msg_id] = res |
|
1603 | 1603 | |
|
1604 | 1604 | if len(theids) == 1 and failures: |
|
1605 | 1605 | raise failures[0] |
|
1606 | 1606 | |
|
1607 | 1607 | error.collect_exceptions(failures, "result_status") |
|
1608 | 1608 | return content |
|
1609 | 1609 | |
|
1610 | 1610 | @spin_first |
|
1611 | 1611 | def queue_status(self, targets='all', verbose=False): |
|
1612 | 1612 | """Fetch the status of engine queues. |
|
1613 | 1613 | |
|
1614 | 1614 | Parameters |
|
1615 | 1615 | ---------- |
|
1616 | 1616 | |
|
1617 | 1617 | targets : int/str/list of ints/strs |
|
1618 | 1618 | the engines whose states are to be queried. |
|
1619 | 1619 | default : all |
|
1620 | 1620 | verbose : bool |
|
1621 | 1621 | Whether to return lengths only, or lists of ids for each element |
|
1622 | 1622 | """ |
|
1623 | 1623 | if targets == 'all': |
|
1624 | 1624 | # allow 'all' to be evaluated on the engine |
|
1625 | 1625 | engine_ids = None |
|
1626 | 1626 | else: |
|
1627 | 1627 | engine_ids = self._build_targets(targets)[1] |
|
1628 | 1628 | content = dict(targets=engine_ids, verbose=verbose) |
|
1629 | 1629 | self.session.send(self._query_socket, "queue_request", content=content) |
|
1630 | 1630 | idents,msg = self.session.recv(self._query_socket, 0) |
|
1631 | 1631 | if self.debug: |
|
1632 | 1632 | pprint(msg) |
|
1633 | 1633 | content = msg['content'] |
|
1634 | 1634 | status = content.pop('status') |
|
1635 | 1635 | if status != 'ok': |
|
1636 | 1636 | raise self._unwrap_exception(content) |
|
1637 | 1637 | content = rekey(content) |
|
1638 | 1638 | if isinstance(targets, int): |
|
1639 | 1639 | return content[targets] |
|
1640 | 1640 | else: |
|
1641 | 1641 | return content |
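
A sketch of both modes (the exact dict shape is an assumption based on the rekeyed reply):

    rc.queue_status()                 # e.g. {0: {'queue': 0, 'completed': 3, 'tasks': 0}, ...}
    rc.queue_status(0, verbose=True)  # per-engine lists of msg_ids instead of counts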
|
1642 | 1642 | |
|
1643 | 1643 | def _build_msgids_from_target(self, targets=None): |
|
1644 | 1644 | """Build a list of msg_ids from the list of engine targets""" |
|
1645 | 1645 | if not targets: # needed as _build_targets otherwise uses all engines |
|
1646 | 1646 | return [] |
|
1647 | 1647 | target_ids = self._build_targets(targets)[0] |
|
1648 | 1648 | return filter(lambda md_id: self.metadata[md_id]["engine_uuid"] in target_ids, self.metadata) |
|
1649 | 1649 | |
|
1650 | 1650 | def _build_msgids_from_jobs(self, jobs=None): |
|
1651 | 1651 | """Build a list of msg_ids from "jobs" """ |
|
1652 | 1652 | if not jobs: |
|
1653 | 1653 | return [] |
|
1654 | 1654 | msg_ids = [] |
|
1655 | 1655 | if isinstance(jobs, (basestring,AsyncResult)): |
|
1656 | 1656 | jobs = [jobs] |
|
1657 | 1657 | bad_ids = filter(lambda obj: not isinstance(obj, (basestring, AsyncResult)), jobs) |
|
1658 | 1658 | if bad_ids: |
|
1659 | 1659 | raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0]) |
|
1660 | 1660 | for j in jobs: |
|
1661 | 1661 | if isinstance(j, AsyncResult): |
|
1662 | 1662 | msg_ids.extend(j.msg_ids) |
|
1663 | 1663 | else: |
|
1664 | 1664 | msg_ids.append(j) |
|
1665 | 1665 | return msg_ids |
|
1666 | 1666 | |
|
1667 | 1667 | def purge_local_results(self, jobs=[], targets=[]): |
|
1668 | 1668 | """Clears the client caches of results and frees such memory. |
|
1669 | 1669 | |
|
1670 | 1670 | Individual results can be purged by msg_id, or the entire |
|
1671 | 1671 | history of specific targets can be purged. |
|
1672 | 1672 | |
|
1673 | 1673 | Use `purge_local_results('all')` to scrub everything from the Client's db. 
|
1674 | 1674 | |
|
1675 | 1675 | The client must have no outstanding tasks before purging the caches. |
|
1676 | 1676 | Raises `AssertionError` if there are still outstanding tasks. |
|
1677 | 1677 | |
|
1678 | 1678 | After this call all `AsyncResults` are invalid and should be discarded. |
|
1679 | 1679 | |
|
1680 | 1680 | If you must "reget" the results, you can still do so by using 

1681 | 1681 | `client.get_result(msg_id)` or `client.get_result(asyncresult)`. This will 

1682 | 1682 | re-download the results from the hub if they are still available 

1683 | 1683 | (i.e. `client.purge_hub_results(...)` has not been called). 
|
1684 | 1684 | |
|
1685 | 1685 | Parameters |
|
1686 | 1686 | ---------- |
|
1687 | 1687 | |
|
1688 | 1688 | jobs : str or list of str or AsyncResult objects |
|
1689 | 1689 | the msg_ids whose results should be purged. |
|
1690 | 1690 | targets : int/str/list of ints/strs |
|
1691 | 1691 | The targets, by int_id, whose entire results are to be purged. |
|
1692 | 1692 | |
|
1693 | 1693 | default : None |
|
1694 | 1694 | """ |
|
1695 | 1695 | assert not self.outstanding, "Can't purge a client with outstanding tasks!" |
|
1696 | 1696 | |
|
1697 | 1697 | if not targets and not jobs: |
|
1698 | 1698 | raise ValueError("Must specify at least one of `targets` and `jobs`") |
|
1699 | 1699 | |
|
1700 | 1700 | if jobs == 'all': |
|
1701 | 1701 | self.results.clear() |
|
1702 | 1702 | self.metadata.clear() |
|
1703 | 1703 | return |
|
1704 | 1704 | else: |
|
1705 | 1705 | msg_ids = [] |
|
1706 | 1706 | msg_ids.extend(self._build_msgids_from_target(targets)) |
|
1707 | 1707 | msg_ids.extend(self._build_msgids_from_jobs(jobs)) |
|
1708 | 1708 | map(self.results.pop, msg_ids) |
|
1709 | 1709 | map(self.metadata.pop, msg_ids) |
|
1710 | 1710 | |
|
1711 | 1711 | |
|
1712 | 1712 | @spin_first |
|
1713 | 1713 | def purge_hub_results(self, jobs=[], targets=[]): |
|
1714 | 1714 | """Tell the Hub to forget results. |
|
1715 | 1715 | |
|
1716 | 1716 | Individual results can be purged by msg_id, or the entire |
|
1717 | 1717 | history of specific targets can be purged. |
|
1718 | 1718 | |
|
1719 | 1719 | Use `purge_results('all')` to scrub everything from the Hub's db. |
|
1720 | 1720 | |
|
1721 | 1721 | Parameters |
|
1722 | 1722 | ---------- |
|
1723 | 1723 | |
|
1724 | 1724 | jobs : str or list of str or AsyncResult objects |
|
1725 | 1725 | the msg_ids whose results should be forgotten. |
|
1726 | 1726 | targets : int/str/list of ints/strs |
|
1727 | 1727 | The targets, by int_id, whose entire history is to be purged. |
|
1728 | 1728 | |
|
1729 | 1729 | default : None |
|
1730 | 1730 | """ |
|
1731 | 1731 | if not targets and not jobs: |
|
1732 | 1732 | raise ValueError("Must specify at least one of `targets` and `jobs`") |
|
1733 | 1733 | if targets: |
|
1734 | 1734 | targets = self._build_targets(targets)[1] |
|
1735 | 1735 | |
|
1736 | 1736 | # construct msg_ids from jobs |
|
1737 | 1737 | if jobs == 'all': |
|
1738 | 1738 | msg_ids = jobs |
|
1739 | 1739 | else: |
|
1740 | 1740 | msg_ids = self._build_msgids_from_jobs(jobs) |
|
1741 | 1741 | |
|
1742 | 1742 | content = dict(engine_ids=targets, msg_ids=msg_ids) |
|
1743 | 1743 | self.session.send(self._query_socket, "purge_request", content=content) |
|
1744 | 1744 | idents, msg = self.session.recv(self._query_socket, 0) |
|
1745 | 1745 | if self.debug: |
|
1746 | 1746 | pprint(msg) |
|
1747 | 1747 | content = msg['content'] |
|
1748 | 1748 | if content['status'] != 'ok': |
|
1749 | 1749 | raise self._unwrap_exception(content) |
|
1750 | 1750 | |
|
1751 | 1751 | def purge_results(self, jobs=[], targets=[]): |
|
1752 | 1752 | """Clears the cached results from both the hub and the local client |
|
1753 | 1753 | |
|
1754 | 1754 | Individual results can be purged by msg_id, or the entire |
|
1755 | 1755 | history of specific targets can be purged. |
|
1756 | 1756 | |
|
1757 | 1757 | Use `purge_results('all')` to scrub every cached result from both the Hub's and |
|
1758 | 1758 | the Client's db. |
|
1759 | 1759 | |
|
1760 | 1760 | Equivalent to calling both `purge_hub_results()` and `purge_local_results()` with 
|
1761 | 1761 | the same arguments. |
|
1762 | 1762 | |
|
1763 | 1763 | Parameters |
|
1764 | 1764 | ---------- |
|
1765 | 1765 | |
|
1766 | 1766 | jobs : str or list of str or AsyncResult objects |
|
1767 | 1767 | the msg_ids whose results should be forgotten. |
|
1768 | 1768 | targets : int/str/list of ints/strs |
|
1769 | 1769 | The targets, by int_id, whose entire history is to be purged. |
|
1770 | 1770 | |
|
1771 | 1771 | default : None |
|
1772 | 1772 | """ |
|
1773 | 1773 | self.purge_local_results(jobs=jobs, targets=targets) |
|
1774 | 1774 | self.purge_hub_results(jobs=jobs, targets=targets) |
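A short sketch of the purge family in use (same hypothetical `rc` as above; note that `purge_local_results` asserts that no tasks are outstanding):

    import os

    # run something, wait for it, then forget it everywhere
    ar = rc[0].apply_async(os.getpid)
    ar.get()
    rc.purge_results(jobs=ar)    # purges hub and local caches for this msg_id

    # or wipe one engine's entire history, or everything:
    rc.purge_results(targets=0)
    rc.purge_results('all')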
|
1775 | 1775 | |
|
1776 | 1776 | def purge_everything(self): |
|
1777 | 1777 | """Clears all content from previous Tasks from both the hub and the local client |
|
1778 | 1778 | |
|
1779 | 1779 | In addition to calling `purge_results("all")` it also deletes the history and |
|
1780 | 1780 | other bookkeeping lists. |
|
1781 | 1781 | """ |
|
1782 | 1782 | self.purge_results("all") |
|
1783 | 1783 | self.history = [] |
|
1784 | 1784 | self.session.digest_history.clear() |
|
1785 | 1785 | |
|
1786 | 1786 | @spin_first |
|
1787 | 1787 | def hub_history(self): |
|
1788 | 1788 | """Get the Hub's history |
|
1789 | 1789 | |
|
1790 | 1790 | Just like the Client, the Hub has a history, which is a list of msg_ids. |
|
1791 | 1791 | This will contain the history of all clients, and, depending on configuration, |
|
1792 | 1792 | may contain history across multiple cluster sessions. |
|
1793 | 1793 | |
|
1794 | 1794 | Any msg_id returned here is a valid argument to `get_result`. |
|
1795 | 1795 | |
|
1796 | 1796 | Returns |
|
1797 | 1797 | ------- |
|
1798 | 1798 | |
|
1799 | 1799 | msg_ids : list of strs |
|
1800 | 1800 | list of all msg_ids, ordered by task submission time. |
|
1801 | 1801 | """ |
|
1802 | 1802 | |
|
1803 | 1803 | self.session.send(self._query_socket, "history_request", content={}) |
|
1804 | 1804 | idents, msg = self.session.recv(self._query_socket, 0) |
|
1805 | 1805 | |
|
1806 | 1806 | if self.debug: |
|
1807 | 1807 | pprint(msg) |
|
1808 | 1808 | content = msg['content'] |
|
1809 | 1809 | if content['status'] != 'ok': |
|
1810 | 1810 | raise self._unwrap_exception(content) |
|
1811 | 1811 | else: |
|
1812 | 1812 | return content['history'] |
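For illustration, hub history combines with `get_result` to re-fetch old results, even ones submitted by another client (a sketch, same hypothetical `rc`):

    msg_ids = rc.hub_history()            # ordered by submission time
    if msg_ids:
        ar = rc.get_result(msg_ids[-1])   # re-fetch the most recent record
        print(ar.get())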
|
1813 | 1813 | |
|
1814 | 1814 | @spin_first |
|
1815 | 1815 | def db_query(self, query, keys=None): |
|
1816 | 1816 | """Query the Hub's TaskRecord database |
|
1817 | 1817 | |
|
1818 | 1818 | This will return a list of task record dicts that match `query` |
|
1819 | 1819 | |
|
1820 | 1820 | Parameters |
|
1821 | 1821 | ---------- |
|
1822 | 1822 | |
|
1823 | 1823 | query : mongodb query dict |
|
1824 | 1824 | The search dict. See mongodb query docs for details. |
|
1825 | 1825 | keys : list of strs [optional] |
|
1826 | 1826 | The subset of keys to be returned. The default is to fetch everything but buffers. |
|
1827 | 1827 | 'msg_id' will *always* be included. |
|
1828 | 1828 | """ |
|
1829 | 1829 | if isinstance(keys, basestring): |
|
1830 | 1830 | keys = [keys] |
|
1831 | 1831 | content = dict(query=query, keys=keys) |
|
1832 | 1832 | self.session.send(self._query_socket, "db_request", content=content) |
|
1833 | 1833 | idents, msg = self.session.recv(self._query_socket, 0) |
|
1834 | 1834 | if self.debug: |
|
1835 | 1835 | pprint(msg) |
|
1836 | 1836 | content = msg['content'] |
|
1837 | 1837 | if content['status'] != 'ok': |
|
1838 | 1838 | raise self._unwrap_exception(content) |
|
1839 | 1839 | |
|
1840 | 1840 | records = content['records'] |
|
1841 | 1841 | |
|
1842 | 1842 | buffer_lens = content['buffer_lens'] |
|
1843 | 1843 | result_buffer_lens = content['result_buffer_lens'] |
|
1844 | 1844 | buffers = msg['buffers'] |
|
1845 | 1845 | has_bufs = buffer_lens is not None |
|
1846 | 1846 | has_rbufs = result_buffer_lens is not None |
|
1847 | 1847 | for i,rec in enumerate(records): |
|
1848 | 1848 | # relink buffers |
|
1849 | 1849 | if has_bufs: |
|
1850 | 1850 | blen = buffer_lens[i] |
|
1851 | 1851 | rec['buffers'], buffers = buffers[:blen],buffers[blen:] |
|
1852 | 1852 | if has_rbufs: |
|
1853 | 1853 | blen = result_buffer_lens[i] |
|
1854 | 1854 | rec['result_buffers'], buffers = buffers[:blen],buffers[blen:] |
|
1855 | 1855 | |
|
1856 | 1856 | return records |
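A hedged sketch of `db_query` in use (same hypothetical `rc`; requires a DB backend other than NoDB):

    from datetime import datetime, timedelta

    # records completed within the last hour, fetching only two keys
    cutoff = datetime.now() - timedelta(hours=1)
    recs = rc.db_query({'completed': {'$gt': cutoff}},
                       keys=['msg_id', 'completed'])
    for rec in recs:
        print(rec['msg_id'], rec['completed'])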
|
1857 | 1857 | |
|
1858 | 1858 | __all__ = [ 'Client' ] |
@@ -1,1417 +1,1422 b'' | |||
|
1 | 1 | """The IPython Controller Hub with 0MQ |
|
2 | 2 | This is the master object that handles connections from engines and clients, |
|
3 | 3 | and monitors traffic through the various queues. |
|
4 | 4 | |
|
5 | 5 | Authors: |
|
6 | 6 | |
|
7 | 7 | * Min RK |
|
8 | 8 | """ |
|
9 | 9 | #----------------------------------------------------------------------------- |
|
10 | 10 | # Copyright (C) 2010-2011 The IPython Development Team |
|
11 | 11 | # |
|
12 | 12 | # Distributed under the terms of the BSD License. The full license is in |
|
13 | 13 | # the file COPYING, distributed as part of this software. |
|
14 | 14 | #----------------------------------------------------------------------------- |
|
15 | 15 | |
|
16 | 16 | #----------------------------------------------------------------------------- |
|
17 | 17 | # Imports |
|
18 | 18 | #----------------------------------------------------------------------------- |
|
19 | 19 | from __future__ import print_function |
|
20 | 20 | |
|
21 | 21 | import json |
|
22 | 22 | import os |
|
23 | 23 | import sys |
|
24 | 24 | import time |
|
25 | 25 | from datetime import datetime |
|
26 | 26 | |
|
27 | 27 | import zmq |
|
28 | 28 | from zmq.eventloop import ioloop |
|
29 | 29 | from zmq.eventloop.zmqstream import ZMQStream |
|
30 | 30 | |
|
31 | 31 | # internal: |
|
32 | 32 | from IPython.utils.importstring import import_item |
|
33 |  from IPython.utils.localinterfaces import LOCALHOST | 

33 | from IPython.utils.localinterfaces import localhost | 
|
34 | 34 | from IPython.utils.py3compat import cast_bytes |
|
35 | 35 | from IPython.utils.traitlets import ( |
|
36 | 36 | HasTraits, Instance, Integer, Unicode, Dict, Set, Tuple, CBytes, DottedObjectName |
|
37 | 37 | ) |
|
38 | 38 | |
|
39 | 39 | from IPython.parallel import error, util |
|
40 | 40 | from IPython.parallel.factory import RegistrationFactory |
|
41 | 41 | |
|
42 | 42 | from IPython.kernel.zmq.session import SessionFactory |
|
43 | 43 | |
|
44 | 44 | from .heartmonitor import HeartMonitor |
|
45 | 45 | |
|
46 | 46 | #----------------------------------------------------------------------------- |
|
47 | 47 | # Code |
|
48 | 48 | #----------------------------------------------------------------------------- |
|
49 | 49 | |
|
50 | 50 | def _passer(*args, **kwargs): |
|
51 | 51 | return |
|
52 | 52 | |
|
53 | 53 | def _printer(*args, **kwargs): |
|
54 | 54 | print (args) |
|
55 | 55 | print (kwargs) |
|
56 | 56 | |
|
57 | 57 | def empty_record(): |
|
58 | 58 | """Return an empty dict with all record keys.""" |
|
59 | 59 | return { |
|
60 | 60 | 'msg_id' : None, |
|
61 | 61 | 'header' : None, |
|
62 | 62 | 'metadata' : None, |
|
63 | 63 | 'content': None, |
|
64 | 64 | 'buffers': None, |
|
65 | 65 | 'submitted': None, |
|
66 | 66 | 'client_uuid' : None, |
|
67 | 67 | 'engine_uuid' : None, |
|
68 | 68 | 'started': None, |
|
69 | 69 | 'completed': None, |
|
70 | 70 | 'resubmitted': None, |
|
71 | 71 | 'received': None, |
|
72 | 72 | 'result_header' : None, |
|
73 | 73 | 'result_metadata' : None, |
|
74 | 74 | 'result_content' : None, |
|
75 | 75 | 'result_buffers' : None, |
|
76 | 76 | 'queue' : None, |
|
77 | 77 | 'pyin' : None, |
|
78 | 78 | 'pyout': None, |
|
79 | 79 | 'pyerr': None, |
|
80 | 80 | 'stdout': '', |
|
81 | 81 | 'stderr': '', |
|
82 | 82 | } |
|
83 | 83 | |
|
84 | 84 | def init_record(msg): |
|
85 | 85 | """Initialize a TaskRecord based on a request.""" |
|
86 | 86 | header = msg['header'] |
|
87 | 87 | return { |
|
88 | 88 | 'msg_id' : header['msg_id'], |
|
89 | 89 | 'header' : header, |
|
90 | 90 | 'content': msg['content'], |
|
91 | 91 | 'metadata': msg['metadata'], |
|
92 | 92 | 'buffers': msg['buffers'], |
|
93 | 93 | 'submitted': header['date'], |
|
94 | 94 | 'client_uuid' : None, |
|
95 | 95 | 'engine_uuid' : None, |
|
96 | 96 | 'started': None, |
|
97 | 97 | 'completed': None, |
|
98 | 98 | 'resubmitted': None, |
|
99 | 99 | 'received': None, |
|
100 | 100 | 'result_header' : None, |
|
101 | 101 | 'result_metadata': None, |
|
102 | 102 | 'result_content' : None, |
|
103 | 103 | 'result_buffers' : None, |
|
104 | 104 | 'queue' : None, |
|
105 | 105 | 'pyin' : None, |
|
106 | 106 | 'pyout': None, |
|
107 | 107 | 'pyerr': None, |
|
108 | 108 | 'stdout': '', |
|
109 | 109 | 'stderr': '', |
|
110 | 110 | } |
|
111 | 111 | |
|
112 | 112 | |
|
113 | 113 | class EngineConnector(HasTraits): |
|
114 | 114 | """A simple object for accessing the various zmq connections of an object. |
|
115 | 115 | Attributes are: |
|
116 | 116 | id (int): engine ID |
|
117 | 117 | uuid (unicode): engine UUID |
|
118 | 118 | pending: set of msg_ids |
|
119 | 119 | stallback: DelayedCallback for stalled registration |
|
120 | 120 | """ |
|
121 | 121 | |
|
122 | 122 | id = Integer(0) |
|
123 | 123 | uuid = Unicode() |
|
124 | 124 | pending = Set() |
|
125 | 125 | stallback = Instance(ioloop.DelayedCallback) |
|
126 | 126 | |
|
127 | 127 | |
|
128 | 128 | _db_shortcuts = { |
|
129 | 129 | 'sqlitedb' : 'IPython.parallel.controller.sqlitedb.SQLiteDB', |
|
130 | 130 | 'mongodb' : 'IPython.parallel.controller.mongodb.MongoDB', |
|
131 | 131 | 'dictdb' : 'IPython.parallel.controller.dictdb.DictDB', |
|
132 | 132 | 'nodb' : 'IPython.parallel.controller.dictdb.NoDB', |
|
133 | 133 | } |
|
134 | 134 | |
|
135 | 135 | class HubFactory(RegistrationFactory): |
|
136 | 136 | """The Configurable for setting up a Hub.""" |
|
137 | 137 | |
|
138 | 138 | # port-pairs for monitoredqueues: |
|
139 | 139 | hb = Tuple(Integer,Integer,config=True, |
|
140 | 140 | help="""PUB/ROUTER Port pair for Engine heartbeats""") |
|
141 | 141 | def _hb_default(self): |
|
142 | 142 | return tuple(util.select_random_ports(2)) |
|
143 | 143 | |
|
144 | 144 | mux = Tuple(Integer,Integer,config=True, |
|
145 | 145 | help="""Client/Engine Port pair for MUX queue""") |
|
146 | 146 | |
|
147 | 147 | def _mux_default(self): |
|
148 | 148 | return tuple(util.select_random_ports(2)) |
|
149 | 149 | |
|
150 | 150 | task = Tuple(Integer,Integer,config=True, |
|
151 | 151 | help="""Client/Engine Port pair for Task queue""") |
|
152 | 152 | def _task_default(self): |
|
153 | 153 | return tuple(util.select_random_ports(2)) |
|
154 | 154 | |
|
155 | 155 | control = Tuple(Integer,Integer,config=True, |
|
156 | 156 | help="""Client/Engine Port pair for Control queue""") |
|
157 | 157 | |
|
158 | 158 | def _control_default(self): |
|
159 | 159 | return tuple(util.select_random_ports(2)) |
|
160 | 160 | |
|
161 | 161 | iopub = Tuple(Integer,Integer,config=True, |
|
162 | 162 | help="""Client/Engine Port pair for IOPub relay""") |
|
163 | 163 | |
|
164 | 164 | def _iopub_default(self): |
|
165 | 165 | return tuple(util.select_random_ports(2)) |
|
166 | 166 | |
|
167 | 167 | # single ports: |
|
168 | 168 | mon_port = Integer(config=True, |
|
169 | 169 | help="""Monitor (SUB) port for queue traffic""") |
|
170 | 170 | |
|
171 | 171 | def _mon_port_default(self): |
|
172 | 172 | return util.select_random_ports(1)[0] |
|
173 | 173 | |
|
174 | 174 | notifier_port = Integer(config=True, |
|
175 | 175 | help="""PUB port for sending engine status notifications""") |
|
176 | 176 | |
|
177 | 177 | def _notifier_port_default(self): |
|
178 | 178 | return util.select_random_ports(1)[0] |
|
179 | 179 | |
|
180 |  engine_ip = Unicode(LOCALHOST, config=True, | 

180 | engine_ip = Unicode(config=True, | 
|
181 | 181 | help="IP on which to listen for engine connections. [default: loopback]") |
|
182 | def _engine_ip_default(self): | |
|
183 | return localhost() | |
|
182 | 184 | engine_transport = Unicode('tcp', config=True, |
|
183 | 185 | help="0MQ transport for engine connections. [default: tcp]") |
|
184 | 186 | |
|
185 |  client_ip = Unicode(LOCALHOST, config=True, | 

187 | client_ip = Unicode(config=True, | 
|
186 | 188 | help="IP on which to listen for client connections. [default: loopback]") |
|
187 | 189 | client_transport = Unicode('tcp', config=True, |
|
188 | 190 | help="0MQ transport for client connections. [default : tcp]") |
|
189 | 191 | |
|
190 |  monitor_ip = Unicode(LOCALHOST, config=True, | 

192 | monitor_ip = Unicode(config=True, | 
|
191 | 193 | help="IP on which to listen for monitor messages. [default: loopback]") |
|
192 | 194 | monitor_transport = Unicode('tcp', config=True, |
|
193 | 195 | help="0MQ transport for monitor messages. [default : tcp]") |
|
196 | ||
|
197 | _client_ip_default = _monitor_ip_default = _engine_ip_default | |
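These IP and transport traits are normally set through a controller profile rather than in code; a minimal sketch of an `ipcontroller_config.py` (the file name and values are assumptions; the `ip` trait that fans out via `_ip_changed` below is inherited from RegistrationFactory):

    c = get_config()

    # listen on all interfaces instead of loopback
    c.HubFactory.ip = '*'               # sets engine_ip, client_ip, monitor_ip
    # pin the heartbeat port pair instead of picking random ports
    c.HubFactory.hb = (15555, 15556)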
|
198 | ||
|
194 | 199 | |
|
195 | 200 | monitor_url = Unicode('') |
|
196 | 201 | |
|
197 | 202 | db_class = DottedObjectName('NoDB', |
|
198 | 203 | config=True, help="""The class to use for the DB backend |
|
199 | 204 | |
|
200 | 205 | Options include: |
|
201 | 206 | |
|
202 | 207 | SQLiteDB: SQLite |
|
203 | 208 | MongoDB : use MongoDB |
|
204 | 209 | DictDB : in-memory storage (fastest, but be mindful of memory growth of the Hub) |
|
205 | 210 | NoDB : disable database altogether (default) |
|
206 | 211 | |
|
207 | 212 | """) |
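For example, persisting task records is a one-line profile change (a sketch; the shortcut names come from the `_db_shortcuts` table above):

    # in ipcontroller_config.py
    c.HubFactory.db_class = 'sqlitedb'
    # or, equivalently, the full dotted path:
    # c.HubFactory.db_class = 'IPython.parallel.controller.sqlitedb.SQLiteDB'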
|
208 | 213 | |
|
209 | 214 | # not configurable |
|
210 | 215 | db = Instance('IPython.parallel.controller.dictdb.BaseDB') |
|
211 | 216 | heartmonitor = Instance('IPython.parallel.controller.heartmonitor.HeartMonitor') |
|
212 | 217 | |
|
213 | 218 | def _ip_changed(self, name, old, new): |
|
214 | 219 | self.engine_ip = new |
|
215 | 220 | self.client_ip = new |
|
216 | 221 | self.monitor_ip = new |
|
217 | 222 | self._update_monitor_url() |
|
218 | 223 | |
|
219 | 224 | def _update_monitor_url(self): |
|
220 | 225 | self.monitor_url = "%s://%s:%i" % (self.monitor_transport, self.monitor_ip, self.mon_port) |
|
221 | 226 | |
|
222 | 227 | def _transport_changed(self, name, old, new): |
|
223 | 228 | self.engine_transport = new |
|
224 | 229 | self.client_transport = new |
|
225 | 230 | self.monitor_transport = new |
|
226 | 231 | self._update_monitor_url() |
|
227 | 232 | |
|
228 | 233 | def __init__(self, **kwargs): |
|
229 | 234 | super(HubFactory, self).__init__(**kwargs) |
|
230 | 235 | self._update_monitor_url() |
|
231 | 236 | |
|
232 | 237 | |
|
233 | 238 | def construct(self): |
|
234 | 239 | self.init_hub() |
|
235 | 240 | |
|
236 | 241 | def start(self): |
|
237 | 242 | self.heartmonitor.start() |
|
238 | 243 | self.log.info("Heartmonitor started") |
|
239 | 244 | |
|
240 | 245 | def client_url(self, channel): |
|
241 | 246 | """return full zmq url for a named client channel""" |
|
242 | 247 | return "%s://%s:%i" % (self.client_transport, self.client_ip, self.client_info[channel]) |
|
243 | 248 | |
|
244 | 249 | def engine_url(self, channel): |
|
245 | 250 | """return full zmq url for a named engine channel""" |
|
246 | 251 | return "%s://%s:%i" % (self.engine_transport, self.engine_ip, self.engine_info[channel]) |
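With the default tcp transport, loopback IP, and random ports, these helpers compose URLs like the following (`factory` and the port numbers are illustrative):

    factory.engine_url('task')    # -> 'tcp://127.0.0.1:53241'
    factory.client_url('task')    # -> 'tcp://127.0.0.1:53240'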
|
247 | 252 | |
|
248 | 253 | def init_hub(self): |
|
249 | 254 | """construct Hub object""" |
|
250 | 255 | |
|
251 | 256 | ctx = self.context |
|
252 | 257 | loop = self.loop |
|
253 | 258 | |
|
254 | 259 | try: |
|
255 | 260 | scheme = self.config.TaskScheduler.scheme_name |
|
256 | 261 | except AttributeError: |
|
257 | 262 | from .scheduler import TaskScheduler |
|
258 | 263 | scheme = TaskScheduler.scheme_name.get_default_value() |
|
259 | 264 | |
|
260 | 265 | # build connection dicts |
|
261 | 266 | engine = self.engine_info = { |
|
262 | 267 | 'interface' : "%s://%s" % (self.engine_transport, self.engine_ip), |
|
263 | 268 | 'registration' : self.regport, |
|
264 | 269 | 'control' : self.control[1], |
|
265 | 270 | 'mux' : self.mux[1], |
|
266 | 271 | 'hb_ping' : self.hb[0], |
|
267 | 272 | 'hb_pong' : self.hb[1], |
|
268 | 273 | 'task' : self.task[1], |
|
269 | 274 | 'iopub' : self.iopub[1], |
|
270 | 275 | } |
|
271 | 276 | |
|
272 | 277 | client = self.client_info = { |
|
273 | 278 | 'interface' : "%s://%s" % (self.client_transport, self.client_ip), |
|
274 | 279 | 'registration' : self.regport, |
|
275 | 280 | 'control' : self.control[0], |
|
276 | 281 | 'mux' : self.mux[0], |
|
277 | 282 | 'task' : self.task[0], |
|
278 | 283 | 'task_scheme' : scheme, |
|
279 | 284 | 'iopub' : self.iopub[0], |
|
280 | 285 | 'notification' : self.notifier_port, |
|
281 | 286 | } |
|
282 | 287 | |
|
283 | 288 | self.log.debug("Hub engine addrs: %s", self.engine_info) |
|
284 | 289 | self.log.debug("Hub client addrs: %s", self.client_info) |
|
285 | 290 | |
|
286 | 291 | # Registrar socket |
|
287 | 292 | q = ZMQStream(ctx.socket(zmq.ROUTER), loop) |
|
288 | 293 | util.set_hwm(q, 0) |
|
289 | 294 | q.bind(self.client_url('registration')) |
|
290 | 295 | self.log.info("Hub listening on %s for registration.", self.client_url('registration')) |
|
291 | 296 | if self.client_ip != self.engine_ip: |
|
292 | 297 | q.bind(self.engine_url('registration')) |
|
293 | 298 | self.log.info("Hub listening on %s for registration.", self.engine_url('registration')) |
|
294 | 299 | |
|
295 | 300 | ### Engine connections ### |
|
296 | 301 | |
|
297 | 302 | # heartbeat |
|
298 | 303 | hpub = ctx.socket(zmq.PUB) |
|
299 | 304 | hpub.bind(self.engine_url('hb_ping')) |
|
300 | 305 | hrep = ctx.socket(zmq.ROUTER) |
|
301 | 306 | util.set_hwm(hrep, 0) |
|
302 | 307 | hrep.bind(self.engine_url('hb_pong')) |
|
303 | 308 | self.heartmonitor = HeartMonitor(loop=loop, parent=self, log=self.log, |
|
304 | 309 | pingstream=ZMQStream(hpub,loop), |
|
305 | 310 | pongstream=ZMQStream(hrep,loop) |
|
306 | 311 | ) |
|
307 | 312 | |
|
308 | 313 | ### Client connections ### |
|
309 | 314 | |
|
310 | 315 | # Notifier socket |
|
311 | 316 | n = ZMQStream(ctx.socket(zmq.PUB), loop) |
|
312 | 317 | n.bind(self.client_url('notification')) |
|
313 | 318 | |
|
314 | 319 | ### build and launch the queues ### |
|
315 | 320 | |
|
316 | 321 | # monitor socket |
|
317 | 322 | sub = ctx.socket(zmq.SUB) |
|
318 | 323 | sub.setsockopt(zmq.SUBSCRIBE, b"") |
|
319 | 324 | sub.bind(self.monitor_url) |
|
320 | 325 | sub.bind('inproc://monitor') |
|
321 | 326 | sub = ZMQStream(sub, loop) |
|
322 | 327 | |
|
323 | 328 | # connect the db |
|
324 | 329 | db_class = _db_shortcuts.get(self.db_class.lower(), self.db_class) |
|
325 | 330 | self.log.info('Hub using DB backend: %r', (db_class.split('.')[-1])) |
|
326 | 331 | self.db = import_item(str(db_class))(session=self.session.session, |
|
327 | 332 | parent=self, log=self.log) |
|
328 | 333 | time.sleep(.25) |
|
329 | 334 | |
|
330 | 335 | # resubmit stream |
|
331 | 336 | r = ZMQStream(ctx.socket(zmq.DEALER), loop) |
|
332 | 337 | url = util.disambiguate_url(self.client_url('task')) |
|
333 | 338 | r.connect(url) |
|
334 | 339 | |
|
335 | 340 | self.hub = Hub(loop=loop, session=self.session, monitor=sub, heartmonitor=self.heartmonitor, |
|
336 | 341 | query=q, notifier=n, resubmit=r, db=self.db, |
|
337 | 342 | engine_info=self.engine_info, client_info=self.client_info, |
|
338 | 343 | log=self.log) |
|
339 | 344 | |
|
340 | 345 | |
|
341 | 346 | class Hub(SessionFactory): |
|
342 | 347 | """The IPython Controller Hub with 0MQ connections |
|
343 | 348 | |
|
344 | 349 | Parameters |
|
345 | 350 | ========== |
|
346 | 351 | loop: zmq IOLoop instance |
|
347 | 352 | session: Session object |
|
348 | 353 | <removed> context: zmq context for creating new connections (?) |
|
349 | 354 | queue: ZMQStream for monitoring the command queue (SUB) |
|
350 | 355 | query: ZMQStream for engine registration and client queries requests (ROUTER) |
|
351 | 356 | heartbeat: HeartMonitor object checking the pulse of the engines |
|
352 | 357 | notifier: ZMQStream for broadcasting engine registration changes (PUB) |
|
353 | 358 | db: connection to db for out of memory logging of commands |
|
354 | 359 | NotImplemented |
|
355 | 360 | engine_info: dict of zmq connection information for engines to connect |
|
356 | 361 | to the queues. |
|
357 | 362 | client_info: dict of zmq connection information for engines to connect |
|
358 | 363 | to the queues. |
|
359 | 364 | """ |
|
360 | 365 | |
|
361 | 366 | engine_state_file = Unicode() |
|
362 | 367 | |
|
363 | 368 | # internal data structures: |
|
364 | 369 | ids=Set() # engine IDs |
|
365 | 370 | keytable=Dict() |
|
366 | 371 | by_ident=Dict() |
|
367 | 372 | engines=Dict() |
|
368 | 373 | clients=Dict() |
|
369 | 374 | hearts=Dict() |
|
370 | 375 | pending=Set() |
|
371 | 376 | queues=Dict() # pending msg_ids keyed by engine_id |
|
372 | 377 | tasks=Dict() # pending msg_ids submitted as tasks, keyed by client_id |
|
373 | 378 | completed=Dict() # completed msg_ids keyed by engine_id |
|
374 | 379 | all_completed=Set() # set of all completed msg_ids 

375 | 380 | dead_engines=Set() # set of uuids of engines that have died or unregistered 

376 | 381 | unassigned=Set() # set of task msg_ids not yet assigned a destination 
|
377 | 382 | incoming_registrations=Dict() |
|
378 | 383 | registration_timeout=Integer() |
|
379 | 384 | _idcounter=Integer(0) |
|
380 | 385 | |
|
381 | 386 | # objects from constructor: |
|
382 | 387 | query=Instance(ZMQStream) |
|
383 | 388 | monitor=Instance(ZMQStream) |
|
384 | 389 | notifier=Instance(ZMQStream) |
|
385 | 390 | resubmit=Instance(ZMQStream) |
|
386 | 391 | heartmonitor=Instance(HeartMonitor) |
|
387 | 392 | db=Instance(object) |
|
388 | 393 | client_info=Dict() |
|
389 | 394 | engine_info=Dict() |
|
390 | 395 | |
|
391 | 396 | |
|
392 | 397 | def __init__(self, **kwargs): |
|
393 | 398 | """ |
|
394 | 399 | # universal: |
|
395 | 400 | loop: IOLoop for creating future connections |
|
396 | 401 | session: streamsession for sending serialized data |
|
397 | 402 | # engine: |
|
398 | 403 | queue: ZMQStream for monitoring queue messages |
|
399 | 404 | query: ZMQStream for engine+client registration and client requests |
|
400 | 405 | heartbeat: HeartMonitor object for tracking engines |
|
401 | 406 | # extra: |
|
402 | 407 | db: ZMQStream for db connection (NotImplemented) |
|
403 | 408 | engine_info: zmq address/protocol dict for engine connections |
|
404 | 409 | client_info: zmq address/protocol dict for client connections |
|
405 | 410 | """ |
|
406 | 411 | |
|
407 | 412 | super(Hub, self).__init__(**kwargs) |
|
408 | 413 | self.registration_timeout = max(10000, 5*self.heartmonitor.period) |
|
409 | 414 | |
|
410 | 415 | # register our callbacks |
|
411 | 416 | self.query.on_recv(self.dispatch_query) |
|
412 | 417 | self.monitor.on_recv(self.dispatch_monitor_traffic) |
|
413 | 418 | |
|
414 | 419 | self.heartmonitor.add_heart_failure_handler(self.handle_heart_failure) |
|
415 | 420 | self.heartmonitor.add_new_heart_handler(self.handle_new_heart) |
|
416 | 421 | |
|
417 | 422 | self.monitor_handlers = {b'in' : self.save_queue_request, |
|
418 | 423 | b'out': self.save_queue_result, |
|
419 | 424 | b'intask': self.save_task_request, |
|
420 | 425 | b'outtask': self.save_task_result, |
|
421 | 426 | b'tracktask': self.save_task_destination, |
|
422 | 427 | b'incontrol': _passer, |
|
423 | 428 | b'outcontrol': _passer, |
|
424 | 429 | b'iopub': self.save_iopub_message, |
|
425 | 430 | } |
|
426 | 431 | |
|
427 | 432 | self.query_handlers = {'queue_request': self.queue_status, |
|
428 | 433 | 'result_request': self.get_results, |
|
429 | 434 | 'history_request': self.get_history, |
|
430 | 435 | 'db_request': self.db_query, |
|
431 | 436 | 'purge_request': self.purge_results, |
|
432 | 437 | 'load_request': self.check_load, |
|
433 | 438 | 'resubmit_request': self.resubmit_task, |
|
434 | 439 | 'shutdown_request': self.shutdown_request, |
|
435 | 440 | 'registration_request' : self.register_engine, |
|
436 | 441 | 'unregistration_request' : self.unregister_engine, |
|
437 | 442 | 'connection_request': self.connection_request, |
|
438 | 443 | } |
|
439 | 444 | |
|
440 | 445 | # ignore resubmit replies |
|
441 | 446 | self.resubmit.on_recv(lambda msg: None, copy=False) |
|
442 | 447 | |
|
443 | 448 | self.log.info("hub::created hub") |
|
444 | 449 | |
|
445 | 450 | @property |
|
446 | 451 | def _next_id(self): |
|
447 | 452 | """generate a new ID. 
|
448 | 453 | |
|
449 | 454 | No longer reuse old ids, just count from 0.""" |
|
450 | 455 | newid = self._idcounter |
|
451 | 456 | self._idcounter += 1 |
|
452 | 457 | return newid |
|
453 | 458 | # newid = 0 |
|
454 | 459 | # incoming = [id[0] for id in self.incoming_registrations.itervalues()] |
|
455 | 460 | # # print newid, self.ids, self.incoming_registrations |
|
456 | 461 | # while newid in self.ids or newid in incoming: |
|
457 | 462 | # newid += 1 |
|
458 | 463 | # return newid |
|
459 | 464 | |
|
460 | 465 | #----------------------------------------------------------------------------- |
|
461 | 466 | # message validation |
|
462 | 467 | #----------------------------------------------------------------------------- |
|
463 | 468 | |
|
464 | 469 | def _validate_targets(self, targets): |
|
465 | 470 | """turn any valid targets argument into a list of integer ids""" |
|
466 | 471 | if targets is None: |
|
467 | 472 | # default to all |
|
468 | 473 | return self.ids |
|
469 | 474 | |
|
470 | 475 | if isinstance(targets, (int,str,unicode)): |
|
471 | 476 | # only one target specified |
|
472 | 477 | targets = [targets] |
|
473 | 478 | _targets = [] |
|
474 | 479 | for t in targets: |
|
475 | 480 | # map raw identities to ids |
|
476 | 481 | if isinstance(t, (str,unicode)): |
|
477 | 482 | t = self.by_ident.get(cast_bytes(t), t) |
|
478 | 483 | _targets.append(t) |
|
479 | 484 | targets = _targets |
|
480 | 485 | bad_targets = [ t for t in targets if t not in self.ids ] |
|
481 | 486 | if bad_targets: |
|
482 | 487 | raise IndexError("No Such Engine: %r" % bad_targets) |
|
483 | 488 | if not targets: |
|
484 | 489 | raise IndexError("No Engines Registered") |
|
485 | 490 | return targets |
|
486 | 491 | |
|
487 | 492 | #----------------------------------------------------------------------------- |
|
488 | 493 | # dispatch methods (1 per stream) |
|
489 | 494 | #----------------------------------------------------------------------------- |
|
490 | 495 | |
|
491 | 496 | |
|
492 | 497 | @util.log_errors |
|
493 | 498 | def dispatch_monitor_traffic(self, msg): |
|
494 | 499 | """all ME and Task queue messages come through here, as well as |
|
495 | 500 | IOPub traffic.""" |
|
496 | 501 | self.log.debug("monitor traffic: %r", msg[0]) |
|
497 | 502 | switch = msg[0] |
|
498 | 503 | try: |
|
499 | 504 | idents, msg = self.session.feed_identities(msg[1:]) |
|
500 | 505 | except ValueError: |
|
501 | 506 | idents=[] |
|
502 | 507 | if not idents: |
|
503 | 508 | self.log.error("Monitor message without topic: %r", msg) |
|
504 | 509 | return |
|
505 | 510 | handler = self.monitor_handlers.get(switch, None) |
|
506 | 511 | if handler is not None: |
|
507 | 512 | handler(idents, msg) |
|
508 | 513 | else: |
|
509 | 514 | self.log.error("Unrecognized monitor topic: %r", switch) |
|
510 | 515 | |
|
511 | 516 | |
|
512 | 517 | @util.log_errors |
|
513 | 518 | def dispatch_query(self, msg): |
|
514 | 519 | """Route registration requests and queries from clients.""" |
|
515 | 520 | try: |
|
516 | 521 | idents, msg = self.session.feed_identities(msg) |
|
517 | 522 | except ValueError: |
|
518 | 523 | idents = [] |
|
519 | 524 | if not idents: |
|
520 | 525 | self.log.error("Bad Query Message: %r", msg) |
|
521 | 526 | return |
|
522 | 527 | client_id = idents[0] |
|
523 | 528 | try: |
|
524 | 529 | msg = self.session.unserialize(msg, content=True) |
|
525 | 530 | except Exception: |
|
526 | 531 | content = error.wrap_exception() |
|
527 | 532 | self.log.error("Bad Query Message: %r", msg, exc_info=True) |
|
528 | 533 | self.session.send(self.query, "hub_error", ident=client_id, |
|
529 | 534 | content=content) |
|
530 | 535 | return |
|
531 | 536 | # print client_id, header, parent, content |
|
532 | 537 | #switch on message type: |
|
533 | 538 | msg_type = msg['header']['msg_type'] |
|
534 | 539 | self.log.info("client::client %r requested %r", client_id, msg_type) |
|
535 | 540 | handler = self.query_handlers.get(msg_type, None) |
|
536 | 541 | try: |
|
537 | 542 | assert handler is not None, "Bad Message Type: %r" % msg_type |
|
538 | 543 | except: |
|
539 | 544 | content = error.wrap_exception() |
|
540 | 545 | self.log.error("Bad Message Type: %r", msg_type, exc_info=True) |
|
541 | 546 | self.session.send(self.query, "hub_error", ident=client_id, |
|
542 | 547 | content=content) |
|
543 | 548 | return |
|
544 | 549 | |
|
545 | 550 | else: |
|
546 | 551 | handler(idents, msg) |
|
547 | 552 | |
|
548 | 553 | def dispatch_db(self, msg): |
|
549 | 554 | """""" |
|
550 | 555 | raise NotImplementedError |
|
551 | 556 | |
|
552 | 557 | #--------------------------------------------------------------------------- |
|
553 | 558 | # handler methods (1 per event) |
|
554 | 559 | #--------------------------------------------------------------------------- |
|
555 | 560 | |
|
556 | 561 | #----------------------- Heartbeat -------------------------------------- |
|
557 | 562 | |
|
558 | 563 | def handle_new_heart(self, heart): |
|
559 | 564 | """handler to attach to heartbeater. |
|
560 | 565 | Called when a new heart starts to beat. |
|
561 | 566 | Triggers completion of registration.""" |
|
562 | 567 | self.log.debug("heartbeat::handle_new_heart(%r)", heart) |
|
563 | 568 | if heart not in self.incoming_registrations: |
|
564 | 569 | self.log.info("heartbeat::ignoring new heart: %r", heart) |
|
565 | 570 | else: |
|
566 | 571 | self.finish_registration(heart) |
|
567 | 572 | |
|
568 | 573 | |
|
569 | 574 | def handle_heart_failure(self, heart): |
|
570 | 575 | """handler to attach to heartbeater. |
|
571 | 576 | called when a previously registered heart fails to respond to beat request. |
|
572 | 577 | triggers unregistration""" |
|
573 | 578 | self.log.debug("heartbeat::handle_heart_failure(%r)", heart) |
|
574 | 579 | eid = self.hearts.get(heart, None) |
|
575 | 580 | uuid = self.engines[eid].uuid if eid is not None else None # guard against unknown hearts 
|
576 | 581 | if eid is None or self.keytable[eid] in self.dead_engines: |
|
577 | 582 | self.log.info("heartbeat::ignoring heart failure %r (not an engine or already dead)", heart) |
|
578 | 583 | else: |
|
579 | 584 | self.unregister_engine(heart, dict(content=dict(id=eid, queue=uuid))) |
|
580 | 585 | |
|
581 | 586 | #----------------------- MUX Queue Traffic ------------------------------ |
|
582 | 587 | |
|
583 | 588 | def save_queue_request(self, idents, msg): |
|
584 | 589 | if len(idents) < 2: |
|
585 | 590 | self.log.error("invalid identity prefix: %r", idents) |
|
586 | 591 | return |
|
587 | 592 | queue_id, client_id = idents[:2] |
|
588 | 593 | try: |
|
589 | 594 | msg = self.session.unserialize(msg) |
|
590 | 595 | except Exception: |
|
591 | 596 | self.log.error("queue::client %r sent invalid message to %r: %r", client_id, queue_id, msg, exc_info=True) |
|
592 | 597 | return |
|
593 | 598 | |
|
594 | 599 | eid = self.by_ident.get(queue_id, None) |
|
595 | 600 | if eid is None: |
|
596 | 601 | self.log.error("queue::target %r not registered", queue_id) |
|
597 | 602 | self.log.debug("queue:: valid are: %r", self.by_ident.keys()) |
|
598 | 603 | return |
|
599 | 604 | record = init_record(msg) |
|
600 | 605 | msg_id = record['msg_id'] |
|
601 | 606 | self.log.info("queue::client %r submitted request %r to %s", client_id, msg_id, eid) |
|
602 | 607 | # Unicode in records |
|
603 | 608 | record['engine_uuid'] = queue_id.decode('ascii') |
|
604 | 609 | record['client_uuid'] = msg['header']['session'] |
|
605 | 610 | record['queue'] = 'mux' |
|
606 | 611 | |
|
607 | 612 | try: |
|
608 | 613 | # it's possible iopub arrived first: 
|
609 | 614 | existing = self.db.get_record(msg_id) |
|
610 | 615 | for key,evalue in existing.iteritems(): |
|
611 | 616 | rvalue = record.get(key, None) |
|
612 | 617 | if evalue and rvalue and evalue != rvalue: |
|
613 | 618 | self.log.warn("conflicting initial state for record: %r:%r <%r> %r", msg_id, rvalue, key, evalue) |
|
614 | 619 | elif evalue and not rvalue: |
|
615 | 620 | record[key] = evalue |
|
616 | 621 | try: |
|
617 | 622 | self.db.update_record(msg_id, record) |
|
618 | 623 | except Exception: |
|
619 | 624 | self.log.error("DB Error updating record %r", msg_id, exc_info=True) |
|
620 | 625 | except KeyError: |
|
621 | 626 | try: |
|
622 | 627 | self.db.add_record(msg_id, record) |
|
623 | 628 | except Exception: |
|
624 | 629 | self.log.error("DB Error adding record %r", msg_id, exc_info=True) |
|
625 | 630 | |
|
626 | 631 | |
|
627 | 632 | self.pending.add(msg_id) |
|
628 | 633 | self.queues[eid].append(msg_id) |
|
629 | 634 | |
|
630 | 635 | def save_queue_result(self, idents, msg): |
|
631 | 636 | if len(idents) < 2: |
|
632 | 637 | self.log.error("invalid identity prefix: %r", idents) |
|
633 | 638 | return |
|
634 | 639 | |
|
635 | 640 | client_id, queue_id = idents[:2] |
|
636 | 641 | try: |
|
637 | 642 | msg = self.session.unserialize(msg) |
|
638 | 643 | except Exception: |
|
639 | 644 | self.log.error("queue::engine %r sent invalid message to %r: %r", |
|
640 | 645 | queue_id, client_id, msg, exc_info=True) |
|
641 | 646 | return |
|
642 | 647 | |
|
643 | 648 | eid = self.by_ident.get(queue_id, None) |
|
644 | 649 | if eid is None: |
|
645 | 650 | self.log.error("queue::unknown engine %r is sending a reply: ", queue_id) |
|
646 | 651 | return |
|
647 | 652 | |
|
648 | 653 | parent = msg['parent_header'] |
|
649 | 654 | if not parent: |
|
650 | 655 | return |
|
651 | 656 | msg_id = parent['msg_id'] |
|
652 | 657 | if msg_id in self.pending: |
|
653 | 658 | self.pending.remove(msg_id) |
|
654 | 659 | self.all_completed.add(msg_id) |
|
655 | 660 | self.queues[eid].remove(msg_id) |
|
656 | 661 | self.completed[eid].append(msg_id) |
|
657 | 662 | self.log.info("queue::request %r completed on %s", msg_id, eid) |
|
658 | 663 | elif msg_id not in self.all_completed: |
|
659 | 664 | # it could be a result from a dead engine that died before delivering the |
|
660 | 665 | # result |
|
661 | 666 | self.log.warn("queue:: unknown msg finished %r", msg_id) |
|
662 | 667 | return |
|
663 | 668 | # update record anyway, because the unregistration could have been premature |
|
664 | 669 | rheader = msg['header'] |
|
665 | 670 | md = msg['metadata'] |
|
666 | 671 | completed = rheader['date'] |
|
667 | 672 | started = md.get('started', None) |
|
668 | 673 | result = { |
|
669 | 674 | 'result_header' : rheader, |
|
670 | 675 | 'result_metadata': md, |
|
671 | 676 | 'result_content': msg['content'], |
|
672 | 677 | 'received': datetime.now(), |
|
673 | 678 | 'started' : started, |
|
674 | 679 | 'completed' : completed |
|
675 | 680 | } |
|
676 | 681 | |
|
677 | 682 | result['result_buffers'] = msg['buffers'] |
|
678 | 683 | try: |
|
679 | 684 | self.db.update_record(msg_id, result) |
|
680 | 685 | except Exception: |
|
681 | 686 | self.log.error("DB Error updating record %r", msg_id, exc_info=True) |
|
682 | 687 | |
|
683 | 688 | |
|
684 | 689 | #--------------------- Task Queue Traffic ------------------------------ |
|
685 | 690 | |
|
686 | 691 | def save_task_request(self, idents, msg): |
|
687 | 692 | """Save the submission of a task.""" |
|
688 | 693 | client_id = idents[0] |
|
689 | 694 | |
|
690 | 695 | try: |
|
691 | 696 | msg = self.session.unserialize(msg) |
|
692 | 697 | except Exception: |
|
693 | 698 | self.log.error("task::client %r sent invalid task message: %r", |
|
694 | 699 | client_id, msg, exc_info=True) |
|
695 | 700 | return |
|
696 | 701 | record = init_record(msg) |
|
697 | 702 | |
|
698 | 703 | record['client_uuid'] = msg['header']['session'] |
|
699 | 704 | record['queue'] = 'task' |
|
700 | 705 | header = msg['header'] |
|
701 | 706 | msg_id = header['msg_id'] |
|
702 | 707 | self.pending.add(msg_id) |
|
703 | 708 | self.unassigned.add(msg_id) |
|
704 | 709 | # it's possible iopub arrived first: 
|
705 | 710 | # it's posible iopub arrived first: |
|
706 | 711 | existing = self.db.get_record(msg_id) |
|
707 | 712 | if existing['resubmitted']: |
|
708 | 713 | for key in ('submitted', 'client_uuid', 'buffers'): |
|
709 | 714 | # don't clobber these keys on resubmit |
|
710 | 715 | # submitted and client_uuid should be different |
|
711 | 716 | # and buffers might be big, and shouldn't have changed |
|
712 | 717 | record.pop(key) |
|
713 | 718 | # still check content and header, which should not change 

714 | 719 | # and are not as expensive to compare as buffers 
|
715 | 720 | |
|
716 | 721 | for key,evalue in existing.iteritems(): |
|
717 | 722 | if key.endswith('buffers'): |
|
718 | 723 | # don't compare buffers |
|
719 | 724 | continue |
|
720 | 725 | rvalue = record.get(key, None) |
|
721 | 726 | if evalue and rvalue and evalue != rvalue: |
|
722 | 727 | self.log.warn("conflicting initial state for record: %r:%r <%r> %r", msg_id, rvalue, key, evalue) |
|
723 | 728 | elif evalue and not rvalue: |
|
724 | 729 | record[key] = evalue |
|
725 | 730 | try: |
|
726 | 731 | self.db.update_record(msg_id, record) |
|
727 | 732 | except Exception: |
|
728 | 733 | self.log.error("DB Error updating record %r", msg_id, exc_info=True) |
|
729 | 734 | except KeyError: |
|
730 | 735 | try: |
|
731 | 736 | self.db.add_record(msg_id, record) |
|
732 | 737 | except Exception: |
|
733 | 738 | self.log.error("DB Error adding record %r", msg_id, exc_info=True) |
|
734 | 739 | except Exception: |
|
735 | 740 | self.log.error("DB Error saving task request %r", msg_id, exc_info=True) |
|
736 | 741 | |
|
737 | 742 | def save_task_result(self, idents, msg): |
|
738 | 743 | """save the result of a completed task.""" |
|
739 | 744 | client_id = idents[0] |
|
740 | 745 | try: |
|
741 | 746 | msg = self.session.unserialize(msg) |
|
742 | 747 | except Exception: |
|
743 | 748 | self.log.error("task::invalid task result message sent to %r: %r", 
|
744 | 749 | client_id, msg, exc_info=True) |
|
745 | 750 | return |
|
746 | 751 | |
|
747 | 752 | parent = msg['parent_header'] |
|
748 | 753 | if not parent: |
|
749 | 754 | # print msg |
|
750 | 755 | self.log.warn("Task %r had no parent!", msg) |
|
751 | 756 | return |
|
752 | 757 | msg_id = parent['msg_id'] |
|
753 | 758 | if msg_id in self.unassigned: |
|
754 | 759 | self.unassigned.remove(msg_id) |
|
755 | 760 | |
|
756 | 761 | header = msg['header'] |
|
757 | 762 | md = msg['metadata'] |
|
758 | 763 | engine_uuid = md.get('engine', u'') |
|
759 | 764 | eid = self.by_ident.get(cast_bytes(engine_uuid), None) |
|
760 | 765 | |
|
761 | 766 | status = md.get('status', None) |
|
762 | 767 | |
|
763 | 768 | if msg_id in self.pending: |
|
764 | 769 | self.log.info("task::task %r finished on %s", msg_id, eid) |
|
765 | 770 | self.pending.remove(msg_id) |
|
766 | 771 | self.all_completed.add(msg_id) |
|
767 | 772 | if eid is not None: |
|
768 | 773 | if status != 'aborted': |
|
769 | 774 | self.completed[eid].append(msg_id) |
|
770 | 775 | if msg_id in self.tasks[eid]: |
|
771 | 776 | self.tasks[eid].remove(msg_id) |
|
772 | 777 | completed = header['date'] |
|
773 | 778 | started = md.get('started', None) |
|
774 | 779 | result = { |
|
775 | 780 | 'result_header' : header, |
|
776 | 781 | 'result_metadata': msg['metadata'], |
|
777 | 782 | 'result_content': msg['content'], |
|
778 | 783 | 'started' : started, |
|
779 | 784 | 'completed' : completed, |
|
780 | 785 | 'received' : datetime.now(), |
|
781 | 786 | 'engine_uuid': engine_uuid, |
|
782 | 787 | } |
|
783 | 788 | |
|
784 | 789 | result['result_buffers'] = msg['buffers'] |
|
785 | 790 | try: |
|
786 | 791 | self.db.update_record(msg_id, result) |
|
787 | 792 | except Exception: |
|
788 | 793 | self.log.error("DB Error saving task request %r", msg_id, exc_info=True) |
|
789 | 794 | |
|
790 | 795 | else: |
|
791 | 796 | self.log.debug("task::unknown task %r finished", msg_id) |
|
792 | 797 | |
|
793 | 798 | def save_task_destination(self, idents, msg): |
|
794 | 799 | try: |
|
795 | 800 | msg = self.session.unserialize(msg, content=True) |
|
796 | 801 | except Exception: |
|
797 | 802 | self.log.error("task::invalid task tracking message", exc_info=True) |
|
798 | 803 | return |
|
799 | 804 | content = msg['content'] |
|
800 | 805 | # print (content) |
|
801 | 806 | msg_id = content['msg_id'] |
|
802 | 807 | engine_uuid = content['engine_id'] |
|
803 | 808 | eid = self.by_ident[cast_bytes(engine_uuid)] |
|
804 | 809 | |
|
805 | 810 | self.log.info("task::task %r arrived on %r", msg_id, eid) |
|
806 | 811 | if msg_id in self.unassigned: |
|
807 | 812 | self.unassigned.remove(msg_id) |
|
808 | 813 | # else: |
|
809 | 814 | # self.log.debug("task::task %r not listed as MIA?!"%(msg_id)) |
|
810 | 815 | |
|
811 | 816 | self.tasks[eid].append(msg_id) |
|
812 | 817 | # self.pending[msg_id][1].update(received=datetime.now(),engine=(eid,engine_uuid)) |
|
813 | 818 | try: |
|
814 | 819 | self.db.update_record(msg_id, dict(engine_uuid=engine_uuid)) |
|
815 | 820 | except Exception: |
|
816 | 821 | self.log.error("DB Error saving task destination %r", msg_id, exc_info=True) |
|
817 | 822 | |
|
818 | 823 | |
|
819 | 824 | def mia_task_request(self, idents, msg): |
|
820 | 825 | raise NotImplementedError |
|
821 | 826 | client_id = idents[0] |
|
822 | 827 | # content = dict(mia=self.mia,status='ok') |
|
823 | 828 | # self.session.send('mia_reply', content=content, idents=client_id) |
|
824 | 829 | |
|
825 | 830 | |
|
826 | 831 | #--------------------- IOPub Traffic ------------------------------ |
|
827 | 832 | |
|
828 | 833 | def save_iopub_message(self, topics, msg): |
|
829 | 834 | """save an iopub message into the db""" |
|
830 | 835 | # print (topics) |
|
831 | 836 | try: |
|
832 | 837 | msg = self.session.unserialize(msg, content=True) |
|
833 | 838 | except Exception: |
|
834 | 839 | self.log.error("iopub::invalid IOPub message", exc_info=True) |
|
835 | 840 | return |
|
836 | 841 | |
|
837 | 842 | parent = msg['parent_header'] |
|
838 | 843 | if not parent: |
|
839 | 844 | self.log.warn("iopub::IOPub message lacks parent: %r", msg) |
|
840 | 845 | return |
|
841 | 846 | msg_id = parent['msg_id'] |
|
842 | 847 | msg_type = msg['header']['msg_type'] |
|
843 | 848 | content = msg['content'] |
|
844 | 849 | |
|
845 | 850 | # ensure msg_id is in db |
|
846 | 851 | try: |
|
847 | 852 | rec = self.db.get_record(msg_id) |
|
848 | 853 | except KeyError: |
|
849 | 854 | rec = empty_record() |
|
850 | 855 | rec['msg_id'] = msg_id |
|
851 | 856 | self.db.add_record(msg_id, rec) |
|
852 | 857 | # stream |
|
853 | 858 | d = {} |
|
854 | 859 | if msg_type == 'stream': |
|
855 | 860 | name = content['name'] |
|
856 | 861 | s = rec[name] or '' |
|
857 | 862 | d[name] = s + content['data'] |
|
858 | 863 | |
|
859 | 864 | elif msg_type == 'pyerr': |
|
860 | 865 | d['pyerr'] = content |
|
861 | 866 | elif msg_type == 'pyin': |
|
862 | 867 | d['pyin'] = content['code'] |
|
863 | 868 | elif msg_type in ('display_data', 'pyout'): |
|
864 | 869 | d[msg_type] = content |
|
865 | 870 | elif msg_type == 'status': |
|
866 | 871 | pass |
|
867 | 872 | elif msg_type == 'data_pub': |
|
868 | 873 | self.log.info("ignored data_pub message for %s" % msg_id) |
|
869 | 874 | else: |
|
870 | 875 | self.log.warn("unhandled iopub msg_type: %r", msg_type) |
|
871 | 876 | |
|
872 | 877 | if not d: |
|
873 | 878 | return |
|
874 | 879 | |
|
875 | 880 | try: |
|
876 | 881 | self.db.update_record(msg_id, d) |
|
877 | 882 | except Exception: |
|
878 | 883 | self.log.error("DB Error saving iopub message %r", msg_id, exc_info=True) |
|
879 | 884 | |
|
880 | 885 | |
|
881 | 886 | |
|
882 | 887 | #------------------------------------------------------------------------- |
|
883 | 888 | # Registration requests |
|
884 | 889 | #------------------------------------------------------------------------- |
|
885 | 890 | |
|
886 | 891 | def connection_request(self, client_id, msg): |
|
887 | 892 | """Reply with connection addresses for clients.""" |
|
888 | 893 | self.log.info("client::client %r connected", client_id) |
|
889 | 894 | content = dict(status='ok') |
|
890 | 895 | jsonable = {} |
|
891 | 896 | for k,v in self.keytable.iteritems(): |
|
892 | 897 | if v not in self.dead_engines: |
|
893 | 898 | jsonable[str(k)] = v |
|
894 | 899 | content['engines'] = jsonable |
|
895 | 900 | self.session.send(self.query, 'connection_reply', content, parent=msg, ident=client_id) |
|
896 | 901 | |
|
897 | 902 | def register_engine(self, reg, msg): |
|
898 | 903 | """Register a new engine.""" |
|
899 | 904 | content = msg['content'] |
|
900 | 905 | try: |
|
901 | 906 | uuid = content['uuid'] |
|
902 | 907 | except KeyError: |
|
903 | 908 | self.log.error("registration::uuid not specified", exc_info=True) 
|
904 | 909 | return |
|
905 | 910 | |
|
906 | 911 | eid = self._next_id |
|
907 | 912 | |
|
908 | 913 | self.log.debug("registration::register_engine(%i, %r)", eid, uuid) |
|
909 | 914 | |
|
910 | 915 | content = dict(id=eid,status='ok',hb_period=self.heartmonitor.period) |
|
911 | 916 | # check if this uuid is already in use: 
|
912 | 917 | if cast_bytes(uuid) in self.by_ident: |
|
913 | 918 | try: |
|
914 | 919 | raise KeyError("uuid %r in use" % uuid) |
|
915 | 920 | except: |
|
916 | 921 | content = error.wrap_exception() |
|
917 | 922 | self.log.error("uuid %r in use", uuid, exc_info=True) |
|
918 | 923 | else: |
|
919 | 924 | for h, ec in self.incoming_registrations.iteritems(): |
|
920 | 925 | if uuid == h: |
|
921 | 926 | try: |
|
922 | 927 | raise KeyError("heart_id %r in use" % uuid) |
|
923 | 928 | except: |
|
924 | 929 | self.log.error("heart_id %r in use", uuid, exc_info=True) |
|
925 | 930 | content = error.wrap_exception() |
|
926 | 931 | break |
|
927 | 932 | elif uuid == ec.uuid: |
|
928 | 933 | try: |
|
929 | 934 | raise KeyError("uuid %r in use" % uuid) |
|
930 | 935 | except: |
|
931 | 936 | self.log.error("uuid %r in use", uuid, exc_info=True) |
|
932 | 937 | content = error.wrap_exception() |
|
933 | 938 | break |
|
934 | 939 | |
|
935 | 940 | msg = self.session.send(self.query, "registration_reply", |
|
936 | 941 | content=content, |
|
937 | 942 | ident=reg) |
|
938 | 943 | |
|
939 | 944 | heart = cast_bytes(uuid) |
|
940 | 945 | |
|
941 | 946 | if content['status'] == 'ok': |
|
942 | 947 | if heart in self.heartmonitor.hearts: |
|
943 | 948 | # already beating |
|
944 | 949 | self.incoming_registrations[heart] = EngineConnector(id=eid,uuid=uuid) |
|
945 | 950 | self.finish_registration(heart) |
|
946 | 951 | else: |
|
947 | 952 | purge = lambda : self._purge_stalled_registration(heart) |
|
948 | 953 | dc = ioloop.DelayedCallback(purge, self.registration_timeout, self.loop) |
|
949 | 954 | dc.start() |
|
950 | 955 | self.incoming_registrations[heart] = EngineConnector(id=eid,uuid=uuid,stallback=dc) |
|
951 | 956 | else: |
|
952 | 957 | self.log.error("registration::registration %i failed: %r", eid, content['evalue']) |
|
953 | 958 | |
|
954 | 959 | return eid |
|
955 | 960 | |
|
956 | 961 | def unregister_engine(self, ident, msg): |
|
957 | 962 | """Unregister an engine that explicitly requested to leave.""" |
|
958 | 963 | try: |
|
959 | 964 | eid = msg['content']['id'] |
|
960 | 965 | except: |
|
961 | 966 | self.log.error("registration::bad engine id for unregistration: %r", ident, exc_info=True) |
|
962 | 967 | return |
|
963 | 968 | self.log.info("registration::unregister_engine(%r)", eid) |
|
964 | 969 | # print (eid) |
|
965 | 970 | uuid = self.keytable[eid] |
|
966 | 971 | content=dict(id=eid, uuid=uuid) |
|
967 | 972 | self.dead_engines.add(uuid) |
|
968 | 973 | # self.ids.remove(eid) |
|
969 | 974 | # uuid = self.keytable.pop(eid) |
|
970 | 975 | # |
|
971 | 976 | # ec = self.engines.pop(eid) |
|
972 | 977 | # self.hearts.pop(ec.heartbeat) |
|
973 | 978 | # self.by_ident.pop(ec.queue) |
|
974 | 979 | # self.completed.pop(eid) |
|
975 | 980 | handleit = lambda : self._handle_stranded_msgs(eid, uuid) |
|
976 | 981 | dc = ioloop.DelayedCallback(handleit, self.registration_timeout, self.loop) |
|
977 | 982 | dc.start() |
|
978 | 983 | ############## TODO: HANDLE IT ################ |
|
979 | 984 | |
|
980 | 985 | self._save_engine_state() |
|
981 | 986 | |
|
982 | 987 | if self.notifier: |
|
983 | 988 | self.session.send(self.notifier, "unregistration_notification", content=content) |
|
984 | 989 | |
|
985 | 990 | def _handle_stranded_msgs(self, eid, uuid): |
|
986 | 991 | """Handle messages known to be on an engine when the engine unregisters. |
|
987 | 992 | |
|
988 | 993 | It is possible that this will fire prematurely - that is, an engine will |
|
989 | 994 | go down after completing a result, and the client will be notified |
|
990 | 995 | that the result failed and later receive the actual result. |
|
991 | 996 | """ |
|
992 | 997 | |
|
993 | 998 | outstanding = self.queues[eid] |
|
994 | 999 | |
|
995 | 1000 | for msg_id in outstanding: |
|
996 | 1001 | self.pending.remove(msg_id) |
|
997 | 1002 | self.all_completed.add(msg_id) |
|
998 | 1003 | try: |
|
999 | 1004 | raise error.EngineError("Engine %r died while running task %r" % (eid, msg_id)) |
|
1000 | 1005 | except: |
|
1001 | 1006 | content = error.wrap_exception() |
|
1002 | 1007 | # build a fake header: |
|
1003 | 1008 | header = {} |
|
1004 | 1009 | header['engine'] = uuid |
|
1005 | 1010 | header['date'] = datetime.now() |
|
1006 | 1011 | rec = dict(result_content=content, result_header=header, result_buffers=[]) |
|
1007 | 1012 | rec['completed'] = header['date'] |
|
1008 | 1013 | rec['engine_uuid'] = uuid |
|
1009 | 1014 | try: |
|
1010 | 1015 | self.db.update_record(msg_id, rec) |
|
1011 | 1016 | except Exception: |
|
1012 | 1017 | self.log.error("DB Error handling stranded msg %r", msg_id, exc_info=True) |
|
1013 | 1018 | |
|
1014 | 1019 | |
|
1015 | 1020 | def finish_registration(self, heart): |
|
1016 | 1021 | """Second half of engine registration, called after our HeartMonitor |
|
1017 | 1022 | has received a beat from the Engine's Heart.""" |
|
1018 | 1023 | try: |
|
1019 | 1024 | ec = self.incoming_registrations.pop(heart) |
|
1020 | 1025 | except KeyError: |
|
1021 | 1026 | self.log.error("registration::tried to finish nonexistent registration", exc_info=True) 
|
1022 | 1027 | return |
|
1023 | 1028 | self.log.info("registration::finished registering engine %i:%s", ec.id, ec.uuid) |
|
1024 | 1029 | if ec.stallback is not None: |
|
1025 | 1030 | ec.stallback.stop() |
|
1026 | 1031 | eid = ec.id |
|
1027 | 1032 | self.ids.add(eid) |
|
1028 | 1033 | self.keytable[eid] = ec.uuid |
|
1029 | 1034 | self.engines[eid] = ec |
|
1030 | 1035 | self.by_ident[cast_bytes(ec.uuid)] = ec.id |
|
1031 | 1036 | self.queues[eid] = list() |
|
1032 | 1037 | self.tasks[eid] = list() |
|
1033 | 1038 | self.completed[eid] = list() |
|
1034 | 1039 | self.hearts[heart] = eid |
|
1035 | 1040 | content = dict(id=eid, uuid=self.engines[eid].uuid) |
|
1036 | 1041 | if self.notifier: |
|
1037 | 1042 | self.session.send(self.notifier, "registration_notification", content=content) |
|
1038 | 1043 | self.log.info("engine::Engine Connected: %i", eid) |
|
1039 | 1044 | |
|
1040 | 1045 | self._save_engine_state() |
|
1041 | 1046 | |
|
1042 | 1047 | def _purge_stalled_registration(self, heart): |
|
1043 | 1048 | if heart in self.incoming_registrations: |
|
1044 | 1049 | ec = self.incoming_registrations.pop(heart) |
|
1045 | 1050 | self.log.info("registration::purging stalled registration: %i", ec.id) |
|
1046 | 1051 | else: |
|
1047 | 1052 | pass |
|
1048 | 1053 | |
|
1049 | 1054 | #------------------------------------------------------------------------- |
|
1050 | 1055 | # Engine State |
|
1051 | 1056 | #------------------------------------------------------------------------- |
|
1052 | 1057 | |
|
1053 | 1058 | |
|
1054 | 1059 | def _cleanup_engine_state_file(self): |
|
1055 | 1060 | """cleanup engine state mapping""" |
|
1056 | 1061 | |
|
1057 | 1062 | if os.path.exists(self.engine_state_file): |
|
1058 | 1063 | self.log.debug("cleaning up engine state: %s", self.engine_state_file) |
|
1059 | 1064 | try: |
|
1060 | 1065 | os.remove(self.engine_state_file) |
|
1061 | 1066 | except IOError: |
|
1062 | 1067 | self.log.error("Couldn't cleanup file: %s", self.engine_state_file, exc_info=True) |
|
1063 | 1068 | |
|
1064 | 1069 | |
|
1065 | 1070 | def _save_engine_state(self): |
|
1066 | 1071 | """save engine mapping to JSON file""" |
|
1067 | 1072 | if not self.engine_state_file: |
|
1068 | 1073 | return |
|
1069 | 1074 | self.log.debug("save engine state to %s" % self.engine_state_file) |
|
1070 | 1075 | state = {} |
|
1071 | 1076 | engines = {} |
|
1072 | 1077 | for eid, ec in self.engines.iteritems(): |
|
1073 | 1078 | if ec.uuid not in self.dead_engines: |
|
1074 | 1079 | engines[eid] = ec.uuid |
|
1075 | 1080 | |
|
1076 | 1081 | state['engines'] = engines |
|
1077 | 1082 | |
|
1078 | 1083 | state['next_id'] = self._idcounter |
|
1079 | 1084 | |
|
1080 | 1085 | with open(self.engine_state_file, 'w') as f: |
|
1081 | 1086 | json.dump(state, f) |
|
1082 | 1087 | |
|
1083 | 1088 | |
|
1084 | 1089 | def _load_engine_state(self): |
|
1085 | 1090 | """load engine mapping from JSON file""" |
|
1086 | 1091 | if not os.path.exists(self.engine_state_file): |
|
1087 | 1092 | return |
|
1088 | 1093 | |
|
1089 | 1094 | self.log.info("loading engine state from %s" % self.engine_state_file) |
|
1090 | 1095 | |
|
1091 | 1096 | with open(self.engine_state_file) as f: |
|
1092 | 1097 | state = json.load(f) |
|
1093 | 1098 | |
|
1094 | 1099 | save_notifier = self.notifier |
|
1095 | 1100 | self.notifier = None |
|
1096 | 1101 | for eid, uuid in state['engines'].iteritems(): |
|
1097 | 1102 | heart = uuid.encode('ascii') |
|
1098 | 1103 | # start with this heart as current and beating: |
|
1099 | 1104 | self.heartmonitor.responses.add(heart) |
|
1100 | 1105 | self.heartmonitor.hearts.add(heart) |
|
1101 | 1106 | |
|
1102 | 1107 | self.incoming_registrations[heart] = EngineConnector(id=int(eid), uuid=uuid) |
|
1103 | 1108 | self.finish_registration(heart) |
|
1104 | 1109 | |
|
1105 | 1110 | self.notifier = save_notifier |
|
1106 | 1111 | |
|
1107 | 1112 | self._idcounter = state['next_id'] |
|
1108 | 1113 | |
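
As an aside on `_save_engine_state`/`_load_engine_state` above: the state file is plain JSON holding the id-to-uuid mapping of live engines plus the hub's id counter, so a restarted hub resumes numbering where it left off. A minimal sketch of a round trip (path and uuids illustrative):

    import json

    state = {
        "engines": {"0": "32c45a94-...", "1": "a1cb67e0-..."},  # eid -> uuid
        "next_id": 2,                                           # the hub's _idcounter
    }

    with open("engine_state.json", "w") as f:  # illustrative path
        json.dump(state, f)

    with open("engine_state.json") as f:
        loaded = json.load(f)
    # JSON object keys are strings, which is why _load_engine_state casts int(eid)
    assert loaded == state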
|
1109 | 1114 | #------------------------------------------------------------------------- |
|
1110 | 1115 | # Client Requests |
|
1111 | 1116 | #------------------------------------------------------------------------- |
|
1112 | 1117 | |
|
1113 | 1118 | def shutdown_request(self, client_id, msg): |
|
1114 | 1119 | """handle shutdown request.""" |
|
1115 | 1120 | self.session.send(self.query, 'shutdown_reply', content={'status': 'ok'}, ident=client_id) |
|
1116 | 1121 | # also notify other clients of shutdown |
|
1117 | 1122 | self.session.send(self.notifier, 'shutdown_notice', content={'status': 'ok'}) |
|
1118 | 1123 | dc = ioloop.DelayedCallback(lambda : self._shutdown(), 1000, self.loop) |
|
1119 | 1124 | dc.start() |
|
1120 | 1125 | |
|
1121 | 1126 | def _shutdown(self): |
|
1122 | 1127 | self.log.info("hub::hub shutting down.") |
|
1123 | 1128 | time.sleep(0.1) |
|
1124 | 1129 | sys.exit(0) |
|
1125 | 1130 | |
|
1126 | 1131 | |
|
1127 | 1132 | def check_load(self, client_id, msg): |
|
1128 | 1133 | content = msg['content'] |
|
1129 | 1134 | try: |
|
1130 | 1135 | targets = content['targets'] |
|
1131 | 1136 | targets = self._validate_targets(targets) |
|
1132 | 1137 | except: |
|
1133 | 1138 | content = error.wrap_exception() |
|
1134 | 1139 | self.session.send(self.query, "hub_error", |
|
1135 | 1140 | content=content, ident=client_id) |
|
1136 | 1141 | return |
|
1137 | 1142 | |
|
1138 | 1143 | content = dict(status='ok') |
|
1139 | 1144 | # loads = {} |
|
1140 | 1145 | for t in targets: |
|
1141 | 1146 | content[bytes(t)] = len(self.queues[t])+len(self.tasks[t]) |
|
1142 | 1147 | self.session.send(self.query, "load_reply", content=content, ident=client_id) |
|
1143 | 1148 | |
|
1144 | 1149 | |
|
1145 | 1150 | def queue_status(self, client_id, msg): |
|
1146 | 1151 | """Return the Queue status of one or more targets. |
|
1147 | 1152 | if verbose: return the msg_ids |
|
1148 | 1153 | else: return len of each type. |
|
1149 | 1154 | keys: queue (pending MUX jobs) |
|
1150 | 1155 | tasks (pending Task jobs) |
|
1151 | 1156 | completed (finished jobs from both queues)""" |
|
1152 | 1157 | content = msg['content'] |
|
1153 | 1158 | targets = content['targets'] |
|
1154 | 1159 | try: |
|
1155 | 1160 | targets = self._validate_targets(targets) |
|
1156 | 1161 | except: |
|
1157 | 1162 | content = error.wrap_exception() |
|
1158 | 1163 | self.session.send(self.query, "hub_error", |
|
1159 | 1164 | content=content, ident=client_id) |
|
1160 | 1165 | return |
|
1161 | 1166 | verbose = content.get('verbose', False) |
|
1162 | 1167 | content = dict(status='ok') |
|
1163 | 1168 | for t in targets: |
|
1164 | 1169 | queue = self.queues[t] |
|
1165 | 1170 | completed = self.completed[t] |
|
1166 | 1171 | tasks = self.tasks[t] |
|
1167 | 1172 | if not verbose: |
|
1168 | 1173 | queue = len(queue) |
|
1169 | 1174 | completed = len(completed) |
|
1170 | 1175 | tasks = len(tasks) |
|
1171 | 1176 | content[str(t)] = {'queue': queue, 'completed': completed, 'tasks': tasks} 
|
1172 | 1177 | content['unassigned'] = list(self.unassigned) if verbose else len(self.unassigned) |
|
1173 | 1178 | # print (content) |
|
1174 | 1179 | self.session.send(self.query, "queue_reply", content=content, ident=client_id) |
|
1175 | 1180 | |
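
For reference, a non-verbose `queue_reply` content assembled by the loop above carries per-engine counts keyed by the stringified engine id (numbers illustrative):

    {
        'status': 'ok',
        '0': {'queue': 2, 'completed': 10, 'tasks': 1},
        '1': {'queue': 0, 'completed': 12, 'tasks': 3},
        'unassigned': 5,  # with verbose=True, lists of msg_ids replace these counts
    }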
|
1176 | 1181 | def purge_results(self, client_id, msg): |
|
1177 | 1182 | """Purge results from memory. This method is more valuable before we move |
|
1178 | 1183 | to a DB-based message storage mechanism.""" 
|
1179 | 1184 | content = msg['content'] |
|
1180 | 1185 | self.log.info("Dropping records with %s", content) |
|
1181 | 1186 | msg_ids = content.get('msg_ids', []) |
|
1182 | 1187 | reply = dict(status='ok') |
|
1183 | 1188 | if msg_ids == 'all': |
|
1184 | 1189 | try: |
|
1185 | 1190 | self.db.drop_matching_records(dict(completed={'$ne':None})) |
|
1186 | 1191 | except Exception: |
|
1187 | 1192 | reply = error.wrap_exception() |
|
1188 | 1193 | else: |
|
1189 | 1194 | pending = filter(lambda m: m in self.pending, msg_ids) |
|
1190 | 1195 | if pending: |
|
1191 | 1196 | try: |
|
1192 | 1197 | raise IndexError("msg pending: %r" % pending[0]) |
|
1193 | 1198 | except: |
|
1194 | 1199 | reply = error.wrap_exception() |
|
1195 | 1200 | else: |
|
1196 | 1201 | try: |
|
1197 | 1202 | self.db.drop_matching_records(dict(msg_id={'$in':msg_ids})) |
|
1198 | 1203 | except Exception: |
|
1199 | 1204 | reply = error.wrap_exception() |
|
1200 | 1205 | |
|
1201 | 1206 | if reply['status'] == 'ok': |
|
1202 | 1207 | eids = content.get('engine_ids', []) |
|
1203 | 1208 | for eid in eids: |
|
1204 | 1209 | if eid not in self.engines: |
|
1205 | 1210 | try: |
|
1206 | 1211 | raise IndexError("No such engine: %i" % eid) |
|
1207 | 1212 | except: |
|
1208 | 1213 | reply = error.wrap_exception() |
|
1209 | 1214 | break |
|
1210 | 1215 | uid = self.engines[eid].uuid |
|
1211 | 1216 | try: |
|
1212 | 1217 | self.db.drop_matching_records(dict(engine_uuid=uid, completed={'$ne':None})) |
|
1213 | 1218 | except Exception: |
|
1214 | 1219 | reply = error.wrap_exception() |
|
1215 | 1220 | break |
|
1216 | 1221 | |
|
1217 | 1222 | self.session.send(self.query, 'purge_reply', content=reply, ident=client_id) |
|
1218 | 1223 | |
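
Per the branches above, a purge request's content is either the literal string 'all' or explicit id lists; still-pending msg_ids and unknown engine ids produce a wrapped error instead. A sketch of the two request shapes (ids illustrative):

    # drop every record that has completed:
    {'msg_ids': 'all'}

    # or drop specific messages, plus all completed records of given engines:
    {'msg_ids': ['5609012f-...', '0874af3e-...'], 'engine_ids': [0, 2]}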
|
1219 | 1224 | def resubmit_task(self, client_id, msg): |
|
1220 | 1225 | """Resubmit one or more tasks.""" |
|
1221 | 1226 | def finish(reply): |
|
1222 | 1227 | self.session.send(self.query, 'resubmit_reply', content=reply, ident=client_id) |
|
1223 | 1228 | |
|
1224 | 1229 | content = msg['content'] |
|
1225 | 1230 | msg_ids = content['msg_ids'] |
|
1226 | 1231 | reply = dict(status='ok') |
|
1227 | 1232 | try: |
|
1228 | 1233 | records = self.db.find_records({'msg_id' : {'$in' : msg_ids}}, keys=[ |
|
1229 | 1234 | 'header', 'content', 'buffers']) |
|
1230 | 1235 | except Exception: |
|
1231 | 1236 | self.log.error('db::db error finding tasks to resubmit', exc_info=True) |
|
1232 | 1237 | return finish(error.wrap_exception()) |
|
1233 | 1238 | |
|
1234 | 1239 | # validate msg_ids |
|
1235 | 1240 | found_ids = [ rec['msg_id'] for rec in records ] |
|
1236 | 1241 | pending_ids = [ msg_id for msg_id in found_ids if msg_id in self.pending ] |
|
1237 | 1242 | if len(records) > len(msg_ids): |
|
1238 | 1243 | try: |
|
1239 | 1244 | raise RuntimeError("DB appears to be in an inconsistent state. " 
|
1240 | 1245 | "More matching records were found than should exist") |
|
1241 | 1246 | except Exception: |
|
1242 | 1247 | return finish(error.wrap_exception()) |
|
1243 | 1248 | elif len(records) < len(msg_ids): |
|
1244 | 1249 | missing = [ m for m in msg_ids if m not in found_ids ] |
|
1245 | 1250 | try: |
|
1246 | 1251 | raise KeyError("No such msg(s): %r" % missing) |
|
1247 | 1252 | except KeyError: |
|
1248 | 1253 | return finish(error.wrap_exception()) |
|
1249 | 1254 | elif pending_ids: |
|
1250 | 1255 | pass |
|
1251 | 1256 | # no need to raise on resubmit of pending task, now that we |
|
1252 | 1257 | # resubmit under new ID, but do we want to raise anyway? |
|
1253 | 1258 | # msg_id = invalid_ids[0] |
|
1254 | 1259 | # try: |
|
1255 | 1260 | # raise ValueError("Task(s) %r appears to be inflight" % ) |
|
1256 | 1261 | # except Exception: |
|
1257 | 1262 | # return finish(error.wrap_exception()) |
|
1258 | 1263 | |
|
1259 | 1264 | # mapping of original IDs to resubmitted IDs |
|
1260 | 1265 | resubmitted = {} |
|
1261 | 1266 | |
|
1262 | 1267 | # send the messages |
|
1263 | 1268 | for rec in records: |
|
1264 | 1269 | header = rec['header'] |
|
1265 | 1270 | msg = self.session.msg(header['msg_type'], parent=header) |
|
1266 | 1271 | msg_id = msg['msg_id'] |
|
1267 | 1272 | msg['content'] = rec['content'] |
|
1268 | 1273 | |
|
1269 | 1274 | # use the old header, but update msg_id and timestamp |
|
1270 | 1275 | fresh = msg['header'] |
|
1271 | 1276 | header['msg_id'] = fresh['msg_id'] |
|
1272 | 1277 | header['date'] = fresh['date'] |
|
1273 | 1278 | msg['header'] = header |
|
1274 | 1279 | |
|
1275 | 1280 | self.session.send(self.resubmit, msg, buffers=rec['buffers']) |
|
1276 | 1281 | |
|
1277 | 1282 | resubmitted[rec['msg_id']] = msg_id |
|
1278 | 1283 | self.pending.add(msg_id) |
|
1279 | 1284 | msg['buffers'] = rec['buffers'] |
|
1280 | 1285 | try: |
|
1281 | 1286 | self.db.add_record(msg_id, init_record(msg)) |
|
1282 | 1287 | except Exception: |
|
1283 | 1288 | self.log.error("db::DB Error updating record: %s", msg_id, exc_info=True) |
|
1284 | 1289 | return finish(error.wrap_exception()) |
|
1285 | 1290 | |
|
1286 | 1291 | finish(dict(status='ok', resubmitted=resubmitted)) |
|
1287 | 1292 | |
|
1288 | 1293 | # store the new IDs in the Task DB |
|
1289 | 1294 | for msg_id, resubmit_id in resubmitted.iteritems(): |
|
1290 | 1295 | try: |
|
1291 | 1296 | self.db.update_record(msg_id, {'resubmitted' : resubmit_id}) |
|
1292 | 1297 | except Exception: |
|
1293 | 1298 | self.log.error("db::DB Error updating record: %s", msg_id, exc_info=True) |
|
1294 | 1299 | |
|
1295 | 1300 | |
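
Note the ordering in `resubmit_task`: the `resubmit_reply` goes out before the old records are annotated with their new ids, so slow database updates do not delay the client. The reply content is shaped like this (ids illustrative):

    {
        'status': 'ok',
        # original msg_id -> msg_id the task was resubmitted under
        'resubmitted': {'5609012f-...': 'c41e5f9a-...'},
    }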
|
1296 | 1301 | def _extract_record(self, rec): |
|
1297 | 1302 | """decompose a TaskRecord dict into subsection of reply for get_result""" |
|
1298 | 1303 | io_dict = {} |
|
1299 | 1304 | for key in ('pyin', 'pyout', 'pyerr', 'stdout', 'stderr'): |
|
1300 | 1305 | io_dict[key] = rec[key] |
|
1301 | 1306 | content = { |
|
1302 | 1307 | 'header': rec['header'], |
|
1303 | 1308 | 'metadata': rec['metadata'], |
|
1304 | 1309 | 'result_metadata': rec['result_metadata'], |
|
1305 | 1310 | 'result_header' : rec['result_header'], |
|
1306 | 1311 | 'result_content': rec['result_content'], |
|
1307 | 1312 | 'received' : rec['received'], |
|
1308 | 1313 | 'io' : io_dict, |
|
1309 | 1314 | } |
|
1310 | 1315 | if rec['result_buffers']: |
|
1311 | 1316 | buffers = map(bytes, rec['result_buffers']) |
|
1312 | 1317 | else: |
|
1313 | 1318 | buffers = [] |
|
1314 | 1319 | |
|
1315 | 1320 | return content, buffers |
|
1316 | 1321 | |
|
1317 | 1322 | def get_results(self, client_id, msg): |
|
1318 | 1323 | """Get the result of 1 or more messages.""" |
|
1319 | 1324 | content = msg['content'] |
|
1320 | 1325 | msg_ids = sorted(set(content['msg_ids'])) |
|
1321 | 1326 | statusonly = content.get('status_only', False) |
|
1322 | 1327 | pending = [] |
|
1323 | 1328 | completed = [] |
|
1324 | 1329 | content = dict(status='ok') |
|
1325 | 1330 | content['pending'] = pending |
|
1326 | 1331 | content['completed'] = completed |
|
1327 | 1332 | buffers = [] |
|
1328 | 1333 | if not statusonly: |
|
1329 | 1334 | try: |
|
1330 | 1335 | matches = self.db.find_records(dict(msg_id={'$in':msg_ids})) |
|
1331 | 1336 | # turn match list into dict, for faster lookup |
|
1332 | 1337 | records = {} |
|
1333 | 1338 | for rec in matches: |
|
1334 | 1339 | records[rec['msg_id']] = rec |
|
1335 | 1340 | except Exception: |
|
1336 | 1341 | content = error.wrap_exception() |
|
1337 | 1342 | self.session.send(self.query, "result_reply", content=content, |
|
1338 | 1343 | parent=msg, ident=client_id) |
|
1339 | 1344 | return |
|
1340 | 1345 | else: |
|
1341 | 1346 | records = {} |
|
1342 | 1347 | for msg_id in msg_ids: |
|
1343 | 1348 | if msg_id in self.pending: |
|
1344 | 1349 | pending.append(msg_id) |
|
1345 | 1350 | elif msg_id in self.all_completed: |
|
1346 | 1351 | completed.append(msg_id) |
|
1347 | 1352 | if not statusonly: |
|
1348 | 1353 | c,bufs = self._extract_record(records[msg_id]) |
|
1349 | 1354 | content[msg_id] = c |
|
1350 | 1355 | buffers.extend(bufs) |
|
1351 | 1356 | elif msg_id in records: |
|
1352 | 1357 | if records[msg_id]['completed']: 
|
1353 | 1358 | completed.append(msg_id) |
|
1354 | 1359 | c,bufs = self._extract_record(records[msg_id]) |
|
1355 | 1360 | content[msg_id] = c |
|
1356 | 1361 | buffers.extend(bufs) |
|
1357 | 1362 | else: |
|
1358 | 1363 | pending.append(msg_id) |
|
1359 | 1364 | else: |
|
1360 | 1365 | try: |
|
1361 | 1366 | raise KeyError('No such message: '+msg_id) |
|
1362 | 1367 | except: |
|
1363 | 1368 | content = error.wrap_exception() |
|
1364 | 1369 | break |
|
1365 | 1370 | self.session.send(self.query, "result_reply", content=content, |
|
1366 | 1371 | parent=msg, ident=client_id, |
|
1367 | 1372 | buffers=buffers) |
|
1368 | 1373 | |
|
1369 | 1374 | def get_history(self, client_id, msg): |
|
1370 | 1375 | """Get a list of all msg_ids in our DB records""" |
|
1371 | 1376 | try: |
|
1372 | 1377 | msg_ids = self.db.get_history() |
|
1373 | 1378 | except Exception as e: |
|
1374 | 1379 | content = error.wrap_exception() |
|
1375 | 1380 | else: |
|
1376 | 1381 | content = dict(status='ok', history=msg_ids) |
|
1377 | 1382 | |
|
1378 | 1383 | self.session.send(self.query, "history_reply", content=content, |
|
1379 | 1384 | parent=msg, ident=client_id) |
|
1380 | 1385 | |
|
1381 | 1386 | def db_query(self, client_id, msg): |
|
1382 | 1387 | """Perform a raw query on the task record database.""" |
|
1383 | 1388 | content = msg['content'] |
|
1384 | 1389 | query = content.get('query', {}) |
|
1385 | 1390 | keys = content.get('keys', None) |
|
1386 | 1391 | buffers = [] |
|
1387 | 1392 | empty = list() |
|
1388 | 1393 | try: |
|
1389 | 1394 | records = self.db.find_records(query, keys) |
|
1390 | 1395 | except Exception as e: |
|
1391 | 1396 | content = error.wrap_exception() |
|
1392 | 1397 | else: |
|
1393 | 1398 | # extract buffers from reply content: |
|
1394 | 1399 | if keys is not None: |
|
1395 | 1400 | buffer_lens = [] if 'buffers' in keys else None |
|
1396 | 1401 | result_buffer_lens = [] if 'result_buffers' in keys else None |
|
1397 | 1402 | else: |
|
1398 | 1403 | buffer_lens = None |
|
1399 | 1404 | result_buffer_lens = None |
|
1400 | 1405 | |
|
1401 | 1406 | for rec in records: |
|
1402 | 1407 | # buffers may be None, so double check |
|
1403 | 1408 | b = rec.pop('buffers', empty) or empty |
|
1404 | 1409 | if buffer_lens is not None: |
|
1405 | 1410 | buffer_lens.append(len(b)) |
|
1406 | 1411 | buffers.extend(b) |
|
1407 | 1412 | rb = rec.pop('result_buffers', empty) or empty |
|
1408 | 1413 | if result_buffer_lens is not None: |
|
1409 | 1414 | result_buffer_lens.append(len(rb)) |
|
1410 | 1415 | buffers.extend(rb) |
|
1411 | 1416 | content = dict(status='ok', records=records, buffer_lens=buffer_lens, |
|
1412 | 1417 | result_buffer_lens=result_buffer_lens) |
|
1413 | 1418 | # self.log.debug (content) |
|
1414 | 1419 | self.session.send(self.query, "db_reply", content=content, |
|
1415 | 1420 | parent=msg, ident=client_id, |
|
1416 | 1421 | buffers=buffers) |
|
1417 | 1422 |
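
`db_query` hands the query dict straight to the backend's `find_records`, so clients can use the same Mongo-style operators seen in `purge_results`. An illustrative request content:

    {
        # all completed tasks...
        'query': {'completed': {'$ne': None}},
        # ...returning only these keys; the buffer_lens/result_buffer_lens in the
        # reply let the client re-split the flat buffers list per record
        'keys': ['msg_id', 'completed', 'result_buffers'],
    }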
@@ -1,305 +1,305 b'' | |||
|
1 | 1 | """A simple engine that talks to a controller over 0MQ. |
|
2 | 2 | It handles registration, etc., and launches a kernel 
|
3 | 3 | connected to the Controller's Schedulers. |
|
4 | 4 | |
|
5 | 5 | Authors: |
|
6 | 6 | |
|
7 | 7 | * Min RK |
|
8 | 8 | """ |
|
9 | 9 | #----------------------------------------------------------------------------- |
|
10 | 10 | # Copyright (C) 2010-2011 The IPython Development Team |
|
11 | 11 | # |
|
12 | 12 | # Distributed under the terms of the BSD License. The full license is in |
|
13 | 13 | # the file COPYING, distributed as part of this software. |
|
14 | 14 | #----------------------------------------------------------------------------- |
|
15 | 15 | |
|
16 | 16 | from __future__ import print_function |
|
17 | 17 | |
|
18 | 18 | import sys |
|
19 | 19 | import time |
|
20 | 20 | from getpass import getpass |
|
21 | 21 | |
|
22 | 22 | import zmq |
|
23 | 23 | from zmq.eventloop import ioloop, zmqstream |
|
24 | 24 | |
|
25 | 25 | from IPython.external.ssh import tunnel |
|
26 | 26 | # internal |
|
27 | from IPython.utils.localinterfaces import LOCALHOST | 

27 | from IPython.utils.localinterfaces import localhost | 
|
28 | 28 | from IPython.utils.traitlets import ( |
|
29 | 29 | Instance, Dict, Integer, Type, Float, Integer, Unicode, CBytes, Bool |
|
30 | 30 | ) |
|
31 | 31 | from IPython.utils.py3compat import cast_bytes |
|
32 | 32 | |
|
33 | 33 | from IPython.parallel.controller.heartmonitor import Heart |
|
34 | 34 | from IPython.parallel.factory import RegistrationFactory |
|
35 | 35 | from IPython.parallel.util import disambiguate_url |
|
36 | 36 | |
|
37 | 37 | from IPython.kernel.zmq.session import Message |
|
38 | 38 | from IPython.kernel.zmq.ipkernel import Kernel |
|
39 | 39 | from IPython.kernel.zmq.kernelapp import IPKernelApp |
|
40 | 40 | |
|
41 | 41 | class EngineFactory(RegistrationFactory): |
|
42 | 42 | """IPython engine""" |
|
43 | 43 | |
|
44 | 44 | # configurables: |
|
45 | 45 | out_stream_factory=Type('IPython.kernel.zmq.iostream.OutStream', config=True, |
|
46 | 46 | help="""The OutStream for handling stdout/err. |
|
47 | 47 | Typically 'IPython.kernel.zmq.iostream.OutStream'""") |
|
48 | 48 | display_hook_factory=Type('IPython.kernel.zmq.displayhook.ZMQDisplayHook', config=True, |
|
49 | 49 | help="""The class for handling displayhook. |
|
50 | 50 | Typically 'IPython.kernel.zmq.displayhook.ZMQDisplayHook'""") |
|
51 | 51 | location=Unicode(config=True, |
|
52 | 52 | help="""The location (an IP address) of the controller. This is |
|
53 | 53 | used for disambiguating URLs, to determine whether |
|
54 | 54 | loopback should be used to connect or the public address.""") |
|
55 | 55 | timeout=Float(5.0, config=True, |
|
56 | 56 | help="""The time (in seconds) to wait for the Controller to respond |
|
57 | 57 | to registration requests before giving up.""") |
|
58 | 58 | max_heartbeat_misses=Integer(50, config=True, |
|
59 | 59 | help="""The maximum number of times a check for the heartbeat ping of a |
|
60 | 60 | controller can be missed before shutting down the engine. |
|
61 | 61 | |
|
62 | 62 | If set to 0, the check is disabled.""") |
|
63 | 63 | sshserver=Unicode(config=True, |
|
64 | 64 | help="""The SSH server to use for tunneling connections to the Controller.""") |
|
65 | 65 | sshkey=Unicode(config=True, |
|
66 | 66 | help="""The SSH private key file to use when tunneling connections to the Controller.""") |
|
67 | 67 | paramiko=Bool(sys.platform == 'win32', config=True, |
|
68 | 68 | help="""Whether to use paramiko instead of openssh for tunnels.""") |
|
69 | 69 | |
|
70 | 70 | |
|
71 | 71 | # not configurable: |
|
72 | 72 | connection_info = Dict() |
|
73 | 73 | user_ns = Dict() |
|
74 | 74 | id = Integer(allow_none=True) |
|
75 | 75 | registrar = Instance('zmq.eventloop.zmqstream.ZMQStream') |
|
76 | 76 | kernel = Instance(Kernel) |
|
77 | 77 | hb_check_period=Integer() |
|
78 | 78 | |
|
79 | 79 | # States for the heartbeat monitoring |
|
80 | 80 | # Initial values for monitored and pinged must satisfy "monitored > pinged == False" so that |
|
81 | 81 | # during the first check no "missed" ping is reported. Must be floats for Python 3 compatibility. |
|
82 | 82 | _hb_last_pinged = 0.0 |
|
83 | 83 | _hb_last_monitored = 0.0 |
|
84 | 84 | _hb_missed_beats = 0 |
|
85 | 85 | # The zmq Stream which receives the pings from the Heart |
|
86 | 86 | _hb_listener = None |
|
87 | 87 | |
|
88 | 88 | bident = CBytes() |
|
89 | 89 | ident = Unicode() |
|
90 | 90 | def _ident_changed(self, name, old, new): |
|
91 | 91 | self.bident = cast_bytes(new) |
|
92 | 92 | using_ssh=Bool(False) |
|
93 | 93 | |
|
94 | 94 | |
|
95 | 95 | def __init__(self, **kwargs): |
|
96 | 96 | super(EngineFactory, self).__init__(**kwargs) |
|
97 | 97 | self.ident = self.session.session |
|
98 | 98 | |
|
99 | 99 | def init_connector(self): |
|
100 | 100 | """construct connection function, which handles tunnels.""" |
|
101 | 101 | self.using_ssh = bool(self.sshkey or self.sshserver) |
|
102 | 102 | |
|
103 | 103 | if self.sshkey and not self.sshserver: |
|
104 | 104 | # We are using ssh directly to the controller, tunneling localhost to localhost |
|
105 | 105 | self.sshserver = self.url.split('://')[1].split(':')[0] |
|
106 | 106 | |
|
107 | 107 | if self.using_ssh: |
|
108 | 108 | if tunnel.try_passwordless_ssh(self.sshserver, self.sshkey, self.paramiko): |
|
109 | 109 | password=False |
|
110 | 110 | else: |
|
111 | 111 | password = getpass("SSH Password for %s: "%self.sshserver) |
|
112 | 112 | else: |
|
113 | 113 | password = False |
|
114 | 114 | |
|
115 | 115 | def connect(s, url): |
|
116 | 116 | url = disambiguate_url(url, self.location) |
|
117 | 117 | if self.using_ssh: |
|
118 | 118 | self.log.debug("Tunneling connection to %s via %s", url, self.sshserver) |
|
119 | 119 | return tunnel.tunnel_connection(s, url, self.sshserver, |
|
120 | 120 | keyfile=self.sshkey, paramiko=self.paramiko, |
|
121 | 121 | password=password, |
|
122 | 122 | ) |
|
123 | 123 | else: |
|
124 | 124 | return s.connect(url) |
|
125 | 125 | |
|
126 | 126 | def maybe_tunnel(url): |
|
127 | 127 | """like connect, but don't complete the connection (for use by heartbeat)""" |
|
128 | 128 | url = disambiguate_url(url, self.location) |
|
129 | 129 | if self.using_ssh: |
|
130 | 130 | self.log.debug("Tunneling connection to %s via %s", url, self.sshserver) |
|
131 | 131 | url,tunnelobj = tunnel.open_tunnel(url, self.sshserver, |
|
132 | 132 | keyfile=self.sshkey, paramiko=self.paramiko, |
|
133 | 133 | password=password, |
|
134 | 134 | ) |
|
135 | 135 | return str(url) |
|
136 | 136 | return connect, maybe_tunnel |
|
137 | 137 | |
|
138 | 138 | def register(self): |
|
139 | 139 | """send the registration_request""" |
|
140 | 140 | |
|
141 | 141 | self.log.info("Registering with controller at %s"%self.url) |
|
142 | 142 | ctx = self.context |
|
143 | 143 | connect,maybe_tunnel = self.init_connector() |
|
144 | 144 | reg = ctx.socket(zmq.DEALER) |
|
145 | 145 | reg.setsockopt(zmq.IDENTITY, self.bident) |
|
146 | 146 | connect(reg, self.url) |
|
147 | 147 | self.registrar = zmqstream.ZMQStream(reg, self.loop) |
|
148 | 148 | |
|
149 | 149 | |
|
150 | 150 | content = dict(uuid=self.ident) |
|
151 | 151 | self.registrar.on_recv(lambda msg: self.complete_registration(msg, connect, maybe_tunnel)) |
|
152 | 152 | # print (self.session.key) |
|
153 | 153 | self.session.send(self.registrar, "registration_request", content=content) |
|
154 | 154 | |
|
155 | 155 | def _report_ping(self, msg): |
|
156 | 156 | """Callback for when the heartmonitor.Heart receives a ping""" |
|
157 | 157 | #self.log.debug("Received a ping: %s", msg) |
|
158 | 158 | self._hb_last_pinged = time.time() |
|
159 | 159 | |
|
160 | 160 | def complete_registration(self, msg, connect, maybe_tunnel): |
|
161 | 161 | # print msg |
|
162 | 162 | self._abort_dc.stop() |
|
163 | 163 | ctx = self.context |
|
164 | 164 | loop = self.loop |
|
165 | 165 | identity = self.bident |
|
166 | 166 | idents,msg = self.session.feed_identities(msg) |
|
167 | 167 | msg = self.session.unserialize(msg) |
|
168 | 168 | content = msg['content'] |
|
169 | 169 | info = self.connection_info |
|
170 | 170 | |
|
171 | 171 | def url(key): |
|
172 | 172 | """get zmq url for given channel""" |
|
173 | 173 | return str(info["interface"] + ":%i" % info[key]) |
|
174 | 174 | |
|
175 | 175 | if content['status'] == 'ok': |
|
176 | 176 | self.id = int(content['id']) |
|
177 | 177 | |
|
178 | 178 | # launch heartbeat |
|
179 | 179 | # possibly forward hb ports with tunnels |
|
180 | 180 | hb_ping = maybe_tunnel(url('hb_ping')) |
|
181 | 181 | hb_pong = maybe_tunnel(url('hb_pong')) |
|
182 | 182 | |
|
183 | 183 | hb_monitor = None |
|
184 | 184 | if self.max_heartbeat_misses > 0: |
|
185 | 185 | # Add a monitor socket which will record the last time a ping was seen |
|
186 | 186 | mon = self.context.socket(zmq.SUB) |
|
187 | mport = mon.bind_to_random_port('tcp://%s' % LOCALHOST) | 

187 | mport = mon.bind_to_random_port('tcp://%s' % localhost()) | 
|
188 | 188 | mon.setsockopt(zmq.SUBSCRIBE, b"") |
|
189 | 189 | self._hb_listener = zmqstream.ZMQStream(mon, self.loop) |
|
190 | 190 | self._hb_listener.on_recv(self._report_ping) |
|
191 | 191 | |
|
192 | 192 | |
|
193 | hb_monitor = "tcp://%s:%i" % (LOCALHOST, mport) | 

193 | hb_monitor = "tcp://%s:%i" % (localhost(), mport) | 
|
194 | 194 | |
|
195 | 195 | heart = Heart(hb_ping, hb_pong, hb_monitor, heart_id=identity) 
|
196 | 196 | heart.start() |
|
197 | 197 | |
|
198 | 198 | # create Shell Connections (MUX, Task, etc.): |
|
199 | 199 | shell_addrs = url('mux'), url('task') |
|
200 | 200 | |
|
201 | 201 | # Use only one shell stream for mux and tasks |
|
202 | 202 | stream = zmqstream.ZMQStream(ctx.socket(zmq.ROUTER), loop) |
|
203 | 203 | stream.setsockopt(zmq.IDENTITY, identity) |
|
204 | 204 | shell_streams = [stream] |
|
205 | 205 | for addr in shell_addrs: |
|
206 | 206 | connect(stream, addr) |
|
207 | 207 | |
|
208 | 208 | # control stream: |
|
209 | 209 | control_addr = url('control') |
|
210 | 210 | control_stream = zmqstream.ZMQStream(ctx.socket(zmq.ROUTER), loop) |
|
211 | 211 | control_stream.setsockopt(zmq.IDENTITY, identity) |
|
212 | 212 | connect(control_stream, control_addr) |
|
213 | 213 | |
|
214 | 214 | # create iopub stream: |
|
215 | 215 | iopub_addr = url('iopub') |
|
216 | 216 | iopub_socket = ctx.socket(zmq.PUB) |
|
217 | 217 | iopub_socket.setsockopt(zmq.IDENTITY, identity) |
|
218 | 218 | connect(iopub_socket, iopub_addr) |
|
219 | 219 | |
|
220 | 220 | # disable history: |
|
221 | 221 | self.config.HistoryManager.hist_file = ':memory:' |
|
222 | 222 | |
|
223 | 223 | # Redirect output streams and set a display hook. 
|
224 | 224 | if self.out_stream_factory: |
|
225 | 225 | sys.stdout = self.out_stream_factory(self.session, iopub_socket, u'stdout') |
|
226 | 226 | sys.stdout.topic = cast_bytes('engine.%i.stdout' % self.id) |
|
227 | 227 | sys.stderr = self.out_stream_factory(self.session, iopub_socket, u'stderr') |
|
228 | 228 | sys.stderr.topic = cast_bytes('engine.%i.stderr' % self.id) |
|
229 | 229 | if self.display_hook_factory: |
|
230 | 230 | sys.displayhook = self.display_hook_factory(self.session, iopub_socket) |
|
231 | 231 | sys.displayhook.topic = cast_bytes('engine.%i.pyout' % self.id) |
|
232 | 232 | |
|
233 | 233 | self.kernel = Kernel(parent=self, int_id=self.id, ident=self.ident, session=self.session, |
|
234 | 234 | control_stream=control_stream, shell_streams=shell_streams, iopub_socket=iopub_socket, |
|
235 | 235 | loop=loop, user_ns=self.user_ns, log=self.log) |
|
236 | 236 | |
|
237 | 237 | self.kernel.shell.display_pub.topic = cast_bytes('engine.%i.displaypub' % self.id) |
|
238 | 238 | |
|
239 | 239 | |
|
240 | 240 | # periodically check the heartbeat pings of the controller |
|
241 | 241 | # Should be started here and not in "start()" so that the right period can be taken |
|
242 | 242 | # from the hub's HeartBeatMonitor.period 
|
243 | 243 | if self.max_heartbeat_misses > 0: |
|
244 | 244 | # Use a slightly bigger check period than the hub signal period so we don't warn unnecessarily 
|
245 | 245 | self.hb_check_period = int(content['hb_period'])+10 |
|
246 | 246 | self.log.info("Starting to monitor the heartbeat signal from the hub every %i ms.", self.hb_check_period) 
|
247 | 247 | self._hb_reporter = ioloop.PeriodicCallback(self._hb_monitor, self.hb_check_period, self.loop) |
|
248 | 248 | self._hb_reporter.start() |
|
249 | 249 | else: |
|
250 | 250 | self.log.info("Monitoring of the heartbeat signal from the hub is not enabled.") |
|
251 | 251 | |
|
252 | 252 | |
|
253 | 253 | # FIXME: This is a hack until IPKernelApp and IPEngineApp can be fully merged |
|
254 | 254 | app = IPKernelApp(parent=self, shell=self.kernel.shell, kernel=self.kernel, log=self.log) |
|
255 | 255 | app.init_profile_dir() |
|
256 | 256 | app.init_code() |
|
257 | 257 | |
|
258 | 258 | self.kernel.start() |
|
259 | 259 | else: |
|
260 | 260 | self.log.fatal("Registration Failed: %s"%msg) |
|
261 | 261 | raise Exception("Registration Failed: %s"%msg) |
|
262 | 262 | |
|
263 | 263 | self.log.info("Completed registration with id %i"%self.id) |
|
264 | 264 | |
|
265 | 265 | |
|
266 | 266 | def abort(self): |
|
267 | 267 | self.log.fatal("Registration timed out after %.1f seconds"%self.timeout) |
|
268 | 268 | if self.url.startswith('127.'): |
|
269 | 269 | self.log.fatal(""" |
|
270 | 270 | If the controller and engines are not on the same machine, |
|
271 | 271 | you will have to instruct the controller to listen on an external IP (in ipcontroller_config.py): |
|
272 | 272 | c.HubFactory.ip='*' # for all interfaces, internal and external |
|
273 | 273 | c.HubFactory.ip='192.168.1.101' # or any interface that the engines can see |
|
274 | 274 | or tunnel connections via ssh. |
|
275 | 275 | """) |
|
276 | 276 | self.session.send(self.registrar, "unregistration_request", content=dict(id=self.id)) |
|
277 | 277 | time.sleep(1) |
|
278 | 278 | sys.exit(255) |
|
279 | 279 | |
|
280 | 280 | def _hb_monitor(self): |
|
281 | 281 | """Callback to monitor the heartbeat from the controller""" |
|
282 | 282 | self._hb_listener.flush() |
|
283 | 283 | if self._hb_last_monitored > self._hb_last_pinged: |
|
284 | 284 | self._hb_missed_beats += 1 |
|
285 | 285 | self.log.warn("No heartbeat in the last %s ms (%s time(s) in a row).", self.hb_check_period, self._hb_missed_beats) |
|
286 | 286 | else: |
|
287 | 287 | #self.log.debug("Heartbeat received (after missing %s beats).", self._hb_missed_beats) |
|
288 | 288 | self._hb_missed_beats = 0 |
|
289 | 289 | |
|
290 | 290 | if self._hb_missed_beats >= self.max_heartbeat_misses: |
|
291 | 291 | self.log.fatal("Maximum number of heartbeat misses reached (%s times %s ms), shutting down.", 
|
292 | 292 | self.max_heartbeat_misses, self.hb_check_period) |
|
293 | 293 | self.session.send(self.registrar, "unregistration_request", content=dict(id=self.id)) |
|
294 | 294 | self.loop.stop() |
|
295 | 295 | |
|
296 | 296 | self._hb_last_monitored = time.time() |
|
297 | 297 | |
|
298 | 298 | |
|
299 | 299 | def start(self): |
|
300 | 300 | dc = ioloop.DelayedCallback(self.register, 0, self.loop) |
|
301 | 301 | dc.start() |
|
302 | 302 | self._abort_dc = ioloop.DelayedCallback(self.abort, self.timeout*1000, self.loop) |
|
303 | 303 | self._abort_dc.start() |
|
304 | 304 | |
|
305 | 305 |
@@ -1,79 +1,81 b'' | |||
|
1 | 1 | """Base config factories. |
|
2 | 2 | |
|
3 | 3 | Authors: |
|
4 | 4 | |
|
5 | 5 | * Min RK |
|
6 | 6 | """ |
|
7 | 7 | |
|
8 | 8 | #----------------------------------------------------------------------------- |
|
9 | 9 | # Copyright (C) 2010-2011 The IPython Development Team |
|
10 | 10 | # |
|
11 | 11 | # Distributed under the terms of the BSD License. The full license is in |
|
12 | 12 | # the file COPYING, distributed as part of this software. |
|
13 | 13 | #----------------------------------------------------------------------------- |
|
14 | 14 | |
|
15 | 15 | #----------------------------------------------------------------------------- |
|
16 | 16 | # Imports |
|
17 | 17 | #----------------------------------------------------------------------------- |
|
18 | 18 | |
|
19 | 19 | |
|
20 | 20 | import logging |
|
21 | 21 | import os |
|
22 | 22 | |
|
23 | 23 | import zmq |
|
24 | 24 | from zmq.eventloop.ioloop import IOLoop |
|
25 | 25 | |
|
26 | 26 | from IPython.config.configurable import Configurable |
|
27 | from IPython.utils.localinterfaces import LOCALHOST | 

27 | from IPython.utils.localinterfaces import localhost | 
|
28 | 28 | from IPython.utils.traitlets import Integer, Instance, Unicode |
|
29 | 29 | |
|
30 | 30 | from IPython.parallel.util import select_random_ports |
|
31 | 31 | from IPython.kernel.zmq.session import Session, SessionFactory |
|
32 | 32 | |
|
33 | 33 | #----------------------------------------------------------------------------- |
|
34 | 34 | # Classes |
|
35 | 35 | #----------------------------------------------------------------------------- |
|
36 | 36 | |
|
37 | 37 | |
|
38 | 38 | class RegistrationFactory(SessionFactory): |
|
39 | 39 | """The Base Configurable for objects that involve registration.""" |
|
40 | 40 | |
|
41 | 41 | url = Unicode('', config=True, |
|
42 | 42 | help="""The 0MQ url used for registration. This sets transport, ip, and port |
|
43 | in one variable. For example: url='tcp://

43 | in one variable. For example: url='tcp://127.0.0.1:12345' or | 
|
44 | 44 | url='epgm://*:90210'""" |
|
45 |  | 

45 | ) # url takes precedence over ip,regport,transport | 
|
46 | 46 | transport = Unicode('tcp', config=True, |
|
47 | 47 | help="""The 0MQ transport for communications. This will likely be |
|
48 | 48 | the default of 'tcp', but other values include 'ipc', 'epgm', 'inproc'.""") |
|
49 | ip = Unicode(LOCALHOST, config=True, | 

49 | ip = Unicode(config=True, | 
|
50 | 50 | help="""The IP address for registration. This is generally either |
|
51 | 51 | '127.0.0.1' for loopback only or '*' for all interfaces. |
|
52 | [default: '%s']""" % LOCALHOST) | |
|
52 | """) | |
|
53 | def _ip_default(self): | |
|
54 | return localhost() | |
|
53 | 55 | regport = Integer(config=True, |
|
54 | 56 | help="""The port on which the Hub listens for registration.""") |
|
55 | 57 | def _regport_default(self): |
|
56 | 58 | return select_random_ports(1)[0] |
|
57 | 59 | |
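
`_ip_default` and `_regport_default` above use traitlets' dynamic-default hook: a method named `_<trait>_default` is invoked lazily, on first read of the trait, instead of computing a default at class-definition time. A standalone sketch of the pattern (the returned value is a stand-in for localhost()):

    from IPython.utils.traitlets import HasTraits, Unicode

    class Example(HasTraits):
        ip = Unicode()

        def _ip_default(self):
            # runs on first access to self.ip, so the cost of interface
            # detection is only paid if the trait is actually used
            return '127.0.0.1'

    assert Example().ip == '127.0.0.1'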
|
58 | 60 | def __init__(self, **kwargs): |
|
59 | 61 | super(RegistrationFactory, self).__init__(**kwargs) |
|
60 | 62 | self._propagate_url() |
|
61 | 63 | self._rebuild_url() |
|
62 | 64 | self.on_trait_change(self._propagate_url, 'url') |
|
63 | 65 | self.on_trait_change(self._rebuild_url, 'ip') |
|
64 | 66 | self.on_trait_change(self._rebuild_url, 'transport') |
|
65 | 67 | self.on_trait_change(self._rebuild_url, 'regport') |
|
66 | 68 | |
|
67 | 69 | def _rebuild_url(self): |
|
68 | 70 | self.url = "%s://%s:%i"%(self.transport, self.ip, self.regport) |
|
69 | 71 | |
|
70 | 72 | def _propagate_url(self): |
|
71 | 73 | """Ensure self.url contains full transport://interface:port""" |
|
72 | 74 | if self.url: |
|
73 | 75 | iface = self.url.split('://',1) |
|
74 | 76 | if len(iface) == 2: |
|
75 | 77 | self.transport,iface = iface |
|
76 | 78 | iface = iface.split(':') |
|
77 | 79 | self.ip = iface[0] |
|
78 | 80 | if iface[1]: |
|
79 | 81 | self.regport = int(iface[1]) |
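
`_propagate_url` and `_rebuild_url` keep `url` and its `(transport, ip, regport)` parts synchronized in both directions, with `url` winning as the trait comment says. A sketch of the same decomposition that `_propagate_url` performs (function name and address illustrative):

    def split_registration_url(url):
        # 'tcp://10.0.1.1:10101' -> ('tcp', '10.0.1.1', 10101)
        transport, iface = url.split('://', 1)
        ip, port = iface.split(':')
        return transport, ip, int(port)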
@@ -1,368 +1,368 b'' | |||
|
1 | 1 | """some generic utilities for dealing with classes, urls, and serialization |
|
2 | 2 | |
|
3 | 3 | Authors: |
|
4 | 4 | |
|
5 | 5 | * Min RK |
|
6 | 6 | """ |
|
7 | 7 | #----------------------------------------------------------------------------- |
|
8 | 8 | # Copyright (C) 2010-2011 The IPython Development Team |
|
9 | 9 | # |
|
10 | 10 | # Distributed under the terms of the BSD License. The full license is in |
|
11 | 11 | # the file COPYING, distributed as part of this software. |
|
12 | 12 | #----------------------------------------------------------------------------- |
|
13 | 13 | |
|
14 | 14 | #----------------------------------------------------------------------------- |
|
15 | 15 | # Imports |
|
16 | 16 | #----------------------------------------------------------------------------- |
|
17 | 17 | |
|
18 | 18 | # Standard library imports. |
|
19 | 19 | import logging |
|
20 | 20 | import os |
|
21 | 21 | import re |
|
22 | 22 | import stat |
|
23 | 23 | import socket |
|
24 | 24 | import sys |
|
25 | 25 | from signal import signal, SIGINT, SIGABRT, SIGTERM |
|
26 | 26 | try: |
|
27 | 27 | from signal import SIGKILL |
|
28 | 28 | except ImportError: |
|
29 | 29 | SIGKILL=None |
|
30 | 30 | |
|
31 | 31 | try: |
|
32 | 32 | import cPickle |
|
33 | 33 | pickle = cPickle |
|
34 | 34 | except: |
|
35 | 35 | cPickle = None |
|
36 | 36 | import pickle |
|
37 | 37 | |
|
38 | 38 | # System library imports |
|
39 | 39 | import zmq |
|
40 | 40 | from zmq.log import handlers |
|
41 | 41 | |
|
42 | 42 | from IPython.external.decorator import decorator |
|
43 | 43 | |
|
44 | 44 | # IPython imports |
|
45 | 45 | from IPython.config.application import Application |
|
46 | from IPython.utils.localinterfaces import LOCALHOST, PUBLIC_IPS | 

46 | from IPython.utils.localinterfaces import localhost, is_public_ip, public_ips | 
|
47 | 47 | from IPython.kernel.zmq.log import EnginePUBHandler |
|
48 | 48 | from IPython.kernel.zmq.serialize import ( |
|
49 | 49 | unserialize_object, serialize_object, pack_apply_message, unpack_apply_message |
|
50 | 50 | ) |
|
51 | 51 | |
|
52 | 52 | #----------------------------------------------------------------------------- |
|
53 | 53 | # Classes |
|
54 | 54 | #----------------------------------------------------------------------------- |
|
55 | 55 | |
|
56 | 56 | class Namespace(dict): |
|
57 | 57 | """Subclass of dict for attribute access to keys.""" |
|
58 | 58 | |
|
59 | 59 | def __getattr__(self, key): |
|
60 | 60 | """getattr aliased to getitem""" |
|
61 | 61 | if key in self.iterkeys(): |
|
62 | 62 | return self[key] |
|
63 | 63 | else: |
|
64 | 64 | raise NameError(key) |
|
65 | 65 | |
|
66 | 66 | def __setattr__(self, key, value): |
|
67 | 67 | """setattr aliased to setitem, refusing to shadow dict attributes""" 
|
68 | 68 | if hasattr(dict, key): |
|
69 | 69 | raise KeyError("Cannot override dict keys %r"%key) |
|
70 | 70 | self[key] = value |
|
71 | 71 | |
|
72 | 72 | |
|
73 | 73 | class ReverseDict(dict): |
|
74 | 74 | """simple double-keyed subset of dict methods.""" |
|
75 | 75 | |
|
76 | 76 | def __init__(self, *args, **kwargs): |
|
77 | 77 | dict.__init__(self, *args, **kwargs) |
|
78 | 78 | self._reverse = dict() |
|
79 | 79 | for key, value in self.iteritems(): |
|
80 | 80 | self._reverse[value] = key |
|
81 | 81 | |
|
82 | 82 | def __getitem__(self, key): |
|
83 | 83 | try: |
|
84 | 84 | return dict.__getitem__(self, key) |
|
85 | 85 | except KeyError: |
|
86 | 86 | return self._reverse[key] |
|
87 | 87 | |
|
88 | 88 | def __setitem__(self, key, value): |
|
89 | 89 | if key in self._reverse: |
|
90 | 90 | raise KeyError("Can't have key %r on both sides!"%key) |
|
91 | 91 | dict.__setitem__(self, key, value) |
|
92 | 92 | self._reverse[value] = key |
|
93 | 93 | |
|
94 | 94 | def pop(self, key): |
|
95 | 95 | value = dict.pop(self, key) |
|
96 | 96 | self._reverse.pop(value) |
|
97 | 97 | return value |
|
98 | 98 | |
|
99 | 99 | def get(self, key, default=None): |
|
100 | 100 | try: |
|
101 | 101 | return self[key] |
|
102 | 102 | except KeyError: |
|
103 | 103 | return default |
|
104 | 104 | |
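
ReverseDict keeps a mirror mapping so a lookup works from either direction, e.g. from an id to an ident and back. A usage sketch:

    rd = ReverseDict()
    rd['engine-0'] = 0            # also records 0 -> 'engine-0'
    assert rd[0] == 'engine-0'    # reverse lookup via the KeyError fallback
    assert rd.get('absent', 'n/a') == 'n/a'
    rd.pop('engine-0')            # removes both directions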
|
105 | 105 | #----------------------------------------------------------------------------- |
|
106 | 106 | # Functions |
|
107 | 107 | #----------------------------------------------------------------------------- |
|
108 | 108 | |
|
109 | 109 | @decorator |
|
110 | 110 | def log_errors(f, self, *args, **kwargs): |
|
111 | 111 | """decorator to log unhandled exceptions raised in a method. |
|
112 | 112 | |
|
113 | 113 | For use wrapping on_recv callbacks, so that exceptions |
|
114 | 114 | do not cause the stream to be closed. |
|
115 | 115 | """ |
|
116 | 116 | try: |
|
117 | 117 | return f(self, *args, **kwargs) |
|
118 | 118 | except Exception: |
|
119 | 119 | self.log.error("Uncaught exception in %r" % f, exc_info=True) |
|
120 | 120 | |
|
121 | 121 | |
|
122 | 122 | def is_url(url): |
|
123 | 123 | """boolean check for whether a string is a zmq url""" |
|
124 | 124 | if '://' not in url: |
|
125 | 125 | return False |
|
126 | 126 | proto, addr = url.split('://', 1) |
|
127 | 127 | if proto.lower() not in ['tcp','pgm','epgm','ipc','inproc']: |
|
128 | 128 | return False |
|
129 | 129 | return True |
|
130 | 130 | |
|
131 | 131 | def validate_url(url): |
|
132 | 132 | """validate a url for zeromq""" |
|
133 | 133 | if not isinstance(url, basestring): |
|
134 | 134 | raise TypeError("url must be a string, not %r"%type(url)) |
|
135 | 135 | url = url.lower() |
|
136 | 136 | |
|
137 | 137 | proto_addr = url.split('://') |
|
138 | 138 | assert len(proto_addr) == 2, 'Invalid url: %r'%url |
|
139 | 139 | proto, addr = proto_addr |
|
140 | 140 | assert proto in ['tcp','pgm','epgm','ipc','inproc'], "Invalid protocol: %r"%proto |
|
141 | 141 | |
|
142 | 142 | # domain pattern adapted from http://www.regexlib.com/REDetails.aspx?regexp_id=391 |
|
143 | 143 | # author: Remi Sabourin |
|
144 | 144 | pat = re.compile(r'^([\w\d]([\w\d\-]{0,61}[\w\d])?\.)*[\w\d]([\w\d\-]{0,61}[\w\d])?$') |
|
145 | 145 | |
|
146 | 146 | if proto == 'tcp': |
|
147 | 147 | lis = addr.split(':') |
|
148 | 148 | assert len(lis) == 2, 'Invalid url: %r'%url |
|
149 | 149 | addr,s_port = lis |
|
150 | 150 | try: |
|
151 | 151 | port = int(s_port) |
|
152 | 152 | except ValueError: |
|
153 | 153 | raise AssertionError("Invalid port %r in url: %r"%(s_port, url)) 
|
154 | 154 | |
|
155 | 155 | assert addr == '*' or pat.match(addr) is not None, 'Invalid url: %r'%url |
|
156 | 156 | |
|
157 | 157 | else: |
|
158 | 158 | # only validate tcp urls currently |
|
159 | 159 | pass |
|
160 | 160 | |
|
161 | 161 | return True |
|
162 | 162 | |
|
163 | 163 | |
|
164 | 164 | def validate_url_container(container): |
|
165 | 165 | """validate a potentially nested collection of urls.""" |
|
166 | 166 | if isinstance(container, basestring): |
|
167 | 167 | url = container |
|
168 | 168 | return validate_url(url) |
|
169 | 169 | elif isinstance(container, dict): |
|
170 | 170 | container = container.itervalues() |
|
171 | 171 | |
|
172 | 172 | for element in container: |
|
173 | 173 | validate_url_container(element) |
|
174 | 174 | |
|
175 | 175 | |
|
176 | 176 | def split_url(url): |
|
177 | 177 | """split a zmq url (tcp://ip:port) into ('tcp','ip','port').""" |
|
178 | 178 | proto_addr = url.split('://') |
|
179 | 179 | assert len(proto_addr) == 2, 'Invalid url: %r'%url |
|
180 | 180 | proto, addr = proto_addr |
|
181 | 181 | lis = addr.split(':') |
|
182 | 182 | assert len(lis) == 2, 'Invalid url: %r'%url |
|
183 | 183 | addr,s_port = lis |
|
184 | 184 | return proto,addr,s_port |
|
185 | 185 | |
|
186 | 186 | def disambiguate_ip_address(ip, location=None): |
|
187 | 187 | """turn multi-ip interfaces '0.0.0.0' and '*' into connectable |
|
188 | 188 | ones, based on the location (default interpretation of location is localhost).""" |
|
189 | 189 | if ip in ('0.0.0.0', '*'): |
|
190 | if location is None or location in PUBLIC_IPS or not PUBLIC_IPS: | 

190 | if location is None or is_public_ip(location) or not public_ips(): | 
|
191 | 191 | # If location is unspecified or cannot be determined, assume local |
|
192 | ip = LOCALHOST | 

192 | ip = localhost() | 
|
193 | 193 | elif location: |
|
194 | 194 | return location |
|
195 | 195 | return ip |
|
196 | 196 | |
|
197 | 197 | def disambiguate_url(url, location=None): |
|
198 | 198 | """turn multi-ip interfaces '0.0.0.0' and '*' into connectable |
|
199 | 199 | ones, based on the location (default interpretation is localhost). |
|
200 | 200 | |
|
201 | 201 | This is for zeromq urls, such as tcp://*:10101.""" |
|
202 | 202 | try: |
|
203 | 203 | proto,ip,port = split_url(url) |
|
204 | 204 | except AssertionError: |
|
205 | 205 | # probably not tcp url; could be ipc, etc. |
|
206 | 206 | return url |
|
207 | 207 | |
|
208 | 208 | ip = disambiguate_ip_address(ip,location) |
|
209 | 209 | |
|
210 | 210 | return "%s://%s:%s"%(proto,ip,port) |
|
211 | 211 | |
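
Concretely, following the rules in `disambiguate_ip_address` (addresses illustrative; `my_ip` stands for one of this machine's own public IPs):

    disambiguate_url('tcp://*:10101')                    # no location: 'tcp://127.0.0.1:10101'
    disambiguate_url('tcp://0.0.0.0:10101', '10.0.1.5')  # remote location: 'tcp://10.0.1.5:10101'
    disambiguate_url('tcp://0.0.0.0:10101', my_ip)       # location is this host: loopback url
    disambiguate_url('ipc:///tmp/sock')                  # non-tcp urls pass through unchanged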
|
212 | 212 | |
|
213 | 213 | #-------------------------------------------------------------------------- |
|
214 | 214 | # helpers for implementing old MEC API via view.apply |
|
215 | 215 | #-------------------------------------------------------------------------- |
|
216 | 216 | |
|
217 | 217 | def interactive(f): |
|
218 | 218 | """decorator for making functions appear as interactively defined. |
|
219 | 219 | This results in the function being linked to the user_ns as globals() |
|
220 | 220 | instead of the module globals(). |
|
221 | 221 | """ |
|
222 | 222 | f.__module__ = '__main__' |
|
223 | 223 | return f |
|
224 | 224 | |
|
225 | 225 | @interactive |
|
226 | 226 | def _push(**ns): |
|
227 | 227 | """helper method for implementing `client.push` via `client.apply`""" |
|
228 | 228 | user_ns = globals() |
|
229 | 229 | tmp = '_IP_PUSH_TMP_' |
|
230 | 230 | while tmp in user_ns: |
|
231 | 231 | tmp = tmp + '_' |
|
232 | 232 | try: |
|
233 | 233 | for name, value in ns.iteritems(): |
|
234 | 234 | user_ns[tmp] = value |
|
235 | 235 | exec "%s = %s" % (name, tmp) in user_ns |
|
236 | 236 | finally: |
|
237 | 237 | user_ns.pop(tmp, None) |
|
238 | 238 | |
|
239 | 239 | @interactive |
|
240 | 240 | def _pull(keys): |
|
241 | 241 | """helper method for implementing `client.pull` via `client.apply`""" |
|
242 | 242 | if isinstance(keys, (list,tuple, set)): |
|
243 | 243 | return map(lambda key: eval(key, globals()), keys) |
|
244 | 244 | else: |
|
245 | 245 | return eval(keys, globals()) |
|
246 | 246 | |
|
247 | 247 | @interactive |
|
248 | 248 | def _execute(code): |
|
249 | 249 | """helper method for implementing `client.execute` via `client.apply`""" |
|
250 | 250 | exec code in globals() |
|
251 | 251 | |
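
These three helpers are how the old MEC-style push/pull/execute API rides on `view.apply`: thanks to `@interactive` they run with the engine's user_ns as their globals. Illustrative client-side use (`view` is a hypothetical DirectView; blocking/async details omitted):

    view.apply(_push, a=1, b='hi')        # assign names in the engine namespace
    a, b = view.apply(_pull, ('a', 'b'))  # evaluate names there, ship values back
    view.apply(_execute, "c = a * 2")     # run arbitrary code in that namespace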
|
252 | 252 | #-------------------------------------------------------------------------- |
|
253 | 253 | # extra process management utilities |
|
254 | 254 | #-------------------------------------------------------------------------- |
|
255 | 255 | |
|
256 | 256 | _random_ports = set() |
|
257 | 257 | |
|
258 | 258 | def select_random_ports(n): |
|
259 | 259 | """Selects and return n random ports that are available.""" |
|
260 | 260 | ports = [] |
|
261 | 261 | for i in xrange(n): |
|
262 | 262 | sock = socket.socket() |
|
263 | 263 | sock.bind(('', 0)) |
|
264 | 264 | while sock.getsockname()[1] in _random_ports: |
|
265 | 265 | sock.close() |
|
266 | 266 | sock = socket.socket() |
|
267 | 267 | sock.bind(('', 0)) |
|
268 | 268 | ports.append(sock) |
|
269 | 269 | for i, sock in enumerate(ports): |
|
270 | 270 | port = sock.getsockname()[1] |
|
271 | 271 | sock.close() |
|
272 | 272 | ports[i] = port |
|
273 | 273 | _random_ports.add(port) |
|
274 | 274 | return ports |
|
275 | 275 | |
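
Because every port handed out is remembered in the module-level `_random_ports` set, repeated calls within one process never return duplicates. Usage sketch:

    hb_ping, hb_pong, reg = select_random_ports(3)  # three distinct free ports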
|
276 | 276 | def signal_children(children): |
|
277 | 277 | """Relay interrupt/term signals to children, for more solid process cleanup.""" 
|
278 | 278 | def terminate_children(sig, frame): |
|
279 | 279 | log = Application.instance().log |
|
280 | 280 | log.critical("Got signal %i, terminating children..."%sig) |
|
281 | 281 | for child in children: |
|
282 | 282 | child.terminate() |
|
283 | 283 | |
|
284 | 284 | sys.exit(sig != SIGINT) |
|
285 | 285 | # sys.exit(sig) |
|
286 | 286 | for sig in (SIGINT, SIGABRT, SIGTERM): |
|
287 | 287 | signal(sig, terminate_children) |
|
288 | 288 | |
|
289 | 289 | def generate_exec_key(keyfile): |
|
290 | 290 | import uuid |
|
291 | 291 | newkey = str(uuid.uuid4()) |
|
292 | 292 | with open(keyfile, 'w') as f: |
|
293 | 293 | # f.write('ipython-key ') |
|
294 | 294 | f.write(newkey+'\n') |
|
295 | 295 | # set user-only RW permissions (0600) |
|
296 | 296 | # this will have no effect on Windows |
|
297 | 297 | os.chmod(keyfile, stat.S_IRUSR|stat.S_IWUSR) |
|
298 | 298 | |
|
299 | 299 | |
|
300 | 300 | def integer_loglevel(loglevel): |
|
301 | 301 | try: |
|
302 | 302 | loglevel = int(loglevel) |
|
303 | 303 | except ValueError: |
|
304 | 304 | if isinstance(loglevel, str): |
|
305 | 305 | loglevel = getattr(logging, loglevel) |
|
306 | 306 | return loglevel |
|
307 | 307 | |
|
308 | 308 | def connect_logger(logname, context, iface, root="ip", loglevel=logging.DEBUG): |
|
309 | 309 | logger = logging.getLogger(logname) |
|
310 | 310 | if any([isinstance(h, handlers.PUBHandler) for h in logger.handlers]): |
|
311 | 311 | # don't add a second PUBHandler |
|
312 | 312 | return |
|
313 | 313 | loglevel = integer_loglevel(loglevel) |
|
314 | 314 | lsock = context.socket(zmq.PUB) |
|
315 | 315 | lsock.connect(iface) |
|
316 | 316 | handler = handlers.PUBHandler(lsock) |
|
317 | 317 | handler.setLevel(loglevel) |
|
318 | 318 | handler.root_topic = root |
|
319 | 319 | logger.addHandler(handler) |
|
320 | 320 | logger.setLevel(loglevel) |
|
321 | 321 | |
|
322 | 322 | def connect_engine_logger(context, iface, engine, loglevel=logging.DEBUG): |
|
323 | 323 | logger = logging.getLogger() |
|
324 | 324 | if any([isinstance(h, handlers.PUBHandler) for h in logger.handlers]): |
|
325 | 325 | # don't add a second PUBHandler |
|
326 | 326 | return |
|
327 | 327 | loglevel = integer_loglevel(loglevel) |
|
328 | 328 | lsock = context.socket(zmq.PUB) |
|
329 | 329 | lsock.connect(iface) |
|
330 | 330 | handler = EnginePUBHandler(engine, lsock) |
|
331 | 331 | handler.setLevel(loglevel) |
|
332 | 332 | logger.addHandler(handler) |
|
333 | 333 | logger.setLevel(loglevel) |
|
334 | 334 | return logger |
|
335 | 335 | |
|
336 | 336 | def local_logger(logname, loglevel=logging.DEBUG): |
|
337 | 337 | loglevel = integer_loglevel(loglevel) |
|
338 | 338 | logger = logging.getLogger(logname) |
|
339 | 339 | if any([isinstance(h, logging.StreamHandler) for h in logger.handlers]): |
|
340 | 340 | # don't add a second StreamHandler |
|
341 | 341 | return |
|
342 | 342 | handler = logging.StreamHandler() |
|
343 | 343 | handler.setLevel(loglevel) |
|
344 | 344 | formatter = logging.Formatter("%(asctime)s.%(msecs).03d [%(name)s] %(message)s", |
|
345 | 345 | datefmt="%Y-%m-%d %H:%M:%S") |
|
346 | 346 | handler.setFormatter(formatter) |
|
347 | 347 | |
|
348 | 348 | logger.addHandler(handler) |
|
349 | 349 | logger.setLevel(loglevel) |
|
350 | 350 | return logger |
|
351 | 351 | |
|
352 | 352 | def set_hwm(sock, hwm=0): |
|
353 | 353 | """set zmq High Water Mark on a socket |
|
354 | 354 | |
|
355 | 355 | in a way that always works for various pyzmq / libzmq versions. |
|
356 | 356 | """ |
|
357 | 357 | import zmq |
|
358 | 358 | |
|
359 | 359 | for key in ('HWM', 'SNDHWM', 'RCVHWM'): |
|
360 | 360 | opt = getattr(zmq, key, None) |
|
361 | 361 | if opt is None: |
|
362 | 362 | continue |
|
363 | 363 | try: |
|
364 | 364 | sock.setsockopt(opt, hwm) |
|
365 | 365 | except zmq.ZMQError: |
|
366 | 366 | pass |
|
367 | 367 | |
|
368 | 368 | No newline at end of file |
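
`set_hwm` papers over the pyzmq/libzmq API change that split the single HWM socket option into separate SNDHWM and RCVHWM options: it simply tries every name the installed zmq exposes and ignores the ones the socket rejects. Typical use (a sketch):

    import zmq

    ctx = zmq.Context.instance()
    sock = ctx.socket(zmq.ROUTER)
    set_hwm(sock, 0)  # 0 = unlimited; works on old and new pyzmq alike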
@@ -1,382 +1,382 b'' | |||
|
1 | 1 | """ A minimal application using the Qt console-style IPython frontend. |
|
2 | 2 | |
|
3 | 3 | This is not a complete console app, as subprocess will not be able to receive |
|
4 | 4 | input, there is no real readline support, among other limitations. |
|
5 | 5 | |
|
6 | 6 | Authors: |
|
7 | 7 | |
|
8 | 8 | * Evan Patterson |
|
9 | 9 | * Min RK |
|
10 | 10 | * Erik Tollerud |
|
11 | 11 | * Fernando Perez |
|
12 | 12 | * Bussonnier Matthias |
|
13 | 13 | * Thomas Kluyver |
|
14 | 14 | * Paul Ivanov |
|
15 | 15 | |
|
16 | 16 | """ |
|
17 | 17 | |
|
18 | 18 | #----------------------------------------------------------------------------- |
|
19 | 19 | # Imports |
|
20 | 20 | #----------------------------------------------------------------------------- |
|
21 | 21 | |
|
22 | 22 | # stdlib imports |
|
23 | 23 | import os |
|
24 | 24 | import signal |
|
25 | 25 | import sys |
|
26 | 26 | |
|
27 | 27 | # If run on Windows, install an exception hook which pops up a |
|
28 | 28 | # message box. Pythonw.exe hides the console, so without this |
|
29 | 29 | # the application silently fails to load. |
|
30 | 30 | # |
|
31 | 31 | # We always install this handler, because the expectation is for |
|
32 | 32 | # qtconsole to bring up a GUI even if called from the console. |
|
33 | 33 | # The old handler is called, so the exception is printed as well. |
|
34 | 34 | # If desired, check for pythonw with an additional condition |
|
35 | 35 | # (sys.executable.lower().find('pythonw.exe') >= 0). |
|
36 | 36 | if os.name == 'nt': |
|
37 | 37 | old_excepthook = sys.excepthook |
|
38 | 38 | |
|
39 | 39 | def gui_excepthook(exctype, value, tb): |
|
40 | 40 | try: |
|
41 | 41 | import ctypes, traceback |
|
42 | 42 | MB_ICONERROR = 0x00000010L |
|
43 | 43 | title = u'Error starting IPython QtConsole' |
|
44 | 44 | msg = u''.join(traceback.format_exception(exctype, value, tb)) |
|
45 | 45 | ctypes.windll.user32.MessageBoxW(0, msg, title, MB_ICONERROR) |
|
46 | 46 | finally: |
|
47 | 47 | # Also call the old exception hook to let it do |
|
48 | 48 | # its thing too. |
|
49 | 49 | old_excepthook(exctype, value, tb) |
|
50 | 50 | |
|
51 | 51 | sys.excepthook = gui_excepthook |
|
52 | 52 | |
|
53 | 53 | # System library imports |
|
54 | 54 | from IPython.external.qt import QtCore, QtGui |
|
55 | 55 | |
|
56 | 56 | # Local imports |
|
57 | 57 | from IPython.config.application import catch_config_error |
|
58 | 58 | from IPython.core.application import BaseIPythonApplication |
|
59 | 59 | from IPython.qt.console.ipython_widget import IPythonWidget |
|
60 | 60 | from IPython.qt.console.rich_ipython_widget import RichIPythonWidget |
|
61 | 61 | from IPython.qt.console import styles |
|
62 | 62 | from IPython.qt.console.mainwindow import MainWindow |
|
63 | 63 | from IPython.qt.client import QtKernelClient |
|
64 | 64 | from IPython.qt.manager import QtKernelManager |
|
65 | 65 | from IPython.utils.traitlets import ( |
|
66 | 66 | Dict, Unicode, CBool, Any |
|
67 | 67 | ) |
|
68 | 68 | |
|
69 | 69 | from IPython.consoleapp import ( |
|
70 | 70 | IPythonConsoleApp, app_aliases, app_flags, flags, aliases |
|
71 | 71 | ) |
|
72 | 72 | |
|
73 | 73 | #----------------------------------------------------------------------------- |
|
74 | 74 | # Network Constants |
|
75 | 75 | #----------------------------------------------------------------------------- |
|
76 | 76 | |
|
77 | from IPython.utils.localinterfaces import LOCAL_IPS | 

77 | from IPython.utils.localinterfaces import is_local_ip | 
|
78 | 78 | |
|
79 | 79 | #----------------------------------------------------------------------------- |
|
80 | 80 | # Globals |
|
81 | 81 | #----------------------------------------------------------------------------- |
|
82 | 82 | |
|
83 | 83 | _examples = """ |
|
84 | 84 | ipython qtconsole # start the qtconsole |
|
85 | 85 | ipython qtconsole --matplotlib=inline # start with matplotlib inline plotting mode |
|
86 | 86 | """ |
|
87 | 87 | |
|
88 | 88 | #----------------------------------------------------------------------------- |
|
89 | 89 | # Aliases and Flags |
|
90 | 90 | #----------------------------------------------------------------------------- |
|
91 | 91 | |
|
92 | 92 | # start with copy of flags |
|
93 | 93 | flags = dict(flags) |
|
94 | 94 | qt_flags = { |
|
95 | 95 | 'plain' : ({'IPythonQtConsoleApp' : {'plain' : True}}, |
|
96 | 96 | "Disable rich text support."), |
|
97 | 97 | } |
|
98 | 98 | |
|
99 | 99 | # and app_flags from the Console Mixin |
|
100 | 100 | qt_flags.update(app_flags) |
|
101 | 101 | # add frontend flags to the full set |
|
102 | 102 | flags.update(qt_flags) |
|
103 | 103 | |
|
104 | 104 | # start with copy of front&backend aliases list |
|
105 | 105 | aliases = dict(aliases) |
|
106 | 106 | qt_aliases = dict( |
|
107 | 107 | style = 'IPythonWidget.syntax_style', |
|
108 | 108 | stylesheet = 'IPythonQtConsoleApp.stylesheet', |
|
109 | 109 | colors = 'ZMQInteractiveShell.colors', |
|
110 | 110 | |
|
111 | 111 | editor = 'IPythonWidget.editor', |
|
112 | 112 | paging = 'ConsoleWidget.paging', |
|
113 | 113 | ) |
|
114 | 114 | # and app_aliases from the Console Mixin |
|
115 | 115 | qt_aliases.update(app_aliases) |
|
116 | 116 | qt_aliases.update({'gui-completion':'ConsoleWidget.gui_completion'}) |
|
117 | 117 | # add frontend aliases to the full set |
|
118 | 118 | aliases.update(qt_aliases) |
|
119 | 119 | |
|
120 | 120 | # get flags&aliases into sets, and remove a couple that |
|
121 | 121 | # shouldn't be scrubbed from backend flags: |
|
122 | 122 | qt_aliases = set(qt_aliases.keys()) |
|
123 | 123 | qt_aliases.remove('colors') |
|
124 | 124 | qt_flags = set(qt_flags.keys()) |
|
125 | 125 | |
|
126 | 126 | #----------------------------------------------------------------------------- |
|
127 | 127 | # Classes |
|
128 | 128 | #----------------------------------------------------------------------------- |
|
129 | 129 | |
|
130 | 130 | #----------------------------------------------------------------------------- |
|
131 | 131 | # IPythonQtConsole |
|
132 | 132 | #----------------------------------------------------------------------------- |
|
133 | 133 | |
|
134 | 134 | |
|
135 | 135 | class IPythonQtConsoleApp(BaseIPythonApplication, IPythonConsoleApp): |
|
136 | 136 | name = 'ipython-qtconsole' |
|
137 | 137 | |
|
138 | 138 | description = """ |
|
139 | 139 | The IPython QtConsole. |
|
140 | 140 | |
|
141 | 141 | This launches a Console-style application using Qt. It is not a full |
|
142 | 142 | console, in that launched terminal subprocesses will not be able to accept |
|
143 | 143 | input. |
|
144 | 144 | |
|
145 | 145 | The QtConsole supports various extra features beyond the Terminal IPython |
|
146 | 146 | shell, such as inline plotting with matplotlib, via: |
|
147 | 147 | |
|
148 | 148 | ipython qtconsole --matplotlib=inline |
|
149 | 149 | |
|
150 | 150 | as well as saving your session as HTML, and printing the output. |
|
151 | 151 | |
|
152 | 152 | """ |
|
153 | 153 | examples = _examples |
|
154 | 154 | |
|
155 | 155 | classes = [IPythonWidget] + IPythonConsoleApp.classes |
|
156 | 156 | flags = Dict(flags) |
|
157 | 157 | aliases = Dict(aliases) |
|
158 | 158 | frontend_flags = Any(qt_flags) |
|
159 | 159 | frontend_aliases = Any(qt_aliases) |
|
160 | 160 | kernel_client_class = QtKernelClient |
|
161 | 161 | kernel_manager_class = QtKernelManager |
|
162 | 162 | |
|
163 | 163 | stylesheet = Unicode('', config=True, |
|
164 | 164 | help="path to a custom CSS stylesheet") |
|
165 | 165 | |
|
166 | 166 | hide_menubar = CBool(False, config=True, |
|
167 | 167 | help="Start the console window with the menu bar hidden.") |
|
168 | 168 | |
|
169 | 169 | maximize = CBool(False, config=True, |
|
170 | 170 | help="Start the console window maximized.") |
|
171 | 171 | |
|
172 | 172 | plain = CBool(False, config=True, |
|
173 | 173 | help="Use a plaintext widget instead of rich text (plain can't print/save).") |
|
174 | 174 | |
|
175 | 175 | def _plain_changed(self, name, old, new): |
|
176 | 176 | kind = 'plain' if new else 'rich' |
|
177 | 177 | self.config.ConsoleWidget.kind = kind |
|
178 | 178 | if new: |
|
179 | 179 | self.widget_factory = IPythonWidget |
|
180 | 180 | else: |
|
181 | 181 | self.widget_factory = RichIPythonWidget |
|
182 | 182 | |
|
183 | 183 | # the factory for creating a widget |
|
184 | 184 | widget_factory = Any(RichIPythonWidget) |
|
185 | 185 | |
|
186 | 186 | def parse_command_line(self, argv=None): |
|
187 | 187 | super(IPythonQtConsoleApp, self).parse_command_line(argv) |
|
188 | 188 | self.build_kernel_argv(argv) |
|
189 | 189 | |
|
190 | 190 | |
|
191 | 191 | def new_frontend_master(self): |
|
192 | 192 | """ Create and return new frontend attached to new kernel, launched on localhost. |
|
193 | 193 | """ |
|
194 | 194 | kernel_manager = self.kernel_manager_class( |
|
195 | 195 | connection_file=self._new_connection_file(), |
|
196 | 196 | parent=self, |
|
197 | 197 | autorestart=True, |
|
198 | 198 | ) |
|
199 | 199 | # start the kernel |
|
200 | 200 | kwargs = dict() |
|
201 | 201 | kwargs['extra_arguments'] = self.kernel_argv |
|
202 | 202 | kernel_manager.start_kernel(**kwargs) |
|
203 | 203 | kernel_manager.client_factory = self.kernel_client_class |
|
204 | 204 | kernel_client = kernel_manager.client() |
|
205 | 205 | kernel_client.start_channels(shell=True, iopub=True) |
|
206 | 206 | widget = self.widget_factory(config=self.config, |
|
207 | 207 | local_kernel=True) |
|
208 | 208 | self.init_colors(widget) |
|
209 | 209 | widget.kernel_manager = kernel_manager |
|
210 | 210 | widget.kernel_client = kernel_client |
|
211 | 211 | widget._existing = False |
|
212 | 212 | widget._may_close = True |
|
213 | 213 | widget._confirm_exit = self.confirm_exit |
|
214 | 214 | return widget |
|
215 | 215 | |
|
216 | 216 | def new_frontend_slave(self, current_widget): |
|
217 | 217 | """Create and return a new frontend attached to an existing kernel. |
|
218 | 218 | |
|
219 | 219 | Parameters |
|
220 | 220 | ---------- |
|
221 | 221 | current_widget : IPythonWidget |
|
222 | 222 | The IPythonWidget whose kernel this frontend is to share |
|
223 | 223 | """ |
|
224 | 224 | kernel_client = self.kernel_client_class( |
|
225 | 225 | connection_file=current_widget.kernel_client.connection_file, |
|
226 | 226 | config = self.config, |
|
227 | 227 | ) |
|
228 | 228 | kernel_client.load_connection_file() |
|
229 | 229 | kernel_client.start_channels() |
|
230 | 230 | widget = self.widget_factory(config=self.config, |
|
231 | 231 | local_kernel=False) |
|
232 | 232 | self.init_colors(widget) |
|
233 | 233 | widget._existing = True |
|
234 | 234 | widget._may_close = False |
|
235 | 235 | widget._confirm_exit = False |
|
236 | 236 | widget.kernel_client = kernel_client |
|
237 | 237 | widget.kernel_manager = current_widget.kernel_manager |
|
238 | 238 | return widget |
|
239 | 239 | |
|
240 | 240 | def init_qt_app(self): |
|
241 | 241 | # separate from qt_elements, because it must run first |
|
242 | 242 | self.app = QtGui.QApplication([]) |
|
243 | 243 | |
|
244 | 244 | def init_qt_elements(self): |
|
245 | 245 | # Create the widget. |
|
246 | 246 | |
|
247 | 247 | base_path = os.path.abspath(os.path.dirname(__file__)) |
|
248 | 248 | icon_path = os.path.join(base_path, 'resources', 'icon', 'IPythonConsole.svg') |
|
249 | 249 | self.app.icon = QtGui.QIcon(icon_path) |
|
250 | 250 | QtGui.QApplication.setWindowIcon(self.app.icon) |
|
251 | 251 | |
|
252 | 252 | ip = self.ip |
|
253 | local_kernel = (not self.existing) or ip in LOCAL_IPS | |

253 | local_kernel = (not self.existing) or is_local_ip(ip) | |
|
254 | 254 | self.widget = self.widget_factory(config=self.config, |
|
255 | 255 | local_kernel=local_kernel) |
|
256 | 256 | self.init_colors(self.widget) |
|
257 | 257 | self.widget._existing = self.existing |
|
258 | 258 | self.widget._may_close = not self.existing |
|
259 | 259 | self.widget._confirm_exit = self.confirm_exit |
|
260 | 260 | |
|
261 | 261 | self.widget.kernel_manager = self.kernel_manager |
|
262 | 262 | self.widget.kernel_client = self.kernel_client |
|
263 | 263 | self.window = MainWindow(self.app, |
|
264 | 264 | confirm_exit=self.confirm_exit, |
|
265 | 265 | new_frontend_factory=self.new_frontend_master, |
|
266 | 266 | slave_frontend_factory=self.new_frontend_slave, |
|
267 | 267 | ) |
|
268 | 268 | self.window.log = self.log |
|
269 | 269 | self.window.add_tab_with_frontend(self.widget) |
|
270 | 270 | self.window.init_menu_bar() |
|
271 | 271 | |
|
272 | 272 | # Ignore on OSX, where there is always a menu bar |
|
273 | 273 | if sys.platform != 'darwin' and self.hide_menubar: |
|
274 | 274 | self.window.menuBar().setVisible(False) |
|
275 | 275 | |
|
276 | 276 | self.window.setWindowTitle('IPython') |
|
277 | 277 | |
|
278 | 278 | def init_colors(self, widget): |
|
279 | 279 | """Configure the coloring of the widget""" |
|
280 | 280 | # Note: This will be dramatically simplified when colors |
|
281 | 281 | # are removed from the backend. |
|
282 | 282 | |
|
283 | 283 | # parse the colors arg down to current known labels |
|
284 | 284 | try: |
|
285 | 285 | colors = self.config.ZMQInteractiveShell.colors |
|
286 | 286 | except AttributeError: |
|
287 | 287 | colors = None |
|
288 | 288 | try: |
|
289 | 289 | style = self.config.IPythonWidget.syntax_style |
|
290 | 290 | except AttributeError: |
|
291 | 291 | style = None |
|
292 | 292 | try: |
|
293 | 293 | sheet = self.config.IPythonWidget.style_sheet |
|
294 | 294 | except AttributeError: |
|
295 | 295 | sheet = None |
|
296 | 296 | |
|
297 | 297 | # find the value for colors: |
|
298 | 298 | if colors: |
|
299 | 299 | colors=colors.lower() |
|
300 | 300 | if colors in ('lightbg', 'light'): |
|
301 | 301 | colors='lightbg' |
|
302 | 302 | elif colors in ('dark', 'linux'): |
|
303 | 303 | colors='linux' |
|
304 | 304 | else: |
|
305 | 305 | colors='nocolor' |
|
306 | 306 | elif style: |
|
307 | 307 | if style=='bw': |
|
308 | 308 | colors='nocolor' |
|
309 | 309 | elif styles.dark_style(style): |
|
310 | 310 | colors='linux' |
|
311 | 311 | else: |
|
312 | 312 | colors='lightbg' |
|
313 | 313 | else: |
|
314 | 314 | colors=None |
|
315 | 315 | |
|
316 | 316 | # Configure the style |
|
317 | 317 | if style: |
|
318 | 318 | widget.style_sheet = styles.sheet_from_template(style, colors) |
|
319 | 319 | widget.syntax_style = style |
|
320 | 320 | widget._syntax_style_changed() |
|
321 | 321 | widget._style_sheet_changed() |
|
322 | 322 | elif colors: |
|
323 | 323 | # use a default dark/light/bw style |
|
324 | 324 | widget.set_default_style(colors=colors) |
|
325 | 325 | |
|
326 | 326 | if self.stylesheet: |
|
327 | 327 | # we got an explicit stylesheet |
|
328 | 328 | if os.path.isfile(self.stylesheet): |
|
329 | 329 | with open(self.stylesheet) as f: |
|
330 | 330 | sheet = f.read() |
|
331 | 331 | else: |
|
332 | 332 | raise IOError("Stylesheet %r not found." % self.stylesheet) |
|
333 | 333 | if sheet: |
|
334 | 334 | widget.style_sheet = sheet |
|
335 | 335 | widget._style_sheet_changed() |
|
336 | 336 | |
|
337 | 337 | |
|
338 | 338 | def init_signal(self): |
|
339 | 339 | """allow clean shutdown on sigint""" |
|
340 | 340 | signal.signal(signal.SIGINT, lambda sig, frame: self.exit(-2)) |
|
341 | 341 | # need a timer, so that QApplication doesn't block until a real |
|
342 | 342 | # Qt event fires (can require mouse movement) |
|
343 | 343 | # timer trick from http://stackoverflow.com/q/4938723/938949 |
|
344 | 344 | timer = QtCore.QTimer() |
|
345 | 345 | # Let the interpreter run each 200 ms: |
|
346 | 346 | timer.timeout.connect(lambda: None) |
|
347 | 347 | timer.start(200) |
|
348 | 348 | # hold onto ref, so the timer doesn't get cleaned up |
|
349 | 349 | self._sigint_timer = timer |
|
350 | 350 | |
|
351 | 351 | @catch_config_error |
|
352 | 352 | def initialize(self, argv=None): |
|
353 | 353 | self.init_qt_app() |
|
354 | 354 | super(IPythonQtConsoleApp, self).initialize(argv) |
|
355 | 355 | IPythonConsoleApp.initialize(self,argv) |
|
356 | 356 | self.init_qt_elements() |
|
357 | 357 | self.init_signal() |
|
358 | 358 | |
|
359 | 359 | def start(self): |
|
360 | 360 | |
|
361 | 361 | # draw the window |
|
362 | 362 | if self.maximize: |
|
363 | 363 | self.window.showMaximized() |
|
364 | 364 | else: |
|
365 | 365 | self.window.show() |
|
366 | 366 | self.window.raise_() |
|
367 | 367 | |
|
368 | 368 | # Start the application main loop. |
|
369 | 369 | self.app.exec_() |
|
370 | 370 | |
|
371 | 371 | #----------------------------------------------------------------------------- |
|
372 | 372 | # Main entry point |
|
373 | 373 | #----------------------------------------------------------------------------- |
|
374 | 374 | |
|
375 | 375 | def main(): |
|
376 | 376 | app = IPythonQtConsoleApp() |
|
377 | 377 | app.initialize() |
|
378 | 378 | app.start() |
|
379 | 379 | |
|
380 | 380 | |
|
381 | 381 | if __name__ == '__main__': |
|
382 | 382 | main() |
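
A minimal standalone sketch of the SIGINT/QTimer trick used in init_signal above (an illustration, not part of the changeset; it assumes only that a Qt binding is importable via IPython.external.qt, and the 200 ms interval mirrors the value in the diff):

    import signal
    import sys

    from IPython.external.qt import QtCore, QtGui

    app = QtGui.QApplication([])

    # Let Ctrl-C quit the event loop instead of being swallowed by Qt.
    signal.signal(signal.SIGINT, lambda sig, frame: app.quit())

    # Python signal handlers only run while the interpreter is executing
    # bytecode; an idle Qt event loop never re-enters Python. A no-op timer
    # wakes the interpreter every 200 ms so the pending handler can fire.
    timer = QtCore.QTimer()
    timer.timeout.connect(lambda: None)
    timer.start(200)

    sys.exit(app.exec_())
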
@@ -1,55 +1,108 b'' | |||
|
1 | 1 | """Simple utility for building a list of local IPs using the socket module. |
|
2 | 2 | This module defines three constants: 
|
3 | 3 | |
|
4 | 4 | LOCALHOST : The loopback interface, or the first interface that points to this |
|
5 | 5 | machine. It will *almost* always be '127.0.0.1' |
|
6 | 6 | |
|
7 | 7 | LOCAL_IPS : A list of IP addresses, loopback first, that point to this machine. |
|
8 | 8 | |
|
9 | 9 | PUBLIC_IPS : A list of public IP addresses that point to this machine. |
|
10 | 10 | Use these to tell remote clients where to find you. |
|
11 | 11 | """ |
|
12 | 12 | #----------------------------------------------------------------------------- |
|
13 | 13 | # Copyright (C) 2010-2011 The IPython Development Team |
|
14 | 14 | # |
|
15 | 15 | # Distributed under the terms of the BSD License. The full license is in |
|
16 | 16 | # the file COPYING, distributed as part of this software. |
|
17 | 17 | #----------------------------------------------------------------------------- |
|
18 | 18 | |
|
19 | 19 | #----------------------------------------------------------------------------- |
|
20 | 20 | # Imports |
|
21 | 21 | #----------------------------------------------------------------------------- |
|
22 | 22 | |
|
23 | 23 | import socket |
|
24 | 24 | |
|
25 | 25 | from .data import uniq_stable |
|
26 | 26 | |
|
27 | 27 | #----------------------------------------------------------------------------- |
|
28 | 28 | # Code |
|
29 | 29 | #----------------------------------------------------------------------------- |
|
30 | 30 | |
|
31 | 31 | LOCAL_IPS = [] |
|
32 | try: | |
|
33 | LOCAL_IPS = socket.gethostbyname_ex('localhost')[2] | |
|
34 | except socket.error: | |
|
35 | pass | |
|
36 | ||
|
37 | 32 | PUBLIC_IPS = [] |
|
38 | try: | |
|
39 | hostname = socket.gethostname() | |
|
40 | PUBLIC_IPS = socket.gethostbyname_ex(hostname)[2] | |
|
41 | # try hostname.local, in case hostname has been short-circuited to loopback | |
|
42 | if not hostname.endswith('.local') and all(ip.startswith('127') for ip in PUBLIC_IPS): | |
|
43 | PUBLIC_IPS = socket.gethostbyname_ex(socket.gethostname() + '.local')[2] | |
|
44 | except socket.error: | |
|
45 | pass | |
|
46 | finally: | |
|
47 | PUBLIC_IPS = uniq_stable(PUBLIC_IPS) | |
|
48 | LOCAL_IPS.extend(PUBLIC_IPS) | |
|
49 | ||
|
50 | # include all-interface aliases: 0.0.0.0 and '' | |
|
51 | LOCAL_IPS.extend(['0.0.0.0', '']) | |
|
52 | ||
|
53 | LOCAL_IPS = uniq_stable(LOCAL_IPS) | |
|
54 | ||
|
55 | LOCALHOST = LOCAL_IPS[0] | |
|
33 | ||
|
34 | LOCALHOST = '127.0.0.1' | |
|
35 | ||
|
36 | def _only_once(f): | |
|
37 | """decorator to only run a function once""" | |
|
38 | f.called = False | |
|
39 | def wrapped(): | |
|
40 | if f.called: | |
|
41 | return | |
|
42 | ret = f() | |
|
43 | f.called = True | |
|
44 | return ret | |
|
45 | return wrapped | |
|
46 | ||
|
47 | def _requires_ips(f): | |
|
48 | """decorator to ensure load_ips has been run before f""" | |
|
49 | def ips_loaded(*args, **kwargs): | |
|
50 | _load_ips() | |
|
51 | return f(*args, **kwargs) | |
|
52 | return ips_loaded | |
|
53 | ||
|
54 | @_only_once | |
|
55 | def _load_ips(): | |
|
56 | """load the IPs that point to this machine | |
|
57 | ||
|
58 | This function will only ever be called once. | |
|
59 | """ | |
|
60 | global LOCALHOST | |
|
61 | try: | |
|
62 | LOCAL_IPS[:] = socket.gethostbyname_ex('localhost')[2] | |
|
63 | except socket.error: | |
|
64 | pass | |
|
65 | ||
|
66 | try: | |
|
67 | hostname = socket.gethostname() | |
|
68 | PUBLIC_IPS[:] = socket.gethostbyname_ex(hostname)[2] | |
|
69 | # try hostname.local, in case hostname has been short-circuited to loopback | |
|
70 | if not hostname.endswith('.local') and all(ip.startswith('127') for ip in PUBLIC_IPS): | |
|
71 | PUBLIC_IPS[:] = socket.gethostbyname_ex(socket.gethostname() + '.local')[2] | |
|
72 | except socket.error: | |
|
73 | pass | |
|
74 | finally: | |
|
75 | PUBLIC_IPS[:] = uniq_stable(PUBLIC_IPS) | |
|
76 | LOCAL_IPS.extend(PUBLIC_IPS) | |
|
77 | ||
|
78 | # include all-interface aliases: 0.0.0.0 and '' | |
|
79 | LOCAL_IPS.extend(['0.0.0.0', '']) | |
|
80 | ||
|
81 | LOCAL_IPS[:] = uniq_stable(LOCAL_IPS) | |
|
82 | ||
|
83 | LOCALHOST = LOCAL_IPS[0] | |
|
84 | ||
|
85 | @_requires_ips | |
|
86 | def local_ips(): | |
|
87 | """return the IP addresses that point to this machine""" | |
|
88 | return LOCAL_IPS | |
|
89 | ||
|
90 | @_requires_ips | |
|
91 | def public_ips(): | |
|
92 | """return the IP addresses for this machine that are visible to other machines""" | |
|
93 | return PUBLIC_IPS | |
|
94 | ||
|
95 | @_requires_ips | |
|
96 | def localhost(): | |
|
97 | """return ip for localhost (almost always 127.0.0.1)""" | |
|
98 | return LOCALHOST | |
|
99 | ||
|
100 | @_requires_ips | |
|
101 | def is_local_ip(ip): | |
|
102 | """does `ip` point to this machine?""" | |
|
103 | return ip in LOCAL_IPS | |
|
104 | ||
|
105 | @_requires_ips | |
|
106 | def is_public_ip(ip): | |
|
107 | """is `ip` a publicly visible address?""" | |
|
108 | return ip in PUBLIC_IPS |
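
A hedged usage sketch of the lazy API added above (illustration only, not part of the changeset): no socket lookups happen at import time, and the first call to any accessor runs _load_ips() exactly once via the _only_once and _requires_ips decorators.

    from IPython.utils.localinterfaces import (
        localhost, local_ips, public_ips, is_local_ip,
    )

    # The first call populates LOCAL_IPS/PUBLIC_IPS; later calls are cheap.
    print(localhost())             # almost always '127.0.0.1'
    print(local_ips())             # loopback first, plus '0.0.0.0' and ''
    print(public_ips())            # addresses visible to other machines
    print(is_local_ip('0.0.0.0'))  # True: the all-interfaces alias is included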