##// END OF EJS Templates
remove user_variables...
MinRK -
Show More

The requested changes are too big and content was truncated. Show full diff

1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
@@ -1,734 +1,721 b''
1 1 # -*- coding: utf-8 -*-
2 2 """Tests for the key interactiveshell module.
3 3
4 4 Historically the main classes in interactiveshell have been under-tested. This
5 5 module should grow as many single-method tests as possible to trap many of the
6 6 recurring bugs we seem to encounter with high-level interaction.
7
8 Authors
9 -------
10 * Fernando Perez
11 7 """
12 #-----------------------------------------------------------------------------
13 # Copyright (C) 2011 The IPython Development Team
14 #
15 # Distributed under the terms of the BSD License. The full license is in
16 # the file COPYING, distributed as part of this software.
17 #-----------------------------------------------------------------------------
18 8
19 #-----------------------------------------------------------------------------
20 # Imports
21 #-----------------------------------------------------------------------------
22 # stdlib
9 # Copyright (c) IPython Development Team.
10 # Distributed under the terms of the Modified BSD License.
11
23 12 import ast
24 13 import os
25 14 import signal
26 15 import shutil
27 16 import sys
28 17 import tempfile
29 18 import unittest
30 19 try:
31 20 from unittest import mock
32 21 except ImportError:
33 22 import mock
34 23 from os.path import join
35 24
36 # third-party
37 25 import nose.tools as nt
38 26
39 # Our own
40 27 from IPython.core.inputtransformer import InputTransformer
41 28 from IPython.testing.decorators import skipif, skip_win32, onlyif_unicode_paths
42 29 from IPython.testing import tools as tt
43 30 from IPython.utils import io
44 31 from IPython.utils import py3compat
45 32 from IPython.utils.py3compat import unicode_type, PY3
46 33
47 34 if PY3:
48 35 from io import StringIO
49 36 else:
50 37 from StringIO import StringIO
51 38
52 39 #-----------------------------------------------------------------------------
53 40 # Globals
54 41 #-----------------------------------------------------------------------------
55 42 # This is used by every single test, no point repeating it ad nauseam
56 43 ip = get_ipython()
57 44
58 45 #-----------------------------------------------------------------------------
59 46 # Tests
60 47 #-----------------------------------------------------------------------------
61 48
62 49 class InteractiveShellTestCase(unittest.TestCase):
63 50 def test_naked_string_cells(self):
64 51 """Test that cells with only naked strings are fully executed"""
65 52 # First, single-line inputs
66 53 ip.run_cell('"a"\n')
67 54 self.assertEqual(ip.user_ns['_'], 'a')
68 55 # And also multi-line cells
69 56 ip.run_cell('"""a\nb"""\n')
70 57 self.assertEqual(ip.user_ns['_'], 'a\nb')
71 58
72 59 def test_run_empty_cell(self):
73 60 """Just make sure we don't get a horrible error with a blank
74 61 cell of input. Yes, I did overlook that."""
75 62 old_xc = ip.execution_count
76 63 ip.run_cell('')
77 64 self.assertEqual(ip.execution_count, old_xc)
78 65
79 66 def test_run_cell_multiline(self):
80 67 """Multi-block, multi-line cells must execute correctly.
81 68 """
82 69 src = '\n'.join(["x=1",
83 70 "y=2",
84 71 "if 1:",
85 72 " x += 1",
86 73 " y += 1",])
87 74 ip.run_cell(src)
88 75 self.assertEqual(ip.user_ns['x'], 2)
89 76 self.assertEqual(ip.user_ns['y'], 3)
90 77
91 78 def test_multiline_string_cells(self):
92 79 "Code sprinkled with multiline strings should execute (GH-306)"
93 80 ip.run_cell('tmp=0')
94 81 self.assertEqual(ip.user_ns['tmp'], 0)
95 82 ip.run_cell('tmp=1;"""a\nb"""\n')
96 83 self.assertEqual(ip.user_ns['tmp'], 1)
97 84
98 85 def test_dont_cache_with_semicolon(self):
99 86 "Ending a line with semicolon should not cache the returned object (GH-307)"
100 87 oldlen = len(ip.user_ns['Out'])
101 88 for cell in ['1;', '1;1;']:
102 89 ip.run_cell(cell, store_history=True)
103 90 newlen = len(ip.user_ns['Out'])
104 91 self.assertEqual(oldlen, newlen)
105 92 i = 0
106 93 #also test the default caching behavior
107 94 for cell in ['1', '1;1']:
108 95 ip.run_cell(cell, store_history=True)
109 96 newlen = len(ip.user_ns['Out'])
110 97 i += 1
111 98 self.assertEqual(oldlen+i, newlen)
112 99
113 100 def test_In_variable(self):
114 101 "Verify that In variable grows with user input (GH-284)"
115 102 oldlen = len(ip.user_ns['In'])
116 103 ip.run_cell('1;', store_history=True)
117 104 newlen = len(ip.user_ns['In'])
118 105 self.assertEqual(oldlen+1, newlen)
119 106 self.assertEqual(ip.user_ns['In'][-1],'1;')
120 107
121 108 def test_magic_names_in_string(self):
122 109 ip.run_cell('a = """\n%exit\n"""')
123 110 self.assertEqual(ip.user_ns['a'], '\n%exit\n')
124 111
125 112 def test_trailing_newline(self):
126 113 """test that running !(command) does not raise a SyntaxError"""
127 114 ip.run_cell('!(true)\n', False)
128 115 ip.run_cell('!(true)\n\n\n', False)
129 116
130 117 def test_gh_597(self):
131 118 """Pretty-printing lists of objects with non-ascii reprs may cause
132 119 problems."""
133 120 class Spam(object):
134 121 def __repr__(self):
135 122 return "\xe9"*50
136 123 import IPython.core.formatters
137 124 f = IPython.core.formatters.PlainTextFormatter()
138 125 f([Spam(),Spam()])
139 126
140 127
141 128 def test_future_flags(self):
142 129 """Check that future flags are used for parsing code (gh-777)"""
143 130 ip.run_cell('from __future__ import print_function')
144 131 try:
145 132 ip.run_cell('prfunc_return_val = print(1,2, sep=" ")')
146 133 assert 'prfunc_return_val' in ip.user_ns
147 134 finally:
148 135 # Reset compiler flags so we don't mess up other tests.
149 136 ip.compile.reset_compiler_flags()
150 137
151 138 def test_future_unicode(self):
152 139 """Check that unicode_literals is imported from __future__ (gh #786)"""
153 140 try:
154 141 ip.run_cell(u'byte_str = "a"')
155 142 assert isinstance(ip.user_ns['byte_str'], str) # string literals are byte strings by default
156 143 ip.run_cell('from __future__ import unicode_literals')
157 144 ip.run_cell(u'unicode_str = "a"')
158 145 assert isinstance(ip.user_ns['unicode_str'], unicode_type) # strings literals are now unicode
159 146 finally:
160 147 # Reset compiler flags so we don't mess up other tests.
161 148 ip.compile.reset_compiler_flags()
162 149
163 150 def test_can_pickle(self):
164 151 "Can we pickle objects defined interactively (GH-29)"
165 152 ip = get_ipython()
166 153 ip.reset()
167 154 ip.run_cell(("class Mylist(list):\n"
168 155 " def __init__(self,x=[]):\n"
169 156 " list.__init__(self,x)"))
170 157 ip.run_cell("w=Mylist([1,2,3])")
171 158
172 159 from pickle import dumps
173 160
174 161 # We need to swap in our main module - this is only necessary
175 162 # inside the test framework, because IPython puts the interactive module
176 163 # in place (but the test framework undoes this).
177 164 _main = sys.modules['__main__']
178 165 sys.modules['__main__'] = ip.user_module
179 166 try:
180 167 res = dumps(ip.user_ns["w"])
181 168 finally:
182 169 sys.modules['__main__'] = _main
183 170 self.assertTrue(isinstance(res, bytes))
184 171
185 172 def test_global_ns(self):
186 173 "Code in functions must be able to access variables outside them."
187 174 ip = get_ipython()
188 175 ip.run_cell("a = 10")
189 176 ip.run_cell(("def f(x):\n"
190 177 " return x + a"))
191 178 ip.run_cell("b = f(12)")
192 179 self.assertEqual(ip.user_ns["b"], 22)
193 180
194 181 def test_bad_custom_tb(self):
195 182 """Check that InteractiveShell is protected from bad custom exception handlers"""
196 183 from IPython.utils import io
197 184 save_stderr = io.stderr
198 185 try:
199 186 # capture stderr
200 187 io.stderr = StringIO()
201 188 ip.set_custom_exc((IOError,), lambda etype,value,tb: 1/0)
202 189 self.assertEqual(ip.custom_exceptions, (IOError,))
203 190 ip.run_cell(u'raise IOError("foo")')
204 191 self.assertEqual(ip.custom_exceptions, ())
205 192 self.assertTrue("Custom TB Handler failed" in io.stderr.getvalue())
206 193 finally:
207 194 io.stderr = save_stderr
208 195
209 196 def test_bad_custom_tb_return(self):
210 197 """Check that InteractiveShell is protected from bad return types in custom exception handlers"""
211 198 from IPython.utils import io
212 199 save_stderr = io.stderr
213 200 try:
214 201 # capture stderr
215 202 io.stderr = StringIO()
216 203 ip.set_custom_exc((NameError,),lambda etype,value,tb, tb_offset=None: 1)
217 204 self.assertEqual(ip.custom_exceptions, (NameError,))
218 205 ip.run_cell(u'a=abracadabra')
219 206 self.assertEqual(ip.custom_exceptions, ())
220 207 self.assertTrue("Custom TB Handler failed" in io.stderr.getvalue())
221 208 finally:
222 209 io.stderr = save_stderr
223 210
224 211 def test_drop_by_id(self):
225 212 myvars = {"a":object(), "b":object(), "c": object()}
226 213 ip.push(myvars, interactive=False)
227 214 for name in myvars:
228 215 assert name in ip.user_ns, name
229 216 assert name in ip.user_ns_hidden, name
230 217 ip.user_ns['b'] = 12
231 218 ip.drop_by_id(myvars)
232 219 for name in ["a", "c"]:
233 220 assert name not in ip.user_ns, name
234 221 assert name not in ip.user_ns_hidden, name
235 222 assert ip.user_ns['b'] == 12
236 223 ip.reset()
237 224
238 225 def test_var_expand(self):
239 226 ip.user_ns['f'] = u'Ca\xf1o'
240 227 self.assertEqual(ip.var_expand(u'echo $f'), u'echo Ca\xf1o')
241 228 self.assertEqual(ip.var_expand(u'echo {f}'), u'echo Ca\xf1o')
242 229 self.assertEqual(ip.var_expand(u'echo {f[:-1]}'), u'echo Ca\xf1')
243 230 self.assertEqual(ip.var_expand(u'echo {1*2}'), u'echo 2')
244 231
245 232 ip.user_ns['f'] = b'Ca\xc3\xb1o'
246 233 # This should not raise any exception:
247 234 ip.var_expand(u'echo $f')
248 235
249 236 def test_var_expand_local(self):
250 237 """Test local variable expansion in !system and %magic calls"""
251 238 # !system
252 239 ip.run_cell('def test():\n'
253 240 ' lvar = "ttt"\n'
254 241 ' ret = !echo {lvar}\n'
255 242 ' return ret[0]\n')
256 243 res = ip.user_ns['test']()
257 244 nt.assert_in('ttt', res)
258 245
259 246 # %magic
260 247 ip.run_cell('def makemacro():\n'
261 248 ' macroname = "macro_var_expand_locals"\n'
262 249 ' %macro {macroname} codestr\n')
263 250 ip.user_ns['codestr'] = "str(12)"
264 251 ip.run_cell('makemacro()')
265 252 nt.assert_in('macro_var_expand_locals', ip.user_ns)
266 253
267 254 def test_var_expand_self(self):
268 255 """Test variable expansion with the name 'self', which was failing.
269 256
270 257 See https://github.com/ipython/ipython/issues/1878#issuecomment-7698218
271 258 """
272 259 ip.run_cell('class cTest:\n'
273 260 ' classvar="see me"\n'
274 261 ' def test(self):\n'
275 262 ' res = !echo Variable: {self.classvar}\n'
276 263 ' return res[0]\n')
277 264 nt.assert_in('see me', ip.user_ns['cTest']().test())
278 265
279 266 def test_bad_var_expand(self):
280 267 """var_expand on invalid formats shouldn't raise"""
281 268 # SyntaxError
282 269 self.assertEqual(ip.var_expand(u"{'a':5}"), u"{'a':5}")
283 270 # NameError
284 271 self.assertEqual(ip.var_expand(u"{asdf}"), u"{asdf}")
285 272 # ZeroDivisionError
286 273 self.assertEqual(ip.var_expand(u"{1/0}"), u"{1/0}")
287 274
288 275 def test_silent_postexec(self):
289 276 """run_cell(silent=True) doesn't invoke pre/post_run_cell callbacks"""
290 277 pre_explicit = mock.Mock()
291 278 pre_always = mock.Mock()
292 279 post_explicit = mock.Mock()
293 280 post_always = mock.Mock()
294 281
295 282 ip.events.register('pre_run_cell', pre_explicit)
296 283 ip.events.register('pre_execute', pre_always)
297 284 ip.events.register('post_run_cell', post_explicit)
298 285 ip.events.register('post_execute', post_always)
299 286
300 287 try:
301 288 ip.run_cell("1", silent=True)
302 289 assert pre_always.called
303 290 assert not pre_explicit.called
304 291 assert post_always.called
305 292 assert not post_explicit.called
306 293 # double-check that non-silent exec did what we expected
307 294 # silent to avoid
308 295 ip.run_cell("1")
309 296 assert pre_explicit.called
310 297 assert post_explicit.called
311 298 finally:
312 299 # remove post-exec
313 300 ip.events.reset_all()
314 301
315 302 def test_silent_noadvance(self):
316 303 """run_cell(silent=True) doesn't advance execution_count"""
317 304 ec = ip.execution_count
318 305 # silent should force store_history=False
319 306 ip.run_cell("1", store_history=True, silent=True)
320 307
321 308 self.assertEqual(ec, ip.execution_count)
322 309 # double-check that non-silent exec did what we expected
323 310 # silent to avoid
324 311 ip.run_cell("1", store_history=True)
325 312 self.assertEqual(ec+1, ip.execution_count)
326 313
327 314 def test_silent_nodisplayhook(self):
328 315 """run_cell(silent=True) doesn't trigger displayhook"""
329 316 d = dict(called=False)
330 317
331 318 trap = ip.display_trap
332 319 save_hook = trap.hook
333 320
334 321 def failing_hook(*args, **kwargs):
335 322 d['called'] = True
336 323
337 324 try:
338 325 trap.hook = failing_hook
339 326 ip.run_cell("1", silent=True)
340 327 self.assertFalse(d['called'])
341 328 # double-check that non-silent exec did what we expected
342 329 # silent to avoid
343 330 ip.run_cell("1")
344 331 self.assertTrue(d['called'])
345 332 finally:
346 333 trap.hook = save_hook
347 334
348 335 @skipif(sys.version_info[0] >= 3, "softspace removed in py3")
349 336 def test_print_softspace(self):
350 337 """Verify that softspace is handled correctly when executing multiple
351 338 statements.
352 339
353 340 In [1]: print 1; print 2
354 341 1
355 342 2
356 343
357 344 In [2]: print 1,; print 2
358 345 1 2
359 346 """
360 347
361 348 def test_ofind_line_magic(self):
362 349 from IPython.core.magic import register_line_magic
363 350
364 351 @register_line_magic
365 352 def lmagic(line):
366 353 "A line magic"
367 354
368 355 # Get info on line magic
369 356 lfind = ip._ofind('lmagic')
370 357 info = dict(found=True, isalias=False, ismagic=True,
371 358 namespace = 'IPython internal', obj= lmagic.__wrapped__,
372 359 parent = None)
373 360 nt.assert_equal(lfind, info)
374 361
375 362 def test_ofind_cell_magic(self):
376 363 from IPython.core.magic import register_cell_magic
377 364
378 365 @register_cell_magic
379 366 def cmagic(line, cell):
380 367 "A cell magic"
381 368
382 369 # Get info on cell magic
383 370 find = ip._ofind('cmagic')
384 371 info = dict(found=True, isalias=False, ismagic=True,
385 372 namespace = 'IPython internal', obj= cmagic.__wrapped__,
386 373 parent = None)
387 374 nt.assert_equal(find, info)
388 375
389 376 def test_custom_exception(self):
390 377 called = []
391 378 def my_handler(shell, etype, value, tb, tb_offset=None):
392 379 called.append(etype)
393 380 shell.showtraceback((etype, value, tb), tb_offset=tb_offset)
394 381
395 382 ip.set_custom_exc((ValueError,), my_handler)
396 383 try:
397 384 ip.run_cell("raise ValueError('test')")
398 385 # Check that this was called, and only once.
399 386 self.assertEqual(called, [ValueError])
400 387 finally:
401 388 # Reset the custom exception hook
402 389 ip.set_custom_exc((), None)
403 390
404 391 @skipif(sys.version_info[0] >= 3, "no differences with __future__ in py3")
405 392 def test_future_environment(self):
406 393 "Can we run code with & without the shell's __future__ imports?"
407 394 ip.run_cell("from __future__ import division")
408 395 ip.run_cell("a = 1/2", shell_futures=True)
409 396 self.assertEqual(ip.user_ns['a'], 0.5)
410 397 ip.run_cell("b = 1/2", shell_futures=False)
411 398 self.assertEqual(ip.user_ns['b'], 0)
412 399
413 400 ip.compile.reset_compiler_flags()
414 401 # This shouldn't leak to the shell's compiler
415 402 ip.run_cell("from __future__ import division \nc=1/2", shell_futures=False)
416 403 self.assertEqual(ip.user_ns['c'], 0.5)
417 404 ip.run_cell("d = 1/2", shell_futures=True)
418 405 self.assertEqual(ip.user_ns['d'], 0)
419 406
420 407
421 408 class TestSafeExecfileNonAsciiPath(unittest.TestCase):
422 409
423 410 @onlyif_unicode_paths
424 411 def setUp(self):
425 412 self.BASETESTDIR = tempfile.mkdtemp()
426 413 self.TESTDIR = join(self.BASETESTDIR, u"åäö")
427 414 os.mkdir(self.TESTDIR)
428 415 with open(join(self.TESTDIR, u"åäötestscript.py"), "w") as sfile:
429 416 sfile.write("pass\n")
430 417 self.oldpath = py3compat.getcwd()
431 418 os.chdir(self.TESTDIR)
432 419 self.fname = u"åäötestscript.py"
433 420
434 421 def tearDown(self):
435 422 os.chdir(self.oldpath)
436 423 shutil.rmtree(self.BASETESTDIR)
437 424
438 425 @onlyif_unicode_paths
439 426 def test_1(self):
440 427 """Test safe_execfile with non-ascii path
441 428 """
442 429 ip.safe_execfile(self.fname, {}, raise_exceptions=True)
443 430
444 431 class ExitCodeChecks(tt.TempFileMixin):
445 432 def test_exit_code_ok(self):
446 433 self.system('exit 0')
447 434 self.assertEqual(ip.user_ns['_exit_code'], 0)
448 435
449 436 def test_exit_code_error(self):
450 437 self.system('exit 1')
451 438 self.assertEqual(ip.user_ns['_exit_code'], 1)
452 439
453 440 @skipif(not hasattr(signal, 'SIGALRM'))
454 441 def test_exit_code_signal(self):
455 442 self.mktmp("import signal, time\n"
456 443 "signal.setitimer(signal.ITIMER_REAL, 0.1)\n"
457 444 "time.sleep(1)\n")
458 445 self.system("%s %s" % (sys.executable, self.fname))
459 446 self.assertEqual(ip.user_ns['_exit_code'], -signal.SIGALRM)
460 447
461 448 class TestSystemRaw(unittest.TestCase, ExitCodeChecks):
462 449 system = ip.system_raw
463 450
464 451 @onlyif_unicode_paths
465 452 def test_1(self):
466 453 """Test system_raw with non-ascii cmd
467 454 """
468 455 cmd = u'''python -c "'åäö'" '''
469 456 ip.system_raw(cmd)
470 457
471 458 # TODO: Exit codes are currently ignored on Windows.
472 459 class TestSystemPipedExitCode(unittest.TestCase, ExitCodeChecks):
473 460 system = ip.system_piped
474 461
475 462 @skip_win32
476 463 def test_exit_code_ok(self):
477 464 ExitCodeChecks.test_exit_code_ok(self)
478 465
479 466 @skip_win32
480 467 def test_exit_code_error(self):
481 468 ExitCodeChecks.test_exit_code_error(self)
482 469
483 470 @skip_win32
484 471 def test_exit_code_signal(self):
485 472 ExitCodeChecks.test_exit_code_signal(self)
486 473
487 474 class TestModules(unittest.TestCase, tt.TempFileMixin):
488 475 def test_extraneous_loads(self):
489 476 """Test we're not loading modules on startup that we shouldn't.
490 477 """
491 478 self.mktmp("import sys\n"
492 479 "print('numpy' in sys.modules)\n"
493 480 "print('IPython.parallel' in sys.modules)\n"
494 481 "print('IPython.kernel.zmq' in sys.modules)\n"
495 482 )
496 483 out = "False\nFalse\nFalse\n"
497 484 tt.ipexec_validate(self.fname, out)
498 485
499 486 class Negator(ast.NodeTransformer):
500 487 """Negates all number literals in an AST."""
501 488 def visit_Num(self, node):
502 489 node.n = -node.n
503 490 return node
504 491
505 492 class TestAstTransform(unittest.TestCase):
506 493 def setUp(self):
507 494 self.negator = Negator()
508 495 ip.ast_transformers.append(self.negator)
509 496
510 497 def tearDown(self):
511 498 ip.ast_transformers.remove(self.negator)
512 499
513 500 def test_run_cell(self):
514 501 with tt.AssertPrints('-34'):
515 502 ip.run_cell('print (12 + 22)')
516 503
517 504 # A named reference to a number shouldn't be transformed.
518 505 ip.user_ns['n'] = 55
519 506 with tt.AssertNotPrints('-55'):
520 507 ip.run_cell('print (n)')
521 508
522 509 def test_timeit(self):
523 510 called = set()
524 511 def f(x):
525 512 called.add(x)
526 513 ip.push({'f':f})
527 514
528 515 with tt.AssertPrints("best of "):
529 516 ip.run_line_magic("timeit", "-n1 f(1)")
530 517 self.assertEqual(called, set([-1]))
531 518 called.clear()
532 519
533 520 with tt.AssertPrints("best of "):
534 521 ip.run_cell_magic("timeit", "-n1 f(2)", "f(3)")
535 522 self.assertEqual(called, set([-2, -3]))
536 523
537 524 def test_time(self):
538 525 called = []
539 526 def f(x):
540 527 called.append(x)
541 528 ip.push({'f':f})
542 529
543 530 # Test with an expression
544 531 with tt.AssertPrints("Wall time: "):
545 532 ip.run_line_magic("time", "f(5+9)")
546 533 self.assertEqual(called, [-14])
547 534 called[:] = []
548 535
549 536 # Test with a statement (different code path)
550 537 with tt.AssertPrints("Wall time: "):
551 538 ip.run_line_magic("time", "a = f(-3 + -2)")
552 539 self.assertEqual(called, [5])
553 540
554 541 def test_macro(self):
555 542 ip.push({'a':10})
556 543 # The AST transformation makes this do a+=-1
557 544 ip.define_macro("amacro", "a+=1\nprint(a)")
558 545
559 546 with tt.AssertPrints("9"):
560 547 ip.run_cell("amacro")
561 548 with tt.AssertPrints("8"):
562 549 ip.run_cell("amacro")
563 550
564 551 class IntegerWrapper(ast.NodeTransformer):
565 552 """Wraps all integers in a call to Integer()"""
566 553 def visit_Num(self, node):
567 554 if isinstance(node.n, int):
568 555 return ast.Call(func=ast.Name(id='Integer', ctx=ast.Load()),
569 556 args=[node], keywords=[])
570 557 return node
571 558
572 559 class TestAstTransform2(unittest.TestCase):
573 560 def setUp(self):
574 561 self.intwrapper = IntegerWrapper()
575 562 ip.ast_transformers.append(self.intwrapper)
576 563
577 564 self.calls = []
578 565 def Integer(*args):
579 566 self.calls.append(args)
580 567 return args
581 568 ip.push({"Integer": Integer})
582 569
583 570 def tearDown(self):
584 571 ip.ast_transformers.remove(self.intwrapper)
585 572 del ip.user_ns['Integer']
586 573
587 574 def test_run_cell(self):
588 575 ip.run_cell("n = 2")
589 576 self.assertEqual(self.calls, [(2,)])
590 577
591 578 # This shouldn't throw an error
592 579 ip.run_cell("o = 2.0")
593 580 self.assertEqual(ip.user_ns['o'], 2.0)
594 581
595 582 def test_timeit(self):
596 583 called = set()
597 584 def f(x):
598 585 called.add(x)
599 586 ip.push({'f':f})
600 587
601 588 with tt.AssertPrints("best of "):
602 589 ip.run_line_magic("timeit", "-n1 f(1)")
603 590 self.assertEqual(called, set([(1,)]))
604 591 called.clear()
605 592
606 593 with tt.AssertPrints("best of "):
607 594 ip.run_cell_magic("timeit", "-n1 f(2)", "f(3)")
608 595 self.assertEqual(called, set([(2,), (3,)]))
609 596
610 597 class ErrorTransformer(ast.NodeTransformer):
611 598 """Throws an error when it sees a number."""
612 599 def visit_Num(self):
613 600 raise ValueError("test")
614 601
615 602 class TestAstTransformError(unittest.TestCase):
616 603 def test_unregistering(self):
617 604 err_transformer = ErrorTransformer()
618 605 ip.ast_transformers.append(err_transformer)
619 606
620 607 with tt.AssertPrints("unregister", channel='stderr'):
621 608 ip.run_cell("1 + 2")
622 609
623 610 # This should have been removed.
624 611 nt.assert_not_in(err_transformer, ip.ast_transformers)
625 612
626 613 def test__IPYTHON__():
627 614 # This shouldn't raise a NameError, that's all
628 615 __IPYTHON__
629 616
630 617
631 618 class DummyRepr(object):
632 619 def __repr__(self):
633 620 return "DummyRepr"
634 621
635 622 def _repr_html_(self):
636 623 return "<b>dummy</b>"
637 624
638 625 def _repr_javascript_(self):
639 626 return "console.log('hi');", {'key': 'value'}
640 627
641 628
642 629 def test_user_variables():
643 630 # enable all formatters
644 631 ip.display_formatter.active_types = ip.display_formatter.format_types
645 632
646 633 ip.user_ns['dummy'] = d = DummyRepr()
647 634 keys = set(['dummy', 'doesnotexist'])
648 r = ip.user_variables(keys)
635 r = ip.user_expressions({ key:key for key in keys})
649 636
650 637 nt.assert_equal(keys, set(r.keys()))
651 638 dummy = r['dummy']
652 639 nt.assert_equal(set(['status', 'data', 'metadata']), set(dummy.keys()))
653 640 nt.assert_equal(dummy['status'], 'ok')
654 641 data = dummy['data']
655 642 metadata = dummy['metadata']
656 643 nt.assert_equal(data.get('text/html'), d._repr_html_())
657 644 js, jsmd = d._repr_javascript_()
658 645 nt.assert_equal(data.get('application/javascript'), js)
659 646 nt.assert_equal(metadata.get('application/javascript'), jsmd)
660 647
661 648 dne = r['doesnotexist']
662 649 nt.assert_equal(dne['status'], 'error')
663 nt.assert_equal(dne['ename'], 'KeyError')
650 nt.assert_equal(dne['ename'], 'NameError')
664 651
665 652 # back to text only
666 653 ip.display_formatter.active_types = ['text/plain']
667 654
668 655 def test_user_expression():
669 656 # enable all formatters
670 657 ip.display_formatter.active_types = ip.display_formatter.format_types
671 658 query = {
672 659 'a' : '1 + 2',
673 660 'b' : '1/0',
674 661 }
675 662 r = ip.user_expressions(query)
676 663 import pprint
677 664 pprint.pprint(r)
678 665 nt.assert_equal(set(r.keys()), set(query.keys()))
679 666 a = r['a']
680 667 nt.assert_equal(set(['status', 'data', 'metadata']), set(a.keys()))
681 668 nt.assert_equal(a['status'], 'ok')
682 669 data = a['data']
683 670 metadata = a['metadata']
684 671 nt.assert_equal(data.get('text/plain'), '3')
685 672
686 673 b = r['b']
687 674 nt.assert_equal(b['status'], 'error')
688 675 nt.assert_equal(b['ename'], 'ZeroDivisionError')
689 676
690 677 # back to text only
691 678 ip.display_formatter.active_types = ['text/plain']
692 679
693 680
694 681
695 682
696 683
697 684 class TestSyntaxErrorTransformer(unittest.TestCase):
698 685 """Check that SyntaxError raised by an input transformer is handled by run_cell()"""
699 686
700 687 class SyntaxErrorTransformer(InputTransformer):
701 688
702 689 def push(self, line):
703 690 pos = line.find('syntaxerror')
704 691 if pos >= 0:
705 692 e = SyntaxError('input contains "syntaxerror"')
706 693 e.text = line
707 694 e.offset = pos + 1
708 695 raise e
709 696 return line
710 697
711 698 def reset(self):
712 699 pass
713 700
714 701 def setUp(self):
715 702 self.transformer = TestSyntaxErrorTransformer.SyntaxErrorTransformer()
716 703 ip.input_splitter.python_line_transforms.append(self.transformer)
717 704 ip.input_transformer_manager.python_line_transforms.append(self.transformer)
718 705
719 706 def tearDown(self):
720 707 ip.input_splitter.python_line_transforms.remove(self.transformer)
721 708 ip.input_transformer_manager.python_line_transforms.remove(self.transformer)
722 709
723 710 def test_syntaxerror_input_transformer(self):
724 711 with tt.AssertPrints('1234'):
725 712 ip.run_cell('1234')
726 713 with tt.AssertPrints('SyntaxError: invalid syntax'):
727 714 ip.run_cell('1 2 3') # plain python syntax error
728 715 with tt.AssertPrints('SyntaxError: input contains "syntaxerror"'):
729 716 ip.run_cell('2345 # syntaxerror') # input transformer syntax error
730 717 with tt.AssertPrints('3456'):
731 718 ip.run_cell('3456')
732 719
733 720
734 721
@@ -1,624 +1,621 b''
1 1 // Copyright (c) IPython Development Team.
2 2 // Distributed under the terms of the Modified BSD License.
3 3
4 4 //============================================================================
5 5 // Kernel
6 6 //============================================================================
7 7
8 8 /**
9 9 * @module IPython
10 10 * @namespace IPython
11 11 * @submodule Kernel
12 12 */
13 13
14 14 var IPython = (function (IPython) {
15 15 "use strict";
16 16
17 17 var utils = IPython.utils;
18 18
19 19 // Initialization and connection.
20 20 /**
21 21 * A Kernel Class to communicate with the Python kernel
22 22 * @Class Kernel
23 23 */
24 24 var Kernel = function (kernel_service_url) {
25 25 this.kernel_id = null;
26 26 this.shell_channel = null;
27 27 this.iopub_channel = null;
28 28 this.stdin_channel = null;
29 29 this.kernel_service_url = kernel_service_url;
30 30 this.running = false;
31 31 this.username = "username";
32 32 this.session_id = utils.uuid();
33 33 this._msg_callbacks = {};
34 34 this.post = $.post;
35 35
36 36 if (typeof(WebSocket) !== 'undefined') {
37 37 this.WebSocket = WebSocket;
38 38 } else if (typeof(MozWebSocket) !== 'undefined') {
39 39 this.WebSocket = MozWebSocket;
40 40 } else {
41 41 alert('Your browser does not have WebSocket support, please try Chrome, Safari or Firefox ≥ 6. Firefox 4 and 5 are also supported by you have to enable WebSockets in about:config.');
42 42 }
43 43
44 44 this.bind_events();
45 45 this.init_iopub_handlers();
46 46 this.comm_manager = new IPython.CommManager(this);
47 47 this.widget_manager = new IPython.WidgetManager(this.comm_manager);
48 48
49 49 this.last_msg_id = null;
50 50 this.last_msg_callbacks = {};
51 51 };
52 52
53 53
54 54 Kernel.prototype._get_msg = function (msg_type, content, metadata) {
55 55 var msg = {
56 56 header : {
57 57 msg_id : utils.uuid(),
58 58 username : this.username,
59 59 session : this.session_id,
60 60 msg_type : msg_type
61 61 },
62 62 metadata : metadata || {},
63 63 content : content,
64 64 parent_header : {}
65 65 };
66 66 return msg;
67 67 };
68 68
69 69 Kernel.prototype.bind_events = function () {
70 70 var that = this;
71 71 $([IPython.events]).on('send_input_reply.Kernel', function(evt, data) {
72 72 that.send_input_reply(data);
73 73 });
74 74 };
75 75
76 76 // Initialize the iopub handlers
77 77
78 78 Kernel.prototype.init_iopub_handlers = function () {
79 79 var output_msg_types = ['stream', 'display_data', 'execute_result', 'error'];
80 80 this._iopub_handlers = {};
81 81 this.register_iopub_handler('status', $.proxy(this._handle_status_message, this));
82 82 this.register_iopub_handler('clear_output', $.proxy(this._handle_clear_output, this));
83 83
84 84 for (var i=0; i < output_msg_types.length; i++) {
85 85 this.register_iopub_handler(output_msg_types[i], $.proxy(this._handle_output_message, this));
86 86 }
87 87 };
88 88
89 89 /**
90 90 * Start the Python kernel
91 91 * @method start
92 92 */
93 93 Kernel.prototype.start = function (params) {
94 94 params = params || {};
95 95 if (!this.running) {
96 96 var qs = $.param(params);
97 97 this.post(utils.url_join_encode(this.kernel_service_url) + '?' + qs,
98 98 $.proxy(this._kernel_started, this),
99 99 'json'
100 100 );
101 101 }
102 102 };
103 103
104 104 /**
105 105 * Restart the python kernel.
106 106 *
107 107 * Emit a 'status_restarting.Kernel' event with
108 108 * the current object as parameter
109 109 *
110 110 * @method restart
111 111 */
112 112 Kernel.prototype.restart = function () {
113 113 $([IPython.events]).trigger('status_restarting.Kernel', {kernel: this});
114 114 if (this.running) {
115 115 this.stop_channels();
116 116 this.post(utils.url_join_encode(this.kernel_url, "restart"),
117 117 $.proxy(this._kernel_started, this),
118 118 'json'
119 119 );
120 120 }
121 121 };
122 122
123 123
124 124 Kernel.prototype._kernel_started = function (json) {
125 125 console.log("Kernel started: ", json.id);
126 126 this.running = true;
127 127 this.kernel_id = json.id;
128 128 // trailing 's' in https will become wss for secure web sockets
129 129 this.ws_host = location.protocol.replace('http', 'ws') + "//" + location.host;
130 130 this.kernel_url = utils.url_path_join(this.kernel_service_url, this.kernel_id);
131 131 this.start_channels();
132 132 };
133 133
134 134
135 135 Kernel.prototype._websocket_closed = function(ws_url, early) {
136 136 this.stop_channels();
137 137 $([IPython.events]).trigger('websocket_closed.Kernel',
138 138 {ws_url: ws_url, kernel: this, early: early}
139 139 );
140 140 };
141 141
142 142 /**
143 143 * Start the `shell`and `iopub` channels.
144 144 * Will stop and restart them if they already exist.
145 145 *
146 146 * @method start_channels
147 147 */
148 148 Kernel.prototype.start_channels = function () {
149 149 var that = this;
150 150 this.stop_channels();
151 151 var ws_host_url = this.ws_host + this.kernel_url;
152 152 console.log("Starting WebSockets:", ws_host_url);
153 153 this.shell_channel = new this.WebSocket(
154 154 this.ws_host + utils.url_join_encode(this.kernel_url, "shell")
155 155 );
156 156 this.stdin_channel = new this.WebSocket(
157 157 this.ws_host + utils.url_join_encode(this.kernel_url, "stdin")
158 158 );
159 159 this.iopub_channel = new this.WebSocket(
160 160 this.ws_host + utils.url_join_encode(this.kernel_url, "iopub")
161 161 );
162 162
163 163 var already_called_onclose = false; // only alert once
164 164 var ws_closed_early = function(evt){
165 165 if (already_called_onclose){
166 166 return;
167 167 }
168 168 already_called_onclose = true;
169 169 if ( ! evt.wasClean ){
170 170 that._websocket_closed(ws_host_url, true);
171 171 }
172 172 };
173 173 var ws_closed_late = function(evt){
174 174 if (already_called_onclose){
175 175 return;
176 176 }
177 177 already_called_onclose = true;
178 178 if ( ! evt.wasClean ){
179 179 that._websocket_closed(ws_host_url, false);
180 180 }
181 181 };
182 182 var channels = [this.shell_channel, this.iopub_channel, this.stdin_channel];
183 183 for (var i=0; i < channels.length; i++) {
184 184 channels[i].onopen = $.proxy(this._ws_opened, this);
185 185 channels[i].onclose = ws_closed_early;
186 186 }
187 187 // switch from early-close to late-close message after 1s
188 188 setTimeout(function() {
189 189 for (var i=0; i < channels.length; i++) {
190 190 if (channels[i] !== null) {
191 191 channels[i].onclose = ws_closed_late;
192 192 }
193 193 }
194 194 }, 1000);
195 195 this.shell_channel.onmessage = $.proxy(this._handle_shell_reply, this);
196 196 this.iopub_channel.onmessage = $.proxy(this._handle_iopub_message, this);
197 197 this.stdin_channel.onmessage = $.proxy(this._handle_input_request, this);
198 198 };
199 199
200 200 /**
201 201 * Handle a websocket entering the open state
202 202 * sends session and cookie authentication info as first message.
203 203 * Once all sockets are open, signal the Kernel.status_started event.
204 204 * @method _ws_opened
205 205 */
206 206 Kernel.prototype._ws_opened = function (evt) {
207 207 // send the session id so the Session object Python-side
208 208 // has the same identity
209 209 evt.target.send(this.session_id + ':' + document.cookie);
210 210
211 211 var channels = [this.shell_channel, this.iopub_channel, this.stdin_channel];
212 212 for (var i=0; i < channels.length; i++) {
213 213 // if any channel is not ready, don't trigger event.
214 214 if ( !channels[i].readyState ) return;
215 215 }
216 216 // all events ready, trigger started event.
217 217 $([IPython.events]).trigger('status_started.Kernel', {kernel: this});
218 218 };
219 219
220 220 /**
221 221 * Stop the websocket channels.
222 222 * @method stop_channels
223 223 */
224 224 Kernel.prototype.stop_channels = function () {
225 225 var channels = [this.shell_channel, this.iopub_channel, this.stdin_channel];
226 226 for (var i=0; i < channels.length; i++) {
227 227 if ( channels[i] !== null ) {
228 228 channels[i].onclose = null;
229 229 channels[i].close();
230 230 }
231 231 }
232 232 this.shell_channel = this.iopub_channel = this.stdin_channel = null;
233 233 };
234 234
235 235 // Main public methods.
236 236
237 237 // send a message on the Kernel's shell channel
238 238 Kernel.prototype.send_shell_message = function (msg_type, content, callbacks, metadata) {
239 239 var msg = this._get_msg(msg_type, content, metadata);
240 240 this.shell_channel.send(JSON.stringify(msg));
241 241 this.set_callbacks_for_msg(msg.header.msg_id, callbacks);
242 242 return msg.header.msg_id;
243 243 };
244 244
245 245 /**
246 246 * Get kernel info
247 247 *
248 248 * @param callback {function}
249 249 * @method object_info
250 250 *
251 251 * When calling this method, pass a callback function that expects one argument.
252 252 * The callback will be passed the complete `kernel_info_reply` message documented
253 253 * [here](http://ipython.org/ipython-doc/dev/development/messaging.html#kernel-info)
254 254 */
255 255 Kernel.prototype.kernel_info = function (callback) {
256 256 var callbacks;
257 257 if (callback) {
258 258 callbacks = { shell : { reply : callback } };
259 259 }
260 260 return this.send_shell_message("kernel_info_request", {}, callbacks);
261 261 };
262 262
263 263 /**
264 264 * Get info on an object
265 265 *
266 266 * @param objname {string}
267 267 * @param callback {function}
268 268 * @method object_info
269 269 *
270 270 * When calling this method, pass a callback function that expects one argument.
271 271 * The callback will be passed the complete `object_info_reply` message documented
272 272 * [here](http://ipython.org/ipython-doc/dev/development/messaging.html#object-information)
273 273 */
274 274 Kernel.prototype.object_info = function (objname, callback) {
275 275 var callbacks;
276 276 if (callback) {
277 277 callbacks = { shell : { reply : callback } };
278 278 }
279 279
280 280 if (typeof(objname) !== null && objname !== null) {
281 281 var content = {
282 282 oname : objname.toString(),
283 283 detail_level : 0,
284 284 };
285 285 return this.send_shell_message("object_info_request", content, callbacks);
286 286 }
287 287 return;
288 288 };
289 289
290 290 /**
291 291 * Execute given code into kernel, and pass result to callback.
292 292 *
293 293 * @async
294 294 * @method execute
295 295 * @param {string} code
296 296 * @param [callbacks] {Object} With the following keys (all optional)
297 297 * @param callbacks.shell.reply {function}
298 298 * @param callbacks.shell.payload.[payload_name] {function}
299 299 * @param callbacks.iopub.output {function}
300 300 * @param callbacks.iopub.clear_output {function}
301 301 * @param callbacks.input {function}
302 302 * @param {object} [options]
303 303 * @param [options.silent=false] {Boolean}
304 304 * @param [options.user_expressions=empty_dict] {Dict}
305 * @param [options.user_variables=empty_list] {List od Strings}
306 305 * @param [options.allow_stdin=false] {Boolean} true|false
307 306 *
308 307 * @example
309 308 *
310 309 * The options object should contain the options for the execute call. Its default
311 310 * values are:
312 311 *
313 312 * options = {
314 313 * silent : true,
315 * user_variables : [],
316 314 * user_expressions : {},
317 315 * allow_stdin : false
318 316 * }
319 317 *
320 318 * When calling this method pass a callbacks structure of the form:
321 319 *
322 320 * callbacks = {
323 321 * shell : {
324 322 * reply : execute_reply_callback,
325 323 * payload : {
326 324 * set_next_input : set_next_input_callback,
327 325 * }
328 326 * },
329 327 * iopub : {
330 328 * output : output_callback,
331 329 * clear_output : clear_output_callback,
332 330 * },
333 331 * input : raw_input_callback
334 332 * }
335 333 *
336 334 * Each callback will be passed the entire message as a single arugment.
337 335 * Payload handlers will be passed the corresponding payload and the execute_reply message.
338 336 */
339 337 Kernel.prototype.execute = function (code, callbacks, options) {
340 338
341 339 var content = {
342 340 code : code,
343 341 silent : true,
344 342 store_history : false,
345 user_variables : [],
346 343 user_expressions : {},
347 344 allow_stdin : false
348 345 };
349 346 callbacks = callbacks || {};
350 347 if (callbacks.input !== undefined) {
351 348 content.allow_stdin = true;
352 349 }
353 350 $.extend(true, content, options);
354 351 $([IPython.events]).trigger('execution_request.Kernel', {kernel: this, content:content});
355 352 return this.send_shell_message("execute_request", content, callbacks);
356 353 };
357 354
358 355 /**
359 356 * When calling this method, pass a function to be called with the `complete_reply` message
360 357 * as its only argument when it arrives.
361 358 *
362 359 * `complete_reply` is documented
363 360 * [here](http://ipython.org/ipython-doc/dev/development/messaging.html#complete)
364 361 *
365 362 * @method complete
366 363 * @param line {integer}
367 364 * @param cursor_pos {integer}
368 365 * @param callback {function}
369 366 *
370 367 */
371 368 Kernel.prototype.complete = function (line, cursor_pos, callback) {
372 369 var callbacks;
373 370 if (callback) {
374 371 callbacks = { shell : { reply : callback } };
375 372 }
376 373 var content = {
377 374 text : '',
378 375 line : line,
379 376 block : null,
380 377 cursor_pos : cursor_pos
381 378 };
382 379 return this.send_shell_message("complete_request", content, callbacks);
383 380 };
384 381
385 382
386 383 Kernel.prototype.interrupt = function () {
387 384 if (this.running) {
388 385 $([IPython.events]).trigger('status_interrupting.Kernel', {kernel: this});
389 386 this.post(utils.url_join_encode(this.kernel_url, "interrupt"));
390 387 }
391 388 };
392 389
393 390
394 391 Kernel.prototype.kill = function () {
395 392 if (this.running) {
396 393 this.running = false;
397 394 var settings = {
398 395 cache : false,
399 396 type : "DELETE",
400 397 error : utils.log_ajax_error,
401 398 };
402 399 $.ajax(utils.url_join_encode(this.kernel_url), settings);
403 400 }
404 401 };
405 402
406 403 Kernel.prototype.send_input_reply = function (input) {
407 404 var content = {
408 405 value : input,
409 406 };
410 407 $([IPython.events]).trigger('input_reply.Kernel', {kernel: this, content:content});
411 408 var msg = this._get_msg("input_reply", content);
412 409 this.stdin_channel.send(JSON.stringify(msg));
413 410 return msg.header.msg_id;
414 411 };
415 412
416 413
417 414 // Reply handlers
418 415
419 416 Kernel.prototype.register_iopub_handler = function (msg_type, callback) {
420 417 this._iopub_handlers[msg_type] = callback;
421 418 };
422 419
423 420 Kernel.prototype.get_iopub_handler = function (msg_type) {
424 421 // get iopub handler for a specific message type
425 422 return this._iopub_handlers[msg_type];
426 423 };
427 424
428 425
429 426 Kernel.prototype.get_callbacks_for_msg = function (msg_id) {
430 427 // get callbacks for a specific message
431 428 if (msg_id == this.last_msg_id) {
432 429 return this.last_msg_callbacks;
433 430 } else {
434 431 return this._msg_callbacks[msg_id];
435 432 }
436 433 };
437 434
438 435
439 436 Kernel.prototype.clear_callbacks_for_msg = function (msg_id) {
440 437 if (this._msg_callbacks[msg_id] !== undefined ) {
441 438 delete this._msg_callbacks[msg_id];
442 439 }
443 440 };
444 441
445 442 Kernel.prototype._finish_shell = function (msg_id) {
446 443 var callbacks = this._msg_callbacks[msg_id];
447 444 if (callbacks !== undefined) {
448 445 callbacks.shell_done = true;
449 446 if (callbacks.iopub_done) {
450 447 this.clear_callbacks_for_msg(msg_id);
451 448 }
452 449 }
453 450 };
454 451
455 452 Kernel.prototype._finish_iopub = function (msg_id) {
456 453 var callbacks = this._msg_callbacks[msg_id];
457 454 if (callbacks !== undefined) {
458 455 callbacks.iopub_done = true;
459 456 if (!callbacks.shell_done) {
460 457 this.clear_callbacks_for_msg(msg_id);
461 458 }
462 459 }
463 460 };
464 461
465 462 /* Set callbacks for a particular message.
466 463 * Callbacks should be a struct of the following form:
467 464 * shell : {
468 465 *
469 466 * }
470 467
471 468 */
472 469 Kernel.prototype.set_callbacks_for_msg = function (msg_id, callbacks) {
473 470 this.last_msg_id = msg_id;
474 471 if (callbacks) {
475 472 // shallow-copy mapping, because we will modify it at the top level
476 473 var cbcopy = this._msg_callbacks[msg_id] = this.last_msg_callbacks = {};
477 474 cbcopy.shell = callbacks.shell;
478 475 cbcopy.iopub = callbacks.iopub;
479 476 cbcopy.input = callbacks.input;
480 477 cbcopy.shell_done = (!callbacks.shell);
481 478 cbcopy.iopub_done = (!callbacks.iopub);
482 479 } else {
483 480 this.last_msg_callbacks = {};
484 481 }
485 482 };
486 483
487 484
488 485 Kernel.prototype._handle_shell_reply = function (e) {
489 486 var reply = $.parseJSON(e.data);
490 487 $([IPython.events]).trigger('shell_reply.Kernel', {kernel: this, reply:reply});
491 488 var content = reply.content;
492 489 var metadata = reply.metadata;
493 490 var parent_id = reply.parent_header.msg_id;
494 491 var callbacks = this.get_callbacks_for_msg(parent_id);
495 492 if (!callbacks || !callbacks.shell) {
496 493 return;
497 494 }
498 495 var shell_callbacks = callbacks.shell;
499 496
500 497 // signal that shell callbacks are done
501 498 this._finish_shell(parent_id);
502 499
503 500 if (shell_callbacks.reply !== undefined) {
504 501 shell_callbacks.reply(reply);
505 502 }
506 503 if (content.payload && shell_callbacks.payload) {
507 504 this._handle_payloads(content.payload, shell_callbacks.payload, reply);
508 505 }
509 506 };
510 507
511 508
512 509 Kernel.prototype._handle_payloads = function (payloads, payload_callbacks, msg) {
513 510 var l = payloads.length;
514 511 // Payloads are handled by triggering events because we don't want the Kernel
515 512 // to depend on the Notebook or Pager classes.
516 513 for (var i=0; i<l; i++) {
517 514 var payload = payloads[i];
518 515 var callback = payload_callbacks[payload.source];
519 516 if (callback) {
520 517 callback(payload, msg);
521 518 }
522 519 }
523 520 };
524 521
525 522 Kernel.prototype._handle_status_message = function (msg) {
526 523 var execution_state = msg.content.execution_state;
527 524 var parent_id = msg.parent_header.msg_id;
528 525
529 526 // dispatch status msg callbacks, if any
530 527 var callbacks = this.get_callbacks_for_msg(parent_id);
531 528 if (callbacks && callbacks.iopub && callbacks.iopub.status) {
532 529 try {
533 530 callbacks.iopub.status(msg);
534 531 } catch (e) {
535 532 console.log("Exception in status msg handler", e, e.stack);
536 533 }
537 534 }
538 535
539 536 if (execution_state === 'busy') {
540 537 $([IPython.events]).trigger('status_busy.Kernel', {kernel: this});
541 538 } else if (execution_state === 'idle') {
542 539 // signal that iopub callbacks are (probably) done
543 540 // async output may still arrive,
544 541 // but only for the most recent request
545 542 this._finish_iopub(parent_id);
546 543
547 544 // trigger status_idle event
548 545 $([IPython.events]).trigger('status_idle.Kernel', {kernel: this});
549 546 } else if (execution_state === 'restarting') {
550 547 // autorestarting is distinct from restarting,
551 548 // in that it means the kernel died and the server is restarting it.
552 549 // status_restarting sets the notification widget,
553 550 // autorestart shows the more prominent dialog.
554 551 $([IPython.events]).trigger('status_autorestarting.Kernel', {kernel: this});
555 552 $([IPython.events]).trigger('status_restarting.Kernel', {kernel: this});
556 553 } else if (execution_state === 'dead') {
557 554 this.stop_channels();
558 555 $([IPython.events]).trigger('status_dead.Kernel', {kernel: this});
559 556 }
560 557 };
561 558
562 559
563 560 // handle clear_output message
564 561 Kernel.prototype._handle_clear_output = function (msg) {
565 562 var callbacks = this.get_callbacks_for_msg(msg.parent_header.msg_id);
566 563 if (!callbacks || !callbacks.iopub) {
567 564 return;
568 565 }
569 566 var callback = callbacks.iopub.clear_output;
570 567 if (callback) {
571 568 callback(msg);
572 569 }
573 570 };
574 571
575 572
576 573 // handle an output message (execute_result, display_data, etc.)
577 574 Kernel.prototype._handle_output_message = function (msg) {
578 575 var callbacks = this.get_callbacks_for_msg(msg.parent_header.msg_id);
579 576 if (!callbacks || !callbacks.iopub) {
580 577 return;
581 578 }
582 579 var callback = callbacks.iopub.output;
583 580 if (callback) {
584 581 callback(msg);
585 582 }
586 583 };
587 584
588 585 // dispatch IOPub messages to respective handlers.
589 586 // each message type should have a handler.
590 587 Kernel.prototype._handle_iopub_message = function (e) {
591 588 var msg = $.parseJSON(e.data);
592 589
593 590 var handler = this.get_iopub_handler(msg.header.msg_type);
594 591 if (handler !== undefined) {
595 592 handler(msg);
596 593 }
597 594 };
598 595
599 596
600 597 Kernel.prototype._handle_input_request = function (e) {
601 598 var request = $.parseJSON(e.data);
602 599 var header = request.header;
603 600 var content = request.content;
604 601 var metadata = request.metadata;
605 602 var msg_type = header.msg_type;
606 603 if (msg_type !== 'input_request') {
607 604 console.log("Invalid input request!", request);
608 605 return;
609 606 }
610 607 var callbacks = this.get_callbacks_for_msg(request.parent_header.msg_id);
611 608 if (callbacks) {
612 609 if (callbacks.input) {
613 610 callbacks.input(request);
614 611 }
615 612 }
616 613 };
617 614
618 615
619 616 IPython.Kernel = Kernel;
620 617
621 618 return IPython;
622 619
623 620 }(IPython));
624 621
@@ -1,637 +1,618 b''
1 """Base classes to manage a Client's interaction with a running kernel
2 """
1 """Base classes to manage a Client's interaction with a running kernel"""
3 2
4 #-----------------------------------------------------------------------------
5 # Copyright (C) 2013 The IPython Development Team
6 #
7 # Distributed under the terms of the BSD License. The full license is in
8 # the file COPYING, distributed as part of this software.
9 #-----------------------------------------------------------------------------
10
11 #-----------------------------------------------------------------------------
12 # Imports
13 #-----------------------------------------------------------------------------
3 # Copyright (c) IPython Development Team.
4 # Distributed under the terms of the Modified BSD License.
14 5
15 6 from __future__ import absolute_import
16 7
17 # Standard library imports
18 8 import atexit
19 9 import errno
20 10 from threading import Thread
21 11 import time
22 12
23 13 import zmq
24 14 # import ZMQError in top-level namespace, to avoid ugly attribute-error messages
25 15 # during garbage collection of threads at exit:
26 16 from zmq import ZMQError
27 17 from zmq.eventloop import ioloop, zmqstream
28 18
29 19 # Local imports
30 20 from .channelsabc import (
31 21 ShellChannelABC, IOPubChannelABC,
32 22 HBChannelABC, StdInChannelABC,
33 23 )
34 24 from IPython.utils.py3compat import string_types, iteritems
35 25
36 26 #-----------------------------------------------------------------------------
37 27 # Constants and exceptions
38 28 #-----------------------------------------------------------------------------
39 29
40 30 class InvalidPortNumber(Exception):
41 31 pass
42 32
43 33 #-----------------------------------------------------------------------------
44 34 # Utility functions
45 35 #-----------------------------------------------------------------------------
46 36
47 37 # some utilities to validate message structure, these might get moved elsewhere
48 38 # if they prove to have more generic utility
49 39
50 40 def validate_string_list(lst):
51 41 """Validate that the input is a list of strings.
52 42
53 43 Raises ValueError if not."""
54 44 if not isinstance(lst, list):
55 45 raise ValueError('input %r must be a list' % lst)
56 46 for x in lst:
57 47 if not isinstance(x, string_types):
58 48 raise ValueError('element %r in list must be a string' % x)
59 49
60 50
61 51 def validate_string_dict(dct):
62 52 """Validate that the input is a dict with string keys and values.
63 53
64 54 Raises ValueError if not."""
65 55 for k,v in iteritems(dct):
66 56 if not isinstance(k, string_types):
67 57 raise ValueError('key %r in dict must be a string' % k)
68 58 if not isinstance(v, string_types):
69 59 raise ValueError('value %r in dict must be a string' % v)
70 60
71 61
72 62 #-----------------------------------------------------------------------------
73 63 # ZMQ Socket Channel classes
74 64 #-----------------------------------------------------------------------------
75 65
76 66 class ZMQSocketChannel(Thread):
77 67 """The base class for the channels that use ZMQ sockets."""
78 68 context = None
79 69 session = None
80 70 socket = None
81 71 ioloop = None
82 72 stream = None
83 73 _address = None
84 74 _exiting = False
85 75 proxy_methods = []
86 76
87 77 def __init__(self, context, session, address):
88 78 """Create a channel.
89 79
90 80 Parameters
91 81 ----------
92 82 context : :class:`zmq.Context`
93 83 The ZMQ context to use.
94 84 session : :class:`session.Session`
95 85 The session to use.
96 86 address : zmq url
97 87 Standard (ip, port) tuple that the kernel is listening on.
98 88 """
99 89 super(ZMQSocketChannel, self).__init__()
100 90 self.daemon = True
101 91
102 92 self.context = context
103 93 self.session = session
104 94 if isinstance(address, tuple):
105 95 if address[1] == 0:
106 96 message = 'The port number for a channel cannot be 0.'
107 97 raise InvalidPortNumber(message)
108 98 address = "tcp://%s:%i" % address
109 99 self._address = address
110 100 atexit.register(self._notice_exit)
111 101
112 102 def _notice_exit(self):
113 103 self._exiting = True
114 104
115 105 def _run_loop(self):
116 106 """Run my loop, ignoring EINTR events in the poller"""
117 107 while True:
118 108 try:
119 109 self.ioloop.start()
120 110 except ZMQError as e:
121 111 if e.errno == errno.EINTR:
122 112 continue
123 113 else:
124 114 raise
125 115 except Exception:
126 116 if self._exiting:
127 117 break
128 118 else:
129 119 raise
130 120 else:
131 121 break
132 122
133 123 def stop(self):
134 124 """Stop the channel's event loop and join its thread.
135 125
136 126 This calls :meth:`~threading.Thread.join` and returns when the thread
137 127 terminates. :class:`RuntimeError` will be raised if
138 128 :meth:`~threading.Thread.start` is called again.
139 129 """
140 130 if self.ioloop is not None:
141 131 self.ioloop.stop()
142 132 self.join()
143 133 self.close()
144 134
145 135 def close(self):
146 136 if self.ioloop is not None:
147 137 try:
148 138 self.ioloop.close(all_fds=True)
149 139 except Exception:
150 140 pass
151 141 if self.socket is not None:
152 142 try:
153 143 self.socket.close(linger=0)
154 144 except Exception:
155 145 pass
156 146 self.socket = None
157 147
158 148 @property
159 149 def address(self):
160 150 """Get the channel's address as a zmq url string.
161 151
162 152 These URLS have the form: 'tcp://127.0.0.1:5555'.
163 153 """
164 154 return self._address
165 155
166 156 def _queue_send(self, msg):
167 157 """Queue a message to be sent from the IOLoop's thread.
168 158
169 159 Parameters
170 160 ----------
171 161 msg : message to send
172 162
173 163 This is threadsafe, as it uses IOLoop.add_callback to give the loop's
174 164 thread control of the action.
175 165 """
176 166 def thread_send():
177 167 self.session.send(self.stream, msg)
178 168 self.ioloop.add_callback(thread_send)
179 169
180 170 def _handle_recv(self, msg):
181 171 """Callback for stream.on_recv.
182 172
183 173 Unpacks message, and calls handlers with it.
184 174 """
185 175 ident,smsg = self.session.feed_identities(msg)
186 176 self.call_handlers(self.session.unserialize(smsg))
187 177
188 178
189 179
190 180 class ShellChannel(ZMQSocketChannel):
191 181 """The shell channel for issuing request/replies to the kernel."""
192 182
193 183 command_queue = None
194 184 # flag for whether execute requests should be allowed to call raw_input:
195 185 allow_stdin = True
196 186 proxy_methods = [
197 187 'execute',
198 188 'complete',
199 189 'object_info',
200 190 'history',
201 191 'kernel_info',
202 192 'shutdown',
203 193 ]
204 194
205 195 def __init__(self, context, session, address):
206 196 super(ShellChannel, self).__init__(context, session, address)
207 197 self.ioloop = ioloop.IOLoop()
208 198
209 199 def run(self):
210 200 """The thread's main activity. Call start() instead."""
211 201 self.socket = self.context.socket(zmq.DEALER)
212 202 self.socket.linger = 1000
213 203 self.socket.setsockopt(zmq.IDENTITY, self.session.bsession)
214 204 self.socket.connect(self.address)
215 205 self.stream = zmqstream.ZMQStream(self.socket, self.ioloop)
216 206 self.stream.on_recv(self._handle_recv)
217 207 self._run_loop()
218 208
219 209 def call_handlers(self, msg):
220 210 """This method is called in the ioloop thread when a message arrives.
221 211
222 212 Subclasses should override this method to handle incoming messages.
223 213 It is important to remember that this method is called in the thread
224 214 so that some logic must be done to ensure that the application level
225 215 handlers are called in the application thread.
226 216 """
227 217 raise NotImplementedError('call_handlers must be defined in a subclass.')
228 218
229 219 def execute(self, code, silent=False, store_history=True,
230 user_variables=None, user_expressions=None, allow_stdin=None):
220 user_expressions=None, allow_stdin=None):
231 221 """Execute code in the kernel.
232 222
233 223 Parameters
234 224 ----------
235 225 code : str
236 226 A string of Python code.
237 227
238 228 silent : bool, optional (default False)
239 229 If set, the kernel will execute the code as quietly possible, and
240 230 will force store_history to be False.
241 231
242 232 store_history : bool, optional (default True)
243 233 If set, the kernel will store command history. This is forced
244 234 to be False if silent is True.
245 235
246 user_variables : list, optional
247 A list of variable names to pull from the user's namespace. They
248 will come back as a dict with these names as keys and their
249 :func:`repr` as values.
250
251 236 user_expressions : dict, optional
252 237 A dict mapping names to expressions to be evaluated in the user's
253 238 dict. The expression values are returned as strings formatted using
254 239 :func:`repr`.
255 240
256 241 allow_stdin : bool, optional (default self.allow_stdin)
257 242 Flag for whether the kernel can send stdin requests to frontends.
258 243
259 244 Some frontends (e.g. the Notebook) do not support stdin requests.
260 245 If raw_input is called from code executed from such a frontend, a
261 246 StdinNotImplementedError will be raised.
262 247
263 248 Returns
264 249 -------
265 250 The msg_id of the message sent.
266 251 """
267 if user_variables is None:
268 user_variables = []
269 252 if user_expressions is None:
270 253 user_expressions = {}
271 254 if allow_stdin is None:
272 255 allow_stdin = self.allow_stdin
273 256
274 257
275 258 # Don't waste network traffic if inputs are invalid
276 259 if not isinstance(code, string_types):
277 260 raise ValueError('code %r must be a string' % code)
278 validate_string_list(user_variables)
279 261 validate_string_dict(user_expressions)
280 262
281 263 # Create class for content/msg creation. Related to, but possibly
282 264 # not in Session.
283 265 content = dict(code=code, silent=silent, store_history=store_history,
284 user_variables=user_variables,
285 266 user_expressions=user_expressions,
286 267 allow_stdin=allow_stdin,
287 268 )
288 269 msg = self.session.msg('execute_request', content)
289 270 self._queue_send(msg)
290 271 return msg['header']['msg_id']
291 272
292 273 def complete(self, text, line, cursor_pos, block=None):
293 274 """Tab complete text in the kernel's namespace.
294 275
295 276 Parameters
296 277 ----------
297 278 text : str
298 279 The text to complete.
299 280 line : str
300 281 The full line of text that is the surrounding context for the
301 282 text to complete.
302 283 cursor_pos : int
303 284 The position of the cursor in the line where the completion was
304 285 requested.
305 286 block : str, optional
306 287 The full block of code in which the completion is being requested.
307 288
308 289 Returns
309 290 -------
310 291 The msg_id of the message sent.
311 292 """
312 293 content = dict(text=text, line=line, block=block, cursor_pos=cursor_pos)
313 294 msg = self.session.msg('complete_request', content)
314 295 self._queue_send(msg)
315 296 return msg['header']['msg_id']
316 297
317 298 def object_info(self, oname, detail_level=0):
318 299 """Get metadata information about an object in the kernel's namespace.
319 300
320 301 Parameters
321 302 ----------
322 303 oname : str
323 304 A string specifying the object name.
324 305 detail_level : int, optional
325 306 The level of detail for the introspection (0-2)
326 307
327 308 Returns
328 309 -------
329 310 The msg_id of the message sent.
330 311 """
331 312 content = dict(oname=oname, detail_level=detail_level)
332 313 msg = self.session.msg('object_info_request', content)
333 314 self._queue_send(msg)
334 315 return msg['header']['msg_id']
335 316
336 317 def history(self, raw=True, output=False, hist_access_type='range', **kwargs):
337 318 """Get entries from the kernel's history list.
338 319
339 320 Parameters
340 321 ----------
341 322 raw : bool
342 323 If True, return the raw input.
343 324 output : bool
344 325 If True, then return the output as well.
345 326 hist_access_type : str
346 327 'range' (fill in session, start and stop params), 'tail' (fill in n)
347 328 or 'search' (fill in pattern param).
348 329
349 330 session : int
350 331 For a range request, the session from which to get lines. Session
351 332 numbers are positive integers; negative ones count back from the
352 333 current session.
353 334 start : int
354 335 The first line number of a history range.
355 336 stop : int
356 337 The final (excluded) line number of a history range.
357 338
358 339 n : int
359 340 The number of lines of history to get for a tail request.
360 341
361 342 pattern : str
362 343 The glob-syntax pattern for a search request.
363 344
364 345 Returns
365 346 -------
366 347 The msg_id of the message sent.
367 348 """
368 349 content = dict(raw=raw, output=output, hist_access_type=hist_access_type,
369 350 **kwargs)
370 351 msg = self.session.msg('history_request', content)
371 352 self._queue_send(msg)
372 353 return msg['header']['msg_id']
373 354
374 355 def kernel_info(self):
375 356 """Request kernel info."""
376 357 msg = self.session.msg('kernel_info_request')
377 358 self._queue_send(msg)
378 359 return msg['header']['msg_id']
379 360
380 361 def shutdown(self, restart=False):
381 362 """Request an immediate kernel shutdown.
382 363
383 364 Upon receipt of the (empty) reply, client code can safely assume that
384 365 the kernel has shut down and it's safe to forcefully terminate it if
385 366 it's still alive.
386 367
387 368 The kernel will send the reply via a function registered with Python's
388 369 atexit module, ensuring it's truly done as the kernel is done with all
389 370 normal operation.
390 371 """
391 372 # Send quit message to kernel. Once we implement kernel-side setattr,
392 373 # this should probably be done that way, but for now this will do.
393 374 msg = self.session.msg('shutdown_request', {'restart':restart})
394 375 self._queue_send(msg)
395 376 return msg['header']['msg_id']
396 377
397 378
398 379
399 380 class IOPubChannel(ZMQSocketChannel):
400 381 """The iopub channel which listens for messages that the kernel publishes.
401 382
402 383 This channel is where all output is published to frontends.
403 384 """
404 385
405 386 def __init__(self, context, session, address):
406 387 super(IOPubChannel, self).__init__(context, session, address)
407 388 self.ioloop = ioloop.IOLoop()
408 389
409 390 def run(self):
410 391 """The thread's main activity. Call start() instead."""
411 392 self.socket = self.context.socket(zmq.SUB)
412 393 self.socket.linger = 1000
413 394 self.socket.setsockopt(zmq.SUBSCRIBE,b'')
414 395 self.socket.setsockopt(zmq.IDENTITY, self.session.bsession)
415 396 self.socket.connect(self.address)
416 397 self.stream = zmqstream.ZMQStream(self.socket, self.ioloop)
417 398 self.stream.on_recv(self._handle_recv)
418 399 self._run_loop()
419 400
420 401 def call_handlers(self, msg):
421 402 """This method is called in the ioloop thread when a message arrives.
422 403
423 404 Subclasses should override this method to handle incoming messages.
424 405 It is important to remember that this method is called in the thread
425 406 so that some logic must be done to ensure that the application leve
426 407 handlers are called in the application thread.
427 408 """
428 409 raise NotImplementedError('call_handlers must be defined in a subclass.')
429 410
430 411 def flush(self, timeout=1.0):
431 412 """Immediately processes all pending messages on the iopub channel.
432 413
433 414 Callers should use this method to ensure that :meth:`call_handlers`
434 415 has been called for all messages that have been received on the
435 416 0MQ SUB socket of this channel.
436 417
437 418 This method is thread safe.
438 419
439 420 Parameters
440 421 ----------
441 422 timeout : float, optional
442 423 The maximum amount of time to spend flushing, in seconds. The
443 424 default is one second.
444 425 """
445 426 # We do the IOLoop callback process twice to ensure that the IOLoop
446 427 # gets to perform at least one full poll.
447 428 stop_time = time.time() + timeout
448 429 for i in range(2):
449 430 self._flushed = False
450 431 self.ioloop.add_callback(self._flush)
451 432 while not self._flushed and time.time() < stop_time:
452 433 time.sleep(0.01)
453 434
454 435 def _flush(self):
455 436 """Callback for :method:`self.flush`."""
456 437 self.stream.flush()
457 438 self._flushed = True
458 439
459 440
460 441 class StdInChannel(ZMQSocketChannel):
461 442 """The stdin channel to handle raw_input requests that the kernel makes."""
462 443
463 444 msg_queue = None
464 445 proxy_methods = ['input']
465 446
466 447 def __init__(self, context, session, address):
467 448 super(StdInChannel, self).__init__(context, session, address)
468 449 self.ioloop = ioloop.IOLoop()
469 450
470 451 def run(self):
471 452 """The thread's main activity. Call start() instead."""
472 453 self.socket = self.context.socket(zmq.DEALER)
473 454 self.socket.linger = 1000
474 455 self.socket.setsockopt(zmq.IDENTITY, self.session.bsession)
475 456 self.socket.connect(self.address)
476 457 self.stream = zmqstream.ZMQStream(self.socket, self.ioloop)
477 458 self.stream.on_recv(self._handle_recv)
478 459 self._run_loop()
479 460
480 461 def call_handlers(self, msg):
481 462 """This method is called in the ioloop thread when a message arrives.
482 463
483 464 Subclasses should override this method to handle incoming messages.
484 465 It is important to remember that this method is called in the thread
485 466 so that some logic must be done to ensure that the application leve
486 467 handlers are called in the application thread.
487 468 """
488 469 raise NotImplementedError('call_handlers must be defined in a subclass.')
489 470
490 471 def input(self, string):
491 472 """Send a string of raw input to the kernel."""
492 473 content = dict(value=string)
493 474 msg = self.session.msg('input_reply', content)
494 475 self._queue_send(msg)
495 476
496 477
497 478 class HBChannel(ZMQSocketChannel):
498 479 """The heartbeat channel which monitors the kernel heartbeat.
499 480
500 481 Note that the heartbeat channel is paused by default. As long as you start
501 482 this channel, the kernel manager will ensure that it is paused and un-paused
502 483 as appropriate.
503 484 """
504 485
505 486 time_to_dead = 3.0
506 487 socket = None
507 488 poller = None
508 489 _running = None
509 490 _pause = None
510 491 _beating = None
511 492
512 493 def __init__(self, context, session, address):
513 494 super(HBChannel, self).__init__(context, session, address)
514 495 self._running = False
515 496 self._pause =True
516 497 self.poller = zmq.Poller()
517 498
518 499 def _create_socket(self):
519 500 if self.socket is not None:
520 501 # close previous socket, before opening a new one
521 502 self.poller.unregister(self.socket)
522 503 self.socket.close()
523 504 self.socket = self.context.socket(zmq.REQ)
524 505 self.socket.linger = 1000
525 506 self.socket.connect(self.address)
526 507
527 508 self.poller.register(self.socket, zmq.POLLIN)
528 509
529 510 def _poll(self, start_time):
530 511 """poll for heartbeat replies until we reach self.time_to_dead.
531 512
532 513 Ignores interrupts, and returns the result of poll(), which
533 514 will be an empty list if no messages arrived before the timeout,
534 515 or the event tuple if there is a message to receive.
535 516 """
536 517
537 518 until_dead = self.time_to_dead - (time.time() - start_time)
538 519 # ensure poll at least once
539 520 until_dead = max(until_dead, 1e-3)
540 521 events = []
541 522 while True:
542 523 try:
543 524 events = self.poller.poll(1000 * until_dead)
544 525 except ZMQError as e:
545 526 if e.errno == errno.EINTR:
546 527 # ignore interrupts during heartbeat
547 528 # this may never actually happen
548 529 until_dead = self.time_to_dead - (time.time() - start_time)
549 530 until_dead = max(until_dead, 1e-3)
550 531 pass
551 532 else:
552 533 raise
553 534 except Exception:
554 535 if self._exiting:
555 536 break
556 537 else:
557 538 raise
558 539 else:
559 540 break
560 541 return events
561 542
562 543 def run(self):
563 544 """The thread's main activity. Call start() instead."""
564 545 self._create_socket()
565 546 self._running = True
566 547 self._beating = True
567 548
568 549 while self._running:
569 550 if self._pause:
570 551 # just sleep, and skip the rest of the loop
571 552 time.sleep(self.time_to_dead)
572 553 continue
573 554
574 555 since_last_heartbeat = 0.0
575 556 # io.rprint('Ping from HB channel') # dbg
576 557 # no need to catch EFSM here, because the previous event was
577 558 # either a recv or connect, which cannot be followed by EFSM
578 559 self.socket.send(b'ping')
579 560 request_time = time.time()
580 561 ready = self._poll(request_time)
581 562 if ready:
582 563 self._beating = True
583 564 # the poll above guarantees we have something to recv
584 565 self.socket.recv()
585 566 # sleep the remainder of the cycle
586 567 remainder = self.time_to_dead - (time.time() - request_time)
587 568 if remainder > 0:
588 569 time.sleep(remainder)
589 570 continue
590 571 else:
591 572 # nothing was received within the time limit, signal heart failure
592 573 self._beating = False
593 574 since_last_heartbeat = time.time() - request_time
594 575 self.call_handlers(since_last_heartbeat)
595 576 # and close/reopen the socket, because the REQ/REP cycle has been broken
596 577 self._create_socket()
597 578 continue
598 579
599 580 def pause(self):
600 581 """Pause the heartbeat."""
601 582 self._pause = True
602 583
603 584 def unpause(self):
604 585 """Unpause the heartbeat."""
605 586 self._pause = False
606 587
607 588 def is_beating(self):
608 589 """Is the heartbeat running and responsive (and not paused)."""
609 590 if self.is_alive() and not self._pause and self._beating:
610 591 return True
611 592 else:
612 593 return False
613 594
614 595 def stop(self):
615 596 """Stop the channel's event loop and join its thread."""
616 597 self._running = False
617 598 super(HBChannel, self).stop()
618 599
619 600 def call_handlers(self, since_last_heartbeat):
620 601 """This method is called in the ioloop thread when a message arrives.
621 602
622 603 Subclasses should override this method to handle incoming messages.
623 604 It is important to remember that this method is called in the thread
624 605 so that some logic must be done to ensure that the application level
625 606 handlers are called in the application thread.
626 607 """
627 608 raise NotImplementedError('call_handlers must be defined in a subclass.')
628 609
629 610
630 611 #---------------------------------------------------------------------#-----------------------------------------------------------------------------
631 612 # ABC Registration
632 613 #-----------------------------------------------------------------------------
633 614
634 615 ShellChannelABC.register(ShellChannel)
635 616 IOPubChannelABC.register(IOPubChannel)
636 617 HBChannelABC.register(HBChannel)
637 618 StdInChannelABC.register(StdInChannel)
@@ -1,117 +1,113 b''
1 1 """Abstract base classes for kernel client channels"""
2 2
3 #-----------------------------------------------------------------------------
4 # Copyright (C) 2013 The IPython Development Team
5 #
6 # Distributed under the terms of the BSD License. The full license is in
7 # the file COPYING, distributed as part of this software.
8 #-----------------------------------------------------------------------------
3 # Copyright (c) IPython Development Team.
4 # Distributed under the terms of the Modified BSD License.
9 5
10 6 import abc
11 7
12 8 from IPython.utils.py3compat import with_metaclass
13 9
14 10
15 11 class ChannelABC(with_metaclass(abc.ABCMeta, object)):
16 12 """A base class for all channel ABCs."""
17 13
18 14 @abc.abstractmethod
19 15 def start(self):
20 16 pass
21 17
22 18 @abc.abstractmethod
23 19 def stop(self):
24 20 pass
25 21
26 22 @abc.abstractmethod
27 23 def is_alive(self):
28 24 pass
29 25
30 26
31 27 class ShellChannelABC(ChannelABC):
32 28 """ShellChannel ABC.
33 29
34 30 The docstrings for this class can be found in the base implementation:
35 31
36 32 `IPython.kernel.channels.ShellChannel`
37 33 """
38 34
39 35 @abc.abstractproperty
40 36 def allow_stdin(self):
41 37 pass
42 38
43 39 @abc.abstractmethod
44 40 def execute(self, code, silent=False, store_history=True,
45 user_variables=None, user_expressions=None, allow_stdin=None):
41 user_expressions=None, allow_stdin=None):
46 42 pass
47 43
48 44 @abc.abstractmethod
49 45 def complete(self, text, line, cursor_pos, block=None):
50 46 pass
51 47
52 48 @abc.abstractmethod
53 49 def object_info(self, oname, detail_level=0):
54 50 pass
55 51
56 52 @abc.abstractmethod
57 53 def history(self, raw=True, output=False, hist_access_type='range', **kwargs):
58 54 pass
59 55
60 56 @abc.abstractmethod
61 57 def kernel_info(self):
62 58 pass
63 59
64 60 @abc.abstractmethod
65 61 def shutdown(self, restart=False):
66 62 pass
67 63
68 64
69 65 class IOPubChannelABC(ChannelABC):
70 66 """IOPubChannel ABC.
71 67
72 68 The docstrings for this class can be found in the base implementation:
73 69
74 70 `IPython.kernel.channels.IOPubChannel`
75 71 """
76 72
77 73 @abc.abstractmethod
78 74 def flush(self, timeout=1.0):
79 75 pass
80 76
81 77
82 78 class StdInChannelABC(ChannelABC):
83 79 """StdInChannel ABC.
84 80
85 81 The docstrings for this class can be found in the base implementation:
86 82
87 83 `IPython.kernel.channels.StdInChannel`
88 84 """
89 85
90 86 @abc.abstractmethod
91 87 def input(self, string):
92 88 pass
93 89
94 90
95 91 class HBChannelABC(ChannelABC):
96 92 """HBChannel ABC.
97 93
98 94 The docstrings for this class can be found in the base implementation:
99 95
100 96 `IPython.kernel.channels.HBChannel`
101 97 """
102 98
103 99 @abc.abstractproperty
104 100 def time_to_dead(self):
105 101 pass
106 102
107 103 @abc.abstractmethod
108 104 def pause(self):
109 105 pass
110 106
111 107 @abc.abstractmethod
112 108 def unpause(self):
113 109 pass
114 110
115 111 @abc.abstractmethod
116 112 def is_beating(self):
117 113 pass
@@ -1,201 +1,190 b''
1 """ A kernel client for in-process kernels. """
1 """A kernel client for in-process kernels."""
2 2
3 #-----------------------------------------------------------------------------
4 # Copyright (C) 2012 The IPython Development Team
5 #
6 # Distributed under the terms of the BSD License. The full license is in
7 # the file COPYING, distributed as part of this software.
8 #-----------------------------------------------------------------------------
9
10 #-----------------------------------------------------------------------------
11 # Imports
12 #-----------------------------------------------------------------------------
3 # Copyright (c) IPython Development Team.
4 # Distributed under the terms of the Modified BSD License.
13 5
14 # IPython imports
15 6 from IPython.kernel.channelsabc import (
16 7 ShellChannelABC, IOPubChannelABC,
17 8 HBChannelABC, StdInChannelABC,
18 9 )
19 10
20 # Local imports
21 11 from .socket import DummySocket
22 12
23 13 #-----------------------------------------------------------------------------
24 14 # Channel classes
25 15 #-----------------------------------------------------------------------------
26 16
27 17 class InProcessChannel(object):
28 18 """Base class for in-process channels."""
29 19 proxy_methods = []
30 20
31 21 def __init__(self, client=None):
32 22 super(InProcessChannel, self).__init__()
33 23 self.client = client
34 24 self._is_alive = False
35 25
36 26 #--------------------------------------------------------------------------
37 27 # Channel interface
38 28 #--------------------------------------------------------------------------
39 29
40 30 def is_alive(self):
41 31 return self._is_alive
42 32
43 33 def start(self):
44 34 self._is_alive = True
45 35
46 36 def stop(self):
47 37 self._is_alive = False
48 38
49 39 def call_handlers(self, msg):
50 40 """ This method is called in the main thread when a message arrives.
51 41
52 42 Subclasses should override this method to handle incoming messages.
53 43 """
54 44 raise NotImplementedError('call_handlers must be defined in a subclass.')
55 45
56 46 #--------------------------------------------------------------------------
57 47 # InProcessChannel interface
58 48 #--------------------------------------------------------------------------
59 49
60 50 def call_handlers_later(self, *args, **kwds):
61 51 """ Call the message handlers later.
62 52
63 53 The default implementation just calls the handlers immediately, but this
64 54 method exists so that GUI toolkits can defer calling the handlers until
65 55 after the event loop has run, as expected by GUI frontends.
66 56 """
67 57 self.call_handlers(*args, **kwds)
68 58
69 59 def process_events(self):
70 60 """ Process any pending GUI events.
71 61
72 62 This method will be never be called from a frontend without an event
73 63 loop (e.g., a terminal frontend).
74 64 """
75 65 raise NotImplementedError
76 66
77 67
78 68 class InProcessShellChannel(InProcessChannel):
79 69 """See `IPython.kernel.channels.ShellChannel` for docstrings."""
80 70
81 71 # flag for whether execute requests should be allowed to call raw_input
82 72 allow_stdin = True
83 73 proxy_methods = [
84 74 'execute',
85 75 'complete',
86 76 'object_info',
87 77 'history',
88 78 'shutdown',
89 79 'kernel_info',
90 80 ]
91 81
92 82 #--------------------------------------------------------------------------
93 83 # ShellChannel interface
94 84 #--------------------------------------------------------------------------
95 85
96 86 def execute(self, code, silent=False, store_history=True,
97 user_variables=[], user_expressions={}, allow_stdin=None):
87 user_expressions={}, allow_stdin=None):
98 88 if allow_stdin is None:
99 89 allow_stdin = self.allow_stdin
100 90 content = dict(code=code, silent=silent, store_history=store_history,
101 user_variables=user_variables,
102 91 user_expressions=user_expressions,
103 92 allow_stdin=allow_stdin)
104 93 msg = self.client.session.msg('execute_request', content)
105 94 self._dispatch_to_kernel(msg)
106 95 return msg['header']['msg_id']
107 96
108 97 def complete(self, text, line, cursor_pos, block=None):
109 98 content = dict(text=text, line=line, block=block, cursor_pos=cursor_pos)
110 99 msg = self.client.session.msg('complete_request', content)
111 100 self._dispatch_to_kernel(msg)
112 101 return msg['header']['msg_id']
113 102
114 103 def object_info(self, oname, detail_level=0):
115 104 content = dict(oname=oname, detail_level=detail_level)
116 105 msg = self.client.session.msg('object_info_request', content)
117 106 self._dispatch_to_kernel(msg)
118 107 return msg['header']['msg_id']
119 108
120 109 def history(self, raw=True, output=False, hist_access_type='range', **kwds):
121 110 content = dict(raw=raw, output=output,
122 111 hist_access_type=hist_access_type, **kwds)
123 112 msg = self.client.session.msg('history_request', content)
124 113 self._dispatch_to_kernel(msg)
125 114 return msg['header']['msg_id']
126 115
127 116 def shutdown(self, restart=False):
128 117 # FIXME: What to do here?
129 118 raise NotImplementedError('Cannot shutdown in-process kernel')
130 119
131 120 def kernel_info(self):
132 121 """Request kernel info."""
133 122 msg = self.client.session.msg('kernel_info_request')
134 123 self._dispatch_to_kernel(msg)
135 124 return msg['header']['msg_id']
136 125
137 126 #--------------------------------------------------------------------------
138 127 # Protected interface
139 128 #--------------------------------------------------------------------------
140 129
141 130 def _dispatch_to_kernel(self, msg):
142 131 """ Send a message to the kernel and handle a reply.
143 132 """
144 133 kernel = self.client.kernel
145 134 if kernel is None:
146 135 raise RuntimeError('Cannot send request. No kernel exists.')
147 136
148 137 stream = DummySocket()
149 138 self.client.session.send(stream, msg)
150 139 msg_parts = stream.recv_multipart()
151 140 kernel.dispatch_shell(stream, msg_parts)
152 141
153 142 idents, reply_msg = self.client.session.recv(stream, copy=False)
154 143 self.call_handlers_later(reply_msg)
155 144
156 145
157 146 class InProcessIOPubChannel(InProcessChannel):
158 147 """See `IPython.kernel.channels.IOPubChannel` for docstrings."""
159 148
160 149 def flush(self, timeout=1.0):
161 150 pass
162 151
163 152
164 153 class InProcessStdInChannel(InProcessChannel):
165 154 """See `IPython.kernel.channels.StdInChannel` for docstrings."""
166 155
167 156 proxy_methods = ['input']
168 157
169 158 def input(self, string):
170 159 kernel = self.client.kernel
171 160 if kernel is None:
172 161 raise RuntimeError('Cannot send input reply. No kernel exists.')
173 162 kernel.raw_input_str = string
174 163
175 164
176 165 class InProcessHBChannel(InProcessChannel):
177 166 """See `IPython.kernel.channels.HBChannel` for docstrings."""
178 167
179 168 time_to_dead = 3.0
180 169
181 170 def __init__(self, *args, **kwds):
182 171 super(InProcessHBChannel, self).__init__(*args, **kwds)
183 172 self._pause = True
184 173
185 174 def pause(self):
186 175 self._pause = True
187 176
188 177 def unpause(self):
189 178 self._pause = False
190 179
191 180 def is_beating(self):
192 181 return not self._pause
193 182
194 183 #-----------------------------------------------------------------------------
195 184 # ABC Registration
196 185 #-----------------------------------------------------------------------------
197 186
198 187 ShellChannelABC.register(InProcessShellChannel)
199 188 IOPubChannelABC.register(InProcessIOPubChannel)
200 189 HBChannelABC.register(InProcessHBChannel)
201 190 StdInChannelABC.register(InProcessStdInChannel)
@@ -1,442 +1,420 b''
1 1 """Test suite for our zeromq-based message specification."""
2 2
3 3 # Copyright (c) IPython Development Team.
4 4 # Distributed under the terms of the Modified BSD License.
5 5
6 6 import re
7 7 from distutils.version import LooseVersion as V
8 8 from subprocess import PIPE
9 9 try:
10 10 from queue import Empty # Py 3
11 11 except ImportError:
12 12 from Queue import Empty # Py 2
13 13
14 14 import nose.tools as nt
15 15
16 16 from IPython.kernel import KernelManager
17 17
18 18 from IPython.utils.traitlets import (
19 19 HasTraits, TraitError, Bool, Unicode, Dict, Integer, List, Enum, Any,
20 20 )
21 21 from IPython.utils.py3compat import string_types, iteritems
22 22
23 23 from .utils import TIMEOUT, start_global_kernel, flush_channels, execute
24 24
25 25 #-----------------------------------------------------------------------------
26 26 # Globals
27 27 #-----------------------------------------------------------------------------
28 28 KC = None
29 29
30 30 def setup():
31 31 global KC
32 32 KC = start_global_kernel()
33 33
34 34 #-----------------------------------------------------------------------------
35 35 # Message Spec References
36 36 #-----------------------------------------------------------------------------
37 37
38 38 class Reference(HasTraits):
39 39
40 40 """
41 41 Base class for message spec specification testing.
42 42
43 43 This class is the core of the message specification test. The
44 44 idea is that child classes implement trait attributes for each
45 45 message keys, so that message keys can be tested against these
46 46 traits using :meth:`check` method.
47 47
48 48 """
49 49
50 50 def check(self, d):
51 51 """validate a dict against our traits"""
52 52 for key in self.trait_names():
53 53 nt.assert_in(key, d)
54 54 # FIXME: always allow None, probably not a good idea
55 55 if d[key] is None:
56 56 continue
57 57 try:
58 58 setattr(self, key, d[key])
59 59 except TraitError as e:
60 60 assert False, str(e)
61 61
62 62 class Version(Unicode):
63 63 def validate(self, obj, value):
64 64 min_version = self.default_value
65 65 if V(value) < V(min_version):
66 66 raise TraitError("bad version: %s < %s" % (value, min_version))
67 67
68 68 class RMessage(Reference):
69 69 msg_id = Unicode()
70 70 msg_type = Unicode()
71 71 header = Dict()
72 72 parent_header = Dict()
73 73 content = Dict()
74 74
75 75 def check(self, d):
76 76 super(RMessage, self).check(d)
77 77 RHeader().check(self.header)
78 RHeader().check(self.parent_header)
78 if self.parent_header:
79 RHeader().check(self.parent_header)
79 80
80 81 class RHeader(Reference):
81 82 msg_id = Unicode()
82 83 msg_type = Unicode()
83 84 session = Unicode()
84 85 username = Unicode()
85 86 version = Version('5.0')
86 87
87 88
88 89 class ExecuteReply(Reference):
89 90 execution_count = Integer()
90 91 status = Enum((u'ok', u'error'))
91 92
92 93 def check(self, d):
93 94 Reference.check(self, d)
94 95 if d['status'] == 'ok':
95 96 ExecuteReplyOkay().check(d)
96 97 elif d['status'] == 'error':
97 98 ExecuteReplyError().check(d)
98 99
99 100
100 101 class ExecuteReplyOkay(Reference):
101 102 payload = List(Dict)
102 user_variables = Dict()
103 103 user_expressions = Dict()
104 104
105 105
106 106 class ExecuteReplyError(Reference):
107 107 ename = Unicode()
108 108 evalue = Unicode()
109 109 traceback = List(Unicode)
110 110
111 111
112 112 class OInfoReply(Reference):
113 113 name = Unicode()
114 114 found = Bool()
115 115 ismagic = Bool()
116 116 isalias = Bool()
117 117 namespace = Enum((u'builtin', u'magics', u'alias', u'Interactive'))
118 118 type_name = Unicode()
119 119 string_form = Unicode()
120 120 base_class = Unicode()
121 121 length = Integer()
122 122 file = Unicode()
123 123 definition = Unicode()
124 124 argspec = Dict()
125 125 init_definition = Unicode()
126 126 docstring = Unicode()
127 127 init_docstring = Unicode()
128 128 class_docstring = Unicode()
129 129 call_def = Unicode()
130 130 call_docstring = Unicode()
131 131 source = Unicode()
132 132
133 133 def check(self, d):
134 134 super(OInfoReply, self).check(d)
135 135 if d['argspec'] is not None:
136 136 ArgSpec().check(d['argspec'])
137 137
138 138
139 139 class ArgSpec(Reference):
140 140 args = List(Unicode)
141 141 varargs = Unicode()
142 142 varkw = Unicode()
143 143 defaults = List()
144 144
145 145
146 146 class Status(Reference):
147 147 execution_state = Enum((u'busy', u'idle', u'starting'))
148 148
149 149
150 150 class CompleteReply(Reference):
151 151 matches = List(Unicode)
152 152
153 153
154 154 class KernelInfoReply(Reference):
155 155 protocol_version = Version('5.0')
156 156 ipython_version = Version('2.0')
157 157 language_version = Version('2.7')
158 158 language = Unicode()
159 159
160 160
161 161 # IOPub messages
162 162
163 163 class ExecuteInput(Reference):
164 164 code = Unicode()
165 165 execution_count = Integer()
166 166
167 167
168 168 Error = ExecuteReplyError
169 169
170 170
171 171 class Stream(Reference):
172 172 name = Enum((u'stdout', u'stderr'))
173 173 data = Unicode()
174 174
175 175
176 176 mime_pat = re.compile(r'\w+/\w+')
177 177
178 178 class DisplayData(Reference):
179 179 source = Unicode()
180 180 metadata = Dict()
181 181 data = Dict()
182 182 def _data_changed(self, name, old, new):
183 183 for k,v in iteritems(new):
184 184 assert mime_pat.match(k)
185 185 nt.assert_is_instance(v, string_types)
186 186
187 187
188 188 class ExecuteResult(Reference):
189 189 execution_count = Integer()
190 190 data = Dict()
191 191 def _data_changed(self, name, old, new):
192 192 for k,v in iteritems(new):
193 193 assert mime_pat.match(k)
194 194 nt.assert_is_instance(v, string_types)
195 195
196 196
197 197 references = {
198 198 'execute_reply' : ExecuteReply(),
199 199 'object_info_reply' : OInfoReply(),
200 200 'status' : Status(),
201 201 'complete_reply' : CompleteReply(),
202 202 'kernel_info_reply': KernelInfoReply(),
203 203 'execute_input' : ExecuteInput(),
204 204 'execute_result' : ExecuteResult(),
205 205 'error' : Error(),
206 206 'stream' : Stream(),
207 207 'display_data' : DisplayData(),
208 208 'header' : RHeader(),
209 209 }
210 210 """
211 211 Specifications of `content` part of the reply messages.
212 212 """
213 213
214 214
215 215 def validate_message(msg, msg_type=None, parent=None):
216 216 """validate a message
217 217
218 218 This is a generator, and must be iterated through to actually
219 219 trigger each test.
220 220
221 221 If msg_type and/or parent are given, the msg_type and/or parent msg_id
222 222 are compared with the given values.
223 223 """
224 224 RMessage().check(msg)
225 225 if msg_type:
226 226 nt.assert_equal(msg['msg_type'], msg_type)
227 227 if parent:
228 228 nt.assert_equal(msg['parent_header']['msg_id'], parent)
229 229 content = msg['content']
230 230 ref = references[msg['msg_type']]
231 231 ref.check(content)
232 232
233 233
234 234 #-----------------------------------------------------------------------------
235 235 # Tests
236 236 #-----------------------------------------------------------------------------
237 237
238 238 # Shell channel
239 239
240 240 def test_execute():
241 241 flush_channels()
242 242
243 243 msg_id = KC.execute(code='x=1')
244 244 reply = KC.get_shell_msg(timeout=TIMEOUT)
245 245 validate_message(reply, 'execute_reply', msg_id)
246 246
247 247
248 248 def test_execute_silent():
249 249 flush_channels()
250 250 msg_id, reply = execute(code='x=1', silent=True)
251 251
252 252 # flush status=idle
253 253 status = KC.iopub_channel.get_msg(timeout=TIMEOUT)
254 254 validate_message(status, 'status', msg_id)
255 255 nt.assert_equal(status['content']['execution_state'], 'idle')
256 256
257 257 nt.assert_raises(Empty, KC.iopub_channel.get_msg, timeout=0.1)
258 258 count = reply['execution_count']
259 259
260 260 msg_id, reply = execute(code='x=2', silent=True)
261 261
262 262 # flush status=idle
263 263 status = KC.iopub_channel.get_msg(timeout=TIMEOUT)
264 264 validate_message(status, 'status', msg_id)
265 265 nt.assert_equal(status['content']['execution_state'], 'idle')
266 266
267 267 nt.assert_raises(Empty, KC.iopub_channel.get_msg, timeout=0.1)
268 268 count_2 = reply['execution_count']
269 269 nt.assert_equal(count_2, count)
270 270
271 271
272 272 def test_execute_error():
273 273 flush_channels()
274 274
275 275 msg_id, reply = execute(code='1/0')
276 276 nt.assert_equal(reply['status'], 'error')
277 277 nt.assert_equal(reply['ename'], 'ZeroDivisionError')
278 278
279 279 error = KC.iopub_channel.get_msg(timeout=TIMEOUT)
280 280 validate_message(error, 'error', msg_id)
281 281
282 282
283 283 def test_execute_inc():
284 284 """execute request should increment execution_count"""
285 285 flush_channels()
286 286
287 287 msg_id, reply = execute(code='x=1')
288 288 count = reply['execution_count']
289 289
290 290 flush_channels()
291 291
292 292 msg_id, reply = execute(code='x=2')
293 293 count_2 = reply['execution_count']
294 294 nt.assert_equal(count_2, count+1)
295 295
296 296
297 def test_user_variables():
298 flush_channels()
299
300 msg_id, reply = execute(code='x=1', user_variables=['x'])
301 user_variables = reply['user_variables']
302 nt.assert_equal(user_variables, {u'x': {
303 u'status': u'ok',
304 u'data': {u'text/plain': u'1'},
305 u'metadata': {},
306 }})
307
308
309 def test_user_variables_fail():
310 flush_channels()
311
312 msg_id, reply = execute(code='x=1', user_variables=['nosuchname'])
313 user_variables = reply['user_variables']
314 foo = user_variables['nosuchname']
315 nt.assert_equal(foo['status'], 'error')
316 nt.assert_equal(foo['ename'], 'KeyError')
317
318
319 297 def test_user_expressions():
320 298 flush_channels()
321 299
322 300 msg_id, reply = execute(code='x=1', user_expressions=dict(foo='x+1'))
323 301 user_expressions = reply['user_expressions']
324 302 nt.assert_equal(user_expressions, {u'foo': {
325 303 u'status': u'ok',
326 304 u'data': {u'text/plain': u'2'},
327 305 u'metadata': {},
328 306 }})
329 307
330 308
331 309 def test_user_expressions_fail():
332 310 flush_channels()
333 311
334 312 msg_id, reply = execute(code='x=0', user_expressions=dict(foo='nosuchname'))
335 313 user_expressions = reply['user_expressions']
336 314 foo = user_expressions['foo']
337 315 nt.assert_equal(foo['status'], 'error')
338 316 nt.assert_equal(foo['ename'], 'NameError')
339 317
340 318
341 319 def test_oinfo():
342 320 flush_channels()
343 321
344 322 msg_id = KC.object_info('a')
345 323 reply = KC.get_shell_msg(timeout=TIMEOUT)
346 324 validate_message(reply, 'object_info_reply', msg_id)
347 325
348 326
349 327 def test_oinfo_found():
350 328 flush_channels()
351 329
352 330 msg_id, reply = execute(code='a=5')
353 331
354 332 msg_id = KC.object_info('a')
355 333 reply = KC.get_shell_msg(timeout=TIMEOUT)
356 334 validate_message(reply, 'object_info_reply', msg_id)
357 335 content = reply['content']
358 336 assert content['found']
359 337 argspec = content['argspec']
360 338 nt.assert_is(argspec, None)
361 339
362 340
363 341 def test_oinfo_detail():
364 342 flush_channels()
365 343
366 344 msg_id, reply = execute(code='ip=get_ipython()')
367 345
368 346 msg_id = KC.object_info('ip.object_inspect', detail_level=2)
369 347 reply = KC.get_shell_msg(timeout=TIMEOUT)
370 348 validate_message(reply, 'object_info_reply', msg_id)
371 349 content = reply['content']
372 350 assert content['found']
373 351 argspec = content['argspec']
374 352 nt.assert_is_instance(argspec, dict, "expected non-empty argspec dict, got %r" % argspec)
375 353 nt.assert_equal(argspec['defaults'], [0])
376 354
377 355
378 356 def test_oinfo_not_found():
379 357 flush_channels()
380 358
381 359 msg_id = KC.object_info('dne')
382 360 reply = KC.get_shell_msg(timeout=TIMEOUT)
383 361 validate_message(reply, 'object_info_reply', msg_id)
384 362 content = reply['content']
385 363 nt.assert_false(content['found'])
386 364
387 365
388 366 def test_complete():
389 367 flush_channels()
390 368
391 369 msg_id, reply = execute(code="alpha = albert = 5")
392 370
393 371 msg_id = KC.complete('al', 'al', 2)
394 372 reply = KC.get_shell_msg(timeout=TIMEOUT)
395 373 validate_message(reply, 'complete_reply', msg_id)
396 374 matches = reply['content']['matches']
397 375 for name in ('alpha', 'albert'):
398 376 nt.assert_in(name, matches)
399 377
400 378
401 379 def test_kernel_info_request():
402 380 flush_channels()
403 381
404 382 msg_id = KC.kernel_info()
405 383 reply = KC.get_shell_msg(timeout=TIMEOUT)
406 384 validate_message(reply, 'kernel_info_reply', msg_id)
407 385
408 386
409 387 def test_single_payload():
410 388 flush_channels()
411 389 msg_id, reply = execute(code="for i in range(3):\n"+
412 390 " x=range?\n")
413 391 payload = reply['payload']
414 392 next_input_pls = [pl for pl in payload if pl["source"] == "set_next_input"]
415 393 nt.assert_equal(len(next_input_pls), 1)
416 394
417 395
418 396 # IOPub channel
419 397
420 398
421 399 def test_stream():
422 400 flush_channels()
423 401
424 402 msg_id, reply = execute("print('hi')")
425 403
426 404 stdout = KC.iopub_channel.get_msg(timeout=TIMEOUT)
427 405 validate_message(stdout, 'stream', msg_id)
428 406 content = stdout['content']
429 407 nt.assert_equal(content['name'], u'stdout')
430 408 nt.assert_equal(content['data'], u'hi\n')
431 409
432 410
433 411 def test_display_data():
434 412 flush_channels()
435 413
436 414 msg_id, reply = execute("from IPython.core.display import display; display(1)")
437 415
438 416 display = KC.iopub_channel.get_msg(timeout=TIMEOUT)
439 417 validate_message(display, 'display_data', parent=msg_id)
440 418 data = display['content']['data']
441 419 nt.assert_equal(data['text/plain'], u'1')
442 420
@@ -1,797 +1,793 b''
1 1 #!/usr/bin/env python
2 2 """An interactive kernel that talks to frontends over 0MQ."""
3 3
4 4 # Copyright (c) IPython Development Team.
5 5 # Distributed under the terms of the Modified BSD License.
6 6
7 7 from __future__ import print_function
8 8
9 9 import sys
10 10 import time
11 11 import traceback
12 12 import logging
13 13 import uuid
14 14
15 15 from datetime import datetime
16 16 from signal import (
17 17 signal, default_int_handler, SIGINT
18 18 )
19 19
20 20 import zmq
21 21 from zmq.eventloop import ioloop
22 22 from zmq.eventloop.zmqstream import ZMQStream
23 23
24 24 from IPython.config.configurable import Configurable
25 25 from IPython.core.error import StdinNotImplementedError
26 26 from IPython.core import release
27 27 from IPython.utils import py3compat
28 28 from IPython.utils.py3compat import builtin_mod, unicode_type, string_types
29 29 from IPython.utils.jsonutil import json_clean
30 30 from IPython.utils.traitlets import (
31 31 Any, Instance, Float, Dict, List, Set, Integer, Unicode,
32 32 Type, Bool,
33 33 )
34 34
35 35 from .serialize import serialize_object, unpack_apply_message
36 36 from .session import Session
37 37 from .zmqshell import ZMQInteractiveShell
38 38
39 39
40 40 #-----------------------------------------------------------------------------
41 41 # Main kernel class
42 42 #-----------------------------------------------------------------------------
43 43
44 44 protocol_version = release.kernel_protocol_version
45 45 ipython_version = release.version
46 46 language_version = sys.version.split()[0]
47 47
48 48
49 49 class Kernel(Configurable):
50 50
51 51 #---------------------------------------------------------------------------
52 52 # Kernel interface
53 53 #---------------------------------------------------------------------------
54 54
55 55 # attribute to override with a GUI
56 56 eventloop = Any(None)
57 57 def _eventloop_changed(self, name, old, new):
58 58 """schedule call to eventloop from IOLoop"""
59 59 loop = ioloop.IOLoop.instance()
60 60 loop.add_callback(self.enter_eventloop)
61 61
62 62 shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
63 63 shell_class = Type(ZMQInteractiveShell)
64 64
65 65 session = Instance(Session)
66 66 profile_dir = Instance('IPython.core.profiledir.ProfileDir')
67 67 shell_streams = List()
68 68 control_stream = Instance(ZMQStream)
69 69 iopub_socket = Instance(zmq.Socket)
70 70 stdin_socket = Instance(zmq.Socket)
71 71 log = Instance(logging.Logger)
72 72
73 73 user_module = Any()
74 74 def _user_module_changed(self, name, old, new):
75 75 if self.shell is not None:
76 76 self.shell.user_module = new
77 77
78 78 user_ns = Instance(dict, args=None, allow_none=True)
79 79 def _user_ns_changed(self, name, old, new):
80 80 if self.shell is not None:
81 81 self.shell.user_ns = new
82 82 self.shell.init_user_ns()
83 83
84 84 # identities:
85 85 int_id = Integer(-1)
86 86 ident = Unicode()
87 87
88 88 def _ident_default(self):
89 89 return unicode_type(uuid.uuid4())
90 90
91 91 # Private interface
92 92
93 93 _darwin_app_nap = Bool(True, config=True,
 94 94 help="""Whether to use appnope for compatibility with OS X App Nap.
95 95
96 96 Only affects OS X >= 10.9.
97 97 """
98 98 )
99 99
100 100 # Time to sleep after flushing the stdout/err buffers in each execute
101 101 # cycle. While this introduces a hard limit on the minimal latency of the
102 102 # execute cycle, it helps prevent output synchronization problems for
103 103 # clients.
104 104 # Units are in seconds. The minimum zmq latency on local host is probably
105 105 # ~150 microseconds, set this to 500us for now. We may need to increase it
106 106 # a little if it's not enough after more interactive testing.
107 107 _execute_sleep = Float(0.0005, config=True)
108 108
109 109 # Frequency of the kernel's event loop.
110 110 # Units are in seconds, kernel subclasses for GUI toolkits may need to
111 111 # adapt to milliseconds.
112 112 _poll_interval = Float(0.05, config=True)
113 113
114 114 # If the shutdown was requested over the network, we leave here the
115 115 # necessary reply message so it can be sent by our registered atexit
116 116 # handler. This ensures that the reply is only sent to clients truly at
117 117 # the end of our shutdown process (which happens after the underlying
118 118 # IPython shell's own shutdown).
119 119 _shutdown_message = None
120 120
121 121 # This is a dict of port number that the kernel is listening on. It is set
122 122 # by record_ports and used by connect_request.
123 123 _recorded_ports = Dict()
124 124
125 125 # A reference to the Python builtin 'raw_input' function.
126 126 # (i.e., __builtin__.raw_input for Python 2.7, builtins.input for Python 3)
127 127 _sys_raw_input = Any()
128 128 _sys_eval_input = Any()
129 129
130 130 # set of aborted msg_ids
131 131 aborted = Set()
132 132
133 133
134 134 def __init__(self, **kwargs):
135 135 super(Kernel, self).__init__(**kwargs)
136 136
137 137 # Initialize the InteractiveShell subclass
138 138 self.shell = self.shell_class.instance(parent=self,
139 139 profile_dir = self.profile_dir,
140 140 user_module = self.user_module,
141 141 user_ns = self.user_ns,
142 142 kernel = self,
143 143 )
144 144 self.shell.displayhook.session = self.session
145 145 self.shell.displayhook.pub_socket = self.iopub_socket
146 146 self.shell.displayhook.topic = self._topic('execute_result')
147 147 self.shell.display_pub.session = self.session
148 148 self.shell.display_pub.pub_socket = self.iopub_socket
149 149 self.shell.data_pub.session = self.session
150 150 self.shell.data_pub.pub_socket = self.iopub_socket
151 151
152 152 # TMP - hack while developing
153 153 self.shell._reply_content = None
154 154
155 155 # Build dict of handlers for message types
156 156 msg_types = [ 'execute_request', 'complete_request',
157 157 'object_info_request', 'history_request',
158 158 'kernel_info_request',
159 159 'connect_request', 'shutdown_request',
160 160 'apply_request',
161 161 ]
162 162 self.shell_handlers = {}
163 163 for msg_type in msg_types:
164 164 self.shell_handlers[msg_type] = getattr(self, msg_type)
165 165
166 166 comm_msg_types = [ 'comm_open', 'comm_msg', 'comm_close' ]
167 167 comm_manager = self.shell.comm_manager
168 168 for msg_type in comm_msg_types:
169 169 self.shell_handlers[msg_type] = getattr(comm_manager, msg_type)
170 170
171 171 control_msg_types = msg_types + [ 'clear_request', 'abort_request' ]
172 172 self.control_handlers = {}
173 173 for msg_type in control_msg_types:
174 174 self.control_handlers[msg_type] = getattr(self, msg_type)
175 175
176 176
177 177 def dispatch_control(self, msg):
178 178 """dispatch control requests"""
179 179 idents,msg = self.session.feed_identities(msg, copy=False)
180 180 try:
181 181 msg = self.session.unserialize(msg, content=True, copy=False)
182 182 except:
183 183 self.log.error("Invalid Control Message", exc_info=True)
184 184 return
185 185
186 186 self.log.debug("Control received: %s", msg)
187 187
188 188 header = msg['header']
189 189 msg_id = header['msg_id']
190 190 msg_type = header['msg_type']
191 191
192 192 handler = self.control_handlers.get(msg_type, None)
193 193 if handler is None:
194 194 self.log.error("UNKNOWN CONTROL MESSAGE TYPE: %r", msg_type)
195 195 else:
196 196 try:
197 197 handler(self.control_stream, idents, msg)
198 198 except Exception:
199 199 self.log.error("Exception in control handler:", exc_info=True)
200 200
201 201 def dispatch_shell(self, stream, msg):
202 202 """dispatch shell requests"""
203 203 # flush control requests first
204 204 if self.control_stream:
205 205 self.control_stream.flush()
206 206
207 207 idents,msg = self.session.feed_identities(msg, copy=False)
208 208 try:
209 209 msg = self.session.unserialize(msg, content=True, copy=False)
210 210 except:
211 211 self.log.error("Invalid Message", exc_info=True)
212 212 return
213 213
214 214 header = msg['header']
215 215 msg_id = header['msg_id']
216 216 msg_type = msg['header']['msg_type']
217 217
218 218 # Print some info about this message and leave a '--->' marker, so it's
219 219 # easier to trace visually the message chain when debugging. Each
220 220 # handler prints its message at the end.
221 221 self.log.debug('\n*** MESSAGE TYPE:%s***', msg_type)
222 222 self.log.debug(' Content: %s\n --->\n ', msg['content'])
223 223
224 224 if msg_id in self.aborted:
225 225 self.aborted.remove(msg_id)
226 226 # is it safe to assume a msg_id will not be resubmitted?
227 227 reply_type = msg_type.split('_')[0] + '_reply'
228 228 status = {'status' : 'aborted'}
229 229 md = {'engine' : self.ident}
230 230 md.update(status)
231 231 reply_msg = self.session.send(stream, reply_type, metadata=md,
232 232 content=status, parent=msg, ident=idents)
233 233 return
234 234
235 235 handler = self.shell_handlers.get(msg_type, None)
236 236 if handler is None:
237 237 self.log.error("UNKNOWN MESSAGE TYPE: %r", msg_type)
238 238 else:
239 239 # ensure default_int_handler during handler call
240 240 sig = signal(SIGINT, default_int_handler)
241 241 try:
242 242 handler(stream, idents, msg)
243 243 except Exception:
244 244 self.log.error("Exception in message handler:", exc_info=True)
245 245 finally:
246 246 signal(SIGINT, sig)
247 247
248 248 def enter_eventloop(self):
249 249 """enter eventloop"""
250 250 self.log.info("entering eventloop %s", self.eventloop)
251 251 for stream in self.shell_streams:
252 252 # flush any pending replies,
253 253 # which may be skipped by entering the eventloop
254 254 stream.flush(zmq.POLLOUT)
255 255 # restore default_int_handler
256 256 signal(SIGINT, default_int_handler)
257 257 while self.eventloop is not None:
258 258 try:
259 259 self.eventloop(self)
260 260 except KeyboardInterrupt:
261 261 # Ctrl-C shouldn't crash the kernel
262 262 self.log.error("KeyboardInterrupt caught in kernel")
263 263 continue
264 264 else:
265 265 # eventloop exited cleanly, this means we should stop (right?)
266 266 self.eventloop = None
267 267 break
268 268 self.log.info("exiting eventloop")
269 269
270 270 def start(self):
271 271 """register dispatchers for streams"""
272 272 self.shell.exit_now = False
273 273 if self.control_stream:
274 274 self.control_stream.on_recv(self.dispatch_control, copy=False)
275 275
276 276 def make_dispatcher(stream):
277 277 def dispatcher(msg):
278 278 return self.dispatch_shell(stream, msg)
279 279 return dispatcher
280 280
281 281 for s in self.shell_streams:
282 282 s.on_recv(make_dispatcher(s), copy=False)
283 283
284 284 # publish idle status
285 285 self._publish_status('starting')
286 286
287 287 def do_one_iteration(self):
288 288 """step eventloop just once"""
289 289 if self.control_stream:
290 290 self.control_stream.flush()
291 291 for stream in self.shell_streams:
292 292 # handle at most one request per iteration
293 293 stream.flush(zmq.POLLIN, 1)
294 294 stream.flush(zmq.POLLOUT)
295 295
296 296
297 297 def record_ports(self, ports):
298 298 """Record the ports that this kernel is using.
299 299
300 300 The creator of the Kernel instance must call this methods if they
301 301 want the :meth:`connect_request` method to return the port numbers.
302 302 """
303 303 self._recorded_ports = ports
304 304
305 305 #---------------------------------------------------------------------------
306 306 # Kernel request handlers
307 307 #---------------------------------------------------------------------------
308 308
309 309 def _make_metadata(self, other=None):
310 310 """init metadata dict, for execute/apply_reply"""
311 311 new_md = {
312 312 'dependencies_met' : True,
313 313 'engine' : self.ident,
314 314 'started': datetime.now(),
315 315 }
316 316 if other:
317 317 new_md.update(other)
318 318 return new_md
319 319
320 320 def _publish_execute_input(self, code, parent, execution_count):
321 321 """Publish the code request on the iopub stream."""
322 322
323 323 self.session.send(self.iopub_socket, u'execute_input',
324 324 {u'code':code, u'execution_count': execution_count},
325 325 parent=parent, ident=self._topic('execute_input')
326 326 )
327 327
328 328 def _publish_status(self, status, parent=None):
329 329 """send status (busy/idle) on IOPub"""
330 330 self.session.send(self.iopub_socket,
331 331 u'status',
332 332 {u'execution_state': status},
333 333 parent=parent,
334 334 ident=self._topic('status'),
335 335 )
336 336
337 337
338 338 def execute_request(self, stream, ident, parent):
339 339 """handle an execute_request"""
340 340
341 341 self._publish_status(u'busy', parent)
342 342
343 343 try:
344 344 content = parent[u'content']
345 345 code = py3compat.cast_unicode_py2(content[u'code'])
346 346 silent = content[u'silent']
347 347 store_history = content.get(u'store_history', not silent)
348 348 except:
349 349 self.log.error("Got bad msg: ")
350 350 self.log.error("%s", parent)
351 351 return
352 352
353 353 md = self._make_metadata(parent['metadata'])
354 354
355 355 shell = self.shell # we'll need this a lot here
356 356
357 357 # Replace raw_input. Note that is not sufficient to replace
358 358 # raw_input in the user namespace.
359 359 if content.get('allow_stdin', False):
360 360 raw_input = lambda prompt='': self._raw_input(prompt, ident, parent)
361 361 input = lambda prompt='': eval(raw_input(prompt))
362 362 else:
363 363 raw_input = input = lambda prompt='' : self._no_raw_input()
364 364
365 365 if py3compat.PY3:
366 366 self._sys_raw_input = builtin_mod.input
367 367 builtin_mod.input = raw_input
368 368 else:
369 369 self._sys_raw_input = builtin_mod.raw_input
370 370 self._sys_eval_input = builtin_mod.input
371 371 builtin_mod.raw_input = raw_input
372 372 builtin_mod.input = input
373 373
374 374 # Set the parent message of the display hook and out streams.
375 375 shell.set_parent(parent)
376 376
377 377 # Re-broadcast our input for the benefit of listening clients, and
378 378 # start computing output
379 379 if not silent:
380 380 self._publish_execute_input(code, parent, shell.execution_count)
381 381
382 382 reply_content = {}
383 383 # FIXME: the shell calls the exception handler itself.
384 384 shell._reply_content = None
385 385 try:
386 386 shell.run_cell(code, store_history=store_history, silent=silent)
387 387 except:
388 388 status = u'error'
389 389 # FIXME: this code right now isn't being used yet by default,
390 390 # because the run_cell() call above directly fires off exception
391 391 # reporting. This code, therefore, is only active in the scenario
392 392 # where runlines itself has an unhandled exception. We need to
393 393 # uniformize this, for all exception construction to come from a
 394 394 # single location in the codebase.
395 395 etype, evalue, tb = sys.exc_info()
396 396 tb_list = traceback.format_exception(etype, evalue, tb)
397 397 reply_content.update(shell._showtraceback(etype, evalue, tb_list))
398 398 else:
399 399 status = u'ok'
400 400 finally:
401 401 # Restore raw_input.
402 402 if py3compat.PY3:
403 403 builtin_mod.input = self._sys_raw_input
404 404 else:
405 405 builtin_mod.raw_input = self._sys_raw_input
406 406 builtin_mod.input = self._sys_eval_input
407 407
408 408 reply_content[u'status'] = status
409 409
410 410 # Return the execution counter so clients can display prompts
411 411 reply_content['execution_count'] = shell.execution_count - 1
412 412
413 413 # FIXME - fish exception info out of shell, possibly left there by
414 414 # runlines. We'll need to clean up this logic later.
415 415 if shell._reply_content is not None:
416 416 reply_content.update(shell._reply_content)
417 417 e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method='execute')
418 418 reply_content['engine_info'] = e_info
419 419 # reset after use
420 420 shell._reply_content = None
421 421
422 422 if 'traceback' in reply_content:
423 423 self.log.info("Exception in execute request:\n%s", '\n'.join(reply_content['traceback']))
424 424
425 425
426 426 # At this point, we can tell whether the main code execution succeeded
427 # or not. If it did, we proceed to evaluate user_variables/expressions
427 # or not. If it did, we proceed to evaluate user_expressions
428 428 if reply_content['status'] == 'ok':
429 reply_content[u'user_variables'] = \
430 shell.user_variables(content.get(u'user_variables', []))
431 429 reply_content[u'user_expressions'] = \
432 430 shell.user_expressions(content.get(u'user_expressions', {}))
433 431 else:
434 # If there was an error, don't even try to compute variables or
435 # expressions
436 reply_content[u'user_variables'] = {}
432 # If there was an error, don't even try to compute expressions
437 433 reply_content[u'user_expressions'] = {}
438 434
439 435 # Payloads should be retrieved regardless of outcome, so we can both
440 436 # recover partial output (that could have been generated early in a
441 437 # block, before an error) and clear the payload system always.
442 438 reply_content[u'payload'] = shell.payload_manager.read_payload()
 444 440 # Be aggressive about clearing the payload because we don't want
444 440 # it to sit in memory until the next execute_request comes in.
445 441 shell.payload_manager.clear_payload()
446 442
447 443 # Flush output before sending the reply.
448 444 sys.stdout.flush()
449 445 sys.stderr.flush()
450 446 # FIXME: on rare occasions, the flush doesn't seem to make it to the
451 447 # clients... This seems to mitigate the problem, but we definitely need
452 448 # to better understand what's going on.
453 449 if self._execute_sleep:
454 450 time.sleep(self._execute_sleep)
455 451
456 452 # Send the reply.
457 453 reply_content = json_clean(reply_content)
458 454
459 455 md['status'] = reply_content['status']
460 456 if reply_content['status'] == 'error' and \
461 457 reply_content['ename'] == 'UnmetDependency':
462 458 md['dependencies_met'] = False
463 459
464 460 reply_msg = self.session.send(stream, u'execute_reply',
465 461 reply_content, parent, metadata=md,
466 462 ident=ident)
467 463
468 464 self.log.debug("%s", reply_msg)
469 465
470 466 if not silent and reply_msg['content']['status'] == u'error':
471 467 self._abort_queues()
472 468
473 469 self._publish_status(u'idle', parent)
474 470
475 471 def complete_request(self, stream, ident, parent):
476 472 txt, matches = self._complete(parent)
477 473 matches = {'matches' : matches,
478 474 'matched_text' : txt,
479 475 'status' : 'ok'}
480 476 matches = json_clean(matches)
481 477 completion_msg = self.session.send(stream, 'complete_reply',
482 478 matches, parent, ident)
483 479 self.log.debug("%s", completion_msg)
484 480
485 481 def object_info_request(self, stream, ident, parent):
486 482 content = parent['content']
487 483 object_info = self.shell.object_inspect(content['oname'],
488 484 detail_level = content.get('detail_level', 0)
489 485 )
490 486 # Before we send this object over, we scrub it for JSON usage
491 487 oinfo = json_clean(object_info)
492 488 msg = self.session.send(stream, 'object_info_reply',
493 489 oinfo, parent, ident)
494 490 self.log.debug("%s", msg)
495 491
496 492 def history_request(self, stream, ident, parent):
497 493 # We need to pull these out, as passing **kwargs doesn't work with
498 494 # unicode keys before Python 2.6.5.
499 495 hist_access_type = parent['content']['hist_access_type']
500 496 raw = parent['content']['raw']
501 497 output = parent['content']['output']
502 498 if hist_access_type == 'tail':
503 499 n = parent['content']['n']
504 500 hist = self.shell.history_manager.get_tail(n, raw=raw, output=output,
505 501 include_latest=True)
506 502
507 503 elif hist_access_type == 'range':
508 504 session = parent['content']['session']
509 505 start = parent['content']['start']
510 506 stop = parent['content']['stop']
511 507 hist = self.shell.history_manager.get_range(session, start, stop,
512 508 raw=raw, output=output)
513 509
514 510 elif hist_access_type == 'search':
515 511 n = parent['content'].get('n')
516 512 unique = parent['content'].get('unique', False)
517 513 pattern = parent['content']['pattern']
518 514 hist = self.shell.history_manager.search(
519 515 pattern, raw=raw, output=output, n=n, unique=unique)
520 516
521 517 else:
522 518 hist = []
523 519 hist = list(hist)
524 520 content = {'history' : hist}
525 521 content = json_clean(content)
526 522 msg = self.session.send(stream, 'history_reply',
527 523 content, parent, ident)
528 524 self.log.debug("Sending history reply with %i entries", len(hist))
529 525
530 526 def connect_request(self, stream, ident, parent):
531 527 if self._recorded_ports is not None:
532 528 content = self._recorded_ports.copy()
533 529 else:
534 530 content = {}
535 531 msg = self.session.send(stream, 'connect_reply',
536 532 content, parent, ident)
537 533 self.log.debug("%s", msg)
538 534
539 535 def kernel_info_request(self, stream, ident, parent):
540 536 vinfo = {
541 537 'protocol_version': protocol_version,
542 538 'ipython_version': ipython_version,
543 539 'language_version': language_version,
544 540 'language': 'python',
545 541 }
546 542 msg = self.session.send(stream, 'kernel_info_reply',
547 543 vinfo, parent, ident)
548 544 self.log.debug("%s", msg)
549 545
550 546 def shutdown_request(self, stream, ident, parent):
551 547 self.shell.exit_now = True
552 548 content = dict(status='ok')
553 549 content.update(parent['content'])
554 550 self.session.send(stream, u'shutdown_reply', content, parent, ident=ident)
555 551 # same content, but different msg_id for broadcasting on IOPub
556 552 self._shutdown_message = self.session.msg(u'shutdown_reply',
557 553 content, parent
558 554 )
559 555
560 556 self._at_shutdown()
561 557 # call sys.exit after a short delay
562 558 loop = ioloop.IOLoop.instance()
563 559 loop.add_timeout(time.time()+0.1, loop.stop)
564 560
565 561 #---------------------------------------------------------------------------
566 562 # Engine methods
567 563 #---------------------------------------------------------------------------
568 564
569 565 def apply_request(self, stream, ident, parent):
570 566 try:
571 567 content = parent[u'content']
572 568 bufs = parent[u'buffers']
573 569 msg_id = parent['header']['msg_id']
574 570 except:
575 571 self.log.error("Got bad msg: %s", parent, exc_info=True)
576 572 return
577 573
578 574 self._publish_status(u'busy', parent)
579 575
580 576 # Set the parent message of the display hook and out streams.
581 577 shell = self.shell
582 578 shell.set_parent(parent)
583 579
584 580 # execute_input_msg = self.session.msg(u'execute_input',{u'code':code}, parent=parent)
585 581 # self.iopub_socket.send(execute_input_msg)
586 582 # self.session.send(self.iopub_socket, u'execute_input', {u'code':code},parent=parent)
587 583 md = self._make_metadata(parent['metadata'])
588 584 try:
589 585 working = shell.user_ns
590 586
591 587 prefix = "_"+str(msg_id).replace("-","")+"_"
592 588
593 589 f,args,kwargs = unpack_apply_message(bufs, working, copy=False)
594 590
595 591 fname = getattr(f, '__name__', 'f')
596 592
597 593 fname = prefix+"f"
598 594 argname = prefix+"args"
599 595 kwargname = prefix+"kwargs"
600 596 resultname = prefix+"result"
601 597
602 598 ns = { fname : f, argname : args, kwargname : kwargs , resultname : None }
603 599 # print ns
604 600 working.update(ns)
605 601 code = "%s = %s(*%s,**%s)" % (resultname, fname, argname, kwargname)
606 602 try:
607 603 exec(code, shell.user_global_ns, shell.user_ns)
608 604 result = working.get(resultname)
609 605 finally:
610 606 for key in ns:
611 607 working.pop(key)
612 608
613 609 result_buf = serialize_object(result,
614 610 buffer_threshold=self.session.buffer_threshold,
615 611 item_threshold=self.session.item_threshold,
616 612 )
617 613
618 614 except:
619 615 # invoke IPython traceback formatting
620 616 shell.showtraceback()
621 617 # FIXME - fish exception info out of shell, possibly left there by
622 618 # run_code. We'll need to clean up this logic later.
623 619 reply_content = {}
624 620 if shell._reply_content is not None:
625 621 reply_content.update(shell._reply_content)
626 622 e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method='apply')
627 623 reply_content['engine_info'] = e_info
628 624 # reset after use
629 625 shell._reply_content = None
630 626
631 627 self.session.send(self.iopub_socket, u'error', reply_content, parent=parent,
632 628 ident=self._topic('error'))
633 629 self.log.info("Exception in apply request:\n%s", '\n'.join(reply_content['traceback']))
634 630 result_buf = []
635 631
636 632 if reply_content['ename'] == 'UnmetDependency':
637 633 md['dependencies_met'] = False
638 634 else:
639 635 reply_content = {'status' : 'ok'}
640 636
641 637 # put 'ok'/'error' status in header, for scheduler introspection:
642 638 md['status'] = reply_content['status']
643 639
644 640 # flush i/o
645 641 sys.stdout.flush()
646 642 sys.stderr.flush()
647 643
648 644 reply_msg = self.session.send(stream, u'apply_reply', reply_content,
649 645 parent=parent, ident=ident,buffers=result_buf, metadata=md)
650 646
651 647 self._publish_status(u'idle', parent)
652 648
653 649 #---------------------------------------------------------------------------
654 650 # Control messages
655 651 #---------------------------------------------------------------------------
656 652
657 653 def abort_request(self, stream, ident, parent):
 658 654 """abort a specific msg by id"""
659 655 msg_ids = parent['content'].get('msg_ids', None)
660 656 if isinstance(msg_ids, string_types):
661 657 msg_ids = [msg_ids]
662 658 if not msg_ids:
663 659 self.abort_queues()
664 660 for mid in msg_ids:
665 661 self.aborted.add(str(mid))
666 662
667 663 content = dict(status='ok')
668 664 reply_msg = self.session.send(stream, 'abort_reply', content=content,
669 665 parent=parent, ident=ident)
670 666 self.log.debug("%s", reply_msg)
671 667
672 668 def clear_request(self, stream, idents, parent):
673 669 """Clear our namespace."""
674 670 self.shell.reset(False)
675 671 msg = self.session.send(stream, 'clear_reply', ident=idents, parent=parent,
676 672 content = dict(status='ok'))
677 673
678 674
679 675 #---------------------------------------------------------------------------
680 676 # Protected interface
681 677 #---------------------------------------------------------------------------
682 678
683 679 def _wrap_exception(self, method=None):
684 680 # import here, because _wrap_exception is only used in parallel,
685 681 # and parallel has higher min pyzmq version
686 682 from IPython.parallel.error import wrap_exception
687 683 e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method=method)
688 684 content = wrap_exception(e_info)
689 685 return content
690 686
691 687 def _topic(self, topic):
692 688 """prefixed topic for IOPub messages"""
693 689 if self.int_id >= 0:
694 690 base = "engine.%i" % self.int_id
695 691 else:
696 692 base = "kernel.%s" % self.ident
697 693
698 694 return py3compat.cast_bytes("%s.%s" % (base, topic))
699 695
700 696 def _abort_queues(self):
701 697 for stream in self.shell_streams:
702 698 if stream:
703 699 self._abort_queue(stream)
704 700
705 701 def _abort_queue(self, stream):
706 702 poller = zmq.Poller()
707 703 poller.register(stream.socket, zmq.POLLIN)
708 704 while True:
709 705 idents,msg = self.session.recv(stream, zmq.NOBLOCK, content=True)
710 706 if msg is None:
711 707 return
712 708
713 709 self.log.info("Aborting:")
714 710 self.log.info("%s", msg)
715 711 msg_type = msg['header']['msg_type']
716 712 reply_type = msg_type.split('_')[0] + '_reply'
717 713
718 714 status = {'status' : 'aborted'}
719 715 md = {'engine' : self.ident}
720 716 md.update(status)
721 717 reply_msg = self.session.send(stream, reply_type, metadata=md,
722 718 content=status, parent=msg, ident=idents)
723 719 self.log.debug("%s", reply_msg)
724 720 # We need to wait a bit for requests to come in. This can probably
725 721 # be set shorter for true asynchronous clients.
726 722 poller.poll(50)
727 723
728 724
729 725 def _no_raw_input(self):
 730 726 """Raise StdinNotImplementedError if active frontend doesn't support
731 727 stdin."""
732 728 raise StdinNotImplementedError("raw_input was called, but this "
733 729 "frontend does not support stdin.")
734 730
735 731 def _raw_input(self, prompt, ident, parent):
736 732 # Flush output before making the request.
737 733 sys.stderr.flush()
738 734 sys.stdout.flush()
739 735 # flush the stdin socket, to purge stale replies
740 736 while True:
741 737 try:
742 738 self.stdin_socket.recv_multipart(zmq.NOBLOCK)
743 739 except zmq.ZMQError as e:
744 740 if e.errno == zmq.EAGAIN:
745 741 break
746 742 else:
747 743 raise
748 744
749 745 # Send the input request.
750 746 content = json_clean(dict(prompt=prompt))
751 747 self.session.send(self.stdin_socket, u'input_request', content, parent,
752 748 ident=ident)
753 749
754 750 # Await a response.
755 751 while True:
756 752 try:
757 753 ident, reply = self.session.recv(self.stdin_socket, 0)
758 754 except Exception:
759 755 self.log.warn("Invalid Message:", exc_info=True)
760 756 except KeyboardInterrupt:
761 757 # re-raise KeyboardInterrupt, to truncate traceback
762 758 raise KeyboardInterrupt
763 759 else:
764 760 break
765 761 try:
766 762 value = py3compat.unicode_to_str(reply['content']['value'])
767 763 except:
768 764 self.log.error("Got bad raw_input reply: ")
769 765 self.log.error("%s", parent)
770 766 value = ''
771 767 if value == '\x04':
772 768 # EOF
773 769 raise EOFError
774 770 return value
775 771
776 772 def _complete(self, msg):
777 773 c = msg['content']
778 774 try:
779 775 cpos = int(c['cursor_pos'])
780 776 except:
781 777 # If we don't get something that we can convert to an integer, at
782 778 # least attempt the completion guessing the cursor is at the end of
783 779 # the text, if there's any, and otherwise of the line
784 780 cpos = len(c['text'])
785 781 if cpos==0:
786 782 cpos = len(c['line'])
787 783 return self.shell.complete(c['text'], c['line'], cpos)
788 784
789 785 def _at_shutdown(self):
790 786 """Actions taken at shutdown by the kernel, called by python's atexit.
791 787 """
792 788 # io.rprint("Kernel at_shutdown") # dbg
793 789 if self._shutdown_message is not None:
794 790 self.session.send(self.iopub_socket, self._shutdown_message, ident=self._topic('shutdown'))
795 791 self.log.debug("%s", self._shutdown_message)
796 792 [ s.flush(zmq.POLLOUT) for s in self.shell_streams ]
797 793
@@ -1,1863 +1,1863 b''
1 1 """A semi-synchronous Client for IPython parallel"""
2 2
3 3 # Copyright (c) IPython Development Team.
4 4 # Distributed under the terms of the Modified BSD License.
5 5
6 6 from __future__ import print_function
7 7
8 8 import os
9 9 import json
10 10 import sys
11 11 from threading import Thread, Event
12 12 import time
13 13 import warnings
14 14 from datetime import datetime
15 15 from getpass import getpass
16 16 from pprint import pprint
17 17
18 18 pjoin = os.path.join
19 19
20 20 import zmq
21 21
22 22 from IPython.config.configurable import MultipleInstanceError
23 23 from IPython.core.application import BaseIPythonApplication
24 24 from IPython.core.profiledir import ProfileDir, ProfileDirError
25 25
26 26 from IPython.utils.capture import RichOutput
27 27 from IPython.utils.coloransi import TermColors
28 28 from IPython.utils.jsonutil import rekey, extract_dates, parse_date
29 29 from IPython.utils.localinterfaces import localhost, is_local_ip
30 30 from IPython.utils.path import get_ipython_dir
31 31 from IPython.utils.py3compat import cast_bytes, string_types, xrange, iteritems
32 32 from IPython.utils.traitlets import (HasTraits, Integer, Instance, Unicode,
33 33 Dict, List, Bool, Set, Any)
34 34 from IPython.external.decorator import decorator
35 35 from IPython.external.ssh import tunnel
36 36
37 37 from IPython.parallel import Reference
38 38 from IPython.parallel import error
39 39 from IPython.parallel import util
40 40
41 41 from IPython.kernel.zmq.session import Session, Message
42 42 from IPython.kernel.zmq import serialize
43 43
44 44 from .asyncresult import AsyncResult, AsyncHubResult
45 45 from .view import DirectView, LoadBalancedView
46 46
47 47 #--------------------------------------------------------------------------
48 48 # Decorators for Client methods
49 49 #--------------------------------------------------------------------------
50 50
@decorator
def spin_first(f, self, *args, **kwargs):
    """Call spin() to sync state prior to calling the method."""
    # Flush incoming results and registration notifications so the wrapped
    # Client method observes up-to-date state before running.
    self.spin()
    return f(self, *args, **kwargs)
56 56
57 57
58 58 #--------------------------------------------------------------------------
59 59 # Classes
60 60 #--------------------------------------------------------------------------
61 61
62 62
class ExecuteReply(RichOutput):
    """wrapper for finished Execute results"""
    def __init__(self, msg_id, content, metadata):
        # msg_id of the originating execute request
        self.msg_id = msg_id
        # raw reply content dict
        self._content = content
        self.execution_count = content['execution_count']
        # Metadata object accumulated for this msg_id (includes
        # 'execute_result', stdout/stderr, engine info, ...)
        self.metadata = metadata

    # RichOutput overrides

    @property
    def source(self):
        # Returns None implicitly when there is no execute_result yet.
        execute_result = self.metadata['execute_result']
        if execute_result:
            return execute_result.get('source', '')

    @property
    def data(self):
        # mime-bundle dict of the execute_result, or None if absent
        execute_result = self.metadata['execute_result']
        if execute_result:
            return execute_result.get('data', {})

    @property
    def _metadata(self):
        # per-mime display metadata of the execute_result, or None if absent
        execute_result = self.metadata['execute_result']
        if execute_result:
            return execute_result.get('metadata', {})

    def display(self):
        """Republish this result via the display publisher."""
        from IPython.display import publish_display_data
        publish_display_data(self.source, self.data, self.metadata)

    def _repr_mime_(self, mime):
        # Return (data, metadata) when per-mime metadata exists,
        # otherwise just the data; None when the mime type is absent.
        if mime not in self.data:
            return
        data = self.data[mime]
        if mime in self._metadata:
            return data, self._metadata[mime]
        else:
            return data

    def __getitem__(self, key):
        # item access proxies to the metadata dict
        return self.metadata[key]

    def __getattr__(self, key):
        # attribute access also proxies to metadata keys
        if key not in self.metadata:
            raise AttributeError(key)
        return self.metadata[key]

    def __repr__(self):
        execute_result = self.metadata['execute_result'] or {'data':{}}
        text_out = execute_result['data'].get('text/plain', '')
        # truncate long reprs to keep the summary to one short line
        if len(text_out) > 32:
            text_out = text_out[:29] + '...'

        return "<ExecuteReply[%i]: %s>" % (self.execution_count, text_out)

    def _repr_pretty_(self, p, cycle):
        execute_result = self.metadata['execute_result'] or {'data':{}}
        text_out = execute_result['data'].get('text/plain', '')

        if not text_out:
            return

        # Colorize the Out[...] prompt to match the interactive shell,
        # falling back to no color outside IPython.
        try:
            ip = get_ipython()
        except NameError:
            colors = "NoColor"
        else:
            colors = ip.colors

        if colors == "NoColor":
            out = normal = ""
        else:
            out = TermColors.Red
            normal = TermColors.Normal

        if '\n' in text_out and not text_out.startswith('\n'):
            # add newline for multiline reprs
            text_out = '\n' + text_out

        p.text(
            out + u'Out[%i:%i]: ' % (
                self.metadata['engine_id'], self.execution_count
            ) + normal + text_out
        )
149 149
150 150
class Metadata(dict):
    """Subclass of dict for initializing metadata values.

    Attribute access works on keys.

    These objects have a strict set of keys - errors will raise if you try
    to add new keys.
    """
    def __init__(self, *args, **kwargs):
        dict.__init__(self)
        # Seed every legal key with its default; the strict __setitem__
        # below then only ever accepts this fixed key set.
        defaults = {'msg_id' : None,
              'submitted' : None,
              'started' : None,
              'completed' : None,
              'received' : None,
              'engine_uuid' : None,
              'engine_id' : None,
              'follow' : None,
              'after' : None,
              'status' : None,

              'execute_input' : None,
              'execute_result' : None,
              'error' : None,
              'stdout' : '',
              'stderr' : '',
              'outputs' : [],
              'data': {},
              'outputs_ready' : False,
            }
        self.update(defaults)
        # NOTE: dict.update does not route through our strict __setitem__,
        # so constructor arguments are applied without key checking
        # (historical behavior, preserved here).
        self.update(dict(*args, **kwargs))

    def __getattr__(self, key):
        """getattr aliased to getitem"""
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)

    def __setattr__(self, key, value):
        """setattr aliased to setitem, with strict"""
        if key not in self:
            raise AttributeError(key)
        self[key] = value

    def __setitem__(self, key, value):
        """strict static key enforcement"""
        if key not in self:
            raise KeyError(key)
        dict.__setitem__(self, key, value)
204 204
205 205
206 206 class Client(HasTraits):
207 207 """A semi-synchronous client to the IPython ZMQ cluster
208 208
209 209 Parameters
210 210 ----------
211 211
212 212 url_file : str/unicode; path to ipcontroller-client.json
213 213 This JSON file should contain all the information needed to connect to a cluster,
214 214 and is likely the only argument needed.
215 215 Connection information for the Hub's registration. If a json connector
216 216 file is given, then likely no further configuration is necessary.
217 217 [Default: use profile]
218 218 profile : bytes
219 219 The name of the Cluster profile to be used to find connector information.
220 220 If run from an IPython application, the default profile will be the same
221 221 as the running application, otherwise it will be 'default'.
222 222 cluster_id : str
223 223         String id to be added to runtime files, to prevent name collisions when using
224 224 multiple clusters with a single profile simultaneously.
225 225 When set, will look for files named like: 'ipcontroller-<cluster_id>-client.json'
226 226 Since this is text inserted into filenames, typical recommendations apply:
227 227 Simple character strings are ideal, and spaces are not recommended (but
228 228 should generally work)
229 229 context : zmq.Context
230 230 Pass an existing zmq.Context instance, otherwise the client will create its own.
231 231 debug : bool
232 232 flag for lots of message printing for debug purposes
233 233 timeout : int/float
234 234 time (in seconds) to wait for connection replies from the Hub
235 235 [Default: 10]
236 236
237 237 #-------------- session related args ----------------
238 238
239 239 config : Config object
240 240 If specified, this will be relayed to the Session for configuration
241 241 username : str
242 242 set username for the session object
243 243
244 244 #-------------- ssh related args ----------------
245 245 # These are args for configuring the ssh tunnel to be used
246 246 # credentials are used to forward connections over ssh to the Controller
247 247 # Note that the ip given in `addr` needs to be relative to sshserver
248 248 # The most basic case is to leave addr as pointing to localhost (127.0.0.1),
249 249 # and set sshserver as the same machine the Controller is on. However,
250 250 # the only requirement is that sshserver is able to see the Controller
251 251 # (i.e. is within the same trusted network).
252 252
253 253 sshserver : str
254 254 A string of the form passed to ssh, i.e. 'server.tld' or 'user@server.tld:port'
255 255 If keyfile or password is specified, and this is not, it will default to
256 256 the ip given in addr.
257 257 sshkey : str; path to ssh private key file
258 258 This specifies a key to be used in ssh login, default None.
259 259 Regular default ssh keys will be used without specifying this argument.
260 260 password : str
261 261 Your ssh password to sshserver. Note that if this is left None,
262 262 you will be prompted for it if passwordless key based login is unavailable.
263 263 paramiko : bool
264 264 flag for whether to use paramiko instead of shell ssh for tunneling.
265 265 [default: True on win32, False else]
266 266
267 267
268 268 Attributes
269 269 ----------
270 270
271 271 ids : list of int engine IDs
272 272 requesting the ids attribute always synchronizes
273 273 the registration state. To request ids without synchronization,
274 274 use semi-private _ids attributes.
275 275
276 276 history : list of msg_ids
277 277 a list of msg_ids, keeping track of all the execution
278 278 messages you have submitted in order.
279 279
280 280 outstanding : set of msg_ids
281 281 a set of msg_ids that have been submitted, but whose
282 282 results have not yet been received.
283 283
284 284 results : dict
285 285 a dict of all our results, keyed by msg_id
286 286
287 287 block : bool
288 288 determines default behavior when block not specified
289 289 in execution methods
290 290
291 291 Methods
292 292 -------
293 293
294 294 spin
295 295 flushes incoming results and registration state changes
296 296 control methods spin, and requesting `ids` also ensures up to date
297 297
298 298 wait
299 299 wait on one or more msg_ids
300 300
301 301 execution methods
302 302 apply
303 303 legacy: execute, run
304 304
305 305 data movement
306 306 push, pull, scatter, gather
307 307
308 308 query methods
309 309 queue_status, get_result, purge, result_status
310 310
311 311 control methods
312 312 abort, shutdown
313 313
314 314 """
315 315
316 316
    # default blocking behavior for execution methods
    block = Bool(False)
    # msg_ids submitted but not yet resolved
    outstanding = Set()
    # all received results, keyed by msg_id
    results = Instance('collections.defaultdict', (dict,))
    # per-msg_id Metadata objects
    metadata = Instance('collections.defaultdict', (Metadata,))
    # all msg_ids ever submitted, in order
    history = List()
    # print incoming/outgoing messages when True
    debug = Bool(False)
    # background spin thread state (see spin_thread/stop_spin_thread)
    _spin_thread = Any()
    _stop_spinning = Any()
325 325
326 326 profile=Unicode()
327 327 def _profile_default(self):
328 328 if BaseIPythonApplication.initialized():
329 329 # an IPython app *might* be running, try to get its profile
330 330 try:
331 331 return BaseIPythonApplication.instance().profile
332 332 except (AttributeError, MultipleInstanceError):
333 333 # could be a *different* subclass of config.Application,
334 334 # which would raise one of these two errors.
335 335 return u'default'
336 336 else:
337 337 return u'default'
338 338
339 339
    # msg_ids outstanding per engine uuid, for stranded-message handling
    _outstanding_dict = Instance('collections.defaultdict', (set,))
    # registered engine ids (ints), kept sorted
    _ids = List()
    _connected=Bool(False)
    # whether connections are tunneled over ssh
    _ssh=Bool(False)
    _context = Instance('zmq.Context')
    # connection info loaded from the url_file
    _config = Dict()
    # bidirectional engine id <-> uuid mapping
    _engines=Instance(util.ReverseDict, (), {})
    # _hub_socket=Instance('zmq.Socket')
    _query_socket=Instance('zmq.Socket')
    _control_socket=Instance('zmq.Socket')
    _iopub_socket=Instance('zmq.Socket')
    _notification_socket=Instance('zmq.Socket')
    _mux_socket=Instance('zmq.Socket')
    _task_socket=Instance('zmq.Socket')
    # 'pure' or 'lru' etc.; pure ZMQ scheduling disallows engine-id gaps
    _task_scheme=Unicode()
    _closed = False
    # counters of replies to discard without processing
    _ignored_control_replies=Integer(0)
    _ignored_hub_replies=Integer(0)
358 358
    def __new__(self, *args, **kw):
        # don't raise on positional args
        # (positional args are consumed by __init__; HasTraits.__new__ only
        # accepts keyword arguments, so *args is deliberately dropped here)
        return HasTraits.__new__(self, **kw)
362 362
    def __init__(self, url_file=None, profile=None, profile_dir=None, ipython_dir=None,
            context=None, debug=False,
            sshserver=None, sshkey=None, password=None, paramiko=None,
            timeout=10, cluster_id=None, **extra_args
            ):
        # See the class docstring for full parameter documentation.
        # Order matters throughout: profile -> url_file -> config ->
        # ssh setup -> session -> sockets -> connect -> magics.
        if profile:
            super(Client, self).__init__(debug=debug, profile=profile)
        else:
            super(Client, self).__init__(debug=debug)
        if context is None:
            context = zmq.Context.instance()
        self._context = context
        self._stop_spinning = Event()

        # backward-compat shim for the removed url_or_file argument
        if 'url_or_file' in extra_args:
            url_file = extra_args['url_or_file']
            warnings.warn("url_or_file arg no longer supported, use url_file", DeprecationWarning)

        if url_file and util.is_url(url_file):
            raise ValueError("single urls cannot be specified, url-files must be used.")

        self._setup_profile_dir(self.profile, profile_dir, ipython_dir)

        # derive the url_file from the profile dir when not given explicitly
        if self._cd is not None:
            if url_file is None:
                if not cluster_id:
                    client_json = 'ipcontroller-client.json'
                else:
                    client_json = 'ipcontroller-%s-client.json' % cluster_id
                url_file = pjoin(self._cd.security_dir, client_json)
        if url_file is None:
            raise ValueError(
                "I can't find enough information to connect to a hub!"
                " Please specify at least one of url_file or profile."
            )

        with open(url_file) as f:
            cfg = json.load(f)

        self._task_scheme = cfg['task_scheme']

        # sync defaults from args, json:
        if sshserver:
            cfg['ssh'] = sshserver

        location = cfg.setdefault('location', None)

        # resolve ambiguous addresses (e.g. 0.0.0.0) relative to `location`
        proto,addr = cfg['interface'].split('://')
        addr = util.disambiguate_ip_address(addr, location)
        cfg['interface'] = "%s://%s" % (proto, addr)

        # turn interface,port into full urls:
        for key in ('control', 'task', 'mux', 'iopub', 'notification', 'registration'):
            cfg[key] = cfg['interface'] + ':%i' % cfg[key]

        url = cfg['registration']

        if location is not None and addr == localhost():
            # location specified, and connection is expected to be local
            if not is_local_ip(location) and not sshserver:
                # load ssh from JSON *only* if the controller is not on
                # this machine
                sshserver=cfg['ssh']
            if not is_local_ip(location) and not sshserver:
                # warn if no ssh specified, but SSH is probably needed
                # This is only a warning, because the most likely cause
                # is a local Controller on a laptop whose IP is dynamic
                warnings.warn("""
            Controller appears to be listening on localhost, but not on this machine.
            If this is true, you should specify Client(...,sshserver='you@%s')
            or instruct your controller to listen on an external IP."""%location,
                RuntimeWarning)
        elif not sshserver:
            # otherwise sync with cfg
            sshserver = cfg['ssh']

        self._config = cfg

        # ssh is implied by any of sshserver/sshkey/password being set
        self._ssh = bool(sshserver or sshkey or password)
        if self._ssh and sshserver is None:
            # default to ssh via localhost
            sshserver = addr
        if self._ssh and password is None:
            if tunnel.try_passwordless_ssh(sshserver, sshkey, paramiko):
                password=False
            else:
                password = getpass("SSH Password for %s: "%sshserver)
        ssh_kwargs = dict(keyfile=sshkey, password=password, paramiko=paramiko)

        # configure and construct the session
        try:
            extra_args['packer'] = cfg['pack']
            extra_args['unpacker'] = cfg['unpack']
            extra_args['key'] = cast_bytes(cfg['key'])
            extra_args['signature_scheme'] = cfg['signature_scheme']
        except KeyError as exc:
            msg = '\n'.join([
                "Connection file is invalid (missing '{}'), possibly from an old version of IPython.",
                "If you are reusing connection files, remove them and start ipcontroller again."
            ])
            raise ValueError(msg.format(exc.message))

        self.session = Session(**extra_args)

        self._query_socket = self._context.socket(zmq.DEALER)

        if self._ssh:
            tunnel.tunnel_connection(self._query_socket, cfg['registration'], sshserver, **ssh_kwargs)
        else:
            self._query_socket.connect(cfg['registration'])

        self.session.debug = self.debug

        # dispatch tables for incoming notification / queue messages
        self._notification_handlers = {'registration_notification' : self._register_engine,
                                    'unregistration_notification' : self._unregister_engine,
                                    'shutdown_notification' : lambda msg: self.close(),
                                    }
        self._queue_handlers = {'execute_reply' : self._handle_execute_reply,
                                'apply_reply' : self._handle_apply_reply}

        # connect the remaining sockets; clean up on any failure
        try:
            self._connect(sshserver, ssh_kwargs, timeout)
        except:
            self.close(linger=0)
            raise

        # last step: setup magics, if we are in IPython:

        try:
            ip = get_ipython()
        except NameError:
            return
        else:
            if 'px' not in ip.magics_manager.magics:
                # in IPython but we are the first Client.
                # activate a default view for parallel magics.
                self.activate()
500 500
    def __del__(self):
        """cleanup sockets, but _not_ context."""
        # The zmq Context may be the shared global instance
        # (zmq.Context.instance()), so only close our own sockets here.
        self.close()
504 504
505 505 def _setup_profile_dir(self, profile, profile_dir, ipython_dir):
506 506 if ipython_dir is None:
507 507 ipython_dir = get_ipython_dir()
508 508 if profile_dir is not None:
509 509 try:
510 510 self._cd = ProfileDir.find_profile_dir(profile_dir)
511 511 return
512 512 except ProfileDirError:
513 513 pass
514 514 elif profile is not None:
515 515 try:
516 516 self._cd = ProfileDir.find_profile_dir_by_name(
517 517 ipython_dir, profile)
518 518 return
519 519 except ProfileDirError:
520 520 pass
521 521 self._cd = None
522 522
523 523 def _update_engines(self, engines):
524 524 """Update our engines dict and _ids from a dict of the form: {id:uuid}."""
525 525 for k,v in iteritems(engines):
526 526 eid = int(k)
527 527 if eid not in self._engines:
528 528 self._ids.append(eid)
529 529 self._engines[eid] = v
530 530 self._ids = sorted(self._ids)
531 531 if sorted(self._engines.keys()) != list(range(len(self._engines))) and \
532 532 self._task_scheme == 'pure' and self._task_socket:
533 533 self._stop_scheduling_tasks()
534 534
535 535 def _stop_scheduling_tasks(self):
536 536 """Stop scheduling tasks because an engine has been unregistered
537 537 from a pure ZMQ scheduler.
538 538 """
539 539 self._task_socket.close()
540 540 self._task_socket = None
541 541 msg = "An engine has been unregistered, and we are using pure " +\
542 542 "ZMQ task scheduling. Task farming will be disabled."
543 543 if self.outstanding:
544 544 msg += " If you were running tasks when this happened, " +\
545 545 "some `outstanding` msg_ids may never resolve."
546 546 warnings.warn(msg, RuntimeWarning)
547 547
    def _build_targets(self, targets):
        """Turn valid target IDs or 'all' into two lists:
        (int_ids, uuids).

        Accepts None (all engines), 'all', a single int (negative indexes
        from the end), a slice, or a collection of ints.  Raises TypeError
        for invalid specs, IndexError for unknown engine ids, and
        NoEnginesRegistered when no engines are available.
        """
        if not self._ids:
            # flush notification socket if no engines yet, just in case
            if not self.ids:
                raise error.NoEnginesRegistered("Can't build targets without any engines")

        if targets is None:
            targets = self._ids
        elif isinstance(targets, string_types):
            if targets.lower() == 'all':
                targets = self._ids
            else:
                raise TypeError("%r not valid str target, must be 'all'"%(targets))
        elif isinstance(targets, int):
            # negative ints index from the end of the registered id list
            if targets < 0:
                targets = self.ids[targets]
            if targets not in self._ids:
                raise IndexError("No such engine: %i"%targets)
            targets = [targets]

        if isinstance(targets, slice):
            # expand the slice against the current id list
            indices = list(range(len(self._ids))[targets])
            ids = self.ids
            targets = [ ids[i] for i in indices ]

        if not isinstance(targets, (tuple, list, xrange)):
            raise TypeError("targets by int/slice/collection of ints only, not %s"%(type(targets)))

        return [cast_bytes(self._engines[t]) for t in targets], list(targets)
580 580
    def _connect(self, sshserver, ssh_kwargs, timeout):
        """setup all our socket connections to the cluster. This is called from
        __init__.

        Sends a connection_request to the Hub over the query socket, waits up
        to `timeout` seconds for the reply, then connects the mux/task/
        notification/control/iopub sockets from the returned config.
        """

        # Maybe allow reconnecting?
        if self._connected:
            return
        self._connected=True

        def connect_socket(s, url):
            # tunnel over ssh when configured, plain connect otherwise
            if self._ssh:
                return tunnel.tunnel_connection(s, url, sshserver, **ssh_kwargs)
            else:
                return s.connect(url)

        self.session.send(self._query_socket, 'connection_request')
        # use Poller because zmq.select has wrong units in pyzmq 2.1.7
        poller = zmq.Poller()
        poller.register(self._query_socket, zmq.POLLIN)
        # poll expects milliseconds, timeout is seconds
        evts = poller.poll(timeout*1000)
        if not evts:
            raise error.TimeoutError("Hub connection request timed out")
        idents,msg = self.session.recv(self._query_socket,mode=0)
        if self.debug:
            pprint(msg)
        content = msg['content']
        # self._config['registration'] = dict(content)
        cfg = self._config
        if content['status'] == 'ok':
            self._mux_socket = self._context.socket(zmq.DEALER)
            connect_socket(self._mux_socket, cfg['mux'])

            self._task_socket = self._context.socket(zmq.DEALER)
            connect_socket(self._task_socket, cfg['task'])

            # subscribe to everything on the SUB sockets
            self._notification_socket = self._context.socket(zmq.SUB)
            self._notification_socket.setsockopt(zmq.SUBSCRIBE, b'')
            connect_socket(self._notification_socket, cfg['notification'])

            self._control_socket = self._context.socket(zmq.DEALER)
            connect_socket(self._control_socket, cfg['control'])

            self._iopub_socket = self._context.socket(zmq.SUB)
            self._iopub_socket.setsockopt(zmq.SUBSCRIBE, b'')
            connect_socket(self._iopub_socket, cfg['iopub'])

            self._update_engines(dict(content['engines']))
        else:
            self._connected = False
            raise Exception("Failed to connect!")
632 632
633 633 #--------------------------------------------------------------------------
634 634 # handlers and callbacks for incoming messages
635 635 #--------------------------------------------------------------------------
636 636
637 637 def _unwrap_exception(self, content):
638 638 """unwrap exception, and remap engine_id to int."""
639 639 e = error.unwrap_exception(content)
640 640 # print e.traceback
641 641 if e.engine_info:
642 642 e_uuid = e.engine_info['engine_uuid']
643 643 eid = self._engines[e_uuid]
644 644 e.engine_info['engine_id'] = eid
645 645 return e
646 646
647 647 def _extract_metadata(self, msg):
648 648 header = msg['header']
649 649 parent = msg['parent_header']
650 650 msg_meta = msg['metadata']
651 651 content = msg['content']
652 652 md = {'msg_id' : parent['msg_id'],
653 653 'received' : datetime.now(),
654 654 'engine_uuid' : msg_meta.get('engine', None),
655 655 'follow' : msg_meta.get('follow', []),
656 656 'after' : msg_meta.get('after', []),
657 657 'status' : content['status'],
658 658 }
659 659
660 660 if md['engine_uuid'] is not None:
661 661 md['engine_id'] = self._engines.get(md['engine_uuid'], None)
662 662
663 663 if 'date' in parent:
664 664 md['submitted'] = parent['date']
665 665 if 'started' in msg_meta:
666 666 md['started'] = parse_date(msg_meta['started'])
667 667 if 'date' in header:
668 668 md['completed'] = header['date']
669 669 return md
670 670
671 671 def _register_engine(self, msg):
672 672 """Register a new engine, and update our connection info."""
673 673 content = msg['content']
674 674 eid = content['id']
675 675 d = {eid : content['uuid']}
676 676 self._update_engines(d)
677 677
678 678 def _unregister_engine(self, msg):
679 679 """Unregister an engine that has died."""
680 680 content = msg['content']
681 681 eid = int(content['id'])
682 682 if eid in self._ids:
683 683 self._ids.remove(eid)
684 684 uuid = self._engines.pop(eid)
685 685
686 686 self._handle_stranded_msgs(eid, uuid)
687 687
688 688 if self._task_socket and self._task_scheme == 'pure':
689 689 self._stop_scheduling_tasks()
690 690
691 691 def _handle_stranded_msgs(self, eid, uuid):
692 692 """Handle messages known to be on an engine when the engine unregisters.
693 693
694 694 It is possible that this will fire prematurely - that is, an engine will
695 695 go down after completing a result, and the client will be notified
696 696 of the unregistration and later receive the successful result.
697 697 """
698 698
699 699 outstanding = self._outstanding_dict[uuid]
700 700
701 701 for msg_id in list(outstanding):
702 702 if msg_id in self.results:
703 703 # we already
704 704 continue
705 705 try:
706 706 raise error.EngineError("Engine %r died while running task %r"%(eid, msg_id))
707 707 except:
708 708 content = error.wrap_exception()
709 709 # build a fake message:
710 710 msg = self.session.msg('apply_reply', content=content)
711 711 msg['parent_header']['msg_id'] = msg_id
712 712 msg['metadata']['engine'] = uuid
713 713 self._handle_apply_reply(msg)
714 714
    def _handle_execute_reply(self, msg):
        """Save the reply to an execute_request into our results.

        execute messages are never actually used. apply is used instead.
        """

        parent = msg['parent_header']
        msg_id = parent['msg_id']
        if msg_id not in self.outstanding:
            if msg_id in self.history:
                print("got stale result: %s"%msg_id)
            else:
                print("got unknown result: %s"%msg_id)
        else:
            self.outstanding.remove(msg_id)

        content = msg['content']
        header = msg['header']

        # construct metadata:
        md = self.metadata[msg_id]
        md.update(self._extract_metadata(msg))
        # is this redundant?
        self.metadata[msg_id] = md

        # clear this msg_id from the per-engine outstanding set
        e_outstanding = self._outstanding_dict[md['engine_uuid']]
        if msg_id in e_outstanding:
            e_outstanding.remove(msg_id)

        # construct result:
        if content['status'] == 'ok':
            self.results[msg_id] = ExecuteReply(msg_id, content, md)
        elif content['status'] == 'aborted':
            self.results[msg_id] = error.TaskAborted(msg_id)
        elif content['status'] == 'resubmitted':
            # TODO: handle resubmission
            pass
        else:
            # error status: store the remote exception object
            self.results[msg_id] = self._unwrap_exception(content)
754 754
    def _handle_apply_reply(self, msg):
        """Save the reply to an apply_request into our results."""
        parent = msg['parent_header']
        msg_id = parent['msg_id']
        if msg_id not in self.outstanding:
            if msg_id in self.history:
                print("got stale result: %s"%msg_id)
                print(self.results[msg_id])
                print(msg)
            else:
                print("got unknown result: %s"%msg_id)
        else:
            self.outstanding.remove(msg_id)
        content = msg['content']
        header = msg['header']

        # construct metadata:
        md = self.metadata[msg_id]
        md.update(self._extract_metadata(msg))
        # is this redundant?
        self.metadata[msg_id] = md

        # clear this msg_id from the per-engine outstanding set
        e_outstanding = self._outstanding_dict[md['engine_uuid']]
        if msg_id in e_outstanding:
            e_outstanding.remove(msg_id)

        # construct result:
        if content['status'] == 'ok':
            # the actual return value travels in the message buffers
            self.results[msg_id] = serialize.unserialize_object(msg['buffers'])[0]
        elif content['status'] == 'aborted':
            self.results[msg_id] = error.TaskAborted(msg_id)
        elif content['status'] == 'resubmitted':
            # TODO: handle resubmission
            pass
        else:
            # error status: store the remote exception object
            self.results[msg_id] = self._unwrap_exception(content)
791 791
792 792 def _flush_notifications(self):
793 793 """Flush notifications of engine registrations waiting
794 794 in ZMQ queue."""
795 795 idents,msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK)
796 796 while msg is not None:
797 797 if self.debug:
798 798 pprint(msg)
799 799 msg_type = msg['header']['msg_type']
800 800 handler = self._notification_handlers.get(msg_type, None)
801 801 if handler is None:
802 802 raise Exception("Unhandled message type: %s" % msg_type)
803 803 else:
804 804 handler(msg)
805 805 idents,msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK)
806 806
807 807 def _flush_results(self, sock):
808 808 """Flush task or queue results waiting in ZMQ queue."""
809 809 idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
810 810 while msg is not None:
811 811 if self.debug:
812 812 pprint(msg)
813 813 msg_type = msg['header']['msg_type']
814 814 handler = self._queue_handlers.get(msg_type, None)
815 815 if handler is None:
816 816 raise Exception("Unhandled message type: %s" % msg_type)
817 817 else:
818 818 handler(msg)
819 819 idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
820 820
821 821 def _flush_control(self, sock):
822 822 """Flush replies from the control channel waiting
823 823 in the ZMQ queue.
824 824
825 825 Currently: ignore them."""
826 826 if self._ignored_control_replies <= 0:
827 827 return
828 828 idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
829 829 while msg is not None:
830 830 self._ignored_control_replies -= 1
831 831 if self.debug:
832 832 pprint(msg)
833 833 idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
834 834
    def _flush_ignored_control(self):
        """flush ignored control replies"""
        # Blocking recv is fine here: the counter tracks exactly how many
        # replies are owed on the control socket.
        while self._ignored_control_replies > 0:
            self.session.recv(self._control_socket)
            self._ignored_control_replies -= 1
840 840
841 841 def _flush_ignored_hub_replies(self):
842 842 ident,msg = self.session.recv(self._query_socket, mode=zmq.NOBLOCK)
843 843 while msg is not None:
844 844 ident,msg = self.session.recv(self._query_socket, mode=zmq.NOBLOCK)
845 845
    def _flush_iopub(self, sock):
        """Flush replies from the iopub channel waiting
        in the ZMQ queue.

        Accumulates stdout/stderr, display outputs, execute results,
        and errors into the per-msg_id Metadata objects.
        """
        idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
        while msg is not None:
            if self.debug:
                pprint(msg)
            parent = msg['parent_header']
            # ignore IOPub messages with no parent.
            # Caused by print statements or warnings from before the first execution.
            if not parent:
                idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
                continue
            msg_id = parent['msg_id']
            content = msg['content']
            header = msg['header']
            msg_type = msg['header']['msg_type']

            # init metadata:
            md = self.metadata[msg_id]

            if msg_type == 'stream':
                # append stream data to the accumulated stdout/stderr
                name = content['name']
                s = md[name] or ''
                md[name] = s + content['data']
            elif msg_type == 'error':
                md.update({'error' : self._unwrap_exception(content)})
            elif msg_type == 'execute_input':
                md.update({'execute_input' : content['code']})
            elif msg_type == 'display_data':
                md['outputs'].append(content)
            elif msg_type == 'execute_result':
                md['execute_result'] = content
            elif msg_type == 'data_message':
                data, remainder = serialize.unserialize_object(msg['buffers'])
                md['data'].update(data)
            elif msg_type == 'status':
                # idle message comes after all outputs
                if content['execution_state'] == 'idle':
                    md['outputs_ready'] = True
            else:
                # unhandled msg_type (status, etc.)
                pass

            # redundant?
            self.metadata[msg_id] = md

            idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
895 895
896 896 #--------------------------------------------------------------------------
897 897 # len, getitem
898 898 #--------------------------------------------------------------------------
899 899
900 900 def __len__(self):
901 901 """len(client) returns # of engines."""
902 902 return len(self.ids)
903 903
904 904 def __getitem__(self, key):
905 905 """index access returns DirectView multiplexer objects
906 906
907 907 Must be int, slice, or list/tuple/xrange of ints"""
908 908 if not isinstance(key, (int, slice, tuple, list, xrange)):
909 909 raise TypeError("key by int/slice/iterable of ints only, not %s"%(type(key)))
910 910 else:
911 911 return self.direct_view(key)
912 912
913 913 def __iter__(self):
914 914 """Since we define getitem, Client is iterable
915 915
916 916 but unless we also define __iter__, it won't work correctly unless engine IDs
917 917 start at zero and are continuous.
918 918 """
919 919 for eid in self.ids:
920 920 yield self.direct_view(eid)
921 921
922 922 #--------------------------------------------------------------------------
923 923 # Begin public methods
924 924 #--------------------------------------------------------------------------
925 925
926 926 @property
927 927 def ids(self):
928 928 """Always up-to-date ids property."""
929 929 self._flush_notifications()
930 930 # always copy:
931 931 return list(self._ids)
932 932
933 933 def activate(self, targets='all', suffix=''):
934 934 """Create a DirectView and register it with IPython magics
935 935
936 936 Defines the magics `%px, %autopx, %pxresult, %%px`
937 937
938 938 Parameters
939 939 ----------
940 940
941 941 targets: int, list of ints, or 'all'
942 942 The engines on which the view's magics will run
943 943 suffix: str [default: '']
944 944 The suffix, if any, for the magics. This allows you to have
945 945 multiple views associated with parallel magics at the same time.
946 946
947 947 e.g. ``rc.activate(targets=0, suffix='0')`` will give you
948 948 the magics ``%px0``, ``%pxresult0``, etc. for running magics just
949 949 on engine 0.
950 950 """
951 951 view = self.direct_view(targets)
952 952 view.block = True
953 953 view.activate(suffix)
954 954 return view
955 955
956 956 def close(self, linger=None):
957 957 """Close my zmq Sockets
958 958
959 959 If `linger`, set the zmq LINGER socket option,
960 960 which allows discarding of messages.
961 961 """
962 962 if self._closed:
963 963 return
964 964 self.stop_spin_thread()
965 965 snames = [ trait for trait in self.trait_names() if trait.endswith("socket") ]
966 966 for name in snames:
967 967 socket = getattr(self, name)
968 968 if socket is not None and not socket.closed:
969 969 if linger is not None:
970 970 socket.close(linger=linger)
971 971 else:
972 972 socket.close()
973 973 self._closed = True
974 974
975 975 def _spin_every(self, interval=1):
976 976 """target func for use in spin_thread"""
977 977 while True:
978 978 if self._stop_spinning.is_set():
979 979 return
980 980 time.sleep(interval)
981 981 self.spin()
982 982
983 983 def spin_thread(self, interval=1):
984 984 """call Client.spin() in a background thread on some regular interval
985 985
986 986 This helps ensure that messages don't pile up too much in the zmq queue
987 987 while you are working on other things, or just leaving an idle terminal.
988 988
989 989 It also helps limit potential padding of the `received` timestamp
990 990 on AsyncResult objects, used for timings.
991 991
992 992 Parameters
993 993 ----------
994 994
995 995 interval : float, optional
996 996 The interval on which to spin the client in the background thread
997 997 (simply passed to time.sleep).
998 998
999 999 Notes
1000 1000 -----
1001 1001
1002 1002 For precision timing, you may want to use this method to put a bound
1003 1003 on the jitter (in seconds) in `received` timestamps used
1004 1004 in AsyncResult.wall_time.
1005 1005
1006 1006 """
1007 1007 if self._spin_thread is not None:
1008 1008 self.stop_spin_thread()
1009 1009 self._stop_spinning.clear()
1010 1010 self._spin_thread = Thread(target=self._spin_every, args=(interval,))
1011 1011 self._spin_thread.daemon = True
1012 1012 self._spin_thread.start()
1013 1013
1014 1014 def stop_spin_thread(self):
1015 1015 """stop background spin_thread, if any"""
1016 1016 if self._spin_thread is not None:
1017 1017 self._stop_spinning.set()
1018 1018 self._spin_thread.join()
1019 1019 self._spin_thread = None
1020 1020
1021 1021 def spin(self):
1022 1022 """Flush any registration notifications and execution results
1023 1023 waiting in the ZMQ queue.
1024 1024 """
1025 1025 if self._notification_socket:
1026 1026 self._flush_notifications()
1027 1027 if self._iopub_socket:
1028 1028 self._flush_iopub(self._iopub_socket)
1029 1029 if self._mux_socket:
1030 1030 self._flush_results(self._mux_socket)
1031 1031 if self._task_socket:
1032 1032 self._flush_results(self._task_socket)
1033 1033 if self._control_socket:
1034 1034 self._flush_control(self._control_socket)
1035 1035 if self._query_socket:
1036 1036 self._flush_ignored_hub_replies()
1037 1037
    def wait(self, jobs=None, timeout=-1):
        """waits on one or more `jobs`, for up to `timeout` seconds.

        Parameters
        ----------

        jobs : int, str, or list of ints and/or strs, or one or more AsyncResult objects
            ints are indices to self.history
            strs are msg_ids
            default: wait on all outstanding messages
        timeout : float
            a time in seconds, after which to give up.
            default is -1, which means no timeout

        Returns
        -------

        True : when all msg_ids are done
        False : timeout reached, some msg_ids still outstanding
        """
        tic = time.time()
        if jobs is None:
            # wait on everything still outstanding
            theids = self.outstanding
        else:
            # normalize a single job into a list
            if isinstance(jobs, string_types + (int, AsyncResult)):
                jobs = [jobs]
            theids = set()
            for job in jobs:
                if isinstance(job, int):
                    # index access
                    job = self.history[job]
                elif isinstance(job, AsyncResult):
                    theids.update(job.msg_ids)
                    continue
                theids.add(job)
        if not theids.intersection(self.outstanding):
            # nothing to wait for
            return True
        self.spin()
        # poll at ~1ms granularity, flushing queues on each pass
        while theids.intersection(self.outstanding):
            if timeout >= 0 and ( time.time()-tic ) > timeout:
                break
            time.sleep(1e-3)
            self.spin()
        return len(theids.intersection(self.outstanding)) == 0
1082 1082
1083 1083 #--------------------------------------------------------------------------
1084 1084 # Control methods
1085 1085 #--------------------------------------------------------------------------
1086 1086
    @spin_first
    def clear(self, targets=None, block=None):
        """Clear the namespace in target(s).

        Sends a 'clear_request' to each target engine over the control
        channel.  If blocking, waits for every reply and raises the first
        error encountered; otherwise the replies are flushed later.
        """
        block = self.block if block is None else block
        targets = self._build_targets(targets)[0]
        for t in targets:
            self.session.send(self._control_socket, 'clear_request', content={}, ident=t)
        error = False
        if block:
            # drain replies from any previous non-blocking control requests first
            self._flush_ignored_control()
            for i in range(len(targets)):
                idents,msg = self.session.recv(self._control_socket,0)
                if self.debug:
                    pprint(msg)
                if msg['content']['status'] != 'ok':
                    error = self._unwrap_exception(msg['content'])
        else:
            # remember how many replies to silently discard later
            self._ignored_control_replies += len(targets)
        if error:
            raise error
1107 1107
1108 1108
    @spin_first
    def abort(self, jobs=None, targets=None, block=None):
        """Abort specific jobs from the execution queues of target(s).

        This is a mechanism to prevent jobs that have already been submitted
        from executing.

        Parameters
        ----------

        jobs : msg_id, list of msg_ids, or AsyncResult
            The jobs to be aborted

            If unspecified/None: abort all outstanding jobs.

        Raises
        ------
        TypeError : if any job is not a str msg_id or AsyncResult
        """
        block = self.block if block is None else block
        jobs = jobs if jobs is not None else list(self.outstanding)
        targets = self._build_targets(targets)[0]

        # expand jobs into a flat list of msg_ids, validating types first
        msg_ids = []
        if isinstance(jobs, string_types + (AsyncResult,)):
            jobs = [jobs]
        bad_ids = [obj for obj in jobs if not isinstance(obj, string_types + (AsyncResult,))]
        if bad_ids:
            raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0])
        for j in jobs:
            if isinstance(j, AsyncResult):
                msg_ids.extend(j.msg_ids)
            else:
                msg_ids.append(j)
        content = dict(msg_ids=msg_ids)
        for t in targets:
            self.session.send(self._control_socket, 'abort_request',
                    content=content, ident=t)
        error = False
        if block:
            # drain replies from any previous non-blocking control requests first
            self._flush_ignored_control()
            for i in range(len(targets)):
                idents,msg = self.session.recv(self._control_socket,0)
                if self.debug:
                    pprint(msg)
                if msg['content']['status'] != 'ok':
                    error = self._unwrap_exception(msg['content'])
        else:
            # remember how many replies to silently discard later
            self._ignored_control_replies += len(targets)
        if error:
            raise error
1157 1157
    @spin_first
    def shutdown(self, targets='all', restart=False, hub=False, block=None):
        """Terminates one or more engine processes, optionally including the hub.

        Parameters
        ----------

        targets: list of ints or 'all' [default: all]
            Which engines to shutdown.
        hub: bool [default: False]
            Whether to include the Hub.  hub=True implies targets='all'.
        block: bool [default: self.block]
            Whether to wait for clean shutdown replies or not.
        restart: bool [default: False]
            NOT IMPLEMENTED
            whether to restart engines after shutting them down.
        """
        from IPython.parallel.error import NoEnginesRegistered
        if restart:
            raise NotImplementedError("Engine restart is not yet implemented")

        block = self.block if block is None else block
        if hub:
            # shutting down the hub takes everything with it
            targets = 'all'
        try:
            targets = self._build_targets(targets)[0]
        except NoEnginesRegistered:
            # no engines to shut down; may still shut down the hub below
            targets = []
        for t in targets:
            self.session.send(self._control_socket, 'shutdown_request',
                        content={'restart':restart},ident=t)
        error = False
        # hub shutdown requires waiting for engine replies, even if block=False
        if block or hub:
            self._flush_ignored_control()
            for i in range(len(targets)):
                idents,msg = self.session.recv(self._control_socket, 0)
                if self.debug:
                    pprint(msg)
                if msg['content']['status'] != 'ok':
                    error = self._unwrap_exception(msg['content'])
        else:
            self._ignored_control_replies += len(targets)

        if hub:
            # brief pause so engine shutdowns land before the hub goes away
            time.sleep(0.25)
            self.session.send(self._query_socket, 'shutdown_request')
            idents,msg = self.session.recv(self._query_socket, 0)
            if self.debug:
                pprint(msg)
            if msg['content']['status'] != 'ok':
                error = self._unwrap_exception(msg['content'])

        if error:
            raise error
1212 1212
1213 1213 #--------------------------------------------------------------------------
1214 1214 # Execution related methods
1215 1215 #--------------------------------------------------------------------------
1216 1216
1217 1217 def _maybe_raise(self, result):
1218 1218 """wrapper for maybe raising an exception if apply failed."""
1219 1219 if isinstance(result, error.RemoteError):
1220 1220 raise result
1221 1221
1222 1222 return result
1223 1223
    def send_apply_request(self, socket, f, args=None, kwargs=None, metadata=None, track=False,
                            ident=None):
        """construct and send an apply message via a socket.

        This is the principal method with which all engine execution is performed by views.

        Serializes ``f(*args, **kwargs)`` into message buffers, sends an
        'apply_request' on *socket* (optionally routed to *ident*), and
        records the new msg_id as outstanding.  Returns the sent message.
        """

        if self._closed:
            raise RuntimeError("Client cannot be used after its sockets have been closed")

        # defaults:
        args = args if args is not None else []
        kwargs = kwargs if kwargs is not None else {}
        metadata = metadata if metadata is not None else {}

        # validate arguments
        if not callable(f) and not isinstance(f, Reference):
            raise TypeError("f must be callable, not %s"%type(f))
        if not isinstance(args, (tuple, list)):
            raise TypeError("args must be tuple or list, not %s"%type(args))
        if not isinstance(kwargs, dict):
            raise TypeError("kwargs must be dict, not %s"%type(kwargs))
        if not isinstance(metadata, dict):
            raise TypeError("metadata must be dict, not %s"%type(metadata))

        bufs = serialize.pack_apply_message(f, args, kwargs,
            buffer_threshold=self.session.buffer_threshold,
            item_threshold=self.session.item_threshold,
        )

        msg = self.session.send(socket, "apply_request", buffers=bufs, ident=ident,
                            metadata=metadata, track=track)

        msg_id = msg['header']['msg_id']
        self.outstanding.add(msg_id)
        if ident:
            # possibly routed to a specific engine
            if isinstance(ident, list):
                ident = ident[-1]
            if ident in self._engines.values():
                # save for later, in case of engine death
                self._outstanding_dict[ident].add(msg_id)
        self.history.append(msg_id)
        self.metadata[msg_id]['submitted'] = datetime.now()

        return msg
1270 1270
    def send_execute_request(self, socket, code, silent=True, metadata=None, ident=None):
        """construct and send an execute request via a socket.

        Sends an 'execute_request' message for *code* on *socket*
        (optionally routed to *ident*), records the new msg_id as
        outstanding, and returns the sent message.
        """

        if self._closed:
            raise RuntimeError("Client cannot be used after its sockets have been closed")

        # defaults:
        metadata = metadata if metadata is not None else {}

        # validate arguments
        if not isinstance(code, string_types):
            raise TypeError("code must be text, not %s" % type(code))
        if not isinstance(metadata, dict):
            raise TypeError("metadata must be dict, not %s" % type(metadata))

        content = dict(code=code, silent=bool(silent), user_expressions={})


        msg = self.session.send(socket, "execute_request", content=content, ident=ident,
                            metadata=metadata)

        msg_id = msg['header']['msg_id']
        self.outstanding.add(msg_id)
        if ident:
            # possibly routed to a specific engine
            if isinstance(ident, list):
                ident = ident[-1]
            if ident in self._engines.values():
                # save for later, in case of engine death
                self._outstanding_dict[ident].add(msg_id)
        self.history.append(msg_id)
        self.metadata[msg_id]['submitted'] = datetime.now()

        return msg
1307 1307
1308 1308 #--------------------------------------------------------------------------
1309 1309 # construct a View object
1310 1310 #--------------------------------------------------------------------------
1311 1311
1312 1312 def load_balanced_view(self, targets=None):
1313 1313 """construct a DirectView object.
1314 1314
1315 1315 If no arguments are specified, create a LoadBalancedView
1316 1316 using all engines.
1317 1317
1318 1318 Parameters
1319 1319 ----------
1320 1320
1321 1321 targets: list,slice,int,etc. [default: use all engines]
1322 1322 The subset of engines across which to load-balance
1323 1323 """
1324 1324 if targets == 'all':
1325 1325 targets = None
1326 1326 if targets is not None:
1327 1327 targets = self._build_targets(targets)[1]
1328 1328 return LoadBalancedView(client=self, socket=self._task_socket, targets=targets)
1329 1329
1330 1330 def direct_view(self, targets='all'):
1331 1331 """construct a DirectView object.
1332 1332
1333 1333 If no targets are specified, create a DirectView using all engines.
1334 1334
1335 1335 rc.direct_view('all') is distinguished from rc[:] in that 'all' will
1336 1336 evaluate the target engines at each execution, whereas rc[:] will connect to
1337 1337 all *current* engines, and that list will not change.
1338 1338
1339 1339 That is, 'all' will always use all engines, whereas rc[:] will not use
1340 1340 engines added after the DirectView is constructed.
1341 1341
1342 1342 Parameters
1343 1343 ----------
1344 1344
1345 1345 targets: list,slice,int,etc. [default: use all engines]
1346 1346 The engines to use for the View
1347 1347 """
1348 1348 single = isinstance(targets, int)
1349 1349 # allow 'all' to be lazily evaluated at each execution
1350 1350 if targets != 'all':
1351 1351 targets = self._build_targets(targets)[1]
1352 1352 if single:
1353 1353 targets = targets[0]
1354 1354 return DirectView(client=self, socket=self._mux_socket, targets=targets)
1355 1355
1356 1356 #--------------------------------------------------------------------------
1357 1357 # Query methods
1358 1358 #--------------------------------------------------------------------------
1359 1359
1360 1360 @spin_first
1361 1361 def get_result(self, indices_or_msg_ids=None, block=None):
1362 1362 """Retrieve a result by msg_id or history index, wrapped in an AsyncResult object.
1363 1363
1364 1364 If the client already has the results, no request to the Hub will be made.
1365 1365
1366 1366 This is a convenient way to construct AsyncResult objects, which are wrappers
1367 1367 that include metadata about execution, and allow for awaiting results that
1368 1368 were not submitted by this Client.
1369 1369
1370 1370 It can also be a convenient way to retrieve the metadata associated with
1371 1371 blocking execution, since it always retrieves
1372 1372
1373 1373 Examples
1374 1374 --------
1375 1375 ::
1376 1376
1377 1377 In [10]: r = client.apply()
1378 1378
1379 1379 Parameters
1380 1380 ----------
1381 1381
1382 1382 indices_or_msg_ids : integer history index, str msg_id, or list of either
1383 1383 The indices or msg_ids of indices to be retrieved
1384 1384
1385 1385 block : bool
1386 1386 Whether to wait for the result to be done
1387 1387
1388 1388 Returns
1389 1389 -------
1390 1390
1391 1391 AsyncResult
1392 1392 A single AsyncResult object will always be returned.
1393 1393
1394 1394 AsyncHubResult
1395 1395 A subclass of AsyncResult that retrieves results from the Hub
1396 1396
1397 1397 """
1398 1398 block = self.block if block is None else block
1399 1399 if indices_or_msg_ids is None:
1400 1400 indices_or_msg_ids = -1
1401 1401
1402 1402 single_result = False
1403 1403 if not isinstance(indices_or_msg_ids, (list,tuple)):
1404 1404 indices_or_msg_ids = [indices_or_msg_ids]
1405 1405 single_result = True
1406 1406
1407 1407 theids = []
1408 1408 for id in indices_or_msg_ids:
1409 1409 if isinstance(id, int):
1410 1410 id = self.history[id]
1411 1411 if not isinstance(id, string_types):
1412 1412 raise TypeError("indices must be str or int, not %r"%id)
1413 1413 theids.append(id)
1414 1414
1415 1415 local_ids = [msg_id for msg_id in theids if (msg_id in self.outstanding or msg_id in self.results)]
1416 1416 remote_ids = [msg_id for msg_id in theids if msg_id not in local_ids]
1417 1417
1418 1418 # given single msg_id initially, get_result shot get the result itself,
1419 1419 # not a length-one list
1420 1420 if single_result:
1421 1421 theids = theids[0]
1422 1422
1423 1423 if remote_ids:
1424 1424 ar = AsyncHubResult(self, msg_ids=theids)
1425 1425 else:
1426 1426 ar = AsyncResult(self, msg_ids=theids)
1427 1427
1428 1428 if block:
1429 1429 ar.wait()
1430 1430
1431 1431 return ar
1432 1432
1433 1433 @spin_first
1434 1434 def resubmit(self, indices_or_msg_ids=None, metadata=None, block=None):
1435 1435 """Resubmit one or more tasks.
1436 1436
1437 1437 in-flight tasks may not be resubmitted.
1438 1438
1439 1439 Parameters
1440 1440 ----------
1441 1441
1442 1442 indices_or_msg_ids : integer history index, str msg_id, or list of either
1443 1443 The indices or msg_ids of indices to be retrieved
1444 1444
1445 1445 block : bool
1446 1446 Whether to wait for the result to be done
1447 1447
1448 1448 Returns
1449 1449 -------
1450 1450
1451 1451 AsyncHubResult
1452 1452 A subclass of AsyncResult that retrieves results from the Hub
1453 1453
1454 1454 """
1455 1455 block = self.block if block is None else block
1456 1456 if indices_or_msg_ids is None:
1457 1457 indices_or_msg_ids = -1
1458 1458
1459 1459 if not isinstance(indices_or_msg_ids, (list,tuple)):
1460 1460 indices_or_msg_ids = [indices_or_msg_ids]
1461 1461
1462 1462 theids = []
1463 1463 for id in indices_or_msg_ids:
1464 1464 if isinstance(id, int):
1465 1465 id = self.history[id]
1466 1466 if not isinstance(id, string_types):
1467 1467 raise TypeError("indices must be str or int, not %r"%id)
1468 1468 theids.append(id)
1469 1469
1470 1470 content = dict(msg_ids = theids)
1471 1471
1472 1472 self.session.send(self._query_socket, 'resubmit_request', content)
1473 1473
1474 1474 zmq.select([self._query_socket], [], [])
1475 1475 idents,msg = self.session.recv(self._query_socket, zmq.NOBLOCK)
1476 1476 if self.debug:
1477 1477 pprint(msg)
1478 1478 content = msg['content']
1479 1479 if content['status'] != 'ok':
1480 1480 raise self._unwrap_exception(content)
1481 1481 mapping = content['resubmitted']
1482 1482 new_ids = [ mapping[msg_id] for msg_id in theids ]
1483 1483
1484 1484 ar = AsyncHubResult(self, msg_ids=new_ids)
1485 1485
1486 1486 if block:
1487 1487 ar.wait()
1488 1488
1489 1489 return ar
1490 1490
1491 1491 @spin_first
1492 1492 def result_status(self, msg_ids, status_only=True):
1493 1493 """Check on the status of the result(s) of the apply request with `msg_ids`.
1494 1494
1495 1495 If status_only is False, then the actual results will be retrieved, else
1496 1496 only the status of the results will be checked.
1497 1497
1498 1498 Parameters
1499 1499 ----------
1500 1500
1501 1501 msg_ids : list of msg_ids
1502 1502 if int:
1503 1503 Passed as index to self.history for convenience.
1504 1504 status_only : bool (default: True)
1505 1505 if False:
1506 1506 Retrieve the actual results of completed tasks.
1507 1507
1508 1508 Returns
1509 1509 -------
1510 1510
1511 1511 results : dict
1512 1512 There will always be the keys 'pending' and 'completed', which will
1513 1513 be lists of msg_ids that are incomplete or complete. If `status_only`
1514 1514 is False, then completed results will be keyed by their `msg_id`.
1515 1515 """
1516 1516 if not isinstance(msg_ids, (list,tuple)):
1517 1517 msg_ids = [msg_ids]
1518 1518
1519 1519 theids = []
1520 1520 for msg_id in msg_ids:
1521 1521 if isinstance(msg_id, int):
1522 1522 msg_id = self.history[msg_id]
1523 1523 if not isinstance(msg_id, string_types):
1524 1524 raise TypeError("msg_ids must be str, not %r"%msg_id)
1525 1525 theids.append(msg_id)
1526 1526
1527 1527 completed = []
1528 1528 local_results = {}
1529 1529
1530 1530 # comment this block out to temporarily disable local shortcut:
1531 1531 for msg_id in theids:
1532 1532 if msg_id in self.results:
1533 1533 completed.append(msg_id)
1534 1534 local_results[msg_id] = self.results[msg_id]
1535 1535 theids.remove(msg_id)
1536 1536
1537 1537 if theids: # some not locally cached
1538 1538 content = dict(msg_ids=theids, status_only=status_only)
1539 1539 msg = self.session.send(self._query_socket, "result_request", content=content)
1540 1540 zmq.select([self._query_socket], [], [])
1541 1541 idents,msg = self.session.recv(self._query_socket, zmq.NOBLOCK)
1542 1542 if self.debug:
1543 1543 pprint(msg)
1544 1544 content = msg['content']
1545 1545 if content['status'] != 'ok':
1546 1546 raise self._unwrap_exception(content)
1547 1547 buffers = msg['buffers']
1548 1548 else:
1549 1549 content = dict(completed=[],pending=[])
1550 1550
1551 1551 content['completed'].extend(completed)
1552 1552
1553 1553 if status_only:
1554 1554 return content
1555 1555
1556 1556 failures = []
1557 1557 # load cached results into result:
1558 1558 content.update(local_results)
1559 1559
1560 1560 # update cache with results:
1561 1561 for msg_id in sorted(theids):
1562 1562 if msg_id in content['completed']:
1563 1563 rec = content[msg_id]
1564 1564 parent = extract_dates(rec['header'])
1565 1565 header = extract_dates(rec['result_header'])
1566 1566 rcontent = rec['result_content']
1567 1567 iodict = rec['io']
1568 1568 if isinstance(rcontent, str):
1569 1569 rcontent = self.session.unpack(rcontent)
1570 1570
1571 1571 md = self.metadata[msg_id]
1572 1572 md_msg = dict(
1573 1573 content=rcontent,
1574 1574 parent_header=parent,
1575 1575 header=header,
1576 1576 metadata=rec['result_metadata'],
1577 1577 )
1578 1578 md.update(self._extract_metadata(md_msg))
1579 1579 if rec.get('received'):
1580 1580 md['received'] = parse_date(rec['received'])
1581 1581 md.update(iodict)
1582 1582
1583 1583 if rcontent['status'] == 'ok':
1584 1584 if header['msg_type'] == 'apply_reply':
1585 1585 res,buffers = serialize.unserialize_object(buffers)
1586 1586 elif header['msg_type'] == 'execute_reply':
1587 1587 res = ExecuteReply(msg_id, rcontent, md)
1588 1588 else:
1589 1589 raise KeyError("unhandled msg type: %r" % header['msg_type'])
1590 1590 else:
1591 1591 res = self._unwrap_exception(rcontent)
1592 1592 failures.append(res)
1593 1593
1594 1594 self.results[msg_id] = res
1595 1595 content[msg_id] = res
1596 1596
1597 1597 if len(theids) == 1 and failures:
1598 1598 raise failures[0]
1599 1599
1600 1600 error.collect_exceptions(failures, "result_status")
1601 1601 return content
1602 1602
    @spin_first
    def queue_status(self, targets='all', verbose=False):
        """Fetch the status of engine queues.

        Parameters
        ----------

        targets : int/str/list of ints/strs
                the engines whose states are to be queried.
                default : all
        verbose : bool
                Whether to return lengths only, or lists of ids for each element
        """
        if targets == 'all':
            # allow 'all' to be evaluated on the engine
            engine_ids = None
        else:
            engine_ids = self._build_targets(targets)[1]
        content = dict(targets=engine_ids, verbose=verbose)
        self.session.send(self._query_socket, "queue_request", content=content)
        # blocking recv of the Hub's reply
        idents,msg = self.session.recv(self._query_socket, 0)
        if self.debug:
            pprint(msg)
        content = msg['content']
        status = content.pop('status')
        if status != 'ok':
            raise self._unwrap_exception(content)
        # convert string keys back to engine-id ints
        content = rekey(content)
        if isinstance(targets, int):
            # single-engine query: return just that engine's status
            return content[targets]
        else:
            return content
1635 1635
1636 1636 def _build_msgids_from_target(self, targets=None):
1637 1637 """Build a list of msg_ids from the list of engine targets"""
1638 1638 if not targets: # needed as _build_targets otherwise uses all engines
1639 1639 return []
1640 1640 target_ids = self._build_targets(targets)[0]
1641 1641 return [md_id for md_id in self.metadata if self.metadata[md_id]["engine_uuid"] in target_ids]
1642 1642
1643 1643 def _build_msgids_from_jobs(self, jobs=None):
1644 1644 """Build a list of msg_ids from "jobs" """
1645 1645 if not jobs:
1646 1646 return []
1647 1647 msg_ids = []
1648 1648 if isinstance(jobs, string_types + (AsyncResult,)):
1649 1649 jobs = [jobs]
1650 1650 bad_ids = [obj for obj in jobs if not isinstance(obj, string_types + (AsyncResult,))]
1651 1651 if bad_ids:
1652 1652 raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0])
1653 1653 for j in jobs:
1654 1654 if isinstance(j, AsyncResult):
1655 1655 msg_ids.extend(j.msg_ids)
1656 1656 else:
1657 1657 msg_ids.append(j)
1658 1658 return msg_ids
1659 1659
1660 1660 def purge_local_results(self, jobs=[], targets=[]):
1661 1661 """Clears the client caches of results and their metadata.
1662 1662
1663 1663 Individual results can be purged by msg_id, or the entire
1664 1664 history of specific targets can be purged.
1665 1665
1666 1666 Use `purge_local_results('all')` to scrub everything from the Clients's
1667 1667 results and metadata caches.
1668 1668
1669 1669 After this call all `AsyncResults` are invalid and should be discarded.
1670 1670
1671 1671 If you must "reget" the results, you can still do so by using
1672 1672 `client.get_result(msg_id)` or `client.get_result(asyncresult)`. This will
1673 1673 redownload the results from the hub if they are still available
1674 1674 (i.e `client.purge_hub_results(...)` has not been called.
1675 1675
1676 1676 Parameters
1677 1677 ----------
1678 1678
1679 1679 jobs : str or list of str or AsyncResult objects
1680 1680 the msg_ids whose results should be purged.
1681 1681 targets : int/list of ints
1682 1682 The engines, by integer ID, whose entire result histories are to be purged.
1683 1683
1684 1684 Raises
1685 1685 ------
1686 1686
1687 1687 RuntimeError : if any of the tasks to be purged are still outstanding.
1688 1688
1689 1689 """
1690 1690 if not targets and not jobs:
1691 1691 raise ValueError("Must specify at least one of `targets` and `jobs`")
1692 1692
1693 1693 if jobs == 'all':
1694 1694 if self.outstanding:
1695 1695 raise RuntimeError("Can't purge outstanding tasks: %s" % self.outstanding)
1696 1696 self.results.clear()
1697 1697 self.metadata.clear()
1698 1698 else:
1699 1699 msg_ids = set()
1700 1700 msg_ids.update(self._build_msgids_from_target(targets))
1701 1701 msg_ids.update(self._build_msgids_from_jobs(jobs))
1702 1702 still_outstanding = self.outstanding.intersection(msg_ids)
1703 1703 if still_outstanding:
1704 1704 raise RuntimeError("Can't purge outstanding tasks: %s" % still_outstanding)
1705 1705 for mid in msg_ids:
1706 1706 self.results.pop(mid)
1707 1707 self.metadata.pop(mid)
1708 1708
1709 1709
1710 1710 @spin_first
1711 1711 def purge_hub_results(self, jobs=[], targets=[]):
1712 1712 """Tell the Hub to forget results.
1713 1713
1714 1714 Individual results can be purged by msg_id, or the entire
1715 1715 history of specific targets can be purged.
1716 1716
1717 1717 Use `purge_results('all')` to scrub everything from the Hub's db.
1718 1718
1719 1719 Parameters
1720 1720 ----------
1721 1721
1722 1722 jobs : str or list of str or AsyncResult objects
1723 1723 the msg_ids whose results should be forgotten.
1724 1724 targets : int/str/list of ints/strs
1725 1725 The targets, by int_id, whose entire history is to be purged.
1726 1726
1727 1727 default : None
1728 1728 """
1729 1729 if not targets and not jobs:
1730 1730 raise ValueError("Must specify at least one of `targets` and `jobs`")
1731 1731 if targets:
1732 1732 targets = self._build_targets(targets)[1]
1733 1733
1734 1734 # construct msg_ids from jobs
1735 1735 if jobs == 'all':
1736 1736 msg_ids = jobs
1737 1737 else:
1738 1738 msg_ids = self._build_msgids_from_jobs(jobs)
1739 1739
1740 1740 content = dict(engine_ids=targets, msg_ids=msg_ids)
1741 1741 self.session.send(self._query_socket, "purge_request", content=content)
1742 1742 idents, msg = self.session.recv(self._query_socket, 0)
1743 1743 if self.debug:
1744 1744 pprint(msg)
1745 1745 content = msg['content']
1746 1746 if content['status'] != 'ok':
1747 1747 raise self._unwrap_exception(content)
1748 1748
1749 1749 def purge_results(self, jobs=[], targets=[]):
1750 1750 """Clears the cached results from both the hub and the local client
1751 1751
1752 1752 Individual results can be purged by msg_id, or the entire
1753 1753 history of specific targets can be purged.
1754 1754
1755 1755 Use `purge_results('all')` to scrub every cached result from both the Hub's and
1756 1756 the Client's db.
1757 1757
1758 1758 Equivalent to calling both `purge_hub_results()` and `purge_client_results()` with
1759 1759 the same arguments.
1760 1760
1761 1761 Parameters
1762 1762 ----------
1763 1763
1764 1764 jobs : str or list of str or AsyncResult objects
1765 1765 the msg_ids whose results should be forgotten.
1766 1766 targets : int/str/list of ints/strs
1767 1767 The targets, by int_id, whose entire history is to be purged.
1768 1768
1769 1769 default : None
1770 1770 """
1771 1771 self.purge_local_results(jobs=jobs, targets=targets)
1772 1772 self.purge_hub_results(jobs=jobs, targets=targets)
1773 1773
1774 1774 def purge_everything(self):
1775 1775 """Clears all content from previous Tasks from both the hub and the local client
1776 1776
1777 1777 In addition to calling `purge_results("all")` it also deletes the history and
1778 1778 other bookkeeping lists.
1779 1779 """
1780 1780 self.purge_results("all")
1781 1781 self.history = []
1782 1782 self.session.digest_history.clear()
1783 1783
1784 1784 @spin_first
1785 1785 def hub_history(self):
1786 1786 """Get the Hub's history
1787 1787
1788 1788 Just like the Client, the Hub has a history, which is a list of msg_ids.
1789 1789 This will contain the history of all clients, and, depending on configuration,
1790 1790 may contain history across multiple cluster sessions.
1791 1791
1792 1792 Any msg_id returned here is a valid argument to `get_result`.
1793 1793
1794 1794 Returns
1795 1795 -------
1796 1796
1797 1797 msg_ids : list of strs
1798 1798 list of all msg_ids, ordered by task submission time.
1799 1799 """
1800 1800
1801 1801 self.session.send(self._query_socket, "history_request", content={})
1802 1802 idents, msg = self.session.recv(self._query_socket, 0)
1803 1803
1804 1804 if self.debug:
1805 1805 pprint(msg)
1806 1806 content = msg['content']
1807 1807 if content['status'] != 'ok':
1808 1808 raise self._unwrap_exception(content)
1809 1809 else:
1810 1810 return content['history']
1811 1811
1812 1812 @spin_first
1813 1813 def db_query(self, query, keys=None):
1814 1814 """Query the Hub's TaskRecord database
1815 1815
1816 1816 This will return a list of task record dicts that match `query`
1817 1817
1818 1818 Parameters
1819 1819 ----------
1820 1820
1821 1821 query : mongodb query dict
1822 1822 The search dict. See mongodb query docs for details.
1823 1823 keys : list of strs [optional]
1824 1824 The subset of keys to be returned. The default is to fetch everything but buffers.
1825 1825 'msg_id' will *always* be included.
1826 1826 """
1827 1827 if isinstance(keys, string_types):
1828 1828 keys = [keys]
1829 1829 content = dict(query=query, keys=keys)
1830 1830 self.session.send(self._query_socket, "db_request", content=content)
1831 1831 idents, msg = self.session.recv(self._query_socket, 0)
1832 1832 if self.debug:
1833 1833 pprint(msg)
1834 1834 content = msg['content']
1835 1835 if content['status'] != 'ok':
1836 1836 raise self._unwrap_exception(content)
1837 1837
1838 1838 records = content['records']
1839 1839
1840 1840 buffer_lens = content['buffer_lens']
1841 1841 result_buffer_lens = content['result_buffer_lens']
1842 1842 buffers = msg['buffers']
1843 1843 has_bufs = buffer_lens is not None
1844 1844 has_rbufs = result_buffer_lens is not None
1845 1845 for i,rec in enumerate(records):
1846 1846 # unpack datetime objects
1847 1847 for hkey in ('header', 'result_header'):
1848 1848 if hkey in rec:
1849 1849 rec[hkey] = extract_dates(rec[hkey])
1850 1850 for dtkey in ('submitted', 'started', 'completed', 'received'):
1851 1851 if dtkey in rec:
1852 1852 rec[dtkey] = parse_date(rec[dtkey])
1853 1853 # relink buffers
1854 1854 if has_bufs:
1855 1855 blen = buffer_lens[i]
1856 1856 rec['buffers'], buffers = buffers[:blen],buffers[blen:]
1857 1857 if has_rbufs:
1858 1858 blen = result_buffer_lens[i]
1859 1859 rec['result_buffers'], buffers = buffers[:blen],buffers[blen:]
1860 1860
1861 1861 return records
1862 1862
# Public API of this module: only the Client class is exported.
__all__ = ['Client']
General Comments 0
You need to be logged in to leave comments. Login now