pyerr -> error
MinRK
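The commit renames the IOPub `pyerr` message type to `error`, mirroring the `pyout` -> `execute_result` rename, while the saved nbformat keeps the old names; the frontend therefore converts the new type back to the legacy key when it builds output JSON. Below is a minimal standalone sketch of that round-trip; `MSG_TYPE_TO_NBFORMAT` and `toNbformatOutput` are illustrative names for this sketch, not part of the IPython API:

```javascript
// Legacy nbformat names still written to disk, keyed by the
// live (renamed) IOPub message types.
var MSG_TYPE_TO_NBFORMAT = {
    "execute_result": "pyout",
    "error": "pyerr"
};

// Illustrative helper mirroring the transform that
// OutputArea.handle_output performs in the diff below.
function toNbformatOutput(msg) {
    var msg_type = msg.header.msg_type;
    var content = msg.content;
    var json = { output_type: MSG_TYPE_TO_NBFORMAT[msg_type] || msg_type };
    if (msg_type === "error") {
        // live 'error' message -> legacy 'pyerr' output
        json.ename = content.ename;
        json.evalue = content.evalue;
        json.traceback = content.traceback;
    }
    return json;
}

var out = toNbformatOutput({
    header: { msg_type: "error" },
    content: { ename: "ValueError", evalue: "boom", traceback: ["..."] }
});
console.log(out.output_type);  // -> "pyerr"
```

Run under Node, this prints `pyerr`, the key that `append_output` still dispatches on when the output is replayed from a saved notebook.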
@@ -1,962 +1,965 @@
1 1 // Copyright (c) IPython Development Team.
2 2 // Distributed under the terms of the Modified BSD License.
3 3
4 4 //============================================================================
5 5 // OutputArea
6 6 //============================================================================
7 7
8 8 /**
9 9 * @module IPython
10 10 * @namespace IPython
11 11 * @submodule OutputArea
12 12 */
13 13 var IPython = (function (IPython) {
14 14 "use strict";
15 15
16 16 var utils = IPython.utils;
17 17
18 18 /**
19 19 * @class OutputArea
20 20 *
21 21 * @constructor
22 22 */
23 23
24 24 var OutputArea = function (selector, prompt_area) {
25 25 this.selector = selector;
26 26 this.wrapper = $(selector);
27 27 this.outputs = [];
28 28 this.collapsed = false;
29 29 this.scrolled = false;
30 30 this.trusted = true;
31 31 this.clear_queued = null;
32 32 if (prompt_area === undefined) {
33 33 this.prompt_area = true;
34 34 } else {
35 35 this.prompt_area = prompt_area;
36 36 }
37 37 this.create_elements();
38 38 this.style();
39 39 this.bind_events();
40 40 };
41 41
42 42
43 43 /**
44 44 * Class prototypes
45 45 **/
46 46
47 47 OutputArea.prototype.create_elements = function () {
48 48 this.element = $("<div/>");
49 49 this.collapse_button = $("<div/>");
50 50 this.prompt_overlay = $("<div/>");
51 51 this.wrapper.append(this.prompt_overlay);
52 52 this.wrapper.append(this.element);
53 53 this.wrapper.append(this.collapse_button);
54 54 };
55 55
56 56
57 57 OutputArea.prototype.style = function () {
58 58 this.collapse_button.hide();
59 59 this.prompt_overlay.hide();
60 60
61 61 this.wrapper.addClass('output_wrapper');
62 62 this.element.addClass('output');
63 63
64 64 this.collapse_button.addClass("btn output_collapsed");
65 65 this.collapse_button.attr('title', 'click to expand output');
66 66 this.collapse_button.text('. . .');
67 67
68 68 this.prompt_overlay.addClass('out_prompt_overlay prompt');
69 69 this.prompt_overlay.attr('title', 'click to expand output; double click to hide output');
70 70
71 71 this.collapse();
72 72 };
73 73
74 74 /**
75 75 * Should the OutputArea scroll?
76 76 * Returns whether the height (in lines) exceeds a threshold.
77 77 *
78 78 * @private
79 79 * @method _should_scroll
80 80 * @param [lines=100]{Integer}
81 81 * @return {Bool}
82 82 *
83 83 */
84 84 OutputArea.prototype._should_scroll = function (lines) {
85 85 if (lines <=0 ){ return }
86 86 if (!lines) {
87 87 lines = 100;
88 88 }
89 89 // line-height from http://stackoverflow.com/questions/1185151
90 90 var fontSize = this.element.css('font-size');
91 91 var lineHeight = Math.floor(parseInt(fontSize.replace('px','')) * 1.5);
92 92
93 93 return (this.element.height() > lines * lineHeight);
94 94 };
95 95
96 96
97 97 OutputArea.prototype.bind_events = function () {
98 98 var that = this;
99 99 this.prompt_overlay.dblclick(function () { that.toggle_output(); });
100 100 this.prompt_overlay.click(function () { that.toggle_scroll(); });
101 101
102 102 this.element.resize(function () {
103 103 // FIXME: Firefox on Linux misbehaves, so automatic scrolling is disabled
104 104 if ( IPython.utils.browser[0] === "Firefox" ) {
105 105 return;
106 106 }
107 107 // maybe scroll output,
108 108 // if it's grown large enough and hasn't already been scrolled.
109 109 if ( !that.scrolled && that._should_scroll(OutputArea.auto_scroll_threshold)) {
110 110 that.scroll_area();
111 111 }
112 112 });
113 113 this.collapse_button.click(function () {
114 114 that.expand();
115 115 });
116 116 };
117 117
118 118
119 119 OutputArea.prototype.collapse = function () {
120 120 if (!this.collapsed) {
121 121 this.element.hide();
122 122 this.prompt_overlay.hide();
123 123 if (this.element.html()){
124 124 this.collapse_button.show();
125 125 }
126 126 this.collapsed = true;
127 127 }
128 128 };
129 129
130 130
131 131 OutputArea.prototype.expand = function () {
132 132 if (this.collapsed) {
133 133 this.collapse_button.hide();
134 134 this.element.show();
135 135 this.prompt_overlay.show();
136 136 this.collapsed = false;
137 137 }
138 138 };
139 139
140 140
141 141 OutputArea.prototype.toggle_output = function () {
142 142 if (this.collapsed) {
143 143 this.expand();
144 144 } else {
145 145 this.collapse();
146 146 }
147 147 };
148 148
149 149
150 150 OutputArea.prototype.scroll_area = function () {
151 151 this.element.addClass('output_scroll');
152 152 this.prompt_overlay.attr('title', 'click to unscroll output; double click to hide');
153 153 this.scrolled = true;
154 154 };
155 155
156 156
157 157 OutputArea.prototype.unscroll_area = function () {
158 158 this.element.removeClass('output_scroll');
159 159 this.prompt_overlay.attr('title', 'click to scroll output; double click to hide');
160 160 this.scrolled = false;
161 161 };
162 162
163 163 /**
164 164 *
165 165 * Scroll the OutputArea if its height exceeds a threshold (in lines).
166 166 *
167 167 * Threshold is a maximum number of lines. If unspecified, defaults to
168 168 * OutputArea.minimum_scroll_threshold.
169 169 *
170 170 * Negative threshold will prevent the OutputArea from ever scrolling.
171 171 *
172 172 * @method scroll_if_long
173 173 *
174 174 * @param [lines=20]{Number} Defaults to 20 if not set;
175 175 * behavior is undefined for a value of `0`.
176 176 *
177 177 **/
178 178 OutputArea.prototype.scroll_if_long = function (lines) {
179 179 var n = lines | OutputArea.minimum_scroll_threshold;
180 180 if(n <= 0){
181 181 return
182 182 }
183 183
184 184 if (this._should_scroll(n)) {
185 185 // only allow scrolling long-enough output
186 186 this.scroll_area();
187 187 }
188 188 };
189 189
190 190
191 191 OutputArea.prototype.toggle_scroll = function () {
192 192 if (this.scrolled) {
193 193 this.unscroll_area();
194 194 } else {
195 195 // only allow scrolling long-enough output
196 196 this.scroll_if_long();
197 197 }
198 198 };
199 199
200 200
201 201 // typeset with MathJax if MathJax is available
202 202 OutputArea.prototype.typeset = function () {
203 203 if (window.MathJax){
204 204 MathJax.Hub.Queue(["Typeset",MathJax.Hub]);
205 205 }
206 206 };
207 207
208 208
209 209 OutputArea.prototype.handle_output = function (msg) {
210 210 var json = {};
211 211 var msg_type = json.output_type = msg.header.msg_type;
212 212 var content = msg.content;
213 213 if (msg_type === "stream") {
214 214 json.text = content.data;
215 215 json.stream = content.name;
216 216 } else if (msg_type === "display_data") {
217 217 json = content.data;
218 218 json.output_type = msg_type;
219 219 json.metadata = content.metadata;
220 220 } else if (msg_type === "execute_result") {
221 221 json = content.data;
222 222 // pyout message has been renamed to execute_result,
223 223 // but the nbformat has not been updated,
224 224 // so transform back to pyout for json.
225 225 json.output_type = "pyout";
226 226 json.metadata = content.metadata;
227 227 json.prompt_number = content.execution_count;
228 } else if (msg_type === "pyerr") {
229 json.ename = content.ename;
230 json.evalue = content.evalue;
231 json.traceback = content.traceback;
228 } else if (msg_type === "error") {
229 // pyerr has been renamed to error, but the nbformat has
230 // not been updated, so transform back to pyerr for json.
231 json.output_type = "pyerr";
232 json.ename = content.ename;
233 json.evalue = content.evalue;
234 json.traceback = content.traceback;
232 235 }
233 236 this.append_output(json);
234 237 };
235 238
236 239
237 240 OutputArea.prototype.rename_keys = function (data, key_map) {
238 241 var remapped = {};
239 242 for (var key in data) {
240 243 var new_key = key_map[key] || key;
241 244 remapped[new_key] = data[key];
242 245 }
243 246 return remapped;
244 247 };
245 248
246 249
247 250 OutputArea.output_types = [
248 251 'application/javascript',
249 252 'text/html',
250 253 'text/markdown',
251 254 'text/latex',
252 255 'image/svg+xml',
253 256 'image/png',
254 257 'image/jpeg',
255 258 'application/pdf',
256 259 'text/plain'
257 260 ];
258 261
259 262 OutputArea.prototype.validate_output = function (json) {
260 263 // scrub invalid outputs
261 264 // TODO: right now everything is a string, but JSON really shouldn't be.
262 265 // nbformat 4 will fix that.
263 266 $.map(OutputArea.output_types, function(key){
264 267 if (json[key] !== undefined && typeof json[key] !== 'string') {
265 268 console.log("Invalid type for " + key, json[key]);
266 269 delete json[key];
267 270 }
268 271 });
269 272 return json;
270 273 };
271 274
272 275 OutputArea.prototype.append_output = function (json) {
273 276 this.expand();
274 277
275 278 // validate output data types
276 279 json = this.validate_output(json);
277 280
278 281 // Clear the output if clear is queued.
279 282 var needs_height_reset = false;
280 283 if (this.clear_queued) {
281 284 this.clear_output(false);
282 285 needs_height_reset = true;
283 286 }
284 287
285 288 if (json.output_type === 'pyout') {
286 289 this.append_execute_result(json);
287 290 } else if (json.output_type === 'pyerr') {
288 this.append_pyerr(json);
291 this.append_error(json);
289 292 } else if (json.output_type === 'stream') {
290 293 this.append_stream(json);
291 294 }
292 295
293 296 // We must release the animation fixed height in a callback since Gecko
294 297 // (Firefox) doesn't render the image immediately as the data is
295 298 // available.
296 299 var that = this;
297 300 var handle_appended = function ($el) {
298 301 // Only reset the height to automatic if the height is currently
299 302 // fixed (done by wait=True flag on clear_output).
300 303 if (needs_height_reset) {
301 304 that.element.height('');
302 305 }
303 306 that.element.trigger('resize');
304 307 };
305 308 if (json.output_type === 'display_data') {
306 309 this.append_display_data(json, handle_appended);
307 310 } else {
308 311 handle_appended();
309 312 }
310 313
311 314 this.outputs.push(json);
312 315 };
313 316
314 317
315 318 OutputArea.prototype.create_output_area = function () {
316 319 var oa = $("<div/>").addClass("output_area");
317 320 if (this.prompt_area) {
318 321 oa.append($('<div/>').addClass('prompt'));
319 322 }
320 323 return oa;
321 324 };
322 325
323 326
324 327 function _get_metadata_key(metadata, key, mime) {
325 328 var mime_md = metadata[mime];
326 329 // mime-specific higher priority
327 330 if (mime_md && mime_md[key] !== undefined) {
328 331 return mime_md[key];
329 332 }
330 333 // fallback on global
331 334 return metadata[key];
332 335 }
333 336
334 337 OutputArea.prototype.create_output_subarea = function(md, classes, mime) {
335 338 var subarea = $('<div/>').addClass('output_subarea').addClass(classes);
336 339 if (_get_metadata_key(md, 'isolated', mime)) {
337 340 // Create an iframe to isolate the subarea from the rest of the
338 341 // document
339 342 var iframe = $('<iframe/>').addClass('box-flex1');
340 343 iframe.css({'height':1, 'width':'100%', 'display':'block'});
341 344 iframe.attr('frameborder', 0);
342 345 iframe.attr('scrolling', 'auto');
343 346
344 347 // Once the iframe is loaded, the subarea is dynamically inserted
345 348 iframe.on('load', function() {
346 349 // Workaround needed by Firefox, to properly render svg inside
347 350 // iframes, see http://stackoverflow.com/questions/10177190/
348 351 // svg-dynamically-added-to-iframe-does-not-render-correctly
349 352 this.contentDocument.open();
350 353
351 354 // Insert the subarea into the iframe
352 355 // We must directly write the HTML. When using jQuery's append
353 356 // method, javascript is evaluated in the parent document and
354 357 // not in the iframe document. At this point, subarea doesn't
355 358 // contain any user content.
356 359 this.contentDocument.write(subarea.html());
357 360
358 361 this.contentDocument.close();
359 362
360 363 var body = this.contentDocument.body;
361 364 // Adjust the iframe height automatically
362 365 iframe.height(body.scrollHeight + 'px');
363 366 });
364 367
365 368 // Elements should be appended to the inner subarea and not to the
366 369 // iframe
367 370 iframe.append = function(that) {
368 371 subarea.append(that);
369 372 };
370 373
371 374 return iframe;
372 375 } else {
373 376 return subarea;
374 377 }
375 378 }
376 379
377 380
378 381 OutputArea.prototype._append_javascript_error = function (err, element) {
379 382 // display a message when a javascript error occurs in display output
380 383 var msg = "Javascript error adding output!"
381 384 if ( element === undefined ) return;
382 385 element
383 386 .append($('<div/>').text(msg).addClass('js-error'))
384 387 .append($('<div/>').text(err.toString()).addClass('js-error'))
385 388 .append($('<div/>').text('See your browser Javascript console for more details.').addClass('js-error'));
386 389 };
387 390
388 391 OutputArea.prototype._safe_append = function (toinsert) {
389 392 // safely append an item to the document
390 393 // this is an object created by user code,
391 394 // and may have errors, which should not be raised
392 395 // under any circumstances.
393 396 try {
394 397 this.element.append(toinsert);
395 398 } catch(err) {
396 399 console.log(err);
397 400 // Create an actual output_area and output_subarea, which creates
398 401 // the prompt area and the proper indentation.
399 402 var toinsert = this.create_output_area();
400 403 var subarea = $('<div/>').addClass('output_subarea');
401 404 toinsert.append(subarea);
402 405 this._append_javascript_error(err, subarea);
403 406 this.element.append(toinsert);
404 407 }
405 408 };
406 409
407 410
408 411 OutputArea.prototype.append_execute_result = function (json) {
409 412 var n = json.prompt_number || ' ';
410 413 var toinsert = this.create_output_area();
411 414 if (this.prompt_area) {
412 415 toinsert.find('div.prompt').addClass('output_prompt').text('Out[' + n + ']:');
413 416 }
414 417 var inserted = this.append_mime_type(json, toinsert);
415 418 if (inserted) {
416 419 inserted.addClass('output_pyout');
417 420 }
418 421 this._safe_append(toinsert);
419 422 // If we just output latex, typeset it.
420 423 if ((json['text/latex'] !== undefined) ||
421 424 (json['text/html'] !== undefined) ||
422 425 (json['text/markdown'] !== undefined)) {
423 426 this.typeset();
424 427 }
425 428 };
426 429
427 430
428 OutputArea.prototype.append_pyerr = function (json) {
431 OutputArea.prototype.append_error = function (json) {
429 432 var tb = json.traceback;
430 433 if (tb !== undefined && tb.length > 0) {
431 434 var s = '';
432 435 var len = tb.length;
433 436 for (var i=0; i<len; i++) {
434 437 s = s + tb[i] + '\n';
435 438 }
436 439 s = s + '\n';
437 440 var toinsert = this.create_output_area();
438 441 var append_text = OutputArea.append_map['text/plain'];
439 442 if (append_text) {
440 443 append_text.apply(this, [s, {}, toinsert]).addClass('output_pyerr');
441 444 }
442 445 this._safe_append(toinsert);
443 446 }
444 447 };
445 448
446 449
447 450 OutputArea.prototype.append_stream = function (json) {
448 451 // temporary fix: if stream undefined (json file written prior to this patch),
449 452 // default to most likely stdout:
450 453 if (json.stream === undefined){
451 454 json.stream = 'stdout';
452 455 }
453 456 var text = json.text;
454 457 var subclass = "output_"+json.stream;
455 458 if (this.outputs.length > 0){
456 459 // have at least one output to consider
457 460 var last = this.outputs[this.outputs.length-1];
458 461 if (last.output_type == 'stream' && json.stream == last.stream){
459 462 // latest output was in the same stream,
460 463 // so append directly into its pre tag
461 464 // escape ANSI & HTML specials:
462 465 var pre = this.element.find('div.'+subclass).last().find('pre');
463 466 var html = utils.fixCarriageReturn(
464 467 pre.html() + utils.fixConsole(text));
465 468 // The only user content injected with this HTML call is
466 469 // escaped by the fixConsole() method.
467 470 pre.html(html);
468 471 return;
469 472 }
470 473 }
471 474
472 475 if (!text.replace("\r", "")) {
473 476 // text is nothing (empty string, \r, etc.)
474 477 // so don't append any elements, which might add undesirable space
475 478 return;
476 479 }
477 480
478 481 // If we got here, attach a new div
479 482 var toinsert = this.create_output_area();
480 483 var append_text = OutputArea.append_map['text/plain'];
481 484 if (append_text) {
482 485 append_text.apply(this, [text, {}, toinsert]).addClass("output_stream " + subclass);
483 486 }
484 487 this._safe_append(toinsert);
485 488 };
486 489
487 490
488 491 OutputArea.prototype.append_display_data = function (json, handle_inserted) {
489 492 var toinsert = this.create_output_area();
490 493 if (this.append_mime_type(json, toinsert, handle_inserted)) {
491 494 this._safe_append(toinsert);
492 495 // If we just output latex, typeset it.
493 496 if ((json['text/latex'] !== undefined) ||
494 497 (json['text/html'] !== undefined) ||
495 498 (json['text/markdown'] !== undefined)) {
496 499 this.typeset();
497 500 }
498 501 }
499 502 };
500 503
501 504
502 505 OutputArea.safe_outputs = {
503 506 'text/plain' : true,
504 507 'text/latex' : true,
505 508 'image/png' : true,
506 509 'image/jpeg' : true
507 510 };
508 511
509 512 OutputArea.prototype.append_mime_type = function (json, element, handle_inserted) {
510 513 for (var i=0; i < OutputArea.display_order.length; i++) {
511 514 var type = OutputArea.display_order[i];
512 515 var append = OutputArea.append_map[type];
513 516 if ((json[type] !== undefined) && append) {
514 517 var value = json[type];
515 518 if (!this.trusted && !OutputArea.safe_outputs[type]) {
516 519 // not trusted, sanitize HTML
517 520 if (type==='text/html' || type==='text/svg') {
518 521 value = IPython.security.sanitize_html(value);
519 522 } else {
520 523 // don't display if we don't know how to sanitize it
521 524 console.log("Ignoring untrusted " + type + " output.");
522 525 continue;
523 526 }
524 527 }
525 528 var md = json.metadata || {};
526 529 var toinsert = append.apply(this, [value, md, element, handle_inserted]);
527 530 // Since only the png and jpeg mime types call the inserted
528 531 // callback, if the mime type is something other we must call the
529 532 // inserted callback only when the element is actually inserted
530 533 // into the DOM. Use a timeout of 0 to do this.
531 534 if (['image/png', 'image/jpeg'].indexOf(type) < 0 && handle_inserted !== undefined) {
532 535 setTimeout(handle_inserted, 0);
533 536 }
534 537 $([IPython.events]).trigger('output_appended.OutputArea', [type, value, md, toinsert]);
535 538 return toinsert;
536 539 }
537 540 }
538 541 return null;
539 542 };
540 543
541 544
542 545 var append_html = function (html, md, element) {
543 546 var type = 'text/html';
544 547 var toinsert = this.create_output_subarea(md, "output_html rendered_html", type);
545 548 IPython.keyboard_manager.register_events(toinsert);
546 549 toinsert.append(html);
547 550 element.append(toinsert);
548 551 return toinsert;
549 552 };
550 553
551 554
552 555 var append_markdown = function(markdown, md, element) {
553 556 var type = 'text/markdown';
554 557 var toinsert = this.create_output_subarea(md, "output_markdown", type);
555 558 var text_and_math = IPython.mathjaxutils.remove_math(markdown);
556 559 var text = text_and_math[0];
557 560 var math = text_and_math[1];
558 561 var html = marked.parser(marked.lexer(text));
559 562 html = IPython.mathjaxutils.replace_math(html, math);
560 563 toinsert.append(html);
561 564 element.append(toinsert);
562 565 return toinsert;
563 566 };
564 567
565 568
566 569 var append_javascript = function (js, md, element) {
567 570 // We just eval the JS code, element appears in the local scope.
568 571 var type = 'application/javascript';
569 572 var toinsert = this.create_output_subarea(md, "output_javascript", type);
570 573 IPython.keyboard_manager.register_events(toinsert);
571 574 element.append(toinsert);
572 575 // FIXME TODO : remove `container element for 3.0`
573 576 //backward compat, js should be eval'ed in a context where `container` is defined.
574 577 var container = element;
575 578 container.show = function(){console.log('Warning "container.show()" is deprecated.')};
576 579 // end backward compat
577 580
578 581 // Fix for ipython/issues/5293, make sure `element` is the area which
579 582 // output can be inserted into at the time of JS execution.
580 583 element = toinsert;
581 584 try {
582 585 eval(js);
583 586 } catch(err) {
584 587 console.log(err);
585 588 this._append_javascript_error(err, toinsert);
586 589 }
587 590 return toinsert;
588 591 };
589 592
590 593
591 594 var append_text = function (data, md, element) {
592 595 var type = 'text/plain';
593 596 var toinsert = this.create_output_subarea(md, "output_text", type);
594 597 // escape ANSI & HTML specials in plaintext:
595 598 data = utils.fixConsole(data);
596 599 data = utils.fixCarriageReturn(data);
597 600 data = utils.autoLinkUrls(data);
598 601 // The only user content injected with this HTML call is
599 602 // escaped by the fixConsole() method.
600 603 toinsert.append($("<pre/>").html(data));
601 604 element.append(toinsert);
602 605 return toinsert;
603 606 };
604 607
605 608
606 609 var append_svg = function (svg_html, md, element) {
607 610 var type = 'image/svg+xml';
608 611 var toinsert = this.create_output_subarea(md, "output_svg", type);
609 612
610 613 // Get the svg element from within the HTML.
611 614 var svg = $('<div />').html(svg_html).find('svg');
612 615 var svg_area = $('<div />');
613 616 var width = svg.attr('width');
614 617 var height = svg.attr('height');
615 618 svg
616 619 .width('100%')
617 620 .height('100%');
618 621 svg_area
619 622 .width(width)
620 623 .height(height);
621 624
622 625 // The jQuery resize handlers don't seem to work on the svg element.
623 626 // When the svg renders completely, measure its size and set the parent
624 627 // div to that size. Then set the svg to 100% the size of the parent
625 628 // div and make the parent div resizable.
626 629 this._dblclick_to_reset_size(svg_area, true, false);
627 630
628 631 svg_area.append(svg);
629 632 toinsert.append(svg_area);
630 633 element.append(toinsert);
631 634
632 635 return toinsert;
633 636 };
634 637
635 638 OutputArea.prototype._dblclick_to_reset_size = function (img, immediately, resize_parent) {
636 639 // Add a resize handler to an element
637 640 //
638 641 // img: jQuery element
639 642 // immediately: bool=False
640 643 // Wait for the element to load before creating the handle.
641 644 // resize_parent: bool=True
642 645 // Should the parent of the element be resized when the element is
643 646 // reset (by double click).
644 647 var callback = function (){
645 648 var h0 = img.height();
646 649 var w0 = img.width();
647 650 if (!(h0 && w0)) {
648 651 // zero size, don't make it resizable
649 652 return;
650 653 }
651 654 img.resizable({
652 655 aspectRatio: true,
653 656 autoHide: true
654 657 });
655 658 img.dblclick(function () {
656 659 // resize wrapper & image together for some reason:
657 660 img.height(h0);
658 661 img.width(w0);
659 662 if (resize_parent === undefined || resize_parent) {
660 663 img.parent().height(h0);
661 664 img.parent().width(w0);
662 665 }
663 666 });
664 667 };
665 668
666 669 if (immediately) {
667 670 callback();
668 671 } else {
669 672 img.on("load", callback);
670 673 }
671 674 };
672 675
673 676 var set_width_height = function (img, md, mime) {
674 677 // set width and height of an img element from metadata
675 678 var height = _get_metadata_key(md, 'height', mime);
676 679 if (height !== undefined) img.attr('height', height);
677 680 var width = _get_metadata_key(md, 'width', mime);
678 681 if (width !== undefined) img.attr('width', width);
679 682 };
680 683
681 684 var append_png = function (png, md, element, handle_inserted) {
682 685 var type = 'image/png';
683 686 var toinsert = this.create_output_subarea(md, "output_png", type);
684 687 var img = $("<img/>");
685 688 if (handle_inserted !== undefined) {
686 689 img.on('load', function(){
687 690 handle_inserted(img);
688 691 });
689 692 }
690 693 img[0].src = 'data:image/png;base64,'+ png;
691 694 set_width_height(img, md, 'image/png');
692 695 this._dblclick_to_reset_size(img);
693 696 toinsert.append(img);
694 697 element.append(toinsert);
695 698 return toinsert;
696 699 };
697 700
698 701
699 702 var append_jpeg = function (jpeg, md, element, handle_inserted) {
700 703 var type = 'image/jpeg';
701 704 var toinsert = this.create_output_subarea(md, "output_jpeg", type);
702 705 var img = $("<img/>");
703 706 if (handle_inserted !== undefined) {
704 707 img.on('load', function(){
705 708 handle_inserted(img);
706 709 });
707 710 }
708 711 img[0].src = 'data:image/jpeg;base64,'+ jpeg;
709 712 set_width_height(img, md, 'image/jpeg');
710 713 this._dblclick_to_reset_size(img);
711 714 toinsert.append(img);
712 715 element.append(toinsert);
713 716 return toinsert;
714 717 };
715 718
716 719
717 720 var append_pdf = function (pdf, md, element) {
718 721 var type = 'application/pdf';
719 722 var toinsert = this.create_output_subarea(md, "output_pdf", type);
720 723 var a = $('<a/>').attr('href', 'data:application/pdf;base64,'+pdf);
721 724 a.attr('target', '_blank');
722 725 a.text('View PDF')
723 726 toinsert.append(a);
724 727 element.append(toinsert);
725 728 return toinsert;
726 729 }
727 730
728 731 var append_latex = function (latex, md, element) {
729 732 // This method cannot do the typesetting because the latex first has to
730 733 // be on the page.
731 734 var type = 'text/latex';
732 735 var toinsert = this.create_output_subarea(md, "output_latex", type);
733 736 toinsert.append(latex);
734 737 element.append(toinsert);
735 738 return toinsert;
736 739 };
737 740
738 741
739 742 OutputArea.prototype.append_raw_input = function (msg) {
740 743 var that = this;
741 744 this.expand();
742 745 var content = msg.content;
743 746 var area = this.create_output_area();
744 747
745 748 // disable any other raw_inputs, if they are left around
746 749 $("div.output_subarea.raw_input_container").remove();
747 750
748 751 area.append(
749 752 $("<div/>")
750 753 .addClass("box-flex1 output_subarea raw_input_container")
751 754 .append(
752 755 $("<span/>")
753 756 .addClass("raw_input_prompt")
754 757 .text(content.prompt)
755 758 )
756 759 .append(
757 760 $("<input/>")
758 761 .addClass("raw_input")
759 762 .attr('type', 'text')
760 763 .attr("size", 47)
761 764 .keydown(function (event, ui) {
762 765 // make sure we submit on enter,
763 766 // and don't re-execute the *cell* on shift-enter
764 767 if (event.which === IPython.keyboard.keycodes.enter) {
765 768 that._submit_raw_input();
766 769 return false;
767 770 }
768 771 })
769 772 )
770 773 );
771 774
772 775 this.element.append(area);
773 776 var raw_input = area.find('input.raw_input');
774 777 // Register events that enable/disable the keyboard manager while raw
775 778 // input is focused.
776 779 IPython.keyboard_manager.register_events(raw_input);
777 780 // Note, the following line used to read raw_input.focus().focus().
778 781 // This seemed to be needed otherwise only the cell would be focused.
779 782 // But with the modal UI, this seems to work fine with one call to focus().
780 783 raw_input.focus();
781 784 }
782 785
783 786 OutputArea.prototype._submit_raw_input = function (evt) {
784 787 var container = this.element.find("div.raw_input_container");
785 788 var theprompt = container.find("span.raw_input_prompt");
786 789 var theinput = container.find("input.raw_input");
787 790 var value = theinput.val();
788 791 var content = {
789 792 output_type : 'stream',
790 793 name : 'stdout',
791 794 text : theprompt.text() + value + '\n'
792 795 }
793 796 // remove form container
794 797 container.parent().remove();
795 798 // replace with plaintext version in stdout
796 799 this.append_output(content, false);
797 800 $([IPython.events]).trigger('send_input_reply.Kernel', value);
798 801 }
799 802
800 803
801 804 OutputArea.prototype.handle_clear_output = function (msg) {
802 805 // msg spec v4 had stdout, stderr, display keys
803 806 // v4.1 replaced these with just wait
804 807 // The default behavior is the same (stdout=stderr=display=True, wait=False),
805 808 // so v4 messages will still be properly handled,
806 809 // except for the rarely used case of clearing less than all output.
807 810 this.clear_output(msg.content.wait || false);
808 811 };
809 812
810 813
811 814 OutputArea.prototype.clear_output = function(wait) {
812 815 if (wait) {
813 816
814 817 // If a clear is queued, clear before adding another to the queue.
815 818 if (this.clear_queued) {
816 819 this.clear_output(false);
817 820 };
818 821
819 822 this.clear_queued = true;
820 823 } else {
821 824
822 825 // Fix the output div's height if the clear_output is waiting for
823 826 // new output (it is being used in an animation).
824 827 if (this.clear_queued) {
825 828 var height = this.element.height();
826 829 this.element.height(height);
827 830 this.clear_queued = false;
828 831 }
829 832
830 833 // Clear all
831 834 // Remove load event handlers from img tags because we don't want
832 835 // them to fire if the image is never added to the page.
833 836 this.element.find('img').off('load');
834 837 this.element.html("");
835 838 this.outputs = [];
836 839 this.trusted = true;
837 840 this.unscroll_area();
838 841 return;
839 842 };
840 843 };
841 844
842 845
843 846 // JSON serialization
844 847
845 848 OutputArea.prototype.fromJSON = function (outputs) {
846 849 var len = outputs.length;
847 850 var data;
848 851
849 852 for (var i=0; i<len; i++) {
850 853 data = outputs[i];
851 854 var msg_type = data.output_type;
852 855 if (msg_type === "display_data" || msg_type === "pyout") {
853 856 // convert short keys to mime keys
854 857 // TODO: remove mapping of short keys when we update to nbformat 4
855 858 data = this.rename_keys(data, OutputArea.mime_map_r);
856 859 data.metadata = this.rename_keys(data.metadata, OutputArea.mime_map_r);
857 860 }
858 861
859 862 this.append_output(data);
860 863 }
861 864 };
862 865
863 866
864 867 OutputArea.prototype.toJSON = function () {
865 868 var outputs = [];
866 869 var len = this.outputs.length;
867 870 var data;
868 871 for (var i=0; i<len; i++) {
869 872 data = this.outputs[i];
870 873 var msg_type = data.output_type;
871 874 if (msg_type === "display_data" || msg_type === "pyout") {
872 875 // convert mime keys to short keys
873 876 data = this.rename_keys(data, OutputArea.mime_map);
874 877 data.metadata = this.rename_keys(data.metadata, OutputArea.mime_map);
875 878 }
876 879 outputs[i] = data;
877 880 }
878 881 return outputs;
879 882 };
880 883
881 884 /**
882 885 * Class properties
883 886 **/
884 887
885 888 /**
886 889 * Threshold to trigger autoscroll when the OutputArea is resized,
887 890 * typically when new outputs are added.
888 891 *
889 892 * Behavior is undefined if autoscroll is lower than minimum_scroll_threshold,
890 893 * unless it is < 0, in which case autoscroll will never be triggered.
891 894 *
892 895 * @property auto_scroll_threshold
893 896 * @type Number
894 897 * @default 100
895 898 *
896 899 **/
897 900 OutputArea.auto_scroll_threshold = 100;
898 901
899 902 /**
900 903 * Lower limit (in lines) for OutputArea to be made scrollable. OutputAreas
901 904 * shorter than this are never scrolled.
902 905 *
903 906 * @property minimum_scroll_threshold
904 907 * @type Number
905 908 * @default 20
906 909 *
907 910 **/
908 911 OutputArea.minimum_scroll_threshold = 20;
909 912
910 913
911 914
912 915 OutputArea.mime_map = {
913 916 "text/plain" : "text",
914 917 "text/html" : "html",
915 918 "image/svg+xml" : "svg",
916 919 "image/png" : "png",
917 920 "image/jpeg" : "jpeg",
918 921 "text/latex" : "latex",
919 922 "application/json" : "json",
920 923 "application/javascript" : "javascript",
921 924 };
922 925
923 926 OutputArea.mime_map_r = {
924 927 "text" : "text/plain",
925 928 "html" : "text/html",
926 929 "svg" : "image/svg+xml",
927 930 "png" : "image/png",
928 931 "jpeg" : "image/jpeg",
929 932 "latex" : "text/latex",
930 933 "json" : "application/json",
931 934 "javascript" : "application/javascript",
932 935 };
933 936
934 937 OutputArea.display_order = [
935 938 'application/javascript',
936 939 'text/html',
937 940 'text/markdown',
938 941 'text/latex',
939 942 'image/svg+xml',
940 943 'image/png',
941 944 'image/jpeg',
942 945 'application/pdf',
943 946 'text/plain'
944 947 ];
945 948
946 949 OutputArea.append_map = {
947 950 "text/plain" : append_text,
948 951 "text/html" : append_html,
949 952 "text/markdown": append_markdown,
950 953 "image/svg+xml" : append_svg,
951 954 "image/png" : append_png,
952 955 "image/jpeg" : append_jpeg,
953 956 "text/latex" : append_latex,
954 957 "application/javascript" : append_javascript,
955 958 "application/pdf" : append_pdf
956 959 };
957 960
958 961 IPython.OutputArea = OutputArea;
959 962
960 963 return IPython;
961 964
962 965 }(IPython));
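Aside from the rename, the file above also shows how outputs get rendered: `append_mime_type` walks `OutputArea.display_order` and uses the first representation that is both present in the output and has a renderer registered in `OutputArea.append_map`. A condensed, standalone sketch of that priority lookup, with stub renderers standing in for the real `append_*` functions:

```javascript
// Priority order of mime types, richest first
// (a subset of OutputArea.display_order).
var display_order = [
    "application/javascript",
    "text/html",
    "image/png",
    "text/plain"
];

// Stub renderers keyed by mime type; the real append_map holds
// the append_html / append_png / ... functions from outputarea.js.
var renderers = {
    "text/html": function (v) { return "[html] " + v; },
    "text/plain": function (v) { return "[text] " + v; }
};

// Render the richest available representation in a mime bundle,
// mirroring the loop in OutputArea.prototype.append_mime_type.
function renderRichest(bundle) {
    for (var i = 0; i < display_order.length; i++) {
        var type = display_order[i];
        if (bundle[type] !== undefined && renderers[type]) {
            return renderers[type](bundle[type]);
        }
    }
    return null; // nothing we know how to display
}

console.log(renderRichest({
    "text/plain": "hello",
    "text/html": "<b>hello</b>"
})); // -> "[html] <b>hello</b>" : html outranks plain text
```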
@@ -1,624 +1,624 @@
1 1 // Copyright (c) IPython Development Team.
2 2 // Distributed under the terms of the Modified BSD License.
3 3
4 4 //============================================================================
5 5 // Kernel
6 6 //============================================================================
7 7
8 8 /**
9 9 * @module IPython
10 10 * @namespace IPython
11 11 * @submodule Kernel
12 12 */
13 13
14 14 var IPython = (function (IPython) {
15 15 "use strict";
16 16
17 17 var utils = IPython.utils;
18 18
19 19 // Initialization and connection.
20 20 /**
21 21 * A Kernel Class to communicate with the Python kernel
22 22 * @Class Kernel
23 23 */
24 24 var Kernel = function (kernel_service_url) {
25 25 this.kernel_id = null;
26 26 this.shell_channel = null;
27 27 this.iopub_channel = null;
28 28 this.stdin_channel = null;
29 29 this.kernel_service_url = kernel_service_url;
30 30 this.running = false;
31 31 this.username = "username";
32 32 this.session_id = utils.uuid();
33 33 this._msg_callbacks = {};
34 34 this.post = $.post;
35 35
36 36 if (typeof(WebSocket) !== 'undefined') {
37 37 this.WebSocket = WebSocket;
38 38 } else if (typeof(MozWebSocket) !== 'undefined') {
39 39 this.WebSocket = MozWebSocket;
40 40 } else {
41 41 alert('Your browser does not have WebSocket support, please try Chrome, Safari or Firefox ≥ 6. Firefox 4 and 5 are also supported but you have to enable WebSockets in about:config.');
42 42 }
43 43
44 44 this.bind_events();
45 45 this.init_iopub_handlers();
46 46 this.comm_manager = new IPython.CommManager(this);
47 47 this.widget_manager = new IPython.WidgetManager(this.comm_manager);
48 48
49 49 this.last_msg_id = null;
50 50 this.last_msg_callbacks = {};
51 51 };
52 52
53 53
54 54 Kernel.prototype._get_msg = function (msg_type, content, metadata) {
55 55 var msg = {
56 56 header : {
57 57 msg_id : utils.uuid(),
58 58 username : this.username,
59 59 session : this.session_id,
60 60 msg_type : msg_type
61 61 },
62 62 metadata : metadata || {},
63 63 content : content,
64 64 parent_header : {}
65 65 };
66 66 return msg;
67 67 };
68 68
69 69 Kernel.prototype.bind_events = function () {
70 70 var that = this;
71 71 $([IPython.events]).on('send_input_reply.Kernel', function(evt, data) {
72 72 that.send_input_reply(data);
73 73 });
74 74 };
75 75
76 76 // Initialize the iopub handlers
77 77
78 78 Kernel.prototype.init_iopub_handlers = function () {
79 var output_types = ['stream', 'display_data', 'execute_result', 'pyerr'];
79 var output_msg_types = ['stream', 'display_data', 'execute_result', 'error'];
80 80 this._iopub_handlers = {};
81 81 this.register_iopub_handler('status', $.proxy(this._handle_status_message, this));
82 82 this.register_iopub_handler('clear_output', $.proxy(this._handle_clear_output, this));
83 83
84 for (var i=0; i < output_types.length; i++) {
85 this.register_iopub_handler(output_types[i], $.proxy(this._handle_output_message, this));
84 for (var i=0; i < output_msg_types.length; i++) {
85 this.register_iopub_handler(output_msg_types[i], $.proxy(this._handle_output_message, this));
86 86 }
87 87 };
88 88
89 89 /**
90 90 * Start the Python kernel
91 91 * @method start
92 92 */
93 93 Kernel.prototype.start = function (params) {
94 94 params = params || {};
95 95 if (!this.running) {
96 96 var qs = $.param(params);
97 97 this.post(utils.url_join_encode(this.kernel_service_url) + '?' + qs,
98 98 $.proxy(this._kernel_started, this),
99 99 'json'
100 100 );
101 101 }
102 102 };
103 103
104 104 /**
105 105 * Restart the python kernel.
106 106 *
107 107 * Emit a 'status_restarting.Kernel' event with
108 108 * the current object as parameter
109 109 *
110 110 * @method restart
111 111 */
112 112 Kernel.prototype.restart = function () {
113 113 $([IPython.events]).trigger('status_restarting.Kernel', {kernel: this});
114 114 if (this.running) {
115 115 this.stop_channels();
116 116 this.post(utils.url_join_encode(this.kernel_url, "restart"),
117 117 $.proxy(this._kernel_started, this),
118 118 'json'
119 119 );
120 120 }
121 121 };
122 122
123 123
124 124 Kernel.prototype._kernel_started = function (json) {
125 125 console.log("Kernel started: ", json.id);
126 126 this.running = true;
127 127 this.kernel_id = json.id;
128 128 // trailing 's' in https will become wss for secure web sockets
129 129 this.ws_host = location.protocol.replace('http', 'ws') + "//" + location.host;
130 130 this.kernel_url = utils.url_path_join(this.kernel_service_url, this.kernel_id);
131 131 this.start_channels();
132 132 };
133 133
134 134
135 135 Kernel.prototype._websocket_closed = function(ws_url, early) {
136 136 this.stop_channels();
137 137 $([IPython.events]).trigger('websocket_closed.Kernel',
138 138 {ws_url: ws_url, kernel: this, early: early}
139 139 );
140 140 };
141 141
142 142 /**
143 143 * Start the `shell` and `iopub` channels.
144 144 * Will stop and restart them if they already exist.
145 145 *
146 146 * @method start_channels
147 147 */
148 148 Kernel.prototype.start_channels = function () {
149 149 var that = this;
150 150 this.stop_channels();
151 151 var ws_host_url = this.ws_host + this.kernel_url;
152 152 console.log("Starting WebSockets:", ws_host_url);
153 153 this.shell_channel = new this.WebSocket(
154 154 this.ws_host + utils.url_join_encode(this.kernel_url, "shell")
155 155 );
156 156 this.stdin_channel = new this.WebSocket(
157 157 this.ws_host + utils.url_join_encode(this.kernel_url, "stdin")
158 158 );
159 159 this.iopub_channel = new this.WebSocket(
160 160 this.ws_host + utils.url_join_encode(this.kernel_url, "iopub")
161 161 );
162 162
163 163 var already_called_onclose = false; // only alert once
164 164 var ws_closed_early = function(evt){
165 165 if (already_called_onclose){
166 166 return;
167 167 }
168 168 already_called_onclose = true;
169 169 if ( ! evt.wasClean ){
170 170 that._websocket_closed(ws_host_url, true);
171 171 }
172 172 };
173 173 var ws_closed_late = function(evt){
174 174 if (already_called_onclose){
175 175 return;
176 176 }
177 177 already_called_onclose = true;
178 178 if ( ! evt.wasClean ){
179 179 that._websocket_closed(ws_host_url, false);
180 180 }
181 181 };
182 182 var channels = [this.shell_channel, this.iopub_channel, this.stdin_channel];
183 183 for (var i=0; i < channels.length; i++) {
184 184 channels[i].onopen = $.proxy(this._ws_opened, this);
185 185 channels[i].onclose = ws_closed_early;
186 186 }
187 187 // switch from early-close to late-close message after 1s
188 188 setTimeout(function() {
189 189 for (var i=0; i < channels.length; i++) {
190 190 if (channels[i] !== null) {
191 191 channels[i].onclose = ws_closed_late;
192 192 }
193 193 }
194 194 }, 1000);
195 195 this.shell_channel.onmessage = $.proxy(this._handle_shell_reply, this);
196 196 this.iopub_channel.onmessage = $.proxy(this._handle_iopub_message, this);
197 197 this.stdin_channel.onmessage = $.proxy(this._handle_input_request, this);
198 198 };
199 199
200 200 /**
201 201 * Handle a websocket entering the open state
202 202 * sends session and cookie authentication info as first message.
203 203 * Once all sockets are open, signal the Kernel.status_started event.
204 204 * @method _ws_opened
205 205 */
206 206 Kernel.prototype._ws_opened = function (evt) {
207 207 // send the session id so the Session object Python-side
208 208 // has the same identity
209 209 evt.target.send(this.session_id + ':' + document.cookie);
210 210
211 211 var channels = [this.shell_channel, this.iopub_channel, this.stdin_channel];
212 212 for (var i=0; i < channels.length; i++) {
213 213 // if any channel is not ready, don't trigger event.
214 214 if ( !channels[i].readyState ) return;
215 215 }
216 216 // all events ready, trigger started event.
217 217 $([IPython.events]).trigger('status_started.Kernel', {kernel: this});
218 218 };
219 219
220 220 /**
221 221 * Stop the websocket channels.
222 222 * @method stop_channels
223 223 */
224 224 Kernel.prototype.stop_channels = function () {
225 225 var channels = [this.shell_channel, this.iopub_channel, this.stdin_channel];
226 226 for (var i=0; i < channels.length; i++) {
227 227 if ( channels[i] !== null ) {
228 228 channels[i].onclose = null;
229 229 channels[i].close();
230 230 }
231 231 }
232 232 this.shell_channel = this.iopub_channel = this.stdin_channel = null;
233 233 };
234 234
235 235 // Main public methods.
236 236
237 237 // send a message on the Kernel's shell channel
238 238 Kernel.prototype.send_shell_message = function (msg_type, content, callbacks, metadata) {
239 239 var msg = this._get_msg(msg_type, content, metadata);
240 240 this.shell_channel.send(JSON.stringify(msg));
241 241 this.set_callbacks_for_msg(msg.header.msg_id, callbacks);
242 242 return msg.header.msg_id;
243 243 };
244 244
245 245 /**
246 246 * Get kernel info
247 247 *
248 248 * @param callback {function}
249 249 * @method kernel_info
250 250 *
251 251 * When calling this method, pass a callback function that expects one argument.
252 252 * The callback will be passed the complete `kernel_info_reply` message documented
253 253 * [here](http://ipython.org/ipython-doc/dev/development/messaging.html#kernel-info)
254 254 */
255 255 Kernel.prototype.kernel_info = function (callback) {
256 256 var callbacks;
257 257 if (callback) {
258 258 callbacks = { shell : { reply : callback } };
259 259 }
260 260 return this.send_shell_message("kernel_info_request", {}, callbacks);
261 261 };
262 262
263 263 /**
264 264 * Get info on an object
265 265 *
266 266 * @param objname {string}
267 267 * @param callback {function}
268 268 * @method object_info
269 269 *
270 270 * When calling this method, pass a callback function that expects one argument.
271 271 * The callback will be passed the complete `object_info_reply` message documented
272 272 * [here](http://ipython.org/ipython-doc/dev/development/messaging.html#object-information)
273 273 */
274 274 Kernel.prototype.object_info = function (objname, callback) {
275 275 var callbacks;
276 276 if (callback) {
277 277 callbacks = { shell : { reply : callback } };
278 278 }
279 279
280 280 if (typeof(objname) !== null && objname !== null) {
281 281 var content = {
282 282 oname : objname.toString(),
283 283 detail_level : 0,
284 284 };
285 285 return this.send_shell_message("object_info_request", content, callbacks);
286 286 }
287 287 return;
288 288 };
289 289
290 290 /**
291 291 * Execute given code into kernel, and pass result to callback.
292 292 *
293 293 * @async
294 294 * @method execute
295 295 * @param {string} code
296 296 * @param [callbacks] {Object} With the following keys (all optional)
297 297 * @param callbacks.shell.reply {function}
298 298 * @param callbacks.shell.payload.[payload_name] {function}
299 299 * @param callbacks.iopub.output {function}
300 300 * @param callbacks.iopub.clear_output {function}
301 301 * @param callbacks.input {function}
302 302 * @param {object} [options]
303 303 * @param [options.silent=false] {Boolean}
304 304 * @param [options.user_expressions=empty_dict] {Dict}
305 305 * @param [options.user_variables=empty_list] {List of Strings}
306 306 * @param [options.allow_stdin=false] {Boolean} true|false
307 307 *
308 308 * @example
309 309 *
310 310 * The options object should contain the options for the execute call. Its default
311 311 * values are:
312 312 *
313 313 * options = {
314 314 * silent : true,
315 315 * user_variables : [],
316 316 * user_expressions : {},
317 317 * allow_stdin : false
318 318 * }
319 319 *
320 320 * When calling this method pass a callbacks structure of the form:
321 321 *
322 322 * callbacks = {
323 323 * shell : {
324 324 * reply : execute_reply_callback,
325 325 * payload : {
326 326 * set_next_input : set_next_input_callback,
327 327 * }
328 328 * },
329 329 * iopub : {
330 330 * output : output_callback,
331 331 * clear_output : clear_output_callback,
332 332 * },
333 333 * input : raw_input_callback
334 334 * }
335 335 *
336 336 * Each callback will be passed the entire message as a single argument.
337 337 * Payload handlers will be passed the corresponding payload and the execute_reply message.
338 338 */
339 339 Kernel.prototype.execute = function (code, callbacks, options) {
340 340
341 341 var content = {
342 342 code : code,
343 343 silent : true,
344 344 store_history : false,
345 345 user_variables : [],
346 346 user_expressions : {},
347 347 allow_stdin : false
348 348 };
349 349 callbacks = callbacks || {};
350 350 if (callbacks.input !== undefined) {
351 351 content.allow_stdin = true;
352 352 }
353 353 $.extend(true, content, options);
354 354 $([IPython.events]).trigger('execution_request.Kernel', {kernel: this, content:content});
355 355 return this.send_shell_message("execute_request", content, callbacks);
356 356 };
357 357
358 358 /**
359 359 * When calling this method, pass a function to be called with the `complete_reply` message
360 360 * as its only argument when it arrives.
361 361 *
362 362 * `complete_reply` is documented
363 363 * [here](http://ipython.org/ipython-doc/dev/development/messaging.html#complete)
364 364 *
365 365 * @method complete
366 366 * @param line {integer}
367 367 * @param cursor_pos {integer}
368 368 * @param callback {function}
369 369 *
370 370 */
371 371 Kernel.prototype.complete = function (line, cursor_pos, callback) {
372 372 var callbacks;
373 373 if (callback) {
374 374 callbacks = { shell : { reply : callback } };
375 375 }
376 376 var content = {
377 377 text : '',
378 378 line : line,
379 379 block : null,
380 380 cursor_pos : cursor_pos
381 381 };
382 382 return this.send_shell_message("complete_request", content, callbacks);
383 383 };
384 384
385 385
386 386 Kernel.prototype.interrupt = function () {
387 387 if (this.running) {
388 388 $([IPython.events]).trigger('status_interrupting.Kernel', {kernel: this});
389 389 this.post(utils.url_join_encode(this.kernel_url, "interrupt"));
390 390 }
391 391 };
392 392
393 393
394 394 Kernel.prototype.kill = function () {
395 395 if (this.running) {
396 396 this.running = false;
397 397 var settings = {
398 398 cache : false,
399 399 type : "DELETE",
400 400 error : utils.log_ajax_error,
401 401 };
402 402 $.ajax(utils.url_join_encode(this.kernel_url), settings);
403 403 }
404 404 };
405 405
406 406 Kernel.prototype.send_input_reply = function (input) {
407 407 var content = {
408 408 value : input,
409 409 };
410 410 $([IPython.events]).trigger('input_reply.Kernel', {kernel: this, content:content});
411 411 var msg = this._get_msg("input_reply", content);
412 412 this.stdin_channel.send(JSON.stringify(msg));
413 413 return msg.header.msg_id;
414 414 };
415 415
416 416
417 417 // Reply handlers
418 418
419 419 Kernel.prototype.register_iopub_handler = function (msg_type, callback) {
420 420 this._iopub_handlers[msg_type] = callback;
421 421 };
422 422
423 423 Kernel.prototype.get_iopub_handler = function (msg_type) {
424 424 // get iopub handler for a specific message type
425 425 return this._iopub_handlers[msg_type];
426 426 };
427 427
428 428
429 429 Kernel.prototype.get_callbacks_for_msg = function (msg_id) {
430 430 // get callbacks for a specific message
431 431 if (msg_id == this.last_msg_id) {
432 432 return this.last_msg_callbacks;
433 433 } else {
434 434 return this._msg_callbacks[msg_id];
435 435 }
436 436 };
437 437
438 438
439 439 Kernel.prototype.clear_callbacks_for_msg = function (msg_id) {
440 440 if (this._msg_callbacks[msg_id] !== undefined ) {
441 441 delete this._msg_callbacks[msg_id];
442 442 }
443 443 };
444 444
445 445 Kernel.prototype._finish_shell = function (msg_id) {
446 446 var callbacks = this._msg_callbacks[msg_id];
447 447 if (callbacks !== undefined) {
448 448 callbacks.shell_done = true;
449 449 if (callbacks.iopub_done) {
450 450 this.clear_callbacks_for_msg(msg_id);
451 451 }
452 452 }
453 453 };
454 454
455 455 Kernel.prototype._finish_iopub = function (msg_id) {
456 456 var callbacks = this._msg_callbacks[msg_id];
457 457 if (callbacks !== undefined) {
458 458 callbacks.iopub_done = true;
459 459 if (!callbacks.shell_done) {
460 460 this.clear_callbacks_for_msg(msg_id);
461 461 }
462 462 }
463 463 };
464 464
465 465 /* Set callbacks for a particular message.
466 466 * Callbacks should be a struct of the following form:
467 467 * shell : {
468 468 *
469 469 * }
470 470
471 471 */
472 472 Kernel.prototype.set_callbacks_for_msg = function (msg_id, callbacks) {
473 473 this.last_msg_id = msg_id;
474 474 if (callbacks) {
475 475 // shallow-copy mapping, because we will modify it at the top level
476 476 var cbcopy = this._msg_callbacks[msg_id] = this.last_msg_callbacks = {};
477 477 cbcopy.shell = callbacks.shell;
478 478 cbcopy.iopub = callbacks.iopub;
479 479 cbcopy.input = callbacks.input;
480 480 cbcopy.shell_done = (!callbacks.shell);
481 481 cbcopy.iopub_done = (!callbacks.iopub);
482 482 } else {
483 483 this.last_msg_callbacks = {};
484 484 }
485 485 };
486 486
487 487
488 488 Kernel.prototype._handle_shell_reply = function (e) {
489 489 var reply = $.parseJSON(e.data);
490 490 $([IPython.events]).trigger('shell_reply.Kernel', {kernel: this, reply:reply});
491 491 var content = reply.content;
492 492 var metadata = reply.metadata;
493 493 var parent_id = reply.parent_header.msg_id;
494 494 var callbacks = this.get_callbacks_for_msg(parent_id);
495 495 if (!callbacks || !callbacks.shell) {
496 496 return;
497 497 }
498 498 var shell_callbacks = callbacks.shell;
499 499
500 500 // signal that shell callbacks are done
501 501 this._finish_shell(parent_id);
502 502
503 503 if (shell_callbacks.reply !== undefined) {
504 504 shell_callbacks.reply(reply);
505 505 }
506 506 if (content.payload && shell_callbacks.payload) {
507 507 this._handle_payloads(content.payload, shell_callbacks.payload, reply);
508 508 }
509 509 };
510 510
511 511
512 512 Kernel.prototype._handle_payloads = function (payloads, payload_callbacks, msg) {
513 513 var l = payloads.length;
514 514 // Payloads are handled by triggering events because we don't want the Kernel
515 515 // to depend on the Notebook or Pager classes.
516 516 for (var i=0; i<l; i++) {
517 517 var payload = payloads[i];
518 518 var callback = payload_callbacks[payload.source];
519 519 if (callback) {
520 520 callback(payload, msg);
521 521 }
522 522 }
523 523 };
524 524
525 525 Kernel.prototype._handle_status_message = function (msg) {
526 526 var execution_state = msg.content.execution_state;
527 527 var parent_id = msg.parent_header.msg_id;
528 528
529 529 // dispatch status msg callbacks, if any
530 530 var callbacks = this.get_callbacks_for_msg(parent_id);
531 531 if (callbacks && callbacks.iopub && callbacks.iopub.status) {
532 532 try {
533 533 callbacks.iopub.status(msg);
534 534 } catch (e) {
535 535 console.log("Exception in status msg handler", e, e.stack);
536 536 }
537 537 }
538 538
539 539 if (execution_state === 'busy') {
540 540 $([IPython.events]).trigger('status_busy.Kernel', {kernel: this});
541 541 } else if (execution_state === 'idle') {
542 542 // signal that iopub callbacks are (probably) done
543 543 // async output may still arrive,
544 544 // but only for the most recent request
545 545 this._finish_iopub(parent_id);
546 546
547 547 // trigger status_idle event
548 548 $([IPython.events]).trigger('status_idle.Kernel', {kernel: this});
549 549 } else if (execution_state === 'restarting') {
550 550 // autorestarting is distinct from restarting,
551 551 // in that it means the kernel died and the server is restarting it.
552 552 // status_restarting sets the notification widget,
553 553 // autorestart shows the more prominent dialog.
554 554 $([IPython.events]).trigger('status_autorestarting.Kernel', {kernel: this});
555 555 $([IPython.events]).trigger('status_restarting.Kernel', {kernel: this});
556 556 } else if (execution_state === 'dead') {
557 557 this.stop_channels();
558 558 $([IPython.events]).trigger('status_dead.Kernel', {kernel: this});
559 559 }
560 560 };
561 561
562 562
563 563 // handle clear_output message
564 564 Kernel.prototype._handle_clear_output = function (msg) {
565 565 var callbacks = this.get_callbacks_for_msg(msg.parent_header.msg_id);
566 566 if (!callbacks || !callbacks.iopub) {
567 567 return;
568 568 }
569 569 var callback = callbacks.iopub.clear_output;
570 570 if (callback) {
571 571 callback(msg);
572 572 }
573 573 };
574 574
575 575
576 576 // handle an output message (execute_result, display_data, etc.)
577 577 Kernel.prototype._handle_output_message = function (msg) {
578 578 var callbacks = this.get_callbacks_for_msg(msg.parent_header.msg_id);
579 579 if (!callbacks || !callbacks.iopub) {
580 580 return;
581 581 }
582 582 var callback = callbacks.iopub.output;
583 583 if (callback) {
584 584 callback(msg);
585 585 }
586 586 };
587 587
588 588 // dispatch IOPub messages to respective handlers.
589 589 // each message type should have a handler.
590 590 Kernel.prototype._handle_iopub_message = function (e) {
591 591 var msg = $.parseJSON(e.data);
592 592
593 593 var handler = this.get_iopub_handler(msg.header.msg_type);
594 594 if (handler !== undefined) {
595 595 handler(msg);
596 596 }
597 597 };
598 598
599 599
600 600 Kernel.prototype._handle_input_request = function (e) {
601 601 var request = $.parseJSON(e.data);
602 602 var header = request.header;
603 603 var content = request.content;
604 604 var metadata = request.metadata;
605 605 var msg_type = header.msg_type;
606 606 if (msg_type !== 'input_request') {
607 607 console.log("Invalid input request!", request);
608 608 return;
609 609 }
610 610 var callbacks = this.get_callbacks_for_msg(request.parent_header.msg_id);
611 611 if (callbacks) {
612 612 if (callbacks.input) {
613 613 callbacks.input(request);
614 614 }
615 615 }
616 616 };
617 617
618 618
619 619 IPython.Kernel = Kernel;
620 620
621 621 return IPython;
622 622
623 623 }(IPython));
624 624
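The kernel-side change is confined to the handler table: `init_iopub_handlers` now registers `_handle_output_message` under `error` instead of `pyerr`, and `_handle_iopub_message` simply ignores message types with no registered handler. A toy version of that dispatch, with plain objects standing in for WebSocket frames:

```javascript
// msg_type -> handler table, in the style of
// Kernel.register_iopub_handler / Kernel._handle_iopub_message.
var handlers = {};

function register_iopub_handler(msg_type, callback) {
    handlers[msg_type] = callback;
}

function handle_iopub_message(msg) {
    var handler = handlers[msg.header.msg_type];
    if (handler !== undefined) {
        handler(msg); // unregistered types are silently dropped
    }
}

// Post-rename registration: 'error' replaces 'pyerr'.
var output_msg_types = ['stream', 'display_data', 'execute_result', 'error'];
output_msg_types.forEach(function (t) {
    register_iopub_handler(t, function (msg) {
        console.log("output message:", msg.header.msg_type);
    });
});

handle_iopub_message({ header: { msg_type: "error" } });  // handled
handle_iopub_message({ header: { msg_type: "pyerr" } });  // dropped
```

This is why a pre-rename kernel talking to a post-rename frontend shows no tracebacks: its `pyerr` messages fall through the table unhandled.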
@@ -1,442 +1,442 @@
1 1 """Test suite for our zeromq-based message specification."""
2 2
3 3 # Copyright (c) IPython Development Team.
4 4 # Distributed under the terms of the Modified BSD License.
5 5
6 6 import re
7 7 from distutils.version import LooseVersion as V
8 8 from subprocess import PIPE
9 9 try:
10 10 from queue import Empty # Py 3
11 11 except ImportError:
12 12 from Queue import Empty # Py 2
13 13
14 14 import nose.tools as nt
15 15
16 16 from IPython.kernel import KernelManager
17 17
18 18 from IPython.utils.traitlets import (
19 19 HasTraits, TraitError, Bool, Unicode, Dict, Integer, List, Enum, Any,
20 20 )
21 21 from IPython.utils.py3compat import string_types, iteritems
22 22
23 23 from .utils import TIMEOUT, start_global_kernel, flush_channels, execute
24 24
25 25 #-----------------------------------------------------------------------------
26 26 # Globals
27 27 #-----------------------------------------------------------------------------
28 28 KC = None
29 29
30 30 def setup():
31 31 global KC
32 32 KC = start_global_kernel()
33 33
34 34 #-----------------------------------------------------------------------------
35 35 # Message Spec References
36 36 #-----------------------------------------------------------------------------
37 37
38 38 class Reference(HasTraits):
39 39
40 40 """
41 41 Base class for message spec specification testing.
42 42
43 43 This class is the core of the message specification test. The
44 44 idea is that child classes implement trait attributes for each
45 45 message key, so that message keys can be tested against these
46 46 traits using the :meth:`check` method.
47 47
48 48 """
49 49
50 50 def check(self, d):
51 51 """validate a dict against our traits"""
52 52 for key in self.trait_names():
53 53 nt.assert_in(key, d)
54 54 # FIXME: always allow None, probably not a good idea
55 55 if d[key] is None:
56 56 continue
57 57 try:
58 58 setattr(self, key, d[key])
59 59 except TraitError as e:
60 60 assert False, str(e)
61 61
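To make the checking mechanism concrete: a subclass declares one trait per
required message key, and `check` asserts that every key is present and
assignable. A minimal sketch, using the traitlets already imported in this
module (the `Point` spec and sample dicts are hypothetical, for illustration
only):

    class Point(Reference):
        x = Integer()
        label = Unicode()

    Point().check({'x': 1, 'label': u'origin'})    # passes silently
    # Point().check({'x': 1})                      # fails: 'label' not in dict
    # Point().check({'x': 'one', 'label': u'l'})   # fails: TraitError on x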
62 62 class Version(Unicode):
63 63 def validate(self, obj, value):
64 64 min_version = self.default_value
65 65 if V(value) < V(min_version):
66 66 raise TraitError("bad version: %s < %s" % (value, min_version))
67 67
68 68 class RMessage(Reference):
69 69 msg_id = Unicode()
70 70 msg_type = Unicode()
71 71 header = Dict()
72 72 parent_header = Dict()
73 73 content = Dict()
74 74
75 75 def check(self, d):
76 76 super(RMessage, self).check(d)
77 77 RHeader().check(self.header)
78 78 RHeader().check(self.parent_header)
79 79
80 80 class RHeader(Reference):
81 81 msg_id = Unicode()
82 82 msg_type = Unicode()
83 83 session = Unicode()
84 84 username = Unicode()
85 85 version = Version('5.0')
86 86
87 87
88 88 class ExecuteReply(Reference):
89 89 execution_count = Integer()
90 90 status = Enum((u'ok', u'error'))
91 91
92 92 def check(self, d):
93 93 Reference.check(self, d)
94 94 if d['status'] == 'ok':
95 95 ExecuteReplyOkay().check(d)
96 96 elif d['status'] == 'error':
97 97 ExecuteReplyError().check(d)
98 98
99 99
100 100 class ExecuteReplyOkay(Reference):
101 101 payload = List(Dict)
102 102 user_variables = Dict()
103 103 user_expressions = Dict()
104 104
105 105
106 106 class ExecuteReplyError(Reference):
107 107 ename = Unicode()
108 108 evalue = Unicode()
109 109 traceback = List(Unicode)
110 110
111 111
112 112 class OInfoReply(Reference):
113 113 name = Unicode()
114 114 found = Bool()
115 115 ismagic = Bool()
116 116 isalias = Bool()
117 117 namespace = Enum((u'builtin', u'magics', u'alias', u'Interactive'))
118 118 type_name = Unicode()
119 119 string_form = Unicode()
120 120 base_class = Unicode()
121 121 length = Integer()
122 122 file = Unicode()
123 123 definition = Unicode()
124 124 argspec = Dict()
125 125 init_definition = Unicode()
126 126 docstring = Unicode()
127 127 init_docstring = Unicode()
128 128 class_docstring = Unicode()
129 129 call_def = Unicode()
130 130 call_docstring = Unicode()
131 131 source = Unicode()
132 132
133 133 def check(self, d):
134 134 super(OInfoReply, self).check(d)
135 135 if d['argspec'] is not None:
136 136 ArgSpec().check(d['argspec'])
137 137
138 138
139 139 class ArgSpec(Reference):
140 140 args = List(Unicode)
141 141 varargs = Unicode()
142 142 varkw = Unicode()
143 143 defaults = List()
144 144
145 145
146 146 class Status(Reference):
147 147 execution_state = Enum((u'busy', u'idle', u'starting'))
148 148
149 149
150 150 class CompleteReply(Reference):
151 151 matches = List(Unicode)
152 152
153 153
154 154 class KernelInfoReply(Reference):
155 155 protocol_version = Version('5.0')
156 156 ipython_version = Version('2.0')
157 157 language_version = Version('2.7')
158 158 language = Unicode()
159 159
160 160
161 161 # IOPub messages
162 162
163 163 class ExecuteInput(Reference):
164 164 code = Unicode()
165 165 execution_count = Integer()
166 166
167 167
168 PyErr = ExecuteReplyError
168 Error = ExecuteReplyError
169 169
170 170
171 171 class Stream(Reference):
172 172 name = Enum((u'stdout', u'stderr'))
173 173 data = Unicode()
174 174
175 175
176 176 mime_pat = re.compile(r'\w+/\w+')
177 177
178 178 class DisplayData(Reference):
179 179 source = Unicode()
180 180 metadata = Dict()
181 181 data = Dict()
182 182 def _data_changed(self, name, old, new):
183 183 for k,v in iteritems(new):
184 184 assert mime_pat.match(k)
185 185 nt.assert_is_instance(v, string_types)
186 186
187 187
188 188 class ExecuteResult(Reference):
189 189 execution_count = Integer()
190 190 data = Dict()
191 191 def _data_changed(self, name, old, new):
192 192 for k,v in iteritems(new):
193 193 assert mime_pat.match(k)
194 194 nt.assert_is_instance(v, string_types)
195 195
196 196
197 197 references = {
198 198 'execute_reply' : ExecuteReply(),
199 199 'object_info_reply' : OInfoReply(),
200 200 'status' : Status(),
201 201 'complete_reply' : CompleteReply(),
202 202 'kernel_info_reply': KernelInfoReply(),
203 203 'execute_input' : ExecuteInput(),
204 204 'execute_result' : ExecuteResult(),
205 'pyerr' : PyErr(),
205 'error' : Error(),
206 206 'stream' : Stream(),
207 207 'display_data' : DisplayData(),
208 208 'header' : RHeader(),
209 209 }
210 210 """
211 211 Specifications of the `content` part of the reply messages.
212 212 """
213 213
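With the pyerr -> error rename above, an IOPub error message now validates
against the same spec as an error execute_reply. A hedged sketch with a
hand-built content dict (all values illustrative):

    error_content = {
        'ename': u'ZeroDivisionError',
        'evalue': u'integer division or modulo by zero',
        'traceback': [u'Traceback (most recent call last): ...'],
    }
    references['error'].check(error_content)   # all required keys present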
214 214
215 215 def validate_message(msg, msg_type=None, parent=None):
216 216 """validate a message
217 217
218 218 This checks the overall message structure, then checks the
219 219 `content` dict against the reference spec for its msg_type.
220 220
221 221 If msg_type and/or parent are given, the msg_type and/or parent msg_id
222 222 are compared with the given values.
223 223 """
224 224 RMessage().check(msg)
225 225 if msg_type:
226 226 nt.assert_equal(msg['msg_type'], msg_type)
227 227 if parent:
228 228 nt.assert_equal(msg['parent_header']['msg_id'], parent)
229 229 content = msg['content']
230 230 ref = references[msg['msg_type']]
231 231 ref.check(content)
232 232
233 233
234 234 #-----------------------------------------------------------------------------
235 235 # Tests
236 236 #-----------------------------------------------------------------------------
237 237
238 238 # Shell channel
239 239
240 240 def test_execute():
241 241 flush_channels()
242 242
243 243 msg_id = KC.execute(code='x=1')
244 244 reply = KC.get_shell_msg(timeout=TIMEOUT)
245 245 validate_message(reply, 'execute_reply', msg_id)
246 246
247 247
248 248 def test_execute_silent():
249 249 flush_channels()
250 250 msg_id, reply = execute(code='x=1', silent=True)
251 251
252 252 # flush status=idle
253 253 status = KC.iopub_channel.get_msg(timeout=TIMEOUT)
254 254 validate_message(status, 'status', msg_id)
255 255 nt.assert_equal(status['content']['execution_state'], 'idle')
256 256
257 257 nt.assert_raises(Empty, KC.iopub_channel.get_msg, timeout=0.1)
258 258 count = reply['execution_count']
259 259
260 260 msg_id, reply = execute(code='x=2', silent=True)
261 261
262 262 # flush status=idle
263 263 status = KC.iopub_channel.get_msg(timeout=TIMEOUT)
264 264 validate_message(status, 'status', msg_id)
265 265 nt.assert_equal(status['content']['execution_state'], 'idle')
266 266
267 267 nt.assert_raises(Empty, KC.iopub_channel.get_msg, timeout=0.1)
268 268 count_2 = reply['execution_count']
269 269 nt.assert_equal(count_2, count)
270 270
271 271
272 272 def test_execute_error():
273 273 flush_channels()
274 274
275 275 msg_id, reply = execute(code='1/0')
276 276 nt.assert_equal(reply['status'], 'error')
277 277 nt.assert_equal(reply['ename'], 'ZeroDivisionError')
278 278
279 pyerr = KC.iopub_channel.get_msg(timeout=TIMEOUT)
280 validate_message(pyerr, 'pyerr', msg_id)
279 error = KC.iopub_channel.get_msg(timeout=TIMEOUT)
280 validate_message(error, 'error', msg_id)
281 281
282 282
283 283 def test_execute_inc():
284 284 """execute request should increment execution_count"""
285 285 flush_channels()
286 286
287 287 msg_id, reply = execute(code='x=1')
288 288 count = reply['execution_count']
289 289
290 290 flush_channels()
291 291
292 292 msg_id, reply = execute(code='x=2')
293 293 count_2 = reply['execution_count']
294 294 nt.assert_equal(count_2, count+1)
295 295
296 296
297 297 def test_user_variables():
298 298 flush_channels()
299 299
300 300 msg_id, reply = execute(code='x=1', user_variables=['x'])
301 301 user_variables = reply['user_variables']
302 302 nt.assert_equal(user_variables, {u'x': {
303 303 u'status': u'ok',
304 304 u'data': {u'text/plain': u'1'},
305 305 u'metadata': {},
306 306 }})
307 307
308 308
309 309 def test_user_variables_fail():
310 310 flush_channels()
311 311
312 312 msg_id, reply = execute(code='x=1', user_variables=['nosuchname'])
313 313 user_variables = reply['user_variables']
314 314 foo = user_variables['nosuchname']
315 315 nt.assert_equal(foo['status'], 'error')
316 316 nt.assert_equal(foo['ename'], 'KeyError')
317 317
318 318
319 319 def test_user_expressions():
320 320 flush_channels()
321 321
322 322 msg_id, reply = execute(code='x=1', user_expressions=dict(foo='x+1'))
323 323 user_expressions = reply['user_expressions']
324 324 nt.assert_equal(user_expressions, {u'foo': {
325 325 u'status': u'ok',
326 326 u'data': {u'text/plain': u'2'},
327 327 u'metadata': {},
328 328 }})
329 329
330 330
331 331 def test_user_expressions_fail():
332 332 flush_channels()
333 333
334 334 msg_id, reply = execute(code='x=0', user_expressions=dict(foo='nosuchname'))
335 335 user_expressions = reply['user_expressions']
336 336 foo = user_expressions['foo']
337 337 nt.assert_equal(foo['status'], 'error')
338 338 nt.assert_equal(foo['ename'], 'NameError')
339 339
340 340
341 341 def test_oinfo():
342 342 flush_channels()
343 343
344 344 msg_id = KC.object_info('a')
345 345 reply = KC.get_shell_msg(timeout=TIMEOUT)
346 346 validate_message(reply, 'object_info_reply', msg_id)
347 347
348 348
349 349 def test_oinfo_found():
350 350 flush_channels()
351 351
352 352 msg_id, reply = execute(code='a=5')
353 353
354 354 msg_id = KC.object_info('a')
355 355 reply = KC.get_shell_msg(timeout=TIMEOUT)
356 356 validate_message(reply, 'object_info_reply', msg_id)
357 357 content = reply['content']
358 358 assert content['found']
359 359 argspec = content['argspec']
360 360 nt.assert_is(argspec, None)
361 361
362 362
363 363 def test_oinfo_detail():
364 364 flush_channels()
365 365
366 366 msg_id, reply = execute(code='ip=get_ipython()')
367 367
368 368 msg_id = KC.object_info('ip.object_inspect', detail_level=2)
369 369 reply = KC.get_shell_msg(timeout=TIMEOUT)
370 370 validate_message(reply, 'object_info_reply', msg_id)
371 371 content = reply['content']
372 372 assert content['found']
373 373 argspec = content['argspec']
374 374 nt.assert_is_instance(argspec, dict, "expected non-empty argspec dict, got %r" % argspec)
375 375 nt.assert_equal(argspec['defaults'], [0])
376 376
377 377
378 378 def test_oinfo_not_found():
379 379 flush_channels()
380 380
381 381 msg_id = KC.object_info('dne')
382 382 reply = KC.get_shell_msg(timeout=TIMEOUT)
383 383 validate_message(reply, 'object_info_reply', msg_id)
384 384 content = reply['content']
385 385 nt.assert_false(content['found'])
386 386
387 387
388 388 def test_complete():
389 389 flush_channels()
390 390
391 391 msg_id, reply = execute(code="alpha = albert = 5")
392 392
393 393 msg_id = KC.complete('al', 'al', 2)
394 394 reply = KC.get_shell_msg(timeout=TIMEOUT)
395 395 validate_message(reply, 'complete_reply', msg_id)
396 396 matches = reply['content']['matches']
397 397 for name in ('alpha', 'albert'):
398 398 nt.assert_in(name, matches)
399 399
400 400
401 401 def test_kernel_info_request():
402 402 flush_channels()
403 403
404 404 msg_id = KC.kernel_info()
405 405 reply = KC.get_shell_msg(timeout=TIMEOUT)
406 406 validate_message(reply, 'kernel_info_reply', msg_id)
407 407
408 408
409 409 def test_single_payload():
410 410 flush_channels()
411 411 msg_id, reply = execute(code="for i in range(3):\n"+
412 412 " x=range?\n")
413 413 payload = reply['payload']
414 414 next_input_pls = [pl for pl in payload if pl["source"] == "set_next_input"]
415 415 nt.assert_equal(len(next_input_pls), 1)
416 416
417 417
418 418 # IOPub channel
419 419
420 420
421 421 def test_stream():
422 422 flush_channels()
423 423
424 424 msg_id, reply = execute("print('hi')")
425 425
426 426 stdout = KC.iopub_channel.get_msg(timeout=TIMEOUT)
427 427 validate_message(stdout, 'stream', msg_id)
428 428 content = stdout['content']
429 429 nt.assert_equal(content['name'], u'stdout')
430 430 nt.assert_equal(content['data'], u'hi\n')
431 431
432 432
433 433 def test_display_data():
434 434 flush_channels()
435 435
436 436 msg_id, reply = execute("from IPython.core.display import display; display(1)")
437 437
438 438 display = KC.iopub_channel.get_msg(timeout=TIMEOUT)
439 439 validate_message(display, 'display_data', parent=msg_id)
440 440 data = display['content']['data']
441 441 nt.assert_equal(data['text/plain'], u'1')
442 442
@@ -1,797 +1,797 b''
1 1 #!/usr/bin/env python
2 2 """An interactive kernel that talks to frontends over 0MQ."""
3 3
4 4 # Copyright (c) IPython Development Team.
5 5 # Distributed under the terms of the Modified BSD License.
6 6
7 7 from __future__ import print_function
8 8
9 9 import sys
10 10 import time
11 11 import traceback
12 12 import logging
13 13 import uuid
14 14
15 15 from datetime import datetime
16 16 from signal import (
17 17 signal, default_int_handler, SIGINT
18 18 )
19 19
20 20 import zmq
21 21 from zmq.eventloop import ioloop
22 22 from zmq.eventloop.zmqstream import ZMQStream
23 23
24 24 from IPython.config.configurable import Configurable
25 25 from IPython.core.error import StdinNotImplementedError
26 26 from IPython.core import release
27 27 from IPython.utils import py3compat
28 28 from IPython.utils.py3compat import builtin_mod, unicode_type, string_types
29 29 from IPython.utils.jsonutil import json_clean
30 30 from IPython.utils.traitlets import (
31 31 Any, Instance, Float, Dict, List, Set, Integer, Unicode,
32 32 Type, Bool,
33 33 )
34 34
35 35 from .serialize import serialize_object, unpack_apply_message
36 36 from .session import Session
37 37 from .zmqshell import ZMQInteractiveShell
38 38
39 39
40 40 #-----------------------------------------------------------------------------
41 41 # Main kernel class
42 42 #-----------------------------------------------------------------------------
43 43
44 44 protocol_version = release.kernel_protocol_version
45 45 ipython_version = release.version
46 46 language_version = sys.version.split()[0]
47 47
48 48
49 49 class Kernel(Configurable):
50 50
51 51 #---------------------------------------------------------------------------
52 52 # Kernel interface
53 53 #---------------------------------------------------------------------------
54 54
55 55 # attribute to override with a GUI
56 56 eventloop = Any(None)
57 57 def _eventloop_changed(self, name, old, new):
58 58 """schedule call to eventloop from IOLoop"""
59 59 loop = ioloop.IOLoop.instance()
60 60 loop.add_callback(self.enter_eventloop)
61 61
62 62 shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
63 63 shell_class = Type(ZMQInteractiveShell)
64 64
65 65 session = Instance(Session)
66 66 profile_dir = Instance('IPython.core.profiledir.ProfileDir')
67 67 shell_streams = List()
68 68 control_stream = Instance(ZMQStream)
69 69 iopub_socket = Instance(zmq.Socket)
70 70 stdin_socket = Instance(zmq.Socket)
71 71 log = Instance(logging.Logger)
72 72
73 73 user_module = Any()
74 74 def _user_module_changed(self, name, old, new):
75 75 if self.shell is not None:
76 76 self.shell.user_module = new
77 77
78 78 user_ns = Instance(dict, args=None, allow_none=True)
79 79 def _user_ns_changed(self, name, old, new):
80 80 if self.shell is not None:
81 81 self.shell.user_ns = new
82 82 self.shell.init_user_ns()
83 83
84 84 # identities:
85 85 int_id = Integer(-1)
86 86 ident = Unicode()
87 87
88 88 def _ident_default(self):
89 89 return unicode_type(uuid.uuid4())
90 90
91 91 # Private interface
92 92
93 93 _darwin_app_nap = Bool(True, config=True,
93 93 help="""Whether to use appnope for compatibility with OS X App Nap.
95 95
96 96 Only affects OS X >= 10.9.
97 97 """
98 98 )
99 99
100 100 # Time to sleep after flushing the stdout/err buffers in each execute
101 101 # cycle. While this introduces a hard limit on the minimal latency of the
102 102 # execute cycle, it helps prevent output synchronization problems for
103 103 # clients.
104 104 # Units are in seconds. The minimum zmq latency on local host is probably
105 105 # ~150 microseconds, set this to 500us for now. We may need to increase it
106 106 # a little if it's not enough after more interactive testing.
107 107 _execute_sleep = Float(0.0005, config=True)
108 108
109 109 # Frequency of the kernel's event loop.
110 110 # Units are in seconds, kernel subclasses for GUI toolkits may need to
111 111 # adapt to milliseconds.
112 112 _poll_interval = Float(0.05, config=True)
113 113
114 114 # If the shutdown was requested over the network, we leave here the
115 115 # necessary reply message so it can be sent by our registered atexit
116 116 # handler. This ensures that the reply is only sent to clients truly at
117 117 # the end of our shutdown process (which happens after the underlying
118 118 # IPython shell's own shutdown).
119 119 _shutdown_message = None
120 120
121 121 # This is a dict of port number that the kernel is listening on. It is set
122 122 # by record_ports and used by connect_request.
123 123 _recorded_ports = Dict()
124 124
125 125 # A reference to the Python builtin 'raw_input' function.
126 126 # (i.e., __builtin__.raw_input for Python 2.7, builtins.input for Python 3)
127 127 _sys_raw_input = Any()
128 128 _sys_eval_input = Any()
129 129
130 130 # set of aborted msg_ids
131 131 aborted = Set()
132 132
133 133
134 134 def __init__(self, **kwargs):
135 135 super(Kernel, self).__init__(**kwargs)
136 136
137 137 # Initialize the InteractiveShell subclass
138 138 self.shell = self.shell_class.instance(parent=self,
139 139 profile_dir = self.profile_dir,
140 140 user_module = self.user_module,
141 141 user_ns = self.user_ns,
142 142 kernel = self,
143 143 )
144 144 self.shell.displayhook.session = self.session
145 145 self.shell.displayhook.pub_socket = self.iopub_socket
146 146 self.shell.displayhook.topic = self._topic('execute_result')
147 147 self.shell.display_pub.session = self.session
148 148 self.shell.display_pub.pub_socket = self.iopub_socket
149 149 self.shell.data_pub.session = self.session
150 150 self.shell.data_pub.pub_socket = self.iopub_socket
151 151
152 152 # TMP - hack while developing
153 153 self.shell._reply_content = None
154 154
155 155 # Build dict of handlers for message types
156 156 msg_types = [ 'execute_request', 'complete_request',
157 157 'object_info_request', 'history_request',
158 158 'kernel_info_request',
159 159 'connect_request', 'shutdown_request',
160 160 'apply_request',
161 161 ]
162 162 self.shell_handlers = {}
163 163 for msg_type in msg_types:
164 164 self.shell_handlers[msg_type] = getattr(self, msg_type)
165 165
166 166 comm_msg_types = [ 'comm_open', 'comm_msg', 'comm_close' ]
167 167 comm_manager = self.shell.comm_manager
168 168 for msg_type in comm_msg_types:
169 169 self.shell_handlers[msg_type] = getattr(comm_manager, msg_type)
170 170
171 171 control_msg_types = msg_types + [ 'clear_request', 'abort_request' ]
172 172 self.control_handlers = {}
173 173 for msg_type in control_msg_types:
174 174 self.control_handlers[msg_type] = getattr(self, msg_type)
175 175
176 176
177 177 def dispatch_control(self, msg):
178 178 """dispatch control requests"""
179 179 idents,msg = self.session.feed_identities(msg, copy=False)
180 180 try:
181 181 msg = self.session.unserialize(msg, content=True, copy=False)
182 182 except:
183 183 self.log.error("Invalid Control Message", exc_info=True)
184 184 return
185 185
186 186 self.log.debug("Control received: %s", msg)
187 187
188 188 header = msg['header']
189 189 msg_id = header['msg_id']
190 190 msg_type = header['msg_type']
191 191
192 192 handler = self.control_handlers.get(msg_type, None)
193 193 if handler is None:
194 194 self.log.error("UNKNOWN CONTROL MESSAGE TYPE: %r", msg_type)
195 195 else:
196 196 try:
197 197 handler(self.control_stream, idents, msg)
198 198 except Exception:
199 199 self.log.error("Exception in control handler:", exc_info=True)
200 200
201 201 def dispatch_shell(self, stream, msg):
202 202 """dispatch shell requests"""
203 203 # flush control requests first
204 204 if self.control_stream:
205 205 self.control_stream.flush()
206 206
207 207 idents,msg = self.session.feed_identities(msg, copy=False)
208 208 try:
209 209 msg = self.session.unserialize(msg, content=True, copy=False)
210 210 except:
211 211 self.log.error("Invalid Message", exc_info=True)
212 212 return
213 213
214 214 header = msg['header']
215 215 msg_id = header['msg_id']
216 216 msg_type = msg['header']['msg_type']
217 217
218 218 # Print some info about this message and leave a '--->' marker, so it's
219 219 # easier to visually trace the message chain when debugging. Each
220 220 # handler prints its message at the end.
221 221 self.log.debug('\n*** MESSAGE TYPE:%s***', msg_type)
222 222 self.log.debug(' Content: %s\n --->\n ', msg['content'])
223 223
224 224 if msg_id in self.aborted:
225 225 self.aborted.remove(msg_id)
226 226 # is it safe to assume a msg_id will not be resubmitted?
227 227 reply_type = msg_type.split('_')[0] + '_reply'
228 228 status = {'status' : 'aborted'}
229 229 md = {'engine' : self.ident}
230 230 md.update(status)
231 231 reply_msg = self.session.send(stream, reply_type, metadata=md,
232 232 content=status, parent=msg, ident=idents)
233 233 return
234 234
235 235 handler = self.shell_handlers.get(msg_type, None)
236 236 if handler is None:
237 237 self.log.error("UNKNOWN MESSAGE TYPE: %r", msg_type)
238 238 else:
239 239 # ensure default_int_handler during handler call
240 240 sig = signal(SIGINT, default_int_handler)
241 241 try:
242 242 handler(stream, idents, msg)
243 243 except Exception:
244 244 self.log.error("Exception in message handler:", exc_info=True)
245 245 finally:
246 246 signal(SIGINT, sig)
247 247
248 248 def enter_eventloop(self):
249 249 """enter eventloop"""
250 250 self.log.info("entering eventloop %s", self.eventloop)
251 251 for stream in self.shell_streams:
252 252 # flush any pending replies,
253 253 # which may be skipped by entering the eventloop
254 254 stream.flush(zmq.POLLOUT)
255 255 # restore default_int_handler
256 256 signal(SIGINT, default_int_handler)
257 257 while self.eventloop is not None:
258 258 try:
259 259 self.eventloop(self)
260 260 except KeyboardInterrupt:
261 261 # Ctrl-C shouldn't crash the kernel
262 262 self.log.error("KeyboardInterrupt caught in kernel")
263 263 continue
264 264 else:
265 265 # eventloop exited cleanly, this means we should stop (right?)
266 266 self.eventloop = None
267 267 break
268 268 self.log.info("exiting eventloop")
269 269
270 270 def start(self):
271 271 """register dispatchers for streams"""
272 272 self.shell.exit_now = False
273 273 if self.control_stream:
274 274 self.control_stream.on_recv(self.dispatch_control, copy=False)
275 275
276 276 def make_dispatcher(stream):
277 277 def dispatcher(msg):
278 278 return self.dispatch_shell(stream, msg)
279 279 return dispatcher
280 280
281 281 for s in self.shell_streams:
282 282 s.on_recv(make_dispatcher(s), copy=False)
283 283
284 284 # publish starting status
285 285 self._publish_status('starting')
286 286
287 287 def do_one_iteration(self):
288 288 """step eventloop just once"""
289 289 if self.control_stream:
290 290 self.control_stream.flush()
291 291 for stream in self.shell_streams:
292 292 # handle at most one request per iteration
293 293 stream.flush(zmq.POLLIN, 1)
294 294 stream.flush(zmq.POLLOUT)
295 295
296 296
297 297 def record_ports(self, ports):
298 298 """Record the ports that this kernel is using.
299 299
300 300 The creator of the Kernel instance must call this method if they
301 301 want the :meth:`connect_request` method to return the port numbers.
302 302 """
303 303 self._recorded_ports = ports
304 304
305 305 #---------------------------------------------------------------------------
306 306 # Kernel request handlers
307 307 #---------------------------------------------------------------------------
308 308
309 309 def _make_metadata(self, other=None):
310 310 """init metadata dict, for execute/apply_reply"""
311 311 new_md = {
312 312 'dependencies_met' : True,
313 313 'engine' : self.ident,
314 314 'started': datetime.now(),
315 315 }
316 316 if other:
317 317 new_md.update(other)
318 318 return new_md
319 319
320 320 def _publish_execute_input(self, code, parent, execution_count):
321 321 """Publish the code request on the iopub stream."""
322 322
323 323 self.session.send(self.iopub_socket, u'execute_input',
324 324 {u'code':code, u'execution_count': execution_count},
325 325 parent=parent, ident=self._topic('execute_input')
326 326 )
327 327
328 328 def _publish_status(self, status, parent=None):
329 329 """send status (busy/idle) on IOPub"""
330 330 self.session.send(self.iopub_socket,
331 331 u'status',
332 332 {u'execution_state': status},
333 333 parent=parent,
334 334 ident=self._topic('status'),
335 335 )
336 336
337 337
338 338 def execute_request(self, stream, ident, parent):
339 339 """handle an execute_request"""
340 340
341 341 self._publish_status(u'busy', parent)
342 342
343 343 try:
344 344 content = parent[u'content']
345 345 code = py3compat.cast_unicode_py2(content[u'code'])
346 346 silent = content[u'silent']
347 347 store_history = content.get(u'store_history', not silent)
348 348 except:
349 349 self.log.error("Got bad msg: ")
350 350 self.log.error("%s", parent)
351 351 return
352 352
353 353 md = self._make_metadata(parent['metadata'])
354 354
355 355 shell = self.shell # we'll need this a lot here
356 356
357 357 # Replace raw_input. Note that it is not sufficient to replace
358 358 # raw_input in the user namespace.
359 359 if content.get('allow_stdin', False):
360 360 raw_input = lambda prompt='': self._raw_input(prompt, ident, parent)
361 361 input = lambda prompt='': eval(raw_input(prompt))
362 362 else:
363 363 raw_input = input = lambda prompt='' : self._no_raw_input()
364 364
365 365 if py3compat.PY3:
366 366 self._sys_raw_input = builtin_mod.input
367 367 builtin_mod.input = raw_input
368 368 else:
369 369 self._sys_raw_input = builtin_mod.raw_input
370 370 self._sys_eval_input = builtin_mod.input
371 371 builtin_mod.raw_input = raw_input
372 372 builtin_mod.input = input
373 373
374 374 # Set the parent message of the display hook and out streams.
375 375 shell.set_parent(parent)
376 376
377 377 # Re-broadcast our input for the benefit of listening clients, and
378 378 # start computing output
379 379 if not silent:
380 380 self._publish_execute_input(code, parent, shell.execution_count)
381 381
382 382 reply_content = {}
383 383 # FIXME: the shell calls the exception handler itself.
384 384 shell._reply_content = None
385 385 try:
386 386 shell.run_cell(code, store_history=store_history, silent=silent)
387 387 except:
388 388 status = u'error'
389 389 # FIXME: this code right now isn't being used yet by default,
390 390 # because the run_cell() call above directly fires off exception
391 391 # reporting. This code, therefore, is only active in the scenario
392 392 # where runlines itself has an unhandled exception. We need to
393 393 # uniformize this, for all exception construction to come from a
394 394 # single location in the codebase.
395 395 etype, evalue, tb = sys.exc_info()
396 396 tb_list = traceback.format_exception(etype, evalue, tb)
397 397 reply_content.update(shell._showtraceback(etype, evalue, tb_list))
398 398 else:
399 399 status = u'ok'
400 400 finally:
401 401 # Restore raw_input.
402 402 if py3compat.PY3:
403 403 builtin_mod.input = self._sys_raw_input
404 404 else:
405 405 builtin_mod.raw_input = self._sys_raw_input
406 406 builtin_mod.input = self._sys_eval_input
407 407
408 408 reply_content[u'status'] = status
409 409
410 410 # Return the execution counter so clients can display prompts
411 411 reply_content['execution_count'] = shell.execution_count - 1
412 412
413 413 # FIXME - fish exception info out of shell, possibly left there by
414 414 # runlines. We'll need to clean up this logic later.
415 415 if shell._reply_content is not None:
416 416 reply_content.update(shell._reply_content)
417 417 e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method='execute')
418 418 reply_content['engine_info'] = e_info
419 419 # reset after use
420 420 shell._reply_content = None
421 421
422 422 if 'traceback' in reply_content:
423 423 self.log.info("Exception in execute request:\n%s", '\n'.join(reply_content['traceback']))
424 424
425 425
426 426 # At this point, we can tell whether the main code execution succeeded
427 427 # or not. If it did, we proceed to evaluate user_variables/expressions
428 428 if reply_content['status'] == 'ok':
429 429 reply_content[u'user_variables'] = \
430 430 shell.user_variables(content.get(u'user_variables', []))
431 431 reply_content[u'user_expressions'] = \
432 432 shell.user_expressions(content.get(u'user_expressions', {}))
433 433 else:
434 434 # If there was an error, don't even try to compute variables or
435 435 # expressions
436 436 reply_content[u'user_variables'] = {}
437 437 reply_content[u'user_expressions'] = {}
438 438
439 439 # Payloads should be retrieved regardless of outcome, so we can both
440 440 # recover partial output (that could have been generated early in a
441 441 # block, before an error) and clear the payload system always.
442 442 reply_content[u'payload'] = shell.payload_manager.read_payload()
443 443 # Be aggressive about clearing the payload because we don't want
444 444 # it to sit in memory until the next execute_request comes in.
445 445 shell.payload_manager.clear_payload()
446 446
447 447 # Flush output before sending the reply.
448 448 sys.stdout.flush()
449 449 sys.stderr.flush()
450 450 # FIXME: on rare occasions, the flush doesn't seem to make it to the
451 451 # clients... This seems to mitigate the problem, but we definitely need
452 452 # to better understand what's going on.
453 453 if self._execute_sleep:
454 454 time.sleep(self._execute_sleep)
455 455
456 456 # Send the reply.
457 457 reply_content = json_clean(reply_content)
458 458
459 459 md['status'] = reply_content['status']
460 460 if reply_content['status'] == 'error' and \
461 461 reply_content['ename'] == 'UnmetDependency':
462 462 md['dependencies_met'] = False
463 463
464 464 reply_msg = self.session.send(stream, u'execute_reply',
465 465 reply_content, parent, metadata=md,
466 466 ident=ident)
467 467
468 468 self.log.debug("%s", reply_msg)
469 469
470 470 if not silent and reply_msg['content']['status'] == u'error':
471 471 self._abort_queues()
472 472
473 473 self._publish_status(u'idle', parent)
474 474
475 475 def complete_request(self, stream, ident, parent):
476 476 txt, matches = self._complete(parent)
477 477 matches = {'matches' : matches,
478 478 'matched_text' : txt,
479 479 'status' : 'ok'}
480 480 matches = json_clean(matches)
481 481 completion_msg = self.session.send(stream, 'complete_reply',
482 482 matches, parent, ident)
483 483 self.log.debug("%s", completion_msg)
484 484
485 485 def object_info_request(self, stream, ident, parent):
486 486 content = parent['content']
487 487 object_info = self.shell.object_inspect(content['oname'],
488 488 detail_level = content.get('detail_level', 0)
489 489 )
490 490 # Before we send this object over, we scrub it for JSON usage
491 491 oinfo = json_clean(object_info)
492 492 msg = self.session.send(stream, 'object_info_reply',
493 493 oinfo, parent, ident)
494 494 self.log.debug("%s", msg)
495 495
496 496 def history_request(self, stream, ident, parent):
497 497 # We need to pull these out, as passing **kwargs doesn't work with
498 498 # unicode keys before Python 2.6.5.
499 499 hist_access_type = parent['content']['hist_access_type']
500 500 raw = parent['content']['raw']
501 501 output = parent['content']['output']
502 502 if hist_access_type == 'tail':
503 503 n = parent['content']['n']
504 504 hist = self.shell.history_manager.get_tail(n, raw=raw, output=output,
505 505 include_latest=True)
506 506
507 507 elif hist_access_type == 'range':
508 508 session = parent['content']['session']
509 509 start = parent['content']['start']
510 510 stop = parent['content']['stop']
511 511 hist = self.shell.history_manager.get_range(session, start, stop,
512 512 raw=raw, output=output)
513 513
514 514 elif hist_access_type == 'search':
515 515 n = parent['content'].get('n')
516 516 unique = parent['content'].get('unique', False)
517 517 pattern = parent['content']['pattern']
518 518 hist = self.shell.history_manager.search(
519 519 pattern, raw=raw, output=output, n=n, unique=unique)
520 520
521 521 else:
522 522 hist = []
523 523 hist = list(hist)
524 524 content = {'history' : hist}
525 525 content = json_clean(content)
526 526 msg = self.session.send(stream, 'history_reply',
527 527 content, parent, ident)
528 528 self.log.debug("Sending history reply with %i entries", len(hist))
529 529
530 530 def connect_request(self, stream, ident, parent):
531 531 if self._recorded_ports is not None:
532 532 content = self._recorded_ports.copy()
533 533 else:
534 534 content = {}
535 535 msg = self.session.send(stream, 'connect_reply',
536 536 content, parent, ident)
537 537 self.log.debug("%s", msg)
538 538
539 539 def kernel_info_request(self, stream, ident, parent):
540 540 vinfo = {
541 541 'protocol_version': protocol_version,
542 542 'ipython_version': ipython_version,
543 543 'language_version': language_version,
544 544 'language': 'python',
545 545 }
546 546 msg = self.session.send(stream, 'kernel_info_reply',
547 547 vinfo, parent, ident)
548 548 self.log.debug("%s", msg)
549 549
550 550 def shutdown_request(self, stream, ident, parent):
551 551 self.shell.exit_now = True
552 552 content = dict(status='ok')
553 553 content.update(parent['content'])
554 554 self.session.send(stream, u'shutdown_reply', content, parent, ident=ident)
555 555 # same content, but different msg_id for broadcasting on IOPub
556 556 self._shutdown_message = self.session.msg(u'shutdown_reply',
557 557 content, parent
558 558 )
559 559
560 560 self._at_shutdown()
561 561 # call sys.exit after a short delay
562 562 loop = ioloop.IOLoop.instance()
563 563 loop.add_timeout(time.time()+0.1, loop.stop)
564 564
565 565 #---------------------------------------------------------------------------
566 566 # Engine methods
567 567 #---------------------------------------------------------------------------
568 568
569 569 def apply_request(self, stream, ident, parent):
570 570 try:
571 571 content = parent[u'content']
572 572 bufs = parent[u'buffers']
573 573 msg_id = parent['header']['msg_id']
574 574 except:
575 575 self.log.error("Got bad msg: %s", parent, exc_info=True)
576 576 return
577 577
578 578 self._publish_status(u'busy', parent)
579 579
580 580 # Set the parent message of the display hook and out streams.
581 581 shell = self.shell
582 582 shell.set_parent(parent)
583 583
584 584 # execute_input_msg = self.session.msg(u'execute_input',{u'code':code}, parent=parent)
585 585 # self.iopub_socket.send(execute_input_msg)
586 586 # self.session.send(self.iopub_socket, u'execute_input', {u'code':code},parent=parent)
587 587 md = self._make_metadata(parent['metadata'])
588 588 try:
589 589 working = shell.user_ns
590 590
591 591 prefix = "_"+str(msg_id).replace("-","")+"_"
592 592
593 593 f,args,kwargs = unpack_apply_message(bufs, working, copy=False)
594 594
595 595 fname = getattr(f, '__name__', 'f')
596 596
597 597 fname = prefix+"f"
598 598 argname = prefix+"args"
599 599 kwargname = prefix+"kwargs"
600 600 resultname = prefix+"result"
601 601
602 602 ns = { fname : f, argname : args, kwargname : kwargs , resultname : None }
603 603 # print ns
604 604 working.update(ns)
605 605 code = "%s = %s(*%s,**%s)" % (resultname, fname, argname, kwargname)
606 606 try:
607 607 exec(code, shell.user_global_ns, shell.user_ns)
608 608 result = working.get(resultname)
609 609 finally:
610 610 for key in ns:
611 611 working.pop(key)
612 612
613 613 result_buf = serialize_object(result,
614 614 buffer_threshold=self.session.buffer_threshold,
615 615 item_threshold=self.session.item_threshold,
616 616 )
617 617
618 618 except:
619 619 # invoke IPython traceback formatting
620 620 shell.showtraceback()
621 621 # FIXME - fish exception info out of shell, possibly left there by
622 622 # run_code. We'll need to clean up this logic later.
623 623 reply_content = {}
624 624 if shell._reply_content is not None:
625 625 reply_content.update(shell._reply_content)
626 626 e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method='apply')
627 627 reply_content['engine_info'] = e_info
628 628 # reset after use
629 629 shell._reply_content = None
630 630
631 self.session.send(self.iopub_socket, u'pyerr', reply_content, parent=parent,
632 ident=self._topic('pyerr'))
631 self.session.send(self.iopub_socket, u'error', reply_content, parent=parent,
632 ident=self._topic('error'))
633 633 self.log.info("Exception in apply request:\n%s", '\n'.join(reply_content['traceback']))
634 634 result_buf = []
635 635
636 636 if reply_content['ename'] == 'UnmetDependency':
637 637 md['dependencies_met'] = False
638 638 else:
639 639 reply_content = {'status' : 'ok'}
640 640
641 641 # put 'ok'/'error' status in header, for scheduler introspection:
642 642 md['status'] = reply_content['status']
643 643
644 644 # flush i/o
645 645 sys.stdout.flush()
646 646 sys.stderr.flush()
647 647
648 648 reply_msg = self.session.send(stream, u'apply_reply', reply_content,
649 649 parent=parent, ident=ident,buffers=result_buf, metadata=md)
650 650
651 651 self._publish_status(u'idle', parent)
652 652
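To make the name-mangling in apply_request concrete: for a request whose
msg_id is '1234-abcd', the kernel builds and executes code of roughly this
shape. A standalone sketch with illustrative values (the real code runs in
the shell's user namespaces, not a fresh dict):

    prefix = "_" + "1234-abcd".replace("-", "") + "_"
    ns = {prefix + "f": len, prefix + "args": ([1, 2, 3],),
          prefix + "kwargs": {}, prefix + "result": None}
    code = "%sresult = %sf(*%sargs,**%skwargs)" % ((prefix,) * 4)
    exec(code, {}, ns)            # runs: _1234abcd_result = _1234abcd_f(...)
    assert ns[prefix + "result"] == 3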
653 653 #---------------------------------------------------------------------------
654 654 # Control messages
655 655 #---------------------------------------------------------------------------
656 656
657 657 def abort_request(self, stream, ident, parent):
658 658 """abort a specifig msg by id"""
659 659 msg_ids = parent['content'].get('msg_ids', None)
660 660 if isinstance(msg_ids, string_types):
661 661 msg_ids = [msg_ids]
662 662 if not msg_ids:
663 663 self.abort_queues()
664 664 for mid in msg_ids:
665 665 self.aborted.add(str(mid))
666 666
667 667 content = dict(status='ok')
668 668 reply_msg = self.session.send(stream, 'abort_reply', content=content,
669 669 parent=parent, ident=ident)
670 670 self.log.debug("%s", reply_msg)
671 671
672 672 def clear_request(self, stream, idents, parent):
673 673 """Clear our namespace."""
674 674 self.shell.reset(False)
675 675 msg = self.session.send(stream, 'clear_reply', ident=idents, parent=parent,
676 676 content = dict(status='ok'))
677 677
678 678
679 679 #---------------------------------------------------------------------------
680 680 # Protected interface
681 681 #---------------------------------------------------------------------------
682 682
683 683 def _wrap_exception(self, method=None):
684 684 # import here, because _wrap_exception is only used in parallel,
685 685 # and parallel has higher min pyzmq version
686 686 from IPython.parallel.error import wrap_exception
687 687 e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method=method)
688 688 content = wrap_exception(e_info)
689 689 return content
690 690
691 691 def _topic(self, topic):
692 692 """prefixed topic for IOPub messages"""
693 693 if self.int_id >= 0:
694 694 base = "engine.%i" % self.int_id
695 695 else:
696 696 base = "kernel.%s" % self.ident
697 697
698 698 return py3compat.cast_bytes("%s.%s" % (base, topic))
699 699
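For concreteness, `_topic` yields the bytes used as the zmq PUB topic prefix
on IOPub. A standalone restatement (sketch; the real method goes through
py3compat.cast_bytes):

    def topic(int_id, ident, name):
        base = "engine.%i" % int_id if int_id >= 0 else "kernel.%s" % ident
        return ("%s.%s" % (base, name)).encode('utf-8')

    assert topic(3, u'', 'error') == b'engine.3.error'
    assert topic(-1, u'abcd', 'status') == b'kernel.abcd.status'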
700 700 def _abort_queues(self):
701 701 for stream in self.shell_streams:
702 702 if stream:
703 703 self._abort_queue(stream)
704 704
705 705 def _abort_queue(self, stream):
706 706 poller = zmq.Poller()
707 707 poller.register(stream.socket, zmq.POLLIN)
708 708 while True:
709 709 idents,msg = self.session.recv(stream, zmq.NOBLOCK, content=True)
710 710 if msg is None:
711 711 return
712 712
713 713 self.log.info("Aborting:")
714 714 self.log.info("%s", msg)
715 715 msg_type = msg['header']['msg_type']
716 716 reply_type = msg_type.split('_')[0] + '_reply'
717 717
718 718 status = {'status' : 'aborted'}
719 719 md = {'engine' : self.ident}
720 720 md.update(status)
721 721 reply_msg = self.session.send(stream, reply_type, metadata=md,
722 722 content=status, parent=msg, ident=idents)
723 723 self.log.debug("%s", reply_msg)
724 724 # We need to wait a bit for requests to come in. This can probably
725 725 # be set shorter for true asynchronous clients.
726 726 poller.poll(50)
727 727
728 728
729 729 def _no_raw_input(self):
730 730 """Raise StdinNotImplentedError if active frontend doesn't support
731 731 stdin."""
732 732 raise StdinNotImplementedError("raw_input was called, but this "
733 733 "frontend does not support stdin.")
734 734
735 735 def _raw_input(self, prompt, ident, parent):
736 736 # Flush output before making the request.
737 737 sys.stderr.flush()
738 738 sys.stdout.flush()
739 739 # flush the stdin socket, to purge stale replies
740 740 while True:
741 741 try:
742 742 self.stdin_socket.recv_multipart(zmq.NOBLOCK)
743 743 except zmq.ZMQError as e:
744 744 if e.errno == zmq.EAGAIN:
745 745 break
746 746 else:
747 747 raise
748 748
749 749 # Send the input request.
750 750 content = json_clean(dict(prompt=prompt))
751 751 self.session.send(self.stdin_socket, u'input_request', content, parent,
752 752 ident=ident)
753 753
754 754 # Await a response.
755 755 while True:
756 756 try:
757 757 ident, reply = self.session.recv(self.stdin_socket, 0)
758 758 except Exception:
759 759 self.log.warn("Invalid Message:", exc_info=True)
760 760 except KeyboardInterrupt:
761 761 # re-raise KeyboardInterrupt, to truncate traceback
762 762 raise KeyboardInterrupt
763 763 else:
764 764 break
765 765 try:
766 766 value = py3compat.unicode_to_str(reply['content']['value'])
767 767 except:
768 768 self.log.error("Got bad raw_input reply: ")
769 769 self.log.error("%s", parent)
770 770 value = ''
771 771 if value == '\x04':
772 772 # EOF
773 773 raise EOFError
774 774 return value
775 775
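From the client side, the round trip that `_raw_input` drives looks roughly
like this (sketch; assumes a connected kernel client `kc` with a stdin
channel, and an execute request sent with allow_stdin=True):

    msg = kc.stdin_channel.get_msg(timeout=5)        # wait for input_request
    if msg['header']['msg_type'] == 'input_request':
        value = raw_input(msg['content']['prompt'])  # ask the real user
        kc.input(value)                              # send the input_reply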
776 776 def _complete(self, msg):
777 777 c = msg['content']
778 778 try:
779 779 cpos = int(c['cursor_pos'])
780 780 except:
781 781 # If we don't get something that we can convert to an integer, at
782 782 # least attempt the completion guessing the cursor is at the end of
783 783 # the text, if there's any, and otherwise of the line
784 784 cpos = len(c['text'])
785 785 if cpos==0:
786 786 cpos = len(c['line'])
787 787 return self.shell.complete(c['text'], c['line'], cpos)
788 788
789 789 def _at_shutdown(self):
790 790 """Actions taken at shutdown by the kernel, called by python's atexit.
791 791 """
792 792 # io.rprint("Kernel at_shutdown") # dbg
793 793 if self._shutdown_message is not None:
794 794 self.session.send(self.iopub_socket, self._shutdown_message, ident=self._topic('shutdown'))
795 795 self.log.debug("%s", self._shutdown_message)
796 796 [ s.flush(zmq.POLLOUT) for s in self.shell_streams ]
797 797
@@ -1,415 +1,408 b''
1 """An Application for launching a kernel
2 """
1 """An Application for launching a kernel"""
2
3 3 # Copyright (c) IPython Development Team.
4 4 # Distributed under the terms of the Modified BSD License.
5 5
6 #-----------------------------------------------------------------------------
7 # Imports
8 #-----------------------------------------------------------------------------
9
10 6 from __future__ import print_function
11 7
12 # Standard library imports
13 8 import atexit
14 9 import os
15 10 import sys
16 11 import signal
17 12
18 # System library imports
19 13 import zmq
20 14 from zmq.eventloop import ioloop
21 15 from zmq.eventloop.zmqstream import ZMQStream
22 16
23 # IPython imports
24 17 from IPython.core.ultratb import FormattedTB
25 18 from IPython.core.application import (
26 19 BaseIPythonApplication, base_flags, base_aliases, catch_config_error
27 20 )
28 21 from IPython.core.profiledir import ProfileDir
29 22 from IPython.core.shellapp import (
30 23 InteractiveShellApp, shell_flags, shell_aliases
31 24 )
32 25 from IPython.utils import io
33 26 from IPython.utils.path import filefind
34 27 from IPython.utils.traitlets import (
35 28 Any, Instance, Dict, Unicode, Integer, Bool, DottedObjectName,
36 29 )
37 30 from IPython.utils.importstring import import_item
38 31 from IPython.kernel import write_connection_file
39 32 from IPython.kernel.connect import ConnectionFileMixin
40 33
41 34 # local imports
42 35 from .heartbeat import Heartbeat
43 36 from .ipkernel import Kernel
44 37 from .parentpoller import ParentPollerUnix, ParentPollerWindows
45 38 from .session import (
46 39 Session, session_flags, session_aliases, default_secure,
47 40 )
48 41 from .zmqshell import ZMQInteractiveShell
49 42
50 43 #-----------------------------------------------------------------------------
51 44 # Flags and Aliases
52 45 #-----------------------------------------------------------------------------
53 46
54 47 kernel_aliases = dict(base_aliases)
55 48 kernel_aliases.update({
56 49 'ip' : 'IPKernelApp.ip',
57 50 'hb' : 'IPKernelApp.hb_port',
58 51 'shell' : 'IPKernelApp.shell_port',
59 52 'iopub' : 'IPKernelApp.iopub_port',
60 53 'stdin' : 'IPKernelApp.stdin_port',
61 54 'control' : 'IPKernelApp.control_port',
62 55 'f' : 'IPKernelApp.connection_file',
63 56 'parent': 'IPKernelApp.parent_handle',
64 57 'transport': 'IPKernelApp.transport',
65 58 })
66 59 if sys.platform.startswith('win'):
67 60 kernel_aliases['interrupt'] = 'IPKernelApp.interrupt'
68 61
69 62 kernel_flags = dict(base_flags)
70 63 kernel_flags.update({
71 64 'no-stdout' : (
72 65 {'IPKernelApp' : {'no_stdout' : True}},
73 66 "redirect stdout to the null device"),
74 67 'no-stderr' : (
75 68 {'IPKernelApp' : {'no_stderr' : True}},
76 69 "redirect stderr to the null device"),
77 70 'pylab' : (
78 71 {'IPKernelApp' : {'pylab' : 'auto'}},
79 72 """Pre-load matplotlib and numpy for interactive use with
80 73 the default matplotlib backend."""),
81 74 })
82 75
83 76 # inherit flags&aliases for any IPython shell apps
84 77 kernel_aliases.update(shell_aliases)
85 78 kernel_flags.update(shell_flags)
86 79
87 80 # inherit flags&aliases for Sessions
88 81 kernel_aliases.update(session_aliases)
89 82 kernel_flags.update(session_flags)
90 83
91 84 _ctrl_c_message = """\
92 85 NOTE: When using the `ipython kernel` entry point, Ctrl-C will not work.
93 86
94 87 To exit, you will have to explicitly quit this process, by either sending
95 88 "quit" from a client, or using Ctrl-\\ in UNIX-like environments.
96 89
97 90 To read more about this, see https://github.com/ipython/ipython/issues/2049
98 91
99 92 """
100 93
101 94 #-----------------------------------------------------------------------------
102 95 # Application class for starting an IPython Kernel
103 96 #-----------------------------------------------------------------------------
104 97
105 98 class IPKernelApp(BaseIPythonApplication, InteractiveShellApp,
106 99 ConnectionFileMixin):
107 100 name='ipkernel'
108 101 aliases = Dict(kernel_aliases)
109 102 flags = Dict(kernel_flags)
110 103 classes = [Kernel, ZMQInteractiveShell, ProfileDir, Session]
111 104 # the kernel class, as an importstring
112 105 kernel_class = DottedObjectName('IPython.kernel.zmq.ipkernel.Kernel', config=True,
113 106 help="""The Kernel subclass to be used.
114 107
115 108 This should allow easy re-use of the IPKernelApp entry point
116 109 to configure and launch kernels other than IPython's own.
117 110 """)
118 111 kernel = Any()
119 112 poller = Any() # don't restrict this even though current pollers are all Threads
120 113 heartbeat = Instance(Heartbeat)
121 114 session = Instance('IPython.kernel.zmq.session.Session')
122 115 ports = Dict()
123 116
124 117 # ipkernel doesn't get its own config file
125 118 def _config_file_name_default(self):
126 119 return 'ipython_config.py'
127 120
128 121 # inherit config file name from parent:
129 122 parent_appname = Unicode(config=True)
130 123 def _parent_appname_changed(self, name, old, new):
131 124 if self.config_file_specified:
132 125 # it was manually specified, ignore
133 126 return
134 127 self.config_file_name = new.replace('-','_') + u'_config.py'
135 128 # don't let this count as specifying the config file
136 129 self.config_file_specified.remove(self.config_file_name)
137 130
138 131 # connection info:
139 132
140 133 @property
141 134 def abs_connection_file(self):
142 135 if os.path.basename(self.connection_file) == self.connection_file:
143 136 return os.path.join(self.profile_dir.security_dir, self.connection_file)
144 137 else:
145 138 return self.connection_file
146 139
147 140
148 141 # streams, etc.
149 142 no_stdout = Bool(False, config=True, help="redirect stdout to the null device")
150 143 no_stderr = Bool(False, config=True, help="redirect stderr to the null device")
151 144 outstream_class = DottedObjectName('IPython.kernel.zmq.iostream.OutStream',
152 145 config=True, help="The importstring for the OutStream factory")
153 146 displayhook_class = DottedObjectName('IPython.kernel.zmq.displayhook.ZMQDisplayHook',
154 147 config=True, help="The importstring for the DisplayHook factory")
155 148
156 149 # polling
157 150 parent_handle = Integer(0, config=True,
158 151 help="""kill this process if its parent dies. On Windows, the argument
159 152 specifies the HANDLE of the parent process, otherwise it is simply boolean.
160 153 """)
161 154 interrupt = Integer(0, config=True,
162 155 help="""ONLY USED ON WINDOWS
163 156 Interrupt this process when the parent is signaled.
164 157 """)
165 158
166 159 def init_crash_handler(self):
167 160 # Install minimal exception handling
168 161 sys.excepthook = FormattedTB(mode='Verbose', color_scheme='NoColor',
169 162 ostream=sys.__stdout__)
170 163
171 164 def init_poller(self):
172 165 if sys.platform == 'win32':
173 166 if self.interrupt or self.parent_handle:
174 167 self.poller = ParentPollerWindows(self.interrupt, self.parent_handle)
175 168 elif self.parent_handle:
176 169 self.poller = ParentPollerUnix()
177 170
178 171 def _bind_socket(self, s, port):
179 172 iface = '%s://%s' % (self.transport, self.ip)
180 173 if self.transport == 'tcp':
181 174 if port <= 0:
182 175 port = s.bind_to_random_port(iface)
183 176 else:
184 177 s.bind("tcp://%s:%i" % (self.ip, port))
185 178 elif self.transport == 'ipc':
186 179 if port <= 0:
187 180 port = 1
188 181 path = "%s-%i" % (self.ip, port)
189 182 while os.path.exists(path):
190 183 port = port + 1
191 184 path = "%s-%i" % (self.ip, port)
192 185 else:
193 186 path = "%s-%i" % (self.ip, port)
194 187 s.bind("ipc://%s" % path)
195 188 return port
196 189
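A quick sketch of the tcp branch above, outside the app (a throwaway ROUTER
socket; the ipc branch probes "<ip>-<n>" paths until one is free in much the
same spirit):

    import zmq
    ctx = zmq.Context.instance()
    s = ctx.socket(zmq.ROUTER)
    port = s.bind_to_random_port('tcp://127.0.0.1')   # the port <= 0 case
    print("bound on port %i" % port)
    s.close()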
197 190 def write_connection_file(self):
198 191 """write connection info to JSON file"""
199 192 cf = self.abs_connection_file
200 193 self.log.debug("Writing connection file: %s", cf)
201 194 write_connection_file(cf, ip=self.ip, key=self.session.key, transport=self.transport,
202 195 shell_port=self.shell_port, stdin_port=self.stdin_port, hb_port=self.hb_port,
203 196 iopub_port=self.iopub_port, control_port=self.control_port)
204 197
205 198 def cleanup_connection_file(self):
206 199 cf = self.abs_connection_file
207 200 self.log.debug("Cleaning up connection file: %s", cf)
208 201 try:
209 202 os.remove(cf)
210 203 except (IOError, OSError):
211 204 pass
212 205
213 206 self.cleanup_ipc_files()
214 207
215 208 def init_connection_file(self):
216 209 if not self.connection_file:
217 210 self.connection_file = "kernel-%s.json"%os.getpid()
218 211 try:
219 212 self.connection_file = filefind(self.connection_file, ['.', self.profile_dir.security_dir])
220 213 except IOError:
221 214 self.log.debug("Connection file not found: %s", self.connection_file)
222 215 # This means I own it, so I will clean it up:
223 216 atexit.register(self.cleanup_connection_file)
224 217 return
225 218 try:
226 219 self.load_connection_file()
227 220 except Exception:
228 221 self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True)
229 222 self.exit(1)
230 223
231 224 def init_sockets(self):
232 225 # Create a context, a session, and the kernel sockets.
233 226 self.log.info("Starting the kernel at pid: %i", os.getpid())
234 227 context = zmq.Context.instance()
235 228 # Uncomment this to try closing the context.
236 229 # atexit.register(context.term)
237 230
238 231 self.shell_socket = context.socket(zmq.ROUTER)
239 232 self.shell_socket.linger = 1000
240 233 self.shell_port = self._bind_socket(self.shell_socket, self.shell_port)
241 234 self.log.debug("shell ROUTER Channel on port: %i" % self.shell_port)
242 235
243 236 self.iopub_socket = context.socket(zmq.PUB)
244 237 self.iopub_socket.linger = 1000
245 238 self.iopub_port = self._bind_socket(self.iopub_socket, self.iopub_port)
246 239 self.log.debug("iopub PUB Channel on port: %i" % self.iopub_port)
247 240
248 241 self.stdin_socket = context.socket(zmq.ROUTER)
249 242 self.stdin_socket.linger = 1000
250 243 self.stdin_port = self._bind_socket(self.stdin_socket, self.stdin_port)
251 244 self.log.debug("stdin ROUTER Channel on port: %i" % self.stdin_port)
252 245
253 246 self.control_socket = context.socket(zmq.ROUTER)
254 247 self.control_socket.linger = 1000
255 248 self.control_port = self._bind_socket(self.control_socket, self.control_port)
256 249 self.log.debug("control ROUTER Channel on port: %i" % self.control_port)
257 250
258 251 def init_heartbeat(self):
259 252 """start the heart beating"""
260 253 # heartbeat doesn't share context, because it mustn't be blocked
261 254 # by the GIL, which is accessed by libzmq when freeing zero-copy messages
262 255 hb_ctx = zmq.Context()
263 256 self.heartbeat = Heartbeat(hb_ctx, (self.transport, self.ip, self.hb_port))
264 257 self.hb_port = self.heartbeat.port
265 258 self.log.debug("Heartbeat REP Channel on port: %i" % self.hb_port)
266 259 self.heartbeat.start()
267 260
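Functionally, the heartbeat is an echo service: a client sends a ping and
expects the same bytes back; silence means the kernel is gone. A minimal
hedged equivalent of the service loop (the actual Heartbeat thread may differ
in mechanism):

    def heartbeat_loop(ctx, url):
        s = ctx.socket(zmq.REP)
        s.bind(url)
        while True:
            s.send(s.recv())    # echo each ping back unchanged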
268 261 def log_connection_info(self):
269 262 """display connection info, and store ports"""
270 263 basename = os.path.basename(self.connection_file)
271 264 if basename == self.connection_file or \
272 265 os.path.dirname(self.connection_file) == self.profile_dir.security_dir:
273 266 # use shortname
274 267 tail = basename
275 268 if self.profile != 'default':
276 269 tail += " --profile %s" % self.profile
277 270 else:
278 271 tail = self.connection_file
279 272 lines = [
280 273 "To connect another client to this kernel, use:",
281 274 " --existing %s" % tail,
282 275 ]
283 276 # log connection info
284 277 # info-level, so often not shown.
285 278 # frontends should use the %connect_info magic
286 279 # to see the connection info
287 280 for line in lines:
288 281 self.log.info(line)
289 282 # also raw print to the terminal if no parent_handle (`ipython kernel`)
290 283 if not self.parent_handle:
291 284 io.rprint(_ctrl_c_message)
292 285 for line in lines:
293 286 io.rprint(line)
294 287
295 288 self.ports = dict(shell=self.shell_port, iopub=self.iopub_port,
296 289 stdin=self.stdin_port, hb=self.hb_port,
297 290 control=self.control_port)
298 291
299 292 def init_session(self):
300 293 """create our session object"""
301 294 default_secure(self.config)
302 295 self.session = Session(parent=self, username=u'kernel')
303 296
304 297 def init_blackhole(self):
305 298 """redirects stdout/stderr to devnull if necessary"""
306 299 if self.no_stdout or self.no_stderr:
307 300 blackhole = open(os.devnull, 'w')
308 301 if self.no_stdout:
309 302 sys.stdout = sys.__stdout__ = blackhole
310 303 if self.no_stderr:
311 304 sys.stderr = sys.__stderr__ = blackhole
312 305
313 306 def init_io(self):
314 307 """Redirect input streams and set a display hook."""
315 308 if self.outstream_class:
316 309 outstream_factory = import_item(str(self.outstream_class))
317 310 sys.stdout = outstream_factory(self.session, self.iopub_socket, u'stdout')
318 311 sys.stderr = outstream_factory(self.session, self.iopub_socket, u'stderr')
319 312 if self.displayhook_class:
320 313 displayhook_factory = import_item(str(self.displayhook_class))
321 314 sys.displayhook = displayhook_factory(self.session, self.iopub_socket)
322 315
323 316 def init_signal(self):
324 317 signal.signal(signal.SIGINT, signal.SIG_IGN)
325 318
326 319 def init_kernel(self):
327 320 """Create the Kernel object itself"""
328 321 shell_stream = ZMQStream(self.shell_socket)
329 322 control_stream = ZMQStream(self.control_socket)
330 323
331 324 kernel_factory = import_item(str(self.kernel_class))
332 325
333 326 kernel = kernel_factory(parent=self, session=self.session,
334 327 shell_streams=[shell_stream, control_stream],
335 328 iopub_socket=self.iopub_socket,
336 329 stdin_socket=self.stdin_socket,
337 330 log=self.log,
338 331 profile_dir=self.profile_dir,
339 332 user_ns=self.user_ns,
340 333 )
341 334 kernel.record_ports(self.ports)
342 335 self.kernel = kernel
343 336
344 337 def init_gui_pylab(self):
345 338 """Enable GUI event loop integration, taking pylab into account."""
346 339
347 340 # Provide a wrapper for :meth:`InteractiveShellApp.init_gui_pylab`
348 341 # to ensure that any exception is printed straight to stderr.
349 342 # Normally _showtraceback associates the reply with an execution,
350 343 # which means frontends will never draw it, as this exception
351 344 # is not associated with any execute request.
352 345
353 346 shell = self.shell
354 347 _showtraceback = shell._showtraceback
355 348 try:
356 # replace pyerr-sending traceback with stderr
349 # replace error-sending traceback with stderr
357 350 def print_tb(etype, evalue, stb):
358 351 print ("GUI event loop or pylab initialization failed",
359 352 file=io.stderr)
360 353 print (shell.InteractiveTB.stb2text(stb), file=io.stderr)
361 354 shell._showtraceback = print_tb
362 355 InteractiveShellApp.init_gui_pylab(self)
363 356 finally:
364 357 shell._showtraceback = _showtraceback
365 358
366 359 def init_shell(self):
367 360 self.shell = self.kernel.shell
368 361 self.shell.configurables.append(self)
369 362
370 363 @catch_config_error
371 364 def initialize(self, argv=None):
372 365 super(IPKernelApp, self).initialize(argv)
373 366 self.init_blackhole()
374 367 self.init_connection_file()
375 368 self.init_session()
376 369 self.init_poller()
377 370 self.init_sockets()
378 371 self.init_heartbeat()
379 372 # writing/displaying connection info must be *after* init_sockets/heartbeat
380 373 self.log_connection_info()
381 374 self.write_connection_file()
382 375 self.init_io()
383 376 self.init_signal()
384 377 self.init_kernel()
385 378 # shell init steps
386 379 self.init_path()
387 380 self.init_shell()
388 381 self.init_gui_pylab()
389 382 self.init_extensions()
390 383 self.init_code()
391 384 # flush stdout/stderr, so that anything written to these streams during
392 385 # initialization does not get associated with the first execution request
393 386 sys.stdout.flush()
394 387 sys.stderr.flush()
395 388
396 389 def start(self):
397 390 if self.poller is not None:
398 391 self.poller.start()
399 392 self.kernel.start()
400 393 try:
401 394 ioloop.IOLoop.instance().start()
402 395 except KeyboardInterrupt:
403 396 pass
404 397
405 398 launch_new_instance = IPKernelApp.launch_instance
406 399
407 400 def main():
408 401 """Run an IPKernel as an application"""
409 402 app = IPKernelApp.instance()
410 403 app.initialize()
411 404 app.start()
412 405
413 406
414 407 if __name__ == '__main__':
415 408 main()
@@ -1,569 +1,569 b''
1 1 """A ZMQ-based subclass of InteractiveShell.
2 2
3 3 This code is meant to ease the refactoring of the base InteractiveShell into
4 4 something with a cleaner architecture for 2-process use, without actually
5 5 breaking InteractiveShell itself. So we're doing something a bit ugly, where
6 6 we subclass and override what we want to fix. Once this is working well, we
7 7 can go back to the base class and refactor the code for a cleaner inheritance
8 8 implementation that doesn't rely on so much monkeypatching.
9 9
10 10 But this lets us maintain a fully working IPython as we develop the new
11 11 machinery. This should thus be thought of as scaffolding.
12 12 """
13 13
14 14 # Copyright (c) IPython Development Team.
15 15 # Distributed under the terms of the Modified BSD License.
16 16
17 17 from __future__ import print_function
18 18
19 19 import os
20 20 import sys
21 21 import time
22 22
23 23 from zmq.eventloop import ioloop
24 24
25 25 from IPython.core.interactiveshell import (
26 26 InteractiveShell, InteractiveShellABC
27 27 )
28 28 from IPython.core import page
29 29 from IPython.core.autocall import ZMQExitAutocall
30 30 from IPython.core.displaypub import DisplayPublisher
31 31 from IPython.core.error import UsageError
32 32 from IPython.core.magics import MacroToEdit, CodeMagics
33 33 from IPython.core.magic import magics_class, line_magic, Magics
34 34 from IPython.core.payloadpage import install_payload_page
35 35 from IPython.display import display, Javascript
36 36 from IPython.kernel.inprocess.socket import SocketABC
37 37 from IPython.kernel import (
38 38 get_connection_file, get_connection_info, connect_qtconsole
39 39 )
40 40 from IPython.testing.skipdoctest import skip_doctest
41 41 from IPython.utils import openpy
42 42 from IPython.utils.jsonutil import json_clean, encode_images
43 43 from IPython.utils.process import arg_split
44 44 from IPython.utils import py3compat
45 45 from IPython.utils.py3compat import unicode_type
46 46 from IPython.utils.traitlets import Instance, Type, Dict, CBool, CBytes, Any
47 47 from IPython.utils.warn import error
48 48 from IPython.kernel.zmq.displayhook import ZMQShellDisplayHook
49 49 from IPython.kernel.zmq.datapub import ZMQDataPublisher
50 50 from IPython.kernel.zmq.session import extract_header
51 51 from IPython.kernel.comm import CommManager
52 52 from .session import Session
53 53
54 54 #-----------------------------------------------------------------------------
55 55 # Functions and classes
56 56 #-----------------------------------------------------------------------------
57 57
58 58 class ZMQDisplayPublisher(DisplayPublisher):
59 59 """A display publisher that publishes data using a ZeroMQ PUB socket."""
60 60
61 61 session = Instance(Session)
62 62 pub_socket = Instance(SocketABC)
63 63 parent_header = Dict({})
64 64 topic = CBytes(b'display_data')
65 65
66 66 def set_parent(self, parent):
67 67 """Set the parent for outbound messages."""
68 68 self.parent_header = extract_header(parent)
69 69
70 70 def _flush_streams(self):
71 71 """flush IO Streams prior to display"""
72 72 sys.stdout.flush()
73 73 sys.stderr.flush()
74 74
75 75 def publish(self, source, data, metadata=None):
76 76 self._flush_streams()
77 77 if metadata is None:
78 78 metadata = {}
79 79 self._validate_data(source, data, metadata)
80 80 content = {}
81 81 content['source'] = source
82 82 content['data'] = encode_images(data)
83 83 content['metadata'] = metadata
84 84 self.session.send(
85 85 self.pub_socket, u'display_data', json_clean(content),
86 86 parent=self.parent_header, ident=self.topic,
87 87 )
88 88
89 89 def clear_output(self, wait=False):
90 90 content = dict(wait=wait)
91 91 self._flush_streams()
92 92 self.session.send(
93 93 self.pub_socket, u'clear_output', content,
94 94 parent=self.parent_header, ident=self.topic,
95 95 )
96 96
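# A minimal usage sketch, assuming `session` and `pub_socket` are an
# already-configured Session and zmq PUB socket, and `parent_msg` is the
# triggering request message:
#
#   pub = ZMQDisplayPublisher(session=session, pub_socket=pub_socket)
#   pub.set_parent(parent_msg)       # route output to the requesting cell
#   pub.publish(source='example', data={'text/plain': u'hello'})
#   # -> flushes stdout/stderr, then sends one 'display_data' message
#   #    on the IOPub channel with ident b'display_data'
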
97 97 @magics_class
98 98 class KernelMagics(Magics):
99 99 #------------------------------------------------------------------------
100 100 # Magic overrides
101 101 #------------------------------------------------------------------------
102 102 # Once the base class stops inheriting from magic, this code needs to be
103 103 # moved into separate machinery as well. For now, at least isolate here
104 104 # the magics which this class needs to implement differently from the base
105 105 # class, or that are unique to it.
106 106
107 107 @line_magic
108 108 def doctest_mode(self, parameter_s=''):
109 109 """Toggle doctest mode on and off.
110 110
111 111 This mode is intended to make IPython behave as much as possible like a
112 112 plain Python shell, from the perspective of how its prompts, exceptions
113 113 and output look. This makes it easy to copy and paste parts of a
114 114 session into doctests. It does so by:
115 115
116 116 - Changing the prompts to the classic ``>>>`` ones.
117 117 - Changing the exception reporting mode to 'Plain'.
118 118 - Disabling pretty-printing of output.
119 119
120 120 Note that IPython also supports the pasting of code snippets that have
121 121 leading '>>>' and '...' prompts in them. This means that you can paste
122 122 doctests from files or docstrings (even if they have leading
123 123 whitespace), and the code will execute correctly. You can then use
124 124 '%history -t' to see the translated history; this will give you the
125 125 input after removal of all the leading prompts and whitespace, which
126 126 can be pasted back into an editor.
127 127
128 128 With these features, you can switch into this mode easily whenever you
129 129 need to do testing and changes to doctests, without having to leave
130 130 your existing IPython session.
131 131 """
132 132
133 133 from IPython.utils.ipstruct import Struct
134 134
135 135 # Shorthands
136 136 shell = self.shell
137 137 disp_formatter = self.shell.display_formatter
138 138 ptformatter = disp_formatter.formatters['text/plain']
139 139 # dstore is a data store kept in the instance metadata bag to track any
140 140 # changes we make, so we can undo them later.
141 141 dstore = shell.meta.setdefault('doctest_mode', Struct())
142 142 save_dstore = dstore.setdefault
143 143
144 144 # save a few values we'll need to recover later
145 145 mode = save_dstore('mode', False)
146 146 save_dstore('rc_pprint', ptformatter.pprint)
147 147 save_dstore('rc_active_types',disp_formatter.active_types)
148 148 save_dstore('xmode', shell.InteractiveTB.mode)
149 149
150 150 if not mode:
151 151 # turn on
152 152 ptformatter.pprint = False
153 153 disp_formatter.active_types = ['text/plain']
154 154 shell.magic('xmode Plain')
155 155 else:
156 156 # turn off
157 157 ptformatter.pprint = dstore.rc_pprint
158 158 disp_formatter.active_types = dstore.rc_active_types
159 159 shell.magic("xmode " + dstore.xmode)
160 160
161 161 # Store new mode and inform on console
162 162 dstore.mode = not mode
163 163 mode_label = ['OFF','ON'][dstore.mode]
164 164 print('Doctest mode is:', mode_label)
165 165
166 166 # Send the payload back so that clients can modify their prompt display
167 167 payload = dict(
168 168 source='doctest_mode',
169 169 mode=dstore.mode)
170 170 shell.payload_manager.write_payload(payload)
171 171
172 172
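# Usage sketch, assuming a connected frontend session:
#
#   In [1]: %doctest_mode
#   Doctest mode is: ON
#   >>> 1 + 1           # classic prompts, 'Plain' exceptions, no pretty-print
#   2
#   >>> %doctest_mode   # toggling again restores the saved settings
#   Doctest mode is: OFF
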
173 173 _find_edit_target = CodeMagics._find_edit_target
174 174
175 175 @skip_doctest
176 176 @line_magic
177 177 def edit(self, parameter_s='', last_call=['','']):
178 178 """Bring up an editor and execute the resulting code.
179 179
180 180 Usage:
181 181 %edit [options] [args]
182 182
183 183 %edit runs an external text editor. You will need to set the command for
184 184 this editor via the ``TerminalInteractiveShell.editor`` option in your
185 185 configuration file before it will work.
186 186
187 187 This command allows you to conveniently edit multi-line code right in
188 188 your IPython session.
189 189
190 190 If called without arguments, %edit opens up an empty editor with a
191 191 temporary file and will execute the contents of this file when you
192 192 close it (don't forget to save it!).
193 193
194 194 Options:
195 195
196 196 -n <number>
197 197 Open the editor at a specified line number. By default, the IPython
198 198 editor hook uses the unix syntax 'editor +N filename', but you can
199 199 configure this by providing your own modified hook if your favorite
200 200 editor supports line-number specifications with a different syntax.
201 201
202 202 -p
203 203 Call the editor with the same data as the previous time it was used,
204 204 regardless of how long ago (in your current session) it was.
205 205
206 206 -r
207 207 Use 'raw' input. This option only applies to input taken from the
208 208 user's history. By default, the 'processed' history is used, so that
209 209 magics are loaded in their transformed version to valid Python. If
210 210 this option is given, the raw input as typed at the command line is
211 211 used instead. When you exit the editor, it will be executed by
212 212 IPython's own processor.
213 213
214 214 Arguments:
215 215
216 216 If arguments are given, the following possibilities exist:
217 217
218 218 - The arguments are numbers or pairs of colon-separated numbers (like
219 219 1 4:8 9). These are interpreted as lines of previous input to be
220 220 loaded into the editor. The syntax is the same as for the %macro command.
221 221
222 222 - If the argument doesn't start with a number, it is evaluated as a
223 223 variable and its contents loaded into the editor. You can thus edit
224 224 any string which contains python code (including the result of
225 225 previous edits).
226 226
227 227 - If the argument is the name of an object (other than a string),
228 228 IPython will try to locate the file where it was defined and open the
229 229 editor at the point where it is defined. You can use ``%edit function``
230 230 to load an editor exactly at the point where 'function' is defined,
231 231 edit it and have the file be executed automatically.
232 232
233 233 If the object is a macro (see %macro for details), this opens up your
234 234 specified editor with a temporary file containing the macro's data.
235 235 Upon exit, the macro is reloaded with the contents of the file.
236 236
237 237 Note: opening at an exact line is only supported under Unix, and some
238 238 editors (like kedit and gedit up to Gnome 2.8) do not understand the
239 239 '+NUMBER' parameter necessary for this feature. Good editors like
240 240 (X)Emacs, vi, jed, pico and joe all do.
241 241
242 242 - If the argument is not found as a variable, IPython will look for a
243 243 file with that name (adding .py if necessary) and load it into the
244 244 editor. It will execute its contents with execfile() when you exit,
245 245 loading any code in the file into your interactive namespace.
246 246
247 247 Unlike in the terminal, this is designed to use a GUI editor, and we do
248 248 not know when it has closed. So the file you edit will not be
249 249 automatically executed or printed.
250 250
251 251 Note that %edit is also available through the alias %ed.
252 252 """
253 253
254 254 opts,args = self.parse_options(parameter_s,'prn:')
255 255
256 256 try:
257 257 filename, lineno, _ = CodeMagics._find_edit_target(self.shell, args, opts, last_call)
258 258 except MacroToEdit as e:
259 259 # TODO: Implement macro editing over 2 processes.
260 260 print("Macro editing not yet implemented in 2-process model.")
261 261 return
262 262
263 263 # Make sure we send to the client an absolute path, in case the working
264 264 # directory of client and kernel don't match
265 265 filename = os.path.abspath(filename)
266 266
267 267 payload = {
268 268 'source' : 'edit_magic',
269 269 'filename' : filename,
270 270 'line_number' : lineno
271 271 }
272 272 self.shell.payload_manager.write_payload(payload)
273 273
274 274 # A few magics that are adapted to the specifics of using pexpect and a
275 275 # remote terminal
276 276
277 277 @line_magic
278 278 def clear(self, arg_s):
279 279 """Clear the terminal."""
280 280 if os.name == 'posix':
281 281 self.shell.system("clear")
282 282 else:
283 283 self.shell.system("cls")
284 284
285 285 if os.name == 'nt':
286 286 # This is the usual name on Windows
287 287 cls = line_magic('cls')(clear)
288 288
289 289 # Terminal pagers won't work over pexpect, but we do have our own pager
290 290
291 291 @line_magic
292 292 def less(self, arg_s):
293 293 """Show a file through the pager.
294 294
295 295 Files ending in .py are syntax-highlighted."""
296 296 if not arg_s:
297 297 raise UsageError('Missing filename.')
298 298
299 299 # read the file; .py sources are shown syntax-highlighted
300 300 if arg_s.endswith('.py'):
301 301 cont = self.shell.pycolorize(openpy.read_py_file(arg_s, skip_encoding_cookie=False))
302 302 else:
303 303 cont = open(arg_s).read()
304 304 page.page(cont)
305 305
306 306 more = line_magic('more')(less)
307 307
308 308 # Man calls a pager, so we also need to redefine it
309 309 if os.name == 'posix':
310 310 @line_magic
311 311 def man(self, arg_s):
312 312 """Find the man page for the given command and display in pager."""
313 313 page.page(self.shell.getoutput('man %s | col -b' % arg_s,
314 314 split=False))
315 315
316 316 @line_magic
317 317 def connect_info(self, arg_s):
318 318 """Print information for connecting other clients to this kernel
319 319
320 320 It will print the contents of this session's connection file, as well as
321 321 shortcuts for local clients.
322 322
323 323 In the simplest case, when called from the most recently launched kernel,
324 324 secondary clients can be connected simply with:
325 325
326 326 $> ipython <app> --existing
327 327
328 328 """
329 329
330 330 from IPython.core.application import BaseIPythonApplication as BaseIPApp
331 331
332 332 if BaseIPApp.initialized():
333 333 app = BaseIPApp.instance()
334 334 security_dir = app.profile_dir.security_dir
335 335 profile = app.profile
336 336 else:
337 337 profile = 'default'
338 338 security_dir = ''
339 339
340 340 try:
341 341 connection_file = get_connection_file()
342 342 info = get_connection_info(unpack=False)
343 343 except Exception as e:
344 344 error("Could not get connection info: %r" % e)
345 345 return
346 346
347 347 # add profile flag for non-default profile
348 348 profile_flag = "--profile %s" % profile if profile != 'default' else ""
349 349
350 350 # if it's in the security dir, truncate to basename
351 351 if security_dir == os.path.dirname(connection_file):
352 352 connection_file = os.path.basename(connection_file)
353 353
354 354
355 355 print (info + '\n')
356 356 print ("Paste the above JSON into a file, and connect with:\n"
357 357 " $> ipython <app> --existing <file>\n"
358 358 "or, if you are local, you can connect with just:\n"
359 359 " $> ipython <app> --existing {0} {1}\n"
360 360 "or even just:\n"
361 361 " $> ipython <app> --existing {1}\n"
362 362 "if this is the most recent IPython session you have started.".format(
363 363 connection_file, profile_flag
364 364 )
365 365 )
366 366
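# Sketch of the output: the printed JSON is the kernel's connection file,
# whose fields look roughly like this (all values illustrative):
#
#   {
#     "shell_port": 50160, "iopub_port": 50161, "stdin_port": 50162,
#     "control_port": 50163, "hb_port": 50164,
#     "ip": "127.0.0.1", "transport": "tcp",
#     "signature_scheme": "hmac-sha256", "key": "a0436f6c-..."
#   }
#
# followed by the matching `ipython <app> --existing ...` command lines.
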
367 367 @line_magic
368 368 def qtconsole(self, arg_s):
369 369 """Open a qtconsole connected to this kernel.
370 370
371 371 Useful for connecting a qtconsole to running notebooks, for better
372 372 debugging.
373 373 """
374 374
375 375 # %qtconsole should imply bind_kernel for engines:
376 376 try:
377 377 from IPython.parallel import bind_kernel
378 378 except ImportError:
379 379 # technically possible, because parallel has higher pyzmq min-version
380 380 pass
381 381 else:
382 382 bind_kernel()
383 383
384 384 try:
385 385 p = connect_qtconsole(argv=arg_split(arg_s, os.name=='posix'))
386 386 except Exception as e:
387 387 error("Could not start qtconsole: %r" % e)
388 388 return
389 389
390 390 @line_magic
391 391 def autosave(self, arg_s):
392 392 """Set the autosave interval in the notebook (in seconds).
393 393
394 394 The default value is 120, or two minutes.
395 395 ``%autosave 0`` will disable autosave.
396 396
397 397 This magic only has an effect when called from the notebook interface.
398 398 It has no effect when called in a startup file.
399 399 """
400 400
401 401 try:
402 402 interval = int(arg_s)
403 403 except ValueError:
404 404 raise UsageError("%%autosave requires an integer, got %r" % arg_s)
405 405
406 406 # javascript wants milliseconds
407 407 milliseconds = 1000 * interval
408 408 display(Javascript("IPython.notebook.set_autosave_interval(%i)" % milliseconds),
409 409 include=['application/javascript']
410 410 )
411 411 if interval:
412 412 print("Autosaving every %i seconds" % interval)
413 413 else:
414 414 print("Autosave disabled")
415 415
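# Usage sketch (notebook frontend assumed):
#
#   %autosave 60   # -> "Autosaving every 60 seconds"
#   %autosave 0    # -> "Autosave disabled"
#
# The magic only emits Javascript that calls
# IPython.notebook.set_autosave_interval(60000); outside the notebook the
# display is a no-op.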
416 416
417 417 class ZMQInteractiveShell(InteractiveShell):
418 418 """A subclass of InteractiveShell for ZMQ."""
419 419
420 420 displayhook_class = Type(ZMQShellDisplayHook)
421 421 display_pub_class = Type(ZMQDisplayPublisher)
422 422 data_pub_class = Type(ZMQDataPublisher)
423 423 kernel = Any()
424 424 parent_header = Any()
425 425
426 426 # Override the traitlet in the parent class, because there's no point using
427 427 # readline for the kernel. Can be removed when the readline code is moved
428 428 # to the terminal frontend.
429 429 colors_force = CBool(True)
430 430 readline_use = CBool(False)
431 431 # autoindent has no meaning in a zmqshell, and attempting to enable it
432 432 # will print a warning in the absence of readline.
433 433 autoindent = CBool(False)
434 434
435 435 exiter = Instance(ZMQExitAutocall)
436 436 def _exiter_default(self):
437 437 return ZMQExitAutocall(self)
438 438
439 439 def _exit_now_changed(self, name, old, new):
440 440 """stop eventloop when exit_now fires"""
441 441 if new:
442 442 loop = ioloop.IOLoop.instance()
443 443 loop.add_timeout(time.time()+0.1, loop.stop)
444 444
445 445 keepkernel_on_exit = None
446 446
447 447 # Over ZeroMQ, GUI control isn't done with PyOS_InputHook as there is no
448 448 # interactive input being read; we provide event loop support in ipkernel
449 449 @staticmethod
450 450 def enable_gui(gui):
451 451 from .eventloops import enable_gui as real_enable_gui
452 452 try:
453 453 real_enable_gui(gui)
454 454 except ValueError as e:
455 455 raise UsageError("%s" % e)
456 456
457 457 def init_environment(self):
458 458 """Configure the user's environment.
459 459
460 460 """
461 461 env = os.environ
462 462 # These two ensure 'ls' produces nice coloring on BSD-derived systems
463 463 env['TERM'] = 'xterm-color'
464 464 env['CLICOLOR'] = '1'
465 465 # Since normal pagers don't work at all (over pexpect we don't have
466 466 # single-key control of the subprocess), try to disable paging in
467 467 # subprocesses as much as possible.
468 468 env['PAGER'] = 'cat'
469 469 env['GIT_PAGER'] = 'cat'
470 470
471 471 # And install the payload version of page.
472 472 install_payload_page()
473 473
474 474 def auto_rewrite_input(self, cmd):
475 475 """Called to show the auto-rewritten input for autocall and friends.
476 476
477 477 FIXME: this payload is currently not correctly processed by the
478 478 frontend.
479 479 """
480 480 new = self.prompt_manager.render('rewrite') + cmd
481 481 payload = dict(
482 482 source='auto_rewrite_input',
483 483 transformed_input=new,
484 484 )
485 485 self.payload_manager.write_payload(payload)
486 486
487 487 def ask_exit(self):
488 488 """Engage the exit actions."""
489 489 self.exit_now = True
490 490 payload = dict(
491 491 source='ask_exit',
492 492 exit=True,
493 493 keepkernel=self.keepkernel_on_exit,
494 494 )
495 495 self.payload_manager.write_payload(payload)
496 496
497 497 def _showtraceback(self, etype, evalue, stb):
498 498 # try to preserve ordering of tracebacks and print statements
499 499 sys.stdout.flush()
500 500 sys.stderr.flush()
501 501
502 502 exc_content = {
503 503 u'traceback' : stb,
504 504 u'ename' : unicode_type(etype.__name__),
505 505 u'evalue' : py3compat.safe_unicode(evalue),
506 506 }
507 507
508 508 dh = self.displayhook
509 509 # Send exception info over the pub socket for clients other than the caller
510 510 # to pick up
511 511 topic = None
512 512 if dh.topic:
513 topic = dh.topic.replace(b'execute_result', b'pyerr')
513 topic = dh.topic.replace(b'execute_result', b'error')
514 514
515 exc_msg = dh.session.send(dh.pub_socket, u'pyerr', json_clean(exc_content), dh.parent_header, ident=topic)
515 exc_msg = dh.session.send(dh.pub_socket, u'error', json_clean(exc_content), dh.parent_header, ident=topic)
516 516
517 517 # FIXME - Hack: store exception info in shell object. Right now, the
518 518 # caller is reading this info after the fact; we need to fix this logic
519 519 # to remove this hack. Even uglier, we need to store the error status
520 520 # here, because in the main loop, the logic that sets it is being
521 521 # skipped because runlines swallows the exceptions.
522 522 exc_content[u'status'] = u'error'
523 523 self._reply_content = exc_content
524 524 # /FIXME
525 525
526 526 return exc_content
527 527
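# Sketch of the resulting IOPub message after this rename (values are
# illustrative; the keys are exactly those of exc_content above):
#
#   msg_type: u'error'
#   content:  {u'ename': u'ZeroDivisionError',
#              u'evalue': u'integer division or modulo by zero',
#              u'traceback': [...formatted traceback strings...]}
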
528 528 def set_next_input(self, text):
529 529 """Send the specified text to the frontend to be presented at the next
530 530 input cell."""
531 531 payload = dict(
532 532 source='set_next_input',
533 533 text=text
534 534 )
535 535 self.payload_manager.write_payload(payload)
536 536
537 537 def set_parent(self, parent):
538 538 """Set the parent header for associating output with its triggering input"""
539 539 self.parent_header = parent
540 540 self.displayhook.set_parent(parent)
541 541 self.display_pub.set_parent(parent)
542 542 self.data_pub.set_parent(parent)
543 543 try:
544 544 sys.stdout.set_parent(parent)
545 545 except AttributeError:
546 546 pass
547 547 try:
548 548 sys.stderr.set_parent(parent)
549 549 except AttributeError:
550 550 pass
551 551
552 552 def get_parent(self):
553 553 return self.parent_header
554 554
555 555 #-------------------------------------------------------------------------
556 556 # Things related to magics
557 557 #-------------------------------------------------------------------------
558 558
559 559 def init_magics(self):
560 560 super(ZMQInteractiveShell, self).init_magics()
561 561 self.register_magics(KernelMagics)
562 562 self.magics_manager.register_alias('ed', 'edit')
563 563
564 564 def init_comms(self):
565 565 self.comm_manager = CommManager(shell=self, parent=self)
566 566 self.configurables.append(self.comm_manager)
567 567
568 568
569 569 InteractiveShellABC.register(ZMQInteractiveShell)
@@ -1,1863 +1,1863 b''
1 1 """A semi-synchronous Client for IPython parallel"""
2 2
3 3 # Copyright (c) IPython Development Team.
4 4 # Distributed under the terms of the Modified BSD License.
5 5
6 6 from __future__ import print_function
7 7
8 8 import os
9 9 import json
10 10 import sys
11 11 from threading import Thread, Event
12 12 import time
13 13 import warnings
14 14 from datetime import datetime
15 15 from getpass import getpass
16 16 from pprint import pprint
17 17
18 18 pjoin = os.path.join
19 19
20 20 import zmq
21 21
22 22 from IPython.config.configurable import MultipleInstanceError
23 23 from IPython.core.application import BaseIPythonApplication
24 24 from IPython.core.profiledir import ProfileDir, ProfileDirError
25 25
26 26 from IPython.utils.capture import RichOutput
27 27 from IPython.utils.coloransi import TermColors
28 28 from IPython.utils.jsonutil import rekey, extract_dates, parse_date
29 29 from IPython.utils.localinterfaces import localhost, is_local_ip
30 30 from IPython.utils.path import get_ipython_dir
31 31 from IPython.utils.py3compat import cast_bytes, string_types, xrange, iteritems
32 32 from IPython.utils.traitlets import (HasTraits, Integer, Instance, Unicode,
33 33 Dict, List, Bool, Set, Any)
34 34 from IPython.external.decorator import decorator
35 35 from IPython.external.ssh import tunnel
36 36
37 37 from IPython.parallel import Reference
38 38 from IPython.parallel import error
39 39 from IPython.parallel import util
40 40
41 41 from IPython.kernel.zmq.session import Session, Message
42 42 from IPython.kernel.zmq import serialize
43 43
44 44 from .asyncresult import AsyncResult, AsyncHubResult
45 45 from .view import DirectView, LoadBalancedView
46 46
47 47 #--------------------------------------------------------------------------
48 48 # Decorators for Client methods
49 49 #--------------------------------------------------------------------------
50 50
51 51 @decorator
52 52 def spin_first(f, self, *args, **kwargs):
53 53 """Call spin() to sync state prior to calling the method."""
54 54 self.spin()
55 55 return f(self, *args, **kwargs)
56 56
57 57
58 58 #--------------------------------------------------------------------------
59 59 # Classes
60 60 #--------------------------------------------------------------------------
61 61
62 62
63 63 class ExecuteReply(RichOutput):
64 64 """wrapper for finished Execute results"""
65 65 def __init__(self, msg_id, content, metadata):
66 66 self.msg_id = msg_id
67 67 self._content = content
68 68 self.execution_count = content['execution_count']
69 69 self.metadata = metadata
70 70
71 71 # RichOutput overrides
72 72
73 73 @property
74 74 def source(self):
75 75 execute_result = self.metadata['execute_result']
76 76 if execute_result:
77 77 return execute_result.get('source', '')
78 78
79 79 @property
80 80 def data(self):
81 81 execute_result = self.metadata['execute_result']
82 82 if execute_result:
83 83 return execute_result.get('data', {})
84 84
85 85 @property
86 86 def _metadata(self):
87 87 execute_result = self.metadata['execute_result']
88 88 if execute_result:
89 89 return execute_result.get('metadata', {})
90 90
91 91 def display(self):
92 92 from IPython.display import publish_display_data
93 93 publish_display_data(self.source, self.data, self.metadata)
94 94
95 95 def _repr_mime_(self, mime):
96 96 if mime not in self.data:
97 97 return
98 98 data = self.data[mime]
99 99 if mime in self._metadata:
100 100 return data, self._metadata[mime]
101 101 else:
102 102 return data
103 103
104 104 def __getitem__(self, key):
105 105 return self.metadata[key]
106 106
107 107 def __getattr__(self, key):
108 108 if key not in self.metadata:
109 109 raise AttributeError(key)
110 110 return self.metadata[key]
111 111
112 112 def __repr__(self):
113 113 execute_result = self.metadata['execute_result'] or {'data':{}}
114 114 text_out = execute_result['data'].get('text/plain', '')
115 115 if len(text_out) > 32:
116 116 text_out = text_out[:29] + '...'
117 117
118 118 return "<ExecuteReply[%i]: %s>" % (self.execution_count, text_out)
119 119
120 120 def _repr_pretty_(self, p, cycle):
121 121 execute_result = self.metadata['execute_result'] or {'data':{}}
122 122 text_out = execute_result['data'].get('text/plain', '')
123 123
124 124 if not text_out:
125 125 return
126 126
127 127 try:
128 128 ip = get_ipython()
129 129 except NameError:
130 130 colors = "NoColor"
131 131 else:
132 132 colors = ip.colors
133 133
134 134 if colors == "NoColor":
135 135 out = normal = ""
136 136 else:
137 137 out = TermColors.Red
138 138 normal = TermColors.Normal
139 139
140 140 if '\n' in text_out and not text_out.startswith('\n'):
141 141 # add newline for multiline reprs
142 142 text_out = '\n' + text_out
143 143
144 144 p.text(
145 145 out + u'Out[%i:%i]: ' % (
146 146 self.metadata['engine_id'], self.execution_count
147 147 ) + normal + text_out
148 148 )
149 149
150 150
151 151 class Metadata(dict):
152 152 """Subclass of dict for initializing metadata values.
153 153
154 154 Attribute access works on keys.
155 155
156 156 These objects have a strict set of keys - errors will raise if you try
157 157 to add new keys.
158 158 """
159 159 def __init__(self, *args, **kwargs):
160 160 dict.__init__(self)
161 161 md = {'msg_id' : None,
162 162 'submitted' : None,
163 163 'started' : None,
164 164 'completed' : None,
165 165 'received' : None,
166 166 'engine_uuid' : None,
167 167 'engine_id' : None,
168 168 'follow' : None,
169 169 'after' : None,
170 170 'status' : None,
171 171
172 172 'execute_input' : None,
173 173 'execute_result' : None,
174 'pyerr' : None,
174 'error' : None,
175 175 'stdout' : '',
176 176 'stderr' : '',
177 177 'outputs' : [],
178 178 'data': {},
179 179 'outputs_ready' : False,
180 180 }
181 181 self.update(md)
182 182 self.update(dict(*args, **kwargs))
183 183
184 184 def __getattr__(self, key):
185 185 """getattr aliased to getitem"""
186 186 if key in self:
187 187 return self[key]
188 188 else:
189 189 raise AttributeError(key)
190 190
191 191 def __setattr__(self, key, value):
192 192 """setattr aliased to setitem, with strict"""
193 193 if key in self:
194 194 self[key] = value
195 195 else:
196 196 raise AttributeError(key)
197 197
198 198 def __setitem__(self, key, value):
199 199 """strict static key enforcement"""
200 200 if key in self:
201 201 dict.__setitem__(self, key, value)
202 202 else:
203 203 raise KeyError(key)
204 204
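# Behavior sketch for this strict-key dict (illustrative values):
#
#   md = Metadata()
#   md['status'] = 'ok'   # known key: allowed
#   md.status             # -> 'ok'; attribute access aliases item access
#   md['bogus'] = 1       # -> KeyError: the key set is fixed at construction
#   md.bogus = 1          # -> AttributeError, same strictness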
205 205
206 206 class Client(HasTraits):
207 207 """A semi-synchronous client to the IPython ZMQ cluster
208 208
209 209 Parameters
210 210 ----------
211 211
212 212 url_file : str/unicode; path to ipcontroller-client.json
213 213 This JSON file should contain all the information needed to connect to a cluster,
214 214 and is likely the only argument needed.
215 215 It holds the connection information for the Hub's registration; if a json
216 216 connector file is given, no further configuration is usually necessary.
217 217 [Default: use profile]
218 218 profile : bytes
219 219 The name of the Cluster profile to be used to find connector information.
220 220 If run from an IPython application, the default profile will be the same
221 221 as the running application, otherwise it will be 'default'.
222 222 cluster_id : str
223 223 String id to be added to runtime files, to prevent name collisions when using
224 224 multiple clusters with a single profile simultaneously.
225 225 When set, will look for files named like: 'ipcontroller-<cluster_id>-client.json'
226 226 Since this is text inserted into filenames, typical recommendations apply:
227 227 Simple character strings are ideal, and spaces are not recommended (but
228 228 should generally work)
229 229 context : zmq.Context
230 230 Pass an existing zmq.Context instance, otherwise the client will create its own.
231 231 debug : bool
232 232 flag for lots of message printing for debug purposes
233 233 timeout : int/float
234 234 time (in seconds) to wait for connection replies from the Hub
235 235 [Default: 10]
236 236
237 237 #-------------- session related args ----------------
238 238
239 239 config : Config object
240 240 If specified, this will be relayed to the Session for configuration
241 241 username : str
242 242 set username for the session object
243 243
244 244 #-------------- ssh related args ----------------
245 245 # These are args for configuring the ssh tunnel to be used
246 246 # credentials are used to forward connections over ssh to the Controller
247 247 # Note that the ip given in `addr` needs to be relative to sshserver
248 248 # The most basic case is to leave addr as pointing to localhost (127.0.0.1),
249 249 # and set sshserver as the same machine the Controller is on. However,
250 250 # the only requirement is that sshserver is able to see the Controller
251 251 # (i.e. is within the same trusted network).
252 252
253 253 sshserver : str
254 254 A string of the form passed to ssh, i.e. 'server.tld' or 'user@server.tld:port'
255 255 If keyfile or password is specified, and this is not, it will default to
256 256 the ip given in addr.
257 257 sshkey : str; path to ssh private key file
258 258 This specifies a key to be used in ssh login, default None.
259 259 Regular default ssh keys will be used without specifying this argument.
260 260 password : str
261 261 Your ssh password to sshserver. Note that if this is left None,
262 262 you will be prompted for it if passwordless key based login is unavailable.
263 263 paramiko : bool
264 264 flag for whether to use paramiko instead of shell ssh for tunneling.
265 265 [default: True on win32, False otherwise]
266 266
267 267
268 268 Attributes
269 269 ----------
270 270
271 271 ids : list of int engine IDs
272 272 requesting the ids attribute always synchronizes
273 273 the registration state. To request ids without synchronization,
274 274 use the semi-private _ids attribute.
275 275
276 276 history : list of msg_ids
277 277 a list of msg_ids, keeping track of all the execution
278 278 messages you have submitted in order.
279 279
280 280 outstanding : set of msg_ids
281 281 a set of msg_ids that have been submitted, but whose
282 282 results have not yet been received.
283 283
284 284 results : dict
285 285 a dict of all our results, keyed by msg_id
286 286
287 287 block : bool
288 288 determines default behavior when block not specified
289 289 in execution methods
290 290
291 291 Methods
292 292 -------
293 293
294 294 spin
295 295 flushes incoming results and registration state changes
296 296 control methods spin, and requesting `ids` also keeps state up to date
297 297
298 298 wait
299 299 wait on one or more msg_ids
300 300
301 301 execution methods
302 302 apply
303 303 legacy: execute, run
304 304
305 305 data movement
306 306 push, pull, scatter, gather
307 307
308 308 query methods
309 309 queue_status, get_result, purge, result_status
310 310
311 311 control methods
312 312 abort, shutdown
313 313
314 314 """
315 315
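# Construction sketches (paths, hosts, and profile names illustrative):
#
#   rc = Client()                              # default profile
#   rc = Client(profile='mycluster')           # named profile
#   rc = Client('/path/to/ipcontroller-client.json')   # explicit url_file
#   rc = Client(sshserver='me@gateway.example.com', sshkey='~/.ssh/id_rsa')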
316 316
317 317 block = Bool(False)
318 318 outstanding = Set()
319 319 results = Instance('collections.defaultdict', (dict,))
320 320 metadata = Instance('collections.defaultdict', (Metadata,))
321 321 history = List()
322 322 debug = Bool(False)
323 323 _spin_thread = Any()
324 324 _stop_spinning = Any()
325 325
326 326 profile=Unicode()
327 327 def _profile_default(self):
328 328 if BaseIPythonApplication.initialized():
329 329 # an IPython app *might* be running, try to get its profile
330 330 try:
331 331 return BaseIPythonApplication.instance().profile
332 332 except (AttributeError, MultipleInstanceError):
333 333 # could be a *different* subclass of config.Application,
334 334 # which would raise one of these two errors.
335 335 return u'default'
336 336 else:
337 337 return u'default'
338 338
339 339
340 340 _outstanding_dict = Instance('collections.defaultdict', (set,))
341 341 _ids = List()
342 342 _connected=Bool(False)
343 343 _ssh=Bool(False)
344 344 _context = Instance('zmq.Context')
345 345 _config = Dict()
346 346 _engines=Instance(util.ReverseDict, (), {})
347 347 # _hub_socket=Instance('zmq.Socket')
348 348 _query_socket=Instance('zmq.Socket')
349 349 _control_socket=Instance('zmq.Socket')
350 350 _iopub_socket=Instance('zmq.Socket')
351 351 _notification_socket=Instance('zmq.Socket')
352 352 _mux_socket=Instance('zmq.Socket')
353 353 _task_socket=Instance('zmq.Socket')
354 354 _task_scheme=Unicode()
355 355 _closed = False
356 356 _ignored_control_replies=Integer(0)
357 357 _ignored_hub_replies=Integer(0)
358 358
359 359 def __new__(self, *args, **kw):
360 360 # don't raise on positional args
361 361 return HasTraits.__new__(self, **kw)
362 362
363 363 def __init__(self, url_file=None, profile=None, profile_dir=None, ipython_dir=None,
364 364 context=None, debug=False,
365 365 sshserver=None, sshkey=None, password=None, paramiko=None,
366 366 timeout=10, cluster_id=None, **extra_args
367 367 ):
368 368 if profile:
369 369 super(Client, self).__init__(debug=debug, profile=profile)
370 370 else:
371 371 super(Client, self).__init__(debug=debug)
372 372 if context is None:
373 373 context = zmq.Context.instance()
374 374 self._context = context
375 375 self._stop_spinning = Event()
376 376
377 377 if 'url_or_file' in extra_args:
378 378 url_file = extra_args['url_or_file']
379 379 warnings.warn("url_or_file arg no longer supported, use url_file", DeprecationWarning)
380 380
381 381 if url_file and util.is_url(url_file):
382 382 raise ValueError("single urls cannot be specified, url-files must be used.")
383 383
384 384 self._setup_profile_dir(self.profile, profile_dir, ipython_dir)
385 385
386 386 if self._cd is not None:
387 387 if url_file is None:
388 388 if not cluster_id:
389 389 client_json = 'ipcontroller-client.json'
390 390 else:
391 391 client_json = 'ipcontroller-%s-client.json' % cluster_id
392 392 url_file = pjoin(self._cd.security_dir, client_json)
393 393 if url_file is None:
394 394 raise ValueError(
395 395 "I can't find enough information to connect to a hub!"
396 396 " Please specify at least one of url_file or profile."
397 397 )
398 398
399 399 with open(url_file) as f:
400 400 cfg = json.load(f)
401 401
402 402 self._task_scheme = cfg['task_scheme']
403 403
404 404 # sync defaults from args, json:
405 405 if sshserver:
406 406 cfg['ssh'] = sshserver
407 407
408 408 location = cfg.setdefault('location', None)
409 409
410 410 proto,addr = cfg['interface'].split('://')
411 411 addr = util.disambiguate_ip_address(addr, location)
412 412 cfg['interface'] = "%s://%s" % (proto, addr)
413 413
414 414 # turn interface,port into full urls:
415 415 for key in ('control', 'task', 'mux', 'iopub', 'notification', 'registration'):
416 416 cfg[key] = cfg['interface'] + ':%i' % cfg[key]
417 417
418 418 url = cfg['registration']
419 419
420 420 if location is not None and addr == localhost():
421 421 # location specified, and connection is expected to be local
422 422 if not is_local_ip(location) and not sshserver:
423 423 # load ssh from JSON *only* if the controller is not on
424 424 # this machine
425 425 sshserver=cfg['ssh']
426 426 if not is_local_ip(location) and not sshserver:
427 427 # warn if no ssh specified, but SSH is probably needed
428 428 # This is only a warning, because the most likely cause
429 429 # is a local Controller on a laptop whose IP is dynamic
430 430 warnings.warn("""
431 431 Controller appears to be listening on localhost, but not on this machine.
432 432 If this is true, you should specify Client(...,sshserver='you@%s')
433 433 or instruct your controller to listen on an external IP."""%location,
434 434 RuntimeWarning)
435 435 elif not sshserver:
436 436 # otherwise sync with cfg
437 437 sshserver = cfg['ssh']
438 438
439 439 self._config = cfg
440 440
441 441 self._ssh = bool(sshserver or sshkey or password)
442 442 if self._ssh and sshserver is None:
443 443 # default to ssh via localhost
444 444 sshserver = addr
445 445 if self._ssh and password is None:
446 446 if tunnel.try_passwordless_ssh(sshserver, sshkey, paramiko):
447 447 password=False
448 448 else:
449 449 password = getpass("SSH Password for %s: "%sshserver)
450 450 ssh_kwargs = dict(keyfile=sshkey, password=password, paramiko=paramiko)
451 451
452 452 # configure and construct the session
453 453 try:
454 454 extra_args['packer'] = cfg['pack']
455 455 extra_args['unpacker'] = cfg['unpack']
456 456 extra_args['key'] = cast_bytes(cfg['key'])
457 457 extra_args['signature_scheme'] = cfg['signature_scheme']
458 458 except KeyError as exc:
459 459 msg = '\n'.join([
460 460 "Connection file is invalid (missing '{}'), possibly from an old version of IPython.",
461 461 "If you are reusing connection files, remove them and start ipcontroller again."
462 462 ])
463 463 raise ValueError(msg.format(exc.args[0]))
464 464
465 465 self.session = Session(**extra_args)
466 466
467 467 self._query_socket = self._context.socket(zmq.DEALER)
468 468
469 469 if self._ssh:
470 470 tunnel.tunnel_connection(self._query_socket, cfg['registration'], sshserver, **ssh_kwargs)
471 471 else:
472 472 self._query_socket.connect(cfg['registration'])
473 473
474 474 self.session.debug = self.debug
475 475
476 476 self._notification_handlers = {'registration_notification' : self._register_engine,
477 477 'unregistration_notification' : self._unregister_engine,
478 478 'shutdown_notification' : lambda msg: self.close(),
479 479 }
480 480 self._queue_handlers = {'execute_reply' : self._handle_execute_reply,
481 481 'apply_reply' : self._handle_apply_reply}
482 482
483 483 try:
484 484 self._connect(sshserver, ssh_kwargs, timeout)
485 485 except:
486 486 self.close(linger=0)
487 487 raise
488 488
489 489 # last step: setup magics, if we are in IPython:
490 490
491 491 try:
492 492 ip = get_ipython()
493 493 except NameError:
494 494 return
495 495 else:
496 496 if 'px' not in ip.magics_manager.magics:
497 497 # in IPython but we are the first Client.
498 498 # activate a default view for parallel magics.
499 499 self.activate()
500 500
501 501 def __del__(self):
502 502 """cleanup sockets, but _not_ context."""
503 503 self.close()
504 504
505 505 def _setup_profile_dir(self, profile, profile_dir, ipython_dir):
506 506 if ipython_dir is None:
507 507 ipython_dir = get_ipython_dir()
508 508 if profile_dir is not None:
509 509 try:
510 510 self._cd = ProfileDir.find_profile_dir(profile_dir)
511 511 return
512 512 except ProfileDirError:
513 513 pass
514 514 elif profile is not None:
515 515 try:
516 516 self._cd = ProfileDir.find_profile_dir_by_name(
517 517 ipython_dir, profile)
518 518 return
519 519 except ProfileDirError:
520 520 pass
521 521 self._cd = None
522 522
523 523 def _update_engines(self, engines):
524 524 """Update our engines dict and _ids from a dict of the form: {id:uuid}."""
525 525 for k,v in iteritems(engines):
526 526 eid = int(k)
527 527 if eid not in self._engines:
528 528 self._ids.append(eid)
529 529 self._engines[eid] = v
530 530 self._ids = sorted(self._ids)
531 531 if sorted(self._engines.keys()) != list(range(len(self._engines))) and \
532 532 self._task_scheme == 'pure' and self._task_socket:
533 533 self._stop_scheduling_tasks()
534 534
535 535 def _stop_scheduling_tasks(self):
536 536 """Stop scheduling tasks because an engine has been unregistered
537 537 from a pure ZMQ scheduler.
538 538 """
539 539 self._task_socket.close()
540 540 self._task_socket = None
541 541 msg = "An engine has been unregistered, and we are using pure " +\
542 542 "ZMQ task scheduling. Task farming will be disabled."
543 543 if self.outstanding:
544 544 msg += " If you were running tasks when this happened, " +\
545 545 "some `outstanding` msg_ids may never resolve."
546 546 warnings.warn(msg, RuntimeWarning)
547 547
548 548 def _build_targets(self, targets):
549 549 """Turn valid target IDs or 'all' into two lists:
550 550 (int_ids, uuids).
551 551 """
552 552 if not self._ids:
553 553 # flush notification socket if no engines yet, just in case
554 554 if not self.ids:
555 555 raise error.NoEnginesRegistered("Can't build targets without any engines")
556 556
557 557 if targets is None:
558 558 targets = self._ids
559 559 elif isinstance(targets, string_types):
560 560 if targets.lower() == 'all':
561 561 targets = self._ids
562 562 else:
563 563 raise TypeError("%r not valid str target, must be 'all'"%(targets))
564 564 elif isinstance(targets, int):
565 565 if targets < 0:
566 566 targets = self.ids[targets]
567 567 if targets not in self._ids:
568 568 raise IndexError("No such engine: %i"%targets)
569 569 targets = [targets]
570 570
571 571 if isinstance(targets, slice):
572 572 indices = list(range(len(self._ids))[targets])
573 573 ids = self.ids
574 574 targets = [ ids[i] for i in indices ]
575 575
576 576 if not isinstance(targets, (tuple, list, xrange)):
577 577 raise TypeError("targets by int/slice/collection of ints only, not %s"%(type(targets)))
578 578
579 579 return [cast_bytes(self._engines[t]) for t in targets], list(targets)
580 580
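# Accepted `targets` forms, per the normalization above (engine ids
# illustrative):
#
#   client._build_targets(None)         # all registered engines
#   client._build_targets('all')        # same; the only valid string
#   client._build_targets(3)            # one engine -> ([uuid_3], [3])
#   client._build_targets(-1)           # negative ints index into self.ids
#   client._build_targets(slice(0, 4))  # slices of self.ids
#   client._build_targets([0, 2, 5])    # any collection of ints
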
581 581 def _connect(self, sshserver, ssh_kwargs, timeout):
582 582 """setup all our socket connections to the cluster. This is called from
583 583 __init__."""
584 584
585 585 # Maybe allow reconnecting?
586 586 if self._connected:
587 587 return
588 588 self._connected=True
589 589
590 590 def connect_socket(s, url):
591 591 if self._ssh:
592 592 return tunnel.tunnel_connection(s, url, sshserver, **ssh_kwargs)
593 593 else:
594 594 return s.connect(url)
595 595
596 596 self.session.send(self._query_socket, 'connection_request')
597 597 # use Poller because zmq.select has wrong units in pyzmq 2.1.7
598 598 poller = zmq.Poller()
599 599 poller.register(self._query_socket, zmq.POLLIN)
600 600 # poll expects milliseconds, timeout is seconds
601 601 evts = poller.poll(timeout*1000)
602 602 if not evts:
603 603 raise error.TimeoutError("Hub connection request timed out")
604 604 idents,msg = self.session.recv(self._query_socket,mode=0)
605 605 if self.debug:
606 606 pprint(msg)
607 607 content = msg['content']
608 608 # self._config['registration'] = dict(content)
609 609 cfg = self._config
610 610 if content['status'] == 'ok':
611 611 self._mux_socket = self._context.socket(zmq.DEALER)
612 612 connect_socket(self._mux_socket, cfg['mux'])
613 613
614 614 self._task_socket = self._context.socket(zmq.DEALER)
615 615 connect_socket(self._task_socket, cfg['task'])
616 616
617 617 self._notification_socket = self._context.socket(zmq.SUB)
618 618 self._notification_socket.setsockopt(zmq.SUBSCRIBE, b'')
619 619 connect_socket(self._notification_socket, cfg['notification'])
620 620
621 621 self._control_socket = self._context.socket(zmq.DEALER)
622 622 connect_socket(self._control_socket, cfg['control'])
623 623
624 624 self._iopub_socket = self._context.socket(zmq.SUB)
625 625 self._iopub_socket.setsockopt(zmq.SUBSCRIBE, b'')
626 626 connect_socket(self._iopub_socket, cfg['iopub'])
627 627
628 628 self._update_engines(dict(content['engines']))
629 629 else:
630 630 self._connected = False
631 631 raise Exception("Failed to connect!")
632 632
633 633 #--------------------------------------------------------------------------
634 634 # handlers and callbacks for incoming messages
635 635 #--------------------------------------------------------------------------
636 636
637 637 def _unwrap_exception(self, content):
638 638 """unwrap exception, and remap engine_id to int."""
639 639 e = error.unwrap_exception(content)
640 640 # print e.traceback
641 641 if e.engine_info:
642 642 e_uuid = e.engine_info['engine_uuid']
643 643 eid = self._engines[e_uuid]
644 644 e.engine_info['engine_id'] = eid
645 645 return e
646 646
647 647 def _extract_metadata(self, msg):
648 648 header = msg['header']
649 649 parent = msg['parent_header']
650 650 msg_meta = msg['metadata']
651 651 content = msg['content']
652 652 md = {'msg_id' : parent['msg_id'],
653 653 'received' : datetime.now(),
654 654 'engine_uuid' : msg_meta.get('engine', None),
655 655 'follow' : msg_meta.get('follow', []),
656 656 'after' : msg_meta.get('after', []),
657 657 'status' : content['status'],
658 658 }
659 659
660 660 if md['engine_uuid'] is not None:
661 661 md['engine_id'] = self._engines.get(md['engine_uuid'], None)
662 662
663 663 if 'date' in parent:
664 664 md['submitted'] = parent['date']
665 665 if 'started' in msg_meta:
666 666 md['started'] = parse_date(msg_meta['started'])
667 667 if 'date' in header:
668 668 md['completed'] = header['date']
669 669 return md
670 670
671 671 def _register_engine(self, msg):
672 672 """Register a new engine, and update our connection info."""
673 673 content = msg['content']
674 674 eid = content['id']
675 675 d = {eid : content['uuid']}
676 676 self._update_engines(d)
677 677
678 678 def _unregister_engine(self, msg):
679 679 """Unregister an engine that has died."""
680 680 content = msg['content']
681 681 eid = int(content['id'])
682 682 if eid in self._ids:
683 683 self._ids.remove(eid)
684 684 uuid = self._engines.pop(eid)
685 685
686 686 self._handle_stranded_msgs(eid, uuid)
687 687
688 688 if self._task_socket and self._task_scheme == 'pure':
689 689 self._stop_scheduling_tasks()
690 690
691 691 def _handle_stranded_msgs(self, eid, uuid):
692 692 """Handle messages known to be on an engine when the engine unregisters.
693 693
694 694 It is possible that this will fire prematurely - that is, an engine will
695 695 go down after completing a result, and the client will be notified
696 696 of the unregistration and later receive the successful result.
697 697 """
698 698
699 699 outstanding = self._outstanding_dict[uuid]
700 700
701 701 for msg_id in list(outstanding):
702 702 if msg_id in self.results:
703 703 # we already have this result
704 704 continue
705 705 try:
706 706 raise error.EngineError("Engine %r died while running task %r"%(eid, msg_id))
707 707 except:
708 708 content = error.wrap_exception()
709 709 # build a fake message:
710 710 msg = self.session.msg('apply_reply', content=content)
711 711 msg['parent_header']['msg_id'] = msg_id
712 712 msg['metadata']['engine'] = uuid
713 713 self._handle_apply_reply(msg)
714 714
715 715 def _handle_execute_reply(self, msg):
716 716 """Save the reply to an execute_request into our results.
717 717
718 718 execute messages are never actually used. apply is used instead.
719 719 """
720 720
721 721 parent = msg['parent_header']
722 722 msg_id = parent['msg_id']
723 723 if msg_id not in self.outstanding:
724 724 if msg_id in self.history:
725 725 print("got stale result: %s"%msg_id)
726 726 else:
727 727 print("got unknown result: %s"%msg_id)
728 728 else:
729 729 self.outstanding.remove(msg_id)
730 730
731 731 content = msg['content']
732 732 header = msg['header']
733 733
734 734 # construct metadata:
735 735 md = self.metadata[msg_id]
736 736 md.update(self._extract_metadata(msg))
737 737 # is this redundant?
738 738 self.metadata[msg_id] = md
739 739
740 740 e_outstanding = self._outstanding_dict[md['engine_uuid']]
741 741 if msg_id in e_outstanding:
742 742 e_outstanding.remove(msg_id)
743 743
744 744 # construct result:
745 745 if content['status'] == 'ok':
746 746 self.results[msg_id] = ExecuteReply(msg_id, content, md)
747 747 elif content['status'] == 'aborted':
748 748 self.results[msg_id] = error.TaskAborted(msg_id)
749 749 elif content['status'] == 'resubmitted':
750 750 # TODO: handle resubmission
751 751 pass
752 752 else:
753 753 self.results[msg_id] = self._unwrap_exception(content)
754 754
755 755 def _handle_apply_reply(self, msg):
756 756 """Save the reply to an apply_request into our results."""
757 757 parent = msg['parent_header']
758 758 msg_id = parent['msg_id']
759 759 if msg_id not in self.outstanding:
760 760 if msg_id in self.history:
761 761 print("got stale result: %s"%msg_id)
762 762 print(self.results[msg_id])
763 763 print(msg)
764 764 else:
765 765 print("got unknown result: %s"%msg_id)
766 766 else:
767 767 self.outstanding.remove(msg_id)
768 768 content = msg['content']
769 769 header = msg['header']
770 770
771 771 # construct metadata:
772 772 md = self.metadata[msg_id]
773 773 md.update(self._extract_metadata(msg))
774 774 # is this redundant?
775 775 self.metadata[msg_id] = md
776 776
777 777 e_outstanding = self._outstanding_dict[md['engine_uuid']]
778 778 if msg_id in e_outstanding:
779 779 e_outstanding.remove(msg_id)
780 780
781 781 # construct result:
782 782 if content['status'] == 'ok':
783 783 self.results[msg_id] = serialize.unserialize_object(msg['buffers'])[0]
784 784 elif content['status'] == 'aborted':
785 785 self.results[msg_id] = error.TaskAborted(msg_id)
786 786 elif content['status'] == 'resubmitted':
787 787 # TODO: handle resubmission
788 788 pass
789 789 else:
790 790 self.results[msg_id] = self._unwrap_exception(content)
791 791
792 792 def _flush_notifications(self):
793 793 """Flush notifications of engine registrations waiting
794 794 in ZMQ queue."""
795 795 idents,msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK)
796 796 while msg is not None:
797 797 if self.debug:
798 798 pprint(msg)
799 799 msg_type = msg['header']['msg_type']
800 800 handler = self._notification_handlers.get(msg_type, None)
801 801 if handler is None:
802 802 raise Exception("Unhandled message type: %s" % msg_type)
803 803 else:
804 804 handler(msg)
805 805 idents,msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK)
806 806
807 807 def _flush_results(self, sock):
808 808 """Flush task or queue results waiting in ZMQ queue."""
809 809 idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
810 810 while msg is not None:
811 811 if self.debug:
812 812 pprint(msg)
813 813 msg_type = msg['header']['msg_type']
814 814 handler = self._queue_handlers.get(msg_type, None)
815 815 if handler is None:
816 816 raise Exception("Unhandled message type: %s" % msg_type)
817 817 else:
818 818 handler(msg)
819 819 idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
820 820
821 821 def _flush_control(self, sock):
822 822 """Flush replies from the control channel waiting
823 823 in the ZMQ queue.
824 824
825 825 Currently: ignore them."""
826 826 if self._ignored_control_replies <= 0:
827 827 return
828 828 idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
829 829 while msg is not None:
830 830 self._ignored_control_replies -= 1
831 831 if self.debug:
832 832 pprint(msg)
833 833 idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
834 834
835 835 def _flush_ignored_control(self):
836 836 """flush ignored control replies"""
837 837 while self._ignored_control_replies > 0:
838 838 self.session.recv(self._control_socket)
839 839 self._ignored_control_replies -= 1
840 840
841 841 def _flush_ignored_hub_replies(self):
842 842 ident,msg = self.session.recv(self._query_socket, mode=zmq.NOBLOCK)
843 843 while msg is not None:
844 844 ident,msg = self.session.recv(self._query_socket, mode=zmq.NOBLOCK)
845 845
846 846 def _flush_iopub(self, sock):
847 847 """Flush replies from the iopub channel waiting
848 848 in the ZMQ queue.
849 849 """
850 850 idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
851 851 while msg is not None:
852 852 if self.debug:
853 853 pprint(msg)
854 854 parent = msg['parent_header']
855 855 # ignore IOPub messages with no parent.
856 856 # Caused by print statements or warnings from before the first execution.
857 857 if not parent:
858 858 idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
859 859 continue
860 860 msg_id = parent['msg_id']
861 861 content = msg['content']
862 862 header = msg['header']
863 863 msg_type = msg['header']['msg_type']
864 864
865 865 # init metadata:
866 866 md = self.metadata[msg_id]
867 867
868 868 if msg_type == 'stream':
869 869 name = content['name']
870 870 s = md[name] or ''
871 871 md[name] = s + content['data']
872 elif msg_type == 'pyerr':
873 md.update({'pyerr' : self._unwrap_exception(content)})
872 elif msg_type == 'error':
873 md.update({'error' : self._unwrap_exception(content)})
874 874 elif msg_type == 'execute_input':
875 875 md.update({'execute_input' : content['code']})
876 876 elif msg_type == 'display_data':
877 877 md['outputs'].append(content)
878 878 elif msg_type == 'execute_result':
879 879 md['execute_result'] = content
880 880 elif msg_type == 'data_message':
881 881 data, remainder = serialize.unserialize_object(msg['buffers'])
882 882 md['data'].update(data)
883 883 elif msg_type == 'status':
884 884 # idle message comes after all outputs
885 885 if content['execution_state'] == 'idle':
886 886 md['outputs_ready'] = True
887 887 else:
888 888 # unhandled msg_type
889 889 pass
890 890
891 891 # redundant?
892 892 self.metadata[msg_id] = md
893 893
894 894 idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
895 895
896 896 #--------------------------------------------------------------------------
897 897 # len, getitem
898 898 #--------------------------------------------------------------------------
899 899
900 900 def __len__(self):
901 901 """len(client) returns # of engines."""
902 902 return len(self.ids)
903 903
904 904 def __getitem__(self, key):
905 905 """index access returns DirectView multiplexer objects
906 906
907 907 Must be int, slice, or list/tuple/xrange of ints"""
908 908 if not isinstance(key, (int, slice, tuple, list, xrange)):
909 909 raise TypeError("key by int/slice/iterable of ints only, not %s"%(type(key)))
910 910 else:
911 911 return self.direct_view(key)
912 912
913 913 def __iter__(self):
914 914 """Since we define getitem, Client is iterable
915 915
916 916 but without an explicit __iter__, iteration is only correct when engine IDs
917 917 start at zero and are contiguous.
918 918 """
919 919 for eid in self.ids:
920 920 yield self.direct_view(eid)
921 921
922 922 #--------------------------------------------------------------------------
923 923 # Begin public methods
924 924 #--------------------------------------------------------------------------
925 925
926 926 @property
927 927 def ids(self):
928 928 """Always up-to-date ids property."""
929 929 self._flush_notifications()
930 930 # always copy:
931 931 return list(self._ids)
932 932
933 933 def activate(self, targets='all', suffix=''):
934 934 """Create a DirectView and register it with IPython magics
935 935
936 936 Defines the magics `%px, %autopx, %pxresult, %%px`
937 937
938 938 Parameters
939 939 ----------
940 940
941 941 targets: int, list of ints, or 'all'
942 942 The engines on which the view's magics will run
943 943 suffix: str [default: '']
944 944 The suffix, if any, for the magics. This allows you to have
945 945 multiple views associated with parallel magics at the same time.
946 946
947 947 e.g. ``rc.activate(targets=0, suffix='0')`` will give you
948 948 the magics ``%px0``, ``%pxresult0``, etc. for running magics just
949 949 on engine 0.
950 950 """
951 951 view = self.direct_view(targets)
952 952 view.block = True
953 953 view.activate(suffix)
954 954 return view
955 955
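A minimal usage sketch for `activate` (assumes a running cluster reachable via the default profile)::

    from IPython.parallel import Client

    rc = Client()
    rc.activate()                       # %px, %autopx, %pxresult, %%px on all engines
    rc.activate(targets=0, suffix='0')  # %px0 etc., bound to engine 0 only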
956 956 def close(self, linger=None):
957 957 """Close my zmq Sockets
958 958
959 959 If `linger`, set the zmq LINGER socket option,
960 960 which allows discarding of messages.
961 961 """
962 962 if self._closed:
963 963 return
964 964 self.stop_spin_thread()
965 965 snames = [ trait for trait in self.trait_names() if trait.endswith("socket") ]
966 966 for name in snames:
967 967 socket = getattr(self, name)
968 968 if socket is not None and not socket.closed:
969 969 if linger is not None:
970 970 socket.close(linger=linger)
971 971 else:
972 972 socket.close()
973 973 self._closed = True
974 974
975 975 def _spin_every(self, interval=1):
976 976 """target func for use in spin_thread"""
977 977 while True:
978 978 if self._stop_spinning.is_set():
979 979 return
980 980 time.sleep(interval)
981 981 self.spin()
982 982
983 983 def spin_thread(self, interval=1):
984 984 """call Client.spin() in a background thread on some regular interval
985 985
986 986 This helps ensure that messages don't pile up too much in the zmq queue
987 987 while you are working on other things, or just leaving an idle terminal.
988 988
989 989 It also helps limit potential padding of the `received` timestamp
990 990 on AsyncResult objects, used for timings.
991 991
992 992 Parameters
993 993 ----------
994 994
995 995 interval : float, optional
996 996 The interval on which to spin the client in the background thread
997 997 (simply passed to time.sleep).
998 998
999 999 Notes
1000 1000 -----
1001 1001
1002 1002 For precision timing, you may want to use this method to put a bound
1003 1003 on the jitter (in seconds) in `received` timestamps used
1004 1004 in AsyncResult.wall_time.
1005 1005
1006 1006 """
1007 1007 if self._spin_thread is not None:
1008 1008 self.stop_spin_thread()
1009 1009 self._stop_spinning.clear()
1010 1010 self._spin_thread = Thread(target=self._spin_every, args=(interval,))
1011 1011 self._spin_thread.daemon = True
1012 1012 self._spin_thread.start()
1013 1013
1014 1014 def stop_spin_thread(self):
1015 1015 """stop background spin_thread, if any"""
1016 1016 if self._spin_thread is not None:
1017 1017 self._stop_spinning.set()
1018 1018 self._spin_thread.join()
1019 1019 self._spin_thread = None
1020 1020
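A hedged sketch of the spin-thread lifecycle (assuming a connected client ``rc``)::

    rc.spin_thread(interval=0.5)   # flush incoming messages twice per second
    try:
        pass                       # long-running local work goes here
    finally:
        rc.stop_spin_thread()      # always stop the daemon thread when done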
1021 1021 def spin(self):
1022 1022 """Flush any registration notifications and execution results
1023 1023 waiting in the ZMQ queue.
1024 1024 """
1025 1025 if self._notification_socket:
1026 1026 self._flush_notifications()
1027 1027 if self._iopub_socket:
1028 1028 self._flush_iopub(self._iopub_socket)
1029 1029 if self._mux_socket:
1030 1030 self._flush_results(self._mux_socket)
1031 1031 if self._task_socket:
1032 1032 self._flush_results(self._task_socket)
1033 1033 if self._control_socket:
1034 1034 self._flush_control(self._control_socket)
1035 1035 if self._query_socket:
1036 1036 self._flush_ignored_hub_replies()
1037 1037
1038 1038 def wait(self, jobs=None, timeout=-1):
1039 1039 """waits on one or more `jobs`, for up to `timeout` seconds.
1040 1040
1041 1041 Parameters
1042 1042 ----------
1043 1043
1044 1044 jobs : int, str, or list of ints and/or strs, or one or more AsyncResult objects
1045 1045 ints are indices to self.history
1046 1046 strs are msg_ids
1047 1047 default: wait on all outstanding messages
1048 1048 timeout : float
1049 1049 a time in seconds, after which to give up.
1050 1050 default is -1, which means no timeout
1051 1051
1052 1052 Returns
1053 1053 -------
1054 1054
1055 1055 True : when all msg_ids are done
1056 1056 False : timeout reached, some msg_ids still outstanding
1057 1057 """
1058 1058 tic = time.time()
1059 1059 if jobs is None:
1060 1060 theids = self.outstanding
1061 1061 else:
1062 1062 if isinstance(jobs, string_types + (int, AsyncResult)):
1063 1063 jobs = [jobs]
1064 1064 theids = set()
1065 1065 for job in jobs:
1066 1066 if isinstance(job, int):
1067 1067 # index access
1068 1068 job = self.history[job]
1069 1069 elif isinstance(job, AsyncResult):
1070 1070 theids.update(job.msg_ids)
1071 1071 continue
1072 1072 theids.add(job)
1073 1073 if not theids.intersection(self.outstanding):
1074 1074 return True
1075 1075 self.spin()
1076 1076 while theids.intersection(self.outstanding):
1077 1077 if timeout >= 0 and ( time.time()-tic ) > timeout:
1078 1078 break
1079 1079 time.sleep(1e-3)
1080 1080 self.spin()
1081 1081 return len(theids.intersection(self.outstanding)) == 0
1082 1082
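For example, waiting on a single submission with a timeout (a sketch assuming a connected client ``rc``)::

    ar = rc[:].apply_async(lambda x: x * 2, 21)
    if rc.wait([ar], timeout=5):
        print(ar.get())                              # everything finished in time
    else:
        print('still outstanding:', rc.outstanding)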
1083 1083 #--------------------------------------------------------------------------
1084 1084 # Control methods
1085 1085 #--------------------------------------------------------------------------
1086 1086
1087 1087 @spin_first
1088 1088 def clear(self, targets=None, block=None):
1089 1089 """Clear the namespace in target(s)."""
1090 1090 block = self.block if block is None else block
1091 1091 targets = self._build_targets(targets)[0]
1092 1092 for t in targets:
1093 1093 self.session.send(self._control_socket, 'clear_request', content={}, ident=t)
1094 1094 error = False
1095 1095 if block:
1096 1096 self._flush_ignored_control()
1097 1097 for i in range(len(targets)):
1098 1098 idents,msg = self.session.recv(self._control_socket,0)
1099 1099 if self.debug:
1100 1100 pprint(msg)
1101 1101 if msg['content']['status'] != 'ok':
1102 1102 error = self._unwrap_exception(msg['content'])
1103 1103 else:
1104 1104 self._ignored_control_replies += len(targets)
1105 1105 if error:
1106 1106 raise error
1107 1107
1108 1108
1109 1109 @spin_first
1110 1110 def abort(self, jobs=None, targets=None, block=None):
1111 1111 """Abort specific jobs from the execution queues of target(s).
1112 1112
1113 1113 This is a mechanism to prevent jobs that have already been submitted
1114 1114 from executing.
1115 1115
1116 1116 Parameters
1117 1117 ----------
1118 1118
1119 1119 jobs : msg_id, list of msg_ids, or AsyncResult
1120 1120 The jobs to be aborted
1121 1121
1122 1122 If unspecified/None: abort all outstanding jobs.
1123 1123
1124 1124 """
1125 1125 block = self.block if block is None else block
1126 1126 jobs = jobs if jobs is not None else list(self.outstanding)
1127 1127 targets = self._build_targets(targets)[0]
1128 1128
1129 1129 msg_ids = []
1130 1130 if isinstance(jobs, string_types + (AsyncResult,)):
1131 1131 jobs = [jobs]
1132 1132 bad_ids = [obj for obj in jobs if not isinstance(obj, string_types + (AsyncResult,))]
1133 1133 if bad_ids:
1134 1134 raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0])
1135 1135 for j in jobs:
1136 1136 if isinstance(j, AsyncResult):
1137 1137 msg_ids.extend(j.msg_ids)
1138 1138 else:
1139 1139 msg_ids.append(j)
1140 1140 content = dict(msg_ids=msg_ids)
1141 1141 for t in targets:
1142 1142 self.session.send(self._control_socket, 'abort_request',
1143 1143 content=content, ident=t)
1144 1144 error = False
1145 1145 if block:
1146 1146 self._flush_ignored_control()
1147 1147 for i in range(len(targets)):
1148 1148 idents,msg = self.session.recv(self._control_socket,0)
1149 1149 if self.debug:
1150 1150 pprint(msg)
1151 1151 if msg['content']['status'] != 'ok':
1152 1152 error = self._unwrap_exception(msg['content'])
1153 1153 else:
1154 1154 self._ignored_control_replies += len(targets)
1155 1155 if error:
1156 1156 raise error
1157 1157
1158 1158 @spin_first
1159 1159 def shutdown(self, targets='all', restart=False, hub=False, block=None):
1160 1160 """Terminates one or more engine processes, optionally including the hub.
1161 1161
1162 1162 Parameters
1163 1163 ----------
1164 1164
1165 1165 targets: list of ints or 'all' [default: all]
1166 1166 Which engines to shutdown.
1167 1167 hub: bool [default: False]
1168 1168 Whether to include the Hub. hub=True implies targets='all'.
1169 1169 block: bool [default: self.block]
1170 1170 Whether to wait for clean shutdown replies or not.
1171 1171 restart: bool [default: False]
1172 1172 NOT IMPLEMENTED
1173 1173 whether to restart engines after shutting them down.
1174 1174 """
1175 1175 from IPython.parallel.error import NoEnginesRegistered
1176 1176 if restart:
1177 1177 raise NotImplementedError("Engine restart is not yet implemented")
1178 1178
1179 1179 block = self.block if block is None else block
1180 1180 if hub:
1181 1181 targets = 'all'
1182 1182 try:
1183 1183 targets = self._build_targets(targets)[0]
1184 1184 except NoEnginesRegistered:
1185 1185 targets = []
1186 1186 for t in targets:
1187 1187 self.session.send(self._control_socket, 'shutdown_request',
1188 1188 content={'restart':restart},ident=t)
1189 1189 error = False
1190 1190 if block or hub:
1191 1191 self._flush_ignored_control()
1192 1192 for i in range(len(targets)):
1193 1193 idents,msg = self.session.recv(self._control_socket, 0)
1194 1194 if self.debug:
1195 1195 pprint(msg)
1196 1196 if msg['content']['status'] != 'ok':
1197 1197 error = self._unwrap_exception(msg['content'])
1198 1198 else:
1199 1199 self._ignored_control_replies += len(targets)
1200 1200
1201 1201 if hub:
1202 1202 time.sleep(0.25)
1203 1203 self.session.send(self._query_socket, 'shutdown_request')
1204 1204 idents,msg = self.session.recv(self._query_socket, 0)
1205 1205 if self.debug:
1206 1206 pprint(msg)
1207 1207 if msg['content']['status'] != 'ok':
1208 1208 error = self._unwrap_exception(msg['content'])
1209 1209
1210 1210 if error:
1211 1211 raise error
1212 1212
1213 1213 #--------------------------------------------------------------------------
1214 1214 # Execution related methods
1215 1215 #--------------------------------------------------------------------------
1216 1216
1217 1217 def _maybe_raise(self, result):
1218 1218 """wrapper for maybe raising an exception if apply failed."""
1219 1219 if isinstance(result, error.RemoteError):
1220 1220 raise result
1221 1221
1222 1222 return result
1223 1223
1224 1224 def send_apply_request(self, socket, f, args=None, kwargs=None, metadata=None, track=False,
1225 1225 ident=None):
1226 1226 """construct and send an apply message via a socket.
1227 1227
1228 1228 This is the principal method with which all engine execution is performed by views.
1229 1229 """
1230 1230
1231 1231 if self._closed:
1232 1232 raise RuntimeError("Client cannot be used after its sockets have been closed")
1233 1233
1234 1234 # defaults:
1235 1235 args = args if args is not None else []
1236 1236 kwargs = kwargs if kwargs is not None else {}
1237 1237 metadata = metadata if metadata is not None else {}
1238 1238
1239 1239 # validate arguments
1240 1240 if not callable(f) and not isinstance(f, Reference):
1241 1241 raise TypeError("f must be callable, not %s"%type(f))
1242 1242 if not isinstance(args, (tuple, list)):
1243 1243 raise TypeError("args must be tuple or list, not %s"%type(args))
1244 1244 if not isinstance(kwargs, dict):
1245 1245 raise TypeError("kwargs must be dict, not %s"%type(kwargs))
1246 1246 if not isinstance(metadata, dict):
1247 1247 raise TypeError("metadata must be dict, not %s"%type(metadata))
1248 1248
1249 1249 bufs = serialize.pack_apply_message(f, args, kwargs,
1250 1250 buffer_threshold=self.session.buffer_threshold,
1251 1251 item_threshold=self.session.item_threshold,
1252 1252 )
1253 1253
1254 1254 msg = self.session.send(socket, "apply_request", buffers=bufs, ident=ident,
1255 1255 metadata=metadata, track=track)
1256 1256
1257 1257 msg_id = msg['header']['msg_id']
1258 1258 self.outstanding.add(msg_id)
1259 1259 if ident:
1260 1260 # possibly routed to a specific engine
1261 1261 if isinstance(ident, list):
1262 1262 ident = ident[-1]
1263 1263 if ident in self._engines.values():
1264 1264 # save for later, in case of engine death
1265 1265 self._outstanding_dict[ident].add(msg_id)
1266 1266 self.history.append(msg_id)
1267 1267 self.metadata[msg_id]['submitted'] = datetime.now()
1268 1268
1269 1269 return msg
1270 1270
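Views are the usual entry point rather than this method directly; a sketch of the relationship (assuming a connected client ``rc``)::

    dv = rc[:]                        # DirectView over all current engines
    ar = dv.apply_async(pow, 2, 10)   # constructs an apply_request and routes it
                                      # through send_apply_request under the hood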
1271 1271 def send_execute_request(self, socket, code, silent=True, metadata=None, ident=None):
1272 1272 """construct and send an execute request via a socket.
1273 1273
1274 1274 """
1275 1275
1276 1276 if self._closed:
1277 1277 raise RuntimeError("Client cannot be used after its sockets have been closed")
1278 1278
1279 1279 # defaults:
1280 1280 metadata = metadata if metadata is not None else {}
1281 1281
1282 1282 # validate arguments
1283 1283 if not isinstance(code, string_types):
1284 1284 raise TypeError("code must be text, not %s" % type(code))
1285 1285 if not isinstance(metadata, dict):
1286 1286 raise TypeError("metadata must be dict, not %s" % type(metadata))
1287 1287
1288 1288 content = dict(code=code, silent=bool(silent), user_variables=[], user_expressions={})
1289 1289
1290 1290
1291 1291 msg = self.session.send(socket, "execute_request", content=content, ident=ident,
1292 1292 metadata=metadata)
1293 1293
1294 1294 msg_id = msg['header']['msg_id']
1295 1295 self.outstanding.add(msg_id)
1296 1296 if ident:
1297 1297 # possibly routed to a specific engine
1298 1298 if isinstance(ident, list):
1299 1299 ident = ident[-1]
1300 1300 if ident in self._engines.values():
1301 1301 # save for later, in case of engine death
1302 1302 self._outstanding_dict[ident].add(msg_id)
1303 1303 self.history.append(msg_id)
1304 1304 self.metadata[msg_id]['submitted'] = datetime.now()
1305 1305
1306 1306 return msg
1307 1307
1308 1308 #--------------------------------------------------------------------------
1309 1309 # construct a View object
1310 1310 #--------------------------------------------------------------------------
1311 1311
1312 1312 def load_balanced_view(self, targets=None):
1313 1313 """construct a LoadBalancedView object.
1314 1314
1315 1315 If no arguments are specified, create a LoadBalancedView
1316 1316 using all engines.
1317 1317
1318 1318 Parameters
1319 1319 ----------
1320 1320
1321 1321 targets: list,slice,int,etc. [default: use all engines]
1322 1322 The subset of engines across which to load-balance
1323 1323 """
1324 1324 if targets == 'all':
1325 1325 targets = None
1326 1326 if targets is not None:
1327 1327 targets = self._build_targets(targets)[1]
1328 1328 return LoadBalancedView(client=self, socket=self._task_socket, targets=targets)
1329 1329
1330 1330 def direct_view(self, targets='all'):
1331 1331 """construct a DirectView object.
1332 1332
1333 1333 If no targets are specified, create a DirectView using all engines.
1334 1334
1335 1335 rc.direct_view('all') is distinguished from rc[:] in that 'all' will
1336 1336 evaluate the target engines at each execution, whereas rc[:] will connect to
1337 1337 all *current* engines, and that list will not change.
1338 1338
1339 1339 That is, 'all' will always use all engines, whereas rc[:] will not use
1340 1340 engines added after the DirectView is constructed.
1341 1341
1342 1342 Parameters
1343 1343 ----------
1344 1344
1345 1345 targets: list,slice,int,etc. [default: use all engines]
1346 1346 The engines to use for the View
1347 1347 """
1348 1348 single = isinstance(targets, int)
1349 1349 # allow 'all' to be lazily evaluated at each execution
1350 1350 if targets != 'all':
1351 1351 targets = self._build_targets(targets)[1]
1352 1352 if single:
1353 1353 targets = targets[0]
1354 1354 return DirectView(client=self, socket=self._mux_socket, targets=targets)
1355 1355
1356 1356 #--------------------------------------------------------------------------
1357 1357 # Query methods
1358 1358 #--------------------------------------------------------------------------
1359 1359
1360 1360 @spin_first
1361 1361 def get_result(self, indices_or_msg_ids=None, block=None):
1362 1362 """Retrieve a result by msg_id or history index, wrapped in an AsyncResult object.
1363 1363
1364 1364 If the client already has the results, no request to the Hub will be made.
1365 1365
1366 1366 This is a convenient way to construct AsyncResult objects, which are wrappers
1367 1367 that include metadata about execution, and allow for awaiting results that
1368 1368 were not submitted by this Client.
1369 1369
1370 1370 It can also be a convenient way to retrieve the metadata associated with
1371 1371 blocking execution, since it always retrieves the result's metadata.
1372 1372
1373 1373 Examples
1374 1374 --------
1375 1375 ::
1376 1376
1377 1377 In [10]: ar = client.get_result(-1)
1378 1378
1379 1379 Parameters
1380 1380 ----------
1381 1381
1382 1382 indices_or_msg_ids : integer history index, str msg_id, or list of either
1383 1383 The history indices or msg_ids of the results to be retrieved
1384 1384
1385 1385 block : bool
1386 1386 Whether to wait for the result to be done
1387 1387
1388 1388 Returns
1389 1389 -------
1390 1390
1391 1391 AsyncResult
1392 1392 A single AsyncResult object will always be returned.
1393 1393
1394 1394 AsyncHubResult
1395 1395 A subclass of AsyncResult that retrieves results from the Hub
1396 1396
1397 1397 """
1398 1398 block = self.block if block is None else block
1399 1399 if indices_or_msg_ids is None:
1400 1400 indices_or_msg_ids = -1
1401 1401
1402 1402 single_result = False
1403 1403 if not isinstance(indices_or_msg_ids, (list,tuple)):
1404 1404 indices_or_msg_ids = [indices_or_msg_ids]
1405 1405 single_result = True
1406 1406
1407 1407 theids = []
1408 1408 for id in indices_or_msg_ids:
1409 1409 if isinstance(id, int):
1410 1410 id = self.history[id]
1411 1411 if not isinstance(id, string_types):
1412 1412 raise TypeError("indices must be str or int, not %r"%id)
1413 1413 theids.append(id)
1414 1414
1415 1415 local_ids = [msg_id for msg_id in theids if (msg_id in self.outstanding or msg_id in self.results)]
1416 1416 remote_ids = [msg_id for msg_id in theids if msg_id not in local_ids]
1417 1417
1418 1418 # given a single msg_id initially, get_result should return the result itself,
1419 1419 # not a length-one list
1420 1420 if single_result:
1421 1421 theids = theids[0]
1422 1422
1423 1423 if remote_ids:
1424 1424 ar = AsyncHubResult(self, msg_ids=theids)
1425 1425 else:
1426 1426 ar = AsyncResult(self, msg_ids=theids)
1427 1427
1428 1428 if block:
1429 1429 ar.wait()
1430 1430
1431 1431 return ar
1432 1432
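A brief usage sketch (assuming a connected client ``rc`` with at least one entry in its history)::

    ar = rc.get_result(-1)     # AsyncResult for the most recent task
    ar.wait()
    print(ar.metadata)         # execution metadata retrieved alongside the result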
1433 1433 @spin_first
1434 1434 def resubmit(self, indices_or_msg_ids=None, metadata=None, block=None):
1435 1435 """Resubmit one or more tasks.
1436 1436
1437 1437 in-flight tasks may not be resubmitted.
1438 1438
1439 1439 Parameters
1440 1440 ----------
1441 1441
1442 1442 indices_or_msg_ids : integer history index, str msg_id, or list of either
1443 1443 The history indices or msg_ids of the tasks to be resubmitted
1444 1444
1445 1445 block : bool
1446 1446 Whether to wait for the result to be done
1447 1447
1448 1448 Returns
1449 1449 -------
1450 1450
1451 1451 AsyncHubResult
1452 1452 A subclass of AsyncResult that retrieves results from the Hub
1453 1453
1454 1454 """
1455 1455 block = self.block if block is None else block
1456 1456 if indices_or_msg_ids is None:
1457 1457 indices_or_msg_ids = -1
1458 1458
1459 1459 if not isinstance(indices_or_msg_ids, (list,tuple)):
1460 1460 indices_or_msg_ids = [indices_or_msg_ids]
1461 1461
1462 1462 theids = []
1463 1463 for id in indices_or_msg_ids:
1464 1464 if isinstance(id, int):
1465 1465 id = self.history[id]
1466 1466 if not isinstance(id, string_types):
1467 1467 raise TypeError("indices must be str or int, not %r"%id)
1468 1468 theids.append(id)
1469 1469
1470 1470 content = dict(msg_ids = theids)
1471 1471
1472 1472 self.session.send(self._query_socket, 'resubmit_request', content)
1473 1473
1474 1474 zmq.select([self._query_socket], [], [])
1475 1475 idents,msg = self.session.recv(self._query_socket, zmq.NOBLOCK)
1476 1476 if self.debug:
1477 1477 pprint(msg)
1478 1478 content = msg['content']
1479 1479 if content['status'] != 'ok':
1480 1480 raise self._unwrap_exception(content)
1481 1481 mapping = content['resubmitted']
1482 1482 new_ids = [ mapping[msg_id] for msg_id in theids ]
1483 1483
1484 1484 ar = AsyncHubResult(self, msg_ids=new_ids)
1485 1485
1486 1486 if block:
1487 1487 ar.wait()
1488 1488
1489 1489 return ar
1490 1490
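A hedged sketch (``msg_id`` here stands for any completed task's id)::

    ahr = rc.resubmit(msg_id)  # AsyncHubResult for the re-run task
    ahr.wait()
    print(ahr.get())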
1491 1491 @spin_first
1492 1492 def result_status(self, msg_ids, status_only=True):
1493 1493 """Check on the status of the result(s) of the apply request with `msg_ids`.
1494 1494
1495 1495 If status_only is False, then the actual results will be retrieved, else
1496 1496 only the status of the results will be checked.
1497 1497
1498 1498 Parameters
1499 1499 ----------
1500 1500
1501 1501 msg_ids : list of msg_ids
1502 1502 if int:
1503 1503 Passed as index to self.history for convenience.
1504 1504 status_only : bool (default: True)
1505 1505 if False:
1506 1506 Retrieve the actual results of completed tasks.
1507 1507
1508 1508 Returns
1509 1509 -------
1510 1510
1511 1511 results : dict
1512 1512 There will always be the keys 'pending' and 'completed', which will
1513 1513 be lists of msg_ids that are incomplete or complete. If `status_only`
1514 1514 is False, then completed results will be keyed by their `msg_id`.
1515 1515 """
1516 1516 if not isinstance(msg_ids, (list,tuple)):
1517 1517 msg_ids = [msg_ids]
1518 1518
1519 1519 theids = []
1520 1520 for msg_id in msg_ids:
1521 1521 if isinstance(msg_id, int):
1522 1522 msg_id = self.history[msg_id]
1523 1523 if not isinstance(msg_id, string_types):
1524 1524 raise TypeError("msg_ids must be str, not %r"%msg_id)
1525 1525 theids.append(msg_id)
1526 1526
1527 1527 completed = []
1528 1528 local_results = {}
1529 1529
1530 1530 # comment this block out to temporarily disable local shortcut:
1531 1531 for msg_id in theids:
1532 1532 if msg_id in self.results:
1533 1533 completed.append(msg_id)
1534 1534 local_results[msg_id] = self.results[msg_id]
1535 1535 theids.remove(msg_id)
1536 1536
1537 1537 if theids: # some not locally cached
1538 1538 content = dict(msg_ids=theids, status_only=status_only)
1539 1539 msg = self.session.send(self._query_socket, "result_request", content=content)
1540 1540 zmq.select([self._query_socket], [], [])
1541 1541 idents,msg = self.session.recv(self._query_socket, zmq.NOBLOCK)
1542 1542 if self.debug:
1543 1543 pprint(msg)
1544 1544 content = msg['content']
1545 1545 if content['status'] != 'ok':
1546 1546 raise self._unwrap_exception(content)
1547 1547 buffers = msg['buffers']
1548 1548 else:
1549 1549 content = dict(completed=[],pending=[])
1550 1550
1551 1551 content['completed'].extend(completed)
1552 1552
1553 1553 if status_only:
1554 1554 return content
1555 1555
1556 1556 failures = []
1557 1557 # load cached results into result:
1558 1558 content.update(local_results)
1559 1559
1560 1560 # update cache with results:
1561 1561 for msg_id in sorted(theids):
1562 1562 if msg_id in content['completed']:
1563 1563 rec = content[msg_id]
1564 1564 parent = extract_dates(rec['header'])
1565 1565 header = extract_dates(rec['result_header'])
1566 1566 rcontent = rec['result_content']
1567 1567 iodict = rec['io']
1568 1568 if isinstance(rcontent, str):
1569 1569 rcontent = self.session.unpack(rcontent)
1570 1570
1571 1571 md = self.metadata[msg_id]
1572 1572 md_msg = dict(
1573 1573 content=rcontent,
1574 1574 parent_header=parent,
1575 1575 header=header,
1576 1576 metadata=rec['result_metadata'],
1577 1577 )
1578 1578 md.update(self._extract_metadata(md_msg))
1579 1579 if rec.get('received'):
1580 1580 md['received'] = parse_date(rec['received'])
1581 1581 md.update(iodict)
1582 1582
1583 1583 if rcontent['status'] == 'ok':
1584 1584 if header['msg_type'] == 'apply_reply':
1585 1585 res,buffers = serialize.unserialize_object(buffers)
1586 1586 elif header['msg_type'] == 'execute_reply':
1587 1587 res = ExecuteReply(msg_id, rcontent, md)
1588 1588 else:
1589 1589 raise KeyError("unhandled msg type: %r" % header['msg_type'])
1590 1590 else:
1591 1591 res = self._unwrap_exception(rcontent)
1592 1592 failures.append(res)
1593 1593
1594 1594 self.results[msg_id] = res
1595 1595 content[msg_id] = res
1596 1596
1597 1597 if len(theids) == 1 and failures:
1598 1598 raise failures[0]
1599 1599
1600 1600 error.collect_exceptions(failures, "result_status")
1601 1601 return content
1602 1602
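For instance, a status-only poll over recent history (a sketch assuming a connected client ``rc``)::

    status = rc.result_status(rc.history[-5:])
    print('%i completed, %i pending' % (len(status['completed']),
                                        len(status['pending'])))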
1603 1603 @spin_first
1604 1604 def queue_status(self, targets='all', verbose=False):
1605 1605 """Fetch the status of engine queues.
1606 1606
1607 1607 Parameters
1608 1608 ----------
1609 1609
1610 1610 targets : int/str/list of ints/strs
1611 1611 the engines whose states are to be queried.
1612 1612 default : all
1613 1613 verbose : bool
1614 1614 Whether to return lengths only, or lists of ids for each element
1615 1615 """
1616 1616 if targets == 'all':
1617 1617 # allow 'all' to be evaluated lazily by the Hub
1618 1618 engine_ids = None
1619 1619 else:
1620 1620 engine_ids = self._build_targets(targets)[1]
1621 1621 content = dict(targets=engine_ids, verbose=verbose)
1622 1622 self.session.send(self._query_socket, "queue_request", content=content)
1623 1623 idents,msg = self.session.recv(self._query_socket, 0)
1624 1624 if self.debug:
1625 1625 pprint(msg)
1626 1626 content = msg['content']
1627 1627 status = content.pop('status')
1628 1628 if status != 'ok':
1629 1629 raise self._unwrap_exception(content)
1630 1630 content = rekey(content)
1631 1631 if isinstance(targets, int):
1632 1632 return content[targets]
1633 1633 else:
1634 1634 return content
1635 1635
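A short usage sketch; the shape of the reply shown in the comment is illustrative::

    qs = rc.queue_status()
    # e.g. {0: {'queue': 0, 'tasks': 1, 'completed': 12}, 'unassigned': 0}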
1636 1636 def _build_msgids_from_target(self, targets=None):
1637 1637 """Build a list of msg_ids from the list of engine targets"""
1638 1638 if not targets: # needed as _build_targets otherwise uses all engines
1639 1639 return []
1640 1640 target_ids = self._build_targets(targets)[0]
1641 1641 return [md_id for md_id in self.metadata if self.metadata[md_id]["engine_uuid"] in target_ids]
1642 1642
1643 1643 def _build_msgids_from_jobs(self, jobs=None):
1644 1644 """Build a list of msg_ids from "jobs" """
1645 1645 if not jobs:
1646 1646 return []
1647 1647 msg_ids = []
1648 1648 if isinstance(jobs, string_types + (AsyncResult,)):
1649 1649 jobs = [jobs]
1650 1650 bad_ids = [obj for obj in jobs if not isinstance(obj, string_types + (AsyncResult,))]
1651 1651 if bad_ids:
1652 1652 raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0])
1653 1653 for j in jobs:
1654 1654 if isinstance(j, AsyncResult):
1655 1655 msg_ids.extend(j.msg_ids)
1656 1656 else:
1657 1657 msg_ids.append(j)
1658 1658 return msg_ids
1659 1659
1660 1660 def purge_local_results(self, jobs=[], targets=[]):
1661 1661 """Clears the client caches of results and their metadata.
1662 1662
1663 1663 Individual results can be purged by msg_id, or the entire
1664 1664 history of specific targets can be purged.
1665 1665
1666 1666 Use `purge_local_results('all')` to scrub everything from the Client's
1667 1667 results and metadata caches.
1668 1668
1669 1669 After this call all `AsyncResults` are invalid and should be discarded.
1670 1670
1671 1671 If you must "reget" the results, you can still do so by using
1672 1672 `client.get_result(msg_id)` or `client.get_result(asyncresult)`. This will
1673 1673 redownload the results from the hub if they are still available
1674 1674 (i.e. `client.purge_hub_results(...)` has not been called).
1675 1675
1676 1676 Parameters
1677 1677 ----------
1678 1678
1679 1679 jobs : str or list of str or AsyncResult objects
1680 1680 the msg_ids whose results should be purged.
1681 1681 targets : int/list of ints
1682 1682 The engines, by integer ID, whose entire result histories are to be purged.
1683 1683
1684 1684 Raises
1685 1685 ------
1686 1686
1687 1687 RuntimeError : if any of the tasks to be purged are still outstanding.
1688 1688
1689 1689 """
1690 1690 if not targets and not jobs:
1691 1691 raise ValueError("Must specify at least one of `targets` and `jobs`")
1692 1692
1693 1693 if jobs == 'all':
1694 1694 if self.outstanding:
1695 1695 raise RuntimeError("Can't purge outstanding tasks: %s" % self.outstanding)
1696 1696 self.results.clear()
1697 1697 self.metadata.clear()
1698 1698 else:
1699 1699 msg_ids = set()
1700 1700 msg_ids.update(self._build_msgids_from_target(targets))
1701 1701 msg_ids.update(self._build_msgids_from_jobs(jobs))
1702 1702 still_outstanding = self.outstanding.intersection(msg_ids)
1703 1703 if still_outstanding:
1704 1704 raise RuntimeError("Can't purge outstanding tasks: %s" % still_outstanding)
1705 1705 for mid in msg_ids:
1706 1706 self.results.pop(mid)
1707 1707 self.metadata.pop(mid)
1708 1708
1709 1709
1710 1710 @spin_first
1711 1711 def purge_hub_results(self, jobs=[], targets=[]):
1712 1712 """Tell the Hub to forget results.
1713 1713
1714 1714 Individual results can be purged by msg_id, or the entire
1715 1715 history of specific targets can be purged.
1716 1716
1717 1717 Use `purge_results('all')` to scrub everything from the Hub's db.
1718 1718
1719 1719 Parameters
1720 1720 ----------
1721 1721
1722 1722 jobs : str or list of str or AsyncResult objects
1723 1723 the msg_ids whose results should be forgotten.
1724 1724 targets : int/str/list of ints/strs
1725 1725 The targets, by int_id, whose entire history is to be purged.
1726 1726
1727 1727 default : None
1728 1728 """
1729 1729 if not targets and not jobs:
1730 1730 raise ValueError("Must specify at least one of `targets` and `jobs`")
1731 1731 if targets:
1732 1732 targets = self._build_targets(targets)[1]
1733 1733
1734 1734 # construct msg_ids from jobs
1735 1735 if jobs == 'all':
1736 1736 msg_ids = jobs
1737 1737 else:
1738 1738 msg_ids = self._build_msgids_from_jobs(jobs)
1739 1739
1740 1740 content = dict(engine_ids=targets, msg_ids=msg_ids)
1741 1741 self.session.send(self._query_socket, "purge_request", content=content)
1742 1742 idents, msg = self.session.recv(self._query_socket, 0)
1743 1743 if self.debug:
1744 1744 pprint(msg)
1745 1745 content = msg['content']
1746 1746 if content['status'] != 'ok':
1747 1747 raise self._unwrap_exception(content)
1748 1748
1749 1749 def purge_results(self, jobs=[], targets=[]):
1750 1750 """Clears the cached results from both the hub and the local client
1751 1751
1752 1752 Individual results can be purged by msg_id, or the entire
1753 1753 history of specific targets can be purged.
1754 1754
1755 1755 Use `purge_results('all')` to scrub every cached result from both the Hub's and
1756 1756 the Client's db.
1757 1757
1758 1758 Equivalent to calling both `purge_hub_results()` and `purge_local_results()` with
1759 1759 the same arguments.
1760 1760
1761 1761 Parameters
1762 1762 ----------
1763 1763
1764 1764 jobs : str or list of str or AsyncResult objects
1765 1765 the msg_ids whose results should be forgotten.
1766 1766 targets : int/str/list of ints/strs
1767 1767 The targets, by int_id, whose entire history is to be purged.
1768 1768
1769 1769 default : None
1770 1770 """
1771 1771 self.purge_local_results(jobs=jobs, targets=targets)
1772 1772 self.purge_hub_results(jobs=jobs, targets=targets)
1773 1773
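A typical cleanup sketch after a batch completes (assuming a connected client ``rc``)::

    rc.wait()                  # make sure nothing is still outstanding
    rc.purge_results('all')    # scrub both the Hub's db and the local caches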
1774 1774 def purge_everything(self):
1775 1775 """Clears all content from previous Tasks from both the hub and the local client
1776 1776
1777 1777 In addition to calling `purge_results("all")` it also deletes the history and
1778 1778 other bookkeeping lists.
1779 1779 """
1780 1780 self.purge_results("all")
1781 1781 self.history = []
1782 1782 self.session.digest_history.clear()
1783 1783
1784 1784 @spin_first
1785 1785 def hub_history(self):
1786 1786 """Get the Hub's history
1787 1787
1788 1788 Just like the Client, the Hub has a history, which is a list of msg_ids.
1789 1789 This will contain the history of all clients, and, depending on configuration,
1790 1790 may contain history across multiple cluster sessions.
1791 1791
1792 1792 Any msg_id returned here is a valid argument to `get_result`.
1793 1793
1794 1794 Returns
1795 1795 -------
1796 1796
1797 1797 msg_ids : list of strs
1798 1798 list of all msg_ids, ordered by task submission time.
1799 1799 """
1800 1800
1801 1801 self.session.send(self._query_socket, "history_request", content={})
1802 1802 idents, msg = self.session.recv(self._query_socket, 0)
1803 1803
1804 1804 if self.debug:
1805 1805 pprint(msg)
1806 1806 content = msg['content']
1807 1807 if content['status'] != 'ok':
1808 1808 raise self._unwrap_exception(content)
1809 1809 else:
1810 1810 return content['history']
1811 1811
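For example (a sketch; assumes a connected client ``rc``)::

    msg_ids = rc.hub_history()
    if msg_ids:
        ar = rc.get_result(msg_ids[-1])   # any returned msg_id is valid here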
1812 1812 @spin_first
1813 1813 def db_query(self, query, keys=None):
1814 1814 """Query the Hub's TaskRecord database
1815 1815
1816 1816 This will return a list of task record dicts that match `query`
1817 1817
1818 1818 Parameters
1819 1819 ----------
1820 1820
1821 1821 query : mongodb query dict
1822 1822 The search dict. See mongodb query docs for details.
1823 1823 keys : list of strs [optional]
1824 1824 The subset of keys to be returned. The default is to fetch everything but buffers.
1825 1825 'msg_id' will *always* be included.
1826 1826 """
1827 1827 if isinstance(keys, string_types):
1828 1828 keys = [keys]
1829 1829 content = dict(query=query, keys=keys)
1830 1830 self.session.send(self._query_socket, "db_request", content=content)
1831 1831 idents, msg = self.session.recv(self._query_socket, 0)
1832 1832 if self.debug:
1833 1833 pprint(msg)
1834 1834 content = msg['content']
1835 1835 if content['status'] != 'ok':
1836 1836 raise self._unwrap_exception(content)
1837 1837
1838 1838 records = content['records']
1839 1839
1840 1840 buffer_lens = content['buffer_lens']
1841 1841 result_buffer_lens = content['result_buffer_lens']
1842 1842 buffers = msg['buffers']
1843 1843 has_bufs = buffer_lens is not None
1844 1844 has_rbufs = result_buffer_lens is not None
1845 1845 for i,rec in enumerate(records):
1846 1846 # unpack datetime objects
1847 1847 for hkey in ('header', 'result_header'):
1848 1848 if hkey in rec:
1849 1849 rec[hkey] = extract_dates(rec[hkey])
1850 1850 for dtkey in ('submitted', 'started', 'completed', 'received'):
1851 1851 if dtkey in rec:
1852 1852 rec[dtkey] = parse_date(rec[dtkey])
1853 1853 # relink buffers
1854 1854 if has_bufs:
1855 1855 blen = buffer_lens[i]
1856 1856 rec['buffers'], buffers = buffers[:blen],buffers[blen:]
1857 1857 if has_rbufs:
1858 1858 blen = result_buffer_lens[i]
1859 1859 rec['result_buffers'], buffers = buffers[:blen],buffers[blen:]
1860 1860
1861 1861 return records
1862 1862
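An illustrative query (assumes the Hub was configured with a persistent backend such as SQLiteDB; the query syntax follows mongodb conventions as noted above)::

    recs = rc.db_query({'completed': {'$ne': None}}, keys=['msg_id', 'completed'])
    print('%i completed tasks on record' % len(recs))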
1863 1863 __all__ = [ 'Client' ]
@@ -1,1440 +1,1440 b''
1 1 """The IPython Controller Hub with 0MQ
2 2
3 3 This is the master object that handles connections from engines and clients,
4 4 and monitors traffic through the various queues.
5 5 """
6 6
7 7 # Copyright (c) IPython Development Team.
8 8 # Distributed under the terms of the Modified BSD License.
9 9
10 10 from __future__ import print_function
11 11
12 12 import json
13 13 import os
14 14 import sys
15 15 import time
16 16 from datetime import datetime
17 17
18 18 import zmq
19 19 from zmq.eventloop import ioloop
20 20 from zmq.eventloop.zmqstream import ZMQStream
21 21
22 22 # internal:
23 23 from IPython.utils.importstring import import_item
24 24 from IPython.utils.jsonutil import extract_dates
25 25 from IPython.utils.localinterfaces import localhost
26 26 from IPython.utils.py3compat import cast_bytes, unicode_type, iteritems
27 27 from IPython.utils.traitlets import (
28 28 HasTraits, Instance, Integer, Unicode, Dict, Set, Tuple, CBytes, DottedObjectName
29 29 )
30 30
31 31 from IPython.parallel import error, util
32 32 from IPython.parallel.factory import RegistrationFactory
33 33
34 34 from IPython.kernel.zmq.session import SessionFactory
35 35
36 36 from .heartmonitor import HeartMonitor
37 37
38 38 #-----------------------------------------------------------------------------
39 39 # Code
40 40 #-----------------------------------------------------------------------------
41 41
42 42 def _passer(*args, **kwargs):
43 43 return
44 44
45 45 def _printer(*args, **kwargs):
46 46 print (args)
47 47 print (kwargs)
48 48
49 49 def empty_record():
50 50 """Return an empty dict with all record keys."""
51 51 return {
52 52 'msg_id' : None,
53 53 'header' : None,
54 54 'metadata' : None,
55 55 'content': None,
56 56 'buffers': None,
57 57 'submitted': None,
58 58 'client_uuid' : None,
59 59 'engine_uuid' : None,
60 60 'started': None,
61 61 'completed': None,
62 62 'resubmitted': None,
63 63 'received': None,
64 64 'result_header' : None,
65 65 'result_metadata' : None,
66 66 'result_content' : None,
67 67 'result_buffers' : None,
68 68 'queue' : None,
69 69 'execute_input' : None,
70 70 'execute_result': None,
71 'pyerr': None,
71 'error': None,
72 72 'stdout': '',
73 73 'stderr': '',
74 74 }
75 75
76 76 def init_record(msg):
77 77 """Initialize a TaskRecord based on a request."""
78 78 header = msg['header']
79 79 return {
80 80 'msg_id' : header['msg_id'],
81 81 'header' : header,
82 82 'content': msg['content'],
83 83 'metadata': msg['metadata'],
84 84 'buffers': msg['buffers'],
85 85 'submitted': header['date'],
86 86 'client_uuid' : None,
87 87 'engine_uuid' : None,
88 88 'started': None,
89 89 'completed': None,
90 90 'resubmitted': None,
91 91 'received': None,
92 92 'result_header' : None,
93 93 'result_metadata': None,
94 94 'result_content' : None,
95 95 'result_buffers' : None,
96 96 'queue' : None,
97 97 'execute_input' : None,
98 98 'execute_result': None,
99 'pyerr': None,
99 'error': None,
100 100 'stdout': '',
101 101 'stderr': '',
102 102 }
103 103
104 104
105 105 class EngineConnector(HasTraits):
106 106 """A simple object for accessing the various zmq connections of an object.
107 107 Attributes are:
108 108 id (int): engine ID
109 109 uuid (unicode): engine UUID
110 110 pending: set of msg_ids
111 111 stallback: DelayedCallback for stalled registration
112 112 """
113 113
114 114 id = Integer(0)
115 115 uuid = Unicode()
116 116 pending = Set()
117 117 stallback = Instance(ioloop.DelayedCallback)
118 118
119 119
120 120 _db_shortcuts = {
121 121 'sqlitedb' : 'IPython.parallel.controller.sqlitedb.SQLiteDB',
122 122 'mongodb' : 'IPython.parallel.controller.mongodb.MongoDB',
123 123 'dictdb' : 'IPython.parallel.controller.dictdb.DictDB',
124 124 'nodb' : 'IPython.parallel.controller.dictdb.NoDB',
125 125 }
126 126
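These shortcut names can be used directly when configuring the Hub; a sketch of an illustrative profile file::

    # ipcontroller_config.py
    c = get_config()
    c.HubFactory.db_class = 'sqlitedb'   # expands to IPython.parallel.controller.sqlitedb.SQLiteDB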
127 127 class HubFactory(RegistrationFactory):
128 128 """The Configurable for setting up a Hub."""
129 129
130 130 # port-pairs for monitoredqueues:
131 131 hb = Tuple(Integer,Integer,config=True,
132 132 help="""PUB/ROUTER Port pair for Engine heartbeats""")
133 133 def _hb_default(self):
134 134 return tuple(util.select_random_ports(2))
135 135
136 136 mux = Tuple(Integer,Integer,config=True,
137 137 help="""Client/Engine Port pair for MUX queue""")
138 138
139 139 def _mux_default(self):
140 140 return tuple(util.select_random_ports(2))
141 141
142 142 task = Tuple(Integer,Integer,config=True,
143 143 help="""Client/Engine Port pair for Task queue""")
144 144 def _task_default(self):
145 145 return tuple(util.select_random_ports(2))
146 146
147 147 control = Tuple(Integer,Integer,config=True,
148 148 help="""Client/Engine Port pair for Control queue""")
149 149
150 150 def _control_default(self):
151 151 return tuple(util.select_random_ports(2))
152 152
153 153 iopub = Tuple(Integer,Integer,config=True,
154 154 help="""Client/Engine Port pair for IOPub relay""")
155 155
156 156 def _iopub_default(self):
157 157 return tuple(util.select_random_ports(2))
158 158
159 159 # single ports:
160 160 mon_port = Integer(config=True,
161 161 help="""Monitor (SUB) port for queue traffic""")
162 162
163 163 def _mon_port_default(self):
164 164 return util.select_random_ports(1)[0]
165 165
166 166 notifier_port = Integer(config=True,
167 167 help="""PUB port for sending engine status notifications""")
168 168
169 169 def _notifier_port_default(self):
170 170 return util.select_random_ports(1)[0]
171 171
172 172 engine_ip = Unicode(config=True,
173 173 help="IP on which to listen for engine connections. [default: loopback]")
174 174 def _engine_ip_default(self):
175 175 return localhost()
176 176 engine_transport = Unicode('tcp', config=True,
177 177 help="0MQ transport for engine connections. [default: tcp]")
178 178
179 179 client_ip = Unicode(config=True,
180 180 help="IP on which to listen for client connections. [default: loopback]")
181 181 client_transport = Unicode('tcp', config=True,
182 182 help="0MQ transport for client connections. [default : tcp]")
183 183
184 184 monitor_ip = Unicode(config=True,
185 185 help="IP on which to listen for monitor messages. [default: loopback]")
186 186 monitor_transport = Unicode('tcp', config=True,
187 187 help="0MQ transport for monitor messages. [default : tcp]")
188 188
189 189 _client_ip_default = _monitor_ip_default = _engine_ip_default
190 190
191 191
192 192 monitor_url = Unicode('')
193 193
194 194 db_class = DottedObjectName('NoDB',
195 195 config=True, help="""The class to use for the DB backend
196 196
197 197 Options include:
198 198
199 199 SQLiteDB: SQLite
200 200 MongoDB : use MongoDB
201 201 DictDB : in-memory storage (fastest, but be mindful of memory growth of the Hub)
202 202 NoDB : disable database altogether (default)
203 203
204 204 """)
205 205
206 206 registration_timeout = Integer(0, config=True,
207 207 help="Engine registration timeout in seconds [default: max(30,"
208 208 "10*heartmonitor.period)]" )
209 209
210 210 def _registration_timeout_default(self):
211 211 if self.heartmonitor is None:
212 212 # early initialization, this value will be ignored
213 213 return 0
214 214 # heartmonitor period is in milliseconds, so 10x in seconds is .01
215 215 return max(30, int(.01 * self.heartmonitor.period))
216 216
217 217 # not configurable
218 218 db = Instance('IPython.parallel.controller.dictdb.BaseDB')
219 219 heartmonitor = Instance('IPython.parallel.controller.heartmonitor.HeartMonitor')
220 220
221 221 def _ip_changed(self, name, old, new):
222 222 self.engine_ip = new
223 223 self.client_ip = new
224 224 self.monitor_ip = new
225 225 self._update_monitor_url()
226 226
227 227 def _update_monitor_url(self):
228 228 self.monitor_url = "%s://%s:%i" % (self.monitor_transport, self.monitor_ip, self.mon_port)
229 229
230 230 def _transport_changed(self, name, old, new):
231 231 self.engine_transport = new
232 232 self.client_transport = new
233 233 self.monitor_transport = new
234 234 self._update_monitor_url()
235 235
236 236 def __init__(self, **kwargs):
237 237 super(HubFactory, self).__init__(**kwargs)
238 238 self._update_monitor_url()
239 239
240 240
241 241 def construct(self):
242 242 self.init_hub()
243 243
244 244 def start(self):
245 245 self.heartmonitor.start()
246 246 self.log.info("Heartmonitor started")
247 247
248 248 def client_url(self, channel):
249 249 """return full zmq url for a named client channel"""
250 250 return "%s://%s:%i" % (self.client_transport, self.client_ip, self.client_info[channel])
251 251
252 252 def engine_url(self, channel):
253 253 """return full zmq url for a named engine channel"""
254 254 return "%s://%s:%i" % (self.engine_transport, self.engine_ip, self.engine_info[channel])
255 255
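These helpers simply join a transport, an IP, and a named port; an illustrative expansion::

    # with client_transport='tcp', client_ip='127.0.0.1', client_info={'task': 55305}:
    "%s://%s:%i" % ('tcp', '127.0.0.1', 55305)   # -> 'tcp://127.0.0.1:55305'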
256 256 def init_hub(self):
257 257 """construct Hub object"""
258 258
259 259 ctx = self.context
260 260 loop = self.loop
261 261 if 'TaskScheduler.scheme_name' in self.config:
262 262 scheme = self.config.TaskScheduler.scheme_name
263 263 else:
264 264 from .scheduler import TaskScheduler
265 265 scheme = TaskScheduler.scheme_name.get_default_value()
266 266
267 267 # build connection dicts
268 268 engine = self.engine_info = {
269 269 'interface' : "%s://%s" % (self.engine_transport, self.engine_ip),
270 270 'registration' : self.regport,
271 271 'control' : self.control[1],
272 272 'mux' : self.mux[1],
273 273 'hb_ping' : self.hb[0],
274 274 'hb_pong' : self.hb[1],
275 275 'task' : self.task[1],
276 276 'iopub' : self.iopub[1],
277 277 }
278 278
279 279 client = self.client_info = {
280 280 'interface' : "%s://%s" % (self.client_transport, self.client_ip),
281 281 'registration' : self.regport,
282 282 'control' : self.control[0],
283 283 'mux' : self.mux[0],
284 284 'task' : self.task[0],
285 285 'task_scheme' : scheme,
286 286 'iopub' : self.iopub[0],
287 287 'notification' : self.notifier_port,
288 288 }
289 289
290 290 self.log.debug("Hub engine addrs: %s", self.engine_info)
291 291 self.log.debug("Hub client addrs: %s", self.client_info)
292 292
293 293 # Registrar socket
294 294 q = ZMQStream(ctx.socket(zmq.ROUTER), loop)
295 295 util.set_hwm(q, 0)
296 296 q.bind(self.client_url('registration'))
297 297 self.log.info("Hub listening on %s for registration.", self.client_url('registration'))
298 298 if self.client_ip != self.engine_ip:
299 299 q.bind(self.engine_url('registration'))
300 300 self.log.info("Hub listening on %s for registration.", self.engine_url('registration'))
301 301
302 302 ### Engine connections ###
303 303
304 304 # heartbeat
305 305 hpub = ctx.socket(zmq.PUB)
306 306 hpub.bind(self.engine_url('hb_ping'))
307 307 hrep = ctx.socket(zmq.ROUTER)
308 308 util.set_hwm(hrep, 0)
309 309 hrep.bind(self.engine_url('hb_pong'))
310 310 self.heartmonitor = HeartMonitor(loop=loop, parent=self, log=self.log,
311 311 pingstream=ZMQStream(hpub,loop),
312 312 pongstream=ZMQStream(hrep,loop)
313 313 )
314 314
315 315 ### Client connections ###
316 316
317 317 # Notifier socket
318 318 n = ZMQStream(ctx.socket(zmq.PUB), loop)
319 319 n.bind(self.client_url('notification'))
320 320
321 321 ### build and launch the queues ###
322 322
323 323 # monitor socket
324 324 sub = ctx.socket(zmq.SUB)
325 325 sub.setsockopt(zmq.SUBSCRIBE, b"")
326 326 sub.bind(self.monitor_url)
327 327 sub.bind('inproc://monitor')
328 328 sub = ZMQStream(sub, loop)
329 329
330 330 # connect the db
331 331 db_class = _db_shortcuts.get(self.db_class.lower(), self.db_class)
332 332 self.log.info('Hub using DB backend: %r', (db_class.split('.')[-1]))
333 333 self.db = import_item(str(db_class))(session=self.session.session,
334 334 parent=self, log=self.log)
335 335 time.sleep(.25)
336 336
337 337 # resubmit stream
338 338 r = ZMQStream(ctx.socket(zmq.DEALER), loop)
339 339 url = util.disambiguate_url(self.client_url('task'))
340 340 r.connect(url)
341 341
342 342 # convert seconds to msec
343 343 registration_timeout = 1000*self.registration_timeout
344 344
345 345 self.hub = Hub(loop=loop, session=self.session, monitor=sub, heartmonitor=self.heartmonitor,
346 346 query=q, notifier=n, resubmit=r, db=self.db,
347 347 engine_info=self.engine_info, client_info=self.client_info,
348 348 log=self.log, registration_timeout=registration_timeout)
349 349
350 350
351 351 class Hub(SessionFactory):
352 352 """The IPython Controller Hub with 0MQ connections
353 353
354 354 Parameters
355 355 ==========
356 356 loop: zmq IOLoop instance
357 357 session: Session object
358 358 <removed> context: zmq context for creating new connections (?)
359 359 queue: ZMQStream for monitoring the command queue (SUB)
360 360 query: ZMQStream for engine registration and client queries requests (ROUTER)
361 361 heartbeat: HeartMonitor object checking the pulse of the engines
362 362 notifier: ZMQStream for broadcasting engine registration changes (PUB)
363 363 db: connection to db for out of memory logging of commands
364 364 NotImplemented
365 365 engine_info: dict of zmq connection information for engines to connect
366 366 to the queues.
367 367 client_info: dict of zmq connection information for engines to connect
368 368 to the queues.
369 369 """
370 370
371 371 engine_state_file = Unicode()
372 372
373 373 # internal data structures:
374 374 ids=Set() # engine IDs
375 375 keytable=Dict()
376 376 by_ident=Dict()
377 377 engines=Dict()
378 378 clients=Dict()
379 379 hearts=Dict()
380 380 pending=Set()
381 381 queues=Dict() # pending msg_ids keyed by engine_id
382 382 tasks=Dict() # pending msg_ids submitted as tasks, keyed by client_id
383 383 completed=Dict() # completed msg_ids keyed by engine_id
384 384 all_completed=Set() # set of all completed msg_ids
385 385 dead_engines=Set() # set of dead engine UUIDs
386 386 unassigned=Set() # set of task msg_ids not yet assigned a destination
387 387 incoming_registrations=Dict()
388 388 registration_timeout=Integer()
389 389 _idcounter=Integer(0)
390 390
391 391 # objects from constructor:
392 392 query=Instance(ZMQStream)
393 393 monitor=Instance(ZMQStream)
394 394 notifier=Instance(ZMQStream)
395 395 resubmit=Instance(ZMQStream)
396 396 heartmonitor=Instance(HeartMonitor)
397 397 db=Instance(object)
398 398 client_info=Dict()
399 399 engine_info=Dict()
400 400
401 401
402 402 def __init__(self, **kwargs):
403 403 """
404 404 # universal:
405 405 loop: IOLoop for creating future connections
406 406 session: streamsession for sending serialized data
407 407 # engine:
408 408 queue: ZMQStream for monitoring queue messages
409 409 query: ZMQStream for engine+client registration and client requests
410 410 heartbeat: HeartMonitor object for tracking engines
411 411 # extra:
412 412 db: ZMQStream for db connection (NotImplemented)
413 413 engine_info: zmq address/protocol dict for engine connections
414 414 client_info: zmq address/protocol dict for client connections
415 415 """
416 416
417 417 super(Hub, self).__init__(**kwargs)
418 418
419 419 # register our callbacks
420 420 self.query.on_recv(self.dispatch_query)
421 421 self.monitor.on_recv(self.dispatch_monitor_traffic)
422 422
423 423 self.heartmonitor.add_heart_failure_handler(self.handle_heart_failure)
424 424 self.heartmonitor.add_new_heart_handler(self.handle_new_heart)
425 425
426 426 self.monitor_handlers = {b'in' : self.save_queue_request,
427 427 b'out': self.save_queue_result,
428 428 b'intask': self.save_task_request,
429 429 b'outtask': self.save_task_result,
430 430 b'tracktask': self.save_task_destination,
431 431 b'incontrol': _passer,
432 432 b'outcontrol': _passer,
433 433 b'iopub': self.save_iopub_message,
434 434 }
435 435
436 436 self.query_handlers = {'queue_request': self.queue_status,
437 437 'result_request': self.get_results,
438 438 'history_request': self.get_history,
439 439 'db_request': self.db_query,
440 440 'purge_request': self.purge_results,
441 441 'load_request': self.check_load,
442 442 'resubmit_request': self.resubmit_task,
443 443 'shutdown_request': self.shutdown_request,
444 444 'registration_request' : self.register_engine,
445 445 'unregistration_request' : self.unregister_engine,
446 446 'connection_request': self.connection_request,
447 447 }
448 448
449 449 # ignore resubmit replies
450 450 self.resubmit.on_recv(lambda msg: None, copy=False)
451 451
452 452 self.log.info("hub::created hub")
453 453
454 454 @property
455 455 def _next_id(self):
456 456 """generate a new ID.
457 457
458 458 No longer reuse old ids, just count from 0."""
459 459 newid = self._idcounter
460 460 self._idcounter += 1
461 461 return newid
462 462 # newid = 0
463 463 # incoming = [id[0] for id in itervalues(self.incoming_registrations)]
464 464 # # print newid, self.ids, self.incoming_registrations
465 465 # while newid in self.ids or newid in incoming:
466 466 # newid += 1
467 467 # return newid
468 468
469 469 #-----------------------------------------------------------------------------
470 470 # message validation
471 471 #-----------------------------------------------------------------------------
472 472
473 473 def _validate_targets(self, targets):
474 474 """turn any valid targets argument into a list of integer ids"""
475 475 if targets is None:
476 476 # default to all
477 477 return self.ids
478 478
479 479 if isinstance(targets, (int,str,unicode_type)):
480 480 # only one target specified
481 481 targets = [targets]
482 482 _targets = []
483 483 for t in targets:
484 484 # map raw identities to ids
485 485 if isinstance(t, (str,unicode_type)):
486 486 t = self.by_ident.get(cast_bytes(t), t)
487 487 _targets.append(t)
488 488 targets = _targets
489 489 bad_targets = [ t for t in targets if t not in self.ids ]
490 490 if bad_targets:
491 491 raise IndexError("No Such Engine: %r" % bad_targets)
492 492 if not targets:
493 493 raise IndexError("No Engines Registered")
494 494 return targets
495 495
496 496 #-----------------------------------------------------------------------------
497 497 # dispatch methods (1 per stream)
498 498 #-----------------------------------------------------------------------------
499 499
500 500
501 501 @util.log_errors
502 502 def dispatch_monitor_traffic(self, msg):
503 503 """all ME and Task queue messages come through here, as well as
504 504 IOPub traffic."""
505 505 self.log.debug("monitor traffic: %r", msg[0])
506 506 switch = msg[0]
507 507 try:
508 508 idents, msg = self.session.feed_identities(msg[1:])
509 509 except ValueError:
510 510 idents=[]
511 511 if not idents:
512 512 self.log.error("Monitor message without topic: %r", msg)
513 513 return
514 514 handler = self.monitor_handlers.get(switch, None)
515 515 if handler is not None:
516 516 handler(idents, msg)
517 517 else:
518 518 self.log.error("Unrecognized monitor topic: %r", switch)
519 519
520 520
521 521 @util.log_errors
522 522 def dispatch_query(self, msg):
523 523 """Route registration requests and queries from clients."""
524 524 try:
525 525 idents, msg = self.session.feed_identities(msg)
526 526 except ValueError:
527 527 idents = []
528 528 if not idents:
529 529 self.log.error("Bad Query Message: %r", msg)
530 530 return
531 531 client_id = idents[0]
532 532 try:
533 533 msg = self.session.unserialize(msg, content=True)
534 534 except Exception:
535 535 content = error.wrap_exception()
536 536 self.log.error("Bad Query Message: %r", msg, exc_info=True)
537 537 self.session.send(self.query, "hub_error", ident=client_id,
538 538 content=content)
539 539 return
540 540 # print client_id, header, parent, content
541 541 #switch on message type:
542 542 msg_type = msg['header']['msg_type']
543 543 self.log.info("client::client %r requested %r", client_id, msg_type)
544 544 handler = self.query_handlers.get(msg_type, None)
545 545 try:
546 546 assert handler is not None, "Bad Message Type: %r" % msg_type
547 547 except:
548 548 content = error.wrap_exception()
549 549 self.log.error("Bad Message Type: %r", msg_type, exc_info=True)
550 550 self.session.send(self.query, "hub_error", ident=client_id,
551 551 content=content)
552 552 return
553 553
554 554 else:
555 555 handler(idents, msg)
556 556
557 557 def dispatch_db(self, msg):
558 558 """"""
559 559 raise NotImplementedError
560 560
561 561 #---------------------------------------------------------------------------
562 562 # handler methods (1 per event)
563 563 #---------------------------------------------------------------------------
564 564
565 565 #----------------------- Heartbeat --------------------------------------
566 566
567 567 def handle_new_heart(self, heart):
568 568 """handler to attach to heartbeater.
569 569 Called when a new heart starts to beat.
570 570 Triggers completion of registration."""
571 571 self.log.debug("heartbeat::handle_new_heart(%r)", heart)
572 572 if heart not in self.incoming_registrations:
573 573 self.log.info("heartbeat::ignoring new heart: %r", heart)
574 574 else:
575 575 self.finish_registration(heart)
576 576
577 577
578 578 def handle_heart_failure(self, heart):
579 579 """handler to attach to heartbeater.
580 580 called when a previously registered heart fails to respond to beat request.
581 581 triggers unregistration"""
582 582 self.log.debug("heartbeat::handle_heart_failure(%r)", heart)
583 583 eid = self.hearts.get(heart, None)
 584 584         if eid is None or self.keytable[eid] in self.dead_engines:
 585 585             self.log.info("heartbeat::ignoring heart failure %r (not an engine or already dead)", heart)
 586 586         else:
 587 587             uuid = self.engines[eid].uuid
 588 588             self.unregister_engine(heart, dict(content=dict(id=eid, queue=uuid)))
589 589
590 590 #----------------------- MUX Queue Traffic ------------------------------
591 591
592 592 def save_queue_request(self, idents, msg):
593 593 if len(idents) < 2:
594 594 self.log.error("invalid identity prefix: %r", idents)
595 595 return
596 596 queue_id, client_id = idents[:2]
597 597 try:
598 598 msg = self.session.unserialize(msg)
599 599 except Exception:
600 600 self.log.error("queue::client %r sent invalid message to %r: %r", client_id, queue_id, msg, exc_info=True)
601 601 return
602 602
603 603 eid = self.by_ident.get(queue_id, None)
604 604 if eid is None:
605 605 self.log.error("queue::target %r not registered", queue_id)
606 606 self.log.debug("queue:: valid are: %r", self.by_ident.keys())
607 607 return
608 608 record = init_record(msg)
609 609 msg_id = record['msg_id']
610 610 self.log.info("queue::client %r submitted request %r to %s", client_id, msg_id, eid)
611 611 # Unicode in records
612 612 record['engine_uuid'] = queue_id.decode('ascii')
613 613 record['client_uuid'] = msg['header']['session']
614 614 record['queue'] = 'mux'
615 615
616 616 try:
 617 617             # it's possible iopub arrived first:
618 618 existing = self.db.get_record(msg_id)
619 619 for key,evalue in iteritems(existing):
620 620 rvalue = record.get(key, None)
621 621 if evalue and rvalue and evalue != rvalue:
622 622 self.log.warn("conflicting initial state for record: %r:%r <%r> %r", msg_id, rvalue, key, evalue)
623 623 elif evalue and not rvalue:
624 624 record[key] = evalue
625 625 try:
626 626 self.db.update_record(msg_id, record)
627 627 except Exception:
628 628 self.log.error("DB Error updating record %r", msg_id, exc_info=True)
629 629 except KeyError:
630 630 try:
631 631 self.db.add_record(msg_id, record)
632 632 except Exception:
633 633 self.log.error("DB Error adding record %r", msg_id, exc_info=True)
634 634
635 635
636 636 self.pending.add(msg_id)
637 637 self.queues[eid].append(msg_id)
638 638
639 639 def save_queue_result(self, idents, msg):
640 640 if len(idents) < 2:
641 641 self.log.error("invalid identity prefix: %r", idents)
642 642 return
643 643
644 644 client_id, queue_id = idents[:2]
645 645 try:
646 646 msg = self.session.unserialize(msg)
647 647 except Exception:
648 648 self.log.error("queue::engine %r sent invalid message to %r: %r",
649 649 queue_id, client_id, msg, exc_info=True)
650 650 return
651 651
652 652 eid = self.by_ident.get(queue_id, None)
653 653 if eid is None:
 654 654             self.log.error("queue::unknown engine %r is sending a reply", queue_id)
655 655 return
656 656
657 657 parent = msg['parent_header']
658 658 if not parent:
659 659 return
660 660 msg_id = parent['msg_id']
661 661 if msg_id in self.pending:
662 662 self.pending.remove(msg_id)
663 663 self.all_completed.add(msg_id)
664 664 self.queues[eid].remove(msg_id)
665 665 self.completed[eid].append(msg_id)
666 666 self.log.info("queue::request %r completed on %s", msg_id, eid)
667 667 elif msg_id not in self.all_completed:
 668 668             # it could be a result from an engine that died before
 669 669             # delivering the result
670 670 self.log.warn("queue:: unknown msg finished %r", msg_id)
671 671 return
672 672 # update record anyway, because the unregistration could have been premature
673 673 rheader = msg['header']
674 674 md = msg['metadata']
675 675 completed = rheader['date']
676 676 started = extract_dates(md.get('started', None))
677 677 result = {
678 678 'result_header' : rheader,
679 679 'result_metadata': md,
680 680 'result_content': msg['content'],
681 681 'received': datetime.now(),
682 682 'started' : started,
683 683 'completed' : completed
684 684 }
685 685
686 686 result['result_buffers'] = msg['buffers']
687 687 try:
688 688 self.db.update_record(msg_id, result)
689 689 except Exception:
690 690 self.log.error("DB Error updating record %r", msg_id, exc_info=True)
691 691
692 692
693 693 #--------------------- Task Queue Traffic ------------------------------
694 694
695 695 def save_task_request(self, idents, msg):
696 696 """Save the submission of a task."""
697 697 client_id = idents[0]
698 698
699 699 try:
700 700 msg = self.session.unserialize(msg)
701 701 except Exception:
702 702 self.log.error("task::client %r sent invalid task message: %r",
703 703 client_id, msg, exc_info=True)
704 704 return
705 705 record = init_record(msg)
706 706
707 707 record['client_uuid'] = msg['header']['session']
708 708 record['queue'] = 'task'
709 709 header = msg['header']
710 710 msg_id = header['msg_id']
711 711 self.pending.add(msg_id)
712 712 self.unassigned.add(msg_id)
713 713 try:
 714 714             # it's possible iopub arrived first:
715 715 existing = self.db.get_record(msg_id)
716 716 if existing['resubmitted']:
717 717 for key in ('submitted', 'client_uuid', 'buffers'):
718 718 # don't clobber these keys on resubmit
719 719 # submitted and client_uuid should be different
720 720 # and buffers might be big, and shouldn't have changed
721 721 record.pop(key)
 722 722                 # still check content and header, which should not change
 723 723                 # but are not as expensive to compare as buffers
724 724
725 725 for key,evalue in iteritems(existing):
726 726 if key.endswith('buffers'):
727 727 # don't compare buffers
728 728 continue
729 729 rvalue = record.get(key, None)
730 730 if evalue and rvalue and evalue != rvalue:
731 731 self.log.warn("conflicting initial state for record: %r:%r <%r> %r", msg_id, rvalue, key, evalue)
732 732 elif evalue and not rvalue:
733 733 record[key] = evalue
734 734 try:
735 735 self.db.update_record(msg_id, record)
736 736 except Exception:
737 737 self.log.error("DB Error updating record %r", msg_id, exc_info=True)
738 738 except KeyError:
739 739 try:
740 740 self.db.add_record(msg_id, record)
741 741 except Exception:
742 742 self.log.error("DB Error adding record %r", msg_id, exc_info=True)
743 743 except Exception:
744 744 self.log.error("DB Error saving task request %r", msg_id, exc_info=True)
745 745
746 746 def save_task_result(self, idents, msg):
747 747 """save the result of a completed task."""
748 748 client_id = idents[0]
749 749 try:
750 750 msg = self.session.unserialize(msg)
751 751 except Exception:
 752 752             self.log.error("task::invalid task result message sent to %r: %r",
753 753 client_id, msg, exc_info=True)
754 754 return
755 755
756 756 parent = msg['parent_header']
757 757 if not parent:
758 758 # print msg
759 759 self.log.warn("Task %r had no parent!", msg)
760 760 return
761 761 msg_id = parent['msg_id']
762 762 if msg_id in self.unassigned:
763 763 self.unassigned.remove(msg_id)
764 764
765 765 header = msg['header']
766 766 md = msg['metadata']
767 767 engine_uuid = md.get('engine', u'')
768 768 eid = self.by_ident.get(cast_bytes(engine_uuid), None)
769 769
770 770 status = md.get('status', None)
771 771
772 772 if msg_id in self.pending:
773 773 self.log.info("task::task %r finished on %s", msg_id, eid)
774 774 self.pending.remove(msg_id)
775 775 self.all_completed.add(msg_id)
776 776 if eid is not None:
777 777 if status != 'aborted':
778 778 self.completed[eid].append(msg_id)
779 779 if msg_id in self.tasks[eid]:
780 780 self.tasks[eid].remove(msg_id)
781 781 completed = header['date']
782 782 started = extract_dates(md.get('started', None))
783 783 result = {
784 784 'result_header' : header,
785 785 'result_metadata': msg['metadata'],
786 786 'result_content': msg['content'],
787 787 'started' : started,
788 788 'completed' : completed,
789 789 'received' : datetime.now(),
790 790 'engine_uuid': engine_uuid,
791 791 }
792 792
793 793 result['result_buffers'] = msg['buffers']
794 794 try:
795 795 self.db.update_record(msg_id, result)
796 796 except Exception:
797 797 self.log.error("DB Error saving task request %r", msg_id, exc_info=True)
798 798
799 799 else:
800 800 self.log.debug("task::unknown task %r finished", msg_id)
801 801
802 802 def save_task_destination(self, idents, msg):
803 803 try:
804 804 msg = self.session.unserialize(msg, content=True)
805 805 except Exception:
806 806 self.log.error("task::invalid task tracking message", exc_info=True)
807 807 return
808 808 content = msg['content']
809 809 # print (content)
810 810 msg_id = content['msg_id']
811 811 engine_uuid = content['engine_id']
812 812 eid = self.by_ident[cast_bytes(engine_uuid)]
813 813
814 814 self.log.info("task::task %r arrived on %r", msg_id, eid)
815 815 if msg_id in self.unassigned:
816 816 self.unassigned.remove(msg_id)
817 817 # else:
818 818 # self.log.debug("task::task %r not listed as MIA?!"%(msg_id))
819 819
820 820 self.tasks[eid].append(msg_id)
821 821 # self.pending[msg_id][1].update(received=datetime.now(),engine=(eid,engine_uuid))
822 822 try:
823 823 self.db.update_record(msg_id, dict(engine_uuid=engine_uuid))
824 824 except Exception:
825 825 self.log.error("DB Error saving task destination %r", msg_id, exc_info=True)
826 826
827 827
828 828 def mia_task_request(self, idents, msg):
829 829 raise NotImplementedError
830 830 client_id = idents[0]
831 831 # content = dict(mia=self.mia,status='ok')
832 832 # self.session.send('mia_reply', content=content, idents=client_id)
833 833
834 834
835 835 #--------------------- IOPub Traffic ------------------------------
836 836
837 837 def save_iopub_message(self, topics, msg):
838 838 """save an iopub message into the db"""
839 839 # print (topics)
840 840 try:
841 841 msg = self.session.unserialize(msg, content=True)
842 842 except Exception:
843 843 self.log.error("iopub::invalid IOPub message", exc_info=True)
844 844 return
845 845
846 846 parent = msg['parent_header']
847 847 if not parent:
848 848 self.log.warn("iopub::IOPub message lacks parent: %r", msg)
849 849 return
850 850 msg_id = parent['msg_id']
851 851 msg_type = msg['header']['msg_type']
852 852 content = msg['content']
853 853
854 854 # ensure msg_id is in db
855 855 try:
856 856 rec = self.db.get_record(msg_id)
857 857 except KeyError:
858 858 rec = empty_record()
859 859 rec['msg_id'] = msg_id
860 860 self.db.add_record(msg_id, rec)
861 861 # stream
862 862 d = {}
863 863 if msg_type == 'stream':
864 864 name = content['name']
865 865 s = rec[name] or ''
866 866 d[name] = s + content['data']
867 867
868 elif msg_type == 'pyerr':
869 d['pyerr'] = content
868 elif msg_type == 'error':
869 d['error'] = content
870 870 elif msg_type == 'execute_input':
871 871 d['execute_input'] = content['code']
872 872 elif msg_type in ('display_data', 'execute_result'):
873 873 d[msg_type] = content
874 874 elif msg_type == 'status':
875 875 pass
876 876 elif msg_type == 'data_pub':
 877 877             self.log.info("ignored data_pub message for %s", msg_id)
878 878 else:
879 879 self.log.warn("unhandled iopub msg_type: %r", msg_type)
880 880
881 881 if not d:
882 882 return
883 883
884 884 try:
885 885 self.db.update_record(msg_id, d)
886 886 except Exception:
887 887 self.log.error("DB Error saving iopub message %r", msg_id, exc_info=True)
888 888
889 889
890 890
891 891 #-------------------------------------------------------------------------
892 892 # Registration requests
893 893 #-------------------------------------------------------------------------
894 894
895 895 def connection_request(self, client_id, msg):
896 896 """Reply with connection addresses for clients."""
897 897 self.log.info("client::client %r connected", client_id)
898 898 content = dict(status='ok')
899 899 jsonable = {}
900 900 for k,v in iteritems(self.keytable):
901 901 if v not in self.dead_engines:
902 902 jsonable[str(k)] = v
903 903 content['engines'] = jsonable
904 904 self.session.send(self.query, 'connection_reply', content, parent=msg, ident=client_id)
905 905
906 906 def register_engine(self, reg, msg):
907 907 """Register a new engine."""
908 908 content = msg['content']
909 909 try:
910 910 uuid = content['uuid']
911 911 except KeyError:
 912 912             self.log.error("registration::uuid not specified", exc_info=True)
913 913 return
914 914
915 915 eid = self._next_id
916 916
917 917 self.log.debug("registration::register_engine(%i, %r)", eid, uuid)
918 918
919 919 content = dict(id=eid,status='ok',hb_period=self.heartmonitor.period)
 920 920         # check if the uuid is already in use:
921 921 if cast_bytes(uuid) in self.by_ident:
922 922 try:
923 923 raise KeyError("uuid %r in use" % uuid)
924 924 except:
925 925 content = error.wrap_exception()
926 926 self.log.error("uuid %r in use", uuid, exc_info=True)
927 927 else:
928 928 for h, ec in iteritems(self.incoming_registrations):
929 929 if uuid == h:
930 930 try:
931 931 raise KeyError("heart_id %r in use" % uuid)
932 932 except:
933 933 self.log.error("heart_id %r in use", uuid, exc_info=True)
934 934 content = error.wrap_exception()
935 935 break
936 936 elif uuid == ec.uuid:
937 937 try:
938 938 raise KeyError("uuid %r in use" % uuid)
939 939 except:
940 940 self.log.error("uuid %r in use", uuid, exc_info=True)
941 941 content = error.wrap_exception()
942 942 break
943 943
944 944 msg = self.session.send(self.query, "registration_reply",
945 945 content=content,
946 946 ident=reg)
947 947
948 948 heart = cast_bytes(uuid)
949 949
950 950 if content['status'] == 'ok':
951 951 if heart in self.heartmonitor.hearts:
952 952 # already beating
953 953 self.incoming_registrations[heart] = EngineConnector(id=eid,uuid=uuid)
954 954 self.finish_registration(heart)
955 955 else:
956 956 purge = lambda : self._purge_stalled_registration(heart)
957 957 dc = ioloop.DelayedCallback(purge, self.registration_timeout, self.loop)
958 958 dc.start()
959 959 self.incoming_registrations[heart] = EngineConnector(id=eid,uuid=uuid,stallback=dc)
960 960 else:
961 961 self.log.error("registration::registration %i failed: %r", eid, content['evalue'])
962 962
963 963 return eid
964 964
965 965 def unregister_engine(self, ident, msg):
966 966 """Unregister an engine that explicitly requested to leave."""
967 967 try:
968 968 eid = msg['content']['id']
969 969 except:
970 970 self.log.error("registration::bad engine id for unregistration: %r", ident, exc_info=True)
971 971 return
972 972 self.log.info("registration::unregister_engine(%r)", eid)
973 973 # print (eid)
974 974 uuid = self.keytable[eid]
975 975 content=dict(id=eid, uuid=uuid)
976 976 self.dead_engines.add(uuid)
977 977 # self.ids.remove(eid)
978 978 # uuid = self.keytable.pop(eid)
979 979 #
980 980 # ec = self.engines.pop(eid)
981 981 # self.hearts.pop(ec.heartbeat)
982 982 # self.by_ident.pop(ec.queue)
983 983 # self.completed.pop(eid)
984 984 handleit = lambda : self._handle_stranded_msgs(eid, uuid)
985 985 dc = ioloop.DelayedCallback(handleit, self.registration_timeout, self.loop)
986 986 dc.start()
987 987 ############## TODO: HANDLE IT ################
988 988
989 989 self._save_engine_state()
990 990
991 991 if self.notifier:
992 992 self.session.send(self.notifier, "unregistration_notification", content=content)
993 993
994 994 def _handle_stranded_msgs(self, eid, uuid):
995 995 """Handle messages known to be on an engine when the engine unregisters.
996 996
997 997 It is possible that this will fire prematurely - that is, an engine will
998 998 go down after completing a result, and the client will be notified
999 999 that the result failed and later receive the actual result.
1000 1000 """
1001 1001
1002 1002 outstanding = self.queues[eid]
1003 1003
1004 1004 for msg_id in outstanding:
1005 1005 self.pending.remove(msg_id)
1006 1006 self.all_completed.add(msg_id)
1007 1007 try:
1008 1008 raise error.EngineError("Engine %r died while running task %r" % (eid, msg_id))
1009 1009 except:
1010 1010 content = error.wrap_exception()
1011 1011 # build a fake header:
1012 1012 header = {}
1013 1013 header['engine'] = uuid
1014 1014 header['date'] = datetime.now()
1015 1015 rec = dict(result_content=content, result_header=header, result_buffers=[])
1016 1016 rec['completed'] = header['date']
1017 1017 rec['engine_uuid'] = uuid
1018 1018 try:
1019 1019 self.db.update_record(msg_id, rec)
1020 1020 except Exception:
1021 1021 self.log.error("DB Error handling stranded msg %r", msg_id, exc_info=True)
1022 1022
1023 1023
1024 1024 def finish_registration(self, heart):
1025 1025 """Second half of engine registration, called after our HeartMonitor
1026 1026 has received a beat from the Engine's Heart."""
1027 1027 try:
1028 1028 ec = self.incoming_registrations.pop(heart)
1029 1029 except KeyError:
 1030 1030             self.log.error("registration::tried to finish nonexistent registration", exc_info=True)
1031 1031 return
1032 1032 self.log.info("registration::finished registering engine %i:%s", ec.id, ec.uuid)
1033 1033 if ec.stallback is not None:
1034 1034 ec.stallback.stop()
1035 1035 eid = ec.id
1036 1036 self.ids.add(eid)
1037 1037 self.keytable[eid] = ec.uuid
1038 1038 self.engines[eid] = ec
1039 1039 self.by_ident[cast_bytes(ec.uuid)] = ec.id
1040 1040 self.queues[eid] = list()
1041 1041 self.tasks[eid] = list()
1042 1042 self.completed[eid] = list()
1043 1043 self.hearts[heart] = eid
1044 1044 content = dict(id=eid, uuid=self.engines[eid].uuid)
1045 1045 if self.notifier:
1046 1046 self.session.send(self.notifier, "registration_notification", content=content)
1047 1047 self.log.info("engine::Engine Connected: %i", eid)
1048 1048
1049 1049 self._save_engine_state()
1050 1050
1051 1051 def _purge_stalled_registration(self, heart):
1052 1052 if heart in self.incoming_registrations:
1053 1053 ec = self.incoming_registrations.pop(heart)
1054 1054 self.log.info("registration::purging stalled registration: %i", ec.id)
1055 1055 else:
1056 1056 pass
1057 1057
1058 1058 #-------------------------------------------------------------------------
1059 1059 # Engine State
1060 1060 #-------------------------------------------------------------------------
1061 1061
1062 1062
1063 1063 def _cleanup_engine_state_file(self):
1064 1064 """cleanup engine state mapping"""
1065 1065
1066 1066 if os.path.exists(self.engine_state_file):
1067 1067 self.log.debug("cleaning up engine state: %s", self.engine_state_file)
1068 1068 try:
1069 1069 os.remove(self.engine_state_file)
1070 1070 except IOError:
1071 1071 self.log.error("Couldn't cleanup file: %s", self.engine_state_file, exc_info=True)
1072 1072
1073 1073
1074 1074 def _save_engine_state(self):
1075 1075 """save engine mapping to JSON file"""
1076 1076 if not self.engine_state_file:
1077 1077 return
 1078 1078         self.log.debug("save engine state to %s", self.engine_state_file)
1079 1079 state = {}
1080 1080 engines = {}
1081 1081 for eid, ec in iteritems(self.engines):
1082 1082 if ec.uuid not in self.dead_engines:
1083 1083 engines[eid] = ec.uuid
1084 1084
1085 1085 state['engines'] = engines
1086 1086
1087 1087 state['next_id'] = self._idcounter
1088 1088
1089 1089 with open(self.engine_state_file, 'w') as f:
1090 1090 json.dump(state, f)
1091 1091
1092 1092
1093 1093 def _load_engine_state(self):
1094 1094 """load engine mapping from JSON file"""
1095 1095 if not os.path.exists(self.engine_state_file):
1096 1096 return
1097 1097
 1098 1098         self.log.info("loading engine state from %s", self.engine_state_file)
1099 1099
1100 1100 with open(self.engine_state_file) as f:
1101 1101 state = json.load(f)
1102 1102
1103 1103 save_notifier = self.notifier
1104 1104 self.notifier = None
1105 1105 for eid, uuid in iteritems(state['engines']):
1106 1106 heart = uuid.encode('ascii')
1107 1107 # start with this heart as current and beating:
1108 1108 self.heartmonitor.responses.add(heart)
1109 1109 self.heartmonitor.hearts.add(heart)
1110 1110
1111 1111 self.incoming_registrations[heart] = EngineConnector(id=int(eid), uuid=uuid)
1112 1112 self.finish_registration(heart)
1113 1113
1114 1114 self.notifier = save_notifier
1115 1115
1116 1116 self._idcounter = state['next_id']
1117 1117
1118 1118 #-------------------------------------------------------------------------
1119 1119 # Client Requests
1120 1120 #-------------------------------------------------------------------------
1121 1121
1122 1122 def shutdown_request(self, client_id, msg):
1123 1123 """handle shutdown request."""
1124 1124 self.session.send(self.query, 'shutdown_reply', content={'status': 'ok'}, ident=client_id)
1125 1125 # also notify other clients of shutdown
1126 1126 self.session.send(self.notifier, 'shutdown_notice', content={'status': 'ok'})
1127 1127 dc = ioloop.DelayedCallback(lambda : self._shutdown(), 1000, self.loop)
1128 1128 dc.start()
1129 1129
1130 1130 def _shutdown(self):
1131 1131 self.log.info("hub::hub shutting down.")
1132 1132 time.sleep(0.1)
1133 1133 sys.exit(0)
1134 1134
1135 1135
1136 1136 def check_load(self, client_id, msg):
1137 1137 content = msg['content']
1138 1138 try:
1139 1139 targets = content['targets']
1140 1140 targets = self._validate_targets(targets)
1141 1141 except:
1142 1142 content = error.wrap_exception()
1143 1143 self.session.send(self.query, "hub_error",
1144 1144 content=content, ident=client_id)
1145 1145 return
1146 1146
1147 1147 content = dict(status='ok')
1148 1148 # loads = {}
1149 1149 for t in targets:
1150 1150 content[bytes(t)] = len(self.queues[t])+len(self.tasks[t])
1151 1151 self.session.send(self.query, "load_reply", content=content, ident=client_id)
1152 1152
1153 1153
1154 1154 def queue_status(self, client_id, msg):
1155 1155 """Return the Queue status of one or more targets.
1156 1156
1157 1157 If verbose, return the msg_ids, else return len of each type.
1158 1158
1159 1159 Keys:
1160 1160
1161 1161 * queue (pending MUX jobs)
1162 1162 * tasks (pending Task jobs)
1163 1163 * completed (finished jobs from both queues)
1164 1164 """
1165 1165 content = msg['content']
1166 1166 targets = content['targets']
1167 1167 try:
1168 1168 targets = self._validate_targets(targets)
1169 1169 except:
1170 1170 content = error.wrap_exception()
1171 1171 self.session.send(self.query, "hub_error",
1172 1172 content=content, ident=client_id)
1173 1173 return
1174 1174 verbose = content.get('verbose', False)
1175 1175 content = dict(status='ok')
1176 1176 for t in targets:
1177 1177 queue = self.queues[t]
1178 1178 completed = self.completed[t]
1179 1179 tasks = self.tasks[t]
1180 1180 if not verbose:
1181 1181 queue = len(queue)
1182 1182 completed = len(completed)
1183 1183 tasks = len(tasks)
1184 1184 content[str(t)] = {'queue': queue, 'completed': completed , 'tasks': tasks}
1185 1185 content['unassigned'] = list(self.unassigned) if verbose else len(self.unassigned)
1186 1186 # print (content)
1187 1187 self.session.send(self.query, "queue_reply", content=content, ident=client_id)
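        # Illustrative reply shape (non-verbose), with counts per engine id:
        #   {'status': 'ok', '0': {'queue': 2, 'completed': 5, 'tasks': 1}, 'unassigned': 0}
        # with verbose=True, the counts are replaced by lists of msg_ids.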
1188 1188
1189 1189 def purge_results(self, client_id, msg):
 1190 1190         """Purge results from memory. This method is more valuable before we move
 1191 1191         to a DB-based message storage mechanism."""
1192 1192 content = msg['content']
1193 1193 self.log.info("Dropping records with %s", content)
1194 1194 msg_ids = content.get('msg_ids', [])
1195 1195 reply = dict(status='ok')
1196 1196 if msg_ids == 'all':
1197 1197 try:
1198 1198 self.db.drop_matching_records(dict(completed={'$ne':None}))
1199 1199 except Exception:
1200 1200 reply = error.wrap_exception()
1201 1201 self.log.exception("Error dropping records")
1202 1202 else:
1203 1203 pending = [m for m in msg_ids if (m in self.pending)]
1204 1204 if pending:
1205 1205 try:
1206 1206 raise IndexError("msg pending: %r" % pending[0])
1207 1207 except:
1208 1208 reply = error.wrap_exception()
1209 1209 self.log.exception("Error dropping records")
1210 1210 else:
1211 1211 try:
1212 1212 self.db.drop_matching_records(dict(msg_id={'$in':msg_ids}))
1213 1213 except Exception:
1214 1214 reply = error.wrap_exception()
1215 1215 self.log.exception("Error dropping records")
1216 1216
1217 1217 if reply['status'] == 'ok':
1218 1218 eids = content.get('engine_ids', [])
1219 1219 for eid in eids:
1220 1220 if eid not in self.engines:
1221 1221 try:
1222 1222 raise IndexError("No such engine: %i" % eid)
1223 1223 except:
1224 1224 reply = error.wrap_exception()
1225 1225 self.log.exception("Error dropping records")
1226 1226 break
1227 1227 uid = self.engines[eid].uuid
1228 1228 try:
1229 1229 self.db.drop_matching_records(dict(engine_uuid=uid, completed={'$ne':None}))
1230 1230 except Exception:
1231 1231 reply = error.wrap_exception()
1232 1232 self.log.exception("Error dropping records")
1233 1233 break
1234 1234
1235 1235 self.session.send(self.query, 'purge_reply', content=reply, ident=client_id)
1236 1236
1237 1237 def resubmit_task(self, client_id, msg):
1238 1238 """Resubmit one or more tasks."""
1239 1239 def finish(reply):
1240 1240 self.session.send(self.query, 'resubmit_reply', content=reply, ident=client_id)
1241 1241
1242 1242 content = msg['content']
1243 1243 msg_ids = content['msg_ids']
1244 1244 reply = dict(status='ok')
1245 1245 try:
1246 1246 records = self.db.find_records({'msg_id' : {'$in' : msg_ids}}, keys=[
1247 1247 'header', 'content', 'buffers'])
1248 1248 except Exception:
1249 1249 self.log.error('db::db error finding tasks to resubmit', exc_info=True)
1250 1250 return finish(error.wrap_exception())
1251 1251
1252 1252 # validate msg_ids
1253 1253 found_ids = [ rec['msg_id'] for rec in records ]
1254 1254 pending_ids = [ msg_id for msg_id in found_ids if msg_id in self.pending ]
1255 1255 if len(records) > len(msg_ids):
1256 1256 try:
 1257 1257                 raise RuntimeError("DB appears to be in an inconsistent state. "
 1258 1258                                    "More matching records were found than should exist")
1259 1259 except Exception:
1260 1260 self.log.exception("Failed to resubmit task")
1261 1261 return finish(error.wrap_exception())
1262 1262 elif len(records) < len(msg_ids):
1263 1263 missing = [ m for m in msg_ids if m not in found_ids ]
1264 1264 try:
1265 1265 raise KeyError("No such msg(s): %r" % missing)
1266 1266 except KeyError:
1267 1267 self.log.exception("Failed to resubmit task")
1268 1268 return finish(error.wrap_exception())
1269 1269 elif pending_ids:
1270 1270 pass
1271 1271 # no need to raise on resubmit of pending task, now that we
1272 1272 # resubmit under new ID, but do we want to raise anyway?
1273 1273 # msg_id = invalid_ids[0]
1274 1274 # try:
1275 1275 # raise ValueError("Task(s) %r appears to be inflight" % )
1276 1276 # except Exception:
1277 1277 # return finish(error.wrap_exception())
1278 1278
1279 1279 # mapping of original IDs to resubmitted IDs
1280 1280 resubmitted = {}
1281 1281
1282 1282 # send the messages
1283 1283 for rec in records:
1284 1284 header = rec['header']
1285 1285 msg = self.session.msg(header['msg_type'], parent=header)
1286 1286 msg_id = msg['msg_id']
1287 1287 msg['content'] = rec['content']
1288 1288
1289 1289 # use the old header, but update msg_id and timestamp
1290 1290 fresh = msg['header']
1291 1291 header['msg_id'] = fresh['msg_id']
1292 1292 header['date'] = fresh['date']
1293 1293 msg['header'] = header
1294 1294
1295 1295 self.session.send(self.resubmit, msg, buffers=rec['buffers'])
1296 1296
1297 1297 resubmitted[rec['msg_id']] = msg_id
1298 1298 self.pending.add(msg_id)
1299 1299 msg['buffers'] = rec['buffers']
1300 1300 try:
1301 1301 self.db.add_record(msg_id, init_record(msg))
1302 1302 except Exception:
1303 1303 self.log.error("db::DB Error updating record: %s", msg_id, exc_info=True)
1304 1304 return finish(error.wrap_exception())
1305 1305
1306 1306 finish(dict(status='ok', resubmitted=resubmitted))
1307 1307
1308 1308 # store the new IDs in the Task DB
1309 1309 for msg_id, resubmit_id in iteritems(resubmitted):
1310 1310 try:
1311 1311 self.db.update_record(msg_id, {'resubmitted' : resubmit_id})
1312 1312 except Exception:
1313 1313 self.log.error("db::DB Error updating record: %s", msg_id, exc_info=True)
1314 1314
1315 1315
1316 1316 def _extract_record(self, rec):
1317 1317 """decompose a TaskRecord dict into subsection of reply for get_result"""
1318 1318 io_dict = {}
1319 for key in ('execute_input', 'execute_result', 'pyerr', 'stdout', 'stderr'):
1319 for key in ('execute_input', 'execute_result', 'error', 'stdout', 'stderr'):
1320 1320 io_dict[key] = rec[key]
1321 1321 content = {
1322 1322 'header': rec['header'],
1323 1323 'metadata': rec['metadata'],
1324 1324 'result_metadata': rec['result_metadata'],
1325 1325 'result_header' : rec['result_header'],
1326 1326 'result_content': rec['result_content'],
1327 1327 'received' : rec['received'],
1328 1328 'io' : io_dict,
1329 1329 }
1330 1330 if rec['result_buffers']:
1331 1331 buffers = list(map(bytes, rec['result_buffers']))
1332 1332 else:
1333 1333 buffers = []
1334 1334
1335 1335 return content, buffers
1336 1336
1337 1337 def get_results(self, client_id, msg):
1338 1338 """Get the result of 1 or more messages."""
1339 1339 content = msg['content']
1340 1340 msg_ids = sorted(set(content['msg_ids']))
1341 1341 statusonly = content.get('status_only', False)
1342 1342 pending = []
1343 1343 completed = []
1344 1344 content = dict(status='ok')
1345 1345 content['pending'] = pending
1346 1346 content['completed'] = completed
1347 1347 buffers = []
1348 1348 if not statusonly:
1349 1349 try:
1350 1350 matches = self.db.find_records(dict(msg_id={'$in':msg_ids}))
1351 1351 # turn match list into dict, for faster lookup
1352 1352 records = {}
1353 1353 for rec in matches:
1354 1354 records[rec['msg_id']] = rec
1355 1355 except Exception:
1356 1356 content = error.wrap_exception()
1357 1357 self.log.exception("Failed to get results")
1358 1358 self.session.send(self.query, "result_reply", content=content,
1359 1359 parent=msg, ident=client_id)
1360 1360 return
1361 1361 else:
1362 1362 records = {}
1363 1363 for msg_id in msg_ids:
1364 1364 if msg_id in self.pending:
1365 1365 pending.append(msg_id)
1366 1366 elif msg_id in self.all_completed:
1367 1367 completed.append(msg_id)
1368 1368 if not statusonly:
1369 1369 c,bufs = self._extract_record(records[msg_id])
1370 1370 content[msg_id] = c
1371 1371 buffers.extend(bufs)
1372 1372 elif msg_id in records:
 1373 1373                 if records[msg_id]['completed']:
1374 1374 completed.append(msg_id)
1375 1375 c,bufs = self._extract_record(records[msg_id])
1376 1376 content[msg_id] = c
1377 1377 buffers.extend(bufs)
1378 1378 else:
1379 1379 pending.append(msg_id)
1380 1380 else:
1381 1381 try:
1382 1382 raise KeyError('No such message: '+msg_id)
1383 1383 except:
1384 1384 content = error.wrap_exception()
1385 1385 break
1386 1386 self.session.send(self.query, "result_reply", content=content,
1387 1387 parent=msg, ident=client_id,
1388 1388 buffers=buffers)
1389 1389
1390 1390 def get_history(self, client_id, msg):
1391 1391 """Get a list of all msg_ids in our DB records"""
1392 1392 try:
1393 1393 msg_ids = self.db.get_history()
 1394 1394         except Exception:
1395 1395 content = error.wrap_exception()
1396 1396 self.log.exception("Failed to get history")
1397 1397 else:
1398 1398 content = dict(status='ok', history=msg_ids)
1399 1399
1400 1400 self.session.send(self.query, "history_reply", content=content,
1401 1401 parent=msg, ident=client_id)
1402 1402
1403 1403 def db_query(self, client_id, msg):
1404 1404 """Perform a raw query on the task record database."""
1405 1405 content = msg['content']
1406 1406 query = extract_dates(content.get('query', {}))
1407 1407 keys = content.get('keys', None)
1408 1408 buffers = []
1409 1409 empty = list()
1410 1410 try:
1411 1411 records = self.db.find_records(query, keys)
 1412 1412         except Exception:
1413 1413 content = error.wrap_exception()
1414 1414 self.log.exception("DB query failed")
1415 1415 else:
1416 1416 # extract buffers from reply content:
1417 1417 if keys is not None:
1418 1418 buffer_lens = [] if 'buffers' in keys else None
1419 1419 result_buffer_lens = [] if 'result_buffers' in keys else None
1420 1420 else:
1421 1421 buffer_lens = None
1422 1422 result_buffer_lens = None
1423 1423
1424 1424 for rec in records:
1425 1425 # buffers may be None, so double check
1426 1426 b = rec.pop('buffers', empty) or empty
1427 1427 if buffer_lens is not None:
1428 1428 buffer_lens.append(len(b))
1429 1429 buffers.extend(b)
1430 1430 rb = rec.pop('result_buffers', empty) or empty
1431 1431 if result_buffer_lens is not None:
1432 1432 result_buffer_lens.append(len(rb))
1433 1433 buffers.extend(rb)
1434 1434 content = dict(status='ok', records=records, buffer_lens=buffer_lens,
1435 1435 result_buffer_lens=result_buffer_lens)
1436 1436 # self.log.debug (content)
1437 1437 self.session.send(self.query, "db_reply", content=content,
1438 1438 parent=msg, ident=client_id,
1439 1439 buffers=buffers)
1440 1440
@@ -1,414 +1,414 b''
1 1 """A TaskRecord backend using sqlite3"""
2 2
3 3 # Copyright (c) IPython Development Team.
4 4 # Distributed under the terms of the Modified BSD License.
5 5
6 6 import json
7 7 import os
8 8 try:
9 9 import cPickle as pickle
10 10 except ImportError:
11 11 import pickle
12 12 from datetime import datetime
13 13
14 14 try:
15 15 import sqlite3
16 16 except ImportError:
17 17 sqlite3 = None
18 18
19 19 from zmq.eventloop import ioloop
20 20
21 21 from IPython.utils.traitlets import Unicode, Instance, List, Dict
22 22 from .dictdb import BaseDB
23 23 from IPython.utils.jsonutil import date_default, extract_dates, squash_dates
24 24 from IPython.utils.py3compat import iteritems
25 25
26 26 #-----------------------------------------------------------------------------
27 27 # SQLite operators, adapters, and converters
28 28 #-----------------------------------------------------------------------------
29 29
30 30 try:
31 31 buffer
32 32 except NameError:
33 33 # py3k
34 34 buffer = memoryview
35 35
36 36 operators = {
37 37 '$lt' : "<",
38 38 '$gt' : ">",
 39 39     # NULL needs special handling with =/!=
40 40 '$eq' : "=",
41 41 '$ne' : "!=",
42 42 '$lte': "<=",
43 43 '$gte': ">=",
44 44 '$in' : ('=', ' OR '),
45 45 '$nin': ('!=', ' AND '),
46 46 # '$all': None,
47 47 # '$mod': None,
48 48 # '$exists' : None
49 49 }
50 50 null_operators = {
51 51 '=' : "IS NULL",
52 52 '!=' : "IS NOT NULL",
53 53 }
54 54
55 55 def _adapt_dict(d):
56 56 return json.dumps(d, default=date_default)
57 57
58 58 def _convert_dict(ds):
59 59 if ds is None:
60 60 return ds
61 61 else:
62 62 if isinstance(ds, bytes):
63 63 # If I understand the sqlite doc correctly, this will always be utf8
64 64 ds = ds.decode('utf8')
65 65 return extract_dates(json.loads(ds))
66 66
67 67 def _adapt_bufs(bufs):
68 68 # this is *horrible*
69 69 # copy buffers into single list and pickle it:
70 70 if bufs and isinstance(bufs[0], (bytes, buffer)):
71 71 return sqlite3.Binary(pickle.dumps(list(map(bytes, bufs)),-1))
72 72 elif bufs:
73 73 return bufs
74 74 else:
75 75 return None
76 76
77 77 def _convert_bufs(bs):
78 78 if bs is None:
79 79 return []
80 80 else:
81 81 return pickle.loads(bytes(bs))
82 82
83 83 #-----------------------------------------------------------------------------
84 84 # SQLiteDB class
85 85 #-----------------------------------------------------------------------------
86 86
87 87 class SQLiteDB(BaseDB):
88 88 """SQLite3 TaskRecord backend."""
89 89
90 90 filename = Unicode('tasks.db', config=True,
91 91 help="""The filename of the sqlite task database. [default: 'tasks.db']""")
92 92 location = Unicode('', config=True,
93 93 help="""The directory containing the sqlite task database. The default
94 94 is to use the cluster_dir location.""")
95 95 table = Unicode("ipython-tasks", config=True,
96 96 help="""The SQLite Table to use for storing tasks for this session. If unspecified,
97 97 a new table will be created with the Hub's IDENT. Specifying the table will result
98 98 in tasks from previous sessions being available via Clients' db_query and
99 99 get_result methods.""")
100 100
101 101 if sqlite3 is not None:
102 102 _db = Instance('sqlite3.Connection')
103 103 else:
104 104 _db = None
105 105 # the ordered list of column names
106 106 _keys = List(['msg_id' ,
107 107 'header' ,
108 108 'metadata',
109 109 'content',
110 110 'buffers',
111 111 'submitted',
112 112 'client_uuid' ,
113 113 'engine_uuid' ,
114 114 'started',
115 115 'completed',
116 116 'resubmitted',
117 117 'received',
118 118 'result_header' ,
119 119 'result_metadata',
120 120 'result_content' ,
121 121 'result_buffers' ,
122 122 'queue' ,
123 123 'execute_input' ,
124 124 'execute_result',
125 'pyerr',
125 'error',
126 126 'stdout',
127 127 'stderr',
128 128 ])
129 129 # sqlite datatypes for checking that db is current format
130 130 _types = Dict({'msg_id' : 'text' ,
131 131 'header' : 'dict text',
132 132 'metadata' : 'dict text',
133 133 'content' : 'dict text',
134 134 'buffers' : 'bufs blob',
135 135 'submitted' : 'timestamp',
136 136 'client_uuid' : 'text',
137 137 'engine_uuid' : 'text',
138 138 'started' : 'timestamp',
139 139 'completed' : 'timestamp',
140 140 'resubmitted' : 'text',
141 141 'received' : 'timestamp',
142 142 'result_header' : 'dict text',
143 143 'result_metadata' : 'dict text',
144 144 'result_content' : 'dict text',
145 145 'result_buffers' : 'bufs blob',
146 146 'queue' : 'text',
147 147 'execute_input' : 'text',
148 148 'execute_result' : 'text',
149 'pyerr' : 'text',
149 'error' : 'text',
150 150 'stdout' : 'text',
151 151 'stderr' : 'text',
152 152 })
153 153
154 154 def __init__(self, **kwargs):
155 155 super(SQLiteDB, self).__init__(**kwargs)
156 156 if sqlite3 is None:
157 157 raise ImportError("SQLiteDB requires sqlite3")
158 158 if not self.table:
159 159 # use session, and prefix _, since starting with # is illegal
160 160 self.table = '_'+self.session.replace('-','_')
161 161 if not self.location:
162 162 # get current profile
163 163 from IPython.core.application import BaseIPythonApplication
164 164 if BaseIPythonApplication.initialized():
165 165 app = BaseIPythonApplication.instance()
166 166 if app.profile_dir is not None:
167 167 self.location = app.profile_dir.location
168 168 else:
169 169 self.location = u'.'
170 170 else:
171 171 self.location = u'.'
172 172 self._init_db()
173 173
174 174 # register db commit as 2s periodic callback
175 175 # to prevent clogging pipes
176 176 # assumes we are being run in a zmq ioloop app
177 177 loop = ioloop.IOLoop.instance()
178 178 pc = ioloop.PeriodicCallback(self._db.commit, 2000, loop)
179 179 pc.start()
180 180
181 181 def _defaults(self, keys=None):
182 182 """create an empty record"""
183 183 d = {}
184 184 keys = self._keys if keys is None else keys
185 185 for key in keys:
186 186 d[key] = None
187 187 return d
188 188
189 189 def _check_table(self):
190 190 """Ensure that an incorrect table doesn't exist
191 191
192 192 If a bad (old) table does exist, return False
193 193 """
194 194 cursor = self._db.execute("PRAGMA table_info('%s')"%self.table)
195 195 lines = cursor.fetchall()
196 196 if not lines:
197 197 # table does not exist
198 198 return True
199 199 types = {}
200 200 keys = []
201 201 for line in lines:
202 202 keys.append(line[1])
203 203 types[line[1]] = line[2]
204 204 if self._keys != keys:
205 205 # key mismatch
206 206 self.log.warn('keys mismatch')
207 207 return False
208 208 for key in self._keys:
209 209 if types[key] != self._types[key]:
210 210 self.log.warn(
211 211 'type mismatch: %s: %s != %s'%(key,types[key],self._types[key])
212 212 )
213 213 return False
214 214 return True
215 215
216 216 def _init_db(self):
217 217 """Connect to the database and get new session number."""
218 218 # register adapters
219 219 sqlite3.register_adapter(dict, _adapt_dict)
220 220 sqlite3.register_converter('dict', _convert_dict)
221 221 sqlite3.register_adapter(list, _adapt_bufs)
222 222 sqlite3.register_converter('bufs', _convert_bufs)
223 223 # connect to the db
224 224 dbfile = os.path.join(self.location, self.filename)
225 225 self._db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES,
226 226 # isolation_level = None)#,
227 227 cached_statements=64)
228 228 # print dir(self._db)
229 229 first_table = previous_table = self.table
230 230 i=0
231 231 while not self._check_table():
232 232 i+=1
233 233 self.table = first_table+'_%i'%i
234 234 self.log.warn(
235 235 "Table %s exists and doesn't match db format, trying %s"%
236 236 (previous_table, self.table)
237 237 )
238 238 previous_table = self.table
239 239
240 240 self._db.execute("""CREATE TABLE IF NOT EXISTS '%s'
241 241 (msg_id text PRIMARY KEY,
242 242 header dict text,
243 243 metadata dict text,
244 244 content dict text,
245 245 buffers bufs blob,
246 246 submitted timestamp,
247 247 client_uuid text,
248 248 engine_uuid text,
249 249 started timestamp,
250 250 completed timestamp,
251 251 resubmitted text,
252 252 received timestamp,
253 253 result_header dict text,
254 254 result_metadata dict text,
255 255 result_content dict text,
256 256 result_buffers bufs blob,
257 257 queue text,
258 258 execute_input text,
259 259 execute_result text,
260 pyerr text,
260 error text,
261 261 stdout text,
262 262 stderr text)
263 263 """%self.table)
264 264 self._db.commit()
265 265
266 266 def _dict_to_list(self, d):
267 267 """turn a mongodb-style record dict into a list."""
268 268
269 269 return [ d[key] for key in self._keys ]
270 270
271 271 def _list_to_dict(self, line, keys=None):
272 272 """Inverse of dict_to_list"""
273 273 keys = self._keys if keys is None else keys
274 274 d = self._defaults(keys)
275 275 for key,value in zip(keys, line):
276 276 d[key] = value
277 277
278 278 return d
279 279
280 280 def _render_expression(self, check):
281 281 """Turn a mongodb-style search dict into an SQL query."""
282 282 expressions = []
283 283 args = []
284 284
285 285 skeys = set(check.keys())
286 286 skeys.difference_update(set(self._keys))
287 287 skeys.difference_update(set(['buffers', 'result_buffers']))
288 288 if skeys:
289 289 raise KeyError("Illegal testing key(s): %s"%skeys)
290 290
291 291 for name,sub_check in iteritems(check):
292 292 if isinstance(sub_check, dict):
293 293 for test,value in iteritems(sub_check):
294 294 try:
295 295 op = operators[test]
296 296 except KeyError:
297 297 raise KeyError("Unsupported operator: %r"%test)
298 298 if isinstance(op, tuple):
299 299 op, join = op
300 300
301 301 if value is None and op in null_operators:
302 302 expr = "%s %s" % (name, null_operators[op])
303 303 else:
304 304 expr = "%s %s ?"%(name, op)
305 305 if isinstance(value, (tuple,list)):
306 306 if op in null_operators and any([v is None for v in value]):
307 307 # equality tests don't work with NULL
308 308 raise ValueError("Cannot use %r test with NULL values on SQLite backend"%test)
309 309 expr = '( %s )'%( join.join([expr]*len(value)) )
310 310 args.extend(value)
311 311 else:
312 312 args.append(value)
313 313 expressions.append(expr)
314 314 else:
315 315 # it's an equality check
316 316 if sub_check is None:
317 317 expressions.append("%s IS NULL" % name)
318 318 else:
319 319 expressions.append("%s = ?"%name)
320 320 args.append(sub_check)
321 321
322 322 expr = " AND ".join(expressions)
323 323 return expr, args
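        # For example (illustrative): the check
        #   {'completed': {'$ne': None}, 'engine_uuid': 'abc'}
        # renders to "completed IS NOT NULL AND engine_uuid = ?" with args
        # ['abc'] (clause order follows dict iteration order).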
324 324
325 325 def add_record(self, msg_id, rec):
326 326 """Add a new Task Record, by msg_id."""
327 327 d = self._defaults()
328 328 d.update(rec)
329 329 d['msg_id'] = msg_id
330 330 line = self._dict_to_list(d)
331 331 tups = '(%s)'%(','.join(['?']*len(line)))
332 332 self._db.execute("INSERT INTO '%s' VALUES %s"%(self.table, tups), line)
333 333 # self._db.commit()
334 334
335 335 def get_record(self, msg_id):
336 336 """Get a specific Task Record, by msg_id."""
337 337 cursor = self._db.execute("""SELECT * FROM '%s' WHERE msg_id==?"""%self.table, (msg_id,))
338 338 line = cursor.fetchone()
339 339 if line is None:
340 340 raise KeyError("No such msg: %r"%msg_id)
341 341 return self._list_to_dict(line)
342 342
343 343 def update_record(self, msg_id, rec):
344 344 """Update the data in an existing record."""
345 345 query = "UPDATE '%s' SET "%self.table
346 346 sets = []
347 347 keys = sorted(rec.keys())
348 348 values = []
349 349 for key in keys:
350 350 sets.append('%s = ?'%key)
351 351 values.append(rec[key])
352 352 query += ', '.join(sets)
353 353 query += ' WHERE msg_id == ?'
354 354 values.append(msg_id)
355 355 self._db.execute(query, values)
356 356 # self._db.commit()
357 357
358 358 def drop_record(self, msg_id):
359 359 """Remove a record from the DB."""
360 360 self._db.execute("""DELETE FROM '%s' WHERE msg_id==?"""%self.table, (msg_id,))
361 361 # self._db.commit()
362 362
363 363 def drop_matching_records(self, check):
364 364 """Remove a record from the DB."""
365 365 expr,args = self._render_expression(check)
366 366 query = "DELETE FROM '%s' WHERE %s"%(self.table, expr)
367 367 self._db.execute(query,args)
368 368 # self._db.commit()
369 369
370 370 def find_records(self, check, keys=None):
371 371 """Find records matching a query dict, optionally extracting subset of keys.
372 372
373 373 Returns list of matching records.
374 374
375 375 Parameters
376 376 ----------
377 377
378 378 check: dict
379 379 mongodb-style query argument
380 380 keys: list of strs [optional]
381 381 if specified, the subset of keys to extract. msg_id will *always* be
382 382 included.
383 383 """
384 384 if keys:
385 385 bad_keys = [ key for key in keys if key not in self._keys ]
386 386 if bad_keys:
387 387 raise KeyError("Bad record key(s): %s"%bad_keys)
388 388
389 389 if keys:
390 390 # ensure msg_id is present and first:
391 391 if 'msg_id' in keys:
392 392 keys.remove('msg_id')
393 393 keys.insert(0, 'msg_id')
394 394 req = ', '.join(keys)
395 395 else:
396 396 req = '*'
397 397 expr,args = self._render_expression(check)
398 398 query = """SELECT %s FROM '%s' WHERE %s"""%(req, self.table, expr)
399 399 cursor = self._db.execute(query, args)
400 400 matches = cursor.fetchall()
401 401 records = []
402 402 for line in matches:
403 403 rec = self._list_to_dict(line, keys)
404 404 records.append(rec)
405 405 return records
406 406
407 407 def get_history(self):
408 408 """get all msg_ids, ordered by time submitted."""
409 409 query = """SELECT msg_id FROM '%s' ORDER by submitted ASC"""%self.table
410 410 cursor = self._db.execute(query)
411 411 # will be a list of length 1 tuples
412 412 return [ tup[0] for tup in cursor.fetchall()]
413 413
414 414 __all__ = ['SQLiteDB'] No newline at end of file
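As a usage note, a sketch of querying this backend directly for the renamed column (standalone construction is hypothetical; the Hub normally creates the instance and supplies its configuration):

    from IPython.parallel.controller.sqlitedb import SQLiteDB

    db = SQLiteDB(location='.', filename='tasks.db', table='mytasks')  # assumed config
    # records that captured a traceback live under 'error' now, not 'pyerr'
    failed = db.find_records({'error': {'$ne': None}}, keys=['msg_id', 'error'])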
@@ -1,215 +1,215 b''
1 1 """Defines a KernelManager that provides signals and slots."""
2 2
3 3 # Copyright (c) IPython Development Team.
4 4 # Distributed under the terms of the Modified BSD License.
5 5
6 6 from IPython.external.qt import QtCore
7 7
8 8 from IPython.utils.traitlets import HasTraits, Type
9 9 from .util import MetaQObjectHasTraits, SuperQObject
10 10
11 11
12 12 class ChannelQObject(SuperQObject):
13 13
14 14 # Emitted when the channel is started.
15 15 started = QtCore.Signal()
16 16
17 17 # Emitted when the channel is stopped.
18 18 stopped = QtCore.Signal()
19 19
20 20 #---------------------------------------------------------------------------
21 21 # Channel interface
22 22 #---------------------------------------------------------------------------
23 23
24 24 def start(self):
25 25 """ Reimplemented to emit signal.
26 26 """
27 27 super(ChannelQObject, self).start()
28 28 self.started.emit()
29 29
30 30 def stop(self):
31 31 """ Reimplemented to emit signal.
32 32 """
33 33 super(ChannelQObject, self).stop()
34 34 self.stopped.emit()
35 35
36 36 #---------------------------------------------------------------------------
37 37 # InProcessChannel interface
38 38 #---------------------------------------------------------------------------
39 39
40 40 def call_handlers_later(self, *args, **kwds):
41 41 """ Call the message handlers later.
42 42 """
43 43 do_later = lambda: self.call_handlers(*args, **kwds)
44 44 QtCore.QTimer.singleShot(0, do_later)
45 45
46 46 def process_events(self):
47 47 """ Process any pending GUI events.
48 48 """
49 49 QtCore.QCoreApplication.instance().processEvents()
50 50
51 51
52 52 class QtShellChannelMixin(ChannelQObject):
53 53
54 54 # Emitted when any message is received.
55 55 message_received = QtCore.Signal(object)
56 56
57 57 # Emitted when a reply has been received for the corresponding request type.
58 58 execute_reply = QtCore.Signal(object)
59 59 complete_reply = QtCore.Signal(object)
60 60 object_info_reply = QtCore.Signal(object)
61 61 history_reply = QtCore.Signal(object)
62 62
63 63 #---------------------------------------------------------------------------
64 64 # 'ShellChannel' interface
65 65 #---------------------------------------------------------------------------
66 66
67 67 def call_handlers(self, msg):
68 68 """ Reimplemented to emit signals instead of making callbacks.
69 69 """
70 70 # Emit the generic signal.
71 71 self.message_received.emit(msg)
72 72
73 73 # Emit signals for specialized message types.
74 74 msg_type = msg['header']['msg_type']
75 75 signal = getattr(self, msg_type, None)
76 76 if signal:
77 77 signal.emit(msg)
78 78
79 79
80 80 class QtIOPubChannelMixin(ChannelQObject):
81 81
82 82 # Emitted when any message is received.
83 83 message_received = QtCore.Signal(object)
84 84
85 85 # Emitted when a message of type 'stream' is received.
86 86 stream_received = QtCore.Signal(object)
87 87
88 88 # Emitted when a message of type 'execute_input' is received.
89 89 execute_input_received = QtCore.Signal(object)
90 90
91 91 # Emitted when a message of type 'execute_result' is received.
92 92 execute_result_received = QtCore.Signal(object)
93 93
94 # Emitted when a message of type 'pyerr' is received.
95 pyerr_received = QtCore.Signal(object)
94 # Emitted when a message of type 'error' is received.
95 error_received = QtCore.Signal(object)
96 96
97 97 # Emitted when a message of type 'display_data' is received
98 98 display_data_received = QtCore.Signal(object)
99 99
100 100 # Emitted when a crash report message is received from the kernel's
101 101 # last-resort sys.excepthook.
102 102 crash_received = QtCore.Signal(object)
103 103
104 104 # Emitted when a shutdown is noticed.
105 105 shutdown_reply_received = QtCore.Signal(object)
106 106
107 107 #---------------------------------------------------------------------------
108 108 # 'IOPubChannel' interface
109 109 #---------------------------------------------------------------------------
110 110
111 111 def call_handlers(self, msg):
112 112 """ Reimplemented to emit signals instead of making callbacks.
113 113 """
114 114 # Emit the generic signal.
115 115 self.message_received.emit(msg)
116 116 # Emit signals for specialized message types.
117 117 msg_type = msg['header']['msg_type']
118 118 signal = getattr(self, msg_type + '_received', None)
119 119 if signal:
120 120 signal.emit(msg)
121 121 elif msg_type in ('stdout', 'stderr'):
122 122 self.stream_received.emit(msg)
123 123
124 124 def flush(self):
125 125 """ Reimplemented to ensure that signals are dispatched immediately.
126 126 """
127 127 super(QtIOPubChannelMixin, self).flush()
128 128 QtCore.QCoreApplication.instance().processEvents()
129 129
130 130
131 131 class QtStdInChannelMixin(ChannelQObject):
132 132
133 133 # Emitted when any message is received.
134 134 message_received = QtCore.Signal(object)
135 135
136 136 # Emitted when an input request is received.
137 137 input_requested = QtCore.Signal(object)
138 138
139 139 #---------------------------------------------------------------------------
140 140 # 'StdInChannel' interface
141 141 #---------------------------------------------------------------------------
142 142
143 143 def call_handlers(self, msg):
144 144 """ Reimplemented to emit signals instead of making callbacks.
145 145 """
146 146 # Emit the generic signal.
147 147 self.message_received.emit(msg)
148 148
149 149 # Emit signals for specialized message types.
150 150 msg_type = msg['header']['msg_type']
151 151 if msg_type == 'input_request':
152 152 self.input_requested.emit(msg)
153 153
154 154
155 155 class QtHBChannelMixin(ChannelQObject):
156 156
157 157 # Emitted when the kernel has died.
158 158 kernel_died = QtCore.Signal(object)
159 159
160 160 #---------------------------------------------------------------------------
161 161 # 'HBChannel' interface
162 162 #---------------------------------------------------------------------------
163 163
164 164 def call_handlers(self, since_last_heartbeat):
165 165 """ Reimplemented to emit signals instead of making callbacks.
166 166 """
167 167 # Emit the generic signal.
168 168 self.kernel_died.emit(since_last_heartbeat)
169 169
170 170
171 171 class QtKernelRestarterMixin(MetaQObjectHasTraits('NewBase', (HasTraits, SuperQObject), {})):
172 172
173 173 _timer = None
174 174
175 175
176 176 class QtKernelManagerMixin(MetaQObjectHasTraits('NewBase', (HasTraits, SuperQObject), {})):
 177 177     """ A KernelManager that provides signals and slots.
178 178 """
179 179
180 180 kernel_restarted = QtCore.Signal()
181 181
182 182
183 183 class QtKernelClientMixin(MetaQObjectHasTraits('NewBase', (HasTraits, SuperQObject), {})):
184 184 """ A KernelClient that provides signals and slots.
185 185 """
186 186
187 187 # Emitted when the kernel client has started listening.
188 188 started_channels = QtCore.Signal()
189 189
190 190 # Emitted when the kernel client has stopped listening.
191 191 stopped_channels = QtCore.Signal()
192 192
193 193 # Use Qt-specific channel classes that emit signals.
194 194 iopub_channel_class = Type(QtIOPubChannelMixin)
195 195 shell_channel_class = Type(QtShellChannelMixin)
196 196 stdin_channel_class = Type(QtStdInChannelMixin)
197 197 hb_channel_class = Type(QtHBChannelMixin)
198 198
199 199 #---------------------------------------------------------------------------
200 200 # 'KernelClient' interface
201 201 #---------------------------------------------------------------------------
202 202
203 203 #------ Channel management -------------------------------------------------
204 204
205 205 def start_channels(self, *args, **kw):
206 206 """ Reimplemented to emit signal.
207 207 """
208 208 super(QtKernelClientMixin, self).start_channels(*args, **kw)
209 209 self.started_channels.emit()
210 210
211 211 def stop_channels(self):
212 212 """ Reimplemented to emit signal.
213 213 """
214 214 super(QtKernelClientMixin, self).stop_channels()
215 215 self.stopped_channels.emit()
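Frontends listening for tracebacks connect to the renamed signal; a minimal sketch, assuming an already-started QtKernelClient named `client` (the handler is illustrative):

    def on_error(msg):
        # the content carries ename/evalue/traceback, as the old pyerr did
        print(msg['content']['ename'])

    client.iopub_channel.error_received.connect(on_error)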