##// END OF EJS Templates
Fixing code to assume msg_type and msg_id are top-level....
Brian E. Granger -
Show More
@@ -1,109 +1,109 b''
1 """ Defines a convenient mix-in class for implementing Qt frontends.
1 """ Defines a convenient mix-in class for implementing Qt frontends.
2 """
2 """
3
3
4 class BaseFrontendMixin(object):
4 class BaseFrontendMixin(object):
5 """ A mix-in class for implementing Qt frontends.
5 """ A mix-in class for implementing Qt frontends.
6
6
7 To handle messages of a particular type, frontends need only define an
7 To handle messages of a particular type, frontends need only define an
8 appropriate handler method. For example, to handle 'stream' messaged, define
8 appropriate handler method. For example, to handle 'stream' messaged, define
9 a '_handle_stream(msg)' method.
9 a '_handle_stream(msg)' method.
10 """
10 """
11
11
12 #---------------------------------------------------------------------------
12 #---------------------------------------------------------------------------
13 # 'BaseFrontendMixin' concrete interface
13 # 'BaseFrontendMixin' concrete interface
14 #---------------------------------------------------------------------------
14 #---------------------------------------------------------------------------
15
15
16 def _get_kernel_manager(self):
16 def _get_kernel_manager(self):
17 """ Returns the current kernel manager.
17 """ Returns the current kernel manager.
18 """
18 """
19 return self._kernel_manager
19 return self._kernel_manager
20
20
21 def _set_kernel_manager(self, kernel_manager):
21 def _set_kernel_manager(self, kernel_manager):
22 """ Disconnect from the current kernel manager (if any) and set a new
22 """ Disconnect from the current kernel manager (if any) and set a new
23 kernel manager.
23 kernel manager.
24 """
24 """
25 # Disconnect the old kernel manager, if necessary.
25 # Disconnect the old kernel manager, if necessary.
26 old_manager = self._kernel_manager
26 old_manager = self._kernel_manager
27 if old_manager is not None:
27 if old_manager is not None:
28 old_manager.started_channels.disconnect(self._started_channels)
28 old_manager.started_channels.disconnect(self._started_channels)
29 old_manager.stopped_channels.disconnect(self._stopped_channels)
29 old_manager.stopped_channels.disconnect(self._stopped_channels)
30
30
31 # Disconnect the old kernel manager's channels.
31 # Disconnect the old kernel manager's channels.
32 old_manager.sub_channel.message_received.disconnect(self._dispatch)
32 old_manager.sub_channel.message_received.disconnect(self._dispatch)
33 old_manager.shell_channel.message_received.disconnect(self._dispatch)
33 old_manager.shell_channel.message_received.disconnect(self._dispatch)
34 old_manager.stdin_channel.message_received.disconnect(self._dispatch)
34 old_manager.stdin_channel.message_received.disconnect(self._dispatch)
35 old_manager.hb_channel.kernel_died.disconnect(
35 old_manager.hb_channel.kernel_died.disconnect(
36 self._handle_kernel_died)
36 self._handle_kernel_died)
37
37
38 # Handle the case where the old kernel manager is still listening.
38 # Handle the case where the old kernel manager is still listening.
39 if old_manager.channels_running:
39 if old_manager.channels_running:
40 self._stopped_channels()
40 self._stopped_channels()
41
41
42 # Set the new kernel manager.
42 # Set the new kernel manager.
43 self._kernel_manager = kernel_manager
43 self._kernel_manager = kernel_manager
44 if kernel_manager is None:
44 if kernel_manager is None:
45 return
45 return
46
46
47 # Connect the new kernel manager.
47 # Connect the new kernel manager.
48 kernel_manager.started_channels.connect(self._started_channels)
48 kernel_manager.started_channels.connect(self._started_channels)
49 kernel_manager.stopped_channels.connect(self._stopped_channels)
49 kernel_manager.stopped_channels.connect(self._stopped_channels)
50
50
51 # Connect the new kernel manager's channels.
51 # Connect the new kernel manager's channels.
52 kernel_manager.sub_channel.message_received.connect(self._dispatch)
52 kernel_manager.sub_channel.message_received.connect(self._dispatch)
53 kernel_manager.shell_channel.message_received.connect(self._dispatch)
53 kernel_manager.shell_channel.message_received.connect(self._dispatch)
54 kernel_manager.stdin_channel.message_received.connect(self._dispatch)
54 kernel_manager.stdin_channel.message_received.connect(self._dispatch)
55 kernel_manager.hb_channel.kernel_died.connect(self._handle_kernel_died)
55 kernel_manager.hb_channel.kernel_died.connect(self._handle_kernel_died)
56
56
57 # Handle the case where the kernel manager started channels before
57 # Handle the case where the kernel manager started channels before
58 # we connected.
58 # we connected.
59 if kernel_manager.channels_running:
59 if kernel_manager.channels_running:
60 self._started_channels()
60 self._started_channels()
61
61
62 kernel_manager = property(_get_kernel_manager, _set_kernel_manager)
62 kernel_manager = property(_get_kernel_manager, _set_kernel_manager)
63
63
64 #---------------------------------------------------------------------------
64 #---------------------------------------------------------------------------
65 # 'BaseFrontendMixin' abstract interface
65 # 'BaseFrontendMixin' abstract interface
66 #---------------------------------------------------------------------------
66 #---------------------------------------------------------------------------
67
67
68 def _handle_kernel_died(self, since_last_heartbeat):
68 def _handle_kernel_died(self, since_last_heartbeat):
69 """ This is called when the ``kernel_died`` signal is emitted.
69 """ This is called when the ``kernel_died`` signal is emitted.
70
70
71 This method is called when the kernel heartbeat has not been
71 This method is called when the kernel heartbeat has not been
72 active for a certain amount of time. The typical action will be to
72 active for a certain amount of time. The typical action will be to
73 give the user the option of restarting the kernel.
73 give the user the option of restarting the kernel.
74
74
75 Parameters
75 Parameters
76 ----------
76 ----------
77 since_last_heartbeat : float
77 since_last_heartbeat : float
78 The time since the heartbeat was last received.
78 The time since the heartbeat was last received.
79 """
79 """
80
80
81 def _started_channels(self):
81 def _started_channels(self):
82 """ Called when the KernelManager channels have started listening or
82 """ Called when the KernelManager channels have started listening or
83 when the frontend is assigned an already listening KernelManager.
83 when the frontend is assigned an already listening KernelManager.
84 """
84 """
85
85
86 def _stopped_channels(self):
86 def _stopped_channels(self):
87 """ Called when the KernelManager channels have stopped listening or
87 """ Called when the KernelManager channels have stopped listening or
88 when a listening KernelManager is removed from the frontend.
88 when a listening KernelManager is removed from the frontend.
89 """
89 """
90
90
91 #---------------------------------------------------------------------------
91 #---------------------------------------------------------------------------
92 # 'BaseFrontendMixin' protected interface
92 # 'BaseFrontendMixin' protected interface
93 #---------------------------------------------------------------------------
93 #---------------------------------------------------------------------------
94
94
95 def _dispatch(self, msg):
95 def _dispatch(self, msg):
96 """ Calls the frontend handler associated with the message type of the
96 """ Calls the frontend handler associated with the message type of the
97 given message.
97 given message.
98 """
98 """
99 msg_type = msg['msg_type']
99 msg_type = msg['header']['msg_type']
100 handler = getattr(self, '_handle_' + msg_type, None)
100 handler = getattr(self, '_handle_' + msg_type, None)
101 if handler:
101 if handler:
102 handler(msg)
102 handler(msg)
103
103
104 def _is_from_this_session(self, msg):
104 def _is_from_this_session(self, msg):
105 """ Returns whether a reply from the kernel originated from a request
105 """ Returns whether a reply from the kernel originated from a request
106 from this frontend.
106 from this frontend.
107 """
107 """
108 session = self._kernel_manager.session.session
108 session = self._kernel_manager.session.session
109 return msg['parent_header']['session'] == session
109 return msg['parent_header']['session'] == session
@@ -1,243 +1,243 b''
1 """ Defines a KernelManager that provides signals and slots.
1 """ Defines a KernelManager that provides signals and slots.
2 """
2 """
3
3
4 # System library imports.
4 # System library imports.
5 from IPython.external.qt import QtCore
5 from IPython.external.qt import QtCore
6
6
7 # IPython imports.
7 # IPython imports.
8 from IPython.utils.traitlets import Type
8 from IPython.utils.traitlets import Type
9 from IPython.zmq.kernelmanager import KernelManager, SubSocketChannel, \
9 from IPython.zmq.kernelmanager import KernelManager, SubSocketChannel, \
10 ShellSocketChannel, StdInSocketChannel, HBSocketChannel
10 ShellSocketChannel, StdInSocketChannel, HBSocketChannel
11 from util import MetaQObjectHasTraits, SuperQObject
11 from util import MetaQObjectHasTraits, SuperQObject
12
12
13
13
14 class SocketChannelQObject(SuperQObject):
14 class SocketChannelQObject(SuperQObject):
15
15
16 # Emitted when the channel is started.
16 # Emitted when the channel is started.
17 started = QtCore.Signal()
17 started = QtCore.Signal()
18
18
19 # Emitted when the channel is stopped.
19 # Emitted when the channel is stopped.
20 stopped = QtCore.Signal()
20 stopped = QtCore.Signal()
21
21
22 #---------------------------------------------------------------------------
22 #---------------------------------------------------------------------------
23 # 'ZMQSocketChannel' interface
23 # 'ZMQSocketChannel' interface
24 #---------------------------------------------------------------------------
24 #---------------------------------------------------------------------------
25
25
26 def start(self):
26 def start(self):
27 """ Reimplemented to emit signal.
27 """ Reimplemented to emit signal.
28 """
28 """
29 super(SocketChannelQObject, self).start()
29 super(SocketChannelQObject, self).start()
30 self.started.emit()
30 self.started.emit()
31
31
32 def stop(self):
32 def stop(self):
33 """ Reimplemented to emit signal.
33 """ Reimplemented to emit signal.
34 """
34 """
35 super(SocketChannelQObject, self).stop()
35 super(SocketChannelQObject, self).stop()
36 self.stopped.emit()
36 self.stopped.emit()
37
37
38
38
39 class QtShellSocketChannel(SocketChannelQObject, ShellSocketChannel):
39 class QtShellSocketChannel(SocketChannelQObject, ShellSocketChannel):
40
40
41 # Emitted when any message is received.
41 # Emitted when any message is received.
42 message_received = QtCore.Signal(object)
42 message_received = QtCore.Signal(object)
43
43
44 # Emitted when a reply has been received for the corresponding request
44 # Emitted when a reply has been received for the corresponding request
45 # type.
45 # type.
46 execute_reply = QtCore.Signal(object)
46 execute_reply = QtCore.Signal(object)
47 complete_reply = QtCore.Signal(object)
47 complete_reply = QtCore.Signal(object)
48 object_info_reply = QtCore.Signal(object)
48 object_info_reply = QtCore.Signal(object)
49 history_reply = QtCore.Signal(object)
49 history_reply = QtCore.Signal(object)
50
50
51 # Emitted when the first reply comes back.
51 # Emitted when the first reply comes back.
52 first_reply = QtCore.Signal()
52 first_reply = QtCore.Signal()
53
53
54 # Used by the first_reply signal logic to determine if a reply is the
54 # Used by the first_reply signal logic to determine if a reply is the
55 # first.
55 # first.
56 _handlers_called = False
56 _handlers_called = False
57
57
58 #---------------------------------------------------------------------------
58 #---------------------------------------------------------------------------
59 # 'ShellSocketChannel' interface
59 # 'ShellSocketChannel' interface
60 #---------------------------------------------------------------------------
60 #---------------------------------------------------------------------------
61
61
62 def call_handlers(self, msg):
62 def call_handlers(self, msg):
63 """ Reimplemented to emit signals instead of making callbacks.
63 """ Reimplemented to emit signals instead of making callbacks.
64 """
64 """
65 # Emit the generic signal.
65 # Emit the generic signal.
66 self.message_received.emit(msg)
66 self.message_received.emit(msg)
67
67
68 # Emit signals for specialized message types.
68 # Emit signals for specialized message types.
69 msg_type = msg['msg_type']
69 msg_type = msg['header']['msg_type']
70 signal = getattr(self, msg_type, None)
70 signal = getattr(self, msg_type, None)
71 if signal:
71 if signal:
72 signal.emit(msg)
72 signal.emit(msg)
73
73
74 if not self._handlers_called:
74 if not self._handlers_called:
75 self.first_reply.emit()
75 self.first_reply.emit()
76 self._handlers_called = True
76 self._handlers_called = True
77
77
78 #---------------------------------------------------------------------------
78 #---------------------------------------------------------------------------
79 # 'QtShellSocketChannel' interface
79 # 'QtShellSocketChannel' interface
80 #---------------------------------------------------------------------------
80 #---------------------------------------------------------------------------
81
81
82 def reset_first_reply(self):
82 def reset_first_reply(self):
83 """ Reset the first_reply signal to fire again on the next reply.
83 """ Reset the first_reply signal to fire again on the next reply.
84 """
84 """
85 self._handlers_called = False
85 self._handlers_called = False
86
86
87
87
88 class QtSubSocketChannel(SocketChannelQObject, SubSocketChannel):
88 class QtSubSocketChannel(SocketChannelQObject, SubSocketChannel):
89
89
90 # Emitted when any message is received.
90 # Emitted when any message is received.
91 message_received = QtCore.Signal(object)
91 message_received = QtCore.Signal(object)
92
92
93 # Emitted when a message of type 'stream' is received.
93 # Emitted when a message of type 'stream' is received.
94 stream_received = QtCore.Signal(object)
94 stream_received = QtCore.Signal(object)
95
95
96 # Emitted when a message of type 'pyin' is received.
96 # Emitted when a message of type 'pyin' is received.
97 pyin_received = QtCore.Signal(object)
97 pyin_received = QtCore.Signal(object)
98
98
99 # Emitted when a message of type 'pyout' is received.
99 # Emitted when a message of type 'pyout' is received.
100 pyout_received = QtCore.Signal(object)
100 pyout_received = QtCore.Signal(object)
101
101
102 # Emitted when a message of type 'pyerr' is received.
102 # Emitted when a message of type 'pyerr' is received.
103 pyerr_received = QtCore.Signal(object)
103 pyerr_received = QtCore.Signal(object)
104
104
105 # Emitted when a message of type 'display_data' is received
105 # Emitted when a message of type 'display_data' is received
106 display_data_received = QtCore.Signal(object)
106 display_data_received = QtCore.Signal(object)
107
107
108 # Emitted when a crash report message is received from the kernel's
108 # Emitted when a crash report message is received from the kernel's
109 # last-resort sys.excepthook.
109 # last-resort sys.excepthook.
110 crash_received = QtCore.Signal(object)
110 crash_received = QtCore.Signal(object)
111
111
112 # Emitted when a shutdown is noticed.
112 # Emitted when a shutdown is noticed.
113 shutdown_reply_received = QtCore.Signal(object)
113 shutdown_reply_received = QtCore.Signal(object)
114
114
115 #---------------------------------------------------------------------------
115 #---------------------------------------------------------------------------
116 # 'SubSocketChannel' interface
116 # 'SubSocketChannel' interface
117 #---------------------------------------------------------------------------
117 #---------------------------------------------------------------------------
118
118
119 def call_handlers(self, msg):
119 def call_handlers(self, msg):
120 """ Reimplemented to emit signals instead of making callbacks.
120 """ Reimplemented to emit signals instead of making callbacks.
121 """
121 """
122 # Emit the generic signal.
122 # Emit the generic signal.
123 self.message_received.emit(msg)
123 self.message_received.emit(msg)
124 # Emit signals for specialized message types.
124 # Emit signals for specialized message types.
125 msg_type = msg['msg_type']
125 msg_type = msg['header']['msg_type']
126 signal = getattr(self, msg_type + '_received', None)
126 signal = getattr(self, msg_type + '_received', None)
127 if signal:
127 if signal:
128 signal.emit(msg)
128 signal.emit(msg)
129 elif msg_type in ('stdout', 'stderr'):
129 elif msg_type in ('stdout', 'stderr'):
130 self.stream_received.emit(msg)
130 self.stream_received.emit(msg)
131
131
132 def flush(self):
132 def flush(self):
133 """ Reimplemented to ensure that signals are dispatched immediately.
133 """ Reimplemented to ensure that signals are dispatched immediately.
134 """
134 """
135 super(QtSubSocketChannel, self).flush()
135 super(QtSubSocketChannel, self).flush()
136 QtCore.QCoreApplication.instance().processEvents()
136 QtCore.QCoreApplication.instance().processEvents()
137
137
138
138
139 class QtStdInSocketChannel(SocketChannelQObject, StdInSocketChannel):
139 class QtStdInSocketChannel(SocketChannelQObject, StdInSocketChannel):
140
140
141 # Emitted when any message is received.
141 # Emitted when any message is received.
142 message_received = QtCore.Signal(object)
142 message_received = QtCore.Signal(object)
143
143
144 # Emitted when an input request is received.
144 # Emitted when an input request is received.
145 input_requested = QtCore.Signal(object)
145 input_requested = QtCore.Signal(object)
146
146
147 #---------------------------------------------------------------------------
147 #---------------------------------------------------------------------------
148 # 'StdInSocketChannel' interface
148 # 'StdInSocketChannel' interface
149 #---------------------------------------------------------------------------
149 #---------------------------------------------------------------------------
150
150
151 def call_handlers(self, msg):
151 def call_handlers(self, msg):
152 """ Reimplemented to emit signals instead of making callbacks.
152 """ Reimplemented to emit signals instead of making callbacks.
153 """
153 """
154 # Emit the generic signal.
154 # Emit the generic signal.
155 self.message_received.emit(msg)
155 self.message_received.emit(msg)
156
156
157 # Emit signals for specialized message types.
157 # Emit signals for specialized message types.
158 msg_type = msg['msg_type']
158 msg_type = msg['header']['msg_type']
159 if msg_type == 'input_request':
159 if msg_type == 'input_request':
160 self.input_requested.emit(msg)
160 self.input_requested.emit(msg)
161
161
162
162
163 class QtHBSocketChannel(SocketChannelQObject, HBSocketChannel):
163 class QtHBSocketChannel(SocketChannelQObject, HBSocketChannel):
164
164
165 # Emitted when the kernel has died.
165 # Emitted when the kernel has died.
166 kernel_died = QtCore.Signal(object)
166 kernel_died = QtCore.Signal(object)
167
167
168 #---------------------------------------------------------------------------
168 #---------------------------------------------------------------------------
169 # 'HBSocketChannel' interface
169 # 'HBSocketChannel' interface
170 #---------------------------------------------------------------------------
170 #---------------------------------------------------------------------------
171
171
172 def call_handlers(self, since_last_heartbeat):
172 def call_handlers(self, since_last_heartbeat):
173 """ Reimplemented to emit signals instead of making callbacks.
173 """ Reimplemented to emit signals instead of making callbacks.
174 """
174 """
175 # Emit the generic signal.
175 # Emit the generic signal.
176 self.kernel_died.emit(since_last_heartbeat)
176 self.kernel_died.emit(since_last_heartbeat)
177
177
178
178
179 class QtKernelManager(KernelManager, SuperQObject):
179 class QtKernelManager(KernelManager, SuperQObject):
180 """ A KernelManager that provides signals and slots.
180 """ A KernelManager that provides signals and slots.
181 """
181 """
182
182
183 __metaclass__ = MetaQObjectHasTraits
183 __metaclass__ = MetaQObjectHasTraits
184
184
185 # Emitted when the kernel manager has started listening.
185 # Emitted when the kernel manager has started listening.
186 started_channels = QtCore.Signal()
186 started_channels = QtCore.Signal()
187
187
188 # Emitted when the kernel manager has stopped listening.
188 # Emitted when the kernel manager has stopped listening.
189 stopped_channels = QtCore.Signal()
189 stopped_channels = QtCore.Signal()
190
190
191 # Use Qt-specific channel classes that emit signals.
191 # Use Qt-specific channel classes that emit signals.
192 sub_channel_class = Type(QtSubSocketChannel)
192 sub_channel_class = Type(QtSubSocketChannel)
193 shell_channel_class = Type(QtShellSocketChannel)
193 shell_channel_class = Type(QtShellSocketChannel)
194 stdin_channel_class = Type(QtStdInSocketChannel)
194 stdin_channel_class = Type(QtStdInSocketChannel)
195 hb_channel_class = Type(QtHBSocketChannel)
195 hb_channel_class = Type(QtHBSocketChannel)
196
196
197 #---------------------------------------------------------------------------
197 #---------------------------------------------------------------------------
198 # 'KernelManager' interface
198 # 'KernelManager' interface
199 #---------------------------------------------------------------------------
199 #---------------------------------------------------------------------------
200
200
201 #------ Kernel process management ------------------------------------------
201 #------ Kernel process management ------------------------------------------
202
202
203 def start_kernel(self, *args, **kw):
203 def start_kernel(self, *args, **kw):
204 """ Reimplemented for proper heartbeat management.
204 """ Reimplemented for proper heartbeat management.
205 """
205 """
206 if self._shell_channel is not None:
206 if self._shell_channel is not None:
207 self._shell_channel.reset_first_reply()
207 self._shell_channel.reset_first_reply()
208 super(QtKernelManager, self).start_kernel(*args, **kw)
208 super(QtKernelManager, self).start_kernel(*args, **kw)
209
209
210 #------ Channel management -------------------------------------------------
210 #------ Channel management -------------------------------------------------
211
211
212 def start_channels(self, *args, **kw):
212 def start_channels(self, *args, **kw):
213 """ Reimplemented to emit signal.
213 """ Reimplemented to emit signal.
214 """
214 """
215 super(QtKernelManager, self).start_channels(*args, **kw)
215 super(QtKernelManager, self).start_channels(*args, **kw)
216 self.started_channels.emit()
216 self.started_channels.emit()
217
217
218 def stop_channels(self):
218 def stop_channels(self):
219 """ Reimplemented to emit signal.
219 """ Reimplemented to emit signal.
220 """
220 """
221 super(QtKernelManager, self).stop_channels()
221 super(QtKernelManager, self).stop_channels()
222 self.stopped_channels.emit()
222 self.stopped_channels.emit()
223
223
224 @property
224 @property
225 def shell_channel(self):
225 def shell_channel(self):
226 """ Reimplemented for proper heartbeat management.
226 """ Reimplemented for proper heartbeat management.
227 """
227 """
228 if self._shell_channel is None:
228 if self._shell_channel is None:
229 self._shell_channel = super(QtKernelManager, self).shell_channel
229 self._shell_channel = super(QtKernelManager, self).shell_channel
230 self._shell_channel.first_reply.connect(self._first_reply)
230 self._shell_channel.first_reply.connect(self._first_reply)
231 return self._shell_channel
231 return self._shell_channel
232
232
233 #---------------------------------------------------------------------------
233 #---------------------------------------------------------------------------
234 # Protected interface
234 # Protected interface
235 #---------------------------------------------------------------------------
235 #---------------------------------------------------------------------------
236
236
237 def _first_reply(self):
237 def _first_reply(self):
238 """ Unpauses the heartbeat channel when the first reply is received on
238 """ Unpauses the heartbeat channel when the first reply is received on
239 the execute channel. Note that this will *not* start the heartbeat
239 the execute channel. Note that this will *not* start the heartbeat
240 channel if it is not already running!
240 channel if it is not already running!
241 """
241 """
242 if self._hb_channel is not None:
242 if self._hb_channel is not None:
243 self._hb_channel.unpause()
243 self._hb_channel.unpause()
@@ -1,1431 +1,1431 b''
1 """A semi-synchronous Client for the ZMQ cluster
1 """A semi-synchronous Client for the ZMQ cluster
2
2
3 Authors:
3 Authors:
4
4
5 * MinRK
5 * MinRK
6 """
6 """
7 #-----------------------------------------------------------------------------
7 #-----------------------------------------------------------------------------
8 # Copyright (C) 2010-2011 The IPython Development Team
8 # Copyright (C) 2010-2011 The IPython Development Team
9 #
9 #
10 # Distributed under the terms of the BSD License. The full license is in
10 # Distributed under the terms of the BSD License. The full license is in
11 # the file COPYING, distributed as part of this software.
11 # the file COPYING, distributed as part of this software.
12 #-----------------------------------------------------------------------------
12 #-----------------------------------------------------------------------------
13
13
14 #-----------------------------------------------------------------------------
14 #-----------------------------------------------------------------------------
15 # Imports
15 # Imports
16 #-----------------------------------------------------------------------------
16 #-----------------------------------------------------------------------------
17
17
18 import os
18 import os
19 import json
19 import json
20 import sys
20 import sys
21 import time
21 import time
22 import warnings
22 import warnings
23 from datetime import datetime
23 from datetime import datetime
24 from getpass import getpass
24 from getpass import getpass
25 from pprint import pprint
25 from pprint import pprint
26
26
27 pjoin = os.path.join
27 pjoin = os.path.join
28
28
29 import zmq
29 import zmq
30 # from zmq.eventloop import ioloop, zmqstream
30 # from zmq.eventloop import ioloop, zmqstream
31
31
32 from IPython.config.configurable import MultipleInstanceError
32 from IPython.config.configurable import MultipleInstanceError
33 from IPython.core.application import BaseIPythonApplication
33 from IPython.core.application import BaseIPythonApplication
34
34
35 from IPython.utils.jsonutil import rekey
35 from IPython.utils.jsonutil import rekey
36 from IPython.utils.localinterfaces import LOCAL_IPS
36 from IPython.utils.localinterfaces import LOCAL_IPS
37 from IPython.utils.path import get_ipython_dir
37 from IPython.utils.path import get_ipython_dir
38 from IPython.utils.traitlets import (HasTraits, Int, Instance, Unicode,
38 from IPython.utils.traitlets import (HasTraits, Int, Instance, Unicode,
39 Dict, List, Bool, Set)
39 Dict, List, Bool, Set)
40 from IPython.external.decorator import decorator
40 from IPython.external.decorator import decorator
41 from IPython.external.ssh import tunnel
41 from IPython.external.ssh import tunnel
42
42
43 from IPython.parallel import error
43 from IPython.parallel import error
44 from IPython.parallel import util
44 from IPython.parallel import util
45
45
46 from IPython.zmq.session import Session, Message
46 from IPython.zmq.session import Session, Message
47
47
48 from .asyncresult import AsyncResult, AsyncHubResult
48 from .asyncresult import AsyncResult, AsyncHubResult
49 from IPython.core.profiledir import ProfileDir, ProfileDirError
49 from IPython.core.profiledir import ProfileDir, ProfileDirError
50 from .view import DirectView, LoadBalancedView
50 from .view import DirectView, LoadBalancedView
51
51
52 if sys.version_info[0] >= 3:
52 if sys.version_info[0] >= 3:
53 # xrange is used in a couple 'isinstance' tests in py2
53 # xrange is used in a couple 'isinstance' tests in py2
54 # should be just 'range' in 3k
54 # should be just 'range' in 3k
55 xrange = range
55 xrange = range
56
56
57 #--------------------------------------------------------------------------
57 #--------------------------------------------------------------------------
58 # Decorators for Client methods
58 # Decorators for Client methods
59 #--------------------------------------------------------------------------
59 #--------------------------------------------------------------------------
60
60
61 @decorator
61 @decorator
62 def spin_first(f, self, *args, **kwargs):
62 def spin_first(f, self, *args, **kwargs):
63 """Call spin() to sync state prior to calling the method."""
63 """Call spin() to sync state prior to calling the method."""
64 self.spin()
64 self.spin()
65 return f(self, *args, **kwargs)
65 return f(self, *args, **kwargs)
66
66
67
67
68 #--------------------------------------------------------------------------
68 #--------------------------------------------------------------------------
69 # Classes
69 # Classes
70 #--------------------------------------------------------------------------
70 #--------------------------------------------------------------------------
71
71
72 class Metadata(dict):
72 class Metadata(dict):
73 """Subclass of dict for initializing metadata values.
73 """Subclass of dict for initializing metadata values.
74
74
75 Attribute access works on keys.
75 Attribute access works on keys.
76
76
77 These objects have a strict set of keys - errors will raise if you try
77 These objects have a strict set of keys - errors will raise if you try
78 to add new keys.
78 to add new keys.
79 """
79 """
80 def __init__(self, *args, **kwargs):
80 def __init__(self, *args, **kwargs):
81 dict.__init__(self)
81 dict.__init__(self)
82 md = {'msg_id' : None,
82 md = {'msg_id' : None,
83 'submitted' : None,
83 'submitted' : None,
84 'started' : None,
84 'started' : None,
85 'completed' : None,
85 'completed' : None,
86 'received' : None,
86 'received' : None,
87 'engine_uuid' : None,
87 'engine_uuid' : None,
88 'engine_id' : None,
88 'engine_id' : None,
89 'follow' : None,
89 'follow' : None,
90 'after' : None,
90 'after' : None,
91 'status' : None,
91 'status' : None,
92
92
93 'pyin' : None,
93 'pyin' : None,
94 'pyout' : None,
94 'pyout' : None,
95 'pyerr' : None,
95 'pyerr' : None,
96 'stdout' : '',
96 'stdout' : '',
97 'stderr' : '',
97 'stderr' : '',
98 }
98 }
99 self.update(md)
99 self.update(md)
100 self.update(dict(*args, **kwargs))
100 self.update(dict(*args, **kwargs))
101
101
102 def __getattr__(self, key):
102 def __getattr__(self, key):
103 """getattr aliased to getitem"""
103 """getattr aliased to getitem"""
104 if key in self.iterkeys():
104 if key in self.iterkeys():
105 return self[key]
105 return self[key]
106 else:
106 else:
107 raise AttributeError(key)
107 raise AttributeError(key)
108
108
109 def __setattr__(self, key, value):
109 def __setattr__(self, key, value):
110 """setattr aliased to setitem, with strict"""
110 """setattr aliased to setitem, with strict"""
111 if key in self.iterkeys():
111 if key in self.iterkeys():
112 self[key] = value
112 self[key] = value
113 else:
113 else:
114 raise AttributeError(key)
114 raise AttributeError(key)
115
115
116 def __setitem__(self, key, value):
116 def __setitem__(self, key, value):
117 """strict static key enforcement"""
117 """strict static key enforcement"""
118 if key in self.iterkeys():
118 if key in self.iterkeys():
119 dict.__setitem__(self, key, value)
119 dict.__setitem__(self, key, value)
120 else:
120 else:
121 raise KeyError(key)
121 raise KeyError(key)
122
122
123
123
124 class Client(HasTraits):
124 class Client(HasTraits):
125 """A semi-synchronous client to the IPython ZMQ cluster
125 """A semi-synchronous client to the IPython ZMQ cluster
126
126
127 Parameters
127 Parameters
128 ----------
128 ----------
129
129
130 url_or_file : bytes or unicode; zmq url or path to ipcontroller-client.json
130 url_or_file : bytes or unicode; zmq url or path to ipcontroller-client.json
131 Connection information for the Hub's registration. If a json connector
131 Connection information for the Hub's registration. If a json connector
132 file is given, then likely no further configuration is necessary.
132 file is given, then likely no further configuration is necessary.
133 [Default: use profile]
133 [Default: use profile]
134 profile : bytes
134 profile : bytes
135 The name of the Cluster profile to be used to find connector information.
135 The name of the Cluster profile to be used to find connector information.
136 If run from an IPython application, the default profile will be the same
136 If run from an IPython application, the default profile will be the same
137 as the running application, otherwise it will be 'default'.
137 as the running application, otherwise it will be 'default'.
138 context : zmq.Context
138 context : zmq.Context
139 Pass an existing zmq.Context instance, otherwise the client will create its own.
139 Pass an existing zmq.Context instance, otherwise the client will create its own.
140 debug : bool
140 debug : bool
141 flag for lots of message printing for debug purposes
141 flag for lots of message printing for debug purposes
142 timeout : int/float
142 timeout : int/float
143 time (in seconds) to wait for connection replies from the Hub
143 time (in seconds) to wait for connection replies from the Hub
144 [Default: 10]
144 [Default: 10]
145
145
146 #-------------- session related args ----------------
146 #-------------- session related args ----------------
147
147
148 config : Config object
148 config : Config object
149 If specified, this will be relayed to the Session for configuration
149 If specified, this will be relayed to the Session for configuration
150 username : str
150 username : str
151 set username for the session object
151 set username for the session object
152 packer : str (import_string) or callable
152 packer : str (import_string) or callable
153 Can be either the simple keyword 'json' or 'pickle', or an import_string to a
153 Can be either the simple keyword 'json' or 'pickle', or an import_string to a
154 function to serialize messages. Must support same input as
154 function to serialize messages. Must support same input as
155 JSON, and output must be bytes.
155 JSON, and output must be bytes.
156 You can pass a callable directly as `pack`
156 You can pass a callable directly as `pack`
157 unpacker : str (import_string) or callable
157 unpacker : str (import_string) or callable
158 The inverse of packer. Only necessary if packer is specified as *not* one
158 The inverse of packer. Only necessary if packer is specified as *not* one
159 of 'json' or 'pickle'.
159 of 'json' or 'pickle'.
160
160
161 #-------------- ssh related args ----------------
161 #-------------- ssh related args ----------------
162 # These are args for configuring the ssh tunnel to be used
162 # These are args for configuring the ssh tunnel to be used
163 # credentials are used to forward connections over ssh to the Controller
163 # credentials are used to forward connections over ssh to the Controller
164 # Note that the ip given in `addr` needs to be relative to sshserver
164 # Note that the ip given in `addr` needs to be relative to sshserver
165 # The most basic case is to leave addr as pointing to localhost (127.0.0.1),
165 # The most basic case is to leave addr as pointing to localhost (127.0.0.1),
166 # and set sshserver as the same machine the Controller is on. However,
166 # and set sshserver as the same machine the Controller is on. However,
167 # the only requirement is that sshserver is able to see the Controller
167 # the only requirement is that sshserver is able to see the Controller
168 # (i.e. is within the same trusted network).
168 # (i.e. is within the same trusted network).
169
169
170 sshserver : str
170 sshserver : str
171 A string of the form passed to ssh, i.e. 'server.tld' or 'user@server.tld:port'
171 A string of the form passed to ssh, i.e. 'server.tld' or 'user@server.tld:port'
172 If keyfile or password is specified, and this is not, it will default to
172 If keyfile or password is specified, and this is not, it will default to
173 the ip given in addr.
173 the ip given in addr.
174 sshkey : str; path to public ssh key file
174 sshkey : str; path to public ssh key file
175 This specifies a key to be used in ssh login, default None.
175 This specifies a key to be used in ssh login, default None.
176 Regular default ssh keys will be used without specifying this argument.
176 Regular default ssh keys will be used without specifying this argument.
177 password : str
177 password : str
178 Your ssh password to sshserver. Note that if this is left None,
178 Your ssh password to sshserver. Note that if this is left None,
179 you will be prompted for it if passwordless key based login is unavailable.
179 you will be prompted for it if passwordless key based login is unavailable.
180 paramiko : bool
180 paramiko : bool
181 flag for whether to use paramiko instead of shell ssh for tunneling.
181 flag for whether to use paramiko instead of shell ssh for tunneling.
182 [default: True on win32, False else]
182 [default: True on win32, False else]
183
183
184 ------- exec authentication args -------
184 ------- exec authentication args -------
185 If even localhost is untrusted, you can have some protection against
185 If even localhost is untrusted, you can have some protection against
186 unauthorized execution by signing messages with HMAC digests.
186 unauthorized execution by signing messages with HMAC digests.
187 Messages are still sent as cleartext, so if someone can snoop your
187 Messages are still sent as cleartext, so if someone can snoop your
188 loopback traffic this will not protect your privacy, but will prevent
188 loopback traffic this will not protect your privacy, but will prevent
189 unauthorized execution.
189 unauthorized execution.
190
190
191 exec_key : str
191 exec_key : str
192 an authentication key or file containing a key
192 an authentication key or file containing a key
193 default: None
193 default: None
194
194
195
195
196 Attributes
196 Attributes
197 ----------
197 ----------
198
198
199 ids : list of int engine IDs
199 ids : list of int engine IDs
200 requesting the ids attribute always synchronizes
200 requesting the ids attribute always synchronizes
201 the registration state. To request ids without synchronization,
201 the registration state. To request ids without synchronization,
202 use semi-private _ids attributes.
202 use semi-private _ids attributes.
203
203
204 history : list of msg_ids
204 history : list of msg_ids
205 a list of msg_ids, keeping track of all the execution
205 a list of msg_ids, keeping track of all the execution
206 messages you have submitted in order.
206 messages you have submitted in order.
207
207
208 outstanding : set of msg_ids
208 outstanding : set of msg_ids
209 a set of msg_ids that have been submitted, but whose
209 a set of msg_ids that have been submitted, but whose
210 results have not yet been received.
210 results have not yet been received.
211
211
212 results : dict
212 results : dict
213 a dict of all our results, keyed by msg_id
213 a dict of all our results, keyed by msg_id
214
214
215 block : bool
215 block : bool
216 determines default behavior when block not specified
216 determines default behavior when block not specified
217 in execution methods
217 in execution methods
218
218
219 Methods
219 Methods
220 -------
220 -------
221
221
222 spin
222 spin
223 flushes incoming results and registration state changes
223 flushes incoming results and registration state changes
224 control methods spin, and requesting `ids` also ensures up to date
224 control methods spin, and requesting `ids` also ensures up to date
225
225
226 wait
226 wait
227 wait on one or more msg_ids
227 wait on one or more msg_ids
228
228
229 execution methods
229 execution methods
230 apply
230 apply
231 legacy: execute, run
231 legacy: execute, run
232
232
233 data movement
233 data movement
234 push, pull, scatter, gather
234 push, pull, scatter, gather
235
235
236 query methods
236 query methods
237 queue_status, get_result, purge, result_status
237 queue_status, get_result, purge, result_status
238
238
239 control methods
239 control methods
240 abort, shutdown
240 abort, shutdown
241
241
242 """
242 """
243
243
244
244
245 block = Bool(False)
245 block = Bool(False)
246 outstanding = Set()
246 outstanding = Set()
247 results = Instance('collections.defaultdict', (dict,))
247 results = Instance('collections.defaultdict', (dict,))
248 metadata = Instance('collections.defaultdict', (Metadata,))
248 metadata = Instance('collections.defaultdict', (Metadata,))
249 history = List()
249 history = List()
250 debug = Bool(False)
250 debug = Bool(False)
251
251
252 profile=Unicode()
252 profile=Unicode()
253 def _profile_default(self):
253 def _profile_default(self):
254 if BaseIPythonApplication.initialized():
254 if BaseIPythonApplication.initialized():
255 # an IPython app *might* be running, try to get its profile
255 # an IPython app *might* be running, try to get its profile
256 try:
256 try:
257 return BaseIPythonApplication.instance().profile
257 return BaseIPythonApplication.instance().profile
258 except (AttributeError, MultipleInstanceError):
258 except (AttributeError, MultipleInstanceError):
259 # could be a *different* subclass of config.Application,
259 # could be a *different* subclass of config.Application,
260 # which would raise one of these two errors.
260 # which would raise one of these two errors.
261 return u'default'
261 return u'default'
262 else:
262 else:
263 return u'default'
263 return u'default'
264
264
265
265
266 _outstanding_dict = Instance('collections.defaultdict', (set,))
266 _outstanding_dict = Instance('collections.defaultdict', (set,))
267 _ids = List()
267 _ids = List()
268 _connected=Bool(False)
268 _connected=Bool(False)
269 _ssh=Bool(False)
269 _ssh=Bool(False)
270 _context = Instance('zmq.Context')
270 _context = Instance('zmq.Context')
271 _config = Dict()
271 _config = Dict()
272 _engines=Instance(util.ReverseDict, (), {})
272 _engines=Instance(util.ReverseDict, (), {})
273 # _hub_socket=Instance('zmq.Socket')
273 # _hub_socket=Instance('zmq.Socket')
274 _query_socket=Instance('zmq.Socket')
274 _query_socket=Instance('zmq.Socket')
275 _control_socket=Instance('zmq.Socket')
275 _control_socket=Instance('zmq.Socket')
276 _iopub_socket=Instance('zmq.Socket')
276 _iopub_socket=Instance('zmq.Socket')
277 _notification_socket=Instance('zmq.Socket')
277 _notification_socket=Instance('zmq.Socket')
278 _mux_socket=Instance('zmq.Socket')
278 _mux_socket=Instance('zmq.Socket')
279 _task_socket=Instance('zmq.Socket')
279 _task_socket=Instance('zmq.Socket')
280 _task_scheme=Unicode()
280 _task_scheme=Unicode()
281 _closed = False
281 _closed = False
282 _ignored_control_replies=Int(0)
282 _ignored_control_replies=Int(0)
283 _ignored_hub_replies=Int(0)
283 _ignored_hub_replies=Int(0)
284
284
285 def __new__(self, *args, **kw):
285 def __new__(self, *args, **kw):
286 # don't raise on positional args
286 # don't raise on positional args
287 return HasTraits.__new__(self, **kw)
287 return HasTraits.__new__(self, **kw)
288
288
289 def __init__(self, url_or_file=None, profile=None, profile_dir=None, ipython_dir=None,
289 def __init__(self, url_or_file=None, profile=None, profile_dir=None, ipython_dir=None,
290 context=None, debug=False, exec_key=None,
290 context=None, debug=False, exec_key=None,
291 sshserver=None, sshkey=None, password=None, paramiko=None,
291 sshserver=None, sshkey=None, password=None, paramiko=None,
292 timeout=10, **extra_args
292 timeout=10, **extra_args
293 ):
293 ):
294 if profile:
294 if profile:
295 super(Client, self).__init__(debug=debug, profile=profile)
295 super(Client, self).__init__(debug=debug, profile=profile)
296 else:
296 else:
297 super(Client, self).__init__(debug=debug)
297 super(Client, self).__init__(debug=debug)
298 if context is None:
298 if context is None:
299 context = zmq.Context.instance()
299 context = zmq.Context.instance()
300 self._context = context
300 self._context = context
301
301
302 self._setup_profile_dir(self.profile, profile_dir, ipython_dir)
302 self._setup_profile_dir(self.profile, profile_dir, ipython_dir)
303 if self._cd is not None:
303 if self._cd is not None:
304 if url_or_file is None:
304 if url_or_file is None:
305 url_or_file = pjoin(self._cd.security_dir, 'ipcontroller-client.json')
305 url_or_file = pjoin(self._cd.security_dir, 'ipcontroller-client.json')
306 assert url_or_file is not None, "I can't find enough information to connect to a hub!"\
306 assert url_or_file is not None, "I can't find enough information to connect to a hub!"\
307 " Please specify at least one of url_or_file or profile."
307 " Please specify at least one of url_or_file or profile."
308
308
309 try:
309 try:
310 util.validate_url(url_or_file)
310 util.validate_url(url_or_file)
311 except AssertionError:
311 except AssertionError:
312 if not os.path.exists(url_or_file):
312 if not os.path.exists(url_or_file):
313 if self._cd:
313 if self._cd:
314 url_or_file = os.path.join(self._cd.security_dir, url_or_file)
314 url_or_file = os.path.join(self._cd.security_dir, url_or_file)
315 assert os.path.exists(url_or_file), "Not a valid connection file or url: %r"%url_or_file
315 assert os.path.exists(url_or_file), "Not a valid connection file or url: %r"%url_or_file
316 with open(url_or_file) as f:
316 with open(url_or_file) as f:
317 cfg = json.loads(f.read())
317 cfg = json.loads(f.read())
318 else:
318 else:
319 cfg = {'url':url_or_file}
319 cfg = {'url':url_or_file}
320
320
321 # sync defaults from args, json:
321 # sync defaults from args, json:
322 if sshserver:
322 if sshserver:
323 cfg['ssh'] = sshserver
323 cfg['ssh'] = sshserver
324 if exec_key:
324 if exec_key:
325 cfg['exec_key'] = exec_key
325 cfg['exec_key'] = exec_key
326 exec_key = cfg['exec_key']
326 exec_key = cfg['exec_key']
327 location = cfg.setdefault('location', None)
327 location = cfg.setdefault('location', None)
328 cfg['url'] = util.disambiguate_url(cfg['url'], location)
328 cfg['url'] = util.disambiguate_url(cfg['url'], location)
329 url = cfg['url']
329 url = cfg['url']
330 proto,addr,port = util.split_url(url)
330 proto,addr,port = util.split_url(url)
331 if location is not None and addr == '127.0.0.1':
331 if location is not None and addr == '127.0.0.1':
332 # location specified, and connection is expected to be local
332 # location specified, and connection is expected to be local
333 if location not in LOCAL_IPS and not sshserver:
333 if location not in LOCAL_IPS and not sshserver:
334 # load ssh from JSON *only* if the controller is not on
334 # load ssh from JSON *only* if the controller is not on
335 # this machine
335 # this machine
336 sshserver=cfg['ssh']
336 sshserver=cfg['ssh']
337 if location not in LOCAL_IPS and not sshserver:
337 if location not in LOCAL_IPS and not sshserver:
338 # warn if no ssh specified, but SSH is probably needed
338 # warn if no ssh specified, but SSH is probably needed
339 # This is only a warning, because the most likely cause
339 # This is only a warning, because the most likely cause
340 # is a local Controller on a laptop whose IP is dynamic
340 # is a local Controller on a laptop whose IP is dynamic
341 warnings.warn("""
341 warnings.warn("""
342 Controller appears to be listening on localhost, but not on this machine.
342 Controller appears to be listening on localhost, but not on this machine.
343 If this is true, you should specify Client(...,sshserver='you@%s')
343 If this is true, you should specify Client(...,sshserver='you@%s')
344 or instruct your controller to listen on an external IP."""%location,
344 or instruct your controller to listen on an external IP."""%location,
345 RuntimeWarning)
345 RuntimeWarning)
346 elif not sshserver:
346 elif not sshserver:
347 # otherwise sync with cfg
347 # otherwise sync with cfg
348 sshserver = cfg['ssh']
348 sshserver = cfg['ssh']
349
349
350 self._config = cfg
350 self._config = cfg
351
351
352 self._ssh = bool(sshserver or sshkey or password)
352 self._ssh = bool(sshserver or sshkey or password)
353 if self._ssh and sshserver is None:
353 if self._ssh and sshserver is None:
354 # default to ssh via localhost
354 # default to ssh via localhost
355 sshserver = url.split('://')[1].split(':')[0]
355 sshserver = url.split('://')[1].split(':')[0]
356 if self._ssh and password is None:
356 if self._ssh and password is None:
357 if tunnel.try_passwordless_ssh(sshserver, sshkey, paramiko):
357 if tunnel.try_passwordless_ssh(sshserver, sshkey, paramiko):
358 password=False
358 password=False
359 else:
359 else:
360 password = getpass("SSH Password for %s: "%sshserver)
360 password = getpass("SSH Password for %s: "%sshserver)
361 ssh_kwargs = dict(keyfile=sshkey, password=password, paramiko=paramiko)
361 ssh_kwargs = dict(keyfile=sshkey, password=password, paramiko=paramiko)
362
362
363 # configure and construct the session
363 # configure and construct the session
364 if exec_key is not None:
364 if exec_key is not None:
365 if os.path.isfile(exec_key):
365 if os.path.isfile(exec_key):
366 extra_args['keyfile'] = exec_key
366 extra_args['keyfile'] = exec_key
367 else:
367 else:
368 exec_key = util.asbytes(exec_key)
368 exec_key = util.asbytes(exec_key)
369 extra_args['key'] = exec_key
369 extra_args['key'] = exec_key
370 self.session = Session(**extra_args)
370 self.session = Session(**extra_args)
371
371
372 self._query_socket = self._context.socket(zmq.XREQ)
372 self._query_socket = self._context.socket(zmq.XREQ)
373 self._query_socket.setsockopt(zmq.IDENTITY, util.asbytes(self.session.session))
373 self._query_socket.setsockopt(zmq.IDENTITY, util.asbytes(self.session.session))
374 if self._ssh:
374 if self._ssh:
375 tunnel.tunnel_connection(self._query_socket, url, sshserver, **ssh_kwargs)
375 tunnel.tunnel_connection(self._query_socket, url, sshserver, **ssh_kwargs)
376 else:
376 else:
377 self._query_socket.connect(url)
377 self._query_socket.connect(url)
378
378
379 self.session.debug = self.debug
379 self.session.debug = self.debug
380
380
381 self._notification_handlers = {'registration_notification' : self._register_engine,
381 self._notification_handlers = {'registration_notification' : self._register_engine,
382 'unregistration_notification' : self._unregister_engine,
382 'unregistration_notification' : self._unregister_engine,
383 'shutdown_notification' : lambda msg: self.close(),
383 'shutdown_notification' : lambda msg: self.close(),
384 }
384 }
385 self._queue_handlers = {'execute_reply' : self._handle_execute_reply,
385 self._queue_handlers = {'execute_reply' : self._handle_execute_reply,
386 'apply_reply' : self._handle_apply_reply}
386 'apply_reply' : self._handle_apply_reply}
387 self._connect(sshserver, ssh_kwargs, timeout)
387 self._connect(sshserver, ssh_kwargs, timeout)
388
388
389 def __del__(self):
389 def __del__(self):
390 """cleanup sockets, but _not_ context."""
390 """cleanup sockets, but _not_ context."""
391 self.close()
391 self.close()
392
392
393 def _setup_profile_dir(self, profile, profile_dir, ipython_dir):
393 def _setup_profile_dir(self, profile, profile_dir, ipython_dir):
394 if ipython_dir is None:
394 if ipython_dir is None:
395 ipython_dir = get_ipython_dir()
395 ipython_dir = get_ipython_dir()
396 if profile_dir is not None:
396 if profile_dir is not None:
397 try:
397 try:
398 self._cd = ProfileDir.find_profile_dir(profile_dir)
398 self._cd = ProfileDir.find_profile_dir(profile_dir)
399 return
399 return
400 except ProfileDirError:
400 except ProfileDirError:
401 pass
401 pass
402 elif profile is not None:
402 elif profile is not None:
403 try:
403 try:
404 self._cd = ProfileDir.find_profile_dir_by_name(
404 self._cd = ProfileDir.find_profile_dir_by_name(
405 ipython_dir, profile)
405 ipython_dir, profile)
406 return
406 return
407 except ProfileDirError:
407 except ProfileDirError:
408 pass
408 pass
409 self._cd = None
409 self._cd = None
410
410
411 def _update_engines(self, engines):
411 def _update_engines(self, engines):
412 """Update our engines dict and _ids from a dict of the form: {id:uuid}."""
412 """Update our engines dict and _ids from a dict of the form: {id:uuid}."""
413 for k,v in engines.iteritems():
413 for k,v in engines.iteritems():
414 eid = int(k)
414 eid = int(k)
415 self._engines[eid] = v
415 self._engines[eid] = v
416 self._ids.append(eid)
416 self._ids.append(eid)
417 self._ids = sorted(self._ids)
417 self._ids = sorted(self._ids)
418 if sorted(self._engines.keys()) != range(len(self._engines)) and \
418 if sorted(self._engines.keys()) != range(len(self._engines)) and \
419 self._task_scheme == 'pure' and self._task_socket:
419 self._task_scheme == 'pure' and self._task_socket:
420 self._stop_scheduling_tasks()
420 self._stop_scheduling_tasks()
421
421
422 def _stop_scheduling_tasks(self):
422 def _stop_scheduling_tasks(self):
423 """Stop scheduling tasks because an engine has been unregistered
423 """Stop scheduling tasks because an engine has been unregistered
424 from a pure ZMQ scheduler.
424 from a pure ZMQ scheduler.
425 """
425 """
426 self._task_socket.close()
426 self._task_socket.close()
427 self._task_socket = None
427 self._task_socket = None
428 msg = "An engine has been unregistered, and we are using pure " +\
428 msg = "An engine has been unregistered, and we are using pure " +\
429 "ZMQ task scheduling. Task farming will be disabled."
429 "ZMQ task scheduling. Task farming will be disabled."
430 if self.outstanding:
430 if self.outstanding:
431 msg += " If you were running tasks when this happened, " +\
431 msg += " If you were running tasks when this happened, " +\
432 "some `outstanding` msg_ids may never resolve."
432 "some `outstanding` msg_ids may never resolve."
433 warnings.warn(msg, RuntimeWarning)
433 warnings.warn(msg, RuntimeWarning)
434
434
435 def _build_targets(self, targets):
435 def _build_targets(self, targets):
436 """Turn valid target IDs or 'all' into two lists:
436 """Turn valid target IDs or 'all' into two lists:
437 (int_ids, uuids).
437 (int_ids, uuids).
438 """
438 """
439 if not self._ids:
439 if not self._ids:
440 # flush notification socket if no engines yet, just in case
440 # flush notification socket if no engines yet, just in case
441 if not self.ids:
441 if not self.ids:
442 raise error.NoEnginesRegistered("Can't build targets without any engines")
442 raise error.NoEnginesRegistered("Can't build targets without any engines")
443
443
444 if targets is None:
444 if targets is None:
445 targets = self._ids
445 targets = self._ids
446 elif isinstance(targets, basestring):
446 elif isinstance(targets, basestring):
447 if targets.lower() == 'all':
447 if targets.lower() == 'all':
448 targets = self._ids
448 targets = self._ids
449 else:
449 else:
450 raise TypeError("%r not valid str target, must be 'all'"%(targets))
450 raise TypeError("%r not valid str target, must be 'all'"%(targets))
451 elif isinstance(targets, int):
451 elif isinstance(targets, int):
452 if targets < 0:
452 if targets < 0:
453 targets = self.ids[targets]
453 targets = self.ids[targets]
454 if targets not in self._ids:
454 if targets not in self._ids:
455 raise IndexError("No such engine: %i"%targets)
455 raise IndexError("No such engine: %i"%targets)
456 targets = [targets]
456 targets = [targets]
457
457
458 if isinstance(targets, slice):
458 if isinstance(targets, slice):
459 indices = range(len(self._ids))[targets]
459 indices = range(len(self._ids))[targets]
460 ids = self.ids
460 ids = self.ids
461 targets = [ ids[i] for i in indices ]
461 targets = [ ids[i] for i in indices ]
462
462
463 if not isinstance(targets, (tuple, list, xrange)):
463 if not isinstance(targets, (tuple, list, xrange)):
464 raise TypeError("targets by int/slice/collection of ints only, not %s"%(type(targets)))
464 raise TypeError("targets by int/slice/collection of ints only, not %s"%(type(targets)))
465
465
466 return [util.asbytes(self._engines[t]) for t in targets], list(targets)
466 return [util.asbytes(self._engines[t]) for t in targets], list(targets)
467
467
468 def _connect(self, sshserver, ssh_kwargs, timeout):
468 def _connect(self, sshserver, ssh_kwargs, timeout):
469 """setup all our socket connections to the cluster. This is called from
469 """setup all our socket connections to the cluster. This is called from
470 __init__."""
470 __init__."""
471
471
472 # Maybe allow reconnecting?
472 # Maybe allow reconnecting?
473 if self._connected:
473 if self._connected:
474 return
474 return
475 self._connected=True
475 self._connected=True
476
476
477 def connect_socket(s, url):
477 def connect_socket(s, url):
478 url = util.disambiguate_url(url, self._config['location'])
478 url = util.disambiguate_url(url, self._config['location'])
479 if self._ssh:
479 if self._ssh:
480 return tunnel.tunnel_connection(s, url, sshserver, **ssh_kwargs)
480 return tunnel.tunnel_connection(s, url, sshserver, **ssh_kwargs)
481 else:
481 else:
482 return s.connect(url)
482 return s.connect(url)
483
483
484 self.session.send(self._query_socket, 'connection_request')
484 self.session.send(self._query_socket, 'connection_request')
485 # use Poller because zmq.select has wrong units in pyzmq 2.1.7
485 # use Poller because zmq.select has wrong units in pyzmq 2.1.7
486 poller = zmq.Poller()
486 poller = zmq.Poller()
487 poller.register(self._query_socket, zmq.POLLIN)
487 poller.register(self._query_socket, zmq.POLLIN)
488 # poll expects milliseconds, timeout is seconds
488 # poll expects milliseconds, timeout is seconds
489 evts = poller.poll(timeout*1000)
489 evts = poller.poll(timeout*1000)
490 if not evts:
490 if not evts:
491 raise error.TimeoutError("Hub connection request timed out")
491 raise error.TimeoutError("Hub connection request timed out")
492 idents,msg = self.session.recv(self._query_socket,mode=0)
492 idents,msg = self.session.recv(self._query_socket,mode=0)
493 if self.debug:
493 if self.debug:
494 pprint(msg)
494 pprint(msg)
495 msg = Message(msg)
495 msg = Message(msg)
496 content = msg.content
496 content = msg.content
497 self._config['registration'] = dict(content)
497 self._config['registration'] = dict(content)
498 if content.status == 'ok':
498 if content.status == 'ok':
499 ident = util.asbytes(self.session.session)
499 ident = util.asbytes(self.session.session)
500 if content.mux:
500 if content.mux:
501 self._mux_socket = self._context.socket(zmq.XREQ)
501 self._mux_socket = self._context.socket(zmq.XREQ)
502 self._mux_socket.setsockopt(zmq.IDENTITY, ident)
502 self._mux_socket.setsockopt(zmq.IDENTITY, ident)
503 connect_socket(self._mux_socket, content.mux)
503 connect_socket(self._mux_socket, content.mux)
504 if content.task:
504 if content.task:
505 self._task_scheme, task_addr = content.task
505 self._task_scheme, task_addr = content.task
506 self._task_socket = self._context.socket(zmq.XREQ)
506 self._task_socket = self._context.socket(zmq.XREQ)
507 self._task_socket.setsockopt(zmq.IDENTITY, ident)
507 self._task_socket.setsockopt(zmq.IDENTITY, ident)
508 connect_socket(self._task_socket, task_addr)
508 connect_socket(self._task_socket, task_addr)
509 if content.notification:
509 if content.notification:
510 self._notification_socket = self._context.socket(zmq.SUB)
510 self._notification_socket = self._context.socket(zmq.SUB)
511 connect_socket(self._notification_socket, content.notification)
511 connect_socket(self._notification_socket, content.notification)
512 self._notification_socket.setsockopt(zmq.SUBSCRIBE, b'')
512 self._notification_socket.setsockopt(zmq.SUBSCRIBE, b'')
513 # if content.query:
513 # if content.query:
514 # self._query_socket = self._context.socket(zmq.XREQ)
514 # self._query_socket = self._context.socket(zmq.XREQ)
515 # self._query_socket.setsockopt(zmq.IDENTITY, self.session.session)
515 # self._query_socket.setsockopt(zmq.IDENTITY, self.session.session)
516 # connect_socket(self._query_socket, content.query)
516 # connect_socket(self._query_socket, content.query)
517 if content.control:
517 if content.control:
518 self._control_socket = self._context.socket(zmq.XREQ)
518 self._control_socket = self._context.socket(zmq.XREQ)
519 self._control_socket.setsockopt(zmq.IDENTITY, ident)
519 self._control_socket.setsockopt(zmq.IDENTITY, ident)
520 connect_socket(self._control_socket, content.control)
520 connect_socket(self._control_socket, content.control)
521 if content.iopub:
521 if content.iopub:
522 self._iopub_socket = self._context.socket(zmq.SUB)
522 self._iopub_socket = self._context.socket(zmq.SUB)
523 self._iopub_socket.setsockopt(zmq.SUBSCRIBE, b'')
523 self._iopub_socket.setsockopt(zmq.SUBSCRIBE, b'')
524 self._iopub_socket.setsockopt(zmq.IDENTITY, ident)
524 self._iopub_socket.setsockopt(zmq.IDENTITY, ident)
525 connect_socket(self._iopub_socket, content.iopub)
525 connect_socket(self._iopub_socket, content.iopub)
526 self._update_engines(dict(content.engines))
526 self._update_engines(dict(content.engines))
527 else:
527 else:
528 self._connected = False
528 self._connected = False
529 raise Exception("Failed to connect!")
529 raise Exception("Failed to connect!")
530
530
531 #--------------------------------------------------------------------------
531 #--------------------------------------------------------------------------
532 # handlers and callbacks for incoming messages
532 # handlers and callbacks for incoming messages
533 #--------------------------------------------------------------------------
533 #--------------------------------------------------------------------------
534
534
535 def _unwrap_exception(self, content):
535 def _unwrap_exception(self, content):
536 """unwrap exception, and remap engine_id to int."""
536 """unwrap exception, and remap engine_id to int."""
537 e = error.unwrap_exception(content)
537 e = error.unwrap_exception(content)
538 # print e.traceback
538 # print e.traceback
539 if e.engine_info:
539 if e.engine_info:
540 e_uuid = e.engine_info['engine_uuid']
540 e_uuid = e.engine_info['engine_uuid']
541 eid = self._engines[e_uuid]
541 eid = self._engines[e_uuid]
542 e.engine_info['engine_id'] = eid
542 e.engine_info['engine_id'] = eid
543 return e
543 return e
544
544
545 def _extract_metadata(self, header, parent, content):
545 def _extract_metadata(self, header, parent, content):
546 md = {'msg_id' : parent['msg_id'],
546 md = {'msg_id' : parent['msg_id'],
547 'received' : datetime.now(),
547 'received' : datetime.now(),
548 'engine_uuid' : header.get('engine', None),
548 'engine_uuid' : header.get('engine', None),
549 'follow' : parent.get('follow', []),
549 'follow' : parent.get('follow', []),
550 'after' : parent.get('after', []),
550 'after' : parent.get('after', []),
551 'status' : content['status'],
551 'status' : content['status'],
552 }
552 }
553
553
554 if md['engine_uuid'] is not None:
554 if md['engine_uuid'] is not None:
555 md['engine_id'] = self._engines.get(md['engine_uuid'], None)
555 md['engine_id'] = self._engines.get(md['engine_uuid'], None)
556
556
557 if 'date' in parent:
557 if 'date' in parent:
558 md['submitted'] = parent['date']
558 md['submitted'] = parent['date']
559 if 'started' in header:
559 if 'started' in header:
560 md['started'] = header['started']
560 md['started'] = header['started']
561 if 'date' in header:
561 if 'date' in header:
562 md['completed'] = header['date']
562 md['completed'] = header['date']
563 return md
563 return md
564
564
565 def _register_engine(self, msg):
565 def _register_engine(self, msg):
566 """Register a new engine, and update our connection info."""
566 """Register a new engine, and update our connection info."""
567 content = msg['content']
567 content = msg['content']
568 eid = content['id']
568 eid = content['id']
569 d = {eid : content['queue']}
569 d = {eid : content['queue']}
570 self._update_engines(d)
570 self._update_engines(d)
571
571
572 def _unregister_engine(self, msg):
572 def _unregister_engine(self, msg):
573 """Unregister an engine that has died."""
573 """Unregister an engine that has died."""
574 content = msg['content']
574 content = msg['content']
575 eid = int(content['id'])
575 eid = int(content['id'])
576 if eid in self._ids:
576 if eid in self._ids:
577 self._ids.remove(eid)
577 self._ids.remove(eid)
578 uuid = self._engines.pop(eid)
578 uuid = self._engines.pop(eid)
579
579
580 self._handle_stranded_msgs(eid, uuid)
580 self._handle_stranded_msgs(eid, uuid)
581
581
582 if self._task_socket and self._task_scheme == 'pure':
582 if self._task_socket and self._task_scheme == 'pure':
583 self._stop_scheduling_tasks()
583 self._stop_scheduling_tasks()
584
584
585 def _handle_stranded_msgs(self, eid, uuid):
585 def _handle_stranded_msgs(self, eid, uuid):
586 """Handle messages known to be on an engine when the engine unregisters.
586 """Handle messages known to be on an engine when the engine unregisters.
587
587
588 It is possible that this will fire prematurely - that is, an engine will
588 It is possible that this will fire prematurely - that is, an engine will
589 go down after completing a result, and the client will be notified
589 go down after completing a result, and the client will be notified
590 of the unregistration and later receive the successful result.
590 of the unregistration and later receive the successful result.
591 """
591 """
592
592
593 outstanding = self._outstanding_dict[uuid]
593 outstanding = self._outstanding_dict[uuid]
594
594
595 for msg_id in list(outstanding):
595 for msg_id in list(outstanding):
596 if msg_id in self.results:
596 if msg_id in self.results:
597 # we already
597 # we already
598 continue
598 continue
599 try:
599 try:
600 raise error.EngineError("Engine %r died while running task %r"%(eid, msg_id))
600 raise error.EngineError("Engine %r died while running task %r"%(eid, msg_id))
601 except:
601 except:
602 content = error.wrap_exception()
602 content = error.wrap_exception()
603 # build a fake message:
603 # build a fake message:
604 parent = {}
604 parent = {}
605 header = {}
605 header = {}
606 parent['msg_id'] = msg_id
606 parent['msg_id'] = msg_id
607 header['engine'] = uuid
607 header['engine'] = uuid
608 header['date'] = datetime.now()
608 header['date'] = datetime.now()
609 msg = dict(parent_header=parent, header=header, content=content)
609 msg = dict(parent_header=parent, header=header, content=content)
610 self._handle_apply_reply(msg)
610 self._handle_apply_reply(msg)
611
611
612 def _handle_execute_reply(self, msg):
612 def _handle_execute_reply(self, msg):
613 """Save the reply to an execute_request into our results.
613 """Save the reply to an execute_request into our results.
614
614
615 execute messages are never actually used. apply is used instead.
615 execute messages are never actually used. apply is used instead.
616 """
616 """
617
617
618 parent = msg['parent_header']
618 parent = msg['parent_header']
619 msg_id = parent['msg_id']
619 msg_id = parent['msg_id']
620 if msg_id not in self.outstanding:
620 if msg_id not in self.outstanding:
621 if msg_id in self.history:
621 if msg_id in self.history:
622 print ("got stale result: %s"%msg_id)
622 print ("got stale result: %s"%msg_id)
623 else:
623 else:
624 print ("got unknown result: %s"%msg_id)
624 print ("got unknown result: %s"%msg_id)
625 else:
625 else:
626 self.outstanding.remove(msg_id)
626 self.outstanding.remove(msg_id)
627 self.results[msg_id] = self._unwrap_exception(msg['content'])
627 self.results[msg_id] = self._unwrap_exception(msg['content'])
628
628
629 def _handle_apply_reply(self, msg):
629 def _handle_apply_reply(self, msg):
630 """Save the reply to an apply_request into our results."""
630 """Save the reply to an apply_request into our results."""
631 parent = msg['parent_header']
631 parent = msg['parent_header']
632 msg_id = parent['msg_id']
632 msg_id = parent['msg_id']
633 if msg_id not in self.outstanding:
633 if msg_id not in self.outstanding:
634 if msg_id in self.history:
634 if msg_id in self.history:
635 print ("got stale result: %s"%msg_id)
635 print ("got stale result: %s"%msg_id)
636 print self.results[msg_id]
636 print self.results[msg_id]
637 print msg
637 print msg
638 else:
638 else:
639 print ("got unknown result: %s"%msg_id)
639 print ("got unknown result: %s"%msg_id)
640 else:
640 else:
641 self.outstanding.remove(msg_id)
641 self.outstanding.remove(msg_id)
642 content = msg['content']
642 content = msg['content']
643 header = msg['header']
643 header = msg['header']
644
644
645 # construct metadata:
645 # construct metadata:
646 md = self.metadata[msg_id]
646 md = self.metadata[msg_id]
647 md.update(self._extract_metadata(header, parent, content))
647 md.update(self._extract_metadata(header, parent, content))
648 # is this redundant?
648 # is this redundant?
649 self.metadata[msg_id] = md
649 self.metadata[msg_id] = md
650
650
651 e_outstanding = self._outstanding_dict[md['engine_uuid']]
651 e_outstanding = self._outstanding_dict[md['engine_uuid']]
652 if msg_id in e_outstanding:
652 if msg_id in e_outstanding:
653 e_outstanding.remove(msg_id)
653 e_outstanding.remove(msg_id)
654
654
655 # construct result:
655 # construct result:
656 if content['status'] == 'ok':
656 if content['status'] == 'ok':
657 self.results[msg_id] = util.unserialize_object(msg['buffers'])[0]
657 self.results[msg_id] = util.unserialize_object(msg['buffers'])[0]
658 elif content['status'] == 'aborted':
658 elif content['status'] == 'aborted':
659 self.results[msg_id] = error.TaskAborted(msg_id)
659 self.results[msg_id] = error.TaskAborted(msg_id)
660 elif content['status'] == 'resubmitted':
660 elif content['status'] == 'resubmitted':
661 # TODO: handle resubmission
661 # TODO: handle resubmission
662 pass
662 pass
663 else:
663 else:
664 self.results[msg_id] = self._unwrap_exception(content)
664 self.results[msg_id] = self._unwrap_exception(content)
665
665
666 def _flush_notifications(self):
666 def _flush_notifications(self):
667 """Flush notifications of engine registrations waiting
667 """Flush notifications of engine registrations waiting
668 in ZMQ queue."""
668 in ZMQ queue."""
669 idents,msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK)
669 idents,msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK)
670 while msg is not None:
670 while msg is not None:
671 if self.debug:
671 if self.debug:
672 pprint(msg)
672 pprint(msg)
673 msg_type = msg['msg_type']
673 msg_type = msg['header']['msg_type']
674 handler = self._notification_handlers.get(msg_type, None)
674 handler = self._notification_handlers.get(msg_type, None)
675 if handler is None:
675 if handler is None:
676 raise Exception("Unhandled message type: %s"%msg.msg_type)
676 raise Exception("Unhandled message type: %s"%msg.msg_type)
677 else:
677 else:
678 handler(msg)
678 handler(msg)
679 idents,msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK)
679 idents,msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK)
680
680
681 def _flush_results(self, sock):
681 def _flush_results(self, sock):
682 """Flush task or queue results waiting in ZMQ queue."""
682 """Flush task or queue results waiting in ZMQ queue."""
683 idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
683 idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
684 while msg is not None:
684 while msg is not None:
685 if self.debug:
685 if self.debug:
686 pprint(msg)
686 pprint(msg)
687 msg_type = msg['msg_type']
687 msg_type = msg['header']['msg_type']
688 handler = self._queue_handlers.get(msg_type, None)
688 handler = self._queue_handlers.get(msg_type, None)
689 if handler is None:
689 if handler is None:
690 raise Exception("Unhandled message type: %s"%msg.msg_type)
690 raise Exception("Unhandled message type: %s"%msg.msg_type)
691 else:
691 else:
692 handler(msg)
692 handler(msg)
693 idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
693 idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
694
694
695 def _flush_control(self, sock):
695 def _flush_control(self, sock):
696 """Flush replies from the control channel waiting
696 """Flush replies from the control channel waiting
697 in the ZMQ queue.
697 in the ZMQ queue.
698
698
699 Currently: ignore them."""
699 Currently: ignore them."""
700 if self._ignored_control_replies <= 0:
700 if self._ignored_control_replies <= 0:
701 return
701 return
702 idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
702 idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
703 while msg is not None:
703 while msg is not None:
704 self._ignored_control_replies -= 1
704 self._ignored_control_replies -= 1
705 if self.debug:
705 if self.debug:
706 pprint(msg)
706 pprint(msg)
707 idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
707 idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
708
708
709 def _flush_ignored_control(self):
709 def _flush_ignored_control(self):
710 """flush ignored control replies"""
710 """flush ignored control replies"""
711 while self._ignored_control_replies > 0:
711 while self._ignored_control_replies > 0:
712 self.session.recv(self._control_socket)
712 self.session.recv(self._control_socket)
713 self._ignored_control_replies -= 1
713 self._ignored_control_replies -= 1
714
714
715 def _flush_ignored_hub_replies(self):
715 def _flush_ignored_hub_replies(self):
716 ident,msg = self.session.recv(self._query_socket, mode=zmq.NOBLOCK)
716 ident,msg = self.session.recv(self._query_socket, mode=zmq.NOBLOCK)
717 while msg is not None:
717 while msg is not None:
718 ident,msg = self.session.recv(self._query_socket, mode=zmq.NOBLOCK)
718 ident,msg = self.session.recv(self._query_socket, mode=zmq.NOBLOCK)
719
719
720 def _flush_iopub(self, sock):
720 def _flush_iopub(self, sock):
721 """Flush replies from the iopub channel waiting
721 """Flush replies from the iopub channel waiting
722 in the ZMQ queue.
722 in the ZMQ queue.
723 """
723 """
724 idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
724 idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
725 while msg is not None:
725 while msg is not None:
726 if self.debug:
726 if self.debug:
727 pprint(msg)
727 pprint(msg)
728 parent = msg['parent_header']
728 parent = msg['parent_header']
729 msg_id = parent['msg_id']
729 msg_id = parent['msg_id']
730 content = msg['content']
730 content = msg['content']
731 header = msg['header']
731 header = msg['header']
732 msg_type = msg['msg_type']
732 msg_type = msg['header']['msg_type']
733
733
734 # init metadata:
734 # init metadata:
735 md = self.metadata[msg_id]
735 md = self.metadata[msg_id]
736
736
737 if msg_type == 'stream':
737 if msg_type == 'stream':
738 name = content['name']
738 name = content['name']
739 s = md[name] or ''
739 s = md[name] or ''
740 md[name] = s + content['data']
740 md[name] = s + content['data']
741 elif msg_type == 'pyerr':
741 elif msg_type == 'pyerr':
742 md.update({'pyerr' : self._unwrap_exception(content)})
742 md.update({'pyerr' : self._unwrap_exception(content)})
743 elif msg_type == 'pyin':
743 elif msg_type == 'pyin':
744 md.update({'pyin' : content['code']})
744 md.update({'pyin' : content['code']})
745 else:
745 else:
746 md.update({msg_type : content.get('data', '')})
746 md.update({msg_type : content.get('data', '')})
747
747
748 # reduntant?
748 # reduntant?
749 self.metadata[msg_id] = md
749 self.metadata[msg_id] = md
750
750
751 idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
751 idents,msg = self.session.recv(sock, mode=zmq.NOBLOCK)
752
752
753 #--------------------------------------------------------------------------
753 #--------------------------------------------------------------------------
754 # len, getitem
754 # len, getitem
755 #--------------------------------------------------------------------------
755 #--------------------------------------------------------------------------
756
756
757 def __len__(self):
757 def __len__(self):
758 """len(client) returns # of engines."""
758 """len(client) returns # of engines."""
759 return len(self.ids)
759 return len(self.ids)
760
760
761 def __getitem__(self, key):
761 def __getitem__(self, key):
762 """index access returns DirectView multiplexer objects
762 """index access returns DirectView multiplexer objects
763
763
764 Must be int, slice, or list/tuple/xrange of ints"""
764 Must be int, slice, or list/tuple/xrange of ints"""
765 if not isinstance(key, (int, slice, tuple, list, xrange)):
765 if not isinstance(key, (int, slice, tuple, list, xrange)):
766 raise TypeError("key by int/slice/iterable of ints only, not %s"%(type(key)))
766 raise TypeError("key by int/slice/iterable of ints only, not %s"%(type(key)))
767 else:
767 else:
768 return self.direct_view(key)
768 return self.direct_view(key)
769
769
770 #--------------------------------------------------------------------------
770 #--------------------------------------------------------------------------
771 # Begin public methods
771 # Begin public methods
772 #--------------------------------------------------------------------------
772 #--------------------------------------------------------------------------
773
773
774 @property
774 @property
775 def ids(self):
775 def ids(self):
776 """Always up-to-date ids property."""
776 """Always up-to-date ids property."""
777 self._flush_notifications()
777 self._flush_notifications()
778 # always copy:
778 # always copy:
779 return list(self._ids)
779 return list(self._ids)
780
780
781 def close(self):
781 def close(self):
782 if self._closed:
782 if self._closed:
783 return
783 return
784 snames = filter(lambda n: n.endswith('socket'), dir(self))
784 snames = filter(lambda n: n.endswith('socket'), dir(self))
785 for socket in map(lambda name: getattr(self, name), snames):
785 for socket in map(lambda name: getattr(self, name), snames):
786 if isinstance(socket, zmq.Socket) and not socket.closed:
786 if isinstance(socket, zmq.Socket) and not socket.closed:
787 socket.close()
787 socket.close()
788 self._closed = True
788 self._closed = True
789
789
790 def spin(self):
790 def spin(self):
791 """Flush any registration notifications and execution results
791 """Flush any registration notifications and execution results
792 waiting in the ZMQ queue.
792 waiting in the ZMQ queue.
793 """
793 """
794 if self._notification_socket:
794 if self._notification_socket:
795 self._flush_notifications()
795 self._flush_notifications()
796 if self._mux_socket:
796 if self._mux_socket:
797 self._flush_results(self._mux_socket)
797 self._flush_results(self._mux_socket)
798 if self._task_socket:
798 if self._task_socket:
799 self._flush_results(self._task_socket)
799 self._flush_results(self._task_socket)
800 if self._control_socket:
800 if self._control_socket:
801 self._flush_control(self._control_socket)
801 self._flush_control(self._control_socket)
802 if self._iopub_socket:
802 if self._iopub_socket:
803 self._flush_iopub(self._iopub_socket)
803 self._flush_iopub(self._iopub_socket)
804 if self._query_socket:
804 if self._query_socket:
805 self._flush_ignored_hub_replies()
805 self._flush_ignored_hub_replies()
806
806
807 def wait(self, jobs=None, timeout=-1):
807 def wait(self, jobs=None, timeout=-1):
808 """waits on one or more `jobs`, for up to `timeout` seconds.
808 """waits on one or more `jobs`, for up to `timeout` seconds.
809
809
810 Parameters
810 Parameters
811 ----------
811 ----------
812
812
813 jobs : int, str, or list of ints and/or strs, or one or more AsyncResult objects
813 jobs : int, str, or list of ints and/or strs, or one or more AsyncResult objects
814 ints are indices to self.history
814 ints are indices to self.history
815 strs are msg_ids
815 strs are msg_ids
816 default: wait on all outstanding messages
816 default: wait on all outstanding messages
817 timeout : float
817 timeout : float
818 a time in seconds, after which to give up.
818 a time in seconds, after which to give up.
819 default is -1, which means no timeout
819 default is -1, which means no timeout
820
820
821 Returns
821 Returns
822 -------
822 -------
823
823
824 True : when all msg_ids are done
824 True : when all msg_ids are done
825 False : timeout reached, some msg_ids still outstanding
825 False : timeout reached, some msg_ids still outstanding
826 """
826 """
827 tic = time.time()
827 tic = time.time()
828 if jobs is None:
828 if jobs is None:
829 theids = self.outstanding
829 theids = self.outstanding
830 else:
830 else:
831 if isinstance(jobs, (int, basestring, AsyncResult)):
831 if isinstance(jobs, (int, basestring, AsyncResult)):
832 jobs = [jobs]
832 jobs = [jobs]
833 theids = set()
833 theids = set()
834 for job in jobs:
834 for job in jobs:
835 if isinstance(job, int):
835 if isinstance(job, int):
836 # index access
836 # index access
837 job = self.history[job]
837 job = self.history[job]
838 elif isinstance(job, AsyncResult):
838 elif isinstance(job, AsyncResult):
839 map(theids.add, job.msg_ids)
839 map(theids.add, job.msg_ids)
840 continue
840 continue
841 theids.add(job)
841 theids.add(job)
842 if not theids.intersection(self.outstanding):
842 if not theids.intersection(self.outstanding):
843 return True
843 return True
844 self.spin()
844 self.spin()
845 while theids.intersection(self.outstanding):
845 while theids.intersection(self.outstanding):
846 if timeout >= 0 and ( time.time()-tic ) > timeout:
846 if timeout >= 0 and ( time.time()-tic ) > timeout:
847 break
847 break
848 time.sleep(1e-3)
848 time.sleep(1e-3)
849 self.spin()
849 self.spin()
850 return len(theids.intersection(self.outstanding)) == 0
850 return len(theids.intersection(self.outstanding)) == 0
851
851
852 #--------------------------------------------------------------------------
852 #--------------------------------------------------------------------------
853 # Control methods
853 # Control methods
854 #--------------------------------------------------------------------------
854 #--------------------------------------------------------------------------
855
855
856 @spin_first
856 @spin_first
857 def clear(self, targets=None, block=None):
857 def clear(self, targets=None, block=None):
858 """Clear the namespace in target(s)."""
858 """Clear the namespace in target(s)."""
859 block = self.block if block is None else block
859 block = self.block if block is None else block
860 targets = self._build_targets(targets)[0]
860 targets = self._build_targets(targets)[0]
861 for t in targets:
861 for t in targets:
862 self.session.send(self._control_socket, 'clear_request', content={}, ident=t)
862 self.session.send(self._control_socket, 'clear_request', content={}, ident=t)
863 error = False
863 error = False
864 if block:
864 if block:
865 self._flush_ignored_control()
865 self._flush_ignored_control()
866 for i in range(len(targets)):
866 for i in range(len(targets)):
867 idents,msg = self.session.recv(self._control_socket,0)
867 idents,msg = self.session.recv(self._control_socket,0)
868 if self.debug:
868 if self.debug:
869 pprint(msg)
869 pprint(msg)
870 if msg['content']['status'] != 'ok':
870 if msg['content']['status'] != 'ok':
871 error = self._unwrap_exception(msg['content'])
871 error = self._unwrap_exception(msg['content'])
872 else:
872 else:
873 self._ignored_control_replies += len(targets)
873 self._ignored_control_replies += len(targets)
874 if error:
874 if error:
875 raise error
875 raise error
876
876
877
877
878 @spin_first
878 @spin_first
879 def abort(self, jobs=None, targets=None, block=None):
879 def abort(self, jobs=None, targets=None, block=None):
880 """Abort specific jobs from the execution queues of target(s).
880 """Abort specific jobs from the execution queues of target(s).
881
881
882 This is a mechanism to prevent jobs that have already been submitted
882 This is a mechanism to prevent jobs that have already been submitted
883 from executing.
883 from executing.
884
884
885 Parameters
885 Parameters
886 ----------
886 ----------
887
887
888 jobs : msg_id, list of msg_ids, or AsyncResult
888 jobs : msg_id, list of msg_ids, or AsyncResult
889 The jobs to be aborted
889 The jobs to be aborted
890
890
891
891
892 """
892 """
893 block = self.block if block is None else block
893 block = self.block if block is None else block
894 targets = self._build_targets(targets)[0]
894 targets = self._build_targets(targets)[0]
895 msg_ids = []
895 msg_ids = []
896 if isinstance(jobs, (basestring,AsyncResult)):
896 if isinstance(jobs, (basestring,AsyncResult)):
897 jobs = [jobs]
897 jobs = [jobs]
898 bad_ids = filter(lambda obj: not isinstance(obj, (basestring, AsyncResult)), jobs)
898 bad_ids = filter(lambda obj: not isinstance(obj, (basestring, AsyncResult)), jobs)
899 if bad_ids:
899 if bad_ids:
900 raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0])
900 raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0])
901 for j in jobs:
901 for j in jobs:
902 if isinstance(j, AsyncResult):
902 if isinstance(j, AsyncResult):
903 msg_ids.extend(j.msg_ids)
903 msg_ids.extend(j.msg_ids)
904 else:
904 else:
905 msg_ids.append(j)
905 msg_ids.append(j)
906 content = dict(msg_ids=msg_ids)
906 content = dict(msg_ids=msg_ids)
907 for t in targets:
907 for t in targets:
908 self.session.send(self._control_socket, 'abort_request',
908 self.session.send(self._control_socket, 'abort_request',
909 content=content, ident=t)
909 content=content, ident=t)
910 error = False
910 error = False
911 if block:
911 if block:
912 self._flush_ignored_control()
912 self._flush_ignored_control()
913 for i in range(len(targets)):
913 for i in range(len(targets)):
914 idents,msg = self.session.recv(self._control_socket,0)
914 idents,msg = self.session.recv(self._control_socket,0)
915 if self.debug:
915 if self.debug:
916 pprint(msg)
916 pprint(msg)
917 if msg['content']['status'] != 'ok':
917 if msg['content']['status'] != 'ok':
918 error = self._unwrap_exception(msg['content'])
918 error = self._unwrap_exception(msg['content'])
919 else:
919 else:
920 self._ignored_control_replies += len(targets)
920 self._ignored_control_replies += len(targets)
921 if error:
921 if error:
922 raise error
922 raise error
923
923
924 @spin_first
924 @spin_first
925 def shutdown(self, targets=None, restart=False, hub=False, block=None):
925 def shutdown(self, targets=None, restart=False, hub=False, block=None):
926 """Terminates one or more engine processes, optionally including the hub."""
926 """Terminates one or more engine processes, optionally including the hub."""
927 block = self.block if block is None else block
927 block = self.block if block is None else block
928 if hub:
928 if hub:
929 targets = 'all'
929 targets = 'all'
930 targets = self._build_targets(targets)[0]
930 targets = self._build_targets(targets)[0]
931 for t in targets:
931 for t in targets:
932 self.session.send(self._control_socket, 'shutdown_request',
932 self.session.send(self._control_socket, 'shutdown_request',
933 content={'restart':restart},ident=t)
933 content={'restart':restart},ident=t)
934 error = False
934 error = False
935 if block or hub:
935 if block or hub:
936 self._flush_ignored_control()
936 self._flush_ignored_control()
937 for i in range(len(targets)):
937 for i in range(len(targets)):
938 idents,msg = self.session.recv(self._control_socket, 0)
938 idents,msg = self.session.recv(self._control_socket, 0)
939 if self.debug:
939 if self.debug:
940 pprint(msg)
940 pprint(msg)
941 if msg['content']['status'] != 'ok':
941 if msg['content']['status'] != 'ok':
942 error = self._unwrap_exception(msg['content'])
942 error = self._unwrap_exception(msg['content'])
943 else:
943 else:
944 self._ignored_control_replies += len(targets)
944 self._ignored_control_replies += len(targets)
945
945
946 if hub:
946 if hub:
947 time.sleep(0.25)
947 time.sleep(0.25)
948 self.session.send(self._query_socket, 'shutdown_request')
948 self.session.send(self._query_socket, 'shutdown_request')
949 idents,msg = self.session.recv(self._query_socket, 0)
949 idents,msg = self.session.recv(self._query_socket, 0)
950 if self.debug:
950 if self.debug:
951 pprint(msg)
951 pprint(msg)
952 if msg['content']['status'] != 'ok':
952 if msg['content']['status'] != 'ok':
953 error = self._unwrap_exception(msg['content'])
953 error = self._unwrap_exception(msg['content'])
954
954
955 if error:
955 if error:
956 raise error
956 raise error
957
957
958 #--------------------------------------------------------------------------
958 #--------------------------------------------------------------------------
959 # Execution related methods
959 # Execution related methods
960 #--------------------------------------------------------------------------
960 #--------------------------------------------------------------------------
961
961
962 def _maybe_raise(self, result):
962 def _maybe_raise(self, result):
963 """wrapper for maybe raising an exception if apply failed."""
963 """wrapper for maybe raising an exception if apply failed."""
964 if isinstance(result, error.RemoteError):
964 if isinstance(result, error.RemoteError):
965 raise result
965 raise result
966
966
967 return result
967 return result
968
968
969 def send_apply_message(self, socket, f, args=None, kwargs=None, subheader=None, track=False,
969 def send_apply_message(self, socket, f, args=None, kwargs=None, subheader=None, track=False,
970 ident=None):
970 ident=None):
971 """construct and send an apply message via a socket.
971 """construct and send an apply message via a socket.
972
972
973 This is the principal method with which all engine execution is performed by views.
973 This is the principal method with which all engine execution is performed by views.
974 """
974 """
975
975
976 assert not self._closed, "cannot use me anymore, I'm closed!"
976 assert not self._closed, "cannot use me anymore, I'm closed!"
977 # defaults:
977 # defaults:
978 args = args if args is not None else []
978 args = args if args is not None else []
979 kwargs = kwargs if kwargs is not None else {}
979 kwargs = kwargs if kwargs is not None else {}
980 subheader = subheader if subheader is not None else {}
980 subheader = subheader if subheader is not None else {}
981
981
982 # validate arguments
982 # validate arguments
983 if not callable(f):
983 if not callable(f):
984 raise TypeError("f must be callable, not %s"%type(f))
984 raise TypeError("f must be callable, not %s"%type(f))
985 if not isinstance(args, (tuple, list)):
985 if not isinstance(args, (tuple, list)):
986 raise TypeError("args must be tuple or list, not %s"%type(args))
986 raise TypeError("args must be tuple or list, not %s"%type(args))
987 if not isinstance(kwargs, dict):
987 if not isinstance(kwargs, dict):
988 raise TypeError("kwargs must be dict, not %s"%type(kwargs))
988 raise TypeError("kwargs must be dict, not %s"%type(kwargs))
989 if not isinstance(subheader, dict):
989 if not isinstance(subheader, dict):
990 raise TypeError("subheader must be dict, not %s"%type(subheader))
990 raise TypeError("subheader must be dict, not %s"%type(subheader))
991
991
992 bufs = util.pack_apply_message(f,args,kwargs)
992 bufs = util.pack_apply_message(f,args,kwargs)
993
993
994 msg = self.session.send(socket, "apply_request", buffers=bufs, ident=ident,
994 msg = self.session.send(socket, "apply_request", buffers=bufs, ident=ident,
995 subheader=subheader, track=track)
995 subheader=subheader, track=track)
996
996
997 msg_id = msg['msg_id']
997 msg_id = msg['header']['msg_id']
998 self.outstanding.add(msg_id)
998 self.outstanding.add(msg_id)
999 if ident:
999 if ident:
1000 # possibly routed to a specific engine
1000 # possibly routed to a specific engine
1001 if isinstance(ident, list):
1001 if isinstance(ident, list):
1002 ident = ident[-1]
1002 ident = ident[-1]
1003 if ident in self._engines.values():
1003 if ident in self._engines.values():
1004 # save for later, in case of engine death
1004 # save for later, in case of engine death
1005 self._outstanding_dict[ident].add(msg_id)
1005 self._outstanding_dict[ident].add(msg_id)
1006 self.history.append(msg_id)
1006 self.history.append(msg_id)
1007 self.metadata[msg_id]['submitted'] = datetime.now()
1007 self.metadata[msg_id]['submitted'] = datetime.now()
1008
1008
1009 return msg
1009 return msg
1010
1010
1011 #--------------------------------------------------------------------------
1011 #--------------------------------------------------------------------------
1012 # construct a View object
1012 # construct a View object
1013 #--------------------------------------------------------------------------
1013 #--------------------------------------------------------------------------
1014
1014
1015 def load_balanced_view(self, targets=None):
1015 def load_balanced_view(self, targets=None):
1016 """construct a DirectView object.
1016 """construct a DirectView object.
1017
1017
1018 If no arguments are specified, create a LoadBalancedView
1018 If no arguments are specified, create a LoadBalancedView
1019 using all engines.
1019 using all engines.
1020
1020
1021 Parameters
1021 Parameters
1022 ----------
1022 ----------
1023
1023
1024 targets: list,slice,int,etc. [default: use all engines]
1024 targets: list,slice,int,etc. [default: use all engines]
1025 The subset of engines across which to load-balance
1025 The subset of engines across which to load-balance
1026 """
1026 """
1027 if targets is not None:
1027 if targets is not None:
1028 targets = self._build_targets(targets)[1]
1028 targets = self._build_targets(targets)[1]
1029 return LoadBalancedView(client=self, socket=self._task_socket, targets=targets)
1029 return LoadBalancedView(client=self, socket=self._task_socket, targets=targets)
1030
1030
1031 def direct_view(self, targets='all'):
1031 def direct_view(self, targets='all'):
1032 """construct a DirectView object.
1032 """construct a DirectView object.
1033
1033
1034 If no targets are specified, create a DirectView
1034 If no targets are specified, create a DirectView
1035 using all engines.
1035 using all engines.
1036
1036
1037 Parameters
1037 Parameters
1038 ----------
1038 ----------
1039
1039
1040 targets: list,slice,int,etc. [default: use all engines]
1040 targets: list,slice,int,etc. [default: use all engines]
1041 The engines to use for the View
1041 The engines to use for the View
1042 """
1042 """
1043 single = isinstance(targets, int)
1043 single = isinstance(targets, int)
1044 targets = self._build_targets(targets)[1]
1044 targets = self._build_targets(targets)[1]
1045 if single:
1045 if single:
1046 targets = targets[0]
1046 targets = targets[0]
1047 return DirectView(client=self, socket=self._mux_socket, targets=targets)
1047 return DirectView(client=self, socket=self._mux_socket, targets=targets)
1048
1048
1049 #--------------------------------------------------------------------------
1049 #--------------------------------------------------------------------------
1050 # Query methods
1050 # Query methods
1051 #--------------------------------------------------------------------------
1051 #--------------------------------------------------------------------------
1052
1052
1053 @spin_first
1053 @spin_first
1054 def get_result(self, indices_or_msg_ids=None, block=None):
1054 def get_result(self, indices_or_msg_ids=None, block=None):
1055 """Retrieve a result by msg_id or history index, wrapped in an AsyncResult object.
1055 """Retrieve a result by msg_id or history index, wrapped in an AsyncResult object.
1056
1056
1057 If the client already has the results, no request to the Hub will be made.
1057 If the client already has the results, no request to the Hub will be made.
1058
1058
1059 This is a convenient way to construct AsyncResult objects, which are wrappers
1059 This is a convenient way to construct AsyncResult objects, which are wrappers
1060 that include metadata about execution, and allow for awaiting results that
1060 that include metadata about execution, and allow for awaiting results that
1061 were not submitted by this Client.
1061 were not submitted by this Client.
1062
1062
1063 It can also be a convenient way to retrieve the metadata associated with
1063 It can also be a convenient way to retrieve the metadata associated with
1064 blocking execution, since it always retrieves
1064 blocking execution, since it always retrieves
1065
1065
1066 Examples
1066 Examples
1067 --------
1067 --------
1068 ::
1068 ::
1069
1069
1070 In [10]: r = client.apply()
1070 In [10]: r = client.apply()
1071
1071
1072 Parameters
1072 Parameters
1073 ----------
1073 ----------
1074
1074
1075 indices_or_msg_ids : integer history index, str msg_id, or list of either
1075 indices_or_msg_ids : integer history index, str msg_id, or list of either
1076 The indices or msg_ids of indices to be retrieved
1076 The indices or msg_ids of indices to be retrieved
1077
1077
1078 block : bool
1078 block : bool
1079 Whether to wait for the result to be done
1079 Whether to wait for the result to be done
1080
1080
1081 Returns
1081 Returns
1082 -------
1082 -------
1083
1083
1084 AsyncResult
1084 AsyncResult
1085 A single AsyncResult object will always be returned.
1085 A single AsyncResult object will always be returned.
1086
1086
1087 AsyncHubResult
1087 AsyncHubResult
1088 A subclass of AsyncResult that retrieves results from the Hub
1088 A subclass of AsyncResult that retrieves results from the Hub
1089
1089
1090 """
1090 """
1091 block = self.block if block is None else block
1091 block = self.block if block is None else block
1092 if indices_or_msg_ids is None:
1092 if indices_or_msg_ids is None:
1093 indices_or_msg_ids = -1
1093 indices_or_msg_ids = -1
1094
1094
1095 if not isinstance(indices_or_msg_ids, (list,tuple)):
1095 if not isinstance(indices_or_msg_ids, (list,tuple)):
1096 indices_or_msg_ids = [indices_or_msg_ids]
1096 indices_or_msg_ids = [indices_or_msg_ids]
1097
1097
1098 theids = []
1098 theids = []
1099 for id in indices_or_msg_ids:
1099 for id in indices_or_msg_ids:
1100 if isinstance(id, int):
1100 if isinstance(id, int):
1101 id = self.history[id]
1101 id = self.history[id]
1102 if not isinstance(id, basestring):
1102 if not isinstance(id, basestring):
1103 raise TypeError("indices must be str or int, not %r"%id)
1103 raise TypeError("indices must be str or int, not %r"%id)
1104 theids.append(id)
1104 theids.append(id)
1105
1105
1106 local_ids = filter(lambda msg_id: msg_id in self.history or msg_id in self.results, theids)
1106 local_ids = filter(lambda msg_id: msg_id in self.history or msg_id in self.results, theids)
1107 remote_ids = filter(lambda msg_id: msg_id not in local_ids, theids)
1107 remote_ids = filter(lambda msg_id: msg_id not in local_ids, theids)
1108
1108
1109 if remote_ids:
1109 if remote_ids:
1110 ar = AsyncHubResult(self, msg_ids=theids)
1110 ar = AsyncHubResult(self, msg_ids=theids)
1111 else:
1111 else:
1112 ar = AsyncResult(self, msg_ids=theids)
1112 ar = AsyncResult(self, msg_ids=theids)
1113
1113
1114 if block:
1114 if block:
1115 ar.wait()
1115 ar.wait()
1116
1116
1117 return ar
1117 return ar
1118
1118
1119 @spin_first
1119 @spin_first
1120 def resubmit(self, indices_or_msg_ids=None, subheader=None, block=None):
1120 def resubmit(self, indices_or_msg_ids=None, subheader=None, block=None):
1121 """Resubmit one or more tasks.
1121 """Resubmit one or more tasks.
1122
1122
1123 in-flight tasks may not be resubmitted.
1123 in-flight tasks may not be resubmitted.
1124
1124
1125 Parameters
1125 Parameters
1126 ----------
1126 ----------
1127
1127
1128 indices_or_msg_ids : integer history index, str msg_id, or list of either
1128 indices_or_msg_ids : integer history index, str msg_id, or list of either
1129 The indices or msg_ids of indices to be retrieved
1129 The indices or msg_ids of indices to be retrieved
1130
1130
1131 block : bool
1131 block : bool
1132 Whether to wait for the result to be done
1132 Whether to wait for the result to be done
1133
1133
1134 Returns
1134 Returns
1135 -------
1135 -------
1136
1136
1137 AsyncHubResult
1137 AsyncHubResult
1138 A subclass of AsyncResult that retrieves results from the Hub
1138 A subclass of AsyncResult that retrieves results from the Hub
1139
1139
1140 """
1140 """
1141 block = self.block if block is None else block
1141 block = self.block if block is None else block
1142 if indices_or_msg_ids is None:
1142 if indices_or_msg_ids is None:
1143 indices_or_msg_ids = -1
1143 indices_or_msg_ids = -1
1144
1144
1145 if not isinstance(indices_or_msg_ids, (list,tuple)):
1145 if not isinstance(indices_or_msg_ids, (list,tuple)):
1146 indices_or_msg_ids = [indices_or_msg_ids]
1146 indices_or_msg_ids = [indices_or_msg_ids]
1147
1147
1148 theids = []
1148 theids = []
1149 for id in indices_or_msg_ids:
1149 for id in indices_or_msg_ids:
1150 if isinstance(id, int):
1150 if isinstance(id, int):
1151 id = self.history[id]
1151 id = self.history[id]
1152 if not isinstance(id, basestring):
1152 if not isinstance(id, basestring):
1153 raise TypeError("indices must be str or int, not %r"%id)
1153 raise TypeError("indices must be str or int, not %r"%id)
1154 theids.append(id)
1154 theids.append(id)
1155
1155
1156 for msg_id in theids:
1156 for msg_id in theids:
1157 self.outstanding.discard(msg_id)
1157 self.outstanding.discard(msg_id)
1158 if msg_id in self.history:
1158 if msg_id in self.history:
1159 self.history.remove(msg_id)
1159 self.history.remove(msg_id)
1160 self.results.pop(msg_id, None)
1160 self.results.pop(msg_id, None)
1161 self.metadata.pop(msg_id, None)
1161 self.metadata.pop(msg_id, None)
1162 content = dict(msg_ids = theids)
1162 content = dict(msg_ids = theids)
1163
1163
1164 self.session.send(self._query_socket, 'resubmit_request', content)
1164 self.session.send(self._query_socket, 'resubmit_request', content)
1165
1165
1166 zmq.select([self._query_socket], [], [])
1166 zmq.select([self._query_socket], [], [])
1167 idents,msg = self.session.recv(self._query_socket, zmq.NOBLOCK)
1167 idents,msg = self.session.recv(self._query_socket, zmq.NOBLOCK)
1168 if self.debug:
1168 if self.debug:
1169 pprint(msg)
1169 pprint(msg)
1170 content = msg['content']
1170 content = msg['content']
1171 if content['status'] != 'ok':
1171 if content['status'] != 'ok':
1172 raise self._unwrap_exception(content)
1172 raise self._unwrap_exception(content)
1173
1173
1174 ar = AsyncHubResult(self, msg_ids=theids)
1174 ar = AsyncHubResult(self, msg_ids=theids)
1175
1175
1176 if block:
1176 if block:
1177 ar.wait()
1177 ar.wait()
1178
1178
1179 return ar
1179 return ar
1180
1180
1181 @spin_first
1181 @spin_first
1182 def result_status(self, msg_ids, status_only=True):
1182 def result_status(self, msg_ids, status_only=True):
1183 """Check on the status of the result(s) of the apply request with `msg_ids`.
1183 """Check on the status of the result(s) of the apply request with `msg_ids`.
1184
1184
1185 If status_only is False, then the actual results will be retrieved, else
1185 If status_only is False, then the actual results will be retrieved, else
1186 only the status of the results will be checked.
1186 only the status of the results will be checked.
1187
1187
1188 Parameters
1188 Parameters
1189 ----------
1189 ----------
1190
1190
1191 msg_ids : list of msg_ids
1191 msg_ids : list of msg_ids
1192 if int:
1192 if int:
1193 Passed as index to self.history for convenience.
1193 Passed as index to self.history for convenience.
1194 status_only : bool (default: True)
1194 status_only : bool (default: True)
1195 if False:
1195 if False:
1196 Retrieve the actual results of completed tasks.
1196 Retrieve the actual results of completed tasks.
1197
1197
1198 Returns
1198 Returns
1199 -------
1199 -------
1200
1200
1201 results : dict
1201 results : dict
1202 There will always be the keys 'pending' and 'completed', which will
1202 There will always be the keys 'pending' and 'completed', which will
1203 be lists of msg_ids that are incomplete or complete. If `status_only`
1203 be lists of msg_ids that are incomplete or complete. If `status_only`
1204 is False, then completed results will be keyed by their `msg_id`.
1204 is False, then completed results will be keyed by their `msg_id`.
1205 """
1205 """
1206 if not isinstance(msg_ids, (list,tuple)):
1206 if not isinstance(msg_ids, (list,tuple)):
1207 msg_ids = [msg_ids]
1207 msg_ids = [msg_ids]
1208
1208
1209 theids = []
1209 theids = []
1210 for msg_id in msg_ids:
1210 for msg_id in msg_ids:
1211 if isinstance(msg_id, int):
1211 if isinstance(msg_id, int):
1212 msg_id = self.history[msg_id]
1212 msg_id = self.history[msg_id]
1213 if not isinstance(msg_id, basestring):
1213 if not isinstance(msg_id, basestring):
1214 raise TypeError("msg_ids must be str, not %r"%msg_id)
1214 raise TypeError("msg_ids must be str, not %r"%msg_id)
1215 theids.append(msg_id)
1215 theids.append(msg_id)
1216
1216
1217 completed = []
1217 completed = []
1218 local_results = {}
1218 local_results = {}
1219
1219
1220 # comment this block out to temporarily disable local shortcut:
1220 # comment this block out to temporarily disable local shortcut:
1221 for msg_id in theids:
1221 for msg_id in theids:
1222 if msg_id in self.results:
1222 if msg_id in self.results:
1223 completed.append(msg_id)
1223 completed.append(msg_id)
1224 local_results[msg_id] = self.results[msg_id]
1224 local_results[msg_id] = self.results[msg_id]
1225 theids.remove(msg_id)
1225 theids.remove(msg_id)
1226
1226
1227 if theids: # some not locally cached
1227 if theids: # some not locally cached
1228 content = dict(msg_ids=theids, status_only=status_only)
1228 content = dict(msg_ids=theids, status_only=status_only)
1229 msg = self.session.send(self._query_socket, "result_request", content=content)
1229 msg = self.session.send(self._query_socket, "result_request", content=content)
1230 zmq.select([self._query_socket], [], [])
1230 zmq.select([self._query_socket], [], [])
1231 idents,msg = self.session.recv(self._query_socket, zmq.NOBLOCK)
1231 idents,msg = self.session.recv(self._query_socket, zmq.NOBLOCK)
1232 if self.debug:
1232 if self.debug:
1233 pprint(msg)
1233 pprint(msg)
1234 content = msg['content']
1234 content = msg['content']
1235 if content['status'] != 'ok':
1235 if content['status'] != 'ok':
1236 raise self._unwrap_exception(content)
1236 raise self._unwrap_exception(content)
1237 buffers = msg['buffers']
1237 buffers = msg['buffers']
1238 else:
1238 else:
1239 content = dict(completed=[],pending=[])
1239 content = dict(completed=[],pending=[])
1240
1240
1241 content['completed'].extend(completed)
1241 content['completed'].extend(completed)
1242
1242
1243 if status_only:
1243 if status_only:
1244 return content
1244 return content
1245
1245
1246 failures = []
1246 failures = []
1247 # load cached results into result:
1247 # load cached results into result:
1248 content.update(local_results)
1248 content.update(local_results)
1249
1249
1250 # update cache with results:
1250 # update cache with results:
1251 for msg_id in sorted(theids):
1251 for msg_id in sorted(theids):
1252 if msg_id in content['completed']:
1252 if msg_id in content['completed']:
1253 rec = content[msg_id]
1253 rec = content[msg_id]
1254 parent = rec['header']
1254 parent = rec['header']
1255 header = rec['result_header']
1255 header = rec['result_header']
1256 rcontent = rec['result_content']
1256 rcontent = rec['result_content']
1257 iodict = rec['io']
1257 iodict = rec['io']
1258 if isinstance(rcontent, str):
1258 if isinstance(rcontent, str):
1259 rcontent = self.session.unpack(rcontent)
1259 rcontent = self.session.unpack(rcontent)
1260
1260
1261 md = self.metadata[msg_id]
1261 md = self.metadata[msg_id]
1262 md.update(self._extract_metadata(header, parent, rcontent))
1262 md.update(self._extract_metadata(header, parent, rcontent))
1263 md.update(iodict)
1263 md.update(iodict)
1264
1264
1265 if rcontent['status'] == 'ok':
1265 if rcontent['status'] == 'ok':
1266 res,buffers = util.unserialize_object(buffers)
1266 res,buffers = util.unserialize_object(buffers)
1267 else:
1267 else:
1268 print rcontent
1268 print rcontent
1269 res = self._unwrap_exception(rcontent)
1269 res = self._unwrap_exception(rcontent)
1270 failures.append(res)
1270 failures.append(res)
1271
1271
1272 self.results[msg_id] = res
1272 self.results[msg_id] = res
1273 content[msg_id] = res
1273 content[msg_id] = res
1274
1274
1275 if len(theids) == 1 and failures:
1275 if len(theids) == 1 and failures:
1276 raise failures[0]
1276 raise failures[0]
1277
1277
1278 error.collect_exceptions(failures, "result_status")
1278 error.collect_exceptions(failures, "result_status")
1279 return content
1279 return content
1280
1280
1281 @spin_first
1281 @spin_first
1282 def queue_status(self, targets='all', verbose=False):
1282 def queue_status(self, targets='all', verbose=False):
1283 """Fetch the status of engine queues.
1283 """Fetch the status of engine queues.
1284
1284
1285 Parameters
1285 Parameters
1286 ----------
1286 ----------
1287
1287
1288 targets : int/str/list of ints/strs
1288 targets : int/str/list of ints/strs
1289 the engines whose states are to be queried.
1289 the engines whose states are to be queried.
1290 default : all
1290 default : all
1291 verbose : bool
1291 verbose : bool
1292 Whether to return lengths only, or lists of ids for each element
1292 Whether to return lengths only, or lists of ids for each element
1293 """
1293 """
1294 engine_ids = self._build_targets(targets)[1]
1294 engine_ids = self._build_targets(targets)[1]
1295 content = dict(targets=engine_ids, verbose=verbose)
1295 content = dict(targets=engine_ids, verbose=verbose)
1296 self.session.send(self._query_socket, "queue_request", content=content)
1296 self.session.send(self._query_socket, "queue_request", content=content)
1297 idents,msg = self.session.recv(self._query_socket, 0)
1297 idents,msg = self.session.recv(self._query_socket, 0)
1298 if self.debug:
1298 if self.debug:
1299 pprint(msg)
1299 pprint(msg)
1300 content = msg['content']
1300 content = msg['content']
1301 status = content.pop('status')
1301 status = content.pop('status')
1302 if status != 'ok':
1302 if status != 'ok':
1303 raise self._unwrap_exception(content)
1303 raise self._unwrap_exception(content)
1304 content = rekey(content)
1304 content = rekey(content)
1305 if isinstance(targets, int):
1305 if isinstance(targets, int):
1306 return content[targets]
1306 return content[targets]
1307 else:
1307 else:
1308 return content
1308 return content
1309
1309
1310 @spin_first
1310 @spin_first
1311 def purge_results(self, jobs=[], targets=[]):
1311 def purge_results(self, jobs=[], targets=[]):
1312 """Tell the Hub to forget results.
1312 """Tell the Hub to forget results.
1313
1313
1314 Individual results can be purged by msg_id, or the entire
1314 Individual results can be purged by msg_id, or the entire
1315 history of specific targets can be purged.
1315 history of specific targets can be purged.
1316
1316
1317 Use `purge_results('all')` to scrub everything from the Hub's db.
1317 Use `purge_results('all')` to scrub everything from the Hub's db.
1318
1318
1319 Parameters
1319 Parameters
1320 ----------
1320 ----------
1321
1321
1322 jobs : str or list of str or AsyncResult objects
1322 jobs : str or list of str or AsyncResult objects
1323 the msg_ids whose results should be forgotten.
1323 the msg_ids whose results should be forgotten.
1324 targets : int/str/list of ints/strs
1324 targets : int/str/list of ints/strs
1325 The targets, by int_id, whose entire history is to be purged.
1325 The targets, by int_id, whose entire history is to be purged.
1326
1326
1327 default : None
1327 default : None
1328 """
1328 """
1329 if not targets and not jobs:
1329 if not targets and not jobs:
1330 raise ValueError("Must specify at least one of `targets` and `jobs`")
1330 raise ValueError("Must specify at least one of `targets` and `jobs`")
1331 if targets:
1331 if targets:
1332 targets = self._build_targets(targets)[1]
1332 targets = self._build_targets(targets)[1]
1333
1333
1334 # construct msg_ids from jobs
1334 # construct msg_ids from jobs
1335 if jobs == 'all':
1335 if jobs == 'all':
1336 msg_ids = jobs
1336 msg_ids = jobs
1337 else:
1337 else:
1338 msg_ids = []
1338 msg_ids = []
1339 if isinstance(jobs, (basestring,AsyncResult)):
1339 if isinstance(jobs, (basestring,AsyncResult)):
1340 jobs = [jobs]
1340 jobs = [jobs]
1341 bad_ids = filter(lambda obj: not isinstance(obj, (basestring, AsyncResult)), jobs)
1341 bad_ids = filter(lambda obj: not isinstance(obj, (basestring, AsyncResult)), jobs)
1342 if bad_ids:
1342 if bad_ids:
1343 raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0])
1343 raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0])
1344 for j in jobs:
1344 for j in jobs:
1345 if isinstance(j, AsyncResult):
1345 if isinstance(j, AsyncResult):
1346 msg_ids.extend(j.msg_ids)
1346 msg_ids.extend(j.msg_ids)
1347 else:
1347 else:
1348 msg_ids.append(j)
1348 msg_ids.append(j)
1349
1349
1350 content = dict(engine_ids=targets, msg_ids=msg_ids)
1350 content = dict(engine_ids=targets, msg_ids=msg_ids)
1351 self.session.send(self._query_socket, "purge_request", content=content)
1351 self.session.send(self._query_socket, "purge_request", content=content)
1352 idents, msg = self.session.recv(self._query_socket, 0)
1352 idents, msg = self.session.recv(self._query_socket, 0)
1353 if self.debug:
1353 if self.debug:
1354 pprint(msg)
1354 pprint(msg)
1355 content = msg['content']
1355 content = msg['content']
1356 if content['status'] != 'ok':
1356 if content['status'] != 'ok':
1357 raise self._unwrap_exception(content)
1357 raise self._unwrap_exception(content)
1358
1358
1359 @spin_first
1359 @spin_first
1360 def hub_history(self):
1360 def hub_history(self):
1361 """Get the Hub's history
1361 """Get the Hub's history
1362
1362
1363 Just like the Client, the Hub has a history, which is a list of msg_ids.
1363 Just like the Client, the Hub has a history, which is a list of msg_ids.
1364 This will contain the history of all clients, and, depending on configuration,
1364 This will contain the history of all clients, and, depending on configuration,
1365 may contain history across multiple cluster sessions.
1365 may contain history across multiple cluster sessions.
1366
1366
1367 Any msg_id returned here is a valid argument to `get_result`.
1367 Any msg_id returned here is a valid argument to `get_result`.
1368
1368
1369 Returns
1369 Returns
1370 -------
1370 -------
1371
1371
1372 msg_ids : list of strs
1372 msg_ids : list of strs
1373 list of all msg_ids, ordered by task submission time.
1373 list of all msg_ids, ordered by task submission time.
1374 """
1374 """
1375
1375
1376 self.session.send(self._query_socket, "history_request", content={})
1376 self.session.send(self._query_socket, "history_request", content={})
1377 idents, msg = self.session.recv(self._query_socket, 0)
1377 idents, msg = self.session.recv(self._query_socket, 0)
1378
1378
1379 if self.debug:
1379 if self.debug:
1380 pprint(msg)
1380 pprint(msg)
1381 content = msg['content']
1381 content = msg['content']
1382 if content['status'] != 'ok':
1382 if content['status'] != 'ok':
1383 raise self._unwrap_exception(content)
1383 raise self._unwrap_exception(content)
1384 else:
1384 else:
1385 return content['history']
1385 return content['history']
1386
1386
1387 @spin_first
1387 @spin_first
1388 def db_query(self, query, keys=None):
1388 def db_query(self, query, keys=None):
1389 """Query the Hub's TaskRecord database
1389 """Query the Hub's TaskRecord database
1390
1390
1391 This will return a list of task record dicts that match `query`
1391 This will return a list of task record dicts that match `query`
1392
1392
1393 Parameters
1393 Parameters
1394 ----------
1394 ----------
1395
1395
1396 query : mongodb query dict
1396 query : mongodb query dict
1397 The search dict. See mongodb query docs for details.
1397 The search dict. See mongodb query docs for details.
1398 keys : list of strs [optional]
1398 keys : list of strs [optional]
1399 The subset of keys to be returned. The default is to fetch everything but buffers.
1399 The subset of keys to be returned. The default is to fetch everything but buffers.
1400 'msg_id' will *always* be included.
1400 'msg_id' will *always* be included.
1401 """
1401 """
1402 if isinstance(keys, basestring):
1402 if isinstance(keys, basestring):
1403 keys = [keys]
1403 keys = [keys]
1404 content = dict(query=query, keys=keys)
1404 content = dict(query=query, keys=keys)
1405 self.session.send(self._query_socket, "db_request", content=content)
1405 self.session.send(self._query_socket, "db_request", content=content)
1406 idents, msg = self.session.recv(self._query_socket, 0)
1406 idents, msg = self.session.recv(self._query_socket, 0)
1407 if self.debug:
1407 if self.debug:
1408 pprint(msg)
1408 pprint(msg)
1409 content = msg['content']
1409 content = msg['content']
1410 if content['status'] != 'ok':
1410 if content['status'] != 'ok':
1411 raise self._unwrap_exception(content)
1411 raise self._unwrap_exception(content)
1412
1412
1413 records = content['records']
1413 records = content['records']
1414
1414
1415 buffer_lens = content['buffer_lens']
1415 buffer_lens = content['buffer_lens']
1416 result_buffer_lens = content['result_buffer_lens']
1416 result_buffer_lens = content['result_buffer_lens']
1417 buffers = msg['buffers']
1417 buffers = msg['buffers']
1418 has_bufs = buffer_lens is not None
1418 has_bufs = buffer_lens is not None
1419 has_rbufs = result_buffer_lens is not None
1419 has_rbufs = result_buffer_lens is not None
1420 for i,rec in enumerate(records):
1420 for i,rec in enumerate(records):
1421 # relink buffers
1421 # relink buffers
1422 if has_bufs:
1422 if has_bufs:
1423 blen = buffer_lens[i]
1423 blen = buffer_lens[i]
1424 rec['buffers'], buffers = buffers[:blen],buffers[blen:]
1424 rec['buffers'], buffers = buffers[:blen],buffers[blen:]
1425 if has_rbufs:
1425 if has_rbufs:
1426 blen = result_buffer_lens[i]
1426 blen = result_buffer_lens[i]
1427 rec['result_buffers'], buffers = buffers[:blen],buffers[blen:]
1427 rec['result_buffers'], buffers = buffers[:blen],buffers[blen:]
1428
1428
1429 return records
1429 return records
1430
1430
1431 __all__ = [ 'Client' ]
1431 __all__ = [ 'Client' ]
@@ -1,1048 +1,1048 b''
1 """Views of remote engines.
1 """Views of remote engines.
2
2
3 Authors:
3 Authors:
4
4
5 * Min RK
5 * Min RK
6 """
6 """
7 #-----------------------------------------------------------------------------
7 #-----------------------------------------------------------------------------
8 # Copyright (C) 2010-2011 The IPython Development Team
8 # Copyright (C) 2010-2011 The IPython Development Team
9 #
9 #
10 # Distributed under the terms of the BSD License. The full license is in
10 # Distributed under the terms of the BSD License. The full license is in
11 # the file COPYING, distributed as part of this software.
11 # the file COPYING, distributed as part of this software.
12 #-----------------------------------------------------------------------------
12 #-----------------------------------------------------------------------------
13
13
14 #-----------------------------------------------------------------------------
14 #-----------------------------------------------------------------------------
15 # Imports
15 # Imports
16 #-----------------------------------------------------------------------------
16 #-----------------------------------------------------------------------------
17
17
18 import imp
18 import imp
19 import sys
19 import sys
20 import warnings
20 import warnings
21 from contextlib import contextmanager
21 from contextlib import contextmanager
22 from types import ModuleType
22 from types import ModuleType
23
23
24 import zmq
24 import zmq
25
25
26 from IPython.testing.skipdoctest import skip_doctest
26 from IPython.testing.skipdoctest import skip_doctest
27 from IPython.utils.traitlets import HasTraits, Any, Bool, List, Dict, Set, Int, Instance, CFloat, CInt
27 from IPython.utils.traitlets import HasTraits, Any, Bool, List, Dict, Set, Int, Instance, CFloat, CInt
28 from IPython.external.decorator import decorator
28 from IPython.external.decorator import decorator
29
29
30 from IPython.parallel import util
30 from IPython.parallel import util
31 from IPython.parallel.controller.dependency import Dependency, dependent
31 from IPython.parallel.controller.dependency import Dependency, dependent
32
32
33 from . import map as Map
33 from . import map as Map
34 from .asyncresult import AsyncResult, AsyncMapResult
34 from .asyncresult import AsyncResult, AsyncMapResult
35 from .remotefunction import ParallelFunction, parallel, remote
35 from .remotefunction import ParallelFunction, parallel, remote
36
36
37 #-----------------------------------------------------------------------------
37 #-----------------------------------------------------------------------------
38 # Decorators
38 # Decorators
39 #-----------------------------------------------------------------------------
39 #-----------------------------------------------------------------------------
40
40
@decorator
def save_ids(f, self, *args, **kwargs):
    """Keep our history and outstanding attributes up to date after a method call."""
    n_previous = len(self.client.history)
    try:
        ret = f(self, *args, **kwargs)
    finally:
        # BUGFIX: slice from the recorded offset instead of computing
        # `nmsgs = len(history) - n_previous` and slicing `history[-nmsgs:]`,
        # which returns the *entire* history when nmsgs == 0.
        msg_ids = self.client.history[n_previous:]
        self.history.extend(msg_ids)
        for msg_id in msg_ids:
            self.outstanding.add(msg_id)
    return ret
53
53
@decorator
def sync_results(f, self, *args, **kwargs):
    """sync relevant results from self.client to our results attribute."""
    ret = f(self, *args, **kwargs)
    # whatever we were waiting on that the client no longer considers
    # outstanding has completed since the last sync
    finished = self.outstanding.difference(self.client.outstanding)
    self.outstanding = self.outstanding.difference(finished)
    for msg_id in finished:
        self.results[msg_id] = self.client.results[msg_id]
    return ret
64
64
@decorator
def spin_after(f, self, *args, **kwargs):
    """call spin after the method."""
    # run the wrapped method first, then flush incoming messages
    result = f(self, *args, **kwargs)
    self.spin()
    return result
71
71
72 #-----------------------------------------------------------------------------
72 #-----------------------------------------------------------------------------
73 # Classes
73 # Classes
74 #-----------------------------------------------------------------------------
74 #-----------------------------------------------------------------------------
75
75
76 @skip_doctest
76 @skip_doctest
77 class View(HasTraits):
77 class View(HasTraits):
78 """Base View class for more convenint apply(f,*args,**kwargs) syntax via attributes.
78 """Base View class for more convenint apply(f,*args,**kwargs) syntax via attributes.
79
79
80 Don't use this class, use subclasses.
80 Don't use this class, use subclasses.
81
81
82 Methods
82 Methods
83 -------
83 -------
84
84
85 spin
85 spin
86 flushes incoming results and registration state changes
86 flushes incoming results and registration state changes
87 control methods spin, and requesting `ids` also ensures up to date
87 control methods spin, and requesting `ids` also ensures up to date
88
88
89 wait
89 wait
90 wait on one or more msg_ids
90 wait on one or more msg_ids
91
91
92 execution methods
92 execution methods
93 apply
93 apply
94 legacy: execute, run
94 legacy: execute, run
95
95
96 data movement
96 data movement
97 push, pull, scatter, gather
97 push, pull, scatter, gather
98
98
99 query methods
99 query methods
100 get_result, queue_status, purge_results, result_status
100 get_result, queue_status, purge_results, result_status
101
101
102 control methods
102 control methods
103 abort, shutdown
103 abort, shutdown
104
104
105 """
105 """
    # flags
    # whether calls block until results are ready (seeded from the client)
    block=Bool(False)
    # whether sends create a MessageTracker for safe buffer editing
    track=Bool(True)
    # the engines this View operates on; interpretation is subclass-specific
    targets = Any()

    # msg_ids of requests submitted through this View, oldest first
    history=List()
    # msg_ids submitted via this View that have not yet completed
    outstanding = Set()
    # completed results, keyed by msg_id
    results = Dict()
    # the Client this View submits requests through
    client = Instance('IPython.parallel.Client')

    # the zmq socket used for submissions (set from the `socket` ctor arg)
    _socket = Instance('zmq.Socket')
    # attribute names that set_flags/temp_flags may modify
    _flag_names = List(['targets', 'block', 'track'])
    _targets = Any()
    _idents = Any()
120
120
121 def __init__(self, client=None, socket=None, **flags):
121 def __init__(self, client=None, socket=None, **flags):
122 super(View, self).__init__(client=client, _socket=socket)
122 super(View, self).__init__(client=client, _socket=socket)
123 self.block = client.block
123 self.block = client.block
124
124
125 self.set_flags(**flags)
125 self.set_flags(**flags)
126
126
127 assert not self.__class__ is View, "Don't use base View objects, use subclasses"
127 assert not self.__class__ is View, "Don't use base View objects, use subclasses"
128
128
129
129
130 def __repr__(self):
130 def __repr__(self):
131 strtargets = str(self.targets)
131 strtargets = str(self.targets)
132 if len(strtargets) > 16:
132 if len(strtargets) > 16:
133 strtargets = strtargets[:12]+'...]'
133 strtargets = strtargets[:12]+'...]'
134 return "<%s %s>"%(self.__class__.__name__, strtargets)
134 return "<%s %s>"%(self.__class__.__name__, strtargets)
135
135
136 def set_flags(self, **kwargs):
136 def set_flags(self, **kwargs):
137 """set my attribute flags by keyword.
137 """set my attribute flags by keyword.
138
138
139 Views determine behavior with a few attributes (`block`, `track`, etc.).
139 Views determine behavior with a few attributes (`block`, `track`, etc.).
140 These attributes can be set all at once by name with this method.
140 These attributes can be set all at once by name with this method.
141
141
142 Parameters
142 Parameters
143 ----------
143 ----------
144
144
145 block : bool
145 block : bool
146 whether to wait for results
146 whether to wait for results
147 track : bool
147 track : bool
148 whether to create a MessageTracker to allow the user to
148 whether to create a MessageTracker to allow the user to
149 safely edit after arrays and buffers during non-copying
149 safely edit after arrays and buffers during non-copying
150 sends.
150 sends.
151 """
151 """
152 for name, value in kwargs.iteritems():
152 for name, value in kwargs.iteritems():
153 if name not in self._flag_names:
153 if name not in self._flag_names:
154 raise KeyError("Invalid name: %r"%name)
154 raise KeyError("Invalid name: %r"%name)
155 else:
155 else:
156 setattr(self, name, value)
156 setattr(self, name, value)
157
157
158 @contextmanager
158 @contextmanager
159 def temp_flags(self, **kwargs):
159 def temp_flags(self, **kwargs):
160 """temporarily set flags, for use in `with` statements.
160 """temporarily set flags, for use in `with` statements.
161
161
162 See set_flags for permanent setting of flags
162 See set_flags for permanent setting of flags
163
163
164 Examples
164 Examples
165 --------
165 --------
166
166
167 >>> view.track=False
167 >>> view.track=False
168 ...
168 ...
169 >>> with view.temp_flags(track=True):
169 >>> with view.temp_flags(track=True):
170 ... ar = view.apply(dostuff, my_big_array)
170 ... ar = view.apply(dostuff, my_big_array)
171 ... ar.tracker.wait() # wait for send to finish
171 ... ar.tracker.wait() # wait for send to finish
172 >>> view.track
172 >>> view.track
173 False
173 False
174
174
175 """
175 """
176 # preflight: save flags, and set temporaries
176 # preflight: save flags, and set temporaries
177 saved_flags = {}
177 saved_flags = {}
178 for f in self._flag_names:
178 for f in self._flag_names:
179 saved_flags[f] = getattr(self, f)
179 saved_flags[f] = getattr(self, f)
180 self.set_flags(**kwargs)
180 self.set_flags(**kwargs)
181 # yield to the with-statement block
181 # yield to the with-statement block
182 try:
182 try:
183 yield
183 yield
184 finally:
184 finally:
185 # postflight: restore saved flags
185 # postflight: restore saved flags
186 self.set_flags(**saved_flags)
186 self.set_flags(**saved_flags)
187
187
188
188
189 #----------------------------------------------------------------
189 #----------------------------------------------------------------
190 # apply
190 # apply
191 #----------------------------------------------------------------
191 #----------------------------------------------------------------
192
192
193 @sync_results
193 @sync_results
194 @save_ids
194 @save_ids
195 def _really_apply(self, f, args, kwargs, block=None, **options):
195 def _really_apply(self, f, args, kwargs, block=None, **options):
196 """wrapper for client.send_apply_message"""
196 """wrapper for client.send_apply_message"""
197 raise NotImplementedError("Implement in subclasses")
197 raise NotImplementedError("Implement in subclasses")
198
198
199 def apply(self, f, *args, **kwargs):
199 def apply(self, f, *args, **kwargs):
200 """calls f(*args, **kwargs) on remote engines, returning the result.
200 """calls f(*args, **kwargs) on remote engines, returning the result.
201
201
202 This method sets all apply flags via this View's attributes.
202 This method sets all apply flags via this View's attributes.
203
203
204 if self.block is False:
204 if self.block is False:
205 returns AsyncResult
205 returns AsyncResult
206 else:
206 else:
207 returns actual result of f(*args, **kwargs)
207 returns actual result of f(*args, **kwargs)
208 """
208 """
209 return self._really_apply(f, args, kwargs)
209 return self._really_apply(f, args, kwargs)
210
210
211 def apply_async(self, f, *args, **kwargs):
211 def apply_async(self, f, *args, **kwargs):
212 """calls f(*args, **kwargs) on remote engines in a nonblocking manner.
212 """calls f(*args, **kwargs) on remote engines in a nonblocking manner.
213
213
214 returns AsyncResult
214 returns AsyncResult
215 """
215 """
216 return self._really_apply(f, args, kwargs, block=False)
216 return self._really_apply(f, args, kwargs, block=False)
217
217
218 @spin_after
218 @spin_after
219 def apply_sync(self, f, *args, **kwargs):
219 def apply_sync(self, f, *args, **kwargs):
220 """calls f(*args, **kwargs) on remote engines in a blocking manner,
220 """calls f(*args, **kwargs) on remote engines in a blocking manner,
221 returning the result.
221 returning the result.
222
222
223 returns: actual result of f(*args, **kwargs)
223 returns: actual result of f(*args, **kwargs)
224 """
224 """
225 return self._really_apply(f, args, kwargs, block=True)
225 return self._really_apply(f, args, kwargs, block=True)
226
226
227 #----------------------------------------------------------------
227 #----------------------------------------------------------------
228 # wrappers for client and control methods
228 # wrappers for client and control methods
229 #----------------------------------------------------------------
229 #----------------------------------------------------------------
    @sync_results
    def spin(self):
        """spin the client, and sync"""
        # delegate to the client; @sync_results then refreshes this view's
        # result state after the spin completes
        self.client.spin()
234
234
235 @sync_results
235 @sync_results
236 def wait(self, jobs=None, timeout=-1):
236 def wait(self, jobs=None, timeout=-1):
237 """waits on one or more `jobs`, for up to `timeout` seconds.
237 """waits on one or more `jobs`, for up to `timeout` seconds.
238
238
239 Parameters
239 Parameters
240 ----------
240 ----------
241
241
242 jobs : int, str, or list of ints and/or strs, or one or more AsyncResult objects
242 jobs : int, str, or list of ints and/or strs, or one or more AsyncResult objects
243 ints are indices to self.history
243 ints are indices to self.history
244 strs are msg_ids
244 strs are msg_ids
245 default: wait on all outstanding messages
245 default: wait on all outstanding messages
246 timeout : float
246 timeout : float
247 a time in seconds, after which to give up.
247 a time in seconds, after which to give up.
248 default is -1, which means no timeout
248 default is -1, which means no timeout
249
249
250 Returns
250 Returns
251 -------
251 -------
252
252
253 True : when all msg_ids are done
253 True : when all msg_ids are done
254 False : timeout reached, some msg_ids still outstanding
254 False : timeout reached, some msg_ids still outstanding
255 """
255 """
256 if jobs is None:
256 if jobs is None:
257 jobs = self.history
257 jobs = self.history
258 return self.client.wait(jobs, timeout)
258 return self.client.wait(jobs, timeout)
259
259
260 def abort(self, jobs=None, targets=None, block=None):
260 def abort(self, jobs=None, targets=None, block=None):
261 """Abort jobs on my engines.
261 """Abort jobs on my engines.
262
262
263 Parameters
263 Parameters
264 ----------
264 ----------
265
265
266 jobs : None, str, list of strs, optional
266 jobs : None, str, list of strs, optional
267 if None: abort all jobs.
267 if None: abort all jobs.
268 else: abort specific msg_id(s).
268 else: abort specific msg_id(s).
269 """
269 """
270 block = block if block is not None else self.block
270 block = block if block is not None else self.block
271 targets = targets if targets is not None else self.targets
271 targets = targets if targets is not None else self.targets
272 return self.client.abort(jobs=jobs, targets=targets, block=block)
272 return self.client.abort(jobs=jobs, targets=targets, block=block)
273
273
274 def queue_status(self, targets=None, verbose=False):
274 def queue_status(self, targets=None, verbose=False):
275 """Fetch the Queue status of my engines"""
275 """Fetch the Queue status of my engines"""
276 targets = targets if targets is not None else self.targets
276 targets = targets if targets is not None else self.targets
277 return self.client.queue_status(targets=targets, verbose=verbose)
277 return self.client.queue_status(targets=targets, verbose=verbose)
278
278
279 def purge_results(self, jobs=[], targets=[]):
279 def purge_results(self, jobs=[], targets=[]):
280 """Instruct the controller to forget specific results."""
280 """Instruct the controller to forget specific results."""
281 if targets is None or targets == 'all':
281 if targets is None or targets == 'all':
282 targets = self.targets
282 targets = self.targets
283 return self.client.purge_results(jobs=jobs, targets=targets)
283 return self.client.purge_results(jobs=jobs, targets=targets)
284
284
285 def shutdown(self, targets=None, restart=False, hub=False, block=None):
285 def shutdown(self, targets=None, restart=False, hub=False, block=None):
286 """Terminates one or more engine processes, optionally including the hub.
286 """Terminates one or more engine processes, optionally including the hub.
287 """
287 """
288 block = self.block if block is None else block
288 block = self.block if block is None else block
289 if targets is None or targets == 'all':
289 if targets is None or targets == 'all':
290 targets = self.targets
290 targets = self.targets
291 return self.client.shutdown(targets=targets, restart=restart, hub=hub, block=block)
291 return self.client.shutdown(targets=targets, restart=restart, hub=hub, block=block)
292
292
293 @spin_after
293 @spin_after
294 def get_result(self, indices_or_msg_ids=None):
294 def get_result(self, indices_or_msg_ids=None):
295 """return one or more results, specified by history index or msg_id.
295 """return one or more results, specified by history index or msg_id.
296
296
297 See client.get_result for details.
297 See client.get_result for details.
298
298
299 """
299 """
300
300
301 if indices_or_msg_ids is None:
301 if indices_or_msg_ids is None:
302 indices_or_msg_ids = -1
302 indices_or_msg_ids = -1
303 if isinstance(indices_or_msg_ids, int):
303 if isinstance(indices_or_msg_ids, int):
304 indices_or_msg_ids = self.history[indices_or_msg_ids]
304 indices_or_msg_ids = self.history[indices_or_msg_ids]
305 elif isinstance(indices_or_msg_ids, (list,tuple,set)):
305 elif isinstance(indices_or_msg_ids, (list,tuple,set)):
306 indices_or_msg_ids = list(indices_or_msg_ids)
306 indices_or_msg_ids = list(indices_or_msg_ids)
307 for i,index in enumerate(indices_or_msg_ids):
307 for i,index in enumerate(indices_or_msg_ids):
308 if isinstance(index, int):
308 if isinstance(index, int):
309 indices_or_msg_ids[i] = self.history[index]
309 indices_or_msg_ids[i] = self.history[index]
310 return self.client.get_result(indices_or_msg_ids)
310 return self.client.get_result(indices_or_msg_ids)
311
311
312 #-------------------------------------------------------------------
312 #-------------------------------------------------------------------
313 # Map
313 # Map
314 #-------------------------------------------------------------------
314 #-------------------------------------------------------------------
315
315
316 def map(self, f, *sequences, **kwargs):
316 def map(self, f, *sequences, **kwargs):
317 """override in subclasses"""
317 """override in subclasses"""
318 raise NotImplementedError
318 raise NotImplementedError
319
319
320 def map_async(self, f, *sequences, **kwargs):
320 def map_async(self, f, *sequences, **kwargs):
321 """Parallel version of builtin `map`, using this view's engines.
321 """Parallel version of builtin `map`, using this view's engines.
322
322
323 This is equivalent to map(...block=False)
323 This is equivalent to map(...block=False)
324
324
325 See `self.map` for details.
325 See `self.map` for details.
326 """
326 """
327 if 'block' in kwargs:
327 if 'block' in kwargs:
328 raise TypeError("map_async doesn't take a `block` keyword argument.")
328 raise TypeError("map_async doesn't take a `block` keyword argument.")
329 kwargs['block'] = False
329 kwargs['block'] = False
330 return self.map(f,*sequences,**kwargs)
330 return self.map(f,*sequences,**kwargs)
331
331
332 def map_sync(self, f, *sequences, **kwargs):
332 def map_sync(self, f, *sequences, **kwargs):
333 """Parallel version of builtin `map`, using this view's engines.
333 """Parallel version of builtin `map`, using this view's engines.
334
334
335 This is equivalent to map(...block=True)
335 This is equivalent to map(...block=True)
336
336
337 See `self.map` for details.
337 See `self.map` for details.
338 """
338 """
339 if 'block' in kwargs:
339 if 'block' in kwargs:
340 raise TypeError("map_sync doesn't take a `block` keyword argument.")
340 raise TypeError("map_sync doesn't take a `block` keyword argument.")
341 kwargs['block'] = True
341 kwargs['block'] = True
342 return self.map(f,*sequences,**kwargs)
342 return self.map(f,*sequences,**kwargs)
343
343
344 def imap(self, f, *sequences, **kwargs):
344 def imap(self, f, *sequences, **kwargs):
345 """Parallel version of `itertools.imap`.
345 """Parallel version of `itertools.imap`.
346
346
347 See `self.map` for details.
347 See `self.map` for details.
348
348
349 """
349 """
350
350
351 return iter(self.map_async(f,*sequences, **kwargs))
351 return iter(self.map_async(f,*sequences, **kwargs))
352
352
353 #-------------------------------------------------------------------
353 #-------------------------------------------------------------------
354 # Decorators
354 # Decorators
355 #-------------------------------------------------------------------
355 #-------------------------------------------------------------------
356
356
357 def remote(self, block=True, **flags):
357 def remote(self, block=True, **flags):
358 """Decorator for making a RemoteFunction"""
358 """Decorator for making a RemoteFunction"""
359 block = self.block if block is None else block
359 block = self.block if block is None else block
360 return remote(self, block=block, **flags)
360 return remote(self, block=block, **flags)
361
361
362 def parallel(self, dist='b', block=None, **flags):
362 def parallel(self, dist='b', block=None, **flags):
363 """Decorator for making a ParallelFunction"""
363 """Decorator for making a ParallelFunction"""
364 block = self.block if block is None else block
364 block = self.block if block is None else block
365 return parallel(self, dist=dist, block=block, **flags)
365 return parallel(self, dist=dist, block=block, **flags)
366
366
367 @skip_doctest
367 @skip_doctest
368 class DirectView(View):
368 class DirectView(View):
369 """Direct Multiplexer View of one or more engines.
369 """Direct Multiplexer View of one or more engines.
370
370
371 These are created via indexed access to a client:
371 These are created via indexed access to a client:
372
372
373 >>> dv_1 = client[1]
373 >>> dv_1 = client[1]
374 >>> dv_all = client[:]
374 >>> dv_all = client[:]
375 >>> dv_even = client[::2]
375 >>> dv_even = client[::2]
376 >>> dv_some = client[1:3]
376 >>> dv_some = client[1:3]
377
377
378 This object provides dictionary access to engine namespaces:
378 This object provides dictionary access to engine namespaces:
379
379
380 # push a=5:
380 # push a=5:
381 >>> dv['a'] = 5
381 >>> dv['a'] = 5
382 # pull 'foo':
382 # pull 'foo':
383 >>> dv['foo']
383 >>> dv['foo']
384
384
385 """
385 """
386
386
387 def __init__(self, client=None, socket=None, targets=None):
387 def __init__(self, client=None, socket=None, targets=None):
388 super(DirectView, self).__init__(client=client, socket=socket, targets=targets)
388 super(DirectView, self).__init__(client=client, socket=socket, targets=targets)
389
389
    @property
    def importer(self):
        """sync_imports(local=True) as a property.

        Convenience alias: ``with view.importer:`` is equivalent to
        ``with view.sync_imports(True):``.

        See sync_imports for details.

        """
        return self.sync_imports(True)
398
398
    @contextmanager
    def sync_imports(self, local=True):
        """Context Manager for performing simultaneous local and remote imports.

        While the block is active, ``__builtin__.__import__`` is replaced by a
        wrapper that performs the import locally (when `local` is True) and
        also schedules the same import on every engine in this view via
        apply_async.  The original ``__import__`` is always restored on exit.

        'import x as y' will *not* work. The 'as y' part will simply be ignored.

        >>> with view.sync_imports():
        ...    from numpy import recarray
        importing recarray from numpy on engine(s)

        """
        import __builtin__
        local_import = __builtin__.__import__
        modules = set()   # (name, fromlist) keys already sent, to dedupe
        results = []      # AsyncResults of the remote imports
        @util.interactive
        def remote_import(name, fromlist, level):
            """the function to be passed to apply, that actually performs the import
            on the engine, and loads up the user namespace.
            """
            import sys
            user_ns = globals()
            mod = __import__(name, fromlist=fromlist, level=level)
            if fromlist:
                # `from name import a, b` -> bind each requested attribute
                for key in fromlist:
                    user_ns[key] = getattr(mod, key)
            else:
                # plain `import name` -> bind the top-level module
                user_ns[name] = sys.modules[name]

        def view_import(name, globals={}, locals={}, fromlist=[], level=-1):
            """the drop-in replacement for __import__, that optionally imports
            locally as well.
            """
            # don't override nested imports
            save_import = __builtin__.__import__
            __builtin__.__import__ = local_import

            if imp.lock_held():
                # this is a side-effect import, don't do it remotely, or even
                # ignore the local effects
                return local_import(name, globals, locals, fromlist, level)

            imp.acquire_lock()
            if local:
                mod = local_import(name, globals, locals, fromlist, level)
            else:
                raise NotImplementedError("remote-only imports not yet implemented")
            imp.release_lock()

            # one key per (module, fromlist) pair, so each distinct import
            # statement is only sent to the engines once
            key = name+':'+','.join(fromlist or [])
            if level == -1 and key not in modules:
                modules.add(key)
                if fromlist:
                    print "importing %s from %s on engine(s)"%(','.join(fromlist), name)
                else:
                    print "importing %s on engine(s)"%name
                results.append(self.apply_async(remote_import, name, fromlist, level))
            # restore override
            __builtin__.__import__ = save_import

            return mod

        # override __import__
        __builtin__.__import__ = view_import
        try:
            # enter the block
            yield
        except ImportError:
            # NOTE(review): the exception is swallowed on the `local` path as
            # well, since nothing re-raises here -- confirm this is intended.
            if not local:
                # ignore import errors if not doing local imports
                pass
        finally:
            # always restore __import__
            __builtin__.__import__ = local_import

        for r in results:
            # raise possible remote ImportErrors here
            r.get()
477
477
478
478
    @sync_results
    @save_ids
    def _really_apply(self, f, args=None, kwargs=None, targets=None, block=None, track=None):
        """calls f(*args, **kwargs) on remote engines, returning the result.

        This method sets all of `apply`'s flags via this View's attributes.

        Parameters
        ----------

        f : callable

        args : list [default: empty]

        kwargs : dict [default: empty]

        targets : target list [default: self.targets]
            where to run
        block : bool [default: self.block]
            whether to block
        track : bool [default: self.track]
            whether to ask zmq to track the message, for safe non-copying sends

        Returns
        -------

        if self.block is False:
            returns AsyncResult
        else:
            returns actual result of f(*args, **kwargs) on the engine(s)
            This will be a list of self.targets is also a list (even length 1), or
            the single result if self.targets is an integer engine id
        """
        # resolve unset options against this View's flag attributes
        args = [] if args is None else args
        kwargs = {} if kwargs is None else kwargs
        block = self.block if block is None else block
        track = self.track if track is None else track
        targets = self.targets if targets is None else targets

        # translate engine ids into zmq socket identities
        _idents = self.client._build_targets(targets)[0]
        msg_ids = []
        trackers = []
        for ident in _idents:
            # one apply message per engine identity
            msg = self.client.send_apply_message(self._socket, f, args, kwargs, track=track,
                                    ident=ident)
            if track:
                trackers.append(msg['tracker'])
            # NOTE(review): msg_id location depends on the session message
            # format (top-level vs nested in 'header') -- confirm against the
            # client's Session implementation.
            msg_ids.append(msg['header']['msg_id'])
        tracker = None if track is False else zmq.MessageTracker(*trackers)
        ar = AsyncResult(self.client, msg_ids, fname=f.__name__, targets=targets, tracker=tracker)
        if block:
            try:
                return ar.get()
            except KeyboardInterrupt:
                # Ctrl-C while waiting: fall through and return the AsyncResult
                pass
        return ar
535
535
536 @spin_after
536 @spin_after
537 def map(self, f, *sequences, **kwargs):
537 def map(self, f, *sequences, **kwargs):
538 """view.map(f, *sequences, block=self.block) => list|AsyncMapResult
538 """view.map(f, *sequences, block=self.block) => list|AsyncMapResult
539
539
540 Parallel version of builtin `map`, using this View's `targets`.
540 Parallel version of builtin `map`, using this View's `targets`.
541
541
542 There will be one task per target, so work will be chunked
542 There will be one task per target, so work will be chunked
543 if the sequences are longer than `targets`.
543 if the sequences are longer than `targets`.
544
544
545 Results can be iterated as they are ready, but will become available in chunks.
545 Results can be iterated as they are ready, but will become available in chunks.
546
546
547 Parameters
547 Parameters
548 ----------
548 ----------
549
549
550 f : callable
550 f : callable
551 function to be mapped
551 function to be mapped
552 *sequences: one or more sequences of matching length
552 *sequences: one or more sequences of matching length
553 the sequences to be distributed and passed to `f`
553 the sequences to be distributed and passed to `f`
554 block : bool
554 block : bool
555 whether to wait for the result or not [default self.block]
555 whether to wait for the result or not [default self.block]
556
556
557 Returns
557 Returns
558 -------
558 -------
559
559
560 if block=False:
560 if block=False:
561 AsyncMapResult
561 AsyncMapResult
562 An object like AsyncResult, but which reassembles the sequence of results
562 An object like AsyncResult, but which reassembles the sequence of results
563 into a single list. AsyncMapResults can be iterated through before all
563 into a single list. AsyncMapResults can be iterated through before all
564 results are complete.
564 results are complete.
565 else:
565 else:
566 list
566 list
567 the result of map(f,*sequences)
567 the result of map(f,*sequences)
568 """
568 """
569
569
570 block = kwargs.pop('block', self.block)
570 block = kwargs.pop('block', self.block)
571 for k in kwargs.keys():
571 for k in kwargs.keys():
572 if k not in ['block', 'track']:
572 if k not in ['block', 'track']:
573 raise TypeError("invalid keyword arg, %r"%k)
573 raise TypeError("invalid keyword arg, %r"%k)
574
574
575 assert len(sequences) > 0, "must have some sequences to map onto!"
575 assert len(sequences) > 0, "must have some sequences to map onto!"
576 pf = ParallelFunction(self, f, block=block, **kwargs)
576 pf = ParallelFunction(self, f, block=block, **kwargs)
577 return pf.map(*sequences)
577 return pf.map(*sequences)
578
578
    def execute(self, code, targets=None, block=None):
        """Executes `code` on `targets` in blocking or nonblocking manner.

        ``execute`` is always `bound` (affects engine namespace)

        Parameters
        ----------

        code : str
            the code string to be executed
        block : bool
            whether or not to wait until done to return
            default: self.block
        """
        # delegate to _really_apply with util._execute as the callable
        return self._really_apply(util._execute, args=(code,), block=block, targets=targets)
594
594
595 def run(self, filename, targets=None, block=None):
595 def run(self, filename, targets=None, block=None):
596 """Execute contents of `filename` on my engine(s).
596 """Execute contents of `filename` on my engine(s).
597
597
598 This simply reads the contents of the file and calls `execute`.
598 This simply reads the contents of the file and calls `execute`.
599
599
600 Parameters
600 Parameters
601 ----------
601 ----------
602
602
603 filename : str
603 filename : str
604 The path to the file
604 The path to the file
605 targets : int/str/list of ints/strs
605 targets : int/str/list of ints/strs
606 the engines on which to execute
606 the engines on which to execute
607 default : all
607 default : all
608 block : bool
608 block : bool
609 whether or not to wait until done
609 whether or not to wait until done
610 default: self.block
610 default: self.block
611
611
612 """
612 """
613 with open(filename, 'r') as f:
613 with open(filename, 'r') as f:
614 # add newline in case of trailing indented whitespace
614 # add newline in case of trailing indented whitespace
615 # which will cause SyntaxError
615 # which will cause SyntaxError
616 code = f.read()+'\n'
616 code = f.read()+'\n'
617 return self.execute(code, block=block, targets=targets)
617 return self.execute(code, block=block, targets=targets)
618
618
    def update(self, ns):
        """update remote namespace with dict `ns`

        Uses this view's current `block` and `track` flags.

        See `push` for details.
        """
        return self.push(ns, block=self.block, track=self.track)
625
625
626 def push(self, ns, targets=None, block=None, track=None):
626 def push(self, ns, targets=None, block=None, track=None):
627 """update remote namespace with dict `ns`
627 """update remote namespace with dict `ns`
628
628
629 Parameters
629 Parameters
630 ----------
630 ----------
631
631
632 ns : dict
632 ns : dict
633 dict of keys with which to update engine namespace(s)
633 dict of keys with which to update engine namespace(s)
634 block : bool [default : self.block]
634 block : bool [default : self.block]
635 whether to wait to be notified of engine receipt
635 whether to wait to be notified of engine receipt
636
636
637 """
637 """
638
638
639 block = block if block is not None else self.block
639 block = block if block is not None else self.block
640 track = track if track is not None else self.track
640 track = track if track is not None else self.track
641 targets = targets if targets is not None else self.targets
641 targets = targets if targets is not None else self.targets
642 # applier = self.apply_sync if block else self.apply_async
642 # applier = self.apply_sync if block else self.apply_async
643 if not isinstance(ns, dict):
643 if not isinstance(ns, dict):
644 raise TypeError("Must be a dict, not %s"%type(ns))
644 raise TypeError("Must be a dict, not %s"%type(ns))
645 return self._really_apply(util._push, (ns,), block=block, track=track, targets=targets)
645 return self._really_apply(util._push, (ns,), block=block, track=track, targets=targets)
646
646
647 def get(self, key_s):
647 def get(self, key_s):
648 """get object(s) by `key_s` from remote namespace
648 """get object(s) by `key_s` from remote namespace
649
649
650 see `pull` for details.
650 see `pull` for details.
651 """
651 """
652 # block = block if block is not None else self.block
652 # block = block if block is not None else self.block
653 return self.pull(key_s, block=True)
653 return self.pull(key_s, block=True)
654
654
655 def pull(self, names, targets=None, block=None):
655 def pull(self, names, targets=None, block=None):
656 """get object(s) by `name` from remote namespace
656 """get object(s) by `name` from remote namespace
657
657
658 will return one object if it is a key.
658 will return one object if it is a key.
659 can also take a list of keys, in which case it will return a list of objects.
659 can also take a list of keys, in which case it will return a list of objects.
660 """
660 """
661 block = block if block is not None else self.block
661 block = block if block is not None else self.block
662 targets = targets if targets is not None else self.targets
662 targets = targets if targets is not None else self.targets
663 applier = self.apply_sync if block else self.apply_async
663 applier = self.apply_sync if block else self.apply_async
664 if isinstance(names, basestring):
664 if isinstance(names, basestring):
665 pass
665 pass
666 elif isinstance(names, (list,tuple,set)):
666 elif isinstance(names, (list,tuple,set)):
667 for key in names:
667 for key in names:
668 if not isinstance(key, basestring):
668 if not isinstance(key, basestring):
669 raise TypeError("keys must be str, not type %r"%type(key))
669 raise TypeError("keys must be str, not type %r"%type(key))
670 else:
670 else:
671 raise TypeError("names must be strs, not %r"%names)
671 raise TypeError("names must be strs, not %r"%names)
672 return self._really_apply(util._pull, (names,), block=block, targets=targets)
672 return self._really_apply(util._pull, (names,), block=block, targets=targets)
673
673
674 def scatter(self, key, seq, dist='b', flatten=False, targets=None, block=None, track=None):
674 def scatter(self, key, seq, dist='b', flatten=False, targets=None, block=None, track=None):
675 """
675 """
676 Partition a Python sequence and send the partitions to a set of engines.
676 Partition a Python sequence and send the partitions to a set of engines.
677 """
677 """
678 block = block if block is not None else self.block
678 block = block if block is not None else self.block
679 track = track if track is not None else self.track
679 track = track if track is not None else self.track
680 targets = targets if targets is not None else self.targets
680 targets = targets if targets is not None else self.targets
681
681
682 mapObject = Map.dists[dist]()
682 mapObject = Map.dists[dist]()
683 nparts = len(targets)
683 nparts = len(targets)
684 msg_ids = []
684 msg_ids = []
685 trackers = []
685 trackers = []
686 for index, engineid in enumerate(targets):
686 for index, engineid in enumerate(targets):
687 partition = mapObject.getPartition(seq, index, nparts)
687 partition = mapObject.getPartition(seq, index, nparts)
688 if flatten and len(partition) == 1:
688 if flatten and len(partition) == 1:
689 ns = {key: partition[0]}
689 ns = {key: partition[0]}
690 else:
690 else:
691 ns = {key: partition}
691 ns = {key: partition}
692 r = self.push(ns, block=False, track=track, targets=engineid)
692 r = self.push(ns, block=False, track=track, targets=engineid)
693 msg_ids.extend(r.msg_ids)
693 msg_ids.extend(r.msg_ids)
694 if track:
694 if track:
695 trackers.append(r._tracker)
695 trackers.append(r._tracker)
696
696
697 if track:
697 if track:
698 tracker = zmq.MessageTracker(*trackers)
698 tracker = zmq.MessageTracker(*trackers)
699 else:
699 else:
700 tracker = None
700 tracker = None
701
701
702 r = AsyncResult(self.client, msg_ids, fname='scatter', targets=targets, tracker=tracker)
702 r = AsyncResult(self.client, msg_ids, fname='scatter', targets=targets, tracker=tracker)
703 if block:
703 if block:
704 r.wait()
704 r.wait()
705 else:
705 else:
706 return r
706 return r
707
707
708 @sync_results
708 @sync_results
709 @save_ids
709 @save_ids
710 def gather(self, key, dist='b', targets=None, block=None):
710 def gather(self, key, dist='b', targets=None, block=None):
711 """
711 """
712 Gather a partitioned sequence on a set of engines as a single local seq.
712 Gather a partitioned sequence on a set of engines as a single local seq.
713 """
713 """
714 block = block if block is not None else self.block
714 block = block if block is not None else self.block
715 targets = targets if targets is not None else self.targets
715 targets = targets if targets is not None else self.targets
716 mapObject = Map.dists[dist]()
716 mapObject = Map.dists[dist]()
717 msg_ids = []
717 msg_ids = []
718
718
719 for index, engineid in enumerate(targets):
719 for index, engineid in enumerate(targets):
720 msg_ids.extend(self.pull(key, block=False, targets=engineid).msg_ids)
720 msg_ids.extend(self.pull(key, block=False, targets=engineid).msg_ids)
721
721
722 r = AsyncMapResult(self.client, msg_ids, mapObject, fname='gather')
722 r = AsyncMapResult(self.client, msg_ids, mapObject, fname='gather')
723
723
724 if block:
724 if block:
725 try:
725 try:
726 return r.get()
726 return r.get()
727 except KeyboardInterrupt:
727 except KeyboardInterrupt:
728 pass
728 pass
729 return r
729 return r
730
730
731 def __getitem__(self, key):
731 def __getitem__(self, key):
732 return self.get(key)
732 return self.get(key)
733
733
734 def __setitem__(self,key, value):
734 def __setitem__(self,key, value):
735 self.update({key:value})
735 self.update({key:value})
736
736
737 def clear(self, targets=None, block=False):
737 def clear(self, targets=None, block=False):
738 """Clear the remote namespaces on my engines."""
738 """Clear the remote namespaces on my engines."""
739 block = block if block is not None else self.block
739 block = block if block is not None else self.block
740 targets = targets if targets is not None else self.targets
740 targets = targets if targets is not None else self.targets
741 return self.client.clear(targets=targets, block=block)
741 return self.client.clear(targets=targets, block=block)
742
742
743 def kill(self, targets=None, block=True):
743 def kill(self, targets=None, block=True):
744 """Kill my engines."""
744 """Kill my engines."""
745 block = block if block is not None else self.block
745 block = block if block is not None else self.block
746 targets = targets if targets is not None else self.targets
746 targets = targets if targets is not None else self.targets
747 return self.client.kill(targets=targets, block=block)
747 return self.client.kill(targets=targets, block=block)
748
748
749 #----------------------------------------
749 #----------------------------------------
750 # activate for %px,%autopx magics
750 # activate for %px,%autopx magics
751 #----------------------------------------
751 #----------------------------------------
752 def activate(self):
752 def activate(self):
753 """Make this `View` active for parallel magic commands.
753 """Make this `View` active for parallel magic commands.
754
754
755 IPython has a magic command syntax to work with `MultiEngineClient` objects.
755 IPython has a magic command syntax to work with `MultiEngineClient` objects.
756 In a given IPython session there is a single active one. While
756 In a given IPython session there is a single active one. While
757 there can be many `Views` created and used by the user,
757 there can be many `Views` created and used by the user,
758 there is only one active one. The active `View` is used whenever
758 there is only one active one. The active `View` is used whenever
759 the magic commands %px and %autopx are used.
759 the magic commands %px and %autopx are used.
760
760
761 The activate() method is called on a given `View` to make it
761 The activate() method is called on a given `View` to make it
762 active. Once this has been done, the magic commands can be used.
762 active. Once this has been done, the magic commands can be used.
763 """
763 """
764
764
765 try:
765 try:
766 # This is injected into __builtins__.
766 # This is injected into __builtins__.
767 ip = get_ipython()
767 ip = get_ipython()
768 except NameError:
768 except NameError:
769 print "The IPython parallel magics (%result, %px, %autopx) only work within IPython."
769 print "The IPython parallel magics (%result, %px, %autopx) only work within IPython."
770 else:
770 else:
771 pmagic = ip.plugin_manager.get_plugin('parallelmagic')
771 pmagic = ip.plugin_manager.get_plugin('parallelmagic')
772 if pmagic is None:
772 if pmagic is None:
773 ip.magic_load_ext('parallelmagic')
773 ip.magic_load_ext('parallelmagic')
774 pmagic = ip.plugin_manager.get_plugin('parallelmagic')
774 pmagic = ip.plugin_manager.get_plugin('parallelmagic')
775
775
776 pmagic.active_view = self
776 pmagic.active_view = self
777
777
778
778
779 @skip_doctest
779 @skip_doctest
780 class LoadBalancedView(View):
780 class LoadBalancedView(View):
781 """An load-balancing View that only executes via the Task scheduler.
781 """An load-balancing View that only executes via the Task scheduler.
782
782
783 Load-balanced views can be created with the client's `view` method:
783 Load-balanced views can be created with the client's `view` method:
784
784
785 >>> v = client.load_balanced_view()
785 >>> v = client.load_balanced_view()
786
786
787 or targets can be specified, to restrict the potential destinations:
787 or targets can be specified, to restrict the potential destinations:
788
788
789 >>> v = client.client.load_balanced_view(([1,3])
789 >>> v = client.client.load_balanced_view(([1,3])
790
790
791 which would restrict loadbalancing to between engines 1 and 3.
791 which would restrict loadbalancing to between engines 1 and 3.
792
792
793 """
793 """
794
794
795 follow=Any()
795 follow=Any()
796 after=Any()
796 after=Any()
797 timeout=CFloat()
797 timeout=CFloat()
798 retries = CInt(0)
798 retries = CInt(0)
799
799
800 _task_scheme = Any()
800 _task_scheme = Any()
801 _flag_names = List(['targets', 'block', 'track', 'follow', 'after', 'timeout', 'retries'])
801 _flag_names = List(['targets', 'block', 'track', 'follow', 'after', 'timeout', 'retries'])
802
802
803 def __init__(self, client=None, socket=None, **flags):
803 def __init__(self, client=None, socket=None, **flags):
804 super(LoadBalancedView, self).__init__(client=client, socket=socket, **flags)
804 super(LoadBalancedView, self).__init__(client=client, socket=socket, **flags)
805 self._task_scheme=client._task_scheme
805 self._task_scheme=client._task_scheme
806
806
807 def _validate_dependency(self, dep):
807 def _validate_dependency(self, dep):
808 """validate a dependency.
808 """validate a dependency.
809
809
810 For use in `set_flags`.
810 For use in `set_flags`.
811 """
811 """
812 if dep is None or isinstance(dep, (basestring, AsyncResult, Dependency)):
812 if dep is None or isinstance(dep, (basestring, AsyncResult, Dependency)):
813 return True
813 return True
814 elif isinstance(dep, (list,set, tuple)):
814 elif isinstance(dep, (list,set, tuple)):
815 for d in dep:
815 for d in dep:
816 if not isinstance(d, (basestring, AsyncResult)):
816 if not isinstance(d, (basestring, AsyncResult)):
817 return False
817 return False
818 elif isinstance(dep, dict):
818 elif isinstance(dep, dict):
819 if set(dep.keys()) != set(Dependency().as_dict().keys()):
819 if set(dep.keys()) != set(Dependency().as_dict().keys()):
820 return False
820 return False
821 if not isinstance(dep['msg_ids'], list):
821 if not isinstance(dep['msg_ids'], list):
822 return False
822 return False
823 for d in dep['msg_ids']:
823 for d in dep['msg_ids']:
824 if not isinstance(d, basestring):
824 if not isinstance(d, basestring):
825 return False
825 return False
826 else:
826 else:
827 return False
827 return False
828
828
829 return True
829 return True
830
830
831 def _render_dependency(self, dep):
831 def _render_dependency(self, dep):
832 """helper for building jsonable dependencies from various input forms."""
832 """helper for building jsonable dependencies from various input forms."""
833 if isinstance(dep, Dependency):
833 if isinstance(dep, Dependency):
834 return dep.as_dict()
834 return dep.as_dict()
835 elif isinstance(dep, AsyncResult):
835 elif isinstance(dep, AsyncResult):
836 return dep.msg_ids
836 return dep.msg_ids
837 elif dep is None:
837 elif dep is None:
838 return []
838 return []
839 else:
839 else:
840 # pass to Dependency constructor
840 # pass to Dependency constructor
841 return list(Dependency(dep))
841 return list(Dependency(dep))
842
842
843 def set_flags(self, **kwargs):
843 def set_flags(self, **kwargs):
844 """set my attribute flags by keyword.
844 """set my attribute flags by keyword.
845
845
846 A View is a wrapper for the Client's apply method, but with attributes
846 A View is a wrapper for the Client's apply method, but with attributes
847 that specify keyword arguments, those attributes can be set by keyword
847 that specify keyword arguments, those attributes can be set by keyword
848 argument with this method.
848 argument with this method.
849
849
850 Parameters
850 Parameters
851 ----------
851 ----------
852
852
853 block : bool
853 block : bool
854 whether to wait for results
854 whether to wait for results
855 track : bool
855 track : bool
856 whether to create a MessageTracker to allow the user to
856 whether to create a MessageTracker to allow the user to
857 safely edit after arrays and buffers during non-copying
857 safely edit after arrays and buffers during non-copying
858 sends.
858 sends.
859
859
860 after : Dependency or collection of msg_ids
860 after : Dependency or collection of msg_ids
861 Only for load-balanced execution (targets=None)
861 Only for load-balanced execution (targets=None)
862 Specify a list of msg_ids as a time-based dependency.
862 Specify a list of msg_ids as a time-based dependency.
863 This job will only be run *after* the dependencies
863 This job will only be run *after* the dependencies
864 have been met.
864 have been met.
865
865
866 follow : Dependency or collection of msg_ids
866 follow : Dependency or collection of msg_ids
867 Only for load-balanced execution (targets=None)
867 Only for load-balanced execution (targets=None)
868 Specify a list of msg_ids as a location-based dependency.
868 Specify a list of msg_ids as a location-based dependency.
869 This job will only be run on an engine where this dependency
869 This job will only be run on an engine where this dependency
870 is met.
870 is met.
871
871
872 timeout : float/int or None
872 timeout : float/int or None
873 Only for load-balanced execution (targets=None)
873 Only for load-balanced execution (targets=None)
874 Specify an amount of time (in seconds) for the scheduler to
874 Specify an amount of time (in seconds) for the scheduler to
875 wait for dependencies to be met before failing with a
875 wait for dependencies to be met before failing with a
876 DependencyTimeout.
876 DependencyTimeout.
877
877
878 retries : int
878 retries : int
879 Number of times a task will be retried on failure.
879 Number of times a task will be retried on failure.
880 """
880 """
881
881
882 super(LoadBalancedView, self).set_flags(**kwargs)
882 super(LoadBalancedView, self).set_flags(**kwargs)
883 for name in ('follow', 'after'):
883 for name in ('follow', 'after'):
884 if name in kwargs:
884 if name in kwargs:
885 value = kwargs[name]
885 value = kwargs[name]
886 if self._validate_dependency(value):
886 if self._validate_dependency(value):
887 setattr(self, name, value)
887 setattr(self, name, value)
888 else:
888 else:
889 raise ValueError("Invalid dependency: %r"%value)
889 raise ValueError("Invalid dependency: %r"%value)
890 if 'timeout' in kwargs:
890 if 'timeout' in kwargs:
891 t = kwargs['timeout']
891 t = kwargs['timeout']
892 if not isinstance(t, (int, long, float, type(None))):
892 if not isinstance(t, (int, long, float, type(None))):
893 raise TypeError("Invalid type for timeout: %r"%type(t))
893 raise TypeError("Invalid type for timeout: %r"%type(t))
894 if t is not None:
894 if t is not None:
895 if t < 0:
895 if t < 0:
896 raise ValueError("Invalid timeout: %s"%t)
896 raise ValueError("Invalid timeout: %s"%t)
897 self.timeout = t
897 self.timeout = t
898
898
899 @sync_results
899 @sync_results
900 @save_ids
900 @save_ids
901 def _really_apply(self, f, args=None, kwargs=None, block=None, track=None,
901 def _really_apply(self, f, args=None, kwargs=None, block=None, track=None,
902 after=None, follow=None, timeout=None,
902 after=None, follow=None, timeout=None,
903 targets=None, retries=None):
903 targets=None, retries=None):
904 """calls f(*args, **kwargs) on a remote engine, returning the result.
904 """calls f(*args, **kwargs) on a remote engine, returning the result.
905
905
906 This method temporarily sets all of `apply`'s flags for a single call.
906 This method temporarily sets all of `apply`'s flags for a single call.
907
907
908 Parameters
908 Parameters
909 ----------
909 ----------
910
910
911 f : callable
911 f : callable
912
912
913 args : list [default: empty]
913 args : list [default: empty]
914
914
915 kwargs : dict [default: empty]
915 kwargs : dict [default: empty]
916
916
917 block : bool [default: self.block]
917 block : bool [default: self.block]
918 whether to block
918 whether to block
919 track : bool [default: self.track]
919 track : bool [default: self.track]
920 whether to ask zmq to track the message, for safe non-copying sends
920 whether to ask zmq to track the message, for safe non-copying sends
921
921
922 !!!!!! TODO: THE REST HERE !!!!
922 !!!!!! TODO: THE REST HERE !!!!
923
923
924 Returns
924 Returns
925 -------
925 -------
926
926
927 if self.block is False:
927 if self.block is False:
928 returns AsyncResult
928 returns AsyncResult
929 else:
929 else:
930 returns actual result of f(*args, **kwargs) on the engine(s)
930 returns actual result of f(*args, **kwargs) on the engine(s)
931 This will be a list of self.targets is also a list (even length 1), or
931 This will be a list of self.targets is also a list (even length 1), or
932 the single result if self.targets is an integer engine id
932 the single result if self.targets is an integer engine id
933 """
933 """
934
934
935 # validate whether we can run
935 # validate whether we can run
936 if self._socket.closed:
936 if self._socket.closed:
937 msg = "Task farming is disabled"
937 msg = "Task farming is disabled"
938 if self._task_scheme == 'pure':
938 if self._task_scheme == 'pure':
939 msg += " because the pure ZMQ scheduler cannot handle"
939 msg += " because the pure ZMQ scheduler cannot handle"
940 msg += " disappearing engines."
940 msg += " disappearing engines."
941 raise RuntimeError(msg)
941 raise RuntimeError(msg)
942
942
943 if self._task_scheme == 'pure':
943 if self._task_scheme == 'pure':
944 # pure zmq scheme doesn't support extra features
944 # pure zmq scheme doesn't support extra features
945 msg = "Pure ZMQ scheduler doesn't support the following flags:"
945 msg = "Pure ZMQ scheduler doesn't support the following flags:"
946 "follow, after, retries, targets, timeout"
946 "follow, after, retries, targets, timeout"
947 if (follow or after or retries or targets or timeout):
947 if (follow or after or retries or targets or timeout):
948 # hard fail on Scheduler flags
948 # hard fail on Scheduler flags
949 raise RuntimeError(msg)
949 raise RuntimeError(msg)
950 if isinstance(f, dependent):
950 if isinstance(f, dependent):
951 # soft warn on functional dependencies
951 # soft warn on functional dependencies
952 warnings.warn(msg, RuntimeWarning)
952 warnings.warn(msg, RuntimeWarning)
953
953
954 # build args
954 # build args
955 args = [] if args is None else args
955 args = [] if args is None else args
956 kwargs = {} if kwargs is None else kwargs
956 kwargs = {} if kwargs is None else kwargs
957 block = self.block if block is None else block
957 block = self.block if block is None else block
958 track = self.track if track is None else track
958 track = self.track if track is None else track
959 after = self.after if after is None else after
959 after = self.after if after is None else after
960 retries = self.retries if retries is None else retries
960 retries = self.retries if retries is None else retries
961 follow = self.follow if follow is None else follow
961 follow = self.follow if follow is None else follow
962 timeout = self.timeout if timeout is None else timeout
962 timeout = self.timeout if timeout is None else timeout
963 targets = self.targets if targets is None else targets
963 targets = self.targets if targets is None else targets
964
964
965 if not isinstance(retries, int):
965 if not isinstance(retries, int):
966 raise TypeError('retries must be int, not %r'%type(retries))
966 raise TypeError('retries must be int, not %r'%type(retries))
967
967
968 if targets is None:
968 if targets is None:
969 idents = []
969 idents = []
970 else:
970 else:
971 idents = self.client._build_targets(targets)[0]
971 idents = self.client._build_targets(targets)[0]
972 # ensure *not* bytes
972 # ensure *not* bytes
973 idents = [ ident.decode() for ident in idents ]
973 idents = [ ident.decode() for ident in idents ]
974
974
975 after = self._render_dependency(after)
975 after = self._render_dependency(after)
976 follow = self._render_dependency(follow)
976 follow = self._render_dependency(follow)
977 subheader = dict(after=after, follow=follow, timeout=timeout, targets=idents, retries=retries)
977 subheader = dict(after=after, follow=follow, timeout=timeout, targets=idents, retries=retries)
978
978
979 msg = self.client.send_apply_message(self._socket, f, args, kwargs, track=track,
979 msg = self.client.send_apply_message(self._socket, f, args, kwargs, track=track,
980 subheader=subheader)
980 subheader=subheader)
981 tracker = None if track is False else msg['tracker']
981 tracker = None if track is False else msg['tracker']
982
982
983 ar = AsyncResult(self.client, msg['msg_id'], fname=f.__name__, targets=None, tracker=tracker)
983 ar = AsyncResult(self.client, msg['header']['msg_id'], fname=f.__name__, targets=None, tracker=tracker)
984
984
985 if block:
985 if block:
986 try:
986 try:
987 return ar.get()
987 return ar.get()
988 except KeyboardInterrupt:
988 except KeyboardInterrupt:
989 pass
989 pass
990 return ar
990 return ar
991
991
992 @spin_after
992 @spin_after
993 @save_ids
993 @save_ids
994 def map(self, f, *sequences, **kwargs):
994 def map(self, f, *sequences, **kwargs):
995 """view.map(f, *sequences, block=self.block, chunksize=1) => list|AsyncMapResult
995 """view.map(f, *sequences, block=self.block, chunksize=1) => list|AsyncMapResult
996
996
997 Parallel version of builtin `map`, load-balanced by this View.
997 Parallel version of builtin `map`, load-balanced by this View.
998
998
999 `block`, and `chunksize` can be specified by keyword only.
999 `block`, and `chunksize` can be specified by keyword only.
1000
1000
1001 Each `chunksize` elements will be a separate task, and will be
1001 Each `chunksize` elements will be a separate task, and will be
1002 load-balanced. This lets individual elements be available for iteration
1002 load-balanced. This lets individual elements be available for iteration
1003 as soon as they arrive.
1003 as soon as they arrive.
1004
1004
1005 Parameters
1005 Parameters
1006 ----------
1006 ----------
1007
1007
1008 f : callable
1008 f : callable
1009 function to be mapped
1009 function to be mapped
1010 *sequences: one or more sequences of matching length
1010 *sequences: one or more sequences of matching length
1011 the sequences to be distributed and passed to `f`
1011 the sequences to be distributed and passed to `f`
1012 block : bool
1012 block : bool
1013 whether to wait for the result or not [default self.block]
1013 whether to wait for the result or not [default self.block]
1014 track : bool
1014 track : bool
1015 whether to create a MessageTracker to allow the user to
1015 whether to create a MessageTracker to allow the user to
1016 safely edit after arrays and buffers during non-copying
1016 safely edit after arrays and buffers during non-copying
1017 sends.
1017 sends.
1018 chunksize : int
1018 chunksize : int
1019 how many elements should be in each task [default 1]
1019 how many elements should be in each task [default 1]
1020
1020
1021 Returns
1021 Returns
1022 -------
1022 -------
1023
1023
1024 if block=False:
1024 if block=False:
1025 AsyncMapResult
1025 AsyncMapResult
1026 An object like AsyncResult, but which reassembles the sequence of results
1026 An object like AsyncResult, but which reassembles the sequence of results
1027 into a single list. AsyncMapResults can be iterated through before all
1027 into a single list. AsyncMapResults can be iterated through before all
1028 results are complete.
1028 results are complete.
1029 else:
1029 else:
1030 the result of map(f,*sequences)
1030 the result of map(f,*sequences)
1031
1031
1032 """
1032 """
1033
1033
1034 # default
1034 # default
1035 block = kwargs.get('block', self.block)
1035 block = kwargs.get('block', self.block)
1036 chunksize = kwargs.get('chunksize', 1)
1036 chunksize = kwargs.get('chunksize', 1)
1037
1037
1038 keyset = set(kwargs.keys())
1038 keyset = set(kwargs.keys())
1039 extra_keys = keyset.difference_update(set(['block', 'chunksize']))
1039 extra_keys = keyset.difference_update(set(['block', 'chunksize']))
1040 if extra_keys:
1040 if extra_keys:
1041 raise TypeError("Invalid kwargs: %s"%list(extra_keys))
1041 raise TypeError("Invalid kwargs: %s"%list(extra_keys))
1042
1042
1043 assert len(sequences) > 0, "must have some sequences to map onto!"
1043 assert len(sequences) > 0, "must have some sequences to map onto!"
1044
1044
1045 pf = ParallelFunction(self, f, block=block, chunksize=chunksize)
1045 pf = ParallelFunction(self, f, block=block, chunksize=chunksize)
1046 return pf.map(*sequences)
1046 return pf.map(*sequences)
1047
1047
1048 __all__ = ['LoadBalancedView', 'DirectView']
1048 __all__ = ['LoadBalancedView', 'DirectView']
@@ -1,1291 +1,1291 b''
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 """The IPython Controller Hub with 0MQ
2 """The IPython Controller Hub with 0MQ
3 This is the master object that handles connections from engines and clients,
3 This is the master object that handles connections from engines and clients,
4 and monitors traffic through the various queues.
4 and monitors traffic through the various queues.
5
5
6 Authors:
6 Authors:
7
7
8 * Min RK
8 * Min RK
9 """
9 """
10 #-----------------------------------------------------------------------------
10 #-----------------------------------------------------------------------------
11 # Copyright (C) 2010 The IPython Development Team
11 # Copyright (C) 2010 The IPython Development Team
12 #
12 #
13 # Distributed under the terms of the BSD License. The full license is in
13 # Distributed under the terms of the BSD License. The full license is in
14 # the file COPYING, distributed as part of this software.
14 # the file COPYING, distributed as part of this software.
15 #-----------------------------------------------------------------------------
15 #-----------------------------------------------------------------------------
16
16
17 #-----------------------------------------------------------------------------
17 #-----------------------------------------------------------------------------
18 # Imports
18 # Imports
19 #-----------------------------------------------------------------------------
19 #-----------------------------------------------------------------------------
20 from __future__ import print_function
20 from __future__ import print_function
21
21
22 import sys
22 import sys
23 import time
23 import time
24 from datetime import datetime
24 from datetime import datetime
25
25
26 import zmq
26 import zmq
27 from zmq.eventloop import ioloop
27 from zmq.eventloop import ioloop
28 from zmq.eventloop.zmqstream import ZMQStream
28 from zmq.eventloop.zmqstream import ZMQStream
29
29
30 # internal:
30 # internal:
31 from IPython.utils.importstring import import_item
31 from IPython.utils.importstring import import_item
32 from IPython.utils.traitlets import (
32 from IPython.utils.traitlets import (
33 HasTraits, Instance, Int, Unicode, Dict, Set, Tuple, CBytes, DottedObjectName
33 HasTraits, Instance, Int, Unicode, Dict, Set, Tuple, CBytes, DottedObjectName
34 )
34 )
35
35
36 from IPython.parallel import error, util
36 from IPython.parallel import error, util
37 from IPython.parallel.factory import RegistrationFactory
37 from IPython.parallel.factory import RegistrationFactory
38
38
39 from IPython.zmq.session import SessionFactory
39 from IPython.zmq.session import SessionFactory
40
40
41 from .heartmonitor import HeartMonitor
41 from .heartmonitor import HeartMonitor
42
42
43 #-----------------------------------------------------------------------------
43 #-----------------------------------------------------------------------------
44 # Code
44 # Code
45 #-----------------------------------------------------------------------------
45 #-----------------------------------------------------------------------------
46
46
47 def _passer(*args, **kwargs):
47 def _passer(*args, **kwargs):
48 return
48 return
49
49
50 def _printer(*args, **kwargs):
50 def _printer(*args, **kwargs):
51 print (args)
51 print (args)
52 print (kwargs)
52 print (kwargs)
53
53
54 def empty_record():
54 def empty_record():
55 """Return an empty dict with all record keys."""
55 """Return an empty dict with all record keys."""
56 return {
56 return {
57 'msg_id' : None,
57 'msg_id' : None,
58 'header' : None,
58 'header' : None,
59 'content': None,
59 'content': None,
60 'buffers': None,
60 'buffers': None,
61 'submitted': None,
61 'submitted': None,
62 'client_uuid' : None,
62 'client_uuid' : None,
63 'engine_uuid' : None,
63 'engine_uuid' : None,
64 'started': None,
64 'started': None,
65 'completed': None,
65 'completed': None,
66 'resubmitted': None,
66 'resubmitted': None,
67 'result_header' : None,
67 'result_header' : None,
68 'result_content' : None,
68 'result_content' : None,
69 'result_buffers' : None,
69 'result_buffers' : None,
70 'queue' : None,
70 'queue' : None,
71 'pyin' : None,
71 'pyin' : None,
72 'pyout': None,
72 'pyout': None,
73 'pyerr': None,
73 'pyerr': None,
74 'stdout': '',
74 'stdout': '',
75 'stderr': '',
75 'stderr': '',
76 }
76 }
77
77
78 def init_record(msg):
78 def init_record(msg):
79 """Initialize a TaskRecord based on a request."""
79 """Initialize a TaskRecord based on a request."""
80 header = msg['header']
80 header = msg['header']
81 return {
81 return {
82 'msg_id' : header['msg_id'],
82 'msg_id' : header['msg_id'],
83 'header' : header,
83 'header' : header,
84 'content': msg['content'],
84 'content': msg['content'],
85 'buffers': msg['buffers'],
85 'buffers': msg['buffers'],
86 'submitted': header['date'],
86 'submitted': header['date'],
87 'client_uuid' : None,
87 'client_uuid' : None,
88 'engine_uuid' : None,
88 'engine_uuid' : None,
89 'started': None,
89 'started': None,
90 'completed': None,
90 'completed': None,
91 'resubmitted': None,
91 'resubmitted': None,
92 'result_header' : None,
92 'result_header' : None,
93 'result_content' : None,
93 'result_content' : None,
94 'result_buffers' : None,
94 'result_buffers' : None,
95 'queue' : None,
95 'queue' : None,
96 'pyin' : None,
96 'pyin' : None,
97 'pyout': None,
97 'pyout': None,
98 'pyerr': None,
98 'pyerr': None,
99 'stdout': '',
99 'stdout': '',
100 'stderr': '',
100 'stderr': '',
101 }
101 }
102
102
103
103
104 class EngineConnector(HasTraits):
104 class EngineConnector(HasTraits):
105 """A simple object for accessing the various zmq connections of an object.
105 """A simple object for accessing the various zmq connections of an object.
106 Attributes are:
106 Attributes are:
107 id (int): engine ID
107 id (int): engine ID
108 uuid (str): uuid (unused?)
108 uuid (str): uuid (unused?)
109 queue (str): identity of queue's XREQ socket
109 queue (str): identity of queue's XREQ socket
110 registration (str): identity of registration XREQ socket
110 registration (str): identity of registration XREQ socket
111 heartbeat (str): identity of heartbeat XREQ socket
111 heartbeat (str): identity of heartbeat XREQ socket
112 """
112 """
113 id=Int(0)
113 id=Int(0)
114 queue=CBytes()
114 queue=CBytes()
115 control=CBytes()
115 control=CBytes()
116 registration=CBytes()
116 registration=CBytes()
117 heartbeat=CBytes()
117 heartbeat=CBytes()
118 pending=Set()
118 pending=Set()
119
119
class HubFactory(RegistrationFactory):
    """The Configurable for setting up a Hub."""

    # port-pairs for monitoredqueues:
    hb = Tuple(Int, Int, config=True,
        help="""XREQ/SUB Port pair for Engine heartbeats""")

    def _hb_default(self):
        return tuple(util.select_random_ports(2))

    mux = Tuple(Int, Int, config=True,
        help="""Engine/Client Port pair for MUX queue""")

    def _mux_default(self):
        return tuple(util.select_random_ports(2))

    task = Tuple(Int, Int, config=True,
        help="""Engine/Client Port pair for Task queue""")

    def _task_default(self):
        return tuple(util.select_random_ports(2))

    control = Tuple(Int, Int, config=True,
        help="""Engine/Client Port pair for Control queue""")

    def _control_default(self):
        return tuple(util.select_random_ports(2))

    iopub = Tuple(Int, Int, config=True,
        help="""Engine/Client Port pair for IOPub relay""")

    def _iopub_default(self):
        return tuple(util.select_random_ports(2))

    # single ports:
    mon_port = Int(config=True,
        help="""Monitor (SUB) port for queue traffic""")

    def _mon_port_default(self):
        return util.select_random_ports(1)[0]

    notifier_port = Int(config=True,
        help="""PUB port for sending engine status notifications""")

    def _notifier_port_default(self):
        return util.select_random_ports(1)[0]

    engine_ip = Unicode('127.0.0.1', config=True,
        help="IP on which to listen for engine connections. [default: loopback]")
    engine_transport = Unicode('tcp', config=True,
        help="0MQ transport for engine connections. [default: tcp]")

    client_ip = Unicode('127.0.0.1', config=True,
        help="IP on which to listen for client connections. [default: loopback]")
    client_transport = Unicode('tcp', config=True,
        help="0MQ transport for client connections. [default : tcp]")

    monitor_ip = Unicode('127.0.0.1', config=True,
        help="IP on which to listen for monitor messages. [default: loopback]")
    monitor_transport = Unicode('tcp', config=True,
        help="0MQ transport for monitor messages. [default : tcp]")

    monitor_url = Unicode('')

    db_class = DottedObjectName('IPython.parallel.controller.dictdb.DictDB',
        config=True, help="""The class to use for the DB backend""")

    # not configurable
    db = Instance('IPython.parallel.controller.dictdb.BaseDB')
    heartmonitor = Instance('IPython.parallel.controller.heartmonitor.HeartMonitor')

    def _ip_changed(self, name, old, new):
        # a change to the generic `ip` trait fans out to all three endpoints
        self.engine_ip = new
        self.client_ip = new
        self.monitor_ip = new
        self._update_monitor_url()

    def _update_monitor_url(self):
        # recompute the monitor endpoint whenever its parts change
        self.monitor_url = "%s://%s:%i" % (self.monitor_transport, self.monitor_ip, self.mon_port)

    def _transport_changed(self, name, old, new):
        # likewise, a generic `transport` change applies to every endpoint
        self.engine_transport = new
        self.client_transport = new
        self.monitor_transport = new
        self._update_monitor_url()

    def __init__(self, **kwargs):
        super(HubFactory, self).__init__(**kwargs)
        self._update_monitor_url()

    def construct(self):
        self.init_hub()

    def start(self):
        self.heartmonitor.start()
        self.log.info("Heartmonitor started")

    def init_hub(self):
        """Create all the Hub's zmq connections and construct the Hub object."""
        client_iface = "%s://%s:" % (self.client_transport, self.client_ip) + "%i"
        engine_iface = "%s://%s:" % (self.engine_transport, self.engine_ip) + "%i"

        ctx = self.context
        loop = self.loop

        # Registrar socket
        reg_stream = ZMQStream(ctx.socket(zmq.XREP), loop)
        reg_stream.bind(client_iface % self.regport)
        self.log.info("Hub listening on %s for registration."%(client_iface%self.regport))
        if self.client_ip != self.engine_ip:
            # engines connect on a separate interface; bind there as well
            reg_stream.bind(engine_iface % self.regport)
            self.log.info("Hub listening on %s for registration."%(engine_iface%self.regport))

        ### Engine connections ###

        # heartbeat
        heart_pub = ctx.socket(zmq.PUB)
        heart_pub.bind(engine_iface % self.hb[0])
        heart_router = ctx.socket(zmq.XREP)
        heart_router.bind(engine_iface % self.hb[1])
        self.heartmonitor = HeartMonitor(loop=loop, config=self.config, log=self.log,
                                pingstream=ZMQStream(heart_pub, loop),
                                pongstream=ZMQStream(heart_router, loop)
                                )

        ### Client connections ###
        # Notifier socket
        notifier = ZMQStream(ctx.socket(zmq.PUB), loop)
        notifier.bind(client_iface % self.notifier_port)

        ### build and launch the queues ###

        # monitor socket
        mon_sock = ctx.socket(zmq.SUB)
        mon_sock.setsockopt(zmq.SUBSCRIBE, b"")
        mon_sock.bind(self.monitor_url)
        mon_sock.bind('inproc://monitor')
        monitor = ZMQStream(mon_sock, loop)

        # connect the db
        self.log.info('Hub using DB backend: %r'%(self.db_class.split()[-1]))
        self.db = import_item(str(self.db_class))(session=self.session.session,
                                                  config=self.config, log=self.log)
        time.sleep(.25)
        try:
            scheme = self.config.TaskScheduler.scheme_name
        except AttributeError:
            # no scheme configured; fall back to the TaskScheduler default
            from .scheduler import TaskScheduler
            scheme = TaskScheduler.scheme_name.get_default_value()

        # build connection dicts
        self.engine_info = {
            'control': engine_iface % self.control[1],
            'mux': engine_iface % self.mux[1],
            'heartbeat': (engine_iface % self.hb[0], engine_iface % self.hb[1]),
            'task': engine_iface % self.task[1],
            'iopub': engine_iface % self.iopub[1],
        }

        self.client_info = {
            'control': client_iface % self.control[0],
            'mux': client_iface % self.mux[0],
            'task': (scheme, client_iface % self.task[0]),
            'iopub': client_iface % self.iopub[0],
            'notification': client_iface % self.notifier_port,
        }
        self.log.debug("Hub engine addrs: %s"%self.engine_info)
        self.log.debug("Hub client addrs: %s"%self.client_info)

        # resubmit stream
        resubmit = ZMQStream(ctx.socket(zmq.XREQ), loop)
        task_url = util.disambiguate_url(self.client_info['task'][-1])
        resubmit.setsockopt(zmq.IDENTITY, util.asbytes(self.session.session))
        resubmit.connect(task_url)

        self.hub = Hub(loop=loop, session=self.session, monitor=monitor, heartmonitor=self.heartmonitor,
                query=reg_stream, notifier=notifier, resubmit=resubmit, db=self.db,
                engine_info=self.engine_info, client_info=self.client_info,
                log=self.log)

301 class Hub(SessionFactory):
301 class Hub(SessionFactory):
302 """The IPython Controller Hub with 0MQ connections
302 """The IPython Controller Hub with 0MQ connections
303
303
304 Parameters
304 Parameters
305 ==========
305 ==========
306 loop: zmq IOLoop instance
306 loop: zmq IOLoop instance
307 session: Session object
307 session: Session object
308 <removed> context: zmq context for creating new connections (?)
308 <removed> context: zmq context for creating new connections (?)
309 queue: ZMQStream for monitoring the command queue (SUB)
309 queue: ZMQStream for monitoring the command queue (SUB)
310 query: ZMQStream for engine registration and client queries requests (XREP)
310 query: ZMQStream for engine registration and client queries requests (XREP)
311 heartbeat: HeartMonitor object checking the pulse of the engines
311 heartbeat: HeartMonitor object checking the pulse of the engines
312 notifier: ZMQStream for broadcasting engine registration changes (PUB)
312 notifier: ZMQStream for broadcasting engine registration changes (PUB)
313 db: connection to db for out of memory logging of commands
313 db: connection to db for out of memory logging of commands
314 NotImplemented
314 NotImplemented
315 engine_info: dict of zmq connection information for engines to connect
315 engine_info: dict of zmq connection information for engines to connect
316 to the queues.
316 to the queues.
317 client_info: dict of zmq connection information for engines to connect
317 client_info: dict of zmq connection information for engines to connect
318 to the queues.
318 to the queues.
319 """
319 """
320 # internal data structures:
320 # internal data structures:
321 ids=Set() # engine IDs
321 ids=Set() # engine IDs
322 keytable=Dict()
322 keytable=Dict()
323 by_ident=Dict()
323 by_ident=Dict()
324 engines=Dict()
324 engines=Dict()
325 clients=Dict()
325 clients=Dict()
326 hearts=Dict()
326 hearts=Dict()
327 pending=Set()
327 pending=Set()
328 queues=Dict() # pending msg_ids keyed by engine_id
328 queues=Dict() # pending msg_ids keyed by engine_id
329 tasks=Dict() # pending msg_ids submitted as tasks, keyed by client_id
329 tasks=Dict() # pending msg_ids submitted as tasks, keyed by client_id
330 completed=Dict() # completed msg_ids keyed by engine_id
330 completed=Dict() # completed msg_ids keyed by engine_id
331 all_completed=Set() # completed msg_ids keyed by engine_id
331 all_completed=Set() # completed msg_ids keyed by engine_id
332 dead_engines=Set() # completed msg_ids keyed by engine_id
332 dead_engines=Set() # completed msg_ids keyed by engine_id
333 unassigned=Set() # set of task msg_ds not yet assigned a destination
333 unassigned=Set() # set of task msg_ds not yet assigned a destination
334 incoming_registrations=Dict()
334 incoming_registrations=Dict()
335 registration_timeout=Int()
335 registration_timeout=Int()
336 _idcounter=Int(0)
336 _idcounter=Int(0)
337
337
338 # objects from constructor:
338 # objects from constructor:
339 query=Instance(ZMQStream)
339 query=Instance(ZMQStream)
340 monitor=Instance(ZMQStream)
340 monitor=Instance(ZMQStream)
341 notifier=Instance(ZMQStream)
341 notifier=Instance(ZMQStream)
342 resubmit=Instance(ZMQStream)
342 resubmit=Instance(ZMQStream)
343 heartmonitor=Instance(HeartMonitor)
343 heartmonitor=Instance(HeartMonitor)
344 db=Instance(object)
344 db=Instance(object)
345 client_info=Dict()
345 client_info=Dict()
346 engine_info=Dict()
346 engine_info=Dict()
347
347
348
348
349 def __init__(self, **kwargs):
349 def __init__(self, **kwargs):
350 """
350 """
351 # universal:
351 # universal:
352 loop: IOLoop for creating future connections
352 loop: IOLoop for creating future connections
353 session: streamsession for sending serialized data
353 session: streamsession for sending serialized data
354 # engine:
354 # engine:
355 queue: ZMQStream for monitoring queue messages
355 queue: ZMQStream for monitoring queue messages
356 query: ZMQStream for engine+client registration and client requests
356 query: ZMQStream for engine+client registration and client requests
357 heartbeat: HeartMonitor object for tracking engines
357 heartbeat: HeartMonitor object for tracking engines
358 # extra:
358 # extra:
359 db: ZMQStream for db connection (NotImplemented)
359 db: ZMQStream for db connection (NotImplemented)
360 engine_info: zmq address/protocol dict for engine connections
360 engine_info: zmq address/protocol dict for engine connections
361 client_info: zmq address/protocol dict for client connections
361 client_info: zmq address/protocol dict for client connections
362 """
362 """
363
363
364 super(Hub, self).__init__(**kwargs)
364 super(Hub, self).__init__(**kwargs)
365 self.registration_timeout = max(5000, 2*self.heartmonitor.period)
365 self.registration_timeout = max(5000, 2*self.heartmonitor.period)
366
366
367 # validate connection dicts:
367 # validate connection dicts:
368 for k,v in self.client_info.iteritems():
368 for k,v in self.client_info.iteritems():
369 if k == 'task':
369 if k == 'task':
370 util.validate_url_container(v[1])
370 util.validate_url_container(v[1])
371 else:
371 else:
372 util.validate_url_container(v)
372 util.validate_url_container(v)
373 # util.validate_url_container(self.client_info)
373 # util.validate_url_container(self.client_info)
374 util.validate_url_container(self.engine_info)
374 util.validate_url_container(self.engine_info)
375
375
376 # register our callbacks
376 # register our callbacks
377 self.query.on_recv(self.dispatch_query)
377 self.query.on_recv(self.dispatch_query)
378 self.monitor.on_recv(self.dispatch_monitor_traffic)
378 self.monitor.on_recv(self.dispatch_monitor_traffic)
379
379
380 self.heartmonitor.add_heart_failure_handler(self.handle_heart_failure)
380 self.heartmonitor.add_heart_failure_handler(self.handle_heart_failure)
381 self.heartmonitor.add_new_heart_handler(self.handle_new_heart)
381 self.heartmonitor.add_new_heart_handler(self.handle_new_heart)
382
382
383 self.monitor_handlers = {b'in' : self.save_queue_request,
383 self.monitor_handlers = {b'in' : self.save_queue_request,
384 b'out': self.save_queue_result,
384 b'out': self.save_queue_result,
385 b'intask': self.save_task_request,
385 b'intask': self.save_task_request,
386 b'outtask': self.save_task_result,
386 b'outtask': self.save_task_result,
387 b'tracktask': self.save_task_destination,
387 b'tracktask': self.save_task_destination,
388 b'incontrol': _passer,
388 b'incontrol': _passer,
389 b'outcontrol': _passer,
389 b'outcontrol': _passer,
390 b'iopub': self.save_iopub_message,
390 b'iopub': self.save_iopub_message,
391 }
391 }
392
392
393 self.query_handlers = {'queue_request': self.queue_status,
393 self.query_handlers = {'queue_request': self.queue_status,
394 'result_request': self.get_results,
394 'result_request': self.get_results,
395 'history_request': self.get_history,
395 'history_request': self.get_history,
396 'db_request': self.db_query,
396 'db_request': self.db_query,
397 'purge_request': self.purge_results,
397 'purge_request': self.purge_results,
398 'load_request': self.check_load,
398 'load_request': self.check_load,
399 'resubmit_request': self.resubmit_task,
399 'resubmit_request': self.resubmit_task,
400 'shutdown_request': self.shutdown_request,
400 'shutdown_request': self.shutdown_request,
401 'registration_request' : self.register_engine,
401 'registration_request' : self.register_engine,
402 'unregistration_request' : self.unregister_engine,
402 'unregistration_request' : self.unregister_engine,
403 'connection_request': self.connection_request,
403 'connection_request': self.connection_request,
404 }
404 }
405
405
406 # ignore resubmit replies
406 # ignore resubmit replies
407 self.resubmit.on_recv(lambda msg: None, copy=False)
407 self.resubmit.on_recv(lambda msg: None, copy=False)
408
408
409 self.log.info("hub::created hub")
409 self.log.info("hub::created hub")
410
410
411 @property
411 @property
412 def _next_id(self):
412 def _next_id(self):
413 """gemerate a new ID.
413 """gemerate a new ID.
414
414
415 No longer reuse old ids, just count from 0."""
415 No longer reuse old ids, just count from 0."""
416 newid = self._idcounter
416 newid = self._idcounter
417 self._idcounter += 1
417 self._idcounter += 1
418 return newid
418 return newid
419 # newid = 0
419 # newid = 0
420 # incoming = [id[0] for id in self.incoming_registrations.itervalues()]
420 # incoming = [id[0] for id in self.incoming_registrations.itervalues()]
421 # # print newid, self.ids, self.incoming_registrations
421 # # print newid, self.ids, self.incoming_registrations
422 # while newid in self.ids or newid in incoming:
422 # while newid in self.ids or newid in incoming:
423 # newid += 1
423 # newid += 1
424 # return newid
424 # return newid
425
425
426 #-----------------------------------------------------------------------------
426 #-----------------------------------------------------------------------------
427 # message validation
427 # message validation
428 #-----------------------------------------------------------------------------
428 #-----------------------------------------------------------------------------
429
429
430 def _validate_targets(self, targets):
430 def _validate_targets(self, targets):
431 """turn any valid targets argument into a list of integer ids"""
431 """turn any valid targets argument into a list of integer ids"""
432 if targets is None:
432 if targets is None:
433 # default to all
433 # default to all
434 targets = self.ids
434 targets = self.ids
435
435
436 if isinstance(targets, (int,str,unicode)):
436 if isinstance(targets, (int,str,unicode)):
437 # only one target specified
437 # only one target specified
438 targets = [targets]
438 targets = [targets]
439 _targets = []
439 _targets = []
440 for t in targets:
440 for t in targets:
441 # map raw identities to ids
441 # map raw identities to ids
442 if isinstance(t, (str,unicode)):
442 if isinstance(t, (str,unicode)):
443 t = self.by_ident.get(t, t)
443 t = self.by_ident.get(t, t)
444 _targets.append(t)
444 _targets.append(t)
445 targets = _targets
445 targets = _targets
446 bad_targets = [ t for t in targets if t not in self.ids ]
446 bad_targets = [ t for t in targets if t not in self.ids ]
447 if bad_targets:
447 if bad_targets:
448 raise IndexError("No Such Engine: %r"%bad_targets)
448 raise IndexError("No Such Engine: %r"%bad_targets)
449 if not targets:
449 if not targets:
450 raise IndexError("No Engines Registered")
450 raise IndexError("No Engines Registered")
451 return targets
451 return targets
452
452
453 #-----------------------------------------------------------------------------
453 #-----------------------------------------------------------------------------
454 # dispatch methods (1 per stream)
454 # dispatch methods (1 per stream)
455 #-----------------------------------------------------------------------------
455 #-----------------------------------------------------------------------------
456
456
457
457
458 def dispatch_monitor_traffic(self, msg):
458 def dispatch_monitor_traffic(self, msg):
459 """all ME and Task queue messages come through here, as well as
459 """all ME and Task queue messages come through here, as well as
460 IOPub traffic."""
460 IOPub traffic."""
461 self.log.debug("monitor traffic: %r"%msg[:2])
461 self.log.debug("monitor traffic: %r"%msg[:2])
462 switch = msg[0]
462 switch = msg[0]
463 try:
463 try:
464 idents, msg = self.session.feed_identities(msg[1:])
464 idents, msg = self.session.feed_identities(msg[1:])
465 except ValueError:
465 except ValueError:
466 idents=[]
466 idents=[]
467 if not idents:
467 if not idents:
468 self.log.error("Bad Monitor Message: %r"%msg)
468 self.log.error("Bad Monitor Message: %r"%msg)
469 return
469 return
470 handler = self.monitor_handlers.get(switch, None)
470 handler = self.monitor_handlers.get(switch, None)
471 if handler is not None:
471 if handler is not None:
472 handler(idents, msg)
472 handler(idents, msg)
473 else:
473 else:
474 self.log.error("Invalid monitor topic: %r"%switch)
474 self.log.error("Invalid monitor topic: %r"%switch)
475
475
476
476
477 def dispatch_query(self, msg):
477 def dispatch_query(self, msg):
478 """Route registration requests and queries from clients."""
478 """Route registration requests and queries from clients."""
479 try:
479 try:
480 idents, msg = self.session.feed_identities(msg)
480 idents, msg = self.session.feed_identities(msg)
481 except ValueError:
481 except ValueError:
482 idents = []
482 idents = []
483 if not idents:
483 if not idents:
484 self.log.error("Bad Query Message: %r"%msg)
484 self.log.error("Bad Query Message: %r"%msg)
485 return
485 return
486 client_id = idents[0]
486 client_id = idents[0]
487 try:
487 try:
488 msg = self.session.unpack_message(msg, content=True)
488 msg = self.session.unpack_message(msg, content=True)
489 except Exception:
489 except Exception:
490 content = error.wrap_exception()
490 content = error.wrap_exception()
491 self.log.error("Bad Query Message: %r"%msg, exc_info=True)
491 self.log.error("Bad Query Message: %r"%msg, exc_info=True)
492 self.session.send(self.query, "hub_error", ident=client_id,
492 self.session.send(self.query, "hub_error", ident=client_id,
493 content=content)
493 content=content)
494 return
494 return
495 # print client_id, header, parent, content
495 # print client_id, header, parent, content
496 #switch on message type:
496 #switch on message type:
497 msg_type = msg['msg_type']
497 msg_type = msg['header']['msg_type']
498 self.log.info("client::client %r requested %r"%(client_id, msg_type))
498 self.log.info("client::client %r requested %r"%(client_id, msg_type))
499 handler = self.query_handlers.get(msg_type, None)
499 handler = self.query_handlers.get(msg_type, None)
500 try:
500 try:
501 assert handler is not None, "Bad Message Type: %r"%msg_type
501 assert handler is not None, "Bad Message Type: %r"%msg_type
502 except:
502 except:
503 content = error.wrap_exception()
503 content = error.wrap_exception()
504 self.log.error("Bad Message Type: %r"%msg_type, exc_info=True)
504 self.log.error("Bad Message Type: %r"%msg_type, exc_info=True)
505 self.session.send(self.query, "hub_error", ident=client_id,
505 self.session.send(self.query, "hub_error", ident=client_id,
506 content=content)
506 content=content)
507 return
507 return
508
508
509 else:
509 else:
510 handler(idents, msg)
510 handler(idents, msg)
511
511
512 def dispatch_db(self, msg):
512 def dispatch_db(self, msg):
513 """"""
513 """"""
514 raise NotImplementedError
514 raise NotImplementedError
515
515
516 #---------------------------------------------------------------------------
516 #---------------------------------------------------------------------------
517 # handler methods (1 per event)
517 # handler methods (1 per event)
518 #---------------------------------------------------------------------------
518 #---------------------------------------------------------------------------
519
519
520 #----------------------- Heartbeat --------------------------------------
520 #----------------------- Heartbeat --------------------------------------
521
521
522 def handle_new_heart(self, heart):
522 def handle_new_heart(self, heart):
523 """handler to attach to heartbeater.
523 """handler to attach to heartbeater.
524 Called when a new heart starts to beat.
524 Called when a new heart starts to beat.
525 Triggers completion of registration."""
525 Triggers completion of registration."""
526 self.log.debug("heartbeat::handle_new_heart(%r)"%heart)
526 self.log.debug("heartbeat::handle_new_heart(%r)"%heart)
527 if heart not in self.incoming_registrations:
527 if heart not in self.incoming_registrations:
528 self.log.info("heartbeat::ignoring new heart: %r"%heart)
528 self.log.info("heartbeat::ignoring new heart: %r"%heart)
529 else:
529 else:
530 self.finish_registration(heart)
530 self.finish_registration(heart)
531
531
532
532
533 def handle_heart_failure(self, heart):
533 def handle_heart_failure(self, heart):
534 """handler to attach to heartbeater.
534 """handler to attach to heartbeater.
535 called when a previously registered heart fails to respond to beat request.
535 called when a previously registered heart fails to respond to beat request.
536 triggers unregistration"""
536 triggers unregistration"""
537 self.log.debug("heartbeat::handle_heart_failure(%r)"%heart)
537 self.log.debug("heartbeat::handle_heart_failure(%r)"%heart)
538 eid = self.hearts.get(heart, None)
538 eid = self.hearts.get(heart, None)
539 queue = self.engines[eid].queue
539 queue = self.engines[eid].queue
540 if eid is None:
540 if eid is None:
541 self.log.info("heartbeat::ignoring heart failure %r"%heart)
541 self.log.info("heartbeat::ignoring heart failure %r"%heart)
542 else:
542 else:
543 self.unregister_engine(heart, dict(content=dict(id=eid, queue=queue)))
543 self.unregister_engine(heart, dict(content=dict(id=eid, queue=queue)))
544
544
545 #----------------------- MUX Queue Traffic ------------------------------
545 #----------------------- MUX Queue Traffic ------------------------------
546
546
547 def save_queue_request(self, idents, msg):
547 def save_queue_request(self, idents, msg):
548 if len(idents) < 2:
548 if len(idents) < 2:
549 self.log.error("invalid identity prefix: %r"%idents)
549 self.log.error("invalid identity prefix: %r"%idents)
550 return
550 return
551 queue_id, client_id = idents[:2]
551 queue_id, client_id = idents[:2]
552 try:
552 try:
553 msg = self.session.unpack_message(msg)
553 msg = self.session.unpack_message(msg)
554 except Exception:
554 except Exception:
555 self.log.error("queue::client %r sent invalid message to %r: %r"%(client_id, queue_id, msg), exc_info=True)
555 self.log.error("queue::client %r sent invalid message to %r: %r"%(client_id, queue_id, msg), exc_info=True)
556 return
556 return
557
557
558 eid = self.by_ident.get(queue_id, None)
558 eid = self.by_ident.get(queue_id, None)
559 if eid is None:
559 if eid is None:
560 self.log.error("queue::target %r not registered"%queue_id)
560 self.log.error("queue::target %r not registered"%queue_id)
561 self.log.debug("queue:: valid are: %r"%(self.by_ident.keys()))
561 self.log.debug("queue:: valid are: %r"%(self.by_ident.keys()))
562 return
562 return
563 record = init_record(msg)
563 record = init_record(msg)
564 msg_id = record['msg_id']
564 msg_id = record['msg_id']
565 # Unicode in records
565 # Unicode in records
566 record['engine_uuid'] = queue_id.decode('ascii')
566 record['engine_uuid'] = queue_id.decode('ascii')
567 record['client_uuid'] = client_id.decode('ascii')
567 record['client_uuid'] = client_id.decode('ascii')
568 record['queue'] = 'mux'
568 record['queue'] = 'mux'
569
569
570 try:
570 try:
571 # it's posible iopub arrived first:
571 # it's posible iopub arrived first:
572 existing = self.db.get_record(msg_id)
572 existing = self.db.get_record(msg_id)
573 for key,evalue in existing.iteritems():
573 for key,evalue in existing.iteritems():
574 rvalue = record.get(key, None)
574 rvalue = record.get(key, None)
575 if evalue and rvalue and evalue != rvalue:
575 if evalue and rvalue and evalue != rvalue:
576 self.log.warn("conflicting initial state for record: %r:%r <%r> %r"%(msg_id, rvalue, key, evalue))
576 self.log.warn("conflicting initial state for record: %r:%r <%r> %r"%(msg_id, rvalue, key, evalue))
577 elif evalue and not rvalue:
577 elif evalue and not rvalue:
578 record[key] = evalue
578 record[key] = evalue
579 try:
579 try:
580 self.db.update_record(msg_id, record)
580 self.db.update_record(msg_id, record)
581 except Exception:
581 except Exception:
582 self.log.error("DB Error updating record %r"%msg_id, exc_info=True)
582 self.log.error("DB Error updating record %r"%msg_id, exc_info=True)
583 except KeyError:
583 except KeyError:
584 try:
584 try:
585 self.db.add_record(msg_id, record)
585 self.db.add_record(msg_id, record)
586 except Exception:
586 except Exception:
587 self.log.error("DB Error adding record %r"%msg_id, exc_info=True)
587 self.log.error("DB Error adding record %r"%msg_id, exc_info=True)
588
588
589
589
590 self.pending.add(msg_id)
590 self.pending.add(msg_id)
591 self.queues[eid].append(msg_id)
591 self.queues[eid].append(msg_id)
592
592
593 def save_queue_result(self, idents, msg):
593 def save_queue_result(self, idents, msg):
594 if len(idents) < 2:
594 if len(idents) < 2:
595 self.log.error("invalid identity prefix: %r"%idents)
595 self.log.error("invalid identity prefix: %r"%idents)
596 return
596 return
597
597
598 client_id, queue_id = idents[:2]
598 client_id, queue_id = idents[:2]
599 try:
599 try:
600 msg = self.session.unpack_message(msg)
600 msg = self.session.unpack_message(msg)
601 except Exception:
601 except Exception:
602 self.log.error("queue::engine %r sent invalid message to %r: %r"%(
602 self.log.error("queue::engine %r sent invalid message to %r: %r"%(
603 queue_id,client_id, msg), exc_info=True)
603 queue_id,client_id, msg), exc_info=True)
604 return
604 return
605
605
606 eid = self.by_ident.get(queue_id, None)
606 eid = self.by_ident.get(queue_id, None)
607 if eid is None:
607 if eid is None:
608 self.log.error("queue::unknown engine %r is sending a reply: "%queue_id)
608 self.log.error("queue::unknown engine %r is sending a reply: "%queue_id)
609 return
609 return
610
610
611 parent = msg['parent_header']
611 parent = msg['parent_header']
612 if not parent:
612 if not parent:
613 return
613 return
614 msg_id = parent['msg_id']
614 msg_id = parent['msg_id']
615 if msg_id in self.pending:
615 if msg_id in self.pending:
616 self.pending.remove(msg_id)
616 self.pending.remove(msg_id)
617 self.all_completed.add(msg_id)
617 self.all_completed.add(msg_id)
618 self.queues[eid].remove(msg_id)
618 self.queues[eid].remove(msg_id)
619 self.completed[eid].append(msg_id)
619 self.completed[eid].append(msg_id)
620 elif msg_id not in self.all_completed:
620 elif msg_id not in self.all_completed:
621 # it could be a result from a dead engine that died before delivering the
621 # it could be a result from a dead engine that died before delivering the
622 # result
622 # result
623 self.log.warn("queue:: unknown msg finished %r"%msg_id)
623 self.log.warn("queue:: unknown msg finished %r"%msg_id)
624 return
624 return
625 # update record anyway, because the unregistration could have been premature
625 # update record anyway, because the unregistration could have been premature
626 rheader = msg['header']
626 rheader = msg['header']
627 completed = rheader['date']
627 completed = rheader['date']
628 started = rheader.get('started', None)
628 started = rheader.get('started', None)
629 result = {
629 result = {
630 'result_header' : rheader,
630 'result_header' : rheader,
631 'result_content': msg['content'],
631 'result_content': msg['content'],
632 'started' : started,
632 'started' : started,
633 'completed' : completed
633 'completed' : completed
634 }
634 }
635
635
636 result['result_buffers'] = msg['buffers']
636 result['result_buffers'] = msg['buffers']
637 try:
637 try:
638 self.db.update_record(msg_id, result)
638 self.db.update_record(msg_id, result)
639 except Exception:
639 except Exception:
640 self.log.error("DB Error updating record %r"%msg_id, exc_info=True)
640 self.log.error("DB Error updating record %r"%msg_id, exc_info=True)
641
641
642
642
643 #--------------------- Task Queue Traffic ------------------------------
643 #--------------------- Task Queue Traffic ------------------------------
644
644
645 def save_task_request(self, idents, msg):
645 def save_task_request(self, idents, msg):
646 """Save the submission of a task."""
646 """Save the submission of a task."""
647 client_id = idents[0]
647 client_id = idents[0]
648
648
649 try:
649 try:
650 msg = self.session.unpack_message(msg)
650 msg = self.session.unpack_message(msg)
651 except Exception:
651 except Exception:
652 self.log.error("task::client %r sent invalid task message: %r"%(
652 self.log.error("task::client %r sent invalid task message: %r"%(
653 client_id, msg), exc_info=True)
653 client_id, msg), exc_info=True)
654 return
654 return
655 record = init_record(msg)
655 record = init_record(msg)
656
656
657 record['client_uuid'] = client_id
657 record['client_uuid'] = client_id
658 record['queue'] = 'task'
658 record['queue'] = 'task'
659 header = msg['header']
659 header = msg['header']
660 msg_id = header['msg_id']
660 msg_id = header['msg_id']
661 self.pending.add(msg_id)
661 self.pending.add(msg_id)
662 self.unassigned.add(msg_id)
662 self.unassigned.add(msg_id)
663 try:
663 try:
664 # it's posible iopub arrived first:
664 # it's posible iopub arrived first:
665 existing = self.db.get_record(msg_id)
665 existing = self.db.get_record(msg_id)
666 if existing['resubmitted']:
666 if existing['resubmitted']:
667 for key in ('submitted', 'client_uuid', 'buffers'):
667 for key in ('submitted', 'client_uuid', 'buffers'):
668 # don't clobber these keys on resubmit
668 # don't clobber these keys on resubmit
669 # submitted and client_uuid should be different
669 # submitted and client_uuid should be different
670 # and buffers might be big, and shouldn't have changed
670 # and buffers might be big, and shouldn't have changed
671 record.pop(key)
671 record.pop(key)
672 # still check content,header which should not change
672 # still check content,header which should not change
673 # but are not expensive to compare as buffers
673 # but are not expensive to compare as buffers
674
674
675 for key,evalue in existing.iteritems():
675 for key,evalue in existing.iteritems():
676 if key.endswith('buffers'):
676 if key.endswith('buffers'):
677 # don't compare buffers
677 # don't compare buffers
678 continue
678 continue
679 rvalue = record.get(key, None)
679 rvalue = record.get(key, None)
680 if evalue and rvalue and evalue != rvalue:
680 if evalue and rvalue and evalue != rvalue:
681 self.log.warn("conflicting initial state for record: %r:%r <%r> %r"%(msg_id, rvalue, key, evalue))
681 self.log.warn("conflicting initial state for record: %r:%r <%r> %r"%(msg_id, rvalue, key, evalue))
682 elif evalue and not rvalue:
682 elif evalue and not rvalue:
683 record[key] = evalue
683 record[key] = evalue
684 try:
684 try:
685 self.db.update_record(msg_id, record)
685 self.db.update_record(msg_id, record)
686 except Exception:
686 except Exception:
687 self.log.error("DB Error updating record %r"%msg_id, exc_info=True)
687 self.log.error("DB Error updating record %r"%msg_id, exc_info=True)
688 except KeyError:
688 except KeyError:
689 try:
689 try:
690 self.db.add_record(msg_id, record)
690 self.db.add_record(msg_id, record)
691 except Exception:
691 except Exception:
692 self.log.error("DB Error adding record %r"%msg_id, exc_info=True)
692 self.log.error("DB Error adding record %r"%msg_id, exc_info=True)
693 except Exception:
693 except Exception:
694 self.log.error("DB Error saving task request %r"%msg_id, exc_info=True)
694 self.log.error("DB Error saving task request %r"%msg_id, exc_info=True)
695
695
696 def save_task_result(self, idents, msg):
696 def save_task_result(self, idents, msg):
697 """save the result of a completed task."""
697 """save the result of a completed task."""
698 client_id = idents[0]
698 client_id = idents[0]
699 try:
699 try:
700 msg = self.session.unpack_message(msg)
700 msg = self.session.unpack_message(msg)
701 except Exception:
701 except Exception:
702 self.log.error("task::invalid task result message send to %r: %r"%(
702 self.log.error("task::invalid task result message send to %r: %r"%(
703 client_id, msg), exc_info=True)
703 client_id, msg), exc_info=True)
704 return
704 return
705
705
706 parent = msg['parent_header']
706 parent = msg['parent_header']
707 if not parent:
707 if not parent:
708 # print msg
708 # print msg
709 self.log.warn("Task %r had no parent!"%msg)
709 self.log.warn("Task %r had no parent!"%msg)
710 return
710 return
711 msg_id = parent['msg_id']
711 msg_id = parent['msg_id']
712 if msg_id in self.unassigned:
712 if msg_id in self.unassigned:
713 self.unassigned.remove(msg_id)
713 self.unassigned.remove(msg_id)
714
714
715 header = msg['header']
715 header = msg['header']
716 engine_uuid = header.get('engine', None)
716 engine_uuid = header.get('engine', None)
717 eid = self.by_ident.get(engine_uuid, None)
717 eid = self.by_ident.get(engine_uuid, None)
718
718
719 if msg_id in self.pending:
719 if msg_id in self.pending:
720 self.pending.remove(msg_id)
720 self.pending.remove(msg_id)
721 self.all_completed.add(msg_id)
721 self.all_completed.add(msg_id)
722 if eid is not None:
722 if eid is not None:
723 self.completed[eid].append(msg_id)
723 self.completed[eid].append(msg_id)
724 if msg_id in self.tasks[eid]:
724 if msg_id in self.tasks[eid]:
725 self.tasks[eid].remove(msg_id)
725 self.tasks[eid].remove(msg_id)
726 completed = header['date']
726 completed = header['date']
727 started = header.get('started', None)
727 started = header.get('started', None)
728 result = {
728 result = {
729 'result_header' : header,
729 'result_header' : header,
730 'result_content': msg['content'],
730 'result_content': msg['content'],
731 'started' : started,
731 'started' : started,
732 'completed' : completed,
732 'completed' : completed,
733 'engine_uuid': engine_uuid
733 'engine_uuid': engine_uuid
734 }
734 }
735
735
736 result['result_buffers'] = msg['buffers']
736 result['result_buffers'] = msg['buffers']
737 try:
737 try:
738 self.db.update_record(msg_id, result)
738 self.db.update_record(msg_id, result)
739 except Exception:
739 except Exception:
740 self.log.error("DB Error saving task request %r"%msg_id, exc_info=True)
740 self.log.error("DB Error saving task request %r"%msg_id, exc_info=True)
741
741
742 else:
742 else:
743 self.log.debug("task::unknown task %r finished"%msg_id)
743 self.log.debug("task::unknown task %r finished"%msg_id)
744
744
745 def save_task_destination(self, idents, msg):
745 def save_task_destination(self, idents, msg):
746 try:
746 try:
747 msg = self.session.unpack_message(msg, content=True)
747 msg = self.session.unpack_message(msg, content=True)
748 except Exception:
748 except Exception:
749 self.log.error("task::invalid task tracking message", exc_info=True)
749 self.log.error("task::invalid task tracking message", exc_info=True)
750 return
750 return
751 content = msg['content']
751 content = msg['content']
752 # print (content)
752 # print (content)
753 msg_id = content['msg_id']
753 msg_id = content['msg_id']
754 engine_uuid = content['engine_id']
754 engine_uuid = content['engine_id']
755 eid = self.by_ident[util.asbytes(engine_uuid)]
755 eid = self.by_ident[util.asbytes(engine_uuid)]
756
756
757 self.log.info("task::task %r arrived on %r"%(msg_id, eid))
757 self.log.info("task::task %r arrived on %r"%(msg_id, eid))
758 if msg_id in self.unassigned:
758 if msg_id in self.unassigned:
759 self.unassigned.remove(msg_id)
759 self.unassigned.remove(msg_id)
760 # else:
760 # else:
761 # self.log.debug("task::task %r not listed as MIA?!"%(msg_id))
761 # self.log.debug("task::task %r not listed as MIA?!"%(msg_id))
762
762
763 self.tasks[eid].append(msg_id)
763 self.tasks[eid].append(msg_id)
764 # self.pending[msg_id][1].update(received=datetime.now(),engine=(eid,engine_uuid))
764 # self.pending[msg_id][1].update(received=datetime.now(),engine=(eid,engine_uuid))
765 try:
765 try:
766 self.db.update_record(msg_id, dict(engine_uuid=engine_uuid))
766 self.db.update_record(msg_id, dict(engine_uuid=engine_uuid))
767 except Exception:
767 except Exception:
768 self.log.error("DB Error saving task destination %r"%msg_id, exc_info=True)
768 self.log.error("DB Error saving task destination %r"%msg_id, exc_info=True)
769
769
770
770
771 def mia_task_request(self, idents, msg):
771 def mia_task_request(self, idents, msg):
772 raise NotImplementedError
772 raise NotImplementedError
773 client_id = idents[0]
773 client_id = idents[0]
774 # content = dict(mia=self.mia,status='ok')
774 # content = dict(mia=self.mia,status='ok')
775 # self.session.send('mia_reply', content=content, idents=client_id)
775 # self.session.send('mia_reply', content=content, idents=client_id)
776
776
777
777
778 #--------------------- IOPub Traffic ------------------------------
778 #--------------------- IOPub Traffic ------------------------------
779
779
780 def save_iopub_message(self, topics, msg):
780 def save_iopub_message(self, topics, msg):
781 """save an iopub message into the db"""
781 """save an iopub message into the db"""
782 # print (topics)
782 # print (topics)
783 try:
783 try:
784 msg = self.session.unpack_message(msg, content=True)
784 msg = self.session.unpack_message(msg, content=True)
785 except Exception:
785 except Exception:
786 self.log.error("iopub::invalid IOPub message", exc_info=True)
786 self.log.error("iopub::invalid IOPub message", exc_info=True)
787 return
787 return
788
788
789 parent = msg['parent_header']
789 parent = msg['parent_header']
790 if not parent:
790 if not parent:
791 self.log.error("iopub::invalid IOPub message: %r"%msg)
791 self.log.error("iopub::invalid IOPub message: %r"%msg)
792 return
792 return
793 msg_id = parent['msg_id']
793 msg_id = parent['msg_id']
794 msg_type = msg['msg_type']
794 msg_type = msg['header']['msg_type']
795 content = msg['content']
795 content = msg['content']
796
796
797 # ensure msg_id is in db
797 # ensure msg_id is in db
798 try:
798 try:
799 rec = self.db.get_record(msg_id)
799 rec = self.db.get_record(msg_id)
800 except KeyError:
800 except KeyError:
801 rec = empty_record()
801 rec = empty_record()
802 rec['msg_id'] = msg_id
802 rec['msg_id'] = msg_id
803 self.db.add_record(msg_id, rec)
803 self.db.add_record(msg_id, rec)
804 # stream
804 # stream
805 d = {}
805 d = {}
806 if msg_type == 'stream':
806 if msg_type == 'stream':
807 name = content['name']
807 name = content['name']
808 s = rec[name] or ''
808 s = rec[name] or ''
809 d[name] = s + content['data']
809 d[name] = s + content['data']
810
810
811 elif msg_type == 'pyerr':
811 elif msg_type == 'pyerr':
812 d['pyerr'] = content
812 d['pyerr'] = content
813 elif msg_type == 'pyin':
813 elif msg_type == 'pyin':
814 d['pyin'] = content['code']
814 d['pyin'] = content['code']
815 else:
815 else:
816 d[msg_type] = content.get('data', '')
816 d[msg_type] = content.get('data', '')
817
817
818 try:
818 try:
819 self.db.update_record(msg_id, d)
819 self.db.update_record(msg_id, d)
820 except Exception:
820 except Exception:
821 self.log.error("DB Error saving iopub message %r"%msg_id, exc_info=True)
821 self.log.error("DB Error saving iopub message %r"%msg_id, exc_info=True)
822
822
823
823
824
824
825 #-------------------------------------------------------------------------
825 #-------------------------------------------------------------------------
826 # Registration requests
826 # Registration requests
827 #-------------------------------------------------------------------------
827 #-------------------------------------------------------------------------
828
828
829 def connection_request(self, client_id, msg):
829 def connection_request(self, client_id, msg):
830 """Reply with connection addresses for clients."""
830 """Reply with connection addresses for clients."""
831 self.log.info("client::client %r connected"%client_id)
831 self.log.info("client::client %r connected"%client_id)
832 content = dict(status='ok')
832 content = dict(status='ok')
833 content.update(self.client_info)
833 content.update(self.client_info)
834 jsonable = {}
834 jsonable = {}
835 for k,v in self.keytable.iteritems():
835 for k,v in self.keytable.iteritems():
836 if v not in self.dead_engines:
836 if v not in self.dead_engines:
837 jsonable[str(k)] = v.decode('ascii')
837 jsonable[str(k)] = v.decode('ascii')
838 content['engines'] = jsonable
838 content['engines'] = jsonable
839 self.session.send(self.query, 'connection_reply', content, parent=msg, ident=client_id)
839 self.session.send(self.query, 'connection_reply', content, parent=msg, ident=client_id)
840
840
    def register_engine(self, reg, msg):
        """Register a new engine.

        Validates that the requested queue id and heartbeat id are not
        already in use (registered or pending), then replies on the query
        channel with either an ok + assigned engine id, or a wrapped error.
        Registration completes in finish_registration() once the engine's
        heart is seen beating.
        """
        content = msg['content']
        try:
            queue = util.asbytes(content['queue'])
        except KeyError:
            self.log.error("registration::queue not specified", exc_info=True)
            return
        heart = content.get('heartbeat', None)
        if heart:
            heart = util.asbytes(heart)
        # NOTE(review): stray string literal below is a no-op statement,
        # not a docstring
        """register a new engine, and create the socket(s) necessary"""
        eid = self._next_id
        # print (eid, queue, reg, heart)

        self.log.debug("registration::register_engine(%i, %r, %r, %r)"%(eid, queue, reg, heart))

        content = dict(id=eid,status='ok')
        content.update(self.engine_info)
        # check if requesting available IDs:
        if queue in self.by_ident:
            # raise-then-catch so error.wrap_exception can capture the
            # exception as reply content
            try:
                raise KeyError("queue_id %r in use"%queue)
            except:
                content = error.wrap_exception()
                self.log.error("queue_id %r in use"%queue, exc_info=True)
        elif heart in self.hearts: # need to check unique hearts?
            try:
                raise KeyError("heart_id %r in use"%heart)
            except:
                self.log.error("heart_id %r in use"%heart, exc_info=True)
                content = error.wrap_exception()
        else:
            # also check registrations still in flight
            for h, pack in self.incoming_registrations.iteritems():
                if heart == h:
                    try:
                        raise KeyError("heart_id %r in use"%heart)
                    except:
                        self.log.error("heart_id %r in use"%heart, exc_info=True)
                        content = error.wrap_exception()
                    break
                elif queue == pack[1]:
                    try:
                        raise KeyError("queue_id %r in use"%queue)
                    except:
                        self.log.error("queue_id %r in use"%queue, exc_info=True)
                        content = error.wrap_exception()
                    break

        msg = self.session.send(self.query, "registration_reply",
                content=content,
                ident=reg)

        if content['status'] == 'ok':
            if heart in self.heartmonitor.hearts:
                # already beating
                self.incoming_registrations[heart] = (eid,queue,reg[0],None)
                self.finish_registration(heart)
            else:
                # wait for the first beat; purge the registration if it
                # never arrives within the timeout
                purge = lambda : self._purge_stalled_registration(heart)
                dc = ioloop.DelayedCallback(purge, self.registration_timeout, self.loop)
                dc.start()
                self.incoming_registrations[heart] = (eid,queue,reg[0],dc)
        else:
            self.log.error("registration::registration %i failed: %r"%(eid, content['evalue']))
        return eid
907
907
    def unregister_engine(self, ident, msg):
        """Unregister an engine that explicitly requested to leave.

        The engine is only marked dead here; messages still outstanding on
        it are failed later by _handle_stranded_msgs after a timeout, and
        other clients are notified on the notifier channel.
        """
        try:
            eid = msg['content']['id']
        except:
            self.log.error("registration::bad engine id for unregistration: %r"%ident, exc_info=True)
            return
        self.log.info("registration::unregister_engine(%r)"%eid)
        # print (eid)
        uuid = self.keytable[eid]
        content=dict(id=eid, queue=uuid.decode('ascii'))
        # mark dead instead of removing state immediately; the commented
        # cleanup below is deliberately deferred (see TODO banner)
        self.dead_engines.add(uuid)
        # self.ids.remove(eid)
        # uuid = self.keytable.pop(eid)
        #
        # ec = self.engines.pop(eid)
        # self.hearts.pop(ec.heartbeat)
        # self.by_ident.pop(ec.queue)
        # self.completed.pop(eid)
        handleit = lambda : self._handle_stranded_msgs(eid, uuid)
        dc = ioloop.DelayedCallback(handleit, self.registration_timeout, self.loop)
        dc.start()
        ############## TODO: HANDLE IT ################

        if self.notifier:
            self.session.send(self.notifier, "unregistration_notification", content=content)
934
934
    def _handle_stranded_msgs(self, eid, uuid):
        """Handle messages known to be on an engine when the engine unregisters.

        It is possible that this will fire prematurely - that is, an engine will
        go down after completing a result, and the client will be notified
        that the result failed and later receive the actual result.
        """

        outstanding = self.queues[eid]

        for msg_id in outstanding:
            # mark each stranded message as completed-with-error
            self.pending.remove(msg_id)
            self.all_completed.add(msg_id)
            # raise-then-catch so error.wrap_exception can capture the
            # EngineError as the result content
            try:
                raise error.EngineError("Engine %r died while running task %r"%(eid, msg_id))
            except:
                content = error.wrap_exception()
            # build a fake header:
            header = {}
            header['engine'] = uuid
            header['date'] = datetime.now()
            rec = dict(result_content=content, result_header=header, result_buffers=[])
            rec['completed'] = header['date']
            rec['engine_uuid'] = uuid
            try:
                self.db.update_record(msg_id, rec)
            except Exception:
                self.log.error("DB Error handling stranded msg %r"%msg_id, exc_info=True)
963
963
964
964
    def finish_registration(self, heart):
        """Second half of engine registration, called after our HeartMonitor
        has received a beat from the Engine's Heart.

        Promotes the pending registration for *heart* into a live engine:
        populates the id/key/queue/task/completed tables and notifies
        clients on the notifier channel.
        """
        try:
            (eid,queue,reg,purge) = self.incoming_registrations.pop(heart)
        except KeyError:
            self.log.error("registration::tried to finish nonexistant registration", exc_info=True)
            return
        self.log.info("registration::finished registering engine %i:%r"%(eid,queue))
        if purge is not None:
            # cancel the stalled-registration timeout; it's no longer needed
            purge.stop()
        control = queue
        self.ids.add(eid)
        self.keytable[eid] = queue
        self.engines[eid] = EngineConnector(id=eid, queue=queue, registration=reg,
                control=control, heartbeat=heart)
        self.by_ident[queue] = eid
        self.queues[eid] = list()
        self.tasks[eid] = list()
        self.completed[eid] = list()
        self.hearts[heart] = eid
        content = dict(id=eid, queue=self.engines[eid].queue.decode('ascii'))
        if self.notifier:
            self.session.send(self.notifier, "registration_notification", content=content)
        self.log.info("engine::Engine Connected: %i"%eid)
990
990
991 def _purge_stalled_registration(self, heart):
991 def _purge_stalled_registration(self, heart):
992 if heart in self.incoming_registrations:
992 if heart in self.incoming_registrations:
993 eid = self.incoming_registrations.pop(heart)[0]
993 eid = self.incoming_registrations.pop(heart)[0]
994 self.log.info("registration::purging stalled registration: %i"%eid)
994 self.log.info("registration::purging stalled registration: %i"%eid)
995 else:
995 else:
996 pass
996 pass
997
997
998 #-------------------------------------------------------------------------
998 #-------------------------------------------------------------------------
999 # Client Requests
999 # Client Requests
1000 #-------------------------------------------------------------------------
1000 #-------------------------------------------------------------------------
1001
1001
1002 def shutdown_request(self, client_id, msg):
1002 def shutdown_request(self, client_id, msg):
1003 """handle shutdown request."""
1003 """handle shutdown request."""
1004 self.session.send(self.query, 'shutdown_reply', content={'status': 'ok'}, ident=client_id)
1004 self.session.send(self.query, 'shutdown_reply', content={'status': 'ok'}, ident=client_id)
1005 # also notify other clients of shutdown
1005 # also notify other clients of shutdown
1006 self.session.send(self.notifier, 'shutdown_notice', content={'status': 'ok'})
1006 self.session.send(self.notifier, 'shutdown_notice', content={'status': 'ok'})
1007 dc = ioloop.DelayedCallback(lambda : self._shutdown(), 1000, self.loop)
1007 dc = ioloop.DelayedCallback(lambda : self._shutdown(), 1000, self.loop)
1008 dc.start()
1008 dc.start()
1009
1009
    def _shutdown(self):
        """Terminate the Hub process.

        Sleeps briefly so the final log/notification messages can flush,
        then exits the process.
        """
        self.log.info("hub::hub shutting down.")
        time.sleep(0.1)
        sys.exit(0)
1014
1014
1015
1015
1016 def check_load(self, client_id, msg):
1016 def check_load(self, client_id, msg):
1017 content = msg['content']
1017 content = msg['content']
1018 try:
1018 try:
1019 targets = content['targets']
1019 targets = content['targets']
1020 targets = self._validate_targets(targets)
1020 targets = self._validate_targets(targets)
1021 except:
1021 except:
1022 content = error.wrap_exception()
1022 content = error.wrap_exception()
1023 self.session.send(self.query, "hub_error",
1023 self.session.send(self.query, "hub_error",
1024 content=content, ident=client_id)
1024 content=content, ident=client_id)
1025 return
1025 return
1026
1026
1027 content = dict(status='ok')
1027 content = dict(status='ok')
1028 # loads = {}
1028 # loads = {}
1029 for t in targets:
1029 for t in targets:
1030 content[bytes(t)] = len(self.queues[t])+len(self.tasks[t])
1030 content[bytes(t)] = len(self.queues[t])+len(self.tasks[t])
1031 self.session.send(self.query, "load_reply", content=content, ident=client_id)
1031 self.session.send(self.query, "load_reply", content=content, ident=client_id)
1032
1032
1033
1033
1034 def queue_status(self, client_id, msg):
1034 def queue_status(self, client_id, msg):
1035 """Return the Queue status of one or more targets.
1035 """Return the Queue status of one or more targets.
1036 if verbose: return the msg_ids
1036 if verbose: return the msg_ids
1037 else: return len of each type.
1037 else: return len of each type.
1038 keys: queue (pending MUX jobs)
1038 keys: queue (pending MUX jobs)
1039 tasks (pending Task jobs)
1039 tasks (pending Task jobs)
1040 completed (finished jobs from both queues)"""
1040 completed (finished jobs from both queues)"""
1041 content = msg['content']
1041 content = msg['content']
1042 targets = content['targets']
1042 targets = content['targets']
1043 try:
1043 try:
1044 targets = self._validate_targets(targets)
1044 targets = self._validate_targets(targets)
1045 except:
1045 except:
1046 content = error.wrap_exception()
1046 content = error.wrap_exception()
1047 self.session.send(self.query, "hub_error",
1047 self.session.send(self.query, "hub_error",
1048 content=content, ident=client_id)
1048 content=content, ident=client_id)
1049 return
1049 return
1050 verbose = content.get('verbose', False)
1050 verbose = content.get('verbose', False)
1051 content = dict(status='ok')
1051 content = dict(status='ok')
1052 for t in targets:
1052 for t in targets:
1053 queue = self.queues[t]
1053 queue = self.queues[t]
1054 completed = self.completed[t]
1054 completed = self.completed[t]
1055 tasks = self.tasks[t]
1055 tasks = self.tasks[t]
1056 if not verbose:
1056 if not verbose:
1057 queue = len(queue)
1057 queue = len(queue)
1058 completed = len(completed)
1058 completed = len(completed)
1059 tasks = len(tasks)
1059 tasks = len(tasks)
1060 content[str(t)] = {'queue': queue, 'completed': completed , 'tasks': tasks}
1060 content[str(t)] = {'queue': queue, 'completed': completed , 'tasks': tasks}
1061 content['unassigned'] = list(self.unassigned) if verbose else len(self.unassigned)
1061 content['unassigned'] = list(self.unassigned) if verbose else len(self.unassigned)
1062 # print (content)
1062 # print (content)
1063 self.session.send(self.query, "queue_reply", content=content, ident=client_id)
1063 self.session.send(self.query, "queue_reply", content=content, ident=client_id)
1064
1064
    def purge_results(self, client_id, msg):
        """Purge results from memory. This method is more valuable before we move
        to a DB based message storage mechanism."""
        content = msg['content']
        self.log.info("Dropping records with %s", content)
        # msg_ids may be a list of ids, or the literal string 'all'
        msg_ids = content.get('msg_ids', [])
        reply = dict(status='ok')
        if msg_ids == 'all':
            # drop every finished record (completed timestamp is set)
            try:
                self.db.drop_matching_records(dict(completed={'$ne':None}))
            except Exception:
                reply = error.wrap_exception()
        else:
            # refuse to purge anything that is still in flight
            pending = filter(lambda m: m in self.pending, msg_ids)
            if pending:
                # raise-then-wrap so the client gets a real traceback
                try:
                    raise IndexError("msg pending: %r"%pending[0])
                except:
                    reply = error.wrap_exception()
            else:
                try:
                    self.db.drop_matching_records(dict(msg_id={'$in':msg_ids}))
                except Exception:
                    reply = error.wrap_exception()

        if reply['status'] == 'ok':
            # optionally also purge all completed records of whole engines
            eids = content.get('engine_ids', [])
            for eid in eids:
                if eid not in self.engines:
                    try:
                        raise IndexError("No such engine: %i"%eid)
                    except:
                        reply = error.wrap_exception()
                    break
                # engines are keyed in the DB by their queue uuid, not eid
                uid = self.engines[eid].queue
                try:
                    self.db.drop_matching_records(dict(engine_uuid=uid, completed={'$ne':None}))
                except Exception:
                    reply = error.wrap_exception()
                    break

        # reply is either {'status': 'ok'} or a wrapped exception dict
        self.session.send(self.query, 'purge_reply', content=reply, ident=client_id)
1107
1107
1108 def resubmit_task(self, client_id, msg):
1108 def resubmit_task(self, client_id, msg):
1109 """Resubmit one or more tasks."""
1109 """Resubmit one or more tasks."""
1110 def finish(reply):
1110 def finish(reply):
1111 self.session.send(self.query, 'resubmit_reply', content=reply, ident=client_id)
1111 self.session.send(self.query, 'resubmit_reply', content=reply, ident=client_id)
1112
1112
1113 content = msg['content']
1113 content = msg['content']
1114 msg_ids = content['msg_ids']
1114 msg_ids = content['msg_ids']
1115 reply = dict(status='ok')
1115 reply = dict(status='ok')
1116 try:
1116 try:
1117 records = self.db.find_records({'msg_id' : {'$in' : msg_ids}}, keys=[
1117 records = self.db.find_records({'msg_id' : {'$in' : msg_ids}}, keys=[
1118 'header', 'content', 'buffers'])
1118 'header', 'content', 'buffers'])
1119 except Exception:
1119 except Exception:
1120 self.log.error('db::db error finding tasks to resubmit', exc_info=True)
1120 self.log.error('db::db error finding tasks to resubmit', exc_info=True)
1121 return finish(error.wrap_exception())
1121 return finish(error.wrap_exception())
1122
1122
1123 # validate msg_ids
1123 # validate msg_ids
1124 found_ids = [ rec['msg_id'] for rec in records ]
1124 found_ids = [ rec['msg_id'] for rec in records ]
1125 invalid_ids = filter(lambda m: m in self.pending, found_ids)
1125 invalid_ids = filter(lambda m: m in self.pending, found_ids)
1126 if len(records) > len(msg_ids):
1126 if len(records) > len(msg_ids):
1127 try:
1127 try:
1128 raise RuntimeError("DB appears to be in an inconsistent state."
1128 raise RuntimeError("DB appears to be in an inconsistent state."
1129 "More matching records were found than should exist")
1129 "More matching records were found than should exist")
1130 except Exception:
1130 except Exception:
1131 return finish(error.wrap_exception())
1131 return finish(error.wrap_exception())
1132 elif len(records) < len(msg_ids):
1132 elif len(records) < len(msg_ids):
1133 missing = [ m for m in msg_ids if m not in found_ids ]
1133 missing = [ m for m in msg_ids if m not in found_ids ]
1134 try:
1134 try:
1135 raise KeyError("No such msg(s): %r"%missing)
1135 raise KeyError("No such msg(s): %r"%missing)
1136 except KeyError:
1136 except KeyError:
1137 return finish(error.wrap_exception())
1137 return finish(error.wrap_exception())
1138 elif invalid_ids:
1138 elif invalid_ids:
1139 msg_id = invalid_ids[0]
1139 msg_id = invalid_ids[0]
1140 try:
1140 try:
1141 raise ValueError("Task %r appears to be inflight"%(msg_id))
1141 raise ValueError("Task %r appears to be inflight"%(msg_id))
1142 except Exception:
1142 except Exception:
1143 return finish(error.wrap_exception())
1143 return finish(error.wrap_exception())
1144
1144
1145 # clear the existing records
1145 # clear the existing records
1146 now = datetime.now()
1146 now = datetime.now()
1147 rec = empty_record()
1147 rec = empty_record()
1148 map(rec.pop, ['msg_id', 'header', 'content', 'buffers', 'submitted'])
1148 map(rec.pop, ['msg_id', 'header', 'content', 'buffers', 'submitted'])
1149 rec['resubmitted'] = now
1149 rec['resubmitted'] = now
1150 rec['queue'] = 'task'
1150 rec['queue'] = 'task'
1151 rec['client_uuid'] = client_id[0]
1151 rec['client_uuid'] = client_id[0]
1152 try:
1152 try:
1153 for msg_id in msg_ids:
1153 for msg_id in msg_ids:
1154 self.all_completed.discard(msg_id)
1154 self.all_completed.discard(msg_id)
1155 self.db.update_record(msg_id, rec)
1155 self.db.update_record(msg_id, rec)
1156 except Exception:
1156 except Exception:
1157 self.log.error('db::db error upating record', exc_info=True)
1157 self.log.error('db::db error upating record', exc_info=True)
1158 reply = error.wrap_exception()
1158 reply = error.wrap_exception()
1159 else:
1159 else:
1160 # send the messages
1160 # send the messages
1161 for rec in records:
1161 for rec in records:
1162 header = rec['header']
1162 header = rec['header']
1163 # include resubmitted in header to prevent digest collision
1163 # include resubmitted in header to prevent digest collision
1164 header['resubmitted'] = now
1164 header['resubmitted'] = now
1165 msg = self.session.msg(header['msg_type'])
1165 msg = self.session.msg(header['msg_type'])
1166 msg['content'] = rec['content']
1166 msg['content'] = rec['content']
1167 msg['header'] = header
1167 msg['header'] = header
1168 msg['msg_id'] = rec['msg_id']
1168 msg['msg_id'] = rec['msg_id']
1169 self.session.send(self.resubmit, msg, buffers=rec['buffers'])
1169 self.session.send(self.resubmit, msg, buffers=rec['buffers'])
1170
1170
1171 finish(dict(status='ok'))
1171 finish(dict(status='ok'))
1172
1172
1173
1173
1174 def _extract_record(self, rec):
1174 def _extract_record(self, rec):
1175 """decompose a TaskRecord dict into subsection of reply for get_result"""
1175 """decompose a TaskRecord dict into subsection of reply for get_result"""
1176 io_dict = {}
1176 io_dict = {}
1177 for key in 'pyin pyout pyerr stdout stderr'.split():
1177 for key in 'pyin pyout pyerr stdout stderr'.split():
1178 io_dict[key] = rec[key]
1178 io_dict[key] = rec[key]
1179 content = { 'result_content': rec['result_content'],
1179 content = { 'result_content': rec['result_content'],
1180 'header': rec['header'],
1180 'header': rec['header'],
1181 'result_header' : rec['result_header'],
1181 'result_header' : rec['result_header'],
1182 'io' : io_dict,
1182 'io' : io_dict,
1183 }
1183 }
1184 if rec['result_buffers']:
1184 if rec['result_buffers']:
1185 buffers = map(bytes, rec['result_buffers'])
1185 buffers = map(bytes, rec['result_buffers'])
1186 else:
1186 else:
1187 buffers = []
1187 buffers = []
1188
1188
1189 return content, buffers
1189 return content, buffers
1190
1190
1191 def get_results(self, client_id, msg):
1191 def get_results(self, client_id, msg):
1192 """Get the result of 1 or more messages."""
1192 """Get the result of 1 or more messages."""
1193 content = msg['content']
1193 content = msg['content']
1194 msg_ids = sorted(set(content['msg_ids']))
1194 msg_ids = sorted(set(content['msg_ids']))
1195 statusonly = content.get('status_only', False)
1195 statusonly = content.get('status_only', False)
1196 pending = []
1196 pending = []
1197 completed = []
1197 completed = []
1198 content = dict(status='ok')
1198 content = dict(status='ok')
1199 content['pending'] = pending
1199 content['pending'] = pending
1200 content['completed'] = completed
1200 content['completed'] = completed
1201 buffers = []
1201 buffers = []
1202 if not statusonly:
1202 if not statusonly:
1203 try:
1203 try:
1204 matches = self.db.find_records(dict(msg_id={'$in':msg_ids}))
1204 matches = self.db.find_records(dict(msg_id={'$in':msg_ids}))
1205 # turn match list into dict, for faster lookup
1205 # turn match list into dict, for faster lookup
1206 records = {}
1206 records = {}
1207 for rec in matches:
1207 for rec in matches:
1208 records[rec['msg_id']] = rec
1208 records[rec['msg_id']] = rec
1209 except Exception:
1209 except Exception:
1210 content = error.wrap_exception()
1210 content = error.wrap_exception()
1211 self.session.send(self.query, "result_reply", content=content,
1211 self.session.send(self.query, "result_reply", content=content,
1212 parent=msg, ident=client_id)
1212 parent=msg, ident=client_id)
1213 return
1213 return
1214 else:
1214 else:
1215 records = {}
1215 records = {}
1216 for msg_id in msg_ids:
1216 for msg_id in msg_ids:
1217 if msg_id in self.pending:
1217 if msg_id in self.pending:
1218 pending.append(msg_id)
1218 pending.append(msg_id)
1219 elif msg_id in self.all_completed:
1219 elif msg_id in self.all_completed:
1220 completed.append(msg_id)
1220 completed.append(msg_id)
1221 if not statusonly:
1221 if not statusonly:
1222 c,bufs = self._extract_record(records[msg_id])
1222 c,bufs = self._extract_record(records[msg_id])
1223 content[msg_id] = c
1223 content[msg_id] = c
1224 buffers.extend(bufs)
1224 buffers.extend(bufs)
1225 elif msg_id in records:
1225 elif msg_id in records:
1226 if rec['completed']:
1226 if rec['completed']:
1227 completed.append(msg_id)
1227 completed.append(msg_id)
1228 c,bufs = self._extract_record(records[msg_id])
1228 c,bufs = self._extract_record(records[msg_id])
1229 content[msg_id] = c
1229 content[msg_id] = c
1230 buffers.extend(bufs)
1230 buffers.extend(bufs)
1231 else:
1231 else:
1232 pending.append(msg_id)
1232 pending.append(msg_id)
1233 else:
1233 else:
1234 try:
1234 try:
1235 raise KeyError('No such message: '+msg_id)
1235 raise KeyError('No such message: '+msg_id)
1236 except:
1236 except:
1237 content = error.wrap_exception()
1237 content = error.wrap_exception()
1238 break
1238 break
1239 self.session.send(self.query, "result_reply", content=content,
1239 self.session.send(self.query, "result_reply", content=content,
1240 parent=msg, ident=client_id,
1240 parent=msg, ident=client_id,
1241 buffers=buffers)
1241 buffers=buffers)
1242
1242
1243 def get_history(self, client_id, msg):
1243 def get_history(self, client_id, msg):
1244 """Get a list of all msg_ids in our DB records"""
1244 """Get a list of all msg_ids in our DB records"""
1245 try:
1245 try:
1246 msg_ids = self.db.get_history()
1246 msg_ids = self.db.get_history()
1247 except Exception as e:
1247 except Exception as e:
1248 content = error.wrap_exception()
1248 content = error.wrap_exception()
1249 else:
1249 else:
1250 content = dict(status='ok', history=msg_ids)
1250 content = dict(status='ok', history=msg_ids)
1251
1251
1252 self.session.send(self.query, "history_reply", content=content,
1252 self.session.send(self.query, "history_reply", content=content,
1253 parent=msg, ident=client_id)
1253 parent=msg, ident=client_id)
1254
1254
    def db_query(self, client_id, msg):
        """Perform a raw query on the task record database."""
        content = msg['content']
        # query: match dict in mongodb style; keys: optional projection list
        query = content.get('query', {})
        keys = content.get('keys', None)
        buffers = []
        # shared sentinel for records whose buffer fields are absent or None
        empty = list()
        try:
            records = self.db.find_records(query, keys)
        except Exception as e:
            content = error.wrap_exception()
        else:
            # extract buffers from reply content:
            # raw buffers cannot travel inside the serialized content, so they
            # are popped out of each record and shipped as message buffers;
            # the per-record length lists let the client re-slice them.
            if keys is not None:
                # only track lengths for buffer columns the caller asked for
                buffer_lens = [] if 'buffers' in keys else None
                result_buffer_lens = [] if 'result_buffers' in keys else None
            else:
                buffer_lens = []
                result_buffer_lens = []

            for rec in records:
                # buffers may be None, so double check
                if buffer_lens is not None:
                    b = rec.pop('buffers', empty) or empty
                    buffer_lens.append(len(b))
                    buffers.extend(b)
                if result_buffer_lens is not None:
                    rb = rec.pop('result_buffers', empty) or empty
                    result_buffer_lens.append(len(rb))
                    buffers.extend(rb)
            content = dict(status='ok', records=records, buffer_lens=buffer_lens,
                                    result_buffer_lens=result_buffer_lens)
        # self.log.debug (content)
        self.session.send(self.query, "db_reply", content=content,
                                            parent=msg, ident=client_id,
                                            buffers=buffers)
1291
1291
@@ -1,714 +1,714 b''
1 """The Python scheduler for rich scheduling.
1 """The Python scheduler for rich scheduling.
2
2
3 The Pure ZMQ scheduler does not allow routing schemes other than LRU,
3 The Pure ZMQ scheduler does not allow routing schemes other than LRU,
4 nor does it check msg_id DAG dependencies. For those, a slightly slower
4 nor does it check msg_id DAG dependencies. For those, a slightly slower
5 Python Scheduler exists.
5 Python Scheduler exists.
6
6
7 Authors:
7 Authors:
8
8
9 * Min RK
9 * Min RK
10 """
10 """
11 #-----------------------------------------------------------------------------
11 #-----------------------------------------------------------------------------
12 # Copyright (C) 2010-2011 The IPython Development Team
12 # Copyright (C) 2010-2011 The IPython Development Team
13 #
13 #
14 # Distributed under the terms of the BSD License. The full license is in
14 # Distributed under the terms of the BSD License. The full license is in
15 # the file COPYING, distributed as part of this software.
15 # the file COPYING, distributed as part of this software.
16 #-----------------------------------------------------------------------------
16 #-----------------------------------------------------------------------------
17
17
18 #----------------------------------------------------------------------
18 #----------------------------------------------------------------------
19 # Imports
19 # Imports
20 #----------------------------------------------------------------------
20 #----------------------------------------------------------------------
21
21
22 from __future__ import print_function
22 from __future__ import print_function
23
23
24 import logging
24 import logging
25 import sys
25 import sys
26
26
27 from datetime import datetime, timedelta
27 from datetime import datetime, timedelta
28 from random import randint, random
28 from random import randint, random
29 from types import FunctionType
29 from types import FunctionType
30
30
31 try:
31 try:
32 import numpy
32 import numpy
33 except ImportError:
33 except ImportError:
34 numpy = None
34 numpy = None
35
35
36 import zmq
36 import zmq
37 from zmq.eventloop import ioloop, zmqstream
37 from zmq.eventloop import ioloop, zmqstream
38
38
39 # local imports
39 # local imports
40 from IPython.external.decorator import decorator
40 from IPython.external.decorator import decorator
41 from IPython.config.application import Application
41 from IPython.config.application import Application
42 from IPython.config.loader import Config
42 from IPython.config.loader import Config
43 from IPython.utils.traitlets import Instance, Dict, List, Set, Int, Enum, CBytes
43 from IPython.utils.traitlets import Instance, Dict, List, Set, Int, Enum, CBytes
44
44
45 from IPython.parallel import error
45 from IPython.parallel import error
46 from IPython.parallel.factory import SessionFactory
46 from IPython.parallel.factory import SessionFactory
47 from IPython.parallel.util import connect_logger, local_logger, asbytes
47 from IPython.parallel.util import connect_logger, local_logger, asbytes
48
48
49 from .dependency import Dependency
49 from .dependency import Dependency
50
50
@decorator
def logged(f,self,*args,**kwargs):
    """Decorator: trace every call to the wrapped method on self.log.

    Logs the wrapped function's name and its positional/keyword arguments
    at DEBUG level, then delegates to the function unchanged.
    """
    # f.__name__ instead of the Python-2-only f.func_name alias:
    # identical on Python 2, and keeps the decorator importable on Python 3.
    self.log.debug("scheduler::%s(*%s,**%s)", f.__name__, args, kwargs)
    return f(self,*args, **kwargs)
57
57
58 #----------------------------------------------------------------------
58 #----------------------------------------------------------------------
59 # Chooser functions
59 # Chooser functions
60 #----------------------------------------------------------------------
60 #----------------------------------------------------------------------
61
61
def plainrandom(loads):
    """Pick an engine index uniformly at random.

    Only the length of `loads` matters; the load values are ignored.
    """
    return randint(0, len(loads) - 1)
66
66
def lru(loads):
    """Choose index 0 every time.

    `loads` is assumed to be kept in LRU order (oldest first), so the head
    of the list is always the least-recently-used engine; the load values
    themselves are ignored.
    """
    return 0
75
75
def twobin(loads):
    """Sample two indices uniformly at random and keep the smaller.

    The load values are ignored; `loads` is assumed LRU-ordered with the
    oldest engine first, so the smaller index is the older pick.
    """
    last = len(loads) - 1
    first_pick = randint(0, last)
    second_pick = randint(0, last)
    return min(first_pick, second_pick)
87
87
def weighted(loads):
    """Draw two indices with probability inversely proportional to load,
    then return whichever of the two carries more weight (less load).
    """
    # Inverse load as weight; the 1e-6 floor makes a zero-load engine
    # roughly a million times more likely than one with load 1.
    weights = 1./(1e-6+numpy.array(loads))
    cumulative = weights.cumsum()
    total = cumulative[-1]
    # two independent draws along the cumulative weight axis
    first = random()*total
    second = random()*total
    i = 0
    while cumulative[i] < first:
        i += 1
    j = 0
    while cumulative[j] < second:
        j += 1
    # keep the pick with the larger weight, i.e. the smaller load
    return j if weights[j] > weights[i] else i
109
109
def leastload(loads):
    """Return the index of the minimum load.

    Ties go to the earliest occurrence, which under LRU ordering means
    the least-recently-used of the equally-loaded engines.
    """
    smallest = min(loads)
    return loads.index(smallest)
118
118
119 #---------------------------------------------------------------------
119 #---------------------------------------------------------------------
120 # Classes
120 # Classes
121 #---------------------------------------------------------------------
121 #---------------------------------------------------------------------
122 # store empty default dependency:
122 # store empty default dependency:
123 MET = Dependency([])
123 MET = Dependency([])
124
124
125 class TaskScheduler(SessionFactory):
125 class TaskScheduler(SessionFactory):
126 """Python TaskScheduler object.
126 """Python TaskScheduler object.
127
127
128 This is the simplest object that supports msg_id based
128 This is the simplest object that supports msg_id based
129 DAG dependencies. *Only* task msg_ids are checked, not
129 DAG dependencies. *Only* task msg_ids are checked, not
130 msg_ids of jobs submitted via the MUX queue.
130 msg_ids of jobs submitted via the MUX queue.
131
131
132 """
132 """
133
133
134 hwm = Int(0, config=True, shortname='hwm',
134 hwm = Int(0, config=True, shortname='hwm',
135 help="""specify the High Water Mark (HWM) for the downstream
135 help="""specify the High Water Mark (HWM) for the downstream
136 socket in the Task scheduler. This is the maximum number
136 socket in the Task scheduler. This is the maximum number
137 of allowed outstanding tasks on each engine."""
137 of allowed outstanding tasks on each engine."""
138 )
138 )
139 scheme_name = Enum(('leastload', 'pure', 'lru', 'plainrandom', 'weighted', 'twobin'),
139 scheme_name = Enum(('leastload', 'pure', 'lru', 'plainrandom', 'weighted', 'twobin'),
140 'leastload', config=True, shortname='scheme', allow_none=False,
140 'leastload', config=True, shortname='scheme', allow_none=False,
141 help="""select the task scheduler scheme [default: Python LRU]
141 help="""select the task scheduler scheme [default: Python LRU]
142 Options are: 'pure', 'lru', 'plainrandom', 'weighted', 'twobin','leastload'"""
142 Options are: 'pure', 'lru', 'plainrandom', 'weighted', 'twobin','leastload'"""
143 )
143 )
    def _scheme_name_changed(self, old, new):
        # trait observer: resolve the chooser function of the same name from
        # this module's globals and install it as the active scheme
        self.log.debug("Using scheme %r"%new)
        self.scheme = globals()[new]
147
147
148 # input arguments:
148 # input arguments:
149 scheme = Instance(FunctionType) # function for determining the destination
149 scheme = Instance(FunctionType) # function for determining the destination
    def _scheme_default(self):
        # default chooser when no scheme has been configured
        return leastload
152 client_stream = Instance(zmqstream.ZMQStream) # client-facing stream
152 client_stream = Instance(zmqstream.ZMQStream) # client-facing stream
153 engine_stream = Instance(zmqstream.ZMQStream) # engine-facing stream
153 engine_stream = Instance(zmqstream.ZMQStream) # engine-facing stream
154 notifier_stream = Instance(zmqstream.ZMQStream) # hub-facing sub stream
154 notifier_stream = Instance(zmqstream.ZMQStream) # hub-facing sub stream
155 mon_stream = Instance(zmqstream.ZMQStream) # hub-facing pub stream
155 mon_stream = Instance(zmqstream.ZMQStream) # hub-facing pub stream
156
156
157 # internals:
157 # internals:
158 graph = Dict() # dict by msg_id of [ msg_ids that depend on key ]
158 graph = Dict() # dict by msg_id of [ msg_ids that depend on key ]
159 retries = Dict() # dict by msg_id of retries remaining (non-neg ints)
159 retries = Dict() # dict by msg_id of retries remaining (non-neg ints)
160 # waiting = List() # list of msg_ids ready to run, but haven't due to HWM
160 # waiting = List() # list of msg_ids ready to run, but haven't due to HWM
161 depending = Dict() # dict by msg_id of (msg_id, raw_msg, after, follow)
161 depending = Dict() # dict by msg_id of (msg_id, raw_msg, after, follow)
162 pending = Dict() # dict by engine_uuid of submitted tasks
162 pending = Dict() # dict by engine_uuid of submitted tasks
163 completed = Dict() # dict by engine_uuid of completed tasks
163 completed = Dict() # dict by engine_uuid of completed tasks
164 failed = Dict() # dict by engine_uuid of failed tasks
164 failed = Dict() # dict by engine_uuid of failed tasks
165 destinations = Dict() # dict by msg_id of engine_uuids where jobs ran (reverse of completed+failed)
165 destinations = Dict() # dict by msg_id of engine_uuids where jobs ran (reverse of completed+failed)
166 clients = Dict() # dict by msg_id for who submitted the task
166 clients = Dict() # dict by msg_id for who submitted the task
167 targets = List() # list of target IDENTs
167 targets = List() # list of target IDENTs
168 loads = List() # list of engine loads
168 loads = List() # list of engine loads
169 # full = Set() # set of IDENTs that have HWM outstanding tasks
169 # full = Set() # set of IDENTs that have HWM outstanding tasks
170 all_completed = Set() # set of all completed tasks
170 all_completed = Set() # set of all completed tasks
171 all_failed = Set() # set of all failed tasks
171 all_failed = Set() # set of all failed tasks
172 all_done = Set() # set of all finished tasks=union(completed,failed)
172 all_done = Set() # set of all finished tasks=union(completed,failed)
173 all_ids = Set() # set of all submitted task IDs
173 all_ids = Set() # set of all submitted task IDs
174 blacklist = Dict() # dict by msg_id of locations where a job has encountered UnmetDependency
174 blacklist = Dict() # dict by msg_id of locations where a job has encountered UnmetDependency
175 auditor = Instance('zmq.eventloop.ioloop.PeriodicCallback')
175 auditor = Instance('zmq.eventloop.ioloop.PeriodicCallback')
176
176
177 ident = CBytes() # ZMQ identity. This should just be self.session.session
177 ident = CBytes() # ZMQ identity. This should just be self.session.session
178 # but ensure Bytes
178 # but ensure Bytes
    def _ident_default(self):
        # ZMQ identities must be bytes; session.session may be unicode
        return asbytes(self.session.session)
181
181
    def start(self):
        """Wire up stream handlers and start the timeout auditor."""
        # engine replies arrive uncopied for speed; dispatch_result routes them
        self.engine_stream.on_recv(self.dispatch_result, copy=False)
        self._notification_handlers = dict(
            registration_notification = self._register_engine,
            unregistration_notification = self._unregister_engine
        )
        self.notifier_stream.on_recv(self.dispatch_notification)
        # periodically sweep for timed-out dependencies
        self.auditor = ioloop.PeriodicCallback(self.audit_timeouts, 2e3, self.loop) # every 2s
        self.auditor.start()
        self.log.info("Scheduler started [%s]"%self.scheme_name)
192
192
    def resume_receiving(self):
        """Resume accepting jobs."""
        # copy=False: submissions are forwarded raw, so skip the copy
        self.client_stream.on_recv(self.dispatch_submission, copy=False)
196
196
    def stop_receiving(self):
        """Stop accepting jobs while there are no engines.
        Leave them in the ZMQ queue."""
        # detaching the handler leaves submissions buffered in the socket
        self.client_stream.on_recv(None)
201
201
202 #-----------------------------------------------------------------------
202 #-----------------------------------------------------------------------
203 # [Un]Registration Handling
203 # [Un]Registration Handling
204 #-----------------------------------------------------------------------
204 #-----------------------------------------------------------------------
205
205
    def dispatch_notification(self, msg):
        """dispatch register/unregister events."""
        try:
            # strip the zmq routing prefix off the multipart message
            idents,msg = self.session.feed_identities(msg)
        except ValueError:
            self.log.warn("task::Invalid Message: %r",msg)
            return
        try:
            msg = self.session.unpack_message(msg)
        except ValueError:
            # signature/unpack failure: not from a trusted session
            self.log.warn("task::Unauthorized message from: %r"%idents)
            return

        # msg_type lives in the message header
        msg_type = msg['header']['msg_type']

        handler = self._notification_handlers.get(msg_type, None)
        if handler is None:
            self.log.error("Unhandled message type: %r"%msg_type)
        else:
            try:
                # both handlers take the engine's queue identity as bytes
                handler(asbytes(msg['content']['queue']))
            except Exception:
                self.log.error("task::Invalid notification msg: %r",msg)
229
229
    def _register_engine(self, uid):
        """New engine with ident `uid` became available."""
        # head of the line: new engines are the most-recently-used slot 0
        self.targets.insert(0,uid)
        self.loads.insert(0,0)

        # initialize per-engine bookkeeping sets
        self.completed[uid] = set()
        self.failed[uid] = set()
        self.pending[uid] = {}
        if len(self.targets) == 1:
            # first engine available: start accepting submissions again
            self.resume_receiving()
        # rescan the graph: jobs blocked for lack of engines may now run
        self.update_graph(None)
244
244
    def _unregister_engine(self, uid):
        """Existing engine with ident `uid` became unavailable."""
        if len(self.targets) == 1:
            # this was our only engine
            self.stop_receiving()

        # handle any potentially finished tasks:
        self.engine_stream.flush()

        # don't pop destinations, because they might be used later
        # map(self.destinations.pop, self.completed.pop(uid))
        # map(self.destinations.pop, self.failed.pop(uid))

        # prevent this engine from receiving work
        idx = self.targets.index(uid)
        self.targets.pop(idx)
        self.loads.pop(idx)

        # wait 5 seconds before cleaning up pending jobs, since the results might
        # still be incoming
        if self.pending[uid]:
            dc = ioloop.DelayedCallback(lambda : self.handle_stranded_tasks(uid), 5000, self.loop)
            dc.start()
        else:
            # nothing in flight: drop the bookkeeping immediately
            self.completed.pop(uid)
            self.failed.pop(uid)
271
271
272
272
def handle_stranded_tasks(self, engine):
    """Deal with jobs resident in an engine that died."""
    lost = self.pending[engine]
    for msg_id in lost.keys():
        if msg_id not in self.pending[engine]:
            # already handled (dispatch_result below may drain entries);
            # prevent double-handling of messages
            continue

        stranded = lost[msg_id][0]
        idents, msg = self.session.feed_identities(stranded, copy=False)
        parent = self.session.unpack(msg[1].bytes)
        # route the fake reply as if it came from the dead engine
        idents = [engine, idents[0]]

        # build a fake error reply by raising/capturing an EngineError
        try:
            raise error.EngineError("Engine %r died while running task %r"%(engine, msg_id))
        except:
            content = error.wrap_exception()
        reply = self.session.msg('apply_reply', content, parent=parent,
                                 subheader={'status':'error'})
        raw_reply = map(zmq.Message, self.session.serialize(reply, ident=idents))
        # and dispatch it through the normal result path
        self.dispatch_result(raw_reply)

    # finally scrub completed/failed lists for the dead engine
    self.completed.pop(engine)
    self.failed.pop(engine)
299
299
300
300
301 #-----------------------------------------------------------------------
301 #-----------------------------------------------------------------------
302 # Job Submission
302 # Job Submission
303 #-----------------------------------------------------------------------
303 #-----------------------------------------------------------------------
def dispatch_submission(self, raw_msg):
    """Dispatch job submission to appropriate handlers.

    Unpacks the incoming task message, records its id and retry budget,
    normalizes its time (`after`) and location (`follow`) dependencies,
    then either runs it immediately or parks it until its dependencies
    are met.
    """
    # ensure engine registrations are up to date before scheduling:
    self.notifier_stream.flush()
    try:
        idents, msg = self.session.feed_identities(raw_msg, copy=False)
        msg = self.session.unpack_message(msg, content=False, copy=False)
    except Exception:
        # FIX: corrected log-message typo ("Invaid" -> "Invalid")
        self.log.error("task::Invalid task msg: %r"%raw_msg, exc_info=True)
        return

    # send to monitor
    self.mon_stream.send_multipart([b'intask']+raw_msg, copy=False)

    header = msg['header']
    msg_id = header['msg_id']
    self.all_ids.add(msg_id)

    # get targets as a set of bytes objects
    # from a list of unicode objects
    targets = header.get('targets', [])
    targets = set(map(asbytes, targets))

    retries = header.get('retries', 0)
    self.retries[msg_id] = retries

    # time dependencies
    after = header.get('after', None)
    if after:
        after = Dependency(after)
        if after.all:
            # drop already-satisfied ids so later set comparisons are cheap
            if after.success:
                after = Dependency(after.difference(self.all_completed),
                                    success=after.success,
                                    failure=after.failure,
                                    all=after.all,
                )
            if after.failure:
                after = Dependency(after.difference(self.all_failed),
                                    success=after.success,
                                    failure=after.failure,
                                    all=after.all,
                )
        if after.check(self.all_completed, self.all_failed):
            # recast as empty set, if `after` already met,
            # to prevent unnecessary set comparisons
            after = MET
    else:
        after = MET

    # location dependencies
    follow = Dependency(header.get('follow', []))

    # turn timeouts into datetime objects:
    timeout = header.get('timeout', None)
    if timeout:
        timeout = datetime.now() + timedelta(0,timeout,0)

    args = [raw_msg, targets, after, follow, timeout]

    # validate and reduce dependencies:
    for dep in after,follow:
        if not dep: # empty dependency
            continue
        # invalid: depends on itself or on an id we've never seen
        if msg_id in dep or dep.difference(self.all_ids):
            self.depending[msg_id] = args
            return self.fail_unreachable(msg_id, error.InvalidDependency)
        # check if unreachable:
        if dep.unreachable(self.all_completed, self.all_failed):
            self.depending[msg_id] = args
            return self.fail_unreachable(msg_id)

    if after.check(self.all_completed, self.all_failed):
        # time deps already met, try to run
        if not self.maybe_run(msg_id, *args):
            # can't run yet
            if msg_id not in self.all_failed:
                # could have failed as unreachable
                self.save_unmet(msg_id, *args)
    else:
        self.save_unmet(msg_id, *args)
388
388
def audit_timeouts(self):
    """Audit all waiting tasks for expired timeouts.

    Any parked task whose `timeout` deadline has passed is failed with
    ``error.TaskTimeout``.
    """
    now = datetime.now()
    # list() guards against `fail_unreachable` mutating self.depending
    # while we iterate (it pops entries and may cascade)
    for msg_id in list(self.depending.keys()):
        # must recheck, in case one failure cascaded to another:
        if msg_id in self.depending:
            # FIX: unpack order now matches the
            # (raw_msg, targets, after, follow, timeout) convention used
            # by fail_unreachable/save_unmet/update_graph; the old order
            # silently swapped `targets` and `after`
            raw_msg, targets, after, follow, timeout = self.depending[msg_id]
            if timeout and timeout < now:
                self.fail_unreachable(msg_id, error.TaskTimeout)
398
398
def fail_unreachable(self, msg_id, why=error.ImpossibleDependency):
    """a task has become unreachable, send a reply with an ImpossibleDependency
    error."""
    if msg_id not in self.depending:
        self.log.error("msg %r already failed!", msg_id)
        return
    raw_msg, targets, after, follow, timeout = self.depending.pop(msg_id)

    # scrub this job from the graph entries of everything it depended on
    for mid in follow.union(after):
        if mid in self.graph:
            self.graph[mid].remove(msg_id)

    # FIXME: unpacking a message I've already unpacked, but didn't save:
    idents, msg = self.session.feed_identities(raw_msg, copy=False)
    header = self.session.unpack(msg[1].bytes)

    # build the error content by actually raising `why` and capturing it
    try:
        raise why()
    except:
        content = error.wrap_exception()

    self.all_done.add(msg_id)
    self.all_failed.add(msg_id)

    reply = self.session.send(self.client_stream, 'apply_reply', content,
                              parent=header, ident=idents)
    self.session.send(self.mon_stream, reply, ident=[b'outtask']+idents)

    # a failure may unblock (or doom) dependents
    self.update_graph(msg_id, success=False)
427
427
def maybe_run(self, msg_id, raw_msg, targets, after, follow, timeout):
    """check location dependencies, and run if they are met."""
    blacklist = self.blacklist.setdefault(msg_id, set())

    if not (follow or targets or blacklist or self.hwm):
        # no constraints at all: let submit_task pick any engine
        self.submit_task(msg_id, raw_msg, targets, follow, timeout, None)
        return True

    # constrained: filter engines through an eligibility predicate
    def runnable(i):
        # engine at its high-water mark cannot take more work
        if self.hwm and self.loads[i] == self.hwm:
            return False
        engine = self.targets[i]
        # engine already rejected this task
        if engine in blacklist:
            return False
        # not one of the explicitly requested engines
        if targets and engine not in targets:
            return False
        # finally, location (follow) dependencies must be satisfied here
        return follow.check(self.completed[engine], self.failed[engine])

    indices = [i for i in range(len(self.targets)) if runnable(i)]

    if not indices:
        # couldn't run anywhere right now; check whether it ever can
        if follow.all:
            # follow=all: results needed on one engine may already be
            # spread across several, which is impossible to satisfy
            dests = set()
            relevant = set()
            if follow.success:
                relevant = self.all_completed
            if follow.failure:
                relevant = relevant.union(self.all_failed)
            for m in follow.intersection(relevant):
                dests.add(self.destinations[m])
            if len(dests) > 1:
                self.depending[msg_id] = (raw_msg, targets, after, follow, timeout)
                self.fail_unreachable(msg_id)
                return False
        if targets:
            # every requested engine blacklisted or gone: unreachable
            targets.difference_update(blacklist)
            if not targets or not targets.intersection(self.targets):
                self.depending[msg_id] = (raw_msg, targets, after, follow, timeout)
                self.fail_unreachable(msg_id)
                return False
        return False

    self.submit_task(msg_id, raw_msg, targets, follow, timeout, indices)
    return True
478
478
def save_unmet(self, msg_id, raw_msg, targets, after, follow, timeout):
    """Save a message for later submission when its dependencies are met."""
    self.depending[msg_id] = [raw_msg, targets, after, follow, timeout]
    # register msg_id under each unfinished dependency so update_graph
    # can find it when those dependencies complete
    for dep_id in after.union(follow).difference(self.all_done):
        self.graph.setdefault(dep_id, set()).add(msg_id)
487
487
def submit_task(self, msg_id, raw_msg, targets, follow, timeout, indices=None):
    """Submit a task to any of a subset of our targets."""
    # restrict load comparison to the eligible engines, if given
    if indices:
        candidate_loads = [self.loads[i] for i in indices]
    else:
        candidate_loads = self.loads
    # let the scheduling scheme pick the least-loaded candidate,
    # then map back to a real engine index
    idx = self.scheme(candidate_loads)
    if indices:
        idx = indices[idx]
    target = self.targets[idx]

    # send job to the engine (identity frame first, then payload)
    self.engine_stream.send(target, flags=zmq.SNDMORE, copy=False)
    self.engine_stream.send_multipart(raw_msg, copy=False)

    # update load
    self.add_job(idx)
    self.pending[target][msg_id] = (raw_msg, targets, MET, follow, timeout)

    # notify Hub of the task's destination
    content = dict(msg_id=msg_id, engine_id=target.decode('ascii'))
    self.session.send(self.mon_stream, 'task_destination', content=content,
                    ident=[b'tracktask',self.ident])
509
509
510
510
511 #-----------------------------------------------------------------------
511 #-----------------------------------------------------------------------
512 # Result Handling
512 # Result Handling
513 #-----------------------------------------------------------------------
513 #-----------------------------------------------------------------------
def dispatch_result(self, raw_msg):
    """Dispatch method for result replies.

    Unpacks the reply, updates the source engine's load, then routes the
    message either to `handle_result` (real result) or
    `handle_unmet_dependency` (resubmission / retry).
    """
    try:
        idents, msg = self.session.feed_identities(raw_msg, copy=False)
        msg = self.session.unpack_message(msg, content=False, copy=False)
        engine = idents[0]
        try:
            idx = self.targets.index(engine)
        except ValueError:
            pass # skip load-update for dead engines
        else:
            self.finish_job(idx)
    except Exception:
        # FIX: corrected log-message typo ("Invaid" -> "Invalid")
        self.log.error("task::Invalid result: %r", raw_msg, exc_info=True)
        return

    header = msg['header']
    parent = msg['parent_header']
    if header.get('dependencies_met', True):
        success = (header['status'] == 'ok')
        msg_id = parent['msg_id']
        retries = self.retries[msg_id]
        if not success and retries > 0:
            # failed, but retries remain: consume one and resubmit
            self.retries[msg_id] = retries - 1
            self.handle_unmet_dependency(idents, parent)
        else:
            del self.retries[msg_id]
            # relay to client and update graph
            self.handle_result(idents, parent, raw_msg, success)
            # send to Hub monitor
            self.mon_stream.send_multipart([b'outtask']+raw_msg, copy=False)
    else:
        self.handle_unmet_dependency(idents, parent)
548
548
def handle_result(self, idents, parent, raw_msg, success=True):
    """handle a real task result, either success or failure"""
    engine, client = idents[0], idents[1]
    # swap the first two identity frames so the XREP-XREP mirror
    # routes the reply back to the client
    raw_msg[:2] = [client, engine]
    self.client_stream.send_multipart(raw_msg, copy=False)

    # now, update our data structures
    msg_id = parent['msg_id']
    self.blacklist.pop(msg_id, None)
    self.pending[engine].pop(msg_id)
    if success:
        self.completed[engine].add(msg_id)
        self.all_completed.add(msg_id)
    else:
        self.failed[engine].add(msg_id)
        self.all_failed.add(msg_id)
    self.all_done.add(msg_id)
    self.destinations[msg_id] = engine

    # a finished task may unblock dependents
    self.update_graph(msg_id, success)
572
572
def handle_unmet_dependency(self, idents, parent):
    """handle an unmet dependency"""
    engine = idents[0]
    msg_id = parent['msg_id']

    # record that this engine could not satisfy the task
    self.blacklist.setdefault(msg_id, set()).add(engine)

    args = self.pending[engine].pop(msg_id)
    raw, targets, after, follow, timeout = args

    if self.blacklist[msg_id] == targets:
        # every requested engine has rejected the task: unreachable
        self.depending[msg_id] = args
        self.fail_unreachable(msg_id)
    elif not self.maybe_run(msg_id, *args):
        # resubmit failed; unless it just failed as unreachable,
        # put it back in our dependency tree
        if msg_id not in self.all_failed:
            self.save_unmet(msg_id, *args)

    if self.hwm:
        try:
            idx = self.targets.index(engine)
        except ValueError:
            pass # skip load-update for dead engines
        else:
            if self.loads[idx] == self.hwm-1:
                # engine just dropped below its high-water mark:
                # other parked tasks may now be runnable
                self.update_graph(None)
602
602
603
603
604
604
605 def update_graph(self, dep_id=None, success=True):
605 def update_graph(self, dep_id=None, success=True):
606 """dep_id just finished. Update our dependency
606 """dep_id just finished. Update our dependency
607 graph and submit any jobs that just became runable.
607 graph and submit any jobs that just became runable.
608
608
609 Called with dep_id=None to update entire graph for hwm, but without finishing
609 Called with dep_id=None to update entire graph for hwm, but without finishing
610 a task.
610 a task.
611 """
611 """
612 # print ("\n\n***********")
612 # print ("\n\n***********")
613 # pprint (dep_id)
613 # pprint (dep_id)
614 # pprint (self.graph)
614 # pprint (self.graph)
615 # pprint (self.depending)
615 # pprint (self.depending)
616 # pprint (self.all_completed)
616 # pprint (self.all_completed)
617 # pprint (self.all_failed)
617 # pprint (self.all_failed)
618 # print ("\n\n***********\n\n")
618 # print ("\n\n***********\n\n")
619 # update any jobs that depended on the dependency
619 # update any jobs that depended on the dependency
620 jobs = self.graph.pop(dep_id, [])
620 jobs = self.graph.pop(dep_id, [])
621
621
622 # recheck *all* jobs if
622 # recheck *all* jobs if
623 # a) we have HWM and an engine just become no longer full
623 # a) we have HWM and an engine just become no longer full
624 # or b) dep_id was given as None
624 # or b) dep_id was given as None
625 if dep_id is None or self.hwm and any( [ load==self.hwm-1 for load in self.loads ]):
625 if dep_id is None or self.hwm and any( [ load==self.hwm-1 for load in self.loads ]):
626 jobs = self.depending.keys()
626 jobs = self.depending.keys()
627
627
628 for msg_id in jobs:
628 for msg_id in jobs:
629 raw_msg, targets, after, follow, timeout = self.depending[msg_id]
629 raw_msg, targets, after, follow, timeout = self.depending[msg_id]
630
630
631 if after.unreachable(self.all_completed, self.all_failed)\
631 if after.unreachable(self.all_completed, self.all_failed)\
632 or follow.unreachable(self.all_completed, self.all_failed):
632 or follow.unreachable(self.all_completed, self.all_failed):
633 self.fail_unreachable(msg_id)
633 self.fail_unreachable(msg_id)
634
634
635 elif after.check(self.all_completed, self.all_failed): # time deps met, maybe run
635 elif after.check(self.all_completed, self.all_failed): # time deps met, maybe run
636 if self.maybe_run(msg_id, raw_msg, targets, MET, follow, timeout):
636 if self.maybe_run(msg_id, raw_msg, targets, MET, follow, timeout):
637
637
638 self.depending.pop(msg_id)
638 self.depending.pop(msg_id)
639 for mid in follow.union(after):
639 for mid in follow.union(after):
640 if mid in self.graph:
640 if mid in self.graph:
641 self.graph[mid].remove(msg_id)
641 self.graph[mid].remove(msg_id)
642
642
643 #----------------------------------------------------------------------
643 #----------------------------------------------------------------------
644 # methods to be overridden by subclasses
644 # methods to be overridden by subclasses
645 #----------------------------------------------------------------------
645 #----------------------------------------------------------------------
646
646
def add_job(self, idx):
    """Record that self.targets[idx] just received a job.

    Override in subclasses. The default ordering is simple LRU;
    the default loads are the number of outstanding jobs.
    """
    self.loads[idx] += 1
    # rotate the engine to the back of the LRU queue
    self.targets.append(self.targets.pop(idx))
    self.loads.append(self.loads.pop(idx))
654
654
655
655
def finish_job(self, idx):
    """Record that self.targets[idx] just finished a job.

    Override in subclasses; the default load metric is simply the
    count of outstanding jobs.
    """
    self.loads[idx] -= 1
660
660
661
661
662
662
def launch_scheduler(in_addr, out_addr, mon_addr, not_addr, config=None,
                        logname='root', log_url=None, loglevel=logging.DEBUG,
                        identity=b'task', in_thread=False):
    """Create the sockets, logging, and TaskScheduler, then start it.

    When `in_thread` is true, shares the parent's zmq Context and IOLoop
    and does not block; otherwise creates private ones and runs the loop
    until interrupted.
    """
    ZMQStream = zmqstream.ZMQStream

    if config:
        # unwrap dict back into Config
        config = Config(config)

    if in_thread:
        # use instance() to get the same Context/Loop as our parent
        ctx = zmq.Context.instance()
        loop = ioloop.IOLoop.instance()
    else:
        # in a process, don't use instance()
        # for safety with multiprocessing
        ctx = zmq.Context()
        loop = ioloop.IOLoop()

    # XREP socket facing clients
    ins = ZMQStream(ctx.socket(zmq.XREP), loop)
    ins.setsockopt(zmq.IDENTITY, identity)
    ins.bind(in_addr)

    # XREP socket facing engines
    outs = ZMQStream(ctx.socket(zmq.XREP), loop)
    outs.setsockopt(zmq.IDENTITY, identity)
    outs.bind(out_addr)

    # PUB to the Hub monitor
    mons = zmqstream.ZMQStream(ctx.socket(zmq.PUB), loop)
    mons.connect(mon_addr)

    # SUB for engine registration notifications (subscribe to everything)
    nots = zmqstream.ZMQStream(ctx.socket(zmq.SUB), loop)
    nots.setsockopt(zmq.SUBSCRIBE, b'')
    nots.connect(not_addr)

    # setup logging.
    if in_thread:
        log = Application.instance().log
    elif log_url:
        log = connect_logger(logname, ctx, log_url, root="scheduler", loglevel=loglevel)
    else:
        log = local_logger(logname, loglevel)

    scheduler = TaskScheduler(client_stream=ins, engine_stream=outs,
                              mon_stream=mons, notifier_stream=nots,
                              loop=loop, log=log,
                              config=config)
    scheduler.start()
    if not in_thread:
        try:
            loop.start()
        except KeyboardInterrupt:
            print ("interrupted, exiting...", file=sys.__stderr__)
714
714
@@ -1,230 +1,230 b''
1 """KernelStarter class that intercepts Control Queue messages, and handles process management.
1 """KernelStarter class that intercepts Control Queue messages, and handles process management.
2
2
3 Authors:
3 Authors:
4
4
5 * Min RK
5 * Min RK
6 """
6 """
7 #-----------------------------------------------------------------------------
7 #-----------------------------------------------------------------------------
8 # Copyright (C) 2010-2011 The IPython Development Team
8 # Copyright (C) 2010-2011 The IPython Development Team
9 #
9 #
10 # Distributed under the terms of the BSD License. The full license is in
10 # Distributed under the terms of the BSD License. The full license is in
11 # the file COPYING, distributed as part of this software.
11 # the file COPYING, distributed as part of this software.
12 #-----------------------------------------------------------------------------
12 #-----------------------------------------------------------------------------
13
13
14 from zmq.eventloop import ioloop
14 from zmq.eventloop import ioloop
15
15
16 from IPython.zmq.session import Session
16 from IPython.zmq.session import Session
17
17
18 class KernelStarter(object):
18 class KernelStarter(object):
19 """Object for resetting/killing the Kernel."""
19 """Object for resetting/killing the Kernel."""
20
20
21
21
22 def __init__(self, session, upstream, downstream, *kernel_args, **kernel_kwargs):
22 def __init__(self, session, upstream, downstream, *kernel_args, **kernel_kwargs):
23 self.session = session
23 self.session = session
24 self.upstream = upstream
24 self.upstream = upstream
25 self.downstream = downstream
25 self.downstream = downstream
26 self.kernel_args = kernel_args
26 self.kernel_args = kernel_args
27 self.kernel_kwargs = kernel_kwargs
27 self.kernel_kwargs = kernel_kwargs
28 self.handlers = {}
28 self.handlers = {}
29 for method in 'shutdown_request shutdown_reply'.split():
29 for method in 'shutdown_request shutdown_reply'.split():
30 self.handlers[method] = getattr(self, method)
30 self.handlers[method] = getattr(self, method)
31
31
32 def start(self):
32 def start(self):
33 self.upstream.on_recv(self.dispatch_request)
33 self.upstream.on_recv(self.dispatch_request)
34 self.downstream.on_recv(self.dispatch_reply)
34 self.downstream.on_recv(self.dispatch_reply)
35
35
36 #--------------------------------------------------------------------------
36 #--------------------------------------------------------------------------
37 # Dispatch methods
37 # Dispatch methods
38 #--------------------------------------------------------------------------
38 #--------------------------------------------------------------------------
39
39
40 def dispatch_request(self, raw_msg):
40 def dispatch_request(self, raw_msg):
41 idents, msg = self.session.feed_identities()
41 idents, msg = self.session.feed_identities()
42 try:
42 try:
43 msg = self.session.unpack_message(msg, content=False)
43 msg = self.session.unpack_message(msg, content=False)
44 except:
44 except:
45 print ("bad msg: %s"%msg)
45 print ("bad msg: %s"%msg)
46
46
47 msgtype = msg['msg_type']
47 msgtype = msg['header']['msg_type']
48 handler = self.handlers.get(msgtype, None)
48 handler = self.handlers.get(msgtype, None)
49 if handler is None:
49 if handler is None:
50 self.downstream.send_multipart(raw_msg, copy=False)
50 self.downstream.send_multipart(raw_msg, copy=False)
51 else:
51 else:
52 handler(msg)
52 handler(msg)
53
53
54 def dispatch_reply(self, raw_msg):
54 def dispatch_reply(self, raw_msg):
55 idents, msg = self.session.feed_identities()
55 idents, msg = self.session.feed_identities()
56 try:
56 try:
57 msg = self.session.unpack_message(msg, content=False)
57 msg = self.session.unpack_message(msg, content=False)
58 except:
58 except:
59 print ("bad msg: %s"%msg)
59 print ("bad msg: %s"%msg)
60
60
61 msgtype = msg['msg_type']
61 msgtype = msg['header']['msg_type']
62 handler = self.handlers.get(msgtype, None)
62 handler = self.handlers.get(msgtype, None)
63 if handler is None:
63 if handler is None:
64 self.upstream.send_multipart(raw_msg, copy=False)
64 self.upstream.send_multipart(raw_msg, copy=False)
65 else:
65 else:
66 handler(msg)
66 handler(msg)
67
67
68 #--------------------------------------------------------------------------
68 #--------------------------------------------------------------------------
69 # Handlers
69 # Handlers
70 #--------------------------------------------------------------------------
70 #--------------------------------------------------------------------------
71
71
72 def shutdown_request(self, msg):
72 def shutdown_request(self, msg):
73 """"""
73 """"""
74 self.downstream.send_multipart(msg)
74 self.downstream.send_multipart(msg)
75
75
76 #--------------------------------------------------------------------------
76 #--------------------------------------------------------------------------
77 # Kernel process management methods, from KernelManager:
77 # Kernel process management methods, from KernelManager:
78 #--------------------------------------------------------------------------
78 #--------------------------------------------------------------------------
79
79
80 def _check_local(addr):
80 def _check_local(addr):
81 if isinstance(addr, tuple):
81 if isinstance(addr, tuple):
82 addr = addr[0]
82 addr = addr[0]
83 return addr in LOCAL_IPS
83 return addr in LOCAL_IPS
84
84
85 def start_kernel(self, **kw):
85 def start_kernel(self, **kw):
86 """Starts a kernel process and configures the manager to use it.
86 """Starts a kernel process and configures the manager to use it.
87
87
88 If random ports (port=0) are being used, this method must be called
88 If random ports (port=0) are being used, this method must be called
89 before the channels are created.
89 before the channels are created.
90
90
91 Parameters:
91 Parameters:
92 -----------
92 -----------
93 ipython : bool, optional (default True)
93 ipython : bool, optional (default True)
94 Whether to use an IPython kernel instead of a plain Python kernel.
94 Whether to use an IPython kernel instead of a plain Python kernel.
95 """
95 """
96 self.kernel = Process(target=make_kernel, args=self.kernel_args,
96 self.kernel = Process(target=make_kernel, args=self.kernel_args,
97 kwargs=self.kernel_kwargs)
97 kwargs=self.kernel_kwargs)
98
98
99 def shutdown_kernel(self, restart=False):
99 def shutdown_kernel(self, restart=False):
100 """ Attempts to the stop the kernel process cleanly. If the kernel
100 """ Attempts to the stop the kernel process cleanly. If the kernel
101 cannot be stopped, it is killed, if possible.
101 cannot be stopped, it is killed, if possible.
102 """
102 """
103 # FIXME: Shutdown does not work on Windows due to ZMQ errors!
103 # FIXME: Shutdown does not work on Windows due to ZMQ errors!
104 if sys.platform == 'win32':
104 if sys.platform == 'win32':
105 self.kill_kernel()
105 self.kill_kernel()
106 return
106 return
107
107
108 # Don't send any additional kernel kill messages immediately, to give
108 # Don't send any additional kernel kill messages immediately, to give
109 # the kernel a chance to properly execute shutdown actions. Wait for at
109 # the kernel a chance to properly execute shutdown actions. Wait for at
110 # most 1s, checking every 0.1s.
110 # most 1s, checking every 0.1s.
111 self.xreq_channel.shutdown(restart=restart)
111 self.xreq_channel.shutdown(restart=restart)
112 for i in range(10):
112 for i in range(10):
113 if self.is_alive:
113 if self.is_alive:
114 time.sleep(0.1)
114 time.sleep(0.1)
115 else:
115 else:
116 break
116 break
117 else:
117 else:
118 # OK, we've waited long enough.
118 # OK, we've waited long enough.
119 if self.has_kernel:
119 if self.has_kernel:
120 self.kill_kernel()
120 self.kill_kernel()
121
121
122 def restart_kernel(self, now=False):
122 def restart_kernel(self, now=False):
123 """Restarts a kernel with the same arguments that were used to launch
123 """Restarts a kernel with the same arguments that were used to launch
124 it. If the old kernel was launched with random ports, the same ports
124 it. If the old kernel was launched with random ports, the same ports
125 will be used for the new kernel.
125 will be used for the new kernel.
126
126
127 Parameters
127 Parameters
128 ----------
128 ----------
129 now : bool, optional
129 now : bool, optional
130 If True, the kernel is forcefully restarted *immediately*, without
130 If True, the kernel is forcefully restarted *immediately*, without
131 having a chance to do any cleanup action. Otherwise the kernel is
131 having a chance to do any cleanup action. Otherwise the kernel is
132 given 1s to clean up before a forceful restart is issued.
132 given 1s to clean up before a forceful restart is issued.
133
133
134 In all cases the kernel is restarted, the only difference is whether
134 In all cases the kernel is restarted, the only difference is whether
135 it is given a chance to perform a clean shutdown or not.
135 it is given a chance to perform a clean shutdown or not.
136 """
136 """
137 if self._launch_args is None:
137 if self._launch_args is None:
138 raise RuntimeError("Cannot restart the kernel. "
138 raise RuntimeError("Cannot restart the kernel. "
139 "No previous call to 'start_kernel'.")
139 "No previous call to 'start_kernel'.")
140 else:
140 else:
141 if self.has_kernel:
141 if self.has_kernel:
142 if now:
142 if now:
143 self.kill_kernel()
143 self.kill_kernel()
144 else:
144 else:
145 self.shutdown_kernel(restart=True)
145 self.shutdown_kernel(restart=True)
146 self.start_kernel(**self._launch_args)
146 self.start_kernel(**self._launch_args)
147
147
148 # FIXME: Messages get dropped in Windows due to probable ZMQ bug
148 # FIXME: Messages get dropped in Windows due to probable ZMQ bug
149 # unless there is some delay here.
149 # unless there is some delay here.
150 if sys.platform == 'win32':
150 if sys.platform == 'win32':
151 time.sleep(0.2)
151 time.sleep(0.2)
152
152
153 @property
153 @property
154 def has_kernel(self):
154 def has_kernel(self):
155 """Returns whether a kernel process has been specified for the kernel
155 """Returns whether a kernel process has been specified for the kernel
156 manager.
156 manager.
157 """
157 """
158 return self.kernel is not None
158 return self.kernel is not None
159
159
160 def kill_kernel(self):
160 def kill_kernel(self):
161 """ Kill the running kernel. """
161 """ Kill the running kernel. """
162 if self.has_kernel:
162 if self.has_kernel:
163 # Pause the heart beat channel if it exists.
163 # Pause the heart beat channel if it exists.
164 if self._hb_channel is not None:
164 if self._hb_channel is not None:
165 self._hb_channel.pause()
165 self._hb_channel.pause()
166
166
167 # Attempt to kill the kernel.
167 # Attempt to kill the kernel.
168 try:
168 try:
169 self.kernel.kill()
169 self.kernel.kill()
170 except OSError, e:
170 except OSError, e:
171 # In Windows, we will get an Access Denied error if the process
171 # In Windows, we will get an Access Denied error if the process
172 # has already terminated. Ignore it.
172 # has already terminated. Ignore it.
173 if not (sys.platform == 'win32' and e.winerror == 5):
173 if not (sys.platform == 'win32' and e.winerror == 5):
174 raise
174 raise
175 self.kernel = None
175 self.kernel = None
176 else:
176 else:
177 raise RuntimeError("Cannot kill kernel. No kernel is running!")
177 raise RuntimeError("Cannot kill kernel. No kernel is running!")
178
178
179 def interrupt_kernel(self):
179 def interrupt_kernel(self):
180 """ Interrupts the kernel. Unlike ``signal_kernel``, this operation is
180 """ Interrupts the kernel. Unlike ``signal_kernel``, this operation is
181 well supported on all platforms.
181 well supported on all platforms.
182 """
182 """
183 if self.has_kernel:
183 if self.has_kernel:
184 if sys.platform == 'win32':
184 if sys.platform == 'win32':
185 from parentpoller import ParentPollerWindows as Poller
185 from parentpoller import ParentPollerWindows as Poller
186 Poller.send_interrupt(self.kernel.win32_interrupt_event)
186 Poller.send_interrupt(self.kernel.win32_interrupt_event)
187 else:
187 else:
188 self.kernel.send_signal(signal.SIGINT)
188 self.kernel.send_signal(signal.SIGINT)
189 else:
189 else:
190 raise RuntimeError("Cannot interrupt kernel. No kernel is running!")
190 raise RuntimeError("Cannot interrupt kernel. No kernel is running!")
191
191
192 def signal_kernel(self, signum):
192 def signal_kernel(self, signum):
193 """ Sends a signal to the kernel. Note that since only SIGTERM is
193 """ Sends a signal to the kernel. Note that since only SIGTERM is
194 supported on Windows, this function is only useful on Unix systems.
194 supported on Windows, this function is only useful on Unix systems.
195 """
195 """
196 if self.has_kernel:
196 if self.has_kernel:
197 self.kernel.send_signal(signum)
197 self.kernel.send_signal(signum)
198 else:
198 else:
199 raise RuntimeError("Cannot signal kernel. No kernel is running!")
199 raise RuntimeError("Cannot signal kernel. No kernel is running!")
200
200
201 @property
201 @property
202 def is_alive(self):
202 def is_alive(self):
203 """Is the kernel process still running?"""
203 """Is the kernel process still running?"""
204 # FIXME: not using a heartbeat means this method is broken for any
204 # FIXME: not using a heartbeat means this method is broken for any
205 # remote kernel, it's only capable of handling local kernels.
205 # remote kernel, it's only capable of handling local kernels.
206 if self.has_kernel:
206 if self.has_kernel:
207 if self.kernel.poll() is None:
207 if self.kernel.poll() is None:
208 return True
208 return True
209 else:
209 else:
210 return False
210 return False
211 else:
211 else:
212 # We didn't start the kernel with this KernelManager so we don't
212 # We didn't start the kernel with this KernelManager so we don't
213 # know if it is running. We should use a heartbeat for this case.
213 # know if it is running. We should use a heartbeat for this case.
214 return True
214 return True
215
215
216
216
217 def make_starter(up_addr, down_addr, *args, **kwargs):
217 def make_starter(up_addr, down_addr, *args, **kwargs):
218 """entry point function for launching a kernelstarter in a subprocess"""
218 """entry point function for launching a kernelstarter in a subprocess"""
219 loop = ioloop.IOLoop.instance()
219 loop = ioloop.IOLoop.instance()
220 ctx = zmq.Context()
220 ctx = zmq.Context()
221 session = Session()
221 session = Session()
222 upstream = zmqstream.ZMQStream(ctx.socket(zmq.XREQ),loop)
222 upstream = zmqstream.ZMQStream(ctx.socket(zmq.XREQ),loop)
223 upstream.connect(up_addr)
223 upstream.connect(up_addr)
224 downstream = zmqstream.ZMQStream(ctx.socket(zmq.XREQ),loop)
224 downstream = zmqstream.ZMQStream(ctx.socket(zmq.XREQ),loop)
225 downstream.connect(down_addr)
225 downstream.connect(down_addr)
226
226
227 starter = KernelStarter(session, upstream, downstream, *args, **kwargs)
227 starter = KernelStarter(session, upstream, downstream, *args, **kwargs)
228 starter.start()
228 starter.start()
229 loop.start()
229 loop.start()
230 No newline at end of file
230
@@ -1,438 +1,438 b''
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 """
2 """
3 Kernel adapted from kernel.py to use ZMQ Streams
3 Kernel adapted from kernel.py to use ZMQ Streams
4
4
5 Authors:
5 Authors:
6
6
7 * Min RK
7 * Min RK
8 * Brian Granger
8 * Brian Granger
9 * Fernando Perez
9 * Fernando Perez
10 * Evan Patterson
10 * Evan Patterson
11 """
11 """
12 #-----------------------------------------------------------------------------
12 #-----------------------------------------------------------------------------
13 # Copyright (C) 2010-2011 The IPython Development Team
13 # Copyright (C) 2010-2011 The IPython Development Team
14 #
14 #
15 # Distributed under the terms of the BSD License. The full license is in
15 # Distributed under the terms of the BSD License. The full license is in
16 # the file COPYING, distributed as part of this software.
16 # the file COPYING, distributed as part of this software.
17 #-----------------------------------------------------------------------------
17 #-----------------------------------------------------------------------------
18
18
19 #-----------------------------------------------------------------------------
19 #-----------------------------------------------------------------------------
20 # Imports
20 # Imports
21 #-----------------------------------------------------------------------------
21 #-----------------------------------------------------------------------------
22
22
23 # Standard library imports.
23 # Standard library imports.
24 from __future__ import print_function
24 from __future__ import print_function
25
25
26 import sys
26 import sys
27 import time
27 import time
28
28
29 from code import CommandCompiler
29 from code import CommandCompiler
30 from datetime import datetime
30 from datetime import datetime
31 from pprint import pprint
31 from pprint import pprint
32
32
33 # System library imports.
33 # System library imports.
34 import zmq
34 import zmq
35 from zmq.eventloop import ioloop, zmqstream
35 from zmq.eventloop import ioloop, zmqstream
36
36
37 # Local imports.
37 # Local imports.
38 from IPython.utils.traitlets import Instance, List, Int, Dict, Set, Unicode, CBytes
38 from IPython.utils.traitlets import Instance, List, Int, Dict, Set, Unicode, CBytes
39 from IPython.zmq.completer import KernelCompleter
39 from IPython.zmq.completer import KernelCompleter
40
40
41 from IPython.parallel.error import wrap_exception
41 from IPython.parallel.error import wrap_exception
42 from IPython.parallel.factory import SessionFactory
42 from IPython.parallel.factory import SessionFactory
43 from IPython.parallel.util import serialize_object, unpack_apply_message, asbytes
43 from IPython.parallel.util import serialize_object, unpack_apply_message, asbytes
44
44
45 def printer(*args):
45 def printer(*args):
46 pprint(args, stream=sys.__stdout__)
46 pprint(args, stream=sys.__stdout__)
47
47
48
48
49 class _Passer(zmqstream.ZMQStream):
49 class _Passer(zmqstream.ZMQStream):
50 """Empty class that implements `send()` that does nothing.
50 """Empty class that implements `send()` that does nothing.
51
51
52 Subclass ZMQStream for Session typechecking
52 Subclass ZMQStream for Session typechecking
53
53
54 """
54 """
55 def __init__(self, *args, **kwargs):
55 def __init__(self, *args, **kwargs):
56 pass
56 pass
57
57
58 def send(self, *args, **kwargs):
58 def send(self, *args, **kwargs):
59 pass
59 pass
60 send_multipart = send
60 send_multipart = send
61
61
62
62
63 #-----------------------------------------------------------------------------
63 #-----------------------------------------------------------------------------
64 # Main kernel class
64 # Main kernel class
65 #-----------------------------------------------------------------------------
65 #-----------------------------------------------------------------------------
66
66
67 class Kernel(SessionFactory):
67 class Kernel(SessionFactory):
68
68
69 #---------------------------------------------------------------------------
69 #---------------------------------------------------------------------------
70 # Kernel interface
70 # Kernel interface
71 #---------------------------------------------------------------------------
71 #---------------------------------------------------------------------------
72
72
73 # kwargs:
73 # kwargs:
74 exec_lines = List(Unicode, config=True,
74 exec_lines = List(Unicode, config=True,
75 help="List of lines to execute")
75 help="List of lines to execute")
76
76
77 # identities:
77 # identities:
78 int_id = Int(-1)
78 int_id = Int(-1)
79 bident = CBytes()
79 bident = CBytes()
80 ident = Unicode()
80 ident = Unicode()
81 def _ident_changed(self, name, old, new):
81 def _ident_changed(self, name, old, new):
82 self.bident = asbytes(new)
82 self.bident = asbytes(new)
83
83
84 user_ns = Dict(config=True, help="""Set the user's namespace of the Kernel""")
84 user_ns = Dict(config=True, help="""Set the user's namespace of the Kernel""")
85
85
86 control_stream = Instance(zmqstream.ZMQStream)
86 control_stream = Instance(zmqstream.ZMQStream)
87 task_stream = Instance(zmqstream.ZMQStream)
87 task_stream = Instance(zmqstream.ZMQStream)
88 iopub_stream = Instance(zmqstream.ZMQStream)
88 iopub_stream = Instance(zmqstream.ZMQStream)
89 client = Instance('IPython.parallel.Client')
89 client = Instance('IPython.parallel.Client')
90
90
91 # internals
91 # internals
92 shell_streams = List()
92 shell_streams = List()
93 compiler = Instance(CommandCompiler, (), {})
93 compiler = Instance(CommandCompiler, (), {})
94 completer = Instance(KernelCompleter)
94 completer = Instance(KernelCompleter)
95
95
96 aborted = Set()
96 aborted = Set()
97 shell_handlers = Dict()
97 shell_handlers = Dict()
98 control_handlers = Dict()
98 control_handlers = Dict()
99
99
100 def _set_prefix(self):
100 def _set_prefix(self):
101 self.prefix = "engine.%s"%self.int_id
101 self.prefix = "engine.%s"%self.int_id
102
102
103 def _connect_completer(self):
103 def _connect_completer(self):
104 self.completer = KernelCompleter(self.user_ns)
104 self.completer = KernelCompleter(self.user_ns)
105
105
106 def __init__(self, **kwargs):
106 def __init__(self, **kwargs):
107 super(Kernel, self).__init__(**kwargs)
107 super(Kernel, self).__init__(**kwargs)
108 self._set_prefix()
108 self._set_prefix()
109 self._connect_completer()
109 self._connect_completer()
110
110
111 self.on_trait_change(self._set_prefix, 'id')
111 self.on_trait_change(self._set_prefix, 'id')
112 self.on_trait_change(self._connect_completer, 'user_ns')
112 self.on_trait_change(self._connect_completer, 'user_ns')
113
113
114 # Build dict of handlers for message types
114 # Build dict of handlers for message types
115 for msg_type in ['execute_request', 'complete_request', 'apply_request',
115 for msg_type in ['execute_request', 'complete_request', 'apply_request',
116 'clear_request']:
116 'clear_request']:
117 self.shell_handlers[msg_type] = getattr(self, msg_type)
117 self.shell_handlers[msg_type] = getattr(self, msg_type)
118
118
119 for msg_type in ['shutdown_request', 'abort_request']+self.shell_handlers.keys():
119 for msg_type in ['shutdown_request', 'abort_request']+self.shell_handlers.keys():
120 self.control_handlers[msg_type] = getattr(self, msg_type)
120 self.control_handlers[msg_type] = getattr(self, msg_type)
121
121
122 self._initial_exec_lines()
122 self._initial_exec_lines()
123
123
124 def _wrap_exception(self, method=None):
124 def _wrap_exception(self, method=None):
125 e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method=method)
125 e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method=method)
126 content=wrap_exception(e_info)
126 content=wrap_exception(e_info)
127 return content
127 return content
128
128
129 def _initial_exec_lines(self):
129 def _initial_exec_lines(self):
130 s = _Passer()
130 s = _Passer()
131 content = dict(silent=True, user_variable=[],user_expressions=[])
131 content = dict(silent=True, user_variable=[],user_expressions=[])
132 for line in self.exec_lines:
132 for line in self.exec_lines:
133 self.log.debug("executing initialization: %s"%line)
133 self.log.debug("executing initialization: %s"%line)
134 content.update({'code':line})
134 content.update({'code':line})
135 msg = self.session.msg('execute_request', content)
135 msg = self.session.msg('execute_request', content)
136 self.execute_request(s, [], msg)
136 self.execute_request(s, [], msg)
137
137
138
138
139 #-------------------- control handlers -----------------------------
139 #-------------------- control handlers -----------------------------
140 def abort_queues(self):
140 def abort_queues(self):
141 for stream in self.shell_streams:
141 for stream in self.shell_streams:
142 if stream:
142 if stream:
143 self.abort_queue(stream)
143 self.abort_queue(stream)
144
144
145 def abort_queue(self, stream):
145 def abort_queue(self, stream):
146 while True:
146 while True:
147 idents,msg = self.session.recv(stream, zmq.NOBLOCK, content=True)
147 idents,msg = self.session.recv(stream, zmq.NOBLOCK, content=True)
148 if msg is None:
148 if msg is None:
149 return
149 return
150
150
151 self.log.info("Aborting:")
151 self.log.info("Aborting:")
152 self.log.info(str(msg))
152 self.log.info(str(msg))
153 msg_type = msg['msg_type']
153 msg_type = msg['header']['msg_type']
154 reply_type = msg_type.split('_')[0] + '_reply'
154 reply_type = msg_type.split('_')[0] + '_reply'
155 # reply_msg = self.session.msg(reply_type, {'status' : 'aborted'}, msg)
155 # reply_msg = self.session.msg(reply_type, {'status' : 'aborted'}, msg)
156 # self.reply_socket.send(ident,zmq.SNDMORE)
156 # self.reply_socket.send(ident,zmq.SNDMORE)
157 # self.reply_socket.send_json(reply_msg)
157 # self.reply_socket.send_json(reply_msg)
158 reply_msg = self.session.send(stream, reply_type,
158 reply_msg = self.session.send(stream, reply_type,
159 content={'status' : 'aborted'}, parent=msg, ident=idents)
159 content={'status' : 'aborted'}, parent=msg, ident=idents)
160 self.log.debug(str(reply_msg))
160 self.log.debug(str(reply_msg))
161 # We need to wait a bit for requests to come in. This can probably
161 # We need to wait a bit for requests to come in. This can probably
162 # be set shorter for true asynchronous clients.
162 # be set shorter for true asynchronous clients.
163 time.sleep(0.05)
163 time.sleep(0.05)
164
164
165 def abort_request(self, stream, ident, parent):
165 def abort_request(self, stream, ident, parent):
166 """abort a specifig msg by id"""
166 """abort a specifig msg by id"""
167 msg_ids = parent['content'].get('msg_ids', None)
167 msg_ids = parent['content'].get('msg_ids', None)
168 if isinstance(msg_ids, basestring):
168 if isinstance(msg_ids, basestring):
169 msg_ids = [msg_ids]
169 msg_ids = [msg_ids]
170 if not msg_ids:
170 if not msg_ids:
171 self.abort_queues()
171 self.abort_queues()
172 for mid in msg_ids:
172 for mid in msg_ids:
173 self.aborted.add(str(mid))
173 self.aborted.add(str(mid))
174
174
175 content = dict(status='ok')
175 content = dict(status='ok')
176 reply_msg = self.session.send(stream, 'abort_reply', content=content,
176 reply_msg = self.session.send(stream, 'abort_reply', content=content,
177 parent=parent, ident=ident)
177 parent=parent, ident=ident)
178 self.log.debug(str(reply_msg))
178 self.log.debug(str(reply_msg))
179
179
180 def shutdown_request(self, stream, ident, parent):
180 def shutdown_request(self, stream, ident, parent):
181 """kill ourself. This should really be handled in an external process"""
181 """kill ourself. This should really be handled in an external process"""
182 try:
182 try:
183 self.abort_queues()
183 self.abort_queues()
184 except:
184 except:
185 content = self._wrap_exception('shutdown')
185 content = self._wrap_exception('shutdown')
186 else:
186 else:
187 content = dict(parent['content'])
187 content = dict(parent['content'])
188 content['status'] = 'ok'
188 content['status'] = 'ok'
189 msg = self.session.send(stream, 'shutdown_reply',
189 msg = self.session.send(stream, 'shutdown_reply',
190 content=content, parent=parent, ident=ident)
190 content=content, parent=parent, ident=ident)
191 self.log.debug(str(msg))
191 self.log.debug(str(msg))
192 dc = ioloop.DelayedCallback(lambda : sys.exit(0), 1000, self.loop)
192 dc = ioloop.DelayedCallback(lambda : sys.exit(0), 1000, self.loop)
193 dc.start()
193 dc.start()
194
194
195 def dispatch_control(self, msg):
195 def dispatch_control(self, msg):
196 idents,msg = self.session.feed_identities(msg, copy=False)
196 idents,msg = self.session.feed_identities(msg, copy=False)
197 try:
197 try:
198 msg = self.session.unpack_message(msg, content=True, copy=False)
198 msg = self.session.unpack_message(msg, content=True, copy=False)
199 except:
199 except:
200 self.log.error("Invalid Message", exc_info=True)
200 self.log.error("Invalid Message", exc_info=True)
201 return
201 return
202 else:
202 else:
203 self.log.debug("Control received, %s", msg)
203 self.log.debug("Control received, %s", msg)
204
204
205 header = msg['header']
205 header = msg['header']
206 msg_id = header['msg_id']
206 msg_id = header['msg_id']
207
207
208 handler = self.control_handlers.get(msg['msg_type'], None)
208 handler = self.control_handlers.get(msg['header']['msg_type'], None)
209 if handler is None:
209 if handler is None:
210 self.log.error("UNKNOWN CONTROL MESSAGE TYPE: %r"%msg['msg_type'])
210 self.log.error("UNKNOWN CONTROL MESSAGE TYPE: %r"%msg['header']['msg_type'])
211 else:
211 else:
212 handler(self.control_stream, idents, msg)
212 handler(self.control_stream, idents, msg)
213
213
214
214
215 #-------------------- queue helpers ------------------------------
215 #-------------------- queue helpers ------------------------------
216
216
217 def check_dependencies(self, dependencies):
217 def check_dependencies(self, dependencies):
218 if not dependencies:
218 if not dependencies:
219 return True
219 return True
220 if len(dependencies) == 2 and dependencies[0] in 'any all'.split():
220 if len(dependencies) == 2 and dependencies[0] in 'any all'.split():
221 anyorall = dependencies[0]
221 anyorall = dependencies[0]
222 dependencies = dependencies[1]
222 dependencies = dependencies[1]
223 else:
223 else:
224 anyorall = 'all'
224 anyorall = 'all'
225 results = self.client.get_results(dependencies,status_only=True)
225 results = self.client.get_results(dependencies,status_only=True)
226 if results['status'] != 'ok':
226 if results['status'] != 'ok':
227 return False
227 return False
228
228
229 if anyorall == 'any':
229 if anyorall == 'any':
230 if not results['completed']:
230 if not results['completed']:
231 return False
231 return False
232 else:
232 else:
233 if results['pending']:
233 if results['pending']:
234 return False
234 return False
235
235
236 return True
236 return True
237
237
238 def check_aborted(self, msg_id):
238 def check_aborted(self, msg_id):
239 return msg_id in self.aborted
239 return msg_id in self.aborted
240
240
241 #-------------------- queue handlers -----------------------------
241 #-------------------- queue handlers -----------------------------
242
242
243 def clear_request(self, stream, idents, parent):
243 def clear_request(self, stream, idents, parent):
244 """Clear our namespace."""
244 """Clear our namespace."""
245 self.user_ns = {}
245 self.user_ns = {}
246 msg = self.session.send(stream, 'clear_reply', ident=idents, parent=parent,
246 msg = self.session.send(stream, 'clear_reply', ident=idents, parent=parent,
247 content = dict(status='ok'))
247 content = dict(status='ok'))
248 self._initial_exec_lines()
248 self._initial_exec_lines()
249
249
250 def execute_request(self, stream, ident, parent):
250 def execute_request(self, stream, ident, parent):
251 self.log.debug('execute request %s'%parent)
251 self.log.debug('execute request %s'%parent)
252 try:
252 try:
253 code = parent[u'content'][u'code']
253 code = parent[u'content'][u'code']
254 except:
254 except:
255 self.log.error("Got bad msg: %s"%parent, exc_info=True)
255 self.log.error("Got bad msg: %s"%parent, exc_info=True)
256 return
256 return
257 self.session.send(self.iopub_stream, u'pyin', {u'code':code},parent=parent,
257 self.session.send(self.iopub_stream, u'pyin', {u'code':code},parent=parent,
258 ident=asbytes('%s.pyin'%self.prefix))
258 ident=asbytes('%s.pyin'%self.prefix))
259 started = datetime.now()
259 started = datetime.now()
260 try:
260 try:
261 comp_code = self.compiler(code, '<zmq-kernel>')
261 comp_code = self.compiler(code, '<zmq-kernel>')
262 # allow for not overriding displayhook
262 # allow for not overriding displayhook
263 if hasattr(sys.displayhook, 'set_parent'):
263 if hasattr(sys.displayhook, 'set_parent'):
264 sys.displayhook.set_parent(parent)
264 sys.displayhook.set_parent(parent)
265 sys.stdout.set_parent(parent)
265 sys.stdout.set_parent(parent)
266 sys.stderr.set_parent(parent)
266 sys.stderr.set_parent(parent)
267 exec comp_code in self.user_ns, self.user_ns
267 exec comp_code in self.user_ns, self.user_ns
268 except:
268 except:
269 exc_content = self._wrap_exception('execute')
269 exc_content = self._wrap_exception('execute')
270 # exc_msg = self.session.msg(u'pyerr', exc_content, parent)
270 # exc_msg = self.session.msg(u'pyerr', exc_content, parent)
271 self.session.send(self.iopub_stream, u'pyerr', exc_content, parent=parent,
271 self.session.send(self.iopub_stream, u'pyerr', exc_content, parent=parent,
272 ident=asbytes('%s.pyerr'%self.prefix))
272 ident=asbytes('%s.pyerr'%self.prefix))
273 reply_content = exc_content
273 reply_content = exc_content
274 else:
274 else:
275 reply_content = {'status' : 'ok'}
275 reply_content = {'status' : 'ok'}
276
276
277 reply_msg = self.session.send(stream, u'execute_reply', reply_content, parent=parent,
277 reply_msg = self.session.send(stream, u'execute_reply', reply_content, parent=parent,
278 ident=ident, subheader = dict(started=started))
278 ident=ident, subheader = dict(started=started))
279 self.log.debug(str(reply_msg))
279 self.log.debug(str(reply_msg))
280 if reply_msg['content']['status'] == u'error':
280 if reply_msg['content']['status'] == u'error':
281 self.abort_queues()
281 self.abort_queues()
282
282
283 def complete_request(self, stream, ident, parent):
283 def complete_request(self, stream, ident, parent):
284 matches = {'matches' : self.complete(parent),
284 matches = {'matches' : self.complete(parent),
285 'status' : 'ok'}
285 'status' : 'ok'}
286 completion_msg = self.session.send(stream, 'complete_reply',
286 completion_msg = self.session.send(stream, 'complete_reply',
287 matches, parent, ident)
287 matches, parent, ident)
288 # print >> sys.__stdout__, completion_msg
288 # print >> sys.__stdout__, completion_msg
289
289
290 def complete(self, msg):
290 def complete(self, msg):
291 return self.completer.complete(msg.content.line, msg.content.text)
291 return self.completer.complete(msg.content.line, msg.content.text)
292
292
293 def apply_request(self, stream, ident, parent):
293 def apply_request(self, stream, ident, parent):
294 # flush previous reply, so this request won't block it
294 # flush previous reply, so this request won't block it
295 stream.flush(zmq.POLLOUT)
295 stream.flush(zmq.POLLOUT)
296 try:
296 try:
297 content = parent[u'content']
297 content = parent[u'content']
298 bufs = parent[u'buffers']
298 bufs = parent[u'buffers']
299 msg_id = parent['header']['msg_id']
299 msg_id = parent['header']['msg_id']
300 # bound = parent['header'].get('bound', False)
300 # bound = parent['header'].get('bound', False)
301 except:
301 except:
302 self.log.error("Got bad msg: %s"%parent, exc_info=True)
302 self.log.error("Got bad msg: %s"%parent, exc_info=True)
303 return
303 return
304 # pyin_msg = self.session.msg(u'pyin',{u'code':code}, parent=parent)
304 # pyin_msg = self.session.msg(u'pyin',{u'code':code}, parent=parent)
305 # self.iopub_stream.send(pyin_msg)
305 # self.iopub_stream.send(pyin_msg)
306 # self.session.send(self.iopub_stream, u'pyin', {u'code':code},parent=parent)
306 # self.session.send(self.iopub_stream, u'pyin', {u'code':code},parent=parent)
307 sub = {'dependencies_met' : True, 'engine' : self.ident,
307 sub = {'dependencies_met' : True, 'engine' : self.ident,
308 'started': datetime.now()}
308 'started': datetime.now()}
309 try:
309 try:
310 # allow for not overriding displayhook
310 # allow for not overriding displayhook
311 if hasattr(sys.displayhook, 'set_parent'):
311 if hasattr(sys.displayhook, 'set_parent'):
312 sys.displayhook.set_parent(parent)
312 sys.displayhook.set_parent(parent)
313 sys.stdout.set_parent(parent)
313 sys.stdout.set_parent(parent)
314 sys.stderr.set_parent(parent)
314 sys.stderr.set_parent(parent)
315 # exec "f(*args,**kwargs)" in self.user_ns, self.user_ns
315 # exec "f(*args,**kwargs)" in self.user_ns, self.user_ns
316 working = self.user_ns
316 working = self.user_ns
317 # suffix =
317 # suffix =
318 prefix = "_"+str(msg_id).replace("-","")+"_"
318 prefix = "_"+str(msg_id).replace("-","")+"_"
319
319
320 f,args,kwargs = unpack_apply_message(bufs, working, copy=False)
320 f,args,kwargs = unpack_apply_message(bufs, working, copy=False)
321 # if bound:
321 # if bound:
322 # bound_ns = Namespace(working)
322 # bound_ns = Namespace(working)
323 # args = [bound_ns]+list(args)
323 # args = [bound_ns]+list(args)
324
324
325 fname = getattr(f, '__name__', 'f')
325 fname = getattr(f, '__name__', 'f')
326
326
327 fname = prefix+"f"
327 fname = prefix+"f"
328 argname = prefix+"args"
328 argname = prefix+"args"
329 kwargname = prefix+"kwargs"
329 kwargname = prefix+"kwargs"
330 resultname = prefix+"result"
330 resultname = prefix+"result"
331
331
332 ns = { fname : f, argname : args, kwargname : kwargs , resultname : None }
332 ns = { fname : f, argname : args, kwargname : kwargs , resultname : None }
333 # print ns
333 # print ns
334 working.update(ns)
334 working.update(ns)
335 code = "%s=%s(*%s,**%s)"%(resultname, fname, argname, kwargname)
335 code = "%s=%s(*%s,**%s)"%(resultname, fname, argname, kwargname)
336 try:
336 try:
337 exec code in working,working
337 exec code in working,working
338 result = working.get(resultname)
338 result = working.get(resultname)
339 finally:
339 finally:
340 for key in ns.iterkeys():
340 for key in ns.iterkeys():
341 working.pop(key)
341 working.pop(key)
342 # if bound:
342 # if bound:
343 # working.update(bound_ns)
343 # working.update(bound_ns)
344
344
345 packed_result,buf = serialize_object(result)
345 packed_result,buf = serialize_object(result)
346 result_buf = [packed_result]+buf
346 result_buf = [packed_result]+buf
347 except:
347 except:
348 exc_content = self._wrap_exception('apply')
348 exc_content = self._wrap_exception('apply')
349 # exc_msg = self.session.msg(u'pyerr', exc_content, parent)
349 # exc_msg = self.session.msg(u'pyerr', exc_content, parent)
350 self.session.send(self.iopub_stream, u'pyerr', exc_content, parent=parent,
350 self.session.send(self.iopub_stream, u'pyerr', exc_content, parent=parent,
351 ident=asbytes('%s.pyerr'%self.prefix))
351 ident=asbytes('%s.pyerr'%self.prefix))
352 reply_content = exc_content
352 reply_content = exc_content
353 result_buf = []
353 result_buf = []
354
354
355 if exc_content['ename'] == 'UnmetDependency':
355 if exc_content['ename'] == 'UnmetDependency':
356 sub['dependencies_met'] = False
356 sub['dependencies_met'] = False
357 else:
357 else:
358 reply_content = {'status' : 'ok'}
358 reply_content = {'status' : 'ok'}
359
359
360 # put 'ok'/'error' status in header, for scheduler introspection:
360 # put 'ok'/'error' status in header, for scheduler introspection:
361 sub['status'] = reply_content['status']
361 sub['status'] = reply_content['status']
362
362
363 reply_msg = self.session.send(stream, u'apply_reply', reply_content,
363 reply_msg = self.session.send(stream, u'apply_reply', reply_content,
364 parent=parent, ident=ident,buffers=result_buf, subheader=sub)
364 parent=parent, ident=ident,buffers=result_buf, subheader=sub)
365
365
366 # flush i/o
366 # flush i/o
367 # should this be before reply_msg is sent, like in the single-kernel code,
367 # should this be before reply_msg is sent, like in the single-kernel code,
368 # or should nothing get in the way of real results?
368 # or should nothing get in the way of real results?
369 sys.stdout.flush()
369 sys.stdout.flush()
370 sys.stderr.flush()
370 sys.stderr.flush()
371
371
372 def dispatch_queue(self, stream, msg):
372 def dispatch_queue(self, stream, msg):
373 self.control_stream.flush()
373 self.control_stream.flush()
374 idents,msg = self.session.feed_identities(msg, copy=False)
374 idents,msg = self.session.feed_identities(msg, copy=False)
375 try:
375 try:
376 msg = self.session.unpack_message(msg, content=True, copy=False)
376 msg = self.session.unpack_message(msg, content=True, copy=False)
377 except:
377 except:
378 self.log.error("Invalid Message", exc_info=True)
378 self.log.error("Invalid Message", exc_info=True)
379 return
379 return
380 else:
380 else:
381 self.log.debug("Message received, %s", msg)
381 self.log.debug("Message received, %s", msg)
382
382
383
383
384 header = msg['header']
384 header = msg['header']
385 msg_id = header['msg_id']
385 msg_id = header['msg_id']
386 if self.check_aborted(msg_id):
386 if self.check_aborted(msg_id):
387 self.aborted.remove(msg_id)
387 self.aborted.remove(msg_id)
388 # is it safe to assume a msg_id will not be resubmitted?
388 # is it safe to assume a msg_id will not be resubmitted?
389 reply_type = msg['msg_type'].split('_')[0] + '_reply'
389 reply_type = msg['header']['msg_type'].split('_')[0] + '_reply'
390 status = {'status' : 'aborted'}
390 status = {'status' : 'aborted'}
391 reply_msg = self.session.send(stream, reply_type, subheader=status,
391 reply_msg = self.session.send(stream, reply_type, subheader=status,
392 content=status, parent=msg, ident=idents)
392 content=status, parent=msg, ident=idents)
393 return
393 return
394 handler = self.shell_handlers.get(msg['msg_type'], None)
394 handler = self.shell_handlers.get(msg['header']['msg_type'], None)
395 if handler is None:
395 if handler is None:
396 self.log.error("UNKNOWN MESSAGE TYPE: %r"%msg['msg_type'])
396 self.log.error("UNKNOWN MESSAGE TYPE: %r"%msg['header']['msg_type'])
397 else:
397 else:
398 handler(stream, idents, msg)
398 handler(stream, idents, msg)
399
399
400 def start(self):
400 def start(self):
401 #### stream mode:
401 #### stream mode:
402 if self.control_stream:
402 if self.control_stream:
403 self.control_stream.on_recv(self.dispatch_control, copy=False)
403 self.control_stream.on_recv(self.dispatch_control, copy=False)
404 self.control_stream.on_err(printer)
404 self.control_stream.on_err(printer)
405
405
406 def make_dispatcher(stream):
406 def make_dispatcher(stream):
407 def dispatcher(msg):
407 def dispatcher(msg):
408 return self.dispatch_queue(stream, msg)
408 return self.dispatch_queue(stream, msg)
409 return dispatcher
409 return dispatcher
410
410
411 for s in self.shell_streams:
411 for s in self.shell_streams:
412 s.on_recv(make_dispatcher(s), copy=False)
412 s.on_recv(make_dispatcher(s), copy=False)
413 s.on_err(printer)
413 s.on_err(printer)
414
414
415 if self.iopub_stream:
415 if self.iopub_stream:
416 self.iopub_stream.on_err(printer)
416 self.iopub_stream.on_err(printer)
417
417
418 #### while True mode:
418 #### while True mode:
419 # while True:
419 # while True:
420 # idle = True
420 # idle = True
421 # try:
421 # try:
422 # msg = self.shell_stream.socket.recv_multipart(
422 # msg = self.shell_stream.socket.recv_multipart(
423 # zmq.NOBLOCK, copy=False)
423 # zmq.NOBLOCK, copy=False)
424 # except zmq.ZMQError, e:
424 # except zmq.ZMQError, e:
425 # if e.errno != zmq.EAGAIN:
425 # if e.errno != zmq.EAGAIN:
426 # raise e
426 # raise e
427 # else:
427 # else:
428 # idle=False
428 # idle=False
429 # self.dispatch_queue(self.shell_stream, msg)
429 # self.dispatch_queue(self.shell_stream, msg)
430 #
430 #
431 # if not self.task_stream.empty():
431 # if not self.task_stream.empty():
432 # idle=False
432 # idle=False
433 # msg = self.task_stream.recv_multipart()
433 # msg = self.task_stream.recv_multipart()
434 # self.dispatch_queue(self.task_stream, msg)
434 # self.dispatch_queue(self.task_stream, msg)
435 # if idle:
435 # if idle:
436 # # don't busywait
436 # # don't busywait
437 # time.sleep(1e-3)
437 # time.sleep(1e-3)
438
438
@@ -1,179 +1,179 b''
1 """Tests for db backends
1 """Tests for db backends
2
2
3 Authors:
3 Authors:
4
4
5 * Min RK
5 * Min RK
6 """
6 """
7
7
8 #-------------------------------------------------------------------------------
8 #-------------------------------------------------------------------------------
9 # Copyright (C) 2011 The IPython Development Team
9 # Copyright (C) 2011 The IPython Development Team
10 #
10 #
11 # Distributed under the terms of the BSD License. The full license is in
11 # Distributed under the terms of the BSD License. The full license is in
12 # the file COPYING, distributed as part of this software.
12 # the file COPYING, distributed as part of this software.
13 #-------------------------------------------------------------------------------
13 #-------------------------------------------------------------------------------
14
14
15 #-------------------------------------------------------------------------------
15 #-------------------------------------------------------------------------------
16 # Imports
16 # Imports
17 #-------------------------------------------------------------------------------
17 #-------------------------------------------------------------------------------
18
18
19 from __future__ import division
19 from __future__ import division
20
20
21 import tempfile
21 import tempfile
22 import time
22 import time
23
23
24 from datetime import datetime, timedelta
24 from datetime import datetime, timedelta
25 from unittest import TestCase
25 from unittest import TestCase
26
26
27 from nose import SkipTest
27 from nose import SkipTest
28
28
29 from IPython.parallel import error
29 from IPython.parallel import error
30 from IPython.parallel.controller.dictdb import DictDB
30 from IPython.parallel.controller.dictdb import DictDB
31 from IPython.parallel.controller.sqlitedb import SQLiteDB
31 from IPython.parallel.controller.sqlitedb import SQLiteDB
32 from IPython.parallel.controller.hub import init_record, empty_record
32 from IPython.parallel.controller.hub import init_record, empty_record
33
33
34 from IPython.zmq.session import Session
34 from IPython.zmq.session import Session
35
35
36
36
37 #-------------------------------------------------------------------------------
37 #-------------------------------------------------------------------------------
38 # TestCases
38 # TestCases
39 #-------------------------------------------------------------------------------
39 #-------------------------------------------------------------------------------
40
40
41 class TestDictBackend(TestCase):
41 class TestDictBackend(TestCase):
42 def setUp(self):
42 def setUp(self):
43 self.session = Session()
43 self.session = Session()
44 self.db = self.create_db()
44 self.db = self.create_db()
45 self.load_records(16)
45 self.load_records(16)
46
46
47 def create_db(self):
47 def create_db(self):
48 return DictDB()
48 return DictDB()
49
49
50 def load_records(self, n=1):
50 def load_records(self, n=1):
51 """load n records for testing"""
51 """load n records for testing"""
52 #sleep 1/10 s, to ensure timestamp is different to previous calls
52 #sleep 1/10 s, to ensure timestamp is different to previous calls
53 time.sleep(0.1)
53 time.sleep(0.1)
54 msg_ids = []
54 msg_ids = []
55 for i in range(n):
55 for i in range(n):
56 msg = self.session.msg('apply_request', content=dict(a=5))
56 msg = self.session.msg('apply_request', content=dict(a=5))
57 msg['buffers'] = []
57 msg['buffers'] = []
58 rec = init_record(msg)
58 rec = init_record(msg)
59 msg_ids.append(msg['msg_id'])
59 msg_ids.append(msg['header']['msg_id'])
60 self.db.add_record(msg['msg_id'], rec)
60 self.db.add_record(msg['header']['msg_id'], rec)
61 return msg_ids
61 return msg_ids
62
62
63 def test_add_record(self):
63 def test_add_record(self):
64 before = self.db.get_history()
64 before = self.db.get_history()
65 self.load_records(5)
65 self.load_records(5)
66 after = self.db.get_history()
66 after = self.db.get_history()
67 self.assertEquals(len(after), len(before)+5)
67 self.assertEquals(len(after), len(before)+5)
68 self.assertEquals(after[:-5],before)
68 self.assertEquals(after[:-5],before)
69
69
70 def test_drop_record(self):
70 def test_drop_record(self):
71 msg_id = self.load_records()[-1]
71 msg_id = self.load_records()[-1]
72 rec = self.db.get_record(msg_id)
72 rec = self.db.get_record(msg_id)
73 self.db.drop_record(msg_id)
73 self.db.drop_record(msg_id)
74 self.assertRaises(KeyError,self.db.get_record, msg_id)
74 self.assertRaises(KeyError,self.db.get_record, msg_id)
75
75
76 def _round_to_millisecond(self, dt):
76 def _round_to_millisecond(self, dt):
77 """necessary because mongodb rounds microseconds"""
77 """necessary because mongodb rounds microseconds"""
78 micro = dt.microsecond
78 micro = dt.microsecond
79 extra = int(str(micro)[-3:])
79 extra = int(str(micro)[-3:])
80 return dt - timedelta(microseconds=extra)
80 return dt - timedelta(microseconds=extra)
81
81
82 def test_update_record(self):
82 def test_update_record(self):
83 now = self._round_to_millisecond(datetime.now())
83 now = self._round_to_millisecond(datetime.now())
84 #
84 #
85 msg_id = self.db.get_history()[-1]
85 msg_id = self.db.get_history()[-1]
86 rec1 = self.db.get_record(msg_id)
86 rec1 = self.db.get_record(msg_id)
87 data = {'stdout': 'hello there', 'completed' : now}
87 data = {'stdout': 'hello there', 'completed' : now}
88 self.db.update_record(msg_id, data)
88 self.db.update_record(msg_id, data)
89 rec2 = self.db.get_record(msg_id)
89 rec2 = self.db.get_record(msg_id)
90 self.assertEquals(rec2['stdout'], 'hello there')
90 self.assertEquals(rec2['stdout'], 'hello there')
91 self.assertEquals(rec2['completed'], now)
91 self.assertEquals(rec2['completed'], now)
92 rec1.update(data)
92 rec1.update(data)
93 self.assertEquals(rec1, rec2)
93 self.assertEquals(rec1, rec2)
94
94
95 # def test_update_record_bad(self):
95 # def test_update_record_bad(self):
96 # """test updating nonexistant records"""
96 # """test updating nonexistant records"""
97 # msg_id = str(uuid.uuid4())
97 # msg_id = str(uuid.uuid4())
98 # data = {'stdout': 'hello there'}
98 # data = {'stdout': 'hello there'}
99 # self.assertRaises(KeyError, self.db.update_record, msg_id, data)
99 # self.assertRaises(KeyError, self.db.update_record, msg_id, data)
100
100
101 def test_find_records_dt(self):
101 def test_find_records_dt(self):
102 """test finding records by date"""
102 """test finding records by date"""
103 hist = self.db.get_history()
103 hist = self.db.get_history()
104 middle = self.db.get_record(hist[len(hist)//2])
104 middle = self.db.get_record(hist[len(hist)//2])
105 tic = middle['submitted']
105 tic = middle['submitted']
106 before = self.db.find_records({'submitted' : {'$lt' : tic}})
106 before = self.db.find_records({'submitted' : {'$lt' : tic}})
107 after = self.db.find_records({'submitted' : {'$gte' : tic}})
107 after = self.db.find_records({'submitted' : {'$gte' : tic}})
108 self.assertEquals(len(before)+len(after),len(hist))
108 self.assertEquals(len(before)+len(after),len(hist))
109 for b in before:
109 for b in before:
110 self.assertTrue(b['submitted'] < tic)
110 self.assertTrue(b['submitted'] < tic)
111 for a in after:
111 for a in after:
112 self.assertTrue(a['submitted'] >= tic)
112 self.assertTrue(a['submitted'] >= tic)
113 same = self.db.find_records({'submitted' : tic})
113 same = self.db.find_records({'submitted' : tic})
114 for s in same:
114 for s in same:
115 self.assertTrue(s['submitted'] == tic)
115 self.assertTrue(s['submitted'] == tic)
116
116
117 def test_find_records_keys(self):
117 def test_find_records_keys(self):
118 """test extracting subset of record keys"""
118 """test extracting subset of record keys"""
119 found = self.db.find_records({'msg_id': {'$ne' : ''}},keys=['submitted', 'completed'])
119 found = self.db.find_records({'msg_id': {'$ne' : ''}},keys=['submitted', 'completed'])
120 for rec in found:
120 for rec in found:
121 self.assertEquals(set(rec.keys()), set(['msg_id', 'submitted', 'completed']))
121 self.assertEquals(set(rec.keys()), set(['msg_id', 'submitted', 'completed']))
122
122
123 def test_find_records_msg_id(self):
123 def test_find_records_msg_id(self):
124 """ensure msg_id is always in found records"""
124 """ensure msg_id is always in found records"""
125 found = self.db.find_records({'msg_id': {'$ne' : ''}},keys=['submitted', 'completed'])
125 found = self.db.find_records({'msg_id': {'$ne' : ''}},keys=['submitted', 'completed'])
126 for rec in found:
126 for rec in found:
127 self.assertTrue('msg_id' in rec.keys())
127 self.assertTrue('msg_id' in rec.keys())
128 found = self.db.find_records({'msg_id': {'$ne' : ''}},keys=['submitted'])
128 found = self.db.find_records({'msg_id': {'$ne' : ''}},keys=['submitted'])
129 for rec in found:
129 for rec in found:
130 self.assertTrue('msg_id' in rec.keys())
130 self.assertTrue('msg_id' in rec.keys())
131 found = self.db.find_records({'msg_id': {'$ne' : ''}},keys=['msg_id'])
131 found = self.db.find_records({'msg_id': {'$ne' : ''}},keys=['msg_id'])
132 for rec in found:
132 for rec in found:
133 self.assertTrue('msg_id' in rec.keys())
133 self.assertTrue('msg_id' in rec.keys())
134
134
135 def test_find_records_in(self):
135 def test_find_records_in(self):
136 """test finding records with '$in','$nin' operators"""
136 """test finding records with '$in','$nin' operators"""
137 hist = self.db.get_history()
137 hist = self.db.get_history()
138 even = hist[::2]
138 even = hist[::2]
139 odd = hist[1::2]
139 odd = hist[1::2]
140 recs = self.db.find_records({ 'msg_id' : {'$in' : even}})
140 recs = self.db.find_records({ 'msg_id' : {'$in' : even}})
141 found = [ r['msg_id'] for r in recs ]
141 found = [ r['msg_id'] for r in recs ]
142 self.assertEquals(set(even), set(found))
142 self.assertEquals(set(even), set(found))
143 recs = self.db.find_records({ 'msg_id' : {'$nin' : even}})
143 recs = self.db.find_records({ 'msg_id' : {'$nin' : even}})
144 found = [ r['msg_id'] for r in recs ]
144 found = [ r['msg_id'] for r in recs ]
145 self.assertEquals(set(odd), set(found))
145 self.assertEquals(set(odd), set(found))
146
146
147 def test_get_history(self):
147 def test_get_history(self):
148 msg_ids = self.db.get_history()
148 msg_ids = self.db.get_history()
149 latest = datetime(1984,1,1)
149 latest = datetime(1984,1,1)
150 for msg_id in msg_ids:
150 for msg_id in msg_ids:
151 rec = self.db.get_record(msg_id)
151 rec = self.db.get_record(msg_id)
152 newt = rec['submitted']
152 newt = rec['submitted']
153 self.assertTrue(newt >= latest)
153 self.assertTrue(newt >= latest)
154 latest = newt
154 latest = newt
155 msg_id = self.load_records(1)[-1]
155 msg_id = self.load_records(1)[-1]
156 self.assertEquals(self.db.get_history()[-1],msg_id)
156 self.assertEquals(self.db.get_history()[-1],msg_id)
157
157
158 def test_datetime(self):
158 def test_datetime(self):
159 """get/set timestamps with datetime objects"""
159 """get/set timestamps with datetime objects"""
160 msg_id = self.db.get_history()[-1]
160 msg_id = self.db.get_history()[-1]
161 rec = self.db.get_record(msg_id)
161 rec = self.db.get_record(msg_id)
162 self.assertTrue(isinstance(rec['submitted'], datetime))
162 self.assertTrue(isinstance(rec['submitted'], datetime))
163 self.db.update_record(msg_id, dict(completed=datetime.now()))
163 self.db.update_record(msg_id, dict(completed=datetime.now()))
164 rec = self.db.get_record(msg_id)
164 rec = self.db.get_record(msg_id)
165 self.assertTrue(isinstance(rec['completed'], datetime))
165 self.assertTrue(isinstance(rec['completed'], datetime))
166
166
167 def test_drop_matching(self):
167 def test_drop_matching(self):
168 msg_ids = self.load_records(10)
168 msg_ids = self.load_records(10)
169 query = {'msg_id' : {'$in':msg_ids}}
169 query = {'msg_id' : {'$in':msg_ids}}
170 self.db.drop_matching_records(query)
170 self.db.drop_matching_records(query)
171 recs = self.db.find_records(query)
171 recs = self.db.find_records(query)
172 self.assertEquals(len(recs), 0)
172 self.assertEquals(len(recs), 0)
173
173
174 class TestSQLiteBackend(TestDictBackend):
174 class TestSQLiteBackend(TestDictBackend):
175 def create_db(self):
175 def create_db(self):
176 return SQLiteDB(location=tempfile.gettempdir())
176 return SQLiteDB(location=tempfile.gettempdir())
177
177
178 def tearDown(self):
178 def tearDown(self):
179 self.db._db.close()
179 self.db._db.close()
@@ -1,678 +1,678 b''
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 """A simple interactive kernel that talks to a frontend over 0MQ.
2 """A simple interactive kernel that talks to a frontend over 0MQ.
3
3
4 Things to do:
4 Things to do:
5
5
6 * Implement `set_parent` logic. Right before doing exec, the Kernel should
6 * Implement `set_parent` logic. Right before doing exec, the Kernel should
7 call set_parent on all the PUB objects with the message about to be executed.
7 call set_parent on all the PUB objects with the message about to be executed.
8 * Implement random port and security key logic.
8 * Implement random port and security key logic.
9 * Implement control messages.
9 * Implement control messages.
10 * Implement event loop and poll version.
10 * Implement event loop and poll version.
11 """
11 """
12
12
13 #-----------------------------------------------------------------------------
13 #-----------------------------------------------------------------------------
14 # Imports
14 # Imports
15 #-----------------------------------------------------------------------------
15 #-----------------------------------------------------------------------------
16 from __future__ import print_function
16 from __future__ import print_function
17
17
18 # Standard library imports.
18 # Standard library imports.
19 import __builtin__
19 import __builtin__
20 import atexit
20 import atexit
21 import sys
21 import sys
22 import time
22 import time
23 import traceback
23 import traceback
24 import logging
24 import logging
25 # System library imports.
25 # System library imports.
26 import zmq
26 import zmq
27
27
28 # Local imports.
28 # Local imports.
29 from IPython.config.configurable import Configurable
29 from IPython.config.configurable import Configurable
30 from IPython.config.application import boolean_flag
30 from IPython.config.application import boolean_flag
31 from IPython.core.application import ProfileDir
31 from IPython.core.application import ProfileDir
32 from IPython.core.shellapp import (
32 from IPython.core.shellapp import (
33 InteractiveShellApp, shell_flags, shell_aliases
33 InteractiveShellApp, shell_flags, shell_aliases
34 )
34 )
35 from IPython.utils import io
35 from IPython.utils import io
36 from IPython.utils.jsonutil import json_clean
36 from IPython.utils.jsonutil import json_clean
37 from IPython.lib import pylabtools
37 from IPython.lib import pylabtools
38 from IPython.utils.traitlets import (
38 from IPython.utils.traitlets import (
39 List, Instance, Float, Dict, Bool, Int, Unicode, CaselessStrEnum
39 List, Instance, Float, Dict, Bool, Int, Unicode, CaselessStrEnum
40 )
40 )
41
41
42 from entry_point import base_launch_kernel
42 from entry_point import base_launch_kernel
43 from kernelapp import KernelApp, kernel_flags, kernel_aliases
43 from kernelapp import KernelApp, kernel_flags, kernel_aliases
44 from iostream import OutStream
44 from iostream import OutStream
45 from session import Session, Message
45 from session import Session, Message
46 from zmqshell import ZMQInteractiveShell
46 from zmqshell import ZMQInteractiveShell
47
47
48
48
49
49
50 #-----------------------------------------------------------------------------
50 #-----------------------------------------------------------------------------
51 # Main kernel class
51 # Main kernel class
52 #-----------------------------------------------------------------------------
52 #-----------------------------------------------------------------------------
53
53
54 class Kernel(Configurable):
54 class Kernel(Configurable):
55
55
56 #---------------------------------------------------------------------------
56 #---------------------------------------------------------------------------
57 # Kernel interface
57 # Kernel interface
58 #---------------------------------------------------------------------------
58 #---------------------------------------------------------------------------
59
59
60 shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
60 shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
61 session = Instance(Session)
61 session = Instance(Session)
62 shell_socket = Instance('zmq.Socket')
62 shell_socket = Instance('zmq.Socket')
63 iopub_socket = Instance('zmq.Socket')
63 iopub_socket = Instance('zmq.Socket')
64 stdin_socket = Instance('zmq.Socket')
64 stdin_socket = Instance('zmq.Socket')
65 log = Instance(logging.Logger)
65 log = Instance(logging.Logger)
66
66
67 # Private interface
67 # Private interface
68
68
69 # Time to sleep after flushing the stdout/err buffers in each execute
69 # Time to sleep after flushing the stdout/err buffers in each execute
70 # cycle. While this introduces a hard limit on the minimal latency of the
70 # cycle. While this introduces a hard limit on the minimal latency of the
71 # execute cycle, it helps prevent output synchronization problems for
71 # execute cycle, it helps prevent output synchronization problems for
72 # clients.
72 # clients.
73 # Units are in seconds. The minimum zmq latency on local host is probably
73 # Units are in seconds. The minimum zmq latency on local host is probably
74 # ~150 microseconds, set this to 500us for now. We may need to increase it
74 # ~150 microseconds, set this to 500us for now. We may need to increase it
75 # a little if it's not enough after more interactive testing.
75 # a little if it's not enough after more interactive testing.
76 _execute_sleep = Float(0.0005, config=True)
76 _execute_sleep = Float(0.0005, config=True)
77
77
78 # Frequency of the kernel's event loop.
78 # Frequency of the kernel's event loop.
79 # Units are in seconds, kernel subclasses for GUI toolkits may need to
79 # Units are in seconds, kernel subclasses for GUI toolkits may need to
80 # adapt to milliseconds.
80 # adapt to milliseconds.
81 _poll_interval = Float(0.05, config=True)
81 _poll_interval = Float(0.05, config=True)
82
82
83 # If the shutdown was requested over the network, we leave here the
83 # If the shutdown was requested over the network, we leave here the
84 # necessary reply message so it can be sent by our registered atexit
84 # necessary reply message so it can be sent by our registered atexit
85 # handler. This ensures that the reply is only sent to clients truly at
85 # handler. This ensures that the reply is only sent to clients truly at
86 # the end of our shutdown process (which happens after the underlying
86 # the end of our shutdown process (which happens after the underlying
87 # IPython shell's own shutdown).
87 # IPython shell's own shutdown).
88 _shutdown_message = None
88 _shutdown_message = None
89
89
90 # This is a dict of port number that the kernel is listening on. It is set
90 # This is a dict of port number that the kernel is listening on. It is set
91 # by record_ports and used by connect_request.
91 # by record_ports and used by connect_request.
92 _recorded_ports = Dict()
92 _recorded_ports = Dict()
93
93
94
94
95
95
96 def __init__(self, **kwargs):
96 def __init__(self, **kwargs):
97 super(Kernel, self).__init__(**kwargs)
97 super(Kernel, self).__init__(**kwargs)
98
98
99 # Before we even start up the shell, register *first* our exit handlers
99 # Before we even start up the shell, register *first* our exit handlers
100 # so they come before the shell's
100 # so they come before the shell's
101 atexit.register(self._at_shutdown)
101 atexit.register(self._at_shutdown)
102
102
103 # Initialize the InteractiveShell subclass
103 # Initialize the InteractiveShell subclass
104 self.shell = ZMQInteractiveShell.instance(config=self.config)
104 self.shell = ZMQInteractiveShell.instance(config=self.config)
105 self.shell.displayhook.session = self.session
105 self.shell.displayhook.session = self.session
106 self.shell.displayhook.pub_socket = self.iopub_socket
106 self.shell.displayhook.pub_socket = self.iopub_socket
107 self.shell.display_pub.session = self.session
107 self.shell.display_pub.session = self.session
108 self.shell.display_pub.pub_socket = self.iopub_socket
108 self.shell.display_pub.pub_socket = self.iopub_socket
109
109
110 # TMP - hack while developing
110 # TMP - hack while developing
111 self.shell._reply_content = None
111 self.shell._reply_content = None
112
112
113 # Build dict of handlers for message types
113 # Build dict of handlers for message types
114 msg_types = [ 'execute_request', 'complete_request',
114 msg_types = [ 'execute_request', 'complete_request',
115 'object_info_request', 'history_request',
115 'object_info_request', 'history_request',
116 'connect_request', 'shutdown_request']
116 'connect_request', 'shutdown_request']
117 self.handlers = {}
117 self.handlers = {}
118 for msg_type in msg_types:
118 for msg_type in msg_types:
119 self.handlers[msg_type] = getattr(self, msg_type)
119 self.handlers[msg_type] = getattr(self, msg_type)
120
120
    def do_one_iteration(self):
        """Do one iteration of the kernel's evaluation loop.

        Receives at most one message from the shell socket (non-blocking)
        and dispatches it to the handler registered for its msg_type; exits
        the process if the handled message asked the shell to shut down.
        """
        ident,msg = self.session.recv(self.shell_socket, zmq.NOBLOCK)
        if msg is None:
            # Nothing waiting on the socket; return immediately.
            return

        # This assert will raise in versions of zeromq 2.0.7 and lesser.
        # We now require 2.0.8 or above, so we can uncomment for safety.
        # print(ident,msg, file=sys.__stdout__)
        assert ident is not None, "Missing message part."

        # Print some info about this message and leave a '--->' marker, so it's
        # easier to trace visually the message chain when debugging.  Each
        # handler prints its message at the end.
        # NOTE: msg_type lives inside the message header, not at the top level.
        self.log.debug('\n*** MESSAGE TYPE:'+str(msg['header']['msg_type'])+'***')
        self.log.debug('   Content: '+str(msg['content'])+'\n   --->\n   ')

        # Find and call actual handler for message
        handler = self.handlers.get(msg['header']['msg_type'], None)
        if handler is None:
            self.log.error("UNKNOWN MESSAGE TYPE:" +str(msg))
        else:
            handler(ident, msg)

        # Check whether we should exit, in case the incoming message set the
        # exit flag on
        if self.shell.exit_now:
            self.log.debug('\nExiting IPython kernel...')
            # We do a normal, clean exit, which allows any actions registered
            # via atexit (such as history saving) to take place.
            sys.exit(0)
153
153
154
154
    def start(self):
        """ Start the kernel main loop.

        Polls the shell socket and runs one iteration of the evaluation loop
        per wakeup, forever; Ctrl-C is caught so it cannot kill the kernel.
        """
        poller = zmq.Poller()
        poller.register(self.shell_socket, zmq.POLLIN)
        while True:
            try:
                # scale by extra factor of 10, because there is no
                # reason for this to be anything less than ~ 0.1s
                # since it is a real poller and will respond
                # to events immediately
                poller.poll(10*1000*self._poll_interval)
                self.do_one_iteration()
            except KeyboardInterrupt:
                # Ctrl-C shouldn't crash the kernel
                io.raw_print("KeyboardInterrupt caught in kernel")
171
171
    def record_ports(self, ports):
        """Record the ports that this kernel is using.

        The creator of the Kernel instance must call this method if they
        want the :meth:`connect_request` method to return the port numbers.
        """
        self._recorded_ports = ports
179
179
180 #---------------------------------------------------------------------------
180 #---------------------------------------------------------------------------
181 # Kernel request handlers
181 # Kernel request handlers
182 #---------------------------------------------------------------------------
182 #---------------------------------------------------------------------------
183
183
    def _publish_pyin(self, code, parent):
        """Publish the code request on the pyin stream.

        Re-broadcasts the executed code on the iopub socket so all listening
        clients see what was run.
        """
        pyin_msg = self.session.send(self.iopub_socket, u'pyin',{u'code':code}, parent=parent)
188
188
189 def execute_request(self, ident, parent):
189 def execute_request(self, ident, parent):
190
190
191 status_msg = self.session.send(self.iopub_socket,
191 status_msg = self.session.send(self.iopub_socket,
192 u'status',
192 u'status',
193 {u'execution_state':u'busy'},
193 {u'execution_state':u'busy'},
194 parent=parent
194 parent=parent
195 )
195 )
196
196
197 try:
197 try:
198 content = parent[u'content']
198 content = parent[u'content']
199 code = content[u'code']
199 code = content[u'code']
200 silent = content[u'silent']
200 silent = content[u'silent']
201 except:
201 except:
202 self.log.error("Got bad msg: ")
202 self.log.error("Got bad msg: ")
203 self.log.error(str(Message(parent)))
203 self.log.error(str(Message(parent)))
204 return
204 return
205
205
206 shell = self.shell # we'll need this a lot here
206 shell = self.shell # we'll need this a lot here
207
207
208 # Replace raw_input. Note that is not sufficient to replace
208 # Replace raw_input. Note that is not sufficient to replace
209 # raw_input in the user namespace.
209 # raw_input in the user namespace.
210 raw_input = lambda prompt='': self._raw_input(prompt, ident, parent)
210 raw_input = lambda prompt='': self._raw_input(prompt, ident, parent)
211 __builtin__.raw_input = raw_input
211 __builtin__.raw_input = raw_input
212
212
213 # Set the parent message of the display hook and out streams.
213 # Set the parent message of the display hook and out streams.
214 shell.displayhook.set_parent(parent)
214 shell.displayhook.set_parent(parent)
215 shell.display_pub.set_parent(parent)
215 shell.display_pub.set_parent(parent)
216 sys.stdout.set_parent(parent)
216 sys.stdout.set_parent(parent)
217 sys.stderr.set_parent(parent)
217 sys.stderr.set_parent(parent)
218
218
219 # Re-broadcast our input for the benefit of listening clients, and
219 # Re-broadcast our input for the benefit of listening clients, and
220 # start computing output
220 # start computing output
221 if not silent:
221 if not silent:
222 self._publish_pyin(code, parent)
222 self._publish_pyin(code, parent)
223
223
224 reply_content = {}
224 reply_content = {}
225 try:
225 try:
226 if silent:
226 if silent:
227 # run_code uses 'exec' mode, so no displayhook will fire, and it
227 # run_code uses 'exec' mode, so no displayhook will fire, and it
228 # doesn't call logging or history manipulations. Print
228 # doesn't call logging or history manipulations. Print
229 # statements in that code will obviously still execute.
229 # statements in that code will obviously still execute.
230 shell.run_code(code)
230 shell.run_code(code)
231 else:
231 else:
232 # FIXME: the shell calls the exception handler itself.
232 # FIXME: the shell calls the exception handler itself.
233 shell.run_cell(code)
233 shell.run_cell(code)
234 except:
234 except:
235 status = u'error'
235 status = u'error'
236 # FIXME: this code right now isn't being used yet by default,
236 # FIXME: this code right now isn't being used yet by default,
237 # because the run_cell() call above directly fires off exception
237 # because the run_cell() call above directly fires off exception
238 # reporting. This code, therefore, is only active in the scenario
238 # reporting. This code, therefore, is only active in the scenario
239 # where runlines itself has an unhandled exception. We need to
239 # where runlines itself has an unhandled exception. We need to
240 # uniformize this, for all exception construction to come from a
240 # uniformize this, for all exception construction to come from a
241 # single location in the codbase.
241 # single location in the codbase.
242 etype, evalue, tb = sys.exc_info()
242 etype, evalue, tb = sys.exc_info()
243 tb_list = traceback.format_exception(etype, evalue, tb)
243 tb_list = traceback.format_exception(etype, evalue, tb)
244 reply_content.update(shell._showtraceback(etype, evalue, tb_list))
244 reply_content.update(shell._showtraceback(etype, evalue, tb_list))
245 else:
245 else:
246 status = u'ok'
246 status = u'ok'
247
247
248 reply_content[u'status'] = status
248 reply_content[u'status'] = status
249
249
250 # Return the execution counter so clients can display prompts
250 # Return the execution counter so clients can display prompts
251 reply_content['execution_count'] = shell.execution_count -1
251 reply_content['execution_count'] = shell.execution_count -1
252
252
253 # FIXME - fish exception info out of shell, possibly left there by
253 # FIXME - fish exception info out of shell, possibly left there by
254 # runlines. We'll need to clean up this logic later.
254 # runlines. We'll need to clean up this logic later.
255 if shell._reply_content is not None:
255 if shell._reply_content is not None:
256 reply_content.update(shell._reply_content)
256 reply_content.update(shell._reply_content)
257 # reset after use
257 # reset after use
258 shell._reply_content = None
258 shell._reply_content = None
259
259
260 # At this point, we can tell whether the main code execution succeeded
260 # At this point, we can tell whether the main code execution succeeded
261 # or not. If it did, we proceed to evaluate user_variables/expressions
261 # or not. If it did, we proceed to evaluate user_variables/expressions
262 if reply_content['status'] == 'ok':
262 if reply_content['status'] == 'ok':
263 reply_content[u'user_variables'] = \
263 reply_content[u'user_variables'] = \
264 shell.user_variables(content[u'user_variables'])
264 shell.user_variables(content[u'user_variables'])
265 reply_content[u'user_expressions'] = \
265 reply_content[u'user_expressions'] = \
266 shell.user_expressions(content[u'user_expressions'])
266 shell.user_expressions(content[u'user_expressions'])
267 else:
267 else:
268 # If there was an error, don't even try to compute variables or
268 # If there was an error, don't even try to compute variables or
269 # expressions
269 # expressions
270 reply_content[u'user_variables'] = {}
270 reply_content[u'user_variables'] = {}
271 reply_content[u'user_expressions'] = {}
271 reply_content[u'user_expressions'] = {}
272
272
273 # Payloads should be retrieved regardless of outcome, so we can both
273 # Payloads should be retrieved regardless of outcome, so we can both
274 # recover partial output (that could have been generated early in a
274 # recover partial output (that could have been generated early in a
275 # block, before an error) and clear the payload system always.
275 # block, before an error) and clear the payload system always.
276 reply_content[u'payload'] = shell.payload_manager.read_payload()
276 reply_content[u'payload'] = shell.payload_manager.read_payload()
277 # Be agressive about clearing the payload because we don't want
277 # Be agressive about clearing the payload because we don't want
278 # it to sit in memory until the next execute_request comes in.
278 # it to sit in memory until the next execute_request comes in.
279 shell.payload_manager.clear_payload()
279 shell.payload_manager.clear_payload()
280
280
281 # Flush output before sending the reply.
281 # Flush output before sending the reply.
282 sys.stdout.flush()
282 sys.stdout.flush()
283 sys.stderr.flush()
283 sys.stderr.flush()
284 # FIXME: on rare occasions, the flush doesn't seem to make it to the
284 # FIXME: on rare occasions, the flush doesn't seem to make it to the
285 # clients... This seems to mitigate the problem, but we definitely need
285 # clients... This seems to mitigate the problem, but we definitely need
286 # to better understand what's going on.
286 # to better understand what's going on.
287 if self._execute_sleep:
287 if self._execute_sleep:
288 time.sleep(self._execute_sleep)
288 time.sleep(self._execute_sleep)
289
289
290 # Send the reply.
290 # Send the reply.
291 reply_msg = self.session.send(self.shell_socket, u'execute_reply',
291 reply_msg = self.session.send(self.shell_socket, u'execute_reply',
292 reply_content, parent, ident=ident)
292 reply_content, parent, ident=ident)
293 self.log.debug(str(reply_msg))
293 self.log.debug(str(reply_msg))
294
294
295 if reply_msg['content']['status'] == u'error':
295 if reply_msg['content']['status'] == u'error':
296 self._abort_queue()
296 self._abort_queue()
297
297
298 status_msg = self.session.send(self.iopub_socket,
298 status_msg = self.session.send(self.iopub_socket,
299 u'status',
299 u'status',
300 {u'execution_state':u'idle'},
300 {u'execution_state':u'idle'},
301 parent=parent
301 parent=parent
302 )
302 )
303
303
    def complete_request(self, ident, parent):
        """Handle a complete_request: compute matches and send a complete_reply."""
        txt, matches = self._complete(parent)
        # Package the matched text and completions for the reply.
        matches = {'matches' : matches,
                   'matched_text' : txt,
                   'status' : 'ok'}
        completion_msg = self.session.send(self.shell_socket, 'complete_reply',
                                           matches, parent, ident)
        self.log.debug(str(completion_msg))
312
312
    def object_info_request(self, ident, parent):
        """Handle an object_info_request: inspect the named object and reply."""
        object_info = self.shell.object_inspect(parent['content']['oname'])
        # Before we send this object over, we scrub it for JSON usage
        oinfo = json_clean(object_info)
        msg = self.session.send(self.shell_socket, 'object_info_reply',
                        oinfo, parent, ident)
        self.log.debug(msg)
320
320
321 def history_request(self, ident, parent):
321 def history_request(self, ident, parent):
322 # We need to pull these out, as passing **kwargs doesn't work with
322 # We need to pull these out, as passing **kwargs doesn't work with
323 # unicode keys before Python 2.6.5.
323 # unicode keys before Python 2.6.5.
324 hist_access_type = parent['content']['hist_access_type']
324 hist_access_type = parent['content']['hist_access_type']
325 raw = parent['content']['raw']
325 raw = parent['content']['raw']
326 output = parent['content']['output']
326 output = parent['content']['output']
327 if hist_access_type == 'tail':
327 if hist_access_type == 'tail':
328 n = parent['content']['n']
328 n = parent['content']['n']
329 hist = self.shell.history_manager.get_tail(n, raw=raw, output=output,
329 hist = self.shell.history_manager.get_tail(n, raw=raw, output=output,
330 include_latest=True)
330 include_latest=True)
331
331
332 elif hist_access_type == 'range':
332 elif hist_access_type == 'range':
333 session = parent['content']['session']
333 session = parent['content']['session']
334 start = parent['content']['start']
334 start = parent['content']['start']
335 stop = parent['content']['stop']
335 stop = parent['content']['stop']
336 hist = self.shell.history_manager.get_range(session, start, stop,
336 hist = self.shell.history_manager.get_range(session, start, stop,
337 raw=raw, output=output)
337 raw=raw, output=output)
338
338
339 elif hist_access_type == 'search':
339 elif hist_access_type == 'search':
340 pattern = parent['content']['pattern']
340 pattern = parent['content']['pattern']
341 hist = self.shell.history_manager.search(pattern, raw=raw, output=output)
341 hist = self.shell.history_manager.search(pattern, raw=raw, output=output)
342
342
343 else:
343 else:
344 hist = []
344 hist = []
345 content = {'history' : list(hist)}
345 content = {'history' : list(hist)}
346 msg = self.session.send(self.shell_socket, 'history_reply',
346 msg = self.session.send(self.shell_socket, 'history_reply',
347 content, parent, ident)
347 content, parent, ident)
348 self.log.debug(str(msg))
348 self.log.debug(str(msg))
349
349
    def connect_request(self, ident, parent):
        """Handle a connect_request: reply with the ports this kernel uses."""
        if self._recorded_ports is not None:
            content = self._recorded_ports.copy()
        else:
            # record_ports() was never called; reply with an empty dict.
            content = {}
        msg = self.session.send(self.shell_socket, 'connect_reply',
                content, parent, ident)
        self.log.debug(msg)
358
358
    def shutdown_request(self, ident, parent):
        """Handle a shutdown_request: flag the shell to exit and stash the reply.

        The shutdown_reply is not sent here; it is stored in
        _shutdown_message and delivered by our atexit handler (_at_shutdown)
        after the shell has fully shut down.
        """
        self.shell.exit_now = True
        self._shutdown_message = self.session.msg(u'shutdown_reply', parent['content'], parent)
        sys.exit(0)
363
363
364 #---------------------------------------------------------------------------
364 #---------------------------------------------------------------------------
365 # Protected interface
365 # Protected interface
366 #---------------------------------------------------------------------------
366 #---------------------------------------------------------------------------
367
367
    def _abort_queue(self):
        """Drain the shell socket, replying 'aborted' to every queued request."""
        while True:
            ident,msg = self.session.recv(self.shell_socket, zmq.NOBLOCK)
            if msg is None:
                # Queue is empty; nothing left to abort.
                break
            else:
                assert ident is not None, \
                       "Unexpected missing message part."

            self.log.debug("Aborting:\n"+str(Message(msg)))
            # msg_type lives inside the message header, not at the top level.
            msg_type = msg['header']['msg_type']
            # Derive the matching reply type, e.g. 'execute_request' ->
            # 'execute_reply'.
            reply_type = msg_type.split('_')[0] + '_reply'
            reply_msg = self.session.send(self.shell_socket, reply_type,
                    {'status' : 'aborted'}, msg, ident=ident)
            self.log.debug(reply_msg)
            # We need to wait a bit for requests to come in. This can probably
            # be set shorter for true asynchronous clients.
            time.sleep(0.1)
386
386
387 def _raw_input(self, prompt, ident, parent):
387 def _raw_input(self, prompt, ident, parent):
388 # Flush output before making the request.
388 # Flush output before making the request.
389 sys.stderr.flush()
389 sys.stderr.flush()
390 sys.stdout.flush()
390 sys.stdout.flush()
391
391
392 # Send the input request.
392 # Send the input request.
393 content = dict(prompt=prompt)
393 content = dict(prompt=prompt)
394 msg = self.session.send(self.stdin_socket, u'input_request', content, parent)
394 msg = self.session.send(self.stdin_socket, u'input_request', content, parent)
395
395
396 # Await a response.
396 # Await a response.
397 ident, reply = self.session.recv(self.stdin_socket, 0)
397 ident, reply = self.session.recv(self.stdin_socket, 0)
398 try:
398 try:
399 value = reply['content']['value']
399 value = reply['content']['value']
400 except:
400 except:
401 self.log.error("Got bad raw_input reply: ")
401 self.log.error("Got bad raw_input reply: ")
402 self.log.error(str(Message(parent)))
402 self.log.error(str(Message(parent)))
403 value = ''
403 value = ''
404 return value
404 return value
405
405
406 def _complete(self, msg):
406 def _complete(self, msg):
407 c = msg['content']
407 c = msg['content']
408 try:
408 try:
409 cpos = int(c['cursor_pos'])
409 cpos = int(c['cursor_pos'])
410 except:
410 except:
411 # If we don't get something that we can convert to an integer, at
411 # If we don't get something that we can convert to an integer, at
412 # least attempt the completion guessing the cursor is at the end of
412 # least attempt the completion guessing the cursor is at the end of
413 # the text, if there's any, and otherwise of the line
413 # the text, if there's any, and otherwise of the line
414 cpos = len(c['text'])
414 cpos = len(c['text'])
415 if cpos==0:
415 if cpos==0:
416 cpos = len(c['line'])
416 cpos = len(c['line'])
417 return self.shell.complete(c['text'], c['line'], cpos)
417 return self.shell.complete(c['text'], c['line'], cpos)
418
418
419 def _object_info(self, context):
419 def _object_info(self, context):
420 symbol, leftover = self._symbol_from_context(context)
420 symbol, leftover = self._symbol_from_context(context)
421 if symbol is not None and not leftover:
421 if symbol is not None and not leftover:
422 doc = getattr(symbol, '__doc__', '')
422 doc = getattr(symbol, '__doc__', '')
423 else:
423 else:
424 doc = ''
424 doc = ''
425 object_info = dict(docstring = doc)
425 object_info = dict(docstring = doc)
426 return object_info
426 return object_info
427
427
428 def _symbol_from_context(self, context):
428 def _symbol_from_context(self, context):
429 if not context:
429 if not context:
430 return None, context
430 return None, context
431
431
432 base_symbol_string = context[0]
432 base_symbol_string = context[0]
433 symbol = self.shell.user_ns.get(base_symbol_string, None)
433 symbol = self.shell.user_ns.get(base_symbol_string, None)
434 if symbol is None:
434 if symbol is None:
435 symbol = __builtin__.__dict__.get(base_symbol_string, None)
435 symbol = __builtin__.__dict__.get(base_symbol_string, None)
436 if symbol is None:
436 if symbol is None:
437 return None, context
437 return None, context
438
438
439 context = context[1:]
439 context = context[1:]
440 for i, name in enumerate(context):
440 for i, name in enumerate(context):
441 new_symbol = getattr(symbol, name, None)
441 new_symbol = getattr(symbol, name, None)
442 if new_symbol is None:
442 if new_symbol is None:
443 return symbol, context[i:]
443 return symbol, context[i:]
444 else:
444 else:
445 symbol = new_symbol
445 symbol = new_symbol
446
446
447 return symbol, []
447 return symbol, []
448
448
    def _at_shutdown(self):
        """Actions taken at shutdown by the kernel, called by python's atexit.
        """
        # io.rprint("Kernel at_shutdown") # dbg
        if self._shutdown_message is not None:
            # Deliver the shutdown_reply stashed by shutdown_request() on
            # both the shell and iopub channels so every client sees it.
            self.session.send(self.shell_socket, self._shutdown_message)
            self.session.send(self.iopub_socket, self._shutdown_message)
            self.log.debug(str(self._shutdown_message))
            # A very short sleep to give zmq time to flush its message buffers
            # before Python truly shuts down.
            time.sleep(0.01)
460
460
461
461
class QtKernel(Kernel):
    """A Kernel subclass with Qt support."""

    def start(self):
        """Start a kernel with Qt4 event loop integration.

        A QTimer drives do_one_iteration from inside the Qt event loop.
        """
        from IPython.external.qt_for_kernel import QtCore
        from IPython.lib.guisupport import get_app_qt4, start_event_loop_qt4

        self.app = get_app_qt4([" "])
        # Keep the kernel alive even when all user windows close.
        self.app.setQuitOnLastWindowClosed(False)
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.do_one_iteration)
        # Units for the timer are in milliseconds
        self.timer.start(1000*self._poll_interval)
        start_event_loop_qt4(self.app)
478
478
479
479
class WxKernel(Kernel):
    """A Kernel subclass with Wx support."""

    def start(self):
        """Start a kernel with wx event loop support.

        A wx.Timer inside a hidden frame drives do_one_iteration from the
        wx event loop.
        """
        import wx
        from IPython.lib.guisupport import start_event_loop_wx

        doi = self.do_one_iteration
        # Wx uses milliseconds
        poll_interval = int(1000*self._poll_interval)

        # We have to put the wx.Timer in a wx.Frame for it to fire properly.
        # We make the Frame hidden when we create it in the main app below.
        class TimerFrame(wx.Frame):
            def __init__(self, func):
                wx.Frame.__init__(self, None, -1)
                self.timer = wx.Timer(self)
                # Units for the timer are in milliseconds
                self.timer.Start(poll_interval)
                self.Bind(wx.EVT_TIMER, self.on_timer)
                self.func = func

            def on_timer(self, event):
                # Each tick runs one iteration of the kernel loop.
                self.func()

        # We need a custom wx.App to create our Frame subclass that has the
        # wx.Timer to drive the ZMQ event loop.
        class IPWxApp(wx.App):
            def OnInit(self):
                self.frame = TimerFrame(doi)
                self.frame.Show(False)
                return True

        # The redirect=False here makes sure that wx doesn't replace
        # sys.stdout/stderr with its own classes.
        self.app = IPWxApp(redirect=False)
        start_event_loop_wx(self.app)
519
519
520
520
class TkKernel(Kernel):
    """A Kernel subclass with Tk support."""

    def start(self):
        """Start a Tk enabled event loop.

        A self-re-arming Tk `after` callback drives do_one_iteration from
        within Tk's mainloop.
        """
        import Tkinter
        doi = self.do_one_iteration
        # Tk uses milliseconds
        poll_interval = int(1000*self._poll_interval)
        # For Tkinter, we create a Tk object and call its withdraw method.
        class Timer(object):
            def __init__(self, func):
                self.app = Tkinter.Tk()
                # Hide the root window; we only need Tk's event loop.
                self.app.withdraw()
                self.func = func

            def on_timer(self):
                self.func()
                # Re-arm so this callback fires repeatedly.
                self.app.after(poll_interval, self.on_timer)

            def start(self):
                self.on_timer()  # Call it once to get things going.
                self.app.mainloop()

        self.timer = Timer(doi)
        self.timer.start()
548
548
549
549
class GTKKernel(Kernel):
    """A Kernel subclass with GTK support."""

    def start(self):
        """Start the kernel, coordinating with the GTK event loop."""
        # Imported lazily so non-GTK kernels don't require gtk.
        from .gui.gtkembed import GTKEmbed

        gtk_kernel = GTKEmbed(self)
        gtk_kernel.start()
559
559
560
560
561 #-----------------------------------------------------------------------------
561 #-----------------------------------------------------------------------------
562 # Aliases and Flags for the IPKernelApp
562 # Aliases and Flags for the IPKernelApp
563 #-----------------------------------------------------------------------------
563 #-----------------------------------------------------------------------------
564
564
# Command-line flags for IPKernelApp: start from the kernel's flags and layer
# the interactive shell's on top.
flags = dict(kernel_flags)
flags.update(shell_flags)

# Helper to register paired boolean flags (--x / --no-x).
addflag = lambda *args: flags.update(boolean_flag(*args))

flags['pylab'] = (
    {'IPKernelApp' : {'pylab' : 'auto'}},
    """Pre-load matplotlib and numpy for interactive use with
    the default matplotlib backend."""
)

# Command-line aliases, likewise merged from kernel and shell.
aliases = dict(kernel_aliases)
aliases.update(shell_aliases)

# it's possible we don't want short aliases for *all* of these:
aliases.update(dict(
    pylab='IPKernelApp.pylab',
))
583
583
584 #-----------------------------------------------------------------------------
584 #-----------------------------------------------------------------------------
585 # The IPKernelApp class
585 # The IPKernelApp class
586 #-----------------------------------------------------------------------------
586 #-----------------------------------------------------------------------------
587
587
588 class IPKernelApp(KernelApp, InteractiveShellApp):
588 class IPKernelApp(KernelApp, InteractiveShellApp):
589 name = 'ipkernel'
589 name = 'ipkernel'
590
590
591 aliases = Dict(aliases)
591 aliases = Dict(aliases)
592 flags = Dict(flags)
592 flags = Dict(flags)
593 classes = [Kernel, ZMQInteractiveShell, ProfileDir, Session]
593 classes = [Kernel, ZMQInteractiveShell, ProfileDir, Session]
594 # configurables
594 # configurables
595 pylab = CaselessStrEnum(['tk', 'qt', 'wx', 'gtk', 'osx', 'inline', 'auto'],
595 pylab = CaselessStrEnum(['tk', 'qt', 'wx', 'gtk', 'osx', 'inline', 'auto'],
596 config=True,
596 config=True,
597 help="""Pre-load matplotlib and numpy for interactive use,
597 help="""Pre-load matplotlib and numpy for interactive use,
598 selecting a particular matplotlib backend and loop integration.
598 selecting a particular matplotlib backend and loop integration.
599 """
599 """
600 )
600 )
601 pylab_import_all = Bool(True, config=True,
601 pylab_import_all = Bool(True, config=True,
602 help="""If true, an 'import *' is done from numpy and pylab,
602 help="""If true, an 'import *' is done from numpy and pylab,
603 when using pylab"""
603 when using pylab"""
604 )
604 )
605 def initialize(self, argv=None):
605 def initialize(self, argv=None):
606 super(IPKernelApp, self).initialize(argv)
606 super(IPKernelApp, self).initialize(argv)
607 self.init_shell()
607 self.init_shell()
608 self.init_extensions()
608 self.init_extensions()
609 self.init_code()
609 self.init_code()
610
610
611 def init_kernel(self):
611 def init_kernel(self):
612 kernel_factory = Kernel
612 kernel_factory = Kernel
613
613
614 kernel_map = {
614 kernel_map = {
615 'qt' : QtKernel,
615 'qt' : QtKernel,
616 'qt4': QtKernel,
616 'qt4': QtKernel,
617 'inline': Kernel,
617 'inline': Kernel,
618 'osx': TkKernel,
618 'osx': TkKernel,
619 'wx' : WxKernel,
619 'wx' : WxKernel,
620 'tk' : TkKernel,
620 'tk' : TkKernel,
621 'gtk': GTKKernel,
621 'gtk': GTKKernel,
622 }
622 }
623
623
624 if self.pylab:
624 if self.pylab:
625 key = None if self.pylab == 'auto' else self.pylab
625 key = None if self.pylab == 'auto' else self.pylab
626 gui, backend = pylabtools.find_gui_and_backend(key)
626 gui, backend = pylabtools.find_gui_and_backend(key)
627 kernel_factory = kernel_map.get(gui)
627 kernel_factory = kernel_map.get(gui)
628 if kernel_factory is None:
628 if kernel_factory is None:
629 raise ValueError('GUI is not supported: %r' % gui)
629 raise ValueError('GUI is not supported: %r' % gui)
630 pylabtools.activate_matplotlib(backend)
630 pylabtools.activate_matplotlib(backend)
631
631
632 kernel = kernel_factory(config=self.config, session=self.session,
632 kernel = kernel_factory(config=self.config, session=self.session,
633 shell_socket=self.shell_socket,
633 shell_socket=self.shell_socket,
634 iopub_socket=self.iopub_socket,
634 iopub_socket=self.iopub_socket,
635 stdin_socket=self.stdin_socket,
635 stdin_socket=self.stdin_socket,
636 log=self.log
636 log=self.log
637 )
637 )
638 self.kernel = kernel
638 self.kernel = kernel
639 kernel.record_ports(self.ports)
639 kernel.record_ports(self.ports)
640
640
641 if self.pylab:
641 if self.pylab:
642 import_all = self.pylab_import_all
642 import_all = self.pylab_import_all
643 pylabtools.import_pylab(kernel.shell.user_ns, backend, import_all,
643 pylabtools.import_pylab(kernel.shell.user_ns, backend, import_all,
644 shell=kernel.shell)
644 shell=kernel.shell)
645
645
646 def init_shell(self):
646 def init_shell(self):
647 self.shell = self.kernel.shell
647 self.shell = self.kernel.shell
648
648
649
649
650 #-----------------------------------------------------------------------------
650 #-----------------------------------------------------------------------------
651 # Kernel main and launch functions
651 # Kernel main and launch functions
652 #-----------------------------------------------------------------------------
652 #-----------------------------------------------------------------------------
653
653
654 def launch_kernel(*args, **kwargs):
654 def launch_kernel(*args, **kwargs):
655 """Launches a localhost IPython kernel, binding to the specified ports.
655 """Launches a localhost IPython kernel, binding to the specified ports.
656
656
657 This function simply calls entry_point.base_launch_kernel with the right first
657 This function simply calls entry_point.base_launch_kernel with the right first
658 command to start an ipkernel. See base_launch_kernel for arguments.
658 command to start an ipkernel. See base_launch_kernel for arguments.
659
659
660 Returns
660 Returns
661 -------
661 -------
662 A tuple of form:
662 A tuple of form:
663 (kernel_process, shell_port, iopub_port, stdin_port, hb_port)
663 (kernel_process, shell_port, iopub_port, stdin_port, hb_port)
664 where kernel_process is a Popen object and the ports are integers.
664 where kernel_process is a Popen object and the ports are integers.
665 """
665 """
666 return base_launch_kernel('from IPython.zmq.ipkernel import main; main()',
666 return base_launch_kernel('from IPython.zmq.ipkernel import main; main()',
667 *args, **kwargs)
667 *args, **kwargs)
668
668
669
669
670 def main():
670 def main():
671 """Run an IPKernel as an application"""
671 """Run an IPKernel as an application"""
672 app = IPKernelApp.instance()
672 app = IPKernelApp.instance()
673 app.initialize()
673 app.initialize()
674 app.start()
674 app.start()
675
675
676
676
677 if __name__ == '__main__':
677 if __name__ == '__main__':
678 main()
678 main()
@@ -1,278 +1,278 b''
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 """A simple interactive kernel that talks to a frontend over 0MQ.
2 """A simple interactive kernel that talks to a frontend over 0MQ.
3
3
4 Things to do:
4 Things to do:
5
5
6 * Implement `set_parent` logic. Right before doing exec, the Kernel should
6 * Implement `set_parent` logic. Right before doing exec, the Kernel should
7 call set_parent on all the PUB objects with the message about to be executed.
7 call set_parent on all the PUB objects with the message about to be executed.
8 * Implement random port and security key logic.
8 * Implement random port and security key logic.
9 * Implement control messages.
9 * Implement control messages.
10 * Implement event loop and poll version.
10 * Implement event loop and poll version.
11 """
11 """
12
12
13 #-----------------------------------------------------------------------------
13 #-----------------------------------------------------------------------------
14 # Imports
14 # Imports
15 #-----------------------------------------------------------------------------
15 #-----------------------------------------------------------------------------
16
16
17 # Standard library imports.
17 # Standard library imports.
18 import __builtin__
18 import __builtin__
19 from code import CommandCompiler
19 from code import CommandCompiler
20 import sys
20 import sys
21 import time
21 import time
22 import traceback
22 import traceback
23
23
24 # System library imports.
24 # System library imports.
25 import zmq
25 import zmq
26
26
27 # Local imports.
27 # Local imports.
28 from IPython.utils.traitlets import HasTraits, Instance, Dict, Float
28 from IPython.utils.traitlets import HasTraits, Instance, Dict, Float
29 from completer import KernelCompleter
29 from completer import KernelCompleter
30 from entry_point import base_launch_kernel
30 from entry_point import base_launch_kernel
31 from session import Session, Message
31 from session import Session, Message
32 from kernelapp import KernelApp
32 from kernelapp import KernelApp
33
33
34 #-----------------------------------------------------------------------------
34 #-----------------------------------------------------------------------------
35 # Main kernel class
35 # Main kernel class
36 #-----------------------------------------------------------------------------
36 #-----------------------------------------------------------------------------
37
37
38 class Kernel(HasTraits):
38 class Kernel(HasTraits):
39
39
40 # Private interface
40 # Private interface
41
41
42 # Time to sleep after flushing the stdout/err buffers in each execute
42 # Time to sleep after flushing the stdout/err buffers in each execute
43 # cycle. While this introduces a hard limit on the minimal latency of the
43 # cycle. While this introduces a hard limit on the minimal latency of the
44 # execute cycle, it helps prevent output synchronization problems for
44 # execute cycle, it helps prevent output synchronization problems for
45 # clients.
45 # clients.
46 # Units are in seconds. The minimum zmq latency on local host is probably
46 # Units are in seconds. The minimum zmq latency on local host is probably
47 # ~150 microseconds, set this to 500us for now. We may need to increase it
47 # ~150 microseconds, set this to 500us for now. We may need to increase it
48 # a little if it's not enough after more interactive testing.
48 # a little if it's not enough after more interactive testing.
49 _execute_sleep = Float(0.0005, config=True)
49 _execute_sleep = Float(0.0005, config=True)
50
50
51 # This is a dict of port number that the kernel is listening on. It is set
51 # This is a dict of port number that the kernel is listening on. It is set
52 # by record_ports and used by connect_request.
52 # by record_ports and used by connect_request.
53 _recorded_ports = Dict()
53 _recorded_ports = Dict()
54
54
55 #---------------------------------------------------------------------------
55 #---------------------------------------------------------------------------
56 # Kernel interface
56 # Kernel interface
57 #---------------------------------------------------------------------------
57 #---------------------------------------------------------------------------
58
58
59 session = Instance(Session)
59 session = Instance(Session)
60 shell_socket = Instance('zmq.Socket')
60 shell_socket = Instance('zmq.Socket')
61 iopub_socket = Instance('zmq.Socket')
61 iopub_socket = Instance('zmq.Socket')
62 stdin_socket = Instance('zmq.Socket')
62 stdin_socket = Instance('zmq.Socket')
63 log = Instance('logging.Logger')
63 log = Instance('logging.Logger')
64
64
65 def __init__(self, **kwargs):
65 def __init__(self, **kwargs):
66 super(Kernel, self).__init__(**kwargs)
66 super(Kernel, self).__init__(**kwargs)
67 self.user_ns = {}
67 self.user_ns = {}
68 self.history = []
68 self.history = []
69 self.compiler = CommandCompiler()
69 self.compiler = CommandCompiler()
70 self.completer = KernelCompleter(self.user_ns)
70 self.completer = KernelCompleter(self.user_ns)
71
71
72 # Build dict of handlers for message types
72 # Build dict of handlers for message types
73 msg_types = [ 'execute_request', 'complete_request',
73 msg_types = [ 'execute_request', 'complete_request',
74 'object_info_request', 'shutdown_request' ]
74 'object_info_request', 'shutdown_request' ]
75 self.handlers = {}
75 self.handlers = {}
76 for msg_type in msg_types:
76 for msg_type in msg_types:
77 self.handlers[msg_type] = getattr(self, msg_type)
77 self.handlers[msg_type] = getattr(self, msg_type)
78
78
79 def start(self):
79 def start(self):
80 """ Start the kernel main loop.
80 """ Start the kernel main loop.
81 """
81 """
82 while True:
82 while True:
83 ident,msg = self.session.recv(self.shell_socket,0)
83 ident,msg = self.session.recv(self.shell_socket,0)
84 assert ident is not None, "Missing message part."
84 assert ident is not None, "Missing message part."
85 omsg = Message(msg)
85 omsg = Message(msg)
86 self.log.debug(str(omsg))
86 self.log.debug(str(omsg))
87 handler = self.handlers.get(omsg.msg_type, None)
87 handler = self.handlers.get(omsg.msg_type, None)
88 if handler is None:
88 if handler is None:
89 self.log.error("UNKNOWN MESSAGE TYPE: %s"%omsg)
89 self.log.error("UNKNOWN MESSAGE TYPE: %s"%omsg)
90 else:
90 else:
91 handler(ident, omsg)
91 handler(ident, omsg)
92
92
93 def record_ports(self, ports):
93 def record_ports(self, ports):
94 """Record the ports that this kernel is using.
94 """Record the ports that this kernel is using.
95
95
96 The creator of the Kernel instance must call this methods if they
96 The creator of the Kernel instance must call this methods if they
97 want the :meth:`connect_request` method to return the port numbers.
97 want the :meth:`connect_request` method to return the port numbers.
98 """
98 """
99 self._recorded_ports = ports
99 self._recorded_ports = ports
100
100
101 #---------------------------------------------------------------------------
101 #---------------------------------------------------------------------------
102 # Kernel request handlers
102 # Kernel request handlers
103 #---------------------------------------------------------------------------
103 #---------------------------------------------------------------------------
104
104
105 def execute_request(self, ident, parent):
105 def execute_request(self, ident, parent):
106 try:
106 try:
107 code = parent[u'content'][u'code']
107 code = parent[u'content'][u'code']
108 except:
108 except:
109 self.log.error("Got bad msg: %s"%Message(parent))
109 self.log.error("Got bad msg: %s"%Message(parent))
110 return
110 return
111 pyin_msg = self.session.send(self.iopub_socket, u'pyin',{u'code':code}, parent=parent)
111 pyin_msg = self.session.send(self.iopub_socket, u'pyin',{u'code':code}, parent=parent)
112
112
113 try:
113 try:
114 comp_code = self.compiler(code, '<zmq-kernel>')
114 comp_code = self.compiler(code, '<zmq-kernel>')
115
115
116 # Replace raw_input. Note that is not sufficient to replace
116 # Replace raw_input. Note that is not sufficient to replace
117 # raw_input in the user namespace.
117 # raw_input in the user namespace.
118 raw_input = lambda prompt='': self._raw_input(prompt, ident, parent)
118 raw_input = lambda prompt='': self._raw_input(prompt, ident, parent)
119 __builtin__.raw_input = raw_input
119 __builtin__.raw_input = raw_input
120
120
121 # Set the parent message of the display hook and out streams.
121 # Set the parent message of the display hook and out streams.
122 sys.displayhook.set_parent(parent)
122 sys.displayhook.set_parent(parent)
123 sys.stdout.set_parent(parent)
123 sys.stdout.set_parent(parent)
124 sys.stderr.set_parent(parent)
124 sys.stderr.set_parent(parent)
125
125
126 exec comp_code in self.user_ns, self.user_ns
126 exec comp_code in self.user_ns, self.user_ns
127 except:
127 except:
128 etype, evalue, tb = sys.exc_info()
128 etype, evalue, tb = sys.exc_info()
129 tb = traceback.format_exception(etype, evalue, tb)
129 tb = traceback.format_exception(etype, evalue, tb)
130 exc_content = {
130 exc_content = {
131 u'status' : u'error',
131 u'status' : u'error',
132 u'traceback' : tb,
132 u'traceback' : tb,
133 u'ename' : unicode(etype.__name__),
133 u'ename' : unicode(etype.__name__),
134 u'evalue' : unicode(evalue)
134 u'evalue' : unicode(evalue)
135 }
135 }
136 exc_msg = self.session.send(self.iopub_socket, u'pyerr', exc_content, parent)
136 exc_msg = self.session.send(self.iopub_socket, u'pyerr', exc_content, parent)
137 reply_content = exc_content
137 reply_content = exc_content
138 else:
138 else:
139 reply_content = { 'status' : 'ok', 'payload' : {} }
139 reply_content = { 'status' : 'ok', 'payload' : {} }
140
140
141 # Flush output before sending the reply.
141 # Flush output before sending the reply.
142 sys.stderr.flush()
142 sys.stderr.flush()
143 sys.stdout.flush()
143 sys.stdout.flush()
144 # FIXME: on rare occasions, the flush doesn't seem to make it to the
144 # FIXME: on rare occasions, the flush doesn't seem to make it to the
145 # clients... This seems to mitigate the problem, but we definitely need
145 # clients... This seems to mitigate the problem, but we definitely need
146 # to better understand what's going on.
146 # to better understand what's going on.
147 if self._execute_sleep:
147 if self._execute_sleep:
148 time.sleep(self._execute_sleep)
148 time.sleep(self._execute_sleep)
149
149
150 # Send the reply.
150 # Send the reply.
151 reply_msg = self.session.send(self.shell_socket, u'execute_reply', reply_content, parent, ident=ident)
151 reply_msg = self.session.send(self.shell_socket, u'execute_reply', reply_content, parent, ident=ident)
152 self.log.debug(Message(reply_msg))
152 self.log.debug(Message(reply_msg))
153 if reply_msg['content']['status'] == u'error':
153 if reply_msg['content']['status'] == u'error':
154 self._abort_queue()
154 self._abort_queue()
155
155
156 def complete_request(self, ident, parent):
156 def complete_request(self, ident, parent):
157 matches = {'matches' : self._complete(parent),
157 matches = {'matches' : self._complete(parent),
158 'status' : 'ok'}
158 'status' : 'ok'}
159 completion_msg = self.session.send(self.shell_socket, 'complete_reply',
159 completion_msg = self.session.send(self.shell_socket, 'complete_reply',
160 matches, parent, ident)
160 matches, parent, ident)
161 self.log.debug(completion_msg)
161 self.log.debug(completion_msg)
162
162
163 def object_info_request(self, ident, parent):
163 def object_info_request(self, ident, parent):
164 context = parent['content']['oname'].split('.')
164 context = parent['content']['oname'].split('.')
165 object_info = self._object_info(context)
165 object_info = self._object_info(context)
166 msg = self.session.send(self.shell_socket, 'object_info_reply',
166 msg = self.session.send(self.shell_socket, 'object_info_reply',
167 object_info, parent, ident)
167 object_info, parent, ident)
168 self.log.debug(msg)
168 self.log.debug(msg)
169
169
170 def shutdown_request(self, ident, parent):
170 def shutdown_request(self, ident, parent):
171 content = dict(parent['content'])
171 content = dict(parent['content'])
172 msg = self.session.send(self.shell_socket, 'shutdown_reply',
172 msg = self.session.send(self.shell_socket, 'shutdown_reply',
173 content, parent, ident)
173 content, parent, ident)
174 msg = self.session.send(self.iopub_socket, 'shutdown_reply',
174 msg = self.session.send(self.iopub_socket, 'shutdown_reply',
175 content, parent, ident)
175 content, parent, ident)
176 self.log.debug(msg)
176 self.log.debug(msg)
177 time.sleep(0.1)
177 time.sleep(0.1)
178 sys.exit(0)
178 sys.exit(0)
179
179
180 #---------------------------------------------------------------------------
180 #---------------------------------------------------------------------------
181 # Protected interface
181 # Protected interface
182 #---------------------------------------------------------------------------
182 #---------------------------------------------------------------------------
183
183
184 def _abort_queue(self):
184 def _abort_queue(self):
185 while True:
185 while True:
186 ident,msg = self.session.recv(self.shell_socket, zmq.NOBLOCK)
186 ident,msg = self.session.recv(self.shell_socket, zmq.NOBLOCK)
187 if msg is None:
187 if msg is None:
188 # msg=None on EAGAIN
188 # msg=None on EAGAIN
189 break
189 break
190 else:
190 else:
191 assert ident is not None, "Missing message part."
191 assert ident is not None, "Missing message part."
192 self.log.debug("Aborting: %s"%Message(msg))
192 self.log.debug("Aborting: %s"%Message(msg))
193 msg_type = msg['msg_type']
193 msg_type = msg['header']['msg_type']
194 reply_type = msg_type.split('_')[0] + '_reply'
194 reply_type = msg_type.split('_')[0] + '_reply'
195 reply_msg = self.session.send(self.shell_socket, reply_type, {'status':'aborted'}, msg, ident=ident)
195 reply_msg = self.session.send(self.shell_socket, reply_type, {'status':'aborted'}, msg, ident=ident)
196 self.log.debug(Message(reply_msg))
196 self.log.debug(Message(reply_msg))
197 # We need to wait a bit for requests to come in. This can probably
197 # We need to wait a bit for requests to come in. This can probably
198 # be set shorter for true asynchronous clients.
198 # be set shorter for true asynchronous clients.
199 time.sleep(0.1)
199 time.sleep(0.1)
200
200
201 def _raw_input(self, prompt, ident, parent):
201 def _raw_input(self, prompt, ident, parent):
202 # Flush output before making the request.
202 # Flush output before making the request.
203 sys.stderr.flush()
203 sys.stderr.flush()
204 sys.stdout.flush()
204 sys.stdout.flush()
205
205
206 # Send the input request.
206 # Send the input request.
207 content = dict(prompt=prompt)
207 content = dict(prompt=prompt)
208 msg = self.session.send(self.stdin_socket, u'input_request', content, parent)
208 msg = self.session.send(self.stdin_socket, u'input_request', content, parent)
209
209
210 # Await a response.
210 # Await a response.
211 ident,reply = self.session.recv(self.stdin_socket, 0)
211 ident,reply = self.session.recv(self.stdin_socket, 0)
212 try:
212 try:
213 value = reply['content']['value']
213 value = reply['content']['value']
214 except:
214 except:
215 self.log.error("Got bad raw_input reply: %s"%Message(parent))
215 self.log.error("Got bad raw_input reply: %s"%Message(parent))
216 value = ''
216 value = ''
217 return value
217 return value
218
218
219 def _complete(self, msg):
219 def _complete(self, msg):
220 return self.completer.complete(msg.content.line, msg.content.text)
220 return self.completer.complete(msg.content.line, msg.content.text)
221
221
222 def _object_info(self, context):
222 def _object_info(self, context):
223 symbol, leftover = self._symbol_from_context(context)
223 symbol, leftover = self._symbol_from_context(context)
224 if symbol is not None and not leftover:
224 if symbol is not None and not leftover:
225 doc = getattr(symbol, '__doc__', '')
225 doc = getattr(symbol, '__doc__', '')
226 else:
226 else:
227 doc = ''
227 doc = ''
228 object_info = dict(docstring = doc)
228 object_info = dict(docstring = doc)
229 return object_info
229 return object_info
230
230
231 def _symbol_from_context(self, context):
231 def _symbol_from_context(self, context):
232 if not context:
232 if not context:
233 return None, context
233 return None, context
234
234
235 base_symbol_string = context[0]
235 base_symbol_string = context[0]
236 symbol = self.user_ns.get(base_symbol_string, None)
236 symbol = self.user_ns.get(base_symbol_string, None)
237 if symbol is None:
237 if symbol is None:
238 symbol = __builtin__.__dict__.get(base_symbol_string, None)
238 symbol = __builtin__.__dict__.get(base_symbol_string, None)
239 if symbol is None:
239 if symbol is None:
240 return None, context
240 return None, context
241
241
242 context = context[1:]
242 context = context[1:]
243 for i, name in enumerate(context):
243 for i, name in enumerate(context):
244 new_symbol = getattr(symbol, name, None)
244 new_symbol = getattr(symbol, name, None)
245 if new_symbol is None:
245 if new_symbol is None:
246 return symbol, context[i:]
246 return symbol, context[i:]
247 else:
247 else:
248 symbol = new_symbol
248 symbol = new_symbol
249
249
250 return symbol, []
250 return symbol, []
251
251
252 #-----------------------------------------------------------------------------
252 #-----------------------------------------------------------------------------
253 # Kernel main and launch functions
253 # Kernel main and launch functions
254 #-----------------------------------------------------------------------------
254 #-----------------------------------------------------------------------------
255
255
256 def launch_kernel(*args, **kwargs):
256 def launch_kernel(*args, **kwargs):
257 """ Launches a simple Python kernel, binding to the specified ports.
257 """ Launches a simple Python kernel, binding to the specified ports.
258
258
259 This function simply calls entry_point.base_launch_kernel with the right first
259 This function simply calls entry_point.base_launch_kernel with the right first
260 command to start a pykernel. See base_launch_kernel for arguments.
260 command to start a pykernel. See base_launch_kernel for arguments.
261
261
262 Returns
262 Returns
263 -------
263 -------
264 A tuple of form:
264 A tuple of form:
265 (kernel_process, xrep_port, pub_port, req_port, hb_port)
265 (kernel_process, xrep_port, pub_port, req_port, hb_port)
266 where kernel_process is a Popen object and the ports are integers.
266 where kernel_process is a Popen object and the ports are integers.
267 """
267 """
268 return base_launch_kernel('from IPython.zmq.pykernel import main; main()',
268 return base_launch_kernel('from IPython.zmq.pykernel import main; main()',
269 *args, **kwargs)
269 *args, **kwargs)
270
270
271 def main():
271 def main():
272 """Run a PyKernel as an application"""
272 """Run a PyKernel as an application"""
273 app = KernelApp.instance()
273 app = KernelApp.instance()
274 app.initialize()
274 app.initialize()
275 app.start()
275 app.start()
276
276
277 if __name__ == '__main__':
277 if __name__ == '__main__':
278 main()
278 main()
@@ -1,679 +1,676 b''
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 """Session object for building, serializing, sending, and receiving messages in
2 """Session object for building, serializing, sending, and receiving messages in
3 IPython. The Session object supports serialization, HMAC signatures, and
3 IPython. The Session object supports serialization, HMAC signatures, and
4 metadata on messages.
4 metadata on messages.
5
5
6 Also defined here are utilities for working with Sessions:
6 Also defined here are utilities for working with Sessions:
7 * A SessionFactory to be used as a base class for configurables that work with
7 * A SessionFactory to be used as a base class for configurables that work with
8 Sessions.
8 Sessions.
9 * A Message object for convenience that allows attribute-access to the msg dict.
9 * A Message object for convenience that allows attribute-access to the msg dict.
10
10
11 Authors:
11 Authors:
12
12
13 * Min RK
13 * Min RK
14 * Brian Granger
14 * Brian Granger
15 * Fernando Perez
15 * Fernando Perez
16 """
16 """
17 #-----------------------------------------------------------------------------
17 #-----------------------------------------------------------------------------
18 # Copyright (C) 2010-2011 The IPython Development Team
18 # Copyright (C) 2010-2011 The IPython Development Team
19 #
19 #
20 # Distributed under the terms of the BSD License. The full license is in
20 # Distributed under the terms of the BSD License. The full license is in
21 # the file COPYING, distributed as part of this software.
21 # the file COPYING, distributed as part of this software.
22 #-----------------------------------------------------------------------------
22 #-----------------------------------------------------------------------------
23
23
24 #-----------------------------------------------------------------------------
24 #-----------------------------------------------------------------------------
25 # Imports
25 # Imports
26 #-----------------------------------------------------------------------------
26 #-----------------------------------------------------------------------------
27
27
28 import hmac
28 import hmac
29 import logging
29 import logging
30 import os
30 import os
31 import pprint
31 import pprint
32 import uuid
32 import uuid
33 from datetime import datetime
33 from datetime import datetime
34
34
35 try:
35 try:
36 import cPickle
36 import cPickle
37 pickle = cPickle
37 pickle = cPickle
38 except:
38 except:
39 cPickle = None
39 cPickle = None
40 import pickle
40 import pickle
41
41
42 import zmq
42 import zmq
43 from zmq.utils import jsonapi
43 from zmq.utils import jsonapi
44 from zmq.eventloop.ioloop import IOLoop
44 from zmq.eventloop.ioloop import IOLoop
45 from zmq.eventloop.zmqstream import ZMQStream
45 from zmq.eventloop.zmqstream import ZMQStream
46
46
47 from IPython.config.configurable import Configurable, LoggingConfigurable
47 from IPython.config.configurable import Configurable, LoggingConfigurable
48 from IPython.utils.importstring import import_item
48 from IPython.utils.importstring import import_item
49 from IPython.utils.jsonutil import extract_dates, squash_dates, date_default
49 from IPython.utils.jsonutil import extract_dates, squash_dates, date_default
50 from IPython.utils.traitlets import (CBytes, Unicode, Bool, Any, Instance, Set,
50 from IPython.utils.traitlets import (CBytes, Unicode, Bool, Any, Instance, Set,
51 DottedObjectName)
51 DottedObjectName)
52
52
53 #-----------------------------------------------------------------------------
53 #-----------------------------------------------------------------------------
54 # utility functions
54 # utility functions
55 #-----------------------------------------------------------------------------
55 #-----------------------------------------------------------------------------
56
56
def squash_unicode(obj):
    """Coerce unicode back to utf8 bytestrings.

    Dicts and lists are modified in place (and also returned); unicode
    leaves are replaced by their utf8-encoded equivalents.
    """
    if isinstance(obj, dict):
        # In Python 2, obj.keys() returns a list snapshot, so it is safe
        # to re-key the dict while looping over it.
        for k in obj.keys():
            obj[k] = squash_unicode(obj[k])
            if isinstance(k, unicode):
                obj[squash_unicode(k)] = obj.pop(k)
        return obj
    if isinstance(obj, list):
        for idx, item in enumerate(obj):
            obj[idx] = squash_unicode(item)
        return obj
    if isinstance(obj, unicode):
        return obj.encode('utf8')
    return obj
70
70
#-----------------------------------------------------------------------------
# globals and defaults
#-----------------------------------------------------------------------------
# jsonlib spells the "unknown object" serializer kwarg differently from
# json/simplejson, so pick the right keyword name for the active backend.
key = 'on_unknown' if jsonapi.jsonmod.__name__ == 'jsonlib' else 'default'
# JSON pack/unpack with datetime round-tripping via date_default/extract_dates.
json_packer = lambda obj: jsonapi.dumps(obj, **{key:date_default})
json_unpacker = lambda s: extract_dates(jsonapi.loads(s))

# pickle pack/unpack; -1 selects the highest available pickle protocol.
pickle_packer = lambda o: pickle.dumps(o,-1)
pickle_unpacker = pickle.loads

# JSON is the default wire format.
default_packer = json_packer
default_unpacker = json_unpacker


# Frame that separates zmq routing identities from the signed message frames.
DELIM=b"<IDS|MSG>"
86
86
87 #-----------------------------------------------------------------------------
87 #-----------------------------------------------------------------------------
88 # Classes
88 # Classes
89 #-----------------------------------------------------------------------------
89 #-----------------------------------------------------------------------------
90
90
class SessionFactory(LoggingConfigurable):
    """The Base class for configurables that have a Session, Context, logger,
    and IOLoop.
    """

    # Name of the logger to use; assigning a new name swaps out self.log.
    logname = Unicode('')
    def _logname_changed(self, name, old, new):
        # traitlets change handler (bound to `logname` by naming convention)
        self.log = logging.getLogger(new)

    # not configurable:
    context = Instance('zmq.Context')
    def _context_default(self):
        # Share the process-global zmq context by default.
        return zmq.Context.instance()

    session = Instance('IPython.zmq.session.Session')

    loop = Instance('zmq.eventloop.ioloop.IOLoop', allow_none=False)
    def _loop_default(self):
        # Default to the process-wide IOLoop singleton.
        return IOLoop.instance()

    def __init__(self, **kwargs):
        super(SessionFactory, self).__init__(**kwargs)

        if self.session is None:
            # construct the session
            self.session = Session(**kwargs)
117
117
118
118
class Message(object):
    """Dict-backed message whose keys are exposed as attributes.

    Build one from a dict; recover a dict simply by calling dict(msg_obj).
    """

    def __init__(self, msg_dict):
        attrs = self.__dict__
        for key, value in dict(msg_dict).iteritems():
            # wrap nested dicts recursively so attribute access goes deep
            if isinstance(value, dict):
                value = Message(value)
            attrs[key] = value

    def __repr__(self):
        return repr(self.__dict__)

    def __str__(self):
        return pprint.pformat(self.__dict__)

    def __contains__(self, key):
        return key in self.__dict__

    def __getitem__(self, key):
        return self.__dict__[key]

    # Exposing (key, value) pairs lets dict(msg_obj) work out of the box.
    def __iter__(self):
        return iter(self.__dict__.iteritems())
147
147
148
148
def msg_header(msg_id, msg_type, username, session):
    """Build a header dict with the given ids plus a fresh timestamp."""
    return dict(msg_id=msg_id, msg_type=msg_type, username=username,
                session=session, date=datetime.now())
152
152
def extract_header(msg_or_header):
    """Given a message or header, return the header as a dict."""
    if not msg_or_header:
        # None / empty -> empty header
        return {}
    try:
        # full message: the header lives under 'header'
        header = msg_or_header['header']
    except KeyError:
        # otherwise it must itself be a header; probing 'msg_id'
        # re-raises KeyError when it is neither
        msg_or_header['msg_id']
        header = msg_or_header
    if not isinstance(header, dict):
        header = dict(header)
    return header
171
171
172 class Session(Configurable):
172 class Session(Configurable):
173 """Object for handling serialization and sending of messages.
173 """Object for handling serialization and sending of messages.
174
174
175 The Session object handles building messages and sending them
175 The Session object handles building messages and sending them
176 with ZMQ sockets or ZMQStream objects. Objects can communicate with each
176 with ZMQ sockets or ZMQStream objects. Objects can communicate with each
177 other over the network via Session objects, and only need to work with the
177 other over the network via Session objects, and only need to work with the
178 dict-based IPython message spec. The Session will handle
178 dict-based IPython message spec. The Session will handle
179 serialization/deserialization, security, and metadata.
179 serialization/deserialization, security, and metadata.
180
180
181 Sessions support configurable serialiization via packer/unpacker traits,
181 Sessions support configurable serialiization via packer/unpacker traits,
182 and signing with HMAC digests via the key/keyfile traits.
182 and signing with HMAC digests via the key/keyfile traits.
183
183
184 Parameters
184 Parameters
185 ----------
185 ----------
186
186
187 debug : bool
187 debug : bool
188 whether to trigger extra debugging statements
188 whether to trigger extra debugging statements
189 packer/unpacker : str : 'json', 'pickle' or import_string
189 packer/unpacker : str : 'json', 'pickle' or import_string
190 importstrings for methods to serialize message parts. If just
190 importstrings for methods to serialize message parts. If just
191 'json' or 'pickle', predefined JSON and pickle packers will be used.
191 'json' or 'pickle', predefined JSON and pickle packers will be used.
192 Otherwise, the entire importstring must be used.
192 Otherwise, the entire importstring must be used.
193
193
194 The functions must accept at least valid JSON input, and output *bytes*.
194 The functions must accept at least valid JSON input, and output *bytes*.
195
195
196 For example, to use msgpack:
196 For example, to use msgpack:
197 packer = 'msgpack.packb', unpacker='msgpack.unpackb'
197 packer = 'msgpack.packb', unpacker='msgpack.unpackb'
198 pack/unpack : callables
198 pack/unpack : callables
199 You can also set the pack/unpack callables for serialization directly.
199 You can also set the pack/unpack callables for serialization directly.
200 session : bytes
200 session : bytes
201 the ID of this Session object. The default is to generate a new UUID.
201 the ID of this Session object. The default is to generate a new UUID.
202 username : unicode
202 username : unicode
203 username added to message headers. The default is to ask the OS.
203 username added to message headers. The default is to ask the OS.
204 key : bytes
204 key : bytes
205 The key used to initialize an HMAC signature. If unset, messages
205 The key used to initialize an HMAC signature. If unset, messages
206 will not be signed or checked.
206 will not be signed or checked.
207 keyfile : filepath
207 keyfile : filepath
208 The file containing a key. If this is set, `key` will be initialized
208 The file containing a key. If this is set, `key` will be initialized
209 to the contents of the file.
209 to the contents of the file.
210
210
211 """
211 """
212
212
    debug=Bool(False, config=True, help="""Debug output in the Session""")

    packer = DottedObjectName('json',config=True,
            help="""The name of the packer for serializing messages.
            Should be one of 'json', 'pickle', or an import name
            for a custom callable serializer.""")
    def _packer_changed(self, name, old, new):
        # traitlets change handler (bound to `packer` by naming convention):
        # keep self.pack/self.unpack pointing at the chosen scheme.
        if new.lower() == 'json':
            self.pack = json_packer
            self.unpack = json_unpacker
        elif new.lower() == 'pickle':
            self.pack = pickle_packer
            self.unpack = pickle_unpacker
        else:
            # custom packer: import the callable; `unpacker` is set separately
            self.pack = import_item(str(new))
228
228
    unpacker = DottedObjectName('json', config=True,
        help="""The name of the unpacker for unserializing messages.
        Only used with custom functions for `packer`.""")
    def _unpacker_changed(self, name, old, new):
        # Mirror of _packer_changed; for the predefined schemes both
        # directions are (re)assigned so pack/unpack stay a matched pair.
        if new.lower() == 'json':
            self.pack = json_packer
            self.unpack = json_unpacker
        elif new.lower() == 'pickle':
            self.pack = pickle_packer
            self.unpack = pickle_unpacker
        else:
            self.unpack = import_item(str(new))
241
241
    session = CBytes(b'', config=True,
        help="""The UUID identifying this session.""")
    def _session_default(self):
        # Fresh random session id (stringified uuid4) when none is configured.
        return bytes(uuid.uuid4())

    username = Unicode(os.environ.get('USER','username'), config=True,
        help="""Username for the Session. Default is your system username.""")

    # message signature related traits:
    key = CBytes(b'', config=True,
        help="""execution key, for extra authentication.""")
    def _key_changed(self, name, old, new):
        # An empty key disables signing entirely (auth -> None).
        if new:
            self.auth = hmac.HMAC(new)
        else:
            self.auth = None
    # Prototype HMAC object; copied per message when signing (see sign()).
    auth = Instance(hmac.HMAC)
    # Digests seen so far; presumably for replay detection -- TODO confirm
    # against the consumers of this set elsewhere in the codebase.
    digest_history = Set()
260
260
    keyfile = Unicode('', config=True,
        help="""path to file containing execution key.""")
    def _keyfile_changed(self, name, old, new):
        # Load the signing key from the file; assigning self.key in turn
        # triggers _key_changed, which (re)builds the HMAC prototype.
        with open(new, 'rb') as f:
            self.key = f.read().strip()
266
266
    pack = Any(default_packer) # the actual packer function
    def _pack_changed(self, name, old, new):
        if not callable(new):
            raise TypeError("packer must be callable, not %s"%type(new))

    unpack = Any(default_unpacker) # the actual unpacker function
    def _unpack_changed(self, name, old, new):
        # unpacker output is not checked - it is assumed to be the
        # inverse of pack; only callability is verified here
        if not callable(new):
            raise TypeError("unpacker must be callable, not %s"%type(new))
277
277
    def __init__(self, **kwargs):
        """create a Session object

        Parameters
        ----------

        debug : bool
            whether to trigger extra debugging statements
        packer/unpacker : str : 'json', 'pickle' or import_string
            importstrings for methods to serialize message parts.  If just
            'json' or 'pickle', predefined JSON and pickle packers will be used.
            Otherwise, the entire importstring must be used.

            The functions must accept at least valid JSON input, and output
            *bytes*.

            For example, to use msgpack:
            packer = 'msgpack.packb', unpacker='msgpack.unpackb'
        pack/unpack : callables
            You can also set the pack/unpack callables for serialization
            directly.
        session : bytes
            the ID of this Session object.  The default is to generate a new
            UUID.
        username : unicode
            username added to message headers.  The default is to ask the OS.
        key : bytes
            The key used to initialize an HMAC signature.  If unset, messages
            will not be signed or checked.
        keyfile : filepath
            The file containing a key.  If this is set, `key` will be
            initialized to the contents of the file.
        """
        super(Session, self).__init__(**kwargs)
        # Validate pack/unpack and install datetime-handling wrappers if needed.
        self._check_packers()
        # Pre-packed empty dict, reused whenever a message has no content.
        self.none = self.pack({})
314
314
315 @property
315 @property
316 def msg_id(self):
316 def msg_id(self):
317 """always return new uuid"""
317 """always return new uuid"""
318 return str(uuid.uuid4())
318 return str(uuid.uuid4())
319
319
    def _check_packers(self):
        """check packers for binary data and datetime support."""
        pack = self.pack
        unpack = self.unpack

        # check simple serialization
        msg = dict(a=[1,'hi'])
        try:
            packed = pack(msg)
        except Exception:
            raise ValueError("packer could not serialize a simple message")

        # ensure packed message is bytes
        if not isinstance(packed, bytes):
            raise ValueError("message packed to %r, but bytes are required"%type(packed))

        # check that unpack is pack's inverse
        try:
            unpacked = unpack(packed)
        except Exception:
            raise ValueError("unpacker could not handle the packer's output")

        # check datetime support
        msg = dict(t=datetime.now())
        try:
            unpacked = unpack(pack(msg))
        except Exception:
            # no native datetime support: wrap the raw pack/unpack so
            # datetimes are squashed to strings on the way out and
            # re-extracted on the way in
            self.pack = lambda o: pack(squash_dates(o))
            self.unpack = lambda s: extract_dates(unpack(s))
349
349
    def msg_header(self, msg_type):
        """Create a new header dict for a message of type `msg_type`,
        stamped with this session's username/session id and a fresh msg_id."""
        return msg_header(self.msg_id, msg_type, self.username, self.session)
352
352
353 def msg(self, msg_type, content=None, parent=None, subheader=None):
353 def msg(self, msg_type, content=None, parent=None, subheader=None):
354 """Return the nested message dict.
354 """Return the nested message dict.
355
355
356 This format is different from what is sent over the wire. The
356 This format is different from what is sent over the wire. The
357 self.serialize method converts this nested message dict to the wire
357 self.serialize method converts this nested message dict to the wire
358 format, which uses a message list.
358 format, which uses a message list.
359 """
359 """
360 msg = {}
360 msg = {}
361 msg['header'] = self.msg_header(msg_type)
361 msg['header'] = self.msg_header(msg_type)
362 msg['msg_id'] = msg['header']['msg_id']
363 msg['parent_header'] = {} if parent is None else extract_header(parent)
362 msg['parent_header'] = {} if parent is None else extract_header(parent)
364 msg['msg_type'] = msg_type
365 msg['content'] = {} if content is None else content
363 msg['content'] = {} if content is None else content
366 sub = {} if subheader is None else subheader
364 sub = {} if subheader is None else subheader
367 msg['header'].update(sub)
365 msg['header'].update(sub)
368 return msg
366 return msg
369
367
370 def sign(self, msg_list):
368 def sign(self, msg_list):
371 """Sign a message with HMAC digest. If no auth, return b''.
369 """Sign a message with HMAC digest. If no auth, return b''.
372
370
373 Parameters
371 Parameters
374 ----------
372 ----------
375 msg_list : list
373 msg_list : list
376 The [p_header,p_parent,p_content] part of the message list.
374 The [p_header,p_parent,p_content] part of the message list.
377 """
375 """
378 if self.auth is None:
376 if self.auth is None:
379 return b''
377 return b''
380 h = self.auth.copy()
378 h = self.auth.copy()
381 for m in msg_list:
379 for m in msg_list:
382 h.update(m)
380 h.update(m)
383 return h.hexdigest()
381 return h.hexdigest()
384
382
    def serialize(self, msg, ident=None):
        """Serialize the message components to bytes.

        Parameters
        ----------
        msg : dict or Message
            The nested message dict as returned by the self.msg method.

        Returns
        -------
        msg_list : list
            The list of bytes objects to be sent with the format:
            [ident1,ident2,...,DELIM,HMAC,p_header,p_parent,p_content,
             buffer1,buffer2,...]. In this list, the p_* entities are
            the packed or serialized versions, so if JSON is used, these
            are utf8 encoded JSON strings.
        """
        content = msg.get('content', {})
        if content is None:
            # reuse the pre-packed empty dict
            content = self.none
        elif isinstance(content, dict):
            content = self.pack(content)
        elif isinstance(content, bytes):
            # content is already packed, as in a relayed message
            pass
        elif isinstance(content, unicode):
            # should be bytes, but JSON often spits out unicode
            content = content.encode('utf8')
        else:
            raise TypeError("Content incorrect type: %s"%type(content))

        # the signed portion of the wire message
        real_message = [self.pack(msg['header']),
                        self.pack(msg['parent_header']),
                        content
        ]

        to_send = []

        if isinstance(ident, list):
            # accept list of idents
            to_send.extend(ident)
        elif ident is not None:
            to_send.append(ident)
        to_send.append(DELIM)

        # HMAC signature frame (b'' when signing is disabled) precedes payload
        signature = self.sign(real_message)
        to_send.append(signature)

        to_send.extend(real_message)

        return to_send
436
434
    def send(self, stream, msg_or_type, content=None, parent=None, ident=None,
             buffers=None, subheader=None, track=False):
        """Build and send a message via stream or socket.

        The message format used by this function internally is as follows:

        [ident1,ident2,...,DELIM,HMAC,p_header,p_parent,p_content,
         buffer1,buffer2,...]

        The self.serialize method converts the nested message dict into this
        format.

        Parameters
        ----------

        stream : zmq.Socket or ZMQStream
            the socket-like object used to send the data
        msg_or_type : str or Message/dict
            Normally, msg_or_type will be a msg_type unless a message is being
            sent more than once.

        content : dict or None
            the content of the message (ignored if msg_or_type is a message)
        parent : Message or dict or None
            the parent or parent header describing the parent of this message
        ident : bytes or list of bytes
            the zmq.IDENTITY routing path
        subheader : dict or None
            extra header keys for this message's header
        buffers : list or None
            the already-serialized buffers to be appended to the message
        track : bool
            whether to track.  Only for use with Sockets,
            because ZMQStream objects cannot track messages.

        Returns
        -------
        msg : message dict
            the constructed message
        (msg,tracker) : (message dict, MessageTracker)
            if track=True, then a 2-tuple will be returned,
            the first element being the constructed
            message, and the second being the MessageTracker

        """

        if not isinstance(stream, (zmq.Socket, ZMQStream)):
            raise TypeError("stream must be Socket or ZMQStream, not %r"%type(stream))
        elif track and isinstance(stream, ZMQStream):
            raise TypeError("ZMQStream cannot track messages")

        if isinstance(msg_or_type, (Message, dict)):
            # we got a Message, not a msg_type
            # don't build a new Message
            msg = msg_or_type
        else:
            msg = self.msg(msg_or_type, content, parent, subheader)

        buffers = [] if buffers is None else buffers
        to_send = self.serialize(msg, ident)
        flag = 0
        if buffers:
            # more frames follow, so only the FINAL buffer send may be tracked
            flag = zmq.SNDMORE
            _track = False
        else:
            _track=track
        if track:
            tracker = stream.send_multipart(to_send, flag, copy=False, track=_track)
        else:
            tracker = stream.send_multipart(to_send, flag, copy=False)
        # all buffers except the last keep SNDMORE set
        for b in buffers[:-1]:
            stream.send(b, flag, copy=False)
        if buffers:
            # final buffer: no SNDMORE, and this send carries the tracker
            if track:
                tracker = stream.send(buffers[-1], copy=False, track=track)
            else:
                tracker = stream.send(buffers[-1], copy=False)

        # omsg = Message(msg)
        if self.debug:
            pprint.pprint(msg)
            pprint.pprint(to_send)
            pprint.pprint(buffers)

        # record the MessageTracker (or None) on the returned message
        msg['tracker'] = tracker

        return msg
524
522
525 def send_raw(self, stream, msg_list, flags=0, copy=True, ident=None):
523 def send_raw(self, stream, msg_list, flags=0, copy=True, ident=None):
526 """Send a raw message via ident path.
524 """Send a raw message via ident path.
527
525
528 This method is used to send a already serialized message.
526 This method is used to send a already serialized message.
529
527
530 Parameters
528 Parameters
531 ----------
529 ----------
532 stream : ZMQStream or Socket
530 stream : ZMQStream or Socket
533 The ZMQ stream or socket to use for sending the message.
531 The ZMQ stream or socket to use for sending the message.
534 msg_list : list
532 msg_list : list
535 The serialized list of messages to send. This only includes the
533 The serialized list of messages to send. This only includes the
536 [p_header,p_parent,p_content,buffer1,buffer2,...] portion of
534 [p_header,p_parent,p_content,buffer1,buffer2,...] portion of
537 the message.
535 the message.
538 ident : ident or list
536 ident : ident or list
539 A single ident or a list of idents to use in sending.
537 A single ident or a list of idents to use in sending.
540 """
538 """
541 to_send = []
539 to_send = []
542 if isinstance(ident, bytes):
540 if isinstance(ident, bytes):
543 ident = [ident]
541 ident = [ident]
544 if ident is not None:
542 if ident is not None:
545 to_send.extend(ident)
543 to_send.extend(ident)
546
544
547 to_send.append(DELIM)
545 to_send.append(DELIM)
548 to_send.append(self.sign(msg_list))
546 to_send.append(self.sign(msg_list))
549 to_send.extend(msg_list)
547 to_send.extend(msg_list)
550 stream.send_multipart(msg_list, flags, copy=copy)
548 stream.send_multipart(msg_list, flags, copy=copy)
551
549
552 def recv(self, socket, mode=zmq.NOBLOCK, content=True, copy=True):
550 def recv(self, socket, mode=zmq.NOBLOCK, content=True, copy=True):
553 """Receive and unpack a message.
551 """Receive and unpack a message.
554
552
555 Parameters
553 Parameters
556 ----------
554 ----------
557 socket : ZMQStream or Socket
555 socket : ZMQStream or Socket
558 The socket or stream to use in receiving.
556 The socket or stream to use in receiving.
559
557
560 Returns
558 Returns
561 -------
559 -------
562 [idents], msg
560 [idents], msg
563 [idents] is a list of idents and msg is a nested message dict of
561 [idents] is a list of idents and msg is a nested message dict of
564 same format as self.msg returns.
562 same format as self.msg returns.
565 """
563 """
566 if isinstance(socket, ZMQStream):
564 if isinstance(socket, ZMQStream):
567 socket = socket.socket
565 socket = socket.socket
568 try:
566 try:
569 msg_list = socket.recv_multipart(mode)
567 msg_list = socket.recv_multipart(mode)
570 except zmq.ZMQError as e:
568 except zmq.ZMQError as e:
571 if e.errno == zmq.EAGAIN:
569 if e.errno == zmq.EAGAIN:
572 # We can convert EAGAIN to None as we know in this case
570 # We can convert EAGAIN to None as we know in this case
573 # recv_multipart won't return None.
571 # recv_multipart won't return None.
574 return None,None
572 return None,None
575 else:
573 else:
576 raise
574 raise
577 # split multipart message into identity list and message dict
575 # split multipart message into identity list and message dict
578 # invalid large messages can cause very expensive string comparisons
576 # invalid large messages can cause very expensive string comparisons
579 idents, msg_list = self.feed_identities(msg_list, copy)
577 idents, msg_list = self.feed_identities(msg_list, copy)
580 try:
578 try:
581 return idents, self.unpack_message(msg_list, content=content, copy=copy)
579 return idents, self.unpack_message(msg_list, content=content, copy=copy)
582 except Exception as e:
580 except Exception as e:
583 print (idents, msg_list)
581 print (idents, msg_list)
584 # TODO: handle it
582 # TODO: handle it
585 raise e
583 raise e
586
584
587 def feed_identities(self, msg_list, copy=True):
585 def feed_identities(self, msg_list, copy=True):
588 """Split the identities from the rest of the message.
586 """Split the identities from the rest of the message.
589
587
590 Feed until DELIM is reached, then return the prefix as idents and
588 Feed until DELIM is reached, then return the prefix as idents and
591 remainder as msg_list. This is easily broken by setting an IDENT to DELIM,
589 remainder as msg_list. This is easily broken by setting an IDENT to DELIM,
592 but that would be silly.
590 but that would be silly.
593
591
594 Parameters
592 Parameters
595 ----------
593 ----------
596 msg_list : a list of Message or bytes objects
594 msg_list : a list of Message or bytes objects
597 The message to be split.
595 The message to be split.
598 copy : bool
596 copy : bool
599 flag determining whether the arguments are bytes or Messages
597 flag determining whether the arguments are bytes or Messages
600
598
601 Returns
599 Returns
602 -------
600 -------
603 (idents,msg_list) : two lists
601 (idents,msg_list) : two lists
604 idents will always be a list of bytes - the indentity prefix
602 idents will always be a list of bytes - the indentity prefix
605 msg_list will be a list of bytes or Messages, unchanged from input
603 msg_list will be a list of bytes or Messages, unchanged from input
606 msg_list should be unpackable via self.unpack_message at this point.
604 msg_list should be unpackable via self.unpack_message at this point.
607 """
605 """
608 if copy:
606 if copy:
609 idx = msg_list.index(DELIM)
607 idx = msg_list.index(DELIM)
610 return msg_list[:idx], msg_list[idx+1:]
608 return msg_list[:idx], msg_list[idx+1:]
611 else:
609 else:
612 failed = True
610 failed = True
613 for idx,m in enumerate(msg_list):
611 for idx,m in enumerate(msg_list):
614 if m.bytes == DELIM:
612 if m.bytes == DELIM:
615 failed = False
613 failed = False
616 break
614 break
617 if failed:
615 if failed:
618 raise ValueError("DELIM not in msg_list")
616 raise ValueError("DELIM not in msg_list")
619 idents, msg_list = msg_list[:idx], msg_list[idx+1:]
617 idents, msg_list = msg_list[:idx], msg_list[idx+1:]
620 return [m.bytes for m in idents], msg_list
618 return [m.bytes for m in idents], msg_list
621
619
622 def unpack_message(self, msg_list, content=True, copy=True):
620 def unpack_message(self, msg_list, content=True, copy=True):
623 """Return a message object from the format
621 """Return a message object from the format
624 sent by self.send.
622 sent by self.send.
625
623
626 Parameters:
624 Parameters:
627 -----------
625 -----------
628
626
629 content : bool (True)
627 content : bool (True)
630 whether to unpack the content dict (True),
628 whether to unpack the content dict (True),
631 or leave it serialized (False)
629 or leave it serialized (False)
632
630
633 copy : bool (True)
631 copy : bool (True)
634 whether to return the bytes (True),
632 whether to return the bytes (True),
635 or the non-copying Message object in each place (False)
633 or the non-copying Message object in each place (False)
636
634
637 """
635 """
638 minlen = 4
636 minlen = 4
639 message = {}
637 message = {}
640 if not copy:
638 if not copy:
641 for i in range(minlen):
639 for i in range(minlen):
642 msg_list[i] = msg_list[i].bytes
640 msg_list[i] = msg_list[i].bytes
643 if self.auth is not None:
641 if self.auth is not None:
644 signature = msg_list[0]
642 signature = msg_list[0]
645 if signature in self.digest_history:
643 if signature in self.digest_history:
646 raise ValueError("Duplicate Signature: %r"%signature)
644 raise ValueError("Duplicate Signature: %r"%signature)
647 self.digest_history.add(signature)
645 self.digest_history.add(signature)
648 check = self.sign(msg_list[1:4])
646 check = self.sign(msg_list[1:4])
649 if not signature == check:
647 if not signature == check:
650 raise ValueError("Invalid Signature: %r"%signature)
648 raise ValueError("Invalid Signature: %r"%signature)
651 if not len(msg_list) >= minlen:
649 if not len(msg_list) >= minlen:
652 raise TypeError("malformed message, must have at least %i elements"%minlen)
650 raise TypeError("malformed message, must have at least %i elements"%minlen)
653 message['header'] = self.unpack(msg_list[1])
651 message['header'] = self.unpack(msg_list[1])
654 message['msg_type'] = message['header']['msg_type']
655 message['parent_header'] = self.unpack(msg_list[2])
652 message['parent_header'] = self.unpack(msg_list[2])
656 if content:
653 if content:
657 message['content'] = self.unpack(msg_list[3])
654 message['content'] = self.unpack(msg_list[3])
658 else:
655 else:
659 message['content'] = msg_list[3]
656 message['content'] = msg_list[3]
660
657
661 message['buffers'] = msg_list[4:]
658 message['buffers'] = msg_list[4:]
662 return message
659 return message
663
660
664 def test_msg2obj():
661 def test_msg2obj():
665 am = dict(x=1)
662 am = dict(x=1)
666 ao = Message(am)
663 ao = Message(am)
667 assert ao.x == am['x']
664 assert ao.x == am['x']
668
665
669 am['y'] = dict(z=1)
666 am['y'] = dict(z=1)
670 ao = Message(am)
667 ao = Message(am)
671 assert ao.y.z == am['y']['z']
668 assert ao.y.z == am['y']['z']
672
669
673 k1, k2 = 'y', 'z'
670 k1, k2 = 'y', 'z'
674 assert ao[k1][k2] == am[k1][k2]
671 assert ao[k1][k2] == am[k1][k2]
675
672
676 am2 = dict(ao)
673 am2 = dict(ao)
677 assert am['x'] == am2['x']
674 assert am['x'] == am2['x']
678 assert am['y']['z'] == am2['y']['z']
675 assert am['y']['z'] == am2['y']['z']
679
676
@@ -1,111 +1,109 b''
1 """test building messages with streamsession"""
1 """test building messages with streamsession"""
2
2
3 #-------------------------------------------------------------------------------
3 #-------------------------------------------------------------------------------
4 # Copyright (C) 2011 The IPython Development Team
4 # Copyright (C) 2011 The IPython Development Team
5 #
5 #
6 # Distributed under the terms of the BSD License. The full license is in
6 # Distributed under the terms of the BSD License. The full license is in
7 # the file COPYING, distributed as part of this software.
7 # the file COPYING, distributed as part of this software.
8 #-------------------------------------------------------------------------------
8 #-------------------------------------------------------------------------------
9
9
10 #-------------------------------------------------------------------------------
10 #-------------------------------------------------------------------------------
11 # Imports
11 # Imports
12 #-------------------------------------------------------------------------------
12 #-------------------------------------------------------------------------------
13
13
14 import os
14 import os
15 import uuid
15 import uuid
16 import zmq
16 import zmq
17
17
18 from zmq.tests import BaseZMQTestCase
18 from zmq.tests import BaseZMQTestCase
19 from zmq.eventloop.zmqstream import ZMQStream
19 from zmq.eventloop.zmqstream import ZMQStream
20
20
21 from IPython.zmq import session as ss
21 from IPython.zmq import session as ss
22
22
23 class SessionTestCase(BaseZMQTestCase):
23 class SessionTestCase(BaseZMQTestCase):
24
24
25 def setUp(self):
25 def setUp(self):
26 BaseZMQTestCase.setUp(self)
26 BaseZMQTestCase.setUp(self)
27 self.session = ss.Session()
27 self.session = ss.Session()
28
28
29 class TestSession(SessionTestCase):
29 class TestSession(SessionTestCase):
30
30
31 def test_msg(self):
31 def test_msg(self):
32 """message format"""
32 """message format"""
33 msg = self.session.msg('execute')
33 msg = self.session.msg('execute')
34 thekeys = set('header msg_id parent_header msg_type content'.split())
34 thekeys = set('header msg_id parent_header msg_type content'.split())
35 s = set(msg.keys())
35 s = set(msg.keys())
36 self.assertEquals(s, thekeys)
36 self.assertEquals(s, thekeys)
37 self.assertTrue(isinstance(msg['content'],dict))
37 self.assertTrue(isinstance(msg['content'],dict))
38 self.assertTrue(isinstance(msg['header'],dict))
38 self.assertTrue(isinstance(msg['header'],dict))
39 self.assertTrue(isinstance(msg['parent_header'],dict))
39 self.assertTrue(isinstance(msg['parent_header'],dict))
40 self.assertEquals(msg['msg_type'], 'execute')
40 self.assertEquals(msg['header']['msg_type'], 'execute')
41
41
42
43
44 def test_args(self):
42 def test_args(self):
45 """initialization arguments for Session"""
43 """initialization arguments for Session"""
46 s = self.session
44 s = self.session
47 self.assertTrue(s.pack is ss.default_packer)
45 self.assertTrue(s.pack is ss.default_packer)
48 self.assertTrue(s.unpack is ss.default_unpacker)
46 self.assertTrue(s.unpack is ss.default_unpacker)
49 self.assertEquals(s.username, os.environ.get('USER', 'username'))
47 self.assertEquals(s.username, os.environ.get('USER', 'username'))
50
48
51 s = ss.Session()
49 s = ss.Session()
52 self.assertEquals(s.username, os.environ.get('USER', 'username'))
50 self.assertEquals(s.username, os.environ.get('USER', 'username'))
53
51
54 self.assertRaises(TypeError, ss.Session, pack='hi')
52 self.assertRaises(TypeError, ss.Session, pack='hi')
55 self.assertRaises(TypeError, ss.Session, unpack='hi')
53 self.assertRaises(TypeError, ss.Session, unpack='hi')
56 u = str(uuid.uuid4())
54 u = str(uuid.uuid4())
57 s = ss.Session(username='carrot', session=u)
55 s = ss.Session(username='carrot', session=u)
58 self.assertEquals(s.session, u)
56 self.assertEquals(s.session, u)
59 self.assertEquals(s.username, 'carrot')
57 self.assertEquals(s.username, 'carrot')
60
58
61 def test_tracking(self):
59 def test_tracking(self):
62 """test tracking messages"""
60 """test tracking messages"""
63 a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
61 a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
64 s = self.session
62 s = self.session
65 stream = ZMQStream(a)
63 stream = ZMQStream(a)
66 msg = s.send(a, 'hello', track=False)
64 msg = s.send(a, 'hello', track=False)
67 self.assertTrue(msg['tracker'] is None)
65 self.assertTrue(msg['tracker'] is None)
68 msg = s.send(a, 'hello', track=True)
66 msg = s.send(a, 'hello', track=True)
69 self.assertTrue(isinstance(msg['tracker'], zmq.MessageTracker))
67 self.assertTrue(isinstance(msg['tracker'], zmq.MessageTracker))
70 M = zmq.Message(b'hi there', track=True)
68 M = zmq.Message(b'hi there', track=True)
71 msg = s.send(a, 'hello', buffers=[M], track=True)
69 msg = s.send(a, 'hello', buffers=[M], track=True)
72 t = msg['tracker']
70 t = msg['tracker']
73 self.assertTrue(isinstance(t, zmq.MessageTracker))
71 self.assertTrue(isinstance(t, zmq.MessageTracker))
74 self.assertRaises(zmq.NotDone, t.wait, .1)
72 self.assertRaises(zmq.NotDone, t.wait, .1)
75 del M
73 del M
76 t.wait(1) # this will raise
74 t.wait(1) # this will raise
77
75
78
76
79 # def test_rekey(self):
77 # def test_rekey(self):
80 # """rekeying dict around json str keys"""
78 # """rekeying dict around json str keys"""
81 # d = {'0': uuid.uuid4(), 0:uuid.uuid4()}
79 # d = {'0': uuid.uuid4(), 0:uuid.uuid4()}
82 # self.assertRaises(KeyError, ss.rekey, d)
80 # self.assertRaises(KeyError, ss.rekey, d)
83 #
81 #
84 # d = {'0': uuid.uuid4(), 1:uuid.uuid4(), 'asdf':uuid.uuid4()}
82 # d = {'0': uuid.uuid4(), 1:uuid.uuid4(), 'asdf':uuid.uuid4()}
85 # d2 = {0:d['0'],1:d[1],'asdf':d['asdf']}
83 # d2 = {0:d['0'],1:d[1],'asdf':d['asdf']}
86 # rd = ss.rekey(d)
84 # rd = ss.rekey(d)
87 # self.assertEquals(d2,rd)
85 # self.assertEquals(d2,rd)
88 #
86 #
89 # d = {'1.5':uuid.uuid4(),'1':uuid.uuid4()}
87 # d = {'1.5':uuid.uuid4(),'1':uuid.uuid4()}
90 # d2 = {1.5:d['1.5'],1:d['1']}
88 # d2 = {1.5:d['1.5'],1:d['1']}
91 # rd = ss.rekey(d)
89 # rd = ss.rekey(d)
92 # self.assertEquals(d2,rd)
90 # self.assertEquals(d2,rd)
93 #
91 #
94 # d = {'1.0':uuid.uuid4(),'1':uuid.uuid4()}
92 # d = {'1.0':uuid.uuid4(),'1':uuid.uuid4()}
95 # self.assertRaises(KeyError, ss.rekey, d)
93 # self.assertRaises(KeyError, ss.rekey, d)
96 #
94 #
97 def test_unique_msg_ids(self):
95 def test_unique_msg_ids(self):
98 """test that messages receive unique ids"""
96 """test that messages receive unique ids"""
99 ids = set()
97 ids = set()
100 for i in range(2**12):
98 for i in range(2**12):
101 h = self.session.msg_header('test')
99 h = self.session.msg_header('test')
102 msg_id = h['msg_id']
100 msg_id = h['msg_id']
103 self.assertTrue(msg_id not in ids)
101 self.assertTrue(msg_id not in ids)
104 ids.add(msg_id)
102 ids.add(msg_id)
105
103
106 def test_feed_identities(self):
104 def test_feed_identities(self):
107 """scrub the front for zmq IDENTITIES"""
105 """scrub the front for zmq IDENTITIES"""
108 theids = "engine client other".split()
106 theids = "engine client other".split()
109 content = dict(code='whoda',stuff=object())
107 content = dict(code='whoda',stuff=object())
110 themsg = self.session.msg('execute',content=content)
108 themsg = self.session.msg('execute',content=content)
111 pmsg = theids
109 pmsg = theids
General Comments 0
You need to be logged in to leave comments. Login now