cleanup pass
MinRK
@@ -0,0 +1,18 @@
1 """The IPython ZMQ-based parallel computing interface."""
2 #-----------------------------------------------------------------------------
3 # Copyright (C) 2011 The IPython Development Team
4 #
5 # Distributed under the terms of the BSD License. The full license is in
6 # the file COPYING, distributed as part of this software.
7 #-----------------------------------------------------------------------------
8
9 #-----------------------------------------------------------------------------
10 # Imports
11 #-----------------------------------------------------------------------------
12
13 from .asyncresult import *
14 from .client import Client
15 from .dependency import *
16 from .remotefunction import *
17 from .view import *
18
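The new package front-door re-exports the user-facing names above. A minimal connection sketch, assuming the package is importable as IPython.zmq.parallel (the import path is not shown in this diff) and a controller started with the default profile:

    from IPython.zmq.parallel import Client

    rc = Client()    # reads ipcontroller-client.json from the default profile
    print rc.ids     # engine ids currently registered with the controller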
@@ -1,294 +1,305 @@
1 """AsyncResult objects for the client"""
1 """AsyncResult objects for the client"""
2 #-----------------------------------------------------------------------------
2 #-----------------------------------------------------------------------------
3 # Copyright (C) 2010 The IPython Development Team
3 # Copyright (C) 2010 The IPython Development Team
4 #
4 #
5 # Distributed under the terms of the BSD License. The full license is in
5 # Distributed under the terms of the BSD License. The full license is in
6 # the file COPYING, distributed as part of this software.
6 # the file COPYING, distributed as part of this software.
7 #-----------------------------------------------------------------------------
7 #-----------------------------------------------------------------------------
8
8
9 #-----------------------------------------------------------------------------
9 #-----------------------------------------------------------------------------
10 # Imports
10 # Imports
11 #-----------------------------------------------------------------------------
11 #-----------------------------------------------------------------------------
12
12
13 import time
13 import time
14
14
15 from IPython.external.decorator import decorator
15 from IPython.external.decorator import decorator
16 from . import error
16 from . import error
17
17
18 #-----------------------------------------------------------------------------
18 #-----------------------------------------------------------------------------
19 # Classes
19 # Classes
20 #-----------------------------------------------------------------------------
20 #-----------------------------------------------------------------------------
21
21
22 @decorator
22 @decorator
23 def check_ready(f, self, *args, **kwargs):
23 def check_ready(f, self, *args, **kwargs):
24 """Call spin() to sync state prior to calling the method."""
24 """Call spin() to sync state prior to calling the method."""
25 self.wait(0)
25 self.wait(0)
26 if not self._ready:
26 if not self._ready:
27 raise error.TimeoutError("result not ready")
27 raise error.TimeoutError("result not ready")
28 return f(self, *args, **kwargs)
28 return f(self, *args, **kwargs)
29
29
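The net effect of check_ready: decorated members of an in-flight result fail fast rather than block. Given an unfinished AsyncResult `ar`:

    ar.result          # raises error.TimeoutError("result not ready") while pending
    ar['engine_id']    # likewise guarded: syncs via wait(0), then raises if still pending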
30 class AsyncResult(object):
30 class AsyncResult(object):
31 """Class for representing results of non-blocking calls.
31 """Class for representing results of non-blocking calls.
32
32
33 Provides the same interface as :py:class:`multiprocessing.AsyncResult`.
33 Provides the same interface as :py:class:`multiprocessing.pool.AsyncResult`.
34 """
34 """
35
35
36 msg_ids = None
36 msg_ids = None
37
37
38 def __init__(self, client, msg_ids, fname='unknown'):
38 def __init__(self, client, msg_ids, fname='unknown'):
39 self._client = client
39 self._client = client
40 if isinstance(msg_ids, basestring):
40 if isinstance(msg_ids, basestring):
41 msg_ids = [msg_ids]
41 msg_ids = [msg_ids]
42 self.msg_ids = msg_ids
42 self.msg_ids = msg_ids
43 self._fname=fname
43 self._fname=fname
44 self._ready = False
44 self._ready = False
45 self._success = None
45 self._success = None
46 self._single_result = len(msg_ids) == 1
46 self._single_result = len(msg_ids) == 1
47
47
48 def __repr__(self):
48 def __repr__(self):
49 if self._ready:
49 if self._ready:
50 return "<%s: finished>"%(self.__class__.__name__)
50 return "<%s: finished>"%(self.__class__.__name__)
51 else:
51 else:
52 return "<%s: %s>"%(self.__class__.__name__,self._fname)
52 return "<%s: %s>"%(self.__class__.__name__,self._fname)
53
53
54
54
55 def _reconstruct_result(self, res):
55 def _reconstruct_result(self, res):
56 """
56 """Reconstruct our result from actual result list (always a list)
57
57 Override me in subclasses for turning a list of results
58 Override me in subclasses for turning a list of results
58 into the expected form.
59 into the expected form.
59 """
60 """
60 if self._single_result:
61 if self._single_result:
61 return res[0]
62 return res[0]
62 else:
63 else:
63 return res
64 return res
64
65
65 def get(self, timeout=-1):
66 def get(self, timeout=-1):
66 """Return the result when it arrives.
67 """Return the result when it arrives.
67
68
68 If `timeout` is not ``None`` and the result does not arrive within
69 If `timeout` is not ``None`` and the result does not arrive within
69 `timeout` seconds then ``TimeoutError`` is raised. If the
70 `timeout` seconds then ``TimeoutError`` is raised. If the
70 remote call raised an exception then that exception will be reraised
71 remote call raised an exception then that exception will be reraised
71 by get().
72 by get() inside a `RemoteError`.
72 """
73 """
73 if not self.ready():
74 if not self.ready():
74 self.wait(timeout)
75 self.wait(timeout)
75
76
76 if self._ready:
77 if self._ready:
77 if self._success:
78 if self._success:
78 return self._result
79 return self._result
79 else:
80 else:
80 raise self._exception
81 raise self._exception
81 else:
82 else:
82 raise error.TimeoutError("Result not ready.")
83 raise error.TimeoutError("Result not ready.")
83
84
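Usage mirrors multiprocessing.pool.AsyncResult; a sketch of bounding the wait (assuming `error` is imported from this package):

    try:
        result = ar.get(timeout=5)    # TimeoutError if not finished within 5 seconds
    except error.TimeoutError:
        print "not ready yet"         # a remote failure would instead reraise inside a RemoteError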
84 def ready(self):
85 def ready(self):
85 """Return whether the call has completed."""
86 """Return whether the call has completed."""
86 if not self._ready:
87 if not self._ready:
87 self.wait(0)
88 self.wait(0)
88 return self._ready
89 return self._ready
89
90
90 def wait(self, timeout=-1):
91 def wait(self, timeout=-1):
91 """Wait until the result is available or until `timeout` seconds pass.
92 """Wait until the result is available or until `timeout` seconds pass.
93
94 This method always returns None.
92 """
95 """
93 if self._ready:
96 if self._ready:
94 return
97 return
95 self._ready = self._client.barrier(self.msg_ids, timeout)
98 self._ready = self._client.barrier(self.msg_ids, timeout)
96 if self._ready:
99 if self._ready:
97 try:
100 try:
98 results = map(self._client.results.get, self.msg_ids)
101 results = map(self._client.results.get, self.msg_ids)
99 self._result = results
102 self._result = results
100 if self._single_result:
103 if self._single_result:
101 r = results[0]
104 r = results[0]
102 if isinstance(r, Exception):
105 if isinstance(r, Exception):
103 raise r
106 raise r
104 else:
107 else:
105 results = error.collect_exceptions(results, self._fname)
108 results = error.collect_exceptions(results, self._fname)
106 self._result = self._reconstruct_result(results)
109 self._result = self._reconstruct_result(results)
107 except Exception, e:
110 except Exception, e:
108 self._exception = e
111 self._exception = e
109 self._success = False
112 self._success = False
110 else:
113 else:
111 self._success = True
114 self._success = True
112 finally:
115 finally:
113 self._metadata = map(self._client.metadata.get, self.msg_ids)
116 self._metadata = map(self._client.metadata.get, self.msg_ids)
114
117
115
118
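Because wait() always returns None, callers poll completion separately, e.g.:

    ar.wait(1)           # block for at most one second; returns None either way
    if ar.ready():
        print ar.get(0)  # safe: the result is already local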
116 def successful(self):
119 def successful(self):
117 """Return whether the call completed without raising an exception.
120 """Return whether the call completed without raising an exception.
118
121
119 Will raise ``AssertionError`` if the result is not ready.
122 Will raise ``AssertionError`` if the result is not ready.
120 """
123 """
121 assert self._ready
124 assert self.ready()
122 return self._success
125 return self._success
123
126
124 #----------------------------------------------------------------
127 #----------------------------------------------------------------
125 # Extra methods not in mp.pool.AsyncResult
128 # Extra methods not in mp.pool.AsyncResult
126 #----------------------------------------------------------------
129 #----------------------------------------------------------------
127
130
128 def get_dict(self, timeout=-1):
131 def get_dict(self, timeout=-1):
129 """Get the results as a dict, keyed by engine_id."""
132 """Get the results as a dict, keyed by engine_id.
133
134 timeout behavior is described in `get()`.
135 """
136
130 results = self.get(timeout)
137 results = self.get(timeout)
131 engine_ids = [ md['engine_id'] for md in self._metadata ]
138 engine_ids = [ md['engine_id'] for md in self._metadata ]
132 bycount = sorted(engine_ids, key=lambda k: engine_ids.count(k))
139 bycount = sorted(engine_ids, key=lambda k: engine_ids.count(k))
133 maxcount = bycount.count(bycount[-1])
140 maxcount = bycount.count(bycount[-1])
134 if maxcount > 1:
141 if maxcount > 1:
135 raise ValueError("Cannot build dict, %i jobs ran on engine #%i"%(
142 raise ValueError("Cannot build dict, %i jobs ran on engine #%i"%(
136 maxcount, bycount[-1]))
143 maxcount, bycount[-1]))
137
144
138 return dict(zip(engine_ids,results))
145 return dict(zip(engine_ids,results))
139
146
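A sketch of get_dict(), which only succeeds when each engine contributed exactly one result:

    dmap = ar.get_dict()    # {engine_id: result}, e.g. {0: r0, 1: r1}
                            # raises ValueError if any engine ran more than one of the jobs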
140 @property
147 @property
141 @check_ready
148 @check_ready
142 def result(self):
149 def result(self):
143 """result property."""
150 """result property wrapper for `get(timeout=0)`."""
144 return self._result
151 return self._result
145
152
146 # abbreviated alias:
153 # abbreviated alias:
147 r = result
154 r = result
148
155
149 @property
156 @property
150 @check_ready
157 @check_ready
151 def metadata(self):
158 def metadata(self):
152 """metadata property."""
159 """property for accessing execution metadata."""
153 if self._single_result:
160 if self._single_result:
154 return self._metadata[0]
161 return self._metadata[0]
155 else:
162 else:
156 return self._metadata
163 return self._metadata
157
164
158 @property
165 @property
159 def result_dict(self):
166 def result_dict(self):
160 """result property as a dict."""
167 """result property as a dict."""
161 return self.get_dict(0)
168 return self.get_dict(0)
162
169
163 def __dict__(self):
170 def __dict__(self):
164 return self.get_dict(0)
171 return self.get_dict(0)
165
172
166 #-------------------------------------
173 #-------------------------------------
167 # dict-access
174 # dict-access
168 #-------------------------------------
175 #-------------------------------------
169
176
170 @check_ready
177 @check_ready
171 def __getitem__(self, key):
178 def __getitem__(self, key):
172 """getitem returns result value(s) if keyed by int/slice, or metadata if key is str.
179 """getitem returns result value(s) if keyed by int/slice, or metadata if key is str.
173 """
180 """
174 if isinstance(key, int):
181 if isinstance(key, int):
175 return error.collect_exceptions([self._result[key]], self._fname)[0]
182 return error.collect_exceptions([self._result[key]], self._fname)[0]
176 elif isinstance(key, slice):
183 elif isinstance(key, slice):
177 return error.collect_exceptions(self._result[key], self._fname)
184 return error.collect_exceptions(self._result[key], self._fname)
178 elif isinstance(key, basestring):
185 elif isinstance(key, basestring):
179 values = [ md[key] for md in self._metadata ]
186 values = [ md[key] for md in self._metadata ]
180 if self._single_result:
187 if self._single_result:
181 return values[0]
188 return values[0]
182 else:
189 else:
183 return values
190 return values
184 else:
191 else:
185 raise TypeError("Invalid key type %r, must be 'int','slice', or 'str'"%type(key))
192 raise TypeError("Invalid key type %r, must be 'int','slice', or 'str'"%type(key))
186
193
187 @check_ready
194 @check_ready
188 def __getattr__(self, key):
195 def __getattr__(self, key):
189 """getattr maps to getitem for convenient access to metadata."""
196 """getattr maps to getitem for convenient attr access to metadata."""
190 if key not in self._metadata[0].keys():
197 if key not in self._metadata[0].keys():
191 raise AttributeError("%r object has no attribute %r"%(
198 raise AttributeError("%r object has no attribute %r"%(
192 self.__class__.__name__, key))
199 self.__class__.__name__, key))
193 return self.__getitem__(key)
200 return self.__getitem__(key)
194
201
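Together, __getitem__ and __getattr__ give three access styles, sketched here:

    ar[0]               # result value by int index (slices work too)
    ar['engine_id']     # metadata value(s) by string key
    ar.stdout           # the same metadata lookup, via attribute access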
195 # asynchronous iterator:
202 # asynchronous iterator:
196 def __iter__(self):
203 def __iter__(self):
197 if self._single_result:
204 if self._single_result:
198 raise TypeError("AsyncResults with a single result are not iterable.")
205 raise TypeError("AsyncResults with a single result are not iterable.")
199 try:
206 try:
200 rlist = self.get(0)
207 rlist = self.get(0)
201 except error.TimeoutError:
208 except error.TimeoutError:
202 # wait for each result individually
209 # wait for each result individually
203 for msg_id in self.msg_ids:
210 for msg_id in self.msg_ids:
204 ar = AsyncResult(self._client, msg_id, self._fname)
211 ar = AsyncResult(self._client, msg_id, self._fname)
205 yield ar.get()
212 yield ar.get()
206 else:
213 else:
207 # already done
214 # already done
208 for r in rlist:
215 for r in rlist:
209 yield r
216 yield r
210
217
211
218
212
219
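The iterator yields results in submission order; if the full set is not yet done, it blocks on each msg_id in turn:

    for r in ar:    # each r is yielded as its msg_id completes
        print r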
213 class AsyncMapResult(AsyncResult):
220 class AsyncMapResult(AsyncResult):
214 """Class for representing results of non-blocking gathers.
221 """Class for representing results of non-blocking gathers.
215
222
216 This will properly reconstruct the gather.
223 This will properly reconstruct the gather.
217 """
224 """
218
225
219 def __init__(self, client, msg_ids, mapObject, fname=''):
226 def __init__(self, client, msg_ids, mapObject, fname=''):
220 AsyncResult.__init__(self, client, msg_ids, fname=fname)
227 AsyncResult.__init__(self, client, msg_ids, fname=fname)
221 self._mapObject = mapObject
228 self._mapObject = mapObject
222 self._single_result = False
229 self._single_result = False
223
230
224 def _reconstruct_result(self, res):
231 def _reconstruct_result(self, res):
225 """Perform the gather on the actual results."""
232 """Perform the gather on the actual results."""
226 return self._mapObject.joinPartitions(res)
233 return self._mapObject.joinPartitions(res)
227
234
228 # asynchronous iterator:
235 # asynchronous iterator:
229 def __iter__(self):
236 def __iter__(self):
230 try:
237 try:
231 rlist = self.get(0)
238 rlist = self.get(0)
232 except error.TimeoutError:
239 except error.TimeoutError:
233 # wait for each result individually
240 # wait for each result individually
234 for msg_id in self.msg_ids:
241 for msg_id in self.msg_ids:
235 ar = AsyncResult(self._client, msg_id, self._fname)
242 ar = AsyncResult(self._client, msg_id, self._fname)
236 rlist = ar.get()
243 rlist = ar.get()
237 try:
244 try:
238 for r in rlist:
245 for r in rlist:
239 yield r
246 yield r
240 except TypeError:
247 except TypeError:
241 # flattened, not a list
248 # flattened, not a list
242 # this could get broken by flattened data that returns iterables
249 # this could get broken by flattened data that returns iterables
243 # but most calls to map do not expose the `flatten` argument
250 # but most calls to map do not expose the `flatten` argument
244 yield rlist
251 yield rlist
245 else:
252 else:
246 # already done
253 # already done
247 for r in rlist:
254 for r in rlist:
248 yield r
255 yield r
249
256
250
257
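An AsyncMapResult iterates over the joined partitions, so a parallel map reads like a local one. A hedged sketch, assuming a view's map() returns this type (the view API is outside this hunk):

    amr = view.map(somefunc, range(8))    # hypothetical call; `view` and `somefunc` are placeholders
    for r in amr:                         # partitions are rejoined in submission order
        print r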
251 class AsyncHubResult(AsyncResult):
258 class AsyncHubResult(AsyncResult):
252 """Class to wrap pending results that must be requested from the Hub"""
259 """Class to wrap pending results that must be requested from the Hub.
260
261 Note that waiting/polling on these objects requires polling the Hub over the network,
262 so use `AsyncHubResult.wait()` sparingly.
263 """
253
264
254 def wait(self, timeout=-1):
265 def wait(self, timeout=-1):
255 """wait for result to complete."""
266 """wait for result to complete."""
256 start = time.time()
267 start = time.time()
257 if self._ready:
268 if self._ready:
258 return
269 return
259 local_ids = filter(lambda msg_id: msg_id in self._client.outstanding, self.msg_ids)
270 local_ids = filter(lambda msg_id: msg_id in self._client.outstanding, self.msg_ids)
260 local_ready = self._client.barrier(local_ids, timeout)
271 local_ready = self._client.barrier(local_ids, timeout)
261 if local_ready:
272 if local_ready:
262 remote_ids = filter(lambda msg_id: msg_id not in self._client.results, self.msg_ids)
273 remote_ids = filter(lambda msg_id: msg_id not in self._client.results, self.msg_ids)
263 if not remote_ids:
274 if not remote_ids:
264 self._ready = True
275 self._ready = True
265 else:
276 else:
266 rdict = self._client.result_status(remote_ids, status_only=False)
277 rdict = self._client.result_status(remote_ids, status_only=False)
267 pending = rdict['pending']
278 pending = rdict['pending']
268 while pending and (timeout < 0 or time.time() < start+timeout):
279 while pending and (timeout < 0 or time.time() < start+timeout):
269 rdict = self._client.result_status(remote_ids, status_only=False)
280 rdict = self._client.result_status(remote_ids, status_only=False)
270 pending = rdict['pending']
281 pending = rdict['pending']
271 if pending:
282 if pending:
272 time.sleep(0.1)
283 time.sleep(0.1)
273 if not pending:
284 if not pending:
274 self._ready = True
285 self._ready = True
275 if self._ready:
286 if self._ready:
276 try:
287 try:
277 results = map(self._client.results.get, self.msg_ids)
288 results = map(self._client.results.get, self.msg_ids)
278 self._result = results
289 self._result = results
279 if self._single_result:
290 if self._single_result:
280 r = results[0]
291 r = results[0]
281 if isinstance(r, Exception):
292 if isinstance(r, Exception):
282 raise r
293 raise r
283 else:
294 else:
284 results = error.collect_exceptions(results, self._fname)
295 results = error.collect_exceptions(results, self._fname)
285 self._result = self._reconstruct_result(results)
296 self._result = self._reconstruct_result(results)
286 except Exception, e:
297 except Exception, e:
287 self._exception = e
298 self._exception = e
288 self._success = False
299 self._success = False
289 else:
300 else:
290 self._success = True
301 self._success = True
291 finally:
302 finally:
292 self._metadata = map(self._client.metadata.get, self.msg_ids)
303 self._metadata = map(self._client.metadata.get, self.msg_ids)
293
304
294 __all__ = ['AsyncResult', 'AsyncMapResult', 'AsyncHubResult']
\ No newline at end of file
305 __all__ = ['AsyncResult', 'AsyncMapResult', 'AsyncHubResult']
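AsyncHubResult instances cover results this client never received directly. A sketch, assuming Client.get_result (listed among the query methods below) hands one back for msg_ids held only by the Hub:

    ahr = rc.get_result(msg_id)    # msg_id e.g. from another client's session (placeholder)
    ahr.wait(3)                    # polls the Hub over the network, hence "use sparingly"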
@@ -1,1499 +1,1501 @@
1 """A semi-synchronous Client for the ZMQ controller"""
1 """A semi-synchronous Client for the ZMQ controller"""
2 #-----------------------------------------------------------------------------
2 #-----------------------------------------------------------------------------
3 # Copyright (C) 2010 The IPython Development Team
3 # Copyright (C) 2010 The IPython Development Team
4 #
4 #
5 # Distributed under the terms of the BSD License. The full license is in
5 # Distributed under the terms of the BSD License. The full license is in
6 # the file COPYING, distributed as part of this software.
6 # the file COPYING, distributed as part of this software.
7 #-----------------------------------------------------------------------------
7 #-----------------------------------------------------------------------------
8
8
9 #-----------------------------------------------------------------------------
9 #-----------------------------------------------------------------------------
10 # Imports
10 # Imports
11 #-----------------------------------------------------------------------------
11 #-----------------------------------------------------------------------------
12
12
13 import os
13 import os
14 import json
14 import json
15 import time
15 import time
16 import warnings
16 import warnings
17 from datetime import datetime
17 from datetime import datetime
18 from getpass import getpass
18 from getpass import getpass
19 from pprint import pprint
19 from pprint import pprint
20
20
21 pjoin = os.path.join
21 pjoin = os.path.join
22
22
23 import zmq
23 import zmq
24 # from zmq.eventloop import ioloop, zmqstream
24 # from zmq.eventloop import ioloop, zmqstream
25
25
26 from IPython.utils.path import get_ipython_dir
26 from IPython.utils.path import get_ipython_dir
27 from IPython.utils.pickleutil import Reference
27 from IPython.utils.pickleutil import Reference
28 from IPython.utils.traitlets import (HasTraits, Int, Instance, CUnicode,
28 from IPython.utils.traitlets import (HasTraits, Int, Instance, CUnicode,
29 Dict, List, Bool, Str, Set)
29 Dict, List, Bool, Str, Set)
30 from IPython.external.decorator import decorator
30 from IPython.external.decorator import decorator
31 from IPython.external.ssh import tunnel
31 from IPython.external.ssh import tunnel
32
32
33 from . import error
33 from . import error
34 from . import map as Map
34 from . import map as Map
35 from . import util
35 from . import streamsession as ss
36 from . import streamsession as ss
36 from .asyncresult import AsyncResult, AsyncMapResult, AsyncHubResult
37 from .asyncresult import AsyncResult, AsyncMapResult, AsyncHubResult
37 from .clusterdir import ClusterDir, ClusterDirError
38 from .clusterdir import ClusterDir, ClusterDirError
38 from .dependency import Dependency, depend, require, dependent
39 from .dependency import Dependency, depend, require, dependent
39 from .remotefunction import remote,parallel,ParallelFunction,RemoteFunction
40 from .remotefunction import remote, parallel, ParallelFunction, RemoteFunction
40 from .util import ReverseDict, disambiguate_url, validate_url
41 from .util import ReverseDict, validate_url, disambiguate_url
41 from .view import DirectView, LoadBalancedView
42 from .view import DirectView, LoadBalancedView
42
43
43 #--------------------------------------------------------------------------
44 #--------------------------------------------------------------------------
44 # helpers for implementing old MEC API via client.apply
45 # helpers for implementing old MEC API via client.apply
45 #--------------------------------------------------------------------------
46 #--------------------------------------------------------------------------
46
47
47 def _push(ns):
48 def _push(ns):
48 """helper method for implementing `client.push` via `client.apply`"""
49 """helper method for implementing `client.push` via `client.apply`"""
49 globals().update(ns)
50 globals().update(ns)
50
51
51 def _pull(keys):
52 def _pull(keys):
52 """helper method for implementing `client.pull` via `client.apply`"""
53 """helper method for implementing `client.pull` via `client.apply`"""
53 g = globals()
54 g = globals()
54 if isinstance(keys, (list,tuple, set)):
55 if isinstance(keys, (list,tuple, set)):
55 for key in keys:
56 for key in keys:
56 if not g.has_key(key):
57 if not g.has_key(key):
57 raise NameError("name '%s' is not defined"%key)
58 raise NameError("name '%s' is not defined"%key)
58 return map(g.get, keys)
59 return map(g.get, keys)
59 else:
60 else:
60 if not g.has_key(keys):
61 if not g.has_key(keys):
61 raise NameError("name '%s' is not defined"%keys)
62 raise NameError("name '%s' is not defined"%keys)
62 return g.get(keys)
63 return g.get(keys)
63
64
64 def _clear():
65 def _clear():
65 """helper method for implementing `client.clear` via `client.apply`"""
66 """helper method for implementing `client.clear` via `client.apply`"""
66 globals().clear()
67 globals().clear()
67
68
68 def _execute(code):
69 def _execute(code):
69 """helper method for implementing `client.execute` via `client.apply`"""
70 """helper method for implementing `client.execute` via `client.apply`"""
70 exec code in globals()
71 exec code in globals()
71
72
72
73
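These module-level helpers let the old MultiEngineClient API ride on apply. The pattern, with an assumed apply(f, *args) call signature since the real signature sits outside this hunk:

    rc.apply(_push, dict(a=5))       # push: update the engines' globals
    rc.apply(_pull, 'a')             # pull: fetch by name; NameError if undefined
    rc.apply(_execute, 'b = a + 1')  # execute: exec a code string in the engines' globals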
73 #--------------------------------------------------------------------------
74 #--------------------------------------------------------------------------
74 # Decorators for Client methods
75 # Decorators for Client methods
75 #--------------------------------------------------------------------------
76 #--------------------------------------------------------------------------
76
77
77 @decorator
78 @decorator
78 def spinfirst(f, self, *args, **kwargs):
79 def spinfirst(f, self, *args, **kwargs):
79 """Call spin() to sync state prior to calling the method."""
80 """Call spin() to sync state prior to calling the method."""
80 self.spin()
81 self.spin()
81 return f(self, *args, **kwargs)
82 return f(self, *args, **kwargs)
82
83
83 @decorator
84 @decorator
84 def defaultblock(f, self, *args, **kwargs):
85 def defaultblock(f, self, *args, **kwargs):
85 """Default to self.block; preserve self.block."""
86 """Default to self.block; preserve self.block."""
86 block = kwargs.get('block',None)
87 block = kwargs.get('block',None)
87 block = self.block if block is None else block
88 block = self.block if block is None else block
88 saveblock = self.block
89 saveblock = self.block
89 self.block = block
90 self.block = block
90 try:
91 try:
91 ret = f(self, *args, **kwargs)
92 ret = f(self, *args, **kwargs)
92 finally:
93 finally:
93 self.block = saveblock
94 self.block = saveblock
94 return ret
95 return ret
95
96
96
97
97 #--------------------------------------------------------------------------
98 #--------------------------------------------------------------------------
98 # Classes
99 # Classes
99 #--------------------------------------------------------------------------
100 #--------------------------------------------------------------------------
100
101
101 class Metadata(dict):
102 class Metadata(dict):
102 """Subclass of dict for initializing metadata values.
103 """Subclass of dict for initializing metadata values.
103
104
104 Attribute access works on keys.
105 Attribute access works on keys.
105
106
106 These objects have a strict set of keys - errors will raise if you try
107 These objects have a strict set of keys - errors will raise if you try
107 to add new keys.
108 to add new keys.
108 """
109 """
109 def __init__(self, *args, **kwargs):
110 def __init__(self, *args, **kwargs):
110 dict.__init__(self)
111 dict.__init__(self)
111 md = {'msg_id' : None,
112 md = {'msg_id' : None,
112 'submitted' : None,
113 'submitted' : None,
113 'started' : None,
114 'started' : None,
114 'completed' : None,
115 'completed' : None,
115 'received' : None,
116 'received' : None,
116 'engine_uuid' : None,
117 'engine_uuid' : None,
117 'engine_id' : None,
118 'engine_id' : None,
118 'follow' : None,
119 'follow' : None,
119 'after' : None,
120 'after' : None,
120 'status' : None,
121 'status' : None,
121
122
122 'pyin' : None,
123 'pyin' : None,
123 'pyout' : None,
124 'pyout' : None,
124 'pyerr' : None,
125 'pyerr' : None,
125 'stdout' : '',
126 'stdout' : '',
126 'stderr' : '',
127 'stderr' : '',
127 }
128 }
128 self.update(md)
129 self.update(md)
129 self.update(dict(*args, **kwargs))
130 self.update(dict(*args, **kwargs))
130
131
131 def __getattr__(self, key):
132 def __getattr__(self, key):
132 """getattr aliased to getitem"""
133 """getattr aliased to getitem"""
133 if key in self.iterkeys():
134 if key in self.iterkeys():
134 return self[key]
135 return self[key]
135 else:
136 else:
136 raise AttributeError(key)
137 raise AttributeError(key)
137
138
138 def __setattr__(self, key, value):
139 def __setattr__(self, key, value):
139 """setattr aliased to setitem, with strict"""
140 """setattr aliased to setitem, with strict"""
140 if key in self.iterkeys():
141 if key in self.iterkeys():
141 self[key] = value
142 self[key] = value
142 else:
143 else:
143 raise AttributeError(key)
144 raise AttributeError(key)
144
145
145 def __setitem__(self, key, value):
146 def __setitem__(self, key, value):
146 """strict static key enforcement"""
147 """strict static key enforcement"""
147 if key in self.iterkeys():
148 if key in self.iterkeys():
148 dict.__setitem__(self, key, value)
149 dict.__setitem__(self, key, value)
149 else:
150 else:
150 raise KeyError(key)
151 raise KeyError(key)
151
152
152
153
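The strict key set means a typo fails loudly instead of silently creating a stray entry:

    md = Metadata(status='ok')
    print md.engine_id       # None until filled in; attribute access aliases getitem
    md['no_such_key'] = 1    # raises KeyError: not part of the fixed schema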
153 class Client(HasTraits):
154 class Client(HasTraits):
154 """A semi-synchronous client to the IPython ZMQ controller
155 """A semi-synchronous client to the IPython ZMQ controller
155
156
156 Parameters
157 Parameters
157 ----------
158 ----------
158
159
159 url_or_file : bytes; zmq url or path to ipcontroller-client.json
160 url_or_file : bytes; zmq url or path to ipcontroller-client.json
160 Connection information for the Hub's registration. If a json connector
161 Connection information for the Hub's registration. If a json connector
161 file is given, then likely no further configuration is necessary.
162 file is given, then likely no further configuration is necessary.
162 [Default: use profile]
163 [Default: use profile]
163 profile : bytes
164 profile : bytes
164 The name of the Cluster profile to be used to find connector information.
165 The name of the Cluster profile to be used to find connector information.
165 [Default: 'default']
166 [Default: 'default']
166 context : zmq.Context
167 context : zmq.Context
167 Pass an existing zmq.Context instance, otherwise the client will create its own.
168 Pass an existing zmq.Context instance, otherwise the client will create its own.
168 username : bytes
169 username : bytes
169 set username to be passed to the Session object
170 set username to be passed to the Session object
170 debug : bool
171 debug : bool
171 flag for lots of message printing for debug purposes
172 flag for lots of message printing for debug purposes
172
173
173 #-------------- ssh related args ----------------
174 #-------------- ssh related args ----------------
174 # These are args for configuring the ssh tunnel to be used
175 # These are args for configuring the ssh tunnel to be used
175 # credentials are used to forward connections over ssh to the Controller
176 # credentials are used to forward connections over ssh to the Controller
176 # Note that the ip given in `addr` needs to be relative to sshserver
177 # Note that the ip given in `addr` needs to be relative to sshserver
177 # The most basic case is to leave addr as pointing to localhost (127.0.0.1),
178 # The most basic case is to leave addr as pointing to localhost (127.0.0.1),
178 # and set sshserver as the same machine the Controller is on. However,
179 # and set sshserver as the same machine the Controller is on. However,
179 # the only requirement is that sshserver is able to see the Controller
180 # the only requirement is that sshserver is able to see the Controller
180 # (i.e. is within the same trusted network).
181 # (i.e. is within the same trusted network).
181
182
182 sshserver : str
183 sshserver : str
183 A string of the form passed to ssh, i.e. 'server.tld' or 'user@server.tld:port'
184 A string of the form passed to ssh, i.e. 'server.tld' or 'user@server.tld:port'
184 If keyfile or password is specified, and this is not, it will default to
185 If keyfile or password is specified, and this is not, it will default to
185 the ip given in addr.
186 the ip given in addr.
186 sshkey : str; path to public ssh key file
187 sshkey : str; path to public ssh key file
187 This specifies a key to be used in ssh login, default None.
188 This specifies a key to be used in ssh login, default None.
188 Regular default ssh keys will be used without specifying this argument.
189 Regular default ssh keys will be used without specifying this argument.
189 password : str
190 password : str
190 Your ssh password to sshserver. Note that if this is left None,
191 Your ssh password to sshserver. Note that if this is left None,
191 you will be prompted for it if passwordless key based login is unavailable.
192 you will be prompted for it if passwordless key based login is unavailable.
192 paramiko : bool
193 paramiko : bool
193 flag for whether to use paramiko instead of shell ssh for tunneling.
194 flag for whether to use paramiko instead of shell ssh for tunneling.
194 [default: True on win32, False else]
195 [default: True on win32, False else]
195
196
196 #------- exec authentication args -------
197 #------- exec authentication args -------
197 # If even localhost is untrusted, you can have some protection against
198 # If even localhost is untrusted, you can have some protection against
198 # unauthorized execution by using a key. Messages are still sent
199 # unauthorized execution by using a key. Messages are still sent
199 # as cleartext, so if someone can snoop your loopback traffic this will
200 # as cleartext, so if someone can snoop your loopback traffic this will
200 # not help against malicious attacks.
201 # not help against malicious attacks.
201
202
202 exec_key : str
203 exec_key : str
203 an authentication key or file containing a key
204 an authentication key or file containing a key
204 default: None
205 default: None
205
206
206
207
207 Attributes
208 Attributes
208 ----------
209 ----------
209
210
210 ids : set of int engine IDs
211 ids : set of int engine IDs
211 requesting the ids attribute always synchronizes
212 requesting the ids attribute always synchronizes
212 the registration state. To request ids without synchronization,
213 the registration state. To request ids without synchronization,
213 use semi-private _ids attributes.
214 use semi-private _ids attributes.
214
215
215 history : list of msg_ids
216 history : list of msg_ids
216 a list of msg_ids, keeping track of all the execution
217 a list of msg_ids, keeping track of all the execution
217 messages you have submitted in order.
218 messages you have submitted in order.
218
219
219 outstanding : set of msg_ids
220 outstanding : set of msg_ids
220 a set of msg_ids that have been submitted, but whose
221 a set of msg_ids that have been submitted, but whose
221 results have not yet been received.
222 results have not yet been received.
222
223
223 results : dict
224 results : dict
224 a dict of all our results, keyed by msg_id
225 a dict of all our results, keyed by msg_id
225
226
226 block : bool
227 block : bool
227 determines default behavior when block not specified
228 determines default behavior when block not specified
228 in execution methods
229 in execution methods
229
230
230 Methods
231 Methods
231 -------
232 -------
232
233
233 spin
234 spin
234 flushes incoming results and registration state changes
235 flushes incoming results and registration state changes
235 control methods spin, and requesting `ids` also ensures up to date
236 control methods spin, and requesting `ids` also ensures up to date
236
237
237 barrier
238 barrier
238 wait on one or more msg_ids
239 wait on one or more msg_ids
239
240
240 execution methods
241 execution methods
241 apply
242 apply
242 legacy: execute, run
243 legacy: execute, run
243
244
244 query methods
245 query methods
245 queue_status, get_result, purge
246 queue_status, get_result, purge
246
247
247 control methods
248 control methods
248 abort, shutdown
249 abort, shutdown
249
250
250 """
251 """
251
252
252
253
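The parameters above combine into a few common construction patterns, sketched with placeholder values:

    rc = Client()                                      # default profile's connector file
    rc = Client('/path/to/ipcontroller-client.json')   # explicit json connector file
    rc = Client('tcp://127.0.0.1:10101',
                sshserver='user@server.tld')           # raw url, tunneled over ssh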
253 block = Bool(False)
254 block = Bool(False)
254 outstanding=Set()
255 outstanding=Set()
255 results = Dict()
256 results = Dict()
256 metadata = Dict()
257 metadata = Dict()
257 history = List()
258 history = List()
258 debug = Bool(False)
259 debug = Bool(False)
259 profile=CUnicode('default')
260 profile=CUnicode('default')
260
261
261 _ids = List()
262 _ids = List()
262 _connected=Bool(False)
263 _connected=Bool(False)
263 _ssh=Bool(False)
264 _ssh=Bool(False)
264 _context = Instance('zmq.Context')
265 _context = Instance('zmq.Context')
265 _config = Dict()
266 _config = Dict()
266 _engines=Instance(ReverseDict, (), {})
267 _engines=Instance(ReverseDict, (), {})
267 _registration_socket=Instance('zmq.Socket')
268 _registration_socket=Instance('zmq.Socket')
268 _query_socket=Instance('zmq.Socket')
269 _query_socket=Instance('zmq.Socket')
269 _control_socket=Instance('zmq.Socket')
270 _control_socket=Instance('zmq.Socket')
270 _iopub_socket=Instance('zmq.Socket')
271 _iopub_socket=Instance('zmq.Socket')
271 _notification_socket=Instance('zmq.Socket')
272 _notification_socket=Instance('zmq.Socket')
272 _mux_socket=Instance('zmq.Socket')
273 _mux_socket=Instance('zmq.Socket')
273 _task_socket=Instance('zmq.Socket')
274 _task_socket=Instance('zmq.Socket')
274 _task_scheme=Str()
275 _task_scheme=Str()
275 _balanced_views=Dict()
276 _balanced_views=Dict()
276 _direct_views=Dict()
277 _direct_views=Dict()
277 _closed = False
278 _closed = False
278
279
279 def __init__(self, url_or_file=None, profile='default', cluster_dir=None, ipython_dir=None,
280 def __init__(self, url_or_file=None, profile='default', cluster_dir=None, ipython_dir=None,
280 context=None, username=None, debug=False, exec_key=None,
281 context=None, username=None, debug=False, exec_key=None,
281 sshserver=None, sshkey=None, password=None, paramiko=None,
282 sshserver=None, sshkey=None, password=None, paramiko=None,
282 ):
283 ):
283 super(Client, self).__init__(debug=debug, profile=profile)
284 super(Client, self).__init__(debug=debug, profile=profile)
284 if context is None:
285 if context is None:
285 context = zmq.Context()
286 context = zmq.Context()
286 self._context = context
287 self._context = context
287
288
288
289
289 self._setup_cluster_dir(profile, cluster_dir, ipython_dir)
290 self._setup_cluster_dir(profile, cluster_dir, ipython_dir)
290 if self._cd is not None:
291 if self._cd is not None:
291 if url_or_file is None:
292 if url_or_file is None:
292 url_or_file = pjoin(self._cd.security_dir, 'ipcontroller-client.json')
293 url_or_file = pjoin(self._cd.security_dir, 'ipcontroller-client.json')
293 assert url_or_file is not None, "I can't find enough information to connect to a controller!"\
294 assert url_or_file is not None, "I can't find enough information to connect to a controller!"\
294 " Please specify at least one of url_or_file or profile."
295 " Please specify at least one of url_or_file or profile."
295
296
296 try:
297 try:
297 validate_url(url_or_file)
298 validate_url(url_or_file)
298 except AssertionError:
299 except AssertionError:
299 if not os.path.exists(url_or_file):
300 if not os.path.exists(url_or_file):
300 if self._cd:
301 if self._cd:
301 url_or_file = os.path.join(self._cd.security_dir, url_or_file)
302 url_or_file = os.path.join(self._cd.security_dir, url_or_file)
302 assert os.path.exists(url_or_file), "Not a valid connection file or url: %r"%url_or_file
303 assert os.path.exists(url_or_file), "Not a valid connection file or url: %r"%url_or_file
303 with open(url_or_file) as f:
304 with open(url_or_file) as f:
304 cfg = json.loads(f.read())
305 cfg = json.loads(f.read())
305 else:
306 else:
306 cfg = {'url':url_or_file}
307 cfg = {'url':url_or_file}
307
308
308 # sync defaults from args, json:
309 # sync defaults from args, json:
309 if sshserver:
310 if sshserver:
310 cfg['ssh'] = sshserver
311 cfg['ssh'] = sshserver
311 if exec_key:
312 if exec_key:
312 cfg['exec_key'] = exec_key
313 cfg['exec_key'] = exec_key
313 exec_key = cfg['exec_key']
314 exec_key = cfg['exec_key']
314 sshserver=cfg['ssh']
315 sshserver=cfg['ssh']
315 url = cfg['url']
316 url = cfg['url']
316 location = cfg.setdefault('location', None)
317 location = cfg.setdefault('location', None)
317 cfg['url'] = disambiguate_url(cfg['url'], location)
318 cfg['url'] = disambiguate_url(cfg['url'], location)
318 url = cfg['url']
319 url = cfg['url']
319
320
320 self._config = cfg
321 self._config = cfg
321
322
322 self._ssh = bool(sshserver or sshkey or password)
323 self._ssh = bool(sshserver or sshkey or password)
323 if self._ssh and sshserver is None:
324 if self._ssh and sshserver is None:
324 # default to ssh via localhost
325 # default to ssh via localhost
325 sshserver = url.split('://')[1].split(':')[0]
326 sshserver = url.split('://')[1].split(':')[0]
326 if self._ssh and password is None:
327 if self._ssh and password is None:
327 if tunnel.try_passwordless_ssh(sshserver, sshkey, paramiko):
328 if tunnel.try_passwordless_ssh(sshserver, sshkey, paramiko):
328 password=False
329 password=False
329 else:
330 else:
330 password = getpass("SSH Password for %s: "%sshserver)
331 password = getpass("SSH Password for %s: "%sshserver)
331 ssh_kwargs = dict(keyfile=sshkey, password=password, paramiko=paramiko)
332 ssh_kwargs = dict(keyfile=sshkey, password=password, paramiko=paramiko)
332 if exec_key is not None and os.path.isfile(exec_key):
333 if exec_key is not None and os.path.isfile(exec_key):
333 arg = 'keyfile'
334 arg = 'keyfile'
334 else:
335 else:
335 arg = 'key'
336 arg = 'key'
336 key_arg = {arg:exec_key}
337 key_arg = {arg:exec_key}
337 if username is None:
338 if username is None:
338 self.session = ss.StreamSession(**key_arg)
339 self.session = ss.StreamSession(**key_arg)
339 else:
340 else:
340 self.session = ss.StreamSession(username, **key_arg)
341 self.session = ss.StreamSession(username, **key_arg)
341 self._registration_socket = self._context.socket(zmq.XREQ)
342 self._registration_socket = self._context.socket(zmq.XREQ)
342 self._registration_socket.setsockopt(zmq.IDENTITY, self.session.session)
343 self._registration_socket.setsockopt(zmq.IDENTITY, self.session.session)
343 if self._ssh:
344 if self._ssh:
344 tunnel.tunnel_connection(self._registration_socket, url, sshserver, **ssh_kwargs)
345 tunnel.tunnel_connection(self._registration_socket, url, sshserver, **ssh_kwargs)
345 else:
346 else:
346 self._registration_socket.connect(url)
347 self._registration_socket.connect(url)
347
348
348 self.session.debug = self.debug
349 self.session.debug = self.debug
349
350
350 self._notification_handlers = {'registration_notification' : self._register_engine,
351 self._notification_handlers = {'registration_notification' : self._register_engine,
351 'unregistration_notification' : self._unregister_engine,
352 'unregistration_notification' : self._unregister_engine,
352 }
353 }
353 self._queue_handlers = {'execute_reply' : self._handle_execute_reply,
354 self._queue_handlers = {'execute_reply' : self._handle_execute_reply,
354 'apply_reply' : self._handle_apply_reply}
355 'apply_reply' : self._handle_apply_reply}
355 self._connect(sshserver, ssh_kwargs)
356 self._connect(sshserver, ssh_kwargs)
356
357
357
358
358 def _setup_cluster_dir(self, profile, cluster_dir, ipython_dir):
359 def _setup_cluster_dir(self, profile, cluster_dir, ipython_dir):
359 if ipython_dir is None:
360 if ipython_dir is None:
360 ipython_dir = get_ipython_dir()
361 ipython_dir = get_ipython_dir()
361 if cluster_dir is not None:
362 if cluster_dir is not None:
362 try:
363 try:
363 self._cd = ClusterDir.find_cluster_dir(cluster_dir)
364 self._cd = ClusterDir.find_cluster_dir(cluster_dir)
364 return
365 return
365 except ClusterDirError:
366 except ClusterDirError:
366 pass
367 pass
367 elif profile is not None:
368 elif profile is not None:
368 try:
369 try:
369 self._cd = ClusterDir.find_cluster_dir_by_profile(
370 self._cd = ClusterDir.find_cluster_dir_by_profile(
370 ipython_dir, profile)
371 ipython_dir, profile)
371 return
372 return
372 except ClusterDirError:
373 except ClusterDirError:
373 pass
374 pass
374 self._cd = None
375 self._cd = None
375
376
376 @property
377 @property
377 def ids(self):
378 def ids(self):
378 """Always up-to-date ids property."""
379 """Always up-to-date ids property."""
379 self._flush_notifications()
380 self._flush_notifications()
380 return self._ids
381 return self._ids
381
382
382 def close(self):
383 def close(self):
383 if self._closed:
384 if self._closed:
384 return
385 return
385 snames = filter(lambda n: n.endswith('socket'), dir(self))
386 snames = filter(lambda n: n.endswith('socket'), dir(self))
386 for socket in map(lambda name: getattr(self, name), snames):
387 for socket in map(lambda name: getattr(self, name), snames):
387 socket.close()
388 socket.close()
388 self._closed = True
389 self._closed = True
389
390
390 def _update_engines(self, engines):
391 def _update_engines(self, engines):
391 """Update our engines dict and _ids from a dict of the form: {id:uuid}."""
392 """Update our engines dict and _ids from a dict of the form: {id:uuid}."""
392 for k,v in engines.iteritems():
393 for k,v in engines.iteritems():
393 eid = int(k)
394 eid = int(k)
394 self._engines[eid] = bytes(v) # force not unicode
395 self._engines[eid] = bytes(v) # force not unicode
395 self._ids.append(eid)
396 self._ids.append(eid)
396 self._ids = sorted(self._ids)
397 self._ids = sorted(self._ids)
397 if sorted(self._engines.keys()) != range(len(self._engines)) and \
398 if sorted(self._engines.keys()) != range(len(self._engines)) and \
398 self._task_scheme == 'pure' and self._task_socket:
399 self._task_scheme == 'pure' and self._task_socket:
399 self._stop_scheduling_tasks()
400 self._stop_scheduling_tasks()
400
401
401 def _stop_scheduling_tasks(self):
402 def _stop_scheduling_tasks(self):
402 """Stop scheduling tasks because an engine has been unregistered
403 """Stop scheduling tasks because an engine has been unregistered
403 from a pure ZMQ scheduler.
404 from a pure ZMQ scheduler.
404 """
405 """
405
406
406 self._task_socket.close()
407 self._task_socket.close()
407 self._task_socket = None
408 self._task_socket = None
408 msg = "An engine has been unregistered, and we are using pure " +\
409 msg = "An engine has been unregistered, and we are using pure " +\
409 "ZMQ task scheduling. Task farming will be disabled."
410 "ZMQ task scheduling. Task farming will be disabled."
410 if self.outstanding:
411 if self.outstanding:
411 msg += " If you were running tasks when this happened, " +\
412 msg += " If you were running tasks when this happened, " +\
412 "some `outstanding` msg_ids may never resolve."
413 "some `outstanding` msg_ids may never resolve."
413 warnings.warn(msg, RuntimeWarning)
414 warnings.warn(msg, RuntimeWarning)
414
415
415 def _build_targets(self, targets):
416 def _build_targets(self, targets):
416 """Turn valid target IDs or 'all' into two lists:
417 """Turn valid target IDs or 'all' into two lists:
417 (int_ids, uuids).
418 (int_ids, uuids).
418 """
419 """
419 if targets is None:
420 if targets is None:
420 targets = self._ids
421 targets = self._ids
421 elif isinstance(targets, str):
422 elif isinstance(targets, str):
422 if targets.lower() == 'all':
423 if targets.lower() == 'all':
423 targets = self._ids
424 targets = self._ids
424 else:
425 else:
425 raise TypeError("%r not valid str target, must be 'all'"%(targets))
426 raise TypeError("%r not valid str target, must be 'all'"%(targets))
426 elif isinstance(targets, int):
427 elif isinstance(targets, int):
427 targets = [targets]
428 targets = [targets]
428 return [self._engines[t] for t in targets], list(targets)
429 return [self._engines[t] for t in targets], list(targets)
429
430
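So targets normalize as follows (semi-private, shown only for illustration):

    rc._build_targets('all')    # -> (uuids, [0, 1, ...]) for every registered engine
    rc._build_targets(3)        # a lone int is wrapped: -> ([uuid_of_3], [3])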
430 def _connect(self, sshserver, ssh_kwargs):
431 def _connect(self, sshserver, ssh_kwargs):
431 """setup all our socket connections to the controller. This is called from
432 """setup all our socket connections to the controller. This is called from
432 __init__."""
433 __init__."""
433
434
434 # Maybe allow reconnecting?
435 # Maybe allow reconnecting?
435 if self._connected:
436 if self._connected:
436 return
437 return
437 self._connected=True
438 self._connected=True
438
439
439 def connect_socket(s, url):
440 def connect_socket(s, url):
440 url = disambiguate_url(url, self._config['location'])
441 url = disambiguate_url(url, self._config['location'])
441 if self._ssh:
442 if self._ssh:
442 return tunnel.tunnel_connection(s, url, sshserver, **ssh_kwargs)
443 return tunnel.tunnel_connection(s, url, sshserver, **ssh_kwargs)
443 else:
444 else:
444 return s.connect(url)
445 return s.connect(url)
445
446
446 self.session.send(self._registration_socket, 'connection_request')
447 self.session.send(self._registration_socket, 'connection_request')
447 idents,msg = self.session.recv(self._registration_socket,mode=0)
448 idents,msg = self.session.recv(self._registration_socket,mode=0)
448 if self.debug:
449 if self.debug:
449 pprint(msg)
450 pprint(msg)
450 msg = ss.Message(msg)
451 msg = ss.Message(msg)
451 content = msg.content
452 content = msg.content
452 self._config['registration'] = dict(content)
453 self._config['registration'] = dict(content)
453 if content.status == 'ok':
454 if content.status == 'ok':
454 if content.mux:
455 if content.mux:
455 self._mux_socket = self._context.socket(zmq.PAIR)
456 self._mux_socket = self._context.socket(zmq.PAIR)
456 self._mux_socket.setsockopt(zmq.IDENTITY, self.session.session)
457 self._mux_socket.setsockopt(zmq.IDENTITY, self.session.session)
457 connect_socket(self._mux_socket, content.mux)
458 connect_socket(self._mux_socket, content.mux)
458 if content.task:
459 if content.task:
459 self._task_scheme, task_addr = content.task
460 self._task_scheme, task_addr = content.task
460 self._task_socket = self._context.socket(zmq.PAIR)
461 self._task_socket = self._context.socket(zmq.PAIR)
461 self._task_socket.setsockopt(zmq.IDENTITY, self.session.session)
462 self._task_socket.setsockopt(zmq.IDENTITY, self.session.session)
462 connect_socket(self._task_socket, task_addr)
463 connect_socket(self._task_socket, task_addr)
463 if content.notification:
464 if content.notification:
464 self._notification_socket = self._context.socket(zmq.SUB)
465 self._notification_socket = self._context.socket(zmq.SUB)
465 connect_socket(self._notification_socket, content.notification)
466 connect_socket(self._notification_socket, content.notification)
466 self._notification_socket.setsockopt(zmq.SUBSCRIBE, "")
467 self._notification_socket.setsockopt(zmq.SUBSCRIBE, "")
467 if content.query:
468 if content.query:
468 self._query_socket = self._context.socket(zmq.PAIR)
469 self._query_socket = self._context.socket(zmq.PAIR)
469 self._query_socket.setsockopt(zmq.IDENTITY, self.session.session)
470 self._query_socket.setsockopt(zmq.IDENTITY, self.session.session)
470 connect_socket(self._query_socket, content.query)
471 connect_socket(self._query_socket, content.query)
471 if content.control:
472 if content.control:
472 self._control_socket = self._context.socket(zmq.PAIR)
473 self._control_socket = self._context.socket(zmq.PAIR)
473 self._control_socket.setsockopt(zmq.IDENTITY, self.session.session)
474 self._control_socket.setsockopt(zmq.IDENTITY, self.session.session)
474 connect_socket(self._control_socket, content.control)
475 connect_socket(self._control_socket, content.control)
475 if content.iopub:
476 if content.iopub:
476 self._iopub_socket = self._context.socket(zmq.SUB)
477 self._iopub_socket = self._context.socket(zmq.SUB)
477 self._iopub_socket.setsockopt(zmq.SUBSCRIBE, '')
478 self._iopub_socket.setsockopt(zmq.SUBSCRIBE, '')
478 self._iopub_socket.setsockopt(zmq.IDENTITY, self.session.session)
479 self._iopub_socket.setsockopt(zmq.IDENTITY, self.session.session)
479 connect_socket(self._iopub_socket, content.iopub)
480 connect_socket(self._iopub_socket, content.iopub)
480 self._update_engines(dict(content.engines))
481 self._update_engines(dict(content.engines))
481
482
482 else:
483 else:
483 self._connected = False
484 self._connected = False
484 raise Exception("Failed to connect!")
485 raise Exception("Failed to connect!")
485
486
486 #--------------------------------------------------------------------------
487 #--------------------------------------------------------------------------
487 # handlers and callbacks for incoming messages
488 # handlers and callbacks for incoming messages
488 #--------------------------------------------------------------------------
489 #--------------------------------------------------------------------------
489
490
490 def _unwrap_exception(self, content):
491 def _unwrap_exception(self, content):
491 """unwrap exception, and remap engineid to int."""
492 """unwrap exception, and remap engineid to int."""
492 e = ss.unwrap_exception(content)
493 e = error.unwrap_exception(content)
493 if e.engine_info:
494 if e.engine_info:
494 e_uuid = e.engine_info['engine_uuid']
495 e_uuid = e.engine_info['engine_uuid']
495 eid = self._engines[e_uuid]
496 eid = self._engines[e_uuid]
496 e.engine_info['engine_id'] = eid
497 e.engine_info['engine_id'] = eid
497 return e
498 return e
498
499
499 def _register_engine(self, msg):
500 def _register_engine(self, msg):
500 """Register a new engine, and update our connection info."""
501 """Register a new engine, and update our connection info."""
501 content = msg['content']
502 content = msg['content']
502 eid = content['id']
503 eid = content['id']
503 d = {eid : content['queue']}
504 d = {eid : content['queue']}
504 self._update_engines(d)
505 self._update_engines(d)
505
506
506 def _unregister_engine(self, msg):
507 def _unregister_engine(self, msg):
507 """Unregister an engine that has died."""
508 """Unregister an engine that has died."""
508 content = msg['content']
509 content = msg['content']
509 eid = int(content['id'])
510 eid = int(content['id'])
510 if eid in self._ids:
511 if eid in self._ids:
511 self._ids.remove(eid)
512 self._ids.remove(eid)
512 self._engines.pop(eid)
513 self._engines.pop(eid)
513 if self._task_socket and self._task_scheme == 'pure':
514 if self._task_socket and self._task_scheme == 'pure':
514 self._stop_scheduling_tasks()
515 self._stop_scheduling_tasks()
515
516
516 def _extract_metadata(self, header, parent, content):
517 def _extract_metadata(self, header, parent, content):
517 md = {'msg_id' : parent['msg_id'],
518 md = {'msg_id' : parent['msg_id'],
518 'received' : datetime.now(),
519 'received' : datetime.now(),
519 'engine_uuid' : header.get('engine', None),
520 'engine_uuid' : header.get('engine', None),
520 'follow' : parent.get('follow', []),
521 'follow' : parent.get('follow', []),
521 'after' : parent.get('after', []),
522 'after' : parent.get('after', []),
522 'status' : content['status'],
523 'status' : content['status'],
523 }
524 }
524
525
525 if md['engine_uuid'] is not None:
526 if md['engine_uuid'] is not None:
526 md['engine_id'] = self._engines.get(md['engine_uuid'], None)
527 md['engine_id'] = self._engines.get(md['engine_uuid'], None)
527
528
528 if 'date' in parent:
529 if 'date' in parent:
529 md['submitted'] = datetime.strptime(parent['date'], ss.ISO8601)
530 md['submitted'] = datetime.strptime(parent['date'], util.ISO8601)
530 if 'started' in header:
531 if 'started' in header:
531 md['started'] = datetime.strptime(header['started'], ss.ISO8601)
532 md['started'] = datetime.strptime(header['started'], util.ISO8601)
532 if 'date' in header:
533 if 'date' in header:
533 md['completed'] = datetime.strptime(header['date'], ss.ISO8601)
534 md['completed'] = datetime.strptime(header['date'], util.ISO8601)
534 return md
535 return md
535
536
536 def _handle_execute_reply(self, msg):
537 def _handle_execute_reply(self, msg):
537 """Save the reply to an execute_request into our results.
538 """Save the reply to an execute_request into our results.
538
539
539 execute messages are never actually used. apply is used instead.
540 execute messages are never actually used. apply is used instead.
540 """
541 """
541
542
542 parent = msg['parent_header']
543 parent = msg['parent_header']
543 msg_id = parent['msg_id']
544 msg_id = parent['msg_id']
544 if msg_id not in self.outstanding:
545 if msg_id not in self.outstanding:
545 if msg_id in self.history:
546 if msg_id in self.history:
546 print ("got stale result: %s"%msg_id)
547 print ("got stale result: %s"%msg_id)
547 else:
548 else:
548 print ("got unknown result: %s"%msg_id)
549 print ("got unknown result: %s"%msg_id)
549 else:
550 else:
550 self.outstanding.remove(msg_id)
551 self.outstanding.remove(msg_id)
551 self.results[msg_id] = self._unwrap_exception(msg['content'])
552 self.results[msg_id] = self._unwrap_exception(msg['content'])
552
553
553 def _handle_apply_reply(self, msg):
554 def _handle_apply_reply(self, msg):
554 """Save the reply to an apply_request into our results."""
555 """Save the reply to an apply_request into our results."""
555 parent = msg['parent_header']
556 parent = msg['parent_header']
556 msg_id = parent['msg_id']
557 msg_id = parent['msg_id']
557 if msg_id not in self.outstanding:
558 if msg_id not in self.outstanding:
558 if msg_id in self.history:
559 if msg_id in self.history:
559 print ("got stale result: %s"%msg_id)
560 print ("got stale result: %s"%msg_id)
560 print self.results[msg_id]
561 print self.results[msg_id]
561 print msg
562 print msg
562 else:
563 else:
563 print ("got unknown result: %s"%msg_id)
564 print ("got unknown result: %s"%msg_id)
564 else:
565 else:
565 self.outstanding.remove(msg_id)
566 self.outstanding.remove(msg_id)
566 content = msg['content']
567 content = msg['content']
567 header = msg['header']
568 header = msg['header']
568
569
569 # construct metadata:
570 # construct metadata:
570 md = self.metadata.setdefault(msg_id, Metadata())
571 md = self.metadata.setdefault(msg_id, Metadata())
571 md.update(self._extract_metadata(header, parent, content))
572 md.update(self._extract_metadata(header, parent, content))
572 self.metadata[msg_id] = md
573 self.metadata[msg_id] = md
573
574
574 # construct result:
575 # construct result:
575 if content['status'] == 'ok':
576 if content['status'] == 'ok':
576 self.results[msg_id] = ss.unserialize_object(msg['buffers'])[0]
577 self.results[msg_id] = util.unserialize_object(msg['buffers'])[0]
577 elif content['status'] == 'aborted':
578 elif content['status'] == 'aborted':
578 self.results[msg_id] = error.AbortedTask(msg_id)
579 self.results[msg_id] = error.AbortedTask(msg_id)
579 elif content['status'] == 'resubmitted':
580 elif content['status'] == 'resubmitted':
580 # TODO: handle resubmission
581 # TODO: handle resubmission
581 pass
582 pass
582 else:
583 else:
583 self.results[msg_id] = self._unwrap_exception(content)
584 self.results[msg_id] = self._unwrap_exception(content)
584
585
585 def _flush_notifications(self):
586 def _flush_notifications(self):
586 """Flush notifications of engine registrations waiting
587 """Flush notifications of engine registrations waiting
587 in ZMQ queue."""
588 in ZMQ queue."""
588 msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK)
589 msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK)
589 while msg is not None:
590 while msg is not None:
590 if self.debug:
591 if self.debug:
591 pprint(msg)
592 pprint(msg)
592 msg = msg[-1]
593 msg = msg[-1]
593 msg_type = msg['msg_type']
594 msg_type = msg['msg_type']
594 handler = self._notification_handlers.get(msg_type, None)
595 handler = self._notification_handlers.get(msg_type, None)
595 if handler is None:
596 if handler is None:
596 raise Exception("Unhandled message type: %s"%msg_type)
597 raise Exception("Unhandled message type: %s"%msg_type)
597 else:
598 else:
598 handler(msg)
599 handler(msg)
599 msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK)
600 msg = self.session.recv(self._notification_socket, mode=zmq.NOBLOCK)
600
601
601 def _flush_results(self, sock):
602 def _flush_results(self, sock):
602 """Flush task or queue results waiting in ZMQ queue."""
603 """Flush task or queue results waiting in ZMQ queue."""
603 msg = self.session.recv(sock, mode=zmq.NOBLOCK)
604 msg = self.session.recv(sock, mode=zmq.NOBLOCK)
604 while msg is not None:
605 while msg is not None:
605 if self.debug:
606 if self.debug:
606 pprint(msg)
607 pprint(msg)
607 msg = msg[-1]
608 msg = msg[-1]
608 msg_type = msg['msg_type']
609 msg_type = msg['msg_type']
609 handler = self._queue_handlers.get(msg_type, None)
610 handler = self._queue_handlers.get(msg_type, None)
610 if handler is None:
611 if handler is None:
611 raise Exception("Unhandled message type: %s"%msg_type)
612 raise Exception("Unhandled message type: %s"%msg_type)
612 else:
613 else:
613 handler(msg)
614 handler(msg)
614 msg = self.session.recv(sock, mode=zmq.NOBLOCK)
615 msg = self.session.recv(sock, mode=zmq.NOBLOCK)
615
616
616 def _flush_control(self, sock):
617 def _flush_control(self, sock):
617 """Flush replies from the control channel waiting
618 """Flush replies from the control channel waiting
618 in the ZMQ queue.
619 in the ZMQ queue.
619
620
620 Currently: ignore them."""
621 Currently: ignore them."""
621 msg = self.session.recv(sock, mode=zmq.NOBLOCK)
622 msg = self.session.recv(sock, mode=zmq.NOBLOCK)
622 while msg is not None:
623 while msg is not None:
623 if self.debug:
624 if self.debug:
624 pprint(msg)
625 pprint(msg)
625 msg = self.session.recv(sock, mode=zmq.NOBLOCK)
626 msg = self.session.recv(sock, mode=zmq.NOBLOCK)
626
627
627 def _flush_iopub(self, sock):
628 def _flush_iopub(self, sock):
628 """Flush replies from the iopub channel waiting
629 """Flush replies from the iopub channel waiting
629 in the ZMQ queue.
630 in the ZMQ queue.
630 """
631 """
631 msg = self.session.recv(sock, mode=zmq.NOBLOCK)
632 msg = self.session.recv(sock, mode=zmq.NOBLOCK)
632 while msg is not None:
633 while msg is not None:
633 if self.debug:
634 if self.debug:
634 pprint(msg)
635 pprint(msg)
635 msg = msg[-1]
636 msg = msg[-1]
636 parent = msg['parent_header']
637 parent = msg['parent_header']
637 msg_id = parent['msg_id']
638 msg_id = parent['msg_id']
638 content = msg['content']
639 content = msg['content']
639 header = msg['header']
640 header = msg['header']
640 msg_type = msg['msg_type']
641 msg_type = msg['msg_type']
641
642
642 # init metadata:
643 # init metadata:
643 md = self.metadata.setdefault(msg_id, Metadata())
644 md = self.metadata.setdefault(msg_id, Metadata())
644
645
645 if msg_type == 'stream':
646 if msg_type == 'stream':
646 name = content['name']
647 name = content['name']
647 s = md[name] or ''
648 s = md[name] or ''
648 md[name] = s + content['data']
649 md[name] = s + content['data']
649 elif msg_type == 'pyerr':
650 elif msg_type == 'pyerr':
650 md.update({'pyerr' : self._unwrap_exception(content)})
651 md.update({'pyerr' : self._unwrap_exception(content)})
651 else:
652 else:
652 md.update({msg_type : content['data']})
653 md.update({msg_type : content['data']})
653
654
654 self.metadata[msg_id] = md
655 self.metadata[msg_id] = md
655
656
656 msg = self.session.recv(sock, mode=zmq.NOBLOCK)
657 msg = self.session.recv(sock, mode=zmq.NOBLOCK)
657
658
658 #--------------------------------------------------------------------------
659 #--------------------------------------------------------------------------
659 # len, getitem
660 # len, getitem
660 #--------------------------------------------------------------------------
661 #--------------------------------------------------------------------------
661
662
662 def __len__(self):
663 def __len__(self):
663 """len(client) returns # of engines."""
664 """len(client) returns # of engines."""
664 return len(self.ids)
665 return len(self.ids)
665
666
666 def __getitem__(self, key):
667 def __getitem__(self, key):
667 """index access returns DirectView multiplexer objects
668 """index access returns DirectView multiplexer objects
668
669
669 Must be int, slice, or list/tuple/xrange of ints"""
670 Must be int, slice, or list/tuple/xrange of ints"""
670 if not isinstance(key, (int, slice, tuple, list, xrange)):
671 if not isinstance(key, (int, slice, tuple, list, xrange)):
671 raise TypeError("key by int/slice/iterable of ints only, not %s"%(type(key)))
672 raise TypeError("key by int/slice/iterable of ints only, not %s"%(type(key)))
672 else:
673 else:
673 return self.view(key, balanced=False)
674 return self.view(key, balanced=False)
674
675
675 #--------------------------------------------------------------------------
676 #--------------------------------------------------------------------------
676 # Begin public methods
677 # Begin public methods
677 #--------------------------------------------------------------------------
678 #--------------------------------------------------------------------------
678
679
679 def spin(self):
680 def spin(self):
680 """Flush any registration notifications and execution results
681 """Flush any registration notifications and execution results
681 waiting in the ZMQ queue.
682 waiting in the ZMQ queue.
682 """
683 """
683 if self._notification_socket:
684 if self._notification_socket:
684 self._flush_notifications()
685 self._flush_notifications()
685 if self._mux_socket:
686 if self._mux_socket:
686 self._flush_results(self._mux_socket)
687 self._flush_results(self._mux_socket)
687 if self._task_socket:
688 if self._task_socket:
688 self._flush_results(self._task_socket)
689 self._flush_results(self._task_socket)
689 if self._control_socket:
690 if self._control_socket:
690 self._flush_control(self._control_socket)
691 self._flush_control(self._control_socket)
691 if self._iopub_socket:
692 if self._iopub_socket:
692 self._flush_iopub(self._iopub_socket)
693 self._flush_iopub(self._iopub_socket)
693
694
694 def barrier(self, jobs=None, timeout=-1):
695 def barrier(self, jobs=None, timeout=-1):
695 """waits on one or more `jobs`, for up to `timeout` seconds.
696 """waits on one or more `jobs`, for up to `timeout` seconds.
696
697
697 Parameters
698 Parameters
698 ----------
699 ----------
699
700
700 jobs : int, str, or list of ints and/or strs, or one or more AsyncResult objects
701 jobs : int, str, or list of ints and/or strs, or one or more AsyncResult objects
701 ints are indices to self.history
702 ints are indices to self.history
702 strs are msg_ids
703 strs are msg_ids
703 default: wait on all outstanding messages
704 default: wait on all outstanding messages
704 timeout : float
705 timeout : float
705 a time in seconds, after which to give up.
706 a time in seconds, after which to give up.
706 default is -1, which means no timeout
707 default is -1, which means no timeout
707
708
708 Returns
709 Returns
709 -------
710 -------
710
711
711 True : when all msg_ids are done
712 True : when all msg_ids are done
712 False : timeout reached, some msg_ids still outstanding
713 False : timeout reached, some msg_ids still outstanding
713 """
714 """
714 tic = time.time()
715 tic = time.time()
715 if jobs is None:
716 if jobs is None:
716 theids = self.outstanding
717 theids = self.outstanding
717 else:
718 else:
718 if isinstance(jobs, (int, str, AsyncResult)):
719 if isinstance(jobs, (int, str, AsyncResult)):
719 jobs = [jobs]
720 jobs = [jobs]
720 theids = set()
721 theids = set()
721 for job in jobs:
722 for job in jobs:
722 if isinstance(job, int):
723 if isinstance(job, int):
723 # index access
724 # index access
724 job = self.history[job]
725 job = self.history[job]
725 elif isinstance(job, AsyncResult):
726 elif isinstance(job, AsyncResult):
726 map(theids.add, job.msg_ids)
727 map(theids.add, job.msg_ids)
727 continue
728 continue
728 theids.add(job)
729 theids.add(job)
729 if not theids.intersection(self.outstanding):
730 if not theids.intersection(self.outstanding):
730 return True
731 return True
731 self.spin()
732 self.spin()
732 while theids.intersection(self.outstanding):
733 while theids.intersection(self.outstanding):
733 if timeout >= 0 and ( time.time()-tic ) > timeout:
734 if timeout >= 0 and ( time.time()-tic ) > timeout:
734 break
735 break
735 time.sleep(1e-3)
736 time.sleep(1e-3)
736 self.spin()
737 self.spin()
737 return len(theids.intersection(self.outstanding)) == 0
738 return len(theids.intersection(self.outstanding)) == 0
738
739
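A minimal sketch of `barrier` in use, assuming `rc` is a connected Client with at least one engine; prompt numbers and timings are illustrative, not part of this changeset:

    In [1]: import time
    In [2]: ar = rc.apply(time.sleep, (1,), block=False)  # submit without waiting
    In [3]: rc.barrier(ar, timeout=5)                     # True once the job finishes
    Out[3]: True
    In [4]: rc.barrier()                                  # wait on all outstanding jobs
    Out[4]: True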
739 #--------------------------------------------------------------------------
740 #--------------------------------------------------------------------------
740 # Control methods
741 # Control methods
741 #--------------------------------------------------------------------------
742 #--------------------------------------------------------------------------
742
743
743 @spinfirst
744 @spinfirst
744 @defaultblock
745 @defaultblock
745 def clear(self, targets=None, block=None):
746 def clear(self, targets=None, block=None):
746 """Clear the namespace in target(s)."""
747 """Clear the namespace in target(s)."""
747 targets = self._build_targets(targets)[0]
748 targets = self._build_targets(targets)[0]
748 for t in targets:
749 for t in targets:
749 self.session.send(self._control_socket, 'clear_request', content={}, ident=t)
750 self.session.send(self._control_socket, 'clear_request', content={}, ident=t)
750 error = False
751 error = False
751 if self.block:
752 if self.block:
752 for i in range(len(targets)):
753 for i in range(len(targets)):
753 idents,msg = self.session.recv(self._control_socket,0)
754 idents,msg = self.session.recv(self._control_socket,0)
754 if self.debug:
755 if self.debug:
755 pprint(msg)
756 pprint(msg)
756 if msg['content']['status'] != 'ok':
757 if msg['content']['status'] != 'ok':
757 error = self._unwrap_exception(msg['content'])
758 error = self._unwrap_exception(msg['content'])
758 if error:
759 if error:
759 return error
760 return error
760
761
761
762
762 @spinfirst
763 @spinfirst
763 @defaultblock
764 @defaultblock
764 def abort(self, jobs=None, targets=None, block=None):
765 def abort(self, jobs=None, targets=None, block=None):
765 """Abort specific jobs from the execution queues of target(s).
766 """Abort specific jobs from the execution queues of target(s).
766
767
767 This is a mechanism to prevent jobs that have already been submitted
768 This is a mechanism to prevent jobs that have already been submitted
768 from executing.
769 from executing.
769
770
770 Parameters
771 Parameters
771 ----------
772 ----------
772
773
773 jobs : msg_id, list of msg_ids, or AsyncResult
774 jobs : msg_id, list of msg_ids, or AsyncResult
774 The jobs to be aborted
775 The jobs to be aborted
775
776
776
777
777 """
778 """
778 targets = self._build_targets(targets)[0]
779 targets = self._build_targets(targets)[0]
779 msg_ids = []
780 msg_ids = []
780 if isinstance(jobs, (basestring,AsyncResult)):
781 if isinstance(jobs, (basestring,AsyncResult)):
781 jobs = [jobs]
782 jobs = [jobs]
782 bad_ids = filter(lambda obj: not isinstance(obj, (basestring, AsyncResult)), jobs)
783 bad_ids = filter(lambda obj: not isinstance(obj, (basestring, AsyncResult)), jobs)
783 if bad_ids:
784 if bad_ids:
784 raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0])
785 raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0])
785 for j in jobs:
786 for j in jobs:
786 if isinstance(j, AsyncResult):
787 if isinstance(j, AsyncResult):
787 msg_ids.extend(j.msg_ids)
788 msg_ids.extend(j.msg_ids)
788 else:
789 else:
789 msg_ids.append(j)
790 msg_ids.append(j)
790 content = dict(msg_ids=msg_ids)
791 content = dict(msg_ids=msg_ids)
791 for t in targets:
792 for t in targets:
792 self.session.send(self._control_socket, 'abort_request',
793 self.session.send(self._control_socket, 'abort_request',
793 content=content, ident=t)
794 content=content, ident=t)
794 error = False
795 error = False
795 if self.block:
796 if self.block:
796 for i in range(len(targets)):
797 for i in range(len(targets)):
797 idents,msg = self.session.recv(self._control_socket,0)
798 idents,msg = self.session.recv(self._control_socket,0)
798 if self.debug:
799 if self.debug:
799 pprint(msg)
800 pprint(msg)
800 if msg['content']['status'] != 'ok':
801 if msg['content']['status'] != 'ok':
801 error = self._unwrap_exception(msg['content'])
802 error = self._unwrap_exception(msg['content'])
802 if error:
803 if error:
803 return error
804 return error
804
805
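A hypothetical use of `abort`, assuming `rc` is a connected Client and engine 0 exists; the job is only prevented if it has not yet started running:

    In [1]: import time
    In [2]: ar = rc.apply(time.sleep, (60,), targets=0, block=False)
    In [3]: rc.abort(ar, targets=0)   # drop it from engine 0's queue if still pending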
805 @spinfirst
806 @spinfirst
806 @defaultblock
807 @defaultblock
807 def shutdown(self, targets=None, restart=False, controller=False, block=None):
808 def shutdown(self, targets=None, restart=False, controller=False, block=None):
808 """Terminates one or more engine processes, optionally including the controller."""
809 """Terminates one or more engine processes, optionally including the controller."""
809 if controller:
810 if controller:
810 targets = 'all'
811 targets = 'all'
811 targets = self._build_targets(targets)[0]
812 targets = self._build_targets(targets)[0]
812 for t in targets:
813 for t in targets:
813 self.session.send(self._control_socket, 'shutdown_request',
814 self.session.send(self._control_socket, 'shutdown_request',
814 content={'restart':restart},ident=t)
815 content={'restart':restart},ident=t)
815 error = False
816 error = False
816 if block or controller:
817 if block or controller:
817 for i in range(len(targets)):
818 for i in range(len(targets)):
818 idents,msg = self.session.recv(self._control_socket,0)
819 idents,msg = self.session.recv(self._control_socket,0)
819 if self.debug:
820 if self.debug:
820 pprint(msg)
821 pprint(msg)
821 if msg['content']['status'] != 'ok':
822 if msg['content']['status'] != 'ok':
822 error = self._unwrap_exception(msg['content'])
823 error = self._unwrap_exception(msg['content'])
823
824
824 if controller:
825 if controller:
825 time.sleep(0.25)
826 time.sleep(0.25)
826 self.session.send(self._query_socket, 'shutdown_request')
827 self.session.send(self._query_socket, 'shutdown_request')
827 idents,msg = self.session.recv(self._query_socket, 0)
828 idents,msg = self.session.recv(self._query_socket, 0)
828 if self.debug:
829 if self.debug:
829 pprint(msg)
830 pprint(msg)
830 if msg['content']['status'] != 'ok':
831 if msg['content']['status'] != 'ok':
831 error = self._unwrap_exception(msg['content'])
832 error = self._unwrap_exception(msg['content'])
832
833
833 if error:
834 if error:
834 raise error
835 raise error
835
836
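Illustrative shutdown calls, assuming `rc` is a connected Client (engine ids are examples):

    In [1]: rc.shutdown(targets=0)            # stop a single engine
    In [2]: rc.shutdown(restart=True)         # ask all engines to restart
    In [3]: rc.shutdown(controller=True)      # stop everything, controller included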
836 #--------------------------------------------------------------------------
837 #--------------------------------------------------------------------------
837 # Execution methods
838 # Execution methods
838 #--------------------------------------------------------------------------
839 #--------------------------------------------------------------------------
839
840
840 @defaultblock
841 @defaultblock
841 def execute(self, code, targets='all', block=None):
842 def execute(self, code, targets='all', block=None):
842 """Executes `code` on `targets` in blocking or nonblocking manner.
843 """Executes `code` on `targets` in blocking or nonblocking manner.
843
844
844 ``execute`` is always `bound` (affects engine namespace)
845 ``execute`` is always `bound` (affects engine namespace)
845
846
846 Parameters
847 Parameters
847 ----------
848 ----------
848
849
849 code : str
850 code : str
850 the code string to be executed
851 the code string to be executed
851 targets : int/str/list of ints/strs
852 targets : int/str/list of ints/strs
852 the engines on which to execute
853 the engines on which to execute
853 default : all
854 default : all
854 block : bool
855 block : bool
855 whether or not to wait until done to return
856 whether or not to wait until done to return
856 default: self.block
857 default: self.block
857 """
858 """
858 result = self.apply(_execute, (code,), targets=targets, block=block, bound=True, balanced=False)
859 result = self.apply(_execute, (code,), targets=targets, block=block, bound=True, balanced=False)
859 if not block:
860 if not block:
860 return result
861 return result
861
862
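A sketch of blocking and nonblocking execution, assuming `rc` is a connected Client; note that a blocking `execute` returns None, so only the nonblocking form yields an AsyncResult:

    In [1]: rc.execute('a = 10')                       # runs in every engine's namespace
    In [2]: ar = rc.execute('b = a * 2', block=False)
    In [3]: ar.get()                                   # wait for the AsyncResult to finish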
862 def run(self, filename, targets='all', block=None):
863 def run(self, filename, targets='all', block=None):
863 """Execute contents of `filename` on engine(s).
864 """Execute contents of `filename` on engine(s).
864
865
865 This simply reads the contents of the file and calls `execute`.
866 This simply reads the contents of the file and calls `execute`.
866
867
867 Parameters
868 Parameters
868 ----------
869 ----------
869
870
870 filename : str
871 filename : str
871 The path to the file
872 The path to the file
872 targets : int/str/list of ints/strs
873 targets : int/str/list of ints/strs
873 the engines on which to execute
874 the engines on which to execute
874 default : all
875 default : all
875 block : bool
876 block : bool
876 whether or not to wait until done
877 whether or not to wait until done
877 default: self.block
878 default: self.block
878
879
879 """
880 """
880 with open(filename, 'rb') as f:
881 with open(filename, 'rb') as f:
881 code = f.read()
882 code = f.read()
882 return self.execute(code, targets=targets, block=block)
883 return self.execute(code, targets=targets, block=block)
883
884
884 def _maybe_raise(self, result):
885 def _maybe_raise(self, result):
885 """wrapper for maybe raising an exception if apply failed."""
886 """wrapper for maybe raising an exception if apply failed."""
886 if isinstance(result, error.RemoteError):
887 if isinstance(result, error.RemoteError):
887 raise result
888 raise result
888
889
889 return result
890 return result
890
891
891 def _build_dependency(self, dep):
892 def _build_dependency(self, dep):
892 """helper for building jsonable dependencies from various input forms"""
893 """helper for building jsonable dependencies from various input forms"""
893 if isinstance(dep, Dependency):
894 if isinstance(dep, Dependency):
894 return dep.as_dict()
895 return dep.as_dict()
895 elif isinstance(dep, AsyncResult):
896 elif isinstance(dep, AsyncResult):
896 return dep.msg_ids
897 return dep.msg_ids
897 elif dep is None:
898 elif dep is None:
898 return []
899 return []
899 else:
900 else:
900 # pass to Dependency constructor
901 # pass to Dependency constructor
901 return list(Dependency(dep))
902 return list(Dependency(dep))
902
903
903 @defaultblock
904 @defaultblock
904 def apply(self, f, args=None, kwargs=None, bound=True, block=None,
905 def apply(self, f, args=None, kwargs=None, bound=True, block=None,
905 targets=None, balanced=None,
906 targets=None, balanced=None,
906 after=None, follow=None, timeout=None):
907 after=None, follow=None, timeout=None):
907 """Call `f(*args, **kwargs)` on a remote engine(s), returning the result.
908 """Call `f(*args, **kwargs)` on a remote engine(s), returning the result.
908
909
909 This is the central execution command for the client.
910 This is the central execution command for the client.
910
911
911 Parameters
912 Parameters
912 ----------
913 ----------
913
914
914 f : function
915 f : function
915 The function to be called remotely
916 The function to be called remotely
916 args : tuple/list
917 args : tuple/list
917 The positional arguments passed to `f`
918 The positional arguments passed to `f`
918 kwargs : dict
919 kwargs : dict
919 The keyword arguments passed to `f`
920 The keyword arguments passed to `f`
920 bound : bool (default: True)
921 bound : bool (default: True)
921 Whether to execute in the Engine(s) namespace, or in a clean
922 Whether to execute in the Engine(s) namespace, or in a clean
922 namespace not affecting the engine.
923 namespace not affecting the engine.
923 block : bool (default: self.block)
924 block : bool (default: self.block)
924 Whether to wait for the result, or return immediately.
925 Whether to wait for the result, or return immediately.
925 False:
926 False:
926 returns AsyncResult
927 returns AsyncResult
927 True:
928 True:
928 returns actual result(s) of f(*args, **kwargs)
929 returns actual result(s) of f(*args, **kwargs)
929 if multiple targets:
930 if multiple targets:
930 list of results, matching `targets`
931 list of results, matching `targets`
931 targets : int,list of ints, 'all', None
932 targets : int,list of ints, 'all', None
932 Specify the destination of the job.
933 Specify the destination of the job.
933 if None:
934 if None:
934 Submit via Task queue for load-balancing.
935 Submit via Task queue for load-balancing.
935 if 'all':
936 if 'all':
936 Run on all active engines
937 Run on all active engines
937 if list:
938 if list:
938 Run on each specified engine
939 Run on each specified engine
939 if int:
940 if int:
940 Run on single engine
941 Run on single engine
941
942
942 balanced : bool, default None
943 balanced : bool, default None
943 whether to load-balance. This will default to True
944 whether to load-balance. This will default to True
944 if targets is unspecified, or False if targets is specified.
945 if targets is unspecified, or False if targets is specified.
945
946
946 The following arguments are only used when balanced is True:
947 The following arguments are only used when balanced is True:
947 after : Dependency or collection of msg_ids
948 after : Dependency or collection of msg_ids
948 Only for load-balanced execution (targets=None)
949 Only for load-balanced execution (targets=None)
949 Specify a list of msg_ids as a time-based dependency.
950 Specify a list of msg_ids as a time-based dependency.
950 This job will only be run *after* the dependencies
951 This job will only be run *after* the dependencies
951 have been met.
952 have been met.
952
953
953 follow : Dependency or collection of msg_ids
954 follow : Dependency or collection of msg_ids
954 Only for load-balanced execution (targets=None)
955 Only for load-balanced execution (targets=None)
955 Specify a list of msg_ids as a location-based dependency.
956 Specify a list of msg_ids as a location-based dependency.
956 This job will only be run on an engine where this dependency
957 This job will only be run on an engine where this dependency
957 is met.
958 is met.
958
959
959 timeout : float/int or None
960 timeout : float/int or None
960 Only for load-balanced execution (targets=None)
961 Only for load-balanced execution (targets=None)
961 Specify an amount of time (in seconds) for the scheduler to
962 Specify an amount of time (in seconds) for the scheduler to
962 wait for dependencies to be met before failing with a
963 wait for dependencies to be met before failing with a
963 DependencyTimeout.
964 DependencyTimeout.
964
965
965 `after`, `follow`, and `timeout` are only used if `balanced=True`.
966 `after`, `follow`, and `timeout` are only used if `balanced=True`.
966
967
967 Returns
968 Returns
968 -------
969 -------
969
970
970 if block is False:
971 if block is False:
971 return AsyncResult wrapping msg_ids
972 return AsyncResult wrapping msg_ids
972 output of AsyncResult.get() is identical to that of `apply(...block=True)`
973 output of AsyncResult.get() is identical to that of `apply(...block=True)`
973 else:
974 else:
974 if single target:
975 if single target:
975 return result of `f(*args, **kwargs)`
976 return result of `f(*args, **kwargs)`
976 else:
977 else:
977 return list of results, matching `targets`
978 return list of results, matching `targets`
978 """
979 """
979 assert not self._closed, "cannot use me anymore, I'm closed!"
980 assert not self._closed, "cannot use me anymore, I'm closed!"
980 # defaults:
981 # defaults:
981 block = block if block is not None else self.block
982 block = block if block is not None else self.block
982 args = args if args is not None else []
983 args = args if args is not None else []
983 kwargs = kwargs if kwargs is not None else {}
984 kwargs = kwargs if kwargs is not None else {}
984
985
985 if balanced is None:
986 if balanced is None:
986 if targets is None:
987 if targets is None:
987 # default to balanced if targets unspecified
988 # default to balanced if targets unspecified
988 balanced = True
989 balanced = True
989 else:
990 else:
990 # otherwise default to multiplexing
991 # otherwise default to multiplexing
991 balanced = False
992 balanced = False
992
993
993 if targets is None and balanced is False:
994 if targets is None and balanced is False:
994 # default to all if *not* balanced, and targets is unspecified
995 # default to all if *not* balanced, and targets is unspecified
995 targets = 'all'
996 targets = 'all'
996
997
997 # enforce types of f, args, kwargs
998 # enforce types of f, args, kwargs
998 if not callable(f):
999 if not callable(f):
999 raise TypeError("f must be callable, not %s"%type(f))
1000 raise TypeError("f must be callable, not %s"%type(f))
1000 if not isinstance(args, (tuple, list)):
1001 if not isinstance(args, (tuple, list)):
1001 raise TypeError("args must be tuple or list, not %s"%type(args))
1002 raise TypeError("args must be tuple or list, not %s"%type(args))
1002 if not isinstance(kwargs, dict):
1003 if not isinstance(kwargs, dict):
1003 raise TypeError("kwargs must be dict, not %s"%type(kwargs))
1004 raise TypeError("kwargs must be dict, not %s"%type(kwargs))
1004
1005
1005 options = dict(bound=bound, block=block, targets=targets)
1006 options = dict(bound=bound, block=block, targets=targets)
1006
1007
1007 if balanced:
1008 if balanced:
1008 return self._apply_balanced(f, args, kwargs, timeout=timeout,
1009 return self._apply_balanced(f, args, kwargs, timeout=timeout,
1009 after=after, follow=follow, **options)
1010 after=after, follow=follow, **options)
1010 elif follow or after or timeout:
1011 elif follow or after or timeout:
1011 msg = "follow, after, and timeout args are only used for"
1012 msg = "follow, after, and timeout args are only used for"
1012 msg += " load-balanced execution."
1013 msg += " load-balanced execution."
1013 raise ValueError(msg)
1014 raise ValueError(msg)
1014 else:
1015 else:
1015 return self._apply_direct(f, args, kwargs, **options)
1016 return self._apply_direct(f, args, kwargs, **options)
1016
1017
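A sketch contrasting the two submission modes of `apply`, assuming `rc` is a connected Client with four engines (counts are illustrative) and that the function serializes cleanly:

    In [1]: def mul(a, b):
       ...:     return a * b
    In [2]: rc.apply(mul, (2, 3), block=True)                 # balanced: one engine
    Out[2]: 6
    In [3]: rc.apply(mul, (2, 3), targets='all', block=True)  # direct: every engine
    Out[3]: [6, 6, 6, 6]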
1017 def _apply_balanced(self, f, args, kwargs, bound=None, block=None, targets=None,
1018 def _apply_balanced(self, f, args, kwargs, bound=None, block=None, targets=None,
1018 after=None, follow=None, timeout=None):
1019 after=None, follow=None, timeout=None):
1019 """call f(*args, **kwargs) remotely in a load-balanced manner.
1020 """call f(*args, **kwargs) remotely in a load-balanced manner.
1020
1021
1021 This is a private method, see `apply` for details.
1022 This is a private method, see `apply` for details.
1022 Not to be called directly!
1023 Not to be called directly!
1023 """
1024 """
1024
1025
1025 loc = locals()
1026 loc = locals()
1026 for name in ('bound', 'block'):
1027 for name in ('bound', 'block'):
1027 assert loc[name] is not None, "kwarg %r must be specified!"%name
1028 assert loc[name] is not None, "kwarg %r must be specified!"%name
1028
1029
1029 if self._task_socket is None:
1030 if self._task_socket is None:
1030 msg = "Task farming is disabled"
1031 msg = "Task farming is disabled"
1031 if self._task_scheme == 'pure':
1032 if self._task_scheme == 'pure':
1032 msg += " because the pure ZMQ scheduler cannot handle"
1033 msg += " because the pure ZMQ scheduler cannot handle"
1033 msg += " disappearing engines."
1034 msg += " disappearing engines."
1034 raise RuntimeError(msg)
1035 raise RuntimeError(msg)
1035
1036
1036 if self._task_scheme == 'pure':
1037 if self._task_scheme == 'pure':
1037 # pure zmq scheme doesn't support dependencies
1038 # pure zmq scheme doesn't support dependencies
1038 msg = "Pure ZMQ scheduler doesn't support dependencies"
1039 msg = "Pure ZMQ scheduler doesn't support dependencies"
1039 if (follow or after):
1040 if (follow or after):
1040 # hard fail on DAG dependencies
1041 # hard fail on DAG dependencies
1041 raise RuntimeError(msg)
1042 raise RuntimeError(msg)
1042 if isinstance(f, dependent):
1043 if isinstance(f, dependent):
1043 # soft warn on functional dependencies
1044 # soft warn on functional dependencies
1044 warnings.warn(msg, RuntimeWarning)
1045 warnings.warn(msg, RuntimeWarning)
1045
1046
1046 # defaults:
1047 # defaults:
1047 args = args if args is not None else []
1048 args = args if args is not None else []
1048 kwargs = kwargs if kwargs is not None else {}
1049 kwargs = kwargs if kwargs is not None else {}
1049
1050
1050 if targets:
1051 if targets:
1051 idents,_ = self._build_targets(targets)
1052 idents,_ = self._build_targets(targets)
1052 else:
1053 else:
1053 idents = []
1054 idents = []
1054
1055
1055 after = self._build_dependency(after)
1056 after = self._build_dependency(after)
1056 follow = self._build_dependency(follow)
1057 follow = self._build_dependency(follow)
1057 subheader = dict(after=after, follow=follow, timeout=timeout, targets=idents)
1058 subheader = dict(after=after, follow=follow, timeout=timeout, targets=idents)
1058 bufs = ss.pack_apply_message(f,args,kwargs)
1059 bufs = util.pack_apply_message(f,args,kwargs)
1059 content = dict(bound=bound)
1060 content = dict(bound=bound)
1060
1061
1061 msg = self.session.send(self._task_socket, "apply_request",
1062 msg = self.session.send(self._task_socket, "apply_request",
1062 content=content, buffers=bufs, subheader=subheader)
1063 content=content, buffers=bufs, subheader=subheader)
1063 msg_id = msg['msg_id']
1064 msg_id = msg['msg_id']
1064 self.outstanding.add(msg_id)
1065 self.outstanding.add(msg_id)
1065 self.history.append(msg_id)
1066 self.history.append(msg_id)
1066 ar = AsyncResult(self, [msg_id], fname=f.__name__)
1067 ar = AsyncResult(self, [msg_id], fname=f.__name__)
1067 if block:
1068 if block:
1068 try:
1069 try:
1069 return ar.get()
1070 return ar.get()
1070 except KeyboardInterrupt:
1071 except KeyboardInterrupt:
1071 return ar
1072 return ar
1072 else:
1073 else:
1073 return ar
1074 return ar
1074
1075
1075 def _apply_direct(self, f, args, kwargs, bound=None, block=None, targets=None):
1076 def _apply_direct(self, f, args, kwargs, bound=None, block=None, targets=None):
1076 """Then underlying method for applying functions to specific engines
1077 """Then underlying method for applying functions to specific engines
1077 via the MUX queue.
1078 via the MUX queue.
1078
1079
1079 This is a private method, see `apply` for details.
1080 This is a private method, see `apply` for details.
1080 Not to be called directly!
1081 Not to be called directly!
1081 """
1082 """
1082 loc = locals()
1083 loc = locals()
1083 for name in ('bound', 'block', 'targets'):
1084 for name in ('bound', 'block', 'targets'):
1084 assert loc[name] is not None, "kwarg %r must be specified!"%name
1085 assert loc[name] is not None, "kwarg %r must be specified!"%name
1085
1086
1086 idents,targets = self._build_targets(targets)
1087 idents,targets = self._build_targets(targets)
1087
1088
1088 subheader = {}
1089 subheader = {}
1089 content = dict(bound=bound)
1090 content = dict(bound=bound)
1090 bufs = ss.pack_apply_message(f,args,kwargs)
1091 bufs = util.pack_apply_message(f,args,kwargs)
1091
1092
1092 msg_ids = []
1093 msg_ids = []
1093 for ident in idents:
1094 for ident in idents:
1094 msg = self.session.send(self._mux_socket, "apply_request",
1095 msg = self.session.send(self._mux_socket, "apply_request",
1095 content=content, buffers=bufs, ident=ident, subheader=subheader)
1096 content=content, buffers=bufs, ident=ident, subheader=subheader)
1096 msg_id = msg['msg_id']
1097 msg_id = msg['msg_id']
1097 self.outstanding.add(msg_id)
1098 self.outstanding.add(msg_id)
1098 self.history.append(msg_id)
1099 self.history.append(msg_id)
1099 msg_ids.append(msg_id)
1100 msg_ids.append(msg_id)
1100 ar = AsyncResult(self, msg_ids, fname=f.__name__)
1101 ar = AsyncResult(self, msg_ids, fname=f.__name__)
1101 if block:
1102 if block:
1102 try:
1103 try:
1103 return ar.get()
1104 return ar.get()
1104 except KeyboardInterrupt:
1105 except KeyboardInterrupt:
1105 return ar
1106 return ar
1106 else:
1107 else:
1107 return ar
1108 return ar
1108
1109
1109 #--------------------------------------------------------------------------
1110 #--------------------------------------------------------------------------
1110 # construct a View object
1111 # construct a View object
1111 #--------------------------------------------------------------------------
1112 #--------------------------------------------------------------------------
1112
1113
1113 @defaultblock
1114 @defaultblock
1114 def remote(self, bound=True, block=None, targets=None, balanced=None):
1115 def remote(self, bound=True, block=None, targets=None, balanced=None):
1115 """Decorator for making a RemoteFunction"""
1116 """Decorator for making a RemoteFunction"""
1116 return remote(self, bound=bound, targets=targets, block=block, balanced=balanced)
1117 return remote(self, bound=bound, targets=targets, block=block, balanced=balanced)
1117
1118
1118 @defaultblock
1119 @defaultblock
1119 def parallel(self, dist='b', bound=True, block=None, targets=None, balanced=None):
1120 def parallel(self, dist='b', bound=True, block=None, targets=None, balanced=None):
1120 """Decorator for making a ParallelFunction"""
1121 """Decorator for making a ParallelFunction"""
1121 return parallel(self, bound=bound, targets=targets, block=block, balanced=balanced)
1122 return parallel(self, bound=bound, targets=targets, block=block, balanced=balanced)
1122
1123
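A hypothetical decorator sketch, assuming `rc` is a connected Client; the function body runs on the engines, not locally, and the pids shown are made up:

    In [1]: @rc.remote(block=True, targets='all')
       ...: def engine_pid():
       ...:     import os
       ...:     return os.getpid()
    In [2]: engine_pid()   # one pid per engine
    Out[2]: [4121, 4122, 4123, 4124]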
1123 def _cache_view(self, targets, balanced):
1124 def _cache_view(self, targets, balanced):
1124 """save views, so subsequent requests don't create new objects."""
1125 """save views, so subsequent requests don't create new objects."""
1125 if balanced:
1126 if balanced:
1126 view_class = LoadBalancedView
1127 view_class = LoadBalancedView
1127 view_cache = self._balanced_views
1128 view_cache = self._balanced_views
1128 else:
1129 else:
1129 view_class = DirectView
1130 view_class = DirectView
1130 view_cache = self._direct_views
1131 view_cache = self._direct_views
1131
1132
1132 # use str, since often targets will be a list
1133 # use str, since often targets will be a list
1133 key = str(targets)
1134 key = str(targets)
1134 if key not in view_cache:
1135 if key not in view_cache:
1135 view_cache[key] = view_class(client=self, targets=targets)
1136 view_cache[key] = view_class(client=self, targets=targets)
1136
1137
1137 return view_cache[key]
1138 return view_cache[key]
1138
1139
1139 def view(self, targets=None, balanced=None):
1140 def view(self, targets=None, balanced=None):
1140 """Method for constructing View objects.
1141 """Method for constructing View objects.
1141
1142
1142 If no arguments are specified, create a LoadBalancedView
1143 If no arguments are specified, create a LoadBalancedView
1143 using all engines. If only `targets` is specified, it will
1144 using all engines. If only `targets` is specified, it will
1144 be a DirectView. This method is the underlying implementation
1145 be a DirectView. This method is the underlying implementation
1145 of ``client.__getitem__``.
1146 of ``client.__getitem__``.
1146
1147
1147 Parameters
1148 Parameters
1148 ----------
1149 ----------
1149
1150
1150 targets: list,slice,int,etc. [default: use all engines]
1151 targets: list,slice,int,etc. [default: use all engines]
1151 The engines to use for the View
1152 The engines to use for the View
1152 balanced : bool [default: False if targets specified, True else]
1153 balanced : bool [default: False if targets specified, True else]
1153 whether to build a LoadBalancedView or a DirectView
1154 whether to build a LoadBalancedView or a DirectView
1154
1155
1155 """
1156 """
1156
1157
1157 balanced = (targets is None) if balanced is None else balanced
1158 balanced = (targets is None) if balanced is None else balanced
1158
1159
1159 if targets is None:
1160 if targets is None:
1160 if balanced:
1161 if balanced:
1161 return self._cache_view(None,True)
1162 return self._cache_view(None,True)
1162 else:
1163 else:
1163 targets = slice(None)
1164 targets = slice(None)
1164
1165
1165 if isinstance(targets, int):
1166 if isinstance(targets, int):
1166 if targets < 0:
1167 if targets < 0:
1167 targets = self.ids[targets]
1168 targets = self.ids[targets]
1168 if targets not in self.ids:
1169 if targets not in self.ids:
1169 raise IndexError("No such engine: %i"%targets)
1170 raise IndexError("No such engine: %i"%targets)
1170 return self._cache_view(targets, balanced)
1171 return self._cache_view(targets, balanced)
1171
1172
1172 if isinstance(targets, slice):
1173 if isinstance(targets, slice):
1173 indices = range(len(self.ids))[targets]
1174 indices = range(len(self.ids))[targets]
1174 ids = sorted(self._ids)
1175 ids = sorted(self._ids)
1175 targets = [ ids[i] for i in indices ]
1176 targets = [ ids[i] for i in indices ]
1176
1177
1177 if isinstance(targets, (tuple, list, xrange)):
1178 if isinstance(targets, (tuple, list, xrange)):
1178 _,targets = self._build_targets(list(targets))
1179 _,targets = self._build_targets(list(targets))
1179 return self._cache_view(targets, balanced)
1180 return self._cache_view(targets, balanced)
1180 else:
1181 else:
1181 raise TypeError("targets by int/slice/collection of ints only, not %s"%(type(targets)))
1182 raise TypeError("targets by int/slice/collection of ints only, not %s"%(type(targets)))
1182
1183
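Equivalent ways of building views, assuming `rc` is a connected Client:

    In [1]: dv = rc.view([0, 1])   # DirectView on engines 0 and 1
    In [2]: lbv = rc.view()        # LoadBalancedView on all engines
    In [3]: dv2 = rc[::2]          # __getitem__ always yields a DirectView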
1183 #--------------------------------------------------------------------------
1184 #--------------------------------------------------------------------------
1184 # Data movement
1185 # Data movement
1185 #--------------------------------------------------------------------------
1186 #--------------------------------------------------------------------------
1186
1187
1187 @defaultblock
1188 @defaultblock
1188 def push(self, ns, targets='all', block=None):
1189 def push(self, ns, targets='all', block=None):
1189 """Push the contents of `ns` into the namespace on `target`"""
1190 """Push the contents of `ns` into the namespace on `target`"""
1190 if not isinstance(ns, dict):
1191 if not isinstance(ns, dict):
1191 raise TypeError("Must be a dict, not %s"%type(ns))
1192 raise TypeError("Must be a dict, not %s"%type(ns))
1192 result = self.apply(_push, (ns,), targets=targets, block=block, bound=True, balanced=False)
1193 result = self.apply(_push, (ns,), targets=targets, block=block, bound=True, balanced=False)
1193 if not block:
1194 if not block:
1194 return result
1195 return result
1195
1196
1196 @defaultblock
1197 @defaultblock
1197 def pull(self, keys, targets='all', block=None):
1198 def pull(self, keys, targets='all', block=None):
1198 """Pull objects from `target`'s namespace by `keys`"""
1199 """Pull objects from `target`'s namespace by `keys`"""
1199 if isinstance(keys, str):
1200 if isinstance(keys, str):
1200 pass
1201 pass
1201 elif isinstance(keys, (list,tuple,set)):
1202 elif isinstance(keys, (list,tuple,set)):
1202 for key in keys:
1203 for key in keys:
1203 if not isinstance(key, str):
1204 if not isinstance(key, str):
1204 raise TypeError
1205 raise TypeError
1205 result = self.apply(_pull, (keys,), targets=targets, block=block, bound=True, balanced=False)
1206 result = self.apply(_pull, (keys,), targets=targets, block=block, bound=True, balanced=False)
1206 return result
1207 return result
1207
1208
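A push/pull round trip, assuming `rc` is a connected Client and engine 0 exists; the return shape is illustrative:

    In [1]: rc.push(dict(a=1, b=2), targets=0, block=True)
    In [2]: rc.pull(('a', 'b'), targets=0, block=True)
    Out[2]: [1, 2]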
1208 @defaultblock
1209 @defaultblock
1209 def scatter(self, key, seq, dist='b', flatten=False, targets='all', block=None):
1210 def scatter(self, key, seq, dist='b', flatten=False, targets='all', block=None):
1210 """
1211 """
1211 Partition a Python sequence and send the partitions to a set of engines.
1212 Partition a Python sequence and send the partitions to a set of engines.
1212 """
1213 """
1213 targets = self._build_targets(targets)[-1]
1214 targets = self._build_targets(targets)[-1]
1214 mapObject = Map.dists[dist]()
1215 mapObject = Map.dists[dist]()
1215 nparts = len(targets)
1216 nparts = len(targets)
1216 msg_ids = []
1217 msg_ids = []
1217 for index, engineid in enumerate(targets):
1218 for index, engineid in enumerate(targets):
1218 partition = mapObject.getPartition(seq, index, nparts)
1219 partition = mapObject.getPartition(seq, index, nparts)
1219 if flatten and len(partition) == 1:
1220 if flatten and len(partition) == 1:
1220 r = self.push({key: partition[0]}, targets=engineid, block=False)
1221 r = self.push({key: partition[0]}, targets=engineid, block=False)
1221 else:
1222 else:
1222 r = self.push({key: partition}, targets=engineid, block=False)
1223 r = self.push({key: partition}, targets=engineid, block=False)
1223 msg_ids.extend(r.msg_ids)
1224 msg_ids.extend(r.msg_ids)
1224 r = AsyncResult(self, msg_ids, fname='scatter')
1225 r = AsyncResult(self, msg_ids, fname='scatter')
1225 if block:
1226 if block:
1226 r.get()
1227 r.get()
1227 else:
1228 else:
1228 return r
1229 return r
1229
1230
1230 @defaultblock
1231 @defaultblock
1231 def gather(self, key, dist='b', targets='all', block=None):
1232 def gather(self, key, dist='b', targets='all', block=None):
1232 """
1233 """
1233 Gather a partitioned sequence on a set of engines as a single local seq.
1234 Gather a partitioned sequence on a set of engines as a single local seq.
1234 """
1235 """
1235
1236
1236 targets = self._build_targets(targets)[-1]
1237 targets = self._build_targets(targets)[-1]
1237 mapObject = Map.dists[dist]()
1238 mapObject = Map.dists[dist]()
1238 msg_ids = []
1239 msg_ids = []
1239 for index, engineid in enumerate(targets):
1240 for index, engineid in enumerate(targets):
1240 msg_ids.extend(self.pull(key, targets=engineid,block=False).msg_ids)
1241 msg_ids.extend(self.pull(key, targets=engineid,block=False).msg_ids)
1241
1242
1242 r = AsyncMapResult(self, msg_ids, mapObject, fname='gather')
1243 r = AsyncMapResult(self, msg_ids, mapObject, fname='gather')
1243 if block:
1244 if block:
1244 return r.get()
1245 return r.get()
1245 else:
1246 else:
1246 return r
1247 return r
1247
1248
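A partition/reassemble sketch, assuming `rc` is a connected Client:

    In [1]: rc.scatter('x', range(8), block=True)   # split range(8) across engines
    In [2]: rc.gather('x', block=True)              # collect the pieces back
    Out[2]: [0, 1, 2, 3, 4, 5, 6, 7]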
1248 #--------------------------------------------------------------------------
1249 #--------------------------------------------------------------------------
1249 # Query methods
1250 # Query methods
1250 #--------------------------------------------------------------------------
1251 #--------------------------------------------------------------------------
1251
1252
1252 @spinfirst
1253 @spinfirst
1253 @defaultblock
1254 @defaultblock
1254 def get_result(self, indices_or_msg_ids=None, block=None):
1255 def get_result(self, indices_or_msg_ids=None, block=None):
1255 """Retrieve a result by msg_id or history index, wrapped in an AsyncResult object.
1256 """Retrieve a result by msg_id or history index, wrapped in an AsyncResult object.
1256
1257
1257 If the client already has the results, no request to the Hub will be made.
1258 If the client already has the results, no request to the Hub will be made.
1258
1259
1259 This is a convenient way to construct AsyncResult objects, which are wrappers
1260 This is a convenient way to construct AsyncResult objects, which are wrappers
1260 that include metadata about execution, and allow for awaiting results that
1261 that include metadata about execution, and allow for awaiting results that
1261 were not submitted by this Client.
1262 were not submitted by this Client.
1262
1263
1263 It can also be a convenient way to retrieve the metadata associated with
1264 It can also be a convenient way to retrieve the metadata associated with
1264 blocking execution, since it always retrieves the metadata of each result.
1265 blocking execution, since it always retrieves the metadata of each result.
1265
1266
1266 Examples
1267 Examples
1267 --------
1268 --------
1268 ::
1269 ::
1269
1270
1270 In [10]: ar = client.get_result(-1) # AsyncResult for the most recent submission
1271 In [10]: ar = client.get_result(-1) # AsyncResult for the most recent submission
1271
1272
1272 Parameters
1273 Parameters
1273 ----------
1274 ----------
1274
1275
1275 indices_or_msg_ids : integer history index, str msg_id, or list of either
1276 indices_or_msg_ids : integer history index, str msg_id, or list of either
1276 The history indices or msg_ids of the results to be retrieved
1277 The history indices or msg_ids of the results to be retrieved
1277
1278
1278 block : bool
1279 block : bool
1279 Whether to wait for the result to be done
1280 Whether to wait for the result to be done
1280
1281
1281 Returns
1282 Returns
1282 -------
1283 -------
1283
1284
1284 AsyncResult
1285 AsyncResult
1285 A single AsyncResult object will always be returned.
1286 A single AsyncResult object will always be returned.
1286
1287
1287 AsyncHubResult
1288 AsyncHubResult
1288 A subclass of AsyncResult that retrieves results from the Hub
1289 A subclass of AsyncResult that retrieves results from the Hub
1289
1290
1290 """
1291 """
1291 if indices_or_msg_ids is None:
1292 if indices_or_msg_ids is None:
1292 indices_or_msg_ids = -1
1293 indices_or_msg_ids = -1
1293
1294
1294 if not isinstance(indices_or_msg_ids, (list,tuple)):
1295 if not isinstance(indices_or_msg_ids, (list,tuple)):
1295 indices_or_msg_ids = [indices_or_msg_ids]
1296 indices_or_msg_ids = [indices_or_msg_ids]
1296
1297
1297 theids = []
1298 theids = []
1298 for id in indices_or_msg_ids:
1299 for id in indices_or_msg_ids:
1299 if isinstance(id, int):
1300 if isinstance(id, int):
1300 id = self.history[id]
1301 id = self.history[id]
1301 if not isinstance(id, basestring):
1302 if not isinstance(id, basestring):
1302 raise TypeError("indices must be str or int, not %r"%id)
1303 raise TypeError("indices must be str or int, not %r"%id)
1303 theids.append(id)
1304 theids.append(id)
1304
1305
1305 local_ids = filter(lambda msg_id: msg_id in self.history or msg_id in self.results, theids)
1306 local_ids = filter(lambda msg_id: msg_id in self.history or msg_id in self.results, theids)
1306 remote_ids = filter(lambda msg_id: msg_id not in local_ids, theids)
1307 remote_ids = filter(lambda msg_id: msg_id not in local_ids, theids)
1307
1308
1308 if remote_ids:
1309 if remote_ids:
1309 ar = AsyncHubResult(self, msg_ids=theids)
1310 ar = AsyncHubResult(self, msg_ids=theids)
1310 else:
1311 else:
1311 ar = AsyncResult(self, msg_ids=theids)
1312 ar = AsyncResult(self, msg_ids=theids)
1312
1313
1313 if block:
1314 if block:
1314 ar.wait()
1315 ar.wait()
1315
1316
1316 return ar
1317 return ar
1317
1318
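A sketch of reconstructing an AsyncResult after the fact, assuming `rc` is a connected Client with some history:

    In [1]: rc.execute('c = a + 1')
    In [2]: ar = rc.get_result(-1)    # wrap the most recent msg_id
    In [3]: ar.get()                  # wait for / fetch the result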
1318 @spinfirst
1319 @spinfirst
1319 def result_status(self, msg_ids, status_only=True):
1320 def result_status(self, msg_ids, status_only=True):
1320 """Check on the status of the result(s) of the apply request with `msg_ids`.
1321 """Check on the status of the result(s) of the apply request with `msg_ids`.
1321
1322
1322 If status_only is False, then the actual results will be retrieved, else
1323 If status_only is False, then the actual results will be retrieved, else
1323 only the status of the results will be checked.
1324 only the status of the results will be checked.
1324
1325
1325 Parameters
1326 Parameters
1326 ----------
1327 ----------
1327
1328
1328 msg_ids : list of msg_ids
1329 msg_ids : list of msg_ids
1329 if int:
1330 if int:
1330 Passed as index to self.history for convenience.
1331 Passed as index to self.history for convenience.
1331 status_only : bool (default: True)
1332 status_only : bool (default: True)
1332 if False:
1333 if False:
1333 Retrieve the actual results of completed tasks.
1334 Retrieve the actual results of completed tasks.
1334
1335
1335 Returns
1336 Returns
1336 -------
1337 -------
1337
1338
1338 results : dict
1339 results : dict
1339 There will always be the keys 'pending' and 'completed', which will
1340 There will always be the keys 'pending' and 'completed', which will
1340 be lists of msg_ids that are incomplete or complete. If `status_only`
1341 be lists of msg_ids that are incomplete or complete. If `status_only`
1341 is False, then completed results will be keyed by their `msg_id`.
1342 is False, then completed results will be keyed by their `msg_id`.
1342 """
1343 """
1343 if not isinstance(msg_ids, (list,tuple)):
1344 if not isinstance(msg_ids, (list,tuple)):
1344 msg_ids = [msg_ids]
1345 msg_ids = [msg_ids]
1345
1346
1346 theids = []
1347 theids = []
1347 for msg_id in msg_ids:
1348 for msg_id in msg_ids:
1348 if isinstance(msg_id, int):
1349 if isinstance(msg_id, int):
1349 msg_id = self.history[msg_id]
1350 msg_id = self.history[msg_id]
1350 if not isinstance(msg_id, basestring):
1351 if not isinstance(msg_id, basestring):
1351 raise TypeError("msg_ids must be str, not %r"%msg_id)
1352 raise TypeError("msg_ids must be str, not %r"%msg_id)
1352 theids.append(msg_id)
1353 theids.append(msg_id)
1353
1354
1354 completed = []
1355 completed = []
1355 local_results = {}
1356 local_results = {}
1356
1357
1357 # comment this block out to temporarily disable local shortcut:
1358 # comment this block out to temporarily disable local shortcut:
1358 for msg_id in list(theids): # iterate over a copy, since we remove from theids below
1359 for msg_id in list(theids): # iterate over a copy, since we remove from theids below
1359 if msg_id in self.results:
1360 if msg_id in self.results:
1360 completed.append(msg_id)
1361 completed.append(msg_id)
1361 local_results[msg_id] = self.results[msg_id]
1362 local_results[msg_id] = self.results[msg_id]
1362 theids.remove(msg_id)
1363 theids.remove(msg_id)
1363
1364
1364 if theids: # some not locally cached
1365 if theids: # some not locally cached
1365 content = dict(msg_ids=theids, status_only=status_only)
1366 content = dict(msg_ids=theids, status_only=status_only)
1366 msg = self.session.send(self._query_socket, "result_request", content=content)
1367 msg = self.session.send(self._query_socket, "result_request", content=content)
1367 zmq.select([self._query_socket], [], [])
1368 zmq.select([self._query_socket], [], [])
1368 idents,msg = self.session.recv(self._query_socket, zmq.NOBLOCK)
1369 idents,msg = self.session.recv(self._query_socket, zmq.NOBLOCK)
1369 if self.debug:
1370 if self.debug:
1370 pprint(msg)
1371 pprint(msg)
1371 content = msg['content']
1372 content = msg['content']
1372 if content['status'] != 'ok':
1373 if content['status'] != 'ok':
1373 raise self._unwrap_exception(content)
1374 raise self._unwrap_exception(content)
1374 buffers = msg['buffers']
1375 buffers = msg['buffers']
1375 else:
1376 else:
1376 content = dict(completed=[],pending=[])
1377 content = dict(completed=[],pending=[])
1377
1378
1378 content['completed'].extend(completed)
1379 content['completed'].extend(completed)
1379
1380
1380 if status_only:
1381 if status_only:
1381 return content
1382 return content
1382
1383
1383 failures = []
1384 failures = []
1384 # load cached results into result:
1385 # load cached results into result:
1385 content.update(local_results)
1386 content.update(local_results)
1386 # update cache with results:
1387 # update cache with results:
1387 for msg_id in sorted(theids):
1388 for msg_id in sorted(theids):
1388 if msg_id in content['completed']:
1389 if msg_id in content['completed']:
1389 rec = content[msg_id]
1390 rec = content[msg_id]
1390 parent = rec['header']
1391 parent = rec['header']
1391 header = rec['result_header']
1392 header = rec['result_header']
1392 rcontent = rec['result_content']
1393 rcontent = rec['result_content']
1393 iodict = rec['io']
1394 iodict = rec['io']
1394 if isinstance(rcontent, str):
1395 if isinstance(rcontent, str):
1395 rcontent = self.session.unpack(rcontent)
1396 rcontent = self.session.unpack(rcontent)
1396
1397
1397 md = self.metadata.setdefault(msg_id, Metadata())
1398 md = self.metadata.setdefault(msg_id, Metadata())
1398 md.update(self._extract_metadata(header, parent, rcontent))
1399 md.update(self._extract_metadata(header, parent, rcontent))
1399 md.update(iodict)
1400 md.update(iodict)
1400
1401
1401 if rcontent['status'] == 'ok':
1402 if rcontent['status'] == 'ok':
1402 res,buffers = ss.unserialize_object(buffers)
1403 res,buffers = util.unserialize_object(buffers)
1403 else:
1404 else:
1404 print rcontent
1405 print rcontent
1405 res = self._unwrap_exception(rcontent)
1406 res = self._unwrap_exception(rcontent)
1406 failures.append(res)
1407 failures.append(res)
1407
1408
1408 self.results[msg_id] = res
1409 self.results[msg_id] = res
1409 content[msg_id] = res
1410 content[msg_id] = res
1410
1411
1411 if len(theids) == 1 and failures:
1412 if len(theids) == 1 and failures:
1412 raise failures[0]
1413 raise failures[0]
1413
1414
1414 error.collect_exceptions(failures, "result_status")
1415 error.collect_exceptions(failures, "result_status")
1415 return content
1416 return content
1416
1417
1417 @spinfirst
1418 @spinfirst
1418 def queue_status(self, targets='all', verbose=False):
1419 def queue_status(self, targets='all', verbose=False):
1419 """Fetch the status of engine queues.
1420 """Fetch the status of engine queues.
1420
1421
1421 Parameters
1422 Parameters
1422 ----------
1423 ----------
1423
1424
1424 targets : int/str/list of ints/strs
1425 targets : int/str/list of ints/strs
1425 the engines whose states are to be queried.
1426 the engines whose states are to be queried.
1426 default : all
1427 default : all
1427 verbose : bool
1428 verbose : bool
1428 Whether to return lengths only, or lists of ids for each element
1429 Whether to return lengths only, or lists of ids for each element
1429 """
1430 """
1430 targets = self._build_targets(targets)[1]
1431 targets = self._build_targets(targets)[1]
1431 content = dict(targets=targets, verbose=verbose)
1432 content = dict(targets=targets, verbose=verbose)
1432 self.session.send(self._query_socket, "queue_request", content=content)
1433 self.session.send(self._query_socket, "queue_request", content=content)
1433 idents,msg = self.session.recv(self._query_socket, 0)
1434 idents,msg = self.session.recv(self._query_socket, 0)
1434 if self.debug:
1435 if self.debug:
1435 pprint(msg)
1436 pprint(msg)
1436 content = msg['content']
1437 content = msg['content']
1437 status = content.pop('status')
1438 status = content.pop('status')
1438 if status != 'ok':
1439 if status != 'ok':
1439 raise self._unwrap_exception(content)
1440 raise self._unwrap_exception(content)
1440 return ss.rekey(content)
1441 return util.rekey(content)
1441
1442
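An illustrative status query, assuming `rc` is a connected Client; the exact keys come from the Hub's reply:

    In [1]: rc.queue_status()                         # per-engine job counts
    In [2]: rc.queue_status(targets=0, verbose=True)  # msg_id lists for engine 0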
1442 @spinfirst
1443 @spinfirst
1443 def purge_results(self, jobs=[], targets=[]):
1444 def purge_results(self, jobs=[], targets=[]):
1444 """Tell the controller to forget results.
1445 """Tell the controller to forget results.
1445
1446
1446 Individual results can be purged by msg_id, or the entire
1447 Individual results can be purged by msg_id, or the entire
1447 history of specific targets can be purged.
1448 history of specific targets can be purged.
1448
1449
1449 Parameters
1450 Parameters
1450 ----------
1451 ----------
1451
1452
1452 jobs : str or list of strs or AsyncResult objects
1453 jobs : str or list of strs or AsyncResult objects
1453 the msg_ids whose results should be forgotten.
1454 the msg_ids whose results should be forgotten.
1454 targets : int/str/list of ints/strs
1455 targets : int/str/list of ints/strs
1455 The targets, by uuid or int_id, whose entire history is to be purged.
1456 The targets, by uuid or int_id, whose entire history is to be purged.
1456 Use `targets='all'` to scrub everything from the controller's memory.
1457 Use `targets='all'` to scrub everything from the controller's memory.
1457
1458
1458 default : None
1459 default : None
1459 """
1460 """
1460 if not targets and not jobs:
1461 if not targets and not jobs:
1461 raise ValueError("Must specify at least one of `targets` and `jobs`")
1462 raise ValueError("Must specify at least one of `targets` and `jobs`")
1462 if targets:
1463 if targets:
1463 targets = self._build_targets(targets)[1]
1464 targets = self._build_targets(targets)[1]
1464
1465
1465 # construct msg_ids from jobs
1466 # construct msg_ids from jobs
1466 msg_ids = []
1467 msg_ids = []
1467 if isinstance(jobs, (basestring,AsyncResult)):
1468 if isinstance(jobs, (basestring,AsyncResult)):
1468 jobs = [jobs]
1469 jobs = [jobs]
1469 bad_ids = filter(lambda obj: not isinstance(obj, (basestring, AsyncResult)), jobs)
1470 bad_ids = filter(lambda obj: not isinstance(obj, (basestring, AsyncResult)), jobs)
1470 if bad_ids:
1471 if bad_ids:
1471 raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0])
1472 raise TypeError("Invalid msg_id type %r, expected str or AsyncResult"%bad_ids[0])
1472 for j in jobs:
1473 for j in jobs:
1473 if isinstance(j, AsyncResult):
1474 if isinstance(j, AsyncResult):
1474 msg_ids.extend(j.msg_ids)
1475 msg_ids.extend(j.msg_ids)
1475 else:
1476 else:
1476 msg_ids.append(j)
1477 msg_ids.append(j)
1477
1478
1478 content = dict(targets=targets, msg_ids=msg_ids)
1479 content = dict(targets=targets, msg_ids=msg_ids)
1479 self.session.send(self._query_socket, "purge_request", content=content)
1480 self.session.send(self._query_socket, "purge_request", content=content)
1480 idents, msg = self.session.recv(self._query_socket, 0)
1481 idents, msg = self.session.recv(self._query_socket, 0)
1481 if self.debug:
1482 if self.debug:
1482 pprint(msg)
1483 pprint(msg)
1483 content = msg['content']
1484 content = msg['content']
1484 if content['status'] != 'ok':
1485 if content['status'] != 'ok':
1485 raise self._unwrap_exception(content)
1486 raise self._unwrap_exception(content)
1486
1487
1487
1488
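A matching sketch for purge_results; `rc` and `ar` (an AsyncResult from a prior call) are again illustrative names:

    rc.purge_results(jobs=ar)        # forget the results of one job's msg_ids
    rc.purge_results(targets='all')  # scrub the full history of every engine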
1488 __all__ = [ 'Client',
1489 __all__ = [ 'Client',
1489 'depend',
1490 'depend',
1490 'require',
1491 'require',
1491 'remote',
1492 'remote',
1492 'parallel',
1493 'parallel',
1493 'RemoteFunction',
1494 'RemoteFunction',
1494 'ParallelFunction',
1495 'ParallelFunction',
1495 'DirectView',
1496 'DirectView',
1496 'LoadBalancedView',
1497 'LoadBalancedView',
1497 'AsyncResult',
1498 'AsyncResult',
1498 'AsyncMapResult'
1499 'AsyncMapResult',
1500 'Reference'
1499 ]
1501 ]
@@ -1,538 +1,537 b''
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 # encoding: utf-8
2 # encoding: utf-8
3 """
3 """
4 The IPython cluster directory
4 The IPython cluster directory
5 """
5 """
6
6
7 #-----------------------------------------------------------------------------
7 #-----------------------------------------------------------------------------
8 # Copyright (C) 2008-2009 The IPython Development Team
8 # Copyright (C) 2008-2009 The IPython Development Team
9 #
9 #
10 # Distributed under the terms of the BSD License. The full license is in
10 # Distributed under the terms of the BSD License. The full license is in
11 # the file COPYING, distributed as part of this software.
11 # the file COPYING, distributed as part of this software.
12 #-----------------------------------------------------------------------------
12 #-----------------------------------------------------------------------------
13
13
14 #-----------------------------------------------------------------------------
14 #-----------------------------------------------------------------------------
15 # Imports
15 # Imports
16 #-----------------------------------------------------------------------------
16 #-----------------------------------------------------------------------------
17
17
18 from __future__ import with_statement
18 from __future__ import with_statement
19
19
20 import os
20 import os
21 import logging
21 import logging
22 import re
22 import re
23 import shutil
23 import shutil
24 import sys
24 import sys
25 import warnings
26
25
27 from IPython.config.loader import PyFileConfigLoader
26 from IPython.config.loader import PyFileConfigLoader
28 from IPython.config.configurable import Configurable
27 from IPython.config.configurable import Configurable
29 from IPython.core.application import Application, BaseAppConfigLoader
28 from IPython.core.application import Application, BaseAppConfigLoader
30 from IPython.core.crashhandler import CrashHandler
29 from IPython.core.crashhandler import CrashHandler
31 from IPython.core import release
30 from IPython.core import release
32 from IPython.utils.path import (
31 from IPython.utils.path import (
33 get_ipython_package_dir,
32 get_ipython_package_dir,
34 expand_path
33 expand_path
35 )
34 )
36 from IPython.utils.traitlets import Unicode
35 from IPython.utils.traitlets import Unicode
37
36
38 #-----------------------------------------------------------------------------
37 #-----------------------------------------------------------------------------
39 # Module errors
38 # Module errors
40 #-----------------------------------------------------------------------------
39 #-----------------------------------------------------------------------------
41
40
42 class ClusterDirError(Exception):
41 class ClusterDirError(Exception):
43 pass
42 pass
44
43
45
44
46 class PIDFileError(Exception):
45 class PIDFileError(Exception):
47 pass
46 pass
48
47
49
48
50 #-----------------------------------------------------------------------------
49 #-----------------------------------------------------------------------------
51 # Class for managing cluster directories
50 # Class for managing cluster directories
52 #-----------------------------------------------------------------------------
51 #-----------------------------------------------------------------------------
53
52
54 class ClusterDir(Configurable):
53 class ClusterDir(Configurable):
55 """An object to manage the cluster directory and its resources.
54 """An object to manage the cluster directory and its resources.
56
55
57 The cluster directory is used by :command:`ipengine`,
56 The cluster directory is used by :command:`ipengine`,
58 :command:`ipcontroller` and :command:`ipcluster` to manage the
57 :command:`ipcontroller` and :command:`ipcluster` to manage the
59 configuration, logging and security of these applications.
58 configuration, logging and security of these applications.
60
59
61 This object knows how to find, create and manage these directories. This
60 This object knows how to find, create and manage these directories. This
62 should be used by any code that wants to handle cluster directories.
61 should be used by any code that wants to handle cluster directories.
63 """
62 """
64
63
65 security_dir_name = Unicode('security')
64 security_dir_name = Unicode('security')
66 log_dir_name = Unicode('log')
65 log_dir_name = Unicode('log')
67 pid_dir_name = Unicode('pid')
66 pid_dir_name = Unicode('pid')
68 security_dir = Unicode(u'')
67 security_dir = Unicode(u'')
69 log_dir = Unicode(u'')
68 log_dir = Unicode(u'')
70 pid_dir = Unicode(u'')
69 pid_dir = Unicode(u'')
71 location = Unicode(u'')
70 location = Unicode(u'')
72
71
73 def __init__(self, location=u''):
72 def __init__(self, location=u''):
74 super(ClusterDir, self).__init__(location=location)
73 super(ClusterDir, self).__init__(location=location)
75
74
76 def _location_changed(self, name, old, new):
75 def _location_changed(self, name, old, new):
77 if not os.path.isdir(new):
76 if not os.path.isdir(new):
78 os.makedirs(new)
77 os.makedirs(new)
79 self.security_dir = os.path.join(new, self.security_dir_name)
78 self.security_dir = os.path.join(new, self.security_dir_name)
80 self.log_dir = os.path.join(new, self.log_dir_name)
79 self.log_dir = os.path.join(new, self.log_dir_name)
81 self.pid_dir = os.path.join(new, self.pid_dir_name)
80 self.pid_dir = os.path.join(new, self.pid_dir_name)
82 self.check_dirs()
81 self.check_dirs()
83
82
84 def _log_dir_changed(self, name, old, new):
83 def _log_dir_changed(self, name, old, new):
85 self.check_log_dir()
84 self.check_log_dir()
86
85
87 def check_log_dir(self):
86 def check_log_dir(self):
88 if not os.path.isdir(self.log_dir):
87 if not os.path.isdir(self.log_dir):
89 os.mkdir(self.log_dir)
88 os.mkdir(self.log_dir)
90
89
91 def _security_dir_changed(self, name, old, new):
90 def _security_dir_changed(self, name, old, new):
92 self.check_security_dir()
91 self.check_security_dir()
93
92
94 def check_security_dir(self):
93 def check_security_dir(self):
95 if not os.path.isdir(self.security_dir):
94 if not os.path.isdir(self.security_dir):
96 os.mkdir(self.security_dir, 0700)
95 os.mkdir(self.security_dir, 0700)
97 os.chmod(self.security_dir, 0700)
96 os.chmod(self.security_dir, 0700)
98
97
99 def _pid_dir_changed(self, name, old, new):
98 def _pid_dir_changed(self, name, old, new):
100 self.check_pid_dir()
99 self.check_pid_dir()
101
100
102 def check_pid_dir(self):
101 def check_pid_dir(self):
103 if not os.path.isdir(self.pid_dir):
102 if not os.path.isdir(self.pid_dir):
104 os.mkdir(self.pid_dir, 0700)
103 os.mkdir(self.pid_dir, 0700)
105 os.chmod(self.pid_dir, 0700)
104 os.chmod(self.pid_dir, 0700)
106
105
107 def check_dirs(self):
106 def check_dirs(self):
108 self.check_security_dir()
107 self.check_security_dir()
109 self.check_log_dir()
108 self.check_log_dir()
110 self.check_pid_dir()
109 self.check_pid_dir()
111
110
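Since `location` is a traitlet, assigning it is enough to provision the whole tree via `_location_changed`. A minimal sketch, with an invented path:

    cd = ClusterDir(location=u'/tmp/clusterz_demo')
    # /tmp/clusterz_demo now contains security/, log/ and pid/,
    # with security/ and pid/ restricted to mode 0700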
112 def load_config_file(self, filename):
111 def load_config_file(self, filename):
113 """Load a config file from the top level of the cluster dir.
112 """Load a config file from the top level of the cluster dir.
114
113
115 Parameters
114 Parameters
116 ----------
115 ----------
117 filename : unicode or str
116 filename : unicode or str
118 The filename only of the config file that must be located in
117 The filename only of the config file that must be located in
119 the top-level of the cluster directory.
118 the top-level of the cluster directory.
120 """
119 """
121 loader = PyFileConfigLoader(filename, self.location)
120 loader = PyFileConfigLoader(filename, self.location)
122 return loader.load_config()
121 return loader.load_config()
123
122
124 def copy_config_file(self, config_file, path=None, overwrite=False):
123 def copy_config_file(self, config_file, path=None, overwrite=False):
125 """Copy a default config file into the active cluster directory.
124 """Copy a default config file into the active cluster directory.
126
125
127 Default configuration files are kept in :mod:`IPython.config.default`.
126 Default configuration files are kept in :mod:`IPython.config.default`.
128 This function moves these from that location to the working cluster
127 This function moves these from that location to the working cluster
129 directory.
128 directory.
130 """
129 """
131 if path is None:
130 if path is None:
132 import IPython.config.default
131 import IPython.config.default
133 path = IPython.config.default.__file__.split(os.path.sep)[:-1]
132 path = IPython.config.default.__file__.split(os.path.sep)[:-1]
134 path = os.path.sep.join(path)
133 path = os.path.sep.join(path)
135 src = os.path.join(path, config_file)
134 src = os.path.join(path, config_file)
136 dst = os.path.join(self.location, config_file)
135 dst = os.path.join(self.location, config_file)
137 if not os.path.isfile(dst) or overwrite:
136 if not os.path.isfile(dst) or overwrite:
138 shutil.copy(src, dst)
137 shutil.copy(src, dst)
139
138
140 def copy_all_config_files(self, path=None, overwrite=False):
139 def copy_all_config_files(self, path=None, overwrite=False):
141 """Copy all config files into the active cluster directory."""
140 """Copy all config files into the active cluster directory."""
142 for f in [u'ipcontrollerz_config.py', u'ipenginez_config.py',
141 for f in [u'ipcontrollerz_config.py', u'ipenginez_config.py',
143 u'ipclusterz_config.py']:
142 u'ipclusterz_config.py']:
144 self.copy_config_file(f, path=path, overwrite=overwrite)
143 self.copy_config_file(f, path=path, overwrite=overwrite)
145
144
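Continuing the sketch above, seeding a fresh cluster dir with the default config files is a single call:

    cd.copy_all_config_files()
    # copies ipcontrollerz_config.py, ipenginez_config.py and
    # ipclusterz_config.py from IPython.config.default,
    # leaving any existing copies untouched (overwrite=False)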
146 @classmethod
145 @classmethod
147 def create_cluster_dir(cls, cluster_dir):
146 def create_cluster_dir(cls, cluster_dir):
148 """Create a new cluster directory given a full path.
147 """Create a new cluster directory given a full path.
149
148
150 Parameters
149 Parameters
151 ----------
150 ----------
152 cluster_dir : str
151 cluster_dir : str
153 The full path to the cluster directory. If it does exist, it will
152 The full path to the cluster directory. If it does exist, it will
154 be used. If not, it will be created.
153 be used. If not, it will be created.
155 """
154 """
156 return ClusterDir(location=cluster_dir)
155 return ClusterDir(location=cluster_dir)
157
156
158 @classmethod
157 @classmethod
159 def create_cluster_dir_by_profile(cls, path, profile=u'default'):
158 def create_cluster_dir_by_profile(cls, path, profile=u'default'):
160 """Create a cluster dir by profile name and path.
159 """Create a cluster dir by profile name and path.
161
160
162 Parameters
161 Parameters
163 ----------
162 ----------
164 path : str
163 path : str
165 The path (directory) to put the cluster directory in.
164 The path (directory) to put the cluster directory in.
166 profile : str
165 profile : str
167 The name of the profile. The name of the cluster directory will
166 The name of the profile. The name of the cluster directory will
168 be "clusterz_<profile>".
167 be "clusterz_<profile>".
169 """
168 """
170 if not os.path.isdir(path):
169 if not os.path.isdir(path):
171 raise ClusterDirError('Directory not found: %s' % path)
170 raise ClusterDirError('Directory not found: %s' % path)
172 cluster_dir = os.path.join(path, u'clusterz_' + profile)
171 cluster_dir = os.path.join(path, u'clusterz_' + profile)
173 return ClusterDir(location=cluster_dir)
172 return ClusterDir(location=cluster_dir)
174
173
175 @classmethod
174 @classmethod
176 def find_cluster_dir_by_profile(cls, ipython_dir, profile=u'default'):
175 def find_cluster_dir_by_profile(cls, ipython_dir, profile=u'default'):
177 """Find an existing cluster dir by profile name, return its ClusterDir.
176 """Find an existing cluster dir by profile name, return its ClusterDir.
178
177
179 This searches through a sequence of paths for a cluster dir. If it
178 This searches through a sequence of paths for a cluster dir. If it
180 is not found, a :class:`ClusterDirError` exception will be raised.
179 is not found, a :class:`ClusterDirError` exception will be raised.
181
180
182 The search path algorithm is:
181 The search path algorithm is:
183 1. ``os.getcwd()``
182 1. ``os.getcwd()``
184 2. ``ipython_dir``
183 2. ``ipython_dir``
185 3. The directories found in the ":" separated
184 3. The directories found in the ":" separated
186 :env:`IPCLUSTER_DIR_PATH` environment variable.
185 :env:`IPCLUSTER_DIR_PATH` environment variable.
187
186
188 Parameters
187 Parameters
189 ----------
188 ----------
190 ipython_dir : unicode or str
189 ipython_dir : unicode or str
191 The IPython directory to use.
190 The IPython directory to use.
192 profile : unicode or str
191 profile : unicode or str
193 The name of the profile. The name of the cluster directory
192 The name of the profile. The name of the cluster directory
194 will be "clusterz_<profile>".
193 will be "clusterz_<profile>".
195 """
194 """
196 dirname = u'clusterz_' + profile
195 dirname = u'clusterz_' + profile
197 cluster_dir_paths = os.environ.get('IPCLUSTER_DIR_PATH','')
196 cluster_dir_paths = os.environ.get('IPCLUSTER_DIR_PATH','')
198 if cluster_dir_paths:
197 if cluster_dir_paths:
199 cluster_dir_paths = cluster_dir_paths.split(':')
198 cluster_dir_paths = cluster_dir_paths.split(':')
200 else:
199 else:
201 cluster_dir_paths = []
200 cluster_dir_paths = []
202 paths = [os.getcwd(), ipython_dir] + cluster_dir_paths
201 paths = [os.getcwd(), ipython_dir] + cluster_dir_paths
203 for p in paths:
202 for p in paths:
204 cluster_dir = os.path.join(p, dirname)
203 cluster_dir = os.path.join(p, dirname)
205 if os.path.isdir(cluster_dir):
204 if os.path.isdir(cluster_dir):
206 return ClusterDir(location=cluster_dir)
205 return ClusterDir(location=cluster_dir)
207 else:
206 else:
208 raise ClusterDirError('Cluster directory not found in paths: %s' % dirname)
207 raise ClusterDirError('Cluster directory not found in paths: %s' % dirname)
209
208
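A sketch of the lookup order, with invented paths and profile name:

    import os
    os.environ['IPCLUSTER_DIR_PATH'] = '/srv/clusters:/opt/clusters'
    cd = ClusterDir.find_cluster_dir_by_profile(u'/home/me/.ipython', u'prod')
    # searches for clusterz_prod in: cwd, /home/me/.ipython,
    # /srv/clusters, /opt/clusters; raises ClusterDirError if absent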
210 @classmethod
209 @classmethod
211 def find_cluster_dir(cls, cluster_dir):
210 def find_cluster_dir(cls, cluster_dir):
212 """Find/create a cluster dir and return its ClusterDir.
211 """Find/create a cluster dir and return its ClusterDir.
213
212
214 This will create the cluster directory if it doesn't exist.
213 This will create the cluster directory if it doesn't exist.
215
214
216 Parameters
215 Parameters
217 ----------
216 ----------
218 cluster_dir : unicode or str
217 cluster_dir : unicode or str
219 The path of the cluster directory. This is expanded using
218 The path of the cluster directory. This is expanded using
220 :func:`IPython.utils.path.expand_path`.
219 :func:`IPython.utils.path.expand_path`.
221 """
220 """
222 cluster_dir = expand_path(cluster_dir)
221 cluster_dir = expand_path(cluster_dir)
223 if not os.path.isdir(cluster_dir):
222 if not os.path.isdir(cluster_dir):
224 raise ClusterDirError('Cluster directory not found: %s' % cluster_dir)
223 raise ClusterDirError('Cluster directory not found: %s' % cluster_dir)
225 return ClusterDir(location=cluster_dir)
224 return ClusterDir(location=cluster_dir)
226
225
227
226
228 #-----------------------------------------------------------------------------
227 #-----------------------------------------------------------------------------
229 # Command line options
228 # Command line options
230 #-----------------------------------------------------------------------------
229 #-----------------------------------------------------------------------------
231
230
232 class ClusterDirConfigLoader(BaseAppConfigLoader):
231 class ClusterDirConfigLoader(BaseAppConfigLoader):
233
232
234 def _add_cluster_profile(self, parser):
233 def _add_cluster_profile(self, parser):
235 paa = parser.add_argument
234 paa = parser.add_argument
236 paa('-p', '--profile',
235 paa('-p', '--profile',
237 dest='Global.profile',type=unicode,
236 dest='Global.profile',type=unicode,
238 help=
237 help=
239 """The string name of the profile to be used. This determines the name
238 """The string name of the profile to be used. This determines the name
240 of the cluster dir as: cluster_<profile>. The default profile is named
239 of the cluster dir as: cluster_<profile>. The default profile is named
241 'default'. The cluster directory is resolved this way if the
240 'default'. The cluster directory is resolved this way if the
242 --cluster-dir option is not used.""",
241 --cluster-dir option is not used.""",
243 metavar='Global.profile')
242 metavar='Global.profile')
244
243
245 def _add_cluster_dir(self, parser):
244 def _add_cluster_dir(self, parser):
246 paa = parser.add_argument
245 paa = parser.add_argument
247 paa('--cluster-dir',
246 paa('--cluster-dir',
248 dest='Global.cluster_dir',type=unicode,
247 dest='Global.cluster_dir',type=unicode,
249 help="""Set the cluster dir. This overrides the logic used by the
248 help="""Set the cluster dir. This overrides the logic used by the
250 --profile option.""",
249 --profile option.""",
251 metavar='Global.cluster_dir')
250 metavar='Global.cluster_dir')
252
251
253 def _add_work_dir(self, parser):
252 def _add_work_dir(self, parser):
254 paa = parser.add_argument
253 paa = parser.add_argument
255 paa('--work-dir',
254 paa('--work-dir',
256 dest='Global.work_dir',type=unicode,
255 dest='Global.work_dir',type=unicode,
257 help='Set the working dir for the process.',
256 help='Set the working dir for the process.',
258 metavar='Global.work_dir')
257 metavar='Global.work_dir')
259
258
260 def _add_clean_logs(self, parser):
259 def _add_clean_logs(self, parser):
261 paa = parser.add_argument
260 paa = parser.add_argument
262 paa('--clean-logs',
261 paa('--clean-logs',
263 dest='Global.clean_logs', action='store_true',
262 dest='Global.clean_logs', action='store_true',
264 help='Delete old log files before starting.')
263 help='Delete old log files before starting.')
265
264
266 def _add_no_clean_logs(self, parser):
265 def _add_no_clean_logs(self, parser):
267 paa = parser.add_argument
266 paa = parser.add_argument
268 paa('--no-clean-logs',
267 paa('--no-clean-logs',
269 dest='Global.clean_logs', action='store_false',
268 dest='Global.clean_logs', action='store_false',
270 help="Don't Delete old log flies before starting.")
269 help="Don't Delete old log flies before starting.")
271
270
272 def _add_arguments(self):
271 def _add_arguments(self):
273 super(ClusterDirConfigLoader, self)._add_arguments()
272 super(ClusterDirConfigLoader, self)._add_arguments()
274 self._add_cluster_profile(self.parser)
273 self._add_cluster_profile(self.parser)
275 self._add_cluster_dir(self.parser)
274 self._add_cluster_dir(self.parser)
276 self._add_work_dir(self.parser)
275 self._add_work_dir(self.parser)
277 self._add_clean_logs(self.parser)
276 self._add_clean_logs(self.parser)
278 self._add_no_clean_logs(self.parser)
277 self._add_no_clean_logs(self.parser)
279
278
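On an assumed command line, these options combine as, e.g. (command and profile name are illustrative):

    ipclusterz --profile prod --work-dir /tmp/run --clean-logs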
280
279
281 #-----------------------------------------------------------------------------
280 #-----------------------------------------------------------------------------
282 # Crash handler for this application
281 # Crash handler for this application
283 #-----------------------------------------------------------------------------
282 #-----------------------------------------------------------------------------
284
283
285
284
286 _message_template = """\
285 _message_template = """\
287 Oops, $self.app_name crashed. We do our best to make it stable, but...
286 Oops, $self.app_name crashed. We do our best to make it stable, but...
288
287
289 A crash report was automatically generated with the following information:
288 A crash report was automatically generated with the following information:
290 - A verbatim copy of the crash traceback.
289 - A verbatim copy of the crash traceback.
291 - Data on your current $self.app_name configuration.
290 - Data on your current $self.app_name configuration.
292
291
293 It was left in the file named:
292 It was left in the file named:
294 \t'$self.crash_report_fname'
293 \t'$self.crash_report_fname'
295 If you can email this file to the developers, the information in it will help
294 If you can email this file to the developers, the information in it will help
296 them understand and correct the problem.
295 them understand and correct the problem.
297
296
298 You can mail it to: $self.contact_name at $self.contact_email
297 You can mail it to: $self.contact_name at $self.contact_email
299 with the subject '$self.app_name Crash Report'.
298 with the subject '$self.app_name Crash Report'.
300
299
301 If you want to do it now, the following command will work (under Unix):
300 If you want to do it now, the following command will work (under Unix):
302 mail -s '$self.app_name Crash Report' $self.contact_email < $self.crash_report_fname
301 mail -s '$self.app_name Crash Report' $self.contact_email < $self.crash_report_fname
303
302
304 To ensure accurate tracking of this issue, please file a report about it at:
303 To ensure accurate tracking of this issue, please file a report about it at:
305 $self.bug_tracker
304 $self.bug_tracker
306 """
305 """
307
306
308 class ClusterDirCrashHandler(CrashHandler):
307 class ClusterDirCrashHandler(CrashHandler):
309 """sys.excepthook for IPython itself, leaves a detailed report on disk."""
308 """sys.excepthook for IPython itself, leaves a detailed report on disk."""
310
309
311 message_template = _message_template
310 message_template = _message_template
312
311
313 def __init__(self, app):
312 def __init__(self, app):
314 contact_name = release.authors['Brian'][0]
313 contact_name = release.authors['Brian'][0]
315 contact_email = release.authors['Brian'][1]
314 contact_email = release.authors['Brian'][1]
316 bug_tracker = 'http://github.com/ipython/ipython/issues'
315 bug_tracker = 'http://github.com/ipython/ipython/issues'
317 super(ClusterDirCrashHandler,self).__init__(
316 super(ClusterDirCrashHandler,self).__init__(
318 app, contact_name, contact_email, bug_tracker
317 app, contact_name, contact_email, bug_tracker
319 )
318 )
320
319
321
320
322 #-----------------------------------------------------------------------------
321 #-----------------------------------------------------------------------------
323 # Main application
322 # Main application
324 #-----------------------------------------------------------------------------
323 #-----------------------------------------------------------------------------
325
324
326 class ApplicationWithClusterDir(Application):
325 class ApplicationWithClusterDir(Application):
327 """An application that puts everything into a cluster directory.
326 """An application that puts everything into a cluster directory.
328
327
329 Instead of looking for things in the ipython_dir, this type of application
328 Instead of looking for things in the ipython_dir, this type of application
330 will use its own private directory called the "cluster directory"
329 will use its own private directory called the "cluster directory"
331 for things like config files, log files, etc.
330 for things like config files, log files, etc.
332
331
333 The cluster directory is resolved as follows:
332 The cluster directory is resolved as follows:
334
333
335 * If the ``--cluster-dir`` option is given, it is used.
334 * If the ``--cluster-dir`` option is given, it is used.
336 * If ``--cluster-dir`` is not given, the application directory is
335 * If ``--cluster-dir`` is not given, the application directory is
337 resolved using the profile name as ``cluster_<profile>``. The search
336 resolved using the profile name as ``cluster_<profile>``. The search
338 path for this directory is then i) the cwd, if it is found there,
337 path for this directory is then i) the cwd, if it is found there,
339 and ii) ipython_dir otherwise.
338 and ii) ipython_dir otherwise.
340
339
341 The config file for the application is to be put in the cluster
340 The config file for the application is to be put in the cluster
342 dir and named the value of the ``config_file_name`` class attribute.
341 dir and named the value of the ``config_file_name`` class attribute.
343 """
342 """
344
343
345 command_line_loader = ClusterDirConfigLoader
344 command_line_loader = ClusterDirConfigLoader
346 crash_handler_class = ClusterDirCrashHandler
345 crash_handler_class = ClusterDirCrashHandler
347 auto_create_cluster_dir = True
346 auto_create_cluster_dir = True
348 # temporarily override default_log_level to INFO
347 # temporarily override default_log_level to INFO
349 default_log_level = logging.INFO
348 default_log_level = logging.INFO
350
349
351 def create_default_config(self):
350 def create_default_config(self):
352 super(ApplicationWithClusterDir, self).create_default_config()
351 super(ApplicationWithClusterDir, self).create_default_config()
353 self.default_config.Global.profile = u'default'
352 self.default_config.Global.profile = u'default'
354 self.default_config.Global.cluster_dir = u''
353 self.default_config.Global.cluster_dir = u''
355 self.default_config.Global.work_dir = os.getcwd()
354 self.default_config.Global.work_dir = os.getcwd()
356 self.default_config.Global.log_to_file = False
355 self.default_config.Global.log_to_file = False
357 self.default_config.Global.log_url = None
356 self.default_config.Global.log_url = None
358 self.default_config.Global.clean_logs = False
357 self.default_config.Global.clean_logs = False
359
358
360 def find_resources(self):
359 def find_resources(self):
361 """This resolves the cluster directory.
360 """This resolves the cluster directory.
362
361
363 This tries to find the cluster directory and, if successful,
362 This tries to find the cluster directory and, if successful,
364 does the following:
363 does the following:
365 * Sets ``self.cluster_dir_obj`` to the :class:`ClusterDir` object for
364 * Sets ``self.cluster_dir_obj`` to the :class:`ClusterDir` object for
366 the application.
365 the application.
367 * Sets ``self.cluster_dir`` attribute of the application and config
366 * Sets ``self.cluster_dir`` attribute of the application and config
368 objects.
367 objects.
369
368
370 The algorithm used for this is as follows:
369 The algorithm used for this is as follows:
371 1. Try ``Global.cluster_dir``.
370 1. Try ``Global.cluster_dir``.
372 2. Try using ``Global.profile``.
371 2. Try using ``Global.profile``.
373 3. If both of these fail and ``self.auto_create_cluster_dir`` is
372 3. If both of these fail and ``self.auto_create_cluster_dir`` is
374 ``True``, then create the new cluster dir in the IPython directory.
373 ``True``, then create the new cluster dir in the IPython directory.
375 4. If all fails, then raise :class:`ClusterDirError`.
374 4. If all fails, then raise :class:`ClusterDirError`.
376 """
375 """
377
376
378 try:
377 try:
379 cluster_dir = self.command_line_config.Global.cluster_dir
378 cluster_dir = self.command_line_config.Global.cluster_dir
380 except AttributeError:
379 except AttributeError:
381 cluster_dir = self.default_config.Global.cluster_dir
380 cluster_dir = self.default_config.Global.cluster_dir
382 cluster_dir = expand_path(cluster_dir)
381 cluster_dir = expand_path(cluster_dir)
383 try:
382 try:
384 self.cluster_dir_obj = ClusterDir.find_cluster_dir(cluster_dir)
383 self.cluster_dir_obj = ClusterDir.find_cluster_dir(cluster_dir)
385 except ClusterDirError:
384 except ClusterDirError:
386 pass
385 pass
387 else:
386 else:
388 self.log.info('Using existing cluster dir: %s' % \
387 self.log.info('Using existing cluster dir: %s' % \
389 self.cluster_dir_obj.location
388 self.cluster_dir_obj.location
390 )
389 )
391 self.finish_cluster_dir()
390 self.finish_cluster_dir()
392 return
391 return
393
392
394 try:
393 try:
395 self.profile = self.command_line_config.Global.profile
394 self.profile = self.command_line_config.Global.profile
396 except AttributeError:
395 except AttributeError:
397 self.profile = self.default_config.Global.profile
396 self.profile = self.default_config.Global.profile
398 try:
397 try:
399 self.cluster_dir_obj = ClusterDir.find_cluster_dir_by_profile(
398 self.cluster_dir_obj = ClusterDir.find_cluster_dir_by_profile(
400 self.ipython_dir, self.profile)
399 self.ipython_dir, self.profile)
401 except ClusterDirError:
400 except ClusterDirError:
402 pass
401 pass
403 else:
402 else:
404 self.log.info('Using existing cluster dir: %s' % \
403 self.log.info('Using existing cluster dir: %s' % \
405 self.cluster_dir_obj.location
404 self.cluster_dir_obj.location
406 )
405 )
407 self.finish_cluster_dir()
406 self.finish_cluster_dir()
408 return
407 return
409
408
410 if self.auto_create_cluster_dir:
409 if self.auto_create_cluster_dir:
411 self.cluster_dir_obj = ClusterDir.create_cluster_dir_by_profile(
410 self.cluster_dir_obj = ClusterDir.create_cluster_dir_by_profile(
412 self.ipython_dir, self.profile
411 self.ipython_dir, self.profile
413 )
412 )
414 self.log.info('Creating new cluster dir: %s' % \
413 self.log.info('Creating new cluster dir: %s' % \
415 self.cluster_dir_obj.location
414 self.cluster_dir_obj.location
416 )
415 )
417 self.finish_cluster_dir()
416 self.finish_cluster_dir()
418 else:
417 else:
419 raise ClusterDirError('Could not find a valid cluster directory.')
418 raise ClusterDirError('Could not find a valid cluster directory.')
420
419
421 def finish_cluster_dir(self):
420 def finish_cluster_dir(self):
422 # Set the cluster directory
421 # Set the cluster directory
423 self.cluster_dir = self.cluster_dir_obj.location
422 self.cluster_dir = self.cluster_dir_obj.location
424
423
425 # These have to be set because they could be different from the one
424 # These have to be set because they could be different from the one
426 # that we just computed. Because command line has the highest
425 # that we just computed. Because command line has the highest
427 # priority, this will always end up in the master_config.
426 # priority, this will always end up in the master_config.
428 self.default_config.Global.cluster_dir = self.cluster_dir
427 self.default_config.Global.cluster_dir = self.cluster_dir
429 self.command_line_config.Global.cluster_dir = self.cluster_dir
428 self.command_line_config.Global.cluster_dir = self.cluster_dir
430
429
431 def find_config_file_name(self):
430 def find_config_file_name(self):
432 """Find the config file name for this application."""
431 """Find the config file name for this application."""
433 # For this type of Application it should be set as a class attribute.
432 # For this type of Application it should be set as a class attribute.
434 if not hasattr(self, 'default_config_file_name'):
433 if not hasattr(self, 'default_config_file_name'):
435 self.log.critical("No config filename found")
434 self.log.critical("No config filename found")
436 else:
435 else:
437 self.config_file_name = self.default_config_file_name
436 self.config_file_name = self.default_config_file_name
438
437
439 def find_config_file_paths(self):
438 def find_config_file_paths(self):
440 # Set the search path to the cluster directory. We should NOT
439 # Set the search path to the cluster directory. We should NOT
441 # include IPython.config.default here as the default config files
440 # include IPython.config.default here as the default config files
442 # are ALWAYS automatically moved to the cluster directory.
441 # are ALWAYS automatically moved to the cluster directory.
443 conf_dir = os.path.join(get_ipython_package_dir(), 'config', 'default')
442 conf_dir = os.path.join(get_ipython_package_dir(), 'config', 'default')
444 self.config_file_paths = (self.cluster_dir,)
443 self.config_file_paths = (self.cluster_dir,)
445
444
446 def pre_construct(self):
445 def pre_construct(self):
447 # The log and security dirs were set earlier, but here we put them
446 # The log and security dirs were set earlier, but here we put them
448 # into the config and log them.
447 # into the config and log them.
449 config = self.master_config
448 config = self.master_config
450 sdir = self.cluster_dir_obj.security_dir
449 sdir = self.cluster_dir_obj.security_dir
451 self.security_dir = config.Global.security_dir = sdir
450 self.security_dir = config.Global.security_dir = sdir
452 ldir = self.cluster_dir_obj.log_dir
451 ldir = self.cluster_dir_obj.log_dir
453 self.log_dir = config.Global.log_dir = ldir
452 self.log_dir = config.Global.log_dir = ldir
454 pdir = self.cluster_dir_obj.pid_dir
453 pdir = self.cluster_dir_obj.pid_dir
455 self.pid_dir = config.Global.pid_dir = pdir
454 self.pid_dir = config.Global.pid_dir = pdir
456 self.log.info("Cluster directory set to: %s" % self.cluster_dir)
455 self.log.info("Cluster directory set to: %s" % self.cluster_dir)
457 config.Global.work_dir = unicode(expand_path(config.Global.work_dir))
456 config.Global.work_dir = unicode(expand_path(config.Global.work_dir))
458 # Change to the working directory. We do this just before construct
457 # Change to the working directory. We do this just before construct
459 # is called so all the components there have the right working dir.
458 # is called so all the components there have the right working dir.
460 self.to_work_dir()
459 self.to_work_dir()
461
460
462 def to_work_dir(self):
461 def to_work_dir(self):
463 wd = self.master_config.Global.work_dir
462 wd = self.master_config.Global.work_dir
464 if unicode(wd) != unicode(os.getcwd()):
463 if unicode(wd) != unicode(os.getcwd()):
465 os.chdir(wd)
464 os.chdir(wd)
466 self.log.info("Changing to working dir: %s" % wd)
465 self.log.info("Changing to working dir: %s" % wd)
467
466
468 def start_logging(self):
467 def start_logging(self):
469 # Remove old log files
468 # Remove old log files
470 if self.master_config.Global.clean_logs:
469 if self.master_config.Global.clean_logs:
471 log_dir = self.master_config.Global.log_dir
470 log_dir = self.master_config.Global.log_dir
472 for f in os.listdir(log_dir):
471 for f in os.listdir(log_dir):
473 if re.match(r'%s-\d+\.(log|err|out)'%self.name,f):
472 if re.match(r'%s-\d+\.(log|err|out)'%self.name,f):
474 # if f.startswith(self.name + u'-') and f.endswith('.log'):
473 # if f.startswith(self.name + u'-') and f.endswith('.log'):
475 os.remove(os.path.join(log_dir, f))
474 os.remove(os.path.join(log_dir, f))
476 # Start logging to the new log file
475 # Start logging to the new log file
477 if self.master_config.Global.log_to_file:
476 if self.master_config.Global.log_to_file:
478 log_filename = self.name + u'-' + str(os.getpid()) + u'.log'
477 log_filename = self.name + u'-' + str(os.getpid()) + u'.log'
479 logfile = os.path.join(self.log_dir, log_filename)
478 logfile = os.path.join(self.log_dir, log_filename)
480 open_log_file = open(logfile, 'w')
479 open_log_file = open(logfile, 'w')
481 elif self.master_config.Global.log_url:
480 elif self.master_config.Global.log_url:
482 open_log_file = None
481 open_log_file = None
483 else:
482 else:
484 open_log_file = sys.stdout
483 open_log_file = sys.stdout
485 if open_log_file is not None:
484 if open_log_file is not None:
486 self.log.removeHandler(self._log_handler)
485 self.log.removeHandler(self._log_handler)
487 self._log_handler = logging.StreamHandler(open_log_file)
486 self._log_handler = logging.StreamHandler(open_log_file)
488 self._log_formatter = logging.Formatter("[%(name)s] %(message)s")
487 self._log_formatter = logging.Formatter("[%(name)s] %(message)s")
489 self._log_handler.setFormatter(self._log_formatter)
488 self._log_handler.setFormatter(self._log_formatter)
490 self.log.addHandler(self._log_handler)
489 self.log.addHandler(self._log_handler)
491 # log.startLogging(open_log_file)
490 # log.startLogging(open_log_file)
492
491
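The clean-logs regex only removes files that carry a pid suffix. A tiny self-check, with a made-up app name:

    import re
    pat = r'%s-\d+\.(log|err|out)' % 'ipclusterz'
    assert re.match(pat, 'ipclusterz-1234.log')
    assert not re.match(pat, 'ipclusterz.log')  # no pid suffix: kept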
493 def write_pid_file(self, overwrite=False):
492 def write_pid_file(self, overwrite=False):
494 """Create a .pid file in the pid_dir with my pid.
493 """Create a .pid file in the pid_dir with my pid.
495
494
496 This must be called after pre_construct, which sets `self.pid_dir`.
495 This must be called after pre_construct, which sets `self.pid_dir`.
497 This raises :exc:`PIDFileError` if the pid file exists already.
496 This raises :exc:`PIDFileError` if the pid file exists already.
498 """
497 """
499 pid_file = os.path.join(self.pid_dir, self.name + u'.pid')
498 pid_file = os.path.join(self.pid_dir, self.name + u'.pid')
500 if os.path.isfile(pid_file):
499 if os.path.isfile(pid_file):
501 pid = self.get_pid_from_file()
500 pid = self.get_pid_from_file()
502 if not overwrite:
501 if not overwrite:
503 raise PIDFileError(
502 raise PIDFileError(
504 'The pid file [%s] already exists. \nThis could mean that this '
503 'The pid file [%s] already exists. \nThis could mean that this '
505 'server is already running with [pid=%s].' % (pid_file, pid)
504 'server is already running with [pid=%s].' % (pid_file, pid)
506 )
505 )
507 with open(pid_file, 'w') as f:
506 with open(pid_file, 'w') as f:
508 self.log.info("Creating pid file: %s" % pid_file)
507 self.log.info("Creating pid file: %s" % pid_file)
509 f.write(repr(os.getpid())+'\n')
508 f.write(repr(os.getpid())+'\n')
510
509
511 def remove_pid_file(self):
510 def remove_pid_file(self):
512 """Remove the pid file.
511 """Remove the pid file.
513
512
514 This should be called at shutdown by registering a callback with
513 This should be called at shutdown by registering a callback with
515 :func:`reactor.addSystemEventTrigger`. This needs to return
514 :func:`reactor.addSystemEventTrigger`. This needs to return
516 ``None``.
515 ``None``.
517 """
516 """
518 pid_file = os.path.join(self.pid_dir, self.name + u'.pid')
517 pid_file = os.path.join(self.pid_dir, self.name + u'.pid')
519 if os.path.isfile(pid_file):
518 if os.path.isfile(pid_file):
520 try:
519 try:
521 self.log.info("Removing pid file: %s" % pid_file)
520 self.log.info("Removing pid file: %s" % pid_file)
522 os.remove(pid_file)
521 os.remove(pid_file)
523 except:
522 except:
524 self.log.warn("Error removing the pid file: %s" % pid_file)
523 self.log.warn("Error removing the pid file: %s" % pid_file)
525
524
526 def get_pid_from_file(self):
525 def get_pid_from_file(self):
527 """Get the pid from the pid file.
526 """Get the pid from the pid file.
528
527
529 If the pid file doesn't exist a :exc:`PIDFileError` is raised.
528 If the pid file doesn't exist a :exc:`PIDFileError` is raised.
530 """
529 """
531 pid_file = os.path.join(self.pid_dir, self.name + u'.pid')
530 pid_file = os.path.join(self.pid_dir, self.name + u'.pid')
532 if os.path.isfile(pid_file):
531 if os.path.isfile(pid_file):
533 with open(pid_file, 'r') as f:
532 with open(pid_file, 'r') as f:
534 pid = int(f.read().strip())
533 pid = int(f.read().strip())
535 return pid
534 return pid
536 else:
535 else:
537 raise PIDFileError('pid file not found: %s' % pid_file)
536 raise PIDFileError('pid file not found: %s' % pid_file)
538
537
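A hedged sketch of the pid-file lifecycle; `app` stands in for any concrete ApplicationWithClusterDir subclass that has run pre_construct:

    app.write_pid_file()           # raises PIDFileError if the file already exists
    pid = app.get_pid_from_file()  # int read back from <pid_dir>/<name>.pid
    app.remove_pid_file()          # logs a warning instead of raising on failure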
@@ -1,115 +1,115 b''
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 """The IPython Controller with 0MQ
2 """The IPython Controller with 0MQ
3 This is a collection of one Hub and several Schedulers.
3 This is a collection of one Hub and several Schedulers.
4 """
4 """
5 #-----------------------------------------------------------------------------
5 #-----------------------------------------------------------------------------
6 # Copyright (C) 2010 The IPython Development Team
6 # Copyright (C) 2010 The IPython Development Team
7 #
7 #
8 # Distributed under the terms of the BSD License. The full license is in
8 # Distributed under the terms of the BSD License. The full license is in
9 # the file COPYING, distributed as part of this software.
9 # the file COPYING, distributed as part of this software.
10 #-----------------------------------------------------------------------------
10 #-----------------------------------------------------------------------------
11
11
12 #-----------------------------------------------------------------------------
12 #-----------------------------------------------------------------------------
13 # Imports
13 # Imports
14 #-----------------------------------------------------------------------------
14 #-----------------------------------------------------------------------------
15 from __future__ import print_function
15 from __future__ import print_function
16
16
17 import logging
17 import logging
18 from multiprocessing import Process
18 from multiprocessing import Process
19
19
20 import zmq
20 import zmq
21 from zmq.devices import ProcessMonitoredQueue
21 from zmq.devices import ProcessMonitoredQueue
22 # internal:
22 # internal:
23 from IPython.utils.importstring import import_item
23 from IPython.utils.importstring import import_item
24 from IPython.utils.traitlets import Int, Str, Instance, List, Bool
24 from IPython.utils.traitlets import Int, CStr, Instance, List, Bool
25
25
26 from .entry_point import signal_children
26 from .entry_point import signal_children
27 from .hub import Hub, HubFactory
27 from .hub import Hub, HubFactory
28 from .scheduler import launch_scheduler
28 from .scheduler import launch_scheduler
29
29
30 #-----------------------------------------------------------------------------
30 #-----------------------------------------------------------------------------
31 # Configurable
31 # Configurable
32 #-----------------------------------------------------------------------------
32 #-----------------------------------------------------------------------------
33
33
34
34
35 class ControllerFactory(HubFactory):
35 class ControllerFactory(HubFactory):
36 """Configurable for setting up a Hub and Schedulers."""
36 """Configurable for setting up a Hub and Schedulers."""
37
37
38 usethreads = Bool(False, config=True)
38 usethreads = Bool(False, config=True)
39 # pure-zmq downstream HWM
39 # pure-zmq downstream HWM
40 hwm = Int(0, config=True)
40 hwm = Int(0, config=True)
41
41
42 # internal
42 # internal
43 children = List()
43 children = List()
44 mq_class = Str('zmq.devices.ProcessMonitoredQueue')
44 mq_class = CStr('zmq.devices.ProcessMonitoredQueue')
45
45
46 def _usethreads_changed(self, name, old, new):
46 def _usethreads_changed(self, name, old, new):
47 self.mq_class = 'zmq.devices.%sMonitoredQueue'%('Thread' if new else 'Process')
47 self.mq_class = 'zmq.devices.%sMonitoredQueue'%('Thread' if new else 'Process')
48
48
49 def __init__(self, **kwargs):
49 def __init__(self, **kwargs):
50 super(ControllerFactory, self).__init__(**kwargs)
50 super(ControllerFactory, self).__init__(**kwargs)
51 self.subconstructors.append(self.construct_schedulers)
51 self.subconstructors.append(self.construct_schedulers)
52
52
53 def start(self):
53 def start(self):
54 super(ControllerFactory, self).start()
54 super(ControllerFactory, self).start()
55 child_procs = []
55 child_procs = []
56 for child in self.children:
56 for child in self.children:
57 child.start()
57 child.start()
58 if isinstance(child, ProcessMonitoredQueue):
58 if isinstance(child, ProcessMonitoredQueue):
59 child_procs.append(child.launcher)
59 child_procs.append(child.launcher)
60 elif isinstance(child, Process):
60 elif isinstance(child, Process):
61 child_procs.append(child)
61 child_procs.append(child)
62 if child_procs:
62 if child_procs:
63 signal_children(child_procs)
63 signal_children(child_procs)
64
64
65
65
66 def construct_schedulers(self):
66 def construct_schedulers(self):
67 children = self.children
67 children = self.children
68 mq = import_item(self.mq_class)
68 mq = import_item(self.mq_class)
69
69
70 maybe_inproc = 'inproc://monitor' if self.usethreads else self.monitor_url
70 maybe_inproc = 'inproc://monitor' if self.usethreads else self.monitor_url
71 # IOPub relay (in a Process)
71 # IOPub relay (in a Process)
72 q = mq(zmq.PUB, zmq.SUB, zmq.PUB, 'N/A','iopub')
72 q = mq(zmq.PUB, zmq.SUB, zmq.PUB, 'N/A','iopub')
73 q.bind_in(self.client_info['iopub'])
73 q.bind_in(self.client_info['iopub'])
74 q.bind_out(self.engine_info['iopub'])
74 q.bind_out(self.engine_info['iopub'])
75 q.setsockopt_out(zmq.SUBSCRIBE, '')
75 q.setsockopt_out(zmq.SUBSCRIBE, '')
76 q.connect_mon(maybe_inproc)
76 q.connect_mon(maybe_inproc)
77 q.daemon=True
77 q.daemon=True
78 children.append(q)
78 children.append(q)
79
79
80 # Multiplexer Queue (in a Process)
80 # Multiplexer Queue (in a Process)
81 q = mq(zmq.XREP, zmq.XREP, zmq.PUB, 'in', 'out')
81 q = mq(zmq.XREP, zmq.XREP, zmq.PUB, 'in', 'out')
82 q.bind_in(self.client_info['mux'])
82 q.bind_in(self.client_info['mux'])
83 q.bind_out(self.engine_info['mux'])
83 q.bind_out(self.engine_info['mux'])
84 q.connect_mon(maybe_inproc)
84 q.connect_mon(maybe_inproc)
85 q.daemon=True
85 q.daemon=True
86 children.append(q)
86 children.append(q)
87
87
88 # Control Queue (in a Process)
88 # Control Queue (in a Process)
89 q = mq(zmq.XREP, zmq.XREP, zmq.PUB, 'incontrol', 'outcontrol')
89 q = mq(zmq.XREP, zmq.XREP, zmq.PUB, 'incontrol', 'outcontrol')
90 q.bind_in(self.client_info['control'])
90 q.bind_in(self.client_info['control'])
91 q.bind_out(self.engine_info['control'])
91 q.bind_out(self.engine_info['control'])
92 q.connect_mon(maybe_inproc)
92 q.connect_mon(maybe_inproc)
93 q.daemon=True
93 q.daemon=True
94 children.append(q)
94 children.append(q)
95 # Task Queue (in a Process)
95 # Task Queue (in a Process)
96 if self.scheme == 'pure':
96 if self.scheme == 'pure':
97 self.log.warn("task::using pure XREQ Task scheduler")
97 self.log.warn("task::using pure XREQ Task scheduler")
98 q = mq(zmq.XREP, zmq.XREQ, zmq.PUB, 'intask', 'outtask')
98 q = mq(zmq.XREP, zmq.XREQ, zmq.PUB, 'intask', 'outtask')
99 q.setsockopt_out(zmq.HWM, self.hwm)
99 q.setsockopt_out(zmq.HWM, self.hwm)
100 q.bind_in(self.client_info['task'][1])
100 q.bind_in(self.client_info['task'][1])
101 q.bind_out(self.engine_info['task'])
101 q.bind_out(self.engine_info['task'])
102 q.connect_mon(maybe_inproc)
102 q.connect_mon(maybe_inproc)
103 q.daemon=True
103 q.daemon=True
104 children.append(q)
104 children.append(q)
105 elif self.scheme == 'none':
105 elif self.scheme == 'none':
106 self.log.warn("task::using no Task scheduler")
106 self.log.warn("task::using no Task scheduler")
107
107
108 else:
108 else:
109 self.log.info("task::using Python %s Task scheduler"%self.scheme)
109 self.log.info("task::using Python %s Task scheduler"%self.scheme)
110 sargs = (self.client_info['task'][1], self.engine_info['task'], self.monitor_url, self.client_info['notification'])
110 sargs = (self.client_info['task'][1], self.engine_info['task'], self.monitor_url, self.client_info['notification'])
111 kwargs = dict(scheme=self.scheme,logname=self.log.name, loglevel=self.log.level, config=self.config)
111 kwargs = dict(scheme=self.scheme,logname=self.log.name, loglevel=self.log.level, config=self.config)
112 q = Process(target=launch_scheduler, args=sargs, kwargs=kwargs)
112 q = Process(target=launch_scheduler, args=sargs, kwargs=kwargs)
113 q.daemon=True
113 q.daemon=True
114 children.append(q)
114 children.append(q)
115
115
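Because `mq_class` is a dotted import string, switching between thread and process queues is a one-string change that `import_item` resolves at construction time. The mechanism in isolation:

    from IPython.utils.importstring import import_item
    usethreads = True  # illustrative toggle
    mq = import_item('zmq.devices.%sMonitoredQueue'
                     % ('Thread' if usethreads else 'Process'))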
@@ -1,111 +1,153 b''
1 """Dependency utilities"""
1 """Dependency utilities"""
2
2
3 from IPython.external.decorator import decorator
3 from IPython.external.decorator import decorator
4
4
5 from .asyncresult import AsyncResult
5 from .asyncresult import AsyncResult
6 from .error import UnmetDependency
6 from .error import UnmetDependency
7
7
8
8
9 class depend(object):
9 class depend(object):
10 """Dependency decorator, for use with tasks."""
10 """Dependency decorator, for use with tasks.
11
12 `@depend` lets you define a function for engine dependencies
13 just like you use `apply` for tasks.
14
15
16 Examples
17 --------
18 ::
19
20 @depend(df, a,b, c=5)
21 def f(m,n,p):
22     pass
23 view.apply(f, 1,2,3)
24
25 will call df(a,b,c=5) on the engine, and if it returns False or
26 raises an UnmetDependency error, then the task will not be run
27 and another engine will be tried.
28 """
11 def __init__(self, f, *args, **kwargs):
29 def __init__(self, f, *args, **kwargs):
12 self.f = f
30 self.f = f
13 self.args = args
31 self.args = args
14 self.kwargs = kwargs
32 self.kwargs = kwargs
15
33
16 def __call__(self, f):
34 def __call__(self, f):
17 return dependent(f, self.f, *self.args, **self.kwargs)
35 return dependent(f, self.f, *self.args, **self.kwargs)
18
36
19 class dependent(object):
37 class dependent(object):
20 """A function that depends on another function.
38 """A function that depends on another function.
21 This is implemented as an object rather than a closure, because
39 This is implemented as an object rather than a closure, because
22 the closures created by traditional decorators are not picklable.
40 the closures created by traditional decorators are not picklable.
23 """
41 """
24
42
25 def __init__(self, f, df, *dargs, **dkwargs):
43 def __init__(self, f, df, *dargs, **dkwargs):
26 self.f = f
44 self.f = f
27 self.func_name = getattr(f, '__name__', 'f')
45 self.func_name = getattr(f, '__name__', 'f')
28 self.df = df
46 self.df = df
29 self.dargs = dargs
47 self.dargs = dargs
30 self.dkwargs = dkwargs
48 self.dkwargs = dkwargs
31
49
32 def __call__(self, *args, **kwargs):
50 def __call__(self, *args, **kwargs):
33 if self.df(*self.dargs, **self.dkwargs) is False:
51 if self.df(*self.dargs, **self.dkwargs) is False:
34 raise UnmetDependency()
52 raise UnmetDependency()
35 return self.f(*args, **kwargs)
53 return self.f(*args, **kwargs)
36
54
37 @property
55 @property
38 def __name__(self):
56 def __name__(self):
39 return self.func_name
57 return self.func_name
40
58
41 def _require(*names):
59 def _require(*names):
60 """Helper for @require decorator."""
42 for name in names:
61 for name in names:
43 try:
62 try:
44 __import__(name)
63 __import__(name)
45 except ImportError:
64 except ImportError:
46 return False
65 return False
47 return True
66 return True
48
67
49 def require(*names):
68 def require(*names):
69 """Simple decorator for requiring names to be importable.
70
71 Examples
72 --------
73
74 In [1]: @require('numpy')
75 ...: def norm(a):
76 ...: import numpy
77 ...: return numpy.linalg.norm(a,2)
78 """
50 return depend(_require, *names)
79 return depend(_require, *names)
51
80
52 class Dependency(set):
81 class Dependency(set):
53 """An object for representing a set of msg_id dependencies.
82 """An object for representing a set of msg_id dependencies.
54
83
55 Subclassed from set()."""
84 Subclassed from set().
85
86 Parameters
87 ----------
88 dependencies: list/set of msg_ids or AsyncResult objects or output of Dependency.as_dict()
89 The msg_ids to depend on
90 all : bool [default True]
91 Whether the dependency should be considered met when *all* depending tasks have completed
92 or only when *any* have been completed.
93 success_only : bool [default True]
94 Whether to consider only successes for Dependencies, or consider failures as well.
95 If `all=success_only=True`, then this task will fail with an ImpossibleDependency
96 as soon as the first depended-upon task fails.
97 """
56
98
57 all=True
99 all=True
58 success_only=True
100 success_only=True
59
101
60 def __init__(self, dependencies=[], all=True, success_only=True):
102 def __init__(self, dependencies=[], all=True, success_only=True):
61 if isinstance(dependencies, dict):
103 if isinstance(dependencies, dict):
62 # load from dict
104 # load from dict
63 all = dependencies.get('all', True)
105 all = dependencies.get('all', True)
64 success_only = dependencies.get('success_only', success_only)
106 success_only = dependencies.get('success_only', success_only)
65 dependencies = dependencies.get('dependencies', [])
107 dependencies = dependencies.get('dependencies', [])
66 ids = []
108 ids = []
67 if isinstance(dependencies, AsyncResult):
109 if isinstance(dependencies, AsyncResult):
68 ids.extend(dependencies.msg_ids)  # the instance's ids, not the class attribute
110 ids.extend(dependencies.msg_ids)  # the instance's ids, not the class attribute
69 else:
111 else:
70 for d in dependencies:
112 for d in dependencies:
71 if isinstance(d, basestring):
113 if isinstance(d, basestring):
72 ids.append(d)
114 ids.append(d)
73 elif isinstance(d, AsyncResult):
115 elif isinstance(d, AsyncResult):
74 ids.extend(d.msg_ids)
116 ids.extend(d.msg_ids)
75 else:
117 else:
76 raise TypeError("invalid dependency type: %r"%type(d))
118 raise TypeError("invalid dependency type: %r"%type(d))
77 set.__init__(self, ids)
119 set.__init__(self, ids)
78 self.all = all
120 self.all = all
79 self.success_only=success_only
121 self.success_only=success_only
80
122
81 def check(self, completed, failed=None):
123 def check(self, completed, failed=None):
82 if failed is not None and not self.success_only:
124 if failed is not None and not self.success_only:
83 completed = completed.union(failed)
125 completed = completed.union(failed)
84 if len(self) == 0:
126 if len(self) == 0:
85 return True
127 return True
86 if self.all:
128 if self.all:
87 return self.issubset(completed)
129 return self.issubset(completed)
88 else:
130 else:
89 return not self.isdisjoint(completed)
131 return not self.isdisjoint(completed)
90
132
91 def unreachable(self, failed):
133 def unreachable(self, failed):
92 if len(self) == 0 or len(failed) == 0 or not self.success_only:
134 if len(self) == 0 or len(failed) == 0 or not self.success_only:
93 return False
135 return False
94 # print self, self.success_only, self.all, failed
136 # print self, self.success_only, self.all, failed
95 if self.all:
137 if self.all:
96 return not self.isdisjoint(failed)
138 return not self.isdisjoint(failed)
97 else:
139 else:
98 return self.issubset(failed)
140 return self.issubset(failed)
99
141
100
142
101 def as_dict(self):
143 def as_dict(self):
102 """Represent this dependency as a dict. For json compatibility."""
144 """Represent this dependency as a dict. For json compatibility."""
103 return dict(
145 return dict(
104 dependencies=list(self),
146 dependencies=list(self),
105 all=self.all,
147 all=self.all,
106 success_only=self.success_only,
148 success_only=self.success_only,
107 )
149 )
108
150
109
151
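A small sketch of the all/any semantics of Dependency.check, with made-up msg_ids:

    dep = Dependency(['a', 'b'], all=True)
    dep.check(set(['a']))        # False: 'b' is still outstanding
    dep.check(set(['a', 'b']))   # True

    any_dep = Dependency(['a', 'b'], all=False)
    any_dep.check(set(['b']))    # True: any one dependency suffices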
110 __all__ = ['depend', 'require', 'dependent', 'Dependency']
152 __all__ = ['depend', 'require', 'dependent', 'Dependency']
111
153
@@ -1,152 +1,152 b''
1 """A Task logger that presents our DB interface,
1 """A Task logger that presents our DB interface,
2 but exists entirely in memory and is implemented with dicts.
2 but exists entirely in memory and is implemented with dicts.
3
3
4 TaskRecords are dicts of the form:
4 TaskRecords are dicts of the form:
5 {
5 {
6 'msg_id' : str(uuid),
6 'msg_id' : str(uuid),
7 'client_uuid' : str(uuid),
7 'client_uuid' : str(uuid),
8 'engine_uuid' : str(uuid) or None,
8 'engine_uuid' : str(uuid) or None,
9 'header' : dict(header),
9 'header' : dict(header),
10 'content': dict(content),
10 'content': dict(content),
11 'buffers': list(buffers),
11 'buffers': list(buffers),
12 'submitted': datetime,
12 'submitted': datetime,
13 'started': datetime or None,
13 'started': datetime or None,
14 'completed': datetime or None,
14 'completed': datetime or None,
15 'resubmitted': datetime or None,
15 'resubmitted': datetime or None,
16 'result_header' : dict(header) or None,
16 'result_header' : dict(header) or None,
17 'result_content' : dict(content) or None,
17 'result_content' : dict(content) or None,
18 'result_buffers' : list(buffers) or None,
18 'result_buffers' : list(buffers) or None,
19 }
19 }
20 With this info, many of the special categories of tasks can be defined by query:
20 With this info, many of the special categories of tasks can be defined by query:
21
21
22 pending: completed is None
22 pending: completed is None
23 client's outstanding: client_uuid = uuid && completed is None
23 client's outstanding: client_uuid = uuid && completed is None
24 MIA: arrived is None (and completed is None)
24 MIA: arrived is None (and completed is None)
25 etc.
25 etc.
26
26
27 EngineRecords are dicts of the form:
27 EngineRecords are dicts of the form:
28 {
28 {
29 'eid' : int(id),
29 'eid' : int(id),
30 'uuid': str(uuid)
30 'uuid': str(uuid)
31 }
31 }
32 This may be extended, but this is all that is stored for now.
32 This may be extended, but this is all that is stored for now.
33
33
34 We support a subset of mongodb operators:
34 We support a subset of mongodb operators:
35 $lt,$gt,$lte,$gte,$ne,$in,$nin,$all,$mod,$exists
35 $lt,$gt,$lte,$gte,$ne,$in,$nin,$all,$mod,$exists
36 """
36 """
37 #-----------------------------------------------------------------------------
37 #-----------------------------------------------------------------------------
38 # Copyright (C) 2010 The IPython Development Team
38 # Copyright (C) 2010 The IPython Development Team
39 #
39 #
40 # Distributed under the terms of the BSD License. The full license is in
40 # Distributed under the terms of the BSD License. The full license is in
41 # the file COPYING, distributed as part of this software.
41 # the file COPYING, distributed as part of this software.
42 #-----------------------------------------------------------------------------
42 #-----------------------------------------------------------------------------
43
43
44
44
45 from datetime import datetime
45 from datetime import datetime
46
46
47 filters = {
47 filters = {
48 '$eq' : lambda a,b: a==b,
49 '$lt' : lambda a,b: a < b,
48 '$lt' : lambda a,b: a < b,
50 '$gt' : lambda a,b: a > b,
49 '$gt' : lambda a,b: a > b,
50 '$eq' : lambda a,b: a == b,
51 '$ne' : lambda a,b: a != b,
51 '$lte': lambda a,b: a <= b,
52 '$lte': lambda a,b: a <= b,
52 '$gte': lambda a,b: a >= b,
53 '$gte': lambda a,b: a >= b,
53 '$ne' : lambda a,b: not a==b,
54 '$in' : lambda a,b: a in b,
54 '$in' : lambda a,b: a in b,
55 '$nin': lambda a,b: a not in b,
55 '$nin': lambda a,b: a not in b,
56 '$all' : lambda a,b: all([ a in bb for bb in b ]),
56 '$all': lambda a,b: all([ a in bb for bb in b ]),
57 '$mod': lambda a,b: a%b[0] == b[1],
57 '$mod': lambda a,b: a%b[0] == b[1],
58 '$exists' : lambda a,b: (b and a is not None) or (a is None and not b)
58 '$exists' : lambda a,b: (b and a is not None) or (a is None and not b)
59 }
59 }
60
60
61
61
62 class CompositeFilter(object):
62 class CompositeFilter(object):
63 """Composite filter for matching multiple properties."""
63 """Composite filter for matching multiple properties."""
64
64
65 def __init__(self, dikt):
65 def __init__(self, dikt):
66 self.tests = []
66 self.tests = []
67 self.values = []
67 self.values = []
68 for key, value in dikt.iteritems():
68 for key, value in dikt.iteritems():
69 self.tests.append(filters[key])
69 self.tests.append(filters[key])
70 self.values.append(value)
70 self.values.append(value)
71
71
72 def __call__(self, value):
72 def __call__(self, value):
73 for test,check in zip(self.tests, self.values):
73 for test,check in zip(self.tests, self.values):
74 if not test(value, check):
74 if not test(value, check):
75 return False
75 return False
76 return True
76 return True
77
77
78 class BaseDB(object):
78 class BaseDB(object):
79 """Empty Parent class so traitlets work on DB."""
79 """Empty Parent class so traitlets work on DB."""
80 pass
80 pass
81
81
82 class DictDB(BaseDB):
82 class DictDB(BaseDB):
83 """Basic in-memory dict-based object for saving Task Records.
83 """Basic in-memory dict-based object for saving Task Records.
84
84
85 This is the first object to present the DB interface
85 This is the first object to present the DB interface
86 for logging tasks, implemented entirely in memory.
86 for logging tasks, implemented entirely in memory.
87
87
88 The interface is based on MongoDB, so adding a MongoDB
88 The interface is based on MongoDB, so adding a MongoDB
89 backend should be straightforward.
89 backend should be straightforward.
90 """
90 """
91 _records = None
91 _records = None
92
92
93 def __init__(self, *args, **kwargs):
93 def __init__(self, *args, **kwargs):
94 self._records = dict()
94 self._records = dict()
95
95
96 def _match_one(self, rec, tests):
96 def _match_one(self, rec, tests):
97 """Check if a specific record matches tests."""
97 """Check if a specific record matches tests."""
98 for key,test in tests.iteritems():
98 for key,test in tests.iteritems():
99 if not test(rec.get(key, None)):
99 if not test(rec.get(key, None)):
100 return False
100 return False
101 return True
101 return True
102
102
103 def _match(self, check, id_only=True):
103 def _match(self, check, id_only=True):
104 """Find all the matches for a check dict."""
104 """Find all the matches for a check dict."""
105 matches = {}
105 matches = {}
106 tests = {}
106 tests = {}
107 for k,v in check.iteritems():
107 for k,v in check.iteritems():
108 if isinstance(v, dict):
108 if isinstance(v, dict):
109 tests[k] = CompositeFilter(v)
109 tests[k] = CompositeFilter(v)
110 else:
110 else:
111 tests[k] = lambda o, v=v: o==v # default arg binds v now, avoiding the late-binding closure bug
111 tests[k] = lambda o, v=v: o==v # default arg binds v now, avoiding the late-binding closure bug
112
112
113 for msg_id, rec in self._records.iteritems():
113 for msg_id, rec in self._records.iteritems():
114 if self._match_one(rec, tests):
114 if self._match_one(rec, tests):
115 matches[msg_id] = rec
115 matches[msg_id] = rec
116 if id_only:
116 if id_only:
117 return matches.keys()
117 return matches.keys()
118 else:
118 else:
119 return matches
119 return matches
120
120
121
121
122 def add_record(self, msg_id, rec):
122 def add_record(self, msg_id, rec):
123 """Add a new Task Record, by msg_id."""
123 """Add a new Task Record, by msg_id."""
124 if msg_id in self._records:
124 if msg_id in self._records:
125 raise KeyError("Already have msg_id %r"%(msg_id))
125 raise KeyError("Already have msg_id %r"%(msg_id))
126 self._records[msg_id] = rec
126 self._records[msg_id] = rec
127
127
128 def get_record(self, msg_id):
128 def get_record(self, msg_id):
129 """Get a specific Task Record, by msg_id."""
129 """Get a specific Task Record, by msg_id."""
130 if msg_id not in self._records:
130 if msg_id not in self._records:
131 raise KeyError("No such msg_id %r"%(msg_id))
131 raise KeyError("No such msg_id %r"%(msg_id))
132 return self._records[msg_id]
132 return self._records[msg_id]
133
133
134 def update_record(self, msg_id, rec):
134 def update_record(self, msg_id, rec):
135 """Update the data in an existing record."""
135 """Update the data in an existing record."""
136 self._records[msg_id].update(rec)
136 self._records[msg_id].update(rec)
137
137
138 def drop_matching_records(self, check):
138 def drop_matching_records(self, check):
139 """Remove a record from the DB."""
139 """Remove a record from the DB."""
140 matches = self._match(check, id_only=True)
140 matches = self._match(check, id_only=True)
141 for m in matches:
141 for m in matches:
142 del self._records[m]
142 del self._records[m]
143
143
144 def drop_record(self, msg_id):
144 def drop_record(self, msg_id):
145 """Remove a record from the DB."""
145 """Remove a record from the DB."""
146 del self._records[msg_id]
146 del self._records[msg_id]
147
147
148
148
149 def find_records(self, check, id_only=False):
149 def find_records(self, check, id_only=False):
150 """Find records matching a query dict."""
150 """Find records matching a query dict."""
151 matches = self._match(check, id_only)
151 matches = self._match(check, id_only)
152 return matches
\ No newline at end of file
152 return matches
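The query interface above accepts either plain values, tested by simple equality, or operator dicts routed through CompositeFilter. A minimal sketch, with record contents invented for illustration and the import path taken from the db_class default used elsewhere in this changeset:

    from IPython.zmq.parallel.dictdb import DictDB

    db = DictDB()
    db.add_record('msg-1', {'engine_uuid': 'abc', 'completed': None})
    db.add_record('msg-2', {'engine_uuid': 'def', 'completed': None})

    # plain value: simple equality test
    print(db.find_records({'engine_uuid': 'abc'}, id_only=True))  # ['msg-1']

    # operator dict: handled by CompositeFilter via the filters table
    print(db.find_records({'engine_uuid': {'$in': ['abc', 'xyz']}},
                          id_only=True))                          # ['msg-1']

    # 'pending' in the docstring's sense: completed is None
    print(db.find_records({'completed': None}, id_only=True))     # both msg_ids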
@@ -1,147 +1,139 b''
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 """A simple engine that talks to a controller over 0MQ.
2 """A simple engine that talks to a controller over 0MQ.
3 It handles registration, etc., and launches a kernel
3 It handles registration, etc., and launches a kernel
4 connected to the Controller's queue(s).
4 connected to the Controller's Schedulers.
5 """
5 """
6 from __future__ import print_function
6 from __future__ import print_function
7
7
8 import logging
9 import sys
8 import sys
10 import time
9 import time
11 import uuid
12 from pprint import pprint
13
10
14 import zmq
11 import zmq
15 from zmq.eventloop import ioloop, zmqstream
12 from zmq.eventloop import ioloop, zmqstream
16
13
17 # internal
14 # internal
18 from IPython.config.configurable import Configurable
19 from IPython.utils.traitlets import Instance, Str, Dict, Int, Type, CFloat
15 from IPython.utils.traitlets import Instance, Str, Dict, Int, Type, CFloat
20 # from IPython.utils.localinterfaces import LOCALHOST
16 # from IPython.utils.localinterfaces import LOCALHOST
21
17
22 from . import heartmonitor
18 from . import heartmonitor
23 from .factory import RegistrationFactory
19 from .factory import RegistrationFactory
24 from .streamkernel import Kernel
20 from .streamkernel import Kernel
25 from .streamsession import Message
21 from .streamsession import Message
26 from .util import disambiguate_url
22 from .util import disambiguate_url
27
23
28 def printer(*msg):
29 # print (self.log.handlers, file=sys.__stdout__)
30 self.log.info(str(msg))
31
32 class EngineFactory(RegistrationFactory):
24 class EngineFactory(RegistrationFactory):
33 """IPython engine"""
25 """IPython engine"""
34
26
35 # configurables:
27 # configurables:
36 user_ns=Dict(config=True)
28 user_ns=Dict(config=True)
37 out_stream_factory=Type('IPython.zmq.iostream.OutStream', config=True)
29 out_stream_factory=Type('IPython.zmq.iostream.OutStream', config=True)
38 display_hook_factory=Type('IPython.zmq.displayhook.DisplayHook', config=True)
30 display_hook_factory=Type('IPython.zmq.displayhook.DisplayHook', config=True)
39 location=Str(config=True)
31 location=Str(config=True)
40 timeout=CFloat(2,config=True)
32 timeout=CFloat(2,config=True)
41
33
42 # not configurable:
34 # not configurable:
43 id=Int(allow_none=True)
35 id=Int(allow_none=True)
44 registrar=Instance('zmq.eventloop.zmqstream.ZMQStream')
36 registrar=Instance('zmq.eventloop.zmqstream.ZMQStream')
45 kernel=Instance(Kernel)
37 kernel=Instance(Kernel)
46
38
47
39
48 def __init__(self, **kwargs):
40 def __init__(self, **kwargs):
49 super(EngineFactory, self).__init__(**kwargs)
41 super(EngineFactory, self).__init__(**kwargs)
50 ctx = self.context
42 ctx = self.context
51
43
52 reg = ctx.socket(zmq.PAIR)
44 reg = ctx.socket(zmq.PAIR)
53 reg.setsockopt(zmq.IDENTITY, self.ident)
45 reg.setsockopt(zmq.IDENTITY, self.ident)
54 reg.connect(self.url)
46 reg.connect(self.url)
55 self.registrar = zmqstream.ZMQStream(reg, self.loop)
47 self.registrar = zmqstream.ZMQStream(reg, self.loop)
56
48
57 def register(self):
49 def register(self):
58 """send the registration_request"""
50 """send the registration_request"""
59
51
60 self.log.info("registering")
52 self.log.info("registering")
61 content = dict(queue=self.ident, heartbeat=self.ident, control=self.ident)
53 content = dict(queue=self.ident, heartbeat=self.ident, control=self.ident)
62 self.registrar.on_recv(self.complete_registration)
54 self.registrar.on_recv(self.complete_registration)
63 # print (self.session.key)
55 # print (self.session.key)
64 self.session.send(self.registrar, "registration_request",content=content)
56 self.session.send(self.registrar, "registration_request",content=content)
65
57
66 def complete_registration(self, msg):
58 def complete_registration(self, msg):
67 # print msg
59 # print msg
68 self._abort_dc.stop()
60 self._abort_dc.stop()
69 ctx = self.context
61 ctx = self.context
70 loop = self.loop
62 loop = self.loop
71 identity = self.ident
63 identity = self.ident
72 print (identity)
64 print (identity)
73
65
74 idents,msg = self.session.feed_identities(msg)
66 idents,msg = self.session.feed_identities(msg)
75 msg = Message(self.session.unpack_message(msg))
67 msg = Message(self.session.unpack_message(msg))
76
68
77 if msg.content.status == 'ok':
69 if msg.content.status == 'ok':
78 self.id = int(msg.content.id)
70 self.id = int(msg.content.id)
79
71
80 # create Shell Streams (MUX, Task, etc.):
72 # create Shell Streams (MUX, Task, etc.):
81 queue_addr = msg.content.mux
73 queue_addr = msg.content.mux
82 shell_addrs = [ str(queue_addr) ]
74 shell_addrs = [ str(queue_addr) ]
83 task_addr = msg.content.task
75 task_addr = msg.content.task
84 if task_addr:
76 if task_addr:
85 shell_addrs.append(str(task_addr))
77 shell_addrs.append(str(task_addr))
86 shell_streams = []
78 shell_streams = []
87 for addr in shell_addrs:
79 for addr in shell_addrs:
88 stream = zmqstream.ZMQStream(ctx.socket(zmq.PAIR), loop)
80 stream = zmqstream.ZMQStream(ctx.socket(zmq.PAIR), loop)
89 stream.setsockopt(zmq.IDENTITY, identity)
81 stream.setsockopt(zmq.IDENTITY, identity)
90 stream.connect(disambiguate_url(addr, self.location))
82 stream.connect(disambiguate_url(addr, self.location))
91 shell_streams.append(stream)
83 shell_streams.append(stream)
92
84
93 # control stream:
85 # control stream:
94 control_addr = str(msg.content.control)
86 control_addr = str(msg.content.control)
95 control_stream = zmqstream.ZMQStream(ctx.socket(zmq.PAIR), loop)
87 control_stream = zmqstream.ZMQStream(ctx.socket(zmq.PAIR), loop)
96 control_stream.setsockopt(zmq.IDENTITY, identity)
88 control_stream.setsockopt(zmq.IDENTITY, identity)
97 control_stream.connect(disambiguate_url(control_addr, self.location))
89 control_stream.connect(disambiguate_url(control_addr, self.location))
98
90
99 # create iopub stream:
91 # create iopub stream:
100 iopub_addr = msg.content.iopub
92 iopub_addr = msg.content.iopub
101 iopub_stream = zmqstream.ZMQStream(ctx.socket(zmq.PUB), loop)
93 iopub_stream = zmqstream.ZMQStream(ctx.socket(zmq.PUB), loop)
102 iopub_stream.setsockopt(zmq.IDENTITY, identity)
94 iopub_stream.setsockopt(zmq.IDENTITY, identity)
103 iopub_stream.connect(disambiguate_url(iopub_addr, self.location))
95 iopub_stream.connect(disambiguate_url(iopub_addr, self.location))
104
96
105 # launch heartbeat
97 # launch heartbeat
106 hb_addrs = msg.content.heartbeat
98 hb_addrs = msg.content.heartbeat
107 # print (hb_addrs)
99 # print (hb_addrs)
108
100
109 # # Redirect input streams and set a display hook.
101 # # Redirect input streams and set a display hook.
110 if self.out_stream_factory:
102 if self.out_stream_factory:
111 sys.stdout = self.out_stream_factory(self.session, iopub_stream, u'stdout')
103 sys.stdout = self.out_stream_factory(self.session, iopub_stream, u'stdout')
112 sys.stdout.topic = 'engine.%i.stdout'%self.id
104 sys.stdout.topic = 'engine.%i.stdout'%self.id
113 sys.stderr = self.out_stream_factory(self.session, iopub_stream, u'stderr')
105 sys.stderr = self.out_stream_factory(self.session, iopub_stream, u'stderr')
114 sys.stderr.topic = 'engine.%i.stderr'%self.id
106 sys.stderr.topic = 'engine.%i.stderr'%self.id
115 if self.display_hook_factory:
107 if self.display_hook_factory:
116 sys.displayhook = self.display_hook_factory(self.session, iopub_stream)
108 sys.displayhook = self.display_hook_factory(self.session, iopub_stream)
117 sys.displayhook.topic = 'engine.%i.pyout'%self.id
109 sys.displayhook.topic = 'engine.%i.pyout'%self.id
118
110
119 self.kernel = Kernel(config=self.config, int_id=self.id, ident=self.ident, session=self.session,
111 self.kernel = Kernel(config=self.config, int_id=self.id, ident=self.ident, session=self.session,
120 control_stream=control_stream, shell_streams=shell_streams, iopub_stream=iopub_stream,
112 control_stream=control_stream, shell_streams=shell_streams, iopub_stream=iopub_stream,
121 loop=loop, user_ns = self.user_ns, logname=self.log.name)
113 loop=loop, user_ns = self.user_ns, logname=self.log.name)
122 self.kernel.start()
114 self.kernel.start()
123 hb_addrs = [ disambiguate_url(addr, self.location) for addr in hb_addrs ]
115 hb_addrs = [ disambiguate_url(addr, self.location) for addr in hb_addrs ]
124 heart = heartmonitor.Heart(*map(str, hb_addrs), heart_id=identity)
116 heart = heartmonitor.Heart(*map(str, hb_addrs), heart_id=identity)
125 # ioloop.DelayedCallback(heart.start, 1000, self.loop).start()
117 # ioloop.DelayedCallback(heart.start, 1000, self.loop).start()
126 heart.start()
118 heart.start()
127
119
128
120
129 else:
121 else:
130 self.log.fatal("Registration Failed: %s"%msg)
122 self.log.fatal("Registration Failed: %s"%msg)
131 raise Exception("Registration Failed: %s"%msg)
123 raise Exception("Registration Failed: %s"%msg)
132
124
133 self.log.info("Completed registration with id %i"%self.id)
125 self.log.info("Completed registration with id %i"%self.id)
134
126
135
127
136 def abort(self):
128 def abort(self):
137 self.log.fatal("Registration timed out")
129 self.log.fatal("Registration timed out")
138 self.session.send(self.registrar, "unregistration_request", content=dict(id=self.id))
130 self.session.send(self.registrar, "unregistration_request", content=dict(id=self.id))
139 time.sleep(1)
131 time.sleep(1)
140 sys.exit(255)
132 sys.exit(255)
141
133
142 def start(self):
134 def start(self):
143 dc = ioloop.DelayedCallback(self.register, 0, self.loop)
135 dc = ioloop.DelayedCallback(self.register, 0, self.loop)
144 dc.start()
136 dc.start()
145 self._abort_dc = ioloop.DelayedCallback(self.abort, self.timeout*1000, self.loop)
137 self._abort_dc = ioloop.DelayedCallback(self.abort, self.timeout*1000, self.loop)
146 self._abort_dc.start()
138 self._abort_dc.start()
147
139
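start() above schedules two callbacks: register() fires immediately, and abort() fires after `timeout` seconds unless complete_registration() arrives first and cancels it. A hedged sketch of driving the factory by hand, assuming a Hub is already listening on an invented registration url (the launcher scripts normally arrange all of this):

    from zmq.eventloop.ioloop import IOLoop
    from IPython.zmq.parallel.engine import EngineFactory  # assumed module path

    ef = EngineFactory(url='tcp://127.0.0.1:10101', timeout=5.0)
    ef.start()                 # queues register(); abort() fires if no reply in 5s
    IOLoop.instance().start()  # complete_registration() runs when the Hub answers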
@@ -1,292 +1,313 b''
1 # encoding: utf-8
1 # encoding: utf-8
2
2
3 """Classes and functions for kernel related errors and exceptions."""
3 """Classes and functions for kernel related errors and exceptions."""
4 from __future__ import print_function
4 from __future__ import print_function
5
5
6 import sys
7 import traceback
8
6 __docformat__ = "restructuredtext en"
9 __docformat__ = "restructuredtext en"
7
10
8 # Tell nose to skip this module
11 # Tell nose to skip this module
9 __test__ = {}
12 __test__ = {}
10
13
11 #-------------------------------------------------------------------------------
14 #-------------------------------------------------------------------------------
12 # Copyright (C) 2008 The IPython Development Team
15 # Copyright (C) 2008 The IPython Development Team
13 #
16 #
14 # Distributed under the terms of the BSD License. The full license is in
17 # Distributed under the terms of the BSD License. The full license is in
15 # the file COPYING, distributed as part of this software.
18 # the file COPYING, distributed as part of this software.
16 #-------------------------------------------------------------------------------
19 #-------------------------------------------------------------------------------
17
20
18 #-------------------------------------------------------------------------------
21 #-------------------------------------------------------------------------------
19 # Error classes
22 # Error classes
20 #-------------------------------------------------------------------------------
23 #-------------------------------------------------------------------------------
21 class IPythonError(Exception):
24 class IPythonError(Exception):
22 """Base exception that all of our exceptions inherit from.
25 """Base exception that all of our exceptions inherit from.
23
26
24 This can be raised by code that doesn't have any more specific
27 This can be raised by code that doesn't have any more specific
25 information."""
28 information."""
26
29
27 pass
30 pass
28
31
29 # Exceptions associated with the controller objects
32 # Exceptions associated with the controller objects
30 class ControllerError(IPythonError): pass
33 class ControllerError(IPythonError): pass
31
34
32 class ControllerCreationError(ControllerError): pass
35 class ControllerCreationError(ControllerError): pass
33
36
34
37
35 # Exceptions associated with the Engines
38 # Exceptions associated with the Engines
36 class EngineError(IPythonError): pass
39 class EngineError(IPythonError): pass
37
40
38 class EngineCreationError(EngineError): pass
41 class EngineCreationError(EngineError): pass
39
42
40 class KernelError(IPythonError):
43 class KernelError(IPythonError):
41 pass
44 pass
42
45
43 class NotDefined(KernelError):
46 class NotDefined(KernelError):
44 def __init__(self, name):
47 def __init__(self, name):
45 self.name = name
48 self.name = name
46 self.args = (name,)
49 self.args = (name,)
47
50
48 def __repr__(self):
51 def __repr__(self):
49 return '<NotDefined: %s>' % self.name
52 return '<NotDefined: %s>' % self.name
50
53
51 __str__ = __repr__
54 __str__ = __repr__
52
55
53
56
54 class QueueCleared(KernelError):
57 class QueueCleared(KernelError):
55 pass
58 pass
56
59
57
60
58 class IdInUse(KernelError):
61 class IdInUse(KernelError):
59 pass
62 pass
60
63
61
64
62 class ProtocolError(KernelError):
65 class ProtocolError(KernelError):
63 pass
66 pass
64
67
65
68
66 class ConnectionError(KernelError):
69 class ConnectionError(KernelError):
67 pass
70 pass
68
71
69
72
70 class InvalidEngineID(KernelError):
73 class InvalidEngineID(KernelError):
71 pass
74 pass
72
75
73
76
74 class NoEnginesRegistered(KernelError):
77 class NoEnginesRegistered(KernelError):
75 pass
78 pass
76
79
77
80
78 class InvalidClientID(KernelError):
81 class InvalidClientID(KernelError):
79 pass
82 pass
80
83
81
84
82 class InvalidDeferredID(KernelError):
85 class InvalidDeferredID(KernelError):
83 pass
86 pass
84
87
85
88
86 class SerializationError(KernelError):
89 class SerializationError(KernelError):
87 pass
90 pass
88
91
89
92
90 class MessageSizeError(KernelError):
93 class MessageSizeError(KernelError):
91 pass
94 pass
92
95
93
96
94 class PBMessageSizeError(MessageSizeError):
97 class PBMessageSizeError(MessageSizeError):
95 pass
98 pass
96
99
97
100
98 class ResultNotCompleted(KernelError):
101 class ResultNotCompleted(KernelError):
99 pass
102 pass
100
103
101
104
102 class ResultAlreadyRetrieved(KernelError):
105 class ResultAlreadyRetrieved(KernelError):
103 pass
106 pass
104
107
105 class ClientError(KernelError):
108 class ClientError(KernelError):
106 pass
109 pass
107
110
108
111
109 class TaskAborted(KernelError):
112 class TaskAborted(KernelError):
110 pass
113 pass
111
114
112
115
113 class TaskTimeout(KernelError):
116 class TaskTimeout(KernelError):
114 pass
117 pass
115
118
116
119
117 class NotAPendingResult(KernelError):
120 class NotAPendingResult(KernelError):
118 pass
121 pass
119
122
120
123
121 class UnpickleableException(KernelError):
124 class UnpickleableException(KernelError):
122 pass
125 pass
123
126
124
127
125 class AbortedPendingDeferredError(KernelError):
128 class AbortedPendingDeferredError(KernelError):
126 pass
129 pass
127
130
128
131
129 class InvalidProperty(KernelError):
132 class InvalidProperty(KernelError):
130 pass
133 pass
131
134
132
135
133 class MissingBlockArgument(KernelError):
136 class MissingBlockArgument(KernelError):
134 pass
137 pass
135
138
136
139
137 class StopLocalExecution(KernelError):
140 class StopLocalExecution(KernelError):
138 pass
141 pass
139
142
140
143
141 class SecurityError(KernelError):
144 class SecurityError(KernelError):
142 pass
145 pass
143
146
144
147
145 class FileTimeoutError(KernelError):
148 class FileTimeoutError(KernelError):
146 pass
149 pass
147
150
148 class TimeoutError(KernelError):
151 class TimeoutError(KernelError):
149 pass
152 pass
150
153
151 class UnmetDependency(KernelError):
154 class UnmetDependency(KernelError):
152 pass
155 pass
153
156
154 class ImpossibleDependency(UnmetDependency):
157 class ImpossibleDependency(UnmetDependency):
155 pass
158 pass
156
159
157 class DependencyTimeout(ImpossibleDependency):
160 class DependencyTimeout(ImpossibleDependency):
158 pass
161 pass
159
162
160 class InvalidDependency(ImpossibleDependency):
163 class InvalidDependency(ImpossibleDependency):
161 pass
164 pass
162
165
163 class RemoteError(KernelError):
166 class RemoteError(KernelError):
164 """Error raised elsewhere"""
167 """Error raised elsewhere"""
165 ename=None
168 ename=None
166 evalue=None
169 evalue=None
167 traceback=None
170 traceback=None
168 engine_info=None
171 engine_info=None
169
172
170 def __init__(self, ename, evalue, traceback, engine_info=None):
173 def __init__(self, ename, evalue, traceback, engine_info=None):
171 self.ename=ename
174 self.ename=ename
172 self.evalue=evalue
175 self.evalue=evalue
173 self.traceback=traceback
176 self.traceback=traceback
174 self.engine_info=engine_info or {}
177 self.engine_info=engine_info or {}
175 self.args=(ename, evalue)
178 self.args=(ename, evalue)
176
179
177 def __repr__(self):
180 def __repr__(self):
178 engineid = self.engine_info.get('engine_id', ' ')
181 engineid = self.engine_info.get('engine_id', ' ')
179 return "<Remote[%s]:%s(%s)>"%(engineid, self.ename, self.evalue)
182 return "<Remote[%s]:%s(%s)>"%(engineid, self.ename, self.evalue)
180
183
181 def __str__(self):
184 def __str__(self):
182 sig = "%s(%s)"%(self.ename, self.evalue)
185 sig = "%s(%s)"%(self.ename, self.evalue)
183 if self.traceback:
186 if self.traceback:
184 return sig + '\n' + self.traceback
187 return sig + '\n' + self.traceback
185 else:
188 else:
186 return sig
189 return sig
187
190
188
191
189 class TaskRejectError(KernelError):
192 class TaskRejectError(KernelError):
190 """Exception to raise when a task should be rejected by an engine.
193 """Exception to raise when a task should be rejected by an engine.
191
194
192 This exception can be used to allow a task running on an engine to test
195 This exception can be used to allow a task running on an engine to test
193 if the engine (or the user's namespace on the engine) has the needed
196 if the engine (or the user's namespace on the engine) has the needed
194 task dependencies. If not, the task should raise this exception. For
197 task dependencies. If not, the task should raise this exception. For
195 the task to be retried on another engine, the task should be created
198 the task to be retried on another engine, the task should be created
196 with the `retries` argument > 1.
199 with the `retries` argument > 1.
197
200
198 The advantage of this approach over our older properties system is that
201 The advantage of this approach over our older properties system is that
199 tasks have full access to the user's namespace on the engines and the
202 tasks have full access to the user's namespace on the engines and the
200 properties don't have to be managed or tested by the controller.
203 properties don't have to be managed or tested by the controller.
201 """
204 """
202
205
203
206
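Since the rejection test runs inside the task itself, it can inspect the user's namespace directly. A minimal sketch of a self-rejecting task (the dependency being probed is invented; `retries` is assumed to be set when the task is submitted):

    from IPython.zmq.parallel.error import TaskRejectError

    def task():
        try:
            import numpy
        except ImportError:
            # reject so the scheduler can retry the task on another engine
            raise TaskRejectError("numpy not available on this engine")
        return numpy.__version__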
204 class CompositeError(RemoteError):
207 class CompositeError(RemoteError):
205 """Error for representing possibly multiple errors on engines"""
208 """Error for representing possibly multiple errors on engines"""
206 def __init__(self, message, elist):
209 def __init__(self, message, elist):
207 Exception.__init__(self, *(message, elist))
210 Exception.__init__(self, *(message, elist))
208 # Don't use pack_exception because it will conflict with the .message
211 # Don't use pack_exception because it will conflict with the .message
209 # attribute that is being deprecated in 2.6 and beyond.
212 # attribute that is being deprecated in 2.6 and beyond.
210 self.msg = message
213 self.msg = message
211 self.elist = elist
214 self.elist = elist
212 self.args = [ e[0] for e in elist ]
215 self.args = [ e[0] for e in elist ]
213
216
214 def _get_engine_str(self, ei):
217 def _get_engine_str(self, ei):
215 if not ei:
218 if not ei:
216 return '[Engine Exception]'
219 return '[Engine Exception]'
217 else:
220 else:
218 return '[%s:%s]: ' % (ei['engine_id'], ei['method'])
221 return '[%s:%s]: ' % (ei['engine_id'], ei['method'])
219
222
220 def _get_traceback(self, ev):
223 def _get_traceback(self, ev):
221 try:
224 try:
222 tb = ev._ipython_traceback_text
225 tb = ev._ipython_traceback_text
223 except AttributeError:
226 except AttributeError:
224 return 'No traceback available'
227 return 'No traceback available'
225 else:
228 else:
226 return tb
229 return tb
227
230
228 def __str__(self):
231 def __str__(self):
229 s = str(self.msg)
232 s = str(self.msg)
230 for en, ev, etb, ei in self.elist:
233 for en, ev, etb, ei in self.elist:
231 engine_str = self._get_engine_str(ei)
234 engine_str = self._get_engine_str(ei)
232 s = s + '\n' + engine_str + en + ': ' + str(ev)
235 s = s + '\n' + engine_str + en + ': ' + str(ev)
233 return s
236 return s
234
237
235 def __repr__(self):
238 def __repr__(self):
236 return "CompositeError(%i)"%len(self.elist)
239 return "CompositeError(%i)"%len(self.elist)
237
240
238 def print_tracebacks(self, excid=None):
241 def print_tracebacks(self, excid=None):
239 if excid is None:
242 if excid is None:
240 for (en,ev,etb,ei) in self.elist:
243 for (en,ev,etb,ei) in self.elist:
241 print (self._get_engine_str(ei))
244 print (self._get_engine_str(ei))
242 print (etb or 'No traceback available')
245 print (etb or 'No traceback available')
243 print ()
246 print ()
244 else:
247 else:
245 try:
248 try:
246 en,ev,etb,ei = self.elist[excid]
249 en,ev,etb,ei = self.elist[excid]
247 except:
250 except:
248 raise IndexError("an exception with index %i does not exist"%excid)
251 raise IndexError("an exception with index %i does not exist"%excid)
249 else:
252 else:
250 print (self._get_engine_str(ei))
253 print (self._get_engine_str(ei))
251 print (etb or 'No traceback available')
254 print (etb or 'No traceback available')
252
255
253 def raise_exception(self, excid=0):
256 def raise_exception(self, excid=0):
254 try:
257 try:
255 en,ev,etb,ei = self.elist[excid]
258 en,ev,etb,ei = self.elist[excid]
256 except:
259 except:
257 raise IndexError("an exception with index %i does not exist"%excid)
260 raise IndexError("an exception with index %i does not exist"%excid)
258 else:
261 else:
259 raise RemoteError(en, ev, etb, ei)
262 raise RemoteError(en, ev, etb, ei)
260
263
261
264
262 def collect_exceptions(rdict_or_list, method='unspecified'):
265 def collect_exceptions(rdict_or_list, method='unspecified'):
263 """check a result dict for errors, and raise CompositeError if any exist.
266 """check a result dict for errors, and raise CompositeError if any exist.
264 Passthrough otherwise."""
267 Passthrough otherwise."""
265 elist = []
268 elist = []
266 if isinstance(rdict_or_list, dict):
269 if isinstance(rdict_or_list, dict):
267 rlist = rdict_or_list.values()
270 rlist = rdict_or_list.values()
268 else:
271 else:
269 rlist = rdict_or_list
272 rlist = rdict_or_list
270 for r in rlist:
273 for r in rlist:
271 if isinstance(r, RemoteError):
274 if isinstance(r, RemoteError):
272 en, ev, etb, ei = r.ename, r.evalue, r.traceback, r.engine_info
275 en, ev, etb, ei = r.ename, r.evalue, r.traceback, r.engine_info
273 # Sometimes we could have CompositeError in our list. Just take
276 # Sometimes we could have CompositeError in our list. Just take
274 # the errors out of them and put them in our new list. This
277 # the errors out of them and put them in our new list. This
275 # has the effect of flattening lists of CompositeErrors into one
278 # has the effect of flattening lists of CompositeErrors into one
276 # CompositeError
279 # CompositeError
277 if en=='CompositeError':
280 if en=='CompositeError':
278 for e in ev.elist:
281 for e in ev.elist:
279 elist.append(e)
282 elist.append(e)
280 else:
283 else:
281 elist.append((en, ev, etb, ei))
284 elist.append((en, ev, etb, ei))
282 if len(elist)==0:
285 if len(elist)==0:
283 return rdict_or_list
286 return rdict_or_list
284 else:
287 else:
285 msg = "one or more exceptions from call to method: %s" % (method)
288 msg = "one or more exceptions from call to method: %s" % (method)
286 # This silliness is needed so the debugger has access to the exception
289 # This silliness is needed so the debugger has access to the exception
287 # instance (e in this case)
290 # instance (e in this case)
288 try:
291 try:
289 raise CompositeError(msg, elist)
292 raise CompositeError(msg, elist)
290 except CompositeError as e:
293 except CompositeError as e:
291 raise e
294 raise e
292
295
296 def wrap_exception(engine_info={}):
297 etype, evalue, tb = sys.exc_info()
298 stb = traceback.format_exception(etype, evalue, tb)
299 exc_content = {
300 'status' : 'error',
301 'traceback' : stb,
302 'ename' : unicode(etype.__name__),
303 'evalue' : unicode(evalue),
304 'engine_info' : engine_info
305 }
306 return exc_content
307
308 def unwrap_exception(content):
309 err = RemoteError(content['ename'], content['evalue'],
310 ''.join(content['traceback']),
311 content.get('engine_info', {}))
312 return err
313
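wrap_exception() and unwrap_exception() are the two ends of the wire format for remote errors, and collect_exceptions() folds the unwrapped results into a CompositeError. A minimal round-trip sketch (the engine_info values are invented):

    try:
        1/0
    except ZeroDivisionError:
        content = wrap_exception(engine_info={'engine_id': 0, 'method': 'execute'})

    err = unwrap_exception(content)  # RemoteError('ZeroDivisionError', ...)
    print(err)                       # signature plus the formatted traceback

    try:
        collect_exceptions([err], method='execute')
    except CompositeError as e:
        e.print_tracebacks()         # per-engine tracebacks, as defined above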
@@ -1,152 +1,152 b''
1 """Base config factories."""
1 """Base config factories."""
2
2
3 #-----------------------------------------------------------------------------
3 #-----------------------------------------------------------------------------
4 # Copyright (C) 2008-2009 The IPython Development Team
4 # Copyright (C) 2008-2009 The IPython Development Team
5 #
5 #
6 # Distributed under the terms of the BSD License. The full license is in
6 # Distributed under the terms of the BSD License. The full license is in
7 # the file COPYING, distributed as part of this software.
7 # the file COPYING, distributed as part of this software.
8 #-----------------------------------------------------------------------------
8 #-----------------------------------------------------------------------------
9
9
10 #-----------------------------------------------------------------------------
10 #-----------------------------------------------------------------------------
11 # Imports
11 # Imports
12 #-----------------------------------------------------------------------------
12 #-----------------------------------------------------------------------------
13
13
14
14
15 import logging
15 import logging
16 import os
16 import os
17 import uuid
17 import uuid
18
18
19 from zmq.eventloop.ioloop import IOLoop
19 from zmq.eventloop.ioloop import IOLoop
20
20
21 from IPython.config.configurable import Configurable
21 from IPython.config.configurable import Configurable
22 from IPython.utils.importstring import import_item
22 from IPython.utils.importstring import import_item
23 from IPython.utils.traitlets import Str,Int,Instance, CUnicode, CStr
23 from IPython.utils.traitlets import Str,Int,Instance, CUnicode, CStr
24
24
25 import IPython.zmq.parallel.streamsession as ss
25 import IPython.zmq.parallel.streamsession as ss
26 from IPython.zmq.parallel.entry_point import select_random_ports
26 from IPython.zmq.parallel.entry_point import select_random_ports
27
27
28 #-----------------------------------------------------------------------------
28 #-----------------------------------------------------------------------------
29 # Classes
29 # Classes
30 #-----------------------------------------------------------------------------
30 #-----------------------------------------------------------------------------
31 class LoggingFactory(Configurable):
31 class LoggingFactory(Configurable):
32 """A most basic class, that has a `log` (type:`Logger`) attribute, set via a `logname` Trait."""
32 """A most basic class, that has a `log` (type:`Logger`) attribute, set via a `logname` Trait."""
33 log = Instance('logging.Logger', ('ZMQ', logging.WARN))
33 log = Instance('logging.Logger', ('ZMQ', logging.WARN))
34 logname = CStr('ZMQ')
34 logname = CUnicode('ZMQ')
35 def _logname_changed(self, name, old, new):
35 def _logname_changed(self, name, old, new):
36 self.log = logging.getLogger(new)
36 self.log = logging.getLogger(new)
37
37
38
38
39 class SessionFactory(LoggingFactory):
39 class SessionFactory(LoggingFactory):
40 """The Base factory from which every factory in IPython.zmq.parallel inherits"""
40 """The Base factory from which every factory in IPython.zmq.parallel inherits"""
41
41
42 packer = Str('',config=True)
42 packer = Str('',config=True)
43 unpacker = Str('',config=True)
43 unpacker = Str('',config=True)
44 ident = CStr('',config=True)
44 ident = CStr('',config=True)
45 def _ident_default(self):
45 def _ident_default(self):
46 return str(uuid.uuid4())
46 return str(uuid.uuid4())
47 username = Str(os.environ.get('USER','username'),config=True)
47 username = CUnicode(os.environ.get('USER','username'),config=True)
48 exec_key = CStr('',config=True)
48 exec_key = CUnicode('',config=True)
49 # not configurable:
49 # not configurable:
50 context = Instance('zmq.Context', (), {})
50 context = Instance('zmq.Context', (), {})
51 session = Instance('IPython.zmq.parallel.streamsession.StreamSession')
51 session = Instance('IPython.zmq.parallel.streamsession.StreamSession')
52 loop = Instance('zmq.eventloop.ioloop.IOLoop', allow_none=False)
52 loop = Instance('zmq.eventloop.ioloop.IOLoop', allow_none=False)
53 def _loop_default(self):
53 def _loop_default(self):
54 return IOLoop.instance()
54 return IOLoop.instance()
55
55
56
56
57 def __init__(self, **kwargs):
57 def __init__(self, **kwargs):
58 super(SessionFactory, self).__init__(**kwargs)
58 super(SessionFactory, self).__init__(**kwargs)
59 exec_key = self.exec_key or None
59 exec_key = self.exec_key or None
60 # set the packers:
60 # set the packers:
61 if not self.packer:
61 if not self.packer:
62 packer_f = unpacker_f = None
62 packer_f = unpacker_f = None
63 elif self.packer.lower() == 'json':
63 elif self.packer.lower() == 'json':
64 packer_f = ss.json_packer
64 packer_f = ss.json_packer
65 unpacker_f = ss.json_unpacker
65 unpacker_f = ss.json_unpacker
66 elif self.packer.lower() == 'pickle':
66 elif self.packer.lower() == 'pickle':
67 packer_f = ss.pickle_packer
67 packer_f = ss.pickle_packer
68 unpacker_f = ss.pickle_unpacker
68 unpacker_f = ss.pickle_unpacker
69 else:
69 else:
70 packer_f = import_item(self.packer)
70 packer_f = import_item(self.packer)
71 unpacker_f = import_item(self.unpacker)
71 unpacker_f = import_item(self.unpacker)
72
72
73 # construct the session
73 # construct the session
74 self.session = ss.StreamSession(self.username, self.ident, packer=packer_f, unpacker=unpacker_f, key=exec_key)
74 self.session = ss.StreamSession(self.username, self.ident, packer=packer_f, unpacker=unpacker_f, key=exec_key)
75
75
76
76
77 class RegistrationFactory(SessionFactory):
77 class RegistrationFactory(SessionFactory):
78 """The Base Configurable for objects that involve registration."""
78 """The Base Configurable for objects that involve registration."""
79
79
80 url = Str('', config=True) # url takes precedence over ip,regport,transport
80 url = Str('', config=True) # url takes precedence over ip,regport,transport
81 transport = Str('tcp', config=True)
81 transport = Str('tcp', config=True)
82 ip = Str('127.0.0.1', config=True)
82 ip = Str('127.0.0.1', config=True)
83 regport = Instance(int, config=True)
83 regport = Instance(int, config=True)
84 def _regport_default(self):
84 def _regport_default(self):
85 # return 10101
85 # return 10101
86 return select_random_ports(1)[0]
86 return select_random_ports(1)[0]
87
87
88 def __init__(self, **kwargs):
88 def __init__(self, **kwargs):
89 super(RegistrationFactory, self).__init__(**kwargs)
89 super(RegistrationFactory, self).__init__(**kwargs)
90 self._propagate_url()
90 self._propagate_url()
91 self._rebuild_url()
91 self._rebuild_url()
92 self.on_trait_change(self._propagate_url, 'url')
92 self.on_trait_change(self._propagate_url, 'url')
93 self.on_trait_change(self._rebuild_url, 'ip')
93 self.on_trait_change(self._rebuild_url, 'ip')
94 self.on_trait_change(self._rebuild_url, 'transport')
94 self.on_trait_change(self._rebuild_url, 'transport')
95 self.on_trait_change(self._rebuild_url, 'regport')
95 self.on_trait_change(self._rebuild_url, 'regport')
96
96
97 def _rebuild_url(self):
97 def _rebuild_url(self):
98 self.url = "%s://%s:%i"%(self.transport, self.ip, self.regport)
98 self.url = "%s://%s:%i"%(self.transport, self.ip, self.regport)
99
99
100 def _propagate_url(self):
100 def _propagate_url(self):
101 """Ensure self.url contains full transport://interface:port"""
101 """Ensure self.url contains full transport://interface:port"""
102 if self.url:
102 if self.url:
103 iface = self.url.split('://',1)
103 iface = self.url.split('://',1)
104 if len(iface) == 2:
104 if len(iface) == 2:
105 self.transport,iface = iface
105 self.transport,iface = iface
106 iface = iface.split(':')
106 iface = iface.split(':')
107 self.ip = iface[0]
107 self.ip = iface[0]
108 if iface[1]:
108 if iface[1]:
109 self.regport = int(iface[1])
109 self.regport = int(iface[1])
110
110
111 #-----------------------------------------------------------------------------
111 #-----------------------------------------------------------------------------
112 # argparse argument extenders
112 # argparse argument extenders
113 #-----------------------------------------------------------------------------
113 #-----------------------------------------------------------------------------
114
114
115
115
116 def add_session_arguments(parser):
116 def add_session_arguments(parser):
117 paa = parser.add_argument
117 paa = parser.add_argument
118 paa('--ident',
118 paa('--ident',
119 type=str, dest='SessionFactory.ident',
119 type=str, dest='SessionFactory.ident',
120 help='set the ZMQ and session identity [default: random uuid]',
120 help='set the ZMQ and session identity [default: random uuid]',
121 metavar='identity')
121 metavar='identity')
122 # paa('--execkey',
122 # paa('--execkey',
123 # type=str, dest='SessionFactory.exec_key',
123 # type=str, dest='SessionFactory.exec_key',
124 # help='path to a file containing an execution key.',
124 # help='path to a file containing an execution key.',
125 # metavar='execkey')
125 # metavar='execkey')
126 paa('--packer',
126 paa('--packer',
127 type=str, dest='SessionFactory.packer',
127 type=str, dest='SessionFactory.packer',
128 help='method to serialize messages: {json,pickle} [default: json]',
128 help='method to serialize messages: {json,pickle} [default: json]',
129 metavar='packer')
129 metavar='packer')
130 paa('--unpacker',
130 paa('--unpacker',
131 type=str, dest='SessionFactory.unpacker',
131 type=str, dest='SessionFactory.unpacker',
132 help='inverse function of `packer`. Only necessary when using something other than json|pickle',
132 help='inverse function of `packer`. Only necessary when using something other than json|pickle',
133 metavar='unpacker')
133 metavar='unpacker')
134
134
135 def add_registration_arguments(parser):
135 def add_registration_arguments(parser):
136 paa = parser.add_argument
136 paa = parser.add_argument
137 paa('--ip',
137 paa('--ip',
138 type=str, dest='RegistrationFactory.ip',
138 type=str, dest='RegistrationFactory.ip',
139 help="The IP used for registration [default: localhost]",
139 help="The IP used for registration [default: localhost]",
140 metavar='ip')
140 metavar='ip')
141 paa('--transport',
141 paa('--transport',
142 type=str, dest='RegistrationFactory.transport',
142 type=str, dest='RegistrationFactory.transport',
143 help="The ZeroMQ transport used for registration [default: tcp]",
143 help="The ZeroMQ transport used for registration [default: tcp]",
144 metavar='transport')
144 metavar='transport')
145 paa('--url',
145 paa('--url',
146 type=str, dest='RegistrationFactory.url',
146 type=str, dest='RegistrationFactory.url',
147 help='set transport,ip,regport in one go, e.g. tcp://127.0.0.1:10101',
147 help='set transport,ip,regport in one go, e.g. tcp://127.0.0.1:10101',
148 metavar='url')
148 metavar='url')
149 paa('--regport',
149 paa('--regport',
150 type=int, dest='RegistrationFactory.regport',
150 type=int, dest='RegistrationFactory.regport',
151 help="The port used for registration [default: 10101]",
151 help="The port used for registration [default: 10101]",
152 metavar='port')
152 metavar='port')
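_propagate_url() and _rebuild_url() keep the composite `url` trait and its transport/ip/regport components in sync in both directions. A minimal sketch of the effect (the address is invented):

    from IPython.zmq.parallel.factory import RegistrationFactory

    rf = RegistrationFactory(url='tcp://192.168.1.5:10101')
    print("%s %s %i" % (rf.transport, rf.ip, rf.regport))  # tcp 192.168.1.5 10101

    rf.regport = 10102               # the trait change triggers _rebuild_url
    print(rf.url)                    # tcp://192.168.1.5:10102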
@@ -1,1054 +1,1052 b''
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 """The IPython Controller Hub with 0MQ
2 """The IPython Controller Hub with 0MQ
3 This is the master object that handles connections from engines and clients,
3 This is the master object that handles connections from engines and clients,
4 and monitors traffic through the various queues.
4 and monitors traffic through the various queues.
5 """
5 """
6 #-----------------------------------------------------------------------------
6 #-----------------------------------------------------------------------------
7 # Copyright (C) 2010 The IPython Development Team
7 # Copyright (C) 2010 The IPython Development Team
8 #
8 #
9 # Distributed under the terms of the BSD License. The full license is in
9 # Distributed under the terms of the BSD License. The full license is in
10 # the file COPYING, distributed as part of this software.
10 # the file COPYING, distributed as part of this software.
11 #-----------------------------------------------------------------------------
11 #-----------------------------------------------------------------------------
12
12
13 #-----------------------------------------------------------------------------
13 #-----------------------------------------------------------------------------
14 # Imports
14 # Imports
15 #-----------------------------------------------------------------------------
15 #-----------------------------------------------------------------------------
16 from __future__ import print_function
16 from __future__ import print_function
17
17
18 import logging
19 import sys
18 import sys
20 import time
19 import time
21 from datetime import datetime
20 from datetime import datetime
22
21
23 import zmq
22 import zmq
24 from zmq.eventloop import ioloop
23 from zmq.eventloop import ioloop
25 from zmq.eventloop.zmqstream import ZMQStream
24 from zmq.eventloop.zmqstream import ZMQStream
26
25
27 # internal:
26 # internal:
28 from IPython.config.configurable import Configurable
29 from IPython.utils.importstring import import_item
27 from IPython.utils.importstring import import_item
30 from IPython.utils.traitlets import HasTraits, Instance, Int, CStr, Str, Dict, Set, List, Bool
28 from IPython.utils.traitlets import HasTraits, Instance, Int, CStr, Str, Dict, Set, List, Bool
31
29
32 from .entry_point import select_random_ports
30 from .entry_point import select_random_ports
33 from .factory import RegistrationFactory, LoggingFactory
31 from .factory import RegistrationFactory, LoggingFactory
34
32
33 from . import error
35 from .heartmonitor import HeartMonitor
34 from .heartmonitor import HeartMonitor
36 from .streamsession import Message, wrap_exception, ISO8601
35 from .util import validate_url_container, ISO8601
37 from .util import validate_url_container
38
36
39 try:
37 try:
40 from pymongo.binary import Binary
38 from pymongo.binary import Binary
41 except ImportError:
39 except ImportError:
42 MongoDB=None
40 MongoDB=None
43 else:
41 else:
44 from mongodb import MongoDB
42 from mongodb import MongoDB
45
43
46 #-----------------------------------------------------------------------------
44 #-----------------------------------------------------------------------------
47 # Code
45 # Code
48 #-----------------------------------------------------------------------------
46 #-----------------------------------------------------------------------------
49
47
50 def _passer(*args, **kwargs):
48 def _passer(*args, **kwargs):
51 return
49 return
52
50
53 def _printer(*args, **kwargs):
51 def _printer(*args, **kwargs):
54 print (args)
52 print (args)
55 print (kwargs)
53 print (kwargs)
56
54
57 def init_record(msg):
55 def init_record(msg):
58 """Initialize a TaskRecord based on a request."""
56 """Initialize a TaskRecord based on a request."""
59 header = msg['header']
57 header = msg['header']
60 return {
58 return {
61 'msg_id' : header['msg_id'],
59 'msg_id' : header['msg_id'],
62 'header' : header,
60 'header' : header,
63 'content': msg['content'],
61 'content': msg['content'],
64 'buffers': msg['buffers'],
62 'buffers': msg['buffers'],
65 'submitted': datetime.strptime(header['date'], ISO8601),
63 'submitted': datetime.strptime(header['date'], ISO8601),
66 'client_uuid' : None,
64 'client_uuid' : None,
67 'engine_uuid' : None,
65 'engine_uuid' : None,
68 'started': None,
66 'started': None,
69 'completed': None,
67 'completed': None,
70 'resubmitted': None,
68 'resubmitted': None,
71 'result_header' : None,
69 'result_header' : None,
72 'result_content' : None,
70 'result_content' : None,
73 'result_buffers' : None,
71 'result_buffers' : None,
74 'queue' : None,
72 'queue' : None,
75 'pyin' : None,
73 'pyin' : None,
76 'pyout': None,
74 'pyout': None,
77 'pyerr': None,
75 'pyerr': None,
78 'stdout': '',
76 'stdout': '',
79 'stderr': '',
77 'stderr': '',
80 }
78 }
81
79
82
80
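init_record() only reads the message envelope, so a stub message pins down its behavior. A hedged sketch; the date string assumes ISO8601 is the usual "%Y-%m-%dT%H:%M:%S.%f" format constant imported above:

    msg = {
        'header' : {'msg_id': 'msg-1', 'date': '2011-04-01T12:00:00.000000'},
        'content': {'code': 'a=5'},
        'buffers': [],
    }
    rec = init_record(msg)
    print(rec['msg_id'])     # 'msg-1'
    print(rec['submitted'])  # datetime parsed from header['date']
    print(rec['completed'])  # None until a result arrives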
83 class EngineConnector(HasTraits):
81 class EngineConnector(HasTraits):
84 """A simple object for accessing the various zmq connections of an object.
82 """A simple object for accessing the various zmq connections of an object.
85 Attributes are:
83 Attributes are:
86 id (int): engine ID
84 id (int): engine ID
87 uuid (str): uuid (unused?)
85 uuid (str): uuid (unused?)
88 queue (str): identity of queue's XREQ socket
86 queue (str): identity of queue's XREQ socket
89 registration (str): identity of registration XREQ socket
87 registration (str): identity of registration XREQ socket
90 heartbeat (str): identity of heartbeat XREQ socket
88 heartbeat (str): identity of heartbeat XREQ socket
91 """
89 """
92 id=Int(0)
90 id=Int(0)
93 queue=Str()
91 queue=Str()
94 control=Str()
92 control=Str()
95 registration=Str()
93 registration=Str()
96 heartbeat=Str()
94 heartbeat=Str()
97 pending=Set()
95 pending=Set()
98
96
99 class HubFactory(RegistrationFactory):
97 class HubFactory(RegistrationFactory):
100 """The Configurable for setting up a Hub."""
98 """The Configurable for setting up a Hub."""
101
99
102 # name of a scheduler scheme
100 # name of a scheduler scheme
103 scheme = Str('leastload', config=True)
101 scheme = Str('leastload', config=True)
104
102
105 # port-pairs for monitoredqueues:
103 # port-pairs for monitoredqueues:
106 hb = Instance(list, config=True)
104 hb = Instance(list, config=True)
107 def _hb_default(self):
105 def _hb_default(self):
108 return select_random_ports(2)
106 return select_random_ports(2)
109
107
110 mux = Instance(list, config=True)
108 mux = Instance(list, config=True)
111 def _mux_default(self):
109 def _mux_default(self):
112 return select_random_ports(2)
110 return select_random_ports(2)
113
111
114 task = Instance(list, config=True)
112 task = Instance(list, config=True)
115 def _task_default(self):
113 def _task_default(self):
116 return select_random_ports(2)
114 return select_random_ports(2)
117
115
118 control = Instance(list, config=True)
116 control = Instance(list, config=True)
119 def _control_default(self):
117 def _control_default(self):
120 return select_random_ports(2)
118 return select_random_ports(2)
121
119
122 iopub = Instance(list, config=True)
120 iopub = Instance(list, config=True)
123 def _iopub_default(self):
121 def _iopub_default(self):
124 return select_random_ports(2)
122 return select_random_ports(2)
125
123
126 # single ports:
124 # single ports:
127 mon_port = Instance(int, config=True)
125 mon_port = Instance(int, config=True)
128 def _mon_port_default(self):
126 def _mon_port_default(self):
129 return select_random_ports(1)[0]
127 return select_random_ports(1)[0]
130
128
131 query_port = Instance(int, config=True)
129 query_port = Instance(int, config=True)
132 def _query_port_default(self):
130 def _query_port_default(self):
133 return select_random_ports(1)[0]
131 return select_random_ports(1)[0]
134
132
135 notifier_port = Instance(int, config=True)
133 notifier_port = Instance(int, config=True)
136 def _notifier_port_default(self):
134 def _notifier_port_default(self):
137 return select_random_ports(1)[0]
135 return select_random_ports(1)[0]
138
136
139 ping = Int(1000, config=True) # ping frequency
137 ping = Int(1000, config=True) # ping frequency
140
138
141 engine_ip = CStr('127.0.0.1', config=True)
139 engine_ip = CStr('127.0.0.1', config=True)
142 engine_transport = CStr('tcp', config=True)
140 engine_transport = CStr('tcp', config=True)
143
141
144 client_ip = CStr('127.0.0.1', config=True)
142 client_ip = CStr('127.0.0.1', config=True)
145 client_transport = CStr('tcp', config=True)
143 client_transport = CStr('tcp', config=True)
146
144
147 monitor_ip = CStr('127.0.0.1', config=True)
145 monitor_ip = CStr('127.0.0.1', config=True)
148 monitor_transport = CStr('tcp', config=True)
146 monitor_transport = CStr('tcp', config=True)
149
147
150 monitor_url = CStr('')
148 monitor_url = CStr('')
151
149
152 db_class = CStr('IPython.zmq.parallel.dictdb.DictDB', config=True)
150 db_class = CStr('IPython.zmq.parallel.dictdb.DictDB', config=True)
153
151
154 # not configurable
152 # not configurable
155 db = Instance('IPython.zmq.parallel.dictdb.BaseDB')
153 db = Instance('IPython.zmq.parallel.dictdb.BaseDB')
156 heartmonitor = Instance('IPython.zmq.parallel.heartmonitor.HeartMonitor')
154 heartmonitor = Instance('IPython.zmq.parallel.heartmonitor.HeartMonitor')
157 subconstructors = List()
155 subconstructors = List()
158 _constructed = Bool(False)
156 _constructed = Bool(False)
159
157
160 def _ip_changed(self, name, old, new):
158 def _ip_changed(self, name, old, new):
161 self.engine_ip = new
159 self.engine_ip = new
162 self.client_ip = new
160 self.client_ip = new
163 self.monitor_ip = new
161 self.monitor_ip = new
164 self._update_monitor_url()
162 self._update_monitor_url()
165
163
166 def _update_monitor_url(self):
164 def _update_monitor_url(self):
167 self.monitor_url = "%s://%s:%i"%(self.monitor_transport, self.monitor_ip, self.mon_port)
165 self.monitor_url = "%s://%s:%i"%(self.monitor_transport, self.monitor_ip, self.mon_port)
168
166
169 def _transport_changed(self, name, old, new):
167 def _transport_changed(self, name, old, new):
170 self.engine_transport = new
168 self.engine_transport = new
171 self.client_transport = new
169 self.client_transport = new
172 self.monitor_transport = new
170 self.monitor_transport = new
173 self._update_monitor_url()
171 self._update_monitor_url()
174
172
175 def __init__(self, **kwargs):
173 def __init__(self, **kwargs):
176 super(HubFactory, self).__init__(**kwargs)
174 super(HubFactory, self).__init__(**kwargs)
177 self._update_monitor_url()
175 self._update_monitor_url()
178 # self.on_trait_change(self._sync_ips, 'ip')
176 # self.on_trait_change(self._sync_ips, 'ip')
179 # self.on_trait_change(self._sync_transports, 'transport')
177 # self.on_trait_change(self._sync_transports, 'transport')
180 self.subconstructors.append(self.construct_hub)
178 self.subconstructors.append(self.construct_hub)
181
179
182
180
183 def construct(self):
181 def construct(self):
184 assert not self._constructed, "already constructed!"
182 assert not self._constructed, "already constructed!"
185
183
186 for subc in self.subconstructors:
184 for subc in self.subconstructors:
187 subc()
185 subc()
188
186
189 self._constructed = True
187 self._constructed = True
190
188
191
189
192 def start(self):
190 def start(self):
193 assert self._constructed, "must be constructed by self.construct() first!"
191 assert self._constructed, "must be constructed by self.construct() first!"
194 self.heartmonitor.start()
192 self.heartmonitor.start()
195 self.log.info("Heartmonitor started")
193 self.log.info("Heartmonitor started")
196
194
197 def construct_hub(self):
195 def construct_hub(self):
198 """construct"""
196 """construct"""
199 client_iface = "%s://%s:"%(self.client_transport, self.client_ip) + "%i"
197 client_iface = "%s://%s:"%(self.client_transport, self.client_ip) + "%i"
200 engine_iface = "%s://%s:"%(self.engine_transport, self.engine_ip) + "%i"
198 engine_iface = "%s://%s:"%(self.engine_transport, self.engine_ip) + "%i"
201
199
202 ctx = self.context
200 ctx = self.context
203 loop = self.loop
201 loop = self.loop
204
202
205 # Registrar socket
203 # Registrar socket
206 reg = ZMQStream(ctx.socket(zmq.XREP), loop)
204 reg = ZMQStream(ctx.socket(zmq.XREP), loop)
207 reg.bind(client_iface % self.regport)
205 reg.bind(client_iface % self.regport)
208 self.log.info("Hub listening on %s for registration."%(client_iface%self.regport))
206 self.log.info("Hub listening on %s for registration."%(client_iface%self.regport))
209 if self.client_ip != self.engine_ip:
207 if self.client_ip != self.engine_ip:
210 reg.bind(engine_iface % self.regport)
208 reg.bind(engine_iface % self.regport)
211 self.log.info("Hub listening on %s for registration."%(engine_iface%self.regport))
209 self.log.info("Hub listening on %s for registration."%(engine_iface%self.regport))
212
210
213 ### Engine connections ###
211 ### Engine connections ###
214
212
215 # heartbeat
213 # heartbeat
216 hpub = ctx.socket(zmq.PUB)
214 hpub = ctx.socket(zmq.PUB)
217 hpub.bind(engine_iface % self.hb[0])
215 hpub.bind(engine_iface % self.hb[0])
218 hrep = ctx.socket(zmq.XREP)
216 hrep = ctx.socket(zmq.XREP)
219 hrep.bind(engine_iface % self.hb[1])
217 hrep.bind(engine_iface % self.hb[1])
220 self.heartmonitor = HeartMonitor(loop=loop, pingstream=ZMQStream(hpub,loop), pongstream=ZMQStream(hrep,loop),
218 self.heartmonitor = HeartMonitor(loop=loop, pingstream=ZMQStream(hpub,loop), pongstream=ZMQStream(hrep,loop),
221 period=self.ping, logname=self.log.name)
219 period=self.ping, logname=self.log.name)
222
220
223 ### Client connections ###
221 ### Client connections ###
224 # Clientele socket
222 # Clientele socket
225 c = ZMQStream(ctx.socket(zmq.XREP), loop)
223 c = ZMQStream(ctx.socket(zmq.XREP), loop)
226 c.bind(client_iface%self.query_port)
224 c.bind(client_iface%self.query_port)
227 # Notifier socket
225 # Notifier socket
228 n = ZMQStream(ctx.socket(zmq.PUB), loop)
226 n = ZMQStream(ctx.socket(zmq.PUB), loop)
229 n.bind(client_iface%self.notifier_port)
227 n.bind(client_iface%self.notifier_port)
230
228
231 ### build and launch the queues ###
229 ### build and launch the queues ###
232
230
233 # monitor socket
231 # monitor socket
234 sub = ctx.socket(zmq.SUB)
232 sub = ctx.socket(zmq.SUB)
235 sub.setsockopt(zmq.SUBSCRIBE, "")
233 sub.setsockopt(zmq.SUBSCRIBE, "")
236 sub.bind(self.monitor_url)
234 sub.bind(self.monitor_url)
237 sub.bind('inproc://monitor')
235 sub.bind('inproc://monitor')
238 sub = ZMQStream(sub, loop)
236 sub = ZMQStream(sub, loop)
239
237
240 # connect the db
238 # connect the db
241 self.db = import_item(self.db_class)(self.session.session)
239 self.db = import_item(self.db_class)(self.session.session)
242 time.sleep(.25)
240 time.sleep(.25)
243
241
244 # build connection dicts
242 # build connection dicts
245 self.engine_info = {
243 self.engine_info = {
246 'control' : engine_iface%self.control[1],
244 'control' : engine_iface%self.control[1],
247 'mux': engine_iface%self.mux[1],
245 'mux': engine_iface%self.mux[1],
248 'heartbeat': (engine_iface%self.hb[0], engine_iface%self.hb[1]),
246 'heartbeat': (engine_iface%self.hb[0], engine_iface%self.hb[1]),
249 'task' : engine_iface%self.task[1],
247 'task' : engine_iface%self.task[1],
250 'iopub' : engine_iface%self.iopub[1],
248 'iopub' : engine_iface%self.iopub[1],
251 # 'monitor' : engine_iface%self.mon_port,
249 # 'monitor' : engine_iface%self.mon_port,
252 }
250 }
253
251
254 self.client_info = {
252 self.client_info = {
255 'control' : client_iface%self.control[0],
253 'control' : client_iface%self.control[0],
256 'query': client_iface%self.query_port,
254 'query': client_iface%self.query_port,
257 'mux': client_iface%self.mux[0],
255 'mux': client_iface%self.mux[0],
258 'task' : (self.scheme, client_iface%self.task[0]),
256 'task' : (self.scheme, client_iface%self.task[0]),
259 'iopub' : client_iface%self.iopub[0],
257 'iopub' : client_iface%self.iopub[0],
260 'notification': client_iface%self.notifier_port
258 'notification': client_iface%self.notifier_port
261 }
259 }
262 self.log.debug("hub::Hub engine addrs: %s"%self.engine_info)
260 self.log.debug("hub::Hub engine addrs: %s"%self.engine_info)
263 self.log.debug("hub::Hub client addrs: %s"%self.client_info)
261 self.log.debug("hub::Hub client addrs: %s"%self.client_info)
264 self.hub = Hub(loop=loop, session=self.session, monitor=sub, heartmonitor=self.heartmonitor,
262 self.hub = Hub(loop=loop, session=self.session, monitor=sub, heartmonitor=self.heartmonitor,
265 registrar=reg, clientele=c, notifier=n, db=self.db,
263 registrar=reg, clientele=c, notifier=n, db=self.db,
266 engine_info=self.engine_info, client_info=self.client_info,
264 engine_info=self.engine_info, client_info=self.client_info,
267 logname=self.log.name)
265 logname=self.log.name)
268
266
269
267
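Putting the factory pieces together: the intended lifecycle is one call to construct(), which runs every registered subconstructor (including construct_hub above), followed by start(). A hedged sketch of that call sequence from a driver script, assuming default ports and transports; the import path is assumed, not confirmed by this diff:

# sketch only: assumes a running IOLoop and default configuration
from IPython.zmq.parallel.hub import HubFactory  # assumed module path

factory = HubFactory(ip='127.0.0.1')
factory.construct()   # binds sockets, builds the Hub and HeartMonitor
factory.start()       # starts the heartbeat; construct() must come first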
270 class Hub(LoggingFactory):
268 class Hub(LoggingFactory):
271 """The IPython Controller Hub with 0MQ connections
269 """The IPython Controller Hub with 0MQ connections
272
270
273 Parameters
271 Parameters
274 ----------
272 ----------
275 loop: zmq IOLoop instance
273 loop: zmq IOLoop instance
276 session: StreamSession object
274 session: StreamSession object
277 <removed> context: zmq context for creating new connections (?)
275 <removed> context: zmq context for creating new connections (?)
278 queue: ZMQStream for monitoring the command queue (SUB)
276 queue: ZMQStream for monitoring the command queue (SUB)
279 registrar: ZMQStream for engine registration requests (XREP)
277 registrar: ZMQStream for engine registration requests (XREP)
280 heartbeat: HeartMonitor object checking the pulse of the engines
278 heartbeat: HeartMonitor object checking the pulse of the engines
281 clientele: ZMQStream for client connections (XREP)
279 clientele: ZMQStream for client connections (XREP)
282 not used for jobs, only query/control commands
280 not used for jobs, only query/control commands
283 notifier: ZMQStream for broadcasting engine registration changes (PUB)
281 notifier: ZMQStream for broadcasting engine registration changes (PUB)
284 db: connection to db for out of memory logging of commands
282 db: connection to db for out of memory logging of commands
285 NotImplemented
283 NotImplemented
286 engine_info: dict of zmq connection information for engines to connect
284 engine_info: dict of zmq connection information for engines to connect
287 to the queues.
285 to the queues.
288 client_info: dict of zmq connection information for clients to connect
286 client_info: dict of zmq connection information for clients to connect
289 to the queues.
287 to the queues.
290 """
288 """
291 # internal data structures:
289 # internal data structures:
292 ids=Set() # engine IDs
290 ids=Set() # engine IDs
293 keytable=Dict()
291 keytable=Dict()
294 by_ident=Dict()
292 by_ident=Dict()
295 engines=Dict()
293 engines=Dict()
296 clients=Dict()
294 clients=Dict()
297 hearts=Dict()
295 hearts=Dict()
298 pending=Set()
296 pending=Set()
299 queues=Dict() # pending msg_ids keyed by engine_id
297 queues=Dict() # pending msg_ids keyed by engine_id
300 tasks=Dict() # pending msg_ids submitted as tasks, keyed by client_id
298 tasks=Dict() # pending msg_ids submitted as tasks, keyed by client_id
301 completed=Dict() # completed msg_ids keyed by engine_id
299 completed=Dict() # completed msg_ids keyed by engine_id
302 all_completed=Set() # completed msg_ids keyed by engine_id
300 all_completed=Set() # completed msg_ids keyed by engine_id
303 # mia=None
301 # mia=None
304 incoming_registrations=Dict()
302 incoming_registrations=Dict()
305 registration_timeout=Int()
303 registration_timeout=Int()
306 _idcounter=Int(0)
304 _idcounter=Int(0)
307
305
308 # objects from constructor:
306 # objects from constructor:
309 loop=Instance(ioloop.IOLoop)
307 loop=Instance(ioloop.IOLoop)
310 registrar=Instance(ZMQStream)
308 registrar=Instance(ZMQStream)
311 clientele=Instance(ZMQStream)
309 clientele=Instance(ZMQStream)
312 monitor=Instance(ZMQStream)
310 monitor=Instance(ZMQStream)
313 heartmonitor=Instance(HeartMonitor)
311 heartmonitor=Instance(HeartMonitor)
314 notifier=Instance(ZMQStream)
312 notifier=Instance(ZMQStream)
315 db=Instance(object)
313 db=Instance(object)
316 client_info=Dict()
314 client_info=Dict()
317 engine_info=Dict()
315 engine_info=Dict()
318
316
319
317
320 def __init__(self, **kwargs):
318 def __init__(self, **kwargs):
321 """
319 """
322 # universal:
320 # universal:
323 loop: IOLoop for creating future connections
321 loop: IOLoop for creating future connections
324 session: streamsession for sending serialized data
322 session: streamsession for sending serialized data
325 # engine:
323 # engine:
326 queue: ZMQStream for monitoring queue messages
324 queue: ZMQStream for monitoring queue messages
327 registrar: ZMQStream for engine registration
325 registrar: ZMQStream for engine registration
328 heartbeat: HeartMonitor object for tracking engines
326 heartbeat: HeartMonitor object for tracking engines
329 # client:
327 # client:
330 clientele: ZMQStream for client connections
328 clientele: ZMQStream for client connections
331 # extra:
329 # extra:
332 db: ZMQStream for db connection (NotImplemented)
330 db: ZMQStream for db connection (NotImplemented)
333 engine_info: zmq address/protocol dict for engine connections
331 engine_info: zmq address/protocol dict for engine connections
334 client_info: zmq address/protocol dict for client connections
332 client_info: zmq address/protocol dict for client connections
335 """
333 """
336
334
337 super(Hub, self).__init__(**kwargs)
335 super(Hub, self).__init__(**kwargs)
338 self.registration_timeout = max(5000, 2*self.heartmonitor.period)
336 self.registration_timeout = max(5000, 2*self.heartmonitor.period)
339
337
340 # validate connection dicts:
338 # validate connection dicts:
341 for k,v in self.client_info.iteritems():
339 for k,v in self.client_info.iteritems():
342 if k == 'task':
340 if k == 'task':
343 validate_url_container(v[1])
341 validate_url_container(v[1])
344 else:
342 else:
345 validate_url_container(v)
343 validate_url_container(v)
346 # validate_url_container(self.client_info)
344 # validate_url_container(self.client_info)
347 validate_url_container(self.engine_info)
345 validate_url_container(self.engine_info)
348
346
349 # register our callbacks
347 # register our callbacks
350 self.registrar.on_recv(self.dispatch_register_request)
348 self.registrar.on_recv(self.dispatch_register_request)
351 self.clientele.on_recv(self.dispatch_client_msg)
349 self.clientele.on_recv(self.dispatch_client_msg)
352 self.monitor.on_recv(self.dispatch_monitor_traffic)
350 self.monitor.on_recv(self.dispatch_monitor_traffic)
353
351
354 self.heartmonitor.add_heart_failure_handler(self.handle_heart_failure)
352 self.heartmonitor.add_heart_failure_handler(self.handle_heart_failure)
355 self.heartmonitor.add_new_heart_handler(self.handle_new_heart)
353 self.heartmonitor.add_new_heart_handler(self.handle_new_heart)
356
354
357 self.monitor_handlers = { 'in' : self.save_queue_request,
355 self.monitor_handlers = { 'in' : self.save_queue_request,
358 'out': self.save_queue_result,
356 'out': self.save_queue_result,
359 'intask': self.save_task_request,
357 'intask': self.save_task_request,
360 'outtask': self.save_task_result,
358 'outtask': self.save_task_result,
361 'tracktask': self.save_task_destination,
359 'tracktask': self.save_task_destination,
362 'incontrol': _passer,
360 'incontrol': _passer,
363 'outcontrol': _passer,
361 'outcontrol': _passer,
364 'iopub': self.save_iopub_message,
362 'iopub': self.save_iopub_message,
365 }
363 }
366
364
367 self.client_handlers = {'queue_request': self.queue_status,
365 self.client_handlers = {'queue_request': self.queue_status,
368 'result_request': self.get_results,
366 'result_request': self.get_results,
369 'purge_request': self.purge_results,
367 'purge_request': self.purge_results,
370 'load_request': self.check_load,
368 'load_request': self.check_load,
371 'resubmit_request': self.resubmit_task,
369 'resubmit_request': self.resubmit_task,
372 'shutdown_request': self.shutdown_request,
370 'shutdown_request': self.shutdown_request,
373 }
371 }
374
372
375 self.registrar_handlers = {'registration_request' : self.register_engine,
373 self.registrar_handlers = {'registration_request' : self.register_engine,
376 'unregistration_request' : self.unregister_engine,
374 'unregistration_request' : self.unregister_engine,
377 'connection_request': self.connection_request,
375 'connection_request': self.connection_request,
378 }
376 }
379
377
380 self.log.info("hub::created hub")
378 self.log.info("hub::created hub")
381
379
382 @property
380 @property
383 def _next_id(self):
381 def _next_id(self):
384 """gemerate a new ID.
382 """gemerate a new ID.
385
383
386 No longer reuse old ids, just count from 0."""
384 No longer reuse old ids, just count from 0."""
387 newid = self._idcounter
385 newid = self._idcounter
388 self._idcounter += 1
386 self._idcounter += 1
389 return newid
387 return newid
390 # newid = 0
388 # newid = 0
391 # incoming = [id[0] for id in self.incoming_registrations.itervalues()]
389 # incoming = [id[0] for id in self.incoming_registrations.itervalues()]
392 # # print newid, self.ids, self.incoming_registrations
390 # # print newid, self.ids, self.incoming_registrations
393 # while newid in self.ids or newid in incoming:
391 # while newid in self.ids or newid in incoming:
394 # newid += 1
392 # newid += 1
395 # return newid
393 # return newid
396
394
397 #-----------------------------------------------------------------------------
395 #-----------------------------------------------------------------------------
398 # message validation
396 # message validation
399 #-----------------------------------------------------------------------------
397 #-----------------------------------------------------------------------------
400
398
401 def _validate_targets(self, targets):
399 def _validate_targets(self, targets):
402 """turn any valid targets argument into a list of integer ids"""
400 """turn any valid targets argument into a list of integer ids"""
403 if targets is None:
401 if targets is None:
404 # default to all
402 # default to all
405 targets = self.ids
403 targets = self.ids
406
404
407 if isinstance(targets, (int,str,unicode)):
405 if isinstance(targets, (int,str,unicode)):
408 # only one target specified
406 # only one target specified
409 targets = [targets]
407 targets = [targets]
410 _targets = []
408 _targets = []
411 for t in targets:
409 for t in targets:
412 # map raw identities to ids
410 # map raw identities to ids
413 if isinstance(t, (str,unicode)):
411 if isinstance(t, (str,unicode)):
414 t = self.by_ident.get(t, t)
412 t = self.by_ident.get(t, t)
415 _targets.append(t)
413 _targets.append(t)
416 targets = _targets
414 targets = _targets
417 bad_targets = [ t for t in targets if t not in self.ids ]
415 bad_targets = [ t for t in targets if t not in self.ids ]
418 if bad_targets:
416 if bad_targets:
419 raise IndexError("No Such Engine: %r"%bad_targets)
417 raise IndexError("No Such Engine: %r"%bad_targets)
420 if not targets:
418 if not targets:
421 raise IndexError("No Engines Registered")
419 raise IndexError("No Engines Registered")
422 return targets
420 return targets
423
421
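_validate_targets accepts None (meaning all engines), a single integer id or raw identity string, or a list of either, and always normalizes to a list of integer ids. A standalone sketch of the same normalization, with a toy by_ident table (names hypothetical):

def validate_targets(targets, ids, by_ident):
    """Normalize targets to a list of integer engine ids (sketch)."""
    if targets is None:
        targets = list(ids)                  # default to all engines
    if isinstance(targets, (int, str)):
        targets = [targets]                  # single target -> list
    targets = [by_ident.get(t, t) if isinstance(t, str) else t
               for t in targets]             # map raw identities to ids
    bad = [t for t in targets if t not in ids]
    if bad:
        raise IndexError("No Such Engine: %r" % bad)
    if not targets:
        raise IndexError("No Engines Registered")
    return targets

print(validate_targets('abc', {0, 1}, {'abc': 0}))  # [0]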
424 def _validate_client_msg(self, msg):
422 def _validate_client_msg(self, msg):
425 """validates and unpacks headers of a message. Returns False if invalid,
423 """validates and unpacks headers of a message. Returns False if invalid,
426 (ident, header, parent, content)"""
424 (ident, header, parent, content)"""
427 client_id = msg[0]
425 client_id = msg[0]
428 try:
426 try:
429 msg = self.session.unpack_message(msg[1:], content=True)
427 msg = self.session.unpack_message(msg[1:], content=True)
430 except:
428 except:
431 self.log.error("client::Invalid Message %s"%msg, exc_info=True)
429 self.log.error("client::Invalid Message %s"%msg, exc_info=True)
432 return False
430 return False
433
431
434 msg_type = msg.get('msg_type', None)
432 msg_type = msg.get('msg_type', None)
435 if msg_type is None:
433 if msg_type is None:
436 return False
434 return False
437 header = msg.get('header')
435 header = msg.get('header')
438 # session doesn't handle split content for now:
436 # session doesn't handle split content for now:
439 return client_id, msg
437 return client_id, msg
440
438
441
439
442 #-----------------------------------------------------------------------------
440 #-----------------------------------------------------------------------------
443 # dispatch methods (1 per stream)
441 # dispatch methods (1 per stream)
444 #-----------------------------------------------------------------------------
442 #-----------------------------------------------------------------------------
445
443
446 def dispatch_register_request(self, msg):
444 def dispatch_register_request(self, msg):
447 """"""
445 """"""
448 self.log.debug("registration::dispatch_register_request(%s)"%msg)
446 self.log.debug("registration::dispatch_register_request(%s)"%msg)
449 idents,msg = self.session.feed_identities(msg)
447 idents,msg = self.session.feed_identities(msg)
450 if not idents:
448 if not idents:
451 self.log.error("Bad Queue Message: %s"%msg, exc_info=True)
449 self.log.error("Bad Queue Message: %s"%msg, exc_info=True)
452 return
450 return
453 try:
451 try:
454 msg = self.session.unpack_message(msg,content=True)
452 msg = self.session.unpack_message(msg,content=True)
455 except:
453 except:
456 self.log.error("registration::got bad registration message: %s"%msg, exc_info=True)
454 self.log.error("registration::got bad registration message: %s"%msg, exc_info=True)
457 return
455 return
458
456
459 msg_type = msg['msg_type']
457 msg_type = msg['msg_type']
460 content = msg['content']
458 content = msg['content']
461
459
462 handler = self.registrar_handlers.get(msg_type, None)
460 handler = self.registrar_handlers.get(msg_type, None)
463 if handler is None:
461 if handler is None:
464 self.log.error("registration::got bad registration message: %s"%msg)
462 self.log.error("registration::got bad registration message: %s"%msg)
465 else:
463 else:
466 handler(idents, msg)
464 handler(idents, msg)
467
465
468 def dispatch_monitor_traffic(self, msg):
466 def dispatch_monitor_traffic(self, msg):
469 """all ME and Task queue messages come through here, as well as
467 """all ME and Task queue messages come through here, as well as
470 IOPub traffic."""
468 IOPub traffic."""
471 self.log.debug("monitor traffic: %s"%msg[:2])
469 self.log.debug("monitor traffic: %s"%msg[:2])
472 switch = msg[0]
470 switch = msg[0]
473 idents, msg = self.session.feed_identities(msg[1:])
471 idents, msg = self.session.feed_identities(msg[1:])
474 if not idents:
472 if not idents:
475 self.log.error("Bad Monitor Message: %s"%msg)
473 self.log.error("Bad Monitor Message: %s"%msg)
476 return
474 return
477 handler = self.monitor_handlers.get(switch, None)
475 handler = self.monitor_handlers.get(switch, None)
478 if handler is not None:
476 if handler is not None:
479 handler(idents, msg)
477 handler(idents, msg)
480 else:
478 else:
481 self.log.error("Invalid monitor topic: %s"%switch)
479 self.log.error("Invalid monitor topic: %s"%switch)
482
480
483
481
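The wire format assumed by dispatch_monitor_traffic: the first frame is the topic ('in', 'out', 'intask', 'outtask', 'tracktask', 'incontrol', 'outcontrol', or 'iopub'), followed by the identity prefix and the packed message, which is why msg[0] selects the handler and feed_identities runs on msg[1:]. For illustration only (frame contents hypothetical, topic names from the monitor_handlers table above):

# sketch of one monitor frame sequence as a list of byte strings
frames = [b'in',                          # topic: selects save_queue_request
          b'engine-uuid',                 # identity prefix
          b'client-uuid',
          b'<packed header/content>']     # serialized message frames
switch, rest = frames[0], frames[1:]      # what dispatch_monitor_traffic does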
484 def dispatch_client_msg(self, msg):
482 def dispatch_client_msg(self, msg):
485 """Route messages from clients"""
483 """Route messages from clients"""
486 idents, msg = self.session.feed_identities(msg)
484 idents, msg = self.session.feed_identities(msg)
487 if not idents:
485 if not idents:
488 self.log.error("Bad Client Message: %s"%msg)
486 self.log.error("Bad Client Message: %s"%msg)
489 return
487 return
490 client_id = idents[0]
488 client_id = idents[0]
491 try:
489 try:
492 msg = self.session.unpack_message(msg, content=True)
490 msg = self.session.unpack_message(msg, content=True)
493 except:
491 except:
494 content = wrap_exception()
492 content = error.wrap_exception()
495 self.log.error("Bad Client Message: %s"%msg, exc_info=True)
493 self.log.error("Bad Client Message: %s"%msg, exc_info=True)
496 self.session.send(self.clientele, "hub_error", ident=client_id,
494 self.session.send(self.clientele, "hub_error", ident=client_id,
497 content=content)
495 content=content)
498 return
496 return
499
497
500 # print client_id, header, parent, content
498 # print client_id, header, parent, content
501 #switch on message type:
499 #switch on message type:
502 msg_type = msg['msg_type']
500 msg_type = msg['msg_type']
503 self.log.info("client:: client %s requested %s"%(client_id, msg_type))
501 self.log.info("client:: client %s requested %s"%(client_id, msg_type))
504 handler = self.client_handlers.get(msg_type, None)
502 handler = self.client_handlers.get(msg_type, None)
505 try:
503 try:
506 assert handler is not None, "Bad Message Type: %s"%msg_type
504 assert handler is not None, "Bad Message Type: %s"%msg_type
507 except:
505 except:
508 content = wrap_exception()
506 content = error.wrap_exception()
509 self.log.error("Bad Message Type: %s"%msg_type, exc_info=True)
507 self.log.error("Bad Message Type: %s"%msg_type, exc_info=True)
510 self.session.send(self.clientele, "hub_error", ident=client_id,
508 self.session.send(self.clientele, "hub_error", ident=client_id,
511 content=content)
509 content=content)
512 return
510 return
513 else:
511 else:
514 handler(client_id, msg)
512 handler(client_id, msg)
515
513
516 def dispatch_db(self, msg):
514 def dispatch_db(self, msg):
517 """"""
515 """"""
518 raise NotImplementedError
516 raise NotImplementedError
519
517
520 #---------------------------------------------------------------------------
518 #---------------------------------------------------------------------------
521 # handler methods (1 per event)
519 # handler methods (1 per event)
522 #---------------------------------------------------------------------------
520 #---------------------------------------------------------------------------
523
521
524 #----------------------- Heartbeat --------------------------------------
522 #----------------------- Heartbeat --------------------------------------
525
523
526 def handle_new_heart(self, heart):
524 def handle_new_heart(self, heart):
527 """handler to attach to heartbeater.
525 """handler to attach to heartbeater.
528 Called when a new heart starts to beat.
526 Called when a new heart starts to beat.
529 Triggers completion of registration."""
527 Triggers completion of registration."""
530 self.log.debug("heartbeat::handle_new_heart(%r)"%heart)
528 self.log.debug("heartbeat::handle_new_heart(%r)"%heart)
531 if heart not in self.incoming_registrations:
529 if heart not in self.incoming_registrations:
532 self.log.info("heartbeat::ignoring new heart: %r"%heart)
530 self.log.info("heartbeat::ignoring new heart: %r"%heart)
533 else:
531 else:
534 self.finish_registration(heart)
532 self.finish_registration(heart)
535
533
536
534
537 def handle_heart_failure(self, heart):
535 def handle_heart_failure(self, heart):
538 """handler to attach to heartbeater.
536 """handler to attach to heartbeater.
539 called when a previously registered heart fails to respond to beat request.
537 called when a previously registered heart fails to respond to beat request.
540 triggers unregistration"""
538 triggers unregistration"""
541 self.log.debug("heartbeat::handle_heart_failure(%r)"%heart)
539 self.log.debug("heartbeat::handle_heart_failure(%r)"%heart)
542 eid = self.hearts.get(heart, None)
540 eid = self.hearts.get(heart, None)
543 if eid is None:
541 if eid is None:
544 self.log.info("heartbeat::ignoring heart failure %r"%heart)
542 self.log.info("heartbeat::ignoring heart failure %r"%heart)
545 else:
543 else:
546 queue = self.engines[eid].queue
544 queue = self.engines[eid].queue
547 self.unregister_engine(heart, dict(content=dict(id=eid, queue=queue)))
545 self.unregister_engine(heart, dict(content=dict(id=eid, queue=queue)))
548
546
549 #----------------------- MUX Queue Traffic ------------------------------
547 #----------------------- MUX Queue Traffic ------------------------------
550
548
551 def save_queue_request(self, idents, msg):
549 def save_queue_request(self, idents, msg):
552 if len(idents) < 2:
550 if len(idents) < 2:
553 self.log.error("invalid identity prefix: %s"%idents)
551 self.log.error("invalid identity prefix: %s"%idents)
554 return
552 return
555 queue_id, client_id = idents[:2]
553 queue_id, client_id = idents[:2]
556 try:
554 try:
557 msg = self.session.unpack_message(msg, content=False)
555 msg = self.session.unpack_message(msg, content=False)
558 except:
556 except:
559 self.log.error("queue::client %r sent invalid message to %r: %s"%(client_id, queue_id, msg), exc_info=True)
557 self.log.error("queue::client %r sent invalid message to %r: %s"%(client_id, queue_id, msg), exc_info=True)
560 return
558 return
561
559
562 eid = self.by_ident.get(queue_id, None)
560 eid = self.by_ident.get(queue_id, None)
563 if eid is None:
561 if eid is None:
564 self.log.error("queue::target %r not registered"%queue_id)
562 self.log.error("queue::target %r not registered"%queue_id)
565 self.log.debug("queue:: valid are: %s"%(self.by_ident.keys()))
563 self.log.debug("queue:: valid are: %s"%(self.by_ident.keys()))
566 return
564 return
567
565
568 header = msg['header']
566 header = msg['header']
569 msg_id = header['msg_id']
567 msg_id = header['msg_id']
570 record = init_record(msg)
568 record = init_record(msg)
571 record['engine_uuid'] = queue_id
569 record['engine_uuid'] = queue_id
572 record['client_uuid'] = client_id
570 record['client_uuid'] = client_id
573 record['queue'] = 'mux'
571 record['queue'] = 'mux'
574 if MongoDB is not None and isinstance(self.db, MongoDB):
572 if MongoDB is not None and isinstance(self.db, MongoDB):
575 record['buffers'] = map(Binary, record['buffers'])
573 record['buffers'] = map(Binary, record['buffers'])
576 self.pending.add(msg_id)
574 self.pending.add(msg_id)
577 self.queues[eid].append(msg_id)
575 self.queues[eid].append(msg_id)
578 self.db.add_record(msg_id, record)
576 self.db.add_record(msg_id, record)
579
577
580 def save_queue_result(self, idents, msg):
578 def save_queue_result(self, idents, msg):
581 if len(idents) < 2:
579 if len(idents) < 2:
582 self.log.error("invalid identity prefix: %s"%idents)
580 self.log.error("invalid identity prefix: %s"%idents)
583 return
581 return
584
582
585 client_id, queue_id = idents[:2]
583 client_id, queue_id = idents[:2]
586 try:
584 try:
587 msg = self.session.unpack_message(msg, content=False)
585 msg = self.session.unpack_message(msg, content=False)
588 except:
586 except:
589 self.log.error("queue::engine %r sent invalid message to %r: %s"%(
587 self.log.error("queue::engine %r sent invalid message to %r: %s"%(
590 queue_id,client_id, msg), exc_info=True)
588 queue_id,client_id, msg), exc_info=True)
591 return
589 return
592
590
593 eid = self.by_ident.get(queue_id, None)
591 eid = self.by_ident.get(queue_id, None)
594 if eid is None:
592 if eid is None:
595 self.log.error("queue::unknown engine %r is sending a reply: "%queue_id)
593 self.log.error("queue::unknown engine %r is sending a reply: "%queue_id)
596 self.log.debug("queue:: %s"%msg[2:])
594 self.log.debug("queue:: %s"%msg[2:])
597 return
595 return
598
596
599 parent = msg['parent_header']
597 parent = msg['parent_header']
600 if not parent:
598 if not parent:
601 return
599 return
602 msg_id = parent['msg_id']
600 msg_id = parent['msg_id']
603 if msg_id in self.pending:
601 if msg_id in self.pending:
604 self.pending.remove(msg_id)
602 self.pending.remove(msg_id)
605 self.all_completed.add(msg_id)
603 self.all_completed.add(msg_id)
606 self.queues[eid].remove(msg_id)
604 self.queues[eid].remove(msg_id)
607 self.completed[eid].append(msg_id)
605 self.completed[eid].append(msg_id)
608 rheader = msg['header']
606 rheader = msg['header']
609 completed = datetime.strptime(rheader['date'], ISO8601)
607 completed = datetime.strptime(rheader['date'], ISO8601)
610 started = rheader.get('started', None)
608 started = rheader.get('started', None)
611 if started is not None:
609 if started is not None:
612 started = datetime.strptime(started, ISO8601)
610 started = datetime.strptime(started, ISO8601)
613 result = {
611 result = {
614 'result_header' : rheader,
612 'result_header' : rheader,
615 'result_content': msg['content'],
613 'result_content': msg['content'],
616 'started' : started,
614 'started' : started,
617 'completed' : completed
615 'completed' : completed
618 }
616 }
619 if MongoDB is not None and isinstance(self.db, MongoDB):
617 if MongoDB is not None and isinstance(self.db, MongoDB):
620 result['result_buffers'] = map(Binary, msg['buffers'])
618 result['result_buffers'] = map(Binary, msg['buffers'])
621 else:
619 else:
622 result['result_buffers'] = msg['buffers']
620 result['result_buffers'] = msg['buffers']
623 self.db.update_record(msg_id, result)
621 self.db.update_record(msg_id, result)
624 else:
622 else:
625 self.log.debug("queue:: unknown msg finished %s"%msg_id)
623 self.log.debug("queue:: unknown msg finished %s"%msg_id)
626
624
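save_queue_result parses the ISO8601 'date' header, and the optional 'started' header, into datetime objects before storing them, so elapsed execution time can later be computed from the record. A small sketch of that parsing; the ISO8601 format string is assumed to match the constant used by the session code:

from datetime import datetime

ISO8601 = "%Y-%m-%dT%H:%M:%S.%f"  # assumed to match IPython's constant
rheader = {'date': '2011-03-01T12:00:02.000000',
           'started': '2011-03-01T12:00:00.500000'}
completed = datetime.strptime(rheader['date'], ISO8601)
started = datetime.strptime(rheader['started'], ISO8601)
print(completed - started)  # 0:00:01.500000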
627 #--------------------- Task Queue Traffic ------------------------------
625 #--------------------- Task Queue Traffic ------------------------------
628
626
629 def save_task_request(self, idents, msg):
627 def save_task_request(self, idents, msg):
630 """Save the submission of a task."""
628 """Save the submission of a task."""
631 client_id = idents[0]
629 client_id = idents[0]
632
630
633 try:
631 try:
634 msg = self.session.unpack_message(msg, content=False)
632 msg = self.session.unpack_message(msg, content=False)
635 except:
633 except:
636 self.log.error("task::client %r sent invalid task message: %s"%(
634 self.log.error("task::client %r sent invalid task message: %s"%(
637 client_id, msg), exc_info=True)
635 client_id, msg), exc_info=True)
638 return
636 return
639 record = init_record(msg)
637 record = init_record(msg)
640 if MongoDB is not None and isinstance(self.db, MongoDB):
638 if MongoDB is not None and isinstance(self.db, MongoDB):
641 record['buffers'] = map(Binary, record['buffers'])
639 record['buffers'] = map(Binary, record['buffers'])
642 record['client_uuid'] = client_id
640 record['client_uuid'] = client_id
643 record['queue'] = 'task'
641 record['queue'] = 'task'
644 header = msg['header']
642 header = msg['header']
645 msg_id = header['msg_id']
643 msg_id = header['msg_id']
646 self.pending.add(msg_id)
644 self.pending.add(msg_id)
647 self.db.add_record(msg_id, record)
645 self.db.add_record(msg_id, record)
648
646
649 def save_task_result(self, idents, msg):
647 def save_task_result(self, idents, msg):
650 """save the result of a completed task."""
648 """save the result of a completed task."""
651 client_id = idents[0]
649 client_id = idents[0]
652 try:
650 try:
653 msg = self.session.unpack_message(msg, content=False)
651 msg = self.session.unpack_message(msg, content=False)
654 except:
652 except:
655 self.log.error("task::invalid task result message send to %r: %s"%(
653 self.log.error("task::invalid task result message send to %r: %s"%(
656 client_id, msg), exc_info=True)
654 client_id, msg), exc_info=True)
657 return
655 return
659
657
660 parent = msg['parent_header']
658 parent = msg['parent_header']
661 if not parent:
659 if not parent:
662 # print msg
660 # print msg
663 self.log.warn("Task %r had no parent!"%msg)
661 self.log.warn("Task %r had no parent!"%msg)
664 return
662 return
665 msg_id = parent['msg_id']
663 msg_id = parent['msg_id']
666
664
667 header = msg['header']
665 header = msg['header']
668 engine_uuid = header.get('engine', None)
666 engine_uuid = header.get('engine', None)
669 eid = self.by_ident.get(engine_uuid, None)
667 eid = self.by_ident.get(engine_uuid, None)
670
668
671 if msg_id in self.pending:
669 if msg_id in self.pending:
672 self.pending.remove(msg_id)
670 self.pending.remove(msg_id)
673 self.all_completed.add(msg_id)
671 self.all_completed.add(msg_id)
674 if eid is not None:
672 if eid is not None:
675 self.completed[eid].append(msg_id)
673 self.completed[eid].append(msg_id)
676 if msg_id in self.tasks[eid]:
674 if msg_id in self.tasks[eid]:
677 self.tasks[eid].remove(msg_id)
675 self.tasks[eid].remove(msg_id)
678 completed = datetime.strptime(header['date'], ISO8601)
676 completed = datetime.strptime(header['date'], ISO8601)
679 started = header.get('started', None)
677 started = header.get('started', None)
680 if started is not None:
678 if started is not None:
681 started = datetime.strptime(started, ISO8601)
679 started = datetime.strptime(started, ISO8601)
682 result = {
680 result = {
683 'result_header' : header,
681 'result_header' : header,
684 'result_content': msg['content'],
682 'result_content': msg['content'],
685 'started' : started,
683 'started' : started,
686 'completed' : completed,
684 'completed' : completed,
687 'engine_uuid': engine_uuid
685 'engine_uuid': engine_uuid
688 }
686 }
689 if MongoDB is not None and isinstance(self.db, MongoDB):
687 if MongoDB is not None and isinstance(self.db, MongoDB):
690 result['result_buffers'] = map(Binary, msg['buffers'])
688 result['result_buffers'] = map(Binary, msg['buffers'])
691 else:
689 else:
692 result['result_buffers'] = msg['buffers']
690 result['result_buffers'] = msg['buffers']
693 self.db.update_record(msg_id, result)
691 self.db.update_record(msg_id, result)
694
692
695 else:
693 else:
696 self.log.debug("task::unknown task %s finished"%msg_id)
694 self.log.debug("task::unknown task %s finished"%msg_id)
697
695
698 def save_task_destination(self, idents, msg):
696 def save_task_destination(self, idents, msg):
699 try:
697 try:
700 msg = self.session.unpack_message(msg, content=True)
698 msg = self.session.unpack_message(msg, content=True)
701 except:
699 except:
702 self.log.error("task::invalid task tracking message", exc_info=True)
700 self.log.error("task::invalid task tracking message", exc_info=True)
703 return
701 return
704 content = msg['content']
702 content = msg['content']
705 # print (content)
703 # print (content)
706 msg_id = content['msg_id']
704 msg_id = content['msg_id']
707 engine_uuid = content['engine_id']
705 engine_uuid = content['engine_id']
708 eid = self.by_ident[engine_uuid]
706 eid = self.by_ident[engine_uuid]
709
707
710 self.log.info("task::task %s arrived on %s"%(msg_id, eid))
708 self.log.info("task::task %s arrived on %s"%(msg_id, eid))
711 # if msg_id in self.mia:
709 # if msg_id in self.mia:
712 # self.mia.remove(msg_id)
710 # self.mia.remove(msg_id)
713 # else:
711 # else:
714 # self.log.debug("task::task %s not listed as MIA?!"%(msg_id))
712 # self.log.debug("task::task %s not listed as MIA?!"%(msg_id))
715
713
716 self.tasks[eid].append(msg_id)
714 self.tasks[eid].append(msg_id)
717 # self.pending[msg_id][1].update(received=datetime.now(),engine=(eid,engine_uuid))
715 # self.pending[msg_id][1].update(received=datetime.now(),engine=(eid,engine_uuid))
718 self.db.update_record(msg_id, dict(engine_uuid=engine_uuid))
716 self.db.update_record(msg_id, dict(engine_uuid=engine_uuid))
719
717
720 def mia_task_request(self, idents, msg):
718 def mia_task_request(self, idents, msg):
721 raise NotImplementedError
719 raise NotImplementedError
722 # client_id = idents[0]
720 # client_id = idents[0]
723 # content = dict(mia=self.mia,status='ok')
721 # content = dict(mia=self.mia,status='ok')
724 # self.session.send('mia_reply', content=content, idents=client_id)
722 # self.session.send('mia_reply', content=content, idents=client_id)
725
723
726
724
727 #--------------------- IOPub Traffic ------------------------------
725 #--------------------- IOPub Traffic ------------------------------
728
726
729 def save_iopub_message(self, topics, msg):
727 def save_iopub_message(self, topics, msg):
730 """save an iopub message into the db"""
728 """save an iopub message into the db"""
731 # print (topics)
729 # print (topics)
732 try:
730 try:
733 msg = self.session.unpack_message(msg, content=True)
731 msg = self.session.unpack_message(msg, content=True)
734 except:
732 except:
735 self.log.error("iopub::invalid IOPub message", exc_info=True)
733 self.log.error("iopub::invalid IOPub message", exc_info=True)
736 return
734 return
737
735
738 parent = msg['parent_header']
736 parent = msg['parent_header']
739 if not parent:
737 if not parent:
740 self.log.error("iopub::invalid IOPub message: %s"%msg)
738 self.log.error("iopub::invalid IOPub message: %s"%msg)
741 return
739 return
742 msg_id = parent['msg_id']
740 msg_id = parent['msg_id']
743 msg_type = msg['msg_type']
741 msg_type = msg['msg_type']
744 content = msg['content']
742 content = msg['content']
745
743
746 # ensure msg_id is in db
744 # ensure msg_id is in db
747 try:
745 try:
748 rec = self.db.get_record(msg_id)
746 rec = self.db.get_record(msg_id)
749 except:
747 except:
750 self.log.error("iopub::IOPub message has invalid parent", exc_info=True)
748 self.log.error("iopub::IOPub message has invalid parent", exc_info=True)
751 return
749 return
752 # stream
750 # stream
753 d = {}
751 d = {}
754 if msg_type == 'stream':
752 if msg_type == 'stream':
755 name = content['name']
753 name = content['name']
756 s = rec[name] or ''
754 s = rec[name] or ''
757 d[name] = s + content['data']
755 d[name] = s + content['data']
758
756
759 elif msg_type == 'pyerr':
757 elif msg_type == 'pyerr':
760 d['pyerr'] = content
758 d['pyerr'] = content
761 else:
759 else:
762 d[msg_type] = content['data']
760 d[msg_type] = content['data']
763
761
764 self.db.update_record(msg_id, d)
762 self.db.update_record(msg_id, d)
765
763
766
764
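For 'stream' messages, save_iopub_message appends new output to whatever is already stored under the stream's name, so repeated stdout or stderr messages accumulate into one string per stream. A toy illustration of that accumulation:

rec = {'stdout': None}           # fresh record: no output captured yet
for chunk in ('hello ', 'world\n'):
    s = rec['stdout'] or ''      # same pattern as above: treat None as ''
    rec['stdout'] = s + chunk
print(repr(rec['stdout']))       # 'hello world\n'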
767
765
768 #-------------------------------------------------------------------------
766 #-------------------------------------------------------------------------
769 # Registration requests
767 # Registration requests
770 #-------------------------------------------------------------------------
768 #-------------------------------------------------------------------------
771
769
772 def connection_request(self, client_id, msg):
770 def connection_request(self, client_id, msg):
773 """Reply with connection addresses for clients."""
771 """Reply with connection addresses for clients."""
774 self.log.info("client::client %s connected"%client_id)
772 self.log.info("client::client %s connected"%client_id)
775 content = dict(status='ok')
773 content = dict(status='ok')
776 content.update(self.client_info)
774 content.update(self.client_info)
777 jsonable = {}
775 jsonable = {}
778 for k,v in self.keytable.iteritems():
776 for k,v in self.keytable.iteritems():
779 jsonable[str(k)] = v
777 jsonable[str(k)] = v
780 content['engines'] = jsonable
778 content['engines'] = jsonable
781 self.session.send(self.registrar, 'connection_reply', content, parent=msg, ident=client_id)
779 self.session.send(self.registrar, 'connection_reply', content, parent=msg, ident=client_id)
782
780
783 def register_engine(self, reg, msg):
781 def register_engine(self, reg, msg):
784 """Register a new engine."""
782 """Register a new engine."""
785 content = msg['content']
783 content = msg['content']
786 try:
784 try:
787 queue = content['queue']
785 queue = content['queue']
788 except KeyError:
786 except KeyError:
789 self.log.error("registration::queue not specified", exc_info=True)
787 self.log.error("registration::queue not specified", exc_info=True)
790 return
788 return
791 heart = content.get('heartbeat', None)
789 heart = content.get('heartbeat', None)
792 """register a new engine, and create the socket(s) necessary"""
790 """register a new engine, and create the socket(s) necessary"""
793 eid = self._next_id
791 eid = self._next_id
794 # print (eid, queue, reg, heart)
792 # print (eid, queue, reg, heart)
795
793
796 self.log.debug("registration::register_engine(%i, %r, %r, %r)"%(eid, queue, reg, heart))
794 self.log.debug("registration::register_engine(%i, %r, %r, %r)"%(eid, queue, reg, heart))
797
795
798 content = dict(id=eid,status='ok')
796 content = dict(id=eid,status='ok')
799 content.update(self.engine_info)
797 content.update(self.engine_info)
800 # check if requesting available IDs:
798 # check if requesting available IDs:
801 if queue in self.by_ident:
799 if queue in self.by_ident:
802 try:
800 try:
803 raise KeyError("queue_id %r in use"%queue)
801 raise KeyError("queue_id %r in use"%queue)
804 except:
802 except:
805 content = wrap_exception()
803 content = error.wrap_exception()
806 self.log.error("queue_id %r in use"%queue, exc_info=True)
804 self.log.error("queue_id %r in use"%queue, exc_info=True)
807 elif heart in self.hearts: # need to check unique hearts?
805 elif heart in self.hearts: # need to check unique hearts?
808 try:
806 try:
809 raise KeyError("heart_id %r in use"%heart)
807 raise KeyError("heart_id %r in use"%heart)
810 except:
808 except:
811 self.log.error("heart_id %r in use"%heart, exc_info=True)
809 self.log.error("heart_id %r in use"%heart, exc_info=True)
812 content = wrap_exception()
810 content = error.wrap_exception()
813 else:
811 else:
814 for h, pack in self.incoming_registrations.iteritems():
812 for h, pack in self.incoming_registrations.iteritems():
815 if heart == h:
813 if heart == h:
816 try:
814 try:
817 raise KeyError("heart_id %r in use"%heart)
815 raise KeyError("heart_id %r in use"%heart)
818 except:
816 except:
819 self.log.error("heart_id %r in use"%heart, exc_info=True)
817 self.log.error("heart_id %r in use"%heart, exc_info=True)
820 content = wrap_exception()
818 content = error.wrap_exception()
821 break
819 break
822 elif queue == pack[1]:
820 elif queue == pack[1]:
823 try:
821 try:
824 raise KeyError("queue_id %r in use"%queue)
822 raise KeyError("queue_id %r in use"%queue)
825 except:
823 except:
826 self.log.error("queue_id %r in use"%queue, exc_info=True)
824 self.log.error("queue_id %r in use"%queue, exc_info=True)
827 content = wrap_exception()
825 content = error.wrap_exception()
828 break
826 break
829
827
830 msg = self.session.send(self.registrar, "registration_reply",
828 msg = self.session.send(self.registrar, "registration_reply",
831 content=content,
829 content=content,
832 ident=reg)
830 ident=reg)
833
831
834 if content['status'] == 'ok':
832 if content['status'] == 'ok':
835 if heart in self.heartmonitor.hearts:
833 if heart in self.heartmonitor.hearts:
836 # already beating
834 # already beating
837 self.incoming_registrations[heart] = (eid,queue,reg[0],None)
835 self.incoming_registrations[heart] = (eid,queue,reg[0],None)
838 self.finish_registration(heart)
836 self.finish_registration(heart)
839 else:
837 else:
840 purge = lambda : self._purge_stalled_registration(heart)
838 purge = lambda : self._purge_stalled_registration(heart)
841 dc = ioloop.DelayedCallback(purge, self.registration_timeout, self.loop)
839 dc = ioloop.DelayedCallback(purge, self.registration_timeout, self.loop)
842 dc.start()
840 dc.start()
843 self.incoming_registrations[heart] = (eid,queue,reg[0],dc)
841 self.incoming_registrations[heart] = (eid,queue,reg[0],dc)
844 else:
842 else:
845 self.log.error("registration::registration %i failed: %s"%(eid, content['evalue']))
843 self.log.error("registration::registration %i failed: %s"%(eid, content['evalue']))
846 return eid
844 return eid
847
845
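register_engine expects the request's content to carry the engine's queue identity and, optionally, its heartbeat identity; on success the reply echoes the assigned integer id plus the engine connection dict. A sketch of the shapes involved (field names taken from the code above; UUIDs and addresses hypothetical):

# what an engine sends in a registration_request
request_content = {'queue': 'engine-queue-uuid',
                   'heartbeat': 'engine-heart-uuid'}

# what the hub replies with on success (engine_info fields abbreviated)
reply_content = {'id': 0, 'status': 'ok',
                 'control': 'tcp://127.0.0.1:10101',
                 'mux': 'tcp://127.0.0.1:10102'}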
848 def unregister_engine(self, ident, msg):
846 def unregister_engine(self, ident, msg):
849 """Unregister an engine that explicitly requested to leave."""
847 """Unregister an engine that explicitly requested to leave."""
850 try:
848 try:
851 eid = msg['content']['id']
849 eid = msg['content']['id']
852 except:
850 except:
853 self.log.error("registration::bad engine id for unregistration: %s"%ident, exc_info=True)
851 self.log.error("registration::bad engine id for unregistration: %s"%ident, exc_info=True)
854 return
852 return
855 self.log.info("registration::unregister_engine(%s)"%eid)
853 self.log.info("registration::unregister_engine(%s)"%eid)
856 content=dict(id=eid, queue=self.engines[eid].queue)
854 content=dict(id=eid, queue=self.engines[eid].queue)
857 self.ids.remove(eid)
855 self.ids.remove(eid)
858 self.keytable.pop(eid)
856 self.keytable.pop(eid)
859 ec = self.engines.pop(eid)
857 ec = self.engines.pop(eid)
860 self.hearts.pop(ec.heartbeat)
858 self.hearts.pop(ec.heartbeat)
861 self.by_ident.pop(ec.queue)
859 self.by_ident.pop(ec.queue)
862 self.completed.pop(eid)
860 self.completed.pop(eid)
863 for msg_id in self.queues.pop(eid):
861 for msg_id in self.queues.pop(eid):
864 msg = self.pending.remove(msg_id)
862 msg = self.pending.remove(msg_id)
865 ############## TODO: HANDLE IT ################
863 ############## TODO: HANDLE IT ################
866
864
867 if self.notifier:
865 if self.notifier:
868 self.session.send(self.notifier, "unregistration_notification", content=content)
866 self.session.send(self.notifier, "unregistration_notification", content=content)
869
867
870 def finish_registration(self, heart):
868 def finish_registration(self, heart):
871 """Second half of engine registration, called after our HeartMonitor
869 """Second half of engine registration, called after our HeartMonitor
872 has received a beat from the Engine's Heart."""
870 has received a beat from the Engine's Heart."""
873 try:
871 try:
874 (eid,queue,reg,purge) = self.incoming_registrations.pop(heart)
872 (eid,queue,reg,purge) = self.incoming_registrations.pop(heart)
875 except KeyError:
873 except KeyError:
876 self.log.error("registration::tried to finish nonexistant registration", exc_info=True)
874 self.log.error("registration::tried to finish nonexistant registration", exc_info=True)
877 return
875 return
878 self.log.info("registration::finished registering engine %i:%r"%(eid,queue))
876 self.log.info("registration::finished registering engine %i:%r"%(eid,queue))
879 if purge is not None:
877 if purge is not None:
880 purge.stop()
878 purge.stop()
881 control = queue
879 control = queue
882 self.ids.add(eid)
880 self.ids.add(eid)
883 self.keytable[eid] = queue
881 self.keytable[eid] = queue
884 self.engines[eid] = EngineConnector(id=eid, queue=queue, registration=reg,
882 self.engines[eid] = EngineConnector(id=eid, queue=queue, registration=reg,
885 control=control, heartbeat=heart)
883 control=control, heartbeat=heart)
886 self.by_ident[queue] = eid
884 self.by_ident[queue] = eid
887 self.queues[eid] = list()
885 self.queues[eid] = list()
888 self.tasks[eid] = list()
886 self.tasks[eid] = list()
889 self.completed[eid] = list()
887 self.completed[eid] = list()
890 self.hearts[heart] = eid
888 self.hearts[heart] = eid
891 content = dict(id=eid, queue=self.engines[eid].queue)
889 content = dict(id=eid, queue=self.engines[eid].queue)
892 if self.notifier:
890 if self.notifier:
893 self.session.send(self.notifier, "registration_notification", content=content)
891 self.session.send(self.notifier, "registration_notification", content=content)
894 self.log.info("engine::Engine Connected: %i"%eid)
892 self.log.info("engine::Engine Connected: %i"%eid)
895
893
896 def _purge_stalled_registration(self, heart):
894 def _purge_stalled_registration(self, heart):
897 if heart in self.incoming_registrations:
895 if heart in self.incoming_registrations:
898 eid = self.incoming_registrations.pop(heart)[0]
896 eid = self.incoming_registrations.pop(heart)[0]
899 self.log.info("registration::purging stalled registration: %i"%eid)
897 self.log.info("registration::purging stalled registration: %i"%eid)
902
900
903 #-------------------------------------------------------------------------
901 #-------------------------------------------------------------------------
904 # Client Requests
902 # Client Requests
905 #-------------------------------------------------------------------------
903 #-------------------------------------------------------------------------
906
904
907 def shutdown_request(self, client_id, msg):
905 def shutdown_request(self, client_id, msg):
908 """handle shutdown request."""
906 """handle shutdown request."""
909 # s = self.context.socket(zmq.XREQ)
907 # s = self.context.socket(zmq.XREQ)
910 # s.connect(self.client_connections['mux'])
908 # s.connect(self.client_connections['mux'])
911 # time.sleep(0.1)
909 # time.sleep(0.1)
912 # for eid,ec in self.engines.iteritems():
910 # for eid,ec in self.engines.iteritems():
913 # self.session.send(s, 'shutdown_request', content=dict(restart=False), ident=ec.queue)
911 # self.session.send(s, 'shutdown_request', content=dict(restart=False), ident=ec.queue)
914 # time.sleep(1)
912 # time.sleep(1)
915 self.session.send(self.clientele, 'shutdown_reply', content={'status': 'ok'}, ident=client_id)
913 self.session.send(self.clientele, 'shutdown_reply', content={'status': 'ok'}, ident=client_id)
916 dc = ioloop.DelayedCallback(lambda : self._shutdown(), 1000, self.loop)
914 dc = ioloop.DelayedCallback(lambda : self._shutdown(), 1000, self.loop)
917 dc.start()
915 dc.start()
918
916
919 def _shutdown(self):
917 def _shutdown(self):
920 self.log.info("hub::hub shutting down.")
918 self.log.info("hub::hub shutting down.")
921 time.sleep(0.1)
919 time.sleep(0.1)
922 sys.exit(0)
920 sys.exit(0)
923
921
924
922
925 def check_load(self, client_id, msg):
923 def check_load(self, client_id, msg):
926 content = msg['content']
924 content = msg['content']
927 try:
925 try:
928 targets = content['targets']
926 targets = content['targets']
929 targets = self._validate_targets(targets)
927 targets = self._validate_targets(targets)
930 except:
928 except:
931 content = wrap_exception()
929 content = error.wrap_exception()
932 self.session.send(self.clientele, "hub_error",
930 self.session.send(self.clientele, "hub_error",
933 content=content, ident=client_id)
931 content=content, ident=client_id)
934 return
932 return
935
933
936 content = dict(status='ok')
934 content = dict(status='ok')
937 # loads = {}
935 # loads = {}
938 for t in targets:
936 for t in targets:
939 content[bytes(t)] = len(self.queues[t])+len(self.tasks[t])
937 content[bytes(t)] = len(self.queues[t])+len(self.tasks[t])
940 self.session.send(self.clientele, "load_reply", content=content, ident=client_id)
938 self.session.send(self.clientele, "load_reply", content=content, ident=client_id)
941
939
942
940
943 def queue_status(self, client_id, msg):
941 def queue_status(self, client_id, msg):
944 """Return the Queue status of one or more targets.
942 """Return the Queue status of one or more targets.
945 if verbose: return the msg_ids
943 if verbose: return the msg_ids
946 else: return len of each type.
944 else: return len of each type.
947 keys: queue (pending MUX jobs)
945 keys: queue (pending MUX jobs)
948 tasks (pending Task jobs)
946 tasks (pending Task jobs)
949 completed (finished jobs from both queues)"""
947 completed (finished jobs from both queues)"""
950 content = msg['content']
948 content = msg['content']
951 targets = content['targets']
949 targets = content['targets']
952 try:
950 try:
953 targets = self._validate_targets(targets)
951 targets = self._validate_targets(targets)
954 except:
952 except:
955 content = wrap_exception()
953 content = error.wrap_exception()
956 self.session.send(self.clientele, "hub_error",
954 self.session.send(self.clientele, "hub_error",
957 content=content, ident=client_id)
955 content=content, ident=client_id)
958 return
956 return
959 verbose = content.get('verbose', False)
957 verbose = content.get('verbose', False)
960 content = dict(status='ok')
958 content = dict(status='ok')
961 for t in targets:
959 for t in targets:
962 queue = self.queues[t]
960 queue = self.queues[t]
963 completed = self.completed[t]
961 completed = self.completed[t]
964 tasks = self.tasks[t]
962 tasks = self.tasks[t]
965 if not verbose:
963 if not verbose:
966 queue = len(queue)
964 queue = len(queue)
967 completed = len(completed)
965 completed = len(completed)
968 tasks = len(tasks)
966 tasks = len(tasks)
969 content[bytes(t)] = {'queue': queue, 'completed': completed, 'tasks': tasks}
967 content[bytes(t)] = {'queue': queue, 'completed': completed, 'tasks': tasks}
970 # pending
968 # pending
971 self.session.send(self.clientele, "queue_reply", content=content, ident=client_id)
969 self.session.send(self.clientele, "queue_reply", content=content, ident=client_id)
972
970
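queue_status replies with one entry per requested engine id; with verbose=False each entry holds counts, with verbose=True it holds the msg_id lists themselves. An example non-verbose reply (engine ids and counts hypothetical; keys are stringified ids, matching bytes(t) above):

reply = {'status': 'ok',
         '0': {'queue': 2, 'completed': 10, 'tasks': 1},
         '1': {'queue': 0, 'completed': 12, 'tasks': 3}}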
973 def purge_results(self, client_id, msg):
971 def purge_results(self, client_id, msg):
974 """Purge results from memory. This method is more valuable before we move
972 """Purge results from memory. This method is more valuable before we move
975 to a DB based message storage mechanism."""
973 to a DB based message storage mechanism."""
976 content = msg['content']
974 content = msg['content']
977 msg_ids = content.get('msg_ids', [])
975 msg_ids = content.get('msg_ids', [])
978 reply = dict(status='ok')
976 reply = dict(status='ok')
979 if msg_ids == 'all':
977 if msg_ids == 'all':
980 self.db.drop_matching_records(dict(completed={'$ne':None}))
978 self.db.drop_matching_records(dict(completed={'$ne':None}))
981 else:
979 else:
982 for msg_id in msg_ids:
980 for msg_id in msg_ids:
983 if msg_id in self.all_completed:
981 if msg_id in self.all_completed:
984 self.db.drop_record(msg_id)
982 self.db.drop_record(msg_id)
985 else:
983 else:
986 if msg_id in self.pending:
984 if msg_id in self.pending:
987 try:
985 try:
988 raise IndexError("msg pending: %r"%msg_id)
986 raise IndexError("msg pending: %r"%msg_id)
989 except:
987 except:
990 reply = wrap_exception()
988 reply = error.wrap_exception()
991 else:
989 else:
992 try:
990 try:
993 raise IndexError("No such msg: %r"%msg_id)
991 raise IndexError("No such msg: %r"%msg_id)
994 except:
992 except:
995 reply = wrap_exception()
993 reply = error.wrap_exception()
996 break
994 break
997 eids = content.get('engine_ids', [])
995 eids = content.get('engine_ids', [])
998 for eid in eids:
996 for eid in eids:
999 if eid not in self.engines:
997 if eid not in self.engines:
1000 try:
998 try:
1001 raise IndexError("No such engine: %i"%eid)
999 raise IndexError("No such engine: %i"%eid)
1002 except:
1000 except:
1003 reply = wrap_exception()
1001 reply = error.wrap_exception()
1004 break
1002 break
1005 msg_ids = self.completed.pop(eid)
1003 msg_ids = self.completed.pop(eid)
1006 uid = self.engines[eid].queue
1004 uid = self.engines[eid].queue
1007 self.db.drop_matching_records(dict(engine_uuid=uid, completed={'$ne':None}))
1005 self.db.drop_matching_records(dict(engine_uuid=uid, completed={'$ne':None}))
1008
1006
1009 self.session.send(self.clientele, 'purge_reply', content=reply, ident=client_id)
1007 self.session.send(self.clientele, 'purge_reply', content=reply, ident=client_id)
1010
1008
1011 def resubmit_task(self, client_id, msg, buffers):
1009 def resubmit_task(self, client_id, msg, buffers):
1012 """Resubmit a task."""
1010 """Resubmit a task."""
1013 raise NotImplementedError
1011 raise NotImplementedError
1014
1012
1015 def get_results(self, client_id, msg):
1013 def get_results(self, client_id, msg):
1016 """Get the result of 1 or more messages."""
1014 """Get the result of 1 or more messages."""
1017 content = msg['content']
1015 content = msg['content']
1018 msg_ids = sorted(set(content['msg_ids']))
1016 msg_ids = sorted(set(content['msg_ids']))
1019 statusonly = content.get('status_only', False)
1017 statusonly = content.get('status_only', False)
1020 pending = []
1018 pending = []
1021 completed = []
1019 completed = []
1022 content = dict(status='ok')
1020 content = dict(status='ok')
1023 content['pending'] = pending
1021 content['pending'] = pending
1024 content['completed'] = completed
1022 content['completed'] = completed
1025 buffers = []
1023 buffers = []
1026 if not statusonly:
1024 if not statusonly:
1027 content['results'] = {}
1025 content['results'] = {}
1028 records = self.db.find_records(dict(msg_id={'$in':msg_ids}))
1026 records = self.db.find_records(dict(msg_id={'$in':msg_ids}))
1029 for msg_id in msg_ids:
1027 for msg_id in msg_ids:
1030 if msg_id in self.pending:
1028 if msg_id in self.pending:
1031 pending.append(msg_id)
1029 pending.append(msg_id)
1032 elif msg_id in self.all_completed:
1030 elif msg_id in self.all_completed:
1033 completed.append(msg_id)
1031 completed.append(msg_id)
1034 if not statusonly:
1032 if not statusonly:
1035 rec = records[msg_id]
1033 rec = records[msg_id]
1036 io_dict = {}
1034 io_dict = {}
1037 for key in 'pyin pyout pyerr stdout stderr'.split():
1035 for key in 'pyin pyout pyerr stdout stderr'.split():
1038 io_dict[key] = rec[key]
1036 io_dict[key] = rec[key]
1039 content[msg_id] = { 'result_content': rec['result_content'],
1037 content[msg_id] = { 'result_content': rec['result_content'],
1040 'header': rec['header'],
1038 'header': rec['header'],
1041 'result_header' : rec['result_header'],
1039 'result_header' : rec['result_header'],
1042 'io' : io_dict,
1040 'io' : io_dict,
1043 }
1041 }
1044 buffers.extend(map(str, rec['result_buffers']))
1042 buffers.extend(map(str, rec['result_buffers']))
1045 else:
1043 else:
1046 try:
1044 try:
1047 raise KeyError('No such message: '+msg_id)
1045 raise KeyError('No such message: '+msg_id)
1048 except:
1046 except:
1049 content = wrap_exception()
1047 content = error.wrap_exception()
1050 break
1048 break
1051 self.session.send(self.clientele, "result_reply", content=content,
1049 self.session.send(self.clientele, "result_reply", content=content,
1052 parent=msg, ident=client_id,
1050 parent=msg, ident=client_id,
1053 buffers=buffers)
1051 buffers=buffers)
1054
1052
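For reference, a full (non-status_only) result_reply assembled by get_results above has roughly this shape; the msg_ids are illustrative placeholders:

reply_content = {
    'status': 'ok',
    'pending': ['msg-c'],              # still outstanding
    'completed': ['msg-a', 'msg-b'],   # finished; per-task records follow
    'msg-a': {
        'result_content': {},          # rec['result_content']
        'header': {},                  # header of the original request
        'result_header': {},           # header of the result message
        'io': {'pyin': None, 'pyout': None, 'pyerr': None,
               'stdout': '', 'stderr': ''},
    },
}
# rec['result_buffers'] travel separately, in the `buffers` list.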
@@ -1,174 +1,203 b''
1 """Remote Functions and decorators for the client."""
1 """Remote Functions and decorators for the client."""
2 #-----------------------------------------------------------------------------
2 #-----------------------------------------------------------------------------
3 # Copyright (C) 2010 The IPython Development Team
3 # Copyright (C) 2010 The IPython Development Team
4 #
4 #
5 # Distributed under the terms of the BSD License. The full license is in
5 # Distributed under the terms of the BSD License. The full license is in
6 # the file COPYING, distributed as part of this software.
6 # the file COPYING, distributed as part of this software.
7 #-----------------------------------------------------------------------------
7 #-----------------------------------------------------------------------------
8
8
9 #-----------------------------------------------------------------------------
9 #-----------------------------------------------------------------------------
10 # Imports
10 # Imports
11 #-----------------------------------------------------------------------------
11 #-----------------------------------------------------------------------------
12
12
13 import warnings
13 import warnings
14
14
15 from IPython.testing import decorators as testdec
15 from IPython.testing import decorators as testdec
16
16
17 from . import map as Map
17 from . import map as Map
18 from .asyncresult import AsyncMapResult
18 from .asyncresult import AsyncMapResult
19
19
20 #-----------------------------------------------------------------------------
20 #-----------------------------------------------------------------------------
21 # Decorators
21 # Decorators
22 #-----------------------------------------------------------------------------
22 #-----------------------------------------------------------------------------
23
23
24 @testdec.skip_doctest
24 @testdec.skip_doctest
25 def remote(client, bound=True, block=None, targets=None, balanced=None):
25 def remote(client, bound=True, block=None, targets=None, balanced=None):
26 """Turn a function into a remote function.
26 """Turn a function into a remote function.
27
27
28 This decorator can be used to turn a function into a remote function:
28 This decorator can be used to turn a function into a remote function:
29
29
30 In [1]: @remote(client,block=True)
30 In [1]: @remote(client,block=True)
31 ...: def func(a):
31 ...: def func(a):
32 ...: pass
32 ...: pass
33 """
33 """
34
34
35 def remote_function(f):
35 def remote_function(f):
36 return RemoteFunction(client, f, bound, block, targets, balanced)
36 return RemoteFunction(client, f, bound, block, targets, balanced)
37 return remote_function
37 return remote_function
38
38
39 @testdec.skip_doctest
39 @testdec.skip_doctest
40 def parallel(client, dist='b', bound=True, block=None, targets='all', balanced=None):
40 def parallel(client, dist='b', bound=True, block=None, targets='all', balanced=None):
41 """Turn a function into a parallel remote function.
41 """Turn a function into a parallel remote function.
42
42
43 This decorator can be used for map:
43 This decorator can be used for map:
44
44
45 In [1]: @parallel(client,block=True)
45 In [1]: @parallel(client,block=True)
46 ...: def func(a):
46 ...: def func(a):
47 ...: pass
47 ...: pass
48 """
48 """
49
49
50 def parallel_function(f):
50 def parallel_function(f):
51 return ParallelFunction(client, f, dist, bound, block, targets, balanced)
51 return ParallelFunction(client, f, dist, bound, block, targets, balanced)
52 return parallel_function
52 return parallel_function
53
53
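A hedged usage sketch of the two decorators, assuming a running cluster and a connected Client (the names rc, getpid and double are illustrative):

from IPython.zmq.parallel.client import Client
rc = Client()

@remote(rc, block=True)
def getpid():
    # executes remotely, on whichever engine(s) targets/balanced select
    import os
    return os.getpid()

@parallel(rc, dist='b', block=True)
def double(x):
    return 2*x

pid = getpid()                   # submit and wait for the result
doubled = double.map(range(16))  # element-wise, like the builtin map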
54 #--------------------------------------------------------------------------
54 #--------------------------------------------------------------------------
55 # Classes
55 # Classes
56 #--------------------------------------------------------------------------
56 #--------------------------------------------------------------------------
57
57
58 class RemoteFunction(object):
58 class RemoteFunction(object):
59 """Turn an existing function into a remote function.
59 """Turn an existing function into a remote function.
60
60
61 Parameters
61 Parameters
62 ----------
62 ----------
63
63
64 client : Client instance
64 client : Client instance
65 The client to be used to connect to engines
65 The client to be used to connect to engines
66 f : callable
66 f : callable
67 The function to be wrapped into a remote function
67 The function to be wrapped into a remote function
68 bound : bool [default: False]
68 bound : bool [default: False]
69 Whether to affect the remote namespace when called
69 Whether to affect the remote namespace when called
70 block : bool [default: None]
70 block : bool [default: None]
71 Whether to wait for results or not. The default behavior is
71 Whether to wait for results or not. The default behavior is
72 to use the current `block` attribute of `client`
72 to use the current `block` attribute of `client`
73 targets : valid target list [default: all]
73 targets : valid target list [default: all]
74 The targets on which to execute.
74 The targets on which to execute.
75 balanced : bool
75 balanced : bool
76 Whether to load-balance with the Task scheduler or not
76 Whether to load-balance with the Task scheduler or not
77 """
77 """
78
78
79 client = None # the remote connection
79 client = None # the remote connection
80 func = None # the wrapped function
80 func = None # the wrapped function
81 block = None # whether to block
81 block = None # whether to block
82 bound = None # whether to affect the namespace
82 bound = None # whether to affect the namespace
83 targets = None # where to execute
83 targets = None # where to execute
84 balanced = None # whether to load-balance
84 balanced = None # whether to load-balance
85
85
86 def __init__(self, client, f, bound=False, block=None, targets=None, balanced=None):
86 def __init__(self, client, f, bound=False, block=None, targets=None, balanced=None):
87 self.client = client
87 self.client = client
88 self.func = f
88 self.func = f
89 self.block=block
89 self.block=block
90 self.bound=bound
90 self.bound=bound
91 self.targets=targets
91 self.targets=targets
92 if balanced is None:
92 if balanced is None:
93 if targets is None:
93 if targets is None:
94 balanced = True
94 balanced = True
95 else:
95 else:
96 balanced = False
96 balanced = False
97 self.balanced = balanced
97 self.balanced = balanced
98
98
99 def __call__(self, *args, **kwargs):
99 def __call__(self, *args, **kwargs):
100 return self.client.apply(self.func, args=args, kwargs=kwargs,
100 return self.client.apply(self.func, args=args, kwargs=kwargs,
101 block=self.block, targets=self.targets, bound=self.bound, balanced=self.balanced)
101 block=self.block, targets=self.targets, bound=self.bound, balanced=self.balanced)
102
102
103
103
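The `balanced` default in the constructor above encodes one rule: no explicit targets means load-balanced (Task scheduler) execution, while explicit targets mean multiplexed execution. A sketch of that rule (infer_balanced is a hypothetical helper, not part of the module):

def infer_balanced(balanced, targets):
    if balanced is not None:
        return balanced        # an explicit choice always wins
    return targets is None     # default: balance iff no targets given

assert infer_balanced(None, None) is True      # load-balanced
assert infer_balanced(None, [0, 1]) is False   # multiplexed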
104 class ParallelFunction(RemoteFunction):
104 class ParallelFunction(RemoteFunction):
105 """Class for mapping a function to sequences."""
105 """Class for mapping a function to sequences.
106
107 This will distribute the sequences according to a mapper, and call
108 the function on each sub-sequence. If called via map, then the function
109 will be called once on each element, rather than once on each sub-sequence.
110
111 Parameters
112 ----------
113
114 client : Client instance
115 The client to be used to connect to engines
116 f : callable
117 The function to be wrapped into a remote function
118 bound : bool [default: False]
119 Whether to affect the remote namespace when called
120 block : bool [default: None]
121 Whether to wait for results or not. The default behavior is
122 to use the current `block` attribute of `client`
123 targets : valid target list [default: all]
124 The targets on which to execute.
125 balanced : bool
126 Whether to load-balance with the Task scheduler or not
127 chunk_size : int or None
128 The size of chunk to use when breaking up sequences in a load-balanced manner
129 """
106 def __init__(self, client, f, dist='b', bound=False, block=None, targets='all', balanced=None, chunk_size=None):
130 def __init__(self, client, f, dist='b', bound=False, block=None, targets='all', balanced=None, chunk_size=None):
107 super(ParallelFunction, self).__init__(client,f,bound,block,targets,balanced)
131 super(ParallelFunction, self).__init__(client,f,bound,block,targets,balanced)
108 self.chunk_size = chunk_size
132 self.chunk_size = chunk_size
109
133
110 mapClass = Map.dists[dist]
134 mapClass = Map.dists[dist]
111 self.mapObject = mapClass()
135 self.mapObject = mapClass()
112
136
113 def __call__(self, *sequences):
137 def __call__(self, *sequences):
114 len_0 = len(sequences[0])
138 len_0 = len(sequences[0])
115 for s in sequences:
139 for s in sequences:
116 if len(s)!=len_0:
140 if len(s)!=len_0:
117 msg = 'all sequences must have equal length, but %i!=%i'%(len_0,len(s))
141 msg = 'all sequences must have equal length, but %i!=%i'%(len_0,len(s))
118 raise ValueError(msg)
142 raise ValueError(msg)
119
143
120 if self.balanced:
144 if self.balanced:
121 if self.chunk_size:
145 if self.chunk_size:
122 nparts = len_0/self.chunk_size + int(len_0%self.chunk_size > 0)
146 nparts = len_0/self.chunk_size + int(len_0%self.chunk_size > 0)
123 else:
147 else:
124 nparts = len_0
148 nparts = len_0
125 targets = [self.targets]*nparts
149 targets = [self.targets]*nparts
126 else:
150 else:
127 if self.chunk_size:
151 if self.chunk_size:
128 warnings.warn("`chunk_size` is ignored when `balanced=False`", UserWarning)
152 warnings.warn("`chunk_size` is ignored when `balanced=False`", UserWarning)
129 # multiplexed:
153 # multiplexed:
130 targets = self.client._build_targets(self.targets)[-1]
154 targets = self.client._build_targets(self.targets)[-1]
131 nparts = len(targets)
155 nparts = len(targets)
132
156
133 msg_ids = []
157 msg_ids = []
134 # my_f = lambda *a: map(self.func, *a)
158 # my_f = lambda *a: map(self.func, *a)
135 for index, t in enumerate(targets):
159 for index, t in enumerate(targets):
136 args = []
160 args = []
137 for seq in sequences:
161 for seq in sequences:
138 part = self.mapObject.getPartition(seq, index, nparts)
162 part = self.mapObject.getPartition(seq, index, nparts)
139 if len(part) == 0:
163 if len(part) == 0:
140 continue
164 continue
141 else:
165 else:
142 args.append(part)
166 args.append(part)
143 if not args:
167 if not args:
144 continue
168 continue
145
169
146 # print (args)
170 # print (args)
147 if hasattr(self, '_map'):
171 if hasattr(self, '_map'):
148 f = map
172 f = map
149 args = [self.func]+args
173 args = [self.func]+args
150 else:
174 else:
151 f=self.func
175 f=self.func
152 ar = self.client.apply(f, args=args, block=False, bound=self.bound,
176 ar = self.client.apply(f, args=args, block=False, bound=self.bound,
153 targets=t, balanced=self.balanced)
177 targets=t, balanced=self.balanced)
154
178
155 msg_ids.append(ar.msg_ids[0])
179 msg_ids.append(ar.msg_ids[0])
156
180
157 r = AsyncMapResult(self.client, msg_ids, self.mapObject, fname=self.func.__name__)
181 r = AsyncMapResult(self.client, msg_ids, self.mapObject, fname=self.func.__name__)
158 if self.block:
182 if self.block:
159 try:
183 try:
160 return r.get()
184 return r.get()
161 except KeyboardInterrupt:
185 except KeyboardInterrupt:
162 return r
186 return r
163 else:
187 else:
164 return r
188 return r
165
189
166 def map(self, *sequences):
190 def map(self, *sequences):
167 """Call a function on each element of a sequence remotely."""
191 """Call a function on each element of a sequence remotely.
192 This should behave very much like the builtin map, but returns an AsyncMapResult
193 if self.block is False.
194 """
195 # set _map as a flag for use inside self.__call__
168 self._map = True
196 self._map = True
169 try:
197 try:
170 ret = self.__call__(*sequences)
198 ret = self.__call__(*sequences)
171 finally:
199 finally:
172 del self._map
200 del self._map
173 return ret
201 return ret
174
202
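The nparts computation in __call__ above is a ceiling division written for Python 2 ints, where / floors. An equivalent spelling (n_partitions is a hypothetical helper for illustration):

def n_partitions(length, chunk_size):
    # length/chunk_size + int(length % chunk_size > 0) == ceil(length/chunk_size)
    return length // chunk_size + int(length % chunk_size > 0)

assert n_partitions(10, 3) == 4   # 10 elements in chunks of 3
assert n_partitions(9, 3) == 3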
203 __all__ = ['remote', 'parallel', 'RemoteFunction', 'ParallelFunction']
\ No newline at end of file
@@ -1,581 +1,580 b''
1 """The Python scheduler for rich scheduling.
1 """The Python scheduler for rich scheduling.
2
2
3 The Pure ZMQ scheduler does not allow routing schemes other than LRU,
3 The Pure ZMQ scheduler does not allow routing schemes other than LRU,
4 nor does it check msg_id DAG dependencies. For those, a slightly slower
4 nor does it check msg_id DAG dependencies. For those, a slightly slower
5 Python Scheduler exists.
5 Python Scheduler exists.
6 """
6 """
7
7
8 #----------------------------------------------------------------------
8 #----------------------------------------------------------------------
9 # Imports
9 # Imports
10 #----------------------------------------------------------------------
10 #----------------------------------------------------------------------
11
11
12 from __future__ import print_function
12 from __future__ import print_function
13
13
14 import logging
14 import logging
15 import sys
15 import sys
16
16
17 from datetime import datetime, timedelta
17 from datetime import datetime, timedelta
18 from random import randint, random
18 from random import randint, random
19 from types import FunctionType
19 from types import FunctionType
20
20
21 try:
21 try:
22 import numpy
22 import numpy
23 except ImportError:
23 except ImportError:
24 numpy = None
24 numpy = None
25
25
26 import zmq
26 import zmq
27 from zmq.eventloop import ioloop, zmqstream
27 from zmq.eventloop import ioloop, zmqstream
28
28
29 # local imports
29 # local imports
30 from IPython.external.decorator import decorator
30 from IPython.external.decorator import decorator
31 from IPython.utils.traitlets import Instance, Dict, List, Set
31 from IPython.utils.traitlets import Instance, Dict, List, Set
32
32
33 from . import error
33 from . import error
34 from . import streamsession as ss
35 from .dependency import Dependency
34 from .dependency import Dependency
36 from .entry_point import connect_logger, local_logger
35 from .entry_point import connect_logger, local_logger
37 from .factory import SessionFactory
36 from .factory import SessionFactory
38
37
39
38
40 @decorator
39 @decorator
41 def logged(f,self,*args,**kwargs):
40 def logged(f,self,*args,**kwargs):
42 # print ("#--------------------")
41 # print ("#--------------------")
43 self.log.debug("scheduler::%s(*%s,**%s)"%(f.func_name, args, kwargs))
42 self.log.debug("scheduler::%s(*%s,**%s)"%(f.func_name, args, kwargs))
44 # print ("#--")
43 # print ("#--")
45 return f(self,*args, **kwargs)
44 return f(self,*args, **kwargs)
46
45
47 #----------------------------------------------------------------------
46 #----------------------------------------------------------------------
48 # Chooser functions
47 # Chooser functions
49 #----------------------------------------------------------------------
48 #----------------------------------------------------------------------
50
49
51 def plainrandom(loads):
50 def plainrandom(loads):
52 """Plain random pick."""
51 """Plain random pick."""
53 n = len(loads)
52 n = len(loads)
54 return randint(0,n-1)
53 return randint(0,n-1)
55
54
56 def lru(loads):
55 def lru(loads):
57 """Always pick the front of the line.
56 """Always pick the front of the line.
58
57
59 The content of `loads` is ignored.
58 The content of `loads` is ignored.
60
59
61 Assumes LRU ordering of loads, with oldest first.
60 Assumes LRU ordering of loads, with oldest first.
62 """
61 """
63 return 0
62 return 0
64
63
65 def twobin(loads):
64 def twobin(loads):
66 """Pick two at random, use the LRU of the two.
65 """Pick two at random, use the LRU of the two.
67
66
68 The content of loads is ignored.
67 The content of loads is ignored.
69
68
70 Assumes LRU ordering of loads, with oldest first.
69 Assumes LRU ordering of loads, with oldest first.
71 """
70 """
72 n = len(loads)
71 n = len(loads)
73 a = randint(0,n-1)
72 a = randint(0,n-1)
74 b = randint(0,n-1)
73 b = randint(0,n-1)
75 return min(a,b)
74 return min(a,b)
76
75
77 def weighted(loads):
76 def weighted(loads):
78 """Pick two at random using inverse load as weight.
77 """Pick two at random using inverse load as weight.
79
78
80 Return the less loaded of the two.
79 Return the less loaded of the two.
81 """
80 """
82 # weight 0 a million times more than 1:
81 # weight 0 a million times more than 1:
83 weights = 1./(1e-6+numpy.array(loads))
82 weights = 1./(1e-6+numpy.array(loads))
84 sums = weights.cumsum()
83 sums = weights.cumsum()
85 t = sums[-1]
84 t = sums[-1]
86 x = random()*t
85 x = random()*t
87 y = random()*t
86 y = random()*t
88 idx = 0
87 idx = 0
89 idy = 0
88 idy = 0
90 while sums[idx] < x:
89 while sums[idx] < x:
91 idx += 1
90 idx += 1
92 while sums[idy] < y:
91 while sums[idy] < y:
93 idy += 1
92 idy += 1
94 if weights[idy] > weights[idx]:
93 if weights[idy] > weights[idx]:
95 return idy
94 return idy
96 else:
95 else:
97 return idx
96 return idx
98
97
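The 1e-6 floor in weighted above keeps an idle engine (load 0) from producing an infinite weight while still making it roughly a million times more likely than an engine with load 1. A quick numeric check (requires numpy; the loads are illustrative):

import numpy
loads = [0, 1, 3]
weights = 1./(1e-6 + numpy.array(loads))
probs = weights / weights.sum()
assert probs[0] > 0.999   # the idle engine dominates the draw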
99 def leastload(loads):
98 def leastload(loads):
100 """Always choose the lowest load.
99 """Always choose the lowest load.
101
100
102 If the lowest load occurs more than once, the first
101 If the lowest load occurs more than once, the first
103 occurrence will be used. If loads has LRU ordering, this means
102 occurrence will be used. If loads has LRU ordering, this means
104 the LRU of those with the lowest load is chosen.
103 the LRU of those with the lowest load is chosen.
105 """
104 """
106 return loads.index(min(loads))
105 return loads.index(min(loads))
107
106
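All four choosers share one contract: loads is a list of per-engine loads in LRU order (oldest engine first), and the return value is an index into it. Run against the functions above, with an illustrative loads list:

loads = [2, 0, 1, 0]
assert lru(loads) == 0                       # front of the line; loads ignored
assert leastload(loads) == 1                 # first occurrence of the minimum
assert 0 <= twobin(loads) < len(loads)       # LRU of two random picks
assert 0 <= plainrandom(loads) < len(loads)  # uniform random pick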
108 #---------------------------------------------------------------------
107 #---------------------------------------------------------------------
109 # Classes
108 # Classes
110 #---------------------------------------------------------------------
109 #---------------------------------------------------------------------
111 # store empty default dependency:
110 # store empty default dependency:
112 MET = Dependency([])
111 MET = Dependency([])
113
112
114 class TaskScheduler(SessionFactory):
113 class TaskScheduler(SessionFactory):
115 """Python TaskScheduler object.
114 """Python TaskScheduler object.
116
115
117 This is the simplest object that supports msg_id based
116 This is the simplest object that supports msg_id based
118 DAG dependencies. *Only* task msg_ids are checked, not
117 DAG dependencies. *Only* task msg_ids are checked, not
119 msg_ids of jobs submitted via the MUX queue.
118 msg_ids of jobs submitted via the MUX queue.
120
119
121 """
120 """
122
121
123 # input arguments:
122 # input arguments:
124 scheme = Instance(FunctionType, default=leastload) # function for determining the destination
123 scheme = Instance(FunctionType, default=leastload) # function for determining the destination
125 client_stream = Instance(zmqstream.ZMQStream) # client-facing stream
124 client_stream = Instance(zmqstream.ZMQStream) # client-facing stream
126 engine_stream = Instance(zmqstream.ZMQStream) # engine-facing stream
125 engine_stream = Instance(zmqstream.ZMQStream) # engine-facing stream
127 notifier_stream = Instance(zmqstream.ZMQStream) # hub-facing sub stream
126 notifier_stream = Instance(zmqstream.ZMQStream) # hub-facing sub stream
128 mon_stream = Instance(zmqstream.ZMQStream) # hub-facing pub stream
127 mon_stream = Instance(zmqstream.ZMQStream) # hub-facing pub stream
129
128
130 # internals:
129 # internals:
131 graph = Dict() # dict by msg_id of [ msg_ids that depend on key ]
130 graph = Dict() # dict by msg_id of [ msg_ids that depend on key ]
132 depending = Dict() # dict by msg_id of (msg_id, raw_msg, after, follow)
131 depending = Dict() # dict by msg_id of (msg_id, raw_msg, after, follow)
133 pending = Dict() # dict by engine_uuid of submitted tasks
132 pending = Dict() # dict by engine_uuid of submitted tasks
134 completed = Dict() # dict by engine_uuid of completed tasks
133 completed = Dict() # dict by engine_uuid of completed tasks
135 failed = Dict() # dict by engine_uuid of failed tasks
134 failed = Dict() # dict by engine_uuid of failed tasks
136 destinations = Dict() # dict by msg_id of engine_uuids where jobs ran (reverse of completed+failed)
135 destinations = Dict() # dict by msg_id of engine_uuids where jobs ran (reverse of completed+failed)
137 clients = Dict() # dict by msg_id for who submitted the task
136 clients = Dict() # dict by msg_id for who submitted the task
138 targets = List() # list of target IDENTs
137 targets = List() # list of target IDENTs
139 loads = List() # list of engine loads
138 loads = List() # list of engine loads
140 all_completed = Set() # set of all completed tasks
139 all_completed = Set() # set of all completed tasks
141 all_failed = Set() # set of all failed tasks
140 all_failed = Set() # set of all failed tasks
142 all_done = Set() # set of all finished tasks=union(completed,failed)
141 all_done = Set() # set of all finished tasks=union(completed,failed)
143 all_ids = Set() # set of all submitted task IDs
142 all_ids = Set() # set of all submitted task IDs
144 blacklist = Dict() # dict by msg_id of locations where a job has encountered UnmetDependency
143 blacklist = Dict() # dict by msg_id of locations where a job has encountered UnmetDependency
145 auditor = Instance('zmq.eventloop.ioloop.PeriodicCallback')
144 auditor = Instance('zmq.eventloop.ioloop.PeriodicCallback')
146
145
147
146
148 def start(self):
147 def start(self):
149 self.engine_stream.on_recv(self.dispatch_result, copy=False)
148 self.engine_stream.on_recv(self.dispatch_result, copy=False)
150 self._notification_handlers = dict(
149 self._notification_handlers = dict(
151 registration_notification = self._register_engine,
150 registration_notification = self._register_engine,
152 unregistration_notification = self._unregister_engine
151 unregistration_notification = self._unregister_engine
153 )
152 )
154 self.notifier_stream.on_recv(self.dispatch_notification)
153 self.notifier_stream.on_recv(self.dispatch_notification)
155 self.auditor = ioloop.PeriodicCallback(self.audit_timeouts, 2e3, self.loop) # every 2s (0.5 Hz)
154 self.auditor = ioloop.PeriodicCallback(self.audit_timeouts, 2e3, self.loop) # every 2s (0.5 Hz)
156 self.auditor.start()
155 self.auditor.start()
157 self.log.info("Scheduler started...%r"%self)
156 self.log.info("Scheduler started...%r"%self)
158
157
159 def resume_receiving(self):
158 def resume_receiving(self):
160 """Resume accepting jobs."""
159 """Resume accepting jobs."""
161 self.client_stream.on_recv(self.dispatch_submission, copy=False)
160 self.client_stream.on_recv(self.dispatch_submission, copy=False)
162
161
163 def stop_receiving(self):
162 def stop_receiving(self):
164 """Stop accepting jobs while there are no engines.
163 """Stop accepting jobs while there are no engines.
165 Leave them in the ZMQ queue."""
164 Leave them in the ZMQ queue."""
166 self.client_stream.on_recv(None)
165 self.client_stream.on_recv(None)
167
166
168 #-----------------------------------------------------------------------
167 #-----------------------------------------------------------------------
169 # [Un]Registration Handling
168 # [Un]Registration Handling
170 #-----------------------------------------------------------------------
169 #-----------------------------------------------------------------------
171
170
172 def dispatch_notification(self, msg):
171 def dispatch_notification(self, msg):
173 """dispatch register/unregister events."""
172 """dispatch register/unregister events."""
174 idents,msg = self.session.feed_identities(msg)
173 idents,msg = self.session.feed_identities(msg)
175 msg = self.session.unpack_message(msg)
174 msg = self.session.unpack_message(msg)
176 msg_type = msg['msg_type']
175 msg_type = msg['msg_type']
177 handler = self._notification_handlers.get(msg_type, None)
176 handler = self._notification_handlers.get(msg_type, None)
178 if handler is None:
177 if handler is None:
179 raise Exception("Unhandled message type: %s"%msg_type)
178 raise Exception("Unhandled message type: %s"%msg_type)
180 else:
179 else:
181 try:
180 try:
182 handler(str(msg['content']['queue']))
181 handler(str(msg['content']['queue']))
183 except KeyError:
182 except KeyError:
184 self.log.error("task::Invalid notification msg: %s"%msg)
183 self.log.error("task::Invalid notification msg: %s"%msg)
185
184
186 @logged
185 @logged
187 def _register_engine(self, uid):
186 def _register_engine(self, uid):
188 """New engine with ident `uid` became available."""
187 """New engine with ident `uid` became available."""
189 # head of the line:
188 # head of the line:
190 self.targets.insert(0,uid)
189 self.targets.insert(0,uid)
191 self.loads.insert(0,0)
190 self.loads.insert(0,0)
192 # initialize sets
191 # initialize sets
193 self.completed[uid] = set()
192 self.completed[uid] = set()
194 self.failed[uid] = set()
193 self.failed[uid] = set()
195 self.pending[uid] = {}
194 self.pending[uid] = {}
196 if len(self.targets) == 1:
195 if len(self.targets) == 1:
197 self.resume_receiving()
196 self.resume_receiving()
198
197
199 def _unregister_engine(self, uid):
198 def _unregister_engine(self, uid):
200 """Existing engine with ident `uid` became unavailable."""
199 """Existing engine with ident `uid` became unavailable."""
201 if len(self.targets) == 1:
200 if len(self.targets) == 1:
202 # this was our only engine
201 # this was our only engine
203 self.stop_receiving()
202 self.stop_receiving()
204
203
205 # handle any potentially finished tasks:
204 # handle any potentially finished tasks:
206 self.engine_stream.flush()
205 self.engine_stream.flush()
207
206
208 self.completed.pop(uid)
207 self.completed.pop(uid)
209 self.failed.pop(uid)
208 self.failed.pop(uid)
210 # don't pop destinations, because it might be used later
209 # don't pop destinations, because it might be used later
211 # map(self.destinations.pop, self.completed.pop(uid))
210 # map(self.destinations.pop, self.completed.pop(uid))
212 # map(self.destinations.pop, self.failed.pop(uid))
211 # map(self.destinations.pop, self.failed.pop(uid))
213
212
214 idx = self.targets.index(uid)
213 idx = self.targets.index(uid)
215 self.targets.pop(idx)
214 self.targets.pop(idx)
216 self.loads.pop(idx)
215 self.loads.pop(idx)
217
216
218 # wait 5 seconds before cleaning up pending jobs, since the results might
217 # wait 5 seconds before cleaning up pending jobs, since the results might
219 # still be incoming
218 # still be incoming
220 if self.pending[uid]:
219 if self.pending[uid]:
221 dc = ioloop.DelayedCallback(lambda : self.handle_stranded_tasks(uid), 5000, self.loop)
220 dc = ioloop.DelayedCallback(lambda : self.handle_stranded_tasks(uid), 5000, self.loop)
222 dc.start()
221 dc.start()
223
222
224 @logged
223 @logged
225 def handle_stranded_tasks(self, engine):
224 def handle_stranded_tasks(self, engine):
226 """Deal with jobs resident in an engine that died."""
225 """Deal with jobs resident in an engine that died."""
227 lost = self.pending.pop(engine)
226 lost = self.pending.pop(engine)
228
227
229 for msg_id, (raw_msg, targets, MET, follow, timeout) in lost.iteritems():
228 for msg_id, (raw_msg, targets, MET, follow, timeout) in lost.iteritems():
230 self.all_failed.add(msg_id)
229 self.all_failed.add(msg_id)
231 self.all_done.add(msg_id)
230 self.all_done.add(msg_id)
232 idents,msg = self.session.feed_identities(raw_msg, copy=False)
231 idents,msg = self.session.feed_identities(raw_msg, copy=False)
233 msg = self.session.unpack_message(msg, copy=False, content=False)
232 msg = self.session.unpack_message(msg, copy=False, content=False)
234 parent = msg['header']
233 parent = msg['header']
235 idents = [idents[0],engine]+idents[1:]
234 idents = [idents[0],engine]+idents[1:]
236 print (idents)
235 print (idents)
237 try:
236 try:
238 raise error.EngineError("Engine %r died while running task %r"%(engine, msg_id))
237 raise error.EngineError("Engine %r died while running task %r"%(engine, msg_id))
239 except:
238 except:
240 content = ss.wrap_exception()
239 content = error.wrap_exception()
241 msg = self.session.send(self.client_stream, 'apply_reply', content,
240 msg = self.session.send(self.client_stream, 'apply_reply', content,
242 parent=parent, ident=idents)
241 parent=parent, ident=idents)
243 self.session.send(self.mon_stream, msg, ident=['outtask']+idents)
242 self.session.send(self.mon_stream, msg, ident=['outtask']+idents)
244 self.update_graph(msg_id)
243 self.update_graph(msg_id)
245
244
246
245
247 #-----------------------------------------------------------------------
246 #-----------------------------------------------------------------------
248 # Job Submission
247 # Job Submission
249 #-----------------------------------------------------------------------
248 #-----------------------------------------------------------------------
250 @logged
249 @logged
251 def dispatch_submission(self, raw_msg):
250 def dispatch_submission(self, raw_msg):
252 """Dispatch job submission to appropriate handlers."""
251 """Dispatch job submission to appropriate handlers."""
253 # ensure targets up to date:
252 # ensure targets up to date:
254 self.notifier_stream.flush()
253 self.notifier_stream.flush()
255 try:
254 try:
256 idents, msg = self.session.feed_identities(raw_msg, copy=False)
255 idents, msg = self.session.feed_identities(raw_msg, copy=False)
257 msg = self.session.unpack_message(msg, content=False, copy=False)
256 msg = self.session.unpack_message(msg, content=False, copy=False)
258 except:
257 except:
259 self.log.error("task::Invalid task: %s"%raw_msg, exc_info=True)
258 self.log.error("task::Invalid task: %s"%raw_msg, exc_info=True)
260 return
259 return
261
260
262 # send to monitor
261 # send to monitor
263 self.mon_stream.send_multipart(['intask']+raw_msg, copy=False)
262 self.mon_stream.send_multipart(['intask']+raw_msg, copy=False)
264
263
265 header = msg['header']
264 header = msg['header']
266 msg_id = header['msg_id']
265 msg_id = header['msg_id']
267 self.all_ids.add(msg_id)
266 self.all_ids.add(msg_id)
268
267
269 # targets
268 # targets
270 targets = set(header.get('targets', []))
269 targets = set(header.get('targets', []))
271
270
272 # time dependencies
271 # time dependencies
273 after = Dependency(header.get('after', []))
272 after = Dependency(header.get('after', []))
274 if after.all:
273 if after.all:
275 after.difference_update(self.all_completed)
274 after.difference_update(self.all_completed)
276 if not after.success_only:
275 if not after.success_only:
277 after.difference_update(self.all_failed)
276 after.difference_update(self.all_failed)
278 if after.check(self.all_completed, self.all_failed):
277 if after.check(self.all_completed, self.all_failed):
279 # recast as empty set, if `after` already met,
278 # recast as empty set, if `after` already met,
280 # to prevent unnecessary set comparisons
279 # to prevent unnecessary set comparisons
281 after = MET
280 after = MET
282
281
283 # location dependencies
282 # location dependencies
284 follow = Dependency(header.get('follow', []))
283 follow = Dependency(header.get('follow', []))
285
284
286 # turn timeouts into datetime objects:
285 # turn timeouts into datetime objects:
287 timeout = header.get('timeout', None)
286 timeout = header.get('timeout', None)
288 if timeout:
287 if timeout:
289 timeout = datetime.now() + timedelta(0,timeout,0)
288 timeout = datetime.now() + timedelta(0,timeout,0)
290
289
291 args = [raw_msg, targets, after, follow, timeout]
290 args = [raw_msg, targets, after, follow, timeout]
292
291
293 # validate and reduce dependencies:
292 # validate and reduce dependencies:
294 for dep in after,follow:
293 for dep in after,follow:
295 # check valid:
294 # check valid:
296 if msg_id in dep or dep.difference(self.all_ids):
295 if msg_id in dep or dep.difference(self.all_ids):
297 self.depending[msg_id] = args
296 self.depending[msg_id] = args
298 return self.fail_unreachable(msg_id, error.InvalidDependency)
297 return self.fail_unreachable(msg_id, error.InvalidDependency)
299 # check if unreachable:
298 # check if unreachable:
300 if dep.unreachable(self.all_failed):
299 if dep.unreachable(self.all_failed):
301 self.depending[msg_id] = args
300 self.depending[msg_id] = args
302 return self.fail_unreachable(msg_id)
301 return self.fail_unreachable(msg_id)
303
302
304 if after.check(self.all_completed, self.all_failed):
303 if after.check(self.all_completed, self.all_failed):
305 # time deps already met, try to run
304 # time deps already met, try to run
306 if not self.maybe_run(msg_id, *args):
305 if not self.maybe_run(msg_id, *args):
307 # can't run yet
306 # can't run yet
308 self.save_unmet(msg_id, *args)
307 self.save_unmet(msg_id, *args)
309 else:
308 else:
310 self.save_unmet(msg_id, *args)
309 self.save_unmet(msg_id, *args)
311
310
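A condensed restatement of the three submission-time checks above, with plain sets standing in for Dependency objects (ids are illustrative; the real code uses .unreachable() and .check()):

msg_id = 'task-9'
dep = set(['task-1', 'task-9'])     # after/follow as a plain set
all_ids = set(['task-1'])
all_completed, all_failed = set(['task-1']), set()

invalid = msg_id in dep or bool(dep.difference(all_ids))  # self/unknown dep
impossible = bool(dep & all_failed)   # rough stand-in for unreachable()
ready = dep.issubset(all_completed)   # rough stand-in for after.check()
assert invalid   # task-9 depends on itself, so it fails fast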
312 # @logged
311 # @logged
313 def audit_timeouts(self):
312 def audit_timeouts(self):
314 """Audit all waiting tasks for expired timeouts."""
313 """Audit all waiting tasks for expired timeouts."""
315 now = datetime.now()
314 now = datetime.now()
316 for msg_id in self.depending.keys():
315 for msg_id in self.depending.keys():
317 # must recheck, in case one failure cascaded to another:
316 # must recheck, in case one failure cascaded to another:
318 if msg_id in self.depending:
317 if msg_id in self.depending:
319 raw,targets,after,follow,timeout = self.depending[msg_id]
318 raw,targets,after,follow,timeout = self.depending[msg_id]
320 if timeout and timeout < now:
319 if timeout and timeout < now:
321 self.fail_unreachable(msg_id, timeout=True)
320 self.fail_unreachable(msg_id, timeout=True)
322
321
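The header's timeout is a duration in seconds: dispatch_submission converts it to an absolute deadline via timedelta(0, timeout, 0), and the two-second periodic audit above compares that deadline against datetime.now(). For example:

from datetime import datetime, timedelta

timeout = 2.5                                    # seconds, from the header
deadline = datetime.now() + timedelta(0, timeout, 0)
expired = deadline < datetime.now()              # False for the next 2.5s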
323 @logged
322 @logged
324 def fail_unreachable(self, msg_id, why=error.ImpossibleDependency):
323 def fail_unreachable(self, msg_id, why=error.ImpossibleDependency):
325 """a task has become unreachable, send a reply with an ImpossibleDependency
324 """a task has become unreachable, send a reply with an ImpossibleDependency
326 error."""
325 error."""
327 if msg_id not in self.depending:
326 if msg_id not in self.depending:
328 self.log.error("msg %r already failed!"%msg_id)
327 self.log.error("msg %r already failed!"%msg_id)
329 return
328 return
330 raw_msg,targets,after,follow,timeout = self.depending.pop(msg_id)
329 raw_msg,targets,after,follow,timeout = self.depending.pop(msg_id)
331 for mid in follow.union(after):
330 for mid in follow.union(after):
332 if mid in self.graph:
331 if mid in self.graph:
333 self.graph[mid].remove(msg_id)
332 self.graph[mid].remove(msg_id)
334
333
335 # FIXME: unpacking a message I've already unpacked, but didn't save:
334 # FIXME: unpacking a message I've already unpacked, but didn't save:
336 idents,msg = self.session.feed_identities(raw_msg, copy=False)
335 idents,msg = self.session.feed_identities(raw_msg, copy=False)
337 msg = self.session.unpack_message(msg, copy=False, content=False)
336 msg = self.session.unpack_message(msg, copy=False, content=False)
338 header = msg['header']
337 header = msg['header']
339
338
340 try:
339 try:
341 raise why()
340 raise why()
342 except:
341 except:
343 content = ss.wrap_exception()
342 content = error.wrap_exception()
344
343
345 self.all_done.add(msg_id)
344 self.all_done.add(msg_id)
346 self.all_failed.add(msg_id)
345 self.all_failed.add(msg_id)
347
346
348 msg = self.session.send(self.client_stream, 'apply_reply', content,
347 msg = self.session.send(self.client_stream, 'apply_reply', content,
349 parent=header, ident=idents)
348 parent=header, ident=idents)
350 self.session.send(self.mon_stream, msg, ident=['outtask']+idents)
349 self.session.send(self.mon_stream, msg, ident=['outtask']+idents)
351
350
352 self.update_graph(msg_id, success=False)
351 self.update_graph(msg_id, success=False)
353
352
354 @logged
353 @logged
355 def maybe_run(self, msg_id, raw_msg, targets, after, follow, timeout):
354 def maybe_run(self, msg_id, raw_msg, targets, after, follow, timeout):
356 """check location dependencies, and run if they are met."""
355 """check location dependencies, and run if they are met."""
357 blacklist = self.blacklist.setdefault(msg_id, set())
356 blacklist = self.blacklist.setdefault(msg_id, set())
358 if follow or targets or blacklist:
357 if follow or targets or blacklist:
359 # we need a can_run filter
358 # we need a can_run filter
360 def can_run(idx):
359 def can_run(idx):
361 target = self.targets[idx]
360 target = self.targets[idx]
362 # check targets
361 # check targets
363 if targets and target not in targets:
362 if targets and target not in targets:
364 return False
363 return False
365 # check blacklist
364 # check blacklist
366 if target in blacklist:
365 if target in blacklist:
367 return False
366 return False
368 # check follow
367 # check follow
369 return follow.check(self.completed[target], self.failed[target])
368 return follow.check(self.completed[target], self.failed[target])
370
369
371 indices = filter(can_run, range(len(self.targets)))
370 indices = filter(can_run, range(len(self.targets)))
372 if not indices:
371 if not indices:
373 # couldn't run
372 # couldn't run
374 if follow.all:
373 if follow.all:
375 # check follow for impossibility
374 # check follow for impossibility
376 dests = set()
375 dests = set()
377 relevant = self.all_completed if follow.success_only else self.all_done
376 relevant = self.all_completed if follow.success_only else self.all_done
378 for m in follow.intersection(relevant):
377 for m in follow.intersection(relevant):
379 dests.add(self.destinations[m])
378 dests.add(self.destinations[m])
380 if len(dests) > 1:
379 if len(dests) > 1:
381 self.fail_unreachable(msg_id)
380 self.fail_unreachable(msg_id)
382 return False
381 return False
383 if targets:
382 if targets:
384 # check blacklist+targets for impossibility
383 # check blacklist+targets for impossibility
385 targets.difference_update(blacklist)
384 targets.difference_update(blacklist)
386 if not targets or not targets.intersection(self.targets):
385 if not targets or not targets.intersection(self.targets):
387 self.fail_unreachable(msg_id)
386 self.fail_unreachable(msg_id)
388 return False
387 return False
389 return False
388 return False
390 else:
389 else:
391 indices = None
390 indices = None
392
391
393 self.submit_task(msg_id, raw_msg, targets, follow, timeout, indices)
392 self.submit_task(msg_id, raw_msg, targets, follow, timeout, indices)
394 return True
393 return True
395
394
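maybe_run's can_run filter composes three per-engine checks: explicit targets, the per-task blacklist, and the follow (location) dependency. Reduced to plain values with illustrative ids (the follow check is omitted in this sketch):

targets = set()                  # empty means any engine is allowed
blacklist = set(['engine-2'])    # engines that raised UnmetDependency
engines = ['engine-1', 'engine-2', 'engine-3']

def can_run(eng):
    if targets and eng not in targets:
        return False
    return eng not in blacklist

assert list(filter(can_run, engines)) == ['engine-1', 'engine-3']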
396 @logged
395 @logged
397 def save_unmet(self, msg_id, raw_msg, targets, after, follow, timeout):
396 def save_unmet(self, msg_id, raw_msg, targets, after, follow, timeout):
398 """Save a message for later submission when its dependencies are met."""
397 """Save a message for later submission when its dependencies are met."""
399 self.depending[msg_id] = [raw_msg,targets,after,follow,timeout]
398 self.depending[msg_id] = [raw_msg,targets,after,follow,timeout]
400 # track the ids in follow or after, but not those already finished
399 # track the ids in follow or after, but not those already finished
401 for dep_id in after.union(follow).difference(self.all_done):
400 for dep_id in after.union(follow).difference(self.all_done):
402 if dep_id not in self.graph:
401 if dep_id not in self.graph:
403 self.graph[dep_id] = set()
402 self.graph[dep_id] = set()
404 self.graph[dep_id].add(msg_id)
403 self.graph[dep_id].add(msg_id)
405
404
406 @logged
405 @logged
407 def submit_task(self, msg_id, raw_msg, targets, follow, timeout, indices=None):
406 def submit_task(self, msg_id, raw_msg, targets, follow, timeout, indices=None):
408 """Submit a task to any of a subset of our targets."""
407 """Submit a task to any of a subset of our targets."""
409 if indices:
408 if indices:
410 loads = [self.loads[i] for i in indices]
409 loads = [self.loads[i] for i in indices]
411 else:
410 else:
412 loads = self.loads
411 loads = self.loads
413 idx = self.scheme(loads)
412 idx = self.scheme(loads)
414 if indices:
413 if indices:
415 idx = indices[idx]
414 idx = indices[idx]
416 target = self.targets[idx]
415 target = self.targets[idx]
417 # print (target, map(str, msg[:3]))
416 # print (target, map(str, msg[:3]))
418 self.engine_stream.send(target, flags=zmq.SNDMORE, copy=False)
417 self.engine_stream.send(target, flags=zmq.SNDMORE, copy=False)
419 self.engine_stream.send_multipart(raw_msg, copy=False)
418 self.engine_stream.send_multipart(raw_msg, copy=False)
420 self.add_job(idx)
419 self.add_job(idx)
421 self.pending[target][msg_id] = (raw_msg, targets, MET, follow, timeout)
420 self.pending[target][msg_id] = (raw_msg, targets, MET, follow, timeout)
422 content = dict(msg_id=msg_id, engine_id=target)
421 content = dict(msg_id=msg_id, engine_id=target)
423 self.session.send(self.mon_stream, 'task_destination', content=content,
422 self.session.send(self.mon_stream, 'task_destination', content=content,
424 ident=['tracktask',self.session.session])
423 ident=['tracktask',self.session.session])
425
424
426 #-----------------------------------------------------------------------
425 #-----------------------------------------------------------------------
427 # Result Handling
426 # Result Handling
428 #-----------------------------------------------------------------------
427 #-----------------------------------------------------------------------
429 @logged
428 @logged
430 def dispatch_result(self, raw_msg):
429 def dispatch_result(self, raw_msg):
431 """dispatch method for result replies"""
430 """dispatch method for result replies"""
432 try:
431 try:
433 idents,msg = self.session.feed_identities(raw_msg, copy=False)
432 idents,msg = self.session.feed_identities(raw_msg, copy=False)
434 msg = self.session.unpack_message(msg, content=False, copy=False)
433 msg = self.session.unpack_message(msg, content=False, copy=False)
435 except:
434 except:
436 self.log.error("task::Invalid result: %s"%raw_msg, exc_info=True)
435 self.log.error("task::Invalid result: %s"%raw_msg, exc_info=True)
437 return
436 return
438
437
439 header = msg['header']
438 header = msg['header']
440 if header.get('dependencies_met', True):
439 if header.get('dependencies_met', True):
441 success = (header['status'] == 'ok')
440 success = (header['status'] == 'ok')
442 self.handle_result(idents, msg['parent_header'], raw_msg, success)
441 self.handle_result(idents, msg['parent_header'], raw_msg, success)
443 # send to Hub monitor
442 # send to Hub monitor
444 self.mon_stream.send_multipart(['outtask']+raw_msg, copy=False)
443 self.mon_stream.send_multipart(['outtask']+raw_msg, copy=False)
445 else:
444 else:
446 self.handle_unmet_dependency(idents, msg['parent_header'])
445 self.handle_unmet_dependency(idents, msg['parent_header'])
447
446
448 @logged
447 @logged
449 def handle_result(self, idents, parent, raw_msg, success=True):
448 def handle_result(self, idents, parent, raw_msg, success=True):
450 """handle a real task result, either success or failure"""
449 """handle a real task result, either success or failure"""
451 # first, relay result to client
450 # first, relay result to client
452 engine = idents[0]
451 engine = idents[0]
453 client = idents[1]
452 client = idents[1]
454 # swap_ids for XREP-XREP mirror
453 # swap_ids for XREP-XREP mirror
455 raw_msg[:2] = [client,engine]
454 raw_msg[:2] = [client,engine]
456 # print (map(str, raw_msg[:4]))
455 # print (map(str, raw_msg[:4]))
457 self.client_stream.send_multipart(raw_msg, copy=False)
456 self.client_stream.send_multipart(raw_msg, copy=False)
458 # now, update our data structures
457 # now, update our data structures
459 msg_id = parent['msg_id']
458 msg_id = parent['msg_id']
460 self.blacklist.pop(msg_id, None)
459 self.blacklist.pop(msg_id, None)
461 self.pending[engine].pop(msg_id)
460 self.pending[engine].pop(msg_id)
462 if success:
461 if success:
463 self.completed[engine].add(msg_id)
462 self.completed[engine].add(msg_id)
464 self.all_completed.add(msg_id)
463 self.all_completed.add(msg_id)
465 else:
464 else:
466 self.failed[engine].add(msg_id)
465 self.failed[engine].add(msg_id)
467 self.all_failed.add(msg_id)
466 self.all_failed.add(msg_id)
468 self.all_done.add(msg_id)
467 self.all_done.add(msg_id)
469 self.destinations[msg_id] = engine
468 self.destinations[msg_id] = engine
470
469
471 self.update_graph(msg_id, success)
470 self.update_graph(msg_id, success)
472
471
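The raw_msg[:2] swap above reroutes the multipart message between the two XREP sockets: frames arrive from the engine stream led by the engine's identity, but must leave the client stream led by the client's identity. A sketch with hypothetical identity frames:

raw_msg = [b'engine-uuid', b'client-ident', b'<header>', b'<content>']
engine, client = raw_msg[0], raw_msg[1]
raw_msg[:2] = [client, engine]
assert raw_msg[:2] == [b'client-ident', b'engine-uuid']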
473 @logged
472 @logged
474 def handle_unmet_dependency(self, idents, parent):
473 def handle_unmet_dependency(self, idents, parent):
475 """handle an unmet dependency"""
474 """handle an unmet dependency"""
476 engine = idents[0]
475 engine = idents[0]
477 msg_id = parent['msg_id']
476 msg_id = parent['msg_id']
478
477
479 if msg_id not in self.blacklist:
478 if msg_id not in self.blacklist:
480 self.blacklist[msg_id] = set()
479 self.blacklist[msg_id] = set()
481 self.blacklist[msg_id].add(engine)
480 self.blacklist[msg_id].add(engine)
482
481
483 args = self.pending[engine].pop(msg_id)
482 args = self.pending[engine].pop(msg_id)
484 raw,targets,after,follow,timeout = args
483 raw,targets,after,follow,timeout = args
485
484
486 if self.blacklist[msg_id] == targets:
485 if self.blacklist[msg_id] == targets:
487 self.depending[msg_id] = args
486 self.depending[msg_id] = args
488 return self.fail_unreachable(msg_id)
487 return self.fail_unreachable(msg_id)
489
488
490 elif not self.maybe_run(msg_id, *args):
489 elif not self.maybe_run(msg_id, *args):
491 # resubmit failed, put it back in our dependency tree
490 # resubmit failed, put it back in our dependency tree
492 self.save_unmet(msg_id, *args)
491 self.save_unmet(msg_id, *args)
493
492
494
493
495 @logged
494 @logged
496 def update_graph(self, dep_id, success=True):
495 def update_graph(self, dep_id, success=True):
497 """dep_id just finished. Update our dependency
496 """dep_id just finished. Update our dependency
498 graph and submit any jobs that just became runnable."""
497 graph and submit any jobs that just became runnable."""
499 # print ("\n\n***********")
498 # print ("\n\n***********")
500 # pprint (dep_id)
499 # pprint (dep_id)
501 # pprint (self.graph)
500 # pprint (self.graph)
502 # pprint (self.depending)
501 # pprint (self.depending)
503 # pprint (self.all_completed)
502 # pprint (self.all_completed)
504 # pprint (self.all_failed)
503 # pprint (self.all_failed)
505 # print ("\n\n***********\n\n")
504 # print ("\n\n***********\n\n")
506 if dep_id not in self.graph:
505 if dep_id not in self.graph:
507 return
506 return
508 jobs = self.graph.pop(dep_id)
507 jobs = self.graph.pop(dep_id)
509
508
510 for msg_id in jobs:
509 for msg_id in jobs:
511 raw_msg, targets, after, follow, timeout = self.depending[msg_id]
510 raw_msg, targets, after, follow, timeout = self.depending[msg_id]
512 # if dep_id in after:
511 # if dep_id in after:
513 # if after.all and (success or not after.success_only):
512 # if after.all and (success or not after.success_only):
514 # after.remove(dep_id)
513 # after.remove(dep_id)
515
514
516 if after.unreachable(self.all_failed) or follow.unreachable(self.all_failed):
515 if after.unreachable(self.all_failed) or follow.unreachable(self.all_failed):
517 self.fail_unreachable(msg_id)
516 self.fail_unreachable(msg_id)
518
517
519 elif after.check(self.all_completed, self.all_failed): # time deps met, maybe run
518 elif after.check(self.all_completed, self.all_failed): # time deps met, maybe run
520 if self.maybe_run(msg_id, raw_msg, targets, MET, follow, timeout):
519 if self.maybe_run(msg_id, raw_msg, targets, MET, follow, timeout):
521
520
522 self.depending.pop(msg_id)
521 self.depending.pop(msg_id)
523 for mid in follow.union(after):
522 for mid in follow.union(after):
524 if mid in self.graph:
523 if mid in self.graph:
525 self.graph[mid].remove(msg_id)
524 self.graph[mid].remove(msg_id)
526
525
527 #----------------------------------------------------------------------
526 #----------------------------------------------------------------------
528 # methods to be overridden by subclasses
527 # methods to be overridden by subclasses
529 #----------------------------------------------------------------------
528 #----------------------------------------------------------------------
530
529
531 def add_job(self, idx):
530 def add_job(self, idx):
532 """Called after self.targets[idx] just got the job with header.
531 """Called after self.targets[idx] just got the job with header.
533 Override with subclasses. The default ordering is simple LRU.
532 Override with subclasses. The default ordering is simple LRU.
534 The default loads are the number of outstanding jobs."""
533 The default loads are the number of outstanding jobs."""
535 self.loads[idx] += 1
534 self.loads[idx] += 1
536 for lis in (self.targets, self.loads):
535 for lis in (self.targets, self.loads):
537 lis.append(lis.pop(idx))
536 lis.append(lis.pop(idx))
538
537
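add_job keeps targets and loads in LRU order by rotating the chosen engine to the back of both lists. A small trace, assuming three illustrative engines:

targets = ['a', 'b', 'c']
loads = [0, 1, 0]
idx = 0                       # engine 'a' was just handed a job
loads[idx] += 1
for lis in (targets, loads):
    lis.append(lis.pop(idx))
assert targets == ['b', 'c', 'a']   # 'a' moves to the back of the line
assert loads == [1, 0, 1]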
539
538
540 def finish_job(self, idx):
539 def finish_job(self, idx):
541 """Called after self.targets[idx] just finished a job.
540 """Called after self.targets[idx] just finished a job.
542 Override with subclasses."""
541 Override with subclasses."""
543 self.loads[idx] -= 1
542 self.loads[idx] -= 1
544
543
545
544
546
545
547 def launch_scheduler(in_addr, out_addr, mon_addr, not_addr, config=None,logname='ZMQ',
546 def launch_scheduler(in_addr, out_addr, mon_addr, not_addr, config=None,logname='ZMQ',
548 log_addr=None, loglevel=logging.DEBUG, scheme='lru'):
547 log_addr=None, loglevel=logging.DEBUG, scheme='lru'):
549 from zmq.eventloop import ioloop
548 from zmq.eventloop import ioloop
550 from zmq.eventloop.zmqstream import ZMQStream
549 from zmq.eventloop.zmqstream import ZMQStream
551
550
552 ctx = zmq.Context()
551 ctx = zmq.Context()
553 loop = ioloop.IOLoop()
552 loop = ioloop.IOLoop()
554 print (in_addr, out_addr, mon_addr, not_addr)
553 print (in_addr, out_addr, mon_addr, not_addr)
555 ins = ZMQStream(ctx.socket(zmq.XREP),loop)
554 ins = ZMQStream(ctx.socket(zmq.XREP),loop)
556 ins.bind(in_addr)
555 ins.bind(in_addr)
557 outs = ZMQStream(ctx.socket(zmq.XREP),loop)
556 outs = ZMQStream(ctx.socket(zmq.XREP),loop)
558 outs.bind(out_addr)
557 outs.bind(out_addr)
559 mons = ZMQStream(ctx.socket(zmq.PUB),loop)
558 mons = ZMQStream(ctx.socket(zmq.PUB),loop)
560 mons.connect(mon_addr)
559 mons.connect(mon_addr)
561 nots = ZMQStream(ctx.socket(zmq.SUB),loop)
560 nots = ZMQStream(ctx.socket(zmq.SUB),loop)
562 nots.setsockopt(zmq.SUBSCRIBE, '')
561 nots.setsockopt(zmq.SUBSCRIBE, '')
563 nots.connect(not_addr)
562 nots.connect(not_addr)
564
563
565 scheme = globals().get(scheme, None)
564 scheme = globals().get(scheme, None)
566 # setup logging
565 # setup logging
567 if log_addr:
566 if log_addr:
568 connect_logger(logname, ctx, log_addr, root="scheduler", loglevel=loglevel)
567 connect_logger(logname, ctx, log_addr, root="scheduler", loglevel=loglevel)
569 else:
568 else:
570 local_logger(logname, loglevel)
569 local_logger(logname, loglevel)
571
570
572 scheduler = TaskScheduler(client_stream=ins, engine_stream=outs,
571 scheduler = TaskScheduler(client_stream=ins, engine_stream=outs,
573 mon_stream=mons, notifier_stream=nots,
572 mon_stream=mons, notifier_stream=nots,
574 scheme=scheme, loop=loop, logname=logname,
573 scheme=scheme, loop=loop, logname=logname,
575 config=config)
574 config=config)
576 scheduler.start()
575 scheduler.start()
577 try:
576 try:
578 loop.start()
577 loop.start()
579 except KeyboardInterrupt:
578 except KeyboardInterrupt:
580 print ("interrupted, exiting...", file=sys.__stderr__)
579 print ("interrupted, exiting...", file=sys.__stderr__)
581
580
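A hedged launch sketch; the tcp addresses are illustrative placeholders, and the call blocks in loop.start() until interrupted:

launch_scheduler('tcp://127.0.0.1:10101',   # in_addr: client-facing XREP
                 'tcp://127.0.0.1:10102',   # out_addr: engine-facing XREP
                 'tcp://127.0.0.1:10103',   # mon_addr: PUB to the hub monitor
                 'tcp://127.0.0.1:10104',   # not_addr: SUB for hub notifications
                 scheme='leastload')        # any chooser name defined above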
@@ -1,487 +1,484 b''
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 """
2 """
3 Kernel adapted from kernel.py to use ZMQ Streams
3 Kernel adapted from kernel.py to use ZMQ Streams
4 """
4 """
5
5
6 #-----------------------------------------------------------------------------
6 #-----------------------------------------------------------------------------
7 # Imports
7 # Imports
8 #-----------------------------------------------------------------------------
8 #-----------------------------------------------------------------------------
9
9
10 # Standard library imports.
10 # Standard library imports.
11 from __future__ import print_function
11 from __future__ import print_function
12 import __builtin__
13
12
14 import logging
15 import os
16 import sys
13 import sys
17 import time
14 import time
18 import traceback
19
15
20 from code import CommandCompiler
16 from code import CommandCompiler
21 from datetime import datetime
17 from datetime import datetime
22 from pprint import pprint
18 from pprint import pprint
23 from signal import SIGTERM, SIGKILL
19 from signal import SIGTERM, SIGKILL
24
20
25 # System library imports.
21 # System library imports.
26 import zmq
22 import zmq
27 from zmq.eventloop import ioloop, zmqstream
23 from zmq.eventloop import ioloop, zmqstream
28
24
29 # Local imports.
25 # Local imports.
30 from IPython.core import ultratb
26 from IPython.core import ultratb
31 from IPython.utils.traitlets import HasTraits, Instance, List, Int, Dict, Set, Str
27 from IPython.utils.traitlets import Instance, List, Int, Dict, Set, Str
32 from IPython.zmq.completer import KernelCompleter
28 from IPython.zmq.completer import KernelCompleter
33 from IPython.zmq.iostream import OutStream
29 from IPython.zmq.iostream import OutStream
34 from IPython.zmq.displayhook import DisplayHook
30 from IPython.zmq.displayhook import DisplayHook
35
31
36 from . import heartmonitor
32 from . import heartmonitor
37 from .client import Client
33 from .client import Client
34 from .error import wrap_exception
38 from .factory import SessionFactory
35 from .factory import SessionFactory
39 from .streamsession import StreamSession, Message, extract_header, serialize_object,\
36 from .streamsession import StreamSession
40 unpack_apply_message, ISO8601, wrap_exception
37 from .util import serialize_object, unpack_apply_message, ISO8601
41
38
42 def printer(*args):
39 def printer(*args):
43 pprint(args, stream=sys.__stdout__)
40 pprint(args, stream=sys.__stdout__)
44
41
45
42
46 class _Passer:
43 class _Passer:
47 """Empty class that implements `send()` that does nothing."""
44 """Empty class that implements `send()` that does nothing."""
48 def send(self, *args, **kwargs):
45 def send(self, *args, **kwargs):
49 pass
46 pass
50 send_multipart = send
47 send_multipart = send
51
48
52
49
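_Passer stands in wherever a stream argument is required but replies should be discarded; _initial_exec_lines below uses it to run exec_lines without emitting output:

s = _Passer()
s.send('ignored')             # no-op
s.send_multipart(['a', 'b'])  # the alias, equally a no-op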
53 #-----------------------------------------------------------------------------
50 #-----------------------------------------------------------------------------
54 # Main kernel class
51 # Main kernel class
55 #-----------------------------------------------------------------------------
52 #-----------------------------------------------------------------------------
56
53
57 class Kernel(SessionFactory):
54 class Kernel(SessionFactory):
58
55
59 #---------------------------------------------------------------------------
56 #---------------------------------------------------------------------------
60 # Kernel interface
57 # Kernel interface
61 #---------------------------------------------------------------------------
58 #---------------------------------------------------------------------------
62
59
63 # kwargs:
60 # kwargs:
64 int_id = Int(-1, config=True)
61 int_id = Int(-1, config=True)
65 user_ns = Dict(config=True)
62 user_ns = Dict(config=True)
66 exec_lines = List(config=True)
63 exec_lines = List(config=True)
67
64
68 control_stream = Instance(zmqstream.ZMQStream)
65 control_stream = Instance(zmqstream.ZMQStream)
69 task_stream = Instance(zmqstream.ZMQStream)
66 task_stream = Instance(zmqstream.ZMQStream)
70 iopub_stream = Instance(zmqstream.ZMQStream)
67 iopub_stream = Instance(zmqstream.ZMQStream)
71 client = Instance('IPython.zmq.parallel.client.Client')
68 client = Instance('IPython.zmq.parallel.client.Client')
72
69
73 # internals
70 # internals
74 shell_streams = List()
71 shell_streams = List()
75 compiler = Instance(CommandCompiler, (), {})
72 compiler = Instance(CommandCompiler, (), {})
76 completer = Instance(KernelCompleter)
73 completer = Instance(KernelCompleter)
77
74
78 aborted = Set()
75 aborted = Set()
79 shell_handlers = Dict()
76 shell_handlers = Dict()
80 control_handlers = Dict()
77 control_handlers = Dict()
81
78
82 def _set_prefix(self):
79 def _set_prefix(self):
83 self.prefix = "engine.%s"%self.int_id
80 self.prefix = "engine.%s"%self.int_id
84
81
85 def _connect_completer(self):
82 def _connect_completer(self):
86 self.completer = KernelCompleter(self.user_ns)
83 self.completer = KernelCompleter(self.user_ns)
87
84
88 def __init__(self, **kwargs):
85 def __init__(self, **kwargs):
89 super(Kernel, self).__init__(**kwargs)
86 super(Kernel, self).__init__(**kwargs)
90 self._set_prefix()
87 self._set_prefix()
91 self._connect_completer()
88 self._connect_completer()
92
89
93 self.on_trait_change(self._set_prefix, 'id')
90 self.on_trait_change(self._set_prefix, 'id')
94 self.on_trait_change(self._connect_completer, 'user_ns')
91 self.on_trait_change(self._connect_completer, 'user_ns')
95
92
96 # Build dict of handlers for message types
93 # Build dict of handlers for message types
97 for msg_type in ['execute_request', 'complete_request', 'apply_request',
94 for msg_type in ['execute_request', 'complete_request', 'apply_request',
98 'clear_request']:
95 'clear_request']:
99 self.shell_handlers[msg_type] = getattr(self, msg_type)
96 self.shell_handlers[msg_type] = getattr(self, msg_type)
100
97
101 for msg_type in ['shutdown_request', 'abort_request']+self.shell_handlers.keys():
98 for msg_type in ['shutdown_request', 'abort_request']+self.shell_handlers.keys():
102 self.control_handlers[msg_type] = getattr(self, msg_type)
99 self.control_handlers[msg_type] = getattr(self, msg_type)
103
100
104 self._initial_exec_lines()
101 self._initial_exec_lines()
105
102
106 def _wrap_exception(self, method=None):
103 def _wrap_exception(self, method=None):
107 e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method=method)
104 e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method=method)
108 content=wrap_exception(e_info)
105 content=wrap_exception(e_info)
109 return content
106 return content
110
107
111 def _initial_exec_lines(self):
108 def _initial_exec_lines(self):
112 s = _Passer()
109 s = _Passer()
113 content = dict(silent=True, user_variable=[],user_expressions=[])
110 content = dict(silent=True, user_variable=[],user_expressions=[])
114 for line in self.exec_lines:
111 for line in self.exec_lines:
115 self.log.debug("executing initialization: %s"%line)
112 self.log.debug("executing initialization: %s"%line)
116 content.update({'code':line})
113 content.update({'code':line})
117 msg = self.session.msg('execute_request', content)
114 msg = self.session.msg('execute_request', content)
118 self.execute_request(s, [], msg)
115 self.execute_request(s, [], msg)
119
116
120
117
121 #-------------------- control handlers -----------------------------
118 #-------------------- control handlers -----------------------------
122 def abort_queues(self):
119 def abort_queues(self):
123 for stream in self.shell_streams:
120 for stream in self.shell_streams:
124 if stream:
121 if stream:
125 self.abort_queue(stream)
122 self.abort_queue(stream)
126
123
127 def abort_queue(self, stream):
124 def abort_queue(self, stream):
128 while True:
125 while True:
129 try:
126 try:
130 msg = self.session.recv(stream, zmq.NOBLOCK,content=True)
127 msg = self.session.recv(stream, zmq.NOBLOCK,content=True)
131 except zmq.ZMQError as e:
128 except zmq.ZMQError as e:
132 if e.errno == zmq.EAGAIN:
129 if e.errno == zmq.EAGAIN:
133 break
130 break
134 else:
131 else:
135 return
132 return
136 else:
133 else:
137 if msg is None:
134 if msg is None:
138 return
135 return
139 else:
136 else:
140 idents,msg = msg
137 idents,msg = msg
141
138
142 # assert self.reply_socket.rcvmore(), "Unexpected missing message part."
139 # assert self.reply_socket.rcvmore(), "Unexpected missing message part."
143 # msg = self.reply_socket.recv_json()
140 # msg = self.reply_socket.recv_json()
144 self.log.info("Aborting:")
141 self.log.info("Aborting:")
145 self.log.info(str(msg))
142 self.log.info(str(msg))
146 msg_type = msg['msg_type']
143 msg_type = msg['msg_type']
147 reply_type = msg_type.split('_')[0] + '_reply'
144 reply_type = msg_type.split('_')[0] + '_reply'
148 # reply_msg = self.session.msg(reply_type, {'status' : 'aborted'}, msg)
145 # reply_msg = self.session.msg(reply_type, {'status' : 'aborted'}, msg)
149 # self.reply_socket.send(ident,zmq.SNDMORE)
146 # self.reply_socket.send(ident,zmq.SNDMORE)
150 # self.reply_socket.send_json(reply_msg)
147 # self.reply_socket.send_json(reply_msg)
151 reply_msg = self.session.send(stream, reply_type,
148 reply_msg = self.session.send(stream, reply_type,
152 content={'status' : 'aborted'}, parent=msg, ident=idents)[0]
149 content={'status' : 'aborted'}, parent=msg, ident=idents)[0]
153 self.log.debug(str(reply_msg))
150 self.log.debug(str(reply_msg))
154 # We need to wait a bit for requests to come in. This can probably
151 # We need to wait a bit for requests to come in. This can probably
155 # be set shorter for true asynchronous clients.
152 # be set shorter for true asynchronous clients.
156 time.sleep(0.05)
153 time.sleep(0.05)
157
154
158 def abort_request(self, stream, ident, parent):
155 def abort_request(self, stream, ident, parent):
159 """abort a specifig msg by id"""
156 """abort a specifig msg by id"""
160 msg_ids = parent['content'].get('msg_ids', None)
157 msg_ids = parent['content'].get('msg_ids', None)
161 if isinstance(msg_ids, basestring):
158 if isinstance(msg_ids, basestring):
162 msg_ids = [msg_ids]
159 msg_ids = [msg_ids]
163 if not msg_ids:
160 if not msg_ids:
164 self.abort_queues()
161 self.abort_queues()
165 for mid in msg_ids:
162 for mid in msg_ids:
166 self.aborted.add(str(mid))
163 self.aborted.add(str(mid))
167
164
168 content = dict(status='ok')
165 content = dict(status='ok')
169 reply_msg = self.session.send(stream, 'abort_reply', content=content,
166 reply_msg = self.session.send(stream, 'abort_reply', content=content,
170 parent=parent, ident=ident)[0]
167 parent=parent, ident=ident)[0]
171 self.log.debug(str(reply_msg))
168 self.log.debug(str(reply_msg))
172
169
173 def shutdown_request(self, stream, ident, parent):
170 def shutdown_request(self, stream, ident, parent):
174 """kill ourself. This should really be handled in an external process"""
171 """kill ourself. This should really be handled in an external process"""
175 try:
172 try:
176 self.abort_queues()
173 self.abort_queues()
177 except:
174 except:
178 content = self._wrap_exception('shutdown')
175 content = self._wrap_exception('shutdown')
179 else:
176 else:
180 content = dict(parent['content'])
177 content = dict(parent['content'])
181 content['status'] = 'ok'
178 content['status'] = 'ok'
182 msg = self.session.send(stream, 'shutdown_reply',
179 msg = self.session.send(stream, 'shutdown_reply',
183 content=content, parent=parent, ident=ident)
180 content=content, parent=parent, ident=ident)
184 # msg = self.session.send(self.pub_socket, 'shutdown_reply',
181 # msg = self.session.send(self.pub_socket, 'shutdown_reply',
185 # content, parent, ident)
182 # content, parent, ident)
186 # print >> sys.__stdout__, msg
183 # print >> sys.__stdout__, msg
187 # time.sleep(0.2)
184 # time.sleep(0.2)
188 dc = ioloop.DelayedCallback(lambda : sys.exit(0), 1000, self.loop)
185 dc = ioloop.DelayedCallback(lambda : sys.exit(0), 1000, self.loop)
189 dc.start()
186 dc.start()
190
187
191 def dispatch_control(self, msg):
188 def dispatch_control(self, msg):
192 idents,msg = self.session.feed_identities(msg, copy=False)
189 idents,msg = self.session.feed_identities(msg, copy=False)
193 try:
190 try:
194 msg = self.session.unpack_message(msg, content=True, copy=False)
191 msg = self.session.unpack_message(msg, content=True, copy=False)
195 except:
192 except:
196 self.log.error("Invalid Message", exc_info=True)
193 self.log.error("Invalid Message", exc_info=True)
197 return
194 return
198
195
199 header = msg['header']
196 header = msg['header']
200 msg_id = header['msg_id']
197 msg_id = header['msg_id']
201
198
202 handler = self.control_handlers.get(msg['msg_type'], None)
199 handler = self.control_handlers.get(msg['msg_type'], None)
203 if handler is None:
200 if handler is None:
204 self.log.error("UNKNOWN CONTROL MESSAGE TYPE: %r"%msg['msg_type'])
201 self.log.error("UNKNOWN CONTROL MESSAGE TYPE: %r"%msg['msg_type'])
205 else:
202 else:
206 handler(self.control_stream, idents, msg)
203 handler(self.control_stream, idents, msg)
207
204
208
205
209 #-------------------- queue helpers ------------------------------
206 #-------------------- queue helpers ------------------------------
210
207
211 def check_dependencies(self, dependencies):
208 def check_dependencies(self, dependencies):
212 if not dependencies:
209 if not dependencies:
213 return True
210 return True
214 if len(dependencies) == 2 and dependencies[0] in 'any all'.split():
211 if len(dependencies) == 2 and dependencies[0] in 'any all'.split():
215 anyorall = dependencies[0]
212 anyorall = dependencies[0]
216 dependencies = dependencies[1]
213 dependencies = dependencies[1]
217 else:
214 else:
218 anyorall = 'all'
215 anyorall = 'all'
219 results = self.client.get_results(dependencies,status_only=True)
216 results = self.client.get_results(dependencies,status_only=True)
220 if results['status'] != 'ok':
217 if results['status'] != 'ok':
221 return False
218 return False
222
219
223 if anyorall == 'any':
220 if anyorall == 'any':
224 if not results['completed']:
221 if not results['completed']:
225 return False
222 return False
226 else:
223 else:
227 if results['pending']:
224 if results['pending']:
228 return False
225 return False
229
226
230 return True
227 return True
231
228
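For illustration, `check_dependencies` accepts either a flat list of msg_ids (treated as 'all') or an explicit `[mode, msg_ids]` pair; the ids below are placeholders:

    deps_all = ['msg-a', 'msg-b']           # implicit mode: all must finish
    deps_any = ['any', ['msg-a', 'msg-b']]  # explicit mode, then the ids
    # check_dependencies(deps_any) is True once the client reports at least
    # one listed id completed; deps_all additionally requires none pending.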
232 def check_aborted(self, msg_id):
229 def check_aborted(self, msg_id):
233 return msg_id in self.aborted
230 return msg_id in self.aborted
234
231
235 #-------------------- queue handlers -----------------------------
232 #-------------------- queue handlers -----------------------------
236
233
237 def clear_request(self, stream, idents, parent):
234 def clear_request(self, stream, idents, parent):
238 """Clear our namespace."""
235 """Clear our namespace."""
239 self.user_ns = {}
236 self.user_ns = {}
240 msg = self.session.send(stream, 'clear_reply', ident=idents, parent=parent,
237 msg = self.session.send(stream, 'clear_reply', ident=idents, parent=parent,
241 content = dict(status='ok'))
238 content = dict(status='ok'))
242 self._initial_exec_lines()
239 self._initial_exec_lines()
243
240
244 def execute_request(self, stream, ident, parent):
241 def execute_request(self, stream, ident, parent):
245 self.log.debug('execute request %s'%parent)
242 self.log.debug('execute request %s'%parent)
246 try:
243 try:
247 code = parent[u'content'][u'code']
244 code = parent[u'content'][u'code']
248 except:
245 except:
249 self.log.error("Got bad msg: %s"%parent, exc_info=True)
246 self.log.error("Got bad msg: %s"%parent, exc_info=True)
250 return
247 return
251 self.session.send(self.iopub_stream, u'pyin', {u'code':code},parent=parent,
248 self.session.send(self.iopub_stream, u'pyin', {u'code':code},parent=parent,
252 ident='%s.pyin'%self.prefix)
249 ident='%s.pyin'%self.prefix)
253 started = datetime.now().strftime(ISO8601)
250 started = datetime.now().strftime(ISO8601)
254 try:
251 try:
255 comp_code = self.compiler(code, '<zmq-kernel>')
252 comp_code = self.compiler(code, '<zmq-kernel>')
256 # allow for not overriding displayhook
253 # allow for not overriding displayhook
257 if hasattr(sys.displayhook, 'set_parent'):
254 if hasattr(sys.displayhook, 'set_parent'):
258 sys.displayhook.set_parent(parent)
255 sys.displayhook.set_parent(parent)
259 sys.stdout.set_parent(parent)
256 sys.stdout.set_parent(parent)
260 sys.stderr.set_parent(parent)
257 sys.stderr.set_parent(parent)
261 exec comp_code in self.user_ns, self.user_ns
258 exec comp_code in self.user_ns, self.user_ns
262 except:
259 except:
263 exc_content = self._wrap_exception('execute')
260 exc_content = self._wrap_exception('execute')
264 # exc_msg = self.session.msg(u'pyerr', exc_content, parent)
261 # exc_msg = self.session.msg(u'pyerr', exc_content, parent)
265 self.session.send(self.iopub_stream, u'pyerr', exc_content, parent=parent,
262 self.session.send(self.iopub_stream, u'pyerr', exc_content, parent=parent,
266 ident='%s.pyerr'%self.prefix)
263 ident='%s.pyerr'%self.prefix)
267 reply_content = exc_content
264 reply_content = exc_content
268 else:
265 else:
269 reply_content = {'status' : 'ok'}
266 reply_content = {'status' : 'ok'}
270
267
271 reply_msg = self.session.send(stream, u'execute_reply', reply_content, parent=parent,
268 reply_msg = self.session.send(stream, u'execute_reply', reply_content, parent=parent,
272 ident=ident, subheader = dict(started=started))
269 ident=ident, subheader = dict(started=started))
273 self.log.debug(str(reply_msg))
270 self.log.debug(str(reply_msg))
274 if reply_msg['content']['status'] == u'error':
271 if reply_msg['content']['status'] == u'error':
275 self.abort_queues()
272 self.abort_queues()
276
273
277 def complete_request(self, stream, ident, parent):
274 def complete_request(self, stream, ident, parent):
278 matches = {'matches' : self.complete(parent),
275 matches = {'matches' : self.complete(parent),
279 'status' : 'ok'}
276 'status' : 'ok'}
280 completion_msg = self.session.send(stream, 'complete_reply',
277 completion_msg = self.session.send(stream, 'complete_reply',
281 matches, parent, ident)
278 matches, parent, ident)
282 # print >> sys.__stdout__, completion_msg
279 # print >> sys.__stdout__, completion_msg
283
280
284 def complete(self, msg):
281 def complete(self, msg):
285 return self.completer.complete(msg.content.line, msg.content.text)
282 return self.completer.complete(msg.content.line, msg.content.text)
286
283
287 def apply_request(self, stream, ident, parent):
284 def apply_request(self, stream, ident, parent):
288 # flush previous reply, so this request won't block it
285 # flush previous reply, so this request won't block it
289 stream.flush(zmq.POLLOUT)
286 stream.flush(zmq.POLLOUT)
290
287
291 try:
288 try:
292 content = parent[u'content']
289 content = parent[u'content']
293 bufs = parent[u'buffers']
290 bufs = parent[u'buffers']
294 msg_id = parent['header']['msg_id']
291 msg_id = parent['header']['msg_id']
295 bound = content.get('bound', False)
292 bound = content.get('bound', False)
296 except:
293 except:
297 self.log.error("Got bad msg: %s"%parent, exc_info=True)
294 self.log.error("Got bad msg: %s"%parent, exc_info=True)
298 return
295 return
299 # pyin_msg = self.session.msg(u'pyin',{u'code':code}, parent=parent)
296 # pyin_msg = self.session.msg(u'pyin',{u'code':code}, parent=parent)
300 # self.iopub_stream.send(pyin_msg)
297 # self.iopub_stream.send(pyin_msg)
301 # self.session.send(self.iopub_stream, u'pyin', {u'code':code},parent=parent)
298 # self.session.send(self.iopub_stream, u'pyin', {u'code':code},parent=parent)
302 sub = {'dependencies_met' : True, 'engine' : self.ident,
299 sub = {'dependencies_met' : True, 'engine' : self.ident,
303 'started': datetime.now().strftime(ISO8601)}
300 'started': datetime.now().strftime(ISO8601)}
304 try:
301 try:
305 # allow for not overriding displayhook
302 # allow for not overriding displayhook
306 if hasattr(sys.displayhook, 'set_parent'):
303 if hasattr(sys.displayhook, 'set_parent'):
307 sys.displayhook.set_parent(parent)
304 sys.displayhook.set_parent(parent)
308 sys.stdout.set_parent(parent)
305 sys.stdout.set_parent(parent)
309 sys.stderr.set_parent(parent)
306 sys.stderr.set_parent(parent)
310 # exec "f(*args,**kwargs)" in self.user_ns, self.user_ns
307 # exec "f(*args,**kwargs)" in self.user_ns, self.user_ns
311 if bound:
308 if bound:
312 working = self.user_ns
309 working = self.user_ns
313 suffix = str(msg_id).replace("-","")
310 suffix = str(msg_id).replace("-","")
314 prefix = "_"
311 prefix = "_"
315
312
316 else:
313 else:
317 working = dict()
314 working = dict()
318 suffix = prefix = "_" # prevent keyword collisions with lambda
315 suffix = prefix = "_" # prevent keyword collisions with lambda
319 f,args,kwargs = unpack_apply_message(bufs, working, copy=False)
316 f,args,kwargs = unpack_apply_message(bufs, working, copy=False)
320 # if f.fun
317 # if f.fun
321 fname = getattr(f, '__name__', 'f')
318 fname = getattr(f, '__name__', 'f')
322
319
323 fname = prefix+fname.strip('<>')+suffix
320 fname = prefix+fname.strip('<>')+suffix
324 argname = prefix+"args"+suffix
321 argname = prefix+"args"+suffix
325 kwargname = prefix+"kwargs"+suffix
322 kwargname = prefix+"kwargs"+suffix
326 resultname = prefix+"result"+suffix
323 resultname = prefix+"result"+suffix
327
324
328 ns = { fname : f, argname : args, kwargname : kwargs }
325 ns = { fname : f, argname : args, kwargname : kwargs }
329 # print ns
326 # print ns
330 working.update(ns)
327 working.update(ns)
331 code = "%s=%s(*%s,**%s)"%(resultname, fname, argname, kwargname)
328 code = "%s=%s(*%s,**%s)"%(resultname, fname, argname, kwargname)
332 exec code in working, working
329 exec code in working, working
333 result = working.get(resultname)
330 result = working.get(resultname)
334 # clear the namespace
331 # clear the namespace
335 if bound:
332 if bound:
336 for key in ns.iterkeys():
333 for key in ns.iterkeys():
337 self.user_ns.pop(key)
334 self.user_ns.pop(key)
338 else:
335 else:
339 del working
336 del working
340
337
341 packed_result,buf = serialize_object(result)
338 packed_result,buf = serialize_object(result)
342 result_buf = [packed_result]+buf
339 result_buf = [packed_result]+buf
343 except:
340 except:
344 exc_content = self._wrap_exception('apply')
341 exc_content = self._wrap_exception('apply')
345 # exc_msg = self.session.msg(u'pyerr', exc_content, parent)
342 # exc_msg = self.session.msg(u'pyerr', exc_content, parent)
346 self.session.send(self.iopub_stream, u'pyerr', exc_content, parent=parent,
343 self.session.send(self.iopub_stream, u'pyerr', exc_content, parent=parent,
347 ident='%s.pyerr'%self.prefix)
344 ident='%s.pyerr'%self.prefix)
348 reply_content = exc_content
345 reply_content = exc_content
349 result_buf = []
346 result_buf = []
350
347
351 if exc_content['ename'] == 'UnmetDependency':
348 if exc_content['ename'] == 'UnmetDependency':
352 sub['dependencies_met'] = False
349 sub['dependencies_met'] = False
353 else:
350 else:
354 reply_content = {'status' : 'ok'}
351 reply_content = {'status' : 'ok'}
355
352
356 # put 'ok'/'error' status in header, for scheduler introspection:
353 # put 'ok'/'error' status in header, for scheduler introspection:
357 sub['status'] = reply_content['status']
354 sub['status'] = reply_content['status']
358
355
359 reply_msg = self.session.send(stream, u'apply_reply', reply_content,
356 reply_msg = self.session.send(stream, u'apply_reply', reply_content,
360 parent=parent, ident=ident,buffers=result_buf, subheader=sub)
357 parent=parent, ident=ident,buffers=result_buf, subheader=sub)
361
358
362 # if reply_msg['content']['status'] == u'error':
359 # if reply_msg['content']['status'] == u'error':
363 # self.abort_queues()
360 # self.abort_queues()
364
361
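A sketch of the name mangling `apply_request` performs when executing unbound (bound=False): each temporary gets a bare-underscore prefix and suffix so it cannot collide with keyword names in lambdas (when bound, the suffix is derived from the msg_id instead); `f` here is a placeholder name:

    suffix = prefix = "_"                     # unbound case
    fname      = prefix + "f" + suffix        # _f_
    argname    = prefix + "args" + suffix     # _args_
    kwargname  = prefix + "kwargs" + suffix   # _kwargs_
    resultname = prefix + "result" + suffix   # _result_
    code = "%s=%s(*%s,**%s)" % (resultname, fname, argname, kwargname)
    # code == "_result_=_f_(*_args_,**_kwargs_)", exec'd in the working ns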
365 def dispatch_queue(self, stream, msg):
362 def dispatch_queue(self, stream, msg):
366 self.control_stream.flush()
363 self.control_stream.flush()
367 idents,msg = self.session.feed_identities(msg, copy=False)
364 idents,msg = self.session.feed_identities(msg, copy=False)
368 try:
365 try:
369 msg = self.session.unpack_message(msg, content=True, copy=False)
366 msg = self.session.unpack_message(msg, content=True, copy=False)
370 except:
367 except:
371 self.log.error("Invalid Message", exc_info=True)
368 self.log.error("Invalid Message", exc_info=True)
372 return
369 return
373
370
374
371
375 header = msg['header']
372 header = msg['header']
376 msg_id = header['msg_id']
373 msg_id = header['msg_id']
377 if self.check_aborted(msg_id):
374 if self.check_aborted(msg_id):
378 self.aborted.remove(msg_id)
375 self.aborted.remove(msg_id)
379 # is it safe to assume a msg_id will not be resubmitted?
376 # is it safe to assume a msg_id will not be resubmitted?
380 reply_type = msg['msg_type'].split('_')[0] + '_reply'
377 reply_type = msg['msg_type'].split('_')[0] + '_reply'
381 reply_msg = self.session.send(stream, reply_type,
378 reply_msg = self.session.send(stream, reply_type,
382 content={'status' : 'aborted'}, parent=msg, ident=idents)
379 content={'status' : 'aborted'}, parent=msg, ident=idents)
383 return
380 return
384 handler = self.shell_handlers.get(msg['msg_type'], None)
381 handler = self.shell_handlers.get(msg['msg_type'], None)
385 if handler is None:
382 if handler is None:
386 self.log.error("UNKNOWN MESSAGE TYPE: %r"%msg['msg_type'])
383 self.log.error("UNKNOWN MESSAGE TYPE: %r"%msg['msg_type'])
387 else:
384 else:
388 handler(stream, idents, msg)
385 handler(stream, idents, msg)
389
386
390 def start(self):
387 def start(self):
391 #### stream mode:
388 #### stream mode:
392 if self.control_stream:
389 if self.control_stream:
393 self.control_stream.on_recv(self.dispatch_control, copy=False)
390 self.control_stream.on_recv(self.dispatch_control, copy=False)
394 self.control_stream.on_err(printer)
391 self.control_stream.on_err(printer)
395
392
396 def make_dispatcher(stream):
393 def make_dispatcher(stream):
397 def dispatcher(msg):
394 def dispatcher(msg):
398 return self.dispatch_queue(stream, msg)
395 return self.dispatch_queue(stream, msg)
399 return dispatcher
396 return dispatcher
400
397
401 for s in self.shell_streams:
398 for s in self.shell_streams:
402 s.on_recv(make_dispatcher(s), copy=False)
399 s.on_recv(make_dispatcher(s), copy=False)
403 s.on_err(printer)
400 s.on_err(printer)
404
401
405 if self.iopub_stream:
402 if self.iopub_stream:
406 self.iopub_stream.on_err(printer)
403 self.iopub_stream.on_err(printer)
407
404
408 #### while True mode:
405 #### while True mode:
409 # while True:
406 # while True:
410 # idle = True
407 # idle = True
411 # try:
408 # try:
412 # msg = self.shell_stream.socket.recv_multipart(
409 # msg = self.shell_stream.socket.recv_multipart(
413 # zmq.NOBLOCK, copy=False)
410 # zmq.NOBLOCK, copy=False)
414 # except zmq.ZMQError, e:
411 # except zmq.ZMQError, e:
415 # if e.errno != zmq.EAGAIN:
412 # if e.errno != zmq.EAGAIN:
416 # raise e
413 # raise e
417 # else:
414 # else:
418 # idle=False
415 # idle=False
419 # self.dispatch_queue(self.shell_stream, msg)
416 # self.dispatch_queue(self.shell_stream, msg)
420 #
417 #
421 # if not self.task_stream.empty():
418 # if not self.task_stream.empty():
422 # idle=False
419 # idle=False
423 # msg = self.task_stream.recv_multipart()
420 # msg = self.task_stream.recv_multipart()
424 # self.dispatch_queue(self.task_stream, msg)
421 # self.dispatch_queue(self.task_stream, msg)
425 # if idle:
422 # if idle:
426 # # don't busywait
423 # # don't busywait
427 # time.sleep(1e-3)
424 # time.sleep(1e-3)
428
425
429 def make_kernel(int_id, identity, control_addr, shell_addrs, iopub_addr, hb_addrs,
426 def make_kernel(int_id, identity, control_addr, shell_addrs, iopub_addr, hb_addrs,
430 client_addr=None, loop=None, context=None, key=None,
427 client_addr=None, loop=None, context=None, key=None,
431 out_stream_factory=OutStream, display_hook_factory=DisplayHook):
428 out_stream_factory=OutStream, display_hook_factory=DisplayHook):
432 """NO LONGER IN USE"""
429 """NO LONGER IN USE"""
433 # create loop, context, and session:
430 # create loop, context, and session:
434 if loop is None:
431 if loop is None:
435 loop = ioloop.IOLoop.instance()
432 loop = ioloop.IOLoop.instance()
436 if context is None:
433 if context is None:
437 context = zmq.Context()
434 context = zmq.Context()
438 c = context
435 c = context
439 session = StreamSession(key=key)
436 session = StreamSession(key=key)
440 # print (session.key)
437 # print (session.key)
441 # print (control_addr, shell_addrs, iopub_addr, hb_addrs)
438 # print (control_addr, shell_addrs, iopub_addr, hb_addrs)
442
439
443 # create Control Stream
440 # create Control Stream
444 control_stream = zmqstream.ZMQStream(c.socket(zmq.PAIR), loop)
441 control_stream = zmqstream.ZMQStream(c.socket(zmq.PAIR), loop)
445 control_stream.setsockopt(zmq.IDENTITY, identity)
442 control_stream.setsockopt(zmq.IDENTITY, identity)
446 control_stream.connect(control_addr)
443 control_stream.connect(control_addr)
447
444
448 # create Shell Streams (MUX, Task, etc.):
445 # create Shell Streams (MUX, Task, etc.):
449 shell_streams = []
446 shell_streams = []
450 for addr in shell_addrs:
447 for addr in shell_addrs:
451 stream = zmqstream.ZMQStream(c.socket(zmq.PAIR), loop)
448 stream = zmqstream.ZMQStream(c.socket(zmq.PAIR), loop)
452 stream.setsockopt(zmq.IDENTITY, identity)
449 stream.setsockopt(zmq.IDENTITY, identity)
453 stream.connect(addr)
450 stream.connect(addr)
454 shell_streams.append(stream)
451 shell_streams.append(stream)
455
452
456 # create iopub stream:
453 # create iopub stream:
457 iopub_stream = zmqstream.ZMQStream(c.socket(zmq.PUB), loop)
454 iopub_stream = zmqstream.ZMQStream(c.socket(zmq.PUB), loop)
458 iopub_stream.setsockopt(zmq.IDENTITY, identity)
455 iopub_stream.setsockopt(zmq.IDENTITY, identity)
459 iopub_stream.connect(iopub_addr)
456 iopub_stream.connect(iopub_addr)
460
457
461 # Redirect input streams and set a display hook.
458 # Redirect input streams and set a display hook.
462 if out_stream_factory:
459 if out_stream_factory:
463 sys.stdout = out_stream_factory(session, iopub_stream, u'stdout')
460 sys.stdout = out_stream_factory(session, iopub_stream, u'stdout')
464 sys.stdout.topic = 'engine.%i.stdout'%int_id
461 sys.stdout.topic = 'engine.%i.stdout'%int_id
465 sys.stderr = out_stream_factory(session, iopub_stream, u'stderr')
462 sys.stderr = out_stream_factory(session, iopub_stream, u'stderr')
466 sys.stderr.topic = 'engine.%i.stderr'%int_id
463 sys.stderr.topic = 'engine.%i.stderr'%int_id
467 if display_hook_factory:
464 if display_hook_factory:
468 sys.displayhook = display_hook_factory(session, iopub_stream)
465 sys.displayhook = display_hook_factory(session, iopub_stream)
469 sys.displayhook.topic = 'engine.%i.pyout'%int_id
466 sys.displayhook.topic = 'engine.%i.pyout'%int_id
470
467
471
468
472 # launch heartbeat
469 # launch heartbeat
473 heart = heartmonitor.Heart(*map(str, hb_addrs), heart_id=identity)
470 heart = heartmonitor.Heart(*map(str, hb_addrs), heart_id=identity)
474 heart.start()
471 heart.start()
475
472
476 # create (optional) Client
473 # create (optional) Client
477 if client_addr:
474 if client_addr:
478 client = Client(client_addr, username=identity)
475 client = Client(client_addr, username=identity)
479 else:
476 else:
480 client = None
477 client = None
481
478
482 kernel = Kernel(id=int_id, session=session, control_stream=control_stream,
479 kernel = Kernel(id=int_id, session=session, control_stream=control_stream,
483 shell_streams=shell_streams, iopub_stream=iopub_stream,
480 shell_streams=shell_streams, iopub_stream=iopub_stream,
484 client=client, loop=loop)
481 client=client, loop=loop)
485 kernel.start()
482 kernel.start()
486 return loop, c, kernel
483 return loop, c, kernel
487
484
@@ -1,542 +1,377 b''
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 """edited session.py to work with streams, and move msg_type to the header
2 """edited session.py to work with streams, and move msg_type to the header
3 """
3 """
4
4
5
5
6 import os
6 import os
7 import pprint
7 import pprint
8 import sys
9 import traceback
10 import uuid
8 import uuid
11 from datetime import datetime
9 from datetime import datetime
12
10
13 try:
11 try:
14 import cPickle
12 import cPickle
15 pickle = cPickle
13 pickle = cPickle
16 except:
14 except:
17 cPickle = None
15 cPickle = None
18 import pickle
16 import pickle
19
17
20 import zmq
18 import zmq
21 from zmq.utils import jsonapi
19 from zmq.utils import jsonapi
22 from zmq.eventloop.zmqstream import ZMQStream
20 from zmq.eventloop.zmqstream import ZMQStream
23
21
24 from IPython.utils.pickleutil import can, uncan, canSequence, uncanSequence
22 from .util import ISO8601
25 from IPython.utils.newserialized import serialize, unserialize
26
27 from .error import RemoteError
28
23
29 # packer priority: jsonlib[2], cPickle, simplejson/json, pickle
24 # packer priority: jsonlib[2], cPickle, simplejson/json, pickle
30 json_name = '' if not jsonapi.jsonmod else jsonapi.jsonmod.__name__
25 json_name = '' if not jsonapi.jsonmod else jsonapi.jsonmod.__name__
31 if json_name in ('jsonlib', 'jsonlib2'):
26 if json_name in ('jsonlib', 'jsonlib2'):
32 use_json = True
27 use_json = True
33 elif json_name:
28 elif json_name:
34 if cPickle is None:
29 if cPickle is None:
35 use_json = True
30 use_json = True
36 else:
31 else:
37 use_json = False
32 use_json = False
38 else:
33 else:
39 use_json = False
34 use_json = False
40
35
41 def squash_unicode(obj):
36 def squash_unicode(obj):
42 if isinstance(obj,dict):
37 if isinstance(obj,dict):
43 for key in obj.keys():
38 for key in obj.keys():
44 obj[key] = squash_unicode(obj[key])
39 obj[key] = squash_unicode(obj[key])
45 if isinstance(key, unicode):
40 if isinstance(key, unicode):
46 obj[squash_unicode(key)] = obj.pop(key)
41 obj[squash_unicode(key)] = obj.pop(key)
47 elif isinstance(obj, list):
42 elif isinstance(obj, list):
48 for i,v in enumerate(obj):
43 for i,v in enumerate(obj):
49 obj[i] = squash_unicode(v)
44 obj[i] = squash_unicode(v)
50 elif isinstance(obj, unicode):
45 elif isinstance(obj, unicode):
51 obj = obj.encode('utf8')
46 obj = obj.encode('utf8')
52 return obj
47 return obj
53
48
54 json_packer = jsonapi.dumps
49 json_packer = jsonapi.dumps
55 json_unpacker = lambda s: squash_unicode(jsonapi.loads(s))
50 json_unpacker = lambda s: squash_unicode(jsonapi.loads(s))
56
51
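Since `json_unpacker` routes through `squash_unicode`, json-decoded messages come back with utf8 bytestrings instead of unicode (Python 2 semantics), keys included. A quick sketch:

    d = json_unpacker(json_packer({u'key': [u'a', u'b']}))
    assert d == {'key': ['a', 'b']}   # str keys and values, not unicode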
57 pickle_packer = lambda o: pickle.dumps(o,-1)
52 pickle_packer = lambda o: pickle.dumps(o,-1)
58 pickle_unpacker = pickle.loads
53 pickle_unpacker = pickle.loads
59
54
60 if use_json:
55 if use_json:
61 default_packer = json_packer
56 default_packer = json_packer
62 default_unpacker = json_unpacker
57 default_unpacker = json_unpacker
63 else:
58 else:
64 default_packer = pickle_packer
59 default_packer = pickle_packer
65 default_unpacker = pickle_unpacker
60 default_unpacker = pickle_unpacker
66
61
67
62
68 DELIM="<IDS|MSG>"
63 DELIM="<IDS|MSG>"
69 ISO8601="%Y-%m-%dT%H:%M:%S.%f"
70
71 def wrap_exception(engine_info={}):
72 etype, evalue, tb = sys.exc_info()
73 stb = traceback.format_exception(etype, evalue, tb)
74 exc_content = {
75 'status' : 'error',
76 'traceback' : stb,
77 'ename' : unicode(etype.__name__),
78 'evalue' : unicode(evalue),
79 'engine_info' : engine_info
80 }
81 return exc_content
82
83 def unwrap_exception(content):
84 err = RemoteError(content['ename'], content['evalue'],
85 ''.join(content['traceback']),
86 content.get('engine_info', {}))
87 return err
88
89
64
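`wrap_exception` captures the live `sys.exc_info()` as a json-safe dict, and `unwrap_exception` rebuilds it client-side as a `RemoteError`. A minimal sketch:

    try:
        1/0
    except:
        content = wrap_exception(dict(engine_id=0))
    assert content['ename'] == u'ZeroDivisionError'
    err = unwrap_exception(content)   # RemoteError carrying the traceback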
90 class Message(object):
65 class Message(object):
91 """A simple message object that maps dict keys to attributes.
66 """A simple message object that maps dict keys to attributes.
92
67
93 A Message can be created from a dict and a dict from a Message instance
68 A Message can be created from a dict and a dict from a Message instance
94 simply by calling dict(msg_obj)."""
69 simply by calling dict(msg_obj)."""
95
70
96 def __init__(self, msg_dict):
71 def __init__(self, msg_dict):
97 dct = self.__dict__
72 dct = self.__dict__
98 for k, v in dict(msg_dict).iteritems():
73 for k, v in dict(msg_dict).iteritems():
99 if isinstance(v, dict):
74 if isinstance(v, dict):
100 v = Message(v)
75 v = Message(v)
101 dct[k] = v
76 dct[k] = v
102
77
103 # Having this iterator lets dict(msg_obj) work out of the box.
78 # Having this iterator lets dict(msg_obj) work out of the box.
104 def __iter__(self):
79 def __iter__(self):
105 return iter(self.__dict__.iteritems())
80 return iter(self.__dict__.iteritems())
106
81
107 def __repr__(self):
82 def __repr__(self):
108 return repr(self.__dict__)
83 return repr(self.__dict__)
109
84
110 def __str__(self):
85 def __str__(self):
111 return pprint.pformat(self.__dict__)
86 return pprint.pformat(self.__dict__)
112
87
113 def __contains__(self, k):
88 def __contains__(self, k):
114 return k in self.__dict__
89 return k in self.__dict__
115
90
116 def __getitem__(self, k):
91 def __getitem__(self, k):
117 return self.__dict__[k]
92 return self.__dict__[k]
118
93
119
94
120 def msg_header(msg_id, msg_type, username, session):
95 def msg_header(msg_id, msg_type, username, session):
121 date=datetime.now().strftime(ISO8601)
96 date=datetime.now().strftime(ISO8601)
122 return locals()
97 return locals()
123
98
124 def extract_header(msg_or_header):
99 def extract_header(msg_or_header):
125 """Given a message or header, return the header."""
100 """Given a message or header, return the header."""
126 if not msg_or_header:
101 if not msg_or_header:
127 return {}
102 return {}
128 try:
103 try:
129 # See if msg_or_header is the entire message.
104 # See if msg_or_header is the entire message.
130 h = msg_or_header['header']
105 h = msg_or_header['header']
131 except KeyError:
106 except KeyError:
132 try:
107 try:
133 # See if msg_or_header is just the header
108 # See if msg_or_header is just the header
134 h = msg_or_header['msg_id']
109 h = msg_or_header['msg_id']
135 except KeyError:
110 except KeyError:
136 raise
111 raise
137 else:
112 else:
138 h = msg_or_header
113 h = msg_or_header
139 if not isinstance(h, dict):
114 if not isinstance(h, dict):
140 h = dict(h)
115 h = dict(h)
141 return h
116 return h
142
117
143 def rekey(dikt):
144 """Rekey a dict that has been forced to use str keys where there should be
145 ints by json. This belongs in the jsonutil added by fperez."""
146 for k in dikt.iterkeys():
147 if isinstance(k, str):
148 ik=fk=None
149 try:
150 ik = int(k)
151 except ValueError:
152 try:
153 fk = float(k)
154 except ValueError:
155 continue
156 if ik is not None:
157 nk = ik
158 else:
159 nk = fk
160 if nk in dikt:
161 raise KeyError("already have key %r"%nk)
162 dikt[nk] = dikt.pop(k)
163 return dikt
164
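A sketch of `rekey` on a json-decoded dict, mirroring the (now-disabled) rekey tests later in this changeset:

    d = rekey({'0': 'a', '1.5': 'b', 'name': 'c'})
    assert d == {0: 'a', 1.5: 'b', 'name': 'c'}
    # rekey({'1': 'x', 1: 'y'}) raises KeyError: both keys collapse to 1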
165 def serialize_object(obj, threshold=64e-6):
166 """Serialize an object into a list of sendable buffers.
167
168 Parameters
169 ----------
170
171 obj : object
172 The object to be serialized
173 threshold : float
174 The threshold for not double-pickling the content.
175
176
177 Returns
178 -------
179 ('pmd', [bufs]) :
180 where pmd is the pickled metadata wrapper,
181 bufs is a list of data buffers
182 """
183 databuffers = []
184 if isinstance(obj, (list, tuple)):
185 clist = canSequence(obj)
186 slist = map(serialize, clist)
187 for s in slist:
188 if s.typeDescriptor in ('buffer', 'ndarray') or s.getDataSize() > threshold:
189 databuffers.append(s.getData())
190 s.data = None
191 return pickle.dumps(slist,-1), databuffers
192 elif isinstance(obj, dict):
193 sobj = {}
194 for k in sorted(obj.iterkeys()):
195 s = serialize(can(obj[k]))
196 if s.typeDescriptor in ('buffer', 'ndarray') or s.getDataSize() > threshold:
197 databuffers.append(s.getData())
198 s.data = None
199 sobj[k] = s
200 return pickle.dumps(sobj,-1),databuffers
201 else:
202 s = serialize(can(obj))
203 if s.typeDescriptor in ('buffer', 'ndarray') or s.getDataSize() > threshold:
204 databuffers.append(s.getData())
205 s.data = None
206 return pickle.dumps(s,-1),databuffers
207
208
209 def unserialize_object(bufs):
210 """reconstruct an object serialized by serialize_object from data buffers."""
211 bufs = list(bufs)
212 sobj = pickle.loads(bufs.pop(0))
213 if isinstance(sobj, (list, tuple)):
214 for s in sobj:
215 if s.data is None:
216 s.data = bufs.pop(0)
217 return uncanSequence(map(unserialize, sobj)), bufs
218 elif isinstance(sobj, dict):
219 newobj = {}
220 for k in sorted(sobj.iterkeys()):
221 s = sobj[k]
222 if s.data is None:
223 s.data = bufs.pop(0)
224 newobj[k] = uncan(unserialize(s))
225 return newobj, bufs
226 else:
227 if sobj.data is None:
228 sobj.data = bufs.pop(0)
229 return uncan(unserialize(sobj)), bufs
230
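A round-trip sketch for the serializers, assuming small, plainly picklable data (at this size nothing crosses the threshold, so no zero-copy buffers are split out):

    pmd, bufs = serialize_object(['hi', {'a': 1}])
    obj, remaining = unserialize_object([pmd] + bufs)
    assert obj == ['hi', {'a': 1}] and remaining == []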
231 def pack_apply_message(f, args, kwargs, threshold=64e-6):
232 """pack up a function, args, and kwargs to be sent over the wire
233 as a series of buffers. Any object whose data is larger than `threshold`
234 will not have their data copied (currently only numpy arrays support zero-copy)"""
235 msg = [pickle.dumps(can(f),-1)]
236 databuffers = [] # for large objects
237 sargs, bufs = serialize_object(args,threshold)
238 msg.append(sargs)
239 databuffers.extend(bufs)
240 skwargs, bufs = serialize_object(kwargs,threshold)
241 msg.append(skwargs)
242 databuffers.extend(bufs)
243 msg.extend(databuffers)
244 return msg
245
246 def unpack_apply_message(bufs, g=None, copy=True):
247 """unpack f,args,kwargs from buffers packed by pack_apply_message()
248 Returns: original f,args,kwargs"""
249 bufs = list(bufs) # allow us to pop
250 assert len(bufs) >= 3, "not enough buffers!"
251 if not copy:
252 for i in range(3):
253 bufs[i] = bufs[i].bytes
254 cf = pickle.loads(bufs.pop(0))
255 sargs = list(pickle.loads(bufs.pop(0)))
256 skwargs = dict(pickle.loads(bufs.pop(0)))
257 # print sargs, skwargs
258 f = uncan(cf, g)
259 for sa in sargs:
260 if sa.data is None:
261 m = bufs.pop(0)
262 if sa.getTypeDescriptor() in ('buffer', 'ndarray'):
263 if copy:
264 sa.data = buffer(m)
265 else:
266 sa.data = m.buffer
267 else:
268 if copy:
269 sa.data = m
270 else:
271 sa.data = m.bytes
272
273 args = uncanSequence(map(unserialize, sargs), g)
274 kwargs = {}
275 for k in sorted(skwargs.iterkeys()):
276 sa = skwargs[k]
277 if sa.data is None:
278 sa.data = bufs.pop(0)
279 kwargs[k] = uncan(unserialize(sa), g)
280
281 return f,args,kwargs
282
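And the apply path end to end, a hedged sketch with a throwaway function (small arguments, so everything rides in the first three buffers; this assumes `can`/`uncan` round-trip plain functions, per `IPython.utils.pickleutil`):

    def _add(a, b=1):    # placeholder function
        return a + b
    bufs = pack_apply_message(_add, (5,), dict(b=2))
    f, args, kwargs = unpack_apply_message(bufs)
    assert f(*args, **kwargs) == 7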
283 class StreamSession(object):
118 class StreamSession(object):
284 """tweaked version of IPython.zmq.session.Session, for development in Parallel"""
119 """tweaked version of IPython.zmq.session.Session, for development in Parallel"""
285 debug=False
120 debug=False
286 key=None
121 key=None
287
122
288 def __init__(self, username=None, session=None, packer=None, unpacker=None, key=None, keyfile=None):
123 def __init__(self, username=None, session=None, packer=None, unpacker=None, key=None, keyfile=None):
289 if username is None:
124 if username is None:
290 username = os.environ.get('USER','username')
125 username = os.environ.get('USER','username')
291 self.username = username
126 self.username = username
292 if session is None:
127 if session is None:
293 self.session = str(uuid.uuid4())
128 self.session = str(uuid.uuid4())
294 else:
129 else:
295 self.session = session
130 self.session = session
296 self.msg_id = str(uuid.uuid4())
131 self.msg_id = str(uuid.uuid4())
297 if packer is None:
132 if packer is None:
298 self.pack = default_packer
133 self.pack = default_packer
299 else:
134 else:
300 if not callable(packer):
135 if not callable(packer):
301 raise TypeError("packer must be callable, not %s"%type(packer))
136 raise TypeError("packer must be callable, not %s"%type(packer))
302 self.pack = packer
137 self.pack = packer
303
138
304 if unpacker is None:
139 if unpacker is None:
305 self.unpack = default_unpacker
140 self.unpack = default_unpacker
306 else:
141 else:
307 if not callable(unpacker):
142 if not callable(unpacker):
308 raise TypeError("unpacker must be callable, not %s"%type(unpacker))
143 raise TypeError("unpacker must be callable, not %s"%type(unpacker))
309 self.unpack = unpacker
144 self.unpack = unpacker
310
145
311 if key is not None and keyfile is not None:
146 if key is not None and keyfile is not None:
312 raise TypeError("Must specify key OR keyfile, not both")
147 raise TypeError("Must specify key OR keyfile, not both")
313 if keyfile is not None:
148 if keyfile is not None:
314 with open(keyfile) as f:
149 with open(keyfile) as f:
315 self.key = f.read().strip()
150 self.key = f.read().strip()
316 else:
151 else:
317 self.key = key
152 self.key = key
318 if isinstance(self.key, unicode):
153 if isinstance(self.key, unicode):
319 self.key = self.key.encode('utf8')
154 self.key = self.key.encode('utf8')
320 # print key, keyfile, self.key
155 # print key, keyfile, self.key
321 self.none = self.pack({})
156 self.none = self.pack({})
322
157
323 def msg_header(self, msg_type):
158 def msg_header(self, msg_type):
324 h = msg_header(self.msg_id, msg_type, self.username, self.session)
159 h = msg_header(self.msg_id, msg_type, self.username, self.session)
325 self.msg_id = str(uuid.uuid4())
160 self.msg_id = str(uuid.uuid4())
326 return h
161 return h
327
162
328 def msg(self, msg_type, content=None, parent=None, subheader=None):
163 def msg(self, msg_type, content=None, parent=None, subheader=None):
329 msg = {}
164 msg = {}
330 msg['header'] = self.msg_header(msg_type)
165 msg['header'] = self.msg_header(msg_type)
331 msg['msg_id'] = msg['header']['msg_id']
166 msg['msg_id'] = msg['header']['msg_id']
332 msg['parent_header'] = {} if parent is None else extract_header(parent)
167 msg['parent_header'] = {} if parent is None else extract_header(parent)
333 msg['msg_type'] = msg_type
168 msg['msg_type'] = msg_type
334 msg['content'] = {} if content is None else content
169 msg['content'] = {} if content is None else content
335 sub = {} if subheader is None else subheader
170 sub = {} if subheader is None else subheader
336 msg['header'].update(sub)
171 msg['header'].update(sub)
337 return msg
172 return msg
338
173
339 def check_key(self, msg_or_header):
174 def check_key(self, msg_or_header):
340 """Check that a message's header has the right key"""
175 """Check that a message's header has the right key"""
341 if self.key is None:
176 if self.key is None:
342 return True
177 return True
343 header = extract_header(msg_or_header)
178 header = extract_header(msg_or_header)
344 return header.get('key', None) == self.key
179 return header.get('key', None) == self.key
345
180
346
181
347 def send(self, stream, msg_or_type, content=None, buffers=None, parent=None, subheader=None, ident=None):
182 def send(self, stream, msg_or_type, content=None, buffers=None, parent=None, subheader=None, ident=None):
348 """Build and send a message via stream or socket.
183 """Build and send a message via stream or socket.
349
184
350 Parameters
185 Parameters
351 ----------
186 ----------
352
187
353 stream : zmq.Socket or ZMQStream
188 stream : zmq.Socket or ZMQStream
354 the socket-like object used to send the data
189 the socket-like object used to send the data
355 msg_or_type : str or Message/dict
190 msg_or_type : str or Message/dict
356 Normally, msg_or_type will be a msg_type unless a message is being sent more
191 Normally, msg_or_type will be a msg_type unless a message is being sent more
357 than once.
192 than once.
358
193
359 Returns
194 Returns
360 -------
195 -------
361 (msg,sent) : tuple
196 (msg,sent) : tuple
362 msg : Message
197 msg : Message
363 the nice wrapped dict-like object containing the headers
198 the nice wrapped dict-like object containing the headers
364
199
365 """
200 """
366 if isinstance(msg_or_type, (Message, dict)):
201 if isinstance(msg_or_type, (Message, dict)):
367 # we got a Message, not a msg_type
202 # we got a Message, not a msg_type
368 # don't build a new Message
203 # don't build a new Message
369 msg = msg_or_type
204 msg = msg_or_type
370 content = msg['content']
205 content = msg['content']
371 else:
206 else:
372 msg = self.msg(msg_or_type, content, parent, subheader)
207 msg = self.msg(msg_or_type, content, parent, subheader)
373 buffers = [] if buffers is None else buffers
208 buffers = [] if buffers is None else buffers
374 to_send = []
209 to_send = []
375 if isinstance(ident, list):
210 if isinstance(ident, list):
376 # accept list of idents
211 # accept list of idents
377 to_send.extend(ident)
212 to_send.extend(ident)
378 elif ident is not None:
213 elif ident is not None:
379 to_send.append(ident)
214 to_send.append(ident)
380 to_send.append(DELIM)
215 to_send.append(DELIM)
381 if self.key is not None:
216 if self.key is not None:
382 to_send.append(self.key)
217 to_send.append(self.key)
383 to_send.append(self.pack(msg['header']))
218 to_send.append(self.pack(msg['header']))
384 to_send.append(self.pack(msg['parent_header']))
219 to_send.append(self.pack(msg['parent_header']))
385
220
386 if content is None:
221 if content is None:
387 content = self.none
222 content = self.none
388 elif isinstance(content, dict):
223 elif isinstance(content, dict):
389 content = self.pack(content)
224 content = self.pack(content)
390 elif isinstance(content, str):
225 elif isinstance(content, str):
391 # content is already packed, as in a relayed message
226 # content is already packed, as in a relayed message
392 pass
227 pass
393 else:
228 else:
394 raise TypeError("Content incorrect type: %s"%type(content))
229 raise TypeError("Content incorrect type: %s"%type(content))
395 to_send.append(content)
230 to_send.append(content)
396 flag = 0
231 flag = 0
397 if buffers:
232 if buffers:
398 flag = zmq.SNDMORE
233 flag = zmq.SNDMORE
399 stream.send_multipart(to_send, flag, copy=False)
234 stream.send_multipart(to_send, flag, copy=False)
400 for b in buffers[:-1]:
235 for b in buffers[:-1]:
401 stream.send(b, flag, copy=False)
236 stream.send(b, flag, copy=False)
402 if buffers:
237 if buffers:
403 stream.send(buffers[-1], copy=False)
238 stream.send(buffers[-1], copy=False)
404 # omsg = Message(msg)
239 # omsg = Message(msg)
405 if self.debug:
240 if self.debug:
406 pprint.pprint(msg)
241 pprint.pprint(msg)
407 pprint.pprint(to_send)
242 pprint.pprint(to_send)
408 pprint.pprint(buffers)
243 pprint.pprint(buffers)
409 return msg
244 return msg
410
245
411 def send_raw(self, stream, msg, flags=0, copy=True, ident=None):
246 def send_raw(self, stream, msg, flags=0, copy=True, ident=None):
412 """Send a raw message via ident path.
247 """Send a raw message via ident path.
413
248
414 Parameters
249 Parameters
415 ----------
250 ----------
416 msg : list of sendable buffers"""
251 msg : list of sendable buffers"""
417 to_send = []
252 to_send = []
418 if isinstance(ident, str):
253 if isinstance(ident, str):
419 ident = [ident]
254 ident = [ident]
420 if ident is not None:
255 if ident is not None:
421 to_send.extend(ident)
256 to_send.extend(ident)
422 to_send.append(DELIM)
257 to_send.append(DELIM)
423 if self.key is not None:
258 if self.key is not None:
424 to_send.append(self.key)
259 to_send.append(self.key)
425 to_send.extend(msg)
260 to_send.extend(msg)
426 stream.send_multipart(msg, flags, copy=copy)
261 stream.send_multipart(msg, flags, copy=copy)
427
262
428 def recv(self, socket, mode=zmq.NOBLOCK, content=True, copy=True):
263 def recv(self, socket, mode=zmq.NOBLOCK, content=True, copy=True):
429 """receives and unpacks a message
264 """receives and unpacks a message
430 returns [idents], msg"""
265 returns [idents], msg"""
431 if isinstance(socket, ZMQStream):
266 if isinstance(socket, ZMQStream):
432 socket = socket.socket
267 socket = socket.socket
433 try:
268 try:
434 msg = socket.recv_multipart(mode)
269 msg = socket.recv_multipart(mode)
435 except zmq.ZMQError as e:
270 except zmq.ZMQError as e:
436 if e.errno == zmq.EAGAIN:
271 if e.errno == zmq.EAGAIN:
437 # We can convert EAGAIN to None as we know in this case
272 # We can convert EAGAIN to None as we know in this case
438 # recv_multipart won't return None.
273 # recv_multipart won't return None.
439 return None
274 return None
440 else:
275 else:
441 raise
276 raise
442 # return an actual Message object
277 # return an actual Message object
443 # determine the number of idents by trying to unpack them.
278 # determine the number of idents by trying to unpack them.
444 # this is terrible:
279 # this is terrible:
445 idents, msg = self.feed_identities(msg, copy)
280 idents, msg = self.feed_identities(msg, copy)
446 try:
281 try:
447 return idents, self.unpack_message(msg, content=content, copy=copy)
282 return idents, self.unpack_message(msg, content=content, copy=copy)
448 except Exception as e:
283 except Exception as e:
449 print (idents, msg)
284 print (idents, msg)
450 # TODO: handle it
285 # TODO: handle it
451 raise e
286 raise e
452
287
453 def feed_identities(self, msg, copy=True):
288 def feed_identities(self, msg, copy=True):
454 """feed until DELIM is reached, then return the prefix as idents and remainder as
289 """feed until DELIM is reached, then return the prefix as idents and remainder as
455 msg. This is easily broken by setting an IDENT to DELIM, but that would be silly.
290 msg. This is easily broken by setting an IDENT to DELIM, but that would be silly.
456
291
457 Parameters
292 Parameters
458 ----------
293 ----------
459 msg : a list of Message or bytes objects
294 msg : a list of Message or bytes objects
460 the message to be split
295 the message to be split
461 copy : bool
296 copy : bool
462 flag determining whether the arguments are bytes or Messages
297 flag determining whether the arguments are bytes or Messages
463
298
464 Returns
299 Returns
465 -------
300 -------
466 (idents,msg) : two lists
301 (idents,msg) : two lists
467 idents will always be a list of bytes - the identity prefix
302 idents will always be a list of bytes - the identity prefix
468 msg will be a list of bytes or Messages, unchanged from input
303 msg will be a list of bytes or Messages, unchanged from input
469 msg should be unpackable via self.unpack_message at this point.
304 msg should be unpackable via self.unpack_message at this point.
470 """
305 """
471 ikey = int(self.key is not None)
306 ikey = int(self.key is not None)
472 minlen = 3 + ikey
307 minlen = 3 + ikey
473 msg = list(msg)
308 msg = list(msg)
474 idents = []
309 idents = []
475 while len(msg) > minlen:
310 while len(msg) > minlen:
476 if copy:
311 if copy:
477 s = msg[0]
312 s = msg[0]
478 else:
313 else:
479 s = msg[0].bytes
314 s = msg[0].bytes
480 if s == DELIM:
315 if s == DELIM:
481 msg.pop(0)
316 msg.pop(0)
482 break
317 break
483 else:
318 else:
484 idents.append(s)
319 idents.append(s)
485 msg.pop(0)
320 msg.pop(0)
486
321
487 return idents, msg
322 return idents, msg
488
323
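The framing `feed_identities` consumes, as a sketch (no key configured, so minlen is 3):

    s = StreamSession()
    wire = ['engine-1', 'client-7', DELIM,
            s.pack({}), s.pack({}), s.pack({})]   # header, parent, content
    idents, rest = s.feed_identities(wire)
    assert idents == ['engine-1', 'client-7'] and len(rest) == 3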
489 def unpack_message(self, msg, content=True, copy=True):
324 def unpack_message(self, msg, content=True, copy=True):
490 """Return a message object from the format
325 """Return a message object from the format
491 sent by self.send.
326 sent by self.send.
492
327
493 Parameters:
328 Parameters:
494 -----------
329 -----------
495
330
496 content : bool (True)
331 content : bool (True)
497 whether to unpack the content dict (True),
332 whether to unpack the content dict (True),
498 or leave it serialized (False)
333 or leave it serialized (False)
499
334
500 copy : bool (True)
335 copy : bool (True)
501 whether to return the bytes (True),
336 whether to return the bytes (True),
502 or the non-copying Message object in each place (False)
337 or the non-copying Message object in each place (False)
503
338
504 """
339 """
505 ikey = int(self.key is not None)
340 ikey = int(self.key is not None)
506 minlen = 3 + ikey
341 minlen = 3 + ikey
507 message = {}
342 message = {}
508 if not copy:
343 if not copy:
509 for i in range(minlen):
344 for i in range(minlen):
510 msg[i] = msg[i].bytes
345 msg[i] = msg[i].bytes
511 if ikey:
346 if ikey:
512 if not self.key == msg[0]:
347 if not self.key == msg[0]:
513 raise KeyError("Invalid Session Key: %s"%msg[0])
348 raise KeyError("Invalid Session Key: %s"%msg[0])
514 if not len(msg) >= minlen:
349 if not len(msg) >= minlen:
515 raise TypeError("malformed message, must have at least %i elements"%minlen)
350 raise TypeError("malformed message, must have at least %i elements"%minlen)
516 message['header'] = self.unpack(msg[ikey+0])
351 message['header'] = self.unpack(msg[ikey+0])
517 message['msg_type'] = message['header']['msg_type']
352 message['msg_type'] = message['header']['msg_type']
518 message['parent_header'] = self.unpack(msg[ikey+1])
353 message['parent_header'] = self.unpack(msg[ikey+1])
519 if content:
354 if content:
520 message['content'] = self.unpack(msg[ikey+2])
355 message['content'] = self.unpack(msg[ikey+2])
521 else:
356 else:
522 message['content'] = msg[ikey+2]
357 message['content'] = msg[ikey+2]
523
358
524 message['buffers'] = msg[ikey+3:]# [ m.buffer for m in msg[3:] ]
359 message['buffers'] = msg[ikey+3:]# [ m.buffer for m in msg[3:] ]
525 return message
360 return message
526
361
527
362
528 def test_msg2obj():
363 def test_msg2obj():
529 am = dict(x=1)
364 am = dict(x=1)
530 ao = Message(am)
365 ao = Message(am)
531 assert ao.x == am['x']
366 assert ao.x == am['x']
532
367
533 am['y'] = dict(z=1)
368 am['y'] = dict(z=1)
534 ao = Message(am)
369 ao = Message(am)
535 assert ao.y.z == am['y']['z']
370 assert ao.y.z == am['y']['z']
536
371
537 k1, k2 = 'y', 'z'
372 k1, k2 = 'y', 'z'
538 assert ao[k1][k2] == am[k1][k2]
373 assert ao[k1][k2] == am[k1][k2]
539
374
540 am2 = dict(ao)
375 am2 = dict(ao)
541 assert am['x'] == am2['x']
376 assert am['x'] == am2['x']
542 assert am['y']['z'] == am2['y']['z']
377 assert am['y']['z'] == am2['y']['z']
@@ -1,82 +1,82 b''
1
1
2 import os
2 import os
3 import uuid
3 import uuid
4 import zmq
4 import zmq
5
5
6 from zmq.tests import BaseZMQTestCase
6 from zmq.tests import BaseZMQTestCase
7
7
8 # from IPython.zmq.tests import SessionTestCase
8 # from IPython.zmq.tests import SessionTestCase
9 from IPython.zmq.parallel import streamsession as ss
9 from IPython.zmq.parallel import streamsession as ss
10
10
11 class SessionTestCase(BaseZMQTestCase):
11 class SessionTestCase(BaseZMQTestCase):
12
12
13 def setUp(self):
13 def setUp(self):
14 BaseZMQTestCase.setUp(self)
14 BaseZMQTestCase.setUp(self)
15 self.session = ss.StreamSession()
15 self.session = ss.StreamSession()
16
16
17 class TestSession(SessionTestCase):
17 class TestSession(SessionTestCase):
18
18
19 def test_msg(self):
19 def test_msg(self):
20 """message format"""
20 """message format"""
21 msg = self.session.msg('execute')
21 msg = self.session.msg('execute')
22 thekeys = set('header msg_id parent_header msg_type content'.split())
22 thekeys = set('header msg_id parent_header msg_type content'.split())
23 s = set(msg.keys())
23 s = set(msg.keys())
24 self.assertEquals(s, thekeys)
24 self.assertEquals(s, thekeys)
25 self.assertTrue(isinstance(msg['content'],dict))
25 self.assertTrue(isinstance(msg['content'],dict))
26 self.assertTrue(isinstance(msg['header'],dict))
26 self.assertTrue(isinstance(msg['header'],dict))
27 self.assertTrue(isinstance(msg['parent_header'],dict))
27 self.assertTrue(isinstance(msg['parent_header'],dict))
28 self.assertEquals(msg['msg_type'], 'execute')
28 self.assertEquals(msg['msg_type'], 'execute')
29
29
30
30
31
31
32 def test_args(self):
32 def test_args(self):
33 """initialization arguments for StreamSession"""
33 """initialization arguments for StreamSession"""
34 s = ss.StreamSession()
34 s = ss.StreamSession()
35 self.assertTrue(s.pack is ss.default_packer)
35 self.assertTrue(s.pack is ss.default_packer)
36 self.assertTrue(s.unpack is ss.default_unpacker)
36 self.assertTrue(s.unpack is ss.default_unpacker)
37 self.assertEquals(s.username, os.environ.get('USER', 'username'))
37 self.assertEquals(s.username, os.environ.get('USER', 'username'))
38
38
39 s = ss.StreamSession(username=None)
39 s = ss.StreamSession(username=None)
40 self.assertEquals(s.username, os.environ.get('USER', 'username'))
40 self.assertEquals(s.username, os.environ.get('USER', 'username'))
41
41
42 self.assertRaises(TypeError, ss.StreamSession, packer='hi')
42 self.assertRaises(TypeError, ss.StreamSession, packer='hi')
43 self.assertRaises(TypeError, ss.StreamSession, unpacker='hi')
43 self.assertRaises(TypeError, ss.StreamSession, unpacker='hi')
44 u = str(uuid.uuid4())
44 u = str(uuid.uuid4())
45 s = ss.StreamSession(username='carrot', session=u)
45 s = ss.StreamSession(username='carrot', session=u)
46 self.assertEquals(s.session, u)
46 self.assertEquals(s.session, u)
47 self.assertEquals(s.username, 'carrot')
47 self.assertEquals(s.username, 'carrot')
48
48
49
49
50 def test_rekey(self):
50 # def test_rekey(self):
51 """rekeying dict around json str keys"""
51 # """rekeying dict around json str keys"""
52 d = {'0': uuid.uuid4(), 0:uuid.uuid4()}
52 # d = {'0': uuid.uuid4(), 0:uuid.uuid4()}
53 self.assertRaises(KeyError, ss.rekey, d)
53 # self.assertRaises(KeyError, ss.rekey, d)
54
54 #
55 d = {'0': uuid.uuid4(), 1:uuid.uuid4(), 'asdf':uuid.uuid4()}
55 # d = {'0': uuid.uuid4(), 1:uuid.uuid4(), 'asdf':uuid.uuid4()}
56 d2 = {0:d['0'],1:d[1],'asdf':d['asdf']}
56 # d2 = {0:d['0'],1:d[1],'asdf':d['asdf']}
57 rd = ss.rekey(d)
57 # rd = ss.rekey(d)
58 self.assertEquals(d2,rd)
58 # self.assertEquals(d2,rd)
59
59 #
60 d = {'1.5':uuid.uuid4(),'1':uuid.uuid4()}
60 # d = {'1.5':uuid.uuid4(),'1':uuid.uuid4()}
61 d2 = {1.5:d['1.5'],1:d['1']}
61 # d2 = {1.5:d['1.5'],1:d['1']}
62 rd = ss.rekey(d)
62 # rd = ss.rekey(d)
63 self.assertEquals(d2,rd)
63 # self.assertEquals(d2,rd)
64
64 #
65 d = {'1.0':uuid.uuid4(),'1':uuid.uuid4()}
65 # d = {'1.0':uuid.uuid4(),'1':uuid.uuid4()}
66 self.assertRaises(KeyError, ss.rekey, d)
66 # self.assertRaises(KeyError, ss.rekey, d)
67
67 #
68 def test_unique_msg_ids(self):
68 def test_unique_msg_ids(self):
69 """test that messages receive unique ids"""
69 """test that messages receive unique ids"""
70 ids = set()
70 ids = set()
71 for i in range(2**12):
71 for i in range(2**12):
72 h = self.session.msg_header('test')
72 h = self.session.msg_header('test')
73 msg_id = h['msg_id']
73 msg_id = h['msg_id']
74 self.assertTrue(msg_id not in ids)
74 self.assertTrue(msg_id not in ids)
75 ids.add(msg_id)
75 ids.add(msg_id)
76
76
77 def test_feed_identities(self):
77 def test_feed_identities(self):
78 """scrub the front for zmq IDENTITIES"""
78 """scrub the front for zmq IDENTITIES"""
79 theids = "engine client other".split()
79 theids = "engine client other".split()
80 content = dict(code='whoda',stuff=object())
80 content = dict(code='whoda',stuff=object())
81 themsg = self.session.msg('execute',content=content)
81 themsg = self.session.msg('execute',content=content)
82 pmsg = theids
82 pmsg = theids
@@ -1,119 +1,271 b''
1 """some generic utilities"""
1 """some generic utilities for dealing with classes, urls, and serialization"""
2 import re
2 import re
3 import socket
3 import socket
4
4
5 try:
6 import cPickle
7 pickle = cPickle
8 except ImportError:
9 cPickle = None
10 import pickle
11
12
13 from IPython.utils.pickleutil import can, uncan, canSequence, uncanSequence
14 from IPython.utils.newserialized import serialize, unserialize
15
16 ISO8601="%Y-%m-%dT%H:%M:%S.%f"
17
5 class ReverseDict(dict):
18 class ReverseDict(dict):
6 """simple double-keyed subset of dict methods."""
19 """simple double-keyed subset of dict methods."""
7
20
8 def __init__(self, *args, **kwargs):
21 def __init__(self, *args, **kwargs):
9 dict.__init__(self, *args, **kwargs)
22 dict.__init__(self, *args, **kwargs)
10 self._reverse = dict()
23 self._reverse = dict()
11 for key, value in self.iteritems():
24 for key, value in self.iteritems():
12 self._reverse[value] = key
25 self._reverse[value] = key
13
26
14 def __getitem__(self, key):
27 def __getitem__(self, key):
15 try:
28 try:
16 return dict.__getitem__(self, key)
29 return dict.__getitem__(self, key)
17 except KeyError:
30 except KeyError:
18 return self._reverse[key]
31 return self._reverse[key]
19
32
20 def __setitem__(self, key, value):
33 def __setitem__(self, key, value):
21 if key in self._reverse:
34 if key in self._reverse:
22 raise KeyError("Can't have key %r on both sides!"%key)
35 raise KeyError("Can't have key %r on both sides!"%key)
23 dict.__setitem__(self, key, value)
36 dict.__setitem__(self, key, value)
24 self._reverse[value] = key
37 self._reverse[value] = key
25
38
26 def pop(self, key):
39 def pop(self, key):
27 value = dict.pop(self, key)
40 value = dict.pop(self, key)
28 self._reverse.pop(value)
41 self._reverse.pop(value)
29 return value
42 return value
30
43
31 def get(self, key, default=None):
44 def get(self, key, default=None):
32 try:
45 try:
33 return self[key]
46 return self[key]
34 except KeyError:
47 except KeyError:
35 return default
48 return default
36
37
49
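A minimal usage sketch of ReverseDict (not part of the diff itself): lookups resolve by key first, then by value, and pop() keeps both mappings in sync.

    rd = ReverseDict()
    rd['a'] = 1
    assert rd['a'] == 1                       # forward lookup
    assert rd[1] == 'a'                       # reverse lookup via _reverse
    assert rd.get('b', 'missing') == 'missing'
    rd.pop('a')                               # removes the pair from both sides

Note that __setitem__ refuses a key that already exists as a value on the reverse side, raising KeyError.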
38 def validate_url(url):
50 def validate_url(url):
39 """validate a url for zeromq"""
51 """validate a url for zeromq"""
40 if not isinstance(url, basestring):
52 if not isinstance(url, basestring):
41 raise TypeError("url must be a string, not %r"%type(url))
53 raise TypeError("url must be a string, not %r"%type(url))
42 url = url.lower()
54 url = url.lower()
43
55
44 proto_addr = url.split('://')
56 proto_addr = url.split('://')
45 assert len(proto_addr) == 2, 'Invalid url: %r'%url
57 assert len(proto_addr) == 2, 'Invalid url: %r'%url
46 proto, addr = proto_addr
58 proto, addr = proto_addr
47 assert proto in ['tcp','pgm','epgm','ipc','inproc'], "Invalid protocol: %r"%proto
59 assert proto in ['tcp','pgm','epgm','ipc','inproc'], "Invalid protocol: %r"%proto
48
60
49 # domain pattern adapted from http://www.regexlib.com/REDetails.aspx?regexp_id=391
61 # domain pattern adapted from http://www.regexlib.com/REDetails.aspx?regexp_id=391
50 # author: Remi Sabourin
62 # author: Remi Sabourin
51 pat = re.compile(r'^([\w\d]([\w\d\-]{0,61}[\w\d])?\.)*[\w\d]([\w\d\-]{0,61}[\w\d])?$')
63 pat = re.compile(r'^([\w\d]([\w\d\-]{0,61}[\w\d])?\.)*[\w\d]([\w\d\-]{0,61}[\w\d])?$')
52
64
53 if proto == 'tcp':
65 if proto == 'tcp':
54 lis = addr.split(':')
66 lis = addr.split(':')
55 assert len(lis) == 2, 'Invalid url: %r'%url
67 assert len(lis) == 2, 'Invalid url: %r'%url
56 addr,s_port = lis
68 addr,s_port = lis
57 try:
69 try:
58 port = int(s_port)
70 port = int(s_port)
59 except ValueError:
71 except ValueError:
60             raise AssertionError("Invalid port %r in url: %r"%(s_port, url))
72             raise AssertionError("Invalid port %r in url: %r"%(s_port, url))
61
73
62 assert addr == '*' or pat.match(addr) is not None, 'Invalid url: %r'%url
74 assert addr == '*' or pat.match(addr) is not None, 'Invalid url: %r'%url
63
75
64 else:
76 else:
65 # only validate tcp urls currently
77 # only validate tcp urls currently
66 pass
78 pass
67
79
68 return True
80 return True
69
81
70
82
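A quick sanity sketch of validate_url with hypothetical inputs: valid urls return True, malformed ones fail an assertion.

    assert validate_url('tcp://192.168.1.100:10101')
    assert validate_url('ipc:///tmp/engine.sock')   # non-tcp: only the protocol is checked
    try:
        validate_url('tcp://localhost')             # missing :port
    except AssertionError as e:
        print e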
71 def validate_url_container(container):
83 def validate_url_container(container):
72 """validate a potentially nested collection of urls."""
84 """validate a potentially nested collection of urls."""
73 if isinstance(container, basestring):
85 if isinstance(container, basestring):
74 url = container
86 url = container
75 return validate_url(url)
87 return validate_url(url)
76 elif isinstance(container, dict):
88 elif isinstance(container, dict):
77 container = container.itervalues()
89 container = container.itervalues()
78
90
79 for element in container:
91 for element in container:
80 validate_url_container(element)
92 validate_url_container(element)
81
93
82
94
83 def split_url(url):
95 def split_url(url):
84 """split a zmq url (tcp://ip:port) into ('tcp','ip','port')."""
96 """split a zmq url (tcp://ip:port) into ('tcp','ip','port')."""
85 proto_addr = url.split('://')
97 proto_addr = url.split('://')
86 assert len(proto_addr) == 2, 'Invalid url: %r'%url
98 assert len(proto_addr) == 2, 'Invalid url: %r'%url
87 proto, addr = proto_addr
99 proto, addr = proto_addr
88 lis = addr.split(':')
100 lis = addr.split(':')
89 assert len(lis) == 2, 'Invalid url: %r'%url
101 assert len(lis) == 2, 'Invalid url: %r'%url
90 addr,s_port = lis
102 addr,s_port = lis
91 return proto,addr,s_port
103 return proto,addr,s_port
92
104
93 def disambiguate_ip_address(ip, location=None):
105 def disambiguate_ip_address(ip, location=None):
94 """turn multi-ip interfaces '0.0.0.0' and '*' into connectable
106 """turn multi-ip interfaces '0.0.0.0' and '*' into connectable
95 ones, based on the location (default interpretation of location is localhost)."""
107 ones, based on the location (default interpretation of location is localhost)."""
96 if ip in ('0.0.0.0', '*'):
108 if ip in ('0.0.0.0', '*'):
97 external_ips = socket.gethostbyname_ex(socket.gethostname())[2]
109 external_ips = socket.gethostbyname_ex(socket.gethostname())[2]
98 if location is None or location in external_ips:
110 if location is None or location in external_ips:
99 ip='127.0.0.1'
111 ip='127.0.0.1'
100 elif location:
112 elif location:
101 return location
113 return location
102 return ip
114 return ip
103
115
104 def disambiguate_url(url, location=None):
116 def disambiguate_url(url, location=None):
105 """turn multi-ip interfaces '0.0.0.0' and '*' into connectable
117 """turn multi-ip interfaces '0.0.0.0' and '*' into connectable
106 ones, based on the location (default interpretation is localhost).
118 ones, based on the location (default interpretation is localhost).
107
119
108 This is for zeromq urls, such as tcp://*:10101."""
120 This is for zeromq urls, such as tcp://*:10101."""
109 try:
121 try:
110 proto,ip,port = split_url(url)
122 proto,ip,port = split_url(url)
111 except AssertionError:
123 except AssertionError:
112 # probably not tcp url; could be ipc, etc.
124 # probably not tcp url; could be ipc, etc.
113 return url
125 return url
114
126
115 ip = disambiguate_ip_address(ip,location)
127 ip = disambiguate_ip_address(ip,location)
116
128
117 return "%s://%s:%s"%(proto,ip,port)
129 return "%s://%s:%s"%(proto,ip,port)
118
130
119
131
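A brief sketch of disambiguate_url with hypothetical addresses: bind-style urls become connectable, everything else passes through.

    print disambiguate_url('tcp://*:10101')
    # -> 'tcp://127.0.0.1:10101' when no location is given
    print disambiguate_url('tcp://0.0.0.0:10101', '10.0.0.5')
    # -> 'tcp://10.0.0.5:10101', unless 10.0.0.5 is one of this host's own
    #    interfaces, in which case localhost is substituted
    print disambiguate_url('ipc:///tmp/engine.sock')
    # -> unchanged; only tcp urls are rewritten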
132 def rekey(dikt):
133 """Rekey a dict that has been forced to use str keys where there should be
134 ints by json. This belongs in the jsonutil added by fperez."""
135 for k in dikt.iterkeys():
136 if isinstance(k, str):
137 ik=fk=None
138 try:
139 ik = int(k)
140 except ValueError:
141 try:
142 fk = float(k)
143 except ValueError:
144 continue
145 if ik is not None:
146 nk = ik
147 else:
148 nk = fk
149 if nk in dikt:
150 raise KeyError("already have key %r"%nk)
151 dikt[nk] = dikt.pop(k)
152 return dikt
153
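A sketch of the intended behaviour of rekey, on a hypothetical dict: string keys that parse as numbers are converted in place.

    d = {'0': 'a', '1.5': 'b', 'name': 'c'}
    rekey(d)
    assert d == {0: 'a', 1.5: 'b', 'name': 'c'}
    # rekey({'0': 'x', 0: 'y'}) raises KeyError: the int key already exists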
154 def serialize_object(obj, threshold=64e-6):
155 """Serialize an object into a list of sendable buffers.
156
157 Parameters
158 ----------
159
160 obj : object
161 The object to be serialized
162 threshold : float
163 The threshold for not double-pickling the content.
164
165
166 Returns
167 -------
168 ('pmd', [bufs]) :
169 where pmd is the pickled metadata wrapper,
170 bufs is a list of data buffers
171 """
172 databuffers = []
173 if isinstance(obj, (list, tuple)):
174 clist = canSequence(obj)
175 slist = map(serialize, clist)
176 for s in slist:
177 if s.typeDescriptor in ('buffer', 'ndarray') or s.getDataSize() > threshold:
178 databuffers.append(s.getData())
179 s.data = None
180 return pickle.dumps(slist,-1), databuffers
181 elif isinstance(obj, dict):
182 sobj = {}
183 for k in sorted(obj.iterkeys()):
184 s = serialize(can(obj[k]))
185 if s.typeDescriptor in ('buffer', 'ndarray') or s.getDataSize() > threshold:
186 databuffers.append(s.getData())
187 s.data = None
188 sobj[k] = s
189 return pickle.dumps(sobj,-1),databuffers
190 else:
191 s = serialize(can(obj))
192 if s.typeDescriptor in ('buffer', 'ndarray') or s.getDataSize() > threshold:
193 databuffers.append(s.getData())
194 s.data = None
195 return pickle.dumps(s,-1),databuffers
196
197
198 def unserialize_object(bufs):
199 """reconstruct an object serialized by serialize_object from data buffers."""
200 bufs = list(bufs)
201 sobj = pickle.loads(bufs.pop(0))
202 if isinstance(sobj, (list, tuple)):
203 for s in sobj:
204 if s.data is None:
205 s.data = bufs.pop(0)
206 return uncanSequence(map(unserialize, sobj)), bufs
207 elif isinstance(sobj, dict):
208 newobj = {}
209 for k in sorted(sobj.iterkeys()):
210 s = sobj[k]
211 if s.data is None:
212 s.data = bufs.pop(0)
213 newobj[k] = uncan(unserialize(s))
214 return newobj, bufs
215 else:
216 if sobj.data is None:
217 sobj.data = bufs.pop(0)
218 return uncan(unserialize(sobj)), bufs
219
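A round-trip sketch for the pair above: serialize_object returns the pickled metadata plus any extracted buffers, and unserialize_object consumes a flat list that starts with that metadata.

    obj = {'x': range(10), 'y': 'hello'}
    pobj, bufs = serialize_object(obj)
    new_obj, remaining = unserialize_object([pobj] + bufs)
    assert new_obj == obj and remaining == []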
220 def pack_apply_message(f, args, kwargs, threshold=64e-6):
221 """pack up a function, args, and kwargs to be sent over the wire
222 as a series of buffers. Any object whose data is larger than `threshold`
223 will not have their data copied (currently only numpy arrays support zero-copy)"""
224 msg = [pickle.dumps(can(f),-1)]
225 databuffers = [] # for large objects
226 sargs, bufs = serialize_object(args,threshold)
227 msg.append(sargs)
228 databuffers.extend(bufs)
229 skwargs, bufs = serialize_object(kwargs,threshold)
230 msg.append(skwargs)
231 databuffers.extend(bufs)
232 msg.extend(databuffers)
233 return msg
234
235 def unpack_apply_message(bufs, g=None, copy=True):
236 """unpack f,args,kwargs from buffers packed by pack_apply_message()
237 Returns: original f,args,kwargs"""
238 bufs = list(bufs) # allow us to pop
239 assert len(bufs) >= 3, "not enough buffers!"
240 if not copy:
241 for i in range(3):
242 bufs[i] = bufs[i].bytes
243 cf = pickle.loads(bufs.pop(0))
244 sargs = list(pickle.loads(bufs.pop(0)))
245 skwargs = dict(pickle.loads(bufs.pop(0)))
246 # print sargs, skwargs
247 f = uncan(cf, g)
248 for sa in sargs:
249 if sa.data is None:
250 m = bufs.pop(0)
251 if sa.getTypeDescriptor() in ('buffer', 'ndarray'):
252 if copy:
253 sa.data = buffer(m)
254 else:
255 sa.data = m.buffer
256 else:
257 if copy:
258 sa.data = m
259 else:
260 sa.data = m.bytes
261
262 args = uncanSequence(map(unserialize, sargs), g)
263 kwargs = {}
264 for k in sorted(skwargs.iterkeys()):
265 sa = skwargs[k]
266 if sa.data is None:
267 sa.data = bufs.pop(0)
268 kwargs[k] = uncan(unserialize(sa), g)
269
270 return f,args,kwargs
271
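A round-trip sketch for pack/unpack_apply_message, assuming can/uncan handle a plain module-level function:

    def add(a, b=0):
        return a + b

    bufs = pack_apply_message(add, (1,), {'b': 2})
    f, args, kwargs = unpack_apply_message(bufs)
    assert f(*args, **kwargs) == 3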
@@ -1,657 +1,658 b''
1 """Views of remote engines"""
1 """Views of remote engines."""
2 #-----------------------------------------------------------------------------
2 #-----------------------------------------------------------------------------
3 # Copyright (C) 2010 The IPython Development Team
3 # Copyright (C) 2010 The IPython Development Team
4 #
4 #
5 # Distributed under the terms of the BSD License. The full license is in
5 # Distributed under the terms of the BSD License. The full license is in
6 # the file COPYING, distributed as part of this software.
6 # the file COPYING, distributed as part of this software.
7 #-----------------------------------------------------------------------------
7 #-----------------------------------------------------------------------------
8
8
9 #-----------------------------------------------------------------------------
9 #-----------------------------------------------------------------------------
10 # Imports
10 # Imports
11 #-----------------------------------------------------------------------------
11 #-----------------------------------------------------------------------------
12
12
13 from IPython.testing import decorators as testdec
13 from IPython.testing import decorators as testdec
14 from IPython.utils.traitlets import HasTraits, Bool, List, Dict, Set, Int, Instance
14 from IPython.utils.traitlets import HasTraits, Any, Bool, List, Dict, Set, Int, Instance
15
15
16 from IPython.external.decorator import decorator
16 from IPython.external.decorator import decorator
17
17
18 from .asyncresult import AsyncResult
18 from .asyncresult import AsyncResult
19 from .dependency import Dependency
19 from .dependency import Dependency
20 from .remotefunction import ParallelFunction, parallel, remote
20 from .remotefunction import ParallelFunction, parallel, remote
21
21
22 #-----------------------------------------------------------------------------
22 #-----------------------------------------------------------------------------
23 # Decorators
23 # Decorators
24 #-----------------------------------------------------------------------------
24 #-----------------------------------------------------------------------------
25
25
26 @decorator
26 @decorator
27 def myblock(f, self, *args, **kwargs):
27 def myblock(f, self, *args, **kwargs):
28 """override client.block with self.block during a call"""
28 """override client.block with self.block during a call"""
29 block = self.client.block
29 block = self.client.block
30 self.client.block = self.block
30 self.client.block = self.block
31 try:
31 try:
32 ret = f(self, *args, **kwargs)
32 ret = f(self, *args, **kwargs)
33 finally:
33 finally:
34 self.client.block = block
34 self.client.block = block
35 return ret
35 return ret
36
36
37 @decorator
37 @decorator
38 def save_ids(f, self, *args, **kwargs):
38 def save_ids(f, self, *args, **kwargs):
39 """Keep our history and outstanding attributes up to date after a method call."""
39 """Keep our history and outstanding attributes up to date after a method call."""
40 n_previous = len(self.client.history)
40 n_previous = len(self.client.history)
41 ret = f(self, *args, **kwargs)
41 ret = f(self, *args, **kwargs)
42 nmsgs = len(self.client.history) - n_previous
42 nmsgs = len(self.client.history) - n_previous
43 msg_ids = self.client.history[-nmsgs:]
43 msg_ids = self.client.history[-nmsgs:]
44 self.history.extend(msg_ids)
44 self.history.extend(msg_ids)
45 map(self.outstanding.add, msg_ids)
45 map(self.outstanding.add, msg_ids)
46 return ret
46 return ret
47
47
48 @decorator
48 @decorator
49 def sync_results(f, self, *args, **kwargs):
49 def sync_results(f, self, *args, **kwargs):
50 """sync relevant results from self.client to our results attribute."""
50 """sync relevant results from self.client to our results attribute."""
51 ret = f(self, *args, **kwargs)
51 ret = f(self, *args, **kwargs)
52 delta = self.outstanding.difference(self.client.outstanding)
52 delta = self.outstanding.difference(self.client.outstanding)
53 completed = self.outstanding.intersection(delta)
53 completed = self.outstanding.intersection(delta)
54 self.outstanding = self.outstanding.difference(completed)
54 self.outstanding = self.outstanding.difference(completed)
55 for msg_id in completed:
55 for msg_id in completed:
56 self.results[msg_id] = self.client.results[msg_id]
56 self.results[msg_id] = self.client.results[msg_id]
57 return ret
57 return ret
58
58
59 @decorator
59 @decorator
60 def spin_after(f, self, *args, **kwargs):
60 def spin_after(f, self, *args, **kwargs):
61 """call spin after the method."""
61 """call spin after the method."""
62 ret = f(self, *args, **kwargs)
62 ret = f(self, *args, **kwargs)
63 self.spin()
63 self.spin()
64 return ret
64 return ret
65
65
66 #-----------------------------------------------------------------------------
66 #-----------------------------------------------------------------------------
67 # Classes
67 # Classes
68 #-----------------------------------------------------------------------------
68 #-----------------------------------------------------------------------------
69
69
70 class View(HasTraits):
70 class View(HasTraits):
71 """Base View class for more convenint apply(f,*args,**kwargs) syntax via attributes.
71 """Base View class for more convenint apply(f,*args,**kwargs) syntax via attributes.
72
72
73 Don't use this class, use subclasses.
73 Don't use this class, use subclasses.
74 """
74 """
75 block=Bool(False)
75 block=Bool(False)
76 bound=Bool(False)
76 bound=Bool(False)
77 history=List()
77 history=List()
78 outstanding = Set()
78 outstanding = Set()
79 results = Dict()
79 results = Dict()
80 client = Instance('IPython.zmq.parallel.client.Client')
80 client = Instance('IPython.zmq.parallel.client.Client')
81
81
82 _ntargets = Int(1)
82 _ntargets = Int(1)
83 _balanced = Bool(False)
83 _balanced = Bool(False)
84 _default_names = List(['block', 'bound'])
84 _default_names = List(['block', 'bound'])
85 _targets = None
85 _targets = Any()
86
86
87 def __init__(self, client=None, targets=None):
87 def __init__(self, client=None, targets=None):
88 super(View, self).__init__(client=client)
88 super(View, self).__init__(client=client)
89 self._targets = targets
89 self._targets = targets
90 self._ntargets = 1 if isinstance(targets, (int,type(None))) else len(targets)
90 self._ntargets = 1 if isinstance(targets, (int,type(None))) else len(targets)
91 self.block = client.block
91 self.block = client.block
92
92
93 for name in self._default_names:
93 for name in self._default_names:
94 setattr(self, name, getattr(self, name, None))
94 setattr(self, name, getattr(self, name, None))
95
95
96 assert not self.__class__ is View, "Don't use base View objects, use subclasses"
96 assert not self.__class__ is View, "Don't use base View objects, use subclasses"
97
97
98
98
99 def __repr__(self):
99 def __repr__(self):
100 strtargets = str(self._targets)
100 strtargets = str(self._targets)
101 if len(strtargets) > 16:
101 if len(strtargets) > 16:
102 strtargets = strtargets[:12]+'...]'
102 strtargets = strtargets[:12]+'...]'
103 return "<%s %s>"%(self.__class__.__name__, strtargets)
103 return "<%s %s>"%(self.__class__.__name__, strtargets)
104
104
105 @property
105 @property
106 def targets(self):
106 def targets(self):
107 return self._targets
107 return self._targets
108
108
109 @targets.setter
109 @targets.setter
110 def targets(self, value):
110 def targets(self, value):
111 raise AttributeError("Cannot set View `targets` after construction!")
111 raise AttributeError("Cannot set View `targets` after construction!")
112
112
113 @property
113 @property
114 def balanced(self):
114 def balanced(self):
115 return self._balanced
115 return self._balanced
116
116
117 @balanced.setter
117 @balanced.setter
118 def balanced(self, value):
118 def balanced(self, value):
119 raise AttributeError("Cannot set View `balanced` after construction!")
119 raise AttributeError("Cannot set View `balanced` after construction!")
120
120
121 def _defaults(self, *excludes):
121 def _defaults(self, *excludes):
122 """return dict of our default attributes, excluding names given."""
122 """return dict of our default attributes, excluding names given."""
123 d = dict(balanced=self._balanced, targets=self._targets)
123 d = dict(balanced=self._balanced, targets=self._targets)
124 for name in self._default_names:
124 for name in self._default_names:
125 if name not in excludes:
125 if name not in excludes:
126 d[name] = getattr(self, name)
126 d[name] = getattr(self, name)
127 return d
127 return d
128
128
129 def set_flags(self, **kwargs):
129 def set_flags(self, **kwargs):
130 """set my attribute flags by keyword.
130 """set my attribute flags by keyword.
131
131
132 A View is a wrapper for the Client's apply method, but
132 A View is a wrapper for the Client's apply method, but
133 with attributes that specify keyword arguments, those attributes
133 with attributes that specify keyword arguments, those attributes
134 can be set by keyword argument with this method.
134 can be set by keyword argument with this method.
135
135
136 Parameters
136 Parameters
137 ----------
137 ----------
138
138
139 block : bool
139 block : bool
140 whether to wait for results
140 whether to wait for results
141 bound : bool
141 bound : bool
142 whether to use the client's namespace
142 whether to use the client's namespace
143 """
143 """
144 for key in kwargs:
144 for key in kwargs:
145 if key not in self._default_names:
145 if key not in self._default_names:
146 raise KeyError("Invalid name: %r"%key)
146 raise KeyError("Invalid name: %r"%key)
147 for name in ('block', 'bound'):
147 for name in ('block', 'bound'):
148 if name in kwargs:
148 if name in kwargs:
149 setattr(self, name, kwargs[name])
149 setattr(self, name, kwargs[name])
150
150
151 #----------------------------------------------------------------
151 #----------------------------------------------------------------
152 # wrappers for client methods:
152 # wrappers for client methods:
153 #----------------------------------------------------------------
153 #----------------------------------------------------------------
154 @sync_results
154 @sync_results
155 def spin(self):
155 def spin(self):
156 """spin the client, and sync"""
156 """spin the client, and sync"""
157 self.client.spin()
157 self.client.spin()
158
158
159 @sync_results
159 @sync_results
160 @save_ids
160 @save_ids
161 def apply(self, f, *args, **kwargs):
161 def apply(self, f, *args, **kwargs):
162 """calls f(*args, **kwargs) on remote engines, returning the result.
162 """calls f(*args, **kwargs) on remote engines, returning the result.
163
163
164 This method does not involve the engine's namespace.
164 This method does not involve the engine's namespace.
165
165
166 if self.block is False:
166 if self.block is False:
167 returns msg_id
167 returns msg_id
168 else:
168 else:
169 returns actual result of f(*args, **kwargs)
169 returns actual result of f(*args, **kwargs)
170 """
170 """
171 return self.client.apply(f, args, kwargs, **self._defaults())
171 return self.client.apply(f, args, kwargs, **self._defaults())
172
172
173 @save_ids
173 @save_ids
174 def apply_async(self, f, *args, **kwargs):
174 def apply_async(self, f, *args, **kwargs):
175 """calls f(*args, **kwargs) on remote engines in a nonblocking manner.
175 """calls f(*args, **kwargs) on remote engines in a nonblocking manner.
176
176
177 This method does not involve the engine's namespace.
177 This method does not involve the engine's namespace.
178
178
179 returns msg_id
179 returns msg_id
180 """
180 """
181 d = self._defaults('block', 'bound')
181 d = self._defaults('block', 'bound')
182 return self.client.apply(f,args,kwargs, block=False, bound=False, **d)
182 return self.client.apply(f,args,kwargs, block=False, bound=False, **d)
183
183
184 @spin_after
184 @spin_after
185 @save_ids
185 @save_ids
186 def apply_sync(self, f, *args, **kwargs):
186 def apply_sync(self, f, *args, **kwargs):
187 """calls f(*args, **kwargs) on remote engines in a blocking manner,
187 """calls f(*args, **kwargs) on remote engines in a blocking manner,
188 returning the result.
188 returning the result.
189
189
190 This method does not involve the engine's namespace.
190 This method does not involve the engine's namespace.
191
191
192 returns: actual result of f(*args, **kwargs)
192 returns: actual result of f(*args, **kwargs)
193 """
193 """
194 d = self._defaults('block', 'bound')
194 d = self._defaults('block', 'bound')
195 return self.client.apply(f,args,kwargs, block=True, bound=False, **d)
195 return self.client.apply(f,args,kwargs, block=True, bound=False, **d)
196
196
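A usage sketch for the apply family (assumes a controller and engines are already running, and that Client() connects with its defaults):

    from IPython.zmq.parallel import Client

    rc = Client()
    dv = rc[:]                                  # DirectView on all engines
    handle = dv.apply_async(lambda x: x**2, 4)  # submits and returns immediately
    result = dv.apply_sync(lambda x: x**2, 4)   # blocks until the result arrives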
197 # @sync_results
197 # @sync_results
198 # @save_ids
198 # @save_ids
199 # def apply_bound(self, f, *args, **kwargs):
199 # def apply_bound(self, f, *args, **kwargs):
200 # """calls f(*args, **kwargs) bound to engine namespace(s).
200 # """calls f(*args, **kwargs) bound to engine namespace(s).
201 #
201 #
202 # if self.block is False:
202 # if self.block is False:
203 # returns msg_id
203 # returns msg_id
204 # else:
204 # else:
205 # returns actual result of f(*args, **kwargs)
205 # returns actual result of f(*args, **kwargs)
206 #
206 #
207 # This method has access to the targets' namespace via globals()
207 # This method has access to the targets' namespace via globals()
208 #
208 #
209 # """
209 # """
210 # d = self._defaults('bound')
210 # d = self._defaults('bound')
211 # return self.client.apply(f, args, kwargs, bound=True, **d)
211 # return self.client.apply(f, args, kwargs, bound=True, **d)
212 #
212 #
213 @sync_results
213 @sync_results
214 @save_ids
214 @save_ids
215 def apply_async_bound(self, f, *args, **kwargs):
215 def apply_async_bound(self, f, *args, **kwargs):
216 """calls f(*args, **kwargs) bound to engine namespace(s)
216 """calls f(*args, **kwargs) bound to engine namespace(s)
217 in a nonblocking manner.
217 in a nonblocking manner.
218
218
219 returns: msg_id
219 returns: msg_id
220
220
221 This method has access to the targets' namespace via globals()
221 This method has access to the targets' namespace via globals()
222
222
223 """
223 """
224 d = self._defaults('block', 'bound')
224 d = self._defaults('block', 'bound')
225 return self.client.apply(f, args, kwargs, block=False, bound=True, **d)
225 return self.client.apply(f, args, kwargs, block=False, bound=True, **d)
226
226
227 @spin_after
227 @spin_after
228 @save_ids
228 @save_ids
229 def apply_sync_bound(self, f, *args, **kwargs):
229 def apply_sync_bound(self, f, *args, **kwargs):
230 """calls f(*args, **kwargs) bound to engine namespace(s), waiting for the result.
230 """calls f(*args, **kwargs) bound to engine namespace(s), waiting for the result.
231
231
232 returns: actual result of f(*args, **kwargs)
232 returns: actual result of f(*args, **kwargs)
233
233
234 This method has access to the targets' namespace via globals()
234 This method has access to the targets' namespace via globals()
235
235
236 """
236 """
237 d = self._defaults('block', 'bound')
237 d = self._defaults('block', 'bound')
238 return self.client.apply(f, args, kwargs, block=True, bound=True, **d)
238 return self.client.apply(f, args, kwargs, block=True, bound=True, **d)
239
239
240 def abort(self, jobs=None, block=None):
240 def abort(self, jobs=None, block=None):
241 """Abort jobs on my engines.
241 """Abort jobs on my engines.
242
242
243 Parameters
243 Parameters
244 ----------
244 ----------
245
245
246 jobs : None, str, list of strs, optional
246 jobs : None, str, list of strs, optional
247 if None: abort all jobs.
247 if None: abort all jobs.
248 else: abort specific msg_id(s).
248 else: abort specific msg_id(s).
249 """
249 """
250 block = block if block is not None else self.block
250 block = block if block is not None else self.block
251 return self.client.abort(jobs=jobs, targets=self._targets, block=block)
251 return self.client.abort(jobs=jobs, targets=self._targets, block=block)
252
252
253 def queue_status(self, verbose=False):
253 def queue_status(self, verbose=False):
254 """Fetch the Queue status of my engines"""
254 """Fetch the Queue status of my engines"""
255 return self.client.queue_status(targets=self._targets, verbose=verbose)
255 return self.client.queue_status(targets=self._targets, verbose=verbose)
256
256
257 def purge_results(self, jobs=[], targets=[]):
257 def purge_results(self, jobs=[], targets=[]):
258 """Instruct the controller to forget specific results."""
258 """Instruct the controller to forget specific results."""
259 if targets is None or targets == 'all':
259 if targets is None or targets == 'all':
260 targets = self._targets
260 targets = self._targets
261 return self.client.purge_results(jobs=jobs, targets=targets)
261 return self.client.purge_results(jobs=jobs, targets=targets)
262
262
263 @spin_after
263 @spin_after
264 def get_result(self, indices_or_msg_ids=None):
264 def get_result(self, indices_or_msg_ids=None):
265 """return one or more results, specified by history index or msg_id.
265 """return one or more results, specified by history index or msg_id.
266
266
267 See client.get_result for details.
267 See client.get_result for details.
268
268
269 """
269 """
270
270
271 if indices_or_msg_ids is None:
271 if indices_or_msg_ids is None:
272 indices_or_msg_ids = -1
272 indices_or_msg_ids = -1
273 if isinstance(indices_or_msg_ids, int):
273 if isinstance(indices_or_msg_ids, int):
274 indices_or_msg_ids = self.history[indices_or_msg_ids]
274 indices_or_msg_ids = self.history[indices_or_msg_ids]
275 elif isinstance(indices_or_msg_ids, (list,tuple,set)):
275 elif isinstance(indices_or_msg_ids, (list,tuple,set)):
276 indices_or_msg_ids = list(indices_or_msg_ids)
276 indices_or_msg_ids = list(indices_or_msg_ids)
277 for i,index in enumerate(indices_or_msg_ids):
277 for i,index in enumerate(indices_or_msg_ids):
278 if isinstance(index, int):
278 if isinstance(index, int):
279 indices_or_msg_ids[i] = self.history[index]
279 indices_or_msg_ids[i] = self.history[index]
280 return self.client.get_result(indices_or_msg_ids)
280 return self.client.get_result(indices_or_msg_ids)
281
281
282 #-------------------------------------------------------------------
282 #-------------------------------------------------------------------
283 # Map
283 # Map
284 #-------------------------------------------------------------------
284 #-------------------------------------------------------------------
285
285
286 def map(self, f, *sequences, **kwargs):
286 def map(self, f, *sequences, **kwargs):
287 """override in subclasses"""
287 """override in subclasses"""
288 raise NotImplementedError
288 raise NotImplementedError
289
289
290 def map_async(self, f, *sequences, **kwargs):
290 def map_async(self, f, *sequences, **kwargs):
291 """Parallel version of builtin `map`, using this view's engines.
291 """Parallel version of builtin `map`, using this view's engines.
292
292
293 This is equivalent to map(...block=False)
293 This is equivalent to map(...block=False)
294
294
295 See `self.map` for details.
295 See `self.map` for details.
296 """
296 """
297 if 'block' in kwargs:
297 if 'block' in kwargs:
298 raise TypeError("map_async doesn't take a `block` keyword argument.")
298 raise TypeError("map_async doesn't take a `block` keyword argument.")
299 kwargs['block'] = False
299 kwargs['block'] = False
300 return self.map(f,*sequences,**kwargs)
300 return self.map(f,*sequences,**kwargs)
301
301
302 def map_sync(self, f, *sequences, **kwargs):
302 def map_sync(self, f, *sequences, **kwargs):
303 """Parallel version of builtin `map`, using this view's engines.
303 """Parallel version of builtin `map`, using this view's engines.
304
304
305 This is equivalent to map(...block=True)
305 This is equivalent to map(...block=True)
306
306
307 See `self.map` for details.
307 See `self.map` for details.
308 """
308 """
309 if 'block' in kwargs:
309 if 'block' in kwargs:
310 raise TypeError("map_sync doesn't take a `block` keyword argument.")
310 raise TypeError("map_sync doesn't take a `block` keyword argument.")
311 kwargs['block'] = True
311 kwargs['block'] = True
312 return self.map(f,*sequences,**kwargs)
312 return self.map(f,*sequences,**kwargs)
313
313
314 def imap(self, f, *sequences, **kwargs):
314 def imap(self, f, *sequences, **kwargs):
315 """Parallel version of `itertools.imap`.
315 """Parallel version of `itertools.imap`.
316
316
317 See `self.map` for details.
317 See `self.map` for details.
318 """
318 """
319
319
320 return iter(self.map_async(f,*sequences, **kwargs))
320 return iter(self.map_async(f,*sequences, **kwargs))
321
321
322 #-------------------------------------------------------------------
322 #-------------------------------------------------------------------
323 # Decorators
323 # Decorators
324 #-------------------------------------------------------------------
324 #-------------------------------------------------------------------
325
325
326 def remote(self, bound=True, block=True):
326 def remote(self, bound=True, block=True):
327 """Decorator for making a RemoteFunction"""
327 """Decorator for making a RemoteFunction"""
328 return remote(self.client, bound=bound, targets=self._targets, block=block, balanced=self._balanced)
328 return remote(self.client, bound=bound, targets=self._targets, block=block, balanced=self._balanced)
329
329
330 def parallel(self, dist='b', bound=True, block=None):
330 def parallel(self, dist='b', bound=True, block=None):
331 """Decorator for making a ParallelFunction"""
331 """Decorator for making a ParallelFunction"""
332 block = self.block if block is None else block
332 block = self.block if block is None else block
333 return parallel(self.client, bound=bound, targets=self._targets, block=block, balanced=self._balanced)
333 return parallel(self.client, bound=bound, targets=self._targets, block=block, balanced=self._balanced)
334
334
335 @testdec.skip_doctest
335 @testdec.skip_doctest
336 class DirectView(View):
336 class DirectView(View):
337 """Direct Multiplexer View of one or more engines.
337 """Direct Multiplexer View of one or more engines.
338
338
339 These are created via indexed access to a client:
339 These are created via indexed access to a client:
340
340
341 >>> dv_1 = client[1]
341 >>> dv_1 = client[1]
342 >>> dv_all = client[:]
342 >>> dv_all = client[:]
343 >>> dv_even = client[::2]
343 >>> dv_even = client[::2]
344 >>> dv_some = client[1:3]
344 >>> dv_some = client[1:3]
345
345
346 This object provides dictionary access to engine namespaces:
346 This object provides dictionary access to engine namespaces:
347
347
348 # push a=5:
348 # push a=5:
349 >>> dv['a'] = 5
349 >>> dv['a'] = 5
350 # pull 'foo':
350 # pull 'foo':
351 >>> dv['foo']
351 >>> dv['foo']
352
352
353 """
353 """
354
354
355 def __init__(self, client=None, targets=None):
355 def __init__(self, client=None, targets=None):
356 super(DirectView, self).__init__(client=client, targets=targets)
356 super(DirectView, self).__init__(client=client, targets=targets)
357 self._balanced = False
357 self._balanced = False
358
358
359 @spin_after
359 @spin_after
360 @save_ids
360 @save_ids
361 def map(self, f, *sequences, **kwargs):
361 def map(self, f, *sequences, **kwargs):
362 """view.map(f, *sequences, block=self.block, bound=self.bound) => list|AsyncMapResult
362 """view.map(f, *sequences, block=self.block, bound=self.bound) => list|AsyncMapResult
363
363
364 Parallel version of builtin `map`, using this View's `targets`.
364 Parallel version of builtin `map`, using this View's `targets`.
365
365
366 There will be one task per target, so work will be chunked
366 There will be one task per target, so work will be chunked
367 if the sequences are longer than `targets`.
367 if the sequences are longer than `targets`.
368
368
369 Results can be iterated as they are ready, but will become available in chunks.
369 Results can be iterated as they are ready, but will become available in chunks.
370
370
371 Parameters
371 Parameters
372 ----------
372 ----------
373
373
374 f : callable
374 f : callable
375 function to be mapped
375 function to be mapped
376 *sequences: one or more sequences of matching length
376 *sequences: one or more sequences of matching length
377 the sequences to be distributed and passed to `f`
377 the sequences to be distributed and passed to `f`
378 block : bool
378 block : bool
379 whether to wait for the result or not [default self.block]
379 whether to wait for the result or not [default self.block]
380 bound : bool
380 bound : bool
381 whether to have access to the engines' namespaces [default self.bound]
381 whether to have access to the engines' namespaces [default self.bound]
382
382
383 Returns
383 Returns
384 -------
384 -------
385
385
386 if block=False:
386 if block=False:
387 AsyncMapResult
387 AsyncMapResult
388 An object like AsyncResult, but which reassembles the sequence of results
388 An object like AsyncResult, but which reassembles the sequence of results
389 into a single list. AsyncMapResults can be iterated through before all
389 into a single list. AsyncMapResults can be iterated through before all
390 results are complete.
390 results are complete.
391 else:
391 else:
392 list
392 list
393 the result of map(f,*sequences)
393 the result of map(f,*sequences)
394 """
394 """
395
395
396 block = kwargs.get('block', self.block)
396 block = kwargs.get('block', self.block)
397 bound = kwargs.get('bound', self.bound)
397 bound = kwargs.get('bound', self.bound)
398 for k in kwargs.keys():
398 for k in kwargs.keys():
399 if k not in ['block', 'bound']:
399 if k not in ['block', 'bound']:
400 raise TypeError("invalid keyword arg, %r"%k)
400 raise TypeError("invalid keyword arg, %r"%k)
401
401
402 assert len(sequences) > 0, "must have some sequences to map onto!"
402 assert len(sequences) > 0, "must have some sequences to map onto!"
403 pf = ParallelFunction(self.client, f, block=block, bound=bound,
403 pf = ParallelFunction(self.client, f, block=block, bound=bound,
404 targets=self._targets, balanced=False)
404 targets=self._targets, balanced=False)
405 return pf.map(*sequences)
405 return pf.map(*sequences)
406
406
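A sketch of the chunking described above, continuing the hypothetical dv from earlier: with four engines, a direct map produces four tasks of 25 elements each.

    squares = dv.map_sync(lambda x: x**2, range(100))
    amr = dv.map_async(lambda x: x**2, range(100))
    for sq in amr:        # chunks become available as engines finish
        pass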
407 @sync_results
407 @sync_results
408 @save_ids
408 @save_ids
409 def execute(self, code, block=None):
409 def execute(self, code, block=None):
410 """execute some code on my targets."""
410 """execute some code on my targets."""
411
411
412 block = block if block is not None else self.block
412 block = block if block is not None else self.block
413
413
414 return self.client.execute(code, block=block, targets=self._targets)
414 return self.client.execute(code, block=block, targets=self._targets)
415
415
416 @sync_results
416 @sync_results
417 @save_ids
417 @save_ids
418 def run(self, fname, block=None):
418 def run(self, fname, block=None):
419 """execute the code in a file on my targets."""
419 """execute the code in a file on my targets."""
420
420
421 block = block if block is not None else self.block
421 block = block if block is not None else self.block
422
422
423 return self.client.run(fname, block=block, targets=self._targets)
423 return self.client.run(fname, block=block, targets=self._targets)
424
424
425 def update(self, ns):
425 def update(self, ns):
426 """update remote namespace with dict `ns`"""
426 """update remote namespace with dict `ns`"""
427 return self.client.push(ns, targets=self._targets, block=self.block)
427 return self.client.push(ns, targets=self._targets, block=self.block)
428
428
429 def push(self, ns, block=None):
429 def push(self, ns, block=None):
430 """update remote namespace with dict `ns`"""
430 """update remote namespace with dict `ns`"""
431
431
432 block = block if block is not None else self.block
432 block = block if block is not None else self.block
433
433
434 return self.client.push(ns, targets=self._targets, block=block)
434 return self.client.push(ns, targets=self._targets, block=block)
435
435
436 def get(self, key_s):
436 def get(self, key_s):
437 """get object(s) by `key_s` from remote namespace
437 """get object(s) by `key_s` from remote namespace
438 will return one object if it is a key.
438 will return one object if it is a key.
439 It also takes a list of keys, and will return a list of objects."""
439 It also takes a list of keys, and will return a list of objects."""
440 # block = block if block is not None else self.block
440 # block = block if block is not None else self.block
441 return self.client.pull(key_s, block=True, targets=self._targets)
441 return self.client.pull(key_s, block=True, targets=self._targets)
442
442
443 @sync_results
443 @sync_results
444 @save_ids
444 @save_ids
445 def pull(self, key_s, block=True):
445 def pull(self, key_s, block=True):
446 """get object(s) by `key_s` from remote namespace
446 """get object(s) by `key_s` from remote namespace
447 will return one object if it is a key.
447 will return one object if it is a key.
448 It also takes a list of keys, and will return a list of objects."""
448 It also takes a list of keys, and will return a list of objects."""
449 block = block if block is not None else self.block
449 block = block if block is not None else self.block
450 return self.client.pull(key_s, block=block, targets=self._targets)
450 return self.client.pull(key_s, block=block, targets=self._targets)
451
451
452 def scatter(self, key, seq, dist='b', flatten=False, block=None):
452 def scatter(self, key, seq, dist='b', flatten=False, block=None):
453 """
453 """
454 Partition a Python sequence and send the partitions to a set of engines.
454 Partition a Python sequence and send the partitions to a set of engines.
455 """
455 """
456 block = block if block is not None else self.block
456 block = block if block is not None else self.block
457
457
458 return self.client.scatter(key, seq, dist=dist, flatten=flatten,
458 return self.client.scatter(key, seq, dist=dist, flatten=flatten,
459 targets=self._targets, block=block)
459 targets=self._targets, block=block)
460
460
461 @sync_results
461 @sync_results
462 @save_ids
462 @save_ids
463 def gather(self, key, dist='b', block=None):
463 def gather(self, key, dist='b', block=None):
464 """
464 """
465 Gather a partitioned sequence on a set of engines as a single local seq.
465 Gather a partitioned sequence on a set of engines as a single local seq.
466 """
466 """
467 block = block if block is not None else self.block
467 block = block if block is not None else self.block
468
468
469 return self.client.gather(key, dist=dist, targets=self._targets, block=block)
469 return self.client.gather(key, dist=dist, targets=self._targets, block=block)
470
470
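A scatter/execute/gather sketch, continuing the hypothetical dv above: partition a sequence, transform it in the engine namespaces, and reassemble.

    dv.scatter('x', range(16))               # each engine receives a slice as 'x'
    dv.execute('y = [i*2 for i in x]')       # runs in each engine's namespace
    doubled = dv.gather('y', block=True)     # [0, 2, 4, ..., 30]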
471 def __getitem__(self, key):
471 def __getitem__(self, key):
472 return self.get(key)
472 return self.get(key)
473
473
474 def __setitem__(self,key, value):
474 def __setitem__(self,key, value):
475 self.update({key:value})
475 self.update({key:value})
476
476
477 def clear(self, block=False):
477 def clear(self, block=False):
478 """Clear the remote namespaces on my engines."""
478 """Clear the remote namespaces on my engines."""
479 block = block if block is not None else self.block
479 block = block if block is not None else self.block
480 return self.client.clear(targets=self._targets, block=block)
480 return self.client.clear(targets=self._targets, block=block)
481
481
482 def kill(self, block=True):
482 def kill(self, block=True):
483 """Kill my engines."""
483 """Kill my engines."""
484 block = block if block is not None else self.block
484 block = block if block is not None else self.block
485 return self.client.kill(targets=self._targets, block=block)
485 return self.client.kill(targets=self._targets, block=block)
486
486
487 #----------------------------------------
487 #----------------------------------------
488 # activate for %px,%autopx magics
488 # activate for %px,%autopx magics
489 #----------------------------------------
489 #----------------------------------------
490 def activate(self):
490 def activate(self):
491 """Make this `View` active for parallel magic commands.
491 """Make this `View` active for parallel magic commands.
492
492
493 IPython has a magic command syntax to work with `MultiEngineClient` objects.
493 IPython has a magic command syntax to work with `MultiEngineClient` objects.
494 In a given IPython session there is a single active one. While
494 In a given IPython session there is a single active one. While
495 there can be many `Views` created and used by the user,
495 there can be many `Views` created and used by the user,
496 there is only one active one. The active `View` is used whenever
496 there is only one active one. The active `View` is used whenever
497 the magic commands %px and %autopx are used.
497 the magic commands %px and %autopx are used.
498
498
499 The activate() method is called on a given `View` to make it
499 The activate() method is called on a given `View` to make it
500 active. Once this has been done, the magic commands can be used.
500 active. Once this has been done, the magic commands can be used.
501 """
501 """
502
502
503 try:
503 try:
504 # This is injected into __builtins__.
504 # This is injected into __builtins__.
505 ip = get_ipython()
505 ip = get_ipython()
506 except NameError:
506 except NameError:
507 print "The IPython parallel magics (%result, %px, %autopx) only work within IPython."
507 print "The IPython parallel magics (%result, %px, %autopx) only work within IPython."
508 else:
508 else:
509 pmagic = ip.plugin_manager.get_plugin('parallelmagic')
509 pmagic = ip.plugin_manager.get_plugin('parallelmagic')
510 if pmagic is not None:
510 if pmagic is not None:
511 pmagic.active_multiengine_client = self
511 pmagic.active_multiengine_client = self
512 else:
512 else:
513 print "You must first load the parallelmagic extension " \
513 print "You must first load the parallelmagic extension " \
514 "by doing '%load_ext parallelmagic'"
514 "by doing '%load_ext parallelmagic'"
515
515
516
516
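A sketch of the magic workflow described in activate(), as a hypothetical IPython session (dv as above):

    In [1]: %load_ext parallelmagic
    In [2]: dv.activate()
    In [3]: %px a = 5        # runs on every engine in dv's targets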
517 @testdec.skip_doctest
517 @testdec.skip_doctest
518 class LoadBalancedView(View):
518 class LoadBalancedView(View):
519 """An load-balancing View that only executes via the Task scheduler.
519 """An load-balancing View that only executes via the Task scheduler.
520
520
521 Load-balanced views can be created with the client's `view` method:
521 Load-balanced views can be created with the client's `view` method:
522
522
523 >>> v = client.view(balanced=True)
523 >>> v = client.view(balanced=True)
524
524
525 or targets can be specified, to restrict the potential destinations:
525 or targets can be specified, to restrict the potential destinations:
526
526
527 >>> v = client.view([1,3],balanced=True)
527 >>> v = client.view([1,3],balanced=True)
528
528
529 which would restrict loadbalancing to between engines 1 and 3.
529 which would restrict loadbalancing to between engines 1 and 3.
530
530
531 """
531 """
532
532
533 _default_names = ['block', 'bound', 'follow', 'after', 'timeout']
533 _default_names = ['block', 'bound', 'follow', 'after', 'timeout']
534
534
535 def __init__(self, client=None, targets=None):
535 def __init__(self, client=None, targets=None):
536 super(LoadBalancedView, self).__init__(client=client, targets=targets)
536 super(LoadBalancedView, self).__init__(client=client, targets=targets)
537 self._ntargets = 1
537 self._ntargets = 1
538 self._balanced = True
538 self._balanced = True
539
539
540 def _validate_dependency(self, dep):
540 def _validate_dependency(self, dep):
541 """validate a dependency.
541 """validate a dependency.
542
542
543 For use in `set_flags`.
543 For use in `set_flags`.
544 """
544 """
545 if dep is None or isinstance(dep, (str, AsyncResult, Dependency)):
545 if dep is None or isinstance(dep, (str, AsyncResult, Dependency)):
546 return True
546 return True
547 elif isinstance(dep, (list,set, tuple)):
547 elif isinstance(dep, (list,set, tuple)):
548 for d in dep:
548 for d in dep:
549                 if not isinstance(d, (str, AsyncResult)):
549                 if not isinstance(d, (str, AsyncResult)):
550 return False
550 return False
551 elif isinstance(dep, dict):
551 elif isinstance(dep, dict):
552 if set(dep.keys()) != set(Dependency().as_dict().keys()):
552 if set(dep.keys()) != set(Dependency().as_dict().keys()):
553 return False
553 return False
554 if not isinstance(dep['msg_ids'], list):
554 if not isinstance(dep['msg_ids'], list):
555 return False
555 return False
556 for d in dep['msg_ids']:
556 for d in dep['msg_ids']:
557 if not isinstance(d, str):
557 if not isinstance(d, str):
558 return False
558 return False
559 else:
559 else:
560 return False
560 return False
561
561
562 def set_flags(self, **kwargs):
562 def set_flags(self, **kwargs):
563 """set my attribute flags by keyword.
563 """set my attribute flags by keyword.
564
564
565 A View is a wrapper for the Client's apply method, but with attributes
565 A View is a wrapper for the Client's apply method, but with attributes
566 that specify keyword arguments, those attributes can be set by keyword
566 that specify keyword arguments, those attributes can be set by keyword
567 argument with this method.
567 argument with this method.
568
568
569 Parameters
569 Parameters
570 ----------
570 ----------
571
571
572 block : bool
572 block : bool
573 whether to wait for results
573 whether to wait for results
574 bound : bool
574 bound : bool
575 whether to use the engine's namespace
575 whether to use the engine's namespace
576 follow : Dependency, list, msg_id, AsyncResult
576 follow : Dependency, list, msg_id, AsyncResult
577 the location dependencies of tasks
577 the location dependencies of tasks
578 after : Dependency, list, msg_id, AsyncResult
578 after : Dependency, list, msg_id, AsyncResult
579 the time dependencies of tasks
579 the time dependencies of tasks
580 timeout : int,None
580 timeout : int,None
581 the timeout to be used for tasks
581 the timeout to be used for tasks
582 """
582 """
583
583
584 super(LoadBalancedView, self).set_flags(**kwargs)
584 super(LoadBalancedView, self).set_flags(**kwargs)
585 for name in ('follow', 'after'):
585 for name in ('follow', 'after'):
586 if name in kwargs:
586 if name in kwargs:
587 value = kwargs[name]
587 value = kwargs[name]
588 if self._validate_dependency(value):
588 if self._validate_dependency(value):
589 setattr(self, name, value)
589 setattr(self, name, value)
590 else:
590 else:
591 raise ValueError("Invalid dependency: %r"%value)
591 raise ValueError("Invalid dependency: %r"%value)
592 if 'timeout' in kwargs:
592 if 'timeout' in kwargs:
593 t = kwargs['timeout']
593 t = kwargs['timeout']
594             if not isinstance(t, (int, long, float, type(None))):
594             if not isinstance(t, (int, long, float, type(None))):
595 raise TypeError("Invalid type for timeout: %r"%type(t))
595 raise TypeError("Invalid type for timeout: %r"%type(t))
596 if t is not None:
596 if t is not None:
597 if t < 0:
597 if t < 0:
598 raise ValueError("Invalid timeout: %s"%t)
598 raise ValueError("Invalid timeout: %s"%t)
599 self.timeout = t
599 self.timeout = t
600
600
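A sketch of setting dependency flags, where ar1 and ar2 are hypothetical AsyncResults from earlier submissions:

    v.set_flags(after=ar1)               # don't start before ar1's tasks finish
    v.set_flags(follow=ar2, timeout=30)  # run where ar2 ran; give up after 30s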
601 @spin_after
601 @spin_after
602 @save_ids
602 @save_ids
603 def map(self, f, *sequences, **kwargs):
603 def map(self, f, *sequences, **kwargs):
604 """view.map(f, *sequences, block=self.block, bound=self.bound, chunk_size=1) => list|AsyncMapResult
604 """view.map(f, *sequences, block=self.block, bound=self.bound, chunk_size=1) => list|AsyncMapResult
605
605
606 Parallel version of builtin `map`, load-balanced by this View.
606 Parallel version of builtin `map`, load-balanced by this View.
607
607
608 `block`, `bound`, and `chunk_size` can be specified by keyword only.
608 `block`, `bound`, and `chunk_size` can be specified by keyword only.
609
609
610 Each `chunk_size` elements will be a separate task, and will be
610 Each `chunk_size` elements will be a separate task, and will be
611 load-balanced. This lets individual elements be available for iteration
611 load-balanced. This lets individual elements be available for iteration
612 as soon as they arrive.
612 as soon as they arrive.
613
613
614 Parameters
614 Parameters
615 ----------
615 ----------
616
616
617 f : callable
617 f : callable
618 function to be mapped
618 function to be mapped
619 *sequences: one or more sequences of matching length
619 *sequences: one or more sequences of matching length
620 the sequences to be distributed and passed to `f`
620 the sequences to be distributed and passed to `f`
621 block : bool
621 block : bool
622 whether to wait for the result or not [default self.block]
622 whether to wait for the result or not [default self.block]
623 bound : bool
623 bound : bool
624 whether to use the engine's namespace [default self.bound]
624 whether to use the engine's namespace [default self.bound]
625 chunk_size : int
625 chunk_size : int
626 how many elements should be in each task [default 1]
626 how many elements should be in each task [default 1]
627
627
628 Returns
628 Returns
629 -------
629 -------
630
630
631 if block=False:
631 if block=False:
632 AsyncMapResult
632 AsyncMapResult
633 An object like AsyncResult, but which reassembles the sequence of results
633 An object like AsyncResult, but which reassembles the sequence of results
634 into a single list. AsyncMapResults can be iterated through before all
634 into a single list. AsyncMapResults can be iterated through before all
635 results are complete.
635 results are complete.
636 else:
636 else:
637 the result of map(f,*sequences)
637 the result of map(f,*sequences)
638
638
639 """
639 """
640
640
641 # default
641 # default
642 block = kwargs.get('block', self.block)
642 block = kwargs.get('block', self.block)
643 bound = kwargs.get('bound', self.bound)
643 bound = kwargs.get('bound', self.bound)
644 chunk_size = kwargs.get('chunk_size', 1)
644 chunk_size = kwargs.get('chunk_size', 1)
645
645
646 keyset = set(kwargs.keys())
646 keyset = set(kwargs.keys())
647         extra_keys = keyset.difference(set(['block', 'bound', 'chunk_size']))
647         extra_keys = keyset.difference(set(['block', 'bound', 'chunk_size']))
648 if extra_keys:
648 if extra_keys:
649 raise TypeError("Invalid kwargs: %s"%list(extra_keys))
649 raise TypeError("Invalid kwargs: %s"%list(extra_keys))
650
650
651 assert len(sequences) > 0, "must have some sequences to map onto!"
651 assert len(sequences) > 0, "must have some sequences to map onto!"
652
652
653 pf = ParallelFunction(self.client, f, block=block, bound=bound,
653 pf = ParallelFunction(self.client, f, block=block, bound=bound,
654 targets=self._targets, balanced=True,
654 targets=self._targets, balanced=True,
655 chunk_size=chunk_size)
655 chunk_size=chunk_size)
656 return pf.map(*sequences)
656 return pf.map(*sequences)
657
657
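A chunk_size sketch for the load-balanced map above, with client as in the class docstring:

    v = client.view(balanced=True)
    amr = v.map(lambda x: x*x, range(1000), block=False, chunk_size=10)  # 100 tasks
    results = list(amr)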
658 __all__ = ['LoadBalancedView', 'DirectView'] No newline at end of file