##// END OF EJS Templates
Merge with upstream
Fernando Perez -
r1405:dc02b179 merge
parent child Browse files
Show More
@@ -0,0 +1,233 b''
1 # encoding: utf-8
2
3 """A parallelized version of Python's builtin map."""
4
5 __docformat__ = "restructuredtext en"
6
7 #----------------------------------------------------------------------------
8 # Copyright (C) 2008 The IPython Development Team
9 #
10 # Distributed under the terms of the BSD License. The full license is in
11 # the file COPYING, distributed as part of this software.
12 #----------------------------------------------------------------------------
13
14 #----------------------------------------------------------------------------
15 # Imports
16 #----------------------------------------------------------------------------
17
18 from types import FunctionType
19 from zope.interface import Interface, implements
20 from IPython.kernel.task import MapTask
21 from IPython.kernel.twistedutil import DeferredList, gatherBoth
22 from IPython.kernel.util import printer
23 from IPython.kernel.error import collect_exceptions
24
25 #----------------------------------------------------------------------------
26 # Code
27 #----------------------------------------------------------------------------
28
class IMapper(Interface):
    """The basic interface for a Mapper.

    This defines a generic interface for mapping. The idea of this is
    similar to that of Python's builtin `map` function, which applies a
    function elementwise to a sequence.
    """

    def map(func, *seqs):
        """Do map in parallel.

        Equivalent to map(func, *seqs) or:

        [func(seqs[0][0], seqs[1][0],...), func(seqs[0][1], seqs[1][1],...),...]

        :Parameters:
            func : FunctionType
                The function to apply to the sequence
            sequences : tuple of iterables
                A sequence of iterables that are used for successive function
                arguments.  This works just like map
        """
51
class IMultiEngineMapperFactory(Interface):
    """
    An interface for something that creates `IMapper` instances.
    """

    def mapper(dist='b', targets='all', block=True):
        """
        Create an `IMapper` implementer with a given set of arguments.

        The `IMapper` created using a multiengine controller is
        not load balanced: work is partitioned across the given engines
        up front rather than assigned as engines become free.
        """
64
class ITaskMapperFactory(Interface):
    """
    An interface for something that creates `IMapper` instances.
    """

    def mapper(clear_before=False, clear_after=False, retries=0,
               recovery_task=None, depend=None, block=True):
        """
        Create an `IMapper` implementer with a given set of arguments.

        The `IMapper` created using a task controller is load balanced.

        See the documentation for `IPython.kernel.task.BaseTask` for
        documentation on the arguments to this method.
        """
80
81
class MultiEngineMapper(object):
    """
    A Mapper for `IMultiEngine` implementers.
    """

    implements(IMapper)

    def __init__(self, multiengine, dist='b', targets='all', block=True):
        """
        Create a Mapper for a multiengine.

        The values of all arguments are used for all calls to `map`. This
        class allows these arguments to be set for a series of map calls.

        :Parameters:
            multiengine : `IMultiEngine` implementer
                The multiengine to use for running the map commands
            dist : str
                The type of decomposition to use. Only block ('b') is
                supported currently
            targets : (str, int, tuple of ints)
                The engines to use in the map
            block : boolean
                Whether to block when the map is applied
        """
        self.multiengine = multiengine
        self.dist = dist
        self.targets = targets
        self.block = block

    def map(self, func, *sequences):
        """
        Apply func to *sequences elementwise.  Like Python's builtin map.

        This version is not load balanced.

        :Parameters:
            func : FunctionType or str
                The function to apply to each group of elements
            sequences : tuple of iterables
                The sequences to map over; all must have equal length

        :Raises:
            ValueError : if no sequences are given or lengths differ
            TypeError : if func is neither a function nor a str
        """
        if not sequences:
            # Without this guard max() raises a confusing
            # "arg is an empty sequence" error.
            raise ValueError('at least one sequence is required')
        max_len = max(len(s) for s in sequences)
        for s in sequences:
            if len(s) != max_len:
                raise ValueError('all sequences must have equal length')
        # Explicit raise instead of assert: asserts are stripped when
        # Python runs with -O, which would silently skip this validation.
        if not isinstance(func, (str, FunctionType)):
            raise TypeError('func must be a function or str')
        return self.multiengine.raw_map(func, sequences, dist=self.dist,
                                        targets=self.targets, block=self.block)
125
class TaskMapper(object):
    """
    Make an `ITaskController` look like an `IMapper`.

    This class provides a load balanced version of `map`.
    """

    def __init__(self, task_controller, clear_before=False, clear_after=False, retries=0,
                 recovery_task=None, depend=None, block=True):
        """
        Create an `IMapper` given a `TaskController` and arguments.

        The additional arguments are those that are common to all types of
        tasks and are described in the documentation for
        `IPython.kernel.task.BaseTask`.

        :Parameters:
            task_controller : an `IBlockingTaskClient` implementer
                The `TaskController` to use for calls to `map`
        """
        self.task_controller = task_controller
        self.clear_before = clear_before
        self.clear_after = clear_after
        self.retries = retries
        self.recovery_task = recovery_task
        self.depend = depend
        self.block = block

    def map(self, func, *sequences):
        """
        Apply func to *sequences elementwise.  Like Python's builtin map.

        This version is load balanced.

        :Returns:
            A deferred.  If `block` is True it fires with the task results
            (after a barrier); otherwise it fires with the values returned
            by `run` (presumably the submitted task ids).
        """
        max_len = max(len(s) for s in sequences)
        for s in sequences:
            if len(s) != max_len:
                raise ValueError('all sequences must have equal length')
        # Submit one MapTask per tuple of corresponding elements.
        dlist = []
        for ta in zip(*sequences):
            task = MapTask(func, ta, clear_before=self.clear_before,
                           clear_after=self.clear_after, retries=self.retries,
                           recovery_task=self.recovery_task, depend=self.depend)
            dlist.append(self.task_controller.run(task))
        dlist = gatherBoth(dlist, consumeErrors=1)
        dlist.addCallback(collect_exceptions, 'map')
        if self.block:
            def get_results(task_ids):
                # Wait for every submitted task, then gather all results.
                d = self.task_controller.barrier(task_ids)
                d.addCallback(lambda _: gatherBoth(
                    [self.task_controller.get_task_result(tid)
                     for tid in task_ids], consumeErrors=1))
                d.addCallback(collect_exceptions, 'map')
                return d
            dlist.addCallback(get_results)
        return dlist
182
class SynchronousTaskMapper(object):
    """
    Make an `IBlockingTaskClient` look like an `IMapper`.

    This class provides a load balanced version of `map`.
    """

    def __init__(self, task_controller, clear_before=False, clear_after=False, retries=0,
                 recovery_task=None, depend=None, block=True):
        """
        Create an `IMapper` given an `IBlockingTaskClient` and arguments.

        The additional arguments are those that are common to all types of
        tasks and are described in the documentation for
        `IPython.kernel.task.BaseTask`.

        :Parameters:
            task_controller : an `IBlockingTaskClient` implementer
                The `TaskController` to use for calls to `map`
        """
        self.task_controller = task_controller
        self.clear_before = clear_before
        self.clear_after = clear_after
        self.retries = retries
        self.recovery_task = recovery_task
        self.depend = depend
        self.block = block

    def map(self, func, *sequences):
        """
        Apply func to *sequences elementwise.  Like Python's builtin map.

        This version is load balanced.  Returns the list of task results
        when `block` is True, otherwise the list of submitted task ids.
        """
        max_len = max(len(s) for s in sequences)
        for s in sequences:
            if len(s)!=max_len:
                raise ValueError('all sequences must have equal length')
        # One task per tuple of corresponding elements of the sequences.
        task_args = zip(*sequences)
        task_ids = []
        for ta in task_args:
            task = MapTask(func, ta, clear_before=self.clear_before,
                           clear_after=self.clear_after, retries=self.retries,
                           recovery_task=self.recovery_task, depend=self.depend)
            task_ids.append(self.task_controller.run(task))
        if self.block:
            # Blocking mode: wait for all tasks, then collect every result.
            self.task_controller.barrier(task_ids)
            task_results = [self.task_controller.get_task_result(tid) for tid in task_ids]
            return task_results
        else:
            return task_ids
1 NO CONTENT: new file 100644
@@ -0,0 +1,18 b''
# Demo of the three equivalent ways to run a parallel map with a
# MultiEngineClient: direct map, an explicit mapper, and @parallel.
from IPython.kernel import client

# Connect a blocking multiengine client to the default controller.
mec = client.MultiEngineClient()

# Simplest form: blocking parallel map with default options.
result = mec.map(lambda x: 2*x, range(10))
print "Simple, default map: ", result

# Non-blocking mapper: map() returns a PendingResult; .r blocks for the value.
m = mec.mapper(block=False)
pr = m.map(lambda x: 2*x, range(10))
print "Submitted map, got PendingResult: ", pr
result = pr.r
print "Using a mapper: ", result

# The decorator turns f into a function that maps itself over its argument.
@mec.parallel()
def f(x): return 2*x

result = f(range(10))
print "Using a parallel function: ", result
@@ -0,0 +1,19 b''
# Demo of load-balanced parallel map through a TaskClient: direct map,
# an explicit (non-blocking) mapper, and the @parallel decorator.
from IPython.kernel import client

# Connect a blocking task client to the default controller.
tc = client.TaskClient()

# Simplest form: blocking, load-balanced map.
result = tc.map(lambda x: 2*x, range(10))
print "Simple, default map: ", result

# Non-blocking mapper: map() returns the submitted task ids.
m = tc.mapper(block=False, clear_after=True, clear_before=True)
tids = m.map(lambda x: 2*x, range(10))
print "Submitted tasks, got ids: ", tids
# Wait for all tasks to finish, then fetch each result by id.
tc.barrier(tids)
result = [tc.get_task_result(tid) for tid in tids]
print "Using a mapper: ", result

# The decorator turns f into a function that maps itself over its argument.
@tc.parallel()
def f(x): return 2*x

result = f(range(10))
print "Using a parallel function: ", result
@@ -1,151 +1,151 b''
1 1 # encoding: utf-8
2 2
3 3 """This file contains unittests for the frontendbase module."""
4 4
5 5 __docformat__ = "restructuredtext en"
6 6
7 7 #---------------------------------------------------------------------------
8 8 # Copyright (C) 2008 The IPython Development Team
9 9 #
10 10 # Distributed under the terms of the BSD License. The full license is in
11 11 # the file COPYING, distributed as part of this software.
12 12 #---------------------------------------------------------------------------
13 13
14 14 #---------------------------------------------------------------------------
15 15 # Imports
16 16 #---------------------------------------------------------------------------
17 17
18 18 import unittest
19 19 from IPython.frontend import frontendbase
20 20 from IPython.kernel.engineservice import EngineService
21 21
class FrontEndCallbackChecker(frontendbase.AsyncFrontEndBase):
    """FrontEndBase subclass for checking callbacks.

    Overrides the three rendering/prompt hooks so tests can assert which
    of them were invoked during an execute() round trip.
    """
    def __init__(self, engine=None, history=None):
        super(FrontEndCallbackChecker, self).__init__(engine=engine,
                                                      history=history)
        # Flags recording which callbacks have fired.
        self.updateCalled = False
        self.renderResultCalled = False
        self.renderErrorCalled = False

    def update_cell_prompt(self, result, blockID=None):
        # Record the call; pass `result` through so the callback chain
        # keeps working.
        self.updateCalled = True
        return result

    def render_result(self, result):
        self.renderResultCalled = True
        return result

    def render_error(self, failure):
        # Pass the failure through so downstream errbacks still see it.
        self.renderErrorCalled = True
        return failure
43 43
44 44
45 45
46 46
class TestAsyncFrontendBase(unittest.TestCase):
    """Unit tests for AsyncFrontEndBase via the callback-checking subclass.

    NOTE(review): the test methods attach callbacks to deferreds but do not
    return them; unless the test runner waits on deferreds some other way,
    the assertions inside the callbacks may run after the test method has
    already passed -- confirm against the project's test runner.
    """

    def setUp(self):
        """Setup the EngineService and FrontEndBase"""

        self.fb = FrontEndCallbackChecker(engine=EngineService())

    def test_implements_IFrontEnd(self):
        # The class-level interface declaration, not an instance check.
        assert(frontendbase.IFrontEnd.implementedBy(
            frontendbase.AsyncFrontEndBase))

    def test_is_complete_returns_False_for_incomplete_block(self):
        """is_complete() is False for a block needing more input."""

        block = """def test(a):"""

        assert(self.fb.is_complete(block) == False)

    def test_is_complete_returns_True_for_complete_block(self):
        """is_complete() is True for syntactically finished blocks."""

        block = """def test(a): pass"""

        assert(self.fb.is_complete(block))

        block = """a=3"""

        assert(self.fb.is_complete(block))

    def test_blockID_added_to_result(self):
        block = """3+3"""

        d = self.fb.execute(block, blockID='TEST_ID')

        d.addCallback(self.checkBlockID, expected='TEST_ID')

    def test_blockID_added_to_failure(self):
        block = "raise Exception()"

        d = self.fb.execute(block,blockID='TEST_ID')
        d.addErrback(self.checkFailureID, expected='TEST_ID')

    def checkBlockID(self, result, expected=""):
        # Helper callback: the execute result dict carries the block id.
        assert(result['blockID'] == expected)

    def checkFailureID(self, failure, expected=""):
        # Helper errback: the failure object carries the block id.
        assert(failure.blockID == expected)

    def test_callbacks_added_to_execute(self):
        """test that
        update_cell_prompt
        render_result

        are added to execute request
        """

        d = self.fb.execute("10+10")
        d.addCallback(self.checkCallbacks)

    def checkCallbacks(self, result):
        assert(self.fb.updateCalled)
        assert(self.fb.renderResultCalled)

    def test_error_callback_added_to_execute(self):
        """test that render_error called on execution error"""

        d = self.fb.execute("raise Exception()")
        d.addCallback(self.checkRenderError)

    def checkRenderError(self, result):
        assert(self.fb.renderErrorCalled)

    def test_history_returns_expected_block(self):
        """Make sure history browsing doesn't fail"""

        blocks = ["a=1","a=2","a=3"]
        for b in blocks:
            d = self.fb.execute(b)

        # d is now the deferred for the last executed block
        d.addCallback(self.historyTests, blocks)

    def historyTests(self, result, blocks):
        """historyTests"""

        assert(len(blocks) >= 3)
        # Walk back two entries, then forward one.
        assert(self.fb.get_history_previous("") == blocks[-2])
        assert(self.fb.get_history_previous("") == blocks[-3])
        assert(self.fb.get_history_next() == blocks[-2])

    def test_history_returns_none_at_startup(self):
        """test_history_returns_none_at_startup"""

        assert(self.fb.get_history_previous("")==None)
        assert(self.fb.get_history_next()==None)
150 150
151 151
@@ -1,41 +1,41 b''
1 1 # encoding: utf-8
2 2
3 3 """Asynchronous clients for the IPython controller.
4 4
5 5 This module has clients for using the various interfaces of the controller
6 6 in a fully asynchronous manner. This means that you will need to run the
7 7 Twisted reactor yourself and that all methods of the client classes return
8 8 deferreds to the result.
9 9
10 10 The main methods are are `get_*_client` and `get_client`.
11 11 """
12 12
13 13 __docformat__ = "restructuredtext en"
14 14
15 15 #-------------------------------------------------------------------------------
16 16 # Copyright (C) 2008 The IPython Development Team
17 17 #
18 18 # Distributed under the terms of the BSD License. The full license is in
19 19 # the file COPYING, distributed as part of this software.
20 20 #-------------------------------------------------------------------------------
21 21
22 22 #-------------------------------------------------------------------------------
23 23 # Imports
24 24 #-------------------------------------------------------------------------------
25 25
26 26 from IPython.kernel import codeutil
27 27 from IPython.kernel.clientconnector import ClientConnector
28 28
29 29 # Other things that the user will need
30 from IPython.kernel.task import Task
30 from IPython.kernel.task import MapTask, StringTask
31 31 from IPython.kernel.error import CompositeError
32 32
33 33 #-------------------------------------------------------------------------------
34 34 # Code
35 35 #-------------------------------------------------------------------------------
36 36
# A single shared ClientConnector instance; the module-level client
# factory functions are just its bound methods.
_client_tub = ClientConnector()
get_multiengine_client = _client_tub.get_multiengine_client
get_task_client = _client_tub.get_task_client
get_client = _client_tub.get_client
41 41
@@ -1,96 +1,96 b''
1 1 # encoding: utf-8
2 2
3 3 """This module contains blocking clients for the controller interfaces.
4 4
5 5 Unlike the clients in `asyncclient.py`, the clients in this module are fully
6 6 blocking. This means that methods on the clients return the actual results
7 7 rather than a deferred to the result. Also, we manage the Twisted reactor
8 8 for you. This is done by running the reactor in a thread.
9 9
10 10 The main classes in this module are:
11 11
12 12 * MultiEngineClient
13 13 * TaskClient
14 14 * Task
15 15 * CompositeError
16 16 """
17 17
18 18 __docformat__ = "restructuredtext en"
19 19
20 20 #-------------------------------------------------------------------------------
21 21 # Copyright (C) 2008 The IPython Development Team
22 22 #
23 23 # Distributed under the terms of the BSD License. The full license is in
24 24 # the file COPYING, distributed as part of this software.
25 25 #-------------------------------------------------------------------------------
26 26
27 27 #-------------------------------------------------------------------------------
28 28 # Imports
29 29 #-------------------------------------------------------------------------------
30 30
31 31 import sys
32 32
33 33 # from IPython.tools import growl
34 34 # growl.start("IPython1 Client")
35 35
36 36
37 37 from twisted.internet import reactor
38 38 from IPython.kernel.clientconnector import ClientConnector
39 39 from IPython.kernel.twistedutil import ReactorInThread
40 40 from IPython.kernel.twistedutil import blockingCallFromThread
41 41
42 42 # These enable various things
43 43 from IPython.kernel import codeutil
44 44 import IPython.kernel.magic
45 45
46 46 # Other things that the user will need
47 from IPython.kernel.task import Task
47 from IPython.kernel.task import MapTask, StringTask
48 48 from IPython.kernel.error import CompositeError
49 49
50 50 #-------------------------------------------------------------------------------
51 51 # Code
52 52 #-------------------------------------------------------------------------------
53 53
54 54 _client_tub = ClientConnector()
55 55
56 56
def get_multiengine_client(furl_or_file=''):
    """Get the blocking MultiEngine client.

    :Parameters:
        furl_or_file : str
            A furl or a filename containing a furl. If empty, the
            default furl_file will be used

    :Returns:
        The connected MultiEngineClient instance
    """
    # The connector lives in the reactor thread, so marshal the call
    # across threads and block until it completes.
    client = blockingCallFromThread(_client_tub.get_multiengine_client,
                                    furl_or_file)
    return client.adapt_to_blocking_client()
71 71
def get_task_client(furl_or_file=''):
    """Get the blocking Task client.

    :Parameters:
        furl_or_file : str
            A furl or a filename containing a furl. If empty, the
            default furl_file will be used

    :Returns:
        The connected TaskClient instance
    """
    # The connector lives in the reactor thread, so marshal the call
    # across threads and block until it completes.
    client = blockingCallFromThread(_client_tub.get_task_client,
                                    furl_or_file)
    return client.adapt_to_blocking_client()
86 86
87 87
# Class-like aliases for the factory functions, matching the names users
# see in the docs (MultiEngineClient() / TaskClient()).
MultiEngineClient = get_multiengine_client
TaskClient = get_task_client


# Now we start the reactor in a thread.  It is daemonized so the reactor
# thread does not keep the interpreter alive when the main thread exits.
rit = ReactorInThread()
rit.setDaemon(True)
rit.start()
@@ -1,186 +1,143 b''
1 1 # encoding: utf-8
2 2 # -*- test-case-name: IPython.kernel.test.test_contexts -*-
3 3 """Context managers for IPython.
4 4
5 5 Python 2.5 introduced the `with` statement, which is based on the context
6 6 manager protocol. This module offers a few context managers for common cases,
7 7 which can also be useful as templates for writing new, application-specific
8 8 managers.
9 9 """
10 10
11 11 from __future__ import with_statement
12 12
13 13 __docformat__ = "restructuredtext en"
14 14
15 15 #-------------------------------------------------------------------------------
16 16 # Copyright (C) 2008 The IPython Development Team
17 17 #
18 18 # Distributed under the terms of the BSD License. The full license is in
19 19 # the file COPYING, distributed as part of this software.
20 20 #-------------------------------------------------------------------------------
21 21
22 22 #-------------------------------------------------------------------------------
23 23 # Imports
24 24 #-------------------------------------------------------------------------------
25 25
26 26 import linecache
27 27 import sys
28 28
29 29 from twisted.internet.error import ConnectionRefusedError
30 30
31 31 from IPython.ultraTB import _fixed_getinnerframes, findsource
32 32 from IPython import ipapi
33 33
34 34 from IPython.kernel import error
35 35
36 36 #---------------------------------------------------------------------------
37 37 # Utility functions needed by all context managers.
38 38 #---------------------------------------------------------------------------
39 39
def remote():
    """Raise the sentinel exception that the remote-execution context
    managers catch in order to stop local execution of parallel code."""
    raise error.StopLocalExecution(
        'Special exception to stop local execution of parallel code.')
45 45
46 46
def strip_whitespace(source, require_remote=True):
    """Strip leading whitespace from input source.

    :Parameters:
        source : list of str
            The lines of source code (each typically ending in a newline).
        require_remote : boolean
            If True, the first non-comment line of the dedented block must
            begin with ``remote()``; the returned source starts after it.

    :Returns:
        The dedented block as a single string.

    :Raises:
        ValueError : if the source contains no code, or if
            `require_remote` is True and no ``remote()`` call is found.
    """
    remote_mark = 'remote()'
    # Expand tabs to avoid any confusion.
    wsource = [l.expandtabs(4) for l in source]
    # Detect the indentation level
    done = False
    for line in wsource:
        if line.isspace():
            continue
        for col, char in enumerate(line):
            if char != ' ':
                done = True
                break
        if done:
            break
    if not done:
        # Empty or all-whitespace input: `col` was never bound, and the
        # original code then crashed with a NameError.  Fail clearly.
        raise ValueError('source contains no code')
    # Now we know how much leading space there is in the code.  Next, we
    # extract up to the first line that has less indentation.
    # WARNINGS: we skip comments that may be misindented, but we do NOT yet
    # detect triple quoted strings that may have flush left text.
    for lno, line in enumerate(wsource):
        lead = line[:col]
        if lead.isspace():
            continue
        else:
            if not lead.lstrip().startswith('#'):
                break
    # The real 'with' source is up to lno
    src_lines = [l[col:] for l in wsource[:lno+1]]

    # Finally, check that the source's first non-comment line begins with the
    # special call 'remote()'
    if require_remote:
        for nline, line in enumerate(src_lines):
            if line.isspace() or line.startswith('#'):
                continue
            if line.startswith(remote_mark):
                break
        else:
            raise ValueError('%s call missing at the start of code' %
                             remote_mark)
        out_lines = src_lines[nline+1:]
    else:
        # If the user specified that the remote() call wasn't mandatory
        out_lines = src_lines

    return ''.join(out_lines)
100 100
class RemoteContextBase(object):
    """Base class for context managers that extract the source of a
    `with` block so it can be shipped to remote engines.

    Subclasses implement `__enter__`; `__exit__` swallows the sentinel
    exception raised by `remote()` so the block's body does not also run
    locally.
    """

    def __init__(self):
        self.ip = ipapi.get()

    def _findsource_file(self, f):
        """Extract the with-block's source when running from a file."""
        linecache.checkcache()
        s = findsource(f.f_code)
        # The lines after the current one are the body of the with block.
        wsource = s[0][f.f_lineno:]
        return strip_whitespace(wsource)

    def _findsource_ipython(self, f):
        """Extract the with-block's source from the interactive history."""
        from IPython import ipapi
        self.ip = ipapi.get()
        # Skip the first history line (the `with ...:` statement itself).
        buf = self.ip.IP.input_hist_raw[-1].splitlines()[1:]
        wsource = [l + '\n' for l in buf]
        return strip_whitespace(wsource)

    def findsource(self, frame):
        """Return the dedented source of the with block active in `frame`."""
        if frame.f_code.co_filename == '<ipython console>':
            src = self._findsource_ipython(frame)
        else:
            src = self._findsource_file(frame)
        return src

    def __enter__(self):
        raise NotImplementedError

    def __exit__(self, etype, value, tb):
        # On a normal (no-exception) exit etype is None; the original
        # issubclass(None, ...) call raised TypeError here.  Only suppress
        # the sentinel StopLocalExecution raised by remote().
        if etype is not None and issubclass(etype, error.StopLocalExecution):
            return True
135 135
class RemoteMultiEngine(RemoteContextBase):
    """Run the body of a `with` statement on a multiengine controller."""
    def __init__(self, mec):
        # The multiengine client that will execute the extracted block.
        self.mec = mec
        RemoteContextBase.__init__(self)

    def __enter__(self):
        # sys._getframe(1) is the caller's frame, i.e. the frame that
        # contains the `with` statement whose body we want to extract.
        src = self.findsource(sys._getframe(1))
        return self.mec.execute(src)
144
145
# XXX - Temporary hackish testing, we'll move this into proper tests right
# away

if __name__ == '__main__':

    # XXX - for now, we need a running cluster to be started separately.  The
    # daemon work is almost finished, and will make much of this unnecessary.
    from IPython.kernel import client
    mec = client.MultiEngineClient(('127.0.0.1',10105))

    try:
        mec.get_ids()
    except ConnectionRefusedError:
        # No controller running yet: spawn a local 2-engine cluster,
        # give it a moment to start, then reconnect.
        import os, time
        os.system('ipcluster -n 2 &')
        time.sleep(2)
        mec = client.MultiEngineClient(('127.0.0.1',10105))

    mec.block = False

    import itertools
    c = itertools.count()

    parallel = RemoteMultiEngine(mec)

    mec.pushAll()

    with parallel as pr:
        # A comment
        remote()  # this means the code below only runs remotely
        print 'Hello remote world'
        x = range(10)
        # Comments are OK
        # Even misindented.
        y = x+1

    # NOTE(review): `pfor` and `sequence` are not defined anywhere above,
    # so this demo crashes here if actually executed -- confirm intent.
    with pfor('i',sequence) as pr:
        print x[i]

    print pr.x + pr.y
@@ -1,171 +1,171 b''
1 1 # encoding: utf-8
2 2
3 3 """Magic command interface for interactive parallel work."""
4 4
5 5 __docformat__ = "restructuredtext en"
6 6
7 7 #-------------------------------------------------------------------------------
8 8 # Copyright (C) 2008 The IPython Development Team
9 9 #
10 10 # Distributed under the terms of the BSD License. The full license is in
11 11 # the file COPYING, distributed as part of this software.
12 12 #-------------------------------------------------------------------------------
13 13
14 14 #-------------------------------------------------------------------------------
15 15 # Imports
16 16 #-------------------------------------------------------------------------------
17 17
18 18 import new
19 19
20 20 from IPython.iplib import InteractiveShell
21 21 from IPython.Shell import MTInteractiveShell
22 22
23 23 from twisted.internet.defer import Deferred
24 24
25 25
26 26 #-------------------------------------------------------------------------------
27 27 # Definitions of magic functions for use with IPython
28 28 #-------------------------------------------------------------------------------
29 29
30 30 NO_ACTIVE_CONTROLLER = """
31 31 Error: No Controller is activated
32 32 Use activate() on a RemoteController object to activate it for magics.
33 33 """
34 34
35 35 def magic_result(self,parameter_s=''):
36 36 """Print the result of command i on all engines of the active controller.
37 37
38 38 To activate a controller in IPython, first create it and then call
39 39 the activate() method.
40 40
41 41 Then you can do the following:
42 42
43 43 >>> result # Print the latest result
44 44 Printing result...
45 45 [127.0.0.1:0] In [1]: b = 10
46 46 [127.0.0.1:1] In [1]: b = 10
47 47
48 48 >>> result 0 # Print result 0
49 49 In [14]: result 0
50 50 Printing result...
51 51 [127.0.0.1:0] In [0]: a = 5
52 52 [127.0.0.1:1] In [0]: a = 5
53 53 """
54 54 try:
55 55 activeController = __IPYTHON__.activeController
56 56 except AttributeError:
57 57 print NO_ACTIVE_CONTROLLER
58 58 else:
59 59 try:
60 60 index = int(parameter_s)
61 61 except:
62 62 index = None
63 63 result = activeController.get_result(index)
64 64 return result
65 65
def magic_px(self,parameter_s=''):
    """Executes the given python command on the active IPython Controller.

    To activate a Controller in IPython, first create it and then call
    the activate() method.

    Then you can do the following:

    >>> %px a = 5 # Runs a = 5 on all nodes
    """

    try:
        activeController = __IPYTHON__.activeController
    except AttributeError:
        # No controller has been activate()d in this session.
        print NO_ACTIVE_CONTROLLER
    else:
        print "Parallel execution on engines: %s" % activeController.targets
        result = activeController.execute(parameter_s)
        return result
85 85
def pxrunsource(self, source, filename="<input>", symbol="single"):
    """Replacement for InteractiveShell.runsource while %autopx is on.

    Mirrors the three-way contract of code.InteractiveInterpreter.runsource:
    show a syntax error (case 1), ask for more input (case 2), or run the
    complete source -- here, on the active controller (case 3).
    """
    try:
        code = self.compile(source, filename, symbol)
    except (OverflowError, SyntaxError, ValueError):
        # Case 1: invalid input -- show the syntax error locally.
        self.showsyntaxerror(filename)
        return None

    if code is None:
        # Case 2: input is incomplete; prompt for more.
        return True

    # Case 3
    # Because autopx is enabled, we now call executeAll or disable autopx if
    # %autopx or autopx has been called
    if '_ip.magic("%autopx' in source or '_ip.magic("autopx' in source:
        _disable_autopx(self)
        return False
    else:
        try:
            result = self.activeController.execute(source)
        except:
            # Deliberate catch-all: any error from remotely executing user
            # code is rendered as a traceback, like the normal REPL loop.
            self.showtraceback()
        else:
            print result.__repr__()
        return False
113 113
def magic_autopx(self, parameter_s=''):
    """Toggles auto parallel mode for the active IPython Controller.

    To activate a Controller in IPython, first create it and then call
    the activate() method.

    Then you can do the following:

    >>> %autopx # Now all commands are executed in parallel
    Auto Parallel Enabled
    Type %autopx to disable
    ...
    >>> %autopx # Now all commands are locally executed
    Auto Parallel Disabled
    """
    # A missing `autopx` attribute means the mode was never enabled, which
    # is the same as it being False: either way, turn it on.
    if getattr(self, 'autopx', False):
        _disable_autopx(self)
    else:
        _enable_autopx(self)
137 137
def _enable_autopx(self):
    """Enable %autopx mode by saving the original runsource and installing
    pxrunsource.
    """
    try:
        activeController = __IPYTHON__.activeController
    except AttributeError:
        print "No active RemoteController found, use RemoteController.activate()."
    else:
        # Swap the shell's runsource for the parallel version; keep the
        # original so _disable_autopx can restore it.
        self._original_runsource = self.runsource
        self.runsource = new.instancemethod(pxrunsource, self, self.__class__)
        self.autopx = True
        print "Auto Parallel Enabled\nType %autopx to disable"
151 151
152 152 def _disable_autopx(self):
153 153 """Disable %autopx by restoring the original runsource."""
154 154 if hasattr(self, 'autopx'):
155 155 if self.autopx == True:
156 156 self.runsource = self._original_runsource
157 157 self.autopx = False
158 158 print "Auto Parallel Disabled"
159 159
# Add the new magic functions to the class dict so every InteractiveShell
# instance (and subclasses like MTInteractiveShell) picks them up:

InteractiveShell.magic_result = magic_result
InteractiveShell.magic_px = magic_px
InteractiveShell.magic_autopx = magic_autopx

# And remove the global name to keep global namespace clean.  Don't worry, the
# copy bound to IPython stays, we're just removing the global name.
del magic_result
del magic_px
del magic_autopx
171 171
@@ -1,121 +1,121 b''
1 1 # encoding: utf-8
2 2
3 3 """Classes used in scattering and gathering sequences.
4 4
5 5 Scattering consists of partitioning a sequence and sending the various
6 6 pieces to individual nodes in a cluster.
7 7 """
8 8
9 9 __docformat__ = "restructuredtext en"
10 10
11 11 #-------------------------------------------------------------------------------
12 12 # Copyright (C) 2008 The IPython Development Team
13 13 #
14 14 # Distributed under the terms of the BSD License. The full license is in
15 15 # the file COPYING, distributed as part of this software.
16 16 #-------------------------------------------------------------------------------
17 17
18 18 #-------------------------------------------------------------------------------
19 19 # Imports
20 20 #-------------------------------------------------------------------------------
21 21
22 22 import types
23 23
24 24 from IPython.genutils import flatten as genutil_flatten
25 25
26 26 #-------------------------------------------------------------------------------
27 27 # Figure out which array packages are present and their array types
28 28 #-------------------------------------------------------------------------------
29 29
30 30 arrayModules = []
31 31 try:
32 32 import Numeric
33 33 except ImportError:
34 34 pass
35 35 else:
36 36 arrayModules.append({'module':Numeric, 'type':Numeric.arraytype})
37 37 try:
38 38 import numpy
39 39 except ImportError:
40 40 pass
41 41 else:
42 42 arrayModules.append({'module':numpy, 'type':numpy.ndarray})
43 43 try:
44 44 import numarray
45 45 except ImportError:
46 46 pass
47 47 else:
48 48 arrayModules.append({'module':numarray,
49 49 'type':numarray.numarraycore.NumArray})
50 50
51 51 class Map:
52 52 """A class for partitioning a sequence using a map."""
53 53
54 54 def getPartition(self, seq, p, q):
55 55 """Returns the pth partition of q partitions of seq."""
56 56
57 57 # Test for error conditions here
58 58 if p<0 or p>=q:
59 59 print "No partition exists."
60 60 return
61 61
62 62 remainder = len(seq)%q
63 63 basesize = len(seq)/q
64 64 hi = []
65 65 lo = []
66 66 for n in range(q):
67 67 if n < remainder:
68 68 lo.append(n * (basesize + 1))
69 69 hi.append(lo[-1] + basesize + 1)
70 70 else:
71 71 lo.append(n*basesize + remainder)
72 72 hi.append(lo[-1] + basesize)
73 73
74 74
75 75 result = seq[lo[p]:hi[p]]
76 76 return result
77 77
78 78 def joinPartitions(self, listOfPartitions):
79 79 return self.concatenate(listOfPartitions)
80 80
81 81 def concatenate(self, listOfPartitions):
82 82 testObject = listOfPartitions[0]
83 83 # First see if we have a known array type
84 84 for m in arrayModules:
85 85 #print m
86 86 if isinstance(testObject, m['type']):
87 87 return m['module'].concatenate(listOfPartitions)
88 88 # Next try for Python sequence types
89 89 if isinstance(testObject, (types.ListType, types.TupleType)):
90 90 return genutil_flatten(listOfPartitions)
91 91 # If we have scalars, just return listOfPartitions
92 92 return listOfPartitions
93 93
94 94 class RoundRobinMap(Map):
95 95 """Partitions a sequence in a round robin fashion.
96 96
97 97 This currently does not work!
98 98 """
99 99
100 100 def getPartition(self, seq, p, q):
101 101 return seq[p:len(seq):q]
102 102 #result = []
103 103 #for i in range(p,len(seq),q):
104 104 # result.append(seq[i])
105 105 #return result
106 106
107 107 def joinPartitions(self, listOfPartitions):
108 108 #lengths = [len(x) for x in listOfPartitions]
109 109 #maxPartitionLength = len(listOfPartitions[0])
110 110 #numberOfPartitions = len(listOfPartitions)
111 111 #concat = self.concatenate(listOfPartitions)
112 112 #totalLength = len(concat)
113 113 #result = []
114 114 #for i in range(maxPartitionLength):
115 115 # result.append(concat[i:totalLength:maxPartitionLength])
116 116 return self.concatenate(listOfPartitions)
117 117
118 styles = {'basic':Map}
118 dists = {'b':Map}
119 119
120 120
121 121
@@ -1,780 +1,753 b''
1 1 # encoding: utf-8
2 2 # -*- test-case-name: IPython.kernel.test.test_multiengine -*-
3 3
4 4 """Adapt the IPython ControllerServer to IMultiEngine.
5 5
6 6 This module provides classes that adapt a ControllerService to the
7 7 IMultiEngine interface. This interface is a basic interactive interface
8 8 for working with a set of engines where it is desired to have explicit
9 9 access to each registered engine.
10 10
11 11 The classes here are exposed to the network in files like:
12 12
13 13 * multienginevanilla.py
14 14 * multienginepb.py
15 15 """
16 16
17 17 __docformat__ = "restructuredtext en"
18 18
19 19 #-------------------------------------------------------------------------------
20 20 # Copyright (C) 2008 The IPython Development Team
21 21 #
22 22 # Distributed under the terms of the BSD License. The full license is in
23 23 # the file COPYING, distributed as part of this software.
24 24 #-------------------------------------------------------------------------------
25 25
26 26 #-------------------------------------------------------------------------------
27 27 # Imports
28 28 #-------------------------------------------------------------------------------
29 29
30 30 from new import instancemethod
31 31 from types import FunctionType
32 32
33 33 from twisted.application import service
34 34 from twisted.internet import defer, reactor
35 35 from twisted.python import log, components, failure
36 36 from zope.interface import Interface, implements, Attribute
37 37
38 38 from IPython.tools import growl
39 39 from IPython.kernel.util import printer
40 40 from IPython.kernel.twistedutil import gatherBoth
41 41 from IPython.kernel import map as Map
42 42 from IPython.kernel import error
43 43 from IPython.kernel.pendingdeferred import PendingDeferredManager, two_phase
44 44 from IPython.kernel.controllerservice import \
45 45 ControllerAdapterBase, \
46 46 ControllerService, \
47 47 IControllerBase
48 48
49 49
50 50 #-------------------------------------------------------------------------------
51 51 # Interfaces for the MultiEngine representation of a controller
52 52 #-------------------------------------------------------------------------------
53 53
54 54 class IEngineMultiplexer(Interface):
55 55 """Interface to multiple engines implementing IEngineCore/Serialized/Queued.
56 56
57 57 This class simply acts as a multiplexer of methods that are in the
58 58 various IEngines* interfaces. Thus the methods here are just like those
59 59 in the IEngine* interfaces, but with an extra first argument, targets.
60 60 The targets argument can have the following forms:
61 61
62 62 * targets = 10 # Engines are indexed by ints
63 63 * targets = [0,1,2,3] # A list of ints
64 64 * targets = 'all' # A string to indicate all targets
65 65
66 66 If targets is bad in any way, an InvalidEngineID will be raised. This
67 67 includes engines not being registered.
68 68
69 69 All IEngineMultiplexer multiplexer methods must return a Deferred to a list
70 70 with length equal to the number of targets. The elements of the list will
71 71 correspond to the return of the corresponding IEngine method.
72 72
73 73 Failures are aggressive, meaning that if an action fails for any target,
74 74 the overall action will fail immediately with that Failure.
75 75
76 76 :Parameters:
77 77 targets : int, list of ints, or 'all'
78 78 Engine ids the action will apply to.
79 79
80 80 :Returns: Deferred to a list of results for each engine.
81 81
82 82 :Exception:
83 83 InvalidEngineID
84 84 If the targets argument is bad or engines aren't registered.
85 85 NoEnginesRegistered
86 86 If there are no engines registered and targets='all'
87 87 """
88 88
89 89 #---------------------------------------------------------------------------
90 90 # Mutiplexed methods
91 91 #---------------------------------------------------------------------------
92 92
93 93 def execute(lines, targets='all'):
94 94 """Execute lines of Python code on targets.
95 95
96 96 See the class docstring for information about targets and possible
97 97 exceptions this method can raise.
98 98
99 99 :Parameters:
100 100 lines : str
101 101 String of python code to be executed on targets.
102 102 """
103 103
104 104 def push(namespace, targets='all'):
105 105 """Push dict namespace into the user's namespace on targets.
106 106
107 107 See the class docstring for information about targets and possible
108 108 exceptions this method can raise.
109 109
110 110 :Parameters:
111 111 namespace : dict
112 112 Dict of key value pairs to be put into the users namespace.
113 113 """
114 114
115 115 def pull(keys, targets='all'):
116 116 """Pull values out of the user's namespace on targets by keys.
117 117
118 118 See the class docstring for information about targets and possible
119 119 exceptions this method can raise.
120 120
121 121 :Parameters:
122 122 keys : tuple of strings
123 123 Sequence of keys to be pulled from user's namespace.
124 124 """
125 125
126 126 def push_function(namespace, targets='all'):
127 127 """"""
128 128
129 129 def pull_function(keys, targets='all'):
130 130 """"""
131 131
132 132 def get_result(i=None, targets='all'):
133 133 """Get the result for command i from targets.
134 134
135 135 See the class docstring for information about targets and possible
136 136 exceptions this method can raise.
137 137
138 138 :Parameters:
139 139 i : int or None
140 140 Command index or None to indicate most recent command.
141 141 """
142 142
143 143 def reset(targets='all'):
144 144 """Reset targets.
145 145
146 146 This clears the users namespace of the Engines, but won't cause
147 147 modules to be reloaded.
148 148 """
149 149
150 150 def keys(targets='all'):
151 151 """Get variable names defined in user's namespace on targets."""
152 152
153 153 def kill(controller=False, targets='all'):
154 154 """Kill the targets Engines and possibly the controller.
155 155
156 156 :Parameters:
157 157 controller : boolean
158 158 Should the controller be killed as well. If so all the
159 159 engines will be killed first no matter what targets is.
160 160 """
161 161
162 162 def push_serialized(namespace, targets='all'):
163 163 """Push a namespace of Serialized objects to targets.
164 164
165 165 :Parameters:
166 166 namespace : dict
167 167 A dict whose keys are the variable names and whose values
168 168 are serialized version of the objects.
169 169 """
170 170
171 171 def pull_serialized(keys, targets='all'):
172 172 """Pull Serialized objects by keys from targets.
173 173
174 174 :Parameters:
175 175 keys : tuple of strings
176 176 Sequence of variable names to pull as serialized objects.
177 177 """
178 178
179 179 def clear_queue(targets='all'):
180 180 """Clear the queue of pending command for targets."""
181 181
182 182 def queue_status(targets='all'):
183 183 """Get the status of the queue on the targets."""
184 184
185 185 def set_properties(properties, targets='all'):
186 186 """set properties by key and value"""
187 187
188 188 def get_properties(keys=None, targets='all'):
189 189 """get a list of properties by `keys`, if no keys specified, get all"""
190 190
191 191 def del_properties(keys, targets='all'):
192 192 """delete properties by `keys`"""
193 193
194 194 def has_properties(keys, targets='all'):
195 195 """get a list of bool values for whether `properties` has `keys`"""
196 196
197 197 def clear_properties(targets='all'):
198 198 """clear the properties dict"""
199 199
200 200
201 201 class IMultiEngine(IEngineMultiplexer):
202 202 """A controller that exposes an explicit interface to all of its engines.
203 203
204 204 This is the primary interface for interactive usage.
205 205 """
206 206
207 207 def get_ids():
208 208 """Return list of currently registered ids.
209 209
210 210 :Returns: A Deferred to a list of registered engine ids.
211 211 """
212 212
213 213
214 214
215 215 #-------------------------------------------------------------------------------
216 216 # Implementation of the core MultiEngine classes
217 217 #-------------------------------------------------------------------------------
218 218
219 219 class MultiEngine(ControllerAdapterBase):
220 220 """The representation of a ControllerService as a IMultiEngine.
221 221
222 222 Although it is not implemented currently, this class would be where a
223 223 client/notification API is implemented. It could inherit from something
224 224 like results.NotifierParent and then use the notify method to send
225 225 notifications.
226 226 """
227 227
228 228 implements(IMultiEngine)
229 229
230 230 def __init(self, controller):
231 231 ControllerAdapterBase.__init__(self, controller)
232 232
233 233 #---------------------------------------------------------------------------
234 234 # Helper methods
235 235 #---------------------------------------------------------------------------
236 236
237 237 def engineList(self, targets):
238 238 """Parse the targets argument into a list of valid engine objects.
239 239
240 240 :Parameters:
241 241 targets : int, list of ints or 'all'
242 242 The targets argument to be parsed.
243 243
244 244 :Returns: List of engine objects.
245 245
246 246 :Exception:
247 247 InvalidEngineID
248 248 If targets is not valid or if an engine is not registered.
249 249 """
250 250 if isinstance(targets, int):
251 251 if targets not in self.engines.keys():
252 252 log.msg("Engine with id %i is not registered" % targets)
253 253 raise error.InvalidEngineID("Engine with id %i is not registered" % targets)
254 254 else:
255 255 return [self.engines[targets]]
256 256 elif isinstance(targets, (list, tuple)):
257 257 for id in targets:
258 258 if id not in self.engines.keys():
259 259 log.msg("Engine with id %r is not registered" % id)
260 260 raise error.InvalidEngineID("Engine with id %r is not registered" % id)
261 261 return map(self.engines.get, targets)
262 262 elif targets == 'all':
263 263 eList = self.engines.values()
264 264 if len(eList) == 0:
265 265 msg = """There are no engines registered.
266 266 Check the logs in ~/.ipython/log if you think there should have been."""
267 267 raise error.NoEnginesRegistered(msg)
268 268 else:
269 269 return eList
270 270 else:
271 271 raise error.InvalidEngineID("targets argument is not an int, list of ints or 'all': %r"%targets)
272 272
273 273 def _performOnEngines(self, methodName, *args, **kwargs):
274 274 """Calls a method on engines and returns deferred to list of results.
275 275
276 276 :Parameters:
277 277 methodName : str
278 278 Name of the method to be called.
279 279 targets : int, list of ints, 'all'
280 280 The targets argument to be parsed into a list of engine objects.
281 281 args
282 282 The positional keyword arguments to be passed to the engines.
283 283 kwargs
284 284 The keyword arguments passed to the method
285 285
286 286 :Returns: List of deferreds to the results on each engine
287 287
288 288 :Exception:
289 289 InvalidEngineID
290 290 If the targets argument is bad in any way.
291 291 AttributeError
292 292 If the method doesn't exist on one of the engines.
293 293 """
294 294 targets = kwargs.pop('targets')
295 295 log.msg("Performing %s on %r" % (methodName, targets))
296 296 # log.msg("Performing %s(%r, %r) on %r" % (methodName, args, kwargs, targets))
297 297 # This will and should raise if targets is not valid!
298 298 engines = self.engineList(targets)
299 299 dList = []
300 300 for e in engines:
301 301 meth = getattr(e, methodName, None)
302 302 if meth is not None:
303 303 dList.append(meth(*args, **kwargs))
304 304 else:
305 305 raise AttributeError("Engine %i does not have method %s" % (e.id, methodName))
306 306 return dList
307 307
308 308 def _performOnEnginesAndGatherBoth(self, methodName, *args, **kwargs):
309 309 """Calls _performOnEngines and wraps result/exception into deferred."""
310 310 try:
311 311 dList = self._performOnEngines(methodName, *args, **kwargs)
312 312 except (error.InvalidEngineID, AttributeError, KeyError, error.NoEnginesRegistered):
313 313 return defer.fail(failure.Failure())
314 314 else:
315 315 # Having fireOnOneErrback is causing problems with the determinacy
316 316 # of the system. Basically, once a single engine has errbacked, this
317 317 # method returns. In some cases, this will cause client to submit
318 318 # another command. Because the previous command is still running
319 319 # on some engines, this command will be queued. When those commands
320 320 # then errback, the second command will raise QueueCleared. Ahhh!
321 321 d = gatherBoth(dList,
322 322 fireOnOneErrback=0,
323 323 consumeErrors=1,
324 324 logErrors=0)
325 325 d.addCallback(error.collect_exceptions, methodName)
326 326 return d
327 327
328 328 #---------------------------------------------------------------------------
329 329 # General IMultiEngine methods
330 330 #---------------------------------------------------------------------------
331 331
332 332 def get_ids(self):
333 333 return defer.succeed(self.engines.keys())
334 334
335 335 #---------------------------------------------------------------------------
336 336 # IEngineMultiplexer methods
337 337 #---------------------------------------------------------------------------
338 338
339 339 def execute(self, lines, targets='all'):
340 340 return self._performOnEnginesAndGatherBoth('execute', lines, targets=targets)
341 341
342 342 def push(self, ns, targets='all'):
343 343 return self._performOnEnginesAndGatherBoth('push', ns, targets=targets)
344 344
345 345 def pull(self, keys, targets='all'):
346 346 return self._performOnEnginesAndGatherBoth('pull', keys, targets=targets)
347 347
348 348 def push_function(self, ns, targets='all'):
349 349 return self._performOnEnginesAndGatherBoth('push_function', ns, targets=targets)
350 350
351 351 def pull_function(self, keys, targets='all'):
352 352 return self._performOnEnginesAndGatherBoth('pull_function', keys, targets=targets)
353 353
354 354 def get_result(self, i=None, targets='all'):
355 355 return self._performOnEnginesAndGatherBoth('get_result', i, targets=targets)
356 356
357 357 def reset(self, targets='all'):
358 358 return self._performOnEnginesAndGatherBoth('reset', targets=targets)
359 359
360 360 def keys(self, targets='all'):
361 361 return self._performOnEnginesAndGatherBoth('keys', targets=targets)
362 362
363 363 def kill(self, controller=False, targets='all'):
364 364 if controller:
365 365 targets = 'all'
366 366 d = self._performOnEnginesAndGatherBoth('kill', targets=targets)
367 367 if controller:
368 368 log.msg("Killing controller")
369 369 d.addCallback(lambda _: reactor.callLater(2.0, reactor.stop))
370 370 # Consume any weird stuff coming back
371 371 d.addBoth(lambda _: None)
372 372 return d
373 373
374 374 def push_serialized(self, namespace, targets='all'):
375 375 for k, v in namespace.iteritems():
376 376 log.msg("Pushed object %s is %f MB" % (k, v.getDataSize()))
377 377 d = self._performOnEnginesAndGatherBoth('push_serialized', namespace, targets=targets)
378 378 return d
379 379
380 380 def pull_serialized(self, keys, targets='all'):
381 381 try:
382 382 dList = self._performOnEngines('pull_serialized', keys, targets=targets)
383 383 except (error.InvalidEngineID, AttributeError, error.NoEnginesRegistered):
384 384 return defer.fail(failure.Failure())
385 385 else:
386 386 for d in dList:
387 387 d.addCallback(self._logSizes)
388 388 d = gatherBoth(dList,
389 389 fireOnOneErrback=0,
390 390 consumeErrors=1,
391 391 logErrors=0)
392 392 d.addCallback(error.collect_exceptions, 'pull_serialized')
393 393 return d
394 394
395 395 def _logSizes(self, listOfSerialized):
396 396 if isinstance(listOfSerialized, (list, tuple)):
397 397 for s in listOfSerialized:
398 398 log.msg("Pulled object is %f MB" % s.getDataSize())
399 399 else:
400 400 log.msg("Pulled object is %f MB" % listOfSerialized.getDataSize())
401 401 return listOfSerialized
402 402
403 403 def clear_queue(self, targets='all'):
404 404 return self._performOnEnginesAndGatherBoth('clear_queue', targets=targets)
405 405
406 406 def queue_status(self, targets='all'):
407 407 log.msg("Getting queue status on %r" % targets)
408 408 try:
409 409 engines = self.engineList(targets)
410 410 except (error.InvalidEngineID, AttributeError, error.NoEnginesRegistered):
411 411 return defer.fail(failure.Failure())
412 412 else:
413 413 dList = []
414 414 for e in engines:
415 415 dList.append(e.queue_status().addCallback(lambda s:(e.id, s)))
416 416 d = gatherBoth(dList,
417 417 fireOnOneErrback=0,
418 418 consumeErrors=1,
419 419 logErrors=0)
420 420 d.addCallback(error.collect_exceptions, 'queue_status')
421 421 return d
422 422
423 423 def get_properties(self, keys=None, targets='all'):
424 424 log.msg("Getting properties on %r" % targets)
425 425 try:
426 426 engines = self.engineList(targets)
427 427 except (error.InvalidEngineID, AttributeError, error.NoEnginesRegistered):
428 428 return defer.fail(failure.Failure())
429 429 else:
430 430 dList = [e.get_properties(keys) for e in engines]
431 431 d = gatherBoth(dList,
432 432 fireOnOneErrback=0,
433 433 consumeErrors=1,
434 434 logErrors=0)
435 435 d.addCallback(error.collect_exceptions, 'get_properties')
436 436 return d
437 437
438 438 def set_properties(self, properties, targets='all'):
439 439 log.msg("Setting properties on %r" % targets)
440 440 try:
441 441 engines = self.engineList(targets)
442 442 except (error.InvalidEngineID, AttributeError, error.NoEnginesRegistered):
443 443 return defer.fail(failure.Failure())
444 444 else:
445 445 dList = [e.set_properties(properties) for e in engines]
446 446 d = gatherBoth(dList,
447 447 fireOnOneErrback=0,
448 448 consumeErrors=1,
449 449 logErrors=0)
450 450 d.addCallback(error.collect_exceptions, 'set_properties')
451 451 return d
452 452
453 453 def has_properties(self, keys, targets='all'):
454 454 log.msg("Checking properties on %r" % targets)
455 455 try:
456 456 engines = self.engineList(targets)
457 457 except (error.InvalidEngineID, AttributeError, error.NoEnginesRegistered):
458 458 return defer.fail(failure.Failure())
459 459 else:
460 460 dList = [e.has_properties(keys) for e in engines]
461 461 d = gatherBoth(dList,
462 462 fireOnOneErrback=0,
463 463 consumeErrors=1,
464 464 logErrors=0)
465 465 d.addCallback(error.collect_exceptions, 'has_properties')
466 466 return d
467 467
468 468 def del_properties(self, keys, targets='all'):
469 469 log.msg("Deleting properties on %r" % targets)
470 470 try:
471 471 engines = self.engineList(targets)
472 472 except (error.InvalidEngineID, AttributeError, error.NoEnginesRegistered):
473 473 return defer.fail(failure.Failure())
474 474 else:
475 475 dList = [e.del_properties(keys) for e in engines]
476 476 d = gatherBoth(dList,
477 477 fireOnOneErrback=0,
478 478 consumeErrors=1,
479 479 logErrors=0)
480 480 d.addCallback(error.collect_exceptions, 'del_properties')
481 481 return d
482 482
483 483 def clear_properties(self, targets='all'):
484 484 log.msg("Clearing properties on %r" % targets)
485 485 try:
486 486 engines = self.engineList(targets)
487 487 except (error.InvalidEngineID, AttributeError, error.NoEnginesRegistered):
488 488 return defer.fail(failure.Failure())
489 489 else:
490 490 dList = [e.clear_properties() for e in engines]
491 491 d = gatherBoth(dList,
492 492 fireOnOneErrback=0,
493 493 consumeErrors=1,
494 494 logErrors=0)
495 495 d.addCallback(error.collect_exceptions, 'clear_properties')
496 496 return d
497 497
498 498
499 499 components.registerAdapter(MultiEngine,
500 500 IControllerBase,
501 501 IMultiEngine)
502 502
503 503
504 504 #-------------------------------------------------------------------------------
505 505 # Interfaces for the Synchronous MultiEngine
506 506 #-------------------------------------------------------------------------------
507 507
508 508 class ISynchronousEngineMultiplexer(Interface):
509 509 pass
510 510
511 511
512 512 class ISynchronousMultiEngine(ISynchronousEngineMultiplexer):
513 513 """Synchronous, two-phase version of IMultiEngine.
514 514
515 515 Methods in this interface are identical to those of IMultiEngine, but they
516 516 take one additional argument:
517 517
518 518 execute(lines, targets='all') -> execute(lines, targets='all', block=True)
519 519
520 520 :Parameters:
521 521 block : boolean
522 522 Should the method return a deferred to a deferredID or the
523 523 actual result. If block=False a deferred to a deferredID is
524 524 returned and the user must call `get_pending_deferred` at a later
525 525 point. If block=True, a deferred to the actual result comes back.
526 526 """
527 527 def get_pending_deferred(deferredID, block=True):
528 528 """"""
529 529
530 530 def clear_pending_deferreds():
531 531 """"""
532 532
533 533
534 534 #-------------------------------------------------------------------------------
535 535 # Implementation of the Synchronous MultiEngine
536 536 #-------------------------------------------------------------------------------
537 537
538 538 class SynchronousMultiEngine(PendingDeferredManager):
539 539 """Adapt an `IMultiEngine` -> `ISynchronousMultiEngine`
540 540
541 541 Warning, this class uses a decorator that currently uses **kwargs.
542 542 Because of this block must be passed as a kwarg, not positionally.
543 543 """
544 544
545 545 implements(ISynchronousMultiEngine)
546 546
547 547 def __init__(self, multiengine):
548 548 self.multiengine = multiengine
549 549 PendingDeferredManager.__init__(self)
550 550
551 551 #---------------------------------------------------------------------------
552 552 # Decorated pending deferred methods
553 553 #---------------------------------------------------------------------------
554 554
555 555 @two_phase
556 556 def execute(self, lines, targets='all'):
557 557 d = self.multiengine.execute(lines, targets)
558 558 return d
559 559
560 560 @two_phase
561 561 def push(self, namespace, targets='all'):
562 562 return self.multiengine.push(namespace, targets)
563 563
564 564 @two_phase
565 565 def pull(self, keys, targets='all'):
566 566 d = self.multiengine.pull(keys, targets)
567 567 return d
568 568
569 569 @two_phase
570 570 def push_function(self, namespace, targets='all'):
571 571 return self.multiengine.push_function(namespace, targets)
572 572
573 573 @two_phase
574 574 def pull_function(self, keys, targets='all'):
575 575 d = self.multiengine.pull_function(keys, targets)
576 576 return d
577 577
578 578 @two_phase
579 579 def get_result(self, i=None, targets='all'):
580 580 return self.multiengine.get_result(i, targets='all')
581 581
582 582 @two_phase
583 583 def reset(self, targets='all'):
584 584 return self.multiengine.reset(targets)
585 585
586 586 @two_phase
587 587 def keys(self, targets='all'):
588 588 return self.multiengine.keys(targets)
589 589
590 590 @two_phase
591 591 def kill(self, controller=False, targets='all'):
592 592 return self.multiengine.kill(controller, targets)
593 593
594 594 @two_phase
595 595 def push_serialized(self, namespace, targets='all'):
596 596 return self.multiengine.push_serialized(namespace, targets)
597 597
598 598 @two_phase
599 599 def pull_serialized(self, keys, targets='all'):
600 600 return self.multiengine.pull_serialized(keys, targets)
601 601
602 602 @two_phase
603 603 def clear_queue(self, targets='all'):
604 604 return self.multiengine.clear_queue(targets)
605 605
606 606 @two_phase
607 607 def queue_status(self, targets='all'):
608 608 return self.multiengine.queue_status(targets)
609 609
610 610 @two_phase
611 611 def set_properties(self, properties, targets='all'):
612 612 return self.multiengine.set_properties(properties, targets)
613 613
614 614 @two_phase
615 615 def get_properties(self, keys=None, targets='all'):
616 616 return self.multiengine.get_properties(keys, targets)
617 617
618 618 @two_phase
619 619 def has_properties(self, keys, targets='all'):
620 620 return self.multiengine.has_properties(keys, targets)
621 621
622 622 @two_phase
623 623 def del_properties(self, keys, targets='all'):
624 624 return self.multiengine.del_properties(keys, targets)
625 625
626 626 @two_phase
627 627 def clear_properties(self, targets='all'):
628 628 return self.multiengine.clear_properties(targets)
629 629
630 630 #---------------------------------------------------------------------------
631 631 # IMultiEngine methods
632 632 #---------------------------------------------------------------------------
633 633
634 634 def get_ids(self):
635 635 """Return a list of registered engine ids.
636 636
637 637 Never use the two phase block/non-block stuff for this.
638 638 """
639 639 return self.multiengine.get_ids()
640 640
641 641
642 642 components.registerAdapter(SynchronousMultiEngine, IMultiEngine, ISynchronousMultiEngine)
643 643
644 644
645 645 #-------------------------------------------------------------------------------
646 646 # Various high-level interfaces that can be used as MultiEngine mix-ins
647 647 #-------------------------------------------------------------------------------
648 648
649 649 #-------------------------------------------------------------------------------
650 650 # IMultiEngineCoordinator
651 651 #-------------------------------------------------------------------------------
652 652
653 653 class IMultiEngineCoordinator(Interface):
654 654 """Methods that work on multiple engines explicitly."""
655 655
656 def scatter(key, seq, style='basic', flatten=False, targets='all'):
657 """Partition and distribute a sequence to targets.
656 def scatter(key, seq, dist='b', flatten=False, targets='all'):
657 """Partition and distribute a sequence to targets."""
658 658
659 :Parameters:
660 key : str
661 The variable name to call the scattered sequence.
662 seq : list, tuple, array
663 The sequence to scatter. The type should be preserved.
664 style : string
665 A specification of how the sequence is partitioned. Currently
666 only 'basic' is implemented.
667 flatten : boolean
668 Should single element sequences be converted to scalars.
669 """
670
671 def gather(key, style='basic', targets='all'):
672 """Gather object key from targets.
659 def gather(key, dist='b', targets='all'):
660 """Gather object key from targets."""
673 661
674 :Parameters:
675 key : string
676 The name of a sequence on the targets to gather.
677 style : string
678 A specification of how the sequence is partitioned. Currently
679 only 'basic' is implemented.
662 def raw_map(func, seqs, dist='b', targets='all'):
680 663 """
681
682 def map(func, seq, style='basic', targets='all'):
683 """A parallelized version of Python's builtin map.
664 A parallelized version of Python's builtin `map` function.
684 665
685 This function implements the following pattern:
666 This has a slightly different syntax than the builtin `map`.
667 This is needed because we need to have keyword arguments and thus
668 can't use *args to capture all the sequences. Instead, they must
669 be passed in a list or tuple.
686 670
687 1. The sequence seq is scattered to the given targets.
688 2. map(functionSource, seq) is called on each engine.
689 3. The resulting sequences are gathered back to the local machine.
690
691 :Parameters:
692 targets : int, list or 'all'
693 The engine ids the action will apply to. Call `get_ids` to see
694 a list of currently available engines.
695 func : str, function
696 An actual function object or a Python string that names a
697 callable defined on the engines.
698 seq : list, tuple or numpy array
699 The local sequence to be scattered.
700 style : str
701 Only 'basic' is supported for now.
702
703 :Returns: A list of len(seq) with functionSource called on each element
704 of seq.
705
706 Example
707 =======
671 The equivalence is:
708 672
709 >>> rc.mapAll('lambda x: x*x', range(10000))
710 [0,2,4,9,25,36,...]
673 raw_map(func, seqs) -> map(func, seqs[0], seqs[1], ...)
674
675 Most users will want to use parallel functions or the `mapper`
676 and `map` methods for an API that follows that of the builtin
677 `map`.
711 678 """
712 679
713 680
714 681 class ISynchronousMultiEngineCoordinator(IMultiEngineCoordinator):
715 682 """Methods that work on multiple engines explicitly."""
716 pass
683
684 def scatter(key, seq, dist='b', flatten=False, targets='all', block=True):
685 """Partition and distribute a sequence to targets."""
686
687 def gather(key, dist='b', targets='all', block=True):
688 """Gather object key from targets"""
689
690 def raw_map(func, seqs, dist='b', targets='all', block=True):
691 """
692 A parallelized version of Python's builtin map.
693
694 This has a slightly different syntax than the builtin `map`.
695 This is needed because we need to have keyword arguments and thus
696 can't use *args to capture all the sequences. Instead, they must
697 be passed in a list or tuple.
698
699 raw_map(func, seqs) -> map(func, seqs[0], seqs[1], ...)
700
701 Most users will want to use parallel functions or the `mapper`
702 and `map` methods for an API that follows that of the builtin
703 `map`.
704 """
717 705
718 706
719 707 #-------------------------------------------------------------------------------
720 708 # IMultiEngineExtras
721 709 #-------------------------------------------------------------------------------
722 710
723 711 class IMultiEngineExtras(Interface):
724 712
725 def zip_pull(targets, *keys):
726 """Pull, but return results in a different format from `pull`.
713 def zip_pull(targets, keys):
714 """
715 Pull, but return results in a different format from `pull`.
727 716
728 717 This method basically returns zip(pull(targets, *keys)), with a few
729 718 edge cases handled differently. Users of chainsaw will find this format
730 719 familiar.
731
732 :Parameters:
733 targets : int, list or 'all'
734 The engine ids the action will apply to. Call `get_ids` to see
735 a list of currently available engines.
736 keys: list or tuple of str
737 A list of variable names as string of the Python objects to be pulled
738 back to the client.
739
740 :Returns: A list of pulled Python objects for each target.
741 720 """
742 721
743 722 def run(targets, fname):
744 """Run a .py file on targets.
745
746 :Parameters:
747 targets : int, list or 'all'
748 The engine ids the action will apply to. Call `get_ids` to see
749 a list of currently available engines.
750 fname : str
751 The filename of a .py file on the local system to be sent to and run
752 on the engines.
753 block : boolean
754 Should I block or not. If block=True, wait for the action to
755 complete and return the result. If block=False, return a
756 `PendingResult` object that can be used to later get the
757 result. If block is not specified, the block attribute
758 will be used instead.
759 """
723 """Run a .py file on targets."""
760 724
761 725
762 726 class ISynchronousMultiEngineExtras(IMultiEngineExtras):
763 pass
764
727 def zip_pull(targets, keys, block=True):
728 """
729 Pull, but return results in a different format from `pull`.
730
731 This method basically returns zip(pull(targets, *keys)), with a few
732 edge cases handled differently. Users of chainsaw will find this format
733 familiar.
734 """
735
736 def run(targets, fname, block=True):
737 """Run a .py file on targets."""
765 738
766 739 #-------------------------------------------------------------------------------
767 740 # The full MultiEngine interface
768 741 #-------------------------------------------------------------------------------
769 742
770 743 class IFullMultiEngine(IMultiEngine,
771 744 IMultiEngineCoordinator,
772 745 IMultiEngineExtras):
773 746 pass
774 747
775 748
776 749 class IFullSynchronousMultiEngine(ISynchronousMultiEngine,
777 750 ISynchronousMultiEngineCoordinator,
778 751 ISynchronousMultiEngineExtras):
779 752 pass
780 753
@@ -1,833 +1,896 b''
1 1 # encoding: utf-8
2 2 # -*- test-case-name: IPython.kernel.test.test_multiengineclient -*-
3 3
4 4 """General Classes for IMultiEngine clients."""
5 5
6 6 __docformat__ = "restructuredtext en"
7 7
8 8 #-------------------------------------------------------------------------------
9 9 # Copyright (C) 2008 The IPython Development Team
10 10 #
11 11 # Distributed under the terms of the BSD License. The full license is in
12 12 # the file COPYING, distributed as part of this software.
13 13 #-------------------------------------------------------------------------------
14 14
15 15 #-------------------------------------------------------------------------------
16 16 # Imports
17 17 #-------------------------------------------------------------------------------
18 18
19 19 import sys
20 20 import cPickle as pickle
21 21 from types import FunctionType
22 22 import linecache
23 23
24 24 from twisted.internet import reactor
25 25 from twisted.python import components, log
26 26 from twisted.python.failure import Failure
27 27 from zope.interface import Interface, implements, Attribute
28 28
29 29 from IPython.ColorANSI import TermColors
30 30
31 31 from IPython.kernel.twistedutil import blockingCallFromThread
32 32 from IPython.kernel import error
33 33 from IPython.kernel.parallelfunction import ParallelFunction
34 from IPython.kernel.mapper import (
35 MultiEngineMapper,
36 IMultiEngineMapperFactory,
37 IMapper
38 )
34 39 from IPython.kernel import map as Map
35 40 from IPython.kernel import multiengine as me
36 41 from IPython.kernel.multiengine import (IFullMultiEngine,
37 42 IFullSynchronousMultiEngine)
38 43
39 44
40 45 #-------------------------------------------------------------------------------
41 46 # Pending Result things
42 47 #-------------------------------------------------------------------------------
43 48
44 49 class IPendingResult(Interface):
45 50 """A representation of a result that is pending.
46 51
47 52 This class is similar to Twisted's `Deferred` object, but is designed to be
48 53 used in a synchronous context.
49 54 """
50 55
51 56 result_id=Attribute("ID of the deferred on the other side")
52 57 client=Attribute("A client that I came from")
53 58 r=Attribute("An attribute that is a property that calls and returns get_result")
54 59
55 60 def get_result(default=None, block=True):
56 61 """
57 62 Get a result that is pending.
58 63
59 64 :Parameters:
60 65 default
61 66 The value to return if the result is not ready.
62 67 block : boolean
63 68 Should I block for the result.
64 69
65 70 :Returns: The actual result or the default value.
66 71 """
67 72
68 73 def add_callback(f, *args, **kwargs):
69 74 """
70 75 Add a callback that is called with the result.
71 76
72 77 If the original result is foo, adding a callback will cause
73 78 f(foo, *args, **kwargs) to be returned instead. If multiple
74 79 callbacks are registered, they are chained together: the result of
75 80 one is passed to the next and so on.
76 81
77 82 Unlike Twisted's Deferred object, there is no errback chain. Thus
78 83 any exception raised will not be caught and handled. User must
79 84 catch these by hand when calling `get_result`.
80 85 """
81 86
82 87
83 88 class PendingResult(object):
84 89 """A representation of a result that is not yet ready.
85 90
86 91 A user should not create a `PendingResult` instance by hand.
87 92
88 93 Methods
89 94 =======
90 95
91 96 * `get_result`
92 97 * `add_callback`
93 98
94 99 Properties
95 100 ==========
96 101 * `r`
97 102 """
98 103
99 104 def __init__(self, client, result_id):
100 105 """Create a PendingResult with a result_id and a client instance.
101 106
102 107 The client should implement `_getPendingResult(result_id, block)`.
103 108 """
104 109 self.client = client
105 110 self.result_id = result_id
106 111 self.called = False
107 112 self.raised = False
108 113 self.callbacks = []
109 114
110 115 def get_result(self, default=None, block=True):
111 116 """Get a result that is pending.
112 117
113 118 This method will connect to an IMultiEngine adapted controller
114 119 and see if the result is ready. If the action triggers an exception
115 120 raise it and record it. This method records the result/exception once it is
116 121 retrieved. Calling `get_result` again will get this cached result or will
117 122 re-raise the exception. The .r attribute is a property that calls
118 123 `get_result` with block=True.
119 124
120 125 :Parameters:
121 126 default
122 127 The value to return if the result is not ready.
123 128 block : boolean
124 129 Should I block for the result.
125 130
126 131 :Returns: The actual result or the default value.
127 132 """
128 133
129 134 if self.called:
130 135 if self.raised:
131 136 raise self.result[0], self.result[1], self.result[2]
132 137 else:
133 138 return self.result
134 139 try:
135 140 result = self.client.get_pending_deferred(self.result_id, block)
136 141 except error.ResultNotCompleted:
137 142 return default
138 143 except:
139 144 # Reraise other error, but first record them so they can be reraised
140 145 # later if .r or get_result is called again.
141 146 self.result = sys.exc_info()
142 147 self.called = True
143 148 self.raised = True
144 149 raise
145 150 else:
146 151 for cb in self.callbacks:
147 152 result = cb[0](result, *cb[1], **cb[2])
148 153 self.result = result
149 154 self.called = True
150 155 return result
151 156
152 157 def add_callback(self, f, *args, **kwargs):
153 158 """Add a callback that is called with the result.
154 159
155 160 If the original result is result, adding a callback will cause
156 161 f(result, *args, **kwargs) to be returned instead. If multiple
157 162 callbacks are registered, they are chained together: the result of
158 163 one is passed to the next and so on.
159 164
160 165 Unlike Twisted's Deferred object, there is no errback chain. Thus
161 166 any exception raised will not be caught and handled. User must
162 167 catch these by hand when calling `get_result`.
163 168 """
164 169 assert callable(f)
165 170 self.callbacks.append((f, args, kwargs))
166 171
167 172 def __cmp__(self, other):
168 173 if self.result_id < other.result_id:
169 174 return -1
170 175 else:
171 176 return 1
172 177
173 178 def _get_r(self):
174 179 return self.get_result(block=True)
175 180
176 181 r = property(_get_r)
177 182 """This property is a shortcut to a `get_result(block=True)`."""
178 183
179 184
180 185 #-------------------------------------------------------------------------------
181 186 # Pretty printing wrappers for certain lists
182 187 #-------------------------------------------------------------------------------
183 188
184 189 class ResultList(list):
185 190 """A subclass of list that pretty prints the output of `execute`/`get_result`."""
186 191
187 192 def __repr__(self):
188 193 output = []
189 blue = TermColors.Blue
190 normal = TermColors.Normal
191 red = TermColors.Red
192 green = TermColors.Green
194 # These colored prompts were not working on Windows
195 if sys.platform == 'win32':
196 blue = normal = red = green = ''
197 else:
198 blue = TermColors.Blue
199 normal = TermColors.Normal
200 red = TermColors.Red
201 green = TermColors.Green
193 202 output.append("<Results List>\n")
194 203 for cmd in self:
195 204 if isinstance(cmd, Failure):
196 205 output.append(cmd)
197 206 else:
198 207 target = cmd.get('id',None)
199 208 cmd_num = cmd.get('number',None)
200 209 cmd_stdin = cmd.get('input',{}).get('translated','No Input')
201 210 cmd_stdout = cmd.get('stdout', None)
202 211 cmd_stderr = cmd.get('stderr', None)
203 212 output.append("%s[%i]%s In [%i]:%s %s\n" % \
204 213 (green, target,
205 214 blue, cmd_num, normal, cmd_stdin))
206 215 if cmd_stdout:
207 216 output.append("%s[%i]%s Out[%i]:%s %s\n" % \
208 217 (green, target,
209 218 red, cmd_num, normal, cmd_stdout))
210 219 if cmd_stderr:
211 220 output.append("%s[%i]%s Err[%i]:\n%s %s" % \
212 221 (green, target,
213 222 red, cmd_num, normal, cmd_stderr))
214 223 return ''.join(output)
215 224
216 225
217 226 def wrapResultList(result):
218 227 """A function that wraps the output of `execute`/`get_result` -> `ResultList`."""
219 228 if len(result) == 0:
220 229 result = [result]
221 230 return ResultList(result)
222 231
223 232
224 233 class QueueStatusList(list):
225 234 """A subclass of list that pretty prints the output of `queue_status`."""
226 235
227 236 def __repr__(self):
228 237 output = []
229 238 output.append("<Queue Status List>\n")
230 239 for e in self:
231 240 output.append("Engine: %s\n" % repr(e[0]))
232 241 output.append(" Pending: %s\n" % repr(e[1]['pending']))
233 242 for q in e[1]['queue']:
234 243 output.append(" Command: %s\n" % repr(q))
235 244 return ''.join(output)
236 245
237 246
238 247 #-------------------------------------------------------------------------------
239 248 # InteractiveMultiEngineClient
240 249 #-------------------------------------------------------------------------------
241 250
242 251 class InteractiveMultiEngineClient(object):
243 252 """A mixin class that add a few methods to a multiengine client.
244 253
245 254 The methods in this mixin class are designed for interactive usage.
246 255 """
247 256
248 257 def activate(self):
249 258 """Make this `MultiEngineClient` active for parallel magic commands.
250 259
251 260 IPython has a magic command syntax to work with `MultiEngineClient` objects.
252 261 In a given IPython session there is a single active one. While
253 262 there can be many `MultiEngineClient` created and used by the user,
254 263 there is only one active one. The active `MultiEngineClient` is used whenever
255 264 the magic commands %px and %autopx are used.
256 265
257 266 The activate() method is called on a given `MultiEngineClient` to make it
258 267 active. Once this has been done, the magic commands can be used.
259 268 """
260 269
261 270 try:
262 271 __IPYTHON__.activeController = self
263 272 except NameError:
264 273 print "The IPython Controller magics only work within IPython."
265 274
266 275 def __setitem__(self, key, value):
267 276 """Add a dictionary interface for pushing/pulling.
268 277
269 278 This functions as a shorthand for `push`.
270 279
271 280 :Parameters:
272 281 key : str
273 282 What to call the remote object.
274 283 value : object
275 284 The local Python object to push.
276 285 """
277 286 targets, block = self._findTargetsAndBlock()
278 287 return self.push({key:value}, targets=targets, block=block)
279 288
280 289 def __getitem__(self, key):
281 290 """Add a dictionary interface for pushing/pulling.
282 291
283 292 This functions as a shorthand to `pull`.
284 293
285 294 :Parameters:
286 295 - `key`: A string representing the key.
287 296 """
288 297 if isinstance(key, str):
289 298 targets, block = self._findTargetsAndBlock()
290 299 return self.pull(key, targets=targets, block=block)
291 300 else:
292 301 raise TypeError("__getitem__ only takes strs")
293 302
294 303 def __len__(self):
295 304 """Return the number of available engines."""
296 305 return len(self.get_ids())
297
298 def parallelize(self, func, targets=None, block=None):
299 """Build a `ParallelFunction` object for functionName on engines.
300
301 The returned object will implement a parallel version of functionName
302 that takes a local sequence as its only argument and calls (in
303 parallel) functionName on each element of that sequence. The
304 `ParallelFunction` object has a `targets` attribute that controls
305 which engines the function is run on.
306
307 :Parameters:
308 targets : int, list or 'all'
309 The engine ids the action will apply to. Call `get_ids` to see
310 a list of currently available engines.
311 functionName : str
312 A Python string that names a callable defined on the engines.
313
314 :Returns: A `ParallelFunction` object.
315 """
316 targets, block = self._findTargetsAndBlock(targets, block)
317 return ParallelFunction(func, self, targets, block)
318
306
319 307 #---------------------------------------------------------------------------
320 308 # Make this a context manager for with
321 309 #---------------------------------------------------------------------------
322 310
323 311 def findsource_file(self,f):
324 312 linecache.checkcache()
325 313 s = findsource(f.f_code)
326 314 lnum = f.f_lineno
327 315 wsource = s[0][f.f_lineno:]
328 316 return strip_whitespace(wsource)
329 317
330 318 def findsource_ipython(self,f):
331 319 from IPython import ipapi
332 320 self.ip = ipapi.get()
333 321 wsource = [l+'\n' for l in
334 322 self.ip.IP.input_hist_raw[-1].splitlines()[1:]]
335 323 return strip_whitespace(wsource)
336 324
337 325 def __enter__(self):
338 326 f = sys._getframe(1)
339 327 local_ns = f.f_locals
340 328 global_ns = f.f_globals
341 329 if f.f_code.co_filename == '<ipython console>':
342 330 s = self.findsource_ipython(f)
343 331 else:
344 332 s = self.findsource_file(f)
345 333
346 334 self._with_context_result = self.execute(s)
347 335
348 336 def __exit__ (self, etype, value, tb):
349 337 if issubclass(etype,error.StopLocalExecution):
350 338 return True
351 339
352 340
353 341 def remote():
354 342 m = 'Special exception to stop local execution of parallel code.'
355 343 raise error.StopLocalExecution(m)
356 344
357 345 def strip_whitespace(source):
358 346 # Expand tabs to avoid any confusion.
359 347 wsource = [l.expandtabs(4) for l in source]
360 348 # Detect the indentation level
361 349 done = False
362 350 for line in wsource:
363 351 if line.isspace():
364 352 continue
365 353 for col,char in enumerate(line):
366 354 if char != ' ':
367 355 done = True
368 356 break
369 357 if done:
370 358 break
371 359 # Now we know how much leading space there is in the code. Next, we
372 360 # extract up to the first line that has less indentation.
373 361 # WARNINGS: we skip comments that may be misindented, but we do NOT yet
374 362 # detect triple quoted strings that may have flush left text.
375 363 for lno,line in enumerate(wsource):
376 364 lead = line[:col]
377 365 if lead.isspace():
378 366 continue
379 367 else:
380 368 if not lead.lstrip().startswith('#'):
381 369 break
382 370 # The real 'with' source is up to lno
383 371 src_lines = [l[col:] for l in wsource[:lno+1]]
384 372
385 373 # Finally, check that the source's first non-comment line begins with the
386 374 # special call 'remote()'
387 375 for nline,line in enumerate(src_lines):
388 376 if line.isspace() or line.startswith('#'):
389 377 continue
390 378 if 'remote()' in line:
391 379 break
392 380 else:
393 381 raise ValueError('remote() call missing at the start of code')
394 382 src = ''.join(src_lines[nline+1:])
395 383 #print 'SRC:\n<<<<<<<>>>>>>>\n%s<<<<<>>>>>>' % src # dbg
396 384 return src
397 385
398 386
399 387 #-------------------------------------------------------------------------------
400 388 # The top-level MultiEngine client adaptor
401 389 #-------------------------------------------------------------------------------
402 390
403 391
404 392 class IFullBlockingMultiEngineClient(Interface):
405 393 pass
406 394
407 395
408 396 class FullBlockingMultiEngineClient(InteractiveMultiEngineClient):
409 397 """
410 398 A blocking client to the `IMultiEngine` controller interface.
411 399
412 400 This class allows users to use a set of engines for a parallel
413 401 computation through the `IMultiEngine` interface. In this interface,
414 402 each engine has a specific id (an int) that is used to refer to the
415 403 engine, run code on it, etc.
416 404 """
417 405
418 implements(IFullBlockingMultiEngineClient)
406 implements(
407 IFullBlockingMultiEngineClient,
408 IMultiEngineMapperFactory,
409 IMapper
410 )
419 411
420 412 def __init__(self, smultiengine):
421 413 self.smultiengine = smultiengine
422 414 self.block = True
423 415 self.targets = 'all'
424 416
425 417 def _findBlock(self, block=None):
426 418 if block is None:
427 419 return self.block
428 420 else:
429 421 if block in (True, False):
430 422 return block
431 423 else:
432 424 raise ValueError("block must be True or False")
433 425
434 426 def _findTargets(self, targets=None):
435 427 if targets is None:
436 428 return self.targets
437 429 else:
438 430 if not isinstance(targets, (str,list,tuple,int)):
439 431 raise ValueError("targets must be a str, list, tuple or int")
440 432 return targets
441 433
442 434 def _findTargetsAndBlock(self, targets=None, block=None):
443 435 return self._findTargets(targets), self._findBlock(block)
444 436
445 437 def _blockFromThread(self, function, *args, **kwargs):
446 438 block = kwargs.get('block', None)
447 439 if block is None:
448 440 raise error.MissingBlockArgument("'block' keyword argument is missing")
449 441 result = blockingCallFromThread(function, *args, **kwargs)
450 442 if not block:
451 443 result = PendingResult(self, result)
452 444 return result
453 445
454 446 def get_pending_deferred(self, deferredID, block):
455 447 return blockingCallFromThread(self.smultiengine.get_pending_deferred, deferredID, block)
456 448
457 449 def barrier(self, pendingResults):
458 450 """Synchronize a set of `PendingResults`.
459 451
460 452 This method is a synchronization primitive that waits for a set of
461 453 `PendingResult` objects to complete. More specifically, barier does
462 454 the following.
463 455
464 456 * The `PendingResult`s are sorted by result_id.
465 457 * The `get_result` method is called for each `PendingResult` sequentially
466 458 with block=True.
467 459 * If a `PendingResult` gets a result that is an exception, it is
468 460 trapped and can be re-raised later by calling `get_result` again.
469 461 * The `PendingResult`s are flushed from the controller.
470 462
471 463 After barrier has been called on a `PendingResult`, its results can
472 464 be retrieved by calling `get_result` again or accesing the `r` attribute
473 465 of the instance.
474 466 """
475 467
476 468 # Convert to list for sorting and check class type
477 469 prList = list(pendingResults)
478 470 for pr in prList:
479 471 if not isinstance(pr, PendingResult):
480 472 raise error.NotAPendingResult("Objects passed to barrier must be PendingResult instances")
481 473
482 474 # Sort the PendingResults so they are in order
483 475 prList.sort()
484 476 # Block on each PendingResult object
485 477 for pr in prList:
486 478 try:
487 479 result = pr.get_result(block=True)
488 480 except Exception:
489 481 pass
490 482
491 483 def flush(self):
492 484 """
493 485 Clear all pending deferreds/results from the controller.
494 486
495 487 For each `PendingResult` that is created by this client, the controller
496 488 holds on to the result for that `PendingResult`. This can be a problem
497 489 if there are a large number of `PendingResult` objects that are created.
498 490
499 491 Once the result of the `PendingResult` has been retrieved, the result
500 492 is removed from the controller, but if a user doesn't get a result (
501 493 they just ignore the `PendingResult`) the result is kept forever on the
502 494 controller. This method allows the user to clear out all un-retrieved
503 495 results on the controller.
504 496 """
505 497 r = blockingCallFromThread(self.smultiengine.clear_pending_deferreds)
506 498 return r
507 499
508 500 clear_pending_results = flush
509 501
510 502 #---------------------------------------------------------------------------
511 503 # IEngineMultiplexer related methods
512 504 #---------------------------------------------------------------------------
513 505
514 506 def execute(self, lines, targets=None, block=None):
515 507 """
516 508 Execute code on a set of engines.
517 509
518 510 :Parameters:
519 511 lines : str
520 512 The Python code to execute as a string
521 513 targets : id or list of ids
522 514 The engine to use for the execution
523 515 block : boolean
524 516 If False, this method will return the actual result. If False,
525 517 a `PendingResult` is returned which can be used to get the result
526 518 at a later time.
527 519 """
528 520 targets, block = self._findTargetsAndBlock(targets, block)
529 521 result = blockingCallFromThread(self.smultiengine.execute, lines,
530 522 targets=targets, block=block)
531 523 if block:
532 524 result = ResultList(result)
533 525 else:
534 526 result = PendingResult(self, result)
535 527 result.add_callback(wrapResultList)
536 528 return result
537 529
538 530 def push(self, namespace, targets=None, block=None):
539 531 """
540 532 Push a dictionary of keys and values to engines namespace.
541 533
542 534 Each engine has a persistent namespace. This method is used to push
543 535 Python objects into that namespace.
544 536
545 537 The objects in the namespace must be pickleable.
546 538
547 539 :Parameters:
548 540 namespace : dict
549 541 A dict that contains Python objects to be injected into
550 542 the engine persistent namespace.
551 543 targets : id or list of ids
552 544 The engine to use for the execution
553 545 block : boolean
554 546 If False, this method will return the actual result. If False,
555 547 a `PendingResult` is returned which can be used to get the result
556 548 at a later time.
557 549 """
558 550 targets, block = self._findTargetsAndBlock(targets, block)
559 551 return self._blockFromThread(self.smultiengine.push, namespace,
560 552 targets=targets, block=block)
561 553
562 554 def pull(self, keys, targets=None, block=None):
563 555 """
564 556 Pull Python objects by key out of engines namespaces.
565 557
566 558 :Parameters:
567 559 keys : str or list of str
568 560 The names of the variables to be pulled
569 561 targets : id or list of ids
570 562 The engine to use for the execution
571 563 block : boolean
572 564 If False, this method will return the actual result. If False,
573 565 a `PendingResult` is returned which can be used to get the result
574 566 at a later time.
575 567 """
576 568 targets, block = self._findTargetsAndBlock(targets, block)
577 569 return self._blockFromThread(self.smultiengine.pull, keys, targets=targets, block=block)
578 570
579 571 def push_function(self, namespace, targets=None, block=None):
580 572 """
581 573 Push a Python function to an engine.
582 574
583 575 This method is used to push a Python function to an engine. This
584 576 method can then be used in code on the engines. Closures are not supported.
585 577
586 578 :Parameters:
587 579 namespace : dict
588 580 A dict whose values are the functions to be pushed. The keys give
589 581 that names that the function will appear as in the engines
590 582 namespace.
591 583 targets : id or list of ids
592 584 The engine to use for the execution
593 585 block : boolean
594 586 If False, this method will return the actual result. If False,
595 587 a `PendingResult` is returned which can be used to get the result
596 588 at a later time.
597 589 """
598 590 targets, block = self._findTargetsAndBlock(targets, block)
599 591 return self._blockFromThread(self.smultiengine.push_function, namespace, targets=targets, block=block)
600 592
601 593 def pull_function(self, keys, targets=None, block=None):
602 594 """
603 595 Pull a Python function from an engine.
604 596
605 597 This method is used to pull a Python function from an engine.
606 598 Closures are not supported.
607 599
608 600 :Parameters:
609 601 keys : str or list of str
610 602 The names of the functions to be pulled
611 603 targets : id or list of ids
612 604 The engine to use for the execution
613 605 block : boolean
614 606 If False, this method will return the actual result. If False,
615 607 a `PendingResult` is returned which can be used to get the result
616 608 at a later time.
617 609 """
618 610 targets, block = self._findTargetsAndBlock(targets, block)
619 611 return self._blockFromThread(self.smultiengine.pull_function, keys, targets=targets, block=block)
620 612
621 613 def push_serialized(self, namespace, targets=None, block=None):
622 614 targets, block = self._findTargetsAndBlock(targets, block)
623 615 return self._blockFromThread(self.smultiengine.push_serialized, namespace, targets=targets, block=block)
624 616
625 617 def pull_serialized(self, keys, targets=None, block=None):
626 618 targets, block = self._findTargetsAndBlock(targets, block)
627 619 return self._blockFromThread(self.smultiengine.pull_serialized, keys, targets=targets, block=block)
628 620
629 621 def get_result(self, i=None, targets=None, block=None):
630 622 """
631 623 Get a previous result.
632 624
633 625 When code is executed in an engine, a dict is created and returned. This
634 626 method retrieves that dict for previous commands.
635 627
636 628 :Parameters:
637 629 i : int
638 630 The number of the result to get
639 631 targets : id or list of ids
640 632 The engine to use for the execution
641 633 block : boolean
642 634 If False, this method will return the actual result. If False,
643 635 a `PendingResult` is returned which can be used to get the result
644 636 at a later time.
645 637 """
646 638 targets, block = self._findTargetsAndBlock(targets, block)
647 639 result = blockingCallFromThread(self.smultiengine.get_result, i, targets=targets, block=block)
648 640 if block:
649 641 result = ResultList(result)
650 642 else:
651 643 result = PendingResult(self, result)
652 644 result.add_callback(wrapResultList)
653 645 return result
654 646
655 647 def reset(self, targets=None, block=None):
656 648 """
657 649 Reset an engine.
658 650
659 651 This method clears out the namespace of an engine.
660 652
661 653 :Parameters:
662 654 targets : id or list of ids
663 655 The engine to use for the execution
664 656 block : boolean
665 657 If False, this method will return the actual result. If False,
666 658 a `PendingResult` is returned which can be used to get the result
667 659 at a later time.
668 660 """
669 661 targets, block = self._findTargetsAndBlock(targets, block)
670 662 return self._blockFromThread(self.smultiengine.reset, targets=targets, block=block)
671 663
672 664 def keys(self, targets=None, block=None):
673 665 """
674 666 Get a list of all the variables in an engine's namespace.
675 667
676 668 :Parameters:
677 669 targets : id or list of ids
678 670 The engine to use for the execution
679 671 block : boolean
680 672 If False, this method will return the actual result. If False,
681 673 a `PendingResult` is returned which can be used to get the result
682 674 at a later time.
683 675 """
684 676 targets, block = self._findTargetsAndBlock(targets, block)
685 677 return self._blockFromThread(self.smultiengine.keys, targets=targets, block=block)
686 678
687 679 def kill(self, controller=False, targets=None, block=None):
688 680 """
689 681 Kill the engines and controller.
690 682
691 683 This method is used to stop the engine and controller by calling
692 684 `reactor.stop`.
693 685
694 686 :Parameters:
695 687 controller : boolean
696 688 If True, kill the engines and controller. If False, just the
697 689 engines
698 690 targets : id or list of ids
699 691 The engine to use for the execution
700 692 block : boolean
701 693 If False, this method will return the actual result. If False,
702 694 a `PendingResult` is returned which can be used to get the result
703 695 at a later time.
704 696 """
705 697 targets, block = self._findTargetsAndBlock(targets, block)
706 698 return self._blockFromThread(self.smultiengine.kill, controller, targets=targets, block=block)
707 699
708 700 def clear_queue(self, targets=None, block=None):
709 701 """
710 702 Clear out the controller's queue for an engine.
711 703
712 704 The controller maintains a queue for each engine. This clear it out.
713 705
714 706 :Parameters:
715 707 targets : id or list of ids
716 708 The engine to use for the execution
717 709 block : boolean
718 710 If False, this method will return the actual result. If False,
719 711 a `PendingResult` is returned which can be used to get the result
720 712 at a later time.
721 713 """
722 714 targets, block = self._findTargetsAndBlock(targets, block)
723 715 return self._blockFromThread(self.smultiengine.clear_queue, targets=targets, block=block)
724 716
725 717 def queue_status(self, targets=None, block=None):
726 718 """
727 719 Get the status of an engines queue.
728 720
729 721 :Parameters:
730 722 targets : id or list of ids
731 723 The engine to use for the execution
732 724 block : boolean
733 725 If False, this method will return the actual result. If False,
734 726 a `PendingResult` is returned which can be used to get the result
735 727 at a later time.
736 728 """
737 729 targets, block = self._findTargetsAndBlock(targets, block)
738 730 return self._blockFromThread(self.smultiengine.queue_status, targets=targets, block=block)
739 731
740 732 def set_properties(self, properties, targets=None, block=None):
741 733 targets, block = self._findTargetsAndBlock(targets, block)
742 734 return self._blockFromThread(self.smultiengine.set_properties, properties, targets=targets, block=block)
743 735
744 736 def get_properties(self, keys=None, targets=None, block=None):
745 737 targets, block = self._findTargetsAndBlock(targets, block)
746 738 return self._blockFromThread(self.smultiengine.get_properties, keys, targets=targets, block=block)
747 739
748 740 def has_properties(self, keys, targets=None, block=None):
749 741 targets, block = self._findTargetsAndBlock(targets, block)
750 742 return self._blockFromThread(self.smultiengine.has_properties, keys, targets=targets, block=block)
751 743
752 744 def del_properties(self, keys, targets=None, block=None):
753 745 targets, block = self._findTargetsAndBlock(targets, block)
754 746 return self._blockFromThread(self.smultiengine.del_properties, keys, targets=targets, block=block)
755 747
756 748 def clear_properties(self, targets=None, block=None):
757 749 targets, block = self._findTargetsAndBlock(targets, block)
758 750 return self._blockFromThread(self.smultiengine.clear_properties, targets=targets, block=block)
759 751
760 752 #---------------------------------------------------------------------------
761 753 # IMultiEngine related methods
762 754 #---------------------------------------------------------------------------
763 755
764 756 def get_ids(self):
765 757 """
766 758 Returns the ids of currently registered engines.
767 759 """
768 760 result = blockingCallFromThread(self.smultiengine.get_ids)
769 761 return result
770 762
771 763 #---------------------------------------------------------------------------
772 764 # IMultiEngineCoordinator
773 765 #---------------------------------------------------------------------------
774 766
775 def scatter(self, key, seq, style='basic', flatten=False, targets=None, block=None):
767 def scatter(self, key, seq, dist='b', flatten=False, targets=None, block=None):
776 768 """
777 769 Partition a Python sequence and send the partitions to a set of engines.
778 770 """
779 771 targets, block = self._findTargetsAndBlock(targets, block)
780 772 return self._blockFromThread(self.smultiengine.scatter, key, seq,
781 style, flatten, targets=targets, block=block)
773 dist, flatten, targets=targets, block=block)
782 774
783 def gather(self, key, style='basic', targets=None, block=None):
775 def gather(self, key, dist='b', targets=None, block=None):
784 776 """
785 777 Gather a partitioned sequence on a set of engines as a single local seq.
786 778 """
787 779 targets, block = self._findTargetsAndBlock(targets, block)
788 return self._blockFromThread(self.smultiengine.gather, key, style,
780 return self._blockFromThread(self.smultiengine.gather, key, dist,
789 781 targets=targets, block=block)
790 782
791 def map(self, func, seq, style='basic', targets=None, block=None):
783 def raw_map(self, func, seq, dist='b', targets=None, block=None):
792 784 """
793 A parallelized version of Python's builtin map
785 A parallelized version of Python's builtin map.
786
787 This has a slightly different syntax than the builtin `map`.
788 This is needed because we need to have keyword arguments and thus
789 can't use *args to capture all the sequences. Instead, they must
790 be passed in a list or tuple.
791
792 raw_map(func, seqs) -> map(func, seqs[0], seqs[1], ...)
793
794 Most users will want to use parallel functions or the `mapper`
795 and `map` methods for an API that follows that of the builtin
796 `map`.
794 797 """
795 798 targets, block = self._findTargetsAndBlock(targets, block)
796 return self._blockFromThread(self.smultiengine.map, func, seq,
797 style, targets=targets, block=block)
799 return self._blockFromThread(self.smultiengine.raw_map, func, seq,
800 dist, targets=targets, block=block)
801
802 def map(self, func, *sequences):
803 """
804 A parallel version of Python's builtin `map` function.
805
806 This method applies a function to sequences of arguments. It
807 follows the same syntax as the builtin `map`.
808
809 This method creates a mapper objects by calling `self.mapper` with
810 no arguments and then uses that mapper to do the mapping. See
811 the documentation of `mapper` for more details.
812 """
813 return self.mapper().map(func, *sequences)
814
815 def mapper(self, dist='b', targets='all', block=None):
816 """
817 Create a mapper object that has a `map` method.
818
819 This method returns an object that implements the `IMapper`
820 interface. This method is a factory that is used to control how
821 the map happens.
822
823 :Parameters:
824 dist : str
825 What decomposition to use, 'b' is the only one supported
826 currently
827 targets : str, int, sequence of ints
828 Which engines to use for the map
829 block : boolean
830 Should calls to `map` block or not
831 """
832 return MultiEngineMapper(self, dist, targets, block)
833
834 def parallel(self, dist='b', targets=None, block=None):
835 """
836 A decorator that turns a function into a parallel function.
837
838 This can be used as:
839
840 @parallel()
841 def f(x, y)
842 ...
843
844 f(range(10), range(10))
845
846 This causes f(0,0), f(1,1), ... to be called in parallel.
847
848 :Parameters:
849 dist : str
850 What decomposition to use, 'b' is the only one supported
851 currently
852 targets : str, int, sequence of ints
853 Which engines to use for the map
854 block : boolean
855 Should calls to `map` block or not
856 """
857 targets, block = self._findTargetsAndBlock(targets, block)
858 mapper = self.mapper(dist, targets, block)
859 pf = ParallelFunction(mapper)
860 return pf
798 861
799 862 #---------------------------------------------------------------------------
800 863 # IMultiEngineExtras
801 864 #---------------------------------------------------------------------------
802 865
803 866 def zip_pull(self, keys, targets=None, block=None):
804 867 targets, block = self._findTargetsAndBlock(targets, block)
805 868 return self._blockFromThread(self.smultiengine.zip_pull, keys,
806 869 targets=targets, block=block)
807 870
808 871 def run(self, filename, targets=None, block=None):
809 872 """
810 873 Run a Python code in a file on the engines.
811 874
812 875 :Parameters:
813 876 filename : str
814 877 The name of the local file to run
815 878 targets : id or list of ids
816 879 The engine to use for the execution
817 880 block : boolean
818 881 If False, this method will return the actual result. If False,
819 882 a `PendingResult` is returned which can be used to get the result
820 883 at a later time.
821 884 """
822 885 targets, block = self._findTargetsAndBlock(targets, block)
823 886 return self._blockFromThread(self.smultiengine.run, filename,
824 887 targets=targets, block=block)
825 888
826 889
827 890
828 891 components.registerAdapter(FullBlockingMultiEngineClient,
829 892 IFullSynchronousMultiEngine, IFullBlockingMultiEngineClient)
830 893
831 894
832 895
833 896
@@ -1,668 +1,757 b''
1 1 # encoding: utf-8
2 2
3 3 """
4 4 Expose the multiengine controller over the Foolscap network protocol.
5 5 """
6 6
7 7 __docformat__ = "restructuredtext en"
8 8
9 9 #-------------------------------------------------------------------------------
10 10 # Copyright (C) 2008 The IPython Development Team
11 11 #
12 12 # Distributed under the terms of the BSD License. The full license is in
13 13 # the file COPYING, distributed as part of this software.
14 14 #-------------------------------------------------------------------------------
15 15
16 16 #-------------------------------------------------------------------------------
17 17 # Imports
18 18 #-------------------------------------------------------------------------------
19 19
20 20 import cPickle as pickle
21 21 from types import FunctionType
22 22
23 23 from zope.interface import Interface, implements
24 24 from twisted.internet import defer
25 25 from twisted.python import components, failure, log
26 26
27 27 from foolscap import Referenceable
28 28
29 29 from IPython.kernel import error
30 30 from IPython.kernel.util import printer
31 31 from IPython.kernel import map as Map
32 from IPython.kernel.parallelfunction import ParallelFunction
33 from IPython.kernel.mapper import (
34 MultiEngineMapper,
35 IMultiEngineMapperFactory,
36 IMapper
37 )
32 38 from IPython.kernel.twistedutil import gatherBoth
33 39 from IPython.kernel.multiengine import (MultiEngine,
34 40 IMultiEngine,
35 41 IFullSynchronousMultiEngine,
36 42 ISynchronousMultiEngine)
37 43 from IPython.kernel.multiengineclient import wrapResultList
38 44 from IPython.kernel.pendingdeferred import PendingDeferredManager
39 45 from IPython.kernel.pickleutil import (can, canDict,
40 46 canSequence, uncan, uncanDict, uncanSequence)
41 47
42 48 from IPython.kernel.clientinterfaces import (
43 49 IFCClientInterfaceProvider,
44 50 IBlockingClientAdaptor
45 51 )
46 52
47 53 # Needed to access the true globals from __main__.__dict__
48 54 import __main__
49 55
50 56 #-------------------------------------------------------------------------------
51 57 # The Controller side of things
52 58 #-------------------------------------------------------------------------------
53 59
54 60 def packageResult(wrappedMethod):
55 61
56 62 def wrappedPackageResult(self, *args, **kwargs):
57 63 d = wrappedMethod(self, *args, **kwargs)
58 64 d.addCallback(self.packageSuccess)
59 65 d.addErrback(self.packageFailure)
60 66 return d
61 67 return wrappedPackageResult
62 68
63 69
64 70 class IFCSynchronousMultiEngine(Interface):
65 71 """Foolscap interface to `ISynchronousMultiEngine`.
66 72
67 73 The methods in this interface are similar to those of
68 74 `ISynchronousMultiEngine`, but their arguments and return values are pickled
69 75 if they are not already simple Python types that can be send over XML-RPC.
70 76
71 77 See the documentation of `ISynchronousMultiEngine` and `IMultiEngine` for
72 78 documentation about the methods.
73 79
74 80 Most methods in this interface act like the `ISynchronousMultiEngine`
75 81 versions and can be called in blocking or non-blocking mode.
76 82 """
77 83 pass
78 84
79 85
80 86 class FCSynchronousMultiEngineFromMultiEngine(Referenceable):
81 87 """Adapt `IMultiEngine` -> `ISynchronousMultiEngine` -> `IFCSynchronousMultiEngine`.
82 88 """
83 89
84 90 implements(IFCSynchronousMultiEngine, IFCClientInterfaceProvider)
85 91
86 92 addSlash = True
87 93
88 94 def __init__(self, multiengine):
89 95 # Adapt the raw multiengine to `ISynchronousMultiEngine` before saving
90 96 # it. This allow this class to do two adaptation steps.
91 97 self.smultiengine = ISynchronousMultiEngine(multiengine)
92 98 self._deferredIDCallbacks = {}
93 99
94 100 #---------------------------------------------------------------------------
95 101 # Non interface methods
96 102 #---------------------------------------------------------------------------
97 103
98 104 def packageFailure(self, f):
99 105 f.cleanFailure()
100 106 return self.packageSuccess(f)
101 107
102 108 def packageSuccess(self, obj):
103 109 serial = pickle.dumps(obj, 2)
104 110 return serial
105 111
106 112 #---------------------------------------------------------------------------
107 113 # Things related to PendingDeferredManager
108 114 #---------------------------------------------------------------------------
109 115
110 116 @packageResult
111 117 def remote_get_pending_deferred(self, deferredID, block):
112 118 d = self.smultiengine.get_pending_deferred(deferredID, block)
113 119 try:
114 120 callback = self._deferredIDCallbacks.pop(deferredID)
115 121 except KeyError:
116 122 callback = None
117 123 if callback is not None:
118 124 d.addCallback(callback[0], *callback[1], **callback[2])
119 125 return d
120 126
121 127 @packageResult
122 128 def remote_clear_pending_deferreds(self):
123 129 return defer.maybeDeferred(self.smultiengine.clear_pending_deferreds)
124 130
125 131 def _addDeferredIDCallback(self, did, callback, *args, **kwargs):
126 132 self._deferredIDCallbacks[did] = (callback, args, kwargs)
127 133 return did
128 134
129 135 #---------------------------------------------------------------------------
130 136 # IEngineMultiplexer related methods
131 137 #---------------------------------------------------------------------------
132 138
133 139 @packageResult
134 140 def remote_execute(self, lines, targets, block):
135 141 return self.smultiengine.execute(lines, targets=targets, block=block)
136 142
137 143 @packageResult
138 144 def remote_push(self, binaryNS, targets, block):
139 145 try:
140 146 namespace = pickle.loads(binaryNS)
141 147 except:
142 148 d = defer.fail(failure.Failure())
143 149 else:
144 150 d = self.smultiengine.push(namespace, targets=targets, block=block)
145 151 return d
146 152
147 153 @packageResult
148 154 def remote_pull(self, keys, targets, block):
149 155 d = self.smultiengine.pull(keys, targets=targets, block=block)
150 156 return d
151 157
152 158 @packageResult
153 159 def remote_push_function(self, binaryNS, targets, block):
154 160 try:
155 161 namespace = pickle.loads(binaryNS)
156 162 except:
157 163 d = defer.fail(failure.Failure())
158 164 else:
159 165 namespace = uncanDict(namespace)
160 166 d = self.smultiengine.push_function(namespace, targets=targets, block=block)
161 167 return d
162 168
163 169 def _canMultipleKeys(self, result):
164 170 return [canSequence(r) for r in result]
165 171
166 172 @packageResult
167 173 def remote_pull_function(self, keys, targets, block):
168 174 def can_functions(r, keys):
169 175 if len(keys)==1 or isinstance(keys, str):
170 176 result = canSequence(r)
171 177 elif len(keys)>1:
172 178 result = [canSequence(s) for s in r]
173 179 return result
174 180 d = self.smultiengine.pull_function(keys, targets=targets, block=block)
175 181 if block:
176 182 d.addCallback(can_functions, keys)
177 183 else:
178 184 d.addCallback(lambda did: self._addDeferredIDCallback(did, can_functions, keys))
179 185 return d
180 186
181 187 @packageResult
182 188 def remote_push_serialized(self, binaryNS, targets, block):
183 189 try:
184 190 namespace = pickle.loads(binaryNS)
185 191 except:
186 192 d = defer.fail(failure.Failure())
187 193 else:
188 194 d = self.smultiengine.push_serialized(namespace, targets=targets, block=block)
189 195 return d
190 196
191 197 @packageResult
192 198 def remote_pull_serialized(self, keys, targets, block):
193 199 d = self.smultiengine.pull_serialized(keys, targets=targets, block=block)
194 200 return d
195 201
196 202 @packageResult
197 203 def remote_get_result(self, i, targets, block):
198 204 if i == 'None':
199 205 i = None
200 206 return self.smultiengine.get_result(i, targets=targets, block=block)
201 207
202 208 @packageResult
203 209 def remote_reset(self, targets, block):
204 210 return self.smultiengine.reset(targets=targets, block=block)
205 211
206 212 @packageResult
207 213 def remote_keys(self, targets, block):
208 214 return self.smultiengine.keys(targets=targets, block=block)
209 215
210 216 @packageResult
211 217 def remote_kill(self, controller, targets, block):
212 218 return self.smultiengine.kill(controller, targets=targets, block=block)
213 219
214 220 @packageResult
215 221 def remote_clear_queue(self, targets, block):
216 222 return self.smultiengine.clear_queue(targets=targets, block=block)
217 223
218 224 @packageResult
219 225 def remote_queue_status(self, targets, block):
220 226 return self.smultiengine.queue_status(targets=targets, block=block)
221 227
222 228 @packageResult
223 229 def remote_set_properties(self, binaryNS, targets, block):
224 230 try:
225 231 ns = pickle.loads(binaryNS)
226 232 except:
227 233 d = defer.fail(failure.Failure())
228 234 else:
229 235 d = self.smultiengine.set_properties(ns, targets=targets, block=block)
230 236 return d
231 237
232 238 @packageResult
233 239 def remote_get_properties(self, keys, targets, block):
234 240 if keys=='None':
235 241 keys=None
236 242 return self.smultiengine.get_properties(keys, targets=targets, block=block)
237 243
238 244 @packageResult
239 245 def remote_has_properties(self, keys, targets, block):
240 246 return self.smultiengine.has_properties(keys, targets=targets, block=block)
241 247
242 248 @packageResult
243 249 def remote_del_properties(self, keys, targets, block):
244 250 return self.smultiengine.del_properties(keys, targets=targets, block=block)
245 251
246 252 @packageResult
247 253 def remote_clear_properties(self, targets, block):
248 254 return self.smultiengine.clear_properties(targets=targets, block=block)
249 255
250 256 #---------------------------------------------------------------------------
251 257 # IMultiEngine related methods
252 258 #---------------------------------------------------------------------------
253 259
254 260 def remote_get_ids(self):
255 261 """Get the ids of the registered engines.
256 262
257 263 This method always blocks.
258 264 """
259 265 return self.smultiengine.get_ids()
260 266
261 267 #---------------------------------------------------------------------------
262 268 # IFCClientInterfaceProvider related methods
263 269 #---------------------------------------------------------------------------
264 270
265 271 def remote_get_client_name(self):
266 272 return 'IPython.kernel.multienginefc.FCFullSynchronousMultiEngineClient'
267 273
268 274
269 275 # The __init__ method of `FCMultiEngineFromMultiEngine` first adapts the
270 276 # `IMultiEngine` to `ISynchronousMultiEngine` so this is actually doing a
271 277 # two phase adaptation.
272 278 components.registerAdapter(FCSynchronousMultiEngineFromMultiEngine,
273 279 IMultiEngine, IFCSynchronousMultiEngine)
274 280
275 281
276 282 #-------------------------------------------------------------------------------
277 283 # The Client side of things
278 284 #-------------------------------------------------------------------------------
279 285
280 286
281 287 class FCFullSynchronousMultiEngineClient(object):
282 288
283 implements(IFullSynchronousMultiEngine, IBlockingClientAdaptor)
289 implements(
290 IFullSynchronousMultiEngine,
291 IBlockingClientAdaptor,
292 IMultiEngineMapperFactory,
293 IMapper
294 )
284 295
285 296 def __init__(self, remote_reference):
286 297 self.remote_reference = remote_reference
287 298 self._deferredIDCallbacks = {}
288 299 # This class manages some pending deferreds through this instance. This
289 300 # is required for methods like gather/scatter as it enables us to
290 301 # create our own pending deferreds for composite operations.
291 302 self.pdm = PendingDeferredManager()
292 303
293 304 #---------------------------------------------------------------------------
294 305 # Non interface methods
295 306 #---------------------------------------------------------------------------
296 307
297 308 def unpackage(self, r):
298 309 return pickle.loads(r)
299 310
300 311 #---------------------------------------------------------------------------
301 312 # Things related to PendingDeferredManager
302 313 #---------------------------------------------------------------------------
303 314
304 315 def get_pending_deferred(self, deferredID, block=True):
305 316
306 317 # Because we are managing some pending deferreds locally (through
307 318 # self.pdm) and some remotely (on the controller), we first try the
308 319 # local one and then the remote one.
309 320 if self.pdm.quick_has_id(deferredID):
310 321 d = self.pdm.get_pending_deferred(deferredID, block)
311 322 return d
312 323 else:
313 324 d = self.remote_reference.callRemote('get_pending_deferred', deferredID, block)
314 325 d.addCallback(self.unpackage)
315 326 try:
316 327 callback = self._deferredIDCallbacks.pop(deferredID)
317 328 except KeyError:
318 329 callback = None
319 330 if callback is not None:
320 331 d.addCallback(callback[0], *callback[1], **callback[2])
321 332 return d
322 333
323 334 def clear_pending_deferreds(self):
324 335
325 336 # This clear both the local (self.pdm) and remote pending deferreds
326 337 self.pdm.clear_pending_deferreds()
327 338 d2 = self.remote_reference.callRemote('clear_pending_deferreds')
328 339 d2.addCallback(self.unpackage)
329 340 return d2
330 341
331 342 def _addDeferredIDCallback(self, did, callback, *args, **kwargs):
332 343 self._deferredIDCallbacks[did] = (callback, args, kwargs)
333 344 return did
334 345
335 346 #---------------------------------------------------------------------------
336 347 # IEngineMultiplexer related methods
337 348 #---------------------------------------------------------------------------
338 349
339 350 def execute(self, lines, targets='all', block=True):
340 351 d = self.remote_reference.callRemote('execute', lines, targets, block)
341 352 d.addCallback(self.unpackage)
342 353 return d
343 354
344 355 def push(self, namespace, targets='all', block=True):
345 356 serial = pickle.dumps(namespace, 2)
346 357 d = self.remote_reference.callRemote('push', serial, targets, block)
347 358 d.addCallback(self.unpackage)
348 359 return d
349 360
350 361 def pull(self, keys, targets='all', block=True):
351 362 d = self.remote_reference.callRemote('pull', keys, targets, block)
352 363 d.addCallback(self.unpackage)
353 364 return d
354 365
355 366 def push_function(self, namespace, targets='all', block=True):
356 367 cannedNamespace = canDict(namespace)
357 368 serial = pickle.dumps(cannedNamespace, 2)
358 369 d = self.remote_reference.callRemote('push_function', serial, targets, block)
359 370 d.addCallback(self.unpackage)
360 371 return d
361 372
362 373 def pull_function(self, keys, targets='all', block=True):
363 374 def uncan_functions(r, keys):
364 375 if len(keys)==1 or isinstance(keys, str):
365 376 return uncanSequence(r)
366 377 elif len(keys)>1:
367 378 return [uncanSequence(s) for s in r]
368 379 d = self.remote_reference.callRemote('pull_function', keys, targets, block)
369 380 if block:
370 381 d.addCallback(self.unpackage)
371 382 d.addCallback(uncan_functions, keys)
372 383 else:
373 384 d.addCallback(self.unpackage)
374 385 d.addCallback(lambda did: self._addDeferredIDCallback(did, uncan_functions, keys))
375 386 return d
376 387
377 388 def push_serialized(self, namespace, targets='all', block=True):
378 389 cannedNamespace = canDict(namespace)
379 390 serial = pickle.dumps(cannedNamespace, 2)
380 391 d = self.remote_reference.callRemote('push_serialized', serial, targets, block)
381 392 d.addCallback(self.unpackage)
382 393 return d
383 394
384 395 def pull_serialized(self, keys, targets='all', block=True):
385 396 d = self.remote_reference.callRemote('pull_serialized', keys, targets, block)
386 397 d.addCallback(self.unpackage)
387 398 return d
388 399
389 400 def get_result(self, i=None, targets='all', block=True):
390 401 if i is None: # This is because None cannot be marshalled by xml-rpc
391 402 i = 'None'
392 403 d = self.remote_reference.callRemote('get_result', i, targets, block)
393 404 d.addCallback(self.unpackage)
394 405 return d
395 406
396 407 def reset(self, targets='all', block=True):
397 408 d = self.remote_reference.callRemote('reset', targets, block)
398 409 d.addCallback(self.unpackage)
399 410 return d
400 411
401 412 def keys(self, targets='all', block=True):
402 413 d = self.remote_reference.callRemote('keys', targets, block)
403 414 d.addCallback(self.unpackage)
404 415 return d
405 416
406 417 def kill(self, controller=False, targets='all', block=True):
407 418 d = self.remote_reference.callRemote('kill', controller, targets, block)
408 419 d.addCallback(self.unpackage)
409 420 return d
410 421
411 422 def clear_queue(self, targets='all', block=True):
412 423 d = self.remote_reference.callRemote('clear_queue', targets, block)
413 424 d.addCallback(self.unpackage)
414 425 return d
415 426
416 427 def queue_status(self, targets='all', block=True):
417 428 d = self.remote_reference.callRemote('queue_status', targets, block)
418 429 d.addCallback(self.unpackage)
419 430 return d
420 431
421 432 def set_properties(self, properties, targets='all', block=True):
422 433 serial = pickle.dumps(properties, 2)
423 434 d = self.remote_reference.callRemote('set_properties', serial, targets, block)
424 435 d.addCallback(self.unpackage)
425 436 return d
426 437
427 438 def get_properties(self, keys=None, targets='all', block=True):
428 439 if keys==None:
429 440 keys='None'
430 441 d = self.remote_reference.callRemote('get_properties', keys, targets, block)
431 442 d.addCallback(self.unpackage)
432 443 return d
433 444
434 445 def has_properties(self, keys, targets='all', block=True):
435 446 d = self.remote_reference.callRemote('has_properties', keys, targets, block)
436 447 d.addCallback(self.unpackage)
437 448 return d
438 449
439 450 def del_properties(self, keys, targets='all', block=True):
440 451 d = self.remote_reference.callRemote('del_properties', keys, targets, block)
441 452 d.addCallback(self.unpackage)
442 453 return d
443 454
444 455 def clear_properties(self, targets='all', block=True):
445 456 d = self.remote_reference.callRemote('clear_properties', targets, block)
446 457 d.addCallback(self.unpackage)
447 458 return d
448 459
449 460 #---------------------------------------------------------------------------
450 461 # IMultiEngine related methods
451 462 #---------------------------------------------------------------------------
452 463
453 464 def get_ids(self):
454 465 d = self.remote_reference.callRemote('get_ids')
455 466 return d
456 467
457 468 #---------------------------------------------------------------------------
458 469 # ISynchronousMultiEngineCoordinator related methods
459 470 #---------------------------------------------------------------------------
460 471
461 472 def _process_targets(self, targets):
462 473 def create_targets(ids):
463 474 if isinstance(targets, int):
464 475 engines = [targets]
465 476 elif targets=='all':
466 477 engines = ids
467 478 elif isinstance(targets, (list, tuple)):
468 479 engines = targets
469 480 for t in engines:
470 481 if not t in ids:
471 482 raise error.InvalidEngineID("engine with id %r does not exist"%t)
472 483 return engines
473 484
474 485 d = self.get_ids()
475 486 d.addCallback(create_targets)
476 487 return d
477 488
478 def scatter(self, key, seq, style='basic', flatten=False, targets='all', block=True):
489 def scatter(self, key, seq, dist='b', flatten=False, targets='all', block=True):
479 490
480 491 # Note: scatter and gather handle pending deferreds locally through self.pdm.
481 492 # This enables us to collect a bunch fo deferred ids and make a secondary
482 493 # deferred id that corresponds to the entire group. This logic is extremely
483 494 # difficult to get right though.
484 495 def do_scatter(engines):
485 496 nEngines = len(engines)
486 mapClass = Map.styles[style]
497 mapClass = Map.dists[dist]
487 498 mapObject = mapClass()
488 499 d_list = []
489 500 # Loop through and push to each engine in non-blocking mode.
490 501 # This returns a set of deferreds to deferred_ids
491 502 for index, engineid in enumerate(engines):
492 503 partition = mapObject.getPartition(seq, index, nEngines)
493 504 if flatten and len(partition) == 1:
494 505 d = self.push({key: partition[0]}, targets=engineid, block=False)
495 506 else:
496 507 d = self.push({key: partition}, targets=engineid, block=False)
497 508 d_list.append(d)
498 509 # Collect the deferred to deferred_ids
499 510 d = gatherBoth(d_list,
500 511 fireOnOneErrback=0,
501 512 consumeErrors=1,
502 513 logErrors=0)
503 514 # Now d has a list of deferred_ids or Failures coming
504 515 d.addCallback(error.collect_exceptions, 'scatter')
505 516 def process_did_list(did_list):
506 517 """Turn a list of deferred_ids into a final result or failure."""
507 518 new_d_list = [self.get_pending_deferred(did, True) for did in did_list]
508 519 final_d = gatherBoth(new_d_list,
509 520 fireOnOneErrback=0,
510 521 consumeErrors=1,
511 522 logErrors=0)
512 523 final_d.addCallback(error.collect_exceptions, 'scatter')
513 524 final_d.addCallback(lambda lop: [i[0] for i in lop])
514 525 return final_d
515 526 # Now, depending on block, we need to handle the list deferred_ids
516 527 # coming down the pipe diferently.
517 528 if block:
518 529 # If we are blocking register a callback that will transform the
519 530 # list of deferred_ids into the final result.
520 531 d.addCallback(process_did_list)
521 532 return d
522 533 else:
523 534 # Here we are going to use a _local_ PendingDeferredManager.
524 535 deferred_id = self.pdm.get_deferred_id()
525 536 # This is the deferred we will return to the user that will fire
526 537 # with the local deferred_id AFTER we have received the list of
527 538 # primary deferred_ids
528 539 d_to_return = defer.Deferred()
529 540 def do_it(did_list):
530 541 """Produce a deferred to the final result, but first fire the
531 542 deferred we will return to the user that has the local
532 543 deferred id."""
533 544 d_to_return.callback(deferred_id)
534 545 return process_did_list(did_list)
535 546 d.addCallback(do_it)
536 547 # Now save the deferred to the final result
537 548 self.pdm.save_pending_deferred(d, deferred_id)
538 549 return d_to_return
539 550
540 551 d = self._process_targets(targets)
541 552 d.addCallback(do_scatter)
542 553 return d
543 554
544 def gather(self, key, style='basic', targets='all', block=True):
555 def gather(self, key, dist='b', targets='all', block=True):
545 556
546 557 # Note: scatter and gather handle pending deferreds locally through self.pdm.
547 558 # This enables us to collect a bunch fo deferred ids and make a secondary
548 559 # deferred id that corresponds to the entire group. This logic is extremely
549 560 # difficult to get right though.
550 561 def do_gather(engines):
551 562 nEngines = len(engines)
552 mapClass = Map.styles[style]
563 mapClass = Map.dists[dist]
553 564 mapObject = mapClass()
554 565 d_list = []
555 566 # Loop through and push to each engine in non-blocking mode.
556 567 # This returns a set of deferreds to deferred_ids
557 568 for index, engineid in enumerate(engines):
558 569 d = self.pull(key, targets=engineid, block=False)
559 570 d_list.append(d)
560 571 # Collect the deferred to deferred_ids
561 572 d = gatherBoth(d_list,
562 573 fireOnOneErrback=0,
563 574 consumeErrors=1,
564 575 logErrors=0)
565 576 # Now d has a list of deferred_ids or Failures coming
566 577 d.addCallback(error.collect_exceptions, 'scatter')
567 578 def process_did_list(did_list):
568 579 """Turn a list of deferred_ids into a final result or failure."""
569 580 new_d_list = [self.get_pending_deferred(did, True) for did in did_list]
570 581 final_d = gatherBoth(new_d_list,
571 582 fireOnOneErrback=0,
572 583 consumeErrors=1,
573 584 logErrors=0)
574 585 final_d.addCallback(error.collect_exceptions, 'gather')
575 586 final_d.addCallback(lambda lop: [i[0] for i in lop])
576 587 final_d.addCallback(mapObject.joinPartitions)
577 588 return final_d
578 589 # Now, depending on block, we need to handle the list deferred_ids
579 590 # coming down the pipe diferently.
580 591 if block:
581 592 # If we are blocking register a callback that will transform the
582 593 # list of deferred_ids into the final result.
583 594 d.addCallback(process_did_list)
584 595 return d
585 596 else:
586 597 # Here we are going to use a _local_ PendingDeferredManager.
587 598 deferred_id = self.pdm.get_deferred_id()
588 599 # This is the deferred we will return to the user that will fire
589 600 # with the local deferred_id AFTER we have received the list of
590 601 # primary deferred_ids
591 602 d_to_return = defer.Deferred()
592 603 def do_it(did_list):
593 604 """Produce a deferred to the final result, but first fire the
594 605 deferred we will return to the user that has the local
595 606 deferred id."""
596 607 d_to_return.callback(deferred_id)
597 608 return process_did_list(did_list)
598 609 d.addCallback(do_it)
599 610 # Now save the deferred to the final result
600 611 self.pdm.save_pending_deferred(d, deferred_id)
601 612 return d_to_return
602 613
603 614 d = self._process_targets(targets)
604 615 d.addCallback(do_gather)
605 616 return d
606 617
607 def map(self, func, seq, style='basic', targets='all', block=True):
608 d_list = []
618 def raw_map(self, func, sequences, dist='b', targets='all', block=True):
619 """
620 A parallelized version of Python's builtin map.
621
622 This has a slightly different syntax than the builtin `map`.
623 This is needed because we need to have keyword arguments and thus
624 can't use *args to capture all the sequences. Instead, they must
625 be passed in a list or tuple.
626
627 raw_map(func, seqs) -> map(func, seqs[0], seqs[1], ...)
628
629 Most users will want to use parallel functions or the `mapper`
630 and `map` methods for an API that follows that of the builtin
631 `map`.
632 """
633 if not isinstance(sequences, (list, tuple)):
634 raise TypeError('sequences must be a list or tuple')
635 max_len = max(len(s) for s in sequences)
636 for s in sequences:
637 if len(s)!=max_len:
638 raise ValueError('all sequences must have equal length')
609 639 if isinstance(func, FunctionType):
610 640 d = self.push_function(dict(_ipython_map_func=func), targets=targets, block=False)
611 641 d.addCallback(lambda did: self.get_pending_deferred(did, True))
612 sourceToRun = '_ipython_map_seq_result = map(_ipython_map_func, _ipython_map_seq)'
642 sourceToRun = '_ipython_map_seq_result = map(_ipython_map_func, *zip(*_ipython_map_seq))'
613 643 elif isinstance(func, str):
614 644 d = defer.succeed(None)
615 645 sourceToRun = \
616 '_ipython_map_seq_result = map(%s, _ipython_map_seq)' % func
646 '_ipython_map_seq_result = map(%s, *zip(*_ipython_map_seq))' % func
617 647 else:
618 648 raise TypeError("func must be a function or str")
619 649
620 d.addCallback(lambda _: self.scatter('_ipython_map_seq', seq, style, targets=targets))
650 d.addCallback(lambda _: self.scatter('_ipython_map_seq', zip(*sequences), dist, targets=targets))
621 651 d.addCallback(lambda _: self.execute(sourceToRun, targets=targets, block=False))
622 652 d.addCallback(lambda did: self.get_pending_deferred(did, True))
623 d.addCallback(lambda _: self.gather('_ipython_map_seq_result', style, targets=targets, block=block))
653 d.addCallback(lambda _: self.gather('_ipython_map_seq_result', dist, targets=targets, block=block))
624 654 return d
625 655
656 def map(self, func, *sequences):
657 """
658 A parallel version of Python's builtin `map` function.
659
660 This method applies a function to sequences of arguments. It
661 follows the same syntax as the builtin `map`.
662
663 This method creates a mapper objects by calling `self.mapper` with
664 no arguments and then uses that mapper to do the mapping. See
665 the documentation of `mapper` for more details.
666 """
667 return self.mapper().map(func, *sequences)
668
669 def mapper(self, dist='b', targets='all', block=True):
670 """
671 Create a mapper object that has a `map` method.
672
673 This method returns an object that implements the `IMapper`
674 interface. This method is a factory that is used to control how
675 the map happens.
676
677 :Parameters:
678 dist : str
679 What decomposition to use, 'b' is the only one supported
680 currently
681 targets : str, int, sequence of ints
682 Which engines to use for the map
683 block : boolean
684 Should calls to `map` block or not
685 """
686 return MultiEngineMapper(self, dist, targets, block)
687
688 def parallel(self, dist='b', targets='all', block=True):
689 """
690 A decorator that turns a function into a parallel function.
691
692 This can be used as:
693
694 @parallel()
695 def f(x, y)
696 ...
697
698 f(range(10), range(10))
699
700 This causes f(0,0), f(1,1), ... to be called in parallel.
701
702 :Parameters:
703 dist : str
704 What decomposition to use, 'b' is the only one supported
705 currently
706 targets : str, int, sequence of ints
707 Which engines to use for the map
708 block : boolean
709 Should calls to `map` block or not
710 """
711 mapper = self.mapper(dist, targets, block)
712 pf = ParallelFunction(mapper)
713 return pf
714
626 715 #---------------------------------------------------------------------------
627 716 # ISynchronousMultiEngineExtras related methods
628 717 #---------------------------------------------------------------------------
629 718
630 719 def _transformPullResult(self, pushResult, multitargets, lenKeys):
631 720 if not multitargets:
632 721 result = pushResult[0]
633 722 elif lenKeys > 1:
634 723 result = zip(*pushResult)
635 724 elif lenKeys is 1:
636 725 result = list(pushResult)
637 726 return result
638 727
639 728 def zip_pull(self, keys, targets='all', block=True):
640 729 multitargets = not isinstance(targets, int) and len(targets) > 1
641 730 lenKeys = len(keys)
642 731 d = self.pull(keys, targets=targets, block=block)
643 732 if block:
644 733 d.addCallback(self._transformPullResult, multitargets, lenKeys)
645 734 else:
646 735 d.addCallback(lambda did: self._addDeferredIDCallback(did, self._transformPullResult, multitargets, lenKeys))
647 736 return d
648 737
649 738 def run(self, fname, targets='all', block=True):
650 739 fileobj = open(fname,'r')
651 740 source = fileobj.read()
652 741 fileobj.close()
653 742 # if the compilation blows, we get a local error right away
654 743 try:
655 744 code = compile(source,fname,'exec')
656 745 except:
657 746 return defer.fail(failure.Failure())
658 747 # Now run the code
659 748 d = self.execute(source, targets=targets, block=block)
660 749 return d
661 750
662 751 #---------------------------------------------------------------------------
663 752 # IBlockingClientAdaptor related methods
664 753 #---------------------------------------------------------------------------
665 754
666 755 def adapt_to_blocking_client(self):
667 756 from IPython.kernel.multiengineclient import IFullBlockingMultiEngineClient
668 757 return IFullBlockingMultiEngineClient(self)
@@ -1,32 +1,107 b''
1 1 # encoding: utf-8
2 2
3 3 """A parallelized function that does scatter/execute/gather."""
4 4
5 5 __docformat__ = "restructuredtext en"
6 6
7 7 #-------------------------------------------------------------------------------
8 8 # Copyright (C) 2008 The IPython Development Team
9 9 #
10 10 # Distributed under the terms of the BSD License. The full license is in
11 11 # the file COPYING, distributed as part of this software.
12 12 #-------------------------------------------------------------------------------
13 13
14 14 #-------------------------------------------------------------------------------
15 15 # Imports
16 16 #-------------------------------------------------------------------------------
17 17
18 18 from types import FunctionType
19 from zope.interface import Interface, implements
19 20
20 class ParallelFunction:
21 """A function that operates in parallel on sequences."""
22 def __init__(self, func, multiengine, targets, block):
23 """Create a `ParallelFunction`.
21
22 class IMultiEngineParallelDecorator(Interface):
23 """A decorator that creates a parallel function."""
24
25 def parallel(dist='b', targets=None, block=None):
26 """
27 A decorator that turns a function into a parallel function.
28
29 This can be used as:
30
31 @parallel()
32 def f(x, y)
33 ...
34
35 f(range(10), range(10))
36
37 This causes f(0,0), f(1,1), ... to be called in parallel.
38
39 :Parameters:
40 dist : str
41 What decomposition to use, 'b' is the only one supported
42 currently
43 targets : str, int, sequence of ints
44 Which engines to use for the map
45 block : boolean
46 Should calls to `map` block or not
47 """
48
49 class ITaskParallelDecorator(Interface):
50 """A decorator that creates a parallel function."""
51
52 def parallel(clear_before=False, clear_after=False, retries=0,
53 recovery_task=None, depend=None, block=True):
54 """
55 A decorator that turns a function into a parallel function.
56
57 This can be used as:
58
59 @parallel()
60 def f(x, y)
61 ...
62
63 f(range(10), range(10))
64
65 This causes f(0,0), f(1,1), ... to be called in parallel.
66
67 See the documentation for `IPython.kernel.task.BaseTask` for
68 documentation on the arguments to this method.
69 """
70
71 class IParallelFunction(Interface):
72 pass
73
74 class ParallelFunction(object):
75 """
76 The implementation of a parallel function.
77
78 A parallel function is similar to Python's map function:
79
80 map(func, *sequences) -> pfunc(*sequences)
81
82 Parallel functions should be created by using the @parallel decorator.
83 """
84
85 implements(IParallelFunction)
86
87 def __init__(self, mapper):
88 """
89 Create a parallel function from an `IMapper`.
90
91 :Parameters:
92 mapper : an `IMapper` implementer.
93 The mapper to use for the parallel function
94 """
95 self.mapper = mapper
96
97 def __call__(self, func):
98 """
99 Decorate a function to make it run in parallel.
24 100 """
25 101 assert isinstance(func, (str, FunctionType)), "func must be a fuction or str"
26 102 self.func = func
27 self.multiengine = multiengine
28 self.targets = targets
29 self.block = block
30
31 def __call__(self, sequence):
32 return self.multiengine.map(self.func, sequence, targets=self.targets, block=self.block) No newline at end of file
103 def call_function(*sequences):
104 return self.mapper.map(self.func, *sequences)
105 return call_function
106
107 No newline at end of file
This diff has been collapsed as it changes many lines, (682 lines changed) Show them Hide them
@@ -1,803 +1,1113 b''
1 1 # encoding: utf-8
2 2 # -*- test-case-name: IPython.kernel.tests.test_task -*-
3 3
4 4 """Task farming representation of the ControllerService."""
5 5
6 6 __docformat__ = "restructuredtext en"
7 7
8 #-------------------------------------------------------------------------------
8 #-----------------------------------------------------------------------------
9 9 # Copyright (C) 2008 The IPython Development Team
10 10 #
11 11 # Distributed under the terms of the BSD License. The full license is in
12 12 # the file COPYING, distributed as part of this software.
13 #-------------------------------------------------------------------------------
13 #-----------------------------------------------------------------------------
14 14
15 #-------------------------------------------------------------------------------
15 #-----------------------------------------------------------------------------
16 16 # Imports
17 #-------------------------------------------------------------------------------
17 #-----------------------------------------------------------------------------
18 18
19 19 import copy, time
20 from types import FunctionType as function
20 from types import FunctionType
21 21
22 22 import zope.interface as zi, string
23 23 from twisted.internet import defer, reactor
24 24 from twisted.python import components, log, failure
25 25
26 # from IPython.genutils import time
27
26 from IPython.kernel.util import printer
28 27 from IPython.kernel import engineservice as es, error
29 28 from IPython.kernel import controllerservice as cs
30 29 from IPython.kernel.twistedutil import gatherBoth, DeferredList
31 30
32 from IPython.kernel.pickleutil import can,uncan, CannedFunction
33
34 def canTask(task):
35 t = copy.copy(task)
36 t.depend = can(t.depend)
37 if t.recovery_task:
38 t.recovery_task = canTask(t.recovery_task)
39 return t
31 from IPython.kernel.pickleutil import can, uncan, CannedFunction
40 32
41 def uncanTask(task):
42 t = copy.copy(task)
43 t.depend = uncan(t.depend)
44 if t.recovery_task and t.recovery_task is not task:
45 t.recovery_task = uncanTask(t.recovery_task)
46 return t
33 #-----------------------------------------------------------------------------
34 # Definition of the Task objects
35 #-----------------------------------------------------------------------------
47 36
48 37 time_format = '%Y/%m/%d %H:%M:%S'
49 38
50 class Task(object):
51 r"""Our representation of a task for the `TaskController` interface.
52
53 The user should create instances of this class to represent a task that
54 needs to be done.
55
56 :Parameters:
57 expression : str
58 A str that is valid python code that is the task.
59 pull : str or list of str
60 The names of objects to be pulled as results. If not specified,
61 will return {'result', None}
62 push : dict
63 A dict of objects to be pushed into the engines namespace before
64 execution of the expression.
65 clear_before : boolean
66 Should the engine's namespace be cleared before the task is run.
67 Default=False.
68 clear_after : boolean
69 Should the engine's namespace be cleared after the task is run.
70 Default=False.
71 retries : int
72 The number of times to resumbit the task if it fails. Default=0.
73 recovery_task : Task
74 This is the Task to be run when the task has exhausted its retries
75 Default=None.
76 depend : bool function(properties)
77 This is the dependency function for the Task, which determines
78 whether a task can be run on a Worker. `depend` is called with
79 one argument, the worker's properties dict, and should return
80 True if the worker meets the dependencies or False if it does
81 not.
82 Default=None - run on any worker
83 options : dict
84 Any other keyword options for more elaborate uses of tasks
85
86 Examples
87 --------
39 class ITask(zi.Interface):
40 """
41 This interface provides a generic definition of what constitutes a task.
42
43 There are two sides to a task. First a task needs to take input from
44 a user to determine what work is performed by the task. Second, the
45 task needs to have the logic that knows how to turn that information
46 info specific calls to a worker, through the `IQueuedEngine` interface.
47
48 Many method in this class get two things passed to them: a Deferred
49 and an IQueuedEngine implementer. Such methods should register callbacks
50 on the Deferred that use the IQueuedEngine to accomplish something. See
51 the existing task objects for examples.
52 """
53
54 zi.Attribute('retries','How many times to retry the task')
55 zi.Attribute('recovery_task','A task to try if the initial one fails')
56 zi.Attribute('taskid','the id of the task')
57
58 def start_time(result):
59 """
60 Do anything needed to start the timing of the task.
61
62 Must simply return the result after starting the timers.
63 """
88 64
89 >>> t = Task('dostuff(args)')
90 >>> t = Task('a=5', pull='a')
91 >>> t = Task('a=5\nb=4', pull=['a','b'])
92 >>> t = Task('os.kill(os.getpid(),9)', retries=100) # this is a bad idea
65 def stop_time(result):
66 """
67 Do anything needed to stop the timing of the task.
68
69 Must simply return the result after stopping the timers. This
70 method will usually set attributes that are used by `process_result`
71 in building result of the task.
72 """
73
74 def pre_task(d, queued_engine):
75 """Do something with the queued_engine before the task is run.
76
77 This method should simply add callbacks to the input Deferred
78 that do something with the `queued_engine` before the task is run.
79
80 :Parameters:
81 d : Deferred
82 The deferred that actions should be attached to
83 queued_engine : IQueuedEngine implementer
84 The worker that has been allocated to perform the task
85 """
86
87 def post_task(d, queued_engine):
88 """Do something with the queued_engine after the task is run.
89
90 This method should simply add callbacks to the input Deferred
91 that do something with the `queued_engine` before the task is run.
92
93 :Parameters:
94 d : Deferred
95 The deferred that actions should be attached to
96 queued_engine : IQueuedEngine implementer
97 The worker that has been allocated to perform the task
98 """
99
100 def submit_task(d, queued_engine):
101 """Submit a task using the `queued_engine` we have been allocated.
102
103 When a task is ready to run, this method is called. This method
104 must take the internal information of the task and make suitable
105 calls on the queued_engine to have the actual work done.
106
107 This method should simply add callbacks to the input Deferred
108 that do something with the `queued_engine` before the task is run.
109
110 :Parameters:
111 d : Deferred
112 The deferred that actions should be attached to
113 queued_engine : IQueuedEngine implementer
114 The worker that has been allocated to perform the task
115 """
93 116
94 A dependency case:
95 >>> def hasMPI(props):
96 ... return props.get('mpi') is not None
97 >>> t = Task('mpi.send(blah,blah)', depend = hasMPI)
117 def process_result(d, result, engine_id):
118 """Take a raw task result.
119
120 Objects that implement `ITask` can choose how the result of running
121 the task is presented. This method takes the raw result and
122 does this logic. Two example are the `MapTask` which simply returns
123 the raw result or a `Failure` object and the `StringTask` which
124 returns a `TaskResult` object.
125
126 :Parameters:
127 d : Deferred
128 The deferred that actions should be attached to
129 result : object
130 The raw task result that needs to be wrapped
131 engine_id : int
132 The id of the engine that did the task
133
134 :Returns:
135 The result, as a tuple of the form: (success, result).
136 Here, success is a boolean indicating if the task
137 succeeded or failed and result is the result.
138 """
139
140 def check_depend(properties):
141 """Check properties to see if the task should be run.
142
143 :Parameters:
144 properties : dict
145 A dictionary of properties that an engine has set
146
147 :Returns:
148 True if the task should be run, False otherwise
149 """
150
151 def can_task(self):
152 """Serialize (can) any functions in the task for pickling.
153
154 Subclasses must override this method and make sure that all
155 functions in the task are canned by calling `can` on the
156 function.
157 """
158
159 def uncan_task(self):
160 """Unserialize (uncan) any canned function in the task."""
161
162 class BaseTask(object):
163 """
164 Common fuctionality for all objects implementing `ITask`.
98 165 """
99 166
100 def __init__(self, expression, pull=None, push=None,
101 clear_before=False, clear_after=False, retries=0,
102 recovery_task=None, depend=None, **options):
103 self.expression = expression
104 if isinstance(pull, str):
105 self.pull = [pull]
106 else:
107 self.pull = pull
108 self.push = push
167 zi.implements(ITask)
168
169 def __init__(self, clear_before=False, clear_after=False, retries=0,
170 recovery_task=None, depend=None):
171 """
172 Make a generic task.
173
174 :Parameters:
175 clear_before : boolean
176 Should the engines namespace be cleared before the task
177 is run
178 clear_after : boolean
179 Should the engines namespace be clear after the task is run
180 retries : int
181 The number of times a task should be retries upon failure
182 recovery_task : any task object
183 If a task fails and it has a recovery_task, that is run
184 upon a retry
185 depend : FunctionType
186 A function that is called to test for properties. This function
187 must take one argument, the properties dict and return a boolean
188 """
109 189 self.clear_before = clear_before
110 190 self.clear_after = clear_after
111 self.retries=retries
191 self.retries = retries
112 192 self.recovery_task = recovery_task
113 193 self.depend = depend
114 self.options = options
115 194 self.taskid = None
195
196 def start_time(self, result):
197 """
198 Start the basic timers.
199 """
200 self.start = time.time()
201 self.start_struct = time.localtime()
202 return result
203
204 def stop_time(self, result):
205 """
206 Stop the basic timers.
207 """
208 self.stop = time.time()
209 self.stop_struct = time.localtime()
210 self.duration = self.stop - self.start
211 self.submitted = time.strftime(time_format, self.start_struct)
212 self.completed = time.strftime(time_format)
213 return result
214
215 def pre_task(self, d, queued_engine):
216 """
217 Clear the engine before running the task if clear_before is set.
218 """
219 if self.clear_before:
220 d.addCallback(lambda r: queued_engine.reset())
221
222 def post_task(self, d, queued_engine):
223 """
224 Clear the engine after running the task if clear_after is set.
225 """
226 def reseter(result):
227 queued_engine.reset()
228 return result
229 if self.clear_after:
230 d.addBoth(reseter)
231
232 def submit_task(self, d, queued_engine):
233 raise NotImplementedError('submit_task must be implemented in a subclass')
234
235 def process_result(self, result, engine_id):
236 """
237 Process a task result.
238
239 This is the default `process_result` that just returns the raw
240 result or a `Failure`.
241 """
242 if isinstance(result, failure.Failure):
243 return (False, result)
244 else:
245 return (True, result)
246
247 def check_depend(self, properties):
248 """
249 Calls self.depend(properties) to see if a task should be run.
250 """
251 if self.depend is not None:
252 return self.depend(properties)
253 else:
254 return True
255
256 def can_task(self):
257 self.depend = can(self.depend)
258 if isinstance(self.recovery_task, BaseTask):
259 self.recovery_task.can_task()
260
261 def uncan_task(self):
262 self.depend = uncan(self.depend)
263 if isinstance(self.recovery_task, BaseTask):
264 self.recovery_task.uncan_task()
265
266 class MapTask(BaseTask):
267 """
268 A task that consists of a function and arguments.
269 """
270
271 zi.implements(ITask)
272
273 def __init__(self, function, args=None, kwargs=None, clear_before=False,
274 clear_after=False, retries=0, recovery_task=None, depend=None):
275 """
276 Create a task based on a function, args and kwargs.
277
278 This is a simple type of task that consists of calling:
279 function(*args, **kwargs) and wrapping the result in a `TaskResult`.
280
281 The return value of the function, or a `Failure` wrapping an
282 exception is the task result for this type of task.
283 """
284 BaseTask.__init__(self, clear_before, clear_after, retries,
285 recovery_task, depend)
286 if not isinstance(function, FunctionType):
287 raise TypeError('a task function must be a FunctionType')
288 self.function = function
289 if args is None:
290 self.args = ()
291 else:
292 self.args = args
293 if not isinstance(self.args, (list, tuple)):
294 raise TypeError('a task args must be a list or tuple')
295 if kwargs is None:
296 self.kwargs = {}
297 else:
298 self.kwargs = kwargs
299 if not isinstance(self.kwargs, dict):
300 raise TypeError('a task kwargs must be a dict')
301
302 def submit_task(self, d, queued_engine):
303 d.addCallback(lambda r: queued_engine.push_function(
304 dict(_ipython_task_function=self.function))
305 )
306 d.addCallback(lambda r: queued_engine.push(
307 dict(_ipython_task_args=self.args,_ipython_task_kwargs=self.kwargs))
308 )
309 d.addCallback(lambda r: queued_engine.execute(
310 '_ipython_task_result = _ipython_task_function(*_ipython_task_args,**_ipython_task_kwargs)')
311 )
312 d.addCallback(lambda r: queued_engine.pull('_ipython_task_result'))
313
314 def can_task(self):
315 self.function = can(self.function)
316 BaseTask.can_task(self)
317
318 def uncan_task(self):
319 self.function = uncan(self.function)
320 BaseTask.uncan_task(self)
321
322
323 class StringTask(BaseTask):
324 """
325 A task that consists of a string of Python code to run.
326 """
327
328 def __init__(self, expression, pull=None, push=None,
329 clear_before=False, clear_after=False, retries=0,
330 recovery_task=None, depend=None):
331 """
332 Create a task based on a Python expression and variables
333
334 This type of task lets you push a set of variables to the engines
335 namespace, run a Python string in that namespace and then bring back
336 a different set of Python variables as the result.
337
338 Because this type of task can return many results (through the
339 `pull` keyword argument) it returns a special `TaskResult` object
340 that wraps the pulled variables, statistics about the run and
341 any exceptions raised.
342 """
343 if not isinstance(expression, str):
344 raise TypeError('a task expression must be a string')
345 self.expression = expression
346
347 if pull==None:
348 self.pull = ()
349 elif isinstance(pull, str):
350 self.pull = (pull,)
351 elif isinstance(pull, (list, tuple)):
352 self.pull = pull
353 else:
354 raise TypeError('pull must be str or a sequence of strs')
355
356 if push==None:
357 self.push = {}
358 elif isinstance(push, dict):
359 self.push = push
360 else:
361 raise TypeError('push must be a dict')
362
363 BaseTask.__init__(self, clear_before, clear_after, retries,
364 recovery_task, depend)
116 365
117 class ResultNS:
118 """The result namespace object for use in TaskResult objects as tr.ns.
366 def submit_task(self, d, queued_engine):
367 if self.push is not None:
368 d.addCallback(lambda r: queued_engine.push(self.push))
369
370 d.addCallback(lambda r: queued_engine.execute(self.expression))
371
372 if self.pull is not None:
373 d.addCallback(lambda r: queued_engine.pull(self.pull))
374 else:
375 d.addCallback(lambda r: None)
376
377 def process_result(self, result, engine_id):
378 if isinstance(result, failure.Failure):
379 tr = TaskResult(result, engine_id)
380 else:
381 if self.pull is None:
382 resultDict = {}
383 elif len(self.pull) == 1:
384 resultDict = {self.pull[0]:result}
385 else:
386 resultDict = dict(zip(self.pull, result))
387 tr = TaskResult(resultDict, engine_id)
388 # Assign task attributes
389 tr.submitted = self.submitted
390 tr.completed = self.completed
391 tr.duration = self.duration
392 if hasattr(self,'taskid'):
393 tr.taskid = self.taskid
394 else:
395 tr.taskid = None
396 if isinstance(result, failure.Failure):
397 return (False, tr)
398 else:
399 return (True, tr)
400
401 class ResultNS(object):
402 """
403 A dict like object for holding the results of a task.
404
405 The result namespace object for use in `TaskResult` objects as tr.ns.
119 406 It builds an object from a dictionary, such that it has attributes
120 407 according to the key,value pairs of the dictionary.
121 408
122 409 This works by calling setattr on ALL key,value pairs in the dict. If a user
123 410 chooses to overwrite the `__repr__` or `__getattr__` attributes, they can.
124 411 This can be a bad idea, as it may corrupt standard behavior of the
125 412 ns object.
126 413
127 414 Example
128 415 --------
129 416
130 417 >>> ns = ResultNS({'a':17,'foo':range(3)})
131
132 418 >>> print ns
133 NS{'a': 17, 'foo': [0, 1, 2]}
134
419 NS{'a':17,'foo':range(3)}
135 420 >>> ns.a
136 17
137
421 17
138 422 >>> ns['foo']
139 [0, 1, 2]
423 [0,1,2]
140 424 """
141 425 def __init__(self, dikt):
142 426 for k,v in dikt.iteritems():
143 427 setattr(self,k,v)
144 428
145 429 def __repr__(self):
146 430 l = dir(self)
147 431 d = {}
148 432 for k in l:
149 433 # do not print private objects
150 434 if k[:2] != '__' and k[-2:] != '__':
151 435 d[k] = getattr(self, k)
152 436 return "NS"+repr(d)
153 437
154 438 def __getitem__(self, key):
155 439 return getattr(self, key)
156 440
157 441 class TaskResult(object):
158 442 """
159 An object for returning task results.
443 An object for returning task results for certain types of tasks.
160 444
161 445 This object encapsulates the results of a task. On task
162 446 success it will have a keys attribute that will have a list
163 447 of the variables that have been pulled back. These variables
164 448 are accessible as attributes of this class as well. On
165 449 success the failure attribute will be None.
166 450
167 451 In task failure, keys will be empty, but failure will contain
168 452 the failure object that encapsulates the remote exception.
169 One can also simply call the raiseException() method of
453 One can also simply call the `raise_exception` method of
170 454 this class to re-raise any remote exception in the local
171 455 session.
172 456
173 The TaskResult has a .ns member, which is a property for access
457 The `TaskResult` has a `.ns` member, which is a property for access
174 458 to the results. If the Task had pull=['a', 'b'], then the
175 Task Result will have attributes tr.ns.a, tr.ns.b for those values.
176 Accessing tr.ns will raise the remote failure if the task failed.
459 Task Result will have attributes `tr.ns.a`, `tr.ns.b` for those values.
460 Accessing `tr.ns` will raise the remote failure if the task failed.
177 461
178 The engineid attribute should have the engineid of the engine
179 that ran the task. But, because engines can come and go in
180 the ipython task system, the engineid may not continue to be
462 The `engineid` attribute should have the `engineid` of the engine
463 that ran the task. But, because engines can come and go,
464 the `engineid` may not continue to be
181 465 valid or accurate.
182 466
183 The taskid attribute simply gives the taskid that the task
467 The `taskid` attribute simply gives the `taskid` that the task
184 468 is tracked under.
185 469 """
186 470 taskid = None
187 471
188 472 def _getNS(self):
189 473 if isinstance(self.failure, failure.Failure):
190 474 return self.failure.raiseException()
191 475 else:
192 476 return self._ns
193 477
194 478 def _setNS(self, v):
195 raise Exception("I am protected!")
479 raise Exception("the ns attribute cannot be changed")
196 480
197 481 ns = property(_getNS, _setNS)
198 482
199 483 def __init__(self, results, engineid):
200 484 self.engineid = engineid
201 485 if isinstance(results, failure.Failure):
202 486 self.failure = results
203 487 self.results = {}
204 488 else:
205 489 self.results = results
206 490 self.failure = None
207 491
208 492 self._ns = ResultNS(self.results)
209 493
210 494 self.keys = self.results.keys()
211 495
212 496 def __repr__(self):
213 497 if self.failure is not None:
214 498 contents = self.failure
215 499 else:
216 500 contents = self.results
217 501 return "TaskResult[ID:%r]:%r"%(self.taskid, contents)
218 502
219 503 def __getitem__(self, key):
220 504 if self.failure is not None:
221 self.raiseException()
505 self.raise_exception()
222 506 return self.results[key]
223 507
224 def raiseException(self):
508 def raise_exception(self):
225 509 """Re-raise any remote exceptions in the local python session."""
226 510 if self.failure is not None:
227 511 self.failure.raiseException()
228 512
229 513
514 #-----------------------------------------------------------------------------
515 # The controller side of things
516 #-----------------------------------------------------------------------------
517
230 518 class IWorker(zi.Interface):
231 519 """The Basic Worker Interface.
232 520
233 521 A worked is a representation of an Engine that is ready to run tasks.
234 522 """
235 523
236 524 zi.Attribute("workerid", "the id of the worker")
237 525
238 526 def run(task):
239 527 """Run task in worker's namespace.
240 528
241 529 :Parameters:
242 530 task : a `Task` object
243 531
244 :Returns: `Deferred` to a `TaskResult` object.
532 :Returns: `Deferred` to a tuple of (success, result) where
533 success if a boolean that signifies success or failure
534 and result is the task result.
245 535 """
246 536
247 537
248 538 class WorkerFromQueuedEngine(object):
249 539 """Adapt an `IQueuedEngine` to an `IWorker` object"""
540
250 541 zi.implements(IWorker)
251 542
252 543 def __init__(self, qe):
253 544 self.queuedEngine = qe
254 545 self.workerid = None
255 546
256 547 def _get_properties(self):
257 548 return self.queuedEngine.properties
258 549
259 550 properties = property(_get_properties, lambda self, _:None)
260 551
261 552 def run(self, task):
262 553 """Run task in worker's namespace.
263 554
555 This takes a task and calls methods on the task that actually
556 cause `self.queuedEngine` to do the task. See the methods of
557 `ITask` for more information about how these methods are called.
558
264 559 :Parameters:
265 560 task : a `Task` object
266 561
267 :Returns: `Deferred` to a `TaskResult` object.
562 :Returns: `Deferred` to a tuple of (success, result) where
563 success if a boolean that signifies success or failure
564 and result is the task result.
268 565 """
269 if task.clear_before:
270 d = self.queuedEngine.reset()
271 else:
272 d = defer.succeed(None)
273
274 if task.push is not None:
275 d.addCallback(lambda r: self.queuedEngine.push(task.push))
276
277 d.addCallback(lambda r: self.queuedEngine.execute(task.expression))
278
279 if task.pull is not None:
280 d.addCallback(lambda r: self.queuedEngine.pull(task.pull))
281 else:
282 d.addCallback(lambda r: None)
283
284 def reseter(result):
285 self.queuedEngine.reset()
286 return result
287
288 if task.clear_after:
289 d.addBoth(reseter)
290
291 return d.addBoth(self._zipResults, task.pull, time.time(), time.localtime())
292
293 def _zipResults(self, result, names, start, start_struct):
294 """Callback for construting the TaskResult object."""
295 if isinstance(result, failure.Failure):
296 tr = TaskResult(result, self.queuedEngine.id)
297 else:
298 if names is None:
299 resultDict = {}
300 elif len(names) == 1:
301 resultDict = {names[0]:result}
302 else:
303 resultDict = dict(zip(names, result))
304 tr = TaskResult(resultDict, self.queuedEngine.id)
305 # the time info
306 tr.submitted = time.strftime(time_format, start_struct)
307 tr.completed = time.strftime(time_format)
308 tr.duration = time.time()-start
309 return tr
310
566 d = defer.succeed(None)
567 d.addCallback(task.start_time)
568 task.pre_task(d, self.queuedEngine)
569 task.submit_task(d, self.queuedEngine)
570 task.post_task(d, self.queuedEngine)
571 d.addBoth(task.stop_time)
572 d.addBoth(task.process_result, self.queuedEngine.id)
573 # At this point, there will be (success, result) coming down the line
574 return d
575
311 576
312 577 components.registerAdapter(WorkerFromQueuedEngine, es.IEngineQueued, IWorker)
313 578
314 579 class IScheduler(zi.Interface):
315 580 """The interface for a Scheduler.
316 581 """
317 582 zi.Attribute("nworkers", "the number of unassigned workers")
318 583 zi.Attribute("ntasks", "the number of unscheduled tasks")
319 584 zi.Attribute("workerids", "a list of the worker ids")
320 585 zi.Attribute("taskids", "a list of the task ids")
321 586
322 587 def add_task(task, **flags):
323 588 """Add a task to the queue of the Scheduler.
324 589
325 590 :Parameters:
326 task : a `Task` object
591 task : an `ITask` implementer
327 592 The task to be queued.
328 593 flags : dict
329 594 General keywords for more sophisticated scheduling
330 595 """
331 596
332 597 def pop_task(id=None):
333 """Pops a Task object.
598 """Pops a task object from the queue.
334 599
335 600 This gets the next task to be run. If no `id` is requested, the highest priority
336 601 task is returned.
337 602
338 603 :Parameters:
339 604 id
340 605 The id of the task to be popped. The default (None) is to return
341 606 the highest priority task.
342 607
343 :Returns: a `Task` object
608 :Returns: an `ITask` implementer
344 609
345 610 :Exceptions:
346 611 IndexError : raised if no taskid in queue
347 612 """
348 613
349 614 def add_worker(worker, **flags):
350 615 """Add a worker to the worker queue.
351 616
352 617 :Parameters:
353 worker : an IWorker implementing object
354 flags : General keywords for more sophisticated scheduling
618 worker : an `IWorker` implementer
619 flags : dict
620 General keywords for more sophisticated scheduling
355 621 """
356 622
357 623 def pop_worker(id=None):
358 624 """Pops an IWorker object that is ready to do work.
359 625
360 626 This gets the next IWorker that is ready to do work.
361 627
362 628 :Parameters:
363 629 id : if specified, will pop worker with workerid=id, else pops
364 630 highest priority worker. Defaults to None.
365 631
366 632 :Returns:
367 633 an IWorker object
368 634
369 635 :Exceptions:
370 636 IndexError : raised if no workerid in queue
371 637 """
372 638
373 639 def ready():
374 640 """Returns True if there is something to do, False otherwise"""
375 641
376 642 def schedule():
377 """Returns a tuple of the worker and task pair for the next
378 task to be run.
379 """
643 """Returns (worker,task) pair for the next task to be run."""
380 644
381 645
382 646 class FIFOScheduler(object):
383 """A basic First-In-First-Out (Queue) Scheduler.
384 This is the default Scheduler for the TaskController.
385 See the docstrings for IScheduler for interface details.
647 """
648 A basic First-In-First-Out (Queue) Scheduler.
649
650 This is the default Scheduler for the `TaskController`.
651 See the docstrings for `IScheduler` for interface details.
386 652 """
387 653
388 654 zi.implements(IScheduler)
389 655
390 656 def __init__(self):
391 657 self.tasks = []
392 658 self.workers = []
393 659
394 660 def _ntasks(self):
395 661 return len(self.tasks)
396 662
397 663 def _nworkers(self):
398 664 return len(self.workers)
399 665
400 666 ntasks = property(_ntasks, lambda self, _:None)
401 667 nworkers = property(_nworkers, lambda self, _:None)
402 668
403 669 def _taskids(self):
404 670 return [t.taskid for t in self.tasks]
405 671
406 672 def _workerids(self):
407 673 return [w.workerid for w in self.workers]
408 674
409 675 taskids = property(_taskids, lambda self,_:None)
410 676 workerids = property(_workerids, lambda self,_:None)
411 677
412 678 def add_task(self, task, **flags):
413 679 self.tasks.append(task)
414 680
415 681 def pop_task(self, id=None):
416 682 if id is None:
417 683 return self.tasks.pop(0)
418 684 else:
419 685 for i in range(len(self.tasks)):
420 686 taskid = self.tasks[i].taskid
421 687 if id == taskid:
422 688 return self.tasks.pop(i)
423 689 raise IndexError("No task #%i"%id)
424 690
425 691 def add_worker(self, worker, **flags):
426 692 self.workers.append(worker)
427 693
428 694 def pop_worker(self, id=None):
429 695 if id is None:
430 696 return self.workers.pop(0)
431 697 else:
432 698 for i in range(len(self.workers)):
433 699 workerid = self.workers[i].workerid
434 700 if id == workerid:
435 701 return self.workers.pop(i)
436 702 raise IndexError("No worker #%i"%id)
437 703
438 704 def schedule(self):
439 705 for t in self.tasks:
440 706 for w in self.workers:
441 707 try:# do not allow exceptions to break this
442 cando = t.depend is None or t.depend(w.properties)
708 # Allow the task to check itself using its
709 # check_depend method.
710 cando = t.check_depend(w.properties)
443 711 except:
444 712 cando = False
445 713 if cando:
446 714 return self.pop_worker(w.workerid), self.pop_task(t.taskid)
447 715 return None, None
448 716
449 717
450 718
451 719 class LIFOScheduler(FIFOScheduler):
452 """A Last-In-First-Out (Stack) Scheduler. This scheduler should naively
453 reward fast engines by giving them more jobs. This risks starvation, but
454 only in cases with low load, where starvation does not really matter.
720 """
721 A Last-In-First-Out (Stack) Scheduler.
722
723 This scheduler should naively reward fast engines by giving
724 them more jobs. This risks starvation, but only in cases with
725 low load, where starvation does not really matter.
455 726 """
456 727
457 728 def add_task(self, task, **flags):
458 729 # self.tasks.reverse()
459 730 self.tasks.insert(0, task)
460 731 # self.tasks.reverse()
461 732
462 733 def add_worker(self, worker, **flags):
463 734 # self.workers.reverse()
464 735 self.workers.insert(0, worker)
465 736 # self.workers.reverse()
466 737
467 738
468 739 class ITaskController(cs.IControllerBase):
469 """The Task based interface to a `ControllerService` object
740 """
741 The Task based interface to a `ControllerService` object
470 742
471 743 This adapts a `ControllerService` to the ITaskController interface.
472 744 """
473 745
474 746 def run(task):
475 """Run a task.
747 """
748 Run a task.
476 749
477 750 :Parameters:
478 751 task : an IPython `Task` object
479 752
480 753 :Returns: the integer ID of the task
481 754 """
482 755
483 756 def get_task_result(taskid, block=False):
484 """Get the result of a task by its ID.
757 """
758 Get the result of a task by its ID.
485 759
486 760 :Parameters:
487 761 taskid : int
488 762 the id of the task whose result is requested
489 763
490 :Returns: `Deferred` to (taskid, actualResult) if the task is done, and None
764 :Returns: `Deferred` to the task result if the task is done, and None
491 765 if not.
492 766
493 767 :Exceptions:
494 768 actualResult will be an `IndexError` if no such task has been submitted
495 769 """
496 770
497 771 def abort(taskid):
498 772 """Remove task from queue if task is has not been submitted.
499 773
500 774 If the task has already been submitted, wait for it to finish and discard
501 775 results and prevent resubmission.
502 776
503 777 :Parameters:
504 778 taskid : the id of the task to be aborted
505 779
506 780 :Returns:
507 781 `Deferred` to abort attempt completion. Will be None on success.
508 782
509 783 :Exceptions:
510 784 deferred will fail with `IndexError` if no such task has been submitted
511 785 or the task has already completed.
512 786 """
513 787
514 788 def barrier(taskids):
515 """Block until the list of taskids are completed.
789 """
790 Block until the list of taskids are completed.
516 791
517 792 Returns None on success.
518 793 """
519 794
520 795 def spin():
521 """touch the scheduler, to resume scheduling without submitting
522 a task.
796 """
797 Touch the scheduler, to resume scheduling without submitting a task.
523 798 """
524 799
525 def queue_status(self, verbose=False):
526 """Get a dictionary with the current state of the task queue.
800 def queue_status(verbose=False):
801 """
802 Get a dictionary with the current state of the task queue.
527 803
528 804 If verbose is True, then return lists of taskids, otherwise,
529 805 return the number of tasks with each status.
530 806 """
531 807
808 def clear():
809 """
810 Clear all previously run tasks from the task controller.
811
812 This is needed because the task controller keep all task results
813 in memory. This can be a problem is there are many completed
814 tasks. Users should call this periodically to clean out these
815 cached task results.
816 """
817
532 818
533 819 class TaskController(cs.ControllerAdapterBase):
534 820 """The Task based interface to a Controller object.
535 821
536 822 If you want to use a different scheduler, just subclass this and set
537 823 the `SchedulerClass` member to the *class* of your chosen scheduler.
538 824 """
539 825
540 826 zi.implements(ITaskController)
541 827 SchedulerClass = FIFOScheduler
542 828
543 829 timeout = 30
544 830
545 831 def __init__(self, controller):
546 832 self.controller = controller
547 833 self.controller.on_register_engine_do(self.registerWorker, True)
548 834 self.controller.on_unregister_engine_do(self.unregisterWorker, True)
549 835 self.taskid = 0
550 836 self.failurePenalty = 1 # the time in seconds to penalize
551 837 # a worker for failing a task
552 838 self.pendingTasks = {} # dict of {workerid:(taskid, task)}
553 839 self.deferredResults = {} # dict of {taskid:deferred}
554 840 self.finishedResults = {} # dict of {taskid:actualResult}
555 841 self.workers = {} # dict of {workerid:worker}
556 842 self.abortPending = [] # dict of {taskid:abortDeferred}
557 843 self.idleLater = None # delayed call object for timeout
558 844 self.scheduler = self.SchedulerClass()
559 845
560 846 for id in self.controller.engines.keys():
561 847 self.workers[id] = IWorker(self.controller.engines[id])
562 848 self.workers[id].workerid = id
563 849 self.schedule.add_worker(self.workers[id])
564 850
565 851 def registerWorker(self, id):
566 852 """Called by controller.register_engine."""
567 853 if self.workers.get(id):
568 raise "We already have one! This should not happen."
854 raise ValueError("worker with id %s already exists. This should not happen." % id)
569 855 self.workers[id] = IWorker(self.controller.engines[id])
570 856 self.workers[id].workerid = id
571 857 if not self.pendingTasks.has_key(id):# if not working
572 858 self.scheduler.add_worker(self.workers[id])
573 859 self.distributeTasks()
574 860
575 861 def unregisterWorker(self, id):
576 862 """Called by controller.unregister_engine"""
577 863
578 864 if self.workers.has_key(id):
579 865 try:
580 866 self.scheduler.pop_worker(id)
581 867 except IndexError:
582 868 pass
583 869 self.workers.pop(id)
584 870
585 871 def _pendingTaskIDs(self):
586 872 return [t.taskid for t in self.pendingTasks.values()]
587 873
588 874 #---------------------------------------------------------------------------
589 875 # Interface methods
590 876 #---------------------------------------------------------------------------
591 877
592 878 def run(self, task):
593 """Run a task and return `Deferred` to its taskid."""
879 """
880 Run a task and return `Deferred` to its taskid.
881 """
594 882 task.taskid = self.taskid
595 883 task.start = time.localtime()
596 884 self.taskid += 1
597 885 d = defer.Deferred()
598 886 self.scheduler.add_task(task)
599 # log.msg('Queuing task: %i' % task.taskid)
887 log.msg('Queuing task: %i' % task.taskid)
600 888
601 889 self.deferredResults[task.taskid] = []
602 890 self.distributeTasks()
603 891 return defer.succeed(task.taskid)
604 892
605 893 def get_task_result(self, taskid, block=False):
606 """Returns a `Deferred` to a TaskResult tuple or None."""
607 # log.msg("Getting task result: %i" % taskid)
894 """
895 Returns a `Deferred` to the task result, or None.
896 """
897 log.msg("Getting task result: %i" % taskid)
608 898 if self.finishedResults.has_key(taskid):
609 899 tr = self.finishedResults[taskid]
610 900 return defer.succeed(tr)
611 901 elif self.deferredResults.has_key(taskid):
612 902 if block:
613 903 d = defer.Deferred()
614 904 self.deferredResults[taskid].append(d)
615 905 return d
616 906 else:
617 907 return defer.succeed(None)
618 908 else:
619 909 return defer.fail(IndexError("task ID not registered: %r" % taskid))
620 910
621 911 def abort(self, taskid):
622 """Remove a task from the queue if it has not been run already."""
912 """
913 Remove a task from the queue if it has not been run already.
914 """
623 915 if not isinstance(taskid, int):
624 916 return defer.fail(failure.Failure(TypeError("an integer task id expected: %r" % taskid)))
625 917 try:
626 918 self.scheduler.pop_task(taskid)
627 919 except IndexError, e:
628 920 if taskid in self.finishedResults.keys():
629 921 d = defer.fail(IndexError("Task Already Completed"))
630 922 elif taskid in self.abortPending:
631 923 d = defer.fail(IndexError("Task Already Aborted"))
632 924 elif taskid in self._pendingTaskIDs():# task is pending
633 925 self.abortPending.append(taskid)
634 926 d = defer.succeed(None)
635 927 else:
636 928 d = defer.fail(e)
637 929 else:
638 930 d = defer.execute(self._doAbort, taskid)
639 931
640 932 return d
641 933
642 934 def barrier(self, taskids):
643 935 dList = []
644 936 if isinstance(taskids, int):
645 937 taskids = [taskids]
646 938 for id in taskids:
647 939 d = self.get_task_result(id, block=True)
648 940 dList.append(d)
649 941 d = DeferredList(dList, consumeErrors=1)
650 942 d.addCallbacks(lambda r: None)
651 943 return d
652 944
653 945 def spin(self):
654 946 return defer.succeed(self.distributeTasks())
655 947
656 948 def queue_status(self, verbose=False):
657 949 pending = self._pendingTaskIDs()
658 950 failed = []
659 951 succeeded = []
660 952 for k,v in self.finishedResults.iteritems():
661 953 if not isinstance(v, failure.Failure):
662 954 if hasattr(v,'failure'):
663 955 if v.failure is None:
664 956 succeeded.append(k)
665 957 else:
666 958 failed.append(k)
667 959 scheduled = self.scheduler.taskids
668 960 if verbose:
669 961 result = dict(pending=pending, failed=failed,
670 962 succeeded=succeeded, scheduled=scheduled)
671 963 else:
672 964 result = dict(pending=len(pending),failed=len(failed),
673 965 succeeded=len(succeeded),scheduled=len(scheduled))
674 966 return defer.succeed(result)
675 967
676 968 #---------------------------------------------------------------------------
677 969 # Queue methods
678 970 #---------------------------------------------------------------------------
679 971
680 972 def _doAbort(self, taskid):
681 """Helper function for aborting a pending task."""
682 # log.msg("Task aborted: %i" % taskid)
973 """
974 Helper function for aborting a pending task.
975 """
976 log.msg("Task aborted: %i" % taskid)
683 977 result = failure.Failure(error.TaskAborted())
684 978 self._finishTask(taskid, result)
685 979 if taskid in self.abortPending:
686 980 self.abortPending.remove(taskid)
687 981
688 982 def _finishTask(self, taskid, result):
689 983 dlist = self.deferredResults.pop(taskid)
690 result.taskid = taskid # The TaskResult should save the taskid
984 # result.taskid = taskid # The TaskResult should save the taskid
691 985 self.finishedResults[taskid] = result
692 986 for d in dlist:
693 987 d.callback(result)
694 988
695 989 def distributeTasks(self):
696 """Distribute tasks while self.scheduler has things to do."""
697 # log.msg("distributing Tasks")
990 """
991 Distribute tasks while self.scheduler has things to do.
992 """
993 log.msg("distributing Tasks")
698 994 worker, task = self.scheduler.schedule()
699 995 if not worker and not task:
700 996 if self.idleLater and self.idleLater.called:# we are inside failIdle
701 997 self.idleLater = None
702 998 else:
703 999 self.checkIdle()
704 1000 return False
705 1001 # else something to do:
706 1002 while worker and task:
707 1003 # get worker and task
708 1004 # add to pending
709 1005 self.pendingTasks[worker.workerid] = task
710 1006 # run/link callbacks
711 1007 d = worker.run(task)
712 # log.msg("Running task %i on worker %i" %(task.taskid, worker.workerid))
1008 log.msg("Running task %i on worker %i" %(task.taskid, worker.workerid))
713 1009 d.addBoth(self.taskCompleted, task.taskid, worker.workerid)
714 1010 worker, task = self.scheduler.schedule()
715 1011 # check for idle timeout:
716 1012 self.checkIdle()
717 1013 return True
718 1014
719 1015 def checkIdle(self):
720 1016 if self.idleLater and not self.idleLater.called:
721 1017 self.idleLater.cancel()
722 1018 if self.scheduler.ntasks and self.workers and \
723 1019 self.scheduler.nworkers == len(self.workers):
724 1020 self.idleLater = reactor.callLater(self.timeout, self.failIdle)
725 1021 else:
726 1022 self.idleLater = None
727 1023
728 1024 def failIdle(self):
729 1025 if not self.distributeTasks():
730 1026 while self.scheduler.ntasks:
731 1027 t = self.scheduler.pop_task()
732 1028 msg = "task %i failed to execute due to unmet dependencies"%t.taskid
733 1029 msg += " for %i seconds"%self.timeout
734 # log.msg("Task aborted by timeout: %i" % t.taskid)
1030 log.msg("Task aborted by timeout: %i" % t.taskid)
735 1031 f = failure.Failure(error.TaskTimeout(msg))
736 1032 self._finishTask(t.taskid, f)
737 1033 self.idleLater = None
738 1034
739 1035
740 def taskCompleted(self, result, taskid, workerid):
1036 def taskCompleted(self, success_and_result, taskid, workerid):
741 1037 """This is the err/callback for a completed task."""
1038 success, result = success_and_result
742 1039 try:
743 1040 task = self.pendingTasks.pop(workerid)
744 1041 except:
745 1042 # this should not happen
746 1043 log.msg("Tried to pop bad pending task %i from worker %i"%(taskid, workerid))
747 1044 log.msg("Result: %r"%result)
748 1045 log.msg("Pending tasks: %s"%self.pendingTasks)
749 1046 return
750 1047
751 1048 # Check if aborted while pending
752 1049 aborted = False
753 1050 if taskid in self.abortPending:
754 1051 self._doAbort(taskid)
755 1052 aborted = True
756 1053
757 1054 if not aborted:
758 if result.failure is not None and isinstance(result.failure, failure.Failure): # we failed
1055 if not success:
759 1056 log.msg("Task %i failed on worker %i"% (taskid, workerid))
760 1057 if task.retries > 0: # resubmit
761 1058 task.retries -= 1
762 1059 self.scheduler.add_task(task)
763 1060 s = "Resubmitting task %i, %i retries remaining" %(taskid, task.retries)
764 1061 log.msg(s)
765 1062 self.distributeTasks()
766 elif isinstance(task.recovery_task, Task) and \
1063 elif isinstance(task.recovery_task, BaseTask) and \
767 1064 task.recovery_task.retries > -1:
768 1065 # retries = -1 is to prevent infinite recovery_task loop
769 1066 task.retries = -1
770 1067 task.recovery_task.taskid = taskid
771 1068 task = task.recovery_task
772 1069 self.scheduler.add_task(task)
773 1070 s = "Recovering task %i, %i retries remaining" %(taskid, task.retries)
774 1071 log.msg(s)
775 1072 self.distributeTasks()
776 1073 else: # done trying
777 1074 self._finishTask(taskid, result)
778 1075 # wait a second before readmitting a worker that failed
779 1076 # it may have died, and not yet been unregistered
780 1077 reactor.callLater(self.failurePenalty, self.readmitWorker, workerid)
781 1078 else: # we succeeded
782 # log.msg("Task completed: %i"% taskid)
1079 log.msg("Task completed: %i"% taskid)
783 1080 self._finishTask(taskid, result)
784 1081 self.readmitWorker(workerid)
785 else:# we aborted the task
786 if result.failure is not None and isinstance(result.failure, failure.Failure): # it failed, penalize worker
1082 else: # we aborted the task
1083 if not success:
787 1084 reactor.callLater(self.failurePenalty, self.readmitWorker, workerid)
788 1085 else:
789 1086 self.readmitWorker(workerid)
790 1087
791 1088 def readmitWorker(self, workerid):
792 """Readmit a worker to the scheduler.
1089 """
1090 Readmit a worker to the scheduler.
793 1091
794 1092 This is outside `taskCompleted` because of the `failurePenalty` being
795 1093 implemented through `reactor.callLater`.
796 1094 """
797 1095
798 1096 if workerid in self.workers.keys() and workerid not in self.pendingTasks.keys():
799 1097 self.scheduler.add_worker(self.workers[workerid])
800 1098 self.distributeTasks()
1099
1100 def clear(self):
1101 """
1102 Clear all previously run tasks from the task controller.
1103
1104 This is needed because the task controller keep all task results
1105 in memory. This can be a problem is there are many completed
1106 tasks. Users should call this periodically to clean out these
1107 cached task results.
1108 """
1109 self.finishedResults = {}
1110 return defer.succeed(None)
801 1111
802 1112
803 1113 components.registerAdapter(TaskController, cs.IControllerBase, ITaskController)
@@ -1,161 +1,180 b''
1 1 # encoding: utf-8
2 2 # -*- test-case-name: IPython.kernel.tests.test_taskcontrollerxmlrpc -*-
3 3
4 """The Generic Task Client object.
5
6 This must be subclassed based on your connection method.
4 """
5 A blocking version of the task client.
7 6 """
8 7
9 8 __docformat__ = "restructuredtext en"
10 9
11 10 #-------------------------------------------------------------------------------
12 11 # Copyright (C) 2008 The IPython Development Team
13 12 #
14 13 # Distributed under the terms of the BSD License. The full license is in
15 14 # the file COPYING, distributed as part of this software.
16 15 #-------------------------------------------------------------------------------
17 16
18 17 #-------------------------------------------------------------------------------
19 18 # Imports
20 19 #-------------------------------------------------------------------------------
21 20
22 21 from zope.interface import Interface, implements
23 22 from twisted.python import components, log
24 23
25 24 from IPython.kernel.twistedutil import blockingCallFromThread
26 25 from IPython.kernel import task, error
26 from IPython.kernel.mapper import (
27 SynchronousTaskMapper,
28 ITaskMapperFactory,
29 IMapper
30 )
31 from IPython.kernel.parallelfunction import (
32 ParallelFunction,
33 ITaskParallelDecorator
34 )
27 35
28 36 #-------------------------------------------------------------------------------
29 # Connecting Task Client
37 # The task client
30 38 #-------------------------------------------------------------------------------
31 39
32 class InteractiveTaskClient(object):
33
34 def irun(self, *args, **kwargs):
35 """Run a task on the `TaskController`.
36
37 This method is a shorthand for run(task) and its arguments are simply
38 passed onto a `Task` object:
39
40 irun(*args, **kwargs) -> run(Task(*args, **kwargs))
41
42 :Parameters:
43 expression : str
44 A str that is valid python code that is the task.
45 pull : str or list of str
46 The names of objects to be pulled as results.
47 push : dict
48 A dict of objects to be pushed into the engines namespace before
49 execution of the expression.
50 clear_before : boolean
51 Should the engine's namespace be cleared before the task is run.
52 Default=False.
53 clear_after : boolean
54 Should the engine's namespace be cleared after the task is run.
55 Default=False.
56 retries : int
57 The number of times to resumbit the task if it fails. Default=0.
58 options : dict
59 Any other keyword options for more elaborate uses of tasks
60
61 :Returns: A `TaskResult` object.
62 """
63 block = kwargs.pop('block', False)
64 if len(args) == 1 and isinstance(args[0], task.Task):
65 t = args[0]
66 else:
67 t = task.Task(*args, **kwargs)
68 taskid = self.run(t)
69 print "TaskID = %i"%taskid
70 if block:
71 return self.get_task_result(taskid, block)
72 else:
73 return taskid
74
75 40 class IBlockingTaskClient(Interface):
76 41 """
77 An interface for blocking task clients.
42 A vague interface of the blocking task client
78 43 """
79 44 pass
80 45
81
82 class BlockingTaskClient(InteractiveTaskClient):
46 class BlockingTaskClient(object):
83 47 """
84 This class provides a blocking task client.
48 A blocking task client that adapts a non-blocking one.
85 49 """
86 50
87 implements(IBlockingTaskClient)
51 implements(
52 IBlockingTaskClient,
53 ITaskMapperFactory,
54 IMapper,
55 ITaskParallelDecorator
56 )
88 57
89 58 def __init__(self, task_controller):
90 59 self.task_controller = task_controller
91 60 self.block = True
92 61
93 def run(self, task):
94 """
95 Run a task and return a task id that can be used to get the task result.
62 def run(self, task, block=False):
63 """Run a task on the `TaskController`.
64
65 See the documentation of the `MapTask` and `StringTask` classes for
66 details on how to build a task of different types.
96 67
97 68 :Parameters:
98 task : `Task`
99 The `Task` object to run
69 task : an `ITask` implementer
70
71 :Returns: The int taskid of the submitted task. Pass this to
72 `get_task_result` to get the `TaskResult` object.
100 73 """
101 return blockingCallFromThread(self.task_controller.run, task)
74 tid = blockingCallFromThread(self.task_controller.run, task)
75 if block:
76 return self.get_task_result(tid, block=True)
77 else:
78 return tid
102 79
103 80 def get_task_result(self, taskid, block=False):
104 81 """
105 Get or poll for a task result.
82 Get a task result by taskid.
106 83
107 84 :Parameters:
108 85 taskid : int
109 The id of the task whose result to get
86 The taskid of the task to be retrieved.
110 87 block : boolean
111 If True, wait until the task is done and then result the
112 `TaskResult` object. If False, just poll for the result and
113 return None if the task is not done.
88 Should I block until the task is done?
89
90 :Returns: A `TaskResult` object that encapsulates the task result.
114 91 """
115 92 return blockingCallFromThread(self.task_controller.get_task_result,
116 93 taskid, block)
117 94
118 95 def abort(self, taskid):
119 96 """
120 Abort a task by task id if it has not been started.
97 Abort a task by taskid.
98
99 :Parameters:
100 taskid : int
101 The taskid of the task to be aborted.
121 102 """
122 103 return blockingCallFromThread(self.task_controller.abort, taskid)
123 104
124 105 def barrier(self, taskids):
125 """
126 Wait for a set of tasks to finish.
106 """Block until a set of tasks are completed.
127 107
128 108 :Parameters:
129 taskids : list of ints
130 A list of task ids to wait for.
109 taskids : list, tuple
110 A sequence of taskids to block on.
131 111 """
132 112 return blockingCallFromThread(self.task_controller.barrier, taskids)
133 113
134 114 def spin(self):
135 115 """
136 Cause the scheduler to schedule tasks.
116 Touch the scheduler, to resume scheduling without submitting a task.
137 117
138 118 This method only needs to be called in unusual situations where the
139 scheduler is idle for some reason.
119 scheduler is idle for some reason.
140 120 """
141 121 return blockingCallFromThread(self.task_controller.spin)
142 122
143 123 def queue_status(self, verbose=False):
144 124 """
145 125 Get a dictionary with the current state of the task queue.
146 126
147 127 :Parameters:
148 128 verbose : boolean
149 129 If True, return a list of taskids. If False, simply give
150 130 the number of tasks with each status.
151 131
152 132 :Returns:
153 133 A dict with the queue status.
154 134 """
155 135 return blockingCallFromThread(self.task_controller.queue_status, verbose)
136
137 def clear(self):
138 """
139 Clear all previously run tasks from the task controller.
140
141 This is needed because the task controller keep all task results
142 in memory. This can be a problem is there are many completed
143 tasks. Users should call this periodically to clean out these
144 cached task results.
145 """
146 return blockingCallFromThread(self.task_controller.clear)
147
148 def map(self, func, *sequences):
149 """
150 Apply func to *sequences elementwise. Like Python's builtin map.
151
152 This version is load balanced.
153 """
154 return self.mapper().map(func, *sequences)
156 155
156 def mapper(self, clear_before=False, clear_after=False, retries=0,
157 recovery_task=None, depend=None, block=True):
158 """
159 Create an `IMapper` implementer with a given set of arguments.
160
161 The `IMapper` created using a task controller is load balanced.
162
163 See the documentation for `IPython.kernel.task.BaseTask` for
164 documentation on the arguments to this method.
165 """
166 return SynchronousTaskMapper(self, clear_before=clear_before,
167 clear_after=clear_after, retries=retries,
168 recovery_task=recovery_task, depend=depend, block=block)
169
170 def parallel(self, clear_before=False, clear_after=False, retries=0,
171 recovery_task=None, depend=None, block=True):
172 mapper = self.mapper(clear_before, clear_after, retries,
173 recovery_task, depend, block)
174 pf = ParallelFunction(mapper)
175 return pf
157 176
158 177 components.registerAdapter(BlockingTaskClient,
159 178 task.ITaskController, IBlockingTaskClient)
160 179
161 180
@@ -1,267 +1,329 b''
1 1 # encoding: utf-8
2 2 # -*- test-case-name: IPython.kernel.tests.test_taskxmlrpc -*-
3 3 """A Foolscap interface to a TaskController.
4 4
5 5 This class lets Foolscap clients talk to a TaskController.
6 6 """
7 7
8 8 __docformat__ = "restructuredtext en"
9 9
10 10 #-------------------------------------------------------------------------------
11 11 # Copyright (C) 2008 The IPython Development Team
12 12 #
13 13 # Distributed under the terms of the BSD License. The full license is in
14 14 # the file COPYING, distributed as part of this software.
15 15 #-------------------------------------------------------------------------------
16 16
17 17 #-------------------------------------------------------------------------------
18 18 # Imports
19 19 #-------------------------------------------------------------------------------
20 20
21 21 import cPickle as pickle
22 22 import xmlrpclib, copy
23 23
24 24 from zope.interface import Interface, implements
25 25 from twisted.internet import defer
26 26 from twisted.python import components, failure
27 27
28 28 from foolscap import Referenceable
29 29
30 30 from IPython.kernel.twistedutil import blockingCallFromThread
31 31 from IPython.kernel import error, task as taskmodule, taskclient
32 32 from IPython.kernel.pickleutil import can, uncan
33 33 from IPython.kernel.clientinterfaces import (
34 34 IFCClientInterfaceProvider,
35 35 IBlockingClientAdaptor
36 36 )
37 from IPython.kernel.mapper import (
38 TaskMapper,
39 ITaskMapperFactory,
40 IMapper
41 )
42 from IPython.kernel.parallelfunction import (
43 ParallelFunction,
44 ITaskParallelDecorator
45 )
37 46
38 47 #-------------------------------------------------------------------------------
39 48 # The Controller side of things
40 49 #-------------------------------------------------------------------------------
41 50
42 51
43 52 class IFCTaskController(Interface):
44 53 """Foolscap interface to task controller.
45 54
46 See the documentation of ITaskController for documentation about the methods.
55 See the documentation of `ITaskController` for more information.
47 56 """
48 def remote_run(request, binTask):
57 def remote_run(binTask):
49 58 """"""
50 59
51 def remote_abort(request, taskid):
60 def remote_abort(taskid):
52 61 """"""
53 62
54 def remote_get_task_result(request, taskid, block=False):
63 def remote_get_task_result(taskid, block=False):
55 64 """"""
56 65
57 def remote_barrier(request, taskids):
66 def remote_barrier(taskids):
67 """"""
68
69 def remote_spin():
58 70 """"""
59 71
60 def remote_spin(request):
72 def remote_queue_status(verbose):
61 73 """"""
62 74
63 def remote_queue_status(request, verbose):
75 def remote_clear():
64 76 """"""
65 77
66 78
67 79 class FCTaskControllerFromTaskController(Referenceable):
68 """XML-RPC attachmeot for controller.
69
70 See IXMLRPCTaskController and ITaskController (and its children) for documentation.
71 80 """
81 Adapt a `TaskController` to an `IFCTaskController`
82
83 This class is used to expose a `TaskController` over the wire using
84 the Foolscap network protocol.
85 """
86
72 87 implements(IFCTaskController, IFCClientInterfaceProvider)
73 88
74 89 def __init__(self, taskController):
75 90 self.taskController = taskController
76 91
77 92 #---------------------------------------------------------------------------
78 93 # Non interface methods
79 94 #---------------------------------------------------------------------------
80 95
81 96 def packageFailure(self, f):
82 97 f.cleanFailure()
83 98 return self.packageSuccess(f)
84 99
85 100 def packageSuccess(self, obj):
86 101 serial = pickle.dumps(obj, 2)
87 102 return serial
88 103
89 104 #---------------------------------------------------------------------------
90 105 # ITaskController related methods
91 106 #---------------------------------------------------------------------------
92 107
93 108 def remote_run(self, ptask):
94 109 try:
95 ctask = pickle.loads(ptask)
96 task = taskmodule.uncanTask(ctask)
110 task = pickle.loads(ptask)
111 task.uncan_task()
97 112 except:
98 113 d = defer.fail(pickle.UnpickleableError("Could not unmarshal task"))
99 114 else:
100 115 d = self.taskController.run(task)
101 116 d.addCallback(self.packageSuccess)
102 117 d.addErrback(self.packageFailure)
103 118 return d
104 119
105 120 def remote_abort(self, taskid):
106 121 d = self.taskController.abort(taskid)
107 122 d.addCallback(self.packageSuccess)
108 123 d.addErrback(self.packageFailure)
109 124 return d
110 125
111 126 def remote_get_task_result(self, taskid, block=False):
112 127 d = self.taskController.get_task_result(taskid, block)
113 128 d.addCallback(self.packageSuccess)
114 129 d.addErrback(self.packageFailure)
115 130 return d
116 131
117 132 def remote_barrier(self, taskids):
118 133 d = self.taskController.barrier(taskids)
119 134 d.addCallback(self.packageSuccess)
120 135 d.addErrback(self.packageFailure)
121 136 return d
122 137
123 138 def remote_spin(self):
124 139 d = self.taskController.spin()
125 140 d.addCallback(self.packageSuccess)
126 141 d.addErrback(self.packageFailure)
127 142 return d
128 143
129 144 def remote_queue_status(self, verbose):
130 145 d = self.taskController.queue_status(verbose)
131 146 d.addCallback(self.packageSuccess)
132 147 d.addErrback(self.packageFailure)
133 148 return d
134 149
150 def remote_clear(self):
151 return self.taskController.clear()
152
135 153 def remote_get_client_name(self):
136 154 return 'IPython.kernel.taskfc.FCTaskClient'
137 155
138 156 components.registerAdapter(FCTaskControllerFromTaskController,
139 157 taskmodule.ITaskController, IFCTaskController)
140 158
141 159
142 160 #-------------------------------------------------------------------------------
143 161 # The Client side of things
144 162 #-------------------------------------------------------------------------------
145 163
146 164 class FCTaskClient(object):
147 """XML-RPC based TaskController client that implements ITaskController.
148
149 :Parameters:
150 addr : (ip, port)
151 The ip (str) and port (int) tuple of the `TaskController`.
152 165 """
153 implements(taskmodule.ITaskController, IBlockingClientAdaptor)
166 Client class for Foolscap exposed `TaskController`.
167
168 This class is an adapter that makes a `RemoteReference` to a
169 `TaskController` look like an actual `ITaskController` on the client side.
170
171 This class also implements `IBlockingClientAdaptor` so that clients can
172 automatically get a blocking version of this class.
173 """
174
175 implements(
176 taskmodule.ITaskController,
177 IBlockingClientAdaptor,
178 ITaskMapperFactory,
179 IMapper,
180 ITaskParallelDecorator
181 )
154 182
155 183 def __init__(self, remote_reference):
156 184 self.remote_reference = remote_reference
157 185
158 186 #---------------------------------------------------------------------------
159 187 # Non interface methods
160 188 #---------------------------------------------------------------------------
161 189
162 190 def unpackage(self, r):
163 191 return pickle.loads(r)
164 192
165 193 #---------------------------------------------------------------------------
166 194 # ITaskController related methods
167 195 #---------------------------------------------------------------------------
168 196 def run(self, task):
169 197 """Run a task on the `TaskController`.
170 198
171 :Parameters:
172 task : a `Task` object
173
174 The Task object is created using the following signature:
175
176 Task(expression, pull=None, push={}, clear_before=False,
177 clear_after=False, retries=0, **options):)
199 See the documentation of the `MapTask` and `StringTask` classes for
200 details on how to build a task of different types.
178 201
179 The meaning of the arguments is as follows:
202 :Parameters:
203 task : an `ITask` implementer
180 204
181 :Task Parameters:
182 expression : str
183 A str that is valid python code that is the task.
184 pull : str or list of str
185 The names of objects to be pulled as results.
186 push : dict
187 A dict of objects to be pushed into the engines namespace before
188 execution of the expression.
189 clear_before : boolean
190 Should the engine's namespace be cleared before the task is run.
191 Default=False.
192 clear_after : boolean
193 Should the engine's namespace be cleared after the task is run.
194 Default=False.
195 retries : int
196 The number of times to resumbit the task if it fails. Default=0.
197 options : dict
198 Any other keyword options for more elaborate uses of tasks
199
200 205 :Returns: The int taskid of the submitted task. Pass this to
201 206 `get_task_result` to get the `TaskResult` object.
202 207 """
203 assert isinstance(task, taskmodule.Task), "task must be a Task object!"
204 ctask = taskmodule.canTask(task) # handles arbitrary function in .depend
205 # as well as arbitrary recovery_task chains
206 ptask = pickle.dumps(ctask, 2)
208 assert isinstance(task, taskmodule.BaseTask), "task must be a Task object!"
209 task.can_task()
210 ptask = pickle.dumps(task, 2)
211 task.uncan_task()
207 212 d = self.remote_reference.callRemote('run', ptask)
208 213 d.addCallback(self.unpackage)
209 214 return d
210 215
211 216 def get_task_result(self, taskid, block=False):
212 """The task result by taskid.
217 """
218 Get a task result by taskid.
213 219
214 220 :Parameters:
215 221 taskid : int
216 222 The taskid of the task to be retrieved.
217 223 block : boolean
218 224 Should I block until the task is done?
219 225
220 226 :Returns: A `TaskResult` object that encapsulates the task result.
221 227 """
222 228 d = self.remote_reference.callRemote('get_task_result', taskid, block)
223 229 d.addCallback(self.unpackage)
224 230 return d
225 231
226 232 def abort(self, taskid):
227 """Abort a task by taskid.
233 """
234 Abort a task by taskid.
228 235
229 236 :Parameters:
230 237 taskid : int
231 238 The taskid of the task to be aborted.
232 block : boolean
233 Should I block until the task is aborted.
234 239 """
235 240 d = self.remote_reference.callRemote('abort', taskid)
236 241 d.addCallback(self.unpackage)
237 242 return d
238 243
239 244 def barrier(self, taskids):
240 """Block until all tasks are completed.
245 """Block until a set of tasks are completed.
241 246
242 247 :Parameters:
243 248 taskids : list, tuple
244 249 A sequence of taskids to block on.
245 250 """
246 251 d = self.remote_reference.callRemote('barrier', taskids)
247 252 d.addCallback(self.unpackage)
248 253 return d
249 254
250 255 def spin(self):
251 """touch the scheduler, to resume scheduling without submitting
252 a task.
256 """
257 Touch the scheduler, to resume scheduling without submitting a task.
258
259 This method only needs to be called in unusual situations where the
260 scheduler is idle for some reason.
253 261 """
254 262 d = self.remote_reference.callRemote('spin')
255 263 d.addCallback(self.unpackage)
256 264 return d
257 265
258 266 def queue_status(self, verbose=False):
259 """Return a dict with the status of the task queue."""
267 """
268 Get a dictionary with the current state of the task queue.
269
270 :Parameters:
271 verbose : boolean
272 If True, return a list of taskids. If False, simply give
273 the number of tasks with each status.
274
275 :Returns:
276 A dict with the queue status.
277 """
260 278 d = self.remote_reference.callRemote('queue_status', verbose)
261 279 d.addCallback(self.unpackage)
262 280 return d
263 281
282 def clear(self):
283 """
284 Clear all previously run tasks from the task controller.
285
286 This is needed because the task controller keep all task results
287 in memory. This can be a problem is there are many completed
288 tasks. Users should call this periodically to clean out these
289 cached task results.
290 """
291 d = self.remote_reference.callRemote('clear')
292 return d
293
264 294 def adapt_to_blocking_client(self):
295 """
296 Wrap self in a blocking version that implements `IBlockingTaskClient.
297 """
265 298 from IPython.kernel.taskclient import IBlockingTaskClient
266 299 return IBlockingTaskClient(self)
300
301 def map(self, func, *sequences):
302 """
303 Apply func to *sequences elementwise. Like Python's builtin map.
304
305 This version is load balanced.
306 """
307 return self.mapper().map(func, *sequences)
308
309 def mapper(self, clear_before=False, clear_after=False, retries=0,
310 recovery_task=None, depend=None, block=True):
311 """
312 Create an `IMapper` implementer with a given set of arguments.
313
314 The `IMapper` created using a task controller is load balanced.
315
316 See the documentation for `IPython.kernel.task.BaseTask` for
317 documentation on the arguments to this method.
318 """
319 return TaskMapper(self, clear_before=clear_before,
320 clear_after=clear_after, retries=retries,
321 recovery_task=recovery_task, depend=depend, block=block)
322
323 def parallel(self, clear_before=False, clear_after=False, retries=0,
324 recovery_task=None, depend=None, block=True):
325 mapper = self.mapper(clear_before, clear_after, retries,
326 recovery_task, depend, block)
327 pf = ParallelFunction(mapper)
328 return pf
267 329
@@ -1,373 +1,372 b''
1 1 # encoding: utf-8
2 2
3 3 """Test template for complete engine object"""
4 4
5 5 __docformat__ = "restructuredtext en"
6 6
7 7 #-------------------------------------------------------------------------------
8 8 # Copyright (C) 2008 The IPython Development Team
9 9 #
10 10 # Distributed under the terms of the BSD License. The full license is in
11 11 # the file COPYING, distributed as part of this software.
12 12 #-------------------------------------------------------------------------------
13 13
14 14 #-------------------------------------------------------------------------------
15 15 # Imports
16 16 #-------------------------------------------------------------------------------
17 17
18 18 import cPickle as pickle
19 19
20 20 from twisted.internet import defer, reactor
21 21 from twisted.python import failure
22 22 from twisted.application import service
23 23 import zope.interface as zi
24 24
25 25 from IPython.kernel import newserialized
26 26 from IPython.kernel import error
27 27 from IPython.kernel.pickleutil import can, uncan
28 28 import IPython.kernel.engineservice as es
29 29 from IPython.kernel.core.interpreter import Interpreter
30 30 from IPython.testing.parametric import Parametric, parametric
31 31
32 32 #-------------------------------------------------------------------------------
33 33 # Tests
34 34 #-------------------------------------------------------------------------------
35 35
36 36
37 37 # A sequence of valid commands run through execute
38 38 validCommands = ['a=5',
39 39 'b=10',
40 40 'a=5; b=10; c=a+b',
41 41 'import math; 2.0*math.pi',
42 42 """def f():
43 43 result = 0.0
44 44 for i in range(10):
45 45 result += i
46 46 """,
47 47 'if 1<2: a=5',
48 48 """import time
49 49 time.sleep(0.1)""",
50 50 """from math import cos;
51 51 x = 1.0*cos(0.5)""", # Semicolons lead to Discard ast nodes that should be discarded
52 52 """from sets import Set
53 53 s = Set()
54 54 """, # Trailing whitespace should be allowed.
55 55 """import math
56 56 math.cos(1.0)""", # Test a method call with a discarded return value
57 57 """x=1.0234
58 58 a=5; b=10""", # Test an embedded semicolon
59 59 """x=1.0234
60 60 a=5; b=10;""" # Test both an embedded and trailing semicolon
61 61 ]
62 62
63 63 # A sequence of commands that raise various exceptions
64 64 invalidCommands = [('a=1/0',ZeroDivisionError),
65 65 ('print v',NameError),
66 66 ('l=[];l[0]',IndexError),
67 67 ("d={};d['a']",KeyError),
68 68 ("assert 1==0",AssertionError),
69 69 ("import abababsdbfsbaljasdlja",ImportError),
70 70 ("raise Exception()",Exception)]
71 71
72 72 def testf(x):
73 73 return 2.0*x
74 74
75 75 globala = 99
76 76
77 77 def testg(x):
78 78 return globala*x
79 79
80 80 class IEngineCoreTestCase(object):
81 81 """Test an IEngineCore implementer."""
82 82
83 83 def createShell(self):
84 84 return Interpreter()
85 85
86 86 def catchQueueCleared(self, f):
87 87 try:
88 88 f.raiseException()
89 89 except error.QueueCleared:
90 90 pass
91 91
92 92 def testIEngineCoreInterface(self):
93 93 """Does self.engine claim to implement IEngineCore?"""
94 94 self.assert_(es.IEngineCore.providedBy(self.engine))
95 95
96 96 def testIEngineCoreInterfaceMethods(self):
97 97 """Does self.engine have the methods and attributes in IEngineCore."""
98 98 for m in list(es.IEngineCore):
99 99 self.assert_(hasattr(self.engine, m))
100 100
101 101 def testIEngineCoreDeferreds(self):
102 102 d = self.engine.execute('a=5')
103 103 d.addCallback(lambda _: self.engine.pull('a'))
104 104 d.addCallback(lambda _: self.engine.get_result())
105 105 d.addCallback(lambda _: self.engine.keys())
106 106 d.addCallback(lambda _: self.engine.push(dict(a=10)))
107 107 return d
108 108
109 109 def runTestExecute(self, cmd):
110 110 self.shell = Interpreter()
111 111 actual = self.shell.execute(cmd)
112 112 def compare(computed):
113 113 actual['id'] = computed['id']
114 114 self.assertEquals(actual, computed)
115 115 d = self.engine.execute(cmd)
116 116 d.addCallback(compare)
117 117 return d
118 118
119 119 @parametric
120 120 def testExecute(cls):
121 121 return [(cls.runTestExecute, cmd) for cmd in validCommands]
122 122
123 123 def runTestExecuteFailures(self, cmd, exc):
124 124 def compare(f):
125 125 self.assertRaises(exc, f.raiseException)
126 126 d = self.engine.execute(cmd)
127 127 d.addErrback(compare)
128 128 return d
129 129
130 130 @parametric
131 131 def testExecuteFailures(cls):
132 132 return [(cls.runTestExecuteFailures, cmd, exc) for cmd, exc in invalidCommands]
133 133
134 134 def runTestPushPull(self, o):
135 135 d = self.engine.push(dict(a=o))
136 136 d.addCallback(lambda r: self.engine.pull('a'))
137 137 d.addCallback(lambda r: self.assertEquals(o,r))
138 138 return d
139 139
140 140 @parametric
141 141 def testPushPull(cls):
142 142 objs = [10,"hi there",1.2342354,{"p":(1,2)},None]
143 143 return [(cls.runTestPushPull, o) for o in objs]
144 144
145 145 def testPullNameError(self):
146 146 d = self.engine.push(dict(a=5))
147 147 d.addCallback(lambda _:self.engine.reset())
148 148 d.addCallback(lambda _: self.engine.pull("a"))
149 149 d.addErrback(lambda f: self.assertRaises(NameError, f.raiseException))
150 150 return d
151 151
152 152 def testPushPullFailures(self):
153 153 d = self.engine.pull('a')
154 154 d.addErrback(lambda f: self.assertRaises(NameError, f.raiseException))
155 155 d.addCallback(lambda _: self.engine.execute('l = lambda x: x'))
156 156 d.addCallback(lambda _: self.engine.pull('l'))
157 157 d.addErrback(lambda f: self.assertRaises(pickle.PicklingError, f.raiseException))
158 158 d.addCallback(lambda _: self.engine.push(dict(l=lambda x: x)))
159 159 d.addErrback(lambda f: self.assertRaises(pickle.PicklingError, f.raiseException))
160 160 return d
161 161
162 162 def testPushPullArray(self):
163 163 try:
164 164 import numpy
165 165 except:
166 print 'no numpy, ',
167 166 return
168 167 a = numpy.random.random(1000)
169 168 d = self.engine.push(dict(a=a))
170 169 d.addCallback(lambda _: self.engine.pull('a'))
171 170 d.addCallback(lambda b: b==a)
172 171 d.addCallback(lambda c: c.all())
173 172 return self.assertDeferredEquals(d, True)
174 173
175 174 def testPushFunction(self):
176 175
177 176 d = self.engine.push_function(dict(f=testf))
178 177 d.addCallback(lambda _: self.engine.execute('result = f(10)'))
179 178 d.addCallback(lambda _: self.engine.pull('result'))
180 179 d.addCallback(lambda r: self.assertEquals(r, testf(10)))
181 180 return d
182 181
183 182 def testPullFunction(self):
184 183 d = self.engine.push_function(dict(f=testf, g=testg))
185 184 d.addCallback(lambda _: self.engine.pull_function(('f','g')))
186 185 d.addCallback(lambda r: self.assertEquals(r[0](10), testf(10)))
187 186 return d
188 187
189 188 def testPushFunctionGlobal(self):
190 189 """Make sure that pushed functions pick up the user's namespace for globals."""
191 190 d = self.engine.push(dict(globala=globala))
192 191 d.addCallback(lambda _: self.engine.push_function(dict(g=testg)))
193 192 d.addCallback(lambda _: self.engine.execute('result = g(10)'))
194 193 d.addCallback(lambda _: self.engine.pull('result'))
195 194 d.addCallback(lambda r: self.assertEquals(r, testg(10)))
196 195 return d
197 196
198 197 def testGetResultFailure(self):
199 198 d = self.engine.get_result(None)
200 199 d.addErrback(lambda f: self.assertRaises(IndexError, f.raiseException))
201 200 d.addCallback(lambda _: self.engine.get_result(10))
202 201 d.addErrback(lambda f: self.assertRaises(IndexError, f.raiseException))
203 202 return d
204 203
205 204 def runTestGetResult(self, cmd):
206 205 self.shell = Interpreter()
207 206 actual = self.shell.execute(cmd)
208 207 def compare(computed):
209 208 actual['id'] = computed['id']
210 209 self.assertEquals(actual, computed)
211 210 d = self.engine.execute(cmd)
212 211 d.addCallback(lambda r: self.engine.get_result(r['number']))
213 212 d.addCallback(compare)
214 213 return d
215 214
216 215 @parametric
217 216 def testGetResult(cls):
218 217 return [(cls.runTestGetResult, cmd) for cmd in validCommands]
219 218
220 219 def testGetResultDefault(self):
221 220 cmd = 'a=5'
222 221 shell = self.createShell()
223 222 shellResult = shell.execute(cmd)
224 223 def popit(dikt, key):
225 224 dikt.pop(key)
226 225 return dikt
227 226 d = self.engine.execute(cmd)
228 227 d.addCallback(lambda _: self.engine.get_result())
229 228 d.addCallback(lambda r: self.assertEquals(shellResult, popit(r,'id')))
230 229 return d
231 230
232 231 def testKeys(self):
233 232 d = self.engine.keys()
234 233 d.addCallback(lambda s: isinstance(s, list))
235 234 d.addCallback(lambda r: self.assertEquals(r, True))
236 235 return d
237 236
238 237 Parametric(IEngineCoreTestCase)
239 238
240 239 class IEngineSerializedTestCase(object):
241 240 """Test an IEngineCore implementer."""
242 241
243 242 def testIEngineSerializedInterface(self):
244 243 """Does self.engine claim to implement IEngineCore?"""
245 244 self.assert_(es.IEngineSerialized.providedBy(self.engine))
246 245
247 246 def testIEngineSerializedInterfaceMethods(self):
248 247 """Does self.engine have the methods and attributes in IEngireCore."""
249 248 for m in list(es.IEngineSerialized):
250 249 self.assert_(hasattr(self.engine, m))
251 250
252 251 def testIEngineSerializedDeferreds(self):
253 252 dList = []
254 253 d = self.engine.push_serialized(dict(key=newserialized.serialize(12345)))
255 254 self.assert_(isinstance(d, defer.Deferred))
256 255 dList.append(d)
257 256 d = self.engine.pull_serialized('key')
258 257 self.assert_(isinstance(d, defer.Deferred))
259 258 dList.append(d)
260 259 D = defer.DeferredList(dList)
261 260 return D
262 261
263 262 def testPushPullSerialized(self):
264 263 objs = [10,"hi there",1.2342354,{"p":(1,2)}]
265 264 d = defer.succeed(None)
266 265 for o in objs:
267 266 self.engine.push_serialized(dict(key=newserialized.serialize(o)))
268 267 value = self.engine.pull_serialized('key')
269 268 value.addCallback(lambda serial: newserialized.IUnSerialized(serial).getObject())
270 269 d = self.assertDeferredEquals(value,o,d)
271 270 return d
272 271
273 272 def testPullSerializedFailures(self):
274 273 d = self.engine.pull_serialized('a')
275 274 d.addErrback(lambda f: self.assertRaises(NameError, f.raiseException))
276 275 d.addCallback(lambda _: self.engine.execute('l = lambda x: x'))
277 276 d.addCallback(lambda _: self.engine.pull_serialized('l'))
278 277 d.addErrback(lambda f: self.assertRaises(pickle.PicklingError, f.raiseException))
279 278 return d
280 279
281 280 Parametric(IEngineSerializedTestCase)
282 281
283 282 class IEngineQueuedTestCase(object):
284 283 """Test an IEngineQueued implementer."""
285 284
286 285 def testIEngineQueuedInterface(self):
287 286 """Does self.engine claim to implement IEngineQueued?"""
288 287 self.assert_(es.IEngineQueued.providedBy(self.engine))
289 288
290 289 def testIEngineQueuedInterfaceMethods(self):
291 290 """Does self.engine have the methods and attributes in IEngireQueued."""
292 291 for m in list(es.IEngineQueued):
293 292 self.assert_(hasattr(self.engine, m))
294 293
295 294 def testIEngineQueuedDeferreds(self):
296 295 dList = []
297 296 d = self.engine.clear_queue()
298 297 self.assert_(isinstance(d, defer.Deferred))
299 298 dList.append(d)
300 299 d = self.engine.queue_status()
301 300 self.assert_(isinstance(d, defer.Deferred))
302 301 dList.append(d)
303 302 D = defer.DeferredList(dList)
304 303 return D
305 304
306 305 def testClearQueue(self):
307 306 result = self.engine.clear_queue()
308 307 d1 = self.assertDeferredEquals(result, None)
309 308 d1.addCallback(lambda _: self.engine.queue_status())
310 309 d2 = self.assertDeferredEquals(d1, {'queue':[], 'pending':'None'})
311 310 return d2
312 311
313 312 def testQueueStatus(self):
314 313 result = self.engine.queue_status()
315 314 result.addCallback(lambda r: 'queue' in r and 'pending' in r)
316 315 d = self.assertDeferredEquals(result, True)
317 316 return d
318 317
319 318 Parametric(IEngineQueuedTestCase)
320 319
321 320 class IEnginePropertiesTestCase(object):
322 321 """Test an IEngineProperties implementor."""
323 322
324 323 def testIEnginePropertiesInterface(self):
325 324 """Does self.engine claim to implement IEngineProperties?"""
326 325 self.assert_(es.IEngineProperties.providedBy(self.engine))
327 326
328 327 def testIEnginePropertiesInterfaceMethods(self):
329 328 """Does self.engine have the methods and attributes in IEngireProperties."""
330 329 for m in list(es.IEngineProperties):
331 330 self.assert_(hasattr(self.engine, m))
332 331
333 332 def testGetSetProperties(self):
334 333 dikt = dict(a=5, b='asdf', c=True, d=None, e=range(5))
335 334 d = self.engine.set_properties(dikt)
336 335 d.addCallback(lambda r: self.engine.get_properties())
337 336 d = self.assertDeferredEquals(d, dikt)
338 337 d.addCallback(lambda r: self.engine.get_properties(('c',)))
339 338 d = self.assertDeferredEquals(d, {'c': dikt['c']})
340 339 d.addCallback(lambda r: self.engine.set_properties(dict(c=False)))
341 340 d.addCallback(lambda r: self.engine.get_properties(('c', 'd')))
342 341 d = self.assertDeferredEquals(d, dict(c=False, d=None))
343 342 return d
344 343
345 344 def testClearProperties(self):
346 345 dikt = dict(a=5, b='asdf', c=True, d=None, e=range(5))
347 346 d = self.engine.set_properties(dikt)
348 347 d.addCallback(lambda r: self.engine.clear_properties())
349 348 d.addCallback(lambda r: self.engine.get_properties())
350 349 d = self.assertDeferredEquals(d, {})
351 350 return d
352 351
353 352 def testDelHasProperties(self):
354 353 dikt = dict(a=5, b='asdf', c=True, d=None, e=range(5))
355 354 d = self.engine.set_properties(dikt)
356 355 d.addCallback(lambda r: self.engine.del_properties(('b','e')))
357 356 d.addCallback(lambda r: self.engine.has_properties(('a','b','c','d','e')))
358 357 d = self.assertDeferredEquals(d, [True, False, True, True, False])
359 358 return d
360 359
361 360 def testStrictDict(self):
362 361 s = """from IPython.kernel.engineservice import get_engine
363 362 p = get_engine(%s).properties"""%self.engine.id
364 363 d = self.engine.execute(s)
365 364 d.addCallback(lambda r: self.engine.execute("p['a'] = lambda _:None"))
366 365 d = self.assertDeferredRaises(d, error.InvalidProperty)
367 366 d.addCallback(lambda r: self.engine.execute("p['a'] = range(5)"))
368 367 d.addCallback(lambda r: self.engine.execute("p['a'].append(5)"))
369 368 d.addCallback(lambda r: self.engine.get_properties('a'))
370 369 d = self.assertDeferredEquals(d, dict(a=range(5)))
371 370 return d
372 371
373 372 Parametric(IEnginePropertiesTestCase)
@@ -1,838 +1,828 b''
1 1 # encoding: utf-8
2 2
3 3 """"""
4 4
5 5 __docformat__ = "restructuredtext en"
6 6
7 7 #-------------------------------------------------------------------------------
8 8 # Copyright (C) 2008 The IPython Development Team
9 9 #
10 10 # Distributed under the terms of the BSD License. The full license is in
11 11 # the file COPYING, distributed as part of this software.
12 12 #-------------------------------------------------------------------------------
13 13
14 14 #-------------------------------------------------------------------------------
15 15 # Imports
16 16 #-------------------------------------------------------------------------------
17 17
18 18 from twisted.internet import defer
19 19
20 20 from IPython.kernel import engineservice as es
21 21 from IPython.kernel import multiengine as me
22 22 from IPython.kernel import newserialized
23 23 from IPython.kernel.error import NotDefined
24 24 from IPython.testing import util
25 25 from IPython.testing.parametric import parametric, Parametric
26 26 from IPython.kernel import newserialized
27 27 from IPython.kernel.util import printer
28 28 from IPython.kernel.error import (InvalidEngineID,
29 29 NoEnginesRegistered,
30 30 CompositeError,
31 31 InvalidDeferredID)
32 32 from IPython.kernel.tests.engineservicetest import validCommands, invalidCommands
33 33 from IPython.kernel.core.interpreter import Interpreter
34 34
35 35
36 36 #-------------------------------------------------------------------------------
37 37 # Base classes and utilities
38 38 #-------------------------------------------------------------------------------
39 39
40 40 class IMultiEngineBaseTestCase(object):
41 41 """Basic utilities for working with multiengine tests.
42 42
43 43 Some subclass should define:
44 44
45 45 * self.multiengine
46 46 * self.engines to keep track of engines for clean up"""
47 47
48 48 def createShell(self):
49 49 return Interpreter()
50 50
51 51 def addEngine(self, n=1):
52 52 for i in range(n):
53 53 e = es.EngineService()
54 54 e.startService()
55 55 regDict = self.controller.register_engine(es.QueuedEngine(e), None)
56 56 e.id = regDict['id']
57 57 self.engines.append(e)
58 58
59 59
60 60 def testf(x):
61 61 return 2.0*x
62 62
63 63
64 64 globala = 99
65 65
66 66
67 67 def testg(x):
68 68 return globala*x
69 69
70 70
71 71 def isdid(did):
72 72 if not isinstance(did, str):
73 73 return False
74 74 if not len(did)==40:
75 75 return False
76 76 return True
77 77
78 78
79 79 def _raise_it(f):
80 80 try:
81 81 f.raiseException()
82 82 except CompositeError, e:
83 83 e.raise_exception()
84 84
85 85 #-------------------------------------------------------------------------------
86 86 # IMultiEngineTestCase
87 87 #-------------------------------------------------------------------------------
88 88
89 89 class IMultiEngineTestCase(IMultiEngineBaseTestCase):
90 90 """A test for any object that implements IEngineMultiplexer.
91 91
92 92 self.multiengine must be defined and implement IEngineMultiplexer.
93 93 """
94 94
95 95 def testIMultiEngineInterface(self):
96 96 """Does self.engine claim to implement IEngineCore?"""
97 97 self.assert_(me.IEngineMultiplexer.providedBy(self.multiengine))
98 98 self.assert_(me.IMultiEngine.providedBy(self.multiengine))
99 99
100 100 def testIEngineMultiplexerInterfaceMethods(self):
101 101 """Does self.engine have the methods and attributes in IEngineCore."""
102 102 for m in list(me.IEngineMultiplexer):
103 103 self.assert_(hasattr(self.multiengine, m))
104 104
105 105 def testIEngineMultiplexerDeferreds(self):
106 106 self.addEngine(1)
107 107 d= self.multiengine.execute('a=5', targets=0)
108 108 d.addCallback(lambda _: self.multiengine.push(dict(a=5),targets=0))
109 109 d.addCallback(lambda _: self.multiengine.push(dict(a=5, b='asdf', c=[1,2,3]),targets=0))
110 110 d.addCallback(lambda _: self.multiengine.pull(('a','b','c'),targets=0))
111 111 d.addCallback(lambda _: self.multiengine.get_result(targets=0))
112 112 d.addCallback(lambda _: self.multiengine.reset(targets=0))
113 113 d.addCallback(lambda _: self.multiengine.keys(targets=0))
114 114 d.addCallback(lambda _: self.multiengine.push_serialized(dict(a=newserialized.serialize(10)),targets=0))
115 115 d.addCallback(lambda _: self.multiengine.pull_serialized('a',targets=0))
116 116 d.addCallback(lambda _: self.multiengine.clear_queue(targets=0))
117 117 d.addCallback(lambda _: self.multiengine.queue_status(targets=0))
118 118 return d
119 119
120 120 def testInvalidEngineID(self):
121 121 self.addEngine(1)
122 122 badID = 100
123 123 d = self.multiengine.execute('a=5', targets=badID)
124 124 d.addErrback(lambda f: self.assertRaises(InvalidEngineID, f.raiseException))
125 125 d.addCallback(lambda _: self.multiengine.push(dict(a=5), targets=badID))
126 126 d.addErrback(lambda f: self.assertRaises(InvalidEngineID, f.raiseException))
127 127 d.addCallback(lambda _: self.multiengine.pull('a', targets=badID))
128 128 d.addErrback(lambda f: self.assertRaises(InvalidEngineID, f.raiseException))
129 129 d.addCallback(lambda _: self.multiengine.reset(targets=badID))
130 130 d.addErrback(lambda f: self.assertRaises(InvalidEngineID, f.raiseException))
131 131 d.addCallback(lambda _: self.multiengine.keys(targets=badID))
132 132 d.addErrback(lambda f: self.assertRaises(InvalidEngineID, f.raiseException))
133 133 d.addCallback(lambda _: self.multiengine.push_serialized(dict(a=newserialized.serialize(10)), targets=badID))
134 134 d.addErrback(lambda f: self.assertRaises(InvalidEngineID, f.raiseException))
135 135 d.addCallback(lambda _: self.multiengine.pull_serialized('a', targets=badID))
136 136 d.addErrback(lambda f: self.assertRaises(InvalidEngineID, f.raiseException))
137 137 d.addCallback(lambda _: self.multiengine.queue_status(targets=badID))
138 138 d.addErrback(lambda f: self.assertRaises(InvalidEngineID, f.raiseException))
139 139 return d
140 140
141 141 def testNoEnginesRegistered(self):
142 142 badID = 'all'
143 143 d= self.multiengine.execute('a=5', targets=badID)
144 144 d.addErrback(lambda f: self.assertRaises(NoEnginesRegistered, f.raiseException))
145 145 d.addCallback(lambda _: self.multiengine.push(dict(a=5), targets=badID))
146 146 d.addErrback(lambda f: self.assertRaises(NoEnginesRegistered, f.raiseException))
147 147 d.addCallback(lambda _: self.multiengine.pull('a', targets=badID))
148 148 d.addErrback(lambda f: self.assertRaises(NoEnginesRegistered, f.raiseException))
149 149 d.addCallback(lambda _: self.multiengine.get_result(targets=badID))
150 150 d.addErrback(lambda f: self.assertRaises(NoEnginesRegistered, f.raiseException))
151 151 d.addCallback(lambda _: self.multiengine.reset(targets=badID))
152 152 d.addErrback(lambda f: self.assertRaises(NoEnginesRegistered, f.raiseException))
153 153 d.addCallback(lambda _: self.multiengine.keys(targets=badID))
154 154 d.addErrback(lambda f: self.assertRaises(NoEnginesRegistered, f.raiseException))
155 155 d.addCallback(lambda _: self.multiengine.push_serialized(dict(a=newserialized.serialize(10)), targets=badID))
156 156 d.addErrback(lambda f: self.assertRaises(NoEnginesRegistered, f.raiseException))
157 157 d.addCallback(lambda _: self.multiengine.pull_serialized('a', targets=badID))
158 158 d.addErrback(lambda f: self.assertRaises(NoEnginesRegistered, f.raiseException))
159 159 d.addCallback(lambda _: self.multiengine.queue_status(targets=badID))
160 160 d.addErrback(lambda f: self.assertRaises(NoEnginesRegistered, f.raiseException))
161 161 return d
162 162
163 163 def runExecuteAll(self, d, cmd, shell):
164 164 actual = shell.execute(cmd)
165 165 d.addCallback(lambda _: self.multiengine.execute(cmd))
166 166 def compare(result):
167 167 for r in result:
168 168 actual['id'] = r['id']
169 169 self.assertEquals(r, actual)
170 170 d.addCallback(compare)
171 171
172 172 def testExecuteAll(self):
173 173 self.addEngine(4)
174 174 d= defer.Deferred()
175 175 shell = Interpreter()
176 176 for cmd in validCommands:
177 177 self.runExecuteAll(d, cmd, shell)
178 178 d.callback(None)
179 179 return d
180 180
181 181 # The following two methods show how to do parametrized
182 182 # tests. This is really slick! Same is used above.
183 183 def runExecuteFailures(self, cmd, exc):
184 184 self.addEngine(4)
185 185 d= self.multiengine.execute(cmd)
186 186 d.addErrback(lambda f: self.assertRaises(exc, _raise_it, f))
187 187 return d
188 188
189 189 @parametric
190 190 def testExecuteFailures(cls):
191 191 return [(cls.runExecuteFailures,cmd,exc) for
192 192 cmd,exc in invalidCommands]
193 193
194 194 def testPushPull(self):
195 195 self.addEngine(1)
196 196 objs = [10,"hi there",1.2342354,{"p":(1,2)}]
197 197 d= self.multiengine.push(dict(key=objs[0]), targets=0)
198 198 d.addCallback(lambda _: self.multiengine.pull('key', targets=0))
199 199 d.addCallback(lambda r: self.assertEquals(r, [objs[0]]))
200 200 d.addCallback(lambda _: self.multiengine.push(dict(key=objs[1]), targets=0))
201 201 d.addCallback(lambda _: self.multiengine.pull('key', targets=0))
202 202 d.addCallback(lambda r: self.assertEquals(r, [objs[1]]))
203 203 d.addCallback(lambda _: self.multiengine.push(dict(key=objs[2]), targets=0))
204 204 d.addCallback(lambda _: self.multiengine.pull('key', targets=0))
205 205 d.addCallback(lambda r: self.assertEquals(r, [objs[2]]))
206 206 d.addCallback(lambda _: self.multiengine.push(dict(key=objs[3]), targets=0))
207 207 d.addCallback(lambda _: self.multiengine.pull('key', targets=0))
208 208 d.addCallback(lambda r: self.assertEquals(r, [objs[3]]))
209 209 d.addCallback(lambda _: self.multiengine.reset(targets=0))
210 210 d.addCallback(lambda _: self.multiengine.pull('a', targets=0))
211 211 d.addErrback(lambda f: self.assertRaises(NameError, _raise_it, f))
212 212 d.addCallback(lambda _: self.multiengine.push(dict(a=10,b=20)))
213 213 d.addCallback(lambda _: self.multiengine.pull(('a','b')))
214 214 d.addCallback(lambda r: self.assertEquals(r, [[10,20]]))
215 215 return d
216 216
217 217 def testPushPullAll(self):
218 218 self.addEngine(4)
219 219 d= self.multiengine.push(dict(a=10))
220 220 d.addCallback(lambda _: self.multiengine.pull('a'))
221 221 d.addCallback(lambda r: self.assert_(r==[10,10,10,10]))
222 222 d.addCallback(lambda _: self.multiengine.push(dict(a=10, b=20)))
223 223 d.addCallback(lambda _: self.multiengine.pull(('a','b')))
224 224 d.addCallback(lambda r: self.assert_(r==4*[[10,20]]))
225 225 d.addCallback(lambda _: self.multiengine.push(dict(a=10, b=20), targets=0))
226 226 d.addCallback(lambda _: self.multiengine.pull(('a','b'), targets=0))
227 227 d.addCallback(lambda r: self.assert_(r==[[10,20]]))
228 228 d.addCallback(lambda _: self.multiengine.push(dict(a=None, b=None), targets=0))
229 229 d.addCallback(lambda _: self.multiengine.pull(('a','b'), targets=0))
230 230 d.addCallback(lambda r: self.assert_(r==[[None,None]]))
231 231 return d
232 232
233 233 def testPushPullSerialized(self):
234 234 self.addEngine(1)
235 235 objs = [10,"hi there",1.2342354,{"p":(1,2)}]
236 236 d= self.multiengine.push_serialized(dict(key=newserialized.serialize(objs[0])), targets=0)
237 237 d.addCallback(lambda _: self.multiengine.pull_serialized('key', targets=0))
238 238 d.addCallback(lambda serial: newserialized.IUnSerialized(serial[0]).getObject())
239 239 d.addCallback(lambda r: self.assertEquals(r, objs[0]))
240 240 d.addCallback(lambda _: self.multiengine.push_serialized(dict(key=newserialized.serialize(objs[1])), targets=0))
241 241 d.addCallback(lambda _: self.multiengine.pull_serialized('key', targets=0))
242 242 d.addCallback(lambda serial: newserialized.IUnSerialized(serial[0]).getObject())
243 243 d.addCallback(lambda r: self.assertEquals(r, objs[1]))
244 244 d.addCallback(lambda _: self.multiengine.push_serialized(dict(key=newserialized.serialize(objs[2])), targets=0))
245 245 d.addCallback(lambda _: self.multiengine.pull_serialized('key', targets=0))
246 246 d.addCallback(lambda serial: newserialized.IUnSerialized(serial[0]).getObject())
247 247 d.addCallback(lambda r: self.assertEquals(r, objs[2]))
248 248 d.addCallback(lambda _: self.multiengine.push_serialized(dict(key=newserialized.serialize(objs[3])), targets=0))
249 249 d.addCallback(lambda _: self.multiengine.pull_serialized('key', targets=0))
250 250 d.addCallback(lambda serial: newserialized.IUnSerialized(serial[0]).getObject())
251 251 d.addCallback(lambda r: self.assertEquals(r, objs[3]))
252 252 d.addCallback(lambda _: self.multiengine.push(dict(a=10,b=range(5)), targets=0))
253 253 d.addCallback(lambda _: self.multiengine.pull_serialized(('a','b'), targets=0))
254 254 d.addCallback(lambda serial: [newserialized.IUnSerialized(s).getObject() for s in serial[0]])
255 255 d.addCallback(lambda r: self.assertEquals(r, [10, range(5)]))
256 256 d.addCallback(lambda _: self.multiengine.reset(targets=0))
257 257 d.addCallback(lambda _: self.multiengine.pull_serialized('a', targets=0))
258 258 d.addErrback(lambda f: self.assertRaises(NameError, _raise_it, f))
259 259 return d
260 260
261 261 objs = [10,"hi there",1.2342354,{"p":(1,2)}]
262 262 d= defer.succeed(None)
263 263 for o in objs:
264 264 self.multiengine.push_serialized(0, key=newserialized.serialize(o))
265 265 value = self.multiengine.pull_serialized(0, 'key')
266 266 value.addCallback(lambda serial: newserialized.IUnSerialized(serial[0]).getObject())
267 267 d = self.assertDeferredEquals(value,o,d)
268 268 return d
269 269
270 270 def runGetResultAll(self, d, cmd, shell):
271 271 actual = shell.execute(cmd)
272 272 d.addCallback(lambda _: self.multiengine.execute(cmd))
273 273 d.addCallback(lambda _: self.multiengine.get_result())
274 274 def compare(result):
275 275 for r in result:
276 276 actual['id'] = r['id']
277 277 self.assertEquals(r, actual)
278 278 d.addCallback(compare)
279 279
280 280 def testGetResultAll(self):
281 281 self.addEngine(4)
282 282 d= defer.Deferred()
283 283 shell = Interpreter()
284 284 for cmd in validCommands:
285 285 self.runGetResultAll(d, cmd, shell)
286 286 d.callback(None)
287 287 return d
288 288
289 289 def testGetResultDefault(self):
290 290 self.addEngine(1)
291 291 target = 0
292 292 cmd = 'a=5'
293 293 shell = self.createShell()
294 294 shellResult = shell.execute(cmd)
295 295 def popit(dikt, key):
296 296 dikt.pop(key)
297 297 return dikt
298 298 d= self.multiengine.execute(cmd, targets=target)
299 299 d.addCallback(lambda _: self.multiengine.get_result(targets=target))
300 300 d.addCallback(lambda r: self.assertEquals(shellResult, popit(r[0],'id')))
301 301 return d
302 302
303 303 def testGetResultFailure(self):
304 304 self.addEngine(1)
305 305 d= self.multiengine.get_result(None, targets=0)
306 306 d.addErrback(lambda f: self.assertRaises(IndexError, _raise_it, f))
307 307 d.addCallback(lambda _: self.multiengine.get_result(10, targets=0))
308 308 d.addErrback(lambda f: self.assertRaises(IndexError, _raise_it, f))
309 309 return d
310 310
311 311 def testPushFunction(self):
312 312 self.addEngine(1)
313 313 d= self.multiengine.push_function(dict(f=testf), targets=0)
314 314 d.addCallback(lambda _: self.multiengine.execute('result = f(10)', targets=0))
315 315 d.addCallback(lambda _: self.multiengine.pull('result', targets=0))
316 316 d.addCallback(lambda r: self.assertEquals(r[0], testf(10)))
317 317 d.addCallback(lambda _: self.multiengine.push(dict(globala=globala), targets=0))
318 318 d.addCallback(lambda _: self.multiengine.push_function(dict(g=testg), targets=0))
319 319 d.addCallback(lambda _: self.multiengine.execute('result = g(10)', targets=0))
320 320 d.addCallback(lambda _: self.multiengine.pull('result', targets=0))
321 321 d.addCallback(lambda r: self.assertEquals(r[0], testg(10)))
322 322 return d
323 323
324 324 def testPullFunction(self):
325 325 self.addEngine(1)
326 326 d= self.multiengine.push(dict(a=globala), targets=0)
327 327 d.addCallback(lambda _: self.multiengine.push_function(dict(f=testf), targets=0))
328 328 d.addCallback(lambda _: self.multiengine.pull_function('f', targets=0))
329 329 d.addCallback(lambda r: self.assertEquals(r[0](10), testf(10)))
330 330 d.addCallback(lambda _: self.multiengine.execute("def g(x): return x*x", targets=0))
331 331 d.addCallback(lambda _: self.multiengine.pull_function(('f','g'),targets=0))
332 332 d.addCallback(lambda r: self.assertEquals((r[0][0](10),r[0][1](10)), (testf(10), 100)))
333 333 return d
334 334
335 335 def testPushFunctionAll(self):
336 336 self.addEngine(4)
337 337 d= self.multiengine.push_function(dict(f=testf))
338 338 d.addCallback(lambda _: self.multiengine.execute('result = f(10)'))
339 339 d.addCallback(lambda _: self.multiengine.pull('result'))
340 340 d.addCallback(lambda r: self.assertEquals(r, 4*[testf(10)]))
341 341 d.addCallback(lambda _: self.multiengine.push(dict(globala=globala)))
342 342 d.addCallback(lambda _: self.multiengine.push_function(dict(testg=testg)))
343 343 d.addCallback(lambda _: self.multiengine.execute('result = testg(10)'))
344 344 d.addCallback(lambda _: self.multiengine.pull('result'))
345 345 d.addCallback(lambda r: self.assertEquals(r, 4*[testg(10)]))
346 346 return d
347 347
348 348 def testPullFunctionAll(self):
349 349 self.addEngine(4)
350 350 d= self.multiengine.push_function(dict(f=testf))
351 351 d.addCallback(lambda _: self.multiengine.pull_function('f'))
352 352 d.addCallback(lambda r: self.assertEquals([func(10) for func in r], 4*[testf(10)]))
353 353 return d
354 354
355 355 def testGetIDs(self):
356 356 self.addEngine(1)
357 357 d= self.multiengine.get_ids()
358 358 d.addCallback(lambda r: self.assertEquals(r, [0]))
359 359 d.addCallback(lambda _: self.addEngine(3))
360 360 d.addCallback(lambda _: self.multiengine.get_ids())
361 361 d.addCallback(lambda r: self.assertEquals(r, [0,1,2,3]))
362 362 return d
363 363
364 364 def testClearQueue(self):
365 365 self.addEngine(4)
366 366 d= self.multiengine.clear_queue()
367 367 d.addCallback(lambda r: self.assertEquals(r,4*[None]))
368 368 return d
369 369
370 370 def testQueueStatus(self):
371 371 self.addEngine(4)
372 372 d= self.multiengine.queue_status(targets=0)
373 373 d.addCallback(lambda r: self.assert_(isinstance(r[0],tuple)))
374 374 return d
375 375
376 376 def testGetSetProperties(self):
377 377 self.addEngine(4)
378 378 dikt = dict(a=5, b='asdf', c=True, d=None, e=range(5))
379 379 d= self.multiengine.set_properties(dikt)
380 380 d.addCallback(lambda r: self.multiengine.get_properties())
381 381 d.addCallback(lambda r: self.assertEquals(r, 4*[dikt]))
382 382 d.addCallback(lambda r: self.multiengine.get_properties(('c',)))
383 383 d.addCallback(lambda r: self.assertEquals(r, 4*[{'c': dikt['c']}]))
384 384 d.addCallback(lambda r: self.multiengine.set_properties(dict(c=False)))
385 385 d.addCallback(lambda r: self.multiengine.get_properties(('c', 'd')))
386 386 d.addCallback(lambda r: self.assertEquals(r, 4*[dict(c=False, d=None)]))
387 387 return d
388 388
389 389 def testClearProperties(self):
390 390 self.addEngine(4)
391 391 dikt = dict(a=5, b='asdf', c=True, d=None, e=range(5))
392 392 d= self.multiengine.set_properties(dikt)
393 393 d.addCallback(lambda r: self.multiengine.clear_properties())
394 394 d.addCallback(lambda r: self.multiengine.get_properties())
395 395 d.addCallback(lambda r: self.assertEquals(r, 4*[{}]))
396 396 return d
397 397
398 398 def testDelHasProperties(self):
399 399 self.addEngine(4)
400 400 dikt = dict(a=5, b='asdf', c=True, d=None, e=range(5))
401 401 d= self.multiengine.set_properties(dikt)
402 402 d.addCallback(lambda r: self.multiengine.del_properties(('b','e')))
403 403 d.addCallback(lambda r: self.multiengine.has_properties(('a','b','c','d','e')))
404 404 d.addCallback(lambda r: self.assertEquals(r, 4*[[True, False, True, True, False]]))
405 405 return d
406 406
407 407 Parametric(IMultiEngineTestCase)
408 408
409 409 #-------------------------------------------------------------------------------
410 410 # ISynchronousMultiEngineTestCase
411 411 #-------------------------------------------------------------------------------
412 412
413 413 class ISynchronousMultiEngineTestCase(IMultiEngineBaseTestCase):
414 414
415 415 def testISynchronousMultiEngineInterface(self):
416 416 """Does self.engine claim to implement IEngineCore?"""
417 417 self.assert_(me.ISynchronousEngineMultiplexer.providedBy(self.multiengine))
418 418 self.assert_(me.ISynchronousMultiEngine.providedBy(self.multiengine))
419 419
420 420 def testExecute(self):
421 421 self.addEngine(4)
422 422 execute = self.multiengine.execute
423 423 d= execute('a=5', targets=0, block=True)
424 424 d.addCallback(lambda r: self.assert_(len(r)==1))
425 425 d.addCallback(lambda _: execute('b=10'))
426 426 d.addCallback(lambda r: self.assert_(len(r)==4))
427 427 d.addCallback(lambda _: execute('c=30', block=False))
428 428 d.addCallback(lambda did: self.assert_(isdid(did)))
429 429 d.addCallback(lambda _: execute('d=[0,1,2]', block=False))
430 430 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
431 431 d.addCallback(lambda r: self.assert_(len(r)==4))
432 432 return d
433 433
434 434 def testPushPull(self):
435 435 data = dict(a=10, b=1.05, c=range(10), d={'e':(1,2),'f':'hi'})
436 436 self.addEngine(4)
437 437 push = self.multiengine.push
438 438 pull = self.multiengine.pull
439 439 d= push({'data':data}, targets=0)
440 440 d.addCallback(lambda r: pull('data', targets=0))
441 441 d.addCallback(lambda r: self.assertEqual(r,[data]))
442 442 d.addCallback(lambda _: push({'data':data}))
443 443 d.addCallback(lambda r: pull('data'))
444 444 d.addCallback(lambda r: self.assertEqual(r,4*[data]))
445 445 d.addCallback(lambda _: push({'data':data}, block=False))
446 446 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
447 447 d.addCallback(lambda _: pull('data', block=False))
448 448 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
449 449 d.addCallback(lambda r: self.assertEqual(r,4*[data]))
450 450 d.addCallback(lambda _: push(dict(a=10,b=20)))
451 451 d.addCallback(lambda _: pull(('a','b')))
452 452 d.addCallback(lambda r: self.assertEquals(r, 4*[[10,20]]))
453 453 return d
454 454
455 455 def testPushPullFunction(self):
456 456 self.addEngine(4)
457 457 pushf = self.multiengine.push_function
458 458 pullf = self.multiengine.pull_function
459 459 push = self.multiengine.push
460 460 pull = self.multiengine.pull
461 461 execute = self.multiengine.execute
462 462 d= pushf({'testf':testf}, targets=0)
463 463 d.addCallback(lambda r: pullf('testf', targets=0))
464 464 d.addCallback(lambda r: self.assertEqual(r[0](1.0), testf(1.0)))
465 465 d.addCallback(lambda _: execute('r = testf(10)', targets=0))
466 466 d.addCallback(lambda _: pull('r', targets=0))
467 467 d.addCallback(lambda r: self.assertEquals(r[0], testf(10)))
468 468 d.addCallback(lambda _: pushf({'testf':testf}, block=False))
469 469 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
470 470 d.addCallback(lambda _: pullf('testf', block=False))
471 471 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
472 472 d.addCallback(lambda r: self.assertEqual(r[0](1.0), testf(1.0)))
473 473 d.addCallback(lambda _: execute("def g(x): return x*x", targets=0))
474 474 d.addCallback(lambda _: pullf(('testf','g'),targets=0))
475 475 d.addCallback(lambda r: self.assertEquals((r[0][0](10),r[0][1](10)), (testf(10), 100)))
476 476 return d
477 477
478 478 def testGetResult(self):
479 479 shell = Interpreter()
480 480 result1 = shell.execute('a=10')
481 481 result1['id'] = 0
482 482 result2 = shell.execute('b=20')
483 483 result2['id'] = 0
484 484 execute= self.multiengine.execute
485 485 get_result = self.multiengine.get_result
486 486 self.addEngine(1)
487 487 d= execute('a=10')
488 488 d.addCallback(lambda _: get_result())
489 489 d.addCallback(lambda r: self.assertEquals(r[0], result1))
490 490 d.addCallback(lambda _: execute('b=20'))
491 491 d.addCallback(lambda _: get_result(1))
492 492 d.addCallback(lambda r: self.assertEquals(r[0], result1))
493 493 d.addCallback(lambda _: get_result(2, block=False))
494 494 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
495 495 d.addCallback(lambda r: self.assertEquals(r[0], result2))
496 496 return d
497 497
498 498 def testResetAndKeys(self):
499 499 self.addEngine(1)
500 500
501 501 #Blocking mode
502 502 d= self.multiengine.push(dict(a=10, b=20, c=range(10)), targets=0)
503 503 d.addCallback(lambda _: self.multiengine.keys(targets=0))
504 504 def keys_found(keys):
505 505 self.assert_('a' in keys[0])
506 506 self.assert_('b' in keys[0])
507 507 self.assert_('b' in keys[0])
508 508 d.addCallback(keys_found)
509 509 d.addCallback(lambda _: self.multiengine.reset(targets=0))
510 510 d.addCallback(lambda _: self.multiengine.keys(targets=0))
511 511 def keys_not_found(keys):
512 512 self.assert_('a' not in keys[0])
513 513 self.assert_('b' not in keys[0])
514 514 self.assert_('b' not in keys[0])
515 515 d.addCallback(keys_not_found)
516 516
517 517 #Non-blocking mode
518 518 d.addCallback(lambda _: self.multiengine.push(dict(a=10, b=20, c=range(10)), targets=0))
519 519 d.addCallback(lambda _: self.multiengine.keys(targets=0, block=False))
520 520 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
521 521 def keys_found(keys):
522 522 self.assert_('a' in keys[0])
523 523 self.assert_('b' in keys[0])
524 524 self.assert_('b' in keys[0])
525 525 d.addCallback(keys_found)
526 526 d.addCallback(lambda _: self.multiengine.reset(targets=0, block=False))
527 527 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
528 528 d.addCallback(lambda _: self.multiengine.keys(targets=0, block=False))
529 529 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
530 530 def keys_not_found(keys):
531 531 self.assert_('a' not in keys[0])
532 532 self.assert_('b' not in keys[0])
533 533 self.assert_('b' not in keys[0])
534 534 d.addCallback(keys_not_found)
535 535
536 536 return d
537 537
538 538 def testPushPullSerialized(self):
539 539 self.addEngine(1)
540 540 dikt = dict(a=10,b='hi there',c=1.2345,d={'p':(1,2)})
541 541 sdikt = {}
542 542 for k,v in dikt.iteritems():
543 543 sdikt[k] = newserialized.serialize(v)
544 544 d= self.multiengine.push_serialized(dict(a=sdikt['a']), targets=0)
545 545 d.addCallback(lambda _: self.multiengine.pull('a',targets=0))
546 546 d.addCallback(lambda r: self.assertEquals(r[0], dikt['a']))
547 547 d.addCallback(lambda _: self.multiengine.pull_serialized('a', targets=0))
548 548 d.addCallback(lambda serial: newserialized.IUnSerialized(serial[0]).getObject())
549 549 d.addCallback(lambda r: self.assertEquals(r, dikt['a']))
550 550 d.addCallback(lambda _: self.multiengine.push_serialized(sdikt, targets=0))
551 551 d.addCallback(lambda _: self.multiengine.pull_serialized(sdikt.keys(), targets=0))
552 552 d.addCallback(lambda serial: [newserialized.IUnSerialized(s).getObject() for s in serial[0]])
553 553 d.addCallback(lambda r: self.assertEquals(r, dikt.values()))
554 554 d.addCallback(lambda _: self.multiengine.reset(targets=0))
555 555 d.addCallback(lambda _: self.multiengine.pull_serialized('a', targets=0))
556 556 d.addErrback(lambda f: self.assertRaises(NameError, _raise_it, f))
557 557
558 558 #Non-blocking mode
559 559 d.addCallback(lambda r: self.multiengine.push_serialized(dict(a=sdikt['a']), targets=0, block=False))
560 560 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
561 561 d.addCallback(lambda _: self.multiengine.pull('a',targets=0))
562 562 d.addCallback(lambda r: self.assertEquals(r[0], dikt['a']))
563 563 d.addCallback(lambda _: self.multiengine.pull_serialized('a', targets=0, block=False))
564 564 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
565 565 d.addCallback(lambda serial: newserialized.IUnSerialized(serial[0]).getObject())
566 566 d.addCallback(lambda r: self.assertEquals(r, dikt['a']))
567 567 d.addCallback(lambda _: self.multiengine.push_serialized(sdikt, targets=0, block=False))
568 568 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
569 569 d.addCallback(lambda _: self.multiengine.pull_serialized(sdikt.keys(), targets=0, block=False))
570 570 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
571 571 d.addCallback(lambda serial: [newserialized.IUnSerialized(s).getObject() for s in serial[0]])
572 572 d.addCallback(lambda r: self.assertEquals(r, dikt.values()))
573 573 d.addCallback(lambda _: self.multiengine.reset(targets=0))
574 574 d.addCallback(lambda _: self.multiengine.pull_serialized('a', targets=0, block=False))
575 575 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
576 576 d.addErrback(lambda f: self.assertRaises(NameError, _raise_it, f))
577 577 return d
578 578
579 579 def testClearQueue(self):
580 580 self.addEngine(4)
581 581 d= self.multiengine.clear_queue()
582 582 d.addCallback(lambda r: self.multiengine.clear_queue(block=False))
583 583 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
584 584 d.addCallback(lambda r: self.assertEquals(r,4*[None]))
585 585 return d
586 586
587 587 def testQueueStatus(self):
588 588 self.addEngine(4)
589 589 d= self.multiengine.queue_status(targets=0)
590 590 d.addCallback(lambda r: self.assert_(isinstance(r[0],tuple)))
591 591 d.addCallback(lambda r: self.multiengine.queue_status(targets=0, block=False))
592 592 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
593 593 d.addCallback(lambda r: self.assert_(isinstance(r[0],tuple)))
594 594 return d
595 595
596 596 def testGetIDs(self):
597 597 self.addEngine(1)
598 598 d= self.multiengine.get_ids()
599 599 d.addCallback(lambda r: self.assertEquals(r, [0]))
600 600 d.addCallback(lambda _: self.addEngine(3))
601 601 d.addCallback(lambda _: self.multiengine.get_ids())
602 602 d.addCallback(lambda r: self.assertEquals(r, [0,1,2,3]))
603 603 return d
604 604
605 605 def testGetSetProperties(self):
606 606 self.addEngine(4)
607 607 dikt = dict(a=5, b='asdf', c=True, d=None, e=range(5))
608 608 d= self.multiengine.set_properties(dikt)
609 609 d.addCallback(lambda r: self.multiengine.get_properties())
610 610 d.addCallback(lambda r: self.assertEquals(r, 4*[dikt]))
611 611 d.addCallback(lambda r: self.multiengine.get_properties(('c',)))
612 612 d.addCallback(lambda r: self.assertEquals(r, 4*[{'c': dikt['c']}]))
613 613 d.addCallback(lambda r: self.multiengine.set_properties(dict(c=False)))
614 614 d.addCallback(lambda r: self.multiengine.get_properties(('c', 'd')))
615 615 d.addCallback(lambda r: self.assertEquals(r, 4*[dict(c=False, d=None)]))
616 616
617 617 #Non-blocking
618 618 d.addCallback(lambda r: self.multiengine.set_properties(dikt, block=False))
619 619 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
620 620 d.addCallback(lambda r: self.multiengine.get_properties(block=False))
621 621 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
622 622 d.addCallback(lambda r: self.assertEquals(r, 4*[dikt]))
623 623 d.addCallback(lambda r: self.multiengine.get_properties(('c',), block=False))
624 624 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
625 625 d.addCallback(lambda r: self.assertEquals(r, 4*[{'c': dikt['c']}]))
626 626 d.addCallback(lambda r: self.multiengine.set_properties(dict(c=False), block=False))
627 627 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
628 628 d.addCallback(lambda r: self.multiengine.get_properties(('c', 'd'), block=False))
629 629 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
630 630 d.addCallback(lambda r: self.assertEquals(r, 4*[dict(c=False, d=None)]))
631 631 return d
632 632
633 633 def testClearProperties(self):
634 634 self.addEngine(4)
635 635 dikt = dict(a=5, b='asdf', c=True, d=None, e=range(5))
636 636 d= self.multiengine.set_properties(dikt)
637 637 d.addCallback(lambda r: self.multiengine.clear_properties())
638 638 d.addCallback(lambda r: self.multiengine.get_properties())
639 639 d.addCallback(lambda r: self.assertEquals(r, 4*[{}]))
640 640
641 641 #Non-blocking
642 642 d.addCallback(lambda r: self.multiengine.set_properties(dikt, block=False))
643 643 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
644 644 d.addCallback(lambda r: self.multiengine.clear_properties(block=False))
645 645 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
646 646 d.addCallback(lambda r: self.multiengine.get_properties(block=False))
647 647 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
648 648 d.addCallback(lambda r: self.assertEquals(r, 4*[{}]))
649 649 return d
650 650
651 651 def testDelHasProperties(self):
652 652 self.addEngine(4)
653 653 dikt = dict(a=5, b='asdf', c=True, d=None, e=range(5))
654 654 d= self.multiengine.set_properties(dikt)
655 655 d.addCallback(lambda r: self.multiengine.del_properties(('b','e')))
656 656 d.addCallback(lambda r: self.multiengine.has_properties(('a','b','c','d','e')))
657 657 d.addCallback(lambda r: self.assertEquals(r, 4*[[True, False, True, True, False]]))
658 658
659 659 #Non-blocking
660 660 d.addCallback(lambda r: self.multiengine.set_properties(dikt, block=False))
661 661 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
662 662 d.addCallback(lambda r: self.multiengine.del_properties(('b','e'), block=False))
663 663 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
664 664 d.addCallback(lambda r: self.multiengine.has_properties(('a','b','c','d','e'), block=False))
665 665 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
666 666 d.addCallback(lambda r: self.assertEquals(r, 4*[[True, False, True, True, False]]))
667 667 return d
668 668
669 669 def test_clear_pending_deferreds(self):
670 670 self.addEngine(4)
671 671 did_list = []
672 672 d= self.multiengine.execute('a=10',block=False)
673 673 d.addCallback(lambda did: did_list.append(did))
674 674 d.addCallback(lambda _: self.multiengine.push(dict(b=10),block=False))
675 675 d.addCallback(lambda did: did_list.append(did))
676 676 d.addCallback(lambda _: self.multiengine.pull(('a','b'),block=False))
677 677 d.addCallback(lambda did: did_list.append(did))
678 678 d.addCallback(lambda _: self.multiengine.clear_pending_deferreds())
679 679 d.addCallback(lambda _: self.multiengine.get_pending_deferred(did_list[0],True))
680 680 d.addErrback(lambda f: self.assertRaises(InvalidDeferredID, f.raiseException))
681 681 d.addCallback(lambda _: self.multiengine.get_pending_deferred(did_list[1],True))
682 682 d.addErrback(lambda f: self.assertRaises(InvalidDeferredID, f.raiseException))
683 683 d.addCallback(lambda _: self.multiengine.get_pending_deferred(did_list[2],True))
684 684 d.addErrback(lambda f: self.assertRaises(InvalidDeferredID, f.raiseException))
685 685 return d
686 686
687 687 #-------------------------------------------------------------------------------
688 688 # Coordinator test cases
689 689 #-------------------------------------------------------------------------------
690 690
691 691 class IMultiEngineCoordinatorTestCase(object):
692 692
693 693 def testScatterGather(self):
694 694 self.addEngine(4)
695 695 d= self.multiengine.scatter('a', range(16))
696 696 d.addCallback(lambda r: self.multiengine.gather('a'))
697 697 d.addCallback(lambda r: self.assertEquals(r, range(16)))
698 698 d.addCallback(lambda _: self.multiengine.gather('asdf'))
699 699 d.addErrback(lambda f: self.assertRaises(NameError, _raise_it, f))
700 700 return d
701 701
702 702 def testScatterGatherNumpy(self):
703 703 try:
704 704 import numpy
705 705 from numpy.testing.utils import assert_array_equal, assert_array_almost_equal
706 706 except:
707 707 return
708 708 else:
709 709 self.addEngine(4)
710 710 a = numpy.arange(16)
711 711 d = self.multiengine.scatter('a', a)
712 712 d.addCallback(lambda r: self.multiengine.gather('a'))
713 713 d.addCallback(lambda r: assert_array_equal(r, a))
714 714 return d
715 715
716 716 def testMap(self):
717 717 self.addEngine(4)
718 718 def f(x):
719 719 return x**2
720 720 data = range(16)
721 721 d= self.multiengine.map(f, data)
722 722 d.addCallback(lambda r: self.assertEquals(r,[f(x) for x in data]))
723 723 return d
724 724
725 725
726 726 class ISynchronousMultiEngineCoordinatorTestCase(IMultiEngineCoordinatorTestCase):
727 727
728 728 def testScatterGatherNonblocking(self):
729 729 self.addEngine(4)
730 730 d= self.multiengine.scatter('a', range(16), block=False)
731 731 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
732 732 d.addCallback(lambda r: self.multiengine.gather('a', block=False))
733 733 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
734 734 d.addCallback(lambda r: self.assertEquals(r, range(16)))
735 735 return d
736
736
737 737 def testScatterGatherNumpyNonblocking(self):
738 738 try:
739 739 import numpy
740 740 from numpy.testing.utils import assert_array_equal, assert_array_almost_equal
741 741 except:
742 742 return
743 743 else:
744 744 self.addEngine(4)
745 745 a = numpy.arange(16)
746 746 d = self.multiengine.scatter('a', a, block=False)
747 747 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
748 748 d.addCallback(lambda r: self.multiengine.gather('a', block=False))
749 749 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
750 750 d.addCallback(lambda r: assert_array_equal(r, a))
751 751 return d
752
753 def testMapNonblocking(self):
754 self.addEngine(4)
755 def f(x):
756 return x**2
757 data = range(16)
758 d= self.multiengine.map(f, data, block=False)
759 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
760 d.addCallback(lambda r: self.assertEquals(r,[f(x) for x in data]))
761 return d
762
752
763 753 def test_clear_pending_deferreds(self):
764 754 self.addEngine(4)
765 755 did_list = []
766 756 d= self.multiengine.scatter('a',range(16),block=False)
767 757 d.addCallback(lambda did: did_list.append(did))
768 758 d.addCallback(lambda _: self.multiengine.gather('a',block=False))
769 759 d.addCallback(lambda did: did_list.append(did))
770 760 d.addCallback(lambda _: self.multiengine.map(lambda x: x, range(16),block=False))
771 761 d.addCallback(lambda did: did_list.append(did))
772 762 d.addCallback(lambda _: self.multiengine.clear_pending_deferreds())
773 763 d.addCallback(lambda _: self.multiengine.get_pending_deferred(did_list[0],True))
774 764 d.addErrback(lambda f: self.assertRaises(InvalidDeferredID, f.raiseException))
775 765 d.addCallback(lambda _: self.multiengine.get_pending_deferred(did_list[1],True))
776 766 d.addErrback(lambda f: self.assertRaises(InvalidDeferredID, f.raiseException))
777 767 d.addCallback(lambda _: self.multiengine.get_pending_deferred(did_list[2],True))
778 768 d.addErrback(lambda f: self.assertRaises(InvalidDeferredID, f.raiseException))
779 769 return d
780 770
781 771 #-------------------------------------------------------------------------------
782 772 # Extras test cases
783 773 #-------------------------------------------------------------------------------
784 774
785 775 class IMultiEngineExtrasTestCase(object):
786 776
787 777 def testZipPull(self):
788 778 self.addEngine(4)
789 779 d= self.multiengine.push(dict(a=10,b=20))
790 780 d.addCallback(lambda r: self.multiengine.zip_pull(('a','b')))
791 781 d.addCallback(lambda r: self.assert_(r, [4*[10],4*[20]]))
792 782 return d
793 783
794 784 def testRun(self):
795 785 self.addEngine(4)
796 786 import tempfile
797 787 fname = tempfile.mktemp('foo.py')
798 788 f= open(fname, 'w')
799 789 f.write('a = 10\nb=30')
800 790 f.close()
801 791 d= self.multiengine.run(fname)
802 792 d.addCallback(lambda r: self.multiengine.pull(('a','b')))
803 793 d.addCallback(lambda r: self.assertEquals(r, 4*[[10,30]]))
804 794 return d
805 795
806 796
807 797 class ISynchronousMultiEngineExtrasTestCase(IMultiEngineExtrasTestCase):
808 798
809 799 def testZipPullNonblocking(self):
810 800 self.addEngine(4)
811 801 d= self.multiengine.push(dict(a=10,b=20))
812 802 d.addCallback(lambda r: self.multiengine.zip_pull(('a','b'), block=False))
813 803 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
814 804 d.addCallback(lambda r: self.assert_(r, [4*[10],4*[20]]))
815 805 return d
816 806
817 807 def testRunNonblocking(self):
818 808 self.addEngine(4)
819 809 import tempfile
820 810 fname = tempfile.mktemp('foo.py')
821 811 f= open(fname, 'w')
822 812 f.write('a = 10\nb=30')
823 813 f.close()
824 814 d= self.multiengine.run(fname, block=False)
825 815 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
826 816 d.addCallback(lambda r: self.multiengine.pull(('a','b')))
827 817 d.addCallback(lambda r: self.assertEquals(r, 4*[[10,30]]))
828 818 return d
829 819
830 820
831 821 #-------------------------------------------------------------------------------
832 822 # IFullSynchronousMultiEngineTestCase
833 823 #-------------------------------------------------------------------------------
834 824
835 825 class IFullSynchronousMultiEngineTestCase(ISynchronousMultiEngineTestCase,
836 826 ISynchronousMultiEngineCoordinatorTestCase,
837 827 ISynchronousMultiEngineExtrasTestCase):
838 828 pass
@@ -1,158 +1,187 b''
1 1 #!/usr/bin/env python
2 2 # encoding: utf-8
3 3
4 4 __docformat__ = "restructuredtext en"
5 5
6 6 #-------------------------------------------------------------------------------
7 7 # Copyright (C) 2008 The IPython Development Team
8 8 #
9 9 # Distributed under the terms of the BSD License. The full license is in
10 10 # the file COPYING, distributed as part of this software.
11 11 #-------------------------------------------------------------------------------
12 12
13 13 #-------------------------------------------------------------------------------
14 14 # Imports
15 15 #-------------------------------------------------------------------------------
16 16
17 17 import time
18 18
19 19 from IPython.kernel import task, engineservice as es
20 20 from IPython.kernel.util import printer
21 21 from IPython.kernel import error
22 22
23 23 #-------------------------------------------------------------------------------
24 24 # Tests
25 25 #-------------------------------------------------------------------------------
26 26
27 27 def _raise_it(f):
28 28 try:
29 29 f.raiseException()
30 30 except CompositeError, e:
31 31 e.raise_exception()
32 32
33 33 class TaskTestBase(object):
34 34
35 35 def addEngine(self, n=1):
36 36 for i in range(n):
37 37 e = es.EngineService()
38 38 e.startService()
39 39 regDict = self.controller.register_engine(es.QueuedEngine(e), None)
40 40 e.id = regDict['id']
41 41 self.engines.append(e)
42 42
43 43
44 44 class ITaskControllerTestCase(TaskTestBase):
45 45
46 def testTaskIDs(self):
46 def test_task_ids(self):
47 47 self.addEngine(1)
48 d = self.tc.run(task.Task('a=5'))
48 d = self.tc.run(task.StringTask('a=5'))
49 49 d.addCallback(lambda r: self.assertEquals(r, 0))
50 d.addCallback(lambda r: self.tc.run(task.Task('a=5')))
50 d.addCallback(lambda r: self.tc.run(task.StringTask('a=5')))
51 51 d.addCallback(lambda r: self.assertEquals(r, 1))
52 d.addCallback(lambda r: self.tc.run(task.Task('a=5')))
52 d.addCallback(lambda r: self.tc.run(task.StringTask('a=5')))
53 53 d.addCallback(lambda r: self.assertEquals(r, 2))
54 d.addCallback(lambda r: self.tc.run(task.Task('a=5')))
54 d.addCallback(lambda r: self.tc.run(task.StringTask('a=5')))
55 55 d.addCallback(lambda r: self.assertEquals(r, 3))
56 56 return d
57 57
58 def testAbort(self):
58 def test_abort(self):
59 59 """Cannot do a proper abort test, because blocking execution prevents
60 60 abort from being called before task completes"""
61 61 self.addEngine(1)
62 t = task.Task('a=5')
62 t = task.StringTask('a=5')
63 63 d = self.tc.abort(0)
64 64 d.addErrback(lambda f: self.assertRaises(IndexError, f.raiseException))
65 65 d.addCallback(lambda _:self.tc.run(t))
66 66 d.addCallback(self.tc.abort)
67 67 d.addErrback(lambda f: self.assertRaises(IndexError, f.raiseException))
68 68 return d
69 69
70 def testAbortType(self):
70 def test_abort_type(self):
71 71 self.addEngine(1)
72 72 d = self.tc.abort('asdfadsf')
73 73 d.addErrback(lambda f: self.assertRaises(TypeError, f.raiseException))
74 74 return d
75 75
76 def testClears(self):
76 def test_clear_before_and_after(self):
77 77 self.addEngine(1)
78 t = task.Task('a=1', clear_before=True, pull='b', clear_after=True)
78 t = task.StringTask('a=1', clear_before=True, pull='b', clear_after=True)
79 79 d = self.multiengine.execute('b=1', targets=0)
80 80 d.addCallback(lambda _: self.tc.run(t))
81 81 d.addCallback(lambda tid: self.tc.get_task_result(tid,block=True))
82 82 d.addCallback(lambda tr: tr.failure)
83 83 d.addErrback(lambda f: self.assertRaises(NameError, f.raiseException))
84 84 d.addCallback(lambda _:self.multiengine.pull('a', targets=0))
85 85 d.addErrback(lambda f: self.assertRaises(NameError, _raise_it, f))
86 86 return d
87 87
88 def testSimpleRetries(self):
88 def test_simple_retries(self):
89 89 self.addEngine(1)
90 t = task.Task("i += 1\nassert i == 16", pull='i',retries=10)
91 t2 = task.Task("i += 1\nassert i == 16", pull='i',retries=10)
90 t = task.StringTask("i += 1\nassert i == 16", pull='i',retries=10)
91 t2 = task.StringTask("i += 1\nassert i == 16", pull='i',retries=10)
92 92 d = self.multiengine.execute('i=0', targets=0)
93 93 d.addCallback(lambda r: self.tc.run(t))
94 94 d.addCallback(self.tc.get_task_result, block=True)
95 95 d.addCallback(lambda tr: tr.ns.i)
96 96 d.addErrback(lambda f: self.assertRaises(AssertionError, f.raiseException))
97 97
98 98 d.addCallback(lambda r: self.tc.run(t2))
99 99 d.addCallback(self.tc.get_task_result, block=True)
100 100 d.addCallback(lambda tr: tr.ns.i)
101 101 d.addCallback(lambda r: self.assertEquals(r, 16))
102 102 return d
103 103
104 def testRecoveryTasks(self):
104 def test_recovery_tasks(self):
105 105 self.addEngine(1)
106 t = task.Task("i=16", pull='i')
107 t2 = task.Task("raise Exception", recovery_task=t, retries = 2)
106 t = task.StringTask("i=16", pull='i')
107 t2 = task.StringTask("raise Exception", recovery_task=t, retries = 2)
108 108
109 109 d = self.tc.run(t2)
110 110 d.addCallback(self.tc.get_task_result, block=True)
111 111 d.addCallback(lambda tr: tr.ns.i)
112 112 d.addCallback(lambda r: self.assertEquals(r, 16))
113 113 return d
114 114
115 # def testInfiniteRecoveryLoop(self):
116 # self.addEngine(1)
117 # t = task.Task("raise Exception", retries = 5)
118 # t2 = task.Task("assert True", retries = 2, recovery_task = t)
119 # t.recovery_task = t2
120 #
121 # d = self.tc.run(t)
122 # d.addCallback(self.tc.get_task_result, block=True)
123 # d.addCallback(lambda tr: tr.ns.i)
124 # d.addBoth(printer)
125 # d.addErrback(lambda f: self.assertRaises(AssertionError, f.raiseException))
126 # return d
127 #
128 def testSetupNS(self):
115 def test_setup_ns(self):
129 116 self.addEngine(1)
130 117 d = self.multiengine.execute('a=0', targets=0)
131 118 ns = dict(a=1, b=0)
132 t = task.Task("", push=ns, pull=['a','b'])
119 t = task.StringTask("", push=ns, pull=['a','b'])
133 120 d.addCallback(lambda r: self.tc.run(t))
134 121 d.addCallback(self.tc.get_task_result, block=True)
135 122 d.addCallback(lambda tr: {'a':tr.ns.a, 'b':tr['b']})
136 123 d.addCallback(lambda r: self.assertEquals(r, ns))
137 124 return d
138 125
139 def testTaskResults(self):
126 def test_string_task_results(self):
140 127 self.addEngine(1)
141 t1 = task.Task('a=5', pull='a')
128 t1 = task.StringTask('a=5', pull='a')
142 129 d = self.tc.run(t1)
143 130 d.addCallback(self.tc.get_task_result, block=True)
144 d.addCallback(lambda tr: (tr.ns.a,tr['a'],tr.failure, tr.raiseException()))
131 d.addCallback(lambda tr: (tr.ns.a,tr['a'],tr.failure, tr.raise_exception()))
145 132 d.addCallback(lambda r: self.assertEquals(r, (5,5,None,None)))
146 133
147 t2 = task.Task('7=5')
134 t2 = task.StringTask('7=5')
148 135 d.addCallback(lambda r: self.tc.run(t2))
149 136 d.addCallback(self.tc.get_task_result, block=True)
150 137 d.addCallback(lambda tr: tr.ns)
151 138 d.addErrback(lambda f: self.assertRaises(SyntaxError, f.raiseException))
152 139
153 t3 = task.Task('', pull='b')
140 t3 = task.StringTask('', pull='b')
154 141 d.addCallback(lambda r: self.tc.run(t3))
155 142 d.addCallback(self.tc.get_task_result, block=True)
156 143 d.addCallback(lambda tr: tr.ns)
157 144 d.addErrback(lambda f: self.assertRaises(NameError, f.raiseException))
158 145 return d
146
147 def test_map_task(self):
148 self.addEngine(1)
149 t1 = task.MapTask(lambda x: 2*x,(10,))
150 d = self.tc.run(t1)
151 d.addCallback(self.tc.get_task_result, block=True)
152 d.addCallback(lambda r: self.assertEquals(r,20))
153
154 t2 = task.MapTask(lambda : 20)
155 d.addCallback(lambda _: self.tc.run(t2))
156 d.addCallback(self.tc.get_task_result, block=True)
157 d.addCallback(lambda r: self.assertEquals(r,20))
158
159 t3 = task.MapTask(lambda x: x,(),{'x':20})
160 d.addCallback(lambda _: self.tc.run(t3))
161 d.addCallback(self.tc.get_task_result, block=True)
162 d.addCallback(lambda r: self.assertEquals(r,20))
163 return d
164
165 def test_map_task_failure(self):
166 self.addEngine(1)
167 t1 = task.MapTask(lambda x: 1/0,(10,))
168 d = self.tc.run(t1)
169 d.addCallback(self.tc.get_task_result, block=True)
170 d.addErrback(lambda f: self.assertRaises(ZeroDivisionError, f.raiseException))
171 return d
172
173 def test_map_task_args(self):
174 self.assertRaises(TypeError, task.MapTask, 'asdfasdf')
175 self.assertRaises(TypeError, task.MapTask, lambda x: x, 10)
176 self.assertRaises(TypeError, task.MapTask, lambda x: x, (10,),30)
177
178 def test_clear(self):
179 self.addEngine(1)
180 t1 = task.MapTask(lambda x: 2*x,(10,))
181 d = self.tc.run(t1)
182 d.addCallback(lambda _: self.tc.get_task_result(0, block=True))
183 d.addCallback(lambda r: self.assertEquals(r,20))
184 d.addCallback(lambda _: self.tc.clear())
185 d.addCallback(lambda _: self.tc.get_task_result(0, block=True))
186 d.addErrback(lambda f: self.assertRaises(IndexError, f.raiseException))
187 return d
@@ -1,92 +1,92 b''
1 1 # encoding: utf-8
2 2
3 3 """This file contains unittests for the enginepb.py module."""
4 4
5 5 __docformat__ = "restructuredtext en"
6 6
7 7 #-------------------------------------------------------------------------------
8 8 # Copyright (C) 2008 The IPython Development Team
9 9 #
10 10 # Distributed under the terms of the BSD License. The full license is in
11 11 # the file COPYING, distributed as part of this software.
12 12 #-------------------------------------------------------------------------------
13 13
14 14 #-------------------------------------------------------------------------------
15 15 # Imports
16 16 #-------------------------------------------------------------------------------
17 17
18 18 try:
19 19 from twisted.python import components
20 20 from twisted.internet import reactor, defer
21 21 from twisted.spread import pb
22 22 from twisted.internet.base import DelayedCall
23 23 DelayedCall.debug = True
24 24
25 25 import zope.interface as zi
26 26
27 27 from IPython.kernel.fcutil import Tub, UnauthenticatedTub
28 28 from IPython.kernel import engineservice as es
29 29 from IPython.testing.util import DeferredTestCase
30 30 from IPython.kernel.controllerservice import IControllerBase
31 31 from IPython.kernel.enginefc import FCRemoteEngineRefFromService, IEngineBase
32 32 from IPython.kernel.engineservice import IEngineQueued
33 33 from IPython.kernel.engineconnector import EngineConnector
34 34
35 35 from IPython.kernel.tests.engineservicetest import \
36 36 IEngineCoreTestCase, \
37 37 IEngineSerializedTestCase, \
38 38 IEngineQueuedTestCase
39 39 except ImportError:
40 40 print "we got an error!!!"
41 pass
41 raise
42 42 else:
43 43 class EngineFCTest(DeferredTestCase,
44 44 IEngineCoreTestCase,
45 45 IEngineSerializedTestCase,
46 46 IEngineQueuedTestCase
47 47 ):
48 48
49 49 zi.implements(IControllerBase)
50 50
51 51 def setUp(self):
52 52
53 53 # Start a server and append to self.servers
54 54 self.controller_reference = FCRemoteEngineRefFromService(self)
55 55 self.controller_tub = Tub()
56 56 self.controller_tub.listenOn('tcp:10105:interface=127.0.0.1')
57 57 self.controller_tub.setLocation('127.0.0.1:10105')
58 58
59 59 furl = self.controller_tub.registerReference(self.controller_reference)
60 60 self.controller_tub.startService()
61 61
62 62 # Start an EngineService and append to services/client
63 63 self.engine_service = es.EngineService()
64 64 self.engine_service.startService()
65 65 self.engine_tub = Tub()
66 66 self.engine_tub.startService()
67 67 engine_connector = EngineConnector(self.engine_tub)
68 68 d = engine_connector.connect_to_controller(self.engine_service, furl)
69 69 # This deferred doesn't fire until after register_engine has returned and
70 70 # thus, self.engine has been defined and the tets can proceed.
71 71 return d
72 72
73 73 def tearDown(self):
74 74 dlist = []
75 75 # Shut down the engine
76 76 d = self.engine_tub.stopService()
77 77 dlist.append(d)
78 78 # Shut down the controller
79 79 d = self.controller_tub.stopService()
80 80 dlist.append(d)
81 81 return defer.DeferredList(dlist)
82 82
83 83 #---------------------------------------------------------------------------
84 84 # Make me look like a basic controller
85 85 #---------------------------------------------------------------------------
86 86
87 87 def register_engine(self, engine_ref, id=None, ip=None, port=None, pid=None):
88 88 self.engine = IEngineQueued(IEngineBase(engine_ref))
89 89 return {'id':id}
90 90
91 91 def unregister_engine(self, id):
92 92 pass No newline at end of file
@@ -1,70 +1,144 b''
1 1 #!/usr/bin/env python
2 2 # encoding: utf-8
3 3
4 4 __docformat__ = "restructuredtext en"
5 5
6 6 #-------------------------------------------------------------------------------
7 7 # Copyright (C) 2008 The IPython Development Team
8 8 #
9 9 # Distributed under the terms of the BSD License. The full license is in
10 10 # the file COPYING, distributed as part of this software.
11 11 #-------------------------------------------------------------------------------
12 12
13 13 #-------------------------------------------------------------------------------
14 14 # Imports
15 15 #-------------------------------------------------------------------------------
16 16
17 17 try:
18 18 from twisted.internet import defer, reactor
19 19
20 20 from IPython.kernel.fcutil import Tub, UnauthenticatedTub
21 21
22 22 from IPython.testing.util import DeferredTestCase
23 23 from IPython.kernel.controllerservice import ControllerService
24 24 from IPython.kernel.multiengine import IMultiEngine
25 25 from IPython.kernel.tests.multienginetest import IFullSynchronousMultiEngineTestCase
26 26 from IPython.kernel.multienginefc import IFCSynchronousMultiEngine
27 27 from IPython.kernel import multiengine as me
28 28 from IPython.kernel.clientconnector import ClientConnector
29 from IPython.kernel.parallelfunction import ParallelFunction
30 from IPython.kernel.error import CompositeError
31 from IPython.kernel.util import printer
29 32 except ImportError:
30 33 pass
31 34 else:
35
36 def _raise_it(f):
37 try:
38 f.raiseException()
39 except CompositeError, e:
40 e.raise_exception()
41
42
32 43 class FullSynchronousMultiEngineTestCase(DeferredTestCase, IFullSynchronousMultiEngineTestCase):
33 44
34 45 def setUp(self):
35 46
36 47 self.engines = []
37 48
38 49 self.controller = ControllerService()
39 50 self.controller.startService()
40 51 self.imultiengine = IMultiEngine(self.controller)
41 52 self.mec_referenceable = IFCSynchronousMultiEngine(self.imultiengine)
42 53
43 54 self.controller_tub = Tub()
44 55 self.controller_tub.listenOn('tcp:10105:interface=127.0.0.1')
45 56 self.controller_tub.setLocation('127.0.0.1:10105')
46 57
47 58 furl = self.controller_tub.registerReference(self.mec_referenceable)
48 59 self.controller_tub.startService()
49 60
50 61 self.client_tub = ClientConnector()
51 62 d = self.client_tub.get_multiengine_client(furl)
52 63 d.addCallback(self.handle_got_client)
53 64 return d
54 65
55 66 def handle_got_client(self, client):
56 67 self.multiengine = client
57 68
58 69 def tearDown(self):
59 70 dlist = []
60 71 # Shut down the multiengine client
61 72 d = self.client_tub.tub.stopService()
62 73 dlist.append(d)
63 74 # Shut down the engines
64 75 for e in self.engines:
65 76 e.stopService()
66 77 # Shut down the controller
67 78 d = self.controller_tub.stopService()
68 79 d.addBoth(lambda _: self.controller.stopService())
69 80 dlist.append(d)
70 81 return defer.DeferredList(dlist)
82
83 def test_mapper(self):
84 self.addEngine(4)
85 m = self.multiengine.mapper()
86 self.assertEquals(m.multiengine,self.multiengine)
87 self.assertEquals(m.dist,'b')
88 self.assertEquals(m.targets,'all')
89 self.assertEquals(m.block,True)
90
91 def test_map_default(self):
92 self.addEngine(4)
93 m = self.multiengine.mapper()
94 d = m.map(lambda x: 2*x, range(10))
95 d.addCallback(lambda r: self.assertEquals(r,[2*x for x in range(10)]))
96 d.addCallback(lambda _: self.multiengine.map(lambda x: 2*x, range(10)))
97 d.addCallback(lambda r: self.assertEquals(r,[2*x for x in range(10)]))
98 return d
99
100 def test_map_noblock(self):
101 self.addEngine(4)
102 m = self.multiengine.mapper(block=False)
103 d = m.map(lambda x: 2*x, range(10))
104 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
105 d.addCallback(lambda r: self.assertEquals(r,[2*x for x in range(10)]))
106 return d
107
108 def test_mapper_fail(self):
109 self.addEngine(4)
110 m = self.multiengine.mapper()
111 d = m.map(lambda x: 1/0, range(10))
112 d.addBoth(lambda f: self.assertRaises(ZeroDivisionError, _raise_it, f))
113 return d
114
115 def test_parallel(self):
116 self.addEngine(4)
117 p = self.multiengine.parallel()
118 self.assert_(isinstance(p, ParallelFunction))
119 @p
120 def f(x): return 2*x
121 d = f(range(10))
122 d.addCallback(lambda r: self.assertEquals(r,[2*x for x in range(10)]))
123 return d
124
125 def test_parallel_noblock(self):
126 self.addEngine(1)
127 p = self.multiengine.parallel(block=False)
128 self.assert_(isinstance(p, ParallelFunction))
129 @p
130 def f(x): return 2*x
131 d = f(range(10))
132 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
133 d.addCallback(lambda r: self.assertEquals(r,[2*x for x in range(10)]))
134 return d
135
136 def test_parallel_fail(self):
137 self.addEngine(4)
138 p = self.multiengine.parallel()
139 self.assert_(isinstance(p, ParallelFunction))
140 @p
141 def f(x): return 1/0
142 d = f(range(10))
143 d.addBoth(lambda f: self.assertRaises(ZeroDivisionError, _raise_it, f))
144 return d No newline at end of file
@@ -1,186 +1,186 b''
1 1 #!/usr/bin/env python
2 2 # encoding: utf-8
3 3
4 4 """Tests for pendingdeferred.py"""
5 5
6 6 __docformat__ = "restructuredtext en"
7 7
8 8 #-------------------------------------------------------------------------------
9 9 # Copyright (C) 2008 The IPython Development Team
10 10 #
11 11 # Distributed under the terms of the BSD License. The full license is in
12 12 # the file COPYING, distributed as part of this software.
13 13 #-------------------------------------------------------------------------------
14 14
15 15 #-------------------------------------------------------------------------------
16 16 # Imports
17 17 #-------------------------------------------------------------------------------
18 18
19 19 try:
20 20 from twisted.internet import defer
21 21 from twisted.python import failure
22 22
23 from IPython.testing import tcommon
24 #from IPython.testing.tcommon import *
25 23 from IPython.testing.util import DeferredTestCase
26 24 import IPython.kernel.pendingdeferred as pd
27 25 from IPython.kernel import error
28 26 from IPython.kernel.util import printer
29 27 except ImportError:
30 28 pass
31 29 else:
30
31 class Foo(object):
32
33 def bar(self, bahz):
34 return defer.succeed('blahblah: %s' % bahz)
32 35
33 36 class TwoPhaseFoo(pd.PendingDeferredManager):
34 37
35 38 def __init__(self, foo):
36 39 self.foo = foo
37 40 pd.PendingDeferredManager.__init__(self)
38 41
39 42 @pd.two_phase
40 43 def bar(self, bahz):
41 44 return self.foo.bar(bahz)
42 45
43 46 class PendingDeferredManagerTest(DeferredTestCase):
44 47
45 48 def setUp(self):
46 49 self.pdm = pd.PendingDeferredManager()
47 50
48 51 def tearDown(self):
49 52 pass
50 53
51 54 def testBasic(self):
52 55 dDict = {}
53 56 # Create 10 deferreds and save them
54 57 for i in range(10):
55 58 d = defer.Deferred()
56 59 did = self.pdm.save_pending_deferred(d)
57 60 dDict[did] = d
58 61 # Make sure they are begin saved
59 62 for k in dDict.keys():
60 63 self.assert_(self.pdm.quick_has_id(k))
61 64 # Get the pending deferred (block=True), then callback with 'foo' and compare
62 65 for did in dDict.keys()[0:5]:
63 66 d = self.pdm.get_pending_deferred(did,block=True)
64 67 dDict[did].callback('foo')
65 68 d.addCallback(lambda r: self.assert_(r=='foo'))
66 69 # Get the pending deferreds with (block=False) and make sure ResultNotCompleted is raised
67 70 for did in dDict.keys()[5:10]:
68 71 d = self.pdm.get_pending_deferred(did,block=False)
69 72 d.addErrback(lambda f: self.assertRaises(error.ResultNotCompleted, f.raiseException))
70 73 # Now callback the last 5, get them and compare.
71 74 for did in dDict.keys()[5:10]:
72 75 dDict[did].callback('foo')
73 76 d = self.pdm.get_pending_deferred(did,block=False)
74 77 d.addCallback(lambda r: self.assert_(r=='foo'))
75 78
76 79 def test_save_then_delete(self):
77 80 d = defer.Deferred()
78 81 did = self.pdm.save_pending_deferred(d)
79 82 self.assert_(self.pdm.quick_has_id(did))
80 83 self.pdm.delete_pending_deferred(did)
81 84 self.assert_(not self.pdm.quick_has_id(did))
82 85
83 86 def test_save_get_delete(self):
84 87 d = defer.Deferred()
85 88 did = self.pdm.save_pending_deferred(d)
86 89 d2 = self.pdm.get_pending_deferred(did,True)
87 90 d2.addErrback(lambda f: self.assertRaises(error.AbortedPendingDeferredError, f.raiseException))
88 91 self.pdm.delete_pending_deferred(did)
89 92 return d2
90 93
91 94 def test_double_get(self):
92 95 d = defer.Deferred()
93 96 did = self.pdm.save_pending_deferred(d)
94 97 d2 = self.pdm.get_pending_deferred(did,True)
95 98 d3 = self.pdm.get_pending_deferred(did,True)
96 99 d3.addErrback(lambda f: self.assertRaises(error.InvalidDeferredID, f.raiseException))
97 100
98 101 def test_get_after_callback(self):
99 102 d = defer.Deferred()
100 103 did = self.pdm.save_pending_deferred(d)
101 104 d.callback('foo')
102 105 d2 = self.pdm.get_pending_deferred(did,True)
103 106 d2.addCallback(lambda r: self.assertEquals(r,'foo'))
104 107 self.assert_(not self.pdm.quick_has_id(did))
105 108
106 109 def test_get_before_callback(self):
107 110 d = defer.Deferred()
108 111 did = self.pdm.save_pending_deferred(d)
109 112 d2 = self.pdm.get_pending_deferred(did,True)
110 113 d.callback('foo')
111 114 d2.addCallback(lambda r: self.assertEquals(r,'foo'))
112 115 self.assert_(not self.pdm.quick_has_id(did))
113 116 d = defer.Deferred()
114 117 did = self.pdm.save_pending_deferred(d)
115 118 d2 = self.pdm.get_pending_deferred(did,True)
116 119 d2.addCallback(lambda r: self.assertEquals(r,'foo'))
117 120 d.callback('foo')
118 121 self.assert_(not self.pdm.quick_has_id(did))
119 122
120 123 def test_get_after_errback(self):
121 124 class MyError(Exception):
122 125 pass
123 126 d = defer.Deferred()
124 127 did = self.pdm.save_pending_deferred(d)
125 128 d.errback(failure.Failure(MyError('foo')))
126 129 d2 = self.pdm.get_pending_deferred(did,True)
127 130 d2.addErrback(lambda f: self.assertRaises(MyError, f.raiseException))
128 131 self.assert_(not self.pdm.quick_has_id(did))
129 132
130 133 def test_get_before_errback(self):
131 134 class MyError(Exception):
132 135 pass
133 136 d = defer.Deferred()
134 137 did = self.pdm.save_pending_deferred(d)
135 138 d2 = self.pdm.get_pending_deferred(did,True)
136 139 d.errback(failure.Failure(MyError('foo')))
137 140 d2.addErrback(lambda f: self.assertRaises(MyError, f.raiseException))
138 141 self.assert_(not self.pdm.quick_has_id(did))
139 142 d = defer.Deferred()
140 143 did = self.pdm.save_pending_deferred(d)
141 144 d2 = self.pdm.get_pending_deferred(did,True)
142 145 d2.addErrback(lambda f: self.assertRaises(MyError, f.raiseException))
143 146 d.errback(failure.Failure(MyError('foo')))
144 147 self.assert_(not self.pdm.quick_has_id(did))
145 148
146 149 def test_noresult_noblock(self):
147 150 d = defer.Deferred()
148 151 did = self.pdm.save_pending_deferred(d)
149 152 d2 = self.pdm.get_pending_deferred(did,False)
150 153 d2.addErrback(lambda f: self.assertRaises(error.ResultNotCompleted, f.raiseException))
151 154
152 155 def test_with_callbacks(self):
153 156 d = defer.Deferred()
154 157 d.addCallback(lambda r: r+' foo')
155 158 d.addCallback(lambda r: r+' bar')
156 159 did = self.pdm.save_pending_deferred(d)
157 160 d2 = self.pdm.get_pending_deferred(did,True)
158 161 d.callback('bam')
159 162 d2.addCallback(lambda r: self.assertEquals(r,'bam foo bar'))
160 163
161 164 def test_with_errbacks(self):
162 165 class MyError(Exception):
163 166 pass
164 167 d = defer.Deferred()
165 168 d.addCallback(lambda r: 'foo')
166 169 d.addErrback(lambda f: 'caught error')
167 170 did = self.pdm.save_pending_deferred(d)
168 171 d2 = self.pdm.get_pending_deferred(did,True)
169 172 d.errback(failure.Failure(MyError('bam')))
170 173 d2.addErrback(lambda f: self.assertRaises(MyError, f.raiseException))
171 174
172 175 def test_nested_deferreds(self):
173 176 d = defer.Deferred()
174 177 d2 = defer.Deferred()
175 178 d.addCallback(lambda r: d2)
176 179 did = self.pdm.save_pending_deferred(d)
177 180 d.callback('foo')
178 181 d3 = self.pdm.get_pending_deferred(did,False)
179 182 d3.addErrback(lambda f: self.assertRaises(error.ResultNotCompleted, f.raiseException))
180 183 d2.callback('bar')
181 184 d3 = self.pdm.get_pending_deferred(did,False)
182 185 d3.addCallback(lambda r: self.assertEquals(r,'bar'))
183 186
184
185 # Global object expected by Twisted's trial
186 testSuite = lambda : makeTestSuite(__name__,dt_files,dt_modules)
@@ -1,90 +1,161 b''
1 1 #!/usr/bin/env python
2 2 # encoding: utf-8
3 3
4 4 __docformat__ = "restructuredtext en"
5 5
6 6 #-------------------------------------------------------------------------------
7 7 # Copyright (C) 2008 The IPython Development Team
8 8 #
9 9 # Distributed under the terms of the BSD License. The full license is in
10 10 # the file COPYING, distributed as part of this software.
11 11 #-------------------------------------------------------------------------------
12 12
13 13 #-------------------------------------------------------------------------------
14 14 # Imports
15 15 #-------------------------------------------------------------------------------
16 16
17 17 try:
18 18 import time
19 19
20 20 from twisted.internet import defer, reactor
21 21
22 22 from IPython.kernel.fcutil import Tub, UnauthenticatedTub
23 23
24 24 from IPython.kernel import task as taskmodule
25 25 from IPython.kernel import controllerservice as cs
26 26 import IPython.kernel.multiengine as me
27 27 from IPython.testing.util import DeferredTestCase
28 28 from IPython.kernel.multienginefc import IFCSynchronousMultiEngine
29 29 from IPython.kernel.taskfc import IFCTaskController
30 30 from IPython.kernel.util import printer
31 31 from IPython.kernel.tests.tasktest import ITaskControllerTestCase
32 32 from IPython.kernel.clientconnector import ClientConnector
33 from IPython.kernel.error import CompositeError
34 from IPython.kernel.parallelfunction import ParallelFunction
33 35 except ImportError:
34 36 pass
35 37 else:
36 38
37 39 #-------------------------------------------------------------------------------
38 40 # Tests
39 41 #-------------------------------------------------------------------------------
40 42
43 def _raise_it(f):
44 try:
45 f.raiseException()
46 except CompositeError, e:
47 e.raise_exception()
48
41 49 class TaskTest(DeferredTestCase, ITaskControllerTestCase):
42 50
43 51 def setUp(self):
44 52
45 53 self.engines = []
46 54
47 55 self.controller = cs.ControllerService()
48 56 self.controller.startService()
49 57 self.imultiengine = me.IMultiEngine(self.controller)
50 58 self.itc = taskmodule.ITaskController(self.controller)
51 59 self.itc.failurePenalty = 0
52 60
53 61 self.mec_referenceable = IFCSynchronousMultiEngine(self.imultiengine)
54 62 self.tc_referenceable = IFCTaskController(self.itc)
55 63
56 64 self.controller_tub = Tub()
57 65 self.controller_tub.listenOn('tcp:10105:interface=127.0.0.1')
58 66 self.controller_tub.setLocation('127.0.0.1:10105')
59 67
60 68 mec_furl = self.controller_tub.registerReference(self.mec_referenceable)
61 69 tc_furl = self.controller_tub.registerReference(self.tc_referenceable)
62 70 self.controller_tub.startService()
63 71
64 72 self.client_tub = ClientConnector()
65 73 d = self.client_tub.get_multiengine_client(mec_furl)
66 74 d.addCallback(self.handle_mec_client)
67 75 d.addCallback(lambda _: self.client_tub.get_task_client(tc_furl))
68 76 d.addCallback(self.handle_tc_client)
69 77 return d
70 78
71 79 def handle_mec_client(self, client):
72 80 self.multiengine = client
73 81
74 82 def handle_tc_client(self, client):
75 83 self.tc = client
76 84
77 85 def tearDown(self):
78 86 dlist = []
79 87 # Shut down the multiengine client
80 88 d = self.client_tub.tub.stopService()
81 89 dlist.append(d)
82 90 # Shut down the engines
83 91 for e in self.engines:
84 92 e.stopService()
85 93 # Shut down the controller
86 94 d = self.controller_tub.stopService()
87 95 d.addBoth(lambda _: self.controller.stopService())
88 96 dlist.append(d)
89 97 return defer.DeferredList(dlist)
90
98
99 def test_mapper(self):
100 self.addEngine(1)
101 m = self.tc.mapper()
102 self.assertEquals(m.task_controller,self.tc)
103 self.assertEquals(m.clear_before,False)
104 self.assertEquals(m.clear_after,False)
105 self.assertEquals(m.retries,0)
106 self.assertEquals(m.recovery_task,None)
107 self.assertEquals(m.depend,None)
108 self.assertEquals(m.block,True)
109
110 def test_map_default(self):
111 self.addEngine(1)
112 m = self.tc.mapper()
113 d = m.map(lambda x: 2*x, range(10))
114 d.addCallback(lambda r: self.assertEquals(r,[2*x for x in range(10)]))
115 d.addCallback(lambda _: self.tc.map(lambda x: 2*x, range(10)))
116 d.addCallback(lambda r: self.assertEquals(r,[2*x for x in range(10)]))
117 return d
118
119 def test_map_noblock(self):
120 self.addEngine(1)
121 m = self.tc.mapper(block=False)
122 d = m.map(lambda x: 2*x, range(10))
123 d.addCallback(lambda r: self.assertEquals(r,[x for x in range(10)]))
124 return d
125
126 def test_mapper_fail(self):
127 self.addEngine(1)
128 m = self.tc.mapper()
129 d = m.map(lambda x: 1/0, range(10))
130 d.addBoth(lambda f: self.assertRaises(ZeroDivisionError, _raise_it, f))
131 return d
132
133 def test_parallel(self):
134 self.addEngine(1)
135 p = self.tc.parallel()
136 self.assert_(isinstance(p, ParallelFunction))
137 @p
138 def f(x): return 2*x
139 d = f(range(10))
140 d.addCallback(lambda r: self.assertEquals(r,[2*x for x in range(10)]))
141 return d
142
143 def test_parallel_noblock(self):
144 self.addEngine(1)
145 p = self.tc.parallel(block=False)
146 self.assert_(isinstance(p, ParallelFunction))
147 @p
148 def f(x): return 2*x
149 d = f(range(10))
150 d.addCallback(lambda r: self.assertEquals(r,[x for x in range(10)]))
151 return d
152
153 def test_parallel_fail(self):
154 self.addEngine(1)
155 p = self.tc.parallel()
156 self.assert_(isinstance(p, ParallelFunction))
157 @p
158 def f(x): return 1/0
159 d = f(range(10))
160 d.addBoth(lambda f: self.assertRaises(ZeroDivisionError, _raise_it, f))
161 return d No newline at end of file
@@ -1,90 +1,90 b''
1 1 """
2 2 An exceptionally lousy site spider
3 3 Ken Kinder <ken@kenkinder.com>
4 4
5 5 This module gives an example of how the TaskClient interface to the
6 6 IPython controller works. Before running this script start the IPython controller
7 7 and some engines using something like::
8 8
9 9 ipcluster -n 4
10 10 """
11 11 from twisted.python.failure import Failure
12 12 from IPython.kernel import client
13 13 import time
14 14
15 15 fetchParse = """
16 16 from twisted.web import microdom
17 17 import urllib2
18 18 import urlparse
19 19
20 20 def fetchAndParse(url, data=None):
21 21 links = []
22 22 try:
23 23 page = urllib2.urlopen(url, data=data)
24 24 except Exception:
25 25 return links
26 26 else:
27 27 if page.headers.type == 'text/html':
28 28 doc = microdom.parseString(page.read(), beExtremelyLenient=True)
29 29 for node in doc.getElementsByTagName('a'):
30 30 if node.getAttribute('href'):
31 31 links.append(urlparse.urljoin(url, node.getAttribute('href')))
32 32 return links
33 33 """
34 34
35 35 class DistributedSpider(object):
36 36
37 37 # Time to wait between polling for task results.
38 38 pollingDelay = 0.5
39 39
40 40 def __init__(self, site):
41 41 self.tc = client.TaskClient()
42 42 self.rc = client.MultiEngineClient()
43 43 self.rc.execute(fetchParse)
44 44
45 45 self.allLinks = []
46 46 self.linksWorking = {}
47 47 self.linksDone = {}
48 48
49 49 self.site = site
50 50
51 51 def visitLink(self, url):
52 52 if url not in self.allLinks:
53 53 self.allLinks.append(url)
54 54 if url.startswith(self.site):
55 55 print ' ', url
56 self.linksWorking[url] = self.tc.run(client.Task('links = fetchAndParse(url)', pull=['links'], push={'url': url}))
56 self.linksWorking[url] = self.tc.run(client.StringTask('links = fetchAndParse(url)', pull=['links'], push={'url': url}))
57 57
58 58 def onVisitDone(self, result, url):
59 59 print url, ':'
60 60 self.linksDone[url] = None
61 61 del self.linksWorking[url]
62 62 if isinstance(result.failure, Failure):
63 63 txt = result.failure.getTraceback()
64 64 for line in txt.split('\n'):
65 65 print ' ', line
66 66 else:
67 67 for link in result.ns.links:
68 68 self.visitLink(link)
69 69
70 70 def run(self):
71 71 self.visitLink(self.site)
72 72 while self.linksWorking:
73 73 print len(self.linksWorking), 'pending...'
74 74 self.synchronize()
75 75 time.sleep(self.pollingDelay)
76 76
77 77 def synchronize(self):
78 78 for url, taskId in self.linksWorking.items():
79 79 # Calling get_task_result with block=False will return None if the
80 80 # task is not done yet. This provides a simple way of polling.
81 81 result = self.tc.get_task_result(taskId, block=False)
82 82 if result is not None:
83 83 self.onVisitDone(result, url)
84 84
85 85 def main():
86 86 distributedSpider = DistributedSpider(raw_input('Enter site to crawl: '))
87 87 distributedSpider.run()
88 88
89 89 if __name__ == '__main__':
90 90 main()
@@ -1,14 +1,14 b''
1 1 """
2 2 A Distributed Hello world
3 3 Ken Kinder <ken@kenkinder.com>
4 4 """
5 5 from IPython.kernel import client
6 6
7 7 tc = client.TaskClient()
8 8 mec = client.MultiEngineClient()
9 9
10 10 mec.execute('import time')
11 hello_taskid = tc.run(client.Task('time.sleep(3) ; word = "Hello,"', pull=('word')))
12 world_taskid = tc.run(client.Task('time.sleep(3) ; word = "World!"', pull=('word')))
11 hello_taskid = tc.run(client.StringTask('time.sleep(3) ; word = "Hello,"', pull=('word')))
12 world_taskid = tc.run(client.StringTask('time.sleep(3) ; word = "World!"', pull=('word')))
13 13 print "Submitted tasks:", hello_taskid, world_taskid
14 14 print tc.get_task_result(hello_taskid,block=True).ns.word, tc.get_task_result(world_taskid,block=True).ns.word
@@ -1,71 +1,71 b''
1 1 #!/usr/bin/env python
2 2 # encoding: utf-8
3 3 """Run a Monte-Carlo options pricer in parallel."""
4 4
5 5 from IPython.kernel import client
6 6 import numpy as N
7 7 from mcpricer import MCOptionPricer
8 8
9 9
10 10 tc = client.TaskClient()
11 11 rc = client.MultiEngineClient()
12 12
13 13 # Initialize the common code on the engines
14 14 rc.run('mcpricer.py')
15 15
16 16 # Push the variables that won't change
17 17 #(stock print, interest rate, days and MC paths)
18 18 rc.push(dict(S=100.0, r=0.05, days=260, paths=10000))
19 19
20 20 task_string = """\
21 21 op = MCOptionPricer(S,K,sigma,r,days,paths)
22 22 op.run()
23 23 vp, ap, vc, ac = op.vanilla_put, op.asian_put, op.vanilla_call, op.asian_call
24 24 """
25 25
26 26 # Create arrays of strike prices and volatilities
27 27 K_vals = N.linspace(90.0,100.0,5)
28 28 sigma_vals = N.linspace(0.0, 0.2,5)
29 29
30 30 # Submit tasks
31 31 taskids = []
32 32 for K in K_vals:
33 33 for sigma in sigma_vals:
34 t = client.Task(task_string,
34 t = client.StringTask(task_string,
35 35 push=dict(sigma=sigma,K=K),
36 36 pull=('vp','ap','vc','ac','sigma','K'))
37 37 taskids.append(tc.run(t))
38 38
39 39 print "Submitted tasks: ", taskids
40 40
41 41 # Block until tasks are completed
42 42 tc.barrier(taskids)
43 43
44 44 # Get the results
45 45 results = [tc.get_task_result(tid) for tid in taskids]
46 46
47 47 # Assemble the result
48 48 vc = N.empty(K_vals.shape[0]*sigma_vals.shape[0],dtype='float64')
49 49 vp = N.empty(K_vals.shape[0]*sigma_vals.shape[0],dtype='float64')
50 50 ac = N.empty(K_vals.shape[0]*sigma_vals.shape[0],dtype='float64')
51 51 ap = N.empty(K_vals.shape[0]*sigma_vals.shape[0],dtype='float64')
52 52 for i, tr in enumerate(results):
53 53 ns = tr.ns
54 54 vc[i] = ns.vc
55 55 vp[i] = ns.vp
56 56 ac[i] = ns.ac
57 57 ap[i] = ns.ap
58 58 vc.shape = (K_vals.shape[0],sigma_vals.shape[0])
59 59 vp.shape = (K_vals.shape[0],sigma_vals.shape[0])
60 60 ac.shape = (K_vals.shape[0],sigma_vals.shape[0])
61 61 ap.shape = (K_vals.shape[0],sigma_vals.shape[0])
62 62
63 63
64 64 def plot_options(K_vals, sigma_vals, prices):
65 65 """Make a contour plot of the option prices."""
66 66 import pylab
67 67 pylab.contourf(sigma_vals, K_vals, prices)
68 68 pylab.colorbar()
69 69 pylab.title("Option Price")
70 70 pylab.xlabel("Volatility")
71 71 pylab.ylabel("Strike Price")
@@ -1,18 +1,18 b''
1 1 from IPython.kernel import client
2 2
3 3 tc = client.TaskClient()
4 4 rc = client.MultiEngineClient()
5 5
6 6 rc.push(dict(d=30))
7 7
8 8 cmd1 = """\
9 9 a = 5
10 10 b = 10*d
11 11 c = a*b*d
12 12 """
13 13
14 t1 = client.Task(cmd1, clear_before=False, clear_after=True, pull=['a','b','c'])
14 t1 = client.StringTask(cmd1, clear_before=False, clear_after=True, pull=['a','b','c'])
15 15 tid1 = tc.run(t1)
16 16 tr1 = tc.get_task_result(tid1,block=True)
17 tr1.raiseException()
17 tr1.raise_exception()
18 18 print "a, b: ", tr1.ns.a, tr1.ns.b No newline at end of file
@@ -1,44 +1,44 b''
1 1 #!/usr/bin/env python
2 2 # encoding: utf-8
3 3
4 4 from IPython.kernel import client
5 5 import time
6 6
7 7 tc = client.TaskClient()
8 8 mec = client.MultiEngineClient()
9 9
10 10 mec.execute('import time')
11 11
12 12 for i in range(24):
13 tc.irun('time.sleep(1)')
13 tc.run(client.StringTask('time.sleep(1)'))
14 14
15 15 for i in range(6):
16 16 time.sleep(1.0)
17 17 print "Queue status (vebose=False)"
18 18 print tc.queue_status()
19 19
20 20 for i in range(24):
21 tc.irun('time.sleep(1)')
21 tc.run(client.StringTask('time.sleep(1)'))
22 22
23 23 for i in range(6):
24 24 time.sleep(1.0)
25 25 print "Queue status (vebose=True)"
26 26 print tc.queue_status(True)
27 27
28 28 for i in range(12):
29 tc.irun('time.sleep(2)')
29 tc.run(client.StringTask('time.sleep(2)'))
30 30
31 31 print "Queue status (vebose=True)"
32 32 print tc.queue_status(True)
33 33
34 34 qs = tc.queue_status(True)
35 35 sched = qs['scheduled']
36 36
37 37 for tid in sched[-4:]:
38 38 tc.abort(tid)
39 39
40 40 for i in range(6):
41 41 time.sleep(1.0)
42 42 print "Queue status (vebose=True)"
43 43 print tc.queue_status(True)
44 44
@@ -1,77 +1,77 b''
1 1 #!/usr/bin/env python
2 2 """Test the performance of the task farming system.
3 3
4 4 This script submits a set of tasks to the TaskClient. The tasks
5 5 are basically just a time.sleep(t), where t is a random number between
6 6 two limits that can be configured at the command line. To run
7 7 the script there must first be an IPython controller and engines running::
8 8
9 9 ipcluster -n 16
10 10
11 11 A good test to run with 16 engines is::
12 12
13 13 python task_profiler.py -n 128 -t 0.01 -T 1.0
14 14
15 15 This should show a speedup of 13-14x. The limitation here is that the
16 16 overhead of a single task is about 0.001-0.01 seconds.
17 17 """
18 18 import random, sys
19 19 from optparse import OptionParser
20 20
21 21 from IPython.genutils import time
22 22 from IPython.kernel import client
23 23
24 24 def main():
25 25 parser = OptionParser()
26 26 parser.set_defaults(n=100)
27 27 parser.set_defaults(tmin=1)
28 28 parser.set_defaults(tmax=60)
29 29 parser.set_defaults(controller='localhost')
30 30 parser.set_defaults(meport=10105)
31 31 parser.set_defaults(tport=10113)
32 32
33 33 parser.add_option("-n", type='int', dest='n',
34 34 help='the number of tasks to run')
35 35 parser.add_option("-t", type='float', dest='tmin',
36 36 help='the minimum task length in seconds')
37 37 parser.add_option("-T", type='float', dest='tmax',
38 38 help='the maximum task length in seconds')
39 39 parser.add_option("-c", type='string', dest='controller',
40 40 help='the address of the controller')
41 41 parser.add_option("-p", type='int', dest='meport',
42 42 help="the port on which the controller listens for the MultiEngine/RemoteController client")
43 43 parser.add_option("-P", type='int', dest='tport',
44 44 help="the port on which the controller listens for the TaskClient client")
45 45
46 46 (opts, args) = parser.parse_args()
47 47 assert opts.tmax >= opts.tmin, "tmax must not be smaller than tmin"
48 48
49 49 rc = client.MultiEngineClient()
50 50 tc = client.TaskClient()
51 51 print tc.task_controller
52 52 rc.block=True
53 53 nengines = len(rc.get_ids())
54 54 rc.execute('from IPython.genutils import time')
55 55
56 56 # the jobs should take a random time within a range
57 57 times = [random.random()*(opts.tmax-opts.tmin)+opts.tmin for i in range(opts.n)]
58 tasks = [client.Task("time.sleep(%f)"%t) for t in times]
58 tasks = [client.StringTask("time.sleep(%f)"%t) for t in times]
59 59 stime = sum(times)
60 60
61 61 print "executing %i tasks, totalling %.1f secs on %i engines"%(opts.n, stime, nengines)
62 62 time.sleep(1)
63 63 start = time.time()
64 64 taskids = [tc.run(t) for t in tasks]
65 65 tc.barrier(taskids)
66 66 stop = time.time()
67 67
68 68 ptime = stop-start
69 69 scale = stime/ptime
70 70
71 71 print "executed %.1f secs in %.1f secs"%(stime, ptime)
72 72 print "%.3fx parallel performance on %i engines"%(scale, nengines)
73 73 print "%.1f%% of theoretical max"%(100*scale/nengines)
74 74
75 75
76 76 if __name__ == '__main__':
77 77 main()
@@ -1,173 +1,195 b''
1 1 .. _changes:
2 2
3 3 ==========
4 4 What's new
5 5 ==========
6 6
7 7 .. contents::
8 8
9 9 Release 0.9
10 10 ===========
11 11
12 12 New features
13 13 ------------
14 14
15 * The notion of a task has been completely reworked. An `ITask` interface has
16 been created. This interface defines the methods that tasks need to implement.
17 These methods are now responsible for things like submitting tasks and processing
18 results. There are two basic task types: :class:`IPython.kernel.task.StringTask`
19 (this is the old `Task` object, but renamed) and the new
20 :class:`IPython.kernel.task.MapTask`, which is based on a function.
21 * A new interface, :class:`IPython.kernel.mapper.IMapper` has been defined to
22 standardize the idea of a `map` method. This interface has a single
23 `map` method that has the same syntax as the built-in `map`. We have also defined
24 a `mapper` factory interface that creates objects that implement
25 :class:`IPython.kernel.mapper.IMapper` for different controllers. Both
26 the multiengine and task controller now have mapping capabilities.
27 * The parallel function capabilities have been reworked. The major changes are that
28 i) there is now an `@parallel` magic that creates parallel functions, ii)
29 the syntax for multiple variables follows that of `map`, iii) both the
30 multiengine and task controller now have a parallel function implementation.
15 31 * All of the parallel computing capabilities from `ipython1-dev` have been merged into
16 32 IPython proper. This resulted in the following new subpackages:
17 33 :mod:`IPython.kernel`, :mod:`IPython.kernel.core`, :mod:`IPython.config`,
18 34 :mod:`IPython.tools` and :mod:`IPython.testing`.
19 35 * As part of merging in the `ipython1-dev` stuff, the `setup.py` script and friends
20 36 have been completely refactored. Now we are checking for dependencies using
21 37 the approach that matplotlib uses.
22 38 * The documentation has been completely reorganized to accept the documentation
23 39 from `ipython1-dev`.
24 40 * We have switched to using Foolscap for all of our network protocols in
25 41 :mod:`IPython.kernel`. This gives us secure connections that are both encrypted
26 42 and authenticated.
27 43 * We have a brand new `COPYING.txt` files that describes the IPython license
28 44 and copyright. The biggest change is that we are putting "The IPython
29 45 Development Team" as the copyright holder. We give more details about exactly
30 46 what this means in this file. All developer should read this and use the new
31 47 banner in all IPython source code files.
32 48 * sh profile: ./foo runs foo as system command, no need to do !./foo anymore
33 49 * String lists now support 'sort(field, nums = True)' method (to easily
34 50 sort system command output). Try it with 'a = !ls -l ; a.sort(1, nums=1)'
35 51 * '%cpaste foo' now assigns the pasted block as string list, instead of string
36 52 * The ipcluster script now run by default with no security. This is done because
37 53 the main usage of the script is for starting things on localhost. Eventually
38 54 when ipcluster is able to start things on other hosts, we will put security
39 55 back.
40 56
41
42
43 57 Bug fixes
44 58 ---------
45 59
60 * The colors escapes in the multiengine client are now turned off on win32 as they
61 don't print correctly.
46 62 * The :mod:`IPython.kernel.scripts.ipengine` script was exec'ing mpi_import_statement
47 63 incorrectly, which was leading the engine to crash when mpi was enabled.
48 64 * A few subpackages had missing `__init__.py` files.
49 65 * The documentation is only created if Sphinx is found. Previously, the `setup.py`
50 66 script would fail if it was missing.
51 67
52 68 Backwards incompatible changes
53 69 ------------------------------
54 70
71 * :class:`IPython.kernel.client.Task` has been renamed
72 :class:`IPython.kernel.client.StringTask` to make way for new task types.
73 * The keyword argument `style` has been renamed `dist` in `scatter`, `gather`
74 and `map`.
75 * Renamed the values that the renamed `dist` keyword argument can have from
76 `'basic'` to `'b'`.
55 77 * IPython has a larger set of dependencies if you want all of its capabilities.
56 78 See the `setup.py` script for details.
57 79 * The constructors for :class:`IPython.kernel.client.MultiEngineClient` and
58 80 :class:`IPython.kernel.client.TaskClient` no longer take the (ip,port) tuple.
59 81 Instead they take the filename of a file that contains the FURL for that
60 82 client. If the FURL file is in your IPYTHONDIR, it will be found automatically
61 83 and the constructor can be left empty.
62 84 * The asynchronous clients in :mod:`IPython.kernel.asyncclient` are now created
63 85 using the factory functions :func:`get_multiengine_client` and
64 86 :func:`get_task_client`. These return a `Deferred` to the actual client.
65 87 * The command line options to `ipcontroller` and `ipengine` have changed to
66 88 reflect the new Foolscap network protocol and the FURL files. Please see the
67 89 help for these scripts for details.
68 90 * The configuration files for the kernel have changed because of the Foolscap stuff.
69 91 If you were using custom config files before, you should delete them and regenerate
70 92 new ones.
71 93
72 94 Changes merged in from IPython1
73 95 -------------------------------
74 96
75 97 New features
76 98 ............
77 99
78 100 * Much improved ``setup.py`` and ``setupegg.py`` scripts. Because Twisted
79 101 and zope.interface are now easy installable, we can declare them as dependencies
80 102 in our setupegg.py script.
81 103 * IPython is now compatible with Twisted 2.5.0 and 8.x.
82 104 * Added a new example of how to use :mod:`ipython1.kernel.asynclient`.
83 105 * Initial draft of a process daemon in :mod:`ipython1.daemon`. This has not
84 106 been merged into IPython and is still in `ipython1-dev`.
85 107 * The ``TaskController`` now has methods for getting the queue status.
86 108 * The ``TaskResult`` objects now have information about how long the task
87 109 took to run.
88 110 * We are attaching additional attributes to exceptions ``(_ipython_*)`` that
89 111 we use to carry additional info around.
90 112 * New top-level module :mod:`asyncclient` that has asynchronous versions (that
91 113 return deferreds) of the client classes. This is designed to users who want
92 114 to run their own Twisted reactor
93 115 * All the clients in :mod:`client` are now based on Twisted. This is done by
94 116 running the Twisted reactor in a separate thread and using the
95 117 :func:`blockingCallFromThread` function that is in recent versions of Twisted.
96 118 * Functions can now be pushed/pulled to/from engines using
97 119 :meth:`MultiEngineClient.push_function` and :meth:`MultiEngineClient.pull_function`.
98 120 * Gather/scatter are now implemented in the client to reduce the work load
99 121 of the controller and improve performance.
100 122 * Complete rewrite of the IPython documentation. All of the documentation
101 123 from the IPython website has been moved into docs/source as restructured
102 124 text documents. PDF and HTML documentation are being generated using
103 125 Sphinx.
104 126 * New developer oriented documentation: development guidelines and roadmap.
105 127 * Traditional ``ChangeLog`` has been changed to a more useful ``changes.txt`` file
106 128 that is organized by release and is meant to provide something more relevant
107 129 for users.
108 130
109 131 Bug fixes
110 132 .........
111 133
112 134 * Created a proper ``MANIFEST.in`` file to create source distributions.
113 135 * Fixed a bug in the ``MultiEngine`` interface. Previously, multi-engine
114 136 actions were being collected with a :class:`DeferredList` with
115 137 ``fireononeerrback=1``. This meant that methods were returning
116 138 before all engines had given their results. This was causing extremely odd
117 139 bugs in certain cases. To fix this problem, we have 1) set
118 140 ``fireononeerrback=0`` to make sure all results (or exceptions) are in
119 141 before returning and 2) introduced a :exc:`CompositeError` exception
120 142 that wraps all of the engine exceptions. This is a huge change as it means
121 143 that users will have to catch :exc:`CompositeError` rather than the actual
122 144 exception.
123 145
124 146 Backwards incompatible changes
125 147 ..............................
126 148
127 149 * All names have been renamed to conform to the lowercase_with_underscore
128 150 convention. This will require users to change references to all names like
129 151 ``queueStatus`` to ``queue_status``.
130 152 * Previously, methods like :meth:`MultiEngineClient.push` and
131 153 :meth:`MultiEngineClient.push` used ``*args`` and ``**kwargs``. This was
132 154 becoming a problem as we weren't able to introduce new keyword arguments into
133 155 the API. Now these methods simply take a dict or sequence. This has also allowed
134 156 us to get rid of the ``*All`` methods like :meth:`pushAll` and :meth:`pullAll`.
135 157 These things are now handled with the ``targets`` keyword argument that defaults
136 158 to ``'all'``.
137 159 * The :attr:`MultiEngineClient.magicTargets` has been renamed to
138 160 :attr:`MultiEngineClient.targets`.
139 161 * All methods in the MultiEngine interface now accept the optional keyword argument
140 162 ``block``.
141 163 * Renamed :class:`RemoteController` to :class:`MultiEngineClient` and
142 164 :class:`TaskController` to :class:`TaskClient`.
143 165 * Renamed the top-level module from :mod:`api` to :mod:`client`.
144 166 * Most methods in the multiengine interface now raise a :exc:`CompositeError` exception
145 167 that wraps the user's exceptions, rather than just raising the raw user's exception.
146 168 * Changed the ``setupNS`` and ``resultNames`` in the ``Task`` class to ``push``
147 169 and ``pull``.
148 170
149 171 Release 0.8.4
150 172 =============
151 173
152 174 Someone needs to describe what went into 0.8.4.
153 175
154 176 Release 0.8.2
155 177 =============
156 178
157 179 * %pushd/%popd behave differently; now "pushd /foo" pushes CURRENT directory
158 180 and jumps to /foo. The current behaviour is closer to the documented
159 181 behaviour, and should not trip anyone.
160 182
161 183 Release 0.8.3
162 184 =============
163 185
164 186 * pydb is now disabled by default (due to %run -d problems). You can enable
165 187 it by passing -pydb command line argument to IPython. Note that setting
166 188 it in config file won't work.
167 189
168 190 Older releases
169 191 ==============
170 192
171 193 Changes in earlier releases of IPython are described in the older file ``ChangeLog``.
172 194 Please refer to this document for details.
173 195
@@ -1,9 +1,11 b''
1 .. _install_index:
2
1 3 ==================
2 4 Installation
3 5 ==================
4 6
5 7 .. toctree::
6 8 :maxdepth: 2
7 9
8 10 basic.txt
9 11 advanced.txt
@@ -1,189 +1,174 b''
1 1 .. _overview:
2 2
3 3 ============
4 4 Introduction
5 5 ============
6 6
7 This is the official documentation for IPython 0.x series (i.e. what
8 we are used to refer to just as "IPython"). The original text of the
9 manual (most of which is still in place) has been authored by Fernando
10 Perez, but as recommended usage patterns and new features have
11 emerged, this manual has been updated to reflect that fact. Most of
12 the additions have been authored by Ville M. Vainio.
13
14 The manual has been generated from reStructuredText source markup with
15 Sphinx, which should make it much easier to keep it up-to-date in the
16 future. Some reST artifacts and bugs may still be apparent in the
17 documentation, but this should improve as the toolchain matures.
18
19 7 Overview
20 8 ========
21 9
22 10 One of Python's most useful features is its interactive interpreter.
23 11 This system allows very fast testing of ideas without the overhead of
24 12 creating test files as is typical in most programming languages.
25 13 However, the interpreter supplied with the standard Python distribution
26 14 is somewhat limited for extended interactive use.
27 15
28 IPython is a free software project (released under the BSD license)
29 which tries to:
16 The goal of IPython is to create a comprehensive environment for
17 interactive and exploratory computing. To support, this goal, IPython
18 has two main components:
19
20 * An enhanced interactive Python shell.
21 * An architecture for interactive parallel computing.
22
23 All of IPython is open source (released under the revised BSD license).
24
25 Enhanced interactive Python shell
26 =================================
27
28 IPython's interactive shell (`ipython`), has the following goals:
30 29
31 30 1. Provide an interactive shell superior to Python's default. IPython
32 31 has many features for object introspection, system shell access,
33 32 and its own special command system for adding functionality when
34 33 working interactively. It tries to be a very efficient environment
35 34 both for Python code development and for exploration of problems
36 35 using Python objects (in situations like data analysis).
37 36 2. Serve as an embeddable, ready to use interpreter for your own
38 37 programs. IPython can be started with a single call from inside
39 38 another program, providing access to the current namespace. This
40 39 can be very useful both for debugging purposes and for situations
41 40 where a blend of batch-processing and interactive exploration are
42 41 needed.
43 42 3. Offer a flexible framework which can be used as the base
44 43 environment for other systems with Python as the underlying
45 44 language. Specifically scientific environments like Mathematica,
46 45 IDL and Matlab inspired its design, but similar ideas can be
47 46 useful in many fields.
48 47 4. Allow interactive testing of threaded graphical toolkits. IPython
49 48 has support for interactive, non-blocking control of GTK, Qt and
50 49 WX applications via special threading flags. The normal Python
51 50 shell can only do this for Tkinter applications.
52 51
53
54 Main features
55 -------------
56
57 * Dynamic object introspection. One can access docstrings, function
58 definition prototypes, source code, source files and other details
59 of any object accessible to the interpreter with a single
60 keystroke ('?', and using '??' provides additional detail).
61 * Searching through modules and namespaces with '*' wildcards, both
62 when using the '?' system and via the %psearch command.
63 * Completion in the local namespace, by typing TAB at the prompt.
64 This works for keywords, modules, methods, variables and files in the
65 current directory. This is supported via the readline library, and
66 full access to configuring readline's behavior is provided.
67 Custom completers can be implemented easily for different purposes
68 (system commands, magic arguments etc.)
69 * Numbered input/output prompts with command history (persistent
70 across sessions and tied to each profile), full searching in this
71 history and caching of all input and output.
72 * User-extensible 'magic' commands. A set of commands prefixed with
73 % is available for controlling IPython itself and provides
74 directory control, namespace information and many aliases to
75 common system shell commands.
76 * Alias facility for defining your own system aliases.
77 * Complete system shell access. Lines starting with ! are passed
78 directly to the system shell, and using !! or var = !cmd
79 captures shell output into python variables for further use.
80 * Background execution of Python commands in a separate thread.
81 IPython has an internal job manager called jobs, and a
82 conveninence backgrounding magic function called %bg.
83 * The ability to expand python variables when calling the system
84 shell. In a shell command, any python variable prefixed with $ is
85 expanded. A double $$ allows passing a literal $ to the shell (for
86 access to shell and environment variables like $PATH).
87 * Filesystem navigation, via a magic %cd command, along with a
88 persistent bookmark system (using %bookmark) for fast access to
89 frequently visited directories.
90 * A lightweight persistence framework via the %store command, which
91 allows you to save arbitrary Python variables. These get restored
92 automatically when your session restarts.
93 * Automatic indentation (optional) of code as you type (through the
94 readline library).
95 * Macro system for quickly re-executing multiple lines of previous
96 input with a single name. Macros can be stored persistently via
97 %store and edited via %edit.
98 * Session logging (you can then later use these logs as code in your
99 programs). Logs can optionally timestamp all input, and also store
100 session output (marked as comments, so the log remains valid
101 Python source code).
102 * Session restoring: logs can be replayed to restore a previous
103 session to the state where you left it.
104 * Verbose and colored exception traceback printouts. Easier to parse
105 visually, and in verbose mode they produce a lot of useful
106 debugging information (basically a terminal version of the cgitb
107 module).
108 * Auto-parentheses: callable objects can be executed without
109 parentheses: 'sin 3' is automatically converted to 'sin(3)'.
110 * Auto-quoting: using ',' or ';' as the first character forces
111 auto-quoting of the rest of the line: ',my_function a b' becomes
112 automatically 'my_function("a","b")', while ';my_function a b'
113 becomes 'my_function("a b")'.
114 * Extensible input syntax. You can define filters that pre-process
115 user input to simplify input in special situations. This allows
116 for example pasting multi-line code fragments which start with
117 '>>>' or '...' such as those from other python sessions or the
118 standard Python documentation.
119 * Flexible configuration system. It uses a configuration file which
120 allows permanent setting of all command-line options, module
121 loading, code and file execution. The system allows recursive file
122 inclusion, so you can have a base file with defaults and layers
123 which load other customizations for particular projects.
124 * Embeddable. You can call IPython as a python shell inside your own
125 python programs. This can be used both for debugging code or for
126 providing interactive abilities to your programs with knowledge
127 about the local namespaces (very useful in debugging and data
128 analysis situations).
129 * Easy debugger access. You can set IPython to call up an enhanced
130 version of the Python debugger (pdb) every time there is an
131 uncaught exception. This drops you inside the code which triggered
132 the exception with all the data live and it is possible to
133 navigate the stack to rapidly isolate the source of a bug. The
134 %run magic command -with the -d option- can run any script under
135 pdb's control, automatically setting initial breakpoints for you.
136 This version of pdb has IPython-specific improvements, including
137 tab-completion and traceback coloring support. For even easier
138 debugger access, try %debug after seeing an exception. winpdb is
139 also supported, see ipy_winpdb extension.
140 * Profiler support. You can run single statements (similar to
141 profile.run()) or complete programs under the profiler's control.
142 While this is possible with standard cProfile or profile modules,
143 IPython wraps this functionality with magic commands (see '%prun'
144 and '%run -p') convenient for rapid interactive work.
145 * Doctest support. The special %doctest_mode command toggles a mode
146 that allows you to paste existing doctests (with leading '>>>'
147 prompts and whitespace) and uses doctest-compatible prompts and
148 output, so you can use IPython sessions as doctest code.
149
52 Main features of the interactive shell
53 --------------------------------------
54
55 * Dynamic object introspection. One can access docstrings, function
56 definition prototypes, source code, source files and other details
57 of any object accessible to the interpreter with a single
58 keystroke (:samp:`?`, and using :samp:`??` provides additional detail).
59 * Searching through modules and namespaces with :samp:`*` wildcards, both
60 when using the :samp:`?` system and via the :samp:`%psearch` command.
61 * Completion in the local namespace, by typing :kbd:`TAB` at the prompt.
62 This works for keywords, modules, methods, variables and files in the
63 current directory. This is supported via the readline library, and
64 full access to configuring readline's behavior is provided.
65 Custom completers can be implemented easily for different purposes
66 (system commands, magic arguments etc.)
67 * Numbered input/output prompts with command history (persistent
68 across sessions and tied to each profile), full searching in this
69 history and caching of all input and output.
70 * User-extensible 'magic' commands. A set of commands prefixed with
71 :samp:`%` is available for controlling IPython itself and provides
72 directory control, namespace information and many aliases to
73 common system shell commands.
74 * Alias facility for defining your own system aliases.
75 * Complete system shell access. Lines starting with :samp:`!` are passed
76 directly to the system shell, and using :samp:`!!` or :samp:`var = !cmd`
77 captures shell output into python variables for further use.
78 * Background execution of Python commands in a separate thread.
79 IPython has an internal job manager called jobs, and a
80 convenience backgrounding magic function called :samp:`%bg`.
81 * The ability to expand python variables when calling the system
82 shell. In a shell command, any python variable prefixed with :samp:`$` is
83 expanded. A double :samp:`$$` allows passing a literal :samp:`$` to the shell (for
84 access to shell and environment variables like :envvar:`PATH`).
85 * Filesystem navigation, via a magic :samp:`%cd` command, along with a
86 persistent bookmark system (using :samp:`%bookmark`) for fast access to
87 frequently visited directories.
88 * A lightweight persistence framework via the :samp:`%store` command, which
89 allows you to save arbitrary Python variables. These get restored
90 automatically when your session restarts.
91 * Automatic indentation (optional) of code as you type (through the
92 readline library).
93 * Macro system for quickly re-executing multiple lines of previous
94 input with a single name. Macros can be stored persistently via
95 :samp:`%store` and edited via :samp:`%edit`.
96 * Session logging (you can then later use these logs as code in your
97 programs). Logs can optionally timestamp all input, and also store
98 session output (marked as comments, so the log remains valid
99 Python source code).
100 * Session restoring: logs can be replayed to restore a previous
101 session to the state where you left it.
102 * Verbose and colored exception traceback printouts. Easier to parse
103 visually, and in verbose mode they produce a lot of useful
104 debugging information (basically a terminal version of the cgitb
105 module).
106 * Auto-parentheses: callable objects can be executed without
107 parentheses: :samp:`sin 3` is automatically converted to :samp:`sin(3)`.
108 * Auto-quoting: using :samp:`,`, or :samp:`;` as the first character forces
109 auto-quoting of the rest of the line: :samp:`,my_function a b` becomes
110 automatically :samp:`my_function("a","b")`, while :samp:`;my_function a b`
111 becomes :samp:`my_function("a b")`.
112 * Extensible input syntax. You can define filters that pre-process
113 user input to simplify input in special situations. This allows
114 for example pasting multi-line code fragments which start with
115 :samp:`>>>` or :samp:`...` such as those from other python sessions or the
116 standard Python documentation.
117 * Flexible configuration system. It uses a configuration file which
118 allows permanent setting of all command-line options, module
119 loading, code and file execution. The system allows recursive file
120 inclusion, so you can have a base file with defaults and layers
121 which load other customizations for particular projects.
122 * Embeddable. You can call IPython as a python shell inside your own
123 python programs. This can be used both for debugging code or for
124 providing interactive abilities to your programs with knowledge
125 about the local namespaces (very useful in debugging and data
126 analysis situations).
127 * Easy debugger access. You can set IPython to call up an enhanced
128 version of the Python debugger (pdb) every time there is an
129 uncaught exception. This drops you inside the code which triggered
130 the exception with all the data live and it is possible to
131 navigate the stack to rapidly isolate the source of a bug. The
132 :samp:`%run` magic command (with the :samp:`-d` option) can run any script under
133 pdb's control, automatically setting initial breakpoints for you.
134 This version of pdb has IPython-specific improvements, including
135 tab-completion and traceback coloring support. For even easier
136 debugger access, try :samp:`%debug` after seeing an exception. winpdb is
137 also supported, see ipy_winpdb extension.
138 * Profiler support. You can run single statements (similar to
139 :samp:`profile.run()`) or complete programs under the profiler's control.
140 While this is possible with standard cProfile or profile modules,
141 IPython wraps this functionality with magic commands (see :samp:`%prun`
142 and :samp:`%run -p`) convenient for rapid interactive work.
143 * Doctest support. The special :samp:`%doctest_mode` command toggles a mode
144 that allows you to paste existing doctests (with leading :samp:`>>>`
145 prompts and whitespace) and uses doctest-compatible prompts and
146 output, so you can use IPython sessions as doctest code.
147
148 Interactive parallel computing
149 ==============================
150
151 Increasingly, parallel computer hardware, such as multicore CPUs, clusters and supercomputers, is becoming ubiquitous. Over the last 3 years, we have developed an
152 architecture within IPython that allows such hardware to be used quickly and easily
153 from Python. Moreover, this architecture is designed to support interactive and
154 collaborative parallel computing.
155
156 For more information, see our :ref:`overview <parallel_index>` of using IPython for
157 parallel computing.
150 158
151 159 Portability and Python requirements
152 160 -----------------------------------
153 161
154 Python requirements: IPython requires Python version 2.3 or newer.
155 If you are still using Python 2.2 and can not upgrade, the last version
156 of IPython which worked with Python 2.2 was 0.6.15, so you will have to
157 use that.
158
159 IPython is developed under Linux, but it should work in any reasonable
160 Unix-type system (tested OK under Solaris and the BSD family, for which
161 a port exists thanks to Dryice Liu).
162
163 Mac OS X: it works, apparently without any problems (thanks to Jim Boyle
164 at Lawrence Livermore for the information). Thanks to Andrea Riciputi,
165 Fink support is available.
166
167 CygWin: it works mostly OK, though some users have reported problems
168 with prompt coloring. No satisfactory solution to this has been found so
169 far, you may want to disable colors permanently in the ipythonrc
170 configuration file if you experience problems. If you have proper color
171 support under cygwin, please post to the IPython mailing list so this
172 issue can be resolved for all users.
173
174 Windows: it works well under Windows Vista/XP/2k, and I suspect NT should
175 behave similarly. Section "Installation under windows" describes
176 installation details for Windows, including some additional tools needed
177 on this platform.
178
179 Windows 9x support is present, and has been reported to work fine (at
180 least on WinME).
181
182 Location
183 --------
184
185 IPython is generously hosted at http://ipython.scipy.org by
186 Enthought, Inc. and the SciPy project. This site offers downloads,
187 subversion access, mailing lists and a bug tracking system. I am very
188 grateful to Enthought (http://www.enthought.com) and all of the SciPy
189 team for their contribution. No newline at end of file
162 As of the 0.9 release, IPython requires Python 2.4 or greater. We have
163 not begun to test IPython on Python 2.6 or 3.0, but we expect it will
164 work with some minor changes.
165
166 IPython is known to work on the following operating systems:
167
168 * Linux
169 * AIX
170 * Most other Unix-like OSs (Solaris, BSD, etc.)
171 * Mac OS X
172 * Windows (CygWin, XP, Vista, etc.)
173
174 See :ref:`here <install_index>` for instructions on how to install IPython. No newline at end of file
@@ -1,15 +1,17 b''
1 .. _parallel_index:
2
1 3 ====================================
2 4 Using IPython for Parallel computing
3 5 ====================================
4 6
5 7 User Documentation
6 8 ==================
7 9
8 10 .. toctree::
9 11 :maxdepth: 2
10 12
11 13 parallel_intro.txt
12 14 parallel_multiengine.txt
13 15 parallel_task.txt
14 16 parallel_mpi.txt
15 17
1 NO CONTENT: file was removed
General Comments 0
You need to be logged in to leave comments. Login now