##// END OF EJS Templates
merge from trunk
Barry Wark -
r1441:d4c5ae87 merge
parent child Browse files
Show More

The requested changes are too big and content was truncated. Show full diff

@@ -0,0 +1,233 b''
1 # encoding: utf-8
2
3 """A parallelized version of Python's builtin map."""
4
5 __docformat__ = "restructuredtext en"
6
7 #----------------------------------------------------------------------------
8 # Copyright (C) 2008 The IPython Development Team
9 #
10 # Distributed under the terms of the BSD License. The full license is in
11 # the file COPYING, distributed as part of this software.
12 #----------------------------------------------------------------------------
13
14 #----------------------------------------------------------------------------
15 # Imports
16 #----------------------------------------------------------------------------
17
18 from types import FunctionType
19 from zope.interface import Interface, implements
20 from IPython.kernel.task import MapTask
21 from IPython.kernel.twistedutil import DeferredList, gatherBoth
22 from IPython.kernel.util import printer
23 from IPython.kernel.error import collect_exceptions
24
25 #----------------------------------------------------------------------------
26 # Code
27 #----------------------------------------------------------------------------
28
class IMapper(Interface):
    """The basic interface for a Mapper.

    This defines a generic interface for mapping.  The idea of this is
    similar to that of Python's builtin `map` function, which applies a
    function elementwise to a sequence.
    """

    def map(func, *seqs):
        """Do map in parallel.

        Equivalent to map(func, *seqs) or:

        [func(seqs[0][0], seqs[1][0],...), func(seqs[0][1], seqs[1][1],...),...]

        :Parameters:
            func : FunctionType
                The function to apply to the sequence
            sequences : tuple of iterables
                A sequence of iterables that are used for successive function
                arguments.  This works just like map.
        """
class IMultiEngineMapperFactory(Interface):
    """
    An interface for something that creates `IMapper` instances.
    """

    def mapper(dist='b', targets='all', block=True):
        """
        Create an `IMapper` implementer with a given set of arguments.

        The `IMapper` created using a multiengine controller is
        not load balanced.

        :Parameters:
            dist : str
                The type of decomposition; only block ('b') is supported
            targets : (str, int, tuple of ints)
                The engines to use in the map
            block : boolean
                Whether calls to `map` should block
        """
class ITaskMapperFactory(Interface):
    """
    An interface for something that creates `IMapper` instances.
    """

    def mapper(clear_before=False, clear_after=False, retries=0,
               recovery_task=None, depend=None, block=True):
        """
        Create an `IMapper` implementer with a given set of arguments.

        The `IMapper` created using a task controller is load balanced.

        See the documentation for `IPython.kernel.task.BaseTask` for
        documentation on the arguments to this method.
        """
81
class MultiEngineMapper(object):
    """
    A Mapper for `IMultiEngine` implementers.

    Applies a function elementwise across engines; the work distribution
    is static (block decomposition), not load balanced.
    """

    implements(IMapper)

    def __init__(self, multiengine, dist='b', targets='all', block=True):
        """
        Create a Mapper for a multiengine.

        The value of all arguments are used for all calls to `map`.  This
        class allows these arguments to be set for a series of map calls.

        :Parameters:
            multiengine : `IMultiEngine` implementer
                The multiengine to use for running the map commands
            dist : str
                The type of decomposition to use.  Only block ('b') is
                supported currently
            targets : (str, int, tuple of ints)
                The engines to use in the map
            block : boolean
                Whether to block when the map is applied
        """
        self.multiengine = multiengine
        self.dist = dist
        self.targets = targets
        self.block = block

    def map(self, func, *sequences):
        """
        Apply func to *sequences elementwise.  Like Python's builtin map.

        This version is not load balanced.

        :Parameters:
            func : str or FunctionType
                The function (or its name) to apply elementwise
            sequences : tuple of iterables
                Equal-length sequences supplying successive arguments

        :Raises:
            ValueError : if the sequences do not all have the same length
        """
        # Elementwise mapping only makes sense over equal-length inputs.
        max_len = max(len(s) for s in sequences)
        for s in sequences:
            if len(s) != max_len:
                raise ValueError('all sequences must have equal length')
        # Typo fixed in the assertion message ("fuction" -> "function").
        assert isinstance(func, (str, FunctionType)), "func must be a function or str"
        return self.multiengine.raw_map(func, sequences, dist=self.dist,
                                        targets=self.targets, block=self.block)
class TaskMapper(object):
    """
    Make an `ITaskController` look like an `IMapper`.

    This class provides a load balanced version of `map`.
    """

    def __init__(self, task_controller, clear_before=False, clear_after=False,
                 retries=0, recovery_task=None, depend=None, block=True):
        """
        Create a `IMapper` given a `TaskController` and arguments.

        The additional arguments are those that are common to all types of
        tasks and are described in the documentation for
        `IPython.kernel.task.BaseTask`.

        :Parameters:
            task_controller : an `IBlockingTaskClient` implementer
                The `TaskController` to use for calls to `map`
        """
        self.task_controller = task_controller
        self.clear_before = clear_before
        self.clear_after = clear_after
        self.retries = retries
        self.recovery_task = recovery_task
        self.depend = depend
        self.block = block

    def map(self, func, *sequences):
        """
        Apply func to *sequences elementwise.  Like Python's builtin map.

        This version is load balanced.

        :Raises:
            ValueError : if the sequences do not all have the same length
        """
        # Elementwise mapping only makes sense over equal-length inputs.
        max_len = max(len(s) for s in sequences)
        for s in sequences:
            if len(s) != max_len:
                raise ValueError('all sequences must have equal length')
        # One MapTask per argument tuple; each run() returns a deferred
        # that will fire with the task id.
        # (Removed a dead local `task_ids = []` that was never appended to;
        # the `get_results` parameter below receives the real id list.)
        task_args = zip(*sequences)
        dlist = []
        for ta in task_args:
            task = MapTask(func, ta, clear_before=self.clear_before,
                           clear_after=self.clear_after, retries=self.retries,
                           recovery_task=self.recovery_task, depend=self.depend)
            dlist.append(self.task_controller.run(task))
        dlist = gatherBoth(dlist, consumeErrors=1)
        dlist.addCallback(collect_exceptions, 'map')
        if self.block:
            def get_results(task_ids):
                # Wait for all tasks, then gather their results in order.
                d = self.task_controller.barrier(task_ids)
                d.addCallback(lambda _: gatherBoth(
                    [self.task_controller.get_task_result(tid)
                     for tid in task_ids],
                    consumeErrors=1))
                d.addCallback(collect_exceptions, 'map')
                return d
            dlist.addCallback(get_results)
        return dlist
class SynchronousTaskMapper(object):
    """
    Make an `IBlockingTaskClient` look like an `IMapper`.

    This class provides a load balanced version of `map`.
    """

    def __init__(self, task_controller, clear_before=False, clear_after=False,
                 retries=0, recovery_task=None, depend=None, block=True):
        """
        Create a `IMapper` given a `IBlockingTaskClient` and arguments.

        The additional arguments are those that are common to all types of
        tasks and are described in the documentation for
        `IPython.kernel.task.BaseTask`.

        :Parameters:
            task_controller : an `IBlockingTaskClient` implementer
                The `TaskController` to use for calls to `map`
        """
        self.task_controller = task_controller
        self.clear_before = clear_before
        self.clear_after = clear_after
        self.retries = retries
        self.recovery_task = recovery_task
        self.depend = depend
        self.block = block

    def map(self, func, *sequences):
        """
        Apply func to *sequences elementwise.  Like Python's builtin map.

        This version is load balanced.
        """
        # Elementwise mapping requires every input sequence to line up.
        longest = max(len(seq) for seq in sequences)
        for seq in sequences:
            if len(seq) != longest:
                raise ValueError('all sequences must have equal length')
        # Submit one MapTask per argument tuple, remembering each task id.
        submitted = []
        for arg_tuple in zip(*sequences):
            new_task = MapTask(func, arg_tuple,
                               clear_before=self.clear_before,
                               clear_after=self.clear_after,
                               retries=self.retries,
                               recovery_task=self.recovery_task,
                               depend=self.depend)
            submitted.append(self.task_controller.run(new_task))
        if not self.block:
            # Non-blocking mode: hand the task ids straight back.
            return submitted
        # Blocking mode: wait for every task, then collect results in order.
        self.task_controller.barrier(submitted)
        return [self.task_controller.get_task_result(tid) for tid in submitted]
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100755
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100755
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644, binary diff hidden
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: new file 100644
The requested commit or file is too big and content was truncated. Show full diff
@@ -1,3319 +1,3318 b''
1 1 # -*- coding: utf-8 -*-
2 2 """Magic functions for InteractiveShell.
3 3
4 4 $Id: Magic.py 2996 2008-01-30 06:31:39Z fperez $"""
5 5
6 6 #*****************************************************************************
7 7 # Copyright (C) 2001 Janko Hauser <jhauser@zscout.de> and
8 8 # Copyright (C) 2001-2006 Fernando Perez <fperez@colorado.edu>
9 9 #
10 10 # Distributed under the terms of the BSD License. The full license is in
11 11 # the file COPYING, distributed as part of this software.
12 12 #*****************************************************************************
13 13
14 14 #****************************************************************************
15 15 # Modules and globals
16 16
17 17 from IPython import Release
18 18 __author__ = '%s <%s>\n%s <%s>' % \
19 19 ( Release.authors['Janko'] + Release.authors['Fernando'] )
20 20 __license__ = Release.license
21 21
22 22 # Python standard modules
23 23 import __builtin__
24 24 import bdb
25 25 import inspect
26 26 import os
27 27 import pdb
28 28 import pydoc
29 29 import sys
30 30 import re
31 31 import tempfile
32 32 import time
33 33 import cPickle as pickle
34 34 import textwrap
35 35 from cStringIO import StringIO
36 36 from getopt import getopt,GetoptError
37 37 from pprint import pprint, pformat
38 38 from sets import Set
39 39
40 40 # cProfile was added in Python2.5
41 41 try:
42 42 import cProfile as profile
43 43 import pstats
44 44 except ImportError:
45 45 # profile isn't bundled by default in Debian for license reasons
46 46 try:
47 47 import profile,pstats
48 48 except ImportError:
49 49 profile = pstats = None
50 50
51 51 # Homebrewed
52 52 import IPython
53 53 from IPython import Debugger, OInspect, wildcard
54 54 from IPython.FakeModule import FakeModule
55 55 from IPython.Itpl import Itpl, itpl, printpl,itplns
56 56 from IPython.PyColorize import Parser
57 57 from IPython.ipstruct import Struct
58 58 from IPython.macro import Macro
59 59 from IPython.genutils import *
60 60 from IPython import platutils
61 61 import IPython.generics
62 62 import IPython.ipapi
63 63 from IPython.ipapi import UsageError
64 64 #***************************************************************************
65 65 # Utility functions
def on_off(tag):
    """Return an ON/OFF string for a 1/0 input.  Simple utility function."""
    states = ['OFF', 'ON']
    return states[tag]
69 69
70 70 class Bunch: pass
71 71
def compress_dhist(dh):
    """Compress a directory-history list.

    The 10 most recent entries are kept verbatim; duplicates are removed
    from the older head of the list, preserving first-seen order.
    """
    head, tail = dh[:-10], dh[-10:]

    newhead = []
    # Built-in set (available since Python 2.4) replaces the deprecated
    # sets.Set with identical membership semantics here.
    done = set()
    for h in head:
        if h in done:
            continue
        newhead.append(h)
        done.add(h)

    return newhead + tail
84 84
85 85
86 86 #***************************************************************************
87 87 # Main class implementing Magic functionality
88 88 class Magic:
89 89 """Magic functions for InteractiveShell.
90 90
91 91 Shell functions which can be reached as %function_name. All magic
92 92 functions should accept a string, which they can parse for their own
93 93 needs. This can make some functions easier to type, eg `%cd ../`
94 94 vs. `%cd("../")`
95 95
96 96 ALL definitions MUST begin with the prefix magic_. The user won't need it
97 97 at the command line, but it is is needed in the definition. """
98 98
99 99 # class globals
100 100 auto_status = ['Automagic is OFF, % prefix IS needed for magic functions.',
101 101 'Automagic is ON, % prefix NOT needed for magic functions.']
102 102
103 103 #......................................................................
104 104 # some utility functions
105 105
    def __init__(self,shell):
        """Create the magic-function machinery bound to *shell*."""

        # Per-magic default option strings, filled in via default_option().
        self.options_table = {}
        # profile/pstats may be absent (Debian strips them for license
        # reasons); degrade %prun to an informative error in that case.
        if profile is None:
            self.magic_prun = self.profile_missing_notice
        self.shell = shell

        # namespace for holding state we may need
        self._magic_state = Bunch()
115 115
    def profile_missing_notice(self, *args, **kwargs):
        """Stand-in for %prun when the profile module is unavailable."""
        error("""\
The profile module could not be found. It has been removed from the standard
python packages because of its non-free license. To use profiling, install the
python-profiler package from non-free.""")
121 121
    def default_option(self,fn,optstr):
        """Make an entry in the options_table for fn, with value optstr"""

        if fn not in self.lsmagic():
            error("%s is not a magic function" % fn)
        # NOTE(review): the entry is stored even when the name was reported
        # as not being a magic function above -- confirm this is intended.
        self.options_table[fn] = optstr
128 128
    def lsmagic(self):
        """Return a list of currently available magic functions.

        Gives a list of the bare names after mangling (['ls','cd', ...], not
        ['magic_ls','magic_cd',...]"""

        # FIXME. This needs a cleanup, in the way the magics list is built.

        # magics in class definition
        class_magic = lambda fn: fn.startswith('magic_') and \
                      callable(Magic.__dict__[fn])
        # in instance namespace (run-time user additions)
        inst_magic = lambda fn: fn.startswith('magic_') and \
                     callable(self.__dict__[fn])
        # and bound magics by user (so they can access self):
        inst_bound_magic = lambda fn: fn.startswith('magic_') and \
                           callable(self.__class__.__dict__[fn])
        magics = filter(class_magic,Magic.__dict__.keys()) + \
                 filter(inst_magic,self.__dict__.keys()) + \
                 filter(inst_bound_magic,self.__class__.__dict__.keys())
        out = []
        # Set() removes duplicates: a magic may appear in several namespaces.
        for fn in Set(magics):
            out.append(fn.replace('magic_','',1))
        out.sort()
        return out
154 154
    def extract_input_slices(self,slices,raw=False):
        """Return a list of input history slices.

        Inputs:

          - slices: the set of slices is given as a list of strings (like
          ['1','4:8','9'], since this function is for use by magic functions
          which get their arguments as strings.

        Optional inputs:

          - raw(False): by default, the processed input is used.  If this is
          true, the raw input history is used instead.

        Note that slices can be called with two notations:

        N:M -> standard python form, means including items N...(M-1).

        N-M -> include items N..M (closed endpoint)."""

        if raw:
            hist = self.shell.input_hist_raw
        else:
            hist = self.shell.input_hist

        cmds = []
        for chunk in slices:
            if ':' in chunk:
                # Python-style half-open range N:M
                ini,fin = map(int,chunk.split(':'))
            elif '-' in chunk:
                # Closed range N-M: bump the end to include item M.
                ini,fin = map(int,chunk.split('-'))
                fin += 1
            else:
                # Single item N
                ini = int(chunk)
                fin = ini+1
            cmds.append(hist[ini:fin])
        return cmds
192 192
    def _ofind(self, oname, namespaces=None):
        """Find an object in the available namespaces.

        self._ofind(oname) -> dict with keys: found,obj,ospace,ismagic

        Has special code to detect magic functions.
        """

        oname = oname.strip()

        alias_ns = None
        if namespaces is None:
            # Namespaces to search in:
            # Put them in a list. The order is important so that we
            # find things in the same order that Python finds them.
            namespaces = [ ('Interactive', self.shell.user_ns),
                           ('IPython internal', self.shell.internal_ns),
                           ('Python builtin', __builtin__.__dict__),
                           ('Alias', self.shell.alias_table),
                           ]
            alias_ns = self.shell.alias_table

        # initialize results to 'null'
        # NOTE(review): `ds` is initialized but never used below.
        found = 0; obj = None;  ospace = None;  ds = None;
        ismagic = 0; isalias = 0;  parent = None

        # Look for the given name by splitting it in parts.  If the head is
        # found, then we look for all the remaining parts as members, and only
        # declare success if we can find them all.
        oname_parts = oname.split('.')
        oname_head, oname_rest = oname_parts[0],oname_parts[1:]
        for nsname,ns in namespaces:
            try:
                obj = ns[oname_head]
            except KeyError:
                continue
            else:
                #print 'oname_rest:', oname_rest  # dbg
                for part in oname_rest:
                    try:
                        parent = obj
                        obj = getattr(obj,part)
                    except:
                        # Blanket except b/c some badly implemented objects
                        # allow __getattr__ to raise exceptions other than
                        # AttributeError, which then crashes IPython.
                        break
                else:
                    # If we finish the for loop (no break), we got all members
                    found = 1
                    ospace = nsname
                    if ns == alias_ns:
                        isalias = 1
                    break  # namespace loop

        # Try to see if it's magic
        if not found:
            if oname.startswith(self.shell.ESC_MAGIC):
                oname = oname[1:]
            obj = getattr(self,'magic_'+oname,None)
            if obj is not None:
                found = 1
                ospace = 'IPython internal'
                ismagic = 1

        # Last try: special-case some literals like '', [], {}, etc:
        if not found and oname_head in ["''",'""','[]','{}','()']:
            obj = eval(oname_head)
            found = 1
            ospace = 'Interactive'

        return {'found':found, 'obj':obj, 'namespace':ospace,
                'ismagic':ismagic, 'isalias':isalias, 'parent':parent}
266 266
    def arg_err(self,func):
        """Print docstring if incorrect arguments were passed"""
        print 'Error in arguments:'
        print OInspect.getdoc(func)
271 271
    def format_latex(self,strng):
        """Format a string for latex inclusion.

        Escapes latex-special characters and marks up magic names/commands
        found in *strng* with texttt/textbf macros."""

        # Characters that need to be escaped for latex:
        escape_re = re.compile(r'(%|_|\$|#|&)',re.MULTILINE)
        # Magic command names as headers:
        cmd_name_re = re.compile(r'^(%s.*?):' % self.shell.ESC_MAGIC,
                                 re.MULTILINE)
        # Magic commands
        cmd_re = re.compile(r'(?P<cmd>%s.+?\b)(?!\}\}:)' % self.shell.ESC_MAGIC,
                            re.MULTILINE)
        # Paragraph continue
        par_re = re.compile(r'\\$',re.MULTILINE)

        # The "\n" symbol
        newline_re = re.compile(r'\\n')

        # Now build the string for output:
        #strng = cmd_name_re.sub(r'\n\\texttt{\\textsl{\\large \1}}:',strng)
        strng = cmd_name_re.sub(r'\n\\bigskip\n\\texttt{\\textbf{ \1}}:',
                                strng)
        strng = cmd_re.sub(r'\\texttt{\g<cmd>}',strng)
        strng = par_re.sub(r'\\\\',strng)
        strng = escape_re.sub(r'\\\1',strng)
        strng = newline_re.sub(r'\\textbackslash{}n',strng)
        return strng
298 298
    def format_screen(self,strng):
        """Format a string for screen printing.

        This removes some latex-type format codes."""
        # Paragraph continue
        par_re = re.compile(r'\\$',re.MULTILINE)
        strng = par_re.sub('',strng)
        return strng
307 307
    def parse_options(self,arg_str,opt_str,*long_opts,**kw):
        """Parse options passed to an argument string.

        The interface is similar to that of getopt(), but it returns back a
        Struct with the options as keys and the stripped argument string still
        as a string.

        arg_str is quoted as a true sys.argv vector by using shlex.split.
        This allows us to easily expand variables, glob files, quote
        arguments, etc.

        Options:
          -mode: default 'string'. If given as 'list', the argument string is
          returned as a list (split on whitespace) instead of a string.

          -list_all: put all option values in lists. Normally only options
          appearing more than once are put in a list.

          -posix (True): whether to split the input line in POSIX mode or not,
          as per the conventions outlined in the shlex module from the
          standard library."""

        # inject default options at the beginning of the input line
        # (caller name is derived from the calling magic's frame)
        caller = sys._getframe(1).f_code.co_name.replace('magic_','')
        arg_str = '%s %s' % (self.options_table.get(caller,''),arg_str)

        mode = kw.get('mode','string')
        if mode not in ['string','list']:
            raise ValueError,'incorrect mode given: %s' % mode
        # Get options
        list_all = kw.get('list_all',0)
        posix = kw.get('posix',True)

        # Check if we have more than one argument to warrant extra processing:
        odict = {}  # Dictionary with options
        args = arg_str.split()
        if len(args) >= 1:
            # If the list of inputs only has 0 or 1 thing in it, there's no
            # need to look for options
            argv = arg_split(arg_str,posix)
            # Do regular option processing
            try:
                opts,args = getopt(argv,opt_str,*long_opts)
            except GetoptError,e:
                raise UsageError('%s ( allowed: "%s" %s)' % (e.msg,opt_str,
                                        " ".join(long_opts)))
            for o,a in opts:
                if o.startswith('--'):
                    o = o[2:]
                else:
                    o = o[1:]
                # EAFP accumulation: a first occurrence stores the bare
                # value (KeyError path); a repeat occurrence finds a
                # non-list value, whose missing .append raises
                # AttributeError and triggers promotion to a list.
                try:
                    odict[o].append(a)
                except AttributeError:
                    odict[o] = [odict[o],a]
                except KeyError:
                    if list_all:
                        odict[o] = [a]
                    else:
                        odict[o] = a

        # Prepare opts,args for return
        opts = Struct(odict)
        if mode == 'string':
            args = ' '.join(args)

        return opts,args
375 375
376 376 #......................................................................
377 377 # And now the actual magic functions
378 378
379 379 # Functions for IPython shell work (vars,funcs, config, etc)
    def magic_lsmagic(self, parameter_s = ''):
        """List currently available magic functions."""
        mesc = self.shell.ESC_MAGIC
        # Print every magic name prefixed with the escape char, then the
        # current automagic status line.
        print 'Available magic functions:\n'+mesc+\
              (' '+mesc).join(self.lsmagic())
        print '\n' + Magic.auto_status[self.shell.rc.automagic]
        return None
387 387
    def magic_magic(self, parameter_s = ''):
        """Print information about the magic function system.

        Supported formats: -latex, -brief, -rest
        """

        # The bare except covers an empty parameter_s (IndexError on [0]).
        mode = ''
        try:
            if parameter_s.split()[0] == '-latex':
                mode = 'latex'
            if parameter_s.split()[0] == '-brief':
                mode = 'brief'
            if parameter_s.split()[0] == '-rest':
                mode = 'rest'
                rest_docs = []
        except:
            pass

        magic_docs = []
        for fname in self.lsmagic():
            mname = 'magic_' + fname
            # Look the magic up in the same namespaces lsmagic() scans.
            for space in (Magic,self,self.__class__):
                try:
                    fn = space.__dict__[mname]
                except KeyError:
                    pass
                else:
                    break
            if mode == 'brief':
                # only first line
                if fn.__doc__:
                    fndoc = fn.__doc__.split('\n',1)[0]
                else:
                    fndoc = 'No documentation'
            else:
                fndoc = fn.__doc__.rstrip()

            if mode == 'rest':
                rest_docs.append('**%s%s**::\n\n\t%s\n\n' %(self.shell.ESC_MAGIC,
                                                  fname,fndoc))

            else:
                magic_docs.append('%s%s:\n\t%s\n' %(self.shell.ESC_MAGIC,
                                                  fname,fndoc))

        magic_docs = ''.join(magic_docs)

        if mode == 'rest':
            return "".join(rest_docs)

        if mode == 'latex':
            print self.format_latex(magic_docs)
            return
        else:
            magic_docs = self.format_screen(magic_docs)
        if mode == 'brief':
            return magic_docs

        outmsg = """
IPython's 'magic' functions
===========================

The magic function system provides a series of functions which allow you to
control the behavior of IPython itself, plus a lot of system-type
features. All these functions are prefixed with a % character, but parameters
are given without parentheses or quotes.

NOTE: If you have 'automagic' enabled (via the command line option or with the
%automagic function), you don't need to type in the % explicitly.  By default,
IPython ships with automagic on, so you should only rarely need the % escape.

Example: typing '%cd mydir' (without the quotes) changes you working directory
to 'mydir', if it exists.

You can define your own magic functions to extend the system. See the supplied
ipythonrc and example-magic.py files for details (in your ipython
configuration directory, typically $HOME/.ipython/).

You can also define your own aliased names for magic functions. In your
ipythonrc file, placing a line like:

  execute __IPYTHON__.magic_pf = __IPYTHON__.magic_profile

will define %pf as a new name for %profile.

You can also call magics in code using the ipmagic() function, which IPython
automatically adds to the builtin namespace.  Type 'ipmagic?' for details.

For a list of the available magic functions, use %lsmagic. For a description
of any of them, type %magic_name?, e.g. '%cd?'.

Currently the magic system has the following functions:\n"""

        mesc = self.shell.ESC_MAGIC
        outmsg = ("%s\n%s\n\nSummary of magic functions (from %slsmagic):"
                  "\n\n%s%s\n\n%s" % (outmsg,
                                       magic_docs,mesc,mesc,
                                       (' '+mesc).join(self.lsmagic()),
                                       Magic.auto_status[self.shell.rc.automagic] ) )

        page(outmsg,screen_lines=self.shell.rc.screen_length)
489 489
490 490
    def magic_autoindent(self, parameter_s = ''):
        """Toggle autoindent on/off (if available)."""

        self.shell.set_autoindent()
        print "Automatic indentation is:",['OFF','ON'][self.shell.autoindent]
496 496
497 497
498 498 def magic_automagic(self, parameter_s = ''):
499 499 """Make magic functions callable without having to type the initial %.
500 500
501 501 Without argumentsl toggles on/off (when off, you must call it as
502 502 %automagic, of course). With arguments it sets the value, and you can
503 503 use any of (case insensitive):
504 504
505 505 - on,1,True: to activate
506 506
507 507 - off,0,False: to deactivate.
508 508
509 509 Note that magic functions have lowest priority, so if there's a
510 510 variable whose name collides with that of a magic fn, automagic won't
511 511 work for that function (you get the variable instead). However, if you
512 512 delete the variable (del var), the previously shadowed magic function
513 513 becomes visible to automagic again."""
514 514
515 515 rc = self.shell.rc
516 516 arg = parameter_s.lower()
517 517 if parameter_s in ('on','1','true'):
518 518 rc.automagic = True
519 519 elif parameter_s in ('off','0','false'):
520 520 rc.automagic = False
521 521 else:
522 522 rc.automagic = not rc.automagic
523 523 print '\n' + Magic.auto_status[rc.automagic]
524 524
525 525
    def magic_autocall(self, parameter_s = ''):
        """Make functions callable without having to type parentheses.

        Usage:

           %autocall [mode]

        The mode can be one of: 0->Off, 1->Smart, 2->Full.  If not given, the
        value is toggled on and off (remembering the previous state).

        In more detail, these values mean:

        0 -> fully disabled

        1 -> active, but do not apply if there are no arguments on the line.

        In this mode, you get:

        In [1]: callable
        Out[1]: <built-in function callable>

        In [2]: callable 'hello'
        ------> callable('hello')
        Out[2]: False

        2 -> Active always.  Even if no arguments are present, the callable
        object is called:

        In [4]: callable
        ------> callable()

        Note that even with autocall off, you can still use '/' at the start of
        a line to treat the first argument on the command line as a function
        and add parentheses to it:

        In [8]: /str 43
        ------> str(43)
        Out[8]: '43'
        """

        rc = self.shell.rc

        if parameter_s:
            arg = int(parameter_s)
        else:
            arg = 'toggle'

        if not arg in (0,1,2,'toggle'):
            error('Valid modes: (0->Off, 1->Smart, 2->Full')
            return

        if arg in (0,1,2):
            rc.autocall = arg
        else: # toggle
            if rc.autocall:
                # Remember the last non-zero mode so the next toggle can
                # restore it rather than defaulting to Smart.
                self._magic_state.autocall_save = rc.autocall
                rc.autocall = 0
            else:
                try:
                    rc.autocall = self._magic_state.autocall_save
                except AttributeError:
                    # No saved state yet: default to Smart (1).
                    rc.autocall = self._magic_state.autocall_save = 1

        print "Automatic calling is:",['OFF','Smart','Full'][rc.autocall]
590 590
591 591 def magic_system_verbose(self, parameter_s = ''):
592 592 """Set verbose printing of system calls.
593 593
594 594 If called without an argument, act as a toggle"""
595 595
596 596 if parameter_s:
597 597 val = bool(eval(parameter_s))
598 598 else:
599 599 val = None
600 600
601 601 self.shell.rc_set_toggle('system_verbose',val)
602 602 print "System verbose printing is:",\
603 603 ['OFF','ON'][self.shell.rc.system_verbose]
604 604
605 605
606 606 def magic_page(self, parameter_s=''):
607 607 """Pretty print the object and display it through a pager.
608 608
609 609 %page [options] OBJECT
610 610
611 611 If no object is given, use _ (last output).
612 612
613 613 Options:
614 614
615 615 -r: page str(object), don't pretty-print it."""
616 616
617 617 # After a function contributed by Olivier Aubert, slightly modified.
618 618
619 619 # Process options/args
620 620 opts,args = self.parse_options(parameter_s,'r')
621 621 raw = 'r' in opts
622 622
623 623 oname = args and args or '_'
624 624 info = self._ofind(oname)
625 625 if info['found']:
626 626 txt = (raw and str or pformat)( info['obj'] )
627 627 page(txt)
628 628 else:
629 629 print 'Object `%s` not found' % oname
630 630
    def magic_profile(self, parameter_s=''):
        """Print your currently active IPython profile."""
        # rc.profile is empty when IPython was started without a -profile
        # option.
        if self.shell.rc.profile:
            printpl('Current IPython profile: $self.shell.rc.profile.')
        else:
            print 'No profile active.'
637 637
638 638 def magic_pinfo(self, parameter_s='', namespaces=None):
639 639 """Provide detailed information about an object.
640 640
641 641 '%pinfo object' is just a synonym for object? or ?object."""
642 642
643 643 #print 'pinfo par: <%s>' % parameter_s # dbg
644 644
645 645
646 646 # detail_level: 0 -> obj? , 1 -> obj??
647 647 detail_level = 0
648 648 # We need to detect if we got called as 'pinfo pinfo foo', which can
649 649 # happen if the user types 'pinfo foo?' at the cmd line.
650 650 pinfo,qmark1,oname,qmark2 = \
651 651 re.match('(pinfo )?(\?*)(.*?)(\??$)',parameter_s).groups()
652 652 if pinfo or qmark1 or qmark2:
653 653 detail_level = 1
654 654 if "*" in oname:
655 655 self.magic_psearch(oname)
656 656 else:
657 657 self._inspect('pinfo', oname, detail_level=detail_level,
658 658 namespaces=namespaces)
659 659
660 660 def magic_pdef(self, parameter_s='', namespaces=None):
661 661 """Print the definition header for any callable object.
662 662
663 663 If the object is a class, print the constructor information."""
664 664 self._inspect('pdef',parameter_s, namespaces)
665 665
666 666 def magic_pdoc(self, parameter_s='', namespaces=None):
667 667 """Print the docstring for an object.
668 668
669 669 If the given object is a class, it will print both the class and the
670 670 constructor docstrings."""
671 671 self._inspect('pdoc',parameter_s, namespaces)
672 672
673 673 def magic_psource(self, parameter_s='', namespaces=None):
674 674 """Print (or run through pager) the source code for an object."""
675 675 self._inspect('psource',parameter_s, namespaces)
676 676
677 677 def magic_pfile(self, parameter_s=''):
678 678 """Print (or run through pager) the file where an object is defined.
679 679
680 680 The file opens at the line where the object definition begins. IPython
681 681 will honor the environment variable PAGER if set, and otherwise will
682 682 do its best to print the file in a convenient form.
683 683
684 684 If the given argument is not an object currently defined, IPython will
685 685 try to interpret it as a filename (automatically adding a .py extension
686 686 if needed). You can thus use %pfile as a syntax highlighting code
687 687 viewer."""
688 688
689 689 # first interpret argument as an object name
690 690 out = self._inspect('pfile',parameter_s)
691 691 # if not, try the input as a filename
692 692 if out == 'not found':
693 693 try:
694 694 filename = get_py_filename(parameter_s)
695 695 except IOError,msg:
696 696 print msg
697 697 return
698 698 page(self.shell.inspector.format(file(filename).read()))
699 699
    def _inspect(self,meth,oname,namespaces=None,**kw):
        """Generic interface to the inspector system.

        This function is meant to be called by pdef, pdoc & friends.

        :Parameters:
          meth : str
            Name of the inspector method to invoke ('pdef', 'pdoc',
            'pinfo', 'psource' or 'pfile').
          oname : str
            Name of the object to look up.
          namespaces
            Optional namespaces, forwarded to self._ofind.
          kw
            Extra keyword arguments, forwarded only to the 'pinfo' method.

        Returns the string 'not found' when the object could not be located
        (so callers can take other action), otherwise None."""

        #oname = oname.strip()
        #print '1- oname: <%r>' % oname # dbg
        try:
            # Identifiers must be plain ascii; reject anything else early.
            oname = oname.strip().encode('ascii')
            #print '2- oname: <%r>' % oname # dbg
        except UnicodeEncodeError:
            print 'Python identifiers can only contain ascii characters.'
            return 'not found'

        info = Struct(self._ofind(oname, namespaces))

        if info.found:
            # Give type-specific inspectors registered with the generics
            # mechanism the first chance to handle the object.
            try:
                IPython.generics.inspect_object(info.obj)
                return
            except IPython.ipapi.TryNext:
                pass
            # Get the docstring of the class property if it exists.
            path = oname.split('.')
            root = '.'.join(path[:-1])
            if info.parent is not None:
                try:
                    target = getattr(info.parent, '__class__')
                    # The object belongs to a class instance.
                    try:
                        target = getattr(target, path[-1])
                        # The class defines the object.
                        if isinstance(target, property):
                            # Re-run the lookup against the property object
                            # on the class, so we report its docstring
                            # instead of the computed value's.
                            oname = root + '.__class__.' + path[-1]
                            info = Struct(self._ofind(oname))
                    except AttributeError: pass
                except AttributeError: pass

            pmethod = getattr(self.shell.inspector,meth)
            # Magics get the screen formatter; everything else uses the
            # inspector's default formatting.
            formatter = info.ismagic and self.format_screen or None
            if meth == 'pdoc':
                pmethod(info.obj,oname,formatter)
            elif meth == 'pinfo':
                pmethod(info.obj,oname,formatter,info,**kw)
            else:
                pmethod(info.obj,oname)
        else:
            print 'Object `%s` not found.' % oname
            return 'not found' # so callers can take other action
749 749
750 750 def magic_psearch(self, parameter_s=''):
751 751 """Search for object in namespaces by wildcard.
752 752
753 753 %psearch [options] PATTERN [OBJECT TYPE]
754 754
755 755 Note: ? can be used as a synonym for %psearch, at the beginning or at
756 756 the end: both a*? and ?a* are equivalent to '%psearch a*'. Still, the
757 757 rest of the command line must be unchanged (options come first), so
758 758 for example the following forms are equivalent
759 759
760 760 %psearch -i a* function
761 761 -i a* function?
762 762 ?-i a* function
763 763
764 764 Arguments:
765 765
766 766 PATTERN
767 767
768 768 where PATTERN is a string containing * as a wildcard similar to its
769 769 use in a shell. The pattern is matched in all namespaces on the
770 770 search path. By default objects starting with a single _ are not
771 771 matched, many IPython generated objects have a single
772 772 underscore. The default is case insensitive matching. Matching is
773 773 also done on the attributes of objects and not only on the objects
774 774 in a module.
775 775
776 776 [OBJECT TYPE]
777 777
778 778 Is the name of a python type from the types module. The name is
779 779 given in lowercase without the ending type, ex. StringType is
780 780 written string. By adding a type here only objects matching the
781 781 given type are matched. Using all here makes the pattern match all
782 782 types (this is the default).
783 783
784 784 Options:
785 785
786 786 -a: makes the pattern match even objects whose names start with a
787 787 single underscore. These names are normally ommitted from the
788 788 search.
789 789
790 790 -i/-c: make the pattern case insensitive/sensitive. If neither of
791 791 these options is given, the default is read from your ipythonrc
792 792 file. The option name which sets this value is
793 793 'wildcards_case_sensitive'. If this option is not specified in your
794 794 ipythonrc file, IPython's internal default is to do a case sensitive
795 795 search.
796 796
797 797 -e/-s NAMESPACE: exclude/search a given namespace. The pattern you
798 798 specifiy can be searched in any of the following namespaces:
799 799 'builtin', 'user', 'user_global','internal', 'alias', where
800 800 'builtin' and 'user' are the search defaults. Note that you should
801 801 not use quotes when specifying namespaces.
802 802
803 803 'Builtin' contains the python module builtin, 'user' contains all
804 804 user data, 'alias' only contain the shell aliases and no python
805 805 objects, 'internal' contains objects used by IPython. The
806 806 'user_global' namespace is only used by embedded IPython instances,
807 807 and it contains module-level globals. You can add namespaces to the
808 808 search with -s or exclude them with -e (these options can be given
809 809 more than once).
810 810
811 811 Examples:
812 812
813 813 %psearch a* -> objects beginning with an a
814 814 %psearch -e builtin a* -> objects NOT in the builtin space starting in a
815 815 %psearch a* function -> all functions beginning with an a
816 816 %psearch re.e* -> objects beginning with an e in module re
817 817 %psearch r*.e* -> objects that start with e in modules starting in r
818 818 %psearch r*.* string -> all strings in modules beginning with r
819 819
820 820 Case sensitve search:
821 821
822 822 %psearch -c a* list all object beginning with lower case a
823 823
824 824 Show objects beginning with a single _:
825 825
826 826 %psearch -a _* list objects beginning with a single underscore"""
827 827 try:
828 828 parameter_s = parameter_s.encode('ascii')
829 829 except UnicodeEncodeError:
830 830 print 'Python identifiers can only contain ascii characters.'
831 831 return
832 832
833 833 # default namespaces to be searched
834 834 def_search = ['user','builtin']
835 835
836 836 # Process options/args
837 837 opts,args = self.parse_options(parameter_s,'cias:e:',list_all=True)
838 838 opt = opts.get
839 839 shell = self.shell
840 840 psearch = shell.inspector.psearch
841 841
842 842 # select case options
843 843 if opts.has_key('i'):
844 844 ignore_case = True
845 845 elif opts.has_key('c'):
846 846 ignore_case = False
847 847 else:
848 848 ignore_case = not shell.rc.wildcards_case_sensitive
849 849
850 850 # Build list of namespaces to search from user options
851 851 def_search.extend(opt('s',[]))
852 852 ns_exclude = ns_exclude=opt('e',[])
853 853 ns_search = [nm for nm in def_search if nm not in ns_exclude]
854 854
855 855 # Call the actual search
856 856 try:
857 857 psearch(args,shell.ns_table,ns_search,
858 858 show_all=opt('a'),ignore_case=ignore_case)
859 859 except:
860 860 shell.showtraceback()
861 861
862 862 def magic_who_ls(self, parameter_s=''):
863 863 """Return a sorted list of all interactive variables.
864 864
865 865 If arguments are given, only variables of types matching these
866 866 arguments are returned."""
867 867
868 868 user_ns = self.shell.user_ns
869 869 internal_ns = self.shell.internal_ns
870 870 user_config_ns = self.shell.user_config_ns
871 871 out = []
872 872 typelist = parameter_s.split()
873 873
874 874 for i in user_ns:
875 875 if not (i.startswith('_') or i.startswith('_i')) \
876 876 and not (i in internal_ns or i in user_config_ns):
877 877 if typelist:
878 878 if type(user_ns[i]).__name__ in typelist:
879 879 out.append(i)
880 880 else:
881 881 out.append(i)
882 882 out.sort()
883 883 return out
884 884
885 885 def magic_who(self, parameter_s=''):
886 886 """Print all interactive variables, with some minimal formatting.
887 887
888 888 If any arguments are given, only variables whose type matches one of
889 889 these are printed. For example:
890 890
891 891 %who function str
892 892
893 893 will only list functions and strings, excluding all other types of
894 894 variables. To find the proper type names, simply use type(var) at a
895 895 command line to see how python prints type names. For example:
896 896
897 897 In [1]: type('hello')\\
898 898 Out[1]: <type 'str'>
899 899
900 900 indicates that the type name for strings is 'str'.
901 901
902 902 %who always excludes executed names loaded through your configuration
903 903 file and things which are internal to IPython.
904 904
905 905 This is deliberate, as typically you may load many modules and the
906 906 purpose of %who is to show you only what you've manually defined."""
907 907
908 908 varlist = self.magic_who_ls(parameter_s)
909 909 if not varlist:
910 910 if parameter_s:
911 911 print 'No variables match your requested type.'
912 912 else:
913 913 print 'Interactive namespace is empty.'
914 914 return
915 915
916 916 # if we have variables, move on...
917 917 count = 0
918 918 for i in varlist:
919 919 print i+'\t',
920 920 count += 1
921 921 if count > 8:
922 922 count = 0
923 923 print
924 924 print
925 925
    def magic_whos(self, parameter_s=''):
        """Like %who, but gives some extra information about each variable.

        The same type filtering of %who can be applied here.

        For all variables, the type is printed. Additionally it prints:

          - For {},[],(): their length.

          - For numpy and Numeric arrays, a summary with shape, number of
          elements, typecode and size in memory.

          - Everything else: a string representation, snipping their middle if
          too long."""

        varnames = self.magic_who_ls(parameter_s)
        if not varnames:
            if parameter_s:
                print 'No variables match your requested type.'
            else:
                print 'Interactive namespace is empty.'
            return

        # if we have variables, move on...

        # for these types, show len() instead of data:
        seq_types = [types.DictType,types.ListType,types.TupleType]

        # for numpy/Numeric arrays, display summary info
        # Both imports are optional: a None type name simply never matches
        # in the per-variable dispatch below.
        try:
            import numpy
        except ImportError:
            ndarray_type = None
        else:
            ndarray_type = numpy.ndarray.__name__
        try:
            import Numeric
        except ImportError:
            array_type = None
        else:
            array_type = Numeric.ArrayType.__name__

        # Find all variable names and types so we can figure out column sizes
        def get_vars(i):
            # Map a name to its value in the user namespace.
            return self.shell.user_ns[i]

        # some types are well known and can be shorter
        abbrevs = {'IPython.macro.Macro' : 'Macro'}
        def type_name(v):
            tn = type(v).__name__
            return abbrevs.get(tn,tn)

        varlist = map(get_vars,varnames)

        typelist = []
        for vv in varlist:
            tt = type_name(vv)

            if tt=='instance':
                # Old-style instances all report 'instance'; use the class
                # name instead (abbreviated if known).
                typelist.append( abbrevs.get(str(vv.__class__),
                                             str(vv.__class__)))
            else:
                typelist.append(tt)

        # column labels and # of spaces as separator
        varlabel = 'Variable'
        typelabel = 'Type'
        datalabel = 'Data/Info'
        colsep = 3
        # variable format strings (itpl/printpl templates, interpolated
        # against the locals set in the loop below)
        vformat = "$vname.ljust(varwidth)$vtype.ljust(typewidth)"
        vfmt_short = '$vstr[:25]<...>$vstr[-25:]'
        aformat = "%s: %s elems, type `%s`, %s bytes"
        # find the size of the columns to format the output nicely
        varwidth = max(max(map(len,varnames)), len(varlabel)) + colsep
        typewidth = max(max(map(len,typelist)), len(typelabel)) + colsep
        # table header
        print varlabel.ljust(varwidth) + typelabel.ljust(typewidth) + \
              ' '+datalabel+'\n' + '-'*(varwidth+typewidth+len(datalabel)+1)
        # and the table itself
        kb = 1024
        Mb = 1048576 # kb**2
        for vname,var,vtype in zip(varnames,varlist,typelist):
            print itpl(vformat),
            if vtype in seq_types:
                # Containers: just show their length.
                print len(var)
            elif vtype in [array_type,ndarray_type]:
                # Arrays: shape like '3x4', element count, dtype, bytes.
                vshape = str(var.shape).replace(',','').replace(' ','x')[1:-1]
                if vtype==ndarray_type:
                    # numpy
                    vsize = var.size
                    vbytes = vsize*var.itemsize
                    vdtype = var.dtype
                else:
                    # Numeric
                    vsize = Numeric.size(var)
                    vbytes = vsize*var.itemsize()
                    vdtype = var.typecode()

                if vbytes < 100000:
                    print aformat % (vshape,vsize,vdtype,vbytes)
                else:
                    # Large arrays also get a human-readable kb/Mb figure.
                    print aformat % (vshape,vsize,vdtype,vbytes),
                    if vbytes < Mb:
                        print '(%s kb)' % (vbytes/kb,)
                    else:
                        print '(%s Mb)' % (vbytes/Mb,)
            else:
                try:
                    vstr = str(var)
                except UnicodeEncodeError:
                    vstr = unicode(var).encode(sys.getdefaultencoding(),
                                               'backslashreplace')
                vstr = vstr.replace('\n','\\n')
                if len(vstr) < 50:
                    print vstr
                else:
                    # Long reprs: show head and tail with the middle snipped.
                    printpl(vfmt_short)
1044 1044
1045 1045 def magic_reset(self, parameter_s=''):
1046 1046 """Resets the namespace by removing all names defined by the user.
1047 1047
1048 1048 Input/Output history are left around in case you need them."""
1049 1049
1050 1050 ans = self.shell.ask_yes_no(
1051 1051 "Once deleted, variables cannot be recovered. Proceed (y/[n])? ")
1052 1052 if not ans:
1053 1053 print 'Nothing done.'
1054 1054 return
1055 1055 user_ns = self.shell.user_ns
1056 1056 for i in self.magic_who_ls():
1057 1057 del(user_ns[i])
1058 1058
1059 1059 # Also flush the private list of module references kept for script
1060 1060 # execution protection
1061 1061 self.shell._user_main_modules[:] = []
1062 1062
    def magic_logstart(self,parameter_s=''):
        """Start logging anywhere in a session.

        %logstart [-o|-r|-t] [log_name [log_mode]]

        If no name is given, it defaults to a file named 'ipython_log.py' in your
        current directory, in 'rotate' mode (see below).

        '%logstart name' saves to file 'name' in 'backup' mode.  It saves your
        history up to that point and then continues logging.

        %logstart takes a second optional parameter: logging mode. This can be one
        of (note that the modes are given unquoted):\\
          append: well, that says it.\\
          backup: rename (if exists) to name~ and start name.\\
          global: single logfile in your home dir, appended to.\\
          over  : overwrite existing log.\\
          rotate: create rotating logs name.1~, name.2~, etc.

        Options:

          -o: log also IPython's output.  In this mode, all commands which
          generate an Out[NN] prompt are recorded to the logfile, right after
          their corresponding input line.  The output lines are always
          prepended with a '#[Out]# ' marker, so that the log remains valid
          Python code.

          Since this marker is always the same, filtering only the output from
          a log is very easy, using for example a simple awk call:

            awk -F'#\\[Out\\]# ' '{if($2) {print $2}}' ipython_log.py

          -r: log 'raw' input.  Normally, IPython's logs contain the processed
          input, so that user lines are logged in their final form, converted
          into valid Python.  For example, %Exit is logged as
          '_ip.magic("Exit").  If the -r flag is given, all input is logged
          exactly as typed, with no transformations applied.

          -t: put timestamps before each input line logged (these are put in
          comments)."""

        opts,par = self.parse_options(parameter_s,'ort')
        log_output = 'o' in opts
        log_raw_input = 'r' in opts
        timestamp = 't' in opts

        rc = self.shell.rc
        logger = self.shell.logger

        # if no args are given, the defaults set in the logger constructor by
        # ipython remain valid
        if par:
            try:
                logfname,logmode = par.split()
            except:
                # A single argument: filename only, default to 'backup' mode.
                logfname = par
                logmode = 'backup'
        else:
            logfname = logger.logfname
            logmode = logger.logmode
        # put logfname into rc struct as if it had been called on the command
        # line, so it ends up saved in the log header Save it in case we need
        # to restore it...
        old_logfile = rc.opts.get('logfile','')
        if logfname:
            logfname = os.path.expanduser(logfname)
        rc.opts.logfile = logfname
        loghead = self.shell.loghead_tpl % (rc.opts,rc.args)
        try:
            started  = logger.logstart(logfname,loghead,logmode,
                                       log_output,timestamp,log_raw_input)
        except:
            # Logger failed to start: restore the old logfile setting in rc
            # so the session state stays consistent.
            rc.opts.logfile = old_logfile
            warn("Couldn't start log: %s" % sys.exc_info()[1])
        else:
            # log input history up to this point, optionally interleaving
            # output if requested

            if timestamp:
                # disable timestamping for the previous history, since we've
                # lost those already (no time machine here).
                logger.timestamp = False

            if log_raw_input:
                input_hist = self.shell.input_hist_raw
            else:
                input_hist = self.shell.input_hist

            if log_output:
                log_write = logger.log_write
                output_hist = self.shell.output_hist
                # Replay each prior input line followed by its output, if any.
                for n in range(1,len(input_hist)-1):
                    log_write(input_hist[n].rstrip())
                    if n in output_hist:
                        log_write(repr(output_hist[n]),'output')
            else:
                logger.log_write(input_hist[1:])
            if timestamp:
                # re-enable timestamping
                logger.timestamp = True

            print ('Activating auto-logging. '
                   'Current session state plus future input saved.')
            logger.logstate()
1167 1167
1168 1168 def magic_logstop(self,parameter_s=''):
1169 1169 """Fully stop logging and close log file.
1170 1170
1171 1171 In order to start logging again, a new %logstart call needs to be made,
1172 1172 possibly (though not necessarily) with a new filename, mode and other
1173 1173 options."""
1174 1174 self.logger.logstop()
1175 1175
    def magic_logoff(self,parameter_s=''):
        """Temporarily stop logging.

        You must have previously started logging."""
        # switch_log(0) pauses logging; use %logon to resume it.
        self.shell.logger.switch_log(0)
1181 1181
    def magic_logon(self,parameter_s=''):
        """Restart logging.

        This function is for restarting logging which you've temporarily
        stopped with %logoff. For starting logging for the first time, you
        must use the %logstart function, which allows you to specify an
        optional log filename."""

        # switch_log(1) re-enables a log previously paused with %logoff.
        self.shell.logger.switch_log(1)
1191 1191
    def magic_logstate(self,parameter_s=''):
        """Print the status of the logging system."""

        # The logger prints its own status report.
        self.shell.logger.logstate()
1196 1196
1197 1197 def magic_pdb(self, parameter_s=''):
1198 1198 """Control the automatic calling of the pdb interactive debugger.
1199 1199
1200 1200 Call as '%pdb on', '%pdb 1', '%pdb off' or '%pdb 0'. If called without
1201 1201 argument it works as a toggle.
1202 1202
1203 1203 When an exception is triggered, IPython can optionally call the
1204 1204 interactive pdb debugger after the traceback printout. %pdb toggles
1205 1205 this feature on and off.
1206 1206
1207 1207 The initial state of this feature is set in your ipythonrc
1208 1208 configuration file (the variable is called 'pdb').
1209 1209
1210 1210 If you want to just activate the debugger AFTER an exception has fired,
1211 1211 without having to type '%pdb on' and rerunning your code, you can use
1212 1212 the %debug magic."""
1213 1213
1214 1214 par = parameter_s.strip().lower()
1215 1215
1216 1216 if par:
1217 1217 try:
1218 1218 new_pdb = {'off':0,'0':0,'on':1,'1':1}[par]
1219 1219 except KeyError:
1220 1220 print ('Incorrect argument. Use on/1, off/0, '
1221 1221 'or nothing for a toggle.')
1222 1222 return
1223 1223 else:
1224 1224 # toggle
1225 1225 new_pdb = not self.shell.call_pdb
1226 1226
1227 1227 # set on the shell
1228 1228 self.shell.call_pdb = new_pdb
1229 1229 print 'Automatic pdb calling has been turned',on_off(new_pdb)
1230 1230
    def magic_debug(self, parameter_s=''):
        """Activate the interactive debugger in post-mortem mode.

        If an exception has just occurred, this lets you inspect its stack
        frames interactively.  Note that this will always work only on the last
        traceback that occurred, so you must call this quickly after an
        exception that you wish to inspect has fired, because if another one
        occurs, it clobbers the previous one.

        If you want IPython to automatically do this on every exception, see
        the %pdb magic for more details.
        """

        # Delegate to the shell's debugger entry point; force=True
        # presumably enters it regardless of the call_pdb setting -- see
        # the shell's debugger() implementation.
        self.shell.debugger(force=True)
1245 1245
    def magic_prun(self, parameter_s ='',user_mode=1,
                   opts=None,arg_lst=None,prog_ns=None):

        """Run a statement through the python code profiler.

        Usage:\\
          %prun [options] statement

        The given statement (which doesn't require quote marks) is run via the
        python profiler in a manner similar to the profile.run() function.
        Namespaces are internally managed to work correctly; profile.run
        cannot be used in IPython because it makes certain assumptions about
        namespaces which do not hold under IPython.

        Options:

        -l <limit>: you can place restrictions on what or how much of the
        profile gets printed. The limit value can be:

          * A string: only information for function names containing this string
          is printed.

          * An integer: only these many lines are printed.

          * A float (between 0 and 1): this fraction of the report is printed
          (for example, use a limit of 0.4 to see the topmost 40% only).

        You can combine several limits with repeated use of the option. For
        example, '-l __init__ -l 5' will print only the topmost 5 lines of
        information about class constructors.

        -r: return the pstats.Stats object generated by the profiling. This
        object has all the information about the profile in it, and you can
        later use it for further analysis or in other functions.

        -s <key>: sort profile by given key. You can provide more than one key
        by using the option several times: '-s key1 -s key2 -s key3...'. The
        default sorting key is 'time'.

        The following is copied verbatim from the profile documentation
        referenced below:

        When more than one key is provided, additional keys are used as
        secondary criteria when the there is equality in all keys selected
        before them.

        Abbreviations can be used for any key names, as long as the
        abbreviation is unambiguous.  The following are the keys currently
        defined:

                Valid Arg       Meaning\\
                  "calls"      call count\\
                  "cumulative" cumulative time\\
                  "file"       file name\\
                  "module"     file name\\
                  "pcalls"     primitive call count\\
                  "line"       line number\\
                  "name"       function name\\
                  "nfl"        name/file/line\\
                  "stdname"    standard name\\
                  "time"       internal time

        Note that all sorts on statistics are in descending order (placing
        most time consuming items first), where as name, file, and line number
        searches are in ascending order (i.e., alphabetical). The subtle
        distinction between "nfl" and "stdname" is that the standard name is a
        sort of the name as printed, which means that the embedded line
        numbers get compared in an odd way.  For example, lines 3, 20, and 40
        would (if the file names were the same) appear in the string order
        "20" "3" and "40".  In contrast, "nfl" does a numeric compare of the
        line numbers.  In fact, sort_stats("nfl") is the same as
        sort_stats("name", "file", "line").

        -T <filename>: save profile results as shown on screen to a text
        file. The profile is still shown on screen.

        -D <filename>: save (via dump_stats) profile statistics to given
        filename. This data is in a format understood by the pstats module, and
        is generated by a call to the dump_stats() method of profile
        objects. The profile is still shown on screen.

        If you want to run complete programs under the profiler's control, use
        '%run -p [prof_opts] filename.py [args to program]' where prof_opts
        contains profiler specific options as described here.

        You can read the complete documentation for the profile module with:\\
          In [1]: import profile; profile.help() """

        opts_def = Struct(D=[''],l=[],s=['time'],T=[''])
        # protect user quote marks
        parameter_s = parameter_s.replace('"',r'\"').replace("'",r"\'")

        if user_mode:  # regular user call
            opts,arg_str = self.parse_options(parameter_s,'D:l:rs:T:',
                                              list_all=1)
            namespace = self.shell.user_ns
        else:  # called to run a program by %run -p
            # In this mode opts/arg_lst/prog_ns come from the caller
            # (%run); we only need to resolve the program filename.
            try:
                filename = get_py_filename(arg_lst[0])
            except IOError,msg:
                error(msg)
                return

            arg_str = 'execfile(filename,prog_ns)'
            namespace = locals()

        opts.merge(opts_def)

        prof = profile.Profile()
        try:
            prof = prof.runctx(arg_str,namespace,namespace)
            sys_exit = ''
        except SystemExit:
            # A SystemExit from the profiled code is noted but not fatal;
            # the (partial) profile is still reported.
            sys_exit = """*** SystemExit exception caught in code being profiled."""

        stats = pstats.Stats(prof).strip_dirs().sort_stats(*opts.s)

        lims = opts.l
        if lims:
            lims = [] # rebuild lims with ints/floats/strings
            for lim in opts.l:
                try:
                    lims.append(int(lim))
                except ValueError:
                    try:
                        lims.append(float(lim))
                    except ValueError:
                        lims.append(lim)

        # Trap output.
        stdout_trap = StringIO()

        if hasattr(stats,'stream'):
            # In newer versions of python, the stats object has a 'stream'
            # attribute to write into.
            stats.stream = stdout_trap
            stats.print_stats(*lims)
        else:
            # For older versions, we manually redirect stdout during printing
            sys_stdout = sys.stdout
            try:
                sys.stdout = stdout_trap
                stats.print_stats(*lims)
            finally:
                sys.stdout = sys_stdout

        output = stdout_trap.getvalue()
        output = output.rstrip()

        page(output,screen_lines=self.shell.rc.screen_length)
        print sys_exit,

        # Optionally persist the results: -D dumps the raw stats, -T saves
        # the on-screen text report.
        dump_file = opts.D[0]
        text_file = opts.T[0]
        if dump_file:
            prof.dump_stats(dump_file)
            print '\n*** Profile stats marshalled to file',\
                  `dump_file`+'.',sys_exit
        if text_file:
            pfile = file(text_file,'w')
            pfile.write(output)
            pfile.close()
            print '\n*** Profile printout saved to text file',\
                  `text_file`+'.',sys_exit

        if opts.has_key('r'):
            return stats
        else:
            return None
1415 1415
1416 1416 def magic_run(self, parameter_s ='',runner=None):
1417 1417 """Run the named file inside IPython as a program.
1418 1418
1419 1419 Usage:\\
1420 1420 %run [-n -i -t [-N<N>] -d [-b<N>] -p [profile options]] file [args]
1421 1421
1422 1422 Parameters after the filename are passed as command-line arguments to
1423 1423 the program (put in sys.argv). Then, control returns to IPython's
1424 1424 prompt.
1425 1425
1426 1426 This is similar to running at a system prompt:\\
1427 1427 $ python file args\\
1428 1428 but with the advantage of giving you IPython's tracebacks, and of
1429 1429 loading all variables into your interactive namespace for further use
1430 1430 (unless -p is used, see below).
1431 1431
1432 1432 The file is executed in a namespace initially consisting only of
1433 1433 __name__=='__main__' and sys.argv constructed as indicated. It thus
1434 1434 sees its environment as if it were being run as a stand-alone program
1435 1435 (except for sharing global objects such as previously imported
1436 1436 modules). But after execution, the IPython interactive namespace gets
1437 1437 updated with all variables defined in the program (except for __name__
1438 1438 and sys.argv). This allows for very convenient loading of code for
1439 1439 interactive work, while giving each program a 'clean sheet' to run in.
1440 1440
1441 1441 Options:
1442 1442
1443 1443 -n: __name__ is NOT set to '__main__', but to the running file's name
1444 1444 without extension (as python does under import). This allows running
1445 1445 scripts and reloading the definitions in them without calling code
1446 1446 protected by an ' if __name__ == "__main__" ' clause.
1447 1447
1448 1448 -i: run the file in IPython's namespace instead of an empty one. This
1449 1449 is useful if you are experimenting with code written in a text editor
1450 1450 which depends on variables defined interactively.
1451 1451
1452 1452 -e: ignore sys.exit() calls or SystemExit exceptions in the script
1453 1453 being run. This is particularly useful if IPython is being used to
1454 1454 run unittests, which always exit with a sys.exit() call. In such
1455 1455 cases you are interested in the output of the test results, not in
1456 1456 seeing a traceback of the unittest module.
1457 1457
1458 1458 -t: print timing information at the end of the run. IPython will give
1459 1459 you an estimated CPU time consumption for your script, which under
1460 1460 Unix uses the resource module to avoid the wraparound problems of
1461 1461 time.clock(). Under Unix, an estimate of time spent on system tasks
1462 1462 is also given (for Windows platforms this is reported as 0.0).
1463 1463
1464 1464 If -t is given, an additional -N<N> option can be given, where <N>
1465 1465 must be an integer indicating how many times you want the script to
1466 1466 run. The final timing report will include total and per run results.
1467 1467
1468 1468 For example (testing the script uniq_stable.py):
1469 1469
1470 1470 In [1]: run -t uniq_stable
1471 1471
1472 1472 IPython CPU timings (estimated):\\
1473 1473 User : 0.19597 s.\\
1474 1474 System: 0.0 s.\\
1475 1475
1476 1476 In [2]: run -t -N5 uniq_stable
1477 1477
1478 1478 IPython CPU timings (estimated):\\
1479 1479 Total runs performed: 5\\
1480 1480 Times : Total Per run\\
1481 1481 User : 0.910862 s, 0.1821724 s.\\
1482 1482 System: 0.0 s, 0.0 s.
1483 1483
1484 1484 -d: run your program under the control of pdb, the Python debugger.
1485 1485 This allows you to execute your program step by step, watch variables,
1486 1486 etc. Internally, what IPython does is similar to calling:
1487 1487
1488 1488 pdb.run('execfile("YOURFILENAME")')
1489 1489
1490 1490 with a breakpoint set on line 1 of your file. You can change the line
1491 1491 number for this automatic breakpoint to be <N> by using the -bN option
1492 1492 (where N must be an integer). For example:
1493 1493
1494 1494 %run -d -b40 myscript
1495 1495
1496 1496 will set the first breakpoint at line 40 in myscript.py. Note that
1497 1497 the first breakpoint must be set on a line which actually does
1498 1498 something (not a comment or docstring) for it to stop execution.
1499 1499
1500 1500 When the pdb debugger starts, you will see a (Pdb) prompt. You must
1501 1501 first enter 'c' (without qoutes) to start execution up to the first
1502 1502 breakpoint.
1503 1503
1504 1504 Entering 'help' gives information about the use of the debugger. You
1505 1505 can easily see pdb's full documentation with "import pdb;pdb.help()"
1506 1506 at a prompt.
1507 1507
1508 1508 -p: run program under the control of the Python profiler module (which
1509 1509 prints a detailed report of execution times, function calls, etc).
1510 1510
1511 1511 You can pass other options after -p which affect the behavior of the
1512 1512 profiler itself. See the docs for %prun for details.
1513 1513
1514 1514 In this mode, the program's variables do NOT propagate back to the
1515 1515 IPython interactive namespace (because they remain in the namespace
1516 1516 where the profiler executes them).
1517 1517
1518 1518 Internally this triggers a call to %prun, see its documentation for
1519 1519 details on the options available specifically for profiling.
1520 1520
1521 1521 There is one special usage for which the text above doesn't apply:
1522 1522 if the filename ends with .ipy, the file is run as ipython script,
1523 1523 just as if the commands were written on IPython prompt.
1524 1524 """
1525 1525
1526 1526 # get arguments and set sys.argv for program to be run.
1527 1527 opts,arg_lst = self.parse_options(parameter_s,'nidtN:b:pD:l:rs:T:e',
1528 1528 mode='list',list_all=1)
1529 1529
1530 1530 try:
1531 1531 filename = get_py_filename(arg_lst[0])
1532 1532 except IndexError:
1533 1533 warn('you must provide at least a filename.')
1534 1534 print '\n%run:\n',OInspect.getdoc(self.magic_run)
1535 1535 return
1536 1536 except IOError,msg:
1537 1537 error(msg)
1538 1538 return
1539 1539
1540 1540 if filename.lower().endswith('.ipy'):
1541 1541 self.api.runlines(open(filename).read())
1542 1542 return
1543 1543
1544 1544 # Control the response to exit() calls made by the script being run
1545 1545 exit_ignore = opts.has_key('e')
1546 1546
1547 1547 # Make sure that the running script gets a proper sys.argv as if it
1548 1548 # were run from a system shell.
1549 1549 save_argv = sys.argv # save it for later restoring
1550 1550 sys.argv = [filename]+ arg_lst[1:] # put in the proper filename
1551 1551
1552 1552 if opts.has_key('i'):
1553 1553 # Run in user's interactive namespace
1554 1554 prog_ns = self.shell.user_ns
1555 1555 __name__save = self.shell.user_ns['__name__']
1556 1556 prog_ns['__name__'] = '__main__'
1557 1557 main_mod = FakeModule(prog_ns)
1558 1558 else:
1559 1559 # Run in a fresh, empty namespace
1560 1560 if opts.has_key('n'):
1561 1561 name = os.path.splitext(os.path.basename(filename))[0]
1562 1562 else:
1563 1563 name = '__main__'
1564 1564 main_mod = FakeModule()
1565 1565 prog_ns = main_mod.__dict__
1566 1566 prog_ns['__name__'] = name
1567 1567 # The shell MUST hold a reference to main_mod so after %run exits,
1568 1568 # the python deletion mechanism doesn't zero it out (leaving
1569 1569 # dangling references)
1570 1570 self.shell._user_main_modules.append(main_mod)
1571 1571
1572 1572 # Since '%run foo' emulates 'python foo.py' at the cmd line, we must
1573 1573 # set the __file__ global in the script's namespace
1574 1574 prog_ns['__file__'] = filename
1575 1575
1576 1576 # pickle fix. See iplib for an explanation. But we need to make sure
1577 1577 # that, if we overwrite __main__, we replace it at the end
1578 1578 if prog_ns['__name__'] == '__main__':
1579 1579 restore_main = sys.modules['__main__']
1580 1580 else:
1581 1581 restore_main = False
1582 1582
1583 1583 sys.modules[prog_ns['__name__']] = main_mod
1584 1584
1585 1585 stats = None
1586 1586 try:
1587 1587 self.shell.savehist()
1588 1588
1589 1589 if opts.has_key('p'):
1590 1590 stats = self.magic_prun('',0,opts,arg_lst,prog_ns)
1591 1591 else:
1592 1592 if opts.has_key('d'):
1593 1593 deb = Debugger.Pdb(self.shell.rc.colors)
1594 1594 # reset Breakpoint state, which is moronically kept
1595 1595 # in a class
1596 1596 bdb.Breakpoint.next = 1
1597 1597 bdb.Breakpoint.bplist = {}
1598 1598 bdb.Breakpoint.bpbynumber = [None]
1599 1599 # Set an initial breakpoint to stop execution
1600 1600 maxtries = 10
1601 1601 bp = int(opts.get('b',[1])[0])
1602 1602 checkline = deb.checkline(filename,bp)
1603 1603 if not checkline:
1604 1604 for bp in range(bp+1,bp+maxtries+1):
1605 1605 if deb.checkline(filename,bp):
1606 1606 break
1607 1607 else:
1608 1608 msg = ("\nI failed to find a valid line to set "
1609 1609 "a breakpoint\n"
1610 1610 "after trying up to line: %s.\n"
1611 1611 "Please set a valid breakpoint manually "
1612 1612 "with the -b option." % bp)
1613 1613 error(msg)
1614 1614 return
1615 1615 # if we find a good linenumber, set the breakpoint
1616 1616 deb.do_break('%s:%s' % (filename,bp))
1617 1617 # Start file run
1618 1618 print "NOTE: Enter 'c' at the",
1619 1619 print "%s prompt to start your script." % deb.prompt
1620 1620 try:
1621 1621 deb.run('execfile("%s")' % filename,prog_ns)
1622 1622
1623 1623 except:
1624 1624 etype, value, tb = sys.exc_info()
1625 1625 # Skip three frames in the traceback: the %run one,
1626 1626 # one inside bdb.py, and the command-line typed by the
1627 1627 # user (run by exec in pdb itself).
1628 1628 self.shell.InteractiveTB(etype,value,tb,tb_offset=3)
1629 1629 else:
1630 1630 if runner is None:
1631 1631 runner = self.shell.safe_execfile
1632 1632 if opts.has_key('t'):
1633 1633 # timed execution
1634 1634 try:
1635 1635 nruns = int(opts['N'][0])
1636 1636 if nruns < 1:
1637 1637 error('Number of runs must be >=1')
1638 1638 return
1639 1639 except (KeyError):
1640 1640 nruns = 1
1641 1641 if nruns == 1:
1642 1642 t0 = clock2()
1643 1643 runner(filename,prog_ns,prog_ns,
1644 1644 exit_ignore=exit_ignore)
1645 1645 t1 = clock2()
1646 1646 t_usr = t1[0]-t0[0]
1647 1647 t_sys = t1[1]-t1[1]
1648 1648 print "\nIPython CPU timings (estimated):"
1649 1649 print " User : %10s s." % t_usr
1650 1650 print " System: %10s s." % t_sys
1651 1651 else:
1652 1652 runs = range(nruns)
1653 1653 t0 = clock2()
1654 1654 for nr in runs:
1655 1655 runner(filename,prog_ns,prog_ns,
1656 1656 exit_ignore=exit_ignore)
1657 1657 t1 = clock2()
1658 1658 t_usr = t1[0]-t0[0]
1659 1659 t_sys = t1[1]-t1[1]
1660 1660 print "\nIPython CPU timings (estimated):"
1661 1661 print "Total runs performed:",nruns
1662 1662 print " Times : %10s %10s" % ('Total','Per run')
1663 1663 print " User : %10s s, %10s s." % (t_usr,t_usr/nruns)
1664 1664 print " System: %10s s, %10s s." % (t_sys,t_sys/nruns)
1665 1665
1666 1666 else:
1667 1667 # regular execution
1668 1668 runner(filename,prog_ns,prog_ns,exit_ignore=exit_ignore)
1669 1669 if opts.has_key('i'):
1670 1670 self.shell.user_ns['__name__'] = __name__save
1671 1671 else:
1672 1672 # update IPython interactive namespace
1673 1673 del prog_ns['__name__']
1674 1674 self.shell.user_ns.update(prog_ns)
1675 1675 finally:
1676 1676 sys.argv = save_argv
1677 1677 if restore_main:
1678 1678 sys.modules['__main__'] = restore_main
1679 1679 self.shell.reloadhist()
1680 1680
1681 1681 return stats
1682 1682
1683 1683 def magic_runlog(self, parameter_s =''):
1684 1684 """Run files as logs.
1685 1685
1686 1686 Usage:\\
1687 1687 %runlog file1 file2 ...
1688 1688
1689 1689 Run the named files (treating them as log files) in sequence inside
1690 1690 the interpreter, and return to the prompt. This is much slower than
1691 1691 %run because each line is executed in a try/except block, but it
1692 1692 allows running files with syntax errors in them.
1693 1693
1694 1694 Normally IPython will guess when a file is one of its own logfiles, so
1695 1695 you can typically use %run even for logs. This shorthand allows you to
1696 1696 force any file to be treated as a log file."""
1697 1697
1698 1698 for f in parameter_s.split():
1699 1699 self.shell.safe_execfile(f,self.shell.user_ns,
1700 1700 self.shell.user_ns,islog=1)
1701 1701
    def magic_timeit(self, parameter_s =''):
        """Time execution of a Python statement or expression

        Usage:\\
          %timeit [-n<N> -r<R> [-t|-c]] statement

        Time execution of a Python statement or expression using the timeit
        module.

        Options:
        -n<N>: execute the given statement <N> times in a loop. If this value
        is not given, a fitting value is chosen.

        -r<R>: repeat the loop iteration <R> times and take the best result.
        Default: 3

        -t: use time.time to measure the time, which is the default on Unix.
        This function measures wall time.

        -c: use time.clock to measure the time, which is the default on
        Windows and measures wall time. On Unix, resource.getrusage is used
        instead and returns the CPU user time.

        -p<P>: use a precision of <P> digits to display the timing result.
        Default: 3


        Examples:\\
        In [1]: %timeit pass
        10000000 loops, best of 3: 53.3 ns per loop

        In [2]: u = None

        In [3]: %timeit u is None
        10000000 loops, best of 3: 184 ns per loop

        In [4]: %timeit -r 4 u == None
        1000000 loops, best of 4: 242 ns per loop

        In [5]: import time

        In [6]: %timeit -n1 time.sleep(2)
        1 loops, best of 3: 2 s per loop


        The times reported by %timeit will be slightly higher than those
        reported by the timeit.py script when variables are accessed. This is
        due to the fact that %timeit executes the statement in the namespace
        of the shell, compared with timeit.py, which uses a single setup
        statement to import function or create variables. Generally, the bias
        does not matter as long as results from timeit.py are not mixed with
        those from %timeit."""

        import timeit
        import math

        # Display units and matching multipliers for the per-loop time;
        # units[i] corresponds to scaling[i].  "\xc2\xb5s" is UTF-8 for µs.
        units = ["s", "ms", "\xc2\xb5s", "ns"]
        scaling = [1, 1e3, 1e6, 1e9]

        # posix=False so quoting inside the statement survives option parsing.
        opts, stmt = self.parse_options(parameter_s,'n:r:tcp:',
                                        posix=False)
        if stmt == "":
            return
        timefunc = timeit.default_timer
        # number==0 means "auto-pick a loop count" (see below).
        number = int(getattr(opts, "n", 0))
        repeat = int(getattr(opts, "r", timeit.default_repeat))
        precision = int(getattr(opts, "p", 3))
        if hasattr(opts, "t"):
            timefunc = time.time
        if hasattr(opts, "c"):
            timefunc = clock

        timer = timeit.Timer(timer=timefunc)
        # this code has tight coupling to the inner workings of timeit.Timer,
        # but is there a better way to achieve that the code stmt has access
        # to the shell namespace?

        src = timeit.template % {'stmt': timeit.reindent(stmt, 8),
                                 'setup': "pass"}
        # Track compilation time so it can be reported if too long
        # Minimum time above which compilation time will be reported
        tc_min = 0.1

        t0 = clock()
        code = compile(src, "<magic-timeit>", "exec")
        tc = clock()-t0

        # Executing the compiled template defines an 'inner' function in ns,
        # with the shell's user namespace as its globals.
        ns = {}
        exec code in self.shell.user_ns, ns
        timer.inner = ns["inner"]

        if number == 0:
            # determine number so that 0.2 <= total time < 2.0
            number = 1
            for i in range(1, 10):
                number *= 10
                if timer.timeit(number) >= 0.2:
                    break

        # Best (lowest) per-loop time across all repeats.
        best = min(timer.repeat(repeat, number)) / number

        # Pick the largest unit that keeps the displayed value >= 1,
        # capped at nanoseconds.
        if best > 0.0:
            order = min(-int(math.floor(math.log10(best)) // 3), 3)
        else:
            order = 3
        print "%d loops, best of %d: %.*g %s per loop" % (number, repeat,
                                                          precision,
                                                          best * scaling[order],
                                                          units[order])
        if tc > tc_min:
            print "Compiler time: %.2f s" % tc
1813 1813
    def magic_time(self,parameter_s = ''):
        """Time execution of a Python statement or expression.

        The CPU and wall clock times are printed, and the value of the
        expression (if any) is returned. Note that under Win32, system time
        is always reported as 0, since it can not be measured.

        This function provides very basic timing functionality. In Python
        2.3, the timeit module offers more control and sophistication, so this
        could be rewritten to use it (patches welcome).

        Some examples:

        In [1]: time 2**128
        CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
        Wall time: 0.00
        Out[1]: 340282366920938463463374607431768211456L

        In [2]: n = 1000000

        In [3]: time sum(range(n))
        CPU times: user 1.20 s, sys: 0.05 s, total: 1.25 s
        Wall time: 1.37
        Out[3]: 499999500000L

        In [4]: time print 'hello world'
        hello world
        CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
        Wall time: 0.00

        Note that the time needed by Python to compile the given expression
        will be reported if it is more than 0.1s. In this example, the
        actual exponentiation is done by Python at compilation time, so while
        the expression can take a noticeable amount of time to compute, that
        time is purely due to the compilation:

        In [5]: time 3**9999;
        CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
        Wall time: 0.00 s

        In [6]: time 3**999999;
        CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
        Wall time: 0.00 s
        Compiler : 0.78 s
        """

        # fail immediately if the given expression can't be compiled

        # Run the input through the shell's prefilter so magics/aliases in it
        # are expanded before compiling.
        expr = self.shell.prefilter(parameter_s,False)

        # Minimum time above which compilation time will be reported
        tc_min = 0.1

        # Try to compile as an expression first (so we can return its value);
        # fall back to 'exec' mode for statements.  Compilation itself is
        # timed so it can be reported separately if significant.
        try:
            mode = 'eval'
            t0 = clock()
            code = compile(expr,'<timed eval>',mode)
            tc = clock()-t0
        except SyntaxError:
            mode = 'exec'
            t0 = clock()
            code = compile(expr,'<timed exec>',mode)
            tc = clock()-t0
        # skew measurement as little as possible
        glob = self.shell.user_ns
        clk = clock2
        wtime = time.time
        # time execution
        wall_st = wtime()
        if mode=='eval':
            st = clk()
            out = eval(code,glob)
            end = clk()
        else:
            st = clk()
            exec code in glob
            end = clk()
            out = None
        wall_end = wtime()
        # Compute actual times and report.  clock2() returns a (user, system)
        # CPU-time pair, so index 0 is user time and index 1 is system time.
        wall_time = wall_end-wall_st
        cpu_user = end[0]-st[0]
        cpu_sys = end[1]-st[1]
        cpu_tot = cpu_user+cpu_sys
        print "CPU times: user %.2f s, sys: %.2f s, total: %.2f s" % \
              (cpu_user,cpu_sys,cpu_tot)
        print "Wall time: %.2f s" % wall_time
        if tc > tc_min:
            print "Compiler : %.2f s" % tc
        return out
1904 1904
1905 1905 def magic_macro(self,parameter_s = ''):
1906 1906 """Define a set of input lines as a macro for future re-execution.
1907 1907
1908 1908 Usage:\\
1909 1909 %macro [options] name n1-n2 n3-n4 ... n5 .. n6 ...
1910 1910
1911 1911 Options:
1912 1912
1913 1913 -r: use 'raw' input. By default, the 'processed' history is used,
1914 1914 so that magics are loaded in their transformed version to valid
1915 1915 Python. If this option is given, the raw input as typed as the
1916 1916 command line is used instead.
1917 1917
1918 1918 This will define a global variable called `name` which is a string
1919 1919 made of joining the slices and lines you specify (n1,n2,... numbers
1920 1920 above) from your input history into a single string. This variable
1921 1921 acts like an automatic function which re-executes those lines as if
1922 1922 you had typed them. You just type 'name' at the prompt and the code
1923 1923 executes.
1924 1924
1925 1925 The notation for indicating number ranges is: n1-n2 means 'use line
1926 1926 numbers n1,...n2' (the endpoint is included). That is, '5-7' means
1927 1927 using the lines numbered 5,6 and 7.
1928 1928
1929 1929 Note: as a 'hidden' feature, you can also use traditional python slice
1930 1930 notation, where N:M means numbers N through M-1.
1931 1931
1932 1932 For example, if your history contains (%hist prints it):
1933 1933
1934 1934 44: x=1\\
1935 1935 45: y=3\\
1936 1936 46: z=x+y\\
1937 1937 47: print x\\
1938 1938 48: a=5\\
1939 1939 49: print 'x',x,'y',y\\
1940 1940
1941 1941 you can create a macro with lines 44 through 47 (included) and line 49
1942 1942 called my_macro with:
1943 1943
1944 1944 In [51]: %macro my_macro 44-47 49
1945 1945
1946 1946 Now, typing `my_macro` (without quotes) will re-execute all this code
1947 1947 in one pass.
1948 1948
1949 1949 You don't need to give the line-numbers in order, and any given line
1950 1950 number can appear multiple times. You can assemble macros with any
1951 1951 lines from your input history in any order.
1952 1952
1953 1953 The macro is a simple object which holds its value in an attribute,
1954 1954 but IPython's display system checks for macros and executes them as
1955 1955 code instead of printing them when you type their name.
1956 1956
1957 1957 You can view a macro's contents by explicitly printing it with:
1958 1958
1959 1959 'print macro_name'.
1960 1960
1961 1961 For one-off cases which DON'T contain magic function calls in them you
1962 1962 can obtain similar results by explicitly executing slices from your
1963 1963 input history with:
1964 1964
1965 1965 In [60]: exec In[44:48]+In[49]"""
1966 1966
1967 1967 opts,args = self.parse_options(parameter_s,'r',mode='list')
1968 1968 if not args:
1969 1969 macs = [k for k,v in self.shell.user_ns.items() if isinstance(v, Macro)]
1970 1970 macs.sort()
1971 1971 return macs
1972 1972 if len(args) == 1:
1973 1973 raise UsageError(
1974 1974 "%macro insufficient args; usage '%macro name n1-n2 n3-4...")
1975 1975 name,ranges = args[0], args[1:]
1976 1976
1977 1977 #print 'rng',ranges # dbg
1978 1978 lines = self.extract_input_slices(ranges,opts.has_key('r'))
1979 1979 macro = Macro(lines)
1980 1980 self.shell.user_ns.update({name:macro})
1981 1981 print 'Macro `%s` created. To execute, type its name (without quotes).' % name
1982 1982 print 'Macro contents:'
1983 1983 print macro,
1984 1984
1985 1985 def magic_save(self,parameter_s = ''):
1986 1986 """Save a set of lines to a given filename.
1987 1987
1988 1988 Usage:\\
1989 1989 %save [options] filename n1-n2 n3-n4 ... n5 .. n6 ...
1990 1990
1991 1991 Options:
1992 1992
1993 1993 -r: use 'raw' input. By default, the 'processed' history is used,
1994 1994 so that magics are loaded in their transformed version to valid
1995 1995 Python. If this option is given, the raw input as typed as the
1996 1996 command line is used instead.
1997 1997
1998 1998 This function uses the same syntax as %macro for line extraction, but
1999 1999 instead of creating a macro it saves the resulting string to the
2000 2000 filename you specify.
2001 2001
2002 2002 It adds a '.py' extension to the file if you don't do so yourself, and
2003 2003 it asks for confirmation before overwriting existing files."""
2004 2004
2005 2005 opts,args = self.parse_options(parameter_s,'r',mode='list')
2006 2006 fname,ranges = args[0], args[1:]
2007 2007 if not fname.endswith('.py'):
2008 2008 fname += '.py'
2009 2009 if os.path.isfile(fname):
2010 2010 ans = raw_input('File `%s` exists. Overwrite (y/[N])? ' % fname)
2011 2011 if ans.lower() not in ['y','yes']:
2012 2012 print 'Operation cancelled.'
2013 2013 return
2014 2014 cmds = ''.join(self.extract_input_slices(ranges,opts.has_key('r')))
2015 2015 f = file(fname,'w')
2016 2016 f.write(cmds)
2017 2017 f.close()
2018 2018 print 'The following commands were written to file `%s`:' % fname
2019 2019 print cmds
2020 2020
2021 2021 def _edit_macro(self,mname,macro):
2022 2022 """open an editor with the macro data in a file"""
2023 2023 filename = self.shell.mktempfile(macro.value)
2024 2024 self.shell.hooks.editor(filename)
2025 2025
2026 2026 # and make a new macro object, to replace the old one
2027 2027 mfile = open(filename)
2028 2028 mvalue = mfile.read()
2029 2029 mfile.close()
2030 2030 self.shell.user_ns[mname] = Macro(mvalue)
2031 2031
2032 2032 def magic_ed(self,parameter_s=''):
2033 2033 """Alias to %edit."""
2034 2034 return self.magic_edit(parameter_s)
2035 2035
2036 2036 def magic_edit(self,parameter_s='',last_call=['','']):
2037 2037 """Bring up an editor and execute the resulting code.
2038 2038
2039 2039 Usage:
2040 2040 %edit [options] [args]
2041 2041
2042 2042 %edit runs IPython's editor hook. The default version of this hook is
2043 2043 set to call the __IPYTHON__.rc.editor command. This is read from your
2044 2044 environment variable $EDITOR. If this isn't found, it will default to
2045 2045 vi under Linux/Unix and to notepad under Windows. See the end of this
2046 2046 docstring for how to change the editor hook.
2047 2047
2048 2048 You can also set the value of this editor via the command line option
2049 2049 '-editor' or in your ipythonrc file. This is useful if you wish to use
2050 2050 specifically for IPython an editor different from your typical default
2051 2051 (and for Windows users who typically don't set environment variables).
2052 2052
2053 2053 This command allows you to conveniently edit multi-line code right in
2054 2054 your IPython session.
2055 2055
2056 2056 If called without arguments, %edit opens up an empty editor with a
2057 2057 temporary file and will execute the contents of this file when you
2058 2058 close it (don't forget to save it!).
2059 2059
2060 2060
2061 2061 Options:
2062 2062
2063 2063 -n <number>: open the editor at a specified line number. By default,
2064 2064 the IPython editor hook uses the unix syntax 'editor +N filename', but
2065 2065 you can configure this by providing your own modified hook if your
2066 2066 favorite editor supports line-number specifications with a different
2067 2067 syntax.
2068 2068
2069 2069 -p: this will call the editor with the same data as the previous time
2070 2070 it was used, regardless of how long ago (in your current session) it
2071 2071 was.
2072 2072
2073 2073 -r: use 'raw' input. This option only applies to input taken from the
2074 2074 user's history. By default, the 'processed' history is used, so that
2075 2075 magics are loaded in their transformed version to valid Python. If
2076 2076 this option is given, the raw input as typed as the command line is
2077 2077 used instead. When you exit the editor, it will be executed by
2078 2078 IPython's own processor.
2079 2079
2080 2080 -x: do not execute the edited code immediately upon exit. This is
2081 2081 mainly useful if you are editing programs which need to be called with
2082 2082 command line arguments, which you can then do using %run.
2083 2083
2084 2084
2085 2085 Arguments:
2086 2086
2087 2087 If arguments are given, the following possibilites exist:
2088 2088
2089 2089 - The arguments are numbers or pairs of colon-separated numbers (like
2090 2090 1 4:8 9). These are interpreted as lines of previous input to be
2091 2091 loaded into the editor. The syntax is the same of the %macro command.
2092 2092
2093 2093 - If the argument doesn't start with a number, it is evaluated as a
2094 2094 variable and its contents loaded into the editor. You can thus edit
2095 2095 any string which contains python code (including the result of
2096 2096 previous edits).
2097 2097
2098 2098 - If the argument is the name of an object (other than a string),
2099 2099 IPython will try to locate the file where it was defined and open the
2100 2100 editor at the point where it is defined. You can use `%edit function`
2101 2101 to load an editor exactly at the point where 'function' is defined,
2102 2102 edit it and have the file be executed automatically.
2103 2103
2104 2104 If the object is a macro (see %macro for details), this opens up your
2105 2105 specified editor with a temporary file containing the macro's data.
2106 2106 Upon exit, the macro is reloaded with the contents of the file.
2107 2107
2108 2108 Note: opening at an exact line is only supported under Unix, and some
2109 2109 editors (like kedit and gedit up to Gnome 2.8) do not understand the
2110 2110 '+NUMBER' parameter necessary for this feature. Good editors like
2111 2111 (X)Emacs, vi, jed, pico and joe all do.
2112 2112
2113 2113 - If the argument is not found as a variable, IPython will look for a
2114 2114 file with that name (adding .py if necessary) and load it into the
2115 2115 editor. It will execute its contents with execfile() when you exit,
2116 2116 loading any code in the file into your interactive namespace.
2117 2117
2118 2118 After executing your code, %edit will return as output the code you
2119 2119 typed in the editor (except when it was an existing file). This way
2120 2120 you can reload the code in further invocations of %edit as a variable,
2121 2121 via _<NUMBER> or Out[<NUMBER>], where <NUMBER> is the prompt number of
2122 2122 the output.
2123 2123
2124 2124 Note that %edit is also available through the alias %ed.
2125 2125
2126 2126 This is an example of creating a simple function inside the editor and
2127 2127 then modifying it. First, start up the editor:
2128 2128
2129 2129 In [1]: ed\\
2130 2130 Editing... done. Executing edited code...\\
2131 2131 Out[1]: 'def foo():\\n print "foo() was defined in an editing session"\\n'
2132 2132
2133 2133 We can then call the function foo():
2134 2134
2135 2135 In [2]: foo()\\
2136 2136 foo() was defined in an editing session
2137 2137
2138 2138 Now we edit foo. IPython automatically loads the editor with the
2139 2139 (temporary) file where foo() was previously defined:
2140 2140
2141 2141 In [3]: ed foo\\
2142 2142 Editing... done. Executing edited code...
2143 2143
2144 2144 And if we call foo() again we get the modified version:
2145 2145
2146 2146 In [4]: foo()\\
2147 2147 foo() has now been changed!
2148 2148
2149 2149 Here is an example of how to edit a code snippet successive
2150 2150 times. First we call the editor:
2151 2151
2152 2152 In [8]: ed\\
2153 2153 Editing... done. Executing edited code...\\
2154 2154 hello\\
2155 2155 Out[8]: "print 'hello'\\n"
2156 2156
2157 2157 Now we call it again with the previous output (stored in _):
2158 2158
2159 2159 In [9]: ed _\\
2160 2160 Editing... done. Executing edited code...\\
2161 2161 hello world\\
2162 2162 Out[9]: "print 'hello world'\\n"
2163 2163
2164 2164 Now we call it with the output #8 (stored in _8, also as Out[8]):
2165 2165
2166 2166 In [10]: ed _8\\
2167 2167 Editing... done. Executing edited code...\\
2168 2168 hello again\\
2169 2169 Out[10]: "print 'hello again'\\n"
2170 2170
2171 2171
2172 2172 Changing the default editor hook:
2173 2173
2174 2174 If you wish to write your own editor hook, you can put it in a
2175 2175 configuration file which you load at startup time. The default hook
2176 2176 is defined in the IPython.hooks module, and you can use that as a
2177 2177 starting example for further modifications. That file also has
2178 2178 general instructions on how to set a new hook for use once you've
2179 2179 defined it."""
2180 2180
2181 2181 # FIXME: This function has become a convoluted mess. It needs a
2182 2182 # ground-up rewrite with clean, simple logic.
2183 2183
def make_filename(arg):
    """Make a filename from the given arg.

    Returns the resolved .py filename, the arg itself if it already
    ends in '.py', or None when no usable filename can be derived.
    """
    try:
        filename = get_py_filename(arg)
    except IOError:
        # FIX: test this argument ('arg'), not the enclosing scope's
        # 'args' -- the old test let ANY name through whenever the
        # whole command line happened to end in '.py'.
        if arg.endswith('.py'):
            filename = arg
        else:
            filename = None
    return filename
2194 2194
2195 2195 # custom exceptions
2196 2196 class DataIsObject(Exception): pass
2197 2197
2198 2198 opts,args = self.parse_options(parameter_s,'prxn:')
2199 2199 # Set a few locals from the options for convenience:
2200 2200 opts_p = opts.has_key('p')
2201 2201 opts_r = opts.has_key('r')
2202 2202
2203 2203 # Default line number value
2204 2204 lineno = opts.get('n',None)
2205 2205
2206 2206 if opts_p:
2207 2207 args = '_%s' % last_call[0]
2208 2208 if not self.shell.user_ns.has_key(args):
2209 2209 args = last_call[1]
2210 2210
2211 2211 # use last_call to remember the state of the previous call, but don't
2212 2212 # let it be clobbered by successive '-p' calls.
2213 2213 try:
2214 2214 last_call[0] = self.shell.outputcache.prompt_count
2215 2215 if not opts_p:
2216 2216 last_call[1] = parameter_s
2217 2217 except:
2218 2218 pass
2219 2219
2220 2220 # by default this is done with temp files, except when the given
2221 2221 # arg is a filename
2222 2222 use_temp = 1
2223 2223
2224 2224 if re.match(r'\d',args):
2225 2225 # Mode where user specifies ranges of lines, like in %macro.
2226 2226 # This means that you can't edit files whose names begin with
2227 2227 # numbers this way. Tough.
2228 2228 ranges = args.split()
2229 2229 data = ''.join(self.extract_input_slices(ranges,opts_r))
2230 2230 elif args.endswith('.py'):
2231 2231 filename = make_filename(args)
2232 2232 data = ''
2233 2233 use_temp = 0
2234 2234 elif args:
2235 2235 try:
2236 2236 # Load the parameter given as a variable. If not a string,
2237 2237 # process it as an object instead (below)
2238 2238
2239 2239 #print '*** args',args,'type',type(args) # dbg
2240 2240 data = eval(args,self.shell.user_ns)
2241 2241 if not type(data) in StringTypes:
2242 2242 raise DataIsObject
2243 2243
2244 2244 except (NameError,SyntaxError):
2245 2245 # given argument is not a variable, try as a filename
2246 2246 filename = make_filename(args)
2247 2247 if filename is None:
2248 2248 warn("Argument given (%s) can't be found as a variable "
2249 2249 "or as a filename." % args)
2250 2250 return
2251 2251
2252 2252 data = ''
2253 2253 use_temp = 0
2254 2254 except DataIsObject:
2255 2255
2256 2256 # macros have a special edit function
2257 2257 if isinstance(data,Macro):
2258 2258 self._edit_macro(args,data)
2259 2259 return
2260 2260
2261 2261 # For objects, try to edit the file where they are defined
2262 2262 try:
2263 2263 filename = inspect.getabsfile(data)
2264 2264 if 'fakemodule' in filename.lower() and inspect.isclass(data):
2265 2265 # class created by %edit? Try to find source
2266 2266 # by looking for method definitions instead, the
2267 2267 # __module__ in those classes is FakeModule.
2268 2268 attrs = [getattr(data, aname) for aname in dir(data)]
2269 2269 for attr in attrs:
2270 2270 if not inspect.ismethod(attr):
2271 2271 continue
2272 2272 filename = inspect.getabsfile(attr)
2273 2273 if filename and 'fakemodule' not in filename.lower():
2274 2274 # change the attribute to be the edit target instead
2275 2275 data = attr
2276 2276 break
2277 2277
2278 2278 datafile = 1
2279 2279 except TypeError:
2280 2280 filename = make_filename(args)
2281 2281 datafile = 1
2282 2282 warn('Could not find file where `%s` is defined.\n'
2283 2283 'Opening a file named `%s`' % (args,filename))
2284 2284 # Now, make sure we can actually read the source (if it was in
2285 2285 # a temp file it's gone by now).
2286 2286 if datafile:
2287 2287 try:
2288 2288 if lineno is None:
2289 2289 lineno = inspect.getsourcelines(data)[1]
2290 2290 except IOError:
2291 2291 filename = make_filename(args)
2292 2292 if filename is None:
2293 2293 warn('The file `%s` where `%s` was defined cannot '
2294 2294 'be read.' % (filename,data))
2295 2295 return
2296 2296 use_temp = 0
2297 2297 else:
2298 2298 data = ''
2299 2299
2300 2300 if use_temp:
2301 2301 filename = self.shell.mktempfile(data)
2302 2302 print 'IPython will make a temporary file named:',filename
2303 2303
2304 2304 # do actual editing here
2305 2305 print 'Editing...',
2306 2306 sys.stdout.flush()
2307 2307 self.shell.hooks.editor(filename,lineno)
2308 2308 if opts.has_key('x'): # -x prevents actual execution
2309 2309 print
2310 2310 else:
2311 2311 print 'done. Executing edited code...'
2312 2312 if opts_r:
2313 2313 self.shell.runlines(file_read(filename))
2314 2314 else:
2315 2315 self.shell.safe_execfile(filename,self.shell.user_ns,
2316 2316 self.shell.user_ns)
2317 2317 if use_temp:
2318 2318 try:
2319 2319 return open(filename).read()
2320 2320 except IOError,msg:
2321 2321 if msg.filename == filename:
2322 2322 warn('File not found. Did you forget to save?')
2323 2323 return
2324 2324 else:
2325 2325 self.shell.showtraceback()
2326 2326
2327 2327 def magic_xmode(self,parameter_s = ''):
2328 2328 """Switch modes for the exception handlers.
2329 2329
2330 2330 Valid modes: Plain, Context and Verbose.
2331 2331
2332 2332 If called without arguments, acts as a toggle."""
2333 2333
2334 2334 def xmode_switch_err(name):
2335 2335 warn('Error changing %s exception modes.\n%s' %
2336 2336 (name,sys.exc_info()[1]))
2337 2337
2338 2338 shell = self.shell
2339 2339 new_mode = parameter_s.strip().capitalize()
2340 2340 try:
2341 2341 shell.InteractiveTB.set_mode(mode=new_mode)
2342 2342 print 'Exception reporting mode:',shell.InteractiveTB.mode
2343 2343 except:
2344 2344 xmode_switch_err('user')
2345 2345
2346 2346 # threaded shells use a special handler in sys.excepthook
2347 2347 if shell.isthreaded:
2348 2348 try:
2349 2349 shell.sys_excepthook.set_mode(mode=new_mode)
2350 2350 except:
2351 2351 xmode_switch_err('threaded')
2352 2352
def magic_colors(self,parameter_s = ''):
    """Switch color scheme for prompts, info system and exception handlers.

    Currently implemented schemes: NoColor, Linux, LightBG.

    Color scheme names are not case-sensitive.

    Raises UsageError if no scheme name is given."""

    def color_switch_err(name):
        # Report (but don't abort on) failure to switch one subsystem.
        warn('Error changing %s color schemes.\n%s' %
             (name,sys.exc_info()[1]))

    new_scheme = parameter_s.strip()
    if not new_scheme:
        # (a dead 'return' statement that followed this raise was removed)
        raise UsageError(
            "%colors: you must specify a color scheme. See '%colors?'")
    # local shortcut
    shell = self.shell

    import IPython.rlineimpl as readline

    # Colors on Windows only work through pyreadline; fall back gracefully.
    if not readline.have_readline and sys.platform == "win32":
        msg = """\
Proper color support under MS Windows requires the pyreadline library.
You can find it at:
http://ipython.scipy.org/moin/PyReadline/Intro
Gary's readline needs the ctypes module, from:
http://starship.python.net/crew/theller/ctypes
(Note that ctypes is already part of Python versions 2.5 and newer).

Defaulting color scheme to 'NoColor'"""
        new_scheme = 'NoColor'
        warn(msg)

    # readline option is 0
    if not shell.has_readline:
        new_scheme = 'NoColor'

    # Set prompt colors
    try:
        shell.outputcache.set_colors(new_scheme)
    except:
        color_switch_err('prompt')
    else:
        # Record the actually-activated scheme name in the config.
        shell.rc.colors = \
            shell.outputcache.color_table.active_scheme_name
    # Set exception colors
    try:
        shell.InteractiveTB.set_colors(scheme = new_scheme)
        shell.SyntaxTB.set_colors(scheme = new_scheme)
    except:
        color_switch_err('exception')

    # threaded shells use a verbose traceback in sys.excepthook
    if shell.isthreaded:
        try:
            shell.sys_excepthook.set_colors(scheme=new_scheme)
        except:
            color_switch_err('system exception handler')

    # Set info (for 'object?') colors
    if shell.rc.color_info:
        try:
            shell.inspector.set_active_scheme(new_scheme)
        except:
            color_switch_err('object inspector')
    else:
        shell.inspector.set_active_scheme('NoColor')
2422 2422
2423 2423 def magic_color_info(self,parameter_s = ''):
2424 2424 """Toggle color_info.
2425 2425
2426 2426 The color_info configuration parameter controls whether colors are
2427 2427 used for displaying object details (by things like %psource, %pfile or
2428 2428 the '?' system). This function toggles this value with each call.
2429 2429
2430 2430 Note that unless you have a fairly recent pager (less works better
2431 2431 than more) in your system, using colored object information displays
2432 2432 will not work properly. Test it and see."""
2433 2433
2434 2434 self.shell.rc.color_info = 1 - self.shell.rc.color_info
2435 2435 self.magic_colors(self.shell.rc.colors)
2436 2436 print 'Object introspection functions have now coloring:',
2437 2437 print ['OFF','ON'][self.shell.rc.color_info]
2438 2438
2439 2439 def magic_Pprint(self, parameter_s=''):
2440 2440 """Toggle pretty printing on/off."""
2441 2441
2442 2442 self.shell.rc.pprint = 1 - self.shell.rc.pprint
2443 2443 print 'Pretty printing has been turned', \
2444 2444 ['OFF','ON'][self.shell.rc.pprint]
2445 2445
def magic_exit(self, parameter_s=''):
    """Exit IPython, confirming if configured to do so.

    You can configure whether IPython asks for confirmation upon exit by
    setting the confirm_exit flag in the ipythonrc file."""
    # The shell itself knows about the confirm_exit setting.
    self.shell.exit()
2453 2453
def magic_quit(self, parameter_s=''):
    """Exit IPython, confirming if configured to do so (like %exit)"""
    # Identical to %exit: delegate to the shell.
    self.shell.exit()
2458 2458
def magic_Exit(self, parameter_s=''):
    """Exit IPython without confirmation."""
    # Raising this flag makes the main loop terminate on its next pass.
    self.shell.exit_now = True
2463 2463
2464 2464 #......................................................................
2465 2465 # Functions to implement unix shell-type things
2466 2466
2467 2467 def magic_alias(self, parameter_s = ''):
2468 2468 """Define an alias for a system command.
2469 2469
2470 2470 '%alias alias_name cmd' defines 'alias_name' as an alias for 'cmd'
2471 2471
2472 2472 Then, typing 'alias_name params' will execute the system command 'cmd
2473 2473 params' (from your underlying operating system).
2474 2474
2475 2475 Aliases have lower precedence than magic functions and Python normal
2476 2476 variables, so if 'foo' is both a Python variable and an alias, the
2477 2477 alias can not be executed until 'del foo' removes the Python variable.
2478 2478
2479 2479 You can use the %l specifier in an alias definition to represent the
2480 2480 whole line when the alias is called. For example:
2481 2481
2482 2482 In [2]: alias all echo "Input in brackets: <%l>"\\
2483 2483 In [3]: all hello world\\
2484 2484 Input in brackets: <hello world>
2485 2485
2486 2486 You can also define aliases with parameters using %s specifiers (one
2487 2487 per parameter):
2488 2488
2489 2489 In [1]: alias parts echo first %s second %s\\
2490 2490 In [2]: %parts A B\\
2491 2491 first A second B\\
2492 2492 In [3]: %parts A\\
2493 2493 Incorrect number of arguments: 2 expected.\\
2494 2494 parts is an alias to: 'echo first %s second %s'
2495 2495
2496 2496 Note that %l and %s are mutually exclusive. You can only use one or
2497 2497 the other in your aliases.
2498 2498
2499 2499 Aliases expand Python variables just like system calls using ! or !!
2500 2500 do: all expressions prefixed with '$' get expanded. For details of
2501 2501 the semantic rules, see PEP-215:
2502 2502 http://www.python.org/peps/pep-0215.html. This is the library used by
2503 2503 IPython for variable expansion. If you want to access a true shell
2504 2504 variable, an extra $ is necessary to prevent its expansion by IPython:
2505 2505
2506 2506 In [6]: alias show echo\\
2507 2507 In [7]: PATH='A Python string'\\
2508 2508 In [8]: show $PATH\\
2509 2509 A Python string\\
2510 2510 In [9]: show $$PATH\\
2511 2511 /usr/local/lf9560/bin:/usr/local/intel/compiler70/ia32/bin:...
2512 2512
2513 2513 You can use the alias facility to acess all of $PATH. See the %rehash
2514 2514 and %rehashx functions, which automatically create aliases for the
2515 2515 contents of your $PATH.
2516 2516
2517 2517 If called with no parameters, %alias prints the current alias table."""
2518 2518
2519 2519 par = parameter_s.strip()
2520 2520 if not par:
2521 2521 stored = self.db.get('stored_aliases', {} )
2522 2522 atab = self.shell.alias_table
2523 2523 aliases = atab.keys()
2524 2524 aliases.sort()
2525 2525 res = []
2526 2526 showlast = []
2527 2527 for alias in aliases:
2528 2528 special = False
2529 2529 try:
2530 2530 tgt = atab[alias][1]
2531 2531 except (TypeError, AttributeError):
2532 2532 # unsubscriptable? probably a callable
2533 2533 tgt = atab[alias]
2534 2534 special = True
2535 2535 # 'interesting' aliases
2536 2536 if (alias in stored or
2537 2537 special or
2538 2538 alias.lower() != os.path.splitext(tgt)[0].lower() or
2539 2539 ' ' in tgt):
2540 2540 showlast.append((alias, tgt))
2541 2541 else:
2542 2542 res.append((alias, tgt ))
2543 2543
2544 2544 # show most interesting aliases last
2545 2545 res.extend(showlast)
2546 2546 print "Total number of aliases:",len(aliases)
2547 2547 return res
2548 2548 try:
2549 2549 alias,cmd = par.split(None,1)
2550 2550 except:
2551 2551 print OInspect.getdoc(self.magic_alias)
2552 2552 else:
2553 2553 nargs = cmd.count('%s')
2554 2554 if nargs>0 and cmd.find('%l')>=0:
2555 2555 error('The %s and %l specifiers are mutually exclusive '
2556 2556 'in alias definitions.')
2557 2557 else: # all looks OK
2558 2558 self.shell.alias_table[alias] = (nargs,cmd)
2559 2559 self.shell.alias_table_validate(verbose=0)
2560 2560 # end magic_alias
2561 2561
2562 2562 def magic_unalias(self, parameter_s = ''):
2563 2563 """Remove an alias"""
2564 2564
2565 2565 aname = parameter_s.strip()
2566 2566 if aname in self.shell.alias_table:
2567 2567 del self.shell.alias_table[aname]
2568 2568 stored = self.db.get('stored_aliases', {} )
2569 2569 if aname in stored:
2570 2570 print "Removing %stored alias",aname
2571 2571 del stored[aname]
2572 2572 self.db['stored_aliases'] = stored
2573 2573
2574 2574
def magic_rehashx(self, parameter_s = ''):
    """Update the alias table with all executable files in $PATH.

    This version explicitly checks that every entry in $PATH is a file
    with execute access (os.X_OK), so it is much slower than %rehash.

    Under Windows, it checks executability as a match against a
    '|'-separated string of extensions, stored in the IPython config
    variable win_exec_ext.  This defaults to 'exe|com|bat'.

    This function also resets the root module cache of module completer,
    used on slow filesystems.
    """

    ip = self.api

    # for the benefit of module completer in ipy_completers.py
    # FIX: the unconditional delete raised KeyError when the cache had
    # never been built (e.g. on a fresh profile).
    try:
        del ip.db['rootmodules']
    except KeyError:
        pass

    path = [os.path.abspath(os.path.expanduser(p)) for p in
            os.environ.get('PATH','').split(os.pathsep)]
    path = filter(os.path.isdir,path)

    alias_table = self.shell.alias_table
    syscmdlist = []
    # Platform-specific test for "is this file executable?"
    if os.name == 'posix':
        isexec = lambda fname:os.path.isfile(fname) and \
                 os.access(fname,os.X_OK)
    else:
        try:
            # NOTE(review): looks like this relies on a lower-case
            # 'pathext' key being present in os.environ -- confirm, the
            # variable is conventionally spelled PATHEXT.
            winext = os.environ['pathext'].replace(';','|').replace('.','')
        except KeyError:
            winext = 'exe|com|bat|py'
        if 'py' not in winext:
            winext += '|py'
        execre = re.compile(r'(.*)\.(%s)$' % winext,re.IGNORECASE)
        isexec = lambda fname:os.path.isfile(fname) and execre.match(fname)
    savedir = os.getcwd()
    try:
        # write the whole loop for posix/Windows so we don't have an if in
        # the innermost part
        if os.name == 'posix':
            for pdir in path:
                os.chdir(pdir)
                for ff in os.listdir(pdir):
                    if isexec(ff) and ff not in self.shell.no_alias:
                        # each entry in the alias table must be (N,name),
                        # where N is the number of positional arguments of
                        # the alias.
                        alias_table[ff] = (0,ff)
                        syscmdlist.append(ff)
        else:
            for pdir in path:
                os.chdir(pdir)
                for ff in os.listdir(pdir):
                    base, ext = os.path.splitext(ff)
                    if isexec(ff) and base.lower() not in self.shell.no_alias:
                        if ext.lower() == '.exe':
                            ff = base
                        alias_table[base.lower()] = (0,ff)
                        syscmdlist.append(ff)
        # Make sure the alias table doesn't contain keywords or builtins
        self.shell.alias_table_validate()

        # We deliberately do NOT re-run init_auto_alias() here: if
        # %rehashx clobbers aliases like 'rm -i', we'll probably get
        # better versions from $PATH.
        db = ip.db
        db['syscmdlist'] = syscmdlist
    finally:
        # Always restore the original working directory.
        os.chdir(savedir)
2650 2650
def magic_pwd(self, parameter_s = ''):
    """Return the current working directory path."""
    # Returned (not printed) so the value lands in the output cache.
    return os.getcwd()
2654 2654
def magic_cd(self, parameter_s=''):
    """Change the current working directory.

    This command automatically maintains an internal list of directories
    you visit during your IPython session, in the variable _dh. The
    command %dhist shows this history nicely formatted. You can also
    do 'cd -<tab>' to see directory history conveniently.

    Usage:

      cd 'dir': changes to directory 'dir'.

      cd -: changes to the last visited directory.

      cd -<n>: changes to the n-th directory in the directory history.

      cd -b <bookmark_name>: jump to a bookmark set by %bookmark
         (note: cd <bookmark_name> is enough if there is no
          directory <bookmark_name>, but a bookmark with the name exists.)
          'cd -b <tab>' allows you to tab-complete bookmark names.

    Options:

    -q: quiet.  Do not print the working directory after the cd command is
    executed.  By default IPython's cd command does print this directory,
    since the default prompts do not display path information.

    Note that !cd doesn't work for this purpose because the shell where
    !command runs is immediately discarded after executing 'command'."""

    parameter_s = parameter_s.strip()
    #bkms = self.shell.persist.get("bookmarks",{})

    oldcwd = os.getcwd()
    numcd = re.match(r'(-)(\d+)$',parameter_s)
    # jump in directory history by number
    if numcd:
        nn = int(numcd.group(2))
        try:
            ps = self.shell.user_ns['_dh'][nn]
        except IndexError:
            print 'The requested directory does not exist in history.'
            return
        else:
            # No option parsing happened on this path; supply empty opts
            # so the final '-q' check below still works.
            opts = {}
    else:
        #turn all non-space-escaping backslashes to slashes,
        # for c:\windows\directory\names\
        parameter_s = re.sub(r'\\(?! )','/', parameter_s)
        opts,ps = self.parse_options(parameter_s,'qb',mode='string')
        # jump to previous
        if ps == '-':
            try:
                ps = self.shell.user_ns['_dh'][-2]
            except IndexError:
                raise UsageError('%cd -: No previous directory to change to.')
        # jump to bookmark if needed
        else:
            # A bookmark lookup happens either on explicit -b, or when the
            # argument is not an existing directory.
            if not os.path.isdir(ps) or opts.has_key('b'):
                bkms = self.db.get('bookmarks', {})

                if bkms.has_key(ps):
                    target = bkms[ps]
                    print '(bookmark:%s) -> %s' % (ps,target)
                    ps = target
                else:
                    if opts.has_key('b'):
                        raise UsageError("Bookmark '%s' not found. "
                              "Use '%%bookmark -l' to see your bookmarks." % ps)

    # at this point ps should point to the target dir
    if ps:
        try:
            os.chdir(os.path.expanduser(ps))
            if self.shell.rc.term_title:
                #print 'set term title:',self.shell.rc.term_title # dbg
                platutils.set_term_title('IPy ' + abbrev_cwd())
        except OSError:
            print sys.exc_info()[1]
        else:
            # chdir succeeded: record the new directory in _dh and in the
            # persisted, compressed directory history (last 100 entries).
            cwd = os.getcwd()
            dhist = self.shell.user_ns['_dh']
            if oldcwd != cwd:
                dhist.append(cwd)
                self.db['dhist'] = compress_dhist(dhist)[-100:]

    else:
        # Bare 'cd': go to the home directory, same bookkeeping as above.
        os.chdir(self.shell.home_dir)
        if self.shell.rc.term_title:
            platutils.set_term_title("IPy ~")
        cwd = os.getcwd()
        dhist = self.shell.user_ns['_dh']

        if oldcwd != cwd:
            dhist.append(cwd)
            self.db['dhist'] = compress_dhist(dhist)[-100:]
    # Unless -q was given, echo where we ended up.
    if not 'q' in opts and self.shell.user_ns['_dh']:
        print self.shell.user_ns['_dh'][-1]
2754 2753
2755 2754
def magic_env(self, parameter_s=''):
    """List environment variables."""
    # os.environ is a UserDict (Python 2); .data is the raw dict behind it.
    environ = os.environ
    return environ.data
2760 2759
def magic_pushd(self, parameter_s=''):
    """Place the current dir on stack and change directory.

    Usage:\\
      %pushd ['dirname']
    """

    dir_s = self.shell.dir_stack
    tgt = os.path.expanduser(parameter_s)
    # Record where we were, with the home directory abbreviated to '~'.
    # FIX: home_dir lives on the shell object (cf. %cd, which uses
    # self.shell.home_dir); self.home_dir raised AttributeError.
    cwd = os.getcwd().replace(self.shell.home_dir,'~')
    if tgt:
        self.magic_cd(parameter_s)
    # The old location is pushed even when no target was given.
    dir_s.insert(0,cwd)
    return self.magic_dirs()
2775 2774
2776 2775 def magic_popd(self, parameter_s=''):
2777 2776 """Change to directory popped off the top of the stack.
2778 2777 """
2779 2778 if not self.shell.dir_stack:
2780 2779 raise UsageError("%popd on empty stack")
2781 2780 top = self.shell.dir_stack.pop(0)
2782 2781 self.magic_cd(top)
2783 2782 print "popd ->",top
2784 2783
def magic_dirs(self, parameter_s=''):
    """Return the current directory stack."""
    # The stack itself is returned (not a copy), matching %pushd/%popd.
    return self.shell.dir_stack
2789 2788
def magic_dhist(self, parameter_s=''):
    """Print your history of visited directories.

    %dhist       -> print full history\\
    %dhist n     -> print last n entries only\\
    %dhist n1 n2 -> print entries between n1 and n2 (n1 not included)\\

    This history is automatically maintained by the %cd command, and
    always available as the global list variable _dh. You can use %cd -<n>
    to go to directory number <n>.

    Note that most of time, you should view directory history by entering
    cd -<TAB>.

    """

    history = self.shell.user_ns['_dh']
    if parameter_s:
        # One argument: show the last n entries; two: an explicit range.
        try:
            bounds = map(int,parameter_s.split())
        except:
            self.arg_err(Magic.magic_dhist)
            return
        if len(bounds) == 1:
            start,stop = max(len(history)-(bounds[0]),0),len(history)
        elif len(bounds) == 2:
            start,stop = bounds
        else:
            self.arg_err(Magic.magic_dhist)
            return
    else:
        start,stop = 0,len(history)
    nlprint(history,
            header = 'Directory history (kept in _dh)',
            start=start,stop=stop)
2825 2824
2826 2825
2827 2826 def magic_sc(self, parameter_s=''):
2828 2827 """Shell capture - execute a shell command and capture its output.
2829 2828
2830 2829 DEPRECATED. Suboptimal, retained for backwards compatibility.
2831 2830
2832 2831 You should use the form 'var = !command' instead. Example:
2833 2832
2834 2833 "%sc -l myfiles = ls ~" should now be written as
2835 2834
2836 2835 "myfiles = !ls ~"
2837 2836
2838 2837 myfiles.s, myfiles.l and myfiles.n still apply as documented
2839 2838 below.
2840 2839
2841 2840 --
2842 2841 %sc [options] varname=command
2843 2842
2844 2843 IPython will run the given command using commands.getoutput(), and
2845 2844 will then update the user's interactive namespace with a variable
2846 2845 called varname, containing the value of the call. Your command can
2847 2846 contain shell wildcards, pipes, etc.
2848 2847
2849 2848 The '=' sign in the syntax is mandatory, and the variable name you
2850 2849 supply must follow Python's standard conventions for valid names.
2851 2850
2852 2851 (A special format without variable name exists for internal use)
2853 2852
2854 2853 Options:
2855 2854
2856 2855 -l: list output. Split the output on newlines into a list before
2857 2856 assigning it to the given variable. By default the output is stored
2858 2857 as a single string.
2859 2858
2860 2859 -v: verbose. Print the contents of the variable.
2861 2860
2862 2861 In most cases you should not need to split as a list, because the
2863 2862 returned value is a special type of string which can automatically
2864 2863 provide its contents either as a list (split on newlines) or as a
2865 2864 space-separated string. These are convenient, respectively, either
2866 2865 for sequential processing or to be passed to a shell command.
2867 2866
2868 2867 For example:
2869 2868
2870 2869 # Capture into variable a
2871 2870 In [9]: sc a=ls *py
2872 2871
2873 2872 # a is a string with embedded newlines
2874 2873 In [10]: a
2875 2874 Out[10]: 'setup.py\nwin32_manual_post_install.py'
2876 2875
2877 2876 # which can be seen as a list:
2878 2877 In [11]: a.l
2879 2878 Out[11]: ['setup.py', 'win32_manual_post_install.py']
2880 2879
2881 2880 # or as a whitespace-separated string:
2882 2881 In [12]: a.s
2883 2882 Out[12]: 'setup.py win32_manual_post_install.py'
2884 2883
2885 2884 # a.s is useful to pass as a single command line:
2886 2885 In [13]: !wc -l $a.s
2887 2886 146 setup.py
2888 2887 130 win32_manual_post_install.py
2889 2888 276 total
2890 2889
2891 2890 # while the list form is useful to loop over:
2892 2891 In [14]: for f in a.l:
2893 2892 ....: !wc -l $f
2894 2893 ....:
2895 2894 146 setup.py
2896 2895 130 win32_manual_post_install.py
2897 2896
2898 2897 Similiarly, the lists returned by the -l option are also special, in
2899 2898 the sense that you can equally invoke the .s attribute on them to
2900 2899 automatically get a whitespace-separated string from their contents:
2901 2900
2902 2901 In [1]: sc -l b=ls *py
2903 2902
2904 2903 In [2]: b
2905 2904 Out[2]: ['setup.py', 'win32_manual_post_install.py']
2906 2905
2907 2906 In [3]: b.s
2908 2907 Out[3]: 'setup.py win32_manual_post_install.py'
2909 2908
2910 2909 In summary, both the lists and strings used for ouptut capture have
2911 2910 the following special attributes:
2912 2911
2913 2912 .l (or .list) : value as list.
2914 2913 .n (or .nlstr): value as newline-separated string.
2915 2914 .s (or .spstr): value as space-separated string.
2916 2915 """
2917 2916
2918 2917 opts,args = self.parse_options(parameter_s,'lv')
2919 2918 # Try to get a variable name and command to run
2920 2919 try:
2921 2920 # the variable name must be obtained from the parse_options
2922 2921 # output, which uses shlex.split to strip options out.
2923 2922 var,_ = args.split('=',1)
2924 2923 var = var.strip()
2925 2924 # But the the command has to be extracted from the original input
2926 2925 # parameter_s, not on what parse_options returns, to avoid the
2927 2926 # quote stripping which shlex.split performs on it.
2928 2927 _,cmd = parameter_s.split('=',1)
2929 2928 except ValueError:
2930 2929 var,cmd = '',''
2931 2930 # If all looks ok, proceed
2932 2931 out,err = self.shell.getoutputerror(cmd)
2933 2932 if err:
2934 2933 print >> Term.cerr,err
2935 2934 if opts.has_key('l'):
2936 2935 out = SList(out.split('\n'))
2937 2936 else:
2938 2937 out = LSString(out)
2939 2938 if opts.has_key('v'):
2940 2939 print '%s ==\n%s' % (var,pformat(out))
2941 2940 if var:
2942 2941 self.shell.user_ns.update({var:out})
2943 2942 else:
2944 2943 return out
2945 2944
def magic_sx(self, parameter_s=''):
    """Shell execute - run a shell command and capture its output.

    %sx command

    IPython will run the given command using commands.getoutput(), and
    return the result formatted as a list (split on '\\n').  Since the
    output is _returned_, it will be stored in ipython's regular output
    cache Out[N] and in the '_N' automatic variables.

    Notes:

    1) If an input line begins with '!!', then %sx is automatically
    invoked.  That is, while:
      !ls
    causes ipython to simply issue system('ls'), typing
      !!ls
    is a shorthand equivalent to:
      %sx ls

    2) %sx differs from %sc in that %sx automatically splits into a list,
    like '%sc -l'.  The reason for this is to make it as easy as possible
    to process line-oriented shell output via further python commands.
    %sc is meant to provide much finer control, but requires more
    typing.

    3) Just like %sc -l, this is a list with special attributes:

      .l (or .list) : value as list.
      .n (or .nlstr): value as newline-separated string.
      .s (or .spstr): value as whitespace-separated string.

    This is very useful when trying to use such lists as arguments to
    system commands."""

    # Without an argument there is nothing to run (returns None).
    if not parameter_s:
        return
    out,err = self.shell.getoutputerror(parameter_s)
    if err:
        print >> Term.cerr,err
    return SList(out.split('\n'))
2986 2985
def magic_bg(self, parameter_s=''):
    """Run a job in the background, in a separate thread.

    For example,

      %bg myfunc(x,y,z=1)

    will execute 'myfunc(x,y,z=1)' in a background thread.  As soon as
    the execution starts, a message will be printed indicating the job
    number.  If your job number is 5, you can use

      myvar = jobs.result(5)  or  myvar = jobs[5].result

    to assign this result to variable 'myvar'.

    IPython has a job manager, accessible via the 'jobs' object.  You can
    type jobs? to get more information about it, and use jobs.<TAB> to see
    its attributes.  All attributes not starting with an underscore are
    meant for public use.

    In particular, look at the jobs.new() method, which is used to create
    new jobs.  This magic %bg function is just a convenience wrapper
    around jobs.new(), for expression-based jobs.  If you want to create a
    new job with an explicit function object and arguments, you must call
    jobs.new() directly.

    The jobs.new docstring also describes in detail several important
    caveats associated with a thread-based model for background job
    execution.  Type jobs.new? for details.

    You can check the status of all jobs with jobs.status().

    The jobs variable is set by IPython into the Python builtin namespace.
    If you ever declare a variable named 'jobs', you will shadow this
    name.  You can either delete your global jobs variable to regain
    access to the job manager, or make a new name and assign it manually
    to the manager (stored in IPython's namespace).  For example, to
    assign the job manager to the Jobs name, use:

      Jobs = __builtins__.jobs"""

    # Hand the expression off to the job manager, evaluated in user_ns.
    shell = self.shell
    shell.jobs.new(parameter_s,shell.user_ns)
3029 3028
3030 3029 def magic_r(self, parameter_s=''):
3031 3030 """Repeat previous input.
3032 3031
3033 3032 Note: Consider using the more powerfull %rep instead!
3034 3033
3035 3034 If given an argument, repeats the previous command which starts with
3036 3035 the same string, otherwise it just repeats the previous input.
3037 3036
3038 3037 Shell escaped commands (with ! as first character) are not recognized
3039 3038 by this system, only pure python code and magic commands.
3040 3039 """
3041 3040
3042 3041 start = parameter_s.strip()
3043 3042 esc_magic = self.shell.ESC_MAGIC
3044 3043 # Identify magic commands even if automagic is on (which means
3045 3044 # the in-memory version is different from that typed by the user).
3046 3045 if self.shell.rc.automagic:
3047 3046 start_magic = esc_magic+start
3048 3047 else:
3049 3048 start_magic = start
3050 3049 # Look through the input history in reverse
3051 3050 for n in range(len(self.shell.input_hist)-2,0,-1):
3052 3051 input = self.shell.input_hist[n]
3053 3052 # skip plain 'r' lines so we don't recurse to infinity
3054 3053 if input != '_ip.magic("r")\n' and \
3055 3054 (input.startswith(start) or input.startswith(start_magic)):
3056 3055 #print 'match',`input` # dbg
3057 3056 print 'Executing:',input,
3058 3057 self.shell.runlines(input)
3059 3058 return
3060 3059 print 'No previous input matching `%s` found.' % start
3061 3060
3062 3061
3063 3062 def magic_bookmark(self, parameter_s=''):
3064 3063 """Manage IPython's bookmark system.
3065 3064
3066 3065 %bookmark <name> - set bookmark to current dir
3067 3066 %bookmark <name> <dir> - set bookmark to <dir>
3068 3067 %bookmark -l - list all bookmarks
3069 3068 %bookmark -d <name> - remove bookmark
3070 3069 %bookmark -r - remove all bookmarks
3071 3070
3072 3071 You can later on access a bookmarked folder with:
3073 3072 %cd -b <name>
3074 3073 or simply '%cd <name>' if there is no directory called <name> AND
3075 3074 there is such a bookmark defined.
3076 3075
3077 3076 Your bookmarks persist through IPython sessions, but they are
3078 3077 associated with each profile."""
3079 3078
3080 3079 opts,args = self.parse_options(parameter_s,'drl',mode='list')
3081 3080 if len(args) > 2:
3082 3081 raise UsageError("%bookmark: too many arguments")
3083 3082
3084 3083 bkms = self.db.get('bookmarks',{})
3085 3084
3086 3085 if opts.has_key('d'):
3087 3086 try:
3088 3087 todel = args[0]
3089 3088 except IndexError:
3090 3089 raise UsageError(
3091 3090 "%bookmark -d: must provide a bookmark to delete")
3092 3091 else:
3093 3092 try:
3094 3093 del bkms[todel]
3095 3094 except KeyError:
3096 3095 raise UsageError(
3097 3096 "%%bookmark -d: Can't delete bookmark '%s'" % todel)
3098 3097
3099 3098 elif opts.has_key('r'):
3100 3099 bkms = {}
3101 3100 elif opts.has_key('l'):
3102 3101 bks = bkms.keys()
3103 3102 bks.sort()
3104 3103 if bks:
3105 3104 size = max(map(len,bks))
3106 3105 else:
3107 3106 size = 0
3108 3107 fmt = '%-'+str(size)+'s -> %s'
3109 3108 print 'Current bookmarks:'
3110 3109 for bk in bks:
3111 3110 print fmt % (bk,bkms[bk])
3112 3111 else:
3113 3112 if not args:
3114 3113 raise UsageError("%bookmark: You must specify the bookmark name")
3115 3114 elif len(args)==1:
3116 3115 bkms[args[0]] = os.getcwd()
3117 3116 elif len(args)==2:
3118 3117 bkms[args[0]] = args[1]
3119 3118 self.db['bookmarks'] = bkms
3120 3119
3121 3120 def magic_pycat(self, parameter_s=''):
3122 3121 """Show a syntax-highlighted file through a pager.
3123 3122
3124 3123 This magic is similar to the cat utility, but it will assume the file
3125 3124 to be Python source and will show it with syntax highlighting. """
3126 3125
3127 3126 try:
3128 3127 filename = get_py_filename(parameter_s)
3129 3128 cont = file_read(filename)
3130 3129 except IOError:
3131 3130 try:
3132 3131 cont = eval(parameter_s,self.user_ns)
3133 3132 except NameError:
3134 3133 cont = None
3135 3134 if cont is None:
3136 3135 print "Error: no such file or variable"
3137 3136 return
3138 3137
3139 3138 page(self.shell.pycolorize(cont),
3140 3139 screen_lines=self.shell.rc.screen_length)
3141 3140
3142 3141 def magic_cpaste(self, parameter_s=''):
3143 3142 """Allows you to paste & execute a pre-formatted code block from clipboard.
3144 3143
3145 3144 You must terminate the block with '--' (two minus-signs) alone on the
3146 3145 line. You can also provide your own sentinel with '%paste -s %%' ('%%'
3147 3146 is the new sentinel for this operation)
3148 3147
3149 3148 The block is dedented prior to execution to enable execution of method
3150 3149 definitions. '>' and '+' characters at the beginning of a line are
3151 3150 ignored, to allow pasting directly from e-mails, diff files and
3152 3151 doctests (the '...' continuation prompt is also stripped). The
3153 3152 executed block is also assigned to variable named 'pasted_block' for
3154 3153 later editing with '%edit pasted_block'.
3155 3154
3156 3155 You can also pass a variable name as an argument, e.g. '%cpaste foo'.
3157 3156 This assigns the pasted block to variable 'foo' as string, without
3158 3157 dedenting or executing it (preceding >>> and + is still stripped)
3159 3158
3160 3159 Do not be alarmed by garbled output on Windows (it's a readline bug).
3161 3160 Just press enter and type -- (and press enter again) and the block
3162 3161 will be what was just pasted.
3163 3162
3164 3163 IPython statements (magics, shell escapes) are not supported (yet).
3165 3164 """
3166 3165 opts,args = self.parse_options(parameter_s,'s:',mode='string')
3167 3166 par = args.strip()
3168 3167 sentinel = opts.get('s','--')
3169 3168
3170 3169 # Regular expressions that declare text we strip from the input:
3171 3170 strip_re = [r'^\s*In \[\d+\]:', # IPython input prompt
3172 3171 r'^\s*(\s?>)+', # Python input prompt
3173 3172 r'^\s*\.{3,}', # Continuation prompts
3174 3173 r'^\++',
3175 3174 ]
3176 3175
3177 3176 strip_from_start = map(re.compile,strip_re)
3178 3177
3179 3178 from IPython import iplib
3180 3179 lines = []
3181 3180 print "Pasting code; enter '%s' alone on the line to stop." % sentinel
3182 3181 while 1:
3183 3182 l = iplib.raw_input_original(':')
3184 3183 if l ==sentinel:
3185 3184 break
3186 3185
3187 3186 for pat in strip_from_start:
3188 3187 l = pat.sub('',l)
3189 3188 lines.append(l)
3190 3189
3191 3190 block = "\n".join(lines) + '\n'
3192 3191 #print "block:\n",block
3193 3192 if not par:
3194 3193 b = textwrap.dedent(block)
3195 3194 exec b in self.user_ns
3196 3195 self.user_ns['pasted_block'] = b
3197 3196 else:
3198 self.user_ns[par] = block
3197 self.user_ns[par] = SList(block.splitlines())
3199 3198 print "Block assigned to '%s'" % par
3200 3199
3201 3200 def magic_quickref(self,arg):
3202 3201 """ Show a quick reference sheet """
3203 3202 import IPython.usage
3204 3203 qr = IPython.usage.quick_reference + self.magic_magic('-brief')
3205 3204
3206 3205 page(qr)
3207 3206
3208 3207 def magic_upgrade(self,arg):
3209 3208 """ Upgrade your IPython installation
3210 3209
3211 3210 This will copy the config files that don't yet exist in your
3212 3211 ipython dir from the system config dir. Use this after upgrading
3213 3212 IPython if you don't wish to delete your .ipython dir.
3214 3213
3215 3214 Call with -nolegacy to get rid of ipythonrc* files (recommended for
3216 3215 new users)
3217 3216
3218 3217 """
3219 3218 ip = self.getapi()
3220 3219 ipinstallation = path(IPython.__file__).dirname()
3221 3220 upgrade_script = '%s "%s"' % (sys.executable,ipinstallation / 'upgrade_dir.py')
3222 3221 src_config = ipinstallation / 'UserConfig'
3223 3222 userdir = path(ip.options.ipythondir)
3224 3223 cmd = '%s "%s" "%s"' % (upgrade_script, src_config, userdir)
3225 3224 print ">",cmd
3226 3225 shell(cmd)
3227 3226 if arg == '-nolegacy':
3228 3227 legacy = userdir.files('ipythonrc*')
3229 3228 print "Nuking legacy files:",legacy
3230 3229
3231 3230 [p.remove() for p in legacy]
3232 3231 suffix = (sys.platform == 'win32' and '.ini' or '')
3233 3232 (userdir / ('ipythonrc' + suffix)).write_text('# Empty, see ipy_user_conf.py\n')
3234 3233
3235 3234
3236 3235 def magic_doctest_mode(self,parameter_s=''):
3237 3236 """Toggle doctest mode on and off.
3238 3237
3239 3238 This mode allows you to toggle the prompt behavior between normal
3240 3239 IPython prompts and ones that are as similar to the default IPython
3241 3240 interpreter as possible.
3242 3241
3243 3242 It also supports the pasting of code snippets that have leading '>>>'
3244 3243 and '...' prompts in them. This means that you can paste doctests from
3245 3244 files or docstrings (even if they have leading whitespace), and the
3246 3245 code will execute correctly. You can then use '%history -tn' to see
3247 3246 the translated history without line numbers; this will give you the
3248 3247 input after removal of all the leading prompts and whitespace, which
3249 3248 can be pasted back into an editor.
3250 3249
3251 3250 With these features, you can switch into this mode easily whenever you
3252 3251 need to do testing and changes to doctests, without having to leave
3253 3252 your existing IPython session.
3254 3253 """
3255 3254
3256 3255 # XXX - Fix this to have cleaner activate/deactivate calls.
3257 3256 from IPython.Extensions import InterpreterPasteInput as ipaste
3258 3257 from IPython.ipstruct import Struct
3259 3258
3260 3259 # Shorthands
3261 3260 shell = self.shell
3262 3261 oc = shell.outputcache
3263 3262 rc = shell.rc
3264 3263 meta = shell.meta
3265 3264 # dstore is a data store kept in the instance metadata bag to track any
3266 3265 # changes we make, so we can undo them later.
3267 3266 dstore = meta.setdefault('doctest_mode',Struct())
3268 3267 save_dstore = dstore.setdefault
3269 3268
3270 3269 # save a few values we'll need to recover later
3271 3270 mode = save_dstore('mode',False)
3272 3271 save_dstore('rc_pprint',rc.pprint)
3273 3272 save_dstore('xmode',shell.InteractiveTB.mode)
3274 3273 save_dstore('rc_separate_out',rc.separate_out)
3275 3274 save_dstore('rc_separate_out2',rc.separate_out2)
3276 3275 save_dstore('rc_prompts_pad_left',rc.prompts_pad_left)
3277 3276
3278 3277 if mode == False:
3279 3278 # turn on
3280 3279 ipaste.activate_prefilter()
3281 3280
3282 3281 oc.prompt1.p_template = '>>> '
3283 3282 oc.prompt2.p_template = '... '
3284 3283 oc.prompt_out.p_template = ''
3285 3284
3286 3285 oc.output_sep = ''
3287 3286 oc.output_sep2 = ''
3288 3287
3289 3288 oc.prompt1.pad_left = oc.prompt2.pad_left = \
3290 3289 oc.prompt_out.pad_left = False
3291 3290
3292 3291 rc.pprint = False
3293 3292
3294 3293 shell.magic_xmode('Plain')
3295 3294
3296 3295 else:
3297 3296 # turn off
3298 3297 ipaste.deactivate_prefilter()
3299 3298
3300 3299 oc.prompt1.p_template = rc.prompt_in1
3301 3300 oc.prompt2.p_template = rc.prompt_in2
3302 3301 oc.prompt_out.p_template = rc.prompt_out
3303 3302
3304 3303 oc.output_sep = dstore.rc_separate_out
3305 3304 oc.output_sep2 = dstore.rc_separate_out2
3306 3305
3307 3306 oc.prompt1.pad_left = oc.prompt2.pad_left = \
3308 3307 oc.prompt_out.pad_left = dstore.rc_prompts_pad_left
3309 3308
3310 3309 rc.pprint = dstore.rc_pprint
3311 3310
3312 3311 shell.magic_xmode(dstore.xmode)
3313 3312
3314 3313 # Store new mode and inform
3315 3314 dstore.mode = bool(1-int(mode))
3316 3315 print 'Doctest mode is:',
3317 3316 print ['OFF','ON'][dstore.mode]
3318 3317
3319 3318 # end Magic
@@ -1,95 +1,99 b''
1 1 # -*- coding: utf-8 -*-
2 2 """Release data for the IPython project.
3 3
4 4 $Id: Release.py 3002 2008-02-01 07:17:00Z fperez $"""
5 5
6 6 #*****************************************************************************
7 7 # Copyright (C) 2001-2006 Fernando Perez <fperez@colorado.edu>
8 8 #
9 9 # Copyright (c) 2001 Janko Hauser <jhauser@zscout.de> and Nathaniel Gray
10 10 # <n8gray@caltech.edu>
11 11 #
12 12 # Distributed under the terms of the BSD License. The full license is in
13 13 # the file COPYING, distributed as part of this software.
14 14 #*****************************************************************************
15 15
16 16 # Name of the package for release purposes. This is the name which labels
17 17 # the tarballs and RPMs made by distutils, so it's best to lowercase it.
18 18 name = 'ipython'
19 19
20 20 # For versions with substrings (like 0.6.16.svn), use an extra . to separate
21 21 # the new substring. We have to avoid using either dashes or underscores,
22 22 # because bdist_rpm does not accept dashes (an RPM) convention, and
23 23 # bdist_deb does not accept underscores (a Debian convention).
24 24
25 revision = '1016'
25 development = True # change this to False to do a release
26 version_base = '0.9.0'
26 27 branch = 'ipython'
28 revision = '1016'
27 29
28 if branch == 'ipython':
29 version = '0.9.0.bzr.r' + revision
30 if development:
31 if branch == 'ipython':
32 version = '%s.bzr.r%s' % (version_base, revision)
33 else:
34 version = '%s.bzr.r%s.%s' % (version_base, revision, branch)
30 35 else:
31 version = '0.9.0.bzr.r%s.%s' % (revision,branch)
36 version = version_base
32 37
33 # version = '0.8.4'
34 38
35 39 description = "Tools for interactive development in Python."
36 40
37 41 long_description = \
38 42 """
39 43 IPython provides a replacement for the interactive Python interpreter with
40 44 extra functionality.
41 45
42 46 Main features:
43 47
44 48 * Comprehensive object introspection.
45 49
46 50 * Input history, persistent across sessions.
47 51
48 52 * Caching of output results during a session with automatically generated
49 53 references.
50 54
51 55 * Readline based name completion.
52 56
53 57 * Extensible system of 'magic' commands for controlling the environment and
54 58 performing many tasks related either to IPython or the operating system.
55 59
56 60 * Configuration system with easy switching between different setups (simpler
57 61 than changing $PYTHONSTARTUP environment variables every time).
58 62
59 63 * Session logging and reloading.
60 64
61 65 * Extensible syntax processing for special purpose situations.
62 66
63 67 * Access to the system shell with user-extensible alias system.
64 68
65 69 * Easily embeddable in other Python programs.
66 70
67 71 * Integrated access to the pdb debugger and the Python profiler.
68 72
69 73 The latest development version is always available at the IPython subversion
70 74 repository_.
71 75
72 76 .. _repository: http://ipython.scipy.org/svn/ipython/ipython/trunk#egg=ipython-dev
73 77 """
74 78
75 79 license = 'BSD'
76 80
77 81 authors = {'Fernando' : ('Fernando Perez','fperez@colorado.edu'),
78 82 'Janko' : ('Janko Hauser','jhauser@zscout.de'),
79 83 'Nathan' : ('Nathaniel Gray','n8gray@caltech.edu'),
80 84 'Ville' : ('Ville Vainio','vivainio@gmail.com'),
81 85 'Brian' : ('Brian E Granger', 'ellisonbg@gmail.com'),
82 86 'Min' : ('Min Ragan-Kelley', 'benjaminrk@gmail.com')
83 87 }
84 88
85 89 author = 'The IPython Development Team'
86 90
87 91 author_email = 'ipython-dev@scipy.org'
88 92
89 93 url = 'http://ipython.scipy.org'
90 94
91 95 download_url = 'http://ipython.scipy.org/dist'
92 96
93 97 platforms = ['Linux','Mac OSX','Windows XP/2000/NT','Windows 95/98/ME']
94 98
95 99 keywords = ['Interactive','Interpreter','Shell','Parallel','Distributed']
@@ -1,203 +1,231 b''
1 1 #!/usr/bin/env python
2 2
3 3 r""" mglob - enhanced file list expansion module
4 4
5 5 Use as stand-alone utility (for xargs, `backticks` etc.),
6 6 or a globbing library for own python programs. Globbing the sys.argv is something
7 7 that almost every Windows script has to perform manually, and this module is here
8 8 to help with that task. Also Unix users will benefit from enhanced modes
9 9 such as recursion, exclusion, directory omission...
10 10
11 11 Unlike glob.glob, directories are not included in the glob unless specified
12 12 with 'dir:'
13 13
14 14 'expand' is the function to use in python programs. Typical use
15 15 to expand argv (esp. in windows)::
16 16
17 17 try:
18 18 import mglob
19 19 files = mglob.expand(sys.argv[1:])
20 20 except ImportError:
21 21 print "mglob not found; try 'easy_install mglob' for extra features"
22 22 files = sys.argv[1:]
23 23
24 24 Note that for unix, shell expands *normal* wildcards (*.cpp, etc.) in argv.
25 25 Therefore, you might want to use quotes with normal wildcards to prevent this
26 26 expansion, in order for mglob to see the wildcards and get the wanted behaviour.
27 27 Not quoting the wildcards is harmless and typically has equivalent results, though.
28 28
29 29 Author: Ville Vainio <vivainio@gmail.com>
30 30 License: MIT Open Source license
31 31
32 32 """
33 33
34 34 #Assigned in variable for "usage" printing convenience"
35 35
36 36 globsyntax = """\
37 37 This program allows specifying filenames with "mglob" mechanism.
38 38 Supported syntax in globs (wilcard matching patterns)::
39 39
40 40 *.cpp ?ellowo*
41 41 - obvious. Differs from normal glob in that dirs are not included.
42 42 Unix users might want to write this as: "*.cpp" "?ellowo*"
43 43 rec:/usr/share=*.txt,*.doc
44 44 - get all *.txt and *.doc under /usr/share,
45 45 recursively
46 46 rec:/usr/share
47 47 - All files under /usr/share, recursively
48 48 rec:*.py
49 49 - All .py files under current working dir, recursively
50 50 foo
51 51 - File or dir foo
52 52 !*.bak readme*
53 53 - readme*, exclude files ending with .bak
54 54 !.svn/ !.hg/ !*_Data/ rec:.
55 55 - Skip .svn, .hg, foo_Data dirs (and their subdirs) in recurse.
56 Trailing / is the key, \ does not work!
56 Trailing / is the key, \ does not work! Use !.*/ for all hidden.
57 57 dir:foo
58 58 - the directory foo if it exists (not files in foo)
59 59 dir:*
60 60 - all directories in current folder
61 61 foo.py bar.* !h* rec:*.py
62 62 - Obvious. !h* exclusion only applies for rec:*.py.
63 63 foo.py is *not* included twice.
64 64 @filelist.txt
65 65 - All files listed in 'filelist.txt' file, on separate lines.
66 "cont:class \wak:" rec:*.py
67 - Match files containing regexp. Applies to subsequent files.
68 note quotes because of whitespace.
66 69 """
67 70
68 71
69 72 __version__ = "0.2"
70 73
71 74
72 import os,glob,fnmatch,sys
75 import os,glob,fnmatch,sys,re
73 76 from sets import Set as set
74 77
75 78
76 79 def expand(flist,exp_dirs = False):
77 80 """ Expand the glob(s) in flist.
78 81
79 82 flist may be either a whitespace-separated list of globs/files
80 83 or an array of globs/files.
81 84
82 85 if exp_dirs is true, directory names in glob are expanded to the files
83 86 contained in them - otherwise, directory names are returned as is.
84 87
85 88 """
86 89 if isinstance(flist, basestring):
87 flist = flist.split()
90 import shlex
91 flist = shlex.split(flist)
88 92 done_set = set()
89 93 denied_set = set()
90
94 cont_set = set()
95 cur_rejected_dirs = set()
96
91 97 def recfind(p, pats = ["*"]):
92 denied_dirs = ["*" + d+"*" for d in denied_set if d.endswith("/")]
93 #print "de", denied_dirs
98 denied_dirs = [os.path.dirname(d) for d in denied_set if d.endswith("/")]
94 99 for (dp,dnames,fnames) in os.walk(p):
95 100 # see if we should ignore the whole directory
96 101 dp_norm = dp.replace("\\","/") + "/"
97 102 deny = False
103 # do not traverse under already rejected dirs
104 for d in cur_rejected_dirs:
105 if dp.startswith(d):
106 deny = True
107 break
108 if deny:
109 continue
110
111
98 112 #print "dp",dp
113 bname = os.path.basename(dp)
99 114 for deny_pat in denied_dirs:
100 if fnmatch.fnmatch( dp_norm, deny_pat):
115 if fnmatch.fnmatch( bname, deny_pat):
101 116 deny = True
117 cur_rejected_dirs.add(dp)
102 118 break
103 119 if deny:
104 120 continue
105 121
106 122
107 123 for f in fnames:
108 124 matched = False
109 125 for p in pats:
110 126 if fnmatch.fnmatch(f,p):
111 127 matched = True
112 128 break
113 129 if matched:
114 130 yield os.path.join(dp,f)
115 131
116 132 def once_filter(seq):
117 133 for it in seq:
118 134 p = os.path.abspath(it)
119 135 if p in done_set:
120 136 continue
121 137 done_set.add(p)
122 138 deny = False
123 139 for deny_pat in denied_set:
124 140 if fnmatch.fnmatch(os.path.basename(p), deny_pat):
125 141 deny = True
126 142 break
143 if cont_set:
144 try:
145 cont = open(p).read()
146 except IOError:
147 # deny
148 continue
149 for pat in cont_set:
150 if not re.search(pat,cont, re.IGNORECASE):
151 deny = True
152 break
153
127 154 if not deny:
128 155 yield it
129 156 return
130 157
131 158 res = []
132 159
133 160 for ent in flist:
134 161 ent = os.path.expanduser(os.path.expandvars(ent))
135 162 if ent.lower().startswith('rec:'):
136 163 fields = ent[4:].split('=')
137 164 if len(fields) == 2:
138 165 pth, patlist = fields
139 166 elif len(fields) == 1:
140 167 if os.path.isdir(fields[0]):
141 168 # single arg is dir
142 169 pth, patlist = fields[0], '*'
143 170 else:
144 171 # single arg is pattern
145 172 pth, patlist = '.', fields[0]
146 173
147 174 elif len(fields) == 0:
148 175 pth, pathlist = '.','*'
149 176
150 177 pats = patlist.split(',')
151 178 res.extend(once_filter(recfind(pth, pats)))
152 179 # filelist
153 180 elif ent.startswith('@') and os.path.isfile(ent[1:]):
154 181 res.extend(once_filter(open(ent[1:]).read().splitlines()))
155 182 # exclusion
156 183 elif ent.startswith('!'):
157 184 denied_set.add(ent[1:])
158 185 # glob only dirs
159 186 elif ent.lower().startswith('dir:'):
160 187 res.extend(once_filter(filter(os.path.isdir,glob.glob(ent[4:]))))
161
188 elif ent.lower().startswith('cont:'):
189 cont_set.add(ent[5:])
162 190 # get all files in the specified dir
163 191 elif os.path.isdir(ent) and exp_dirs:
164 192 res.extend(once_filter(filter(os.path.isfile,glob.glob(ent + os.sep+"*"))))
165 193
166 194 # glob only files
167 195
168 196 elif '*' in ent or '?' in ent:
169 197 res.extend(once_filter(filter(os.path.isfile,glob.glob(ent))))
170 198
171 199 else:
172 200 res.extend(once_filter([ent]))
173 201 return res
174 202
175 203
176 204 def test():
177 205 assert (
178 206 expand("*.py ~/.ipython/*.py rec:/usr/share/doc-base") ==
179 207 expand( ['*.py', '~/.ipython/*.py', 'rec:/usr/share/doc-base'] )
180 208 )
181 209
182 210 def main():
183 211 if len(sys.argv) < 2:
184 212 print globsyntax
185 213 return
186 214
187 215 print "\n".join(expand(sys.argv[1:])),
188 216
189 217 def mglob_f(self, arg):
190 218 from IPython.genutils import SList
191 219 if arg.strip():
192 220 return SList(expand(arg))
193 221 print "Please specify pattern!"
194 222 print globsyntax
195 223
196 224 def init_ipython(ip):
197 225 """ register %mglob for IPython """
198 226 mglob_f.__doc__ = globsyntax
199 227 ip.expose_magic("mglob",mglob_f)
200 228
201 229 # test()
202 230 if __name__ == "__main__":
203 231 main()
@@ -1,151 +1,151 b''
1 1 # encoding: utf-8
2 2
3 3 """This file contains unittests for the frontendbase module."""
4 4
5 5 __docformat__ = "restructuredtext en"
6 6
7 7 #---------------------------------------------------------------------------
8 8 # Copyright (C) 2008 The IPython Development Team
9 9 #
10 10 # Distributed under the terms of the BSD License. The full license is in
11 11 # the file COPYING, distributed as part of this software.
12 12 #---------------------------------------------------------------------------
13 13
14 14 #---------------------------------------------------------------------------
15 15 # Imports
16 16 #---------------------------------------------------------------------------
17 17
18 18 import unittest
19 19 from IPython.frontend import frontendbase
20 20 from IPython.kernel.engineservice import EngineService
21 21
22 22 class FrontEndCallbackChecker(frontendbase.AsyncFrontEndBase):
23 23 """FrontEndBase subclass for checking callbacks"""
24 24 def __init__(self, engine=None, history=None):
25 25 super(FrontEndCallbackChecker, self).__init__(engine=engine,
26 26 history=history)
27 27 self.updateCalled = False
28 28 self.renderResultCalled = False
29 29 self.renderErrorCalled = False
30 30
31 31 def update_cell_prompt(self, result, blockID=None):
32 32 self.updateCalled = True
33 33 return result
34 34
35 35 def render_result(self, result):
36 36 self.renderResultCalled = True
37 37 return result
38 38
39 39
40 40 def render_error(self, failure):
41 41 self.renderErrorCalled = True
42 42 return failure
43 43
44 44
45 45
46 46
47 47 class TestAsyncFrontendBase(unittest.TestCase):
48 48 def setUp(self):
49 49 """Setup the EngineService and FrontEndBase"""
50 50
51 51 self.fb = FrontEndCallbackChecker(engine=EngineService())
52 52
53 53
54 54 def test_implements_IFrontEnd(self):
55 55 assert(frontendbase.IFrontEnd.implementedBy(
56 56 frontendbase.AsyncFrontEndBase))
57 57
58 58
59 59 def test_is_complete_returns_False_for_incomplete_block(self):
60 60 """"""
61 61
62 62 block = """def test(a):"""
63 63
64 64 assert(self.fb.is_complete(block) == False)
65 65
66 66 def test_is_complete_returns_True_for_complete_block(self):
67 67 """"""
68 68
69 69 block = """def test(a): pass"""
70 70
71 71 assert(self.fb.is_complete(block))
72 72
73 73 block = """a=3"""
74 74
75 75 assert(self.fb.is_complete(block))
76 76
77 77
78 78 def test_blockID_added_to_result(self):
79 79 block = """3+3"""
80 80
81 81 d = self.fb.execute(block, blockID='TEST_ID')
82 82
83 83 d.addCallback(self.checkBlockID, expected='TEST_ID')
84 84
85 85 def test_blockID_added_to_failure(self):
86 block = "raise Exception()"
86 block = "raise Exception()"
87 87
88 88 d = self.fb.execute(block,blockID='TEST_ID')
89 89 d.addErrback(self.checkFailureID, expected='TEST_ID')
90 90
91 91 def checkBlockID(self, result, expected=""):
92 92 assert(result['blockID'] == expected)
93 93
94 94
95 95 def checkFailureID(self, failure, expected=""):
96 96 assert(failure.blockID == expected)
97 97
98 98
99 99 def test_callbacks_added_to_execute(self):
100 100 """test that
101 101 update_cell_prompt
102 102 render_result
103 103
104 104 are added to execute request
105 105 """
106 106
107 107 d = self.fb.execute("10+10")
108 108 d.addCallback(self.checkCallbacks)
109 109
110 110
111 111 def checkCallbacks(self, result):
112 112 assert(self.fb.updateCalled)
113 113 assert(self.fb.renderResultCalled)
114 114
115 115
116 116 def test_error_callback_added_to_execute(self):
117 117 """test that render_error called on execution error"""
118 118
119 119 d = self.fb.execute("raise Exception()")
120 120 d.addCallback(self.checkRenderError)
121 121
122 122 def checkRenderError(self, result):
123 123 assert(self.fb.renderErrorCalled)
124 124
125 125 def test_history_returns_expected_block(self):
126 126 """Make sure history browsing doesn't fail"""
127 127
128 128 blocks = ["a=1","a=2","a=3"]
129 129 for b in blocks:
130 130 d = self.fb.execute(b)
131 131
132 132 # d is now the deferred for the last executed block
133 133 d.addCallback(self.historyTests, blocks)
134 134
135 135
136 136 def historyTests(self, result, blocks):
137 137 """historyTests"""
138 138
139 139 assert(len(blocks) >= 3)
140 140 assert(self.fb.get_history_previous("") == blocks[-2])
141 141 assert(self.fb.get_history_previous("") == blocks[-3])
142 142 assert(self.fb.get_history_next() == blocks[-2])
143 143
144 144
145 145 def test_history_returns_none_at_startup(self):
146 146 """test_history_returns_none_at_startup"""
147 147
148 148 assert(self.fb.get_history_previous("")==None)
149 149 assert(self.fb.get_history_next()==None)
150 150
151 151
@@ -1,2097 +1,2124 b''
1 1 # -*- coding: utf-8 -*-
2 2 """
3 3 General purpose utilities.
4 4
5 5 This is a grab-bag of stuff I find useful in most programs I write. Some of
6 6 these things are also convenient when working at the command line.
7 7
8 8 $Id: genutils.py 2998 2008-01-31 10:06:04Z vivainio $"""
9 9
10 10 #*****************************************************************************
11 11 # Copyright (C) 2001-2006 Fernando Perez. <fperez@colorado.edu>
12 12 #
13 13 # Distributed under the terms of the BSD License. The full license is in
14 14 # the file COPYING, distributed as part of this software.
15 15 #*****************************************************************************
16 16
17 17 from IPython import Release
18 18 __author__ = '%s <%s>' % Release.authors['Fernando']
19 19 __license__ = Release.license
20 20
21 21 #****************************************************************************
22 22 # required modules from the Python standard library
23 23 import __main__
24 24 import commands
25 25 try:
26 26 import doctest
27 27 except ImportError:
28 28 pass
29 29 import os
30 30 import platform
31 31 import re
32 32 import shlex
33 33 import shutil
34 34 import subprocess
35 35 import sys
36 36 import tempfile
37 37 import time
38 38 import types
39 39 import warnings
40 40
41 41 # Curses and termios are Unix-only modules
42 42 try:
43 43 import curses
44 44 # We need termios as well, so if its import happens to raise, we bail on
45 45 # using curses altogether.
46 46 import termios
47 47 except ImportError:
48 48 USE_CURSES = False
49 49 else:
50 50 # Curses on Solaris may not be complete, so we can't use it there
51 51 USE_CURSES = hasattr(curses,'initscr')
52 52
53 53 # Other IPython utilities
54 54 import IPython
55 55 from IPython.Itpl import Itpl,itpl,printpl
56 56 from IPython import DPyGetOpt, platutils
57 57 from IPython.generics import result_display
58 58 import IPython.ipapi
59 59 from IPython.external.path import path
60 60 if os.name == "nt":
61 61 from IPython.winconsole import get_console_size
62 62
63 63 try:
64 64 set
65 65 except:
66 66 from sets import Set as set
67 67
68 68
69 69 #****************************************************************************
70 70 # Exceptions
# Root of this module's exception hierarchy (e.g. HomeDirError derives
# from it); catch Error to handle any genutils-specific failure.
class Error(Exception):
    """Base class for exceptions in this module."""
    pass
74 74
75 75 #----------------------------------------------------------------------------
class IOStream:
    """Wrap a write/flush-capable stream, with a fallback and a
    best-effort, never-raising write()."""

    def __init__(self,stream,fallback):
        # If the given object doesn't look like a stream, silently use
        # the fallback instead (e.g. sys.stdout).
        if not hasattr(stream,'write') or not hasattr(stream,'flush'):
            stream = fallback
        self.stream = stream
        self._swrite = stream.write
        self.flush = stream.flush

    def write(self,data):
        """Write data to the stream, degrading gracefully on failure."""
        try:
            self._swrite(data)
        except:
            try:
                # print handles some unicode issues which may trip a plain
                # write() call.  Attempt to emulate write() by using a
                # trailing comma
                print >> self.stream, data,
            except:
                # if we get here, something is seriously broken.
                print >> sys.stderr, \
                      'ERROR - failed to write data to stream:', self.stream

    def close(self):
        # Deliberately a no-op: the wrapped stream may be shared
        # (e.g. sys.stdout), so we never close it.
        pass
100 100
101 101
class IOTerm:
    """ Term holds the file or file-like objects for handling I/O operations.

    These are normally just sys.stdin, sys.stdout and sys.stderr but for
    Windows they can can replaced to allow editing the strings before they are
    displayed."""

    # In the future, having IPython channel all its I/O operations through
    # this class will make it easier to embed it into other environments which
    # are not a normal terminal (such as a GUI-based shell)
    def __init__(self,cin=None,cout=None,cerr=None):
        # Each channel is wrapped in IOStream so a None/invalid argument
        # falls back to the corresponding sys stream.
        self.cin = IOStream(cin,sys.stdin)
        self.cout = IOStream(cout,sys.stdout)
        self.cerr = IOStream(cerr,sys.stderr)
116 116
# Global variable to be used for all I/O
Term = IOTerm()

import IPython.rlineimpl as readline
# Remake Term to use the readline i/o facilities on win32, where the
# readline implementation provides its own output file object.
if sys.platform == 'win32' and readline.have_readline:

    Term = IOTerm(cout=readline._outputfile,cerr=readline._outputfile)
125 125
126 126
127 127 #****************************************************************************
128 128 # Generic warning/error printer, used by everything else
def warn(msg,level=2,exit_val=1):
    """Standard warning printer. Gives formatting consistency.

    Output is sent to Term.cerr (sys.stderr by default).

    Options:

    -level(2): allows finer control:
      0 -> Do nothing, dummy function.
      1 -> Print message.
      2 -> Print 'WARNING:' + message. (Default level).
      3 -> Print 'ERROR:' + message.
      4 -> Print 'FATAL ERROR:' + message and trigger a sys.exit(exit_val).

    -exit_val (1): exit value returned by sys.exit() for a level 4
    warning. Ignored for all other levels."""

    if level>0:
        # Index into this list by level to get the message prefix.
        header = ['','','WARNING: ','ERROR: ','FATAL ERROR: ']
        print >> Term.cerr, '%s%s' % (header[level],msg)
        # Level 4 is fatal: announce and terminate the process.
        if level == 4:
            print >> Term.cerr,'Exiting.\n'
            sys.exit(exit_val)
152 152
def info(msg):
    """Print an informational message (warn() at level 1, no prefix)."""
    warn(msg, level=1)
157 157
def error(msg):
    """Print an error message (warn() at level 3, 'ERROR:' prefix)."""
    warn(msg, level=3)
162 162
def fatal(msg,exit_val=1):
    """Print a fatal error message and exit the process with exit_val."""
    warn(msg, level=4, exit_val=exit_val)
167 167
168 168 #---------------------------------------------------------------------------
169 169 # Debugging routines
170 170 #
def debugx(expr,pre_msg=''):
    """Print the value of an expression from the caller's frame.

    Takes an expression, evaluates it in the caller's frame and prints both
    the given expression and the resulting value (as well as a debug mark
    indicating the name of the calling function. The input must be of a form
    suitable for eval().

    An optional message can be passed, which will be prepended to the printed
    expr->value pair."""

    # Grab the caller's frame so eval() sees the caller's globals/locals.
    # NOTE: eval of an arbitrary string — debugging aid only, never pass
    # untrusted input.
    cf = sys._getframe(1)
    print '[DBG:%s] %s%s -> %r' % (cf.f_code.co_name,pre_msg,expr,
                                   eval(expr,cf.f_globals,cf.f_locals))
185 185
186 186 # deactivate it by uncommenting the following line, which makes it a no-op
187 187 #def debugx(expr,pre_msg=''): pass
188 188
189 189 #----------------------------------------------------------------------------
StringTypes = types.StringTypes

# Basic timing functionality

# If possible (Unix), use the resource module instead of time.clock().
# resource.getrusage returns (user_time, system_time, ...) and does not
# suffer from time.clock()'s wraparound.
try:
    import resource
    def clocku():
        """clocku() -> floating point number

        Return the *USER* CPU time in seconds since the start of the process.
        This is done via a call to resource.getrusage, so it avoids the
        wraparound problems in time.clock()."""

        return resource.getrusage(resource.RUSAGE_SELF)[0]

    def clocks():
        """clocks() -> floating point number

        Return the *SYSTEM* CPU time in seconds since the start of the process.
        This is done via a call to resource.getrusage, so it avoids the
        wraparound problems in time.clock()."""

        return resource.getrusage(resource.RUSAGE_SELF)[1]

    def clock():
        """clock() -> floating point number

        Return the *TOTAL USER+SYSTEM* CPU time in seconds since the start of
        the process.  This is done via a call to resource.getrusage, so it
        avoids the wraparound problems in time.clock()."""

        u,s = resource.getrusage(resource.RUSAGE_SELF)[:2]
        return u+s

    def clock2():
        """clock2() -> (t_user,t_system)

        Similar to clock(), but return a tuple of user/system times."""
        return resource.getrusage(resource.RUSAGE_SELF)[:2]

except ImportError:
    # There is no distinction of user/system time under windows, so we just use
    # time.clock() for everything...
    clocku = clocks = clock = time.clock
    def clock2():
        """Under windows, system CPU time can't be measured.

        This just returns clock() and zero."""
        return time.clock(),0.0
240 240
def timings_out(reps,func,*args,**kw):
    """timings_out(reps,func,*args,**kw) -> (t_total,t_per_call,output)

    Execute a function reps times, return a tuple with the elapsed total
    CPU time in seconds, the time per call and the function's output.

    Under Unix, the return value is the sum of user+system time consumed by
    the process, computed via the resource module.  This prevents problems
    related to the wraparound effect which the time.clock() function has.

    Under Windows the return value is in wall clock seconds.  See the
    documentation for the time module for more details."""

    reps = int(reps)
    assert reps >= 1, 'reps must be >= 1'
    if reps == 1:
        t0 = clock()
        out = func(*args, **kw)
        tot_time = clock() - t0
    else:
        # Run reps-1 throwaway calls, then one final call whose output we keep.
        t0 = clock()
        for _ in xrange(reps - 1):
            func(*args, **kw)
        out = func(*args, **kw)
        tot_time = clock() - t0
    av_time = tot_time / reps
    return tot_time, av_time, out
268 268
def timings(reps,func,*args,**kw):
    """timings(reps,func,*args,**kw) -> (t_total,t_per_call)

    Execute a function reps times, return a tuple with the elapsed total CPU
    time in seconds and the time per call.  These are just the first two values
    in timings_out()."""

    tot_time, av_time, _out = timings_out(reps, func, *args, **kw)
    return tot_time, av_time
277 277
def timing(func,*args,**kw):
    """timing(func,*args,**kw) -> t_total

    Execute a function once, return the elapsed total CPU time in
    seconds.  This is just the first value in timings_out()."""

    tot_time, _av_time, _out = timings_out(1, func, *args, **kw)
    return tot_time
285 285
286 286 #****************************************************************************
287 287 # file and system
288 288
def arg_split(s,posix=False):
    """Split a command line's arguments in a shell-like manner.

    This is a modified version of the standard library's shlex.split()
    function, but with a default of posix=False for splitting, so that quotes
    in inputs are respected."""

    # XXX - there may be unicode-related problems here!!!  I'm not sure that
    # shlex is truly unicode-safe, so it might be necessary to do
    #
    #   s = s.encode(sys.stdin.encoding)
    #
    # first, to ensure that shlex gets a normal string.  Input from anyone who
    # knows more about unicode and shlex than I would be good to have here...
    splitter = shlex.shlex(s, posix=posix)
    splitter.whitespace_split = True
    return [token for token in splitter]
306 306
def system(cmd,verbose=0,debug=0,header=''):
    """Execute a system command, return its exit status.

    Options:

    - verbose (0): print the command to be executed.

    - debug (0): only print, do not actually execute.

    - header (''): Header to print on screen prior to the executed command (it
    is only prepended to the command, no newlines are added).

    Note: a stateful version of this function is available through the
    SystemExec class."""

    stat = 0
    if verbose or debug: print header+cmd
    # Flush so our own output doesn't interleave with the command's.
    sys.stdout.flush()
    # In debug mode the command is only printed; stat stays 0.
    if not debug: stat = os.system(cmd)
    return stat
327 327
def abbrev_cwd():
    """ Return abbreviated version of cwd, e.g. d:mydir """
    cwd = os.getcwd().replace('\\','/')
    drivepart = ''
    tail = cwd
    if sys.platform == 'win32':
        # Very short paths (e.g. 'c:/') are returned untouched.
        if len(cwd) < 4:
            return cwd
        drivepart, tail = os.path.splitdrive(cwd)

    # Keep at most the last two path components.
    components = tail.split('/')
    if len(components) > 2:
        tail = '/'.join(components[-2:])

    # The filesystem root is a special case: never abbreviate it away.
    if cwd == '/':
        return drivepart + '/'
    return drivepart + tail
345 345
346 346
347 347 # This function is used by ipython in a lot of places to make system calls.
348 348 # We need it to be slightly different under win32, due to the vagaries of
349 349 # 'network shares'. A win32 override is below.
350 350
def shell(cmd,verbose=0,debug=0,header=''):
    """Execute a command in the system shell, always return None.

    Options:

    - verbose (0): print the command to be executed.

    - debug (0): only print, do not actually execute.

    - header (''): Header to print on screen prior to the executed command (it
    is only prepended to the command, no newlines are added).

    Note: this is similar to genutils.system(), but it returns None so it can
    be conveniently used in interactive loops without getting the return value
    (typically 0) printed many times."""

    stat = 0
    if verbose or debug: print header+cmd
    # flush stdout so we don't mangle python's buffering
    sys.stdout.flush()

    if not debug:
        # Show the running command in the terminal title, then restore it.
        platutils.set_term_title("IPy " + cmd)
        os.system(cmd)
        platutils.set_term_title("IPy " + abbrev_cwd())
376 376
# override shell() for win32 to deal with network shares
if os.name in ('nt','dos'):

    # Keep a reference to the original implementation; the override
    # delegates to it after working around UNC-path limitations.
    shell_ori = shell

    def shell(cmd,verbose=0,debug=0,header=''):
        if os.getcwd().startswith(r"\\"):
            path = os.getcwd()
            # change to c drive (cannot be on UNC-share when issuing os.system,
            # as cmd.exe cannot handle UNC addresses)
            os.chdir("c:")
            # issue pushd to the UNC-share and then run the command
            try:
                shell_ori('"pushd %s&&"'%path+cmd,verbose,debug,header)
            finally:
                # Always restore the original working directory.
                os.chdir(path)
        else:
            shell_ori(cmd,verbose,debug,header)

    shell.__doc__ = shell_ori.__doc__
397 397
def getoutput(cmd,verbose=0,debug=0,header='',split=0):
    """Dummy substitute for perl's backquotes.

    Executes a command and returns the output.

    Accepts the same arguments as system(), plus:

    - split(0): if true, the output is returned as a list split on newlines.

    Note: a stateful version of this function is available through the
    SystemExec class.

    This is pretty much deprecated and rarely used,
    genutils.getoutputerror may be what you need.

    """

    if verbose or debug: print header+cmd
    # In debug mode nothing runs and the function implicitly returns None.
    if not debug:
        output = os.popen(cmd).read()
        # stripping last \n is here for backwards compat.
        if output.endswith('\n'):
            output = output[:-1]
        if split:
            return output.split('\n')
        else:
            return output
425 425
def getoutputerror(cmd,verbose=0,debug=0,header='',split=0):
    """Return (standard output,standard error) of executing cmd in a shell.

    Accepts the same arguments as system(), plus:

    - split(0): if true, each of stdout/err is returned as a list split on
    newlines.

    Note: a stateful version of this function is available through the
    SystemExec class."""

    if verbose or debug: print header+cmd
    # Empty command: return empty results of the requested shape.
    if not cmd:
        if split:
            return [],[]
        else:
            return '',''
    # Like getoutput(), debug mode runs nothing and implicitly returns None.
    if not debug:
        pin,pout,perr = os.popen3(cmd)
        tout = pout.read().rstrip()
        terr = perr.read().rstrip()
        pin.close()
        pout.close()
        perr.close()
        if split:
            return tout.split('\n'),terr.split('\n')
        else:
            return tout,terr
454 454
# for compatibility with older naming conventions: xsys mirrors system()
# and bq ("backquote") mirrors getoutput().
xsys = system
bq = getoutput
458 458
class SystemExec:
    """Access the system and getoutput functions through a stateful interface.

    Note: here we refer to the system and getoutput functions from this
    library, not the ones from the standard python library.

    This class offers the system and getoutput functions as methods, but the
    verbose, debug and header parameters can be set for the instance (at
    creation time or later) so that they don't need to be specified on each
    call.

    For efficiency reasons, there's no way to override the parameters on a
    per-call basis other than by setting instance attributes.  If you need
    local overrides, it's best to directly call system() or getoutput().

    The following names are provided as alternate options:
     - xsys: alias to system
     - bq: alias to getoutput

    An instance can then be created as:
    >>> sysexec = SystemExec(verbose=1,debug=0,header='Calling: ')

    And used as:
    >>> sysexec.xsys('pwd')
    >>> dirlist = sysexec.bq('ls -l')
    """

    def __init__(self,verbose=0,debug=0,header='',split=0):
        """Specify the instance's values for verbose, debug and header."""
        # setattr_list is a helper defined elsewhere in this module —
        # presumably it copies the named locals onto self; verify there.
        setattr_list(self,'verbose debug header split')

    def system(self,cmd):
        """Stateful interface to system(), with the same keyword parameters."""

        system(cmd,self.verbose,self.debug,self.header)

    def shell(self,cmd):
        """Stateful interface to shell(), with the same keyword parameters."""

        shell(cmd,self.verbose,self.debug,self.header)

    xsys = system  # alias

    def getoutput(self,cmd):
        """Stateful interface to getoutput()."""

        return getoutput(cmd,self.verbose,self.debug,self.header,self.split)

    def getoutputerror(self,cmd):
        """Stateful interface to getoutputerror()."""

        return getoutputerror(cmd,self.verbose,self.debug,self.header,self.split)

    bq = getoutput  # alias
513 513
514 514 #-----------------------------------------------------------------------------
def mutex_opts(dict,ex_op):
    """Check for presence of mutually exclusive keys in a dict.

    Call: mutex_opts(dict,[[op1a,op1b],[op2a,op2b]...]

    Raises ValueError if both members of any pair are present."""
    # NOTE: the parameter deliberately (if unfortunately) shadows the
    # builtin 'dict'; renaming it would change the keyword interface.
    for op1,op2 in ex_op:
        if op1 in dict and op2 in dict:
            raise ValueError,'\n*** ERROR in Arguments *** '\
                  'Options '+op1+' and '+op2+' are mutually exclusive.'
523 523
524 524 #-----------------------------------------------------------------------------
def get_py_filename(name):
    """Return a valid python filename in the current directory.

    If the given name is not a file, it adds '.py' and searches again.
    Raises IOError with an informative message if the file isn't found."""

    # Expand ~/... before checking the filesystem.
    name = os.path.expanduser(name)
    if not os.path.isfile(name) and not name.endswith('.py'):
        name += '.py'
    if os.path.isfile(name):
        return name
    else:
        raise IOError,'File `%s` not found.' % name
538 538
539 539 #-----------------------------------------------------------------------------
def filefind(fname,alt_dirs = None):
    """Return the given filename either in the current directory, if it
    exists, or in a specified list of directories.

    ~ expansion is done on all file and directory names.

    Upon an unsuccessful search, raise an IOError exception."""

    # Default search path: the user's home directory, or the cwd if no
    # home can be determined.
    if alt_dirs is None:
        try:
            alt_dirs = get_home_dir()
        except HomeDirError:
            alt_dirs = os.getcwd()
    search = [fname] + list_strings(alt_dirs)
    search = map(os.path.expanduser,search)
    #print 'search list for',fname,'list:',search  # dbg
    fname = search[0]
    # First try the bare (expanded) filename, then each directory in turn.
    if os.path.isfile(fname):
        return fname
    for direc in search[1:]:
        testname = os.path.join(direc,fname)
        #print 'testname',testname  # dbg
        if os.path.isfile(testname):
            return testname
    # Bug fix: the old message concatenated 'File' directly against the
    # repr ("File'x' not found..."); use the same `...` style as
    # get_py_filename for consistency and readability.
    raise IOError('File `%s` not found in current or supplied directories: %r'
                  % (fname, alt_dirs))
566 566
567 567 #----------------------------------------------------------------------------
def file_read(filename):
    """Read a file and close it.  Returns the file source.

    The file handle is closed even if read() raises (the original
    version leaked it on error)."""
    fobj = open(filename,'r')
    try:
        return fobj.read()
    finally:
        fobj.close()
574 574
def file_readlines(filename):
    """Read a file and close it.  Returns the file source using readlines().

    The file handle is closed even if readlines() raises (the original
    version leaked it on error)."""
    fobj = open(filename,'r')
    try:
        return fobj.readlines()
    finally:
        fobj.close()
581 581
582 582 #----------------------------------------------------------------------------
def target_outdated(target,deps):
    """Determine whether a target is out of date.

    target_outdated(target,deps) -> 1/0

    deps: list of filenames which MUST exist.
    target: single filename which may or may not exist.

    If target doesn't exist or is older than any file listed in deps, return
    true, otherwise return false.
    """
    try:
        target_time = os.path.getmtime(target)
    except os.error:
        # A missing target is always considered outdated.
        return 1
    # Outdated as soon as any dependency is newer than the target.
    for dep in deps:
        if os.path.getmtime(dep) > target_time:
            return 1
    return 0
605 605
606 606 #-----------------------------------------------------------------------------
def target_update(target,deps,cmd):
    """Update a target with a given command given a list of dependencies.

    target_update(target,deps,cmd) -> runs cmd if target is outdated.

    This is just a wrapper around target_outdated() which calls the given
    command if target is outdated."""

    needs_rebuild = target_outdated(target, deps)
    if needs_rebuild:
        xsys(cmd)
617 617
618 618 #----------------------------------------------------------------------------
def unquote_ends(istr):
    """Remove a single pair of quotes from the endpoints of a string."""

    if not istr:
        return istr
    first, last = istr[0], istr[-1]
    # Strip only a *matching* pair of single or double quotes.
    if first == last and first in ("'", '"'):
        return istr[1:-1]
    return istr
629 629
630 630 #----------------------------------------------------------------------------
631 631 def process_cmdline(argv,names=[],defaults={},usage=''):
632 632 """ Process command-line options and arguments.
633 633
634 634 Arguments:
635 635
636 636 - argv: list of arguments, typically sys.argv.
637 637
638 638 - names: list of option names. See DPyGetOpt docs for details on options
639 639 syntax.
640 640
641 641 - defaults: dict of default values.
642 642
643 643 - usage: optional usage notice to print if a wrong argument is passed.
644 644
645 645 Return a dict of options and a list of free arguments."""
646 646
647 647 getopt = DPyGetOpt.DPyGetOpt()
648 648 getopt.setIgnoreCase(0)
649 649 getopt.parseConfiguration(names)
650 650
651 651 try:
652 652 getopt.processArguments(argv)
653 653 except DPyGetOpt.ArgumentError, exc:
654 654 print usage
655 655 warn('"%s"' % exc,level=4)
656 656
657 657 defaults.update(getopt.optionValues)
658 658 args = getopt.freeValues
659 659
660 660 return defaults,args
661 661
662 662 #----------------------------------------------------------------------------
def optstr2types(ostr):
    """Convert a string of option names to a dict of type mappings.

    optstr2types(str) -> {None:'string_opts',int:'int_opts',float:'float_opts'}

    This is used to get the types of all the options in a string formatted
    with the conventions of DPyGetOpt. The 'type' None is used for options
    which are strings (they need no further conversion). This function's main
    use is to get a typemap for use with read_dict().
    """

    typeconv = {None: '', int: '', float: ''}
    typemap = {'s': None, 'i': int, 'f': float}
    opt_re = re.compile(r'([\w]*)([^:=]*:?=?)([sif]?)')

    for word in ostr.split():
        name, alias, otype = opt_re.match(word).groups()
        # Simple switches are treated as integers too.
        if otype == '' or alias == '!':
            otype = 'i'
        typeconv[typemap[otype]] = typeconv[typemap[otype]] + name + ' '
    return typeconv
684 684
685 685 #----------------------------------------------------------------------------
def read_dict(filename,type_conv=None,**opt):

    """Read a dictionary of key=value pairs from an input file, optionally
    performing conversions on the resulting values.

    read_dict(filename,type_conv,**opt) -> dict

    Only one value per line is accepted, the format should be
     # optional comments are ignored
     key value\n

    Args:

      - type_conv: A dictionary specifying which keys need to be converted to
      which types. By default all keys are read as strings. This dictionary
      should have as its keys valid conversion functions for strings
      (int,long,float,complex, or your own).  The value for each key
      (converter) should be a whitespace separated string containing the names
      of all the entries in the file to be converted using that function. For
      keys to be left alone, use None as the conversion function (only needed
      with purge=1, see below).

      - opt: dictionary with extra options as below (default in parens)

        purge(0): if set to 1, all keys *not* listed in type_conv are purged out
        of the dictionary to be returned. If purge is going to be used, the
        set of keys to be left as strings also has to be explicitly specified
        using the (non-existent) conversion function None.

        fs(None): field separator. This is the key/value separator to be used
        when parsing the file. The None default means any whitespace [behavior
        of string.split()].

        strip(0): if 1, strip string values of leading/trailinig whitespace.

        warn(1): warning level if requested keys are not found in file.
          - 0: silently ignore.
          - 1: inform but proceed.
          - 2: raise KeyError exception.

        no_empty(0): if 1, remove keys with whitespace strings as a value.

        unique([]): list of keys (or space separated string) which can't be
        repeated. If one such key is found in the file, each new instance
        overwrites the previous one. For keys not listed here, the behavior is
        to make a list of all appearances.

    Example:

    If the input file test.ini contains (we put it in a string to keep the test
    self-contained):
     i 3
     x 4.5
     y 5.5
     s hi ho
    Then:

    >>> type_conv={int:'i',float:'x',None:'s'}
    >>> read_dict('test.ini')
    {'i': '3', 's': 'hi ho', 'x': '4.5', 'y': '5.5'}
    >>> read_dict('test.ini',type_conv)
    {'i': 3, 's': 'hi ho', 'x': 4.5, 'y': '5.5'}
    >>> read_dict('test.ini',type_conv,purge=1)
    {'i': 3, 's': 'hi ho', 'x': 4.5}
    """

    # starting config
    opt.setdefault('purge',0)
    opt.setdefault('fs',None)  # field sep defaults to any whitespace
    opt.setdefault('strip',0)
    opt.setdefault('warn',1)
    opt.setdefault('no_empty',0)
    opt.setdefault('unique','')
    # 'unique' may be given as a whitespace-separated string or a sequence.
    if type(opt['unique']) in StringTypes:
        unique_keys = qw(opt['unique'])
    elif type(opt['unique']) in (types.TupleType,types.ListType):
        unique_keys = opt['unique']
    else:
        raise ValueError, 'Unique keys must be given as a string, List or Tuple'

    dict = {}
    # first read in table of values as strings
    file = open(filename,'r')
    for line in file.readlines():
        line = line.strip()
        # Skip comment lines and blanks.
        if len(line) and line[0]=='#': continue
        if len(line)>0:
            # Split into at most (key, value); a key alone gets value ''.
            lsplit = line.split(opt['fs'],1)
            try:
                key,val = lsplit
            except ValueError:
                key,val = lsplit[0],''
            key = key.strip()
            if opt['strip']: val = val.strip()
            # Explicitly quoted empty strings mean an empty value.
            if val == "''" or val == '""': val = ''
            if opt['no_empty'] and (val=='' or val.isspace()):
                continue
            # if a key is found more than once in the file, build a list
            # unless it's in the 'unique' list. In that case, last found in file
            # takes precedence. User beware.
            try:
                if dict[key] and key in unique_keys:
                    dict[key] = val
                elif type(dict[key]) is types.ListType:
                    dict[key].append(val)
                else:
                    dict[key] = [dict[key],val]
            except KeyError:
                # First occurrence of this key.
                dict[key] = val
    # purge if requested
    if opt['purge']:
        accepted_keys = qwflat(type_conv.values())
        for key in dict.keys():
            if key in accepted_keys: continue
            del(dict[key])
    # now convert if requested
    if type_conv==None: return dict
    conversions = type_conv.keys()
    # None marks keys to leave as strings; it's not a real converter.
    try: conversions.remove(None)
    except: pass
    for convert in conversions:
        for val in qw(type_conv[convert]):
            try:
                dict[val] = convert(dict[val])
            except KeyError,e:
                # Requested key absent from the file: honor the warn level.
                if opt['warn'] == 0:
                    pass
                elif opt['warn'] == 1:
                    print >>sys.stderr, 'Warning: key',val,\
                          'not found in file',filename
                elif opt['warn'] == 2:
                    raise KeyError,e
                else:
                    raise ValueError,'Warning level must be 0,1 or 2'

    return dict
820 820
821 821 #----------------------------------------------------------------------------
def flag_calls(func):
    """Wrap a function to detect and flag when it gets called.

    This is a decorator which takes a function and wraps it in a function with
    a 'called' attribute. wrapper.called is initialized to False.

    The wrapper.called attribute is set to False right before each call to the
    wrapped function, so if the call fails it remains False. After the call
    completes, wrapper.called is set to True and the output is returned.

    Testing for truth in wrapper.called allows you to determine if a call to
    func() was attempted and succeeded."""

    def wrapper(*args,**kw):
        # Reset first so a raising call leaves the flag False.
        wrapper.called = False
        result = func(*args,**kw)
        wrapper.called = True
        return result

    wrapper.called = False
    wrapper.__doc__ = func.__doc__
    return wrapper
844 844
845 845 #----------------------------------------------------------------------------
def dhook_wrap(func,*a,**k):
    """Wrap a function call in a sys.displayhook controller.

    Returns a wrapper around func which calls func, with all its arguments and
    keywords unmodified, using the default sys.displayhook. Since IPython
    modifies sys.displayhook, it breaks the behavior of certain systems that
    rely on the default behavior, notably doctest.
    """

    def f(*a,**k):
        # Swap in the pristine displayhook for the duration of the call,
        # restoring whatever was installed even if func raises.
        saved_hook = sys.displayhook
        sys.displayhook = sys.__displayhook__
        try:
            result = func(*a,**k)
        finally:
            sys.displayhook = saved_hook
        return result

    f.__doc__ = func.__doc__
    return f
868 868
869 869 #----------------------------------------------------------------------------
def doctest_reload():
    """Properly reload doctest to reuse it interactively.

    This routine:

    - reloads doctest

    - resets its global 'master' attribute to None, so that multiple uses of
    the module interactively don't produce cumulative reports.

    - Monkeypatches its core test runner method to protect it from IPython's
    modified displayhook.  Doctest expects the default displayhook behavior
    deep down, so our modification breaks it completely.  For this reason, a
    hard monkeypatch seems like a reasonable solution rather than asking
    users to manually use a different doctest runner when under IPython."""

    import doctest
    # 'reload' is the Python 2 builtin; this clears doctest's module state.
    reload(doctest)
    # Drop accumulated results so interactive re-runs start fresh.
    doctest.master=None

    try:
        doctest.DocTestRunner
    except AttributeError:
        # This is only for python 2.3 compatibility, remove once we move to
        # 2.4 only.
        pass
    else:
        # Force the runner to execute under the default sys.displayhook.
        doctest.DocTestRunner.run = dhook_wrap(doctest.DocTestRunner.run)
898 898
899 899 #----------------------------------------------------------------------------
class HomeDirError(Error):
    """Raised by get_home_dir() when no usable home directory is found."""
    pass
902 902
def get_home_dir():
    """Return the closest possible equivalent to a 'home' directory.

    We first try $HOME.  Absent that, on NT it's $HOMEDRIVE\$HOMEPATH.

    Currently only Posix and NT are implemented, a HomeDirError exception is
    raised for all other OSes.

    Returns the home directory as a string; raises HomeDirError when no
    suitable directory can be determined."""

    isdir = os.path.isdir
    env = os.environ

    # first, check py2exe distribution root directory for _ipython.
    # This overrides all. Normally does not exist.

    if '\\library.zip\\' in IPython.__file__.lower():
        root, rest = IPython.__file__.lower().split('library.zip')
        if isdir(root + '_ipython'):
            os.environ["IPYKITROOT"] = root.rstrip('\\')
            return root

    try:
        homedir = env['HOME']
        if not isdir(homedir):
            # in case a user stuck some string which does NOT resolve to a
            # valid path, it's as good as if we hadn't foud it
            raise KeyError
        return homedir
    except KeyError:
        if os.name == 'posix':
            raise HomeDirError,'undefined $HOME, IPython can not proceed.'
        elif os.name == 'nt':
            # For some strange reason, win9x returns 'nt' for os.name.
            try:
                homedir = os.path.join(env['HOMEDRIVE'],env['HOMEPATH'])
                if not isdir(homedir):
                    homedir = os.path.join(env['USERPROFILE'])
                    if not isdir(homedir):
                        raise HomeDirError
                return homedir
            except:
                try:
                    # Use the registry to get the 'My Documents' folder.
                    import _winreg as wreg
                    key = wreg.OpenKey(wreg.HKEY_CURRENT_USER,
                        "Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders")
                    homedir = wreg.QueryValueEx(key,'Personal')[0]
                    key.Close()
                    if not isdir(homedir):
                        e = ('Invalid "Personal" folder registry key '
                             'typically "My Documents".\n'
                             'Value: %s\n'
                             'This is not a valid directory on your system.' %
                             homedir)
                        raise HomeDirError(e)
                    return homedir
                except HomeDirError:
                    raise
                except:
                    # Any other failure: fall back to the drive root.
                    return 'C:\\'
        elif os.name == 'dos':
            # Desperate, may do absurd things in classic MacOS. May work under DOS.
            return 'C:\\'
        else:
            raise HomeDirError,'support for your operating system not implemented.'
967 967
968 968 #****************************************************************************
969 969 # strings and text
970 970
class LSString(str):
    """String derivative with a special access attributes.

    These are normal strings, but with the special attributes:

        .l (or .list) : value as list (split on newlines).
        .n (or .nlstr): original value (the string itself).
        .s (or .spstr): value as whitespace-separated string.
        .p (or .paths): list of path objects

    Any values which require transformations are computed only once and
    cached.

    Such strings are very useful to efficiently interact with the shell, which
    typically only understands whitespace-separated options for commands."""

    def get_list(self):
        # Split on newlines; compute once and cache on the instance.
        if not hasattr(self, '_LSString__list'):
            self.__list = self.split('\n')
        return self.__list

    l = list = property(get_list)

    def get_spstr(self):
        # Newlines collapsed to single spaces; cached after first use.
        if not hasattr(self, '_LSString__spstr'):
            self.__spstr = self.replace('\n',' ')
        return self.__spstr

    s = spstr = property(get_spstr)

    def get_nlstr(self):
        # The original value is already the newline-joined form.
        return self

    n = nlstr = property(get_nlstr)

    def get_paths(self):
        # Only lines naming existing filesystem entries become path objects.
        if not hasattr(self, '_LSString__paths'):
            self.__paths = [path(p) for p in self.split('\n')
                            if os.path.exists(p)]
        return self.__paths

    p = paths = property(get_paths)
1018 1018
def print_lsstring(arg):
    """ Prettier (non-repr-like) and more informative printer for LSString """
    print "LSString (.p, .n, .l, .s available). Value:"
    print arg

# Register this printer with the type-based result display hook so LSString
# results are shown with the notice above rather than via repr().
print_lsstring = result_display.when_type(LSString)(print_lsstring)
1025 1025
1026 1026 #----------------------------------------------------------------------------
class SList(list):
    """List derivative with a special access attributes.

    These are normal lists, but with the special attributes:

        .l (or .list) : value as list (the list itself).
        .n (or .nlstr): value as a string, joined on newlines.
        .s (or .spstr): value as a string, joined on spaces.
        .p (or .paths): list of path objects

    Any values which require transformations are computed only once and
    cached."""

    def get_list(self):
        # The list form is the object itself.
        return self

    l = list = property(get_list)

    def get_spstr(self):
        # Space-joined string form, computed lazily and cached.
        try:
            return self.__spstr
        except AttributeError:
            self.__spstr = ' '.join(self)
            return self.__spstr

    s = spstr = property(get_spstr)

    def get_nlstr(self):
        # Newline-joined string form, computed lazily and cached.
        try:
            return self.__nlstr
        except AttributeError:
            self.__nlstr = '\n'.join(self)
            return self.__nlstr

    n = nlstr = property(get_nlstr)

    def get_paths(self):
        # Only elements naming existing filesystem entries become paths.
        try:
            return self.__paths
        except AttributeError:
            self.__paths = [path(p) for p in self if os.path.exists(p)]
            return self.__paths

    p = paths = property(get_paths)

    def grep(self, pattern, prune = False, field = None):
        """ Return all strings matching 'pattern' (a regex or callable)

        This is case-insensitive. If prune is true, return all items
        NOT matching the pattern.

        If field is specified, the match must occur in the specified
        whitespace-separated field.

        Examples::

            a.grep( lambda x: x.startswith('C') )
            a.grep('Cha.*log', prune=1)
            a.grep('chm', field=-1)
        """

        def match_target(s):
            # Reduce each line to the selected field (or the whole line).
            if field is None:
                return s
            parts = s.split()
            try:
                tgt = parts[field]
                return tgt
            except IndexError:
                # A line lacking the requested field can never match.
                return ""

        if isinstance(pattern, basestring):
            pred = lambda x : re.search(pattern, x, re.IGNORECASE)
        else:
            pred = pattern
        if not prune:
            return SList([el for el in self if pred(match_target(el))])
        else:
            return SList([el for el in self if not pred(match_target(el))])
    def fields(self, *fields):
        """ Collect whitespace-separated fields from string list

        Allows quick awk-like usage of string lists.

        Example data (in var a, created by 'a = !ls -l')::
            -rwxrwxrwx 1 ville None 18 Dec 14 2006 ChangeLog
            drwxrwxrwx+ 6 ville None 0 Oct 24 18:05 IPython

        a.fields(0) is ['-rwxrwxrwx', 'drwxrwxrwx+']
        a.fields(1,0) is ['1 -rwxrwxrwx', '6 drwxrwxrwx+']
        (note the joining by space).
        a.fields(-1) is ['ChangeLog', 'IPython']

        IndexErrors are ignored.

        Without args, fields() just split()'s the strings.
        """
        if len(fields) == 0:
            return [el.split() for el in self]

        res = SList()
        for el in [f.split() for f in self]:
            lineparts = []

            for fd in fields:
                try:
                    lineparts.append(el[fd])
                except IndexError:
                    # Requested field missing on this line; skip it.
                    pass
            if lineparts:
                res.append(" ".join(lineparts))

        return res

    def sort(self,field= None, nums = False):
        """ sort by specified fields (see fields())

        Example::
            a.sort(1, nums = True)

        Sorts a by second field, in numerical order (so that 21 > 3)

        """

        #decorate, sort, undecorate
        if field is not None:
            dsu = [[SList([line]).fields(field), line] for line in self]
        else:
            dsu = [[line, line] for line in self]
        if nums:
            # Compare numerically: keep only the digits of each key; keys
            # without any digits sort as 0.
            for i in range(len(dsu)):
                numstr = "".join([ch for ch in dsu[i][0] if ch.isdigit()])
                try:
                    n = int(numstr)
                except ValueError:
                    n = 0;
                dsu[i][0] = n


        dsu.sort()
        return SList([t[1] for t in dsu])
1144 1167
def print_slist(arg):
    """ Prettier (non-repr-like) and more informative printer for SList """
    print "SList (.p, .n, .l, .s, .grep(), .fields(), sort() available):"
    # An SList may carry a one-shot 'hideonce' flag; honor and clear it
    # instead of printing the contents.
    if hasattr(arg, 'hideonce') and arg.hideonce:
        arg.hideonce = False
        return

    nlprint(arg)

# Register this printer with the type-based result display hook.
print_slist = result_display.when_type(SList)(print_slist)
1151 1178
1152 1179
1153 1180
1154 1181 #----------------------------------------------------------------------------
def esc_quotes(strng):
    """Return the input string with single and double quotes escaped out"""

    # Escape double quotes first, then single quotes.
    escaped = strng.replace('"', '\\"')
    return escaped.replace("'", "\\'")
1159 1186
1160 1187 #----------------------------------------------------------------------------
def make_quoted_expr(s):
    """Return string s in appropriate quotes, using raw string if possible.

    Effectively this turns string: cd \ao\ao\
    to: r"cd \ao\ao\_"[:-1]

    Note the use of raw string and padding at the end to allow trailing backslash.

    """

    prefix = ''
    suffix = ''
    padding = ''
    if "\\" in s:
        # Backslashes present: emit a raw string.
        prefix = 'r'
        if s.endswith('\\'):
            # A raw string cannot end in a backslash; pad with a throwaway
            # character and slice it back off at runtime.
            suffix = '[:-1]'
            padding = '_'

    # Pick the first quoting style that cannot collide with the content.
    if '"' not in s:
        quote = '"'
    elif "'" not in s:
        quote = "'"
    elif '"""' not in s and not s.endswith('"'):
        quote = '"""'
    elif "'''" not in s and not s.endswith("'"):
        quote = "'''"
    else:
        # give up, backslash-escaped string will do
        return '"%s"' % esc_quotes(s)

    return prefix + quote + s + padding + quote + suffix
1192 1219
1193 1220
1194 1221 #----------------------------------------------------------------------------
def raw_input_multi(header='', ps1='==> ', ps2='..> ',terminate_str = '.'):
    """Take multiple lines of input.

    A list with each line of input as a separate element is returned when a
    termination string is entered (defaults to a single '.'). Input can also
    terminate via EOF (^D in Unix, ^Z-RET in Windows).

    Lines of input which end in \\ are joined into single entries (and a
    secondary continuation prompt is issued as long as the user terminates
    lines with \\). This allows entering very long strings which are still
    meant to be treated as single entities.
    """

    try:
        if header:
            header += '\n'
        lines = [raw_input(header + ps1)]
    except EOFError:
        # EOF before any input: nothing was entered.
        return []
    terminate = [terminate_str]
    try:
        while lines[-1:] != terminate:
            new_line = raw_input(ps1)
            # Backslash continuation: join pieces until a line does not end
            # in a backslash.
            while new_line.endswith('\\'):
                new_line = new_line[:-1] + raw_input(ps2)
            lines.append(new_line)

        return lines[:-1] # don't return the termination command
    except EOFError:
        # EOF acts like the termination string: return what we have so far.
        print
        return lines
1226 1253
1227 1254 #----------------------------------------------------------------------------
def raw_input_ext(prompt='', ps2='... '):
    """Similar to raw_input(), but accepts extended lines if input ends with \\."""

    line = raw_input(prompt)
    # Keep prompting with the continuation prompt while lines end in a
    # backslash, joining the pieces into one logical line.
    while line.endswith('\\'):
        line = line[:-1] + raw_input(ps2)
    return line
1235 1262
1236 1263 #----------------------------------------------------------------------------
def ask_yes_no(prompt,default=None):
    """Asks a question and returns a boolean (y/n) answer.

    If default is given (one of 'y','n'), it is used if the user input is
    empty. Otherwise the question is repeated until an answer is given.

    An EOF is treated as the default answer.  If there is no default, an
    exception is raised to prevent infinite loops.

    Valid answers are: y/yes/n/no (match is not case sensitive)."""

    answers = {'y':True,'n':False,'yes':True,'no':False}
    ans = None
    # Loop until a recognized answer (or a usable default) is obtained.
    while ans not in answers.keys():
        try:
            ans = raw_input(prompt+' ').lower()
            if not ans: # response was an empty string
                ans = default
        except KeyboardInterrupt:
            # Ctrl-C simply re-asks the question.
            pass
        except EOFError:
            if default in answers.keys():
                ans = default
                print
            else:
                # No default: re-raise rather than loop forever on EOF.
                raise

    return answers[ans]
1265 1292
1266 1293 #----------------------------------------------------------------------------
def marquee(txt='',width=78,mark='*'):
    """Return the input string centered in a 'marquee'.

    :Parameters:
      txt : string
        Text to display; an empty string yields a full line of 'mark'.
      width : int
        Total target width of the returned line.
      mark : string
        Marker string used to build the marquee.
    """
    if not txt:
        return (mark*width)[:width]
    # Use explicit floor division so the count stays an integer even under
    # 'from __future__ import division' (or Python 3), where '/' would yield
    # a float and 'mark*nmark' would fail.
    nmark = (width-len(txt)-2)//len(mark)//2
    if nmark < 0: nmark =0
    marks = mark*nmark
    return '%s %s %s' % (marks,txt,marks)
1275 1302
1276 1303 #----------------------------------------------------------------------------
class EvalDict:
    """
    Emulate a dict which evaluates its contents in the caller's frame.

    Usage:
    >>>number = 19
    >>>text = "python"
    >>>print "%(text.capitalize())s %(number/9.0).1f rules!" % EvalDict()
    """

    # This version is due to sismex01@hebmex.com on c.l.py, and is basically a
    # modified (shorter) version of:
    # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66018 by
    # Skip Montanaro (skip@pobox.com).

    def __getitem__(self, name):
        # sys._getframe(1) is the *caller's* frame; the fixed depth is part
        # of the contract, so this must not be wrapped in extra call layers.
        frame = sys._getframe(1)
        return eval(name, frame.f_globals, frame.f_locals)

EvalString = EvalDict # for backwards compatibility
1297 1324 #----------------------------------------------------------------------------
def qw(words,flat=0,sep=None,maxsplit=-1):
    """Similar to Perl's qw() operator, but with some more options.

    qw(words,flat=0,sep=' ',maxsplit=-1) -> words.split(sep,maxsplit)

    words can also be a list itself, and with flat=1, the output will be
    recursively flattened. Examples:

    >>> qw('1 2')
    ['1', '2']
    >>> qw(['a b','1 2',['m n','p q']])
    [['a', 'b'], ['1', '2'], [['m', 'n'], ['p', 'q']]]
    >>> qw(['a b','1 2',['m n','p q']],flat=1)
    ['a', 'b', '1', '2', 'm', 'n', 'p', 'q'] """

    if type(words) in StringTypes:
        return [word.strip() for word in words.split(sep,maxsplit)
                if word and not word.isspace() ]
    if flat:
        # flatten() is defined elsewhere in this module; note this relies on
        # Python 2 map() returning a list.
        return flatten(map(qw,words,[1]*len(words)))
    return map(qw,words)
1319 1346
1320 1347 #----------------------------------------------------------------------------
def qwflat(words,sep=None,maxsplit=-1):
    """Calls qw(words) in flat mode.  It's just a convenient shorthand."""
    # Delegate with explicit keywords for readability.
    return qw(words, flat=1, sep=sep, maxsplit=maxsplit)
1324 1351
1325 1352 #----------------------------------------------------------------------------
def qw_lol(indata):
    """qw_lol('a b') -> [['a','b']],
    otherwise it's just a call to qw().

    We need this to make sure the modules_some keys *always* end up as a
    list of lists."""

    # A bare string becomes a single-element list of lists; any other input
    # is passed straight through to qw().
    if type(indata) in StringTypes:
        return [qw(indata)]
    else:
        return qw(indata)
1337 1364
1338 1365 #-----------------------------------------------------------------------------
def list_strings(arg):
    """Always return a list of strings, given a string or list of strings
    as input.

    NOTE(review): StringTypes is presumably Python 2's (str, unicode) pair,
    imported at module level — confirm against the file header."""

    if type(arg) in StringTypes: return [arg]
    else: return arg
1345 1372
1346 1373 #----------------------------------------------------------------------------
def grep(pat,list,case=1):
    """Simple minded grep-like function.
    grep(pat,list) returns occurrences of pat in list, None on failure.

    It only does simple string matching, with no support for regexps. Use the
    option case=0 for case-insensitive matching."""

    # Note: 'list' shadows the builtin, but the parameter name is part of
    # the public signature, so it is kept as-is.
    if case:
        hits = [term for term in list if term.find(pat) > -1]
    else:
        lowered = pat.lower()
        hits = [term for term in list if term.lower().find(lowered) > -1]

    # Preserve the historical contract: None (not []) signals no matches.
    if hits:
        return hits
    return None
1367 1394
1368 1395 #----------------------------------------------------------------------------
def dgrep(pat,*opts):
    """Return grep() on dir()+dir(__builtins__).

    A very common use of grep() when working interactively.

    Extra positional args (*opts) are forwarded to grep(), e.g. case=0."""

    return grep(pat,dir(__main__)+dir(__main__.__builtins__),*opts)
1375 1402
1376 1403 #----------------------------------------------------------------------------
def idgrep(pat):
    """Case-insensitive dgrep()"""

    # 0 is grep's case flag (case=0 -> case-insensitive).
    return dgrep(pat,0)
1381 1408
1382 1409 #----------------------------------------------------------------------------
def igrep(pat,list):
    """Synonym for case-insensitive grep."""

    # Thin delegation; kept for interactive convenience.
    return grep(pat,list,case=0)
1387 1414
1388 1415 #----------------------------------------------------------------------------
def indent(str,nspaces=4,ntabs=0):
    """Indent a string a given number of spaces or tabstops.

    indent(str,nspaces=4,ntabs=0) -> indent str by ntabs+nspaces.
    """
    # None passes through untouched.
    if str is None:
        return

    pad = '\t' * ntabs + ' ' * nspaces
    # Prefix the first line, then re-prefix after every line separator.
    indented = pad + str.replace(os.linesep, os.linesep + pad)

    # A trailing newline in the input leaves a dangling pad; trim it so the
    # result does not end with bare indentation.
    if indented.endswith(os.linesep + pad):
        return indented[:-len(pad)]
    return indented
1402 1429
1403 1430 #-----------------------------------------------------------------------------
def native_line_ends(filename,backup=1):
    """Convert (in-place) a file to line-ends native to the current OS.

    If the optional backup argument is given as false, no backup of the
    original file is left."""

    backup_suffixes = {'posix':'~','dos':'.bak','nt':'.bak','mac':'.bak'}

    bak_filename = filename + backup_suffixes[os.name]

    # Read the original contents, making sure the handle is closed (the
    # previous code leaked the file object).
    infile = open(filename)
    try:
        original = infile.read()
    finally:
        infile.close()

    # Keep a copy (with metadata) before rewriting in place.
    shutil.copy2(filename,bak_filename)
    try:
        new = open(filename,'wb')
        try:
            new.write(os.linesep.join(original.splitlines()))
            new.write(os.linesep) # ALWAYS put an eol at the end of the file
        finally:
            new.close()
    except:
        # Best-effort recovery: restore the untouched backup on any failure.
        os.rename(bak_filename,filename)
    if not backup:
        try:
            os.remove(bak_filename)
        except:
            pass
1428 1455
1429 1456 #----------------------------------------------------------------------------
def get_pager_cmd(pager_cmd = None):
    """Return a pager command.

    Makes some attempts at finding an OS-correct one.

    :Parameters:
      pager_cmd : string or None
        Explicit pager command; returned unchanged when given.
    """

    if os.name == 'posix':
        default_pager_cmd = 'less -r' # -r for color control sequences
    elif os.name in ['nt','dos']:
        default_pager_cmd = 'type'
    else:
        # Previously an unrecognized os.name caused a NameError below when
        # $PAGER was unset; fall back to 'more', available on most systems.
        default_pager_cmd = 'more'

    if pager_cmd is None:
        # environ.get replaces the old bare try/except, which silently
        # swallowed errors other than a missing $PAGER.
        pager_cmd = os.environ.get('PAGER', default_pager_cmd)
    return pager_cmd
1446 1473
1447 1474 #-----------------------------------------------------------------------------
def get_pager_start(pager,start):
    """Return the string for paging files with an offset.

    This is the '+N' argument which less and more (under Unix) accept.
    """

    # Only pagers known to understand '+N' get an offset argument, and only
    # for a non-zero start line.
    if pager not in ['less','more'] or not start:
        return ''
    return '+' + str(start)
1462 1489
1463 1490 #----------------------------------------------------------------------------
1464 1491 # (X)emacs on W32 doesn't like to be bypassed with msvcrt.getch()
if os.name == 'nt' and os.environ.get('TERM','dumb') != 'emacs':
    # Windows console: read a single keypress via msvcrt.
    import msvcrt
    def page_more():
        """ Smart pausing between pages

        @return: True if need print more lines, False if quit
        """
        Term.cout.write('---Return to continue, q to quit--- ')
        ans = msvcrt.getch()
        if ans in ("q", "Q"):
            result = False
        else:
            result = True
        # Erase the 37-char prompt: backspace over it, blank it, backspace.
        Term.cout.write("\b"*37 + " "*37 + "\b"*37)
        return result
else:
    def page_more():
        """Pause between pages; returns False when the user quits ('q')."""
        # Non-Windows (or emacs) fallback: read a whole line of input.
        ans = raw_input('---Return to continue, q to quit--- ')
        if ans.lower().startswith('q'):
            return False
        else:
            return True
1487 1514
# Matches ANSI terminal escape sequences (e.g. color codes).
esc_re = re.compile(r"(\x1b[^m]+m)")

def page_dumb(strng,start=0,screen_lines=25):
    """Very dumb 'pager' in Python, for when nothing else works.

    Only moves forward, same interface as page(), except for pager_cmd and
    mode."""

    out_ln = strng.splitlines()[start:]
    screens = chop(out_ln,screen_lines-1)
    if len(screens) == 1:
        print >>Term.cout, os.linesep.join(screens[0])
    else:
        last_escape = ""
        for scr in screens[0:-1]:
            hunk = os.linesep.join(scr)
            # Re-emit the last seen escape sequence so coloring survives
            # across page breaks.
            print >>Term.cout, last_escape + hunk
            if not page_more():
                return
            esc_list = esc_re.findall(hunk)
            if len(esc_list) > 0:
                last_escape = esc_list[-1]
        print >>Term.cout, last_escape + os.linesep.join(screens[-1])
1511 1538
1512 1539 #----------------------------------------------------------------------------
def page(strng,start=0,screen_lines=0,pager_cmd = None):
    """Print a string, piping through a pager after a certain length.

    The screen_lines parameter specifies the number of *usable* lines of your
    terminal screen (total lines minus lines you need to reserve to show other
    information).

    If you set screen_lines to a number <=0, page() will try to auto-determine
    your screen size and will only use up to (screen_size+screen_lines) for
    printing, paging after that. That is, if you want auto-detection but need
    to reserve the bottom 3 lines of the screen, use screen_lines = -3, and for
    auto-detection without any lines reserved simply use screen_lines = 0.

    If a string won't fit in the allowed lines, it is sent through the
    specified pager command. If none given, look for PAGER in the environment,
    and ultimately default to less.

    If no system pager works, the string is sent through a 'dumb pager'
    written in python, very simplistic.
    """

    # Some routines may auto-compute start offsets incorrectly and pass a
    # negative value. Offset to 0 for robustness.
    start = max(0,start)

    # first, try the hook
    ip = IPython.ipapi.get()
    if ip:
        try:
            ip.IP.hooks.show_in_pager(strng)
            return
        except IPython.ipapi.TryNext:
            # Hook declined; fall through to the builtin paging logic.
            pass

    # Ugly kludge, but calling curses.initscr() flat out crashes in emacs
    TERM = os.environ.get('TERM','dumb')
    if TERM in ['dumb','emacs'] and os.name != 'nt':
        print strng
        return
    # chop off the topmost part of the string we don't want to see
    str_lines = strng.split(os.linesep)[start:]
    str_toprint = os.linesep.join(str_lines)
    num_newlines = len(str_lines)
    len_str = len(str_toprint)

    # Dumb heuristics to guesstimate number of on-screen lines the string
    # takes. Very basic, but good enough for docstrings in reasonable
    # terminals. If someone later feels like refining it, it's not hard.
    numlines = max(num_newlines,int(len_str/80)+1)

    if os.name == "nt":
        screen_lines_def = get_console_size(defaulty=25)[1]
    else:
        screen_lines_def = 25 # default value if we can't auto-determine

    # auto-determine screen size
    if screen_lines <= 0:
        if TERM=='xterm':
            use_curses = USE_CURSES
        else:
            # curses causes problems on many terminals other than xterm.
            use_curses = False
        if use_curses:
            # There is a bug in curses, where *sometimes* it fails to properly
            # initialize, and then after the endwin() call is made, the
            # terminal is left in an unusable state. Rather than trying to
            # check everytime for this (by requesting and comparing termios
            # flags each time), we just save the initial terminal state and
            # unconditionally reset it every time. It's cheaper than making
            # the checks.
            term_flags = termios.tcgetattr(sys.stdout)
            scr = curses.initscr()
            screen_lines_real,screen_cols = scr.getmaxyx()
            curses.endwin()
            # Restore terminal state in case endwin() didn't.
            termios.tcsetattr(sys.stdout,termios.TCSANOW,term_flags)
            # Now we have what we needed: the screen size in rows/columns
            screen_lines += screen_lines_real
            #print '***Screen size:',screen_lines_real,'lines x',\
            #screen_cols,'columns.' # dbg
        else:
            screen_lines += screen_lines_def

    #print 'numlines',numlines,'screenlines',screen_lines  # dbg
    if numlines <= screen_lines :
        #print '*** normal print'  # dbg
        print >>Term.cout, str_toprint
    else:
        # Try to open pager and default to internal one if that fails.
        # All failure modes are tagged as 'retval=1', to match the return
        # value of a failed system command.  If any intermediate attempt
        # sets retval to 1, at the end we resort to our own page_dumb() pager.
        pager_cmd = get_pager_cmd(pager_cmd)
        pager_cmd += ' ' + get_pager_start(pager_cmd,start)
        if os.name == 'nt':
            if pager_cmd.startswith('type'):
                # The default WinXP 'type' command is failing on complex strings.
                retval = 1
            else:
                # Feed the pager through a temp file redirect; 'file' is the
                # Python 2 builtin file type.
                tmpname = tempfile.mktemp('.txt')
                tmpfile = file(tmpname,'wt')
                tmpfile.write(strng)
                tmpfile.close()
                cmd = "%s < %s" % (pager_cmd,tmpname)
                if os.system(cmd):
                    retval = 1
                else:
                    retval = None
                os.remove(tmpname)
        else:
            try:
                retval = None
                # if I use popen4, things hang. No idea why.
                #pager,shell_out = os.popen4(pager_cmd)
                pager = os.popen(pager_cmd,'w')
                pager.write(strng)
                pager.close()
                retval = pager.close() # success returns None
            except IOError,msg: # broken pipe when user quits
                if msg.args == (32,'Broken pipe'):
                    retval = None
                else:
                    retval = 1
            except OSError:
                # Other strange problems, sometimes seen in Win2k/cygwin
                retval = 1
        if retval is not None:
            page_dumb(strng,screen_lines=screen_lines)
1641 1668
1642 1669 #----------------------------------------------------------------------------
def page_file(fname,start = 0, pager_cmd = None):
    """Page a file, using an optional pager command and starting line.
    """

    pager_cmd = get_pager_cmd(pager_cmd)
    pager_cmd += ' ' + get_pager_start(pager_cmd,start)

    try:
        # Never shell out to a pager under emacs or dumb terminals.
        if os.environ['TERM'] in ['emacs','dumb']:
            raise EnvironmentError
        xsys(pager_cmd + ' ' + fname)
    except:
        # Fall back to the internal pager on any failure.
        try:
            if start > 0:
                # page() offsets are 0-based; pager '+N' args are 1-based.
                start -= 1
            page(open(fname).read(),start)
        except:
            print 'Unable to show file',`fname`
1661 1688
1662 1689
1663 1690 #----------------------------------------------------------------------------
def snip_print(str,width = 75,print_full = 0,header = ''):
    """Print a string snipping the midsection to fit in width.

    print_full: mode control:
      - 0: only snip long strings
      - 1: send to page() directly.
      - 2: snip long strings and ask for full length viewing with page()
    Return 1 if snipping was necessary, 0 otherwise."""

    if print_full == 1:
        page(header+str)
        return 0

    print header,
    if len(str) < width:
        print str
        snip = 0
    else:
        # Keep the head and tail, dropping the middle to fit the width.
        whalf = int((width -5)/2)
        print str[:whalf] + ' <...> ' + str[-whalf:]
        snip = 1
    if snip and print_full == 2:
        if raw_input(header+' Snipped. View (y/n)? [N]').lower() == 'y':
            page(str)
    return snip
1689 1716
1690 1717 #****************************************************************************
1691 1718 # lists, dicts and structures
1692 1719
def belong(candidates,checklist):
    """Check whether a list of items appear in a given list of options.

    Returns a list of 1 and 0, one for each candidate given."""

    results = []
    for candidate in candidates:
        results.append(candidate in checklist)
    return results
1699 1726
1700 1727 #----------------------------------------------------------------------------
1701 1728 def uniq_stable(elems):
1702 1729 """uniq_stable(elems) -> list
1703 1730
1704 1731 Return from an iterable, a list of all the unique elements in the input,
1705 1732 but maintaining the order in which they first appear.
1706 1733
1707 1734 A naive solution to this problem which just makes a dictionary with the
1708 1735 elements as keys fails to respect the stability condition, since
1709 1736 dictionaries are unsorted by nature.
1710 1737
1711 1738 Note: All elements in the input must be valid dictionary keys for this
1712 1739 routine to work, as it internally uses a dictionary for efficiency
1713 1740 reasons."""
1714 1741
1715 1742 unique = []
1716 1743 unique_dict = {}
1717 1744 for nn in elems:
1718 1745 if nn not in unique_dict:
1719 1746 unique.append(nn)
1720 1747 unique_dict[nn] = None
1721 1748 return unique
1722 1749
1723 1750 #----------------------------------------------------------------------------
1724 1751 class NLprinter:
1725 1752 """Print an arbitrarily nested list, indicating index numbers.
1726 1753
1727 1754 An instance of this class called nlprint is available and callable as a
1728 1755 function.
1729 1756
1730 1757 nlprint(list,indent=' ',sep=': ') -> prints indenting each level by 'indent'
1731 1758 and using 'sep' to separate the index from the value. """
1732 1759
1733 1760 def __init__(self):
1734 1761 self.depth = 0
1735 1762
1736 1763 def __call__(self,lst,pos='',**kw):
1737 1764 """Prints the nested list numbering levels."""
1738 1765 kw.setdefault('indent',' ')
1739 1766 kw.setdefault('sep',': ')
1740 1767 kw.setdefault('start',0)
1741 1768 kw.setdefault('stop',len(lst))
1742 1769 # we need to remove start and stop from kw so they don't propagate
1743 1770 # into a recursive call for a nested list.
1744 1771 start = kw['start']; del kw['start']
1745 1772 stop = kw['stop']; del kw['stop']
1746 1773 if self.depth == 0 and 'header' in kw.keys():
1747 1774 print kw['header']
1748 1775
1749 1776 for idx in range(start,stop):
1750 1777 elem = lst[idx]
1751 1778 if type(elem)==type([]):
1752 1779 self.depth += 1
1753 1780 self.__call__(elem,itpl('$pos$idx,'),**kw)
1754 1781 self.depth -= 1
1755 1782 else:
1756 1783 printpl(kw['indent']*self.depth+'$pos$idx$kw["sep"]$elem')
1757 1784
1758 1785 nlprint = NLprinter()
1759 1786 #----------------------------------------------------------------------------
1760 1787 def all_belong(candidates,checklist):
1761 1788 """Check whether a list of items ALL appear in a given list of options.
1762 1789
1763 1790 Returns a single 1 or 0 value."""
1764 1791
1765 1792 return 1-(0 in [x in checklist for x in candidates])
1766 1793
1767 1794 #----------------------------------------------------------------------------
1768 1795 def sort_compare(lst1,lst2,inplace = 1):
1769 1796 """Sort and compare two lists.
1770 1797
1771 1798 By default it does it in place, thus modifying the lists. Use inplace = 0
1772 1799 to avoid that (at the cost of temporary copy creation)."""
1773 1800 if not inplace:
1774 1801 lst1 = lst1[:]
1775 1802 lst2 = lst2[:]
1776 1803 lst1.sort(); lst2.sort()
1777 1804 return lst1 == lst2
1778 1805
1779 1806 #----------------------------------------------------------------------------
1780 1807 def mkdict(**kwargs):
1781 1808 """Return a dict from a keyword list.
1782 1809
1783 1810 It's just syntactic sugar for making ditcionary creation more convenient:
1784 1811 # the standard way
1785 1812 >>>data = { 'red' : 1, 'green' : 2, 'blue' : 3 }
1786 1813 # a cleaner way
1787 1814 >>>data = dict(red=1, green=2, blue=3)
1788 1815
1789 1816 If you need more than this, look at the Struct() class."""
1790 1817
1791 1818 return kwargs
1792 1819
1793 1820 #----------------------------------------------------------------------------
1794 1821 def list2dict(lst):
1795 1822 """Takes a list of (key,value) pairs and turns it into a dict."""
1796 1823
1797 1824 dic = {}
1798 1825 for k,v in lst: dic[k] = v
1799 1826 return dic
1800 1827
1801 1828 #----------------------------------------------------------------------------
1802 1829 def list2dict2(lst,default=''):
1803 1830 """Takes a list and turns it into a dict.
1804 1831 Much slower than list2dict, but more versatile. This version can take
1805 1832 lists with sublists of arbitrary length (including sclars)."""
1806 1833
1807 1834 dic = {}
1808 1835 for elem in lst:
1809 1836 if type(elem) in (types.ListType,types.TupleType):
1810 1837 size = len(elem)
1811 1838 if size == 0:
1812 1839 pass
1813 1840 elif size == 1:
1814 1841 dic[elem] = default
1815 1842 else:
1816 1843 k,v = elem[0], elem[1:]
1817 1844 if len(v) == 1: v = v[0]
1818 1845 dic[k] = v
1819 1846 else:
1820 1847 dic[elem] = default
1821 1848 return dic
1822 1849
1823 1850 #----------------------------------------------------------------------------
1824 1851 def flatten(seq):
1825 1852 """Flatten a list of lists (NOT recursive, only works for 2d lists)."""
1826 1853
1827 1854 return [x for subseq in seq for x in subseq]
1828 1855
1829 1856 #----------------------------------------------------------------------------
1830 1857 def get_slice(seq,start=0,stop=None,step=1):
1831 1858 """Get a slice of a sequence with variable step. Specify start,stop,step."""
1832 1859 if stop == None:
1833 1860 stop = len(seq)
1834 1861 item = lambda i: seq[i]
1835 1862 return map(item,xrange(start,stop,step))
1836 1863
1837 1864 #----------------------------------------------------------------------------
1838 1865 def chop(seq,size):
1839 1866 """Chop a sequence into chunks of the given size."""
1840 1867 chunk = lambda i: seq[i:i+size]
1841 1868 return map(chunk,xrange(0,len(seq),size))
1842 1869
1843 1870 #----------------------------------------------------------------------------
1844 1871 # with is a keyword as of python 2.5, so this function is renamed to withobj
1845 1872 # from its old 'with' name.
1846 1873 def with_obj(object, **args):
1847 1874 """Set multiple attributes for an object, similar to Pascal's with.
1848 1875
1849 1876 Example:
1850 1877 with_obj(jim,
1851 1878 born = 1960,
1852 1879 haircolour = 'Brown',
1853 1880 eyecolour = 'Green')
1854 1881
1855 1882 Credit: Greg Ewing, in
1856 1883 http://mail.python.org/pipermail/python-list/2001-May/040703.html.
1857 1884
1858 1885 NOTE: up until IPython 0.7.2, this was called simply 'with', but 'with'
1859 1886 has become a keyword for Python 2.5, so we had to rename it."""
1860 1887
1861 1888 object.__dict__.update(args)
1862 1889
1863 1890 #----------------------------------------------------------------------------
1864 1891 def setattr_list(obj,alist,nspace = None):
1865 1892 """Set a list of attributes for an object taken from a namespace.
1866 1893
1867 1894 setattr_list(obj,alist,nspace) -> sets in obj all the attributes listed in
1868 1895 alist with their values taken from nspace, which must be a dict (something
1869 1896 like locals() will often do) If nspace isn't given, locals() of the
1870 1897 *caller* is used, so in most cases you can omit it.
1871 1898
1872 1899 Note that alist can be given as a string, which will be automatically
1873 1900 split into a list on whitespace. If given as a list, it must be a list of
1874 1901 *strings* (the variable names themselves), not of variables."""
1875 1902
1876 1903 # this grabs the local variables from the *previous* call frame -- that is
1877 1904 # the locals from the function that called setattr_list().
1878 1905 # - snipped from weave.inline()
1879 1906 if nspace is None:
1880 1907 call_frame = sys._getframe().f_back
1881 1908 nspace = call_frame.f_locals
1882 1909
1883 1910 if type(alist) in StringTypes:
1884 1911 alist = alist.split()
1885 1912 for attr in alist:
1886 1913 val = eval(attr,nspace)
1887 1914 setattr(obj,attr,val)
1888 1915
1889 1916 #----------------------------------------------------------------------------
1890 1917 def getattr_list(obj,alist,*args):
1891 1918 """getattr_list(obj,alist[, default]) -> attribute list.
1892 1919
1893 1920 Get a list of named attributes for an object. When a default argument is
1894 1921 given, it is returned when the attribute doesn't exist; without it, an
1895 1922 exception is raised in that case.
1896 1923
1897 1924 Note that alist can be given as a string, which will be automatically
1898 1925 split into a list on whitespace. If given as a list, it must be a list of
1899 1926 *strings* (the variable names themselves), not of variables."""
1900 1927
1901 1928 if type(alist) in StringTypes:
1902 1929 alist = alist.split()
1903 1930 if args:
1904 1931 if len(args)==1:
1905 1932 default = args[0]
1906 1933 return map(lambda attr: getattr(obj,attr,default),alist)
1907 1934 else:
1908 1935 raise ValueError,'getattr_list() takes only one optional argument'
1909 1936 else:
1910 1937 return map(lambda attr: getattr(obj,attr),alist)
1911 1938
1912 1939 #----------------------------------------------------------------------------
1913 1940 def map_method(method,object_list,*argseq,**kw):
1914 1941 """map_method(method,object_list,*args,**kw) -> list
1915 1942
1916 1943 Return a list of the results of applying the methods to the items of the
1917 1944 argument sequence(s). If more than one sequence is given, the method is
1918 1945 called with an argument list consisting of the corresponding item of each
1919 1946 sequence. All sequences must be of the same length.
1920 1947
1921 1948 Keyword arguments are passed verbatim to all objects called.
1922 1949
1923 1950 This is Python code, so it's not nearly as fast as the builtin map()."""
1924 1951
1925 1952 out_list = []
1926 1953 idx = 0
1927 1954 for object in object_list:
1928 1955 try:
1929 1956 handler = getattr(object, method)
1930 1957 except AttributeError:
1931 1958 out_list.append(None)
1932 1959 else:
1933 1960 if argseq:
1934 1961 args = map(lambda lst:lst[idx],argseq)
1935 1962 #print 'ob',object,'hand',handler,'ar',args # dbg
1936 1963 out_list.append(handler(args,**kw))
1937 1964 else:
1938 1965 out_list.append(handler(**kw))
1939 1966 idx += 1
1940 1967 return out_list
1941 1968
1942 1969 #----------------------------------------------------------------------------
1943 1970 def get_class_members(cls):
1944 1971 ret = dir(cls)
1945 1972 if hasattr(cls,'__bases__'):
1946 1973 for base in cls.__bases__:
1947 1974 ret.extend(get_class_members(base))
1948 1975 return ret
1949 1976
1950 1977 #----------------------------------------------------------------------------
1951 1978 def dir2(obj):
1952 1979 """dir2(obj) -> list of strings
1953 1980
1954 1981 Extended version of the Python builtin dir(), which does a few extra
1955 1982 checks, and supports common objects with unusual internals that confuse
1956 1983 dir(), such as Traits and PyCrust.
1957 1984
1958 1985 This version is guaranteed to return only a list of true strings, whereas
1959 1986 dir() returns anything that objects inject into themselves, even if they
1960 1987 are later not really valid for attribute access (many extension libraries
1961 1988 have such bugs).
1962 1989 """
1963 1990
1964 1991 # Start building the attribute list via dir(), and then complete it
1965 1992 # with a few extra special-purpose calls.
1966 1993 words = dir(obj)
1967 1994
1968 1995 if hasattr(obj,'__class__'):
1969 1996 words.append('__class__')
1970 1997 words.extend(get_class_members(obj.__class__))
1971 1998 #if '__base__' in words: 1/0
1972 1999
1973 2000 # Some libraries (such as traits) may introduce duplicates, we want to
1974 2001 # track and clean this up if it happens
1975 2002 may_have_dupes = False
1976 2003
1977 2004 # this is the 'dir' function for objects with Enthought's traits
1978 2005 if hasattr(obj, 'trait_names'):
1979 2006 try:
1980 2007 words.extend(obj.trait_names())
1981 2008 may_have_dupes = True
1982 2009 except TypeError:
1983 2010 # This will happen if `obj` is a class and not an instance.
1984 2011 pass
1985 2012
1986 2013 # Support for PyCrust-style _getAttributeNames magic method.
1987 2014 if hasattr(obj, '_getAttributeNames'):
1988 2015 try:
1989 2016 words.extend(obj._getAttributeNames())
1990 2017 may_have_dupes = True
1991 2018 except TypeError:
1992 2019 # `obj` is a class and not an instance. Ignore
1993 2020 # this error.
1994 2021 pass
1995 2022
1996 2023 if may_have_dupes:
1997 2024 # eliminate possible duplicates, as some traits may also
1998 2025 # appear as normal attributes in the dir() call.
1999 2026 words = list(set(words))
2000 2027 words.sort()
2001 2028
2002 2029 # filter out non-string attributes which may be stuffed by dir() calls
2003 2030 # and poor coding in third-party modules
2004 2031 return [w for w in words if isinstance(w, basestring)]
2005 2032
2006 2033 #----------------------------------------------------------------------------
2007 2034 def import_fail_info(mod_name,fns=None):
2008 2035 """Inform load failure for a module."""
2009 2036
2010 2037 if fns == None:
2011 2038 warn("Loading of %s failed.\n" % (mod_name,))
2012 2039 else:
2013 2040 warn("Loading of %s from %s failed.\n" % (fns,mod_name))
2014 2041
2015 2042 #----------------------------------------------------------------------------
2016 2043 # Proposed popitem() extension, written as a method
2017 2044
2018 2045
2019 2046 class NotGiven: pass
2020 2047
2021 2048 def popkey(dct,key,default=NotGiven):
2022 2049 """Return dct[key] and delete dct[key].
2023 2050
2024 2051 If default is given, return it if dct[key] doesn't exist, otherwise raise
2025 2052 KeyError. """
2026 2053
2027 2054 try:
2028 2055 val = dct[key]
2029 2056 except KeyError:
2030 2057 if default is NotGiven:
2031 2058 raise
2032 2059 else:
2033 2060 return default
2034 2061 else:
2035 2062 del dct[key]
2036 2063 return val
2037 2064
2038 2065 def wrap_deprecated(func, suggest = '<nothing>'):
2039 2066 def newFunc(*args, **kwargs):
2040 2067 warnings.warn("Call to deprecated function %s, use %s instead" %
2041 2068 ( func.__name__, suggest),
2042 2069 category=DeprecationWarning,
2043 2070 stacklevel = 2)
2044 2071 return func(*args, **kwargs)
2045 2072 return newFunc
2046 2073
2047 2074
2048 2075 def _num_cpus_unix():
2049 2076 """Return the number of active CPUs on a Unix system."""
2050 2077 return os.sysconf("SC_NPROCESSORS_ONLN")
2051 2078
2052 2079
2053 2080 def _num_cpus_darwin():
2054 2081 """Return the number of active CPUs on a Darwin system."""
2055 2082 p = subprocess.Popen(['sysctl','-n','hw.ncpu'],stdout=subprocess.PIPE)
2056 2083 return p.stdout.read()
2057 2084
2058 2085
2059 2086 def _num_cpus_windows():
2060 2087 """Return the number of active CPUs on a Windows system."""
2061 2088 return os.environ.get("NUMBER_OF_PROCESSORS")
2062 2089
2063 2090
2064 2091 def num_cpus():
2065 2092 """Return the effective number of CPUs in the system as an integer.
2066 2093
2067 2094 This cross-platform function makes an attempt at finding the total number of
2068 2095 available CPUs in the system, as returned by various underlying system and
2069 2096 python calls.
2070 2097
2071 2098 If it can't find a sensible answer, it returns 1 (though an error *may* make
2072 2099 it return a large positive number that's actually incorrect).
2073 2100 """
2074 2101
2075 2102 # Many thanks to the Parallel Python project (http://www.parallelpython.com)
2076 2103 # for the names of the keys we needed to look up for this function. This
2077 2104 # code was inspired by their equivalent function.
2078 2105
2079 2106 ncpufuncs = {'Linux':_num_cpus_unix,
2080 2107 'Darwin':_num_cpus_darwin,
2081 2108 'Windows':_num_cpus_windows,
2082 2109 # On Vista, python < 2.5.2 has a bug and returns 'Microsoft'
2083 2110 # See http://bugs.python.org/issue1082 for details.
2084 2111 'Microsoft':_num_cpus_windows,
2085 2112 }
2086 2113
2087 2114 ncpufunc = ncpufuncs.get(platform.system(),
2088 2115 # default to unix version (Solaris, AIX, etc)
2089 2116 _num_cpus_unix)
2090 2117
2091 2118 try:
2092 2119 ncpus = max(1,int(ncpufunc()))
2093 2120 except:
2094 2121 ncpus = 1
2095 2122 return ncpus
2096 2123
2097 2124 #*************************** end of file <genutils.py> **********************
@@ -1,41 +1,41 b''
1 1 # encoding: utf-8
2 2
3 3 """Asynchronous clients for the IPython controller.
4 4
5 5 This module has clients for using the various interfaces of the controller
6 6 in a fully asynchronous manner. This means that you will need to run the
7 7 Twisted reactor yourself and that all methods of the client classes return
8 8 deferreds to the result.
9 9
10 10 The main methods are are `get_*_client` and `get_client`.
11 11 """
12 12
13 13 __docformat__ = "restructuredtext en"
14 14
15 15 #-------------------------------------------------------------------------------
16 16 # Copyright (C) 2008 The IPython Development Team
17 17 #
18 18 # Distributed under the terms of the BSD License. The full license is in
19 19 # the file COPYING, distributed as part of this software.
20 20 #-------------------------------------------------------------------------------
21 21
22 22 #-------------------------------------------------------------------------------
23 23 # Imports
24 24 #-------------------------------------------------------------------------------
25 25
26 26 from IPython.kernel import codeutil
27 27 from IPython.kernel.clientconnector import ClientConnector
28 28
29 29 # Other things that the user will need
30 from IPython.kernel.task import Task
30 from IPython.kernel.task import MapTask, StringTask
31 31 from IPython.kernel.error import CompositeError
32 32
33 33 #-------------------------------------------------------------------------------
34 34 # Code
35 35 #-------------------------------------------------------------------------------
36 36
37 37 _client_tub = ClientConnector()
38 38 get_multiengine_client = _client_tub.get_multiengine_client
39 39 get_task_client = _client_tub.get_task_client
40 40 get_client = _client_tub.get_client
41 41
@@ -1,96 +1,96 b''
1 1 # encoding: utf-8
2 2
3 3 """This module contains blocking clients for the controller interfaces.
4 4
5 5 Unlike the clients in `asyncclient.py`, the clients in this module are fully
6 6 blocking. This means that methods on the clients return the actual results
7 7 rather than a deferred to the result. Also, we manage the Twisted reactor
8 8 for you. This is done by running the reactor in a thread.
9 9
10 10 The main classes in this module are:
11 11
12 12 * MultiEngineClient
13 13 * TaskClient
14 14 * Task
15 15 * CompositeError
16 16 """
17 17
18 18 __docformat__ = "restructuredtext en"
19 19
20 20 #-------------------------------------------------------------------------------
21 21 # Copyright (C) 2008 The IPython Development Team
22 22 #
23 23 # Distributed under the terms of the BSD License. The full license is in
24 24 # the file COPYING, distributed as part of this software.
25 25 #-------------------------------------------------------------------------------
26 26
27 27 #-------------------------------------------------------------------------------
28 28 # Imports
29 29 #-------------------------------------------------------------------------------
30 30
31 31 import sys
32 32
33 33 # from IPython.tools import growl
34 34 # growl.start("IPython1 Client")
35 35
36 36
37 37 from twisted.internet import reactor
38 38 from IPython.kernel.clientconnector import ClientConnector
39 39 from IPython.kernel.twistedutil import ReactorInThread
40 40 from IPython.kernel.twistedutil import blockingCallFromThread
41 41
42 42 # These enable various things
43 43 from IPython.kernel import codeutil
44 44 import IPython.kernel.magic
45 45
46 46 # Other things that the user will need
47 from IPython.kernel.task import Task
47 from IPython.kernel.task import MapTask, StringTask
48 48 from IPython.kernel.error import CompositeError
49 49
50 50 #-------------------------------------------------------------------------------
51 51 # Code
52 52 #-------------------------------------------------------------------------------
53 53
54 54 _client_tub = ClientConnector()
55 55
56 56
57 57 def get_multiengine_client(furl_or_file=''):
58 58 """Get the blocking MultiEngine client.
59 59
60 60 :Parameters:
61 61 furl_or_file : str
62 62 A furl or a filename containing a furl. If empty, the
63 63 default furl_file will be used
64 64
65 65 :Returns:
66 66 The connected MultiEngineClient instance
67 67 """
68 68 client = blockingCallFromThread(_client_tub.get_multiengine_client,
69 69 furl_or_file)
70 70 return client.adapt_to_blocking_client()
71 71
72 72 def get_task_client(furl_or_file=''):
73 73 """Get the blocking Task client.
74 74
75 75 :Parameters:
76 76 furl_or_file : str
77 77 A furl or a filename containing a furl. If empty, the
78 78 default furl_file will be used
79 79
80 80 :Returns:
81 81 The connected TaskClient instance
82 82 """
83 83 client = blockingCallFromThread(_client_tub.get_task_client,
84 84 furl_or_file)
85 85 return client.adapt_to_blocking_client()
86 86
87 87
88 88 MultiEngineClient = get_multiengine_client
89 89 TaskClient = get_task_client
90 90
91 91
92 92
93 93 # Now we start the reactor in a thread
94 94 rit = ReactorInThread()
95 95 rit.setDaemon(True)
96 96 rit.start() No newline at end of file
@@ -1,178 +1,180 b''
1 1 # encoding: utf-8
2 2 # -*- test-case-name: IPython.kernel.test.test_contexts -*-
3 3 """Context managers for IPython.
4 4
5 5 Python 2.5 introduced the `with` statement, which is based on the context
6 6 manager protocol. This module offers a few context managers for common cases,
7 7 which can also be useful as templates for writing new, application-specific
8 8 managers.
9 9 """
10 10
11 11 from __future__ import with_statement
12 12
13 13 __docformat__ = "restructuredtext en"
14 14
15 15 #-------------------------------------------------------------------------------
16 16 # Copyright (C) 2008 The IPython Development Team
17 17 #
18 18 # Distributed under the terms of the BSD License. The full license is in
19 19 # the file COPYING, distributed as part of this software.
20 20 #-------------------------------------------------------------------------------
21 21
22 22 #-------------------------------------------------------------------------------
23 23 # Imports
24 24 #-------------------------------------------------------------------------------
25 25
26 26 import linecache
27 27 import sys
28 28
29 29 from twisted.internet.error import ConnectionRefusedError
30 30
31 31 from IPython.ultraTB import _fixed_getinnerframes, findsource
32 32 from IPython import ipapi
33 33
34 34 from IPython.kernel import error
35 35
36 36 #---------------------------------------------------------------------------
37 37 # Utility functions needed by all context managers.
38 38 #---------------------------------------------------------------------------
39 39
40 40 def remote():
41 41 """Raises a special exception meant to be caught by context managers.
42 42 """
43 43 m = 'Special exception to stop local execution of parallel code.'
44 44 raise error.StopLocalExecution(m)
45 45
46 46
47 47 def strip_whitespace(source,require_remote=True):
48 48 """strip leading whitespace from input source.
49 49
50 50 :Parameters:
51 51
52 52 """
53 53 remote_mark = 'remote()'
54 54 # Expand tabs to avoid any confusion.
55 55 wsource = [l.expandtabs(4) for l in source]
56 56 # Detect the indentation level
57 57 done = False
58 58 for line in wsource:
59 59 if line.isspace():
60 60 continue
61 61 for col,char in enumerate(line):
62 62 if char != ' ':
63 63 done = True
64 64 break
65 65 if done:
66 66 break
67 67 # Now we know how much leading space there is in the code. Next, we
68 68 # extract up to the first line that has less indentation.
69 69 # WARNINGS: we skip comments that may be misindented, but we do NOT yet
70 70 # detect triple quoted strings that may have flush left text.
71 71 for lno,line in enumerate(wsource):
72 72 lead = line[:col]
73 73 if lead.isspace():
74 74 continue
75 75 else:
76 76 if not lead.lstrip().startswith('#'):
77 77 break
78 78 # The real 'with' source is up to lno
79 79 src_lines = [l[col:] for l in wsource[:lno+1]]
80 80
81 81 # Finally, check that the source's first non-comment line begins with the
82 82 # special call 'remote()'
83 83 if require_remote:
84 84 for nline,line in enumerate(src_lines):
85 85 if line.isspace() or line.startswith('#'):
86 86 continue
87 87 if line.startswith(remote_mark):
88 88 break
89 89 else:
90 90 raise ValueError('%s call missing at the start of code' %
91 91 remote_mark)
92 92 out_lines = src_lines[nline+1:]
93 93 else:
94 94 # If the user specified that the remote() call wasn't mandatory
95 95 out_lines = src_lines
96 96
97 97 # src = ''.join(out_lines) # dbg
98 98 #print 'SRC:\n<<<<<<<>>>>>>>\n%s<<<<<>>>>>>' % src # dbg
99 99 return ''.join(out_lines)
100 100
101 101 class RemoteContextBase(object):
102 102 def __init__(self):
103 103 self.ip = ipapi.get()
104 104
105 105 def _findsource_file(self,f):
106 106 linecache.checkcache()
107 107 s = findsource(f.f_code)
108 108 lnum = f.f_lineno
109 109 wsource = s[0][f.f_lineno:]
110 110 return strip_whitespace(wsource)
111 111
112 112 def _findsource_ipython(self,f):
113 113 from IPython import ipapi
114 114 self.ip = ipapi.get()
115 115 buf = self.ip.IP.input_hist_raw[-1].splitlines()[1:]
116 116 wsource = [l+'\n' for l in buf ]
117 117
118 118 return strip_whitespace(wsource)
119 119
120 120 def findsource(self,frame):
121 121 local_ns = frame.f_locals
122 122 global_ns = frame.f_globals
123 123 if frame.f_code.co_filename == '<ipython console>':
124 124 src = self._findsource_ipython(frame)
125 125 else:
126 126 src = self._findsource_file(frame)
127 127 return src
128 128
129 129 def __enter__(self):
130 130 raise NotImplementedError
131 131
132 132 def __exit__ (self, etype, value, tb):
133 133 if issubclass(etype,error.StopLocalExecution):
134 134 return True
135 135
136 136 class RemoteMultiEngine(RemoteContextBase):
137 137 def __init__(self,mec):
138 138 self.mec = mec
139 139 RemoteContextBase.__init__(self)
140 140
141 141 def __enter__(self):
142 142 src = self.findsource(sys._getframe(1))
143 143 return self.mec.execute(src)
144 144
145 145
146 146 # XXX - Temporary hackish testing, we'll move this into proper tests right
147 # away
148
149 if __name__ == '__main__':
150
151 # XXX - for now, we need a running cluster to be started separately. The
152 # daemon work is almost finished, and will make much of this unnecessary.
153 from IPython.kernel import client
154 mec = client.MultiEngineClient(('127.0.0.1',10105))
155
156 try:
157 mec.get_ids()
158 except ConnectionRefusedError:
159 import os, time
160 os.system('ipcluster -n 2 &')
161 time.sleep(2)
162 mec = client.MultiEngineClient(('127.0.0.1',10105))
163
164 mec.block = False
165
166 import itertools
167 c = itertools.count()
168
169 parallel = RemoteMultiEngine(mec)
170
171 with parallel as pr:
172 # A comment
173 remote() # this means the code below only runs remotely
174 print 'Hello remote world'
175 x = 3.14
176 # Comments are OK
177 # Even misindented.
178 y = x+1
147 # away. This has been commented out as it doesn't run under Python 2.4
148 # because of the usage of the with statement below. We need to protect
149 # such things with a try:except.
150
151 # if __name__ == '__main__':
152 #
153 # # XXX - for now, we need a running cluster to be started separately. The
154 # # daemon work is almost finished, and will make much of this unnecessary.
155 # from IPython.kernel import client
156 # mec = client.MultiEngineClient(('127.0.0.1',10105))
157 #
158 # try:
159 # mec.get_ids()
160 # except ConnectionRefusedError:
161 # import os, time
162 # os.system('ipcluster -n 2 &')
163 # time.sleep(2)
164 # mec = client.MultiEngineClient(('127.0.0.1',10105))
165 #
166 # mec.block = False
167 #
168 # import itertools
169 # c = itertools.count()
170 #
171 # parallel = RemoteMultiEngine(mec)
172 #
173 # with parallel as pr:
174 # # A comment
175 # remote() # this means the code below only runs remotely
176 # print 'Hello remote world'
177 # x = 3.14
178 # # Comments are OK
179 # # Even misindented.
180 # y = x+1
@@ -1,171 +1,171 b''
1 1 # encoding: utf-8
2 2
3 3 """Magic command interface for interactive parallel work."""
4 4
5 5 __docformat__ = "restructuredtext en"
6 6
7 7 #-------------------------------------------------------------------------------
8 8 # Copyright (C) 2008 The IPython Development Team
9 9 #
10 10 # Distributed under the terms of the BSD License. The full license is in
11 11 # the file COPYING, distributed as part of this software.
12 12 #-------------------------------------------------------------------------------
13 13
14 14 #-------------------------------------------------------------------------------
15 15 # Imports
16 16 #-------------------------------------------------------------------------------
17 17
18 18 import new
19 19
20 20 from IPython.iplib import InteractiveShell
21 21 from IPython.Shell import MTInteractiveShell
22 22
23 23 from twisted.internet.defer import Deferred
24 24
25 25
26 26 #-------------------------------------------------------------------------------
27 27 # Definitions of magic functions for use with IPython
28 28 #-------------------------------------------------------------------------------
29 29
30 30 NO_ACTIVE_CONTROLLER = """
31 31 Error: No Controller is activated
32 32 Use activate() on a RemoteController object to activate it for magics.
33 33 """
34 34
35 35 def magic_result(self,parameter_s=''):
36 36 """Print the result of command i on all engines of the active controller.
37 37
38 38 To activate a controller in IPython, first create it and then call
39 39 the activate() method.
40 40
41 41 Then you can do the following:
42 42
43 43 >>> result # Print the latest result
44 44 Printing result...
45 45 [127.0.0.1:0] In [1]: b = 10
46 46 [127.0.0.1:1] In [1]: b = 10
47 47
48 48 >>> result 0 # Print result 0
49 49 In [14]: result 0
50 50 Printing result...
51 51 [127.0.0.1:0] In [0]: a = 5
52 52 [127.0.0.1:1] In [0]: a = 5
53 53 """
54 54 try:
55 55 activeController = __IPYTHON__.activeController
56 56 except AttributeError:
57 57 print NO_ACTIVE_CONTROLLER
58 58 else:
59 59 try:
60 60 index = int(parameter_s)
61 61 except:
62 62 index = None
63 63 result = activeController.get_result(index)
64 64 return result
65 65
66 66 def magic_px(self,parameter_s=''):
67 67 """Executes the given python command on the active IPython Controller.
68 68
69 69 To activate a Controller in IPython, first create it and then call
70 70 the activate() method.
71 71
72 72 Then you can do the following:
73 73
74 74 >>> %px a = 5 # Runs a = 5 on all nodes
75 75 """
76 76
77 77 try:
78 78 activeController = __IPYTHON__.activeController
79 79 except AttributeError:
80 80 print NO_ACTIVE_CONTROLLER
81 81 else:
82 print "Executing command on Controller"
82 print "Parallel execution on engines: %s" % activeController.targets
83 83 result = activeController.execute(parameter_s)
84 84 return result
85 85
86 86 def pxrunsource(self, source, filename="<input>", symbol="single"):
87 87
88 88 try:
89 89 code = self.compile(source, filename, symbol)
90 90 except (OverflowError, SyntaxError, ValueError):
91 91 # Case 1
92 92 self.showsyntaxerror(filename)
93 93 return None
94 94
95 95 if code is None:
96 96 # Case 2
97 97 return True
98 98
99 99 # Case 3
100 100 # Because autopx is enabled, we now call executeAll or disable autopx if
101 101 # %autopx or autopx has been called
102 102 if '_ip.magic("%autopx' in source or '_ip.magic("autopx' in source:
103 103 _disable_autopx(self)
104 104 return False
105 105 else:
106 106 try:
107 107 result = self.activeController.execute(source)
108 108 except:
109 109 self.showtraceback()
110 110 else:
111 111 print result.__repr__()
112 112 return False
113 113
114 114 def magic_autopx(self, parameter_s=''):
115 115 """Toggles auto parallel mode for the active IPython Controller.
116 116
117 117 To activate a Controller in IPython, first create it and then call
118 118 the activate() method.
119 119
120 120 Then you can do the following:
121 121
122 122 >>> %autopx # Now all commands are executed in parallel
123 123 Auto Parallel Enabled
124 124 Type %autopx to disable
125 125 ...
126 126 >>> %autopx # Now all commands are locally executed
127 127 Auto Parallel Disabled
128 128 """
129 129
130 130 if hasattr(self, 'autopx'):
131 131 if self.autopx == True:
132 132 _disable_autopx(self)
133 133 else:
134 134 _enable_autopx(self)
135 135 else:
136 136 _enable_autopx(self)
137 137
138 138 def _enable_autopx(self):
139 139 """Enable %autopx mode by saving the original runsource and installing
140 140 pxrunsource.
141 141 """
142 142 try:
143 143 activeController = __IPYTHON__.activeController
144 144 except AttributeError:
145 145 print "No active RemoteController found, use RemoteController.activate()."
146 146 else:
147 147 self._original_runsource = self.runsource
148 148 self.runsource = new.instancemethod(pxrunsource, self, self.__class__)
149 149 self.autopx = True
150 150 print "Auto Parallel Enabled\nType %autopx to disable"
151 151
152 152 def _disable_autopx(self):
153 153 """Disable %autopx by restoring the original runsource."""
154 154 if hasattr(self, 'autopx'):
155 155 if self.autopx == True:
156 156 self.runsource = self._original_runsource
157 157 self.autopx = False
158 158 print "Auto Parallel Disabled"
159 159
160 160 # Add the new magic function to the class dict:
161 161
162 162 InteractiveShell.magic_result = magic_result
163 163 InteractiveShell.magic_px = magic_px
164 164 InteractiveShell.magic_autopx = magic_autopx
165 165
166 166 # And remove the global name to keep global namespace clean. Don't worry, the
167 167 # copy bound to IPython stays, we're just removing the global name.
168 168 del magic_result
169 169 del magic_px
170 170 del magic_autopx
171 171
@@ -1,121 +1,121 b''
1 1 # encoding: utf-8
2 2
3 3 """Classes used in scattering and gathering sequences.
4 4
5 5 Scattering consists of partitioning a sequence and sending the various
6 6 pieces to individual nodes in a cluster.
7 7 """
8 8
9 9 __docformat__ = "restructuredtext en"
10 10
11 11 #-------------------------------------------------------------------------------
12 12 # Copyright (C) 2008 The IPython Development Team
13 13 #
14 14 # Distributed under the terms of the BSD License. The full license is in
15 15 # the file COPYING, distributed as part of this software.
16 16 #-------------------------------------------------------------------------------
17 17
18 18 #-------------------------------------------------------------------------------
19 19 # Imports
20 20 #-------------------------------------------------------------------------------
21 21
22 22 import types
23 23
24 24 from IPython.genutils import flatten as genutil_flatten
25 25
26 26 #-------------------------------------------------------------------------------
27 27 # Figure out which array packages are present and their array types
28 28 #-------------------------------------------------------------------------------
29 29
30 30 arrayModules = []
31 31 try:
32 32 import Numeric
33 33 except ImportError:
34 34 pass
35 35 else:
36 36 arrayModules.append({'module':Numeric, 'type':Numeric.arraytype})
37 37 try:
38 38 import numpy
39 39 except ImportError:
40 40 pass
41 41 else:
42 42 arrayModules.append({'module':numpy, 'type':numpy.ndarray})
43 43 try:
44 44 import numarray
45 45 except ImportError:
46 46 pass
47 47 else:
48 48 arrayModules.append({'module':numarray,
49 49 'type':numarray.numarraycore.NumArray})
50 50
51 51 class Map:
52 52 """A class for partitioning a sequence using a map."""
53 53
54 54 def getPartition(self, seq, p, q):
55 55 """Returns the pth partition of q partitions of seq."""
56 56
57 57 # Test for error conditions here
58 58 if p<0 or p>=q:
59 59 print "No partition exists."
60 60 return
61 61
62 62 remainder = len(seq)%q
63 63 basesize = len(seq)/q
64 64 hi = []
65 65 lo = []
66 66 for n in range(q):
67 67 if n < remainder:
68 68 lo.append(n * (basesize + 1))
69 69 hi.append(lo[-1] + basesize + 1)
70 70 else:
71 71 lo.append(n*basesize + remainder)
72 72 hi.append(lo[-1] + basesize)
73 73
74 74
75 75 result = seq[lo[p]:hi[p]]
76 76 return result
77 77
78 78 def joinPartitions(self, listOfPartitions):
79 79 return self.concatenate(listOfPartitions)
80 80
81 81 def concatenate(self, listOfPartitions):
82 82 testObject = listOfPartitions[0]
83 83 # First see if we have a known array type
84 84 for m in arrayModules:
85 85 #print m
86 86 if isinstance(testObject, m['type']):
87 87 return m['module'].concatenate(listOfPartitions)
88 88 # Next try for Python sequence types
89 89 if isinstance(testObject, (types.ListType, types.TupleType)):
90 90 return genutil_flatten(listOfPartitions)
91 91 # If we have scalars, just return listOfPartitions
92 92 return listOfPartitions
93 93
94 94 class RoundRobinMap(Map):
95 95 """Partitions a sequence in a round robin fashion.
96 96
97 97 This currently does not work!
98 98 """
99 99
100 100 def getPartition(self, seq, p, q):
101 101 return seq[p:len(seq):q]
102 102 #result = []
103 103 #for i in range(p,len(seq),q):
104 104 # result.append(seq[i])
105 105 #return result
106 106
107 107 def joinPartitions(self, listOfPartitions):
108 108 #lengths = [len(x) for x in listOfPartitions]
109 109 #maxPartitionLength = len(listOfPartitions[0])
110 110 #numberOfPartitions = len(listOfPartitions)
111 111 #concat = self.concatenate(listOfPartitions)
112 112 #totalLength = len(concat)
113 113 #result = []
114 114 #for i in range(maxPartitionLength):
115 115 # result.append(concat[i:totalLength:maxPartitionLength])
116 116 return self.concatenate(listOfPartitions)
117 117
118 styles = {'basic':Map}
118 dists = {'b':Map}
119 119
120 120
121 121
@@ -1,780 +1,753 b''
1 1 # encoding: utf-8
2 2 # -*- test-case-name: IPython.kernel.test.test_multiengine -*-
3 3
4 4 """Adapt the IPython ControllerServer to IMultiEngine.
5 5
6 6 This module provides classes that adapt a ControllerService to the
7 7 IMultiEngine interface. This interface is a basic interactive interface
8 8 for working with a set of engines where it is desired to have explicit
9 9 access to each registered engine.
10 10
11 11 The classes here are exposed to the network in files like:
12 12
13 13 * multienginevanilla.py
14 14 * multienginepb.py
15 15 """
16 16
17 17 __docformat__ = "restructuredtext en"
18 18
19 19 #-------------------------------------------------------------------------------
20 20 # Copyright (C) 2008 The IPython Development Team
21 21 #
22 22 # Distributed under the terms of the BSD License. The full license is in
23 23 # the file COPYING, distributed as part of this software.
24 24 #-------------------------------------------------------------------------------
25 25
26 26 #-------------------------------------------------------------------------------
27 27 # Imports
28 28 #-------------------------------------------------------------------------------
29 29
30 30 from new import instancemethod
31 31 from types import FunctionType
32 32
33 33 from twisted.application import service
34 34 from twisted.internet import defer, reactor
35 35 from twisted.python import log, components, failure
36 36 from zope.interface import Interface, implements, Attribute
37 37
38 38 from IPython.tools import growl
39 39 from IPython.kernel.util import printer
40 40 from IPython.kernel.twistedutil import gatherBoth
41 41 from IPython.kernel import map as Map
42 42 from IPython.kernel import error
43 43 from IPython.kernel.pendingdeferred import PendingDeferredManager, two_phase
44 44 from IPython.kernel.controllerservice import \
45 45 ControllerAdapterBase, \
46 46 ControllerService, \
47 47 IControllerBase
48 48
49 49
50 50 #-------------------------------------------------------------------------------
51 51 # Interfaces for the MultiEngine representation of a controller
52 52 #-------------------------------------------------------------------------------
53 53
54 54 class IEngineMultiplexer(Interface):
55 55 """Interface to multiple engines implementing IEngineCore/Serialized/Queued.
56 56
57 57 This class simply acts as a multiplexer of methods that are in the
58 58 various IEngines* interfaces. Thus the methods here are just like those
59 59 in the IEngine* interfaces, but with an extra first argument, targets.
60 60 The targets argument can have the following forms:
61 61
62 62 * targets = 10 # Engines are indexed by ints
63 63 * targets = [0,1,2,3] # A list of ints
64 64 * targets = 'all' # A string to indicate all targets
65 65
66 66 If targets is bad in any way, an InvalidEngineID will be raised. This
67 67 includes engines not being registered.
68 68
69 69 All IEngineMultiplexer multiplexer methods must return a Deferred to a list
70 70 with length equal to the number of targets. The elements of the list will
71 71 correspond to the return of the corresponding IEngine method.
72 72
73 73 Failures are aggressive, meaning that if an action fails for any target,
74 74 the overall action will fail immediately with that Failure.
75 75
76 76 :Parameters:
77 77 targets : int, list of ints, or 'all'
78 78 Engine ids the action will apply to.
79 79
80 80 :Returns: Deferred to a list of results for each engine.
81 81
82 82 :Exception:
83 83 InvalidEngineID
84 84 If the targets argument is bad or engines aren't registered.
85 85 NoEnginesRegistered
86 86 If there are no engines registered and targets='all'
87 87 """
88 88
89 89 #---------------------------------------------------------------------------
90 90 # Mutiplexed methods
91 91 #---------------------------------------------------------------------------
92 92
93 93 def execute(lines, targets='all'):
94 94 """Execute lines of Python code on targets.
95 95
96 96 See the class docstring for information about targets and possible
97 97 exceptions this method can raise.
98 98
99 99 :Parameters:
100 100 lines : str
101 101 String of python code to be executed on targets.
102 102 """
103 103
104 104 def push(namespace, targets='all'):
105 105 """Push dict namespace into the user's namespace on targets.
106 106
107 107 See the class docstring for information about targets and possible
108 108 exceptions this method can raise.
109 109
110 110 :Parameters:
111 111 namespace : dict
112 112 Dict of key value pairs to be put into the user's namespace.
113 113 """
114 114
115 115 def pull(keys, targets='all'):
116 116 """Pull values out of the user's namespace on targets by keys.
117 117
118 118 See the class docstring for information about targets and possible
119 119 exceptions this method can raise.
120 120
121 121 :Parameters:
122 122 keys : tuple of strings
123 123 Sequence of keys to be pulled from user's namespace.
124 124 """
125 125
126 126 def push_function(namespace, targets='all'):
127 127 """"""
128 128
129 129 def pull_function(keys, targets='all'):
130 130 """"""
131 131
132 132 def get_result(i=None, targets='all'):
133 133 """Get the result for command i from targets.
134 134
135 135 See the class docstring for information about targets and possible
136 136 exceptions this method can raise.
137 137
138 138 :Parameters:
139 139 i : int or None
140 140 Command index or None to indicate most recent command.
141 141 """
142 142
143 143 def reset(targets='all'):
144 144 """Reset targets.
145 145
146 146 This clears the users namespace of the Engines, but won't cause
147 147 modules to be reloaded.
148 148 """
149 149
150 150 def keys(targets='all'):
151 151 """Get variable names defined in user's namespace on targets."""
152 152
153 153 def kill(controller=False, targets='all'):
154 154 """Kill the targets Engines and possibly the controller.
155 155
156 156 :Parameters:
157 157 controller : boolean
158 158 Should the controller be killed as well. If so all the
159 159 engines will be killed first no matter what targets is.
160 160 """
161 161
162 162 def push_serialized(namespace, targets='all'):
163 163 """Push a namespace of Serialized objects to targets.
164 164
165 165 :Parameters:
166 166 namespace : dict
167 167 A dict whose keys are the variable names and whose values
168 168 are serialized version of the objects.
169 169 """
170 170
171 171 def pull_serialized(keys, targets='all'):
172 172 """Pull Serialized objects by keys from targets.
173 173
174 174 :Parameters:
175 175 keys : tuple of strings
176 176 Sequence of variable names to pull as serialized objects.
177 177 """
178 178
179 179 def clear_queue(targets='all'):
180 180 """Clear the queue of pending command for targets."""
181 181
182 182 def queue_status(targets='all'):
183 183 """Get the status of the queue on the targets."""
184 184
185 185 def set_properties(properties, targets='all'):
186 186 """set properties by key and value"""
187 187
188 188 def get_properties(keys=None, targets='all'):
189 189 """get a list of properties by `keys`, if no keys specified, get all"""
190 190
191 191 def del_properties(keys, targets='all'):
192 192 """delete properties by `keys`"""
193 193
194 194 def has_properties(keys, targets='all'):
195 195 """get a list of bool values for whether `properties` has `keys`"""
196 196
197 197 def clear_properties(targets='all'):
198 198 """clear the properties dict"""
199 199
200 200
201 201 class IMultiEngine(IEngineMultiplexer):
202 202 """A controller that exposes an explicit interface to all of its engines.
203 203
204 204 This is the primary interface for interactive usage.
205 205 """
206 206
207 207 def get_ids():
208 208 """Return list of currently registered ids.
209 209
210 210 :Returns: A Deferred to a list of registered engine ids.
211 211 """
212 212
213 213
214 214
215 215 #-------------------------------------------------------------------------------
216 216 # Implementation of the core MultiEngine classes
217 217 #-------------------------------------------------------------------------------
218 218
219 219 class MultiEngine(ControllerAdapterBase):
220 220 """The representation of a ControllerService as a IMultiEngine.
221 221
222 222 Although it is not implemented currently, this class would be where a
223 223 client/notification API is implemented. It could inherit from something
224 224 like results.NotifierParent and then use the notify method to send
225 225 notifications.
226 226 """
227 227
228 228 implements(IMultiEngine)
229 229
230 230 def __init(self, controller):
231 231 ControllerAdapterBase.__init__(self, controller)
232 232
233 233 #---------------------------------------------------------------------------
234 234 # Helper methods
235 235 #---------------------------------------------------------------------------
236 236
237 237 def engineList(self, targets):
238 238 """Parse the targets argument into a list of valid engine objects.
239 239
240 240 :Parameters:
241 241 targets : int, list of ints or 'all'
242 242 The targets argument to be parsed.
243 243
244 244 :Returns: List of engine objects.
245 245
246 246 :Exception:
247 247 InvalidEngineID
248 248 If targets is not valid or if an engine is not registered.
249 249 """
250 250 if isinstance(targets, int):
251 251 if targets not in self.engines.keys():
252 252 log.msg("Engine with id %i is not registered" % targets)
253 253 raise error.InvalidEngineID("Engine with id %i is not registered" % targets)
254 254 else:
255 255 return [self.engines[targets]]
256 256 elif isinstance(targets, (list, tuple)):
257 257 for id in targets:
258 258 if id not in self.engines.keys():
259 259 log.msg("Engine with id %r is not registered" % id)
260 260 raise error.InvalidEngineID("Engine with id %r is not registered" % id)
261 261 return map(self.engines.get, targets)
262 262 elif targets == 'all':
263 263 eList = self.engines.values()
264 264 if len(eList) == 0:
265 265 msg = """There are no engines registered.
266 266 Check the logs in ~/.ipython/log if you think there should have been."""
267 267 raise error.NoEnginesRegistered(msg)
268 268 else:
269 269 return eList
270 270 else:
271 271 raise error.InvalidEngineID("targets argument is not an int, list of ints or 'all': %r"%targets)
272 272
273 273 def _performOnEngines(self, methodName, *args, **kwargs):
274 274 """Calls a method on engines and returns deferred to list of results.
275 275
276 276 :Parameters:
277 277 methodName : str
278 278 Name of the method to be called.
279 279 targets : int, list of ints, 'all'
280 280 The targets argument to be parsed into a list of engine objects.
281 281 args
282 282 The positional keyword arguments to be passed to the engines.
283 283 kwargs
284 284 The keyword arguments passed to the method
285 285
286 286 :Returns: List of deferreds to the results on each engine
287 287
288 288 :Exception:
289 289 InvalidEngineID
290 290 If the targets argument is bad in any way.
291 291 AttributeError
292 292 If the method doesn't exist on one of the engines.
293 293 """
294 294 targets = kwargs.pop('targets')
295 295 log.msg("Performing %s on %r" % (methodName, targets))
296 296 # log.msg("Performing %s(%r, %r) on %r" % (methodName, args, kwargs, targets))
297 297 # This will and should raise if targets is not valid!
298 298 engines = self.engineList(targets)
299 299 dList = []
300 300 for e in engines:
301 301 meth = getattr(e, methodName, None)
302 302 if meth is not None:
303 303 dList.append(meth(*args, **kwargs))
304 304 else:
305 305 raise AttributeError("Engine %i does not have method %s" % (e.id, methodName))
306 306 return dList
307 307
308 308 def _performOnEnginesAndGatherBoth(self, methodName, *args, **kwargs):
309 309 """Called _performOnEngines and wraps result/exception into deferred."""
310 310 try:
311 311 dList = self._performOnEngines(methodName, *args, **kwargs)
312 312 except (error.InvalidEngineID, AttributeError, KeyError, error.NoEnginesRegistered):
313 313 return defer.fail(failure.Failure())
314 314 else:
315 315 # Having fireOnOneErrback is causing problems with the determinacy
316 316 # of the system. Basically, once a single engine has errbacked, this
317 317 # method returns. In some cases, this will cause client to submit
318 318 # another command. Because the previous command is still running
319 319 # on some engines, this command will be queued. When those commands
320 320 # then errback, the second command will raise QueueCleared. Ahhh!
321 321 d = gatherBoth(dList,
322 322 fireOnOneErrback=0,
323 323 consumeErrors=1,
324 324 logErrors=0)
325 325 d.addCallback(error.collect_exceptions, methodName)
326 326 return d
327 327
328 328 #---------------------------------------------------------------------------
329 329 # General IMultiEngine methods
330 330 #---------------------------------------------------------------------------
331 331
332 332 def get_ids(self):
333 333 return defer.succeed(self.engines.keys())
334 334
335 335 #---------------------------------------------------------------------------
336 336 # IEngineMultiplexer methods
337 337 #---------------------------------------------------------------------------
338 338
339 339 def execute(self, lines, targets='all'):
340 340 return self._performOnEnginesAndGatherBoth('execute', lines, targets=targets)
341 341
342 342 def push(self, ns, targets='all'):
343 343 return self._performOnEnginesAndGatherBoth('push', ns, targets=targets)
344 344
345 345 def pull(self, keys, targets='all'):
346 346 return self._performOnEnginesAndGatherBoth('pull', keys, targets=targets)
347 347
348 348 def push_function(self, ns, targets='all'):
349 349 return self._performOnEnginesAndGatherBoth('push_function', ns, targets=targets)
350 350
351 351 def pull_function(self, keys, targets='all'):
352 352 return self._performOnEnginesAndGatherBoth('pull_function', keys, targets=targets)
353 353
354 354 def get_result(self, i=None, targets='all'):
355 355 return self._performOnEnginesAndGatherBoth('get_result', i, targets=targets)
356 356
357 357 def reset(self, targets='all'):
358 358 return self._performOnEnginesAndGatherBoth('reset', targets=targets)
359 359
360 360 def keys(self, targets='all'):
361 361 return self._performOnEnginesAndGatherBoth('keys', targets=targets)
362 362
363 363 def kill(self, controller=False, targets='all'):
364 364 if controller:
365 365 targets = 'all'
366 366 d = self._performOnEnginesAndGatherBoth('kill', targets=targets)
367 367 if controller:
368 368 log.msg("Killing controller")
369 369 d.addCallback(lambda _: reactor.callLater(2.0, reactor.stop))
370 370 # Consume any weird stuff coming back
371 371 d.addBoth(lambda _: None)
372 372 return d
373 373
374 374 def push_serialized(self, namespace, targets='all'):
375 375 for k, v in namespace.iteritems():
376 376 log.msg("Pushed object %s is %f MB" % (k, v.getDataSize()))
377 377 d = self._performOnEnginesAndGatherBoth('push_serialized', namespace, targets=targets)
378 378 return d
379 379
380 380 def pull_serialized(self, keys, targets='all'):
381 381 try:
382 382 dList = self._performOnEngines('pull_serialized', keys, targets=targets)
383 383 except (error.InvalidEngineID, AttributeError, error.NoEnginesRegistered):
384 384 return defer.fail(failure.Failure())
385 385 else:
386 386 for d in dList:
387 387 d.addCallback(self._logSizes)
388 388 d = gatherBoth(dList,
389 389 fireOnOneErrback=0,
390 390 consumeErrors=1,
391 391 logErrors=0)
392 392 d.addCallback(error.collect_exceptions, 'pull_serialized')
393 393 return d
394 394
395 395 def _logSizes(self, listOfSerialized):
396 396 if isinstance(listOfSerialized, (list, tuple)):
397 397 for s in listOfSerialized:
398 398 log.msg("Pulled object is %f MB" % s.getDataSize())
399 399 else:
400 400 log.msg("Pulled object is %f MB" % listOfSerialized.getDataSize())
401 401 return listOfSerialized
402 402
403 403 def clear_queue(self, targets='all'):
404 404 return self._performOnEnginesAndGatherBoth('clear_queue', targets=targets)
405 405
406 406 def queue_status(self, targets='all'):
407 407 log.msg("Getting queue status on %r" % targets)
408 408 try:
409 409 engines = self.engineList(targets)
410 410 except (error.InvalidEngineID, AttributeError, error.NoEnginesRegistered):
411 411 return defer.fail(failure.Failure())
412 412 else:
413 413 dList = []
414 414 for e in engines:
415 415 dList.append(e.queue_status().addCallback(lambda s:(e.id, s)))
416 416 d = gatherBoth(dList,
417 417 fireOnOneErrback=0,
418 418 consumeErrors=1,
419 419 logErrors=0)
420 420 d.addCallback(error.collect_exceptions, 'queue_status')
421 421 return d
422 422
423 423 def get_properties(self, keys=None, targets='all'):
424 424 log.msg("Getting properties on %r" % targets)
425 425 try:
426 426 engines = self.engineList(targets)
427 427 except (error.InvalidEngineID, AttributeError, error.NoEnginesRegistered):
428 428 return defer.fail(failure.Failure())
429 429 else:
430 430 dList = [e.get_properties(keys) for e in engines]
431 431 d = gatherBoth(dList,
432 432 fireOnOneErrback=0,
433 433 consumeErrors=1,
434 434 logErrors=0)
435 435 d.addCallback(error.collect_exceptions, 'get_properties')
436 436 return d
437 437
438 438 def set_properties(self, properties, targets='all'):
439 439 log.msg("Setting properties on %r" % targets)
440 440 try:
441 441 engines = self.engineList(targets)
442 442 except (error.InvalidEngineID, AttributeError, error.NoEnginesRegistered):
443 443 return defer.fail(failure.Failure())
444 444 else:
445 445 dList = [e.set_properties(properties) for e in engines]
446 446 d = gatherBoth(dList,
447 447 fireOnOneErrback=0,
448 448 consumeErrors=1,
449 449 logErrors=0)
450 450 d.addCallback(error.collect_exceptions, 'set_properties')
451 451 return d
452 452
453 453 def has_properties(self, keys, targets='all'):
454 454 log.msg("Checking properties on %r" % targets)
455 455 try:
456 456 engines = self.engineList(targets)
457 457 except (error.InvalidEngineID, AttributeError, error.NoEnginesRegistered):
458 458 return defer.fail(failure.Failure())
459 459 else:
460 460 dList = [e.has_properties(keys) for e in engines]
461 461 d = gatherBoth(dList,
462 462 fireOnOneErrback=0,
463 463 consumeErrors=1,
464 464 logErrors=0)
465 465 d.addCallback(error.collect_exceptions, 'has_properties')
466 466 return d
467 467
468 468 def del_properties(self, keys, targets='all'):
469 469 log.msg("Deleting properties on %r" % targets)
470 470 try:
471 471 engines = self.engineList(targets)
472 472 except (error.InvalidEngineID, AttributeError, error.NoEnginesRegistered):
473 473 return defer.fail(failure.Failure())
474 474 else:
475 475 dList = [e.del_properties(keys) for e in engines]
476 476 d = gatherBoth(dList,
477 477 fireOnOneErrback=0,
478 478 consumeErrors=1,
479 479 logErrors=0)
480 480 d.addCallback(error.collect_exceptions, 'del_properties')
481 481 return d
482 482
483 483 def clear_properties(self, targets='all'):
484 484 log.msg("Clearing properties on %r" % targets)
485 485 try:
486 486 engines = self.engineList(targets)
487 487 except (error.InvalidEngineID, AttributeError, error.NoEnginesRegistered):
488 488 return defer.fail(failure.Failure())
489 489 else:
490 490 dList = [e.clear_properties() for e in engines]
491 491 d = gatherBoth(dList,
492 492 fireOnOneErrback=0,
493 493 consumeErrors=1,
494 494 logErrors=0)
495 495 d.addCallback(error.collect_exceptions, 'clear_properties')
496 496 return d
497 497
498 498
499 499 components.registerAdapter(MultiEngine,
500 500 IControllerBase,
501 501 IMultiEngine)
502 502
503 503
504 504 #-------------------------------------------------------------------------------
505 505 # Interfaces for the Synchronous MultiEngine
506 506 #-------------------------------------------------------------------------------
507 507
508 508 class ISynchronousEngineMultiplexer(Interface):
509 509 pass
510 510
511 511
512 512 class ISynchronousMultiEngine(ISynchronousEngineMultiplexer):
513 513 """Synchronous, two-phase version of IMultiEngine.
514 514
515 515 Methods in this interface are identical to those of IMultiEngine, but they
516 516 take one additional argument:
517 517
518 518 execute(lines, targets='all') -> execute(lines, targets='all', block=True)
519 519
520 520 :Parameters:
521 521 block : boolean
522 522 Should the method return a deferred to a deferredID or the
523 523 actual result. If block=False a deferred to a deferredID is
524 524 returned and the user must call `get_pending_deferred` at a later
525 525 point. If block=True, a deferred to the actual result comes back.
526 526 """
527 527 def get_pending_deferred(deferredID, block=True):
528 528 """"""
529 529
530 530 def clear_pending_deferreds():
531 531 """"""
532 532
533 533
534 534 #-------------------------------------------------------------------------------
535 535 # Implementation of the Synchronous MultiEngine
536 536 #-------------------------------------------------------------------------------
537 537
538 538 class SynchronousMultiEngine(PendingDeferredManager):
539 539 """Adapt an `IMultiEngine` -> `ISynchronousMultiEngine`
540 540
541 541 Warning, this class uses a decorator that currently uses **kwargs.
542 542 Because of this block must be passed as a kwarg, not positionally.
543 543 """
544 544
545 545 implements(ISynchronousMultiEngine)
546 546
547 547 def __init__(self, multiengine):
548 548 self.multiengine = multiengine
549 549 PendingDeferredManager.__init__(self)
550 550
551 551 #---------------------------------------------------------------------------
552 552 # Decorated pending deferred methods
553 553 #---------------------------------------------------------------------------
554 554
555 555 @two_phase
556 556 def execute(self, lines, targets='all'):
557 557 d = self.multiengine.execute(lines, targets)
558 558 return d
559 559
560 560 @two_phase
561 561 def push(self, namespace, targets='all'):
562 562 return self.multiengine.push(namespace, targets)
563 563
564 564 @two_phase
565 565 def pull(self, keys, targets='all'):
566 566 d = self.multiengine.pull(keys, targets)
567 567 return d
568 568
569 569 @two_phase
570 570 def push_function(self, namespace, targets='all'):
571 571 return self.multiengine.push_function(namespace, targets)
572 572
573 573 @two_phase
574 574 def pull_function(self, keys, targets='all'):
575 575 d = self.multiengine.pull_function(keys, targets)
576 576 return d
577 577
578 578 @two_phase
579 579 def get_result(self, i=None, targets='all'):
580 580 return self.multiengine.get_result(i, targets='all')
581 581
582 582 @two_phase
583 583 def reset(self, targets='all'):
584 584 return self.multiengine.reset(targets)
585 585
586 586 @two_phase
587 587 def keys(self, targets='all'):
588 588 return self.multiengine.keys(targets)
589 589
590 590 @two_phase
591 591 def kill(self, controller=False, targets='all'):
592 592 return self.multiengine.kill(controller, targets)
593 593
594 594 @two_phase
595 595 def push_serialized(self, namespace, targets='all'):
596 596 return self.multiengine.push_serialized(namespace, targets)
597 597
598 598 @two_phase
599 599 def pull_serialized(self, keys, targets='all'):
600 600 return self.multiengine.pull_serialized(keys, targets)
601 601
602 602 @two_phase
603 603 def clear_queue(self, targets='all'):
604 604 return self.multiengine.clear_queue(targets)
605 605
606 606 @two_phase
607 607 def queue_status(self, targets='all'):
608 608 return self.multiengine.queue_status(targets)
609 609
610 610 @two_phase
611 611 def set_properties(self, properties, targets='all'):
612 612 return self.multiengine.set_properties(properties, targets)
613 613
614 614 @two_phase
615 615 def get_properties(self, keys=None, targets='all'):
616 616 return self.multiengine.get_properties(keys, targets)
617 617
618 618 @two_phase
619 619 def has_properties(self, keys, targets='all'):
620 620 return self.multiengine.has_properties(keys, targets)
621 621
622 622 @two_phase
623 623 def del_properties(self, keys, targets='all'):
624 624 return self.multiengine.del_properties(keys, targets)
625 625
626 626 @two_phase
627 627 def clear_properties(self, targets='all'):
628 628 return self.multiengine.clear_properties(targets)
629 629
630 630 #---------------------------------------------------------------------------
631 631 # IMultiEngine methods
632 632 #---------------------------------------------------------------------------
633 633
634 634 def get_ids(self):
635 635 """Return a list of registered engine ids.
636 636
637 637 Never use the two phase block/non-block stuff for this.
638 638 """
639 639 return self.multiengine.get_ids()
640 640
641 641
642 642 components.registerAdapter(SynchronousMultiEngine, IMultiEngine, ISynchronousMultiEngine)
643 643
644 644
645 645 #-------------------------------------------------------------------------------
646 646 # Various high-level interfaces that can be used as MultiEngine mix-ins
647 647 #-------------------------------------------------------------------------------
648 648
649 649 #-------------------------------------------------------------------------------
650 650 # IMultiEngineCoordinator
651 651 #-------------------------------------------------------------------------------
652 652
653 653 class IMultiEngineCoordinator(Interface):
654 654 """Methods that work on multiple engines explicitly."""
655 655
656 def scatter(key, seq, style='basic', flatten=False, targets='all'):
657 """Partition and distribute a sequence to targets.
656 def scatter(key, seq, dist='b', flatten=False, targets='all'):
657 """Partition and distribute a sequence to targets."""
658 658
659 :Parameters:
660 key : str
661 The variable name to call the scattered sequence.
662 seq : list, tuple, array
663 The sequence to scatter. The type should be preserved.
664 style : string
665 A specification of how the sequence is partitioned. Currently
666 only 'basic' is implemented.
667 flatten : boolean
668 Should single element sequences be converted to scalars.
669 """
670
671 def gather(key, style='basic', targets='all'):
672 """Gather object key from targets.
659 def gather(key, dist='b', targets='all'):
660 """Gather object key from targets."""
673 661
674 :Parameters:
675 key : string
676 The name of a sequence on the targets to gather.
677 style : string
678 A specification of how the sequence is partitioned. Currently
679 only 'basic' is implemented.
662 def raw_map(func, seqs, dist='b', targets='all'):
680 663 """
681
682 def map(func, seq, style='basic', targets='all'):
683 """A parallelized version of Python's builtin map.
664 A parallelized version of Python's builtin `map` function.
684 665
685 This function implements the following pattern:
666 This has a slightly different syntax than the builtin `map`.
667 This is needed because we need to have keyword arguments and thus
668 can't use *args to capture all the sequences. Instead, they must
669 be passed in a list or tuple.
686 670
687 1. The sequence seq is scattered to the given targets.
688 2. map(functionSource, seq) is called on each engine.
689 3. The resulting sequences are gathered back to the local machine.
690
691 :Parameters:
692 targets : int, list or 'all'
693 The engine ids the action will apply to. Call `get_ids` to see
694 a list of currently available engines.
695 func : str, function
696 An actual function object or a Python string that names a
697 callable defined on the engines.
698 seq : list, tuple or numpy array
699 The local sequence to be scattered.
700 style : str
701 Only 'basic' is supported for now.
702
703 :Returns: A list of len(seq) with functionSource called on each element
704 of seq.
705
706 Example
707 =======
671 The equivalence is:
708 672
709 >>> rc.mapAll('lambda x: x*x', range(10000))
710 [0,2,4,9,25,36,...]
673 raw_map(func, seqs) -> map(func, seqs[0], seqs[1], ...)
674
675 Most users will want to use parallel functions or the `mapper`
676 and `map` methods for an API that follows that of the builtin
677 `map`.
711 678 """
712 679
713 680
714 681 class ISynchronousMultiEngineCoordinator(IMultiEngineCoordinator):
715 682 """Methods that work on multiple engines explicitly."""
716 pass
683
684 def scatter(key, seq, dist='b', flatten=False, targets='all', block=True):
685 """Partition and distribute a sequence to targets."""
686
687 def gather(key, dist='b', targets='all', block=True):
688 """Gather object key from targets"""
689
690 def raw_map(func, seqs, dist='b', targets='all', block=True):
691 """
692 A parallelized version of Python's builtin map.
693
694 This has a slightly different syntax than the builtin `map`.
695 This is needed because we need to have keyword arguments and thus
696 can't use *args to capture all the sequences. Instead, they must
697 be passed in a list or tuple.
698
699 raw_map(func, seqs) -> map(func, seqs[0], seqs[1], ...)
700
701 Most users will want to use parallel functions or the `mapper`
702 and `map` methods for an API that follows that of the builtin
703 `map`.
704 """
717 705
718 706
719 707 #-------------------------------------------------------------------------------
720 708 # IMultiEngineExtras
721 709 #-------------------------------------------------------------------------------
722 710
723 711 class IMultiEngineExtras(Interface):
724 712
725 def zip_pull(targets, *keys):
726 """Pull, but return results in a different format from `pull`.
713 def zip_pull(targets, keys):
714 """
715 Pull, but return results in a different format from `pull`.
727 716
728 717 This method basically returns zip(pull(targets, *keys)), with a few
729 718 edge cases handled differently. Users of chainsaw will find this format
730 719 familiar.
731
732 :Parameters:
733 targets : int, list or 'all'
734 The engine ids the action will apply to. Call `get_ids` to see
735 a list of currently available engines.
736 keys: list or tuple of str
737 A list of variable names as string of the Python objects to be pulled
738 back to the client.
739
740 :Returns: A list of pulled Python objects for each target.
741 720 """
742 721
743 722 def run(targets, fname):
744 """Run a .py file on targets.
745
746 :Parameters:
747 targets : int, list or 'all'
748 The engine ids the action will apply to. Call `get_ids` to see
749 a list of currently available engines.
750 fname : str
751 The filename of a .py file on the local system to be sent to and run
752 on the engines.
753 block : boolean
754 Should I block or not. If block=True, wait for the action to
755 complete and return the result. If block=False, return a
756 `PendingResult` object that can be used to later get the
757 result. If block is not specified, the block attribute
758 will be used instead.
759 """
723 """Run a .py file on targets."""
760 724
761 725
762 726 class ISynchronousMultiEngineExtras(IMultiEngineExtras):
763 pass
764
727 def zip_pull(targets, keys, block=True):
728 """
729 Pull, but return results in a different format from `pull`.
730
731 This method basically returns zip(pull(targets, *keys)), with a few
732 edge cases handled differently. Users of chainsaw will find this format
733 familiar.
734 """
735
736 def run(targets, fname, block=True):
737 """Run a .py file on targets."""
765 738
766 739 #-------------------------------------------------------------------------------
767 740 # The full MultiEngine interface
768 741 #-------------------------------------------------------------------------------
769 742
770 743 class IFullMultiEngine(IMultiEngine,
771 744 IMultiEngineCoordinator,
772 745 IMultiEngineExtras):
773 746 pass
774 747
775 748
776 749 class IFullSynchronousMultiEngine(ISynchronousMultiEngine,
777 750 ISynchronousMultiEngineCoordinator,
778 751 ISynchronousMultiEngineExtras):
779 752 pass
780 753
@@ -1,840 +1,896 b''
1 1 # encoding: utf-8
2 2 # -*- test-case-name: IPython.kernel.test.test_multiengineclient -*-
3 3
4 4 """General Classes for IMultiEngine clients."""
5 5
6 6 __docformat__ = "restructuredtext en"
7 7
8 8 #-------------------------------------------------------------------------------
9 9 # Copyright (C) 2008 The IPython Development Team
10 10 #
11 11 # Distributed under the terms of the BSD License. The full license is in
12 12 # the file COPYING, distributed as part of this software.
13 13 #-------------------------------------------------------------------------------
14 14
15 15 #-------------------------------------------------------------------------------
16 16 # Imports
17 17 #-------------------------------------------------------------------------------
18 18
19 19 import sys
20 20 import cPickle as pickle
21 21 from types import FunctionType
22 22 import linecache
23 23
24 24 from twisted.internet import reactor
25 25 from twisted.python import components, log
26 26 from twisted.python.failure import Failure
27 27 from zope.interface import Interface, implements, Attribute
28 28
29 29 from IPython.ColorANSI import TermColors
30 30
31 31 from IPython.kernel.twistedutil import blockingCallFromThread
32 32 from IPython.kernel import error
33 33 from IPython.kernel.parallelfunction import ParallelFunction
34 from IPython.kernel.mapper import (
35 MultiEngineMapper,
36 IMultiEngineMapperFactory,
37 IMapper
38 )
34 39 from IPython.kernel import map as Map
35 40 from IPython.kernel import multiengine as me
36 41 from IPython.kernel.multiengine import (IFullMultiEngine,
37 42 IFullSynchronousMultiEngine)
38 43
39 44
40 45 #-------------------------------------------------------------------------------
41 46 # Pending Result things
42 47 #-------------------------------------------------------------------------------
43 48
44 49 class IPendingResult(Interface):
45 50 """A representation of a result that is pending.
46 51
47 52 This class is similar to Twisted's `Deferred` object, but is designed to be
48 53 used in a synchronous context.
49 54 """
50 55
51 56 result_id=Attribute("ID of the deferred on the other side")
52 57 client=Attribute("A client that I came from")
53 58 r=Attribute("An attribute that is a property that calls and returns get_result")
54 59
55 60 def get_result(default=None, block=True):
56 61 """
57 62 Get a result that is pending.
58 63
59 64 :Parameters:
60 65 default
61 66 The value to return if the result is not ready.
62 67 block : boolean
63 68 Should I block for the result.
64 69
65 70 :Returns: The actual result or the default value.
66 71 """
67 72
68 73 def add_callback(f, *args, **kwargs):
69 74 """
70 75 Add a callback that is called with the result.
71 76
72 77 If the original result is foo, adding a callback will cause
73 78 f(foo, *args, **kwargs) to be returned instead. If multiple
74 79 callbacks are registered, they are chained together: the result of
75 80 one is passed to the next and so on.
76 81
77 82 Unlike Twisted's Deferred object, there is no errback chain. Thus
78 83 any exception raised will not be caught and handled. User must
79 84 catch these by hand when calling `get_result`.
80 85 """
81 86
82 87
83 88 class PendingResult(object):
84 89 """A representation of a result that is not yet ready.
85 90
86 91 A user should not create a `PendingResult` instance by hand.
87 92
88 93 Methods
89 94 =======
90 95
91 96 * `get_result`
92 97 * `add_callback`
93 98
94 99 Properties
95 100 ==========
96 101 * `r`
97 102 """
98 103
99 104 def __init__(self, client, result_id):
100 105 """Create a PendingResult with a result_id and a client instance.
101 106
102 107 The client should implement `_getPendingResult(result_id, block)`.
103 108 """
104 109 self.client = client
105 110 self.result_id = result_id
106 111 self.called = False
107 112 self.raised = False
108 113 self.callbacks = []
109 114
110 115 def get_result(self, default=None, block=True):
111 116 """Get a result that is pending.
112 117
113 118 This method will connect to an IMultiEngine adapted controller
114 119 and see if the result is ready. If the action triggers an exception
115 120 raise it and record it. This method records the result/exception once it is
116 121 retrieved. Calling `get_result` again will get this cached result or will
117 122 re-raise the exception. The .r attribute is a property that calls
118 123 `get_result` with block=True.
119 124
120 125 :Parameters:
121 126 default
122 127 The value to return if the result is not ready.
123 128 block : boolean
124 129 Should I block for the result.
125 130
126 131 :Returns: The actual result or the default value.
127 132 """
128 133
129 134 if self.called:
130 135 if self.raised:
131 136 raise self.result[0], self.result[1], self.result[2]
132 137 else:
133 138 return self.result
134 139 try:
135 140 result = self.client.get_pending_deferred(self.result_id, block)
136 141 except error.ResultNotCompleted:
137 142 return default
138 143 except:
139 144 # Reraise other error, but first record them so they can be reraised
140 145 # later if .r or get_result is called again.
141 146 self.result = sys.exc_info()
142 147 self.called = True
143 148 self.raised = True
144 149 raise
145 150 else:
146 151 for cb in self.callbacks:
147 152 result = cb[0](result, *cb[1], **cb[2])
148 153 self.result = result
149 154 self.called = True
150 155 return result
151 156
152 157 def add_callback(self, f, *args, **kwargs):
153 158 """Add a callback that is called with the result.
154 159
155 160 If the original result is result, adding a callback will cause
156 161 f(result, *args, **kwargs) to be returned instead. If multiple
157 162 callbacks are registered, they are chained together: the result of
158 163 one is passed to the next and so on.
159 164
160 165 Unlike Twisted's Deferred object, there is no errback chain. Thus
161 166 any exception raised will not be caught and handled. User must
162 167 catch these by hand when calling `get_result`.
163 168 """
164 169 assert callable(f)
165 170 self.callbacks.append((f, args, kwargs))
166 171
167 172 def __cmp__(self, other):
168 173 if self.result_id < other.result_id:
169 174 return -1
170 175 else:
171 176 return 1
172 177
173 178 def _get_r(self):
174 179 return self.get_result(block=True)
175 180
176 181 r = property(_get_r)
177 182 """This property is a shortcut to a `get_result(block=True)`."""
178 183
179 184
180 185 #-------------------------------------------------------------------------------
181 186 # Pretty printing wrappers for certain lists
182 187 #-------------------------------------------------------------------------------
183 188
184 189 class ResultList(list):
185 190 """A subclass of list that pretty prints the output of `execute`/`get_result`."""
186 191
187 192 def __repr__(self):
188 193 output = []
189 blue = TermColors.Blue
190 normal = TermColors.Normal
191 red = TermColors.Red
192 green = TermColors.Green
194 # These colored prompts were not working on Windows
195 if sys.platform == 'win32':
196 blue = normal = red = green = ''
197 else:
198 blue = TermColors.Blue
199 normal = TermColors.Normal
200 red = TermColors.Red
201 green = TermColors.Green
193 202 output.append("<Results List>\n")
194 203 for cmd in self:
195 204 if isinstance(cmd, Failure):
196 205 output.append(cmd)
197 206 else:
198 207 target = cmd.get('id',None)
199 208 cmd_num = cmd.get('number',None)
200 209 cmd_stdin = cmd.get('input',{}).get('translated','No Input')
201 210 cmd_stdout = cmd.get('stdout', None)
202 211 cmd_stderr = cmd.get('stderr', None)
203 212 output.append("%s[%i]%s In [%i]:%s %s\n" % \
204 213 (green, target,
205 214 blue, cmd_num, normal, cmd_stdin))
206 215 if cmd_stdout:
207 216 output.append("%s[%i]%s Out[%i]:%s %s\n" % \
208 217 (green, target,
209 218 red, cmd_num, normal, cmd_stdout))
210 219 if cmd_stderr:
211 220 output.append("%s[%i]%s Err[%i]:\n%s %s" % \
212 221 (green, target,
213 222 red, cmd_num, normal, cmd_stderr))
214 223 return ''.join(output)
215 224
216 225
217 226 def wrapResultList(result):
218 227 """A function that wraps the output of `execute`/`get_result` -> `ResultList`."""
219 228 if len(result) == 0:
220 229 result = [result]
221 230 return ResultList(result)
222 231
223 232
224 233 class QueueStatusList(list):
225 234 """A subclass of list that pretty prints the output of `queue_status`."""
226 235
227 236 def __repr__(self):
228 237 output = []
229 238 output.append("<Queue Status List>\n")
230 239 for e in self:
231 240 output.append("Engine: %s\n" % repr(e[0]))
232 241 output.append(" Pending: %s\n" % repr(e[1]['pending']))
233 242 for q in e[1]['queue']:
234 243 output.append(" Command: %s\n" % repr(q))
235 244 return ''.join(output)
236 245
237 246
238 247 #-------------------------------------------------------------------------------
239 248 # InteractiveMultiEngineClient
240 249 #-------------------------------------------------------------------------------
241 250
242 251 class InteractiveMultiEngineClient(object):
243 252 """A mixin class that add a few methods to a multiengine client.
244 253
245 254 The methods in this mixin class are designed for interactive usage.
246 255 """
247 256
248 257 def activate(self):
249 258 """Make this `MultiEngineClient` active for parallel magic commands.
250 259
251 260 IPython has a magic command syntax to work with `MultiEngineClient` objects.
252 261 In a given IPython session there is a single active one. While
253 262 there can be many `MultiEngineClient` created and used by the user,
254 263 there is only one active one. The active `MultiEngineClient` is used whenever
255 264 the magic commands %px and %autopx are used.
256 265
257 266 The activate() method is called on a given `MultiEngineClient` to make it
258 267 active. Once this has been done, the magic commands can be used.
259 268 """
260 269
261 270 try:
262 271 __IPYTHON__.activeController = self
263 272 except NameError:
264 273 print "The IPython Controller magics only work within IPython."
265 274
266 275 def __setitem__(self, key, value):
267 276 """Add a dictionary interface for pushing/pulling.
268 277
269 278 This functions as a shorthand for `push`.
270 279
271 280 :Parameters:
272 281 key : str
273 282 What to call the remote object.
274 283 value : object
275 284 The local Python object to push.
276 285 """
277 286 targets, block = self._findTargetsAndBlock()
278 287 return self.push({key:value}, targets=targets, block=block)
279 288
280 289 def __getitem__(self, key):
281 290 """Add a dictionary interface for pushing/pulling.
282 291
283 292 This functions as a shorthand to `pull`.
284 293
285 294 :Parameters:
286 295 - `key`: A string representing the key.
287 296 """
288 297 if isinstance(key, str):
289 298 targets, block = self._findTargetsAndBlock()
290 299 return self.pull(key, targets=targets, block=block)
291 300 else:
292 301 raise TypeError("__getitem__ only takes strs")
293 302
294 303 def __len__(self):
295 304 """Return the number of available engines."""
296 305 return len(self.get_ids())
297
298 def parallelize(self, func, targets=None, block=None):
299 """Build a `ParallelFunction` object for functionName on engines.
300
301 The returned object will implement a parallel version of functionName
302 that takes a local sequence as its only argument and calls (in
303 parallel) functionName on each element of that sequence. The
304 `ParallelFunction` object has a `targets` attribute that controls
305 which engines the function is run on.
306
307 :Parameters:
308 targets : int, list or 'all'
309 The engine ids the action will apply to. Call `get_ids` to see
310 a list of currently available engines.
311 functionName : str
312 A Python string that names a callable defined on the engines.
313
314 :Returns: A `ParallelFunction` object.
315
316 Examples
317 ========
318
319 >>> psin = rc.parallelize('all','lambda x:sin(x)')
320 >>> psin(range(10000))
321 [0,2,4,9,25,36,...]
322 """
323 targets, block = self._findTargetsAndBlock(targets, block)
324 return ParallelFunction(func, self, targets, block)
325
306
326 307 #---------------------------------------------------------------------------
327 308 # Make this a context manager for with
328 309 #---------------------------------------------------------------------------
329 310
330 311 def findsource_file(self,f):
331 312 linecache.checkcache()
332 313 s = findsource(f.f_code)
333 314 lnum = f.f_lineno
334 315 wsource = s[0][f.f_lineno:]
335 316 return strip_whitespace(wsource)
336 317
337 318 def findsource_ipython(self,f):
338 319 from IPython import ipapi
339 320 self.ip = ipapi.get()
340 321 wsource = [l+'\n' for l in
341 322 self.ip.IP.input_hist_raw[-1].splitlines()[1:]]
342 323 return strip_whitespace(wsource)
343 324
344 325 def __enter__(self):
345 326 f = sys._getframe(1)
346 327 local_ns = f.f_locals
347 328 global_ns = f.f_globals
348 329 if f.f_code.co_filename == '<ipython console>':
349 330 s = self.findsource_ipython(f)
350 331 else:
351 332 s = self.findsource_file(f)
352 333
353 334 self._with_context_result = self.execute(s)
354 335
355 336 def __exit__ (self, etype, value, tb):
356 337 if issubclass(etype,error.StopLocalExecution):
357 338 return True
358 339
359 340
360 341 def remote():
361 342 m = 'Special exception to stop local execution of parallel code.'
362 343 raise error.StopLocalExecution(m)
363 344
364 345 def strip_whitespace(source):
365 346 # Expand tabs to avoid any confusion.
366 347 wsource = [l.expandtabs(4) for l in source]
367 348 # Detect the indentation level
368 349 done = False
369 350 for line in wsource:
370 351 if line.isspace():
371 352 continue
372 353 for col,char in enumerate(line):
373 354 if char != ' ':
374 355 done = True
375 356 break
376 357 if done:
377 358 break
378 359 # Now we know how much leading space there is in the code. Next, we
379 360 # extract up to the first line that has less indentation.
380 361 # WARNINGS: we skip comments that may be misindented, but we do NOT yet
381 362 # detect triple quoted strings that may have flush left text.
382 363 for lno,line in enumerate(wsource):
383 364 lead = line[:col]
384 365 if lead.isspace():
385 366 continue
386 367 else:
387 368 if not lead.lstrip().startswith('#'):
388 369 break
389 370 # The real 'with' source is up to lno
390 371 src_lines = [l[col:] for l in wsource[:lno+1]]
391 372
392 373 # Finally, check that the source's first non-comment line begins with the
393 374 # special call 'remote()'
394 375 for nline,line in enumerate(src_lines):
395 376 if line.isspace() or line.startswith('#'):
396 377 continue
397 378 if 'remote()' in line:
398 379 break
399 380 else:
400 381 raise ValueError('remote() call missing at the start of code')
401 382 src = ''.join(src_lines[nline+1:])
402 383 #print 'SRC:\n<<<<<<<>>>>>>>\n%s<<<<<>>>>>>' % src # dbg
403 384 return src
404 385
405 386
406 387 #-------------------------------------------------------------------------------
407 388 # The top-level MultiEngine client adaptor
408 389 #-------------------------------------------------------------------------------
409 390
410 391
411 392 class IFullBlockingMultiEngineClient(Interface):
412 393 pass
413 394
414 395
415 396 class FullBlockingMultiEngineClient(InteractiveMultiEngineClient):
416 397 """
417 398 A blocking client to the `IMultiEngine` controller interface.
418 399
419 400 This class allows users to use a set of engines for a parallel
420 401 computation through the `IMultiEngine` interface. In this interface,
421 402 each engine has a specific id (an int) that is used to refer to the
422 403 engine, run code on it, etc.
423 404 """
424 405
425 implements(IFullBlockingMultiEngineClient)
406 implements(
407 IFullBlockingMultiEngineClient,
408 IMultiEngineMapperFactory,
409 IMapper
410 )
426 411
427 412 def __init__(self, smultiengine):
428 413 self.smultiengine = smultiengine
429 414 self.block = True
430 415 self.targets = 'all'
431 416
432 417 def _findBlock(self, block=None):
433 418 if block is None:
434 419 return self.block
435 420 else:
436 421 if block in (True, False):
437 422 return block
438 423 else:
439 424 raise ValueError("block must be True or False")
440 425
441 426 def _findTargets(self, targets=None):
442 427 if targets is None:
443 428 return self.targets
444 429 else:
445 430 if not isinstance(targets, (str,list,tuple,int)):
446 431 raise ValueError("targets must be a str, list, tuple or int")
447 432 return targets
448 433
449 434 def _findTargetsAndBlock(self, targets=None, block=None):
450 435 return self._findTargets(targets), self._findBlock(block)
451 436
452 437 def _blockFromThread(self, function, *args, **kwargs):
453 438 block = kwargs.get('block', None)
454 439 if block is None:
455 440 raise error.MissingBlockArgument("'block' keyword argument is missing")
456 441 result = blockingCallFromThread(function, *args, **kwargs)
457 442 if not block:
458 443 result = PendingResult(self, result)
459 444 return result
460 445
461 446 def get_pending_deferred(self, deferredID, block):
462 447 return blockingCallFromThread(self.smultiengine.get_pending_deferred, deferredID, block)
463 448
464 449 def barrier(self, pendingResults):
465 450 """Synchronize a set of `PendingResults`.
466 451
467 452 This method is a synchronization primitive that waits for a set of
468 453 `PendingResult` objects to complete. More specifically, barier does
469 454 the following.
470 455
471 456 * The `PendingResult`s are sorted by result_id.
472 457 * The `get_result` method is called for each `PendingResult` sequentially
473 458 with block=True.
474 459 * If a `PendingResult` gets a result that is an exception, it is
475 460 trapped and can be re-raised later by calling `get_result` again.
476 461 * The `PendingResult`s are flushed from the controller.
477 462
478 463 After barrier has been called on a `PendingResult`, its results can
479 464 be retrieved by calling `get_result` again or accesing the `r` attribute
480 465 of the instance.
481 466 """
482 467
483 468 # Convert to list for sorting and check class type
484 469 prList = list(pendingResults)
485 470 for pr in prList:
486 471 if not isinstance(pr, PendingResult):
487 472 raise error.NotAPendingResult("Objects passed to barrier must be PendingResult instances")
488 473
489 474 # Sort the PendingResults so they are in order
490 475 prList.sort()
491 476 # Block on each PendingResult object
492 477 for pr in prList:
493 478 try:
494 479 result = pr.get_result(block=True)
495 480 except Exception:
496 481 pass
497 482
498 483 def flush(self):
499 484 """
500 485 Clear all pending deferreds/results from the controller.
501 486
502 487 For each `PendingResult` that is created by this client, the controller
503 488 holds on to the result for that `PendingResult`. This can be a problem
504 489 if there are a large number of `PendingResult` objects that are created.
505 490
506 491 Once the result of the `PendingResult` has been retrieved, the result
507 492 is removed from the controller, but if a user doesn't get a result (
508 493 they just ignore the `PendingResult`) the result is kept forever on the
509 494 controller. This method allows the user to clear out all un-retrieved
510 495 results on the controller.
511 496 """
512 497 r = blockingCallFromThread(self.smultiengine.clear_pending_deferreds)
513 498 return r
514 499
515 500 clear_pending_results = flush
516 501
517 502 #---------------------------------------------------------------------------
518 503 # IEngineMultiplexer related methods
519 504 #---------------------------------------------------------------------------
520 505
521 506 def execute(self, lines, targets=None, block=None):
522 507 """
523 508 Execute code on a set of engines.
524 509
525 510 :Parameters:
526 511 lines : str
527 512 The Python code to execute as a string
528 513 targets : id or list of ids
529 514 The engine to use for the execution
530 515 block : boolean
531 516 If False, this method will return the actual result. If False,
532 517 a `PendingResult` is returned which can be used to get the result
533 518 at a later time.
534 519 """
535 520 targets, block = self._findTargetsAndBlock(targets, block)
536 521 result = blockingCallFromThread(self.smultiengine.execute, lines,
537 522 targets=targets, block=block)
538 523 if block:
539 524 result = ResultList(result)
540 525 else:
541 526 result = PendingResult(self, result)
542 527 result.add_callback(wrapResultList)
543 528 return result
544 529
545 530 def push(self, namespace, targets=None, block=None):
546 531 """
547 532 Push a dictionary of keys and values to engines namespace.
548 533
549 534 Each engine has a persistent namespace. This method is used to push
550 535 Python objects into that namespace.
551 536
552 537 The objects in the namespace must be pickleable.
553 538
554 539 :Parameters:
555 540 namespace : dict
556 541 A dict that contains Python objects to be injected into
557 542 the engine persistent namespace.
558 543 targets : id or list of ids
559 544 The engine to use for the execution
560 545 block : boolean
561 546 If False, this method will return the actual result. If False,
562 547 a `PendingResult` is returned which can be used to get the result
563 548 at a later time.
564 549 """
565 550 targets, block = self._findTargetsAndBlock(targets, block)
566 551 return self._blockFromThread(self.smultiengine.push, namespace,
567 552 targets=targets, block=block)
568 553
569 554 def pull(self, keys, targets=None, block=None):
570 555 """
571 556 Pull Python objects by key out of engines namespaces.
572 557
573 558 :Parameters:
574 559 keys : str or list of str
575 560 The names of the variables to be pulled
576 561 targets : id or list of ids
577 562 The engine to use for the execution
578 563 block : boolean
579 564 If False, this method will return the actual result. If False,
580 565 a `PendingResult` is returned which can be used to get the result
581 566 at a later time.
582 567 """
583 568 targets, block = self._findTargetsAndBlock(targets, block)
584 569 return self._blockFromThread(self.smultiengine.pull, keys, targets=targets, block=block)
585 570
586 571 def push_function(self, namespace, targets=None, block=None):
587 572 """
588 573 Push a Python function to an engine.
589 574
590 575 This method is used to push a Python function to an engine. This
591 576 method can then be used in code on the engines. Closures are not supported.
592 577
593 578 :Parameters:
594 579 namespace : dict
595 580 A dict whose values are the functions to be pushed. The keys give
596 581 that names that the function will appear as in the engines
597 582 namespace.
598 583 targets : id or list of ids
599 584 The engine to use for the execution
600 585 block : boolean
601 586 If False, this method will return the actual result. If False,
602 587 a `PendingResult` is returned which can be used to get the result
603 588 at a later time.
604 589 """
605 590 targets, block = self._findTargetsAndBlock(targets, block)
606 591 return self._blockFromThread(self.smultiengine.push_function, namespace, targets=targets, block=block)
607 592
608 593 def pull_function(self, keys, targets=None, block=None):
609 594 """
610 595 Pull a Python function from an engine.
611 596
612 597 This method is used to pull a Python function from an engine.
613 598 Closures are not supported.
614 599
615 600 :Parameters:
616 601 keys : str or list of str
617 602 The names of the functions to be pulled
618 603 targets : id or list of ids
619 604 The engine to use for the execution
620 605 block : boolean
621 606 If False, this method will return the actual result. If False,
622 607 a `PendingResult` is returned which can be used to get the result
623 608 at a later time.
624 609 """
625 610 targets, block = self._findTargetsAndBlock(targets, block)
626 611 return self._blockFromThread(self.smultiengine.pull_function, keys, targets=targets, block=block)
627 612
628 613 def push_serialized(self, namespace, targets=None, block=None):
629 614 targets, block = self._findTargetsAndBlock(targets, block)
630 615 return self._blockFromThread(self.smultiengine.push_serialized, namespace, targets=targets, block=block)
631 616
632 617 def pull_serialized(self, keys, targets=None, block=None):
633 618 targets, block = self._findTargetsAndBlock(targets, block)
634 619 return self._blockFromThread(self.smultiengine.pull_serialized, keys, targets=targets, block=block)
635 620
636 621 def get_result(self, i=None, targets=None, block=None):
637 622 """
638 623 Get a previous result.
639 624
640 625 When code is executed in an engine, a dict is created and returned. This
641 626 method retrieves that dict for previous commands.
642 627
643 628 :Parameters:
644 629 i : int
645 630 The number of the result to get
646 631 targets : id or list of ids
647 632 The engine to use for the execution
648 633 block : boolean
649 634 If False, this method will return the actual result. If False,
650 635 a `PendingResult` is returned which can be used to get the result
651 636 at a later time.
652 637 """
653 638 targets, block = self._findTargetsAndBlock(targets, block)
654 639 result = blockingCallFromThread(self.smultiengine.get_result, i, targets=targets, block=block)
655 640 if block:
656 641 result = ResultList(result)
657 642 else:
658 643 result = PendingResult(self, result)
659 644 result.add_callback(wrapResultList)
660 645 return result
661 646
662 647 def reset(self, targets=None, block=None):
663 648 """
664 649 Reset an engine.
665 650
666 651 This method clears out the namespace of an engine.
667 652
668 653 :Parameters:
669 654 targets : id or list of ids
670 655 The engine to use for the execution
671 656 block : boolean
672 657 If False, this method will return the actual result. If False,
673 658 a `PendingResult` is returned which can be used to get the result
674 659 at a later time.
675 660 """
676 661 targets, block = self._findTargetsAndBlock(targets, block)
677 662 return self._blockFromThread(self.smultiengine.reset, targets=targets, block=block)
678 663
679 664 def keys(self, targets=None, block=None):
680 665 """
681 666 Get a list of all the variables in an engine's namespace.
682 667
683 668 :Parameters:
684 669 targets : id or list of ids
685 670 The engine to use for the execution
686 671 block : boolean
687 672 If False, this method will return the actual result. If False,
688 673 a `PendingResult` is returned which can be used to get the result
689 674 at a later time.
690 675 """
691 676 targets, block = self._findTargetsAndBlock(targets, block)
692 677 return self._blockFromThread(self.smultiengine.keys, targets=targets, block=block)
693 678
694 679 def kill(self, controller=False, targets=None, block=None):
695 680 """
696 681 Kill the engines and controller.
697 682
698 683 This method is used to stop the engine and controller by calling
699 684 `reactor.stop`.
700 685
701 686 :Parameters:
702 687 controller : boolean
703 688 If True, kill the engines and controller. If False, just the
704 689 engines
705 690 targets : id or list of ids
706 691 The engine to use for the execution
707 692 block : boolean
708 693 If False, this method will return the actual result. If False,
709 694 a `PendingResult` is returned which can be used to get the result
710 695 at a later time.
711 696 """
712 697 targets, block = self._findTargetsAndBlock(targets, block)
713 698 return self._blockFromThread(self.smultiengine.kill, controller, targets=targets, block=block)
714 699
715 700 def clear_queue(self, targets=None, block=None):
716 701 """
717 702 Clear out the controller's queue for an engine.
718 703
719 704 The controller maintains a queue for each engine. This clear it out.
720 705
721 706 :Parameters:
722 707 targets : id or list of ids
723 708 The engine to use for the execution
724 709 block : boolean
725 710 If False, this method will return the actual result. If False,
726 711 a `PendingResult` is returned which can be used to get the result
727 712 at a later time.
728 713 """
729 714 targets, block = self._findTargetsAndBlock(targets, block)
730 715 return self._blockFromThread(self.smultiengine.clear_queue, targets=targets, block=block)
731 716
732 717 def queue_status(self, targets=None, block=None):
733 718 """
734 719 Get the status of an engines queue.
735 720
736 721 :Parameters:
737 722 targets : id or list of ids
738 723 The engine to use for the execution
739 724 block : boolean
740 725 If False, this method will return the actual result. If False,
741 726 a `PendingResult` is returned which can be used to get the result
742 727 at a later time.
743 728 """
744 729 targets, block = self._findTargetsAndBlock(targets, block)
745 730 return self._blockFromThread(self.smultiengine.queue_status, targets=targets, block=block)
746 731
747 732 def set_properties(self, properties, targets=None, block=None):
748 733 targets, block = self._findTargetsAndBlock(targets, block)
749 734 return self._blockFromThread(self.smultiengine.set_properties, properties, targets=targets, block=block)
750 735
751 736 def get_properties(self, keys=None, targets=None, block=None):
752 737 targets, block = self._findTargetsAndBlock(targets, block)
753 738 return self._blockFromThread(self.smultiengine.get_properties, keys, targets=targets, block=block)
754 739
755 740 def has_properties(self, keys, targets=None, block=None):
756 741 targets, block = self._findTargetsAndBlock(targets, block)
757 742 return self._blockFromThread(self.smultiengine.has_properties, keys, targets=targets, block=block)
758 743
759 744 def del_properties(self, keys, targets=None, block=None):
760 745 targets, block = self._findTargetsAndBlock(targets, block)
761 746 return self._blockFromThread(self.smultiengine.del_properties, keys, targets=targets, block=block)
762 747
763 748 def clear_properties(self, targets=None, block=None):
764 749 targets, block = self._findTargetsAndBlock(targets, block)
765 750 return self._blockFromThread(self.smultiengine.clear_properties, targets=targets, block=block)
766 751
767 752 #---------------------------------------------------------------------------
768 753 # IMultiEngine related methods
769 754 #---------------------------------------------------------------------------
770 755
771 756 def get_ids(self):
772 757 """
773 758 Returns the ids of currently registered engines.
774 759 """
775 760 result = blockingCallFromThread(self.smultiengine.get_ids)
776 761 return result
777 762
778 763 #---------------------------------------------------------------------------
779 764 # IMultiEngineCoordinator
780 765 #---------------------------------------------------------------------------
781 766
782 def scatter(self, key, seq, style='basic', flatten=False, targets=None, block=None):
767 def scatter(self, key, seq, dist='b', flatten=False, targets=None, block=None):
783 768 """
784 769 Partition a Python sequence and send the partitions to a set of engines.
785 770 """
786 771 targets, block = self._findTargetsAndBlock(targets, block)
787 772 return self._blockFromThread(self.smultiengine.scatter, key, seq,
788 style, flatten, targets=targets, block=block)
773 dist, flatten, targets=targets, block=block)
789 774
790 def gather(self, key, style='basic', targets=None, block=None):
775 def gather(self, key, dist='b', targets=None, block=None):
791 776 """
792 777 Gather a partitioned sequence on a set of engines as a single local seq.
793 778 """
794 779 targets, block = self._findTargetsAndBlock(targets, block)
795 return self._blockFromThread(self.smultiengine.gather, key, style,
780 return self._blockFromThread(self.smultiengine.gather, key, dist,
796 781 targets=targets, block=block)
797 782
798 def map(self, func, seq, style='basic', targets=None, block=None):
783 def raw_map(self, func, seq, dist='b', targets=None, block=None):
799 784 """
800 A parallelized version of Python's builtin map
785 A parallelized version of Python's builtin map.
786
787 This has a slightly different syntax than the builtin `map`.
788 This is needed because we need to have keyword arguments and thus
789 can't use *args to capture all the sequences. Instead, they must
790 be passed in a list or tuple.
791
792 raw_map(func, seqs) -> map(func, seqs[0], seqs[1], ...)
793
794 Most users will want to use parallel functions or the `mapper`
795 and `map` methods for an API that follows that of the builtin
796 `map`.
801 797 """
802 798 targets, block = self._findTargetsAndBlock(targets, block)
803 return self._blockFromThread(self.smultiengine.map, func, seq,
804 style, targets=targets, block=block)
799 return self._blockFromThread(self.smultiengine.raw_map, func, seq,
800 dist, targets=targets, block=block)
801
802 def map(self, func, *sequences):
803 """
804 A parallel version of Python's builtin `map` function.
805
806 This method applies a function to sequences of arguments. It
807 follows the same syntax as the builtin `map`.
808
809 This method creates a mapper objects by calling `self.mapper` with
810 no arguments and then uses that mapper to do the mapping. See
811 the documentation of `mapper` for more details.
812 """
813 return self.mapper().map(func, *sequences)
814
815 def mapper(self, dist='b', targets='all', block=None):
816 """
817 Create a mapper object that has a `map` method.
818
819 This method returns an object that implements the `IMapper`
820 interface. This method is a factory that is used to control how
821 the map happens.
822
823 :Parameters:
824 dist : str
825 What decomposition to use, 'b' is the only one supported
826 currently
827 targets : str, int, sequence of ints
828 Which engines to use for the map
829 block : boolean
830 Should calls to `map` block or not
831 """
832 return MultiEngineMapper(self, dist, targets, block)
833
834 def parallel(self, dist='b', targets=None, block=None):
835 """
836 A decorator that turns a function into a parallel function.
837
838 This can be used as:
839
840 @parallel()
841 def f(x, y)
842 ...
843
844 f(range(10), range(10))
845
846 This causes f(0,0), f(1,1), ... to be called in parallel.
847
848 :Parameters:
849 dist : str
850 What decomposition to use, 'b' is the only one supported
851 currently
852 targets : str, int, sequence of ints
853 Which engines to use for the map
854 block : boolean
855 Should calls to `map` block or not
856 """
857 targets, block = self._findTargetsAndBlock(targets, block)
858 mapper = self.mapper(dist, targets, block)
859 pf = ParallelFunction(mapper)
860 return pf
805 861
806 862 #---------------------------------------------------------------------------
807 863 # IMultiEngineExtras
808 864 #---------------------------------------------------------------------------
809 865
810 866 def zip_pull(self, keys, targets=None, block=None):
811 867 targets, block = self._findTargetsAndBlock(targets, block)
812 868 return self._blockFromThread(self.smultiengine.zip_pull, keys,
813 869 targets=targets, block=block)
814 870
815 871 def run(self, filename, targets=None, block=None):
816 872 """
817 873 Run a Python code in a file on the engines.
818 874
819 875 :Parameters:
820 876 filename : str
821 877 The name of the local file to run
822 878 targets : id or list of ids
823 879 The engine to use for the execution
824 880 block : boolean
825 881 If False, this method will return the actual result. If False,
826 882 a `PendingResult` is returned which can be used to get the result
827 883 at a later time.
828 884 """
829 885 targets, block = self._findTargetsAndBlock(targets, block)
830 886 return self._blockFromThread(self.smultiengine.run, filename,
831 887 targets=targets, block=block)
832 888
833 889
834 890
835 891 components.registerAdapter(FullBlockingMultiEngineClient,
836 892 IFullSynchronousMultiEngine, IFullBlockingMultiEngineClient)
837 893
838 894
839 895
840 896
@@ -1,668 +1,757 b''
1 1 # encoding: utf-8
2 2
3 3 """
4 4 Expose the multiengine controller over the Foolscap network protocol.
5 5 """
6 6
7 7 __docformat__ = "restructuredtext en"
8 8
9 9 #-------------------------------------------------------------------------------
10 10 # Copyright (C) 2008 The IPython Development Team
11 11 #
12 12 # Distributed under the terms of the BSD License. The full license is in
13 13 # the file COPYING, distributed as part of this software.
14 14 #-------------------------------------------------------------------------------
15 15
16 16 #-------------------------------------------------------------------------------
17 17 # Imports
18 18 #-------------------------------------------------------------------------------
19 19
20 20 import cPickle as pickle
21 21 from types import FunctionType
22 22
23 23 from zope.interface import Interface, implements
24 24 from twisted.internet import defer
25 25 from twisted.python import components, failure, log
26 26
27 27 from foolscap import Referenceable
28 28
29 29 from IPython.kernel import error
30 30 from IPython.kernel.util import printer
31 31 from IPython.kernel import map as Map
32 from IPython.kernel.parallelfunction import ParallelFunction
33 from IPython.kernel.mapper import (
34 MultiEngineMapper,
35 IMultiEngineMapperFactory,
36 IMapper
37 )
32 38 from IPython.kernel.twistedutil import gatherBoth
33 39 from IPython.kernel.multiengine import (MultiEngine,
34 40 IMultiEngine,
35 41 IFullSynchronousMultiEngine,
36 42 ISynchronousMultiEngine)
37 43 from IPython.kernel.multiengineclient import wrapResultList
38 44 from IPython.kernel.pendingdeferred import PendingDeferredManager
39 45 from IPython.kernel.pickleutil import (can, canDict,
40 46 canSequence, uncan, uncanDict, uncanSequence)
41 47
42 48 from IPython.kernel.clientinterfaces import (
43 49 IFCClientInterfaceProvider,
44 50 IBlockingClientAdaptor
45 51 )
46 52
47 53 # Needed to access the true globals from __main__.__dict__
48 54 import __main__
49 55
50 56 #-------------------------------------------------------------------------------
51 57 # The Controller side of things
52 58 #-------------------------------------------------------------------------------
53 59
54 60 def packageResult(wrappedMethod):
55 61
56 62 def wrappedPackageResult(self, *args, **kwargs):
57 63 d = wrappedMethod(self, *args, **kwargs)
58 64 d.addCallback(self.packageSuccess)
59 65 d.addErrback(self.packageFailure)
60 66 return d
61 67 return wrappedPackageResult
62 68
63 69
64 70 class IFCSynchronousMultiEngine(Interface):
65 71 """Foolscap interface to `ISynchronousMultiEngine`.
66 72
67 73 The methods in this interface are similar to those of
68 74 `ISynchronousMultiEngine`, but their arguments and return values are pickled
69 75 if they are not already simple Python types that can be send over XML-RPC.
70 76
71 77 See the documentation of `ISynchronousMultiEngine` and `IMultiEngine` for
72 78 documentation about the methods.
73 79
74 80 Most methods in this interface act like the `ISynchronousMultiEngine`
75 81 versions and can be called in blocking or non-blocking mode.
76 82 """
77 83 pass
78 84
79 85
80 86 class FCSynchronousMultiEngineFromMultiEngine(Referenceable):
81 87 """Adapt `IMultiEngine` -> `ISynchronousMultiEngine` -> `IFCSynchronousMultiEngine`.
82 88 """
83 89
84 90 implements(IFCSynchronousMultiEngine, IFCClientInterfaceProvider)
85 91
86 92 addSlash = True
87 93
88 94 def __init__(self, multiengine):
89 95 # Adapt the raw multiengine to `ISynchronousMultiEngine` before saving
90 96 # it. This allow this class to do two adaptation steps.
91 97 self.smultiengine = ISynchronousMultiEngine(multiengine)
92 98 self._deferredIDCallbacks = {}
93 99
94 100 #---------------------------------------------------------------------------
95 101 # Non interface methods
96 102 #---------------------------------------------------------------------------
97 103
98 104 def packageFailure(self, f):
99 105 f.cleanFailure()
100 106 return self.packageSuccess(f)
101 107
102 108 def packageSuccess(self, obj):
103 109 serial = pickle.dumps(obj, 2)
104 110 return serial
105 111
106 112 #---------------------------------------------------------------------------
107 113 # Things related to PendingDeferredManager
108 114 #---------------------------------------------------------------------------
109 115
110 116 @packageResult
111 117 def remote_get_pending_deferred(self, deferredID, block):
112 118 d = self.smultiengine.get_pending_deferred(deferredID, block)
113 119 try:
114 120 callback = self._deferredIDCallbacks.pop(deferredID)
115 121 except KeyError:
116 122 callback = None
117 123 if callback is not None:
118 124 d.addCallback(callback[0], *callback[1], **callback[2])
119 125 return d
120 126
121 127 @packageResult
122 128 def remote_clear_pending_deferreds(self):
123 129 return defer.maybeDeferred(self.smultiengine.clear_pending_deferreds)
124 130
125 131 def _addDeferredIDCallback(self, did, callback, *args, **kwargs):
126 132 self._deferredIDCallbacks[did] = (callback, args, kwargs)
127 133 return did
128 134
129 135 #---------------------------------------------------------------------------
130 136 # IEngineMultiplexer related methods
131 137 #---------------------------------------------------------------------------
132 138
133 139 @packageResult
134 140 def remote_execute(self, lines, targets, block):
135 141 return self.smultiengine.execute(lines, targets=targets, block=block)
136 142
137 143 @packageResult
138 144 def remote_push(self, binaryNS, targets, block):
139 145 try:
140 146 namespace = pickle.loads(binaryNS)
141 147 except:
142 148 d = defer.fail(failure.Failure())
143 149 else:
144 150 d = self.smultiengine.push(namespace, targets=targets, block=block)
145 151 return d
146 152
147 153 @packageResult
148 154 def remote_pull(self, keys, targets, block):
149 155 d = self.smultiengine.pull(keys, targets=targets, block=block)
150 156 return d
151 157
152 158 @packageResult
153 159 def remote_push_function(self, binaryNS, targets, block):
154 160 try:
155 161 namespace = pickle.loads(binaryNS)
156 162 except:
157 163 d = defer.fail(failure.Failure())
158 164 else:
159 165 namespace = uncanDict(namespace)
160 166 d = self.smultiengine.push_function(namespace, targets=targets, block=block)
161 167 return d
162 168
163 169 def _canMultipleKeys(self, result):
164 170 return [canSequence(r) for r in result]
165 171
166 172 @packageResult
167 173 def remote_pull_function(self, keys, targets, block):
168 174 def can_functions(r, keys):
169 175 if len(keys)==1 or isinstance(keys, str):
170 176 result = canSequence(r)
171 177 elif len(keys)>1:
172 178 result = [canSequence(s) for s in r]
173 179 return result
174 180 d = self.smultiengine.pull_function(keys, targets=targets, block=block)
175 181 if block:
176 182 d.addCallback(can_functions, keys)
177 183 else:
178 184 d.addCallback(lambda did: self._addDeferredIDCallback(did, can_functions, keys))
179 185 return d
180 186
181 187 @packageResult
182 188 def remote_push_serialized(self, binaryNS, targets, block):
183 189 try:
184 190 namespace = pickle.loads(binaryNS)
185 191 except:
186 192 d = defer.fail(failure.Failure())
187 193 else:
188 194 d = self.smultiengine.push_serialized(namespace, targets=targets, block=block)
189 195 return d
190 196
191 197 @packageResult
192 198 def remote_pull_serialized(self, keys, targets, block):
193 199 d = self.smultiengine.pull_serialized(keys, targets=targets, block=block)
194 200 return d
195 201
196 202 @packageResult
197 203 def remote_get_result(self, i, targets, block):
198 204 if i == 'None':
199 205 i = None
200 206 return self.smultiengine.get_result(i, targets=targets, block=block)
201 207
202 208 @packageResult
203 209 def remote_reset(self, targets, block):
204 210 return self.smultiengine.reset(targets=targets, block=block)
205 211
206 212 @packageResult
207 213 def remote_keys(self, targets, block):
208 214 return self.smultiengine.keys(targets=targets, block=block)
209 215
210 216 @packageResult
211 217 def remote_kill(self, controller, targets, block):
212 218 return self.smultiengine.kill(controller, targets=targets, block=block)
213 219
214 220 @packageResult
215 221 def remote_clear_queue(self, targets, block):
216 222 return self.smultiengine.clear_queue(targets=targets, block=block)
217 223
218 224 @packageResult
219 225 def remote_queue_status(self, targets, block):
220 226 return self.smultiengine.queue_status(targets=targets, block=block)
221 227
222 228 @packageResult
223 229 def remote_set_properties(self, binaryNS, targets, block):
224 230 try:
225 231 ns = pickle.loads(binaryNS)
226 232 except:
227 233 d = defer.fail(failure.Failure())
228 234 else:
229 235 d = self.smultiengine.set_properties(ns, targets=targets, block=block)
230 236 return d
231 237
232 238 @packageResult
233 239 def remote_get_properties(self, keys, targets, block):
234 240 if keys=='None':
235 241 keys=None
236 242 return self.smultiengine.get_properties(keys, targets=targets, block=block)
237 243
238 244 @packageResult
239 245 def remote_has_properties(self, keys, targets, block):
240 246 return self.smultiengine.has_properties(keys, targets=targets, block=block)
241 247
242 248 @packageResult
243 249 def remote_del_properties(self, keys, targets, block):
244 250 return self.smultiengine.del_properties(keys, targets=targets, block=block)
245 251
246 252 @packageResult
247 253 def remote_clear_properties(self, targets, block):
248 254 return self.smultiengine.clear_properties(targets=targets, block=block)
249 255
250 256 #---------------------------------------------------------------------------
251 257 # IMultiEngine related methods
252 258 #---------------------------------------------------------------------------
253 259
254 260 def remote_get_ids(self):
255 261 """Get the ids of the registered engines.
256 262
257 263 This method always blocks.
258 264 """
259 265 return self.smultiengine.get_ids()
260 266
261 267 #---------------------------------------------------------------------------
262 268 # IFCClientInterfaceProvider related methods
263 269 #---------------------------------------------------------------------------
264 270
265 271 def remote_get_client_name(self):
266 272 return 'IPython.kernel.multienginefc.FCFullSynchronousMultiEngineClient'
267 273
268 274
269 275 # The __init__ method of `FCMultiEngineFromMultiEngine` first adapts the
270 276 # `IMultiEngine` to `ISynchronousMultiEngine` so this is actually doing a
271 277 # two phase adaptation.
272 278 components.registerAdapter(FCSynchronousMultiEngineFromMultiEngine,
273 279 IMultiEngine, IFCSynchronousMultiEngine)
274 280
275 281
276 282 #-------------------------------------------------------------------------------
277 283 # The Client side of things
278 284 #-------------------------------------------------------------------------------
279 285
280 286
281 287 class FCFullSynchronousMultiEngineClient(object):
282 288
283 implements(IFullSynchronousMultiEngine, IBlockingClientAdaptor)
289 implements(
290 IFullSynchronousMultiEngine,
291 IBlockingClientAdaptor,
292 IMultiEngineMapperFactory,
293 IMapper
294 )
284 295
285 296 def __init__(self, remote_reference):
286 297 self.remote_reference = remote_reference
287 298 self._deferredIDCallbacks = {}
288 299 # This class manages some pending deferreds through this instance. This
289 300 # is required for methods like gather/scatter as it enables us to
290 301 # create our own pending deferreds for composite operations.
291 302 self.pdm = PendingDeferredManager()
292 303
293 304 #---------------------------------------------------------------------------
294 305 # Non interface methods
295 306 #---------------------------------------------------------------------------
296 307
297 308 def unpackage(self, r):
298 309 return pickle.loads(r)
299 310
300 311 #---------------------------------------------------------------------------
301 312 # Things related to PendingDeferredManager
302 313 #---------------------------------------------------------------------------
303 314
304 315 def get_pending_deferred(self, deferredID, block=True):
305 316
306 317 # Because we are managing some pending deferreds locally (through
307 318 # self.pdm) and some remotely (on the controller), we first try the
308 319 # local one and then the remote one.
309 320 if self.pdm.quick_has_id(deferredID):
310 321 d = self.pdm.get_pending_deferred(deferredID, block)
311 322 return d
312 323 else:
313 324 d = self.remote_reference.callRemote('get_pending_deferred', deferredID, block)
314 325 d.addCallback(self.unpackage)
315 326 try:
316 327 callback = self._deferredIDCallbacks.pop(deferredID)
317 328 except KeyError:
318 329 callback = None
319 330 if callback is not None:
320 331 d.addCallback(callback[0], *callback[1], **callback[2])
321 332 return d
322 333
323 334 def clear_pending_deferreds(self):
324 335
325 336 # This clear both the local (self.pdm) and remote pending deferreds
326 337 self.pdm.clear_pending_deferreds()
327 338 d2 = self.remote_reference.callRemote('clear_pending_deferreds')
328 339 d2.addCallback(self.unpackage)
329 340 return d2
330 341
331 342 def _addDeferredIDCallback(self, did, callback, *args, **kwargs):
332 343 self._deferredIDCallbacks[did] = (callback, args, kwargs)
333 344 return did
334 345
335 346 #---------------------------------------------------------------------------
336 347 # IEngineMultiplexer related methods
337 348 #---------------------------------------------------------------------------
338 349
339 350 def execute(self, lines, targets='all', block=True):
340 351 d = self.remote_reference.callRemote('execute', lines, targets, block)
341 352 d.addCallback(self.unpackage)
342 353 return d
343 354
344 355 def push(self, namespace, targets='all', block=True):
345 356 serial = pickle.dumps(namespace, 2)
346 357 d = self.remote_reference.callRemote('push', serial, targets, block)
347 358 d.addCallback(self.unpackage)
348 359 return d
349 360
350 361 def pull(self, keys, targets='all', block=True):
351 362 d = self.remote_reference.callRemote('pull', keys, targets, block)
352 363 d.addCallback(self.unpackage)
353 364 return d
354 365
355 366 def push_function(self, namespace, targets='all', block=True):
356 367 cannedNamespace = canDict(namespace)
357 368 serial = pickle.dumps(cannedNamespace, 2)
358 369 d = self.remote_reference.callRemote('push_function', serial, targets, block)
359 370 d.addCallback(self.unpackage)
360 371 return d
361 372
362 373 def pull_function(self, keys, targets='all', block=True):
363 374 def uncan_functions(r, keys):
364 375 if len(keys)==1 or isinstance(keys, str):
365 376 return uncanSequence(r)
366 377 elif len(keys)>1:
367 378 return [uncanSequence(s) for s in r]
368 379 d = self.remote_reference.callRemote('pull_function', keys, targets, block)
369 380 if block:
370 381 d.addCallback(self.unpackage)
371 382 d.addCallback(uncan_functions, keys)
372 383 else:
373 384 d.addCallback(self.unpackage)
374 385 d.addCallback(lambda did: self._addDeferredIDCallback(did, uncan_functions, keys))
375 386 return d
376 387
377 388 def push_serialized(self, namespace, targets='all', block=True):
378 389 cannedNamespace = canDict(namespace)
379 390 serial = pickle.dumps(cannedNamespace, 2)
380 391 d = self.remote_reference.callRemote('push_serialized', serial, targets, block)
381 392 d.addCallback(self.unpackage)
382 393 return d
383 394
384 395 def pull_serialized(self, keys, targets='all', block=True):
385 396 d = self.remote_reference.callRemote('pull_serialized', keys, targets, block)
386 397 d.addCallback(self.unpackage)
387 398 return d
388 399
389 400 def get_result(self, i=None, targets='all', block=True):
390 401 if i is None: # This is because None cannot be marshalled by xml-rpc
391 402 i = 'None'
392 403 d = self.remote_reference.callRemote('get_result', i, targets, block)
393 404 d.addCallback(self.unpackage)
394 405 return d
395 406
396 407 def reset(self, targets='all', block=True):
397 408 d = self.remote_reference.callRemote('reset', targets, block)
398 409 d.addCallback(self.unpackage)
399 410 return d
400 411
401 412 def keys(self, targets='all', block=True):
402 413 d = self.remote_reference.callRemote('keys', targets, block)
403 414 d.addCallback(self.unpackage)
404 415 return d
405 416
406 417 def kill(self, controller=False, targets='all', block=True):
407 418 d = self.remote_reference.callRemote('kill', controller, targets, block)
408 419 d.addCallback(self.unpackage)
409 420 return d
410 421
411 422 def clear_queue(self, targets='all', block=True):
412 423 d = self.remote_reference.callRemote('clear_queue', targets, block)
413 424 d.addCallback(self.unpackage)
414 425 return d
415 426
416 427 def queue_status(self, targets='all', block=True):
417 428 d = self.remote_reference.callRemote('queue_status', targets, block)
418 429 d.addCallback(self.unpackage)
419 430 return d
420 431
421 432 def set_properties(self, properties, targets='all', block=True):
422 433 serial = pickle.dumps(properties, 2)
423 434 d = self.remote_reference.callRemote('set_properties', serial, targets, block)
424 435 d.addCallback(self.unpackage)
425 436 return d
426 437
427 438 def get_properties(self, keys=None, targets='all', block=True):
428 439 if keys==None:
429 440 keys='None'
430 441 d = self.remote_reference.callRemote('get_properties', keys, targets, block)
431 442 d.addCallback(self.unpackage)
432 443 return d
433 444
434 445 def has_properties(self, keys, targets='all', block=True):
435 446 d = self.remote_reference.callRemote('has_properties', keys, targets, block)
436 447 d.addCallback(self.unpackage)
437 448 return d
438 449
439 450 def del_properties(self, keys, targets='all', block=True):
440 451 d = self.remote_reference.callRemote('del_properties', keys, targets, block)
441 452 d.addCallback(self.unpackage)
442 453 return d
443 454
444 455 def clear_properties(self, targets='all', block=True):
445 456 d = self.remote_reference.callRemote('clear_properties', targets, block)
446 457 d.addCallback(self.unpackage)
447 458 return d
448 459
449 460 #---------------------------------------------------------------------------
450 461 # IMultiEngine related methods
451 462 #---------------------------------------------------------------------------
452 463
453 464 def get_ids(self):
454 465 d = self.remote_reference.callRemote('get_ids')
455 466 return d
456 467
457 468 #---------------------------------------------------------------------------
458 469 # ISynchronousMultiEngineCoordinator related methods
459 470 #---------------------------------------------------------------------------
460 471
461 472 def _process_targets(self, targets):
462 473 def create_targets(ids):
463 474 if isinstance(targets, int):
464 475 engines = [targets]
465 476 elif targets=='all':
466 477 engines = ids
467 478 elif isinstance(targets, (list, tuple)):
468 479 engines = targets
469 480 for t in engines:
470 481 if not t in ids:
471 482 raise error.InvalidEngineID("engine with id %r does not exist"%t)
472 483 return engines
473 484
474 485 d = self.get_ids()
475 486 d.addCallback(create_targets)
476 487 return d
477 488
478 def scatter(self, key, seq, style='basic', flatten=False, targets='all', block=True):
489 def scatter(self, key, seq, dist='b', flatten=False, targets='all', block=True):
479 490
480 491 # Note: scatter and gather handle pending deferreds locally through self.pdm.
481 492 # This enables us to collect a bunch fo deferred ids and make a secondary
482 493 # deferred id that corresponds to the entire group. This logic is extremely
483 494 # difficult to get right though.
484 495 def do_scatter(engines):
485 496 nEngines = len(engines)
486 mapClass = Map.styles[style]
497 mapClass = Map.dists[dist]
487 498 mapObject = mapClass()
488 499 d_list = []
489 500 # Loop through and push to each engine in non-blocking mode.
490 501 # This returns a set of deferreds to deferred_ids
491 502 for index, engineid in enumerate(engines):
492 503 partition = mapObject.getPartition(seq, index, nEngines)
493 504 if flatten and len(partition) == 1:
494 505 d = self.push({key: partition[0]}, targets=engineid, block=False)
495 506 else:
496 507 d = self.push({key: partition}, targets=engineid, block=False)
497 508 d_list.append(d)
498 509 # Collect the deferred to deferred_ids
499 510 d = gatherBoth(d_list,
500 511 fireOnOneErrback=0,
501 512 consumeErrors=1,
502 513 logErrors=0)
503 514 # Now d has a list of deferred_ids or Failures coming
504 515 d.addCallback(error.collect_exceptions, 'scatter')
505 516 def process_did_list(did_list):
506 517 """Turn a list of deferred_ids into a final result or failure."""
507 518 new_d_list = [self.get_pending_deferred(did, True) for did in did_list]
508 519 final_d = gatherBoth(new_d_list,
509 520 fireOnOneErrback=0,
510 521 consumeErrors=1,
511 522 logErrors=0)
512 523 final_d.addCallback(error.collect_exceptions, 'scatter')
513 524 final_d.addCallback(lambda lop: [i[0] for i in lop])
514 525 return final_d
515 526 # Now, depending on block, we need to handle the list deferred_ids
516 527 # coming down the pipe diferently.
517 528 if block:
518 529 # If we are blocking register a callback that will transform the
519 530 # list of deferred_ids into the final result.
520 531 d.addCallback(process_did_list)
521 532 return d
522 533 else:
523 534 # Here we are going to use a _local_ PendingDeferredManager.
524 535 deferred_id = self.pdm.get_deferred_id()
525 536 # This is the deferred we will return to the user that will fire
526 537 # with the local deferred_id AFTER we have received the list of
527 538 # primary deferred_ids
528 539 d_to_return = defer.Deferred()
529 540 def do_it(did_list):
530 541 """Produce a deferred to the final result, but first fire the
531 542 deferred we will return to the user that has the local
532 543 deferred id."""
533 544 d_to_return.callback(deferred_id)
534 545 return process_did_list(did_list)
535 546 d.addCallback(do_it)
536 547 # Now save the deferred to the final result
537 548 self.pdm.save_pending_deferred(d, deferred_id)
538 549 return d_to_return
539 550
540 551 d = self._process_targets(targets)
541 552 d.addCallback(do_scatter)
542 553 return d
543 554
544 def gather(self, key, style='basic', targets='all', block=True):
555 def gather(self, key, dist='b', targets='all', block=True):
545 556
546 557 # Note: scatter and gather handle pending deferreds locally through self.pdm.
547 558 # This enables us to collect a bunch fo deferred ids and make a secondary
548 559 # deferred id that corresponds to the entire group. This logic is extremely
549 560 # difficult to get right though.
550 561 def do_gather(engines):
551 562 nEngines = len(engines)
552 mapClass = Map.styles[style]
563 mapClass = Map.dists[dist]
553 564 mapObject = mapClass()
554 565 d_list = []
555 566 # Loop through and push to each engine in non-blocking mode.
556 567 # This returns a set of deferreds to deferred_ids
557 568 for index, engineid in enumerate(engines):
558 569 d = self.pull(key, targets=engineid, block=False)
559 570 d_list.append(d)
560 571 # Collect the deferred to deferred_ids
561 572 d = gatherBoth(d_list,
562 573 fireOnOneErrback=0,
563 574 consumeErrors=1,
564 575 logErrors=0)
565 576 # Now d has a list of deferred_ids or Failures coming
566 577 d.addCallback(error.collect_exceptions, 'scatter')
567 578 def process_did_list(did_list):
568 579 """Turn a list of deferred_ids into a final result or failure."""
569 580 new_d_list = [self.get_pending_deferred(did, True) for did in did_list]
570 581 final_d = gatherBoth(new_d_list,
571 582 fireOnOneErrback=0,
572 583 consumeErrors=1,
573 584 logErrors=0)
574 585 final_d.addCallback(error.collect_exceptions, 'gather')
575 586 final_d.addCallback(lambda lop: [i[0] for i in lop])
576 587 final_d.addCallback(mapObject.joinPartitions)
577 588 return final_d
578 589 # Now, depending on block, we need to handle the list deferred_ids
579 590 # coming down the pipe diferently.
580 591 if block:
581 592 # If we are blocking register a callback that will transform the
582 593 # list of deferred_ids into the final result.
583 594 d.addCallback(process_did_list)
584 595 return d
585 596 else:
586 597 # Here we are going to use a _local_ PendingDeferredManager.
587 598 deferred_id = self.pdm.get_deferred_id()
588 599 # This is the deferred we will return to the user that will fire
589 600 # with the local deferred_id AFTER we have received the list of
590 601 # primary deferred_ids
591 602 d_to_return = defer.Deferred()
592 603 def do_it(did_list):
593 604 """Produce a deferred to the final result, but first fire the
594 605 deferred we will return to the user that has the local
595 606 deferred id."""
596 607 d_to_return.callback(deferred_id)
597 608 return process_did_list(did_list)
598 609 d.addCallback(do_it)
599 610 # Now save the deferred to the final result
600 611 self.pdm.save_pending_deferred(d, deferred_id)
601 612 return d_to_return
602 613
603 614 d = self._process_targets(targets)
604 615 d.addCallback(do_gather)
605 616 return d
606 617
607 def map(self, func, seq, style='basic', targets='all', block=True):
608 d_list = []
618 def raw_map(self, func, sequences, dist='b', targets='all', block=True):
619 """
620 A parallelized version of Python's builtin map.
621
622 This has a slightly different syntax than the builtin `map`.
623 This is needed because we need to have keyword arguments and thus
624 can't use *args to capture all the sequences. Instead, they must
625 be passed in a list or tuple.
626
627 raw_map(func, seqs) -> map(func, seqs[0], seqs[1], ...)
628
629 Most users will want to use parallel functions or the `mapper`
630 and `map` methods for an API that follows that of the builtin
631 `map`.
632 """
633 if not isinstance(sequences, (list, tuple)):
634 raise TypeError('sequences must be a list or tuple')
635 max_len = max(len(s) for s in sequences)
636 for s in sequences:
637 if len(s)!=max_len:
638 raise ValueError('all sequences must have equal length')
609 639 if isinstance(func, FunctionType):
610 640 d = self.push_function(dict(_ipython_map_func=func), targets=targets, block=False)
611 641 d.addCallback(lambda did: self.get_pending_deferred(did, True))
612 sourceToRun = '_ipython_map_seq_result = map(_ipython_map_func, _ipython_map_seq)'
642 sourceToRun = '_ipython_map_seq_result = map(_ipython_map_func, *zip(*_ipython_map_seq))'
613 643 elif isinstance(func, str):
614 644 d = defer.succeed(None)
615 645 sourceToRun = \
616 '_ipython_map_seq_result = map(%s, _ipython_map_seq)' % func
646 '_ipython_map_seq_result = map(%s, *zip(*_ipython_map_seq))' % func
617 647 else:
618 648 raise TypeError("func must be a function or str")
619 649
620 d.addCallback(lambda _: self.scatter('_ipython_map_seq', seq, style, targets=targets))
650 d.addCallback(lambda _: self.scatter('_ipython_map_seq', zip(*sequences), dist, targets=targets))
621 651 d.addCallback(lambda _: self.execute(sourceToRun, targets=targets, block=False))
622 652 d.addCallback(lambda did: self.get_pending_deferred(did, True))
623 d.addCallback(lambda _: self.gather('_ipython_map_seq_result', style, targets=targets, block=block))
653 d.addCallback(lambda _: self.gather('_ipython_map_seq_result', dist, targets=targets, block=block))
624 654 return d
625 655
656 def map(self, func, *sequences):
657 """
658 A parallel version of Python's builtin `map` function.
659
660 This method applies a function to sequences of arguments. It
661 follows the same syntax as the builtin `map`.
662
663 This method creates a mapper objects by calling `self.mapper` with
664 no arguments and then uses that mapper to do the mapping. See
665 the documentation of `mapper` for more details.
666 """
667 return self.mapper().map(func, *sequences)
668
669 def mapper(self, dist='b', targets='all', block=True):
670 """
671 Create a mapper object that has a `map` method.
672
673 This method returns an object that implements the `IMapper`
674 interface. This method is a factory that is used to control how
675 the map happens.
676
677 :Parameters:
678 dist : str
679 What decomposition to use, 'b' is the only one supported
680 currently
681 targets : str, int, sequence of ints
682 Which engines to use for the map
683 block : boolean
684 Should calls to `map` block or not
685 """
686 return MultiEngineMapper(self, dist, targets, block)
687
688 def parallel(self, dist='b', targets='all', block=True):
689 """
690 A decorator that turns a function into a parallel function.
691
692 This can be used as:
693
694 @parallel()
695 def f(x, y)
696 ...
697
698 f(range(10), range(10))
699
700 This causes f(0,0), f(1,1), ... to be called in parallel.
701
702 :Parameters:
703 dist : str
704 What decomposition to use, 'b' is the only one supported
705 currently
706 targets : str, int, sequence of ints
707 Which engines to use for the map
708 block : boolean
709 Should calls to `map` block or not
710 """
711 mapper = self.mapper(dist, targets, block)
712 pf = ParallelFunction(mapper)
713 return pf
714
626 715 #---------------------------------------------------------------------------
627 716 # ISynchronousMultiEngineExtras related methods
628 717 #---------------------------------------------------------------------------
629 718
630 719 def _transformPullResult(self, pushResult, multitargets, lenKeys):
631 720 if not multitargets:
632 721 result = pushResult[0]
633 722 elif lenKeys > 1:
634 723 result = zip(*pushResult)
635 724 elif lenKeys is 1:
636 725 result = list(pushResult)
637 726 return result
638 727
639 728 def zip_pull(self, keys, targets='all', block=True):
640 729 multitargets = not isinstance(targets, int) and len(targets) > 1
641 730 lenKeys = len(keys)
642 731 d = self.pull(keys, targets=targets, block=block)
643 732 if block:
644 733 d.addCallback(self._transformPullResult, multitargets, lenKeys)
645 734 else:
646 735 d.addCallback(lambda did: self._addDeferredIDCallback(did, self._transformPullResult, multitargets, lenKeys))
647 736 return d
648 737
649 738 def run(self, fname, targets='all', block=True):
650 739 fileobj = open(fname,'r')
651 740 source = fileobj.read()
652 741 fileobj.close()
653 742 # if the compilation blows, we get a local error right away
654 743 try:
655 744 code = compile(source,fname,'exec')
656 745 except:
657 746 return defer.fail(failure.Failure())
658 747 # Now run the code
659 748 d = self.execute(source, targets=targets, block=block)
660 749 return d
661 750
662 751 #---------------------------------------------------------------------------
663 752 # IBlockingClientAdaptor related methods
664 753 #---------------------------------------------------------------------------
665 754
666 755 def adapt_to_blocking_client(self):
667 756 from IPython.kernel.multiengineclient import IFullBlockingMultiEngineClient
668 757 return IFullBlockingMultiEngineClient(self)
@@ -1,32 +1,107 b''
1 1 # encoding: utf-8
2 2
3 3 """A parallelized function that does scatter/execute/gather."""
4 4
5 5 __docformat__ = "restructuredtext en"
6 6
7 7 #-------------------------------------------------------------------------------
8 8 # Copyright (C) 2008 The IPython Development Team
9 9 #
10 10 # Distributed under the terms of the BSD License. The full license is in
11 11 # the file COPYING, distributed as part of this software.
12 12 #-------------------------------------------------------------------------------
13 13
14 14 #-------------------------------------------------------------------------------
15 15 # Imports
16 16 #-------------------------------------------------------------------------------
17 17
18 18 from types import FunctionType
19 from zope.interface import Interface, implements
19 20
20 class ParallelFunction:
21 """A function that operates in parallel on sequences."""
22 def __init__(self, func, multiengine, targets, block):
23 """Create a `ParallelFunction`.
21
22 class IMultiEngineParallelDecorator(Interface):
23 """A decorator that creates a parallel function."""
24
25 def parallel(dist='b', targets=None, block=None):
26 """
27 A decorator that turns a function into a parallel function.
28
29 This can be used as:
30
31 @parallel()
32 def f(x, y)
33 ...
34
35 f(range(10), range(10))
36
37 This causes f(0,0), f(1,1), ... to be called in parallel.
38
39 :Parameters:
40 dist : str
41 What decomposition to use, 'b' is the only one supported
42 currently
43 targets : str, int, sequence of ints
44 Which engines to use for the map
45 block : boolean
46 Should calls to `map` block or not
47 """
48
49 class ITaskParallelDecorator(Interface):
50 """A decorator that creates a parallel function."""
51
52 def parallel(clear_before=False, clear_after=False, retries=0,
53 recovery_task=None, depend=None, block=True):
54 """
55 A decorator that turns a function into a parallel function.
56
57 This can be used as:
58
59 @parallel()
60 def f(x, y)
61 ...
62
63 f(range(10), range(10))
64
65 This causes f(0,0), f(1,1), ... to be called in parallel.
66
67 See the documentation for `IPython.kernel.task.BaseTask` for
68 documentation on the arguments to this method.
69 """
70
71 class IParallelFunction(Interface):
72 pass
73
74 class ParallelFunction(object):
75 """
76 The implementation of a parallel function.
77
78 A parallel function is similar to Python's map function:
79
80 map(func, *sequences) -> pfunc(*sequences)
81
82 Parallel functions should be created by using the @parallel decorator.
83 """
84
85 implements(IParallelFunction)
86
87 def __init__(self, mapper):
88 """
89 Create a parallel function from an `IMapper`.
90
91 :Parameters:
92 mapper : an `IMapper` implementer.
93 The mapper to use for the parallel function
94 """
95 self.mapper = mapper
96
97 def __call__(self, func):
98 """
99 Decorate a function to make it run in parallel.
24 100 """
25 101 assert isinstance(func, (str, FunctionType)), "func must be a fuction or str"
26 102 self.func = func
27 self.multiengine = multiengine
28 self.targets = targets
29 self.block = block
30
31 def __call__(self, sequence):
32 return self.multiengine.map(self.func, sequence, targets=self.targets, block=self.block) No newline at end of file
103 def call_function(*sequences):
104 return self.mapper.map(self.func, *sequences)
105 return call_function
106
107 No newline at end of file
@@ -1,323 +1,324 b''
1 1 #!/usr/bin/env python
2 2 # encoding: utf-8
3 3
4 4 """Start an IPython cluster conveniently, either locally or remotely.
5 5
6 6 Basic usage
7 7 -----------
8 8
9 9 For local operation, the simplest mode of usage is:
10 10
11 11 %prog -n N
12 12
13 13 where N is the number of engines you want started.
14 14
15 15 For remote operation, you must call it with a cluster description file:
16 16
17 17 %prog -f clusterfile.py
18 18
19 19 The cluster file is a normal Python script which gets run via execfile(). You
20 20 can have arbitrary logic in it, but all that matters is that at the end of the
21 21 execution, it declares the variables 'controller', 'engines', and optionally
22 22 'sshx'. See the accompanying examples for details on what these variables must
23 23 contain.
24 24
25 25
26 26 Notes
27 27 -----
28 28
29 29 WARNING: this code is still UNFINISHED and EXPERIMENTAL! It is incomplete,
30 30 some listed options are not really implemented, and all of its interfaces are
31 31 subject to change.
32 32
33 33 When operating over SSH for a remote cluster, this program relies on the
34 34 existence of a particular script called 'sshx'. This script must live in the
35 35 target systems where you'll be running your controller and engines, and is
36 36 needed to configure your PATH and PYTHONPATH variables for further execution of
37 37 python code at the other end of an SSH connection. The script can be as simple
38 38 as:
39 39
40 40 #!/bin/sh
41 41 . $HOME/.bashrc
42 42 "$@"
43 43
44 44 which is the default one provided by IPython. You can modify this or provide
45 45 your own. Since it's quite likely that for different clusters you may need
46 46 this script to configure things differently or that it may live in different
47 47 locations, its full path can be set in the same file where you define the
48 48 cluster setup. IPython's order of evaluation for this variable is the
49 49 following:
50 50
51 51 a) Internal default: 'sshx'. This only works if it is in the default system
52 52 path which SSH sets up in non-interactive mode.
53 53
54 54 b) Environment variable: if $IPYTHON_SSHX is defined, this overrides the
55 55 internal default.
56 56
57 57 c) Variable 'sshx' in the cluster configuration file: finally, this will
58 58 override the previous two values.
59 59
60 60 This code is Unix-only, with precious little hope of any of this ever working
61 61 under Windows, since we need SSH from the ground up, we background processes,
62 62 etc. Ports of this functionality to Windows are welcome.
63 63
64 64
65 65 Call summary
66 66 ------------
67 67
68 68 %prog [options]
69 69 """
70 70
71 71 __docformat__ = "restructuredtext en"
72 72
73 73 #-------------------------------------------------------------------------------
74 74 # Copyright (C) 2008 The IPython Development Team
75 75 #
76 76 # Distributed under the terms of the BSD License. The full license is in
77 77 # the file COPYING, distributed as part of this software.
78 78 #-------------------------------------------------------------------------------
79 79
80 80 #-------------------------------------------------------------------------------
81 81 # Stdlib imports
82 82 #-------------------------------------------------------------------------------
83 83
84 84 import os
85 85 import signal
86 86 import sys
87 87 import time
88 88
89 89 from optparse import OptionParser
90 90 from subprocess import Popen,call
91 91
92 92 #---------------------------------------------------------------------------
93 93 # IPython imports
94 94 #---------------------------------------------------------------------------
95 95 from IPython.tools import utils
96 96 from IPython.config import cutils
97 97
98 98 #---------------------------------------------------------------------------
99 99 # Normal code begins
100 100 #---------------------------------------------------------------------------
101 101
102 102 def parse_args():
103 103 """Parse command line and return opts,args."""
104 104
105 105 parser = OptionParser(usage=__doc__)
106 106 newopt = parser.add_option # shorthand
107 107
108 108 newopt("--controller-port", type="int", dest="controllerport",
109 109 help="the TCP port the controller is listening on")
110 110
111 111 newopt("--controller-ip", type="string", dest="controllerip",
112 112 help="the TCP ip address of the controller")
113 113
114 114 newopt("-n", "--num", type="int", dest="n",default=2,
115 115 help="the number of engines to start")
116 116
117 117 newopt("--engine-port", type="int", dest="engineport",
118 118 help="the TCP port the controller will listen on for engine "
119 119 "connections")
120 120
121 121 newopt("--engine-ip", type="string", dest="engineip",
122 122 help="the TCP ip address the controller will listen on "
123 123 "for engine connections")
124 124
125 125 newopt("--mpi", type="string", dest="mpi",
126 126 help="use mpi with package: for instance --mpi=mpi4py")
127 127
128 128 newopt("-l", "--logfile", type="string", dest="logfile",
129 129 help="log file name")
130 130
131 131 newopt('-f','--cluster-file',dest='clusterfile',
132 132 help='file describing a remote cluster')
133 133
134 134 return parser.parse_args()
135 135
136 136 def numAlive(controller,engines):
137 137 """Return the number of processes still alive."""
138 138 retcodes = [controller.poll()] + \
139 139 [e.poll() for e in engines]
140 140 return retcodes.count(None)
141 141
142 142 stop = lambda pid: os.kill(pid,signal.SIGINT)
143 143 kill = lambda pid: os.kill(pid,signal.SIGTERM)
144 144
145 145 def cleanup(clean,controller,engines):
146 146 """Stop the controller and engines with the given cleanup method."""
147 147
148 148 for e in engines:
149 149 if e.poll() is None:
150 150 print 'Stopping engine, pid',e.pid
151 151 clean(e.pid)
152 152 if controller.poll() is None:
153 153 print 'Stopping controller, pid',controller.pid
154 154 clean(controller.pid)
155 155
156 156
157 157 def ensureDir(path):
158 158 """Ensure a directory exists or raise an exception."""
159 159 if not os.path.isdir(path):
160 160 os.makedirs(path)
161 161
162 162
163 163 def startMsg(control_host,control_port=10105):
164 164 """Print a startup message"""
165 165 print
166 166 print 'Your cluster is up and running.'
167 167 print
168 168 print 'For interactive use, you can make a MultiEngineClient with:'
169 169 print
170 170 print 'from IPython.kernel import client'
171 print "mec = client.MultiEngineClient((%r,%s))" % \
172 (control_host,control_port)
171 print "mec = client.MultiEngineClient()"
173 172 print
174 173 print 'You can then cleanly stop the cluster from IPython using:'
175 174 print
176 175 print 'mec.kill(controller=True)'
177 176 print
178 177
179 178
180 179 def clusterLocal(opt,arg):
181 180 """Start a cluster on the local machine."""
182 181
183 182 # Store all logs inside the ipython directory
184 183 ipdir = cutils.get_ipython_dir()
185 184 pjoin = os.path.join
186 185
187 186 logfile = opt.logfile
188 187 if logfile is None:
189 188 logdir_base = pjoin(ipdir,'log')
190 189 ensureDir(logdir_base)
191 190 logfile = pjoin(logdir_base,'ipcluster-')
192 191
193 192 print 'Starting controller:',
194 controller = Popen(['ipcontroller','--logfile',logfile])
193 controller = Popen(['ipcontroller','--logfile',logfile,'-x','-y'])
195 194 print 'Controller PID:',controller.pid
196 195
197 196 print 'Starting engines: ',
198 time.sleep(3)
197 time.sleep(5)
199 198
200 199 englogfile = '%s%s-' % (logfile,controller.pid)
201 200 mpi = opt.mpi
202 201 if mpi: # start with mpi - killing the engines with sigterm will not work if you do this
203 engines = [Popen(['mpirun', '-np', str(opt.n), 'ipengine', '--mpi', mpi, '--logfile',englogfile])]
202 engines = [Popen(['mpirun', '-np', str(opt.n), 'ipengine', '--mpi',
203 mpi, '--logfile',englogfile])]
204 # engines = [Popen(['mpirun', '-np', str(opt.n), 'ipengine', '--mpi', mpi])]
204 205 else: # do what we would normally do
205 206 engines = [ Popen(['ipengine','--logfile',englogfile])
206 207 for i in range(opt.n) ]
207 208 eids = [e.pid for e in engines]
208 209 print 'Engines PIDs: ',eids
209 210 print 'Log files: %s*' % englogfile
210 211
211 212 proc_ids = eids + [controller.pid]
212 213 procs = engines + [controller]
213 214
214 215 grpid = os.getpgrp()
215 216 try:
216 217 startMsg('127.0.0.1')
217 218 print 'You can also hit Ctrl-C to stop it, or use from the cmd line:'
218 219 print
219 220 print 'kill -INT',grpid
220 221 print
221 222 try:
222 223 while True:
223 224 time.sleep(5)
224 225 except:
225 226 pass
226 227 finally:
227 228 print 'Stopping cluster. Cleaning up...'
228 229 cleanup(stop,controller,engines)
229 230 for i in range(4):
230 231 time.sleep(i+2)
231 232 nZombies = numAlive(controller,engines)
232 233 if nZombies== 0:
233 234 print 'OK: All processes cleaned up.'
234 235 break
235 236 print 'Trying again, %d processes did not stop...' % nZombies
236 237 cleanup(kill,controller,engines)
237 238 if numAlive(controller,engines) == 0:
238 239 print 'OK: All processes cleaned up.'
239 240 break
240 241 else:
241 242 print '*'*75
242 243 print 'ERROR: could not kill some processes, try to do it',
243 244 print 'manually.'
244 245 zombies = []
245 246 if controller.returncode is None:
246 247 print 'Controller is alive: pid =',controller.pid
247 248 zombies.append(controller.pid)
248 249 liveEngines = [ e for e in engines if e.returncode is None ]
249 250 for e in liveEngines:
250 251 print 'Engine is alive: pid =',e.pid
251 252 zombies.append(e.pid)
252 253 print
253 254 print 'Zombie summary:',' '.join(map(str,zombies))
254 255
255 256 def clusterRemote(opt,arg):
256 257 """Start a remote cluster over SSH"""
257 258
258 259 # Load the remote cluster configuration
259 260 clConfig = {}
260 261 execfile(opt.clusterfile,clConfig)
261 262 contConfig = clConfig['controller']
262 263 engConfig = clConfig['engines']
263 264 # Determine where to find sshx:
264 265 sshx = clConfig.get('sshx',os.environ.get('IPYTHON_SSHX','sshx'))
265 266
266 267 # Store all logs inside the ipython directory
267 268 ipdir = cutils.get_ipython_dir()
268 269 pjoin = os.path.join
269 270
270 271 logfile = opt.logfile
271 272 if logfile is None:
272 273 logdir_base = pjoin(ipdir,'log')
273 274 ensureDir(logdir_base)
274 275 logfile = pjoin(logdir_base,'ipcluster')
275 276
276 277 # Append this script's PID to the logfile name always
277 278 logfile = '%s-%s' % (logfile,os.getpid())
278 279
279 280 print 'Starting controller:'
280 281 # Controller data:
281 282 xsys = os.system
282 283
283 284 contHost = contConfig['host']
284 285 contLog = '%s-con-%s-' % (logfile,contHost)
285 286 cmd = "ssh %s '%s' 'ipcontroller --logfile %s' &" % \
286 287 (contHost,sshx,contLog)
287 288 #print 'cmd:<%s>' % cmd # dbg
288 289 xsys(cmd)
289 290 time.sleep(2)
290 291
291 292 print 'Starting engines: '
292 293 for engineHost,engineData in engConfig.iteritems():
293 294 if isinstance(engineData,int):
294 295 numEngines = engineData
295 296 else:
296 297 raise NotImplementedError('port configuration not finished for engines')
297 298
298 299 print 'Sarting %d engines on %s' % (numEngines,engineHost)
299 300 engLog = '%s-eng-%s-' % (logfile,engineHost)
300 301 for i in range(numEngines):
301 302 cmd = "ssh %s '%s' 'ipengine --controller-ip %s --logfile %s' &" % \
302 303 (engineHost,sshx,contHost,engLog)
303 304 #print 'cmd:<%s>' % cmd # dbg
304 305 xsys(cmd)
305 306 # Wait after each host a little bit
306 307 time.sleep(1)
307 308
308 309 startMsg(contConfig['host'])
309 310
310 311 def main():
311 312 """Main driver for the two big options: local or remote cluster."""
312 313
313 314 opt,arg = parse_args()
314 315
315 316 clusterfile = opt.clusterfile
316 317 if clusterfile:
317 318 clusterRemote(opt,arg)
318 319 else:
319 320 clusterLocal(opt,arg)
320 321
321 322
322 323 if __name__=='__main__':
323 324 main()
@@ -1,169 +1,171 b''
1 1 #!/usr/bin/env python
2 2 # encoding: utf-8
3 3
4 4 """Start the IPython Engine."""
5 5
6 6 __docformat__ = "restructuredtext en"
7 7
8 8 #-------------------------------------------------------------------------------
9 9 # Copyright (C) 2008 The IPython Development Team
10 10 #
11 11 # Distributed under the terms of the BSD License. The full license is in
12 12 # the file COPYING, distributed as part of this software.
13 13 #-------------------------------------------------------------------------------
14 14
15 15 #-------------------------------------------------------------------------------
16 16 # Imports
17 17 #-------------------------------------------------------------------------------
18 18
19 19 # Python looks for an empty string at the beginning of sys.path to enable
20 20 # importing from the cwd.
21 21 import sys
22 22 sys.path.insert(0, '')
23 23
24 24 import sys, os
25 25 from optparse import OptionParser
26 26
27 27 from twisted.application import service
28 28 from twisted.internet import reactor
29 29 from twisted.python import log
30 30
31 31 from IPython.kernel.fcutil import Tub, UnauthenticatedTub
32 32
33 33 from IPython.kernel.core.config import config_manager as core_config_manager
34 34 from IPython.config.cutils import import_item
35 35 from IPython.kernel.engineservice import EngineService
36 36 from IPython.kernel.config import config_manager as kernel_config_manager
37 37 from IPython.kernel.engineconnector import EngineConnector
38 38
39 39
40 40 #-------------------------------------------------------------------------------
41 41 # Code
42 42 #-------------------------------------------------------------------------------
43 43
44 44 def start_engine():
45 45 """
46 46 Start the engine, by creating it and starting the Twisted reactor.
47 47
48 48 This method does:
49 49
50 50 * If it exists, runs the `mpi_import_statement` to call `MPI_Init`
51 51 * Starts the engine logging
52 52 * Creates an IPython shell and wraps it in an `EngineService`
53 53 * Creates a `foolscap.Tub` to use in connecting to a controller.
54 54 * Uses the tub and the `EngineService` along with a Foolscap URL
55 55 (or FURL) to connect to the controller and register the engine
56 56 with the controller
57 57 """
58 58 kernel_config = kernel_config_manager.get_config_obj()
59 59 core_config = core_config_manager.get_config_obj()
60 60
61
61 62 # Execute the mpi import statement that needs to call MPI_Init
63 global mpi
62 64 mpikey = kernel_config['mpi']['default']
63 65 mpi_import_statement = kernel_config['mpi'].get(mpikey, None)
64 66 if mpi_import_statement is not None:
65 67 try:
66 exec mpi_import_statement in locals(), globals()
68 exec mpi_import_statement in globals()
67 69 except:
68 70 mpi = None
69 71 else:
70 72 mpi = None
71 73
72 74 # Start logging
73 75 logfile = kernel_config['engine']['logfile']
74 76 if logfile:
75 77 logfile = logfile + str(os.getpid()) + '.log'
76 78 try:
77 79 openLogFile = open(logfile, 'w')
78 80 except:
79 81 openLogFile = sys.stdout
80 82 else:
81 83 openLogFile = sys.stdout
82 84 log.startLogging(openLogFile)
83 85
84 86 # Create the underlying shell class and EngineService
85 87 shell_class = import_item(core_config['shell']['shell_class'])
86 88 engine_service = EngineService(shell_class, mpi=mpi)
87 89 shell_import_statement = core_config['shell']['import_statement']
88 90 if shell_import_statement:
89 91 try:
90 92 engine_service.execute(shell_import_statement)
91 93 except:
92 94 log.msg("Error running import_statement: %s" % sis)
93 95
94 96 # Create the service hierarchy
95 97 main_service = service.MultiService()
96 98 engine_service.setServiceParent(main_service)
97 99 tub_service = Tub()
98 100 tub_service.setServiceParent(main_service)
99 101 # This needs to be called before the connection is initiated
100 102 main_service.startService()
101 103
102 104 # This initiates the connection to the controller and calls
103 105 # register_engine to tell the controller we are ready to do work
104 106 engine_connector = EngineConnector(tub_service)
105 107 furl_file = kernel_config['engine']['furl_file']
106 108 d = engine_connector.connect_to_controller(engine_service, furl_file)
107 109 d.addErrback(lambda _: reactor.stop())
108 110
109 111 reactor.run()
110 112
111 113
112 114 def init_config():
113 115 """
114 116 Initialize the configuration using default and command line options.
115 117 """
116 118
117 119 parser = OptionParser()
118 120
119 121 parser.add_option(
120 122 "--furl-file",
121 123 type="string",
122 124 dest="furl_file",
123 125 help="The filename containing the FURL of the controller"
124 126 )
125 127 parser.add_option(
126 128 "--mpi",
127 129 type="string",
128 130 dest="mpi",
129 131 help="How to enable MPI (mpi4py, pytrilinos, or empty string to disable)"
130 132 )
131 133 parser.add_option(
132 134 "-l",
133 135 "--logfile",
134 136 type="string",
135 137 dest="logfile",
136 138 help="log file name (default is stdout)"
137 139 )
138 140 parser.add_option(
139 141 "--ipythondir",
140 142 type="string",
141 143 dest="ipythondir",
142 144 help="look for config files and profiles in this directory"
143 145 )
144 146
145 147 (options, args) = parser.parse_args()
146 148
147 149 kernel_config_manager.update_config_obj_from_default_file(options.ipythondir)
148 150 core_config_manager.update_config_obj_from_default_file(options.ipythondir)
149 151
150 152 kernel_config = kernel_config_manager.get_config_obj()
151 153 # Now override with command line options
152 154 if options.furl_file is not None:
153 155 kernel_config['engine']['furl_file'] = options.furl_file
154 156 if options.logfile is not None:
155 157 kernel_config['engine']['logfile'] = options.logfile
156 158 if options.mpi is not None:
157 159 kernel_config['mpi']['default'] = options.mpi
158 160
159 161
160 162 def main():
161 163 """
162 164 After creating the configuration information, start the engine.
163 165 """
164 166 init_config()
165 167 start_engine()
166 168
167 169
168 170 if __name__ == "__main__":
169 171 main() No newline at end of file
This diff has been collapsed as it changes many lines, (674 lines changed) Show them Hide them
@@ -1,799 +1,1113 b''
1 1 # encoding: utf-8
2 2 # -*- test-case-name: IPython.kernel.tests.test_task -*-
3 3
4 4 """Task farming representation of the ControllerService."""
5 5
6 6 __docformat__ = "restructuredtext en"
7 7
8 #-------------------------------------------------------------------------------
8 #-----------------------------------------------------------------------------
9 9 # Copyright (C) 2008 The IPython Development Team
10 10 #
11 11 # Distributed under the terms of the BSD License. The full license is in
12 12 # the file COPYING, distributed as part of this software.
13 #-------------------------------------------------------------------------------
13 #-----------------------------------------------------------------------------
14 14
15 #-------------------------------------------------------------------------------
15 #-----------------------------------------------------------------------------
16 16 # Imports
17 #-------------------------------------------------------------------------------
17 #-----------------------------------------------------------------------------
18 18
19 19 import copy, time
20 from types import FunctionType as function
20 from types import FunctionType
21 21
22 22 import zope.interface as zi, string
23 23 from twisted.internet import defer, reactor
24 24 from twisted.python import components, log, failure
25 25
26 # from IPython.genutils import time
27
26 from IPython.kernel.util import printer
28 27 from IPython.kernel import engineservice as es, error
29 28 from IPython.kernel import controllerservice as cs
30 29 from IPython.kernel.twistedutil import gatherBoth, DeferredList
31 30
32 from IPython.kernel.pickleutil import can,uncan, CannedFunction
33
34 def canTask(task):
35 t = copy.copy(task)
36 t.depend = can(t.depend)
37 if t.recovery_task:
38 t.recovery_task = canTask(t.recovery_task)
39 return t
31 from IPython.kernel.pickleutil import can, uncan, CannedFunction
40 32
41 def uncanTask(task):
42 t = copy.copy(task)
43 t.depend = uncan(t.depend)
44 if t.recovery_task and t.recovery_task is not task:
45 t.recovery_task = uncanTask(t.recovery_task)
46 return t
33 #-----------------------------------------------------------------------------
34 # Definition of the Task objects
35 #-----------------------------------------------------------------------------
47 36
48 37 time_format = '%Y/%m/%d %H:%M:%S'
49 38
50 class Task(object):
51 """Our representation of a task for the `TaskController` interface.
52
53 The user should create instances of this class to represent a task that
54 needs to be done.
55
56 :Parameters:
57 expression : str
58 A str that is valid python code that is the task.
59 pull : str or list of str
60 The names of objects to be pulled as results. If not specified,
61 will return {'result', None}
62 push : dict
63 A dict of objects to be pushed into the engines namespace before
64 execution of the expression.
65 clear_before : boolean
66 Should the engine's namespace be cleared before the task is run.
67 Default=False.
68 clear_after : boolean
69 Should the engine's namespace be cleared after the task is run.
70 Default=False.
71 retries : int
72 The number of times to resumbit the task if it fails. Default=0.
73 recovery_task : Task
74 This is the Task to be run when the task has exhausted its retries
75 Default=None.
76 depend : bool function(properties)
77 This is the dependency function for the Task, which determines
78 whether a task can be run on a Worker. `depend` is called with
79 one argument, the worker's properties dict, and should return
80 True if the worker meets the dependencies or False if it does
81 not.
82 Default=None - run on any worker
83 options : dict
84 Any other keyword options for more elaborate uses of tasks
85
86 Examples
87 --------
39 class ITask(zi.Interface):
40 """
41 This interface provides a generic definition of what constitutes a task.
42
43 There are two sides to a task. First a task needs to take input from
44 a user to determine what work is performed by the task. Second, the
45 task needs to have the logic that knows how to turn that information
46 info specific calls to a worker, through the `IQueuedEngine` interface.
88 47
89 >>> t = Task('dostuff(args)')
90 >>> t = Task('a=5', pull='a')
91 >>> t = Task('a=5\nb=4', pull=['a','b'])
92 >>> t = Task('os.kill(os.getpid(),9)', retries=100) # this is a bad idea
93 # A dependency case:
94 >>> def hasMPI(props):
95 ... return props.get('mpi') is not None
96 >>> t = Task('mpi.send(blah,blah)', depend = hasMPI)
48 Many method in this class get two things passed to them: a Deferred
49 and an IQueuedEngine implementer. Such methods should register callbacks
50 on the Deferred that use the IQueuedEngine to accomplish something. See
51 the existing task objects for examples.
97 52 """
98 53
99 def __init__(self, expression, pull=None, push=None,
100 clear_before=False, clear_after=False, retries=0,
101 recovery_task=None, depend=None, **options):
102 self.expression = expression
103 if isinstance(pull, str):
104 self.pull = [pull]
105 else:
106 self.pull = pull
107 self.push = push
54 zi.Attribute('retries','How many times to retry the task')
55 zi.Attribute('recovery_task','A task to try if the initial one fails')
56 zi.Attribute('taskid','the id of the task')
57
58 def start_time(result):
59 """
60 Do anything needed to start the timing of the task.
61
62 Must simply return the result after starting the timers.
63 """
64
65 def stop_time(result):
66 """
67 Do anything needed to stop the timing of the task.
68
69 Must simply return the result after stopping the timers. This
70 method will usually set attributes that are used by `process_result`
71 in building result of the task.
72 """
73
74 def pre_task(d, queued_engine):
75 """Do something with the queued_engine before the task is run.
76
77 This method should simply add callbacks to the input Deferred
78 that do something with the `queued_engine` before the task is run.
79
80 :Parameters:
81 d : Deferred
82 The deferred that actions should be attached to
83 queued_engine : IQueuedEngine implementer
84 The worker that has been allocated to perform the task
85 """
86
87 def post_task(d, queued_engine):
88 """Do something with the queued_engine after the task is run.
89
90 This method should simply add callbacks to the input Deferred
91 that do something with the `queued_engine` before the task is run.
92
93 :Parameters:
94 d : Deferred
95 The deferred that actions should be attached to
96 queued_engine : IQueuedEngine implementer
97 The worker that has been allocated to perform the task
98 """
99
100 def submit_task(d, queued_engine):
101 """Submit a task using the `queued_engine` we have been allocated.
102
103 When a task is ready to run, this method is called. This method
104 must take the internal information of the task and make suitable
105 calls on the queued_engine to have the actual work done.
106
107 This method should simply add callbacks to the input Deferred
108 that do something with the `queued_engine` before the task is run.
109
110 :Parameters:
111 d : Deferred
112 The deferred that actions should be attached to
113 queued_engine : IQueuedEngine implementer
114 The worker that has been allocated to perform the task
115 """
116
117 def process_result(d, result, engine_id):
118 """Take a raw task result.
119
120 Objects that implement `ITask` can choose how the result of running
121 the task is presented. This method takes the raw result and
122 does this logic. Two example are the `MapTask` which simply returns
123 the raw result or a `Failure` object and the `StringTask` which
124 returns a `TaskResult` object.
125
126 :Parameters:
127 d : Deferred
128 The deferred that actions should be attached to
129 result : object
130 The raw task result that needs to be wrapped
131 engine_id : int
132 The id of the engine that did the task
133
134 :Returns:
135 The result, as a tuple of the form: (success, result).
136 Here, success is a boolean indicating if the task
137 succeeded or failed and result is the result.
138 """
139
140 def check_depend(properties):
141 """Check properties to see if the task should be run.
142
143 :Parameters:
144 properties : dict
145 A dictionary of properties that an engine has set
146
147 :Returns:
148 True if the task should be run, False otherwise
149 """
150
151 def can_task(self):
152 """Serialize (can) any functions in the task for pickling.
153
154 Subclasses must override this method and make sure that all
155 functions in the task are canned by calling `can` on the
156 function.
157 """
158
159 def uncan_task(self):
160 """Unserialize (uncan) any canned function in the task."""
161
162 class BaseTask(object):
163 """
164 Common fuctionality for all objects implementing `ITask`.
165 """
166
167 zi.implements(ITask)
168
169 def __init__(self, clear_before=False, clear_after=False, retries=0,
170 recovery_task=None, depend=None):
171 """
172 Make a generic task.
173
174 :Parameters:
175 clear_before : boolean
176 Should the engines namespace be cleared before the task
177 is run
178 clear_after : boolean
179 Should the engines namespace be clear after the task is run
180 retries : int
181 The number of times a task should be retries upon failure
182 recovery_task : any task object
183 If a task fails and it has a recovery_task, that is run
184 upon a retry
185 depend : FunctionType
186 A function that is called to test for properties. This function
187 must take one argument, the properties dict and return a boolean
188 """
108 189 self.clear_before = clear_before
109 190 self.clear_after = clear_after
110 self.retries=retries
191 self.retries = retries
111 192 self.recovery_task = recovery_task
112 193 self.depend = depend
113 self.options = options
114 194 self.taskid = None
195
196 def start_time(self, result):
197 """
198 Start the basic timers.
199 """
200 self.start = time.time()
201 self.start_struct = time.localtime()
202 return result
203
204 def stop_time(self, result):
205 """
206 Stop the basic timers.
207 """
208 self.stop = time.time()
209 self.stop_struct = time.localtime()
210 self.duration = self.stop - self.start
211 self.submitted = time.strftime(time_format, self.start_struct)
212 self.completed = time.strftime(time_format)
213 return result
214
215 def pre_task(self, d, queued_engine):
216 """
217 Clear the engine before running the task if clear_before is set.
218 """
219 if self.clear_before:
220 d.addCallback(lambda r: queued_engine.reset())
221
222 def post_task(self, d, queued_engine):
223 """
224 Clear the engine after running the task if clear_after is set.
225 """
226 def reseter(result):
227 queued_engine.reset()
228 return result
229 if self.clear_after:
230 d.addBoth(reseter)
231
232 def submit_task(self, d, queued_engine):
233 raise NotImplementedError('submit_task must be implemented in a subclass')
234
235 def process_result(self, result, engine_id):
236 """
237 Process a task result.
238
239 This is the default `process_result` that just returns the raw
240 result or a `Failure`.
241 """
242 if isinstance(result, failure.Failure):
243 return (False, result)
244 else:
245 return (True, result)
246
247 def check_depend(self, properties):
248 """
249 Calls self.depend(properties) to see if a task should be run.
250 """
251 if self.depend is not None:
252 return self.depend(properties)
253 else:
254 return True
255
256 def can_task(self):
257 self.depend = can(self.depend)
258 if isinstance(self.recovery_task, BaseTask):
259 self.recovery_task.can_task()
260
261 def uncan_task(self):
262 self.depend = uncan(self.depend)
263 if isinstance(self.recovery_task, BaseTask):
264 self.recovery_task.uncan_task()
265
266 class MapTask(BaseTask):
267 """
268 A task that consists of a function and arguments.
269 """
270
271 zi.implements(ITask)
272
273 def __init__(self, function, args=None, kwargs=None, clear_before=False,
274 clear_after=False, retries=0, recovery_task=None, depend=None):
275 """
276 Create a task based on a function, args and kwargs.
277
278 This is a simple type of task that consists of calling:
279 function(*args, **kwargs) and wrapping the result in a `TaskResult`.
280
281 The return value of the function, or a `Failure` wrapping an
282 exception is the task result for this type of task.
283 """
284 BaseTask.__init__(self, clear_before, clear_after, retries,
285 recovery_task, depend)
286 if not isinstance(function, FunctionType):
287 raise TypeError('a task function must be a FunctionType')
288 self.function = function
289 if args is None:
290 self.args = ()
291 else:
292 self.args = args
293 if not isinstance(self.args, (list, tuple)):
294 raise TypeError('a task args must be a list or tuple')
295 if kwargs is None:
296 self.kwargs = {}
297 else:
298 self.kwargs = kwargs
299 if not isinstance(self.kwargs, dict):
300 raise TypeError('a task kwargs must be a dict')
301
302 def submit_task(self, d, queued_engine):
303 d.addCallback(lambda r: queued_engine.push_function(
304 dict(_ipython_task_function=self.function))
305 )
306 d.addCallback(lambda r: queued_engine.push(
307 dict(_ipython_task_args=self.args,_ipython_task_kwargs=self.kwargs))
308 )
309 d.addCallback(lambda r: queued_engine.execute(
310 '_ipython_task_result = _ipython_task_function(*_ipython_task_args,**_ipython_task_kwargs)')
311 )
312 d.addCallback(lambda r: queued_engine.pull('_ipython_task_result'))
313
314 def can_task(self):
315 self.function = can(self.function)
316 BaseTask.can_task(self)
317
318 def uncan_task(self):
319 self.function = uncan(self.function)
320 BaseTask.uncan_task(self)
321
322
323 class StringTask(BaseTask):
324 """
325 A task that consists of a string of Python code to run.
326 """
327
328 def __init__(self, expression, pull=None, push=None,
329 clear_before=False, clear_after=False, retries=0,
330 recovery_task=None, depend=None):
331 """
332 Create a task based on a Python expression and variables
333
334 This type of task lets you push a set of variables to the engines
335 namespace, run a Python string in that namespace and then bring back
336 a different set of Python variables as the result.
337
338 Because this type of task can return many results (through the
339 `pull` keyword argument) it returns a special `TaskResult` object
340 that wraps the pulled variables, statistics about the run and
341 any exceptions raised.
342 """
343 if not isinstance(expression, str):
344 raise TypeError('a task expression must be a string')
345 self.expression = expression
346
347 if pull==None:
348 self.pull = ()
349 elif isinstance(pull, str):
350 self.pull = (pull,)
351 elif isinstance(pull, (list, tuple)):
352 self.pull = pull
353 else:
354 raise TypeError('pull must be str or a sequence of strs')
355
356 if push==None:
357 self.push = {}
358 elif isinstance(push, dict):
359 self.push = push
360 else:
361 raise TypeError('push must be a dict')
362
363 BaseTask.__init__(self, clear_before, clear_after, retries,
364 recovery_task, depend)
365
366 def submit_task(self, d, queued_engine):
367 if self.push is not None:
368 d.addCallback(lambda r: queued_engine.push(self.push))
369
370 d.addCallback(lambda r: queued_engine.execute(self.expression))
371
372 if self.pull is not None:
373 d.addCallback(lambda r: queued_engine.pull(self.pull))
374 else:
375 d.addCallback(lambda r: None)
376
377 def process_result(self, result, engine_id):
378 if isinstance(result, failure.Failure):
379 tr = TaskResult(result, engine_id)
380 else:
381 if self.pull is None:
382 resultDict = {}
383 elif len(self.pull) == 1:
384 resultDict = {self.pull[0]:result}
385 else:
386 resultDict = dict(zip(self.pull, result))
387 tr = TaskResult(resultDict, engine_id)
388 # Assign task attributes
389 tr.submitted = self.submitted
390 tr.completed = self.completed
391 tr.duration = self.duration
392 if hasattr(self,'taskid'):
393 tr.taskid = self.taskid
394 else:
395 tr.taskid = None
396 if isinstance(result, failure.Failure):
397 return (False, tr)
398 else:
399 return (True, tr)
115 400
116 class ResultNS:
117 """The result namespace object for use in TaskResult objects as tr.ns.
401 class ResultNS(object):
402 """
403 A dict like object for holding the results of a task.
404
405 The result namespace object for use in `TaskResult` objects as tr.ns.
118 406 It builds an object from a dictionary, such that it has attributes
119 407 according to the key,value pairs of the dictionary.
120 408
121 409 This works by calling setattr on ALL key,value pairs in the dict. If a user
122 410 chooses to overwrite the `__repr__` or `__getattr__` attributes, they can.
123 411 This can be a bad idea, as it may corrupt standard behavior of the
124 412 ns object.
125 413
126 414 Example
127 415 --------
128 416
129 417 >>> ns = ResultNS({'a':17,'foo':range(3)})
130 418 >>> print ns
131 419 NS{'a':17,'foo':range(3)}
132 420 >>> ns.a
133 421 17
134 422 >>> ns['foo']
135 423 [0,1,2]
136 424 """
137 425 def __init__(self, dikt):
138 426 for k,v in dikt.iteritems():
139 427 setattr(self,k,v)
140 428
141 429 def __repr__(self):
142 430 l = dir(self)
143 431 d = {}
144 432 for k in l:
145 433 # do not print private objects
146 434 if k[:2] != '__' and k[-2:] != '__':
147 435 d[k] = getattr(self, k)
148 436 return "NS"+repr(d)
149 437
150 438 def __getitem__(self, key):
151 439 return getattr(self, key)
152 440
153 441 class TaskResult(object):
154 442 """
155 An object for returning task results.
443 An object for returning task results for certain types of tasks.
156 444
157 445 This object encapsulates the results of a task. On task
158 446 success it will have a keys attribute that will have a list
159 447 of the variables that have been pulled back. These variables
160 448 are accessible as attributes of this class as well. On
161 449 success the failure attribute will be None.
162 450
163 451 In task failure, keys will be empty, but failure will contain
164 452 the failure object that encapsulates the remote exception.
165 One can also simply call the raiseException() method of
453 One can also simply call the `raise_exception` method of
166 454 this class to re-raise any remote exception in the local
167 455 session.
168 456
169 The TaskResult has a .ns member, which is a property for access
457 The `TaskResult` has a `.ns` member, which is a property for access
170 458 to the results. If the Task had pull=['a', 'b'], then the
171 Task Result will have attributes tr.ns.a, tr.ns.b for those values.
172 Accessing tr.ns will raise the remote failure if the task failed.
459 Task Result will have attributes `tr.ns.a`, `tr.ns.b` for those values.
460 Accessing `tr.ns` will raise the remote failure if the task failed.
173 461
174 The engineid attribute should have the engineid of the engine
175 that ran the task. But, because engines can come and go in
176 the ipython task system, the engineid may not continue to be
462 The `engineid` attribute should have the `engineid` of the engine
463 that ran the task. But, because engines can come and go,
464 the `engineid` may not continue to be
177 465 valid or accurate.
178 466
179 The taskid attribute simply gives the taskid that the task
467 The `taskid` attribute simply gives the `taskid` that the task
180 468 is tracked under.
181 469 """
182 470 taskid = None
183 471
184 472 def _getNS(self):
185 473 if isinstance(self.failure, failure.Failure):
186 474 return self.failure.raiseException()
187 475 else:
188 476 return self._ns
189 477
190 478 def _setNS(self, v):
191 raise Exception("I am protected!")
479 raise Exception("the ns attribute cannot be changed")
192 480
193 481 ns = property(_getNS, _setNS)
194 482
195 483 def __init__(self, results, engineid):
196 484 self.engineid = engineid
197 485 if isinstance(results, failure.Failure):
198 486 self.failure = results
199 487 self.results = {}
200 488 else:
201 489 self.results = results
202 490 self.failure = None
203 491
204 492 self._ns = ResultNS(self.results)
205 493
206 494 self.keys = self.results.keys()
207 495
208 496 def __repr__(self):
209 497 if self.failure is not None:
210 498 contents = self.failure
211 499 else:
212 500 contents = self.results
213 501 return "TaskResult[ID:%r]:%r"%(self.taskid, contents)
214 502
215 503 def __getitem__(self, key):
216 504 if self.failure is not None:
217 self.raiseException()
505 self.raise_exception()
218 506 return self.results[key]
219 507
220 def raiseException(self):
508 def raise_exception(self):
221 509 """Re-raise any remote exceptions in the local python session."""
222 510 if self.failure is not None:
223 511 self.failure.raiseException()
224 512
225 513
514 #-----------------------------------------------------------------------------
515 # The controller side of things
516 #-----------------------------------------------------------------------------
517
226 518 class IWorker(zi.Interface):
227 519 """The Basic Worker Interface.
228 520
229 521 A worked is a representation of an Engine that is ready to run tasks.
230 522 """
231 523
232 524 zi.Attribute("workerid", "the id of the worker")
233 525
234 526 def run(task):
235 527 """Run task in worker's namespace.
236 528
237 529 :Parameters:
238 530 task : a `Task` object
239 531
240 :Returns: `Deferred` to a `TaskResult` object.
532 :Returns: `Deferred` to a tuple of (success, result) where
533 success if a boolean that signifies success or failure
534 and result is the task result.
241 535 """
242 536
243 537
244 538 class WorkerFromQueuedEngine(object):
245 539 """Adapt an `IQueuedEngine` to an `IWorker` object"""
540
246 541 zi.implements(IWorker)
247 542
248 543 def __init__(self, qe):
249 544 self.queuedEngine = qe
250 545 self.workerid = None
251 546
252 547 def _get_properties(self):
253 548 return self.queuedEngine.properties
254 549
255 550 properties = property(_get_properties, lambda self, _:None)
256 551
257 552 def run(self, task):
258 553 """Run task in worker's namespace.
259 554
555 This takes a task and calls methods on the task that actually
556 cause `self.queuedEngine` to do the task. See the methods of
557 `ITask` for more information about how these methods are called.
558
260 559 :Parameters:
261 560 task : a `Task` object
262 561
263 :Returns: `Deferred` to a `TaskResult` object.
562 :Returns: `Deferred` to a tuple of (success, result) where
563 success if a boolean that signifies success or failure
564 and result is the task result.
264 565 """
265 if task.clear_before:
266 d = self.queuedEngine.reset()
267 else:
268 d = defer.succeed(None)
269
270 if task.push is not None:
271 d.addCallback(lambda r: self.queuedEngine.push(task.push))
272
273 d.addCallback(lambda r: self.queuedEngine.execute(task.expression))
274
275 if task.pull is not None:
276 d.addCallback(lambda r: self.queuedEngine.pull(task.pull))
277 else:
278 d.addCallback(lambda r: None)
279
280 def reseter(result):
281 self.queuedEngine.reset()
282 return result
283
284 if task.clear_after:
285 d.addBoth(reseter)
286
287 return d.addBoth(self._zipResults, task.pull, time.time(), time.localtime())
288
289 def _zipResults(self, result, names, start, start_struct):
290 """Callback for construting the TaskResult object."""
291 if isinstance(result, failure.Failure):
292 tr = TaskResult(result, self.queuedEngine.id)
293 else:
294 if names is None:
295 resultDict = {}
296 elif len(names) == 1:
297 resultDict = {names[0]:result}
298 else:
299 resultDict = dict(zip(names, result))
300 tr = TaskResult(resultDict, self.queuedEngine.id)
301 # the time info
302 tr.submitted = time.strftime(time_format, start_struct)
303 tr.completed = time.strftime(time_format)
304 tr.duration = time.time()-start
305 return tr
306
566 d = defer.succeed(None)
567 d.addCallback(task.start_time)
568 task.pre_task(d, self.queuedEngine)
569 task.submit_task(d, self.queuedEngine)
570 task.post_task(d, self.queuedEngine)
571 d.addBoth(task.stop_time)
572 d.addBoth(task.process_result, self.queuedEngine.id)
573 # At this point, there will be (success, result) coming down the line
574 return d
575
307 576
308 577 components.registerAdapter(WorkerFromQueuedEngine, es.IEngineQueued, IWorker)
309 578
310 579 class IScheduler(zi.Interface):
311 580 """The interface for a Scheduler.
312 581 """
313 582 zi.Attribute("nworkers", "the number of unassigned workers")
314 583 zi.Attribute("ntasks", "the number of unscheduled tasks")
315 584 zi.Attribute("workerids", "a list of the worker ids")
316 585 zi.Attribute("taskids", "a list of the task ids")
317 586
318 587 def add_task(task, **flags):
319 588 """Add a task to the queue of the Scheduler.
320 589
321 590 :Parameters:
322 task : a `Task` object
591 task : an `ITask` implementer
323 592 The task to be queued.
324 593 flags : dict
325 594 General keywords for more sophisticated scheduling
326 595 """
327 596
328 597 def pop_task(id=None):
329 """Pops a Task object.
598 """Pops a task object from the queue.
330 599
331 600 This gets the next task to be run. If no `id` is requested, the highest priority
332 601 task is returned.
333 602
334 603 :Parameters:
335 604 id
336 605 The id of the task to be popped. The default (None) is to return
337 606 the highest priority task.
338 607
339 :Returns: a `Task` object
608 :Returns: an `ITask` implementer
340 609
341 610 :Exceptions:
342 611 IndexError : raised if no taskid in queue
343 612 """
344 613
345 614 def add_worker(worker, **flags):
346 615 """Add a worker to the worker queue.
347 616
348 617 :Parameters:
349 worker : an IWorker implementing object
350 flags : General keywords for more sophisticated scheduling
618 worker : an `IWorker` implementer
619 flags : dict
620 General keywords for more sophisticated scheduling
351 621 """
352 622
353 623 def pop_worker(id=None):
354 624 """Pops an IWorker object that is ready to do work.
355 625
356 626 This gets the next IWorker that is ready to do work.
357 627
358 628 :Parameters:
359 629 id : if specified, will pop worker with workerid=id, else pops
360 630 highest priority worker. Defaults to None.
361 631
362 632 :Returns:
363 633 an IWorker object
364 634
365 635 :Exceptions:
366 636 IndexError : raised if no workerid in queue
367 637 """
368 638
369 639 def ready():
370 640 """Returns True if there is something to do, False otherwise"""
371 641
372 642 def schedule():
373 """Returns a tuple of the worker and task pair for the next
374 task to be run.
375 """
643 """Returns (worker,task) pair for the next task to be run."""
376 644
377 645
378 646 class FIFOScheduler(object):
379 """A basic First-In-First-Out (Queue) Scheduler.
380 This is the default Scheduler for the TaskController.
381 See the docstrings for IScheduler for interface details.
647 """
648 A basic First-In-First-Out (Queue) Scheduler.
649
650 This is the default Scheduler for the `TaskController`.
651 See the docstrings for `IScheduler` for interface details.
382 652 """
383 653
384 654 zi.implements(IScheduler)
385 655
386 656 def __init__(self):
387 657 self.tasks = []
388 658 self.workers = []
389 659
390 660 def _ntasks(self):
391 661 return len(self.tasks)
392 662
393 663 def _nworkers(self):
394 664 return len(self.workers)
395 665
396 666 ntasks = property(_ntasks, lambda self, _:None)
397 667 nworkers = property(_nworkers, lambda self, _:None)
398 668
399 669 def _taskids(self):
400 670 return [t.taskid for t in self.tasks]
401 671
402 672 def _workerids(self):
403 673 return [w.workerid for w in self.workers]
404 674
405 675 taskids = property(_taskids, lambda self,_:None)
406 676 workerids = property(_workerids, lambda self,_:None)
407 677
408 678 def add_task(self, task, **flags):
409 679 self.tasks.append(task)
410 680
411 681 def pop_task(self, id=None):
412 682 if id is None:
413 683 return self.tasks.pop(0)
414 684 else:
415 685 for i in range(len(self.tasks)):
416 686 taskid = self.tasks[i].taskid
417 687 if id == taskid:
418 688 return self.tasks.pop(i)
419 689 raise IndexError("No task #%i"%id)
420 690
421 691 def add_worker(self, worker, **flags):
422 692 self.workers.append(worker)
423 693
424 694 def pop_worker(self, id=None):
425 695 if id is None:
426 696 return self.workers.pop(0)
427 697 else:
428 698 for i in range(len(self.workers)):
429 699 workerid = self.workers[i].workerid
430 700 if id == workerid:
431 701 return self.workers.pop(i)
432 702 raise IndexError("No worker #%i"%id)
433 703
434 704 def schedule(self):
435 705 for t in self.tasks:
436 706 for w in self.workers:
437 707 try:# do not allow exceptions to break this
438 cando = t.depend is None or t.depend(w.properties)
708 # Allow the task to check itself using its
709 # check_depend method.
710 cando = t.check_depend(w.properties)
439 711 except:
440 712 cando = False
441 713 if cando:
442 714 return self.pop_worker(w.workerid), self.pop_task(t.taskid)
443 715 return None, None
444 716
445 717
446 718
447 719 class LIFOScheduler(FIFOScheduler):
448 """A Last-In-First-Out (Stack) Scheduler. This scheduler should naively
449 reward fast engines by giving them more jobs. This risks starvation, but
450 only in cases with low load, where starvation does not really matter.
720 """
721 A Last-In-First-Out (Stack) Scheduler.
722
723 This scheduler should naively reward fast engines by giving
724 them more jobs. This risks starvation, but only in cases with
725 low load, where starvation does not really matter.
451 726 """
452 727
453 728 def add_task(self, task, **flags):
454 729 # self.tasks.reverse()
455 730 self.tasks.insert(0, task)
456 731 # self.tasks.reverse()
457 732
458 733 def add_worker(self, worker, **flags):
459 734 # self.workers.reverse()
460 735 self.workers.insert(0, worker)
461 736 # self.workers.reverse()
462 737
463 738
464 739 class ITaskController(cs.IControllerBase):
465 """The Task based interface to a `ControllerService` object
740 """
741 The Task based interface to a `ControllerService` object
466 742
467 743 This adapts a `ControllerService` to the ITaskController interface.
468 744 """
469 745
470 746 def run(task):
471 """Run a task.
747 """
748 Run a task.
472 749
473 750 :Parameters:
474 751 task : an IPython `Task` object
475 752
476 753 :Returns: the integer ID of the task
477 754 """
478 755
479 756 def get_task_result(taskid, block=False):
480 """Get the result of a task by its ID.
757 """
758 Get the result of a task by its ID.
481 759
482 760 :Parameters:
483 761 taskid : int
484 762 the id of the task whose result is requested
485 763
486 :Returns: `Deferred` to (taskid, actualResult) if the task is done, and None
764 :Returns: `Deferred` to the task result if the task is done, and None
487 765 if not.
488 766
489 767 :Exceptions:
490 768 actualResult will be an `IndexError` if no such task has been submitted
491 769 """
492 770
493 771 def abort(taskid):
494 772 """Remove task from queue if task is has not been submitted.
495 773
496 774 If the task has already been submitted, wait for it to finish and discard
497 775 results and prevent resubmission.
498 776
499 777 :Parameters:
500 778 taskid : the id of the task to be aborted
501 779
502 780 :Returns:
503 781 `Deferred` to abort attempt completion. Will be None on success.
504 782
505 783 :Exceptions:
506 784 deferred will fail with `IndexError` if no such task has been submitted
507 785 or the task has already completed.
508 786 """
509 787
510 788 def barrier(taskids):
511 """Block until the list of taskids are completed.
789 """
790 Block until the list of taskids are completed.
512 791
513 792 Returns None on success.
514 793 """
515 794
516 795 def spin():
517 """touch the scheduler, to resume scheduling without submitting
518 a task.
796 """
797 Touch the scheduler, to resume scheduling without submitting a task.
519 798 """
520 799
521 def queue_status(self, verbose=False):
522 """Get a dictionary with the current state of the task queue.
800 def queue_status(verbose=False):
801 """
802 Get a dictionary with the current state of the task queue.
523 803
524 804 If verbose is True, then return lists of taskids, otherwise,
525 805 return the number of tasks with each status.
526 806 """
527 807
808 def clear():
809 """
810 Clear all previously run tasks from the task controller.
811
812 This is needed because the task controller keep all task results
813 in memory. This can be a problem is there are many completed
814 tasks. Users should call this periodically to clean out these
815 cached task results.
816 """
817
528 818
529 819 class TaskController(cs.ControllerAdapterBase):
530 820 """The Task based interface to a Controller object.
531 821
532 822 If you want to use a different scheduler, just subclass this and set
533 823 the `SchedulerClass` member to the *class* of your chosen scheduler.
534 824 """
535 825
536 826 zi.implements(ITaskController)
537 827 SchedulerClass = FIFOScheduler
538 828
539 829 timeout = 30
540 830
541 831 def __init__(self, controller):
542 832 self.controller = controller
543 833 self.controller.on_register_engine_do(self.registerWorker, True)
544 834 self.controller.on_unregister_engine_do(self.unregisterWorker, True)
545 835 self.taskid = 0
546 836 self.failurePenalty = 1 # the time in seconds to penalize
547 837 # a worker for failing a task
548 838 self.pendingTasks = {} # dict of {workerid:(taskid, task)}
549 839 self.deferredResults = {} # dict of {taskid:deferred}
550 840 self.finishedResults = {} # dict of {taskid:actualResult}
551 841 self.workers = {} # dict of {workerid:worker}
552 842 self.abortPending = [] # dict of {taskid:abortDeferred}
553 843 self.idleLater = None # delayed call object for timeout
554 844 self.scheduler = self.SchedulerClass()
555 845
556 846 for id in self.controller.engines.keys():
557 847 self.workers[id] = IWorker(self.controller.engines[id])
558 848 self.workers[id].workerid = id
559 849 self.schedule.add_worker(self.workers[id])
560 850
561 851 def registerWorker(self, id):
562 852 """Called by controller.register_engine."""
563 853 if self.workers.get(id):
564 raise "We already have one! This should not happen."
854 raise ValueError("worker with id %s already exists. This should not happen." % id)
565 855 self.workers[id] = IWorker(self.controller.engines[id])
566 856 self.workers[id].workerid = id
567 857 if not self.pendingTasks.has_key(id):# if not working
568 858 self.scheduler.add_worker(self.workers[id])
569 859 self.distributeTasks()
570 860
571 861 def unregisterWorker(self, id):
572 862 """Called by controller.unregister_engine"""
573 863
574 864 if self.workers.has_key(id):
575 865 try:
576 866 self.scheduler.pop_worker(id)
577 867 except IndexError:
578 868 pass
579 869 self.workers.pop(id)
580 870
581 871 def _pendingTaskIDs(self):
582 872 return [t.taskid for t in self.pendingTasks.values()]
583 873
584 874 #---------------------------------------------------------------------------
585 875 # Interface methods
586 876 #---------------------------------------------------------------------------
587 877
588 878 def run(self, task):
589 """Run a task and return `Deferred` to its taskid."""
879 """
880 Run a task and return `Deferred` to its taskid.
881 """
590 882 task.taskid = self.taskid
591 883 task.start = time.localtime()
592 884 self.taskid += 1
593 885 d = defer.Deferred()
594 886 self.scheduler.add_task(task)
595 # log.msg('Queuing task: %i' % task.taskid)
887 log.msg('Queuing task: %i' % task.taskid)
596 888
597 889 self.deferredResults[task.taskid] = []
598 890 self.distributeTasks()
599 891 return defer.succeed(task.taskid)
600 892
601 893 def get_task_result(self, taskid, block=False):
602 """Returns a `Deferred` to a TaskResult tuple or None."""
603 # log.msg("Getting task result: %i" % taskid)
894 """
895 Returns a `Deferred` to the task result, or None.
896 """
897 log.msg("Getting task result: %i" % taskid)
604 898 if self.finishedResults.has_key(taskid):
605 899 tr = self.finishedResults[taskid]
606 900 return defer.succeed(tr)
607 901 elif self.deferredResults.has_key(taskid):
608 902 if block:
609 903 d = defer.Deferred()
610 904 self.deferredResults[taskid].append(d)
611 905 return d
612 906 else:
613 907 return defer.succeed(None)
614 908 else:
615 909 return defer.fail(IndexError("task ID not registered: %r" % taskid))
616 910
617 911 def abort(self, taskid):
618 """Remove a task from the queue if it has not been run already."""
912 """
913 Remove a task from the queue if it has not been run already.
914 """
619 915 if not isinstance(taskid, int):
620 916 return defer.fail(failure.Failure(TypeError("an integer task id expected: %r" % taskid)))
621 917 try:
622 918 self.scheduler.pop_task(taskid)
623 919 except IndexError, e:
624 920 if taskid in self.finishedResults.keys():
625 921 d = defer.fail(IndexError("Task Already Completed"))
626 922 elif taskid in self.abortPending:
627 923 d = defer.fail(IndexError("Task Already Aborted"))
628 924 elif taskid in self._pendingTaskIDs():# task is pending
629 925 self.abortPending.append(taskid)
630 926 d = defer.succeed(None)
631 927 else:
632 928 d = defer.fail(e)
633 929 else:
634 930 d = defer.execute(self._doAbort, taskid)
635 931
636 932 return d
637 933
638 934 def barrier(self, taskids):
639 935 dList = []
640 936 if isinstance(taskids, int):
641 937 taskids = [taskids]
642 938 for id in taskids:
643 939 d = self.get_task_result(id, block=True)
644 940 dList.append(d)
645 941 d = DeferredList(dList, consumeErrors=1)
646 942 d.addCallbacks(lambda r: None)
647 943 return d
648 944
649 945 def spin(self):
650 946 return defer.succeed(self.distributeTasks())
651 947
652 948 def queue_status(self, verbose=False):
653 949 pending = self._pendingTaskIDs()
654 950 failed = []
655 951 succeeded = []
656 952 for k,v in self.finishedResults.iteritems():
657 953 if not isinstance(v, failure.Failure):
658 954 if hasattr(v,'failure'):
659 955 if v.failure is None:
660 956 succeeded.append(k)
661 957 else:
662 958 failed.append(k)
663 959 scheduled = self.scheduler.taskids
664 960 if verbose:
665 961 result = dict(pending=pending, failed=failed,
666 962 succeeded=succeeded, scheduled=scheduled)
667 963 else:
668 964 result = dict(pending=len(pending),failed=len(failed),
669 965 succeeded=len(succeeded),scheduled=len(scheduled))
670 966 return defer.succeed(result)
671 967
672 968 #---------------------------------------------------------------------------
673 969 # Queue methods
674 970 #---------------------------------------------------------------------------
675 971
676 972 def _doAbort(self, taskid):
677 """Helper function for aborting a pending task."""
678 # log.msg("Task aborted: %i" % taskid)
973 """
974 Helper function for aborting a pending task.
975 """
976 log.msg("Task aborted: %i" % taskid)
679 977 result = failure.Failure(error.TaskAborted())
680 978 self._finishTask(taskid, result)
681 979 if taskid in self.abortPending:
682 980 self.abortPending.remove(taskid)
683 981
684 982 def _finishTask(self, taskid, result):
685 983 dlist = self.deferredResults.pop(taskid)
686 result.taskid = taskid # The TaskResult should save the taskid
984 # result.taskid = taskid # The TaskResult should save the taskid
687 985 self.finishedResults[taskid] = result
688 986 for d in dlist:
689 987 d.callback(result)
690 988
691 989 def distributeTasks(self):
692 """Distribute tasks while self.scheduler has things to do."""
693 # log.msg("distributing Tasks")
990 """
991 Distribute tasks while self.scheduler has things to do.
992 """
993 log.msg("distributing Tasks")
694 994 worker, task = self.scheduler.schedule()
695 995 if not worker and not task:
696 996 if self.idleLater and self.idleLater.called:# we are inside failIdle
697 997 self.idleLater = None
698 998 else:
699 999 self.checkIdle()
700 1000 return False
701 1001 # else something to do:
702 1002 while worker and task:
703 1003 # get worker and task
704 1004 # add to pending
705 1005 self.pendingTasks[worker.workerid] = task
706 1006 # run/link callbacks
707 1007 d = worker.run(task)
708 # log.msg("Running task %i on worker %i" %(task.taskid, worker.workerid))
1008 log.msg("Running task %i on worker %i" %(task.taskid, worker.workerid))
709 1009 d.addBoth(self.taskCompleted, task.taskid, worker.workerid)
710 1010 worker, task = self.scheduler.schedule()
711 1011 # check for idle timeout:
712 1012 self.checkIdle()
713 1013 return True
714 1014
715 1015 def checkIdle(self):
716 1016 if self.idleLater and not self.idleLater.called:
717 1017 self.idleLater.cancel()
718 1018 if self.scheduler.ntasks and self.workers and \
719 1019 self.scheduler.nworkers == len(self.workers):
720 1020 self.idleLater = reactor.callLater(self.timeout, self.failIdle)
721 1021 else:
722 1022 self.idleLater = None
723 1023
724 1024 def failIdle(self):
725 1025 if not self.distributeTasks():
726 1026 while self.scheduler.ntasks:
727 1027 t = self.scheduler.pop_task()
728 1028 msg = "task %i failed to execute due to unmet dependencies"%t.taskid
729 1029 msg += " for %i seconds"%self.timeout
730 # log.msg("Task aborted by timeout: %i" % t.taskid)
1030 log.msg("Task aborted by timeout: %i" % t.taskid)
731 1031 f = failure.Failure(error.TaskTimeout(msg))
732 1032 self._finishTask(t.taskid, f)
733 1033 self.idleLater = None
734 1034
735 1035
736 def taskCompleted(self, result, taskid, workerid):
1036 def taskCompleted(self, success_and_result, taskid, workerid):
737 1037 """This is the err/callback for a completed task."""
1038 success, result = success_and_result
738 1039 try:
739 1040 task = self.pendingTasks.pop(workerid)
740 1041 except:
741 1042 # this should not happen
742 1043 log.msg("Tried to pop bad pending task %i from worker %i"%(taskid, workerid))
743 1044 log.msg("Result: %r"%result)
744 1045 log.msg("Pending tasks: %s"%self.pendingTasks)
745 1046 return
746 1047
747 1048 # Check if aborted while pending
748 1049 aborted = False
749 1050 if taskid in self.abortPending:
750 1051 self._doAbort(taskid)
751 1052 aborted = True
752 1053
753 1054 if not aborted:
754 if result.failure is not None and isinstance(result.failure, failure.Failure): # we failed
1055 if not success:
755 1056 log.msg("Task %i failed on worker %i"% (taskid, workerid))
756 1057 if task.retries > 0: # resubmit
757 1058 task.retries -= 1
758 1059 self.scheduler.add_task(task)
759 1060 s = "Resubmitting task %i, %i retries remaining" %(taskid, task.retries)
760 1061 log.msg(s)
761 1062 self.distributeTasks()
762 elif isinstance(task.recovery_task, Task) and \
1063 elif isinstance(task.recovery_task, BaseTask) and \
763 1064 task.recovery_task.retries > -1:
764 1065 # retries = -1 is to prevent infinite recovery_task loop
765 1066 task.retries = -1
766 1067 task.recovery_task.taskid = taskid
767 1068 task = task.recovery_task
768 1069 self.scheduler.add_task(task)
769 1070 s = "Recovering task %i, %i retries remaining" %(taskid, task.retries)
770 1071 log.msg(s)
771 1072 self.distributeTasks()
772 1073 else: # done trying
773 1074 self._finishTask(taskid, result)
774 1075 # wait a second before readmitting a worker that failed
775 1076 # it may have died, and not yet been unregistered
776 1077 reactor.callLater(self.failurePenalty, self.readmitWorker, workerid)
777 1078 else: # we succeeded
778 # log.msg("Task completed: %i"% taskid)
1079 log.msg("Task completed: %i"% taskid)
779 1080 self._finishTask(taskid, result)
780 1081 self.readmitWorker(workerid)
781 else:# we aborted the task
782 if result.failure is not None and isinstance(result.failure, failure.Failure): # it failed, penalize worker
1082 else: # we aborted the task
1083 if not success:
783 1084 reactor.callLater(self.failurePenalty, self.readmitWorker, workerid)
784 1085 else:
785 1086 self.readmitWorker(workerid)
786 1087
787 1088 def readmitWorker(self, workerid):
788 """Readmit a worker to the scheduler.
1089 """
1090 Readmit a worker to the scheduler.
789 1091
790 1092 This is outside `taskCompleted` because of the `failurePenalty` being
791 1093 implemented through `reactor.callLater`.
792 1094 """
793 1095
794 1096 if workerid in self.workers.keys() and workerid not in self.pendingTasks.keys():
795 1097 self.scheduler.add_worker(self.workers[workerid])
796 1098 self.distributeTasks()
1099
1100 def clear(self):
1101 """
1102 Clear all previously run tasks from the task controller.
1103
1104 This is needed because the task controller keep all task results
1105 in memory. This can be a problem is there are many completed
1106 tasks. Users should call this periodically to clean out these
1107 cached task results.
1108 """
1109 self.finishedResults = {}
1110 return defer.succeed(None)
797 1111
798 1112
799 1113 components.registerAdapter(TaskController, cs.IControllerBase, ITaskController)
@@ -1,161 +1,180 b''
1 1 # encoding: utf-8
2 2 # -*- test-case-name: IPython.kernel.tests.test_taskcontrollerxmlrpc -*-
3 3
4 """The Generic Task Client object.
5
6 This must be subclassed based on your connection method.
4 """
5 A blocking version of the task client.
7 6 """
8 7
9 8 __docformat__ = "restructuredtext en"
10 9
11 10 #-------------------------------------------------------------------------------
12 11 # Copyright (C) 2008 The IPython Development Team
13 12 #
14 13 # Distributed under the terms of the BSD License. The full license is in
15 14 # the file COPYING, distributed as part of this software.
16 15 #-------------------------------------------------------------------------------
17 16
18 17 #-------------------------------------------------------------------------------
19 18 # Imports
20 19 #-------------------------------------------------------------------------------
21 20
22 21 from zope.interface import Interface, implements
23 22 from twisted.python import components, log
24 23
25 24 from IPython.kernel.twistedutil import blockingCallFromThread
26 25 from IPython.kernel import task, error
26 from IPython.kernel.mapper import (
27 SynchronousTaskMapper,
28 ITaskMapperFactory,
29 IMapper
30 )
31 from IPython.kernel.parallelfunction import (
32 ParallelFunction,
33 ITaskParallelDecorator
34 )
27 35
28 36 #-------------------------------------------------------------------------------
29 # Connecting Task Client
37 # The task client
30 38 #-------------------------------------------------------------------------------
31 39
32 class InteractiveTaskClient(object):
33
34 def irun(self, *args, **kwargs):
35 """Run a task on the `TaskController`.
36
37 This method is a shorthand for run(task) and its arguments are simply
38 passed onto a `Task` object:
39
40 irun(*args, **kwargs) -> run(Task(*args, **kwargs))
41
42 :Parameters:
43 expression : str
44 A str that is valid python code that is the task.
45 pull : str or list of str
46 The names of objects to be pulled as results.
47 push : dict
48 A dict of objects to be pushed into the engines namespace before
49 execution of the expression.
50 clear_before : boolean
51 Should the engine's namespace be cleared before the task is run.
52 Default=False.
53 clear_after : boolean
54 Should the engine's namespace be cleared after the task is run.
55 Default=False.
56 retries : int
57 The number of times to resumbit the task if it fails. Default=0.
58 options : dict
59 Any other keyword options for more elaborate uses of tasks
60
61 :Returns: A `TaskResult` object.
62 """
63 block = kwargs.pop('block', False)
64 if len(args) == 1 and isinstance(args[0], task.Task):
65 t = args[0]
66 else:
67 t = task.Task(*args, **kwargs)
68 taskid = self.run(t)
69 print "TaskID = %i"%taskid
70 if block:
71 return self.get_task_result(taskid, block)
72 else:
73 return taskid
74
75 40 class IBlockingTaskClient(Interface):
76 41 """
77 An interface for blocking task clients.
42 A vague interface of the blocking task client
78 43 """
79 44 pass
80 45
81
82 class BlockingTaskClient(InteractiveTaskClient):
46 class BlockingTaskClient(object):
83 47 """
84 This class provides a blocking task client.
48 A blocking task client that adapts a non-blocking one.
85 49 """
86 50
87 implements(IBlockingTaskClient)
51 implements(
52 IBlockingTaskClient,
53 ITaskMapperFactory,
54 IMapper,
55 ITaskParallelDecorator
56 )
88 57
89 58 def __init__(self, task_controller):
90 59 self.task_controller = task_controller
91 60 self.block = True
92 61
93 def run(self, task):
94 """
95 Run a task and return a task id that can be used to get the task result.
62 def run(self, task, block=False):
63 """Run a task on the `TaskController`.
64
65 See the documentation of the `MapTask` and `StringTask` classes for
66 details on how to build a task of different types.
96 67
97 68 :Parameters:
98 task : `Task`
99 The `Task` object to run
69 task : an `ITask` implementer
70
71 :Returns: The int taskid of the submitted task. Pass this to
72 `get_task_result` to get the `TaskResult` object.
100 73 """
101 return blockingCallFromThread(self.task_controller.run, task)
74 tid = blockingCallFromThread(self.task_controller.run, task)
75 if block:
76 return self.get_task_result(tid, block=True)
77 else:
78 return tid
102 79
103 80 def get_task_result(self, taskid, block=False):
104 81 """
105 Get or poll for a task result.
82 Get a task result by taskid.
106 83
107 84 :Parameters:
108 85 taskid : int
109 The id of the task whose result to get
86 The taskid of the task to be retrieved.
110 87 block : boolean
111 If True, wait until the task is done and then result the
112 `TaskResult` object. If False, just poll for the result and
113 return None if the task is not done.
88 Should I block until the task is done?
89
90 :Returns: A `TaskResult` object that encapsulates the task result.
114 91 """
115 92 return blockingCallFromThread(self.task_controller.get_task_result,
116 93 taskid, block)
117 94
118 95 def abort(self, taskid):
119 96 """
120 Abort a task by task id if it has not been started.
97 Abort a task by taskid.
98
99 :Parameters:
100 taskid : int
101 The taskid of the task to be aborted.
121 102 """
122 103 return blockingCallFromThread(self.task_controller.abort, taskid)
123 104
124 105 def barrier(self, taskids):
125 """
126 Wait for a set of tasks to finish.
106 """Block until a set of tasks are completed.
127 107
128 108 :Parameters:
129 taskids : list of ints
130 A list of task ids to wait for.
109 taskids : list, tuple
110 A sequence of taskids to block on.
131 111 """
132 112 return blockingCallFromThread(self.task_controller.barrier, taskids)
133 113
134 114 def spin(self):
135 115 """
136 Cause the scheduler to schedule tasks.
116 Touch the scheduler, to resume scheduling without submitting a task.
137 117
138 118 This method only needs to be called in unusual situations where the
139 scheduler is idle for some reason.
119 scheduler is idle for some reason.
140 120 """
141 121 return blockingCallFromThread(self.task_controller.spin)
142 122
143 123 def queue_status(self, verbose=False):
144 124 """
145 125 Get a dictionary with the current state of the task queue.
146 126
147 127 :Parameters:
148 128 verbose : boolean
149 129 If True, return a list of taskids. If False, simply give
150 130 the number of tasks with each status.
151 131
152 132 :Returns:
153 133 A dict with the queue status.
154 134 """
155 135 return blockingCallFromThread(self.task_controller.queue_status, verbose)
136
137 def clear(self):
138 """
139 Clear all previously run tasks from the task controller.
140
141 This is needed because the task controller keep all task results
142 in memory. This can be a problem is there are many completed
143 tasks. Users should call this periodically to clean out these
144 cached task results.
145 """
146 return blockingCallFromThread(self.task_controller.clear)
147
148 def map(self, func, *sequences):
149 """
150 Apply func to *sequences elementwise. Like Python's builtin map.
151
152 This version is load balanced.
153 """
154 return self.mapper().map(func, *sequences)
156 155
156 def mapper(self, clear_before=False, clear_after=False, retries=0,
157 recovery_task=None, depend=None, block=True):
158 """
159 Create an `IMapper` implementer with a given set of arguments.
160
161 The `IMapper` created using a task controller is load balanced.
162
163 See the documentation for `IPython.kernel.task.BaseTask` for
164 documentation on the arguments to this method.
165 """
166 return SynchronousTaskMapper(self, clear_before=clear_before,
167 clear_after=clear_after, retries=retries,
168 recovery_task=recovery_task, depend=depend, block=block)
169
170 def parallel(self, clear_before=False, clear_after=False, retries=0,
171 recovery_task=None, depend=None, block=True):
172 mapper = self.mapper(clear_before, clear_after, retries,
173 recovery_task, depend, block)
174 pf = ParallelFunction(mapper)
175 return pf
157 176
158 177 components.registerAdapter(BlockingTaskClient,
159 178 task.ITaskController, IBlockingTaskClient)
160 179
161 180
@@ -1,267 +1,329 b''
1 1 # encoding: utf-8
2 2 # -*- test-case-name: IPython.kernel.tests.test_taskxmlrpc -*-
3 3 """A Foolscap interface to a TaskController.
4 4
5 5 This class lets Foolscap clients talk to a TaskController.
6 6 """
7 7
8 8 __docformat__ = "restructuredtext en"
9 9
10 10 #-------------------------------------------------------------------------------
11 11 # Copyright (C) 2008 The IPython Development Team
12 12 #
13 13 # Distributed under the terms of the BSD License. The full license is in
14 14 # the file COPYING, distributed as part of this software.
15 15 #-------------------------------------------------------------------------------
16 16
17 17 #-------------------------------------------------------------------------------
18 18 # Imports
19 19 #-------------------------------------------------------------------------------
20 20
21 21 import cPickle as pickle
22 22 import xmlrpclib, copy
23 23
24 24 from zope.interface import Interface, implements
25 25 from twisted.internet import defer
26 26 from twisted.python import components, failure
27 27
28 28 from foolscap import Referenceable
29 29
30 30 from IPython.kernel.twistedutil import blockingCallFromThread
31 31 from IPython.kernel import error, task as taskmodule, taskclient
32 32 from IPython.kernel.pickleutil import can, uncan
33 33 from IPython.kernel.clientinterfaces import (
34 34 IFCClientInterfaceProvider,
35 35 IBlockingClientAdaptor
36 36 )
37 from IPython.kernel.mapper import (
38 TaskMapper,
39 ITaskMapperFactory,
40 IMapper
41 )
42 from IPython.kernel.parallelfunction import (
43 ParallelFunction,
44 ITaskParallelDecorator
45 )
37 46
38 47 #-------------------------------------------------------------------------------
39 48 # The Controller side of things
40 49 #-------------------------------------------------------------------------------
41 50
42 51
43 52 class IFCTaskController(Interface):
44 53 """Foolscap interface to task controller.
45 54
46 See the documentation of ITaskController for documentation about the methods.
55 See the documentation of `ITaskController` for more information.
47 56 """
48 def remote_run(request, binTask):
57 def remote_run(binTask):
49 58 """"""
50 59
51 def remote_abort(request, taskid):
60 def remote_abort(taskid):
52 61 """"""
53 62
54 def remote_get_task_result(request, taskid, block=False):
63 def remote_get_task_result(taskid, block=False):
55 64 """"""
56 65
57 def remote_barrier(request, taskids):
66 def remote_barrier(taskids):
67 """"""
68
69 def remote_spin():
58 70 """"""
59 71
60 def remote_spin(request):
72 def remote_queue_status(verbose):
61 73 """"""
62 74
63 def remote_queue_status(request, verbose):
75 def remote_clear():
64 76 """"""
65 77
66 78
67 79 class FCTaskControllerFromTaskController(Referenceable):
68 """XML-RPC attachmeot for controller.
69
70 See IXMLRPCTaskController and ITaskController (and its children) for documentation.
71 80 """
81 Adapt a `TaskController` to an `IFCTaskController`
82
83 This class is used to expose a `TaskController` over the wire using
84 the Foolscap network protocol.
85 """
86
72 87 implements(IFCTaskController, IFCClientInterfaceProvider)
73 88
74 89 def __init__(self, taskController):
75 90 self.taskController = taskController
76 91
77 92 #---------------------------------------------------------------------------
78 93 # Non interface methods
79 94 #---------------------------------------------------------------------------
80 95
81 96 def packageFailure(self, f):
82 97 f.cleanFailure()
83 98 return self.packageSuccess(f)
84 99
85 100 def packageSuccess(self, obj):
86 101 serial = pickle.dumps(obj, 2)
87 102 return serial
88 103
89 104 #---------------------------------------------------------------------------
90 105 # ITaskController related methods
91 106 #---------------------------------------------------------------------------
92 107
93 108 def remote_run(self, ptask):
94 109 try:
95 ctask = pickle.loads(ptask)
96 task = taskmodule.uncanTask(ctask)
110 task = pickle.loads(ptask)
111 task.uncan_task()
97 112 except:
98 113 d = defer.fail(pickle.UnpickleableError("Could not unmarshal task"))
99 114 else:
100 115 d = self.taskController.run(task)
101 116 d.addCallback(self.packageSuccess)
102 117 d.addErrback(self.packageFailure)
103 118 return d
104 119
105 120 def remote_abort(self, taskid):
106 121 d = self.taskController.abort(taskid)
107 122 d.addCallback(self.packageSuccess)
108 123 d.addErrback(self.packageFailure)
109 124 return d
110 125
111 126 def remote_get_task_result(self, taskid, block=False):
112 127 d = self.taskController.get_task_result(taskid, block)
113 128 d.addCallback(self.packageSuccess)
114 129 d.addErrback(self.packageFailure)
115 130 return d
116 131
117 132 def remote_barrier(self, taskids):
118 133 d = self.taskController.barrier(taskids)
119 134 d.addCallback(self.packageSuccess)
120 135 d.addErrback(self.packageFailure)
121 136 return d
122 137
123 138 def remote_spin(self):
124 139 d = self.taskController.spin()
125 140 d.addCallback(self.packageSuccess)
126 141 d.addErrback(self.packageFailure)
127 142 return d
128 143
129 144 def remote_queue_status(self, verbose):
130 145 d = self.taskController.queue_status(verbose)
131 146 d.addCallback(self.packageSuccess)
132 147 d.addErrback(self.packageFailure)
133 148 return d
134 149
150 def remote_clear(self):
151 return self.taskController.clear()
152
135 153 def remote_get_client_name(self):
136 154 return 'IPython.kernel.taskfc.FCTaskClient'
137 155
138 156 components.registerAdapter(FCTaskControllerFromTaskController,
139 157 taskmodule.ITaskController, IFCTaskController)
140 158
141 159
142 160 #-------------------------------------------------------------------------------
143 161 # The Client side of things
144 162 #-------------------------------------------------------------------------------
145 163
146 164 class FCTaskClient(object):
147 """XML-RPC based TaskController client that implements ITaskController.
148
149 :Parameters:
150 addr : (ip, port)
151 The ip (str) and port (int) tuple of the `TaskController`.
152 165 """
153 implements(taskmodule.ITaskController, IBlockingClientAdaptor)
166 Client class for Foolscap exposed `TaskController`.
167
168 This class is an adapter that makes a `RemoteReference` to a
169 `TaskController` look like an actual `ITaskController` on the client side.
170
171 This class also implements `IBlockingClientAdaptor` so that clients can
172 automatically get a blocking version of this class.
173 """
174
175 implements(
176 taskmodule.ITaskController,
177 IBlockingClientAdaptor,
178 ITaskMapperFactory,
179 IMapper,
180 ITaskParallelDecorator
181 )
154 182
155 183 def __init__(self, remote_reference):
156 184 self.remote_reference = remote_reference
157 185
158 186 #---------------------------------------------------------------------------
159 187 # Non interface methods
160 188 #---------------------------------------------------------------------------
161 189
162 190 def unpackage(self, r):
163 191 return pickle.loads(r)
164 192
165 193 #---------------------------------------------------------------------------
166 194 # ITaskController related methods
167 195 #---------------------------------------------------------------------------
168 196 def run(self, task):
169 197 """Run a task on the `TaskController`.
170 198
171 :Parameters:
172 task : a `Task` object
173
174 The Task object is created using the following signature:
175
176 Task(expression, pull=None, push={}, clear_before=False,
177 clear_after=False, retries=0, **options):)
199 See the documentation of the `MapTask` and `StringTask` classes for
200 details on how to build a task of different types.
178 201
179 The meaning of the arguments is as follows:
202 :Parameters:
203 task : an `ITask` implementer
180 204
181 :Task Parameters:
182 expression : str
183 A str that is valid python code that is the task.
184 pull : str or list of str
185 The names of objects to be pulled as results.
186 push : dict
187 A dict of objects to be pushed into the engines namespace before
188 execution of the expression.
189 clear_before : boolean
190 Should the engine's namespace be cleared before the task is run.
191 Default=False.
192 clear_after : boolean
193 Should the engine's namespace be cleared after the task is run.
194 Default=False.
195 retries : int
196 The number of times to resumbit the task if it fails. Default=0.
197 options : dict
198 Any other keyword options for more elaborate uses of tasks
199
200 205 :Returns: The int taskid of the submitted task. Pass this to
201 206 `get_task_result` to get the `TaskResult` object.
202 207 """
203 assert isinstance(task, taskmodule.Task), "task must be a Task object!"
204 ctask = taskmodule.canTask(task) # handles arbitrary function in .depend
205 # as well as arbitrary recovery_task chains
206 ptask = pickle.dumps(ctask, 2)
208 assert isinstance(task, taskmodule.BaseTask), "task must be a Task object!"
209 task.can_task()
210 ptask = pickle.dumps(task, 2)
211 task.uncan_task()
207 212 d = self.remote_reference.callRemote('run', ptask)
208 213 d.addCallback(self.unpackage)
209 214 return d
210 215
211 216 def get_task_result(self, taskid, block=False):
212 """The task result by taskid.
217 """
218 Get a task result by taskid.
213 219
214 220 :Parameters:
215 221 taskid : int
216 222 The taskid of the task to be retrieved.
217 223 block : boolean
218 224 Should I block until the task is done?
219 225
220 226 :Returns: A `TaskResult` object that encapsulates the task result.
221 227 """
222 228 d = self.remote_reference.callRemote('get_task_result', taskid, block)
223 229 d.addCallback(self.unpackage)
224 230 return d
225 231
226 232 def abort(self, taskid):
227 """Abort a task by taskid.
233 """
234 Abort a task by taskid.
228 235
229 236 :Parameters:
230 237 taskid : int
231 238 The taskid of the task to be aborted.
232 block : boolean
233 Should I block until the task is aborted.
234 239 """
235 240 d = self.remote_reference.callRemote('abort', taskid)
236 241 d.addCallback(self.unpackage)
237 242 return d
238 243
239 244 def barrier(self, taskids):
240 """Block until all tasks are completed.
245 """Block until a set of tasks are completed.
241 246
242 247 :Parameters:
243 248 taskids : list, tuple
244 249 A sequence of taskids to block on.
245 250 """
246 251 d = self.remote_reference.callRemote('barrier', taskids)
247 252 d.addCallback(self.unpackage)
248 253 return d
249 254
250 255 def spin(self):
251 """touch the scheduler, to resume scheduling without submitting
252 a task.
256 """
257 Touch the scheduler, to resume scheduling without submitting a task.
258
259 This method only needs to be called in unusual situations where the
260 scheduler is idle for some reason.
253 261 """
254 262 d = self.remote_reference.callRemote('spin')
255 263 d.addCallback(self.unpackage)
256 264 return d
257 265
258 266 def queue_status(self, verbose=False):
259 """Return a dict with the status of the task queue."""
267 """
268 Get a dictionary with the current state of the task queue.
269
270 :Parameters:
271 verbose : boolean
272 If True, return a list of taskids. If False, simply give
273 the number of tasks with each status.
274
275 :Returns:
276 A dict with the queue status.
277 """
260 278 d = self.remote_reference.callRemote('queue_status', verbose)
261 279 d.addCallback(self.unpackage)
262 280 return d
263 281
282 def clear(self):
283 """
284 Clear all previously run tasks from the task controller.
285
286 This is needed because the task controller keep all task results
287 in memory. This can be a problem is there are many completed
288 tasks. Users should call this periodically to clean out these
289 cached task results.
290 """
291 d = self.remote_reference.callRemote('clear')
292 return d
293
264 294 def adapt_to_blocking_client(self):
295 """
296 Wrap self in a blocking version that implements `IBlockingTaskClient.
297 """
265 298 from IPython.kernel.taskclient import IBlockingTaskClient
266 299 return IBlockingTaskClient(self)
300
301 def map(self, func, *sequences):
302 """
303 Apply func to *sequences elementwise. Like Python's builtin map.
304
305 This version is load balanced.
306 """
307 return self.mapper().map(func, *sequences)
308
309 def mapper(self, clear_before=False, clear_after=False, retries=0,
310 recovery_task=None, depend=None, block=True):
311 """
312 Create an `IMapper` implementer with a given set of arguments.
313
314 The `IMapper` created using a task controller is load balanced.
315
316 See the documentation for `IPython.kernel.task.BaseTask` for
317 documentation on the arguments to this method.
318 """
319 return TaskMapper(self, clear_before=clear_before,
320 clear_after=clear_after, retries=retries,
321 recovery_task=recovery_task, depend=depend, block=block)
322
323 def parallel(self, clear_before=False, clear_after=False, retries=0,
324 recovery_task=None, depend=None, block=True):
325 mapper = self.mapper(clear_before, clear_after, retries,
326 recovery_task, depend, block)
327 pf = ParallelFunction(mapper)
328 return pf
267 329
@@ -1,373 +1,372 b''
1 1 # encoding: utf-8
2 2
3 3 """Test template for complete engine object"""
4 4
5 5 __docformat__ = "restructuredtext en"
6 6
7 7 #-------------------------------------------------------------------------------
8 8 # Copyright (C) 2008 The IPython Development Team
9 9 #
10 10 # Distributed under the terms of the BSD License. The full license is in
11 11 # the file COPYING, distributed as part of this software.
12 12 #-------------------------------------------------------------------------------
13 13
14 14 #-------------------------------------------------------------------------------
15 15 # Imports
16 16 #-------------------------------------------------------------------------------
17 17
18 18 import cPickle as pickle
19 19
20 20 from twisted.internet import defer, reactor
21 21 from twisted.python import failure
22 22 from twisted.application import service
23 23 import zope.interface as zi
24 24
25 25 from IPython.kernel import newserialized
26 26 from IPython.kernel import error
27 27 from IPython.kernel.pickleutil import can, uncan
28 28 import IPython.kernel.engineservice as es
29 29 from IPython.kernel.core.interpreter import Interpreter
30 30 from IPython.testing.parametric import Parametric, parametric
31 31
32 32 #-------------------------------------------------------------------------------
33 33 # Tests
34 34 #-------------------------------------------------------------------------------
35 35
36 36
37 37 # A sequence of valid commands run through execute
38 38 validCommands = ['a=5',
39 39 'b=10',
40 40 'a=5; b=10; c=a+b',
41 41 'import math; 2.0*math.pi',
42 42 """def f():
43 43 result = 0.0
44 44 for i in range(10):
45 45 result += i
46 46 """,
47 47 'if 1<2: a=5',
48 48 """import time
49 49 time.sleep(0.1)""",
50 50 """from math import cos;
51 51 x = 1.0*cos(0.5)""", # Semicolons lead to Discard ast nodes that should be discarded
52 52 """from sets import Set
53 53 s = Set()
54 54 """, # Trailing whitespace should be allowed.
55 55 """import math
56 56 math.cos(1.0)""", # Test a method call with a discarded return value
57 57 """x=1.0234
58 58 a=5; b=10""", # Test an embedded semicolon
59 59 """x=1.0234
60 60 a=5; b=10;""" # Test both an embedded and trailing semicolon
61 61 ]
62 62
63 63 # A sequence of commands that raise various exceptions
64 64 invalidCommands = [('a=1/0',ZeroDivisionError),
65 65 ('print v',NameError),
66 66 ('l=[];l[0]',IndexError),
67 67 ("d={};d['a']",KeyError),
68 68 ("assert 1==0",AssertionError),
69 69 ("import abababsdbfsbaljasdlja",ImportError),
70 70 ("raise Exception()",Exception)]
71 71
72 72 def testf(x):
73 73 return 2.0*x
74 74
75 75 globala = 99
76 76
77 77 def testg(x):
78 78 return globala*x
79 79
80 80 class IEngineCoreTestCase(object):
81 81 """Test an IEngineCore implementer."""
82 82
83 83 def createShell(self):
84 84 return Interpreter()
85 85
86 86 def catchQueueCleared(self, f):
87 87 try:
88 88 f.raiseException()
89 89 except error.QueueCleared:
90 90 pass
91 91
92 92 def testIEngineCoreInterface(self):
93 93 """Does self.engine claim to implement IEngineCore?"""
94 94 self.assert_(es.IEngineCore.providedBy(self.engine))
95 95
96 96 def testIEngineCoreInterfaceMethods(self):
97 97 """Does self.engine have the methods and attributes in IEngineCore."""
98 98 for m in list(es.IEngineCore):
99 99 self.assert_(hasattr(self.engine, m))
100 100
101 101 def testIEngineCoreDeferreds(self):
102 102 d = self.engine.execute('a=5')
103 103 d.addCallback(lambda _: self.engine.pull('a'))
104 104 d.addCallback(lambda _: self.engine.get_result())
105 105 d.addCallback(lambda _: self.engine.keys())
106 106 d.addCallback(lambda _: self.engine.push(dict(a=10)))
107 107 return d
108 108
109 109 def runTestExecute(self, cmd):
110 110 self.shell = Interpreter()
111 111 actual = self.shell.execute(cmd)
112 112 def compare(computed):
113 113 actual['id'] = computed['id']
114 114 self.assertEquals(actual, computed)
115 115 d = self.engine.execute(cmd)
116 116 d.addCallback(compare)
117 117 return d
118 118
119 119 @parametric
120 120 def testExecute(cls):
121 121 return [(cls.runTestExecute, cmd) for cmd in validCommands]
122 122
123 123 def runTestExecuteFailures(self, cmd, exc):
124 124 def compare(f):
125 125 self.assertRaises(exc, f.raiseException)
126 126 d = self.engine.execute(cmd)
127 127 d.addErrback(compare)
128 128 return d
129 129
130 130 @parametric
131 131 def testExecuteFailures(cls):
132 132 return [(cls.runTestExecuteFailures, cmd, exc) for cmd, exc in invalidCommands]
133 133
134 134 def runTestPushPull(self, o):
135 135 d = self.engine.push(dict(a=o))
136 136 d.addCallback(lambda r: self.engine.pull('a'))
137 137 d.addCallback(lambda r: self.assertEquals(o,r))
138 138 return d
139 139
140 140 @parametric
141 141 def testPushPull(cls):
142 142 objs = [10,"hi there",1.2342354,{"p":(1,2)},None]
143 143 return [(cls.runTestPushPull, o) for o in objs]
144 144
145 145 def testPullNameError(self):
146 146 d = self.engine.push(dict(a=5))
147 147 d.addCallback(lambda _:self.engine.reset())
148 148 d.addCallback(lambda _: self.engine.pull("a"))
149 149 d.addErrback(lambda f: self.assertRaises(NameError, f.raiseException))
150 150 return d
151 151
152 152 def testPushPullFailures(self):
153 153 d = self.engine.pull('a')
154 154 d.addErrback(lambda f: self.assertRaises(NameError, f.raiseException))
155 155 d.addCallback(lambda _: self.engine.execute('l = lambda x: x'))
156 156 d.addCallback(lambda _: self.engine.pull('l'))
157 157 d.addErrback(lambda f: self.assertRaises(pickle.PicklingError, f.raiseException))
158 158 d.addCallback(lambda _: self.engine.push(dict(l=lambda x: x)))
159 159 d.addErrback(lambda f: self.assertRaises(pickle.PicklingError, f.raiseException))
160 160 return d
161 161
162 162 def testPushPullArray(self):
163 163 try:
164 164 import numpy
165 165 except:
166 print 'no numpy, ',
167 166 return
168 167 a = numpy.random.random(1000)
169 168 d = self.engine.push(dict(a=a))
170 169 d.addCallback(lambda _: self.engine.pull('a'))
171 170 d.addCallback(lambda b: b==a)
172 171 d.addCallback(lambda c: c.all())
173 172 return self.assertDeferredEquals(d, True)
174 173
175 174 def testPushFunction(self):
176 175
177 176 d = self.engine.push_function(dict(f=testf))
178 177 d.addCallback(lambda _: self.engine.execute('result = f(10)'))
179 178 d.addCallback(lambda _: self.engine.pull('result'))
180 179 d.addCallback(lambda r: self.assertEquals(r, testf(10)))
181 180 return d
182 181
183 182 def testPullFunction(self):
184 183 d = self.engine.push_function(dict(f=testf, g=testg))
185 184 d.addCallback(lambda _: self.engine.pull_function(('f','g')))
186 185 d.addCallback(lambda r: self.assertEquals(r[0](10), testf(10)))
187 186 return d
188 187
189 188 def testPushFunctionGlobal(self):
190 189 """Make sure that pushed functions pick up the user's namespace for globals."""
191 190 d = self.engine.push(dict(globala=globala))
192 191 d.addCallback(lambda _: self.engine.push_function(dict(g=testg)))
193 192 d.addCallback(lambda _: self.engine.execute('result = g(10)'))
194 193 d.addCallback(lambda _: self.engine.pull('result'))
195 194 d.addCallback(lambda r: self.assertEquals(r, testg(10)))
196 195 return d
197 196
198 197 def testGetResultFailure(self):
199 198 d = self.engine.get_result(None)
200 199 d.addErrback(lambda f: self.assertRaises(IndexError, f.raiseException))
201 200 d.addCallback(lambda _: self.engine.get_result(10))
202 201 d.addErrback(lambda f: self.assertRaises(IndexError, f.raiseException))
203 202 return d
204 203
205 204 def runTestGetResult(self, cmd):
206 205 self.shell = Interpreter()
207 206 actual = self.shell.execute(cmd)
208 207 def compare(computed):
209 208 actual['id'] = computed['id']
210 209 self.assertEquals(actual, computed)
211 210 d = self.engine.execute(cmd)
212 211 d.addCallback(lambda r: self.engine.get_result(r['number']))
213 212 d.addCallback(compare)
214 213 return d
215 214
216 215 @parametric
217 216 def testGetResult(cls):
218 217 return [(cls.runTestGetResult, cmd) for cmd in validCommands]
219 218
220 219 def testGetResultDefault(self):
221 220 cmd = 'a=5'
222 221 shell = self.createShell()
223 222 shellResult = shell.execute(cmd)
224 223 def popit(dikt, key):
225 224 dikt.pop(key)
226 225 return dikt
227 226 d = self.engine.execute(cmd)
228 227 d.addCallback(lambda _: self.engine.get_result())
229 228 d.addCallback(lambda r: self.assertEquals(shellResult, popit(r,'id')))
230 229 return d
231 230
232 231 def testKeys(self):
233 232 d = self.engine.keys()
234 233 d.addCallback(lambda s: isinstance(s, list))
235 234 d.addCallback(lambda r: self.assertEquals(r, True))
236 235 return d
237 236
238 237 Parametric(IEngineCoreTestCase)
239 238
240 239 class IEngineSerializedTestCase(object):
241 240 """Test an IEngineCore implementer."""
242 241
243 242 def testIEngineSerializedInterface(self):
244 243 """Does self.engine claim to implement IEngineCore?"""
245 244 self.assert_(es.IEngineSerialized.providedBy(self.engine))
246 245
247 246 def testIEngineSerializedInterfaceMethods(self):
248 247 """Does self.engine have the methods and attributes in IEngireCore."""
249 248 for m in list(es.IEngineSerialized):
250 249 self.assert_(hasattr(self.engine, m))
251 250
252 251 def testIEngineSerializedDeferreds(self):
253 252 dList = []
254 253 d = self.engine.push_serialized(dict(key=newserialized.serialize(12345)))
255 254 self.assert_(isinstance(d, defer.Deferred))
256 255 dList.append(d)
257 256 d = self.engine.pull_serialized('key')
258 257 self.assert_(isinstance(d, defer.Deferred))
259 258 dList.append(d)
260 259 D = defer.DeferredList(dList)
261 260 return D
262 261
263 262 def testPushPullSerialized(self):
264 263 objs = [10,"hi there",1.2342354,{"p":(1,2)}]
265 264 d = defer.succeed(None)
266 265 for o in objs:
267 266 self.engine.push_serialized(dict(key=newserialized.serialize(o)))
268 267 value = self.engine.pull_serialized('key')
269 268 value.addCallback(lambda serial: newserialized.IUnSerialized(serial).getObject())
270 269 d = self.assertDeferredEquals(value,o,d)
271 270 return d
272 271
273 272 def testPullSerializedFailures(self):
274 273 d = self.engine.pull_serialized('a')
275 274 d.addErrback(lambda f: self.assertRaises(NameError, f.raiseException))
276 275 d.addCallback(lambda _: self.engine.execute('l = lambda x: x'))
277 276 d.addCallback(lambda _: self.engine.pull_serialized('l'))
278 277 d.addErrback(lambda f: self.assertRaises(pickle.PicklingError, f.raiseException))
279 278 return d
280 279
281 280 Parametric(IEngineSerializedTestCase)
282 281
283 282 class IEngineQueuedTestCase(object):
284 283 """Test an IEngineQueued implementer."""
285 284
286 285 def testIEngineQueuedInterface(self):
287 286 """Does self.engine claim to implement IEngineQueued?"""
288 287 self.assert_(es.IEngineQueued.providedBy(self.engine))
289 288
290 289 def testIEngineQueuedInterfaceMethods(self):
291 290 """Does self.engine have the methods and attributes in IEngireQueued."""
292 291 for m in list(es.IEngineQueued):
293 292 self.assert_(hasattr(self.engine, m))
294 293
295 294 def testIEngineQueuedDeferreds(self):
296 295 dList = []
297 296 d = self.engine.clear_queue()
298 297 self.assert_(isinstance(d, defer.Deferred))
299 298 dList.append(d)
300 299 d = self.engine.queue_status()
301 300 self.assert_(isinstance(d, defer.Deferred))
302 301 dList.append(d)
303 302 D = defer.DeferredList(dList)
304 303 return D
305 304
306 305 def testClearQueue(self):
307 306 result = self.engine.clear_queue()
308 307 d1 = self.assertDeferredEquals(result, None)
309 308 d1.addCallback(lambda _: self.engine.queue_status())
310 309 d2 = self.assertDeferredEquals(d1, {'queue':[], 'pending':'None'})
311 310 return d2
312 311
313 312 def testQueueStatus(self):
314 313 result = self.engine.queue_status()
315 314 result.addCallback(lambda r: 'queue' in r and 'pending' in r)
316 315 d = self.assertDeferredEquals(result, True)
317 316 return d
318 317
319 318 Parametric(IEngineQueuedTestCase)
320 319
321 320 class IEnginePropertiesTestCase(object):
322 321 """Test an IEngineProperties implementor."""
323 322
324 323 def testIEnginePropertiesInterface(self):
325 324 """Does self.engine claim to implement IEngineProperties?"""
326 325 self.assert_(es.IEngineProperties.providedBy(self.engine))
327 326
328 327 def testIEnginePropertiesInterfaceMethods(self):
329 328 """Does self.engine have the methods and attributes in IEngireProperties."""
330 329 for m in list(es.IEngineProperties):
331 330 self.assert_(hasattr(self.engine, m))
332 331
333 332 def testGetSetProperties(self):
334 333 dikt = dict(a=5, b='asdf', c=True, d=None, e=range(5))
335 334 d = self.engine.set_properties(dikt)
336 335 d.addCallback(lambda r: self.engine.get_properties())
337 336 d = self.assertDeferredEquals(d, dikt)
338 337 d.addCallback(lambda r: self.engine.get_properties(('c',)))
339 338 d = self.assertDeferredEquals(d, {'c': dikt['c']})
340 339 d.addCallback(lambda r: self.engine.set_properties(dict(c=False)))
341 340 d.addCallback(lambda r: self.engine.get_properties(('c', 'd')))
342 341 d = self.assertDeferredEquals(d, dict(c=False, d=None))
343 342 return d
344 343
345 344 def testClearProperties(self):
346 345 dikt = dict(a=5, b='asdf', c=True, d=None, e=range(5))
347 346 d = self.engine.set_properties(dikt)
348 347 d.addCallback(lambda r: self.engine.clear_properties())
349 348 d.addCallback(lambda r: self.engine.get_properties())
350 349 d = self.assertDeferredEquals(d, {})
351 350 return d
352 351
353 352 def testDelHasProperties(self):
354 353 dikt = dict(a=5, b='asdf', c=True, d=None, e=range(5))
355 354 d = self.engine.set_properties(dikt)
356 355 d.addCallback(lambda r: self.engine.del_properties(('b','e')))
357 356 d.addCallback(lambda r: self.engine.has_properties(('a','b','c','d','e')))
358 357 d = self.assertDeferredEquals(d, [True, False, True, True, False])
359 358 return d
360 359
361 360 def testStrictDict(self):
362 361 s = """from IPython.kernel.engineservice import get_engine
363 362 p = get_engine(%s).properties"""%self.engine.id
364 363 d = self.engine.execute(s)
365 364 d.addCallback(lambda r: self.engine.execute("p['a'] = lambda _:None"))
366 365 d = self.assertDeferredRaises(d, error.InvalidProperty)
367 366 d.addCallback(lambda r: self.engine.execute("p['a'] = range(5)"))
368 367 d.addCallback(lambda r: self.engine.execute("p['a'].append(5)"))
369 368 d.addCallback(lambda r: self.engine.get_properties('a'))
370 369 d = self.assertDeferredEquals(d, dict(a=range(5)))
371 370 return d
372 371
373 372 Parametric(IEnginePropertiesTestCase)
@@ -1,838 +1,828 b''
1 1 # encoding: utf-8
2 2
3 3 """"""
4 4
5 5 __docformat__ = "restructuredtext en"
6 6
7 7 #-------------------------------------------------------------------------------
8 8 # Copyright (C) 2008 The IPython Development Team
9 9 #
10 10 # Distributed under the terms of the BSD License. The full license is in
11 11 # the file COPYING, distributed as part of this software.
12 12 #-------------------------------------------------------------------------------
13 13
14 14 #-------------------------------------------------------------------------------
15 15 # Imports
16 16 #-------------------------------------------------------------------------------
17 17
18 18 from twisted.internet import defer
19 19
20 20 from IPython.kernel import engineservice as es
21 21 from IPython.kernel import multiengine as me
22 22 from IPython.kernel import newserialized
23 23 from IPython.kernel.error import NotDefined
24 24 from IPython.testing import util
25 25 from IPython.testing.parametric import parametric, Parametric
26 26 from IPython.kernel import newserialized
27 27 from IPython.kernel.util import printer
28 28 from IPython.kernel.error import (InvalidEngineID,
29 29 NoEnginesRegistered,
30 30 CompositeError,
31 31 InvalidDeferredID)
32 32 from IPython.kernel.tests.engineservicetest import validCommands, invalidCommands
33 33 from IPython.kernel.core.interpreter import Interpreter
34 34
35 35
36 36 #-------------------------------------------------------------------------------
37 37 # Base classes and utilities
38 38 #-------------------------------------------------------------------------------
39 39
40 40 class IMultiEngineBaseTestCase(object):
41 41 """Basic utilities for working with multiengine tests.
42 42
43 43 Some subclass should define:
44 44
45 45 * self.multiengine
46 46 * self.engines to keep track of engines for clean up"""
47 47
48 48 def createShell(self):
49 49 return Interpreter()
50 50
51 51 def addEngine(self, n=1):
52 52 for i in range(n):
53 53 e = es.EngineService()
54 54 e.startService()
55 55 regDict = self.controller.register_engine(es.QueuedEngine(e), None)
56 56 e.id = regDict['id']
57 57 self.engines.append(e)
58 58
59 59
60 60 def testf(x):
61 61 return 2.0*x
62 62
63 63
64 64 globala = 99
65 65
66 66
67 67 def testg(x):
68 68 return globala*x
69 69
70 70
71 71 def isdid(did):
72 72 if not isinstance(did, str):
73 73 return False
74 74 if not len(did)==40:
75 75 return False
76 76 return True
77 77
78 78
79 79 def _raise_it(f):
80 80 try:
81 81 f.raiseException()
82 82 except CompositeError, e:
83 83 e.raise_exception()
84 84
85 85 #-------------------------------------------------------------------------------
86 86 # IMultiEngineTestCase
87 87 #-------------------------------------------------------------------------------
88 88
89 89 class IMultiEngineTestCase(IMultiEngineBaseTestCase):
90 90 """A test for any object that implements IEngineMultiplexer.
91 91
92 92 self.multiengine must be defined and implement IEngineMultiplexer.
93 93 """
94 94
95 95 def testIMultiEngineInterface(self):
96 96 """Does self.engine claim to implement IEngineCore?"""
97 97 self.assert_(me.IEngineMultiplexer.providedBy(self.multiengine))
98 98 self.assert_(me.IMultiEngine.providedBy(self.multiengine))
99 99
100 100 def testIEngineMultiplexerInterfaceMethods(self):
101 101 """Does self.engine have the methods and attributes in IEngineCore."""
102 102 for m in list(me.IEngineMultiplexer):
103 103 self.assert_(hasattr(self.multiengine, m))
104 104
105 105 def testIEngineMultiplexerDeferreds(self):
106 106 self.addEngine(1)
107 107 d= self.multiengine.execute('a=5', targets=0)
108 108 d.addCallback(lambda _: self.multiengine.push(dict(a=5),targets=0))
109 109 d.addCallback(lambda _: self.multiengine.push(dict(a=5, b='asdf', c=[1,2,3]),targets=0))
110 110 d.addCallback(lambda _: self.multiengine.pull(('a','b','c'),targets=0))
111 111 d.addCallback(lambda _: self.multiengine.get_result(targets=0))
112 112 d.addCallback(lambda _: self.multiengine.reset(targets=0))
113 113 d.addCallback(lambda _: self.multiengine.keys(targets=0))
114 114 d.addCallback(lambda _: self.multiengine.push_serialized(dict(a=newserialized.serialize(10)),targets=0))
115 115 d.addCallback(lambda _: self.multiengine.pull_serialized('a',targets=0))
116 116 d.addCallback(lambda _: self.multiengine.clear_queue(targets=0))
117 117 d.addCallback(lambda _: self.multiengine.queue_status(targets=0))
118 118 return d
119 119
120 120 def testInvalidEngineID(self):
121 121 self.addEngine(1)
122 122 badID = 100
123 123 d = self.multiengine.execute('a=5', targets=badID)
124 124 d.addErrback(lambda f: self.assertRaises(InvalidEngineID, f.raiseException))
125 125 d.addCallback(lambda _: self.multiengine.push(dict(a=5), targets=badID))
126 126 d.addErrback(lambda f: self.assertRaises(InvalidEngineID, f.raiseException))
127 127 d.addCallback(lambda _: self.multiengine.pull('a', targets=badID))
128 128 d.addErrback(lambda f: self.assertRaises(InvalidEngineID, f.raiseException))
129 129 d.addCallback(lambda _: self.multiengine.reset(targets=badID))
130 130 d.addErrback(lambda f: self.assertRaises(InvalidEngineID, f.raiseException))
131 131 d.addCallback(lambda _: self.multiengine.keys(targets=badID))
132 132 d.addErrback(lambda f: self.assertRaises(InvalidEngineID, f.raiseException))
133 133 d.addCallback(lambda _: self.multiengine.push_serialized(dict(a=newserialized.serialize(10)), targets=badID))
134 134 d.addErrback(lambda f: self.assertRaises(InvalidEngineID, f.raiseException))
135 135 d.addCallback(lambda _: self.multiengine.pull_serialized('a', targets=badID))
136 136 d.addErrback(lambda f: self.assertRaises(InvalidEngineID, f.raiseException))
137 137 d.addCallback(lambda _: self.multiengine.queue_status(targets=badID))
138 138 d.addErrback(lambda f: self.assertRaises(InvalidEngineID, f.raiseException))
139 139 return d
140 140
141 141 def testNoEnginesRegistered(self):
142 142 badID = 'all'
143 143 d= self.multiengine.execute('a=5', targets=badID)
144 144 d.addErrback(lambda f: self.assertRaises(NoEnginesRegistered, f.raiseException))
145 145 d.addCallback(lambda _: self.multiengine.push(dict(a=5), targets=badID))
146 146 d.addErrback(lambda f: self.assertRaises(NoEnginesRegistered, f.raiseException))
147 147 d.addCallback(lambda _: self.multiengine.pull('a', targets=badID))
148 148 d.addErrback(lambda f: self.assertRaises(NoEnginesRegistered, f.raiseException))
149 149 d.addCallback(lambda _: self.multiengine.get_result(targets=badID))
150 150 d.addErrback(lambda f: self.assertRaises(NoEnginesRegistered, f.raiseException))
151 151 d.addCallback(lambda _: self.multiengine.reset(targets=badID))
152 152 d.addErrback(lambda f: self.assertRaises(NoEnginesRegistered, f.raiseException))
153 153 d.addCallback(lambda _: self.multiengine.keys(targets=badID))
154 154 d.addErrback(lambda f: self.assertRaises(NoEnginesRegistered, f.raiseException))
155 155 d.addCallback(lambda _: self.multiengine.push_serialized(dict(a=newserialized.serialize(10)), targets=badID))
156 156 d.addErrback(lambda f: self.assertRaises(NoEnginesRegistered, f.raiseException))
157 157 d.addCallback(lambda _: self.multiengine.pull_serialized('a', targets=badID))
158 158 d.addErrback(lambda f: self.assertRaises(NoEnginesRegistered, f.raiseException))
159 159 d.addCallback(lambda _: self.multiengine.queue_status(targets=badID))
160 160 d.addErrback(lambda f: self.assertRaises(NoEnginesRegistered, f.raiseException))
161 161 return d
162 162
163 163 def runExecuteAll(self, d, cmd, shell):
164 164 actual = shell.execute(cmd)
165 165 d.addCallback(lambda _: self.multiengine.execute(cmd))
166 166 def compare(result):
167 167 for r in result:
168 168 actual['id'] = r['id']
169 169 self.assertEquals(r, actual)
170 170 d.addCallback(compare)
171 171
172 172 def testExecuteAll(self):
173 173 self.addEngine(4)
174 174 d= defer.Deferred()
175 175 shell = Interpreter()
176 176 for cmd in validCommands:
177 177 self.runExecuteAll(d, cmd, shell)
178 178 d.callback(None)
179 179 return d
180 180
181 181 # The following two methods show how to do parametrized
182 182 # tests. This is really slick! Same is used above.
183 183 def runExecuteFailures(self, cmd, exc):
184 184 self.addEngine(4)
185 185 d= self.multiengine.execute(cmd)
186 186 d.addErrback(lambda f: self.assertRaises(exc, _raise_it, f))
187 187 return d
188 188
189 189 @parametric
190 190 def testExecuteFailures(cls):
191 191 return [(cls.runExecuteFailures,cmd,exc) for
192 192 cmd,exc in invalidCommands]
193 193
194 194 def testPushPull(self):
195 195 self.addEngine(1)
196 196 objs = [10,"hi there",1.2342354,{"p":(1,2)}]
197 197 d= self.multiengine.push(dict(key=objs[0]), targets=0)
198 198 d.addCallback(lambda _: self.multiengine.pull('key', targets=0))
199 199 d.addCallback(lambda r: self.assertEquals(r, [objs[0]]))
200 200 d.addCallback(lambda _: self.multiengine.push(dict(key=objs[1]), targets=0))
201 201 d.addCallback(lambda _: self.multiengine.pull('key', targets=0))
202 202 d.addCallback(lambda r: self.assertEquals(r, [objs[1]]))
203 203 d.addCallback(lambda _: self.multiengine.push(dict(key=objs[2]), targets=0))
204 204 d.addCallback(lambda _: self.multiengine.pull('key', targets=0))
205 205 d.addCallback(lambda r: self.assertEquals(r, [objs[2]]))
206 206 d.addCallback(lambda _: self.multiengine.push(dict(key=objs[3]), targets=0))
207 207 d.addCallback(lambda _: self.multiengine.pull('key', targets=0))
208 208 d.addCallback(lambda r: self.assertEquals(r, [objs[3]]))
209 209 d.addCallback(lambda _: self.multiengine.reset(targets=0))
210 210 d.addCallback(lambda _: self.multiengine.pull('a', targets=0))
211 211 d.addErrback(lambda f: self.assertRaises(NameError, _raise_it, f))
212 212 d.addCallback(lambda _: self.multiengine.push(dict(a=10,b=20)))
213 213 d.addCallback(lambda _: self.multiengine.pull(('a','b')))
214 214 d.addCallback(lambda r: self.assertEquals(r, [[10,20]]))
215 215 return d
216 216
217 217 def testPushPullAll(self):
218 218 self.addEngine(4)
219 219 d= self.multiengine.push(dict(a=10))
220 220 d.addCallback(lambda _: self.multiengine.pull('a'))
221 221 d.addCallback(lambda r: self.assert_(r==[10,10,10,10]))
222 222 d.addCallback(lambda _: self.multiengine.push(dict(a=10, b=20)))
223 223 d.addCallback(lambda _: self.multiengine.pull(('a','b')))
224 224 d.addCallback(lambda r: self.assert_(r==4*[[10,20]]))
225 225 d.addCallback(lambda _: self.multiengine.push(dict(a=10, b=20), targets=0))
226 226 d.addCallback(lambda _: self.multiengine.pull(('a','b'), targets=0))
227 227 d.addCallback(lambda r: self.assert_(r==[[10,20]]))
228 228 d.addCallback(lambda _: self.multiengine.push(dict(a=None, b=None), targets=0))
229 229 d.addCallback(lambda _: self.multiengine.pull(('a','b'), targets=0))
230 230 d.addCallback(lambda r: self.assert_(r==[[None,None]]))
231 231 return d
232 232
233 233 def testPushPullSerialized(self):
234 234 self.addEngine(1)
235 235 objs = [10,"hi there",1.2342354,{"p":(1,2)}]
236 236 d= self.multiengine.push_serialized(dict(key=newserialized.serialize(objs[0])), targets=0)
237 237 d.addCallback(lambda _: self.multiengine.pull_serialized('key', targets=0))
238 238 d.addCallback(lambda serial: newserialized.IUnSerialized(serial[0]).getObject())
239 239 d.addCallback(lambda r: self.assertEquals(r, objs[0]))
240 240 d.addCallback(lambda _: self.multiengine.push_serialized(dict(key=newserialized.serialize(objs[1])), targets=0))
241 241 d.addCallback(lambda _: self.multiengine.pull_serialized('key', targets=0))
242 242 d.addCallback(lambda serial: newserialized.IUnSerialized(serial[0]).getObject())
243 243 d.addCallback(lambda r: self.assertEquals(r, objs[1]))
244 244 d.addCallback(lambda _: self.multiengine.push_serialized(dict(key=newserialized.serialize(objs[2])), targets=0))
245 245 d.addCallback(lambda _: self.multiengine.pull_serialized('key', targets=0))
246 246 d.addCallback(lambda serial: newserialized.IUnSerialized(serial[0]).getObject())
247 247 d.addCallback(lambda r: self.assertEquals(r, objs[2]))
248 248 d.addCallback(lambda _: self.multiengine.push_serialized(dict(key=newserialized.serialize(objs[3])), targets=0))
249 249 d.addCallback(lambda _: self.multiengine.pull_serialized('key', targets=0))
250 250 d.addCallback(lambda serial: newserialized.IUnSerialized(serial[0]).getObject())
251 251 d.addCallback(lambda r: self.assertEquals(r, objs[3]))
252 252 d.addCallback(lambda _: self.multiengine.push(dict(a=10,b=range(5)), targets=0))
253 253 d.addCallback(lambda _: self.multiengine.pull_serialized(('a','b'), targets=0))
254 254 d.addCallback(lambda serial: [newserialized.IUnSerialized(s).getObject() for s in serial[0]])
255 255 d.addCallback(lambda r: self.assertEquals(r, [10, range(5)]))
256 256 d.addCallback(lambda _: self.multiengine.reset(targets=0))
257 257 d.addCallback(lambda _: self.multiengine.pull_serialized('a', targets=0))
258 258 d.addErrback(lambda f: self.assertRaises(NameError, _raise_it, f))
259 259 return d
260 260
261 261 objs = [10,"hi there",1.2342354,{"p":(1,2)}]
262 262 d= defer.succeed(None)
263 263 for o in objs:
264 264 self.multiengine.push_serialized(0, key=newserialized.serialize(o))
265 265 value = self.multiengine.pull_serialized(0, 'key')
266 266 value.addCallback(lambda serial: newserialized.IUnSerialized(serial[0]).getObject())
267 267 d = self.assertDeferredEquals(value,o,d)
268 268 return d
269 269
270 270 def runGetResultAll(self, d, cmd, shell):
271 271 actual = shell.execute(cmd)
272 272 d.addCallback(lambda _: self.multiengine.execute(cmd))
273 273 d.addCallback(lambda _: self.multiengine.get_result())
274 274 def compare(result):
275 275 for r in result:
276 276 actual['id'] = r['id']
277 277 self.assertEquals(r, actual)
278 278 d.addCallback(compare)
279 279
280 280 def testGetResultAll(self):
281 281 self.addEngine(4)
282 282 d= defer.Deferred()
283 283 shell = Interpreter()
284 284 for cmd in validCommands:
285 285 self.runGetResultAll(d, cmd, shell)
286 286 d.callback(None)
287 287 return d
288 288
289 289 def testGetResultDefault(self):
290 290 self.addEngine(1)
291 291 target = 0
292 292 cmd = 'a=5'
293 293 shell = self.createShell()
294 294 shellResult = shell.execute(cmd)
295 295 def popit(dikt, key):
296 296 dikt.pop(key)
297 297 return dikt
298 298 d= self.multiengine.execute(cmd, targets=target)
299 299 d.addCallback(lambda _: self.multiengine.get_result(targets=target))
300 300 d.addCallback(lambda r: self.assertEquals(shellResult, popit(r[0],'id')))
301 301 return d
302 302
303 303 def testGetResultFailure(self):
304 304 self.addEngine(1)
305 305 d= self.multiengine.get_result(None, targets=0)
306 306 d.addErrback(lambda f: self.assertRaises(IndexError, _raise_it, f))
307 307 d.addCallback(lambda _: self.multiengine.get_result(10, targets=0))
308 308 d.addErrback(lambda f: self.assertRaises(IndexError, _raise_it, f))
309 309 return d
310 310
311 311 def testPushFunction(self):
312 312 self.addEngine(1)
313 313 d= self.multiengine.push_function(dict(f=testf), targets=0)
314 314 d.addCallback(lambda _: self.multiengine.execute('result = f(10)', targets=0))
315 315 d.addCallback(lambda _: self.multiengine.pull('result', targets=0))
316 316 d.addCallback(lambda r: self.assertEquals(r[0], testf(10)))
317 317 d.addCallback(lambda _: self.multiengine.push(dict(globala=globala), targets=0))
318 318 d.addCallback(lambda _: self.multiengine.push_function(dict(g=testg), targets=0))
319 319 d.addCallback(lambda _: self.multiengine.execute('result = g(10)', targets=0))
320 320 d.addCallback(lambda _: self.multiengine.pull('result', targets=0))
321 321 d.addCallback(lambda r: self.assertEquals(r[0], testg(10)))
322 322 return d
323 323
324 324 def testPullFunction(self):
325 325 self.addEngine(1)
326 326 d= self.multiengine.push(dict(a=globala), targets=0)
327 327 d.addCallback(lambda _: self.multiengine.push_function(dict(f=testf), targets=0))
328 328 d.addCallback(lambda _: self.multiengine.pull_function('f', targets=0))
329 329 d.addCallback(lambda r: self.assertEquals(r[0](10), testf(10)))
330 330 d.addCallback(lambda _: self.multiengine.execute("def g(x): return x*x", targets=0))
331 331 d.addCallback(lambda _: self.multiengine.pull_function(('f','g'),targets=0))
332 332 d.addCallback(lambda r: self.assertEquals((r[0][0](10),r[0][1](10)), (testf(10), 100)))
333 333 return d
334 334
335 335 def testPushFunctionAll(self):
336 336 self.addEngine(4)
337 337 d= self.multiengine.push_function(dict(f=testf))
338 338 d.addCallback(lambda _: self.multiengine.execute('result = f(10)'))
339 339 d.addCallback(lambda _: self.multiengine.pull('result'))
340 340 d.addCallback(lambda r: self.assertEquals(r, 4*[testf(10)]))
341 341 d.addCallback(lambda _: self.multiengine.push(dict(globala=globala)))
342 342 d.addCallback(lambda _: self.multiengine.push_function(dict(testg=testg)))
343 343 d.addCallback(lambda _: self.multiengine.execute('result = testg(10)'))
344 344 d.addCallback(lambda _: self.multiengine.pull('result'))
345 345 d.addCallback(lambda r: self.assertEquals(r, 4*[testg(10)]))
346 346 return d
347 347
348 348 def testPullFunctionAll(self):
349 349 self.addEngine(4)
350 350 d= self.multiengine.push_function(dict(f=testf))
351 351 d.addCallback(lambda _: self.multiengine.pull_function('f'))
352 352 d.addCallback(lambda r: self.assertEquals([func(10) for func in r], 4*[testf(10)]))
353 353 return d
354 354
355 355 def testGetIDs(self):
356 356 self.addEngine(1)
357 357 d= self.multiengine.get_ids()
358 358 d.addCallback(lambda r: self.assertEquals(r, [0]))
359 359 d.addCallback(lambda _: self.addEngine(3))
360 360 d.addCallback(lambda _: self.multiengine.get_ids())
361 361 d.addCallback(lambda r: self.assertEquals(r, [0,1,2,3]))
362 362 return d
363 363
364 364 def testClearQueue(self):
365 365 self.addEngine(4)
366 366 d= self.multiengine.clear_queue()
367 367 d.addCallback(lambda r: self.assertEquals(r,4*[None]))
368 368 return d
369 369
370 370 def testQueueStatus(self):
371 371 self.addEngine(4)
372 372 d= self.multiengine.queue_status(targets=0)
373 373 d.addCallback(lambda r: self.assert_(isinstance(r[0],tuple)))
374 374 return d
375 375
376 376 def testGetSetProperties(self):
377 377 self.addEngine(4)
378 378 dikt = dict(a=5, b='asdf', c=True, d=None, e=range(5))
379 379 d= self.multiengine.set_properties(dikt)
380 380 d.addCallback(lambda r: self.multiengine.get_properties())
381 381 d.addCallback(lambda r: self.assertEquals(r, 4*[dikt]))
382 382 d.addCallback(lambda r: self.multiengine.get_properties(('c',)))
383 383 d.addCallback(lambda r: self.assertEquals(r, 4*[{'c': dikt['c']}]))
384 384 d.addCallback(lambda r: self.multiengine.set_properties(dict(c=False)))
385 385 d.addCallback(lambda r: self.multiengine.get_properties(('c', 'd')))
386 386 d.addCallback(lambda r: self.assertEquals(r, 4*[dict(c=False, d=None)]))
387 387 return d
388 388
389 389 def testClearProperties(self):
390 390 self.addEngine(4)
391 391 dikt = dict(a=5, b='asdf', c=True, d=None, e=range(5))
392 392 d= self.multiengine.set_properties(dikt)
393 393 d.addCallback(lambda r: self.multiengine.clear_properties())
394 394 d.addCallback(lambda r: self.multiengine.get_properties())
395 395 d.addCallback(lambda r: self.assertEquals(r, 4*[{}]))
396 396 return d
397 397
398 398 def testDelHasProperties(self):
399 399 self.addEngine(4)
400 400 dikt = dict(a=5, b='asdf', c=True, d=None, e=range(5))
401 401 d= self.multiengine.set_properties(dikt)
402 402 d.addCallback(lambda r: self.multiengine.del_properties(('b','e')))
403 403 d.addCallback(lambda r: self.multiengine.has_properties(('a','b','c','d','e')))
404 404 d.addCallback(lambda r: self.assertEquals(r, 4*[[True, False, True, True, False]]))
405 405 return d
406 406
407 407 Parametric(IMultiEngineTestCase)
408 408
409 409 #-------------------------------------------------------------------------------
410 410 # ISynchronousMultiEngineTestCase
411 411 #-------------------------------------------------------------------------------
412 412
413 413 class ISynchronousMultiEngineTestCase(IMultiEngineBaseTestCase):
414 414
415 415 def testISynchronousMultiEngineInterface(self):
416 416 """Does self.engine claim to implement IEngineCore?"""
417 417 self.assert_(me.ISynchronousEngineMultiplexer.providedBy(self.multiengine))
418 418 self.assert_(me.ISynchronousMultiEngine.providedBy(self.multiengine))
419 419
420 420 def testExecute(self):
421 421 self.addEngine(4)
422 422 execute = self.multiengine.execute
423 423 d= execute('a=5', targets=0, block=True)
424 424 d.addCallback(lambda r: self.assert_(len(r)==1))
425 425 d.addCallback(lambda _: execute('b=10'))
426 426 d.addCallback(lambda r: self.assert_(len(r)==4))
427 427 d.addCallback(lambda _: execute('c=30', block=False))
428 428 d.addCallback(lambda did: self.assert_(isdid(did)))
429 429 d.addCallback(lambda _: execute('d=[0,1,2]', block=False))
430 430 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
431 431 d.addCallback(lambda r: self.assert_(len(r)==4))
432 432 return d
433 433
434 434 def testPushPull(self):
435 435 data = dict(a=10, b=1.05, c=range(10), d={'e':(1,2),'f':'hi'})
436 436 self.addEngine(4)
437 437 push = self.multiengine.push
438 438 pull = self.multiengine.pull
439 439 d= push({'data':data}, targets=0)
440 440 d.addCallback(lambda r: pull('data', targets=0))
441 441 d.addCallback(lambda r: self.assertEqual(r,[data]))
442 442 d.addCallback(lambda _: push({'data':data}))
443 443 d.addCallback(lambda r: pull('data'))
444 444 d.addCallback(lambda r: self.assertEqual(r,4*[data]))
445 445 d.addCallback(lambda _: push({'data':data}, block=False))
446 446 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
447 447 d.addCallback(lambda _: pull('data', block=False))
448 448 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
449 449 d.addCallback(lambda r: self.assertEqual(r,4*[data]))
450 450 d.addCallback(lambda _: push(dict(a=10,b=20)))
451 451 d.addCallback(lambda _: pull(('a','b')))
452 452 d.addCallback(lambda r: self.assertEquals(r, 4*[[10,20]]))
453 453 return d
454 454
455 455 def testPushPullFunction(self):
456 456 self.addEngine(4)
457 457 pushf = self.multiengine.push_function
458 458 pullf = self.multiengine.pull_function
459 459 push = self.multiengine.push
460 460 pull = self.multiengine.pull
461 461 execute = self.multiengine.execute
462 462 d= pushf({'testf':testf}, targets=0)
463 463 d.addCallback(lambda r: pullf('testf', targets=0))
464 464 d.addCallback(lambda r: self.assertEqual(r[0](1.0), testf(1.0)))
465 465 d.addCallback(lambda _: execute('r = testf(10)', targets=0))
466 466 d.addCallback(lambda _: pull('r', targets=0))
467 467 d.addCallback(lambda r: self.assertEquals(r[0], testf(10)))
468 468 d.addCallback(lambda _: pushf({'testf':testf}, block=False))
469 469 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
470 470 d.addCallback(lambda _: pullf('testf', block=False))
471 471 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
472 472 d.addCallback(lambda r: self.assertEqual(r[0](1.0), testf(1.0)))
473 473 d.addCallback(lambda _: execute("def g(x): return x*x", targets=0))
474 474 d.addCallback(lambda _: pullf(('testf','g'),targets=0))
475 475 d.addCallback(lambda r: self.assertEquals((r[0][0](10),r[0][1](10)), (testf(10), 100)))
476 476 return d
477 477
478 478 def testGetResult(self):
479 479 shell = Interpreter()
480 480 result1 = shell.execute('a=10')
481 481 result1['id'] = 0
482 482 result2 = shell.execute('b=20')
483 483 result2['id'] = 0
484 484 execute= self.multiengine.execute
485 485 get_result = self.multiengine.get_result
486 486 self.addEngine(1)
487 487 d= execute('a=10')
488 488 d.addCallback(lambda _: get_result())
489 489 d.addCallback(lambda r: self.assertEquals(r[0], result1))
490 490 d.addCallback(lambda _: execute('b=20'))
491 491 d.addCallback(lambda _: get_result(1))
492 492 d.addCallback(lambda r: self.assertEquals(r[0], result1))
493 493 d.addCallback(lambda _: get_result(2, block=False))
494 494 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
495 495 d.addCallback(lambda r: self.assertEquals(r[0], result2))
496 496 return d
497 497
498 498 def testResetAndKeys(self):
499 499 self.addEngine(1)
500 500
501 501 #Blocking mode
502 502 d= self.multiengine.push(dict(a=10, b=20, c=range(10)), targets=0)
503 503 d.addCallback(lambda _: self.multiengine.keys(targets=0))
504 504 def keys_found(keys):
505 505 self.assert_('a' in keys[0])
506 506 self.assert_('b' in keys[0])
507 507 self.assert_('b' in keys[0])
508 508 d.addCallback(keys_found)
509 509 d.addCallback(lambda _: self.multiengine.reset(targets=0))
510 510 d.addCallback(lambda _: self.multiengine.keys(targets=0))
511 511 def keys_not_found(keys):
512 512 self.assert_('a' not in keys[0])
513 513 self.assert_('b' not in keys[0])
514 514 self.assert_('b' not in keys[0])
515 515 d.addCallback(keys_not_found)
516 516
517 517 #Non-blocking mode
518 518 d.addCallback(lambda _: self.multiengine.push(dict(a=10, b=20, c=range(10)), targets=0))
519 519 d.addCallback(lambda _: self.multiengine.keys(targets=0, block=False))
520 520 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
521 521 def keys_found(keys):
522 522 self.assert_('a' in keys[0])
523 523 self.assert_('b' in keys[0])
524 524 self.assert_('b' in keys[0])
525 525 d.addCallback(keys_found)
526 526 d.addCallback(lambda _: self.multiengine.reset(targets=0, block=False))
527 527 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
528 528 d.addCallback(lambda _: self.multiengine.keys(targets=0, block=False))
529 529 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
530 530 def keys_not_found(keys):
531 531 self.assert_('a' not in keys[0])
532 532 self.assert_('b' not in keys[0])
533 533 self.assert_('b' not in keys[0])
534 534 d.addCallback(keys_not_found)
535 535
536 536 return d
537 537
538 538 def testPushPullSerialized(self):
539 539 self.addEngine(1)
540 540 dikt = dict(a=10,b='hi there',c=1.2345,d={'p':(1,2)})
541 541 sdikt = {}
542 542 for k,v in dikt.iteritems():
543 543 sdikt[k] = newserialized.serialize(v)
544 544 d= self.multiengine.push_serialized(dict(a=sdikt['a']), targets=0)
545 545 d.addCallback(lambda _: self.multiengine.pull('a',targets=0))
546 546 d.addCallback(lambda r: self.assertEquals(r[0], dikt['a']))
547 547 d.addCallback(lambda _: self.multiengine.pull_serialized('a', targets=0))
548 548 d.addCallback(lambda serial: newserialized.IUnSerialized(serial[0]).getObject())
549 549 d.addCallback(lambda r: self.assertEquals(r, dikt['a']))
550 550 d.addCallback(lambda _: self.multiengine.push_serialized(sdikt, targets=0))
551 551 d.addCallback(lambda _: self.multiengine.pull_serialized(sdikt.keys(), targets=0))
552 552 d.addCallback(lambda serial: [newserialized.IUnSerialized(s).getObject() for s in serial[0]])
553 553 d.addCallback(lambda r: self.assertEquals(r, dikt.values()))
554 554 d.addCallback(lambda _: self.multiengine.reset(targets=0))
555 555 d.addCallback(lambda _: self.multiengine.pull_serialized('a', targets=0))
556 556 d.addErrback(lambda f: self.assertRaises(NameError, _raise_it, f))
557 557
558 558 #Non-blocking mode
559 559 d.addCallback(lambda r: self.multiengine.push_serialized(dict(a=sdikt['a']), targets=0, block=False))
560 560 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
561 561 d.addCallback(lambda _: self.multiengine.pull('a',targets=0))
562 562 d.addCallback(lambda r: self.assertEquals(r[0], dikt['a']))
563 563 d.addCallback(lambda _: self.multiengine.pull_serialized('a', targets=0, block=False))
564 564 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
565 565 d.addCallback(lambda serial: newserialized.IUnSerialized(serial[0]).getObject())
566 566 d.addCallback(lambda r: self.assertEquals(r, dikt['a']))
567 567 d.addCallback(lambda _: self.multiengine.push_serialized(sdikt, targets=0, block=False))
568 568 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
569 569 d.addCallback(lambda _: self.multiengine.pull_serialized(sdikt.keys(), targets=0, block=False))
570 570 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
571 571 d.addCallback(lambda serial: [newserialized.IUnSerialized(s).getObject() for s in serial[0]])
572 572 d.addCallback(lambda r: self.assertEquals(r, dikt.values()))
573 573 d.addCallback(lambda _: self.multiengine.reset(targets=0))
574 574 d.addCallback(lambda _: self.multiengine.pull_serialized('a', targets=0, block=False))
575 575 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
576 576 d.addErrback(lambda f: self.assertRaises(NameError, _raise_it, f))
577 577 return d
578 578
579 579 def testClearQueue(self):
580 580 self.addEngine(4)
581 581 d= self.multiengine.clear_queue()
582 582 d.addCallback(lambda r: self.multiengine.clear_queue(block=False))
583 583 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
584 584 d.addCallback(lambda r: self.assertEquals(r,4*[None]))
585 585 return d
586 586
587 587 def testQueueStatus(self):
588 588 self.addEngine(4)
589 589 d= self.multiengine.queue_status(targets=0)
590 590 d.addCallback(lambda r: self.assert_(isinstance(r[0],tuple)))
591 591 d.addCallback(lambda r: self.multiengine.queue_status(targets=0, block=False))
592 592 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
593 593 d.addCallback(lambda r: self.assert_(isinstance(r[0],tuple)))
594 594 return d
595 595
596 596 def testGetIDs(self):
597 597 self.addEngine(1)
598 598 d= self.multiengine.get_ids()
599 599 d.addCallback(lambda r: self.assertEquals(r, [0]))
600 600 d.addCallback(lambda _: self.addEngine(3))
601 601 d.addCallback(lambda _: self.multiengine.get_ids())
602 602 d.addCallback(lambda r: self.assertEquals(r, [0,1,2,3]))
603 603 return d
604 604
605 605 def testGetSetProperties(self):
606 606 self.addEngine(4)
607 607 dikt = dict(a=5, b='asdf', c=True, d=None, e=range(5))
608 608 d= self.multiengine.set_properties(dikt)
609 609 d.addCallback(lambda r: self.multiengine.get_properties())
610 610 d.addCallback(lambda r: self.assertEquals(r, 4*[dikt]))
611 611 d.addCallback(lambda r: self.multiengine.get_properties(('c',)))
612 612 d.addCallback(lambda r: self.assertEquals(r, 4*[{'c': dikt['c']}]))
613 613 d.addCallback(lambda r: self.multiengine.set_properties(dict(c=False)))
614 614 d.addCallback(lambda r: self.multiengine.get_properties(('c', 'd')))
615 615 d.addCallback(lambda r: self.assertEquals(r, 4*[dict(c=False, d=None)]))
616 616
617 617 #Non-blocking
618 618 d.addCallback(lambda r: self.multiengine.set_properties(dikt, block=False))
619 619 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
620 620 d.addCallback(lambda r: self.multiengine.get_properties(block=False))
621 621 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
622 622 d.addCallback(lambda r: self.assertEquals(r, 4*[dikt]))
623 623 d.addCallback(lambda r: self.multiengine.get_properties(('c',), block=False))
624 624 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
625 625 d.addCallback(lambda r: self.assertEquals(r, 4*[{'c': dikt['c']}]))
626 626 d.addCallback(lambda r: self.multiengine.set_properties(dict(c=False), block=False))
627 627 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
628 628 d.addCallback(lambda r: self.multiengine.get_properties(('c', 'd'), block=False))
629 629 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
630 630 d.addCallback(lambda r: self.assertEquals(r, 4*[dict(c=False, d=None)]))
631 631 return d
632 632
633 633 def testClearProperties(self):
634 634 self.addEngine(4)
635 635 dikt = dict(a=5, b='asdf', c=True, d=None, e=range(5))
636 636 d= self.multiengine.set_properties(dikt)
637 637 d.addCallback(lambda r: self.multiengine.clear_properties())
638 638 d.addCallback(lambda r: self.multiengine.get_properties())
639 639 d.addCallback(lambda r: self.assertEquals(r, 4*[{}]))
640 640
641 641 #Non-blocking
642 642 d.addCallback(lambda r: self.multiengine.set_properties(dikt, block=False))
643 643 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
644 644 d.addCallback(lambda r: self.multiengine.clear_properties(block=False))
645 645 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
646 646 d.addCallback(lambda r: self.multiengine.get_properties(block=False))
647 647 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
648 648 d.addCallback(lambda r: self.assertEquals(r, 4*[{}]))
649 649 return d
650 650
651 651 def testDelHasProperties(self):
652 652 self.addEngine(4)
653 653 dikt = dict(a=5, b='asdf', c=True, d=None, e=range(5))
654 654 d= self.multiengine.set_properties(dikt)
655 655 d.addCallback(lambda r: self.multiengine.del_properties(('b','e')))
656 656 d.addCallback(lambda r: self.multiengine.has_properties(('a','b','c','d','e')))
657 657 d.addCallback(lambda r: self.assertEquals(r, 4*[[True, False, True, True, False]]))
658 658
659 659 #Non-blocking
660 660 d.addCallback(lambda r: self.multiengine.set_properties(dikt, block=False))
661 661 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
662 662 d.addCallback(lambda r: self.multiengine.del_properties(('b','e'), block=False))
663 663 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
664 664 d.addCallback(lambda r: self.multiengine.has_properties(('a','b','c','d','e'), block=False))
665 665 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
666 666 d.addCallback(lambda r: self.assertEquals(r, 4*[[True, False, True, True, False]]))
667 667 return d
668 668
669 669 def test_clear_pending_deferreds(self):
670 670 self.addEngine(4)
671 671 did_list = []
672 672 d= self.multiengine.execute('a=10',block=False)
673 673 d.addCallback(lambda did: did_list.append(did))
674 674 d.addCallback(lambda _: self.multiengine.push(dict(b=10),block=False))
675 675 d.addCallback(lambda did: did_list.append(did))
676 676 d.addCallback(lambda _: self.multiengine.pull(('a','b'),block=False))
677 677 d.addCallback(lambda did: did_list.append(did))
678 678 d.addCallback(lambda _: self.multiengine.clear_pending_deferreds())
679 679 d.addCallback(lambda _: self.multiengine.get_pending_deferred(did_list[0],True))
680 680 d.addErrback(lambda f: self.assertRaises(InvalidDeferredID, f.raiseException))
681 681 d.addCallback(lambda _: self.multiengine.get_pending_deferred(did_list[1],True))
682 682 d.addErrback(lambda f: self.assertRaises(InvalidDeferredID, f.raiseException))
683 683 d.addCallback(lambda _: self.multiengine.get_pending_deferred(did_list[2],True))
684 684 d.addErrback(lambda f: self.assertRaises(InvalidDeferredID, f.raiseException))
685 685 return d
686 686
687 687 #-------------------------------------------------------------------------------
688 688 # Coordinator test cases
689 689 #-------------------------------------------------------------------------------
690 690
691 691 class IMultiEngineCoordinatorTestCase(object):
692 692
693 693 def testScatterGather(self):
694 694 self.addEngine(4)
695 695 d= self.multiengine.scatter('a', range(16))
696 696 d.addCallback(lambda r: self.multiengine.gather('a'))
697 697 d.addCallback(lambda r: self.assertEquals(r, range(16)))
698 698 d.addCallback(lambda _: self.multiengine.gather('asdf'))
699 699 d.addErrback(lambda f: self.assertRaises(NameError, _raise_it, f))
700 700 return d
701 701
702 702 def testScatterGatherNumpy(self):
703 703 try:
704 704 import numpy
705 705 from numpy.testing.utils import assert_array_equal, assert_array_almost_equal
706 706 except:
707 707 return
708 708 else:
709 709 self.addEngine(4)
710 710 a = numpy.arange(16)
711 711 d = self.multiengine.scatter('a', a)
712 712 d.addCallback(lambda r: self.multiengine.gather('a'))
713 713 d.addCallback(lambda r: assert_array_equal(r, a))
714 714 return d
715 715
716 716 def testMap(self):
717 717 self.addEngine(4)
718 718 def f(x):
719 719 return x**2
720 720 data = range(16)
721 721 d= self.multiengine.map(f, data)
722 722 d.addCallback(lambda r: self.assertEquals(r,[f(x) for x in data]))
723 723 return d
724 724
725 725
726 726 class ISynchronousMultiEngineCoordinatorTestCase(IMultiEngineCoordinatorTestCase):
727 727
728 728 def testScatterGatherNonblocking(self):
729 729 self.addEngine(4)
730 730 d= self.multiengine.scatter('a', range(16), block=False)
731 731 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
732 732 d.addCallback(lambda r: self.multiengine.gather('a', block=False))
733 733 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
734 734 d.addCallback(lambda r: self.assertEquals(r, range(16)))
735 735 return d
736
736
737 737 def testScatterGatherNumpyNonblocking(self):
738 738 try:
739 739 import numpy
740 740 from numpy.testing.utils import assert_array_equal, assert_array_almost_equal
741 741 except:
742 742 return
743 743 else:
744 744 self.addEngine(4)
745 745 a = numpy.arange(16)
746 746 d = self.multiengine.scatter('a', a, block=False)
747 747 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
748 748 d.addCallback(lambda r: self.multiengine.gather('a', block=False))
749 749 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
750 750 d.addCallback(lambda r: assert_array_equal(r, a))
751 751 return d
752
753 def testMapNonblocking(self):
754 self.addEngine(4)
755 def f(x):
756 return x**2
757 data = range(16)
758 d= self.multiengine.map(f, data, block=False)
759 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
760 d.addCallback(lambda r: self.assertEquals(r,[f(x) for x in data]))
761 return d
762
752
763 753 def test_clear_pending_deferreds(self):
764 754 self.addEngine(4)
765 755 did_list = []
766 756 d= self.multiengine.scatter('a',range(16),block=False)
767 757 d.addCallback(lambda did: did_list.append(did))
768 758 d.addCallback(lambda _: self.multiengine.gather('a',block=False))
769 759 d.addCallback(lambda did: did_list.append(did))
770 760 d.addCallback(lambda _: self.multiengine.map(lambda x: x, range(16),block=False))
771 761 d.addCallback(lambda did: did_list.append(did))
772 762 d.addCallback(lambda _: self.multiengine.clear_pending_deferreds())
773 763 d.addCallback(lambda _: self.multiengine.get_pending_deferred(did_list[0],True))
774 764 d.addErrback(lambda f: self.assertRaises(InvalidDeferredID, f.raiseException))
775 765 d.addCallback(lambda _: self.multiengine.get_pending_deferred(did_list[1],True))
776 766 d.addErrback(lambda f: self.assertRaises(InvalidDeferredID, f.raiseException))
777 767 d.addCallback(lambda _: self.multiengine.get_pending_deferred(did_list[2],True))
778 768 d.addErrback(lambda f: self.assertRaises(InvalidDeferredID, f.raiseException))
779 769 return d
780 770
781 771 #-------------------------------------------------------------------------------
782 772 # Extras test cases
783 773 #-------------------------------------------------------------------------------
784 774
785 775 class IMultiEngineExtrasTestCase(object):
786 776
787 777 def testZipPull(self):
788 778 self.addEngine(4)
789 779 d= self.multiengine.push(dict(a=10,b=20))
790 780 d.addCallback(lambda r: self.multiengine.zip_pull(('a','b')))
791 781 d.addCallback(lambda r: self.assert_(r, [4*[10],4*[20]]))
792 782 return d
793 783
794 784 def testRun(self):
795 785 self.addEngine(4)
796 786 import tempfile
797 787 fname = tempfile.mktemp('foo.py')
798 788 f= open(fname, 'w')
799 789 f.write('a = 10\nb=30')
800 790 f.close()
801 791 d= self.multiengine.run(fname)
802 792 d.addCallback(lambda r: self.multiengine.pull(('a','b')))
803 793 d.addCallback(lambda r: self.assertEquals(r, 4*[[10,30]]))
804 794 return d
805 795
806 796
807 797 class ISynchronousMultiEngineExtrasTestCase(IMultiEngineExtrasTestCase):
808 798
809 799 def testZipPullNonblocking(self):
810 800 self.addEngine(4)
811 801 d= self.multiengine.push(dict(a=10,b=20))
812 802 d.addCallback(lambda r: self.multiengine.zip_pull(('a','b'), block=False))
813 803 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
814 804 d.addCallback(lambda r: self.assert_(r, [4*[10],4*[20]]))
815 805 return d
816 806
817 807 def testRunNonblocking(self):
818 808 self.addEngine(4)
819 809 import tempfile
820 810 fname = tempfile.mktemp('foo.py')
821 811 f= open(fname, 'w')
822 812 f.write('a = 10\nb=30')
823 813 f.close()
824 814 d= self.multiengine.run(fname, block=False)
825 815 d.addCallback(lambda did: self.multiengine.get_pending_deferred(did, True))
826 816 d.addCallback(lambda r: self.multiengine.pull(('a','b')))
827 817 d.addCallback(lambda r: self.assertEquals(r, 4*[[10,30]]))
828 818 return d
829 819
830 820
831 821 #-------------------------------------------------------------------------------
832 822 # IFullSynchronousMultiEngineTestCase
833 823 #-------------------------------------------------------------------------------
834 824
835 825 class IFullSynchronousMultiEngineTestCase(ISynchronousMultiEngineTestCase,
836 826 ISynchronousMultiEngineCoordinatorTestCase,
837 827 ISynchronousMultiEngineExtrasTestCase):
838 828 pass
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: file renamed from IPython/testing/ipdoctest.py to IPython/testing/attic/ipdoctest.py
1 NO CONTENT: file renamed from IPython/testing/tcommon.py to IPython/testing/attic/tcommon.py
1 NO CONTENT: file renamed from IPython/testing/testTEMPLATE.py to IPython/testing/attic/testTEMPLATE.py
1 NO CONTENT: file renamed from IPython/testing/tstTEMPLATE_doctest.py to IPython/testing/attic/tstTEMPLATE_doctest.py
1 NO CONTENT: file renamed from IPython/testing/tstTEMPLATE_doctest.txt to IPython/testing/attic/tstTEMPLATE_doctest.txt
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: file renamed from IPython/tools/tests/tst_tools_utils_doctest2.txt to IPython/tools/tests/test_tools_utils.txt
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: file was removed
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: file was removed
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: file was removed
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: file was removed
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: file was removed
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: file was removed
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: file was removed
The requested commit or file is too big and content was truncated. Show full diff
General Comments 0
You need to be logged in to leave comments. Login now