Merging Fernando's fixes from his trunk-dev and fixing testing things....
Brian Granger -
r1973:5d4c3105 merge

The requested changes are too big and content was truncated.

@@ -0,0 +1,94 b''
1 """Tests for the ipdoctest machinery itself.
2
3 Note: in a file named test_X, functions whose only test is their docstring (as
4 a doctest) and which have no test functionality of their own, should be called
5 'doctest_foo' instead of 'test_foo', otherwise they get double-counted (the
6 empty function call is counted as a test, which just inflates test numbers
7 artificially).
8 """
9
10 def doctest_simple():
11 """ipdoctest must handle simple inputs
12
13 In [1]: 1
14 Out[1]: 1
15
16 In [2]: print 1
17 1
18 """
19
20
21 def doctest_run_builtins():
22 """Check that %run doesn't damage __builtins__ via a doctest.
23
24 This is similar to the test_run_builtins, but I want *both* forms of the
25 test to catch any possible glitches in our testing machinery, since that
26 modifies %run somewhat. So for this, we have both a normal test (below)
27 and a doctest (this one).
28
29 In [1]: import tempfile
30
31 In [3]: f = tempfile.NamedTemporaryFile()
32
33 In [4]: f.write('pass\\n')
34
35 In [5]: f.flush()
36
37 In [7]: %run $f.name
38 """
39
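The docstring above mentions a companion "normal" (non-doctest) test. Purely as a reading aid, here is a minimal sketch of what such a test could look like; the `_ip` object and its `magic()` method are assumed to be injected by the test machinery (hypothetical here, not taken from this commit):

def test_run_builtins_sketch():
    """Sketch of a plain (non-doctest) counterpart to doctest_run_builtins."""
    import __builtin__
    import tempfile
    bid_before = id(__builtin__)
    f = tempfile.NamedTemporaryFile()
    f.write('pass\n')
    f.flush()
    _ip.magic('run ' + f.name)   # _ip assumed to be provided by the test harness
    # %run must leave the __builtin__ module object untouched
    assert id(__builtin__) == bid_before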
40 def doctest_multiline1():
41 """The ipdoctest machinery must handle multiline examples gracefully.
42
43 In [2]: for i in range(10):
44 ...: print i,
45 ...:
46 0 1 2 3 4 5 6 7 8 9
47 """
48
49
50 def doctest_multiline2():
51 """Multiline examples that define functions and print output.
52
53 In [7]: def f(x):
54 ...: return x+1
55 ...:
56
57 In [8]: f(1)
58 Out[8]: 2
59
60 In [9]: def g(x):
61 ...: print 'x is:',x
62 ...:
63
64 In [10]: g(1)
65 x is: 1
66
67 In [11]: g('hello')
68 x is: hello
69 """
70
71
72 def doctest_multiline3():
73 """Multiline examples with blank lines.
74
75 In [12]: def h(x):
76 ....: if x>1:
77 ....: return x**2
78 ....: # To leave a blank line in the input, you must mark it
79 ....: # with a comment character:
80 ....: #
81 ....: # otherwise the doctest parser gets confused.
82 ....: else:
83 ....: return -1
84 ....:
85
86 In [13]: h(5)
87 Out[13]: 25
88
89 In [14]: h(1)
90 Out[14]: -1
91
92 In [15]: h(0)
93 Out[15]: -1
94 """
@@ -0,0 +1,90 b''
1 """Generic testing tools that do NOT depend on Twisted.
2
3 In particular, this module exposes a set of top-level assert* functions that
4 can be used in place of nose.tools.assert* in method generators (the ones in
5 nose can not, at least as of nose 0.10.4).
6
7 Note: our testing package contains testing.util, which does depend on Twisted
8 and provides utilities for tests that manage Deferreds. All testing support
9 tools that only depend on nose, IPython or the standard library should go here
10 instead.
11
12
13 Authors
14 -------
15 - Fernando Perez <Fernando.Perez@berkeley.edu>
16 """
17
18 #*****************************************************************************
19 # Copyright (C) 2009 The IPython Development Team
20 #
21 # Distributed under the terms of the BSD License. The full license is in
22 # the file COPYING, distributed as part of this software.
23 #*****************************************************************************
24
25 #-----------------------------------------------------------------------------
26 # Required modules and packages
27 #-----------------------------------------------------------------------------
28
29 # Standard Python lib
30 import os
31 import sys
32
33 # Third-party
34 import nose.tools as nt
35
36 # From this project
37 from IPython.tools import utils
38
39 #-----------------------------------------------------------------------------
40 # Globals
41 #-----------------------------------------------------------------------------
42
43 # Make a bunch of nose.tools assert wrappers that can be used in test
44 # generators. This will expose an assert* function for each one in nose.tools.
45
46 _tpl = """
47 def %(name)s(*a,**kw):
48 return nt.%(name)s(*a,**kw)
49 """
50
51 for _x in [a for a in dir(nt) if a.startswith('assert')]:
52 exec _tpl % dict(name=_x)
53
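For illustration only, the same wrappers could be built without exec by using a closure factory; this is a sketch of the equivalent idea, not the code used above:

def _make_assert_wrapper(name):
    # Return a plain function that forwards to the nose.tools assertion.
    def wrapper(*a, **kw):
        return getattr(nt, name)(*a, **kw)
    wrapper.__name__ = name
    return wrapper

# e.g.: for _x in [a for a in dir(nt) if a.startswith('assert')]:
#           globals()[_x] = _make_assert_wrapper(_x)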
54 #-----------------------------------------------------------------------------
55 # Functions and classes
56 #-----------------------------------------------------------------------------
57
58 def full_path(startPath,files):
59 """Make full paths for all the listed files, based on startPath.
60
61 Only the base part of startPath is kept, since this routine is typically
62 used with a script's __file__ variable as startPath. The base of startPath
63 is then prepended to all the listed files, forming the output list.
64
65 Parameters
66 ----------
67 startPath : string
68 Initial path to use as the base for the results. This path is split
69 using os.path.split() and only its first component is kept.
70
71 files : string or list
72 One or more files.
73
74 Examples
75 --------
76
77 >>> full_path('/foo/bar.py',['a.txt','b.txt'])
78 ['/foo/a.txt', '/foo/b.txt']
79
80 >>> full_path('/foo',['a.txt','b.txt'])
81 ['/a.txt', '/b.txt']
82
83 If a single file is given, the output is still a list:
84 >>> full_path('/foo','a.txt')
85 ['/a.txt']
86 """
87
88 files = utils.list_strings(files)
89 base = os.path.split(startPath)[0]
90 return [ os.path.join(base,f) for f in files ]
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated.
@@ -1,622 +1,626 b''
1 1 # -*- coding: utf-8 -*-
2 2 """
3 3 Classes for handling input/output prompts.
4 4 """
5 5
6 6 #*****************************************************************************
7 7 # Copyright (C) 2008-2009 The IPython Development Team
8 8 # Copyright (C) 2001-2007 Fernando Perez <fperez@colorado.edu>
9 9 #
10 10 # Distributed under the terms of the BSD License. The full license is in
11 11 # the file COPYING, distributed as part of this software.
12 12 #*****************************************************************************
13 13
14 14 #****************************************************************************
15 15 # Required modules
16 16 import __builtin__
17 17 import os
18 18 import socket
19 19 import sys
20 20 import time
21 21
22 22 # IPython's own
23 23 from IPython import ColorANSI
24 24 from IPython import Release
25 25 from IPython.external.Itpl import ItplNS
26 26 from IPython.ipapi import TryNext
27 27 from IPython.ipstruct import Struct
28 28 from IPython.macro import Macro
29 29
30 30 from IPython.genutils import *
31 31
32 32 #****************************************************************************
33 33 #Color schemes for Prompts.
34 34
35 35 PromptColors = ColorANSI.ColorSchemeTable()
36 36 InputColors = ColorANSI.InputTermColors # just a shorthand
37 37 Colors = ColorANSI.TermColors # just a shorthand
38 38
39 39 PromptColors.add_scheme(ColorANSI.ColorScheme(
40 40 'NoColor',
41 41 in_prompt = InputColors.NoColor, # Input prompt
42 42 in_number = InputColors.NoColor, # Input prompt number
43 43 in_prompt2 = InputColors.NoColor, # Continuation prompt
44 44 in_normal = InputColors.NoColor, # color off (usu. Colors.Normal)
45 45
46 46 out_prompt = Colors.NoColor, # Output prompt
47 47 out_number = Colors.NoColor, # Output prompt number
48 48
49 49 normal = Colors.NoColor # color off (usu. Colors.Normal)
50 50 ))
51 51
52 52 # make some schemes as instances so we can copy them for modification easily:
53 53 __PColLinux = ColorANSI.ColorScheme(
54 54 'Linux',
55 55 in_prompt = InputColors.Green,
56 56 in_number = InputColors.LightGreen,
57 57 in_prompt2 = InputColors.Green,
58 58 in_normal = InputColors.Normal, # color off (usu. Colors.Normal)
59 59
60 60 out_prompt = Colors.Red,
61 61 out_number = Colors.LightRed,
62 62
63 63 normal = Colors.Normal
64 64 )
65 65 # Don't forget to enter it into the table!
66 66 PromptColors.add_scheme(__PColLinux)
67 67
68 68 # Slightly modified Linux for light backgrounds
69 69 __PColLightBG = __PColLinux.copy('LightBG')
70 70
71 71 __PColLightBG.colors.update(
72 72 in_prompt = InputColors.Blue,
73 73 in_number = InputColors.LightBlue,
74 74 in_prompt2 = InputColors.Blue
75 75 )
76 76 PromptColors.add_scheme(__PColLightBG)
77 77
78 78 del Colors,InputColors
79 79
80 80 #-----------------------------------------------------------------------------
81 81 def multiple_replace(dict, text):
82 82 """ Replace in 'text' all occurences of any key in the given
83 83 dictionary by its corresponding value. Returns the new string."""
84 84
85 85 # Function by Xavier Defrang, originally found at:
86 86 # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/81330
87 87
88 88 # Create a regular expression from the dictionary keys
89 89 regex = re.compile("(%s)" % "|".join(map(re.escape, dict.keys())))
90 90 # For each match, look-up corresponding value in dictionary
91 91 return regex.sub(lambda mo: dict[mo.string[mo.start():mo.end()]], text)
92 92
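A quick illustration (editorial, not part of the module): because every key is folded into a single regular expression, the replacement happens in one pass and substitutions never cascade into each other:

>>> multiple_replace({'cat': 'dog', 'dog': 'cat'}, 'cat chases dog')
'dog chases cat'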
93 93 #-----------------------------------------------------------------------------
94 94 # Special characters that can be used in prompt templates, mainly bash-like
95 95
96 96 # If $HOME isn't defined (Windows), make it an absurd string so that it can
97 97 # never be expanded out into '~'. Basically anything which can never be a
98 98 # reasonable directory name will do, we just want the $HOME -> '~' operation
99 99 # to become a no-op. We pre-compute $HOME here so it's not done on every
100 100 # prompt call.
101 101
102 102 # FIXME:
103 103
104 104 # - This should be turned into a class which does proper namespace management,
105 105 # since the prompt specials need to be evaluated in a certain namespace.
106 106 # Currently it's just globals, which need to be managed manually by code
107 107 # below.
108 108
109 109 # - I also need to split up the color schemes from the prompt specials
110 110 # somehow. I don't have a clean design for that quite yet.
111 111
112 112 HOME = os.environ.get("HOME","//////:::::ZZZZZ,,,~~~")
113 113
114 114 # We precompute a few more strings here for the prompt_specials, which are
115 115 # fixed once ipython starts. This reduces the runtime overhead of computing
116 116 # prompt strings.
117 117 USER = os.environ.get("USER")
118 118 HOSTNAME = socket.gethostname()
119 119 HOSTNAME_SHORT = HOSTNAME.split(".")[0]
120 120 ROOT_SYMBOL = "$#"[os.name=='nt' or os.getuid()==0]
121 121
122 122 prompt_specials_color = {
123 123 # Prompt/history count
124 124 '%n' : '${self.col_num}' '${self.cache.prompt_count}' '${self.col_p}',
125 125 r'\#': '${self.col_num}' '${self.cache.prompt_count}' '${self.col_p}',
126 126 # Just the prompt counter number, WITHOUT any coloring wrappers, so users
127 127 # can get numbers displayed in whatever color they want.
128 128 r'\N': '${self.cache.prompt_count}',
129
129 130 # Prompt/history count, with the actual digits replaced by dots. Used
130 131 # mainly in continuation prompts (prompt_in2)
132 #r'\D': '${"."*len(str(self.cache.prompt_count))}',
133 # More robust form of the above expression, that uses __builtins__
131 134 r'\D': '${"."*__builtins__.len(__builtins__.str(self.cache.prompt_count))}',
135
132 136 # Current working directory
133 137 r'\w': '${os.getcwd()}',
134 138 # Current time
135 139 r'\t' : '${time.strftime("%H:%M:%S")}',
136 140 # Basename of current working directory.
137 141 # (use os.sep to make this portable across OSes)
138 142 r'\W' : '${os.getcwd().split("%s")[-1]}' % os.sep,
139 143 # These X<N> are an extension to the normal bash prompts. They return
140 144 # N terms of the path, after replacing $HOME with '~'
141 145 r'\X0': '${os.getcwd().replace("%s","~")}' % HOME,
142 146 r'\X1': '${self.cwd_filt(1)}',
143 147 r'\X2': '${self.cwd_filt(2)}',
144 148 r'\X3': '${self.cwd_filt(3)}',
145 149 r'\X4': '${self.cwd_filt(4)}',
146 150 r'\X5': '${self.cwd_filt(5)}',
147 151 # Y<N> are similar to X<N>, but they show '~' if it's the directory
148 152 # N+1 in the list. Somewhat like %cN in tcsh.
149 153 r'\Y0': '${self.cwd_filt2(0)}',
150 154 r'\Y1': '${self.cwd_filt2(1)}',
151 155 r'\Y2': '${self.cwd_filt2(2)}',
152 156 r'\Y3': '${self.cwd_filt2(3)}',
153 157 r'\Y4': '${self.cwd_filt2(4)}',
154 158 r'\Y5': '${self.cwd_filt2(5)}',
155 159 # Hostname up to first .
156 160 r'\h': HOSTNAME_SHORT,
157 161 # Full hostname
158 162 r'\H': HOSTNAME,
159 163 # Username of current user
160 164 r'\u': USER,
161 165 # Escaped '\'
162 166 '\\\\': '\\',
163 167 # Newline
164 168 r'\n': '\n',
165 169 # Carriage return
166 170 r'\r': '\r',
167 171 # Release version
168 172 r'\v': Release.version,
169 173 # Root symbol ($ or #)
170 174 r'\$': ROOT_SYMBOL,
171 175 }
172 176
173 177 # A copy of the prompt_specials dictionary but with all color escapes removed,
174 178 # so we can correctly compute the prompt length for the auto_rewrite method.
175 179 prompt_specials_nocolor = prompt_specials_color.copy()
176 180 prompt_specials_nocolor['%n'] = '${self.cache.prompt_count}'
177 181 prompt_specials_nocolor[r'\#'] = '${self.cache.prompt_count}'
178 182
179 183 # Add in all the InputTermColors color escapes as valid prompt characters.
180 184 # They all get added as \\C_COLORNAME, so that we don't have any conflicts
181 185 # with a color name which may begin with a letter used by any other of the
182 186 # allowed specials. This of course means that \\C will never be allowed for
183 187 # anything else.
184 188 input_colors = ColorANSI.InputTermColors
185 189 for _color in dir(input_colors):
186 190 if _color[0] != '_':
187 191 c_name = r'\C_'+_color
188 192 prompt_specials_color[c_name] = getattr(input_colors,_color)
189 193 prompt_specials_nocolor[c_name] = ''
190 194
191 195 # we default to no color for safety. Note that prompt_specials is a global
192 196 # variable used by all prompt objects.
193 197 prompt_specials = prompt_specials_nocolor
194 198
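As a rough illustration (editorial note, no-color table assumed), this is what the specials table does to a prompt template before ItplNS fills in the live values:

>>> multiple_replace(prompt_specials_nocolor, 'In [\\#]: ')
'In [${self.cache.prompt_count}]: '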
195 199 #-----------------------------------------------------------------------------
196 200 def str_safe(arg):
197 201 """Convert to a string, without ever raising an exception.
198 202
199 203 If str(arg) fails, <ERROR: ... > is returned, where ... is the exception
200 204 error message."""
201 205
202 206 try:
203 207 out = str(arg)
204 208 except UnicodeError:
205 209 try:
206 210 out = arg.encode('utf_8','replace')
207 211 except Exception,msg:
208 212 # let's keep this little duplication here, so that the most common
209 213 # case doesn't suffer from a double try wrapping.
210 214 out = '<ERROR: %s>' % msg
211 215 except Exception,msg:
212 216 out = '<ERROR: %s>' % msg
213 217 return out
214 218
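An illustrative doctest-style example (editorial, not in the module) of the guarantee str_safe makes, namely that a broken __str__ never propagates:

>>> class _Broken(object):
...     def __str__(self):
...         raise ValueError('boom')
>>> str_safe(_Broken())
'<ERROR: boom>'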
215 219 class BasePrompt(object):
216 220 """Interactive prompt similar to Mathematica's."""
217 221
218 222 def _get_p_template(self):
219 223 return self._p_template
220 224
221 225 def _set_p_template(self,val):
222 226 self._p_template = val
223 227 self.set_p_str()
224 228
225 229 p_template = property(_get_p_template,_set_p_template,
226 230 doc='Template for prompt string creation')
227 231
228 232 def __init__(self,cache,sep,prompt,pad_left=False):
229 233
230 234 # Hack: we access information about the primary prompt through the
231 235 # cache argument. We need this, because we want the secondary prompt
232 236 # to be aligned with the primary one. Color table info is also shared
233 237 # by all prompt classes through the cache. Nice OO spaghetti code!
234 238 self.cache = cache
235 239 self.sep = sep
236 240
237 241 # regexp to count the number of spaces at the end of a prompt
238 242 # expression, useful for prompt auto-rewriting
239 243 self.rspace = re.compile(r'(\s*)$')
240 244 # Flag to left-pad prompt strings to match the length of the primary
241 245 # prompt
242 246 self.pad_left = pad_left
243 247
244 248 # Set template to create each actual prompt (where numbers change).
245 249 # Use a property
246 250 self.p_template = prompt
247 251 self.set_p_str()
248 252
249 253 def set_p_str(self):
250 254 """ Set the interpolating prompt strings.
251 255
252 256 This must be called every time the color settings change, because the
253 257 prompt_specials global may have changed."""
254 258
255 259 import os,time # needed in locals for prompt string handling
256 260 loc = locals()
257 261 try:
258 262 self.p_str = ItplNS('%s%s%s' %
259 263 ('${self.sep}${self.col_p}',
260 264 multiple_replace(prompt_specials, self.p_template),
261 265 '${self.col_norm}'),self.cache.user_ns,loc)
262 266
263 267 self.p_str_nocolor = ItplNS(multiple_replace(prompt_specials_nocolor,
264 268 self.p_template),
265 269 self.cache.user_ns,loc)
266 270 except:
267 271 print "Illegal prompt template (check $ usage!):",self.p_template
268 272 self.p_str = self.p_template
269 273 self.p_str_nocolor = self.p_template
270 274
271 275 def write(self,msg): # dbg
272 276 sys.stdout.write(msg)
273 277 return ''
274 278
275 279 def __str__(self):
276 280 """Return a string form of the prompt.
277 281
278 282 This form is useful for continuation and output prompts, since it is
279 283 left-padded to match lengths with the primary one (if the
280 284 self.pad_left attribute is set)."""
281 285
282 286 out_str = str_safe(self.p_str)
283 287 if self.pad_left:
284 288 # We must find the amount of padding required to match lengths,
285 289 # taking the color escapes (which are invisible on-screen) into
286 290 # account.
287 291 esc_pad = len(out_str) - len(str_safe(self.p_str_nocolor))
288 292 format = '%%%ss' % (len(str(self.cache.last_prompt))+esc_pad)
289 293 return format % out_str
290 294 else:
291 295 return out_str
292 296
293 297 # these path filters are put in as methods so that we can control the
294 298 # namespace where the prompt strings get evaluated
295 299 def cwd_filt(self,depth):
296 300 """Return the last depth elements of the current working directory.
297 301
298 302 $HOME is always replaced with '~'.
299 303 If depth==0, the full path is returned."""
300 304
301 305 cwd = os.getcwd().replace(HOME,"~")
302 306 out = os.sep.join(cwd.split(os.sep)[-depth:])
303 307 if out:
304 308 return out
305 309 else:
306 310 return os.sep
307 311
308 312 def cwd_filt2(self,depth):
309 313 """Return the last depth elements of the current working directory.
310 314
311 315 $HOME is always replaced with '~'.
312 316 If depth==0, the full path is returned."""
313 317
314 318 full_cwd = os.getcwd()
315 319 cwd = full_cwd.replace(HOME,"~").split(os.sep)
316 320 if '~' in cwd and len(cwd) == depth+1:
317 321 depth += 1
318 322 drivepart = ''
319 323 if sys.platform == 'win32' and len(cwd) > depth:
320 324 drivepart = os.path.splitdrive(full_cwd)[0]
321 325 out = drivepart + '/'.join(cwd[-depth:])
322 326
323 327 if out:
324 328 return out
325 329 else:
326 330 return os.sep
327 331
328 332 def __nonzero__(self):
329 333 """Implement boolean behavior.
330 334
331 335 Checks whether the p_str attribute is non-empty"""
332 336
333 337 return bool(self.p_template)
334 338
335 339 class Prompt1(BasePrompt):
336 340 """Input interactive prompt similar to Mathematica's."""
337 341
338 342 def __init__(self,cache,sep='\n',prompt='In [\\#]: ',pad_left=True):
339 343 BasePrompt.__init__(self,cache,sep,prompt,pad_left)
340 344
341 345 def set_colors(self):
342 346 self.set_p_str()
343 347 Colors = self.cache.color_table.active_colors # shorthand
344 348 self.col_p = Colors.in_prompt
345 349 self.col_num = Colors.in_number
346 350 self.col_norm = Colors.in_normal
347 351 # We need a non-input version of these escapes for the '--->'
348 352 # auto-call prompts used in the auto_rewrite() method.
349 353 self.col_p_ni = self.col_p.replace('\001','').replace('\002','')
350 354 self.col_norm_ni = Colors.normal
351 355
352 356 def __str__(self):
353 357 self.cache.prompt_count += 1
354 358 self.cache.last_prompt = str_safe(self.p_str_nocolor).split('\n')[-1]
355 359 return str_safe(self.p_str)
356 360
357 361 def auto_rewrite(self):
358 362 """Print a string of the form '--->' which lines up with the previous
359 363 input string. Useful for systems which re-write the user input when
360 364 automatically handling special syntaxes."""
361 365
362 366 curr = str(self.cache.last_prompt)
363 367 nrspaces = len(self.rspace.search(curr).group())
364 368 return '%s%s>%s%s' % (self.col_p_ni,'-'*(len(curr)-nrspaces-1),
365 369 ' '*nrspaces,self.col_norm_ni)
366 370
367 371 class PromptOut(BasePrompt):
368 372 """Output interactive prompt similar to Mathematica's."""
369 373
370 374 def __init__(self,cache,sep='',prompt='Out[\\#]: ',pad_left=True):
371 375 BasePrompt.__init__(self,cache,sep,prompt,pad_left)
372 376 if not self.p_template:
373 377 self.__str__ = lambda: ''
374 378
375 379 def set_colors(self):
376 380 self.set_p_str()
377 381 Colors = self.cache.color_table.active_colors # shorthand
378 382 self.col_p = Colors.out_prompt
379 383 self.col_num = Colors.out_number
380 384 self.col_norm = Colors.normal
381 385
382 386 class Prompt2(BasePrompt):
383 387 """Interactive continuation prompt."""
384 388
385 389 def __init__(self,cache,prompt=' .\\D.: ',pad_left=True):
386 390 self.cache = cache
387 391 self.p_template = prompt
388 392 self.pad_left = pad_left
389 393 self.set_p_str()
390 394
391 395 def set_p_str(self):
392 396 import os,time # needed in locals for prompt string handling
393 397 loc = locals()
394 398 self.p_str = ItplNS('%s%s%s' %
395 399 ('${self.col_p2}',
396 400 multiple_replace(prompt_specials, self.p_template),
397 401 '$self.col_norm'),
398 402 self.cache.user_ns,loc)
399 403 self.p_str_nocolor = ItplNS(multiple_replace(prompt_specials_nocolor,
400 404 self.p_template),
401 405 self.cache.user_ns,loc)
402 406
403 407 def set_colors(self):
404 408 self.set_p_str()
405 409 Colors = self.cache.color_table.active_colors
406 410 self.col_p2 = Colors.in_prompt2
407 411 self.col_norm = Colors.in_normal
408 412 # FIXME (2004-06-16) HACK: prevent crashes for users who haven't
409 413 # updated their prompt_in2 definitions. Remove eventually.
410 414 self.col_p = Colors.out_prompt
411 415 self.col_num = Colors.out_number
412 416
413 417
414 418 #-----------------------------------------------------------------------------
415 419 class CachedOutput:
416 420 """Class for printing output from calculations while keeping a cache of
417 421 results. It dynamically creates global variables prefixed with _ which
418 422 contain these results.
419 423
420 424 Meant to be used as a sys.displayhook replacement, providing numbered
421 425 prompts and cache services.
422 426
423 427 Initialize with initial and final values for the cache counter (this defines
424 428 the maximum size of the cache)."""
425 429
426 430 def __init__(self,shell,cache_size,Pprint,
427 431 colors='NoColor',input_sep='\n',
428 432 output_sep='\n',output_sep2='',
429 433 ps1 = None, ps2 = None,ps_out = None,pad_left=True):
430 434
431 435 cache_size_min = 3
432 436 if cache_size <= 0:
433 437 self.do_full_cache = 0
434 438 cache_size = 0
435 439 elif cache_size < cache_size_min:
436 440 self.do_full_cache = 0
437 441 cache_size = 0
438 442 warn('caching was disabled (min value for cache size is %s).' %
439 443 cache_size_min,level=3)
440 444 else:
441 445 self.do_full_cache = 1
442 446
443 447 self.cache_size = cache_size
444 448 self.input_sep = input_sep
445 449
446 450 # we need a reference to the user-level namespace
447 451 self.shell = shell
448 452 self.user_ns = shell.user_ns
449 453 # and to the user's input
450 454 self.input_hist = shell.input_hist
451 455 # and to the user's logger, for logging output
452 456 self.logger = shell.logger
453 457
454 458 # Set input prompt strings and colors
455 459 if cache_size == 0:
456 460 if ps1.find('%n') > -1 or ps1.find(r'\#') > -1 \
457 461 or ps1.find(r'\N') > -1:
458 462 ps1 = '>>> '
459 463 if ps2.find('%n') > -1 or ps2.find(r'\#') > -1 \
460 464 or ps2.find(r'\N') > -1:
461 465 ps2 = '... '
462 466 self.ps1_str = self._set_prompt_str(ps1,'In [\\#]: ','>>> ')
463 467 self.ps2_str = self._set_prompt_str(ps2,' .\\D.: ','... ')
464 468 self.ps_out_str = self._set_prompt_str(ps_out,'Out[\\#]: ','')
465 469
466 470 self.color_table = PromptColors
467 471 self.prompt1 = Prompt1(self,sep=input_sep,prompt=self.ps1_str,
468 472 pad_left=pad_left)
469 473 self.prompt2 = Prompt2(self,prompt=self.ps2_str,pad_left=pad_left)
470 474 self.prompt_out = PromptOut(self,sep='',prompt=self.ps_out_str,
471 475 pad_left=pad_left)
472 476 self.set_colors(colors)
473 477
474 478 # other more normal stuff
475 479 # b/c each call to the In[] prompt raises it by 1, even the first.
476 480 self.prompt_count = 0
477 481 # Store the last prompt string each time, we need it for aligning
478 482 # continuation and auto-rewrite prompts
479 483 self.last_prompt = ''
480 484 self.Pprint = Pprint
481 485 self.output_sep = output_sep
482 486 self.output_sep2 = output_sep2
483 487 self._,self.__,self.___ = '','',''
484 488 self.pprint_types = map(type,[(),[],{}])
485 489
486 490 # these are deliberately global:
487 491 to_user_ns = {'_':self._,'__':self.__,'___':self.___}
488 492 self.user_ns.update(to_user_ns)
489 493
490 494 def _set_prompt_str(self,p_str,cache_def,no_cache_def):
491 495 if p_str is None:
492 496 if self.do_full_cache:
493 497 return cache_def
494 498 else:
495 499 return no_cache_def
496 500 else:
497 501 return p_str
498 502
499 503 def set_colors(self,colors):
500 504 """Set the active color scheme and configure colors for the three
501 505 prompt subsystems."""
502 506
503 507 # FIXME: the prompt_specials global should be gobbled inside this
504 508 # class instead. Do it when cleaning up the whole 3-prompt system.
505 509 global prompt_specials
506 510 if colors.lower()=='nocolor':
507 511 prompt_specials = prompt_specials_nocolor
508 512 else:
509 513 prompt_specials = prompt_specials_color
510 514
511 515 self.color_table.set_active_scheme(colors)
512 516 self.prompt1.set_colors()
513 517 self.prompt2.set_colors()
514 518 self.prompt_out.set_colors()
515 519
516 520 def __call__(self,arg=None):
517 521 """Printing with history cache management.
518 522
519 523 This is invoked every time the interpreter needs to print, and is
520 524 activated by setting the variable sys.displayhook to it."""
521 525
522 526 # If something injected a '_' variable in __builtin__, delete
523 527 # ipython's automatic one so we don't clobber that. gettext() in
524 528 # particular uses _, so we need to stay away from it.
525 529 if '_' in __builtin__.__dict__:
526 530 try:
527 531 del self.user_ns['_']
528 532 except KeyError:
529 533 pass
530 534 if arg is not None:
531 535 cout_write = Term.cout.write # fast lookup
532 536 # first handle the cache and counters
533 537
534 538 # do not print output if input ends in ';'
535 539 try:
536 540 if self.input_hist[self.prompt_count].endswith(';\n'):
537 541 return
538 542 except IndexError:
539 543 # some uses of ipshellembed may fail here
540 544 pass
541 545 # don't use print, puts an extra space
542 546 cout_write(self.output_sep)
543 547 outprompt = self.shell.hooks.generate_output_prompt()
544 548 if self.do_full_cache:
545 549 cout_write(outprompt)
546 550
547 551 # and now call a possibly user-defined print mechanism
548 552 manipulated_val = self.display(arg)
549 553
550 554 # user display hooks can change the variable to be stored in
551 555 # output history
552 556
553 557 if manipulated_val is not None:
554 558 arg = manipulated_val
555 559
556 560 # avoid recursive reference when displaying _oh/Out
557 561 if arg is not self.user_ns['_oh']:
558 562 self.update(arg)
559 563
560 564 if self.logger.log_output:
561 565 self.logger.log_write(repr(arg),'output')
562 566 cout_write(self.output_sep2)
563 567 Term.cout.flush()
564 568
565 569 def _display(self,arg):
566 570 """Default printer method, uses pprint.
567 571
568 572 Do ip.set_hook("result_display", my_displayhook) for custom result
569 573 display, e.g. when your own objects need special formatting.
570 574 """
571 575 try:
572 576 return IPython.generics.result_display(arg)
573 577 except TryNext:
574 578 return self.shell.hooks.result_display(arg)
575 579
576 580 # Assign the default display method:
577 581 display = _display
578 582
579 583 def update(self,arg):
580 584 #print '***cache_count', self.cache_count # dbg
581 585 if len(self.user_ns['_oh']) >= self.cache_size and self.do_full_cache:
582 586 warn('Output cache limit (currently '+
583 587 `self.cache_size`+' entries) hit.\n'
584 588 'Flushing cache and resetting history counter...\n'
585 589 'The only history variables available will be _,__,___ and _1\n'
586 590 'with the current result.')
587 591
588 592 self.flush()
589 593 # Don't overwrite '_' and friends if '_' is in __builtin__ (otherwise
590 594 # we cause buggy behavior for things like gettext).
591 595 if '_' not in __builtin__.__dict__:
592 596 self.___ = self.__
593 597 self.__ = self._
594 598 self._ = arg
595 599 self.user_ns.update({'_':self._,'__':self.__,'___':self.___})
596 600
597 601 # hackish access to top-level namespace to create _1,_2... dynamically
598 602 to_main = {}
599 603 if self.do_full_cache:
600 604 new_result = '_'+`self.prompt_count`
601 605 to_main[new_result] = arg
602 606 self.user_ns.update(to_main)
603 607 self.user_ns['_oh'][self.prompt_count] = arg
604 608
605 609 def flush(self):
606 610 if not self.do_full_cache:
607 611 raise ValueError,"You shouldn't have reached the cache flush "\
608 612 "if full caching is not enabled!"
609 613 # delete auto-generated vars from global namespace
610 614
611 615 for n in range(1,self.prompt_count + 1):
612 616 key = '_'+`n`
613 617 try:
614 618 del self.user_ns[key]
615 619 except: pass
616 620 self.user_ns['_oh'].clear()
617 621
618 622 if '_' not in __builtin__.__dict__:
619 623 self.user_ns.update({'_':None,'__':None, '___':None})
620 624 import gc
621 625 gc.collect() # xxx needed?
622 626
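To make the CachedOutput idea above concrete, here is a minimal, self-contained sketch (editorial illustration, not part of this commit) of the sys.displayhook pattern it implements: number each result and remember the last one:

import sys
import __builtin__

class TinyDisplayHook(object):
    """Toy displayhook: numbered Out prompts plus a last-result shortcut."""
    def __init__(self):
        self.count = 0
    def __call__(self, value):
        if value is None:           # mimic the default hook: ignore None
            return
        self.count += 1
        print 'Out[%d]: %r' % (self.count, value)
        __builtin__._ = value       # remember the last result, similar in spirit to '_'

sys.displayhook = TinyDisplayHook()   # install: interactive results now print as Out[N]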
@@ -1,123 +1,125 b''
1 1 # encoding: utf-8
2 2
3 3 """The IPython Core Notification Center.
4 4
5 5 See docs/source/development/notification_blueprint.txt for an overview of the
6 6 notification module.
7 7 """
8 8
9 9 __docformat__ = "restructuredtext en"
10 10
11 11 #-----------------------------------------------------------------------------
12 12 # Copyright (C) 2008 The IPython Development Team
13 13 #
14 14 # Distributed under the terms of the BSD License. The full license is in
15 15 # the file COPYING, distributed as part of this software.
16 16 #-----------------------------------------------------------------------------
17 17
18 # Tell nose to skip the testing of this module
19 __test__ = {}
18 20
19 21 class NotificationCenter(object):
20 22 """Synchronous notification center
21 23
22 24 Example
23 25 -------
24 26 >>> import IPython.kernel.core.notification as notification
25 27 >>> def callback(theType, theSender, args={}):
26 28 ... print theType,theSender,args
27 29 ...
28 30 >>> notification.sharedCenter.add_observer(callback, 'NOTIFICATION_TYPE', None)
29 31 >>> notification.sharedCenter.post_notification('NOTIFICATION_TYPE', object()) # doctest:+ELLIPSIS
30 32 NOTIFICATION_TYPE ...
31 33
32 34 """
33 35 def __init__(self):
34 36 super(NotificationCenter, self).__init__()
35 37 self._init_observers()
36 38
37 39
38 40 def _init_observers(self):
39 41 """Initialize observer storage"""
40 42
41 43 self.registered_types = set() #set of types that are observed
42 44 self.registered_senders = set() #set of senders that are observed
43 45 self.observers = {} #map (type,sender) => callback (callable)
44 46
45 47
46 48 def post_notification(self, theType, sender, **kwargs):
47 49 """Post notification (type,sender,**kwargs) to all registered
48 50 observers.
49 51
50 52 Implementation
51 53 --------------
52 54 * If no registered observers, performance is O(1).
53 55 * Notification order is undefined.
54 56 * Notifications are posted synchronously.
55 57 """
56 58
57 59 if(theType==None or sender==None):
58 60 raise Exception("NotificationCenter.post_notification requires \
59 61 type and sender.")
60 62
61 63 # If there are no registered observers for the type/sender pair
62 64 if((theType not in self.registered_types and
63 65 None not in self.registered_types) or
64 66 (sender not in self.registered_senders and
65 67 None not in self.registered_senders)):
66 68 return
67 69
68 70 for o in self._observers_for_notification(theType, sender):
69 71 o(theType, sender, args=kwargs)
70 72
71 73
72 74 def _observers_for_notification(self, theType, sender):
73 75 """Find all registered observers that should recieve notification"""
74 76
75 77 keys = (
76 78 (theType,sender),
77 79 (theType, None),
78 80 (None, sender),
79 81 (None,None)
80 82 )
81 83
82 84
83 85 obs = set()
84 86 for k in keys:
85 87 obs.update(self.observers.get(k, set()))
86 88
87 89 return obs
88 90
89 91
90 92 def add_observer(self, callback, theType, sender):
91 93 """Add an observer callback to this notification center.
92 94
93 95 The given callback will be called upon posting of notifications of
94 96 the given type/sender and will receive any additional kwargs passed
95 97 to post_notification.
96 98
97 99 Parameters
98 100 ----------
99 101 observerCallback : callable
100 102 Callable. Must take at least two arguments::
101 103 observerCallback(type, sender, args={})
102 104
103 105 theType : hashable
104 106 The notification type. If None, all notifications from sender
105 107 will be posted.
106 108
107 109 sender : hashable
108 110 The notification sender. If None, all notifications of theType
109 111 will be posted.
110 112 """
111 113 assert(callback != None)
112 114 self.registered_types.add(theType)
113 115 self.registered_senders.add(sender)
114 116 self.observers.setdefault((theType,sender), set()).add(callback)
115 117
116 118 def remove_all_observers(self):
117 119 """Removes all observers from this notification center"""
118 120
119 121 self._init_observers()
120 122
121 123
122 124
123 125 sharedCenter = NotificationCenter() No newline at end of file
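A brief usage sketch (editorial; the import path is the one given in the class docstring above) showing the None-as-wildcard behaviour of add_observer:

from IPython.kernel.core.notification import sharedCenter

def log_all(theType, sender, args={}):
    # Receives every 'STATUS' notification regardless of sender.
    print 'saw %s from %r with %r' % (theType, sender, args)

sharedCenter.add_observer(log_all, 'STATUS', None)   # None sender == any sender
sharedCenter.post_notification('STATUS', 'engine-3', state='idle')
# prints: saw STATUS from 'engine-3' with {'state': 'idle'}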
@@ -1,903 +1,906 b''
1 1 # encoding: utf-8
2 2 # -*- test-case-name: IPython.kernel.tests.test_engineservice -*-
3 3
4 4 """A Twisted Service Representation of the IPython core.
5 5
6 6 The IPython Core exposed to the network is called the Engine. Its
7 7 representation in Twisted is the EngineService. Interfaces and adapters
8 8 are used to abstract out the details of the actual network protocol used.
9 9 The EngineService is an Engine that knows nothing about the actual protocol
10 10 used.
11 11
12 12 The EngineService is exposed with various network protocols in modules like:
13 13
14 14 enginepb.py
15 15 enginevanilla.py
16 16
17 17 As of 12/12/06 the classes in this module have been simplified greatly. It was
18 18 felt that we had over-engineered things. To improve the maintainability of the
19 19 code we have taken out the ICompleteEngine interface and the completeEngine
20 20 method that automatically added methods to engines.
21 21
22 22 """
23 23
24 24 __docformat__ = "restructuredtext en"
25 25
26 26 #-------------------------------------------------------------------------------
27 27 # Copyright (C) 2008 The IPython Development Team
28 28 #
29 29 # Distributed under the terms of the BSD License. The full license is in
30 30 # the file COPYING, distributed as part of this software.
31 31 #-------------------------------------------------------------------------------
32 32
33 33 #-------------------------------------------------------------------------------
34 34 # Imports
35 35 #-------------------------------------------------------------------------------
36 36
37 # Tell nose to skip the testing of this module
38 __test__ = {}
39
37 40 import os, sys, copy
38 41 import cPickle as pickle
39 42 from new import instancemethod
40 43
41 44 from twisted.application import service
42 45 from twisted.internet import defer, reactor
43 46 from twisted.python import log, failure, components
44 47 import zope.interface as zi
45 48
46 49 from IPython.kernel.core.interpreter import Interpreter
47 50 from IPython.kernel import newserialized, error, util
48 51 from IPython.kernel.util import printer
49 52 from IPython.kernel.twistedutil import gatherBoth, DeferredList
50 53 from IPython.kernel import codeutil
51 54
52 55
53 56 #-------------------------------------------------------------------------------
54 57 # Interface specification for the Engine
55 58 #-------------------------------------------------------------------------------
56 59
57 60 class IEngineCore(zi.Interface):
58 61 """The minimal required interface for the IPython Engine.
59 62
60 63 This interface provides a formal specification of the IPython core.
61 64 All these methods should return deferreds regardless of what side of a
62 65 network connection they are on.
63 66
64 67 In general, this class simply wraps a shell class and wraps its return
65 68 values as Deferred objects. If the underlying shell class method raises
66 69 an exception, this class should convert it to a twisted.failure.Failure
67 70 that will be propagated along the Deferred's errback chain.
68 71
69 72 In addition, Failures are aggressive. By this, we mean that if a method
70 73 is performing multiple actions (like pulling multiple objects) and any
71 74 single one fails, the entire method will fail with that Failure. It is
72 75 all or nothing.
73 76 """
74 77
75 78 id = zi.interface.Attribute("the id of the Engine object")
76 79 properties = zi.interface.Attribute("A dict of properties of the Engine")
77 80
78 81 def execute(lines):
79 82 """Execute lines of Python code.
80 83
81 84 Returns a dictionary with keys (id, number, stdin, stdout, stderr)
82 85 upon success.
83 86
84 87 Returns a failure object if the execution of lines raises an exception.
85 88 """
86 89
87 90 def push(namespace):
88 91 """Push dict namespace into the user's namespace.
89 92
90 93 Returns a deferred to None or a failure.
91 94 """
92 95
93 96 def pull(keys):
94 97 """Pulls values out of the user's namespace by keys.
95 98
96 99 Returns a deferred to a tuple of objects or a single object.
97 100
98 101 Raises NameError if any one of the objects does not exist.
99 102 """
100 103
101 104 def push_function(namespace):
102 105 """Push a dict of key, function pairs into the user's namespace.
103 106
104 107 Returns a deferred to None or a failure."""
105 108
106 109 def pull_function(keys):
107 110 """Pulls functions out of the user's namespace by keys.
108 111
109 112 Returns a deferred to a tuple of functions or a single function.
110 113
111 114 Raises NameError if any one of the functions does not exist.
112 115 """
113 116
114 117 def get_result(i=None):
115 118 """Get the stdin/stdout/stderr of command i.
116 119
117 120 Returns a deferred to a dict with keys
118 121 (id, number, stdin, stdout, stderr).
119 122
120 123 Raises IndexError if command i does not exist.
121 124 Raises TypeError if i is not an int.
122 125 """
123 126
124 127 def reset():
125 128 """Reset the shell.
126 129
127 130 This clears the users namespace. Won't cause modules to be
128 131 reloaded. Should also re-initialize certain variables like id.
129 132 """
130 133
131 134 def kill():
132 135 """Kill the engine by stopping the reactor."""
133 136
134 137 def keys():
135 138 """Return the top level variables in the users namspace.
136 139
137 140 Returns a deferred to a dict."""
138 141
139 142
140 143 class IEngineSerialized(zi.Interface):
141 144 """Push/Pull methods that take Serialized objects.
142 145
143 146 All methods should return deferreds.
144 147 """
145 148
146 149 def push_serialized(namespace):
147 150 """Push a dict of keys and Serialized objects into the user's namespace."""
148 151
149 152 def pull_serialized(keys):
150 153 """Pull objects by key from the user's namespace as Serialized.
151 154
152 155 Returns a list of Serialized objects, or a single Serialized.
153 156
154 157 Raises NameError if any one of the objects does not exist.
155 158 """
156 159
157 160
158 161 class IEngineProperties(zi.Interface):
159 162 """Methods for access to the properties object of an Engine"""
160 163
161 164 properties = zi.Attribute("A StrictDict object, containing the properties")
162 165
163 166 def set_properties(properties):
164 167 """set properties by key and value"""
165 168
166 169 def get_properties(keys=None):
167 170 """get a list of properties by `keys`, if no keys specified, get all"""
168 171
169 172 def del_properties(keys):
170 173 """delete properties by `keys`"""
171 174
172 175 def has_properties(keys):
173 176 """get a list of bool values for whether `properties` has `keys`"""
174 177
175 178 def clear_properties():
176 179 """clear the properties dict"""
177 180
178 181 class IEngineBase(IEngineCore, IEngineSerialized, IEngineProperties):
179 182 """The basic engine interface that EngineService will implement.
180 183
181 184 This exists so it is easy to specify adapters that adapt to and from the
182 185 API that the basic EngineService implements.
183 186 """
184 187 pass
185 188
186 189 class IEngineQueued(IEngineBase):
187 190 """Interface for adding a queue to an IEngineBase.
188 191
189 192 This interface extends the IEngineBase interface to add methods for managing
190 193 the engine's queue. The implicit details of this interface are that the
191 194 execution of all methods declared in IEngineBase should appropriately be
192 195 put through a queue before execution.
193 196
194 197 All methods should return deferreds.
195 198 """
196 199
197 200 def clear_queue():
198 201 """Clear the queue."""
199 202
200 203 def queue_status():
201 204 """Get the queued and pending commands in the queue."""
202 205
203 206 def register_failure_observer(obs):
204 207 """Register an observer of pending Failures.
205 208
206 209 The observer must implement IFailureObserver.
207 210 """
208 211
209 212 def unregister_failure_observer(obs):
210 213 """Unregister an observer of pending Failures."""
211 214
212 215
213 216 class IEngineThreaded(zi.Interface):
214 217 """A place holder for threaded commands.
215 218
216 219 All methods should return deferreds.
217 220 """
218 221 pass
219 222
220 223
221 224 #-------------------------------------------------------------------------------
222 225 # Functions and classes to implement the EngineService
223 226 #-------------------------------------------------------------------------------
224 227
225 228
226 229 class StrictDict(dict):
227 230 """This is a strict copying dictionary for use as the interface to the
228 231 properties of an Engine.
229 232
230 233 :IMPORTANT:
231 234 This object copies the values you set to it, and returns copies to you
232 235 when you request them. The only way to change properties os explicitly
233 236 through the setitem and getitem of the dictionary interface.
234 237
235 238 Example:
236 239 >>> e = get_engine(id)
237 240 >>> L = [1,2,3]
238 241 >>> e.properties['L'] = L
239 242 >>> L == e.properties['L']
240 243 True
241 244 >>> L.append(99)
242 245 >>> L == e.properties['L']
243 246 False
244 247
245 248 Note that getitem copies, so calls to methods of objects do not affect
246 249 the properties, as seen here:
247 250
248 251 >>> e.properties[1] = range(2)
249 252 >>> print e.properties[1]
250 253 [0, 1]
251 254 >>> e.properties[1].append(2)
252 255 >>> print e.properties[1]
253 256 [0, 1]
254 257 """
255 258 def __init__(self, *args, **kwargs):
256 259 dict.__init__(self, *args, **kwargs)
257 260 self.modified = True
258 261
259 262 def __getitem__(self, key):
260 263 return copy.deepcopy(dict.__getitem__(self, key))
261 264
262 265 def __setitem__(self, key, value):
263 266 # check if this entry is valid for transport around the network
264 267 # and copying
265 268 try:
266 269 pickle.dumps(key, 2)
267 270 pickle.dumps(value, 2)
268 271 newvalue = copy.deepcopy(value)
269 272 except:
270 273 raise error.InvalidProperty(value)
271 274 dict.__setitem__(self, key, newvalue)
272 275 self.modified = True
273 276
274 277 def __delitem__(self, key):
275 278 dict.__delitem__(self, key)
276 279 self.modified = True
277 280
278 281 def update(self, dikt):
279 282 for k,v in dikt.iteritems():
280 283 self[k] = v
281 284
282 285 def pop(self, key):
283 286 self.modified = True
284 287 return dict.pop(self, key)
285 288
286 289 def popitem(self):
287 290 self.modified = True
288 291 return dict.popitem(self)
289 292
290 293 def clear(self):
291 294 self.modified = True
292 295 dict.clear(self)
293 296
294 297 def subDict(self, *keys):
295 298 d = {}
296 299 for key in keys:
297 300 d[key] = self[key]
298 301 return d
299 302
300 303
301 304
302 305 class EngineAPI(object):
303 306 """This is the object through which the user can edit the `properties`
304 307 attribute of an Engine.
305 308 The Engine Properties object copies all objects in and out of itself.
306 309 See the EngineProperties object for details.
307 310 """
308 311 _fix=False
309 312 def __init__(self, id):
310 313 self.id = id
311 314 self.properties = StrictDict()
312 315 self._fix=True
313 316
314 317 def __setattr__(self, k,v):
315 318 if self._fix:
316 319 raise error.KernelError("I am protected!")
317 320 else:
318 321 object.__setattr__(self, k, v)
319 322
320 323 def __delattr__(self, key):
321 324 raise error.KernelError("I am protected!")
322 325
323 326
324 327 _apiDict = {}
325 328
326 329 def get_engine(id):
327 330 """Get the Engine API object, whcih currently just provides the properties
328 331 object, by ID"""
329 332 global _apiDict
330 333 if not _apiDict.get(id):
331 334 _apiDict[id] = EngineAPI(id)
332 335 return _apiDict[id]
333 336
334 337 def drop_engine(id):
335 338 """remove an engine"""
336 339 global _apiDict
337 340 if _apiDict.has_key(id):
338 341 del _apiDict[id]
339 342
340 343 class EngineService(object, service.Service):
341 344 """Adapt a IPython shell into a IEngine implementing Twisted Service."""
342 345
343 346 zi.implements(IEngineBase)
344 347 name = 'EngineService'
345 348
346 349 def __init__(self, shellClass=Interpreter, mpi=None):
347 350 """Create an EngineService.
348 351
349 352 shellClass: something that implements IInterpreter or core1
350 353 mpi: an mpi module that has rank and size attributes
351 354 """
352 355 self.shellClass = shellClass
353 356 self.shell = self.shellClass()
354 357 self.mpi = mpi
355 358 self.id = None
356 359 self.properties = get_engine(self.id).properties
357 360 if self.mpi is not None:
358 361 log.msg("MPI started with rank = %i and size = %i" %
359 362 (self.mpi.rank, self.mpi.size))
360 363 self.id = self.mpi.rank
361 364 self._seedNamespace()
362 365
363 366 # Make id a property so that the shell can get the updated id
364 367
365 368 def _setID(self, id):
366 369 self._id = id
367 370 self.properties = get_engine(id).properties
368 371 self.shell.push({'id': id})
369 372
370 373 def _getID(self):
371 374 return self._id
372 375
373 376 id = property(_getID, _setID)
374 377
375 378 def _seedNamespace(self):
376 379 self.shell.push({'mpi': self.mpi, 'id' : self.id})
377 380
378 381 def executeAndRaise(self, msg, callable, *args, **kwargs):
379 382 """Call a method of self.shell and wrap any exception."""
380 383 d = defer.Deferred()
381 384 try:
382 385 result = callable(*args, **kwargs)
383 386 except:
384 387 # This gives the following:
385 388 # et=exception class
386 389 # ev=exception class instance
387 390 # tb=traceback object
388 391 et,ev,tb = sys.exc_info()
389 392 # This call adds attributes to the exception value
390 393 et,ev,tb = self.shell.formatTraceback(et,ev,tb,msg)
391 394 # Add another attribute
392 395 ev._ipython_engine_info = msg
393 396 f = failure.Failure(ev,et,None)
394 397 d.errback(f)
395 398 else:
396 399 d.callback(result)
397 400
398 401 return d
399 402
400 403
401 404 # The IEngine methods. See the interface for documentation.
402 405
403 406 def execute(self, lines):
404 407 msg = {'engineid':self.id,
405 408 'method':'execute',
406 409 'args':[lines]}
407 410 d = self.executeAndRaise(msg, self.shell.execute, lines)
408 411 d.addCallback(self.addIDToResult)
409 412 return d
410 413
411 414 def addIDToResult(self, result):
412 415 result['id'] = self.id
413 416 return result
414 417
415 418 def push(self, namespace):
416 419 msg = {'engineid':self.id,
417 420 'method':'push',
418 421 'args':[repr(namespace.keys())]}
419 422 d = self.executeAndRaise(msg, self.shell.push, namespace)
420 423 return d
421 424
422 425 def pull(self, keys):
423 426 msg = {'engineid':self.id,
424 427 'method':'pull',
425 428 'args':[repr(keys)]}
426 429 d = self.executeAndRaise(msg, self.shell.pull, keys)
427 430 return d
428 431
429 432 def push_function(self, namespace):
430 433 msg = {'engineid':self.id,
431 434 'method':'push_function',
432 435 'args':[repr(namespace.keys())]}
433 436 d = self.executeAndRaise(msg, self.shell.push_function, namespace)
434 437 return d
435 438
436 439 def pull_function(self, keys):
437 440 msg = {'engineid':self.id,
438 441 'method':'pull_function',
439 442 'args':[repr(keys)]}
440 443 d = self.executeAndRaise(msg, self.shell.pull_function, keys)
441 444 return d
442 445
443 446 def get_result(self, i=None):
444 447 msg = {'engineid':self.id,
445 448 'method':'get_result',
446 449 'args':[repr(i)]}
447 450 d = self.executeAndRaise(msg, self.shell.getCommand, i)
448 451 d.addCallback(self.addIDToResult)
449 452 return d
450 453
451 454 def reset(self):
452 455 msg = {'engineid':self.id,
453 456 'method':'reset',
454 457 'args':[]}
455 458 del self.shell
456 459 self.shell = self.shellClass()
457 460 self.properties.clear()
458 461 d = self.executeAndRaise(msg, self._seedNamespace)
459 462 return d
460 463
461 464 def kill(self):
462 465 drop_engine(self.id)
463 466 try:
464 467 reactor.stop()
465 468 except RuntimeError:
466 469 log.msg('The reactor was not running apparently.')
467 470 return defer.fail()
468 471 else:
469 472 return defer.succeed(None)
470 473
471 474 def keys(self):
472 475 """Return a list of variables names in the users top level namespace.
473 476
474 477 This used to return a dict of all the keys/repr(values) in the
475 478 user's namespace. This was too much info for the ControllerService
476 479 to handle so it is now just a list of keys.
477 480 """
478 481
479 482 remotes = []
480 483 for k in self.shell.user_ns.iterkeys():
481 484 if k not in ['__name__', '_ih', '_oh', '__builtins__',
482 485 'In', 'Out', '_', '__', '___', '__IP', 'input', 'raw_input']:
483 486 remotes.append(k)
484 487 return defer.succeed(remotes)
485 488
486 489 def set_properties(self, properties):
487 490 msg = {'engineid':self.id,
488 491 'method':'set_properties',
489 492 'args':[repr(properties.keys())]}
490 493 return self.executeAndRaise(msg, self.properties.update, properties)
491 494
492 495 def get_properties(self, keys=None):
493 496 msg = {'engineid':self.id,
494 497 'method':'get_properties',
495 498 'args':[repr(keys)]}
496 499 if keys is None:
497 500 keys = self.properties.keys()
498 501 return self.executeAndRaise(msg, self.properties.subDict, *keys)
499 502
500 503 def _doDel(self, keys):
501 504 for key in keys:
502 505 del self.properties[key]
503 506
504 507 def del_properties(self, keys):
505 508 msg = {'engineid':self.id,
506 509 'method':'del_properties',
507 510 'args':[repr(keys)]}
508 511 return self.executeAndRaise(msg, self._doDel, keys)
509 512
510 513 def _doHas(self, keys):
511 514 return [self.properties.has_key(key) for key in keys]
512 515
513 516 def has_properties(self, keys):
514 517 msg = {'engineid':self.id,
515 518 'method':'has_properties',
516 519 'args':[repr(keys)]}
517 520 return self.executeAndRaise(msg, self._doHas, keys)
518 521
519 522 def clear_properties(self):
520 523 msg = {'engineid':self.id,
521 524 'method':'clear_properties',
522 525 'args':[]}
523 526 return self.executeAndRaise(msg, self.properties.clear)
524 527
525 528 def push_serialized(self, sNamespace):
526 529 msg = {'engineid':self.id,
527 530 'method':'push_serialized',
528 531 'args':[repr(sNamespace.keys())]}
529 532 ns = {}
530 533 for k,v in sNamespace.iteritems():
531 534 try:
532 535 unserialized = newserialized.IUnSerialized(v)
533 536 ns[k] = unserialized.getObject()
534 537 except:
535 538 return defer.fail()
536 539 return self.executeAndRaise(msg, self.shell.push, ns)
537 540
538 541 def pull_serialized(self, keys):
539 542 msg = {'engineid':self.id,
540 543 'method':'pull_serialized',
541 544 'args':[repr(keys)]}
542 545 if isinstance(keys, str):
543 546 keys = [keys]
544 547 if len(keys)==1:
545 548 d = self.executeAndRaise(msg, self.shell.pull, keys)
546 549 d.addCallback(newserialized.serialize)
547 550 return d
548 551 elif len(keys)>1:
549 552 d = self.executeAndRaise(msg, self.shell.pull, keys)
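# Editorial note: '@d.addCallback' below registers packThemUp as the deferred's
# callback; since Deferred.addCallback returns the deferred itself, the name
# packThemUp ends up bound to d, so 'return packThemUp' returns that deferred.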
550 553 @d.addCallback
551 554 def packThemUp(values):
552 555 serials = []
553 556 for v in values:
554 557 try:
555 558 serials.append(newserialized.serialize(v))
556 559 except:
557 560 return defer.fail(failure.Failure())
558 561 return serials
559 562 return packThemUp
560 563
561 564
562 565 def queue(methodToQueue):
563 566 def queuedMethod(this, *args, **kwargs):
564 567 name = methodToQueue.__name__
565 568 return this.submitCommand(Command(name, *args, **kwargs))
566 569 return queuedMethod
567 570
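Command itself is not shown in this (truncated) diff. Purely as a reading aid for QueuedEngine below, here is a sketch of the interface the code appears to rely on; the attribute and method names are inferred from the call sites, not copied from the real class:

class _CommandSketch(object):
    """Hypothetical stand-in for the real Command class."""
    def __init__(self, remoteMethod, *args, **kwargs):
        self.remoteMethod = remoteMethod     # name of the IEngine method to call
        self.args = args
        self.kwargs = kwargs
        self.finished = False
        self.deferred = None
    def setDeferred(self, d):
        self.deferred = d                    # deferred handed back to the caller
    def handleResult(self, result):
        self.deferred.callback(result)
    def handleError(self, reason):
        self.deferred.errback(reason)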
568 571 class QueuedEngine(object):
569 572 """Adapt an IEngineBase to an IEngineQueued by wrapping it.
570 573
571 574 The resulting object will implement IEngineQueued which extends
572 575 IEngineCore which extends (IEngineBase, IEngineSerialized).
573 576
574 577 This seems like the best way of handling it, but I am not sure. The
575 578 other option is to have the various base interfaces be used like
576 579 mix-in interfaces. The problem I have with this is adaptation is
577 580 more difficult and complicated because there can be multiple
578 581 original and final Interfaces.
579 582 """
580 583
581 584 zi.implements(IEngineQueued)
582 585
583 586 def __init__(self, engine):
584 587 """Create a QueuedEngine object from an engine
585 588
586 589 engine: An implementor of IEngineCore and IEngineSerialized
587 590 keepUpToDate: whether to update the remote status when the
588 591 queue is empty. Defaults to False.
589 592 """
590 593
591 594 # This is the right way to do these tests rather than
592 595 # IEngineCore in list(zi.providedBy(engine)) which will only
593 596 # picks of the interfaces that are directly declared by engine.
594 597 assert IEngineBase.providedBy(engine), \
595 598 "engine passed to QueuedEngine doesn't provide IEngineBase"
596 599
597 600 self.engine = engine
598 601 self.id = engine.id
599 602 self.queued = []
600 603 self.history = {}
601 604 self.engineStatus = {}
602 605 self.currentCommand = None
603 606 self.failureObservers = []
604 607
605 608 def _get_properties(self):
606 609 return self.engine.properties
607 610
608 611 properties = property(_get_properties, lambda self, _: None)
609 612 # Queue management methods. You should not call these directly
610 613
611 614 def submitCommand(self, cmd):
612 615 """Submit command to queue."""
613 616
614 617 d = defer.Deferred()
615 618 cmd.setDeferred(d)
616 619 if self.currentCommand is not None:
617 620 if self.currentCommand.finished:
618 621 # log.msg("Running command immediately: %r" % cmd)
619 622 self.currentCommand = cmd
620 623 self.runCurrentCommand()
621 624 else: # command is still running
622 625 # log.msg("Command is running: %r" % self.currentCommand)
623 626 # log.msg("Queueing: %r" % cmd)
624 627 self.queued.append(cmd)
625 628 else:
626 629 # log.msg("No current commands, running: %r" % cmd)
627 630 self.currentCommand = cmd
628 631 self.runCurrentCommand()
629 632 return d
630 633
631 634 def runCurrentCommand(self):
632 635 """Run current command."""
633 636
634 637 cmd = self.currentCommand
635 638 f = getattr(self.engine, cmd.remoteMethod, None)
636 639 if f:
637 640 d = f(*cmd.args, **cmd.kwargs)
638 641 if cmd.remoteMethod == 'execute':
639 642 d.addCallback(self.saveResult)
640 643 d.addCallback(self.finishCommand)
641 644 d.addErrback(self.abortCommand)
642 645 else:
643 646 return defer.fail(AttributeError(cmd.remoteMethod))
644 647
645 648 def _flushQueue(self):
646 649 """Pop next command in queue and run it."""
647 650
648 651 if len(self.queued) > 0:
649 652 self.currentCommand = self.queued.pop(0)
650 653 self.runCurrentCommand()
651 654
652 655 def saveResult(self, result):
653 656 """Put the result in the history."""
654 657 self.history[result['number']] = result
655 658 return result
656 659
657 660 def finishCommand(self, result):
658 661 """Finish currrent command."""
659 662
660 663 # The order of these commands is absolutely critical.
661 664 self.currentCommand.handleResult(result)
662 665 self.currentCommand.finished = True
663 666 self._flushQueue()
664 667 return result
665 668
666 669 def abortCommand(self, reason):
667 670 """Abort current command.
668 671
669 672 This eats the Failure but first passes it onto the Deferred that the
670 673 user has.
671 674
672 675 It also clears out the queue so subsequent commands don't run.
673 676 """
674 677
675 678 # The order of these 3 commands is absolutely critical. The currentCommand
676 679 # must first be marked as finished BEFORE the queue is cleared and before
677 680 # the current command is sent the failure.
678 681 # Also, the queue must be cleared BEFORE the current command is sent the Failure
679 682 # otherwise the errback chain could trigger new commands to be added to the
680 683 # queue before we clear it. We should clear ONLY the commands that were in
681 684 the queue when the error occurred.
682 685 self.currentCommand.finished = True
683 686 s = "%r %r %r" % (self.currentCommand.remoteMethod, self.currentCommand.args, self.currentCommand.kwargs)
684 687 self.clear_queue(msg=s)
685 688 self.currentCommand.handleError(reason)
686 689
687 690 return None
688 691
689 692 #---------------------------------------------------------------------------
690 693 # IEngineCore methods
691 694 #---------------------------------------------------------------------------
692 695
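For illustration only (not from the patch): the `queue` decorator used below is defined earlier in this file (truncated above). A plausible minimal sketch, assuming it simply turns the decorated method's name and call arguments into a Command and submits it:

def queue(wrapped_method):
    # Hypothetical reconstruction: the decorated body (always `pass`) is replaced
    # by a call that queues a Command named after the method and returns its Deferred.
    def queued_method(self, *args, **kwargs):
        return self.submitCommand(Command(wrapped_method.__name__, *args, **kwargs))
    return queued_method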
693 696 @queue
694 697 def execute(self, lines):
695 698 pass
696 699
697 700 @queue
698 701 def push(self, namespace):
699 702 pass
700 703
701 704 @queue
702 705 def pull(self, keys):
703 706 pass
704 707
705 708 @queue
706 709 def push_function(self, namespace):
707 710 pass
708 711
709 712 @queue
710 713 def pull_function(self, keys):
711 714 pass
712 715
713 716 def get_result(self, i=None):
714 717 if i is None:
715 718 i = max(self.history.keys()+[None])
716 719
717 720 cmd = self.history.get(i, None)
718 721 # Uncomment this line to disable caching of results
719 722 #cmd = None
720 723 if cmd is None:
721 724 return self.submitCommand(Command('get_result', i))
722 725 else:
723 726 return defer.succeed(cmd)
724 727
725 728 def reset(self):
726 729 self.clear_queue()
727 730 self.history = {} # reset the cache - I am not sure we should do this
728 731 return self.submitCommand(Command('reset'))
729 732
730 733 def kill(self):
731 734 self.clear_queue()
732 735 return self.submitCommand(Command('kill'))
733 736
734 737 @queue
735 738 def keys(self):
736 739 pass
737 740
738 741 #---------------------------------------------------------------------------
739 742 # IEngineSerialized methods
740 743 #---------------------------------------------------------------------------
741 744
742 745 @queue
743 746 def push_serialized(self, namespace):
744 747 pass
745 748
746 749 @queue
747 750 def pull_serialized(self, keys):
748 751 pass
749 752
750 753 #---------------------------------------------------------------------------
751 754 # IEngineProperties methods
752 755 #---------------------------------------------------------------------------
753 756
754 757 @queue
755 758 def set_properties(self, namespace):
756 759 pass
757 760
758 761 @queue
759 762 def get_properties(self, keys=None):
760 763 pass
761 764
762 765 @queue
763 766 def del_properties(self, keys):
764 767 pass
765 768
766 769 @queue
767 770 def has_properties(self, keys):
768 771 pass
769 772
770 773 @queue
771 774 def clear_properties(self):
772 775 pass
773 776
774 777 #---------------------------------------------------------------------------
775 778 # IQueuedEngine methods
776 779 #---------------------------------------------------------------------------
777 780
778 781 def clear_queue(self, msg=''):
779 782 """Clear the queue, but doesn't cancel the currently running commmand."""
780 783
781 784 for cmd in self.queued:
782 785 cmd.deferred.errback(failure.Failure(error.QueueCleared(msg)))
783 786 self.queued = []
784 787 return defer.succeed(None)
785 788
786 789 def queue_status(self):
787 790 if self.currentCommand is not None:
788 791 if self.currentCommand.finished:
789 792 pending = repr(None)
790 793 else:
791 794 pending = repr(self.currentCommand)
792 795 else:
793 796 pending = repr(None)
794 797 dikt = {'queue':map(repr,self.queued), 'pending':pending}
795 798 return defer.succeed(dikt)
796 799
797 800 def register_failure_observer(self, obs):
798 801 self.failureObservers.append(obs)
799 802
800 803 def unregister_failure_observer(self, obs):
801 804 self.failureObservers.remove(obs)
802 805
803 806
804 807 # Now register QueuedEngine as an adapter class that turns an IEngineBase into an
805 808 # IEngineQueued.
806 809 components.registerAdapter(QueuedEngine, IEngineBase, IEngineQueued)
807 810
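For illustration only (not from the patch), a brief sketch of how the adapter registered above is typically used, assuming `engine` is any object providing IEngineBase:

queued = IEngineQueued(engine)       # adaptation returns a QueuedEngine wrapper
d1 = queued.execute('a = 10')        # runs immediately (queue was empty)
d2 = queued.pull('a')                # queued until the execute above finishes
d2.addCallback(lambda value: value)  # fires with the pulled value, in order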
808 811
809 812 class Command(object):
810 813 """A command object that encapslates queued commands.
811 814
812 815 This class basically keeps track of a command that has been queued
813 816 in a QueuedEngine. It manages the deferreds and holds the method to be called
814 817 and the arguments to that method.
815 818 """
816 819
817 820
818 821 def __init__(self, remoteMethod, *args, **kwargs):
819 822 """Build a new Command object."""
820 823
821 824 self.remoteMethod = remoteMethod
822 825 self.args = args
823 826 self.kwargs = kwargs
824 827 self.finished = False
825 828
826 829 def setDeferred(self, d):
827 830 """Sets the deferred attribute of the Command."""
828 831
829 832 self.deferred = d
830 833
831 834 def __repr__(self):
832 835 if not self.args:
833 836 args = ''
834 837 else:
835 838 args = str(self.args)[1:-2] #cut off (...,)
836 839 for k,v in self.kwargs.iteritems():
837 840 if args:
838 841 args += ', '
839 842 args += '%s=%r' %(k,v)
840 843 return "%s(%s)" %(self.remoteMethod, args)
841 844
842 845 def handleResult(self, result):
843 846 """When the result is ready, relay it to self.deferred."""
844 847
845 848 self.deferred.callback(result)
846 849
847 850 def handleError(self, reason):
848 851 """When an error has occured, relay it to self.deferred."""
849 852
850 853 self.deferred.errback(reason)
851 854
852 855 class ThreadedEngineService(EngineService):
853 856 """An EngineService subclass that defers execute commands to a separate
854 857 thread.
855 858
856 859 ThreadedEngineService uses twisted.internet.threads.deferToThread to
857 860 defer execute requests to a separate thread. GUI frontends may want to
858 861 use ThreadedEngineService as the engine in an
859 862 IPython.frontend.frontendbase.FrontEndBase subclass to keep
860 863 blocking execution from freezing the GUI thread.
861 864 """
862 865
863 866 zi.implements(IEngineBase)
864 867
865 868 def __init__(self, shellClass=Interpreter, mpi=None):
866 869 EngineService.__init__(self, shellClass, mpi)
867 870
868 871 def wrapped_execute(self, msg, lines):
869 872 """Wrap self.shell.execute to add extra information to tracebacks"""
870 873
871 874 try:
872 875 result = self.shell.execute(lines)
873 876 except Exception,e:
874 877 # This gives the following:
875 878 # et=exception class
876 879 # ev=exception class instance
877 880 # tb=traceback object
878 881 et,ev,tb = sys.exc_info()
879 882 # This call adds attributes to the exception value
880 883 et,ev,tb = self.shell.formatTraceback(et,ev,tb,msg)
881 884 # Add another attribute
882 885
883 886 # Create a new exception with the new attributes
884 887 e = et(ev._ipython_traceback_text)
885 888 e._ipython_engine_info = msg
886 889
887 890 # Re-raise
888 891 raise e
889 892
890 893 return result
891 894
892 895
893 896 def execute(self, lines):
894 897 # Only import this if we are going to use this class
895 898 from twisted.internet import threads
896 899
897 900 msg = {'engineid':self.id,
898 901 'method':'execute',
899 902 'args':[lines]}
900 903
901 904 d = threads.deferToThread(self.wrapped_execute, msg, lines)
902 905 d.addCallback(self.addIDToResult)
903 906 return d
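For context only (not from the patch), a minimal sketch of the deferToThread pattern that ThreadedEngineService builds on; `blocking_call` below is a hypothetical stand-in:

from twisted.internet import threads

def blocking_call(lines):
    # stands in for a slow, blocking shell.execute call
    return len(lines)

d = threads.deferToThread(blocking_call, 'a = 1')
d.addCallback(lambda result: result)  # fires in the reactor thread once the worker thread returns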
@@ -1,1113 +1,1116 b''
1 1 # encoding: utf-8
2 2 # -*- test-case-name: IPython.kernel.tests.test_task -*-
3 3
4 4 """Task farming representation of the ControllerService."""
5 5
6 6 __docformat__ = "restructuredtext en"
7 7
8 8 #-----------------------------------------------------------------------------
9 9 # Copyright (C) 2008 The IPython Development Team
10 10 #
11 11 # Distributed under the terms of the BSD License. The full license is in
12 12 # the file COPYING, distributed as part of this software.
13 13 #-----------------------------------------------------------------------------
14 14
15 15 #-----------------------------------------------------------------------------
16 16 # Imports
17 17 #-----------------------------------------------------------------------------
18 18
19 # Tell nose to skip the testing of this module
20 __test__ = {}
21
19 22 import copy, time
20 23 from types import FunctionType
21 24
22 25 import zope.interface as zi, string
23 26 from twisted.internet import defer, reactor
24 27 from twisted.python import components, log, failure
25 28
26 29 from IPython.kernel.util import printer
27 30 from IPython.kernel import engineservice as es, error
28 31 from IPython.kernel import controllerservice as cs
29 32 from IPython.kernel.twistedutil import gatherBoth, DeferredList
30 33
31 34 from IPython.kernel.pickleutil import can, uncan, CannedFunction
32 35
33 36 #-----------------------------------------------------------------------------
34 37 # Definition of the Task objects
35 38 #-----------------------------------------------------------------------------
36 39
37 40 time_format = '%Y/%m/%d %H:%M:%S'
38 41
39 42 class ITask(zi.Interface):
40 43 """
41 44 This interface provides a generic definition of what constitutes a task.
42 45
43 46 There are two sides to a task. First a task needs to take input from
44 47 a user to determine what work is performed by the task. Second, the
45 48 task needs to have the logic that knows how to turn that information
46 49 into specific calls to a worker, through the `IQueuedEngine` interface.
47 50
48 51 Many methods in this class get two things passed to them: a Deferred
49 52 and an IQueuedEngine implementer. Such methods should register callbacks
50 53 on the Deferred that use the IQueuedEngine to accomplish something. See
51 54 the existing task objects for examples.
52 55 """
53 56
54 57 zi.Attribute('retries','How many times to retry the task')
55 58 zi.Attribute('recovery_task','A task to try if the initial one fails')
56 59 zi.Attribute('taskid','the id of the task')
57 60
58 61 def start_time(result):
59 62 """
60 63 Do anything needed to start the timing of the task.
61 64
62 65 Must simply return the result after starting the timers.
63 66 """
64 67
65 68 def stop_time(result):
66 69 """
67 70 Do anything needed to stop the timing of the task.
68 71
69 72 Must simply return the result after stopping the timers. This
70 73 method will usually set attributes that are used by `process_result`
71 74 in building the result of the task.
72 75 """
73 76
74 77 def pre_task(d, queued_engine):
75 78 """Do something with the queued_engine before the task is run.
76 79
77 80 This method should simply add callbacks to the input Deferred
78 81 that do something with the `queued_engine` before the task is run.
79 82
80 83 :Parameters:
81 84 d : Deferred
82 85 The deferred that actions should be attached to
83 86 queued_engine : IQueuedEngine implementer
84 87 The worker that has been allocated to perform the task
85 88 """
86 89
87 90 def post_task(d, queued_engine):
88 91 """Do something with the queued_engine after the task is run.
89 92
90 93 This method should simply add callbacks to the input Deferred
91 94 that do something with the `queued_engine` after the task is run.
92 95
93 96 :Parameters:
94 97 d : Deferred
95 98 The deferred that actions should be attached to
96 99 queued_engine : IQueuedEngine implementer
97 100 The worker that has been allocated to perform the task
98 101 """
99 102
100 103 def submit_task(d, queued_engine):
101 104 """Submit a task using the `queued_engine` we have been allocated.
102 105
103 106 When a task is ready to run, this method is called. This method
104 107 must take the internal information of the task and make suitable
105 108 calls on the queued_engine to have the actual work done.
106 109
107 110 This method should simply add callbacks to the input Deferred
108 111 that use the `queued_engine` to actually perform the task's work.
109 112
110 113 :Parameters:
111 114 d : Deferred
112 115 The deferred that actions should be attached to
113 116 queued_engine : IQueuedEngine implementer
114 117 The worker that has been allocated to perform the task
115 118 """
116 119
117 120 def process_result(d, result, engine_id):
118 121 """Take a raw task result.
119 122
120 123 Objects that implement `ITask` can choose how the result of running
121 124 the task is presented. This method takes the raw result and
122 125 does this logic. Two examples are the `MapTask`, which simply returns
123 126 the raw result or a `Failure` object and the `StringTask` which
124 127 returns a `TaskResult` object.
125 128
126 129 :Parameters:
127 130 d : Deferred
128 131 The deferred that actions should be attached to
129 132 result : object
130 133 The raw task result that needs to be wrapped
131 134 engine_id : int
132 135 The id of the engine that did the task
133 136
134 137 :Returns:
135 138 The result, as a tuple of the form: (success, result).
136 139 Here, success is a boolean indicating if the task
137 140 succeeded or failed and result is the result.
138 141 """
139 142
140 143 def check_depend(properties):
141 144 """Check properties to see if the task should be run.
142 145
143 146 :Parameters:
144 147 properties : dict
145 148 A dictionary of properties that an engine has set
146 149
147 150 :Returns:
148 151 True if the task should be run, False otherwise
149 152 """
150 153
151 154 def can_task(self):
152 155 """Serialize (can) any functions in the task for pickling.
153 156
154 157 Subclasses must override this method and make sure that all
155 158 functions in the task are canned by calling `can` on the
156 159 function.
157 160 """
158 161
159 162 def uncan_task(self):
160 163 """Unserialize (uncan) any canned function in the task."""
161 164
162 165 class BaseTask(object):
163 166 """
164 167 Common functionality for all objects implementing `ITask`.
165 168 """
166 169
167 170 zi.implements(ITask)
168 171
169 172 def __init__(self, clear_before=False, clear_after=False, retries=0,
170 173 recovery_task=None, depend=None):
171 174 """
172 175 Make a generic task.
173 176
174 177 :Parameters:
175 178 clear_before : boolean
176 179 Should the engine's namespace be cleared before the task
177 180 is run
178 181 clear_after : boolean
179 182 Should the engine's namespace be cleared after the task is run
180 183 retries : int
181 184 The number of times a task should be retried upon failure
182 185 recovery_task : any task object
183 186 If a task fails and it has a recovery_task, that is run
184 187 upon a retry
185 188 depend : FunctionType
186 189 A function that is called to test for properties. This function
187 190 must take one argument, the properties dict and return a boolean
188 191 """
189 192 self.clear_before = clear_before
190 193 self.clear_after = clear_after
191 194 self.retries = retries
192 195 self.recovery_task = recovery_task
193 196 self.depend = depend
194 197 self.taskid = None
195 198
196 199 def start_time(self, result):
197 200 """
198 201 Start the basic timers.
199 202 """
200 203 self.start = time.time()
201 204 self.start_struct = time.localtime()
202 205 return result
203 206
204 207 def stop_time(self, result):
205 208 """
206 209 Stop the basic timers.
207 210 """
208 211 self.stop = time.time()
209 212 self.stop_struct = time.localtime()
210 213 self.duration = self.stop - self.start
211 214 self.submitted = time.strftime(time_format, self.start_struct)
212 215 self.completed = time.strftime(time_format)
213 216 return result
214 217
215 218 def pre_task(self, d, queued_engine):
216 219 """
217 220 Clear the engine before running the task if clear_before is set.
218 221 """
219 222 if self.clear_before:
220 223 d.addCallback(lambda r: queued_engine.reset())
221 224
222 225 def post_task(self, d, queued_engine):
223 226 """
224 227 Clear the engine after running the task if clear_after is set.
225 228 """
226 229 def reseter(result):
227 230 queued_engine.reset()
228 231 return result
229 232 if self.clear_after:
230 233 d.addBoth(reseter)
231 234
232 235 def submit_task(self, d, queued_engine):
233 236 raise NotImplementedError('submit_task must be implemented in a subclass')
234 237
235 238 def process_result(self, result, engine_id):
236 239 """
237 240 Process a task result.
238 241
239 242 This is the default `process_result` that just returns the raw
240 243 result or a `Failure`.
241 244 """
242 245 if isinstance(result, failure.Failure):
243 246 return (False, result)
244 247 else:
245 248 return (True, result)
246 249
247 250 def check_depend(self, properties):
248 251 """
249 252 Calls self.depend(properties) to see if a task should be run.
250 253 """
251 254 if self.depend is not None:
252 255 return self.depend(properties)
253 256 else:
254 257 return True
255 258
256 259 def can_task(self):
257 260 self.depend = can(self.depend)
258 261 if isinstance(self.recovery_task, BaseTask):
259 262 self.recovery_task.can_task()
260 263
261 264 def uncan_task(self):
262 265 self.depend = uncan(self.depend)
263 266 if isinstance(self.recovery_task, BaseTask):
264 267 self.recovery_task.uncan_task()
265 268
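For illustration only (not from the patch): a `depend` function, as described above, receives an engine's properties dict and returns a boolean. The 'has_numpy' property name below is hypothetical:

def needs_numpy(properties):
    # Only let engines that advertise a (hypothetical) 'has_numpy' property run the task.
    return properties.get('has_numpy', False)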
266 269 class MapTask(BaseTask):
267 270 """
268 271 A task that consists of a function and arguments.
269 272 """
270 273
271 274 zi.implements(ITask)
272 275
273 276 def __init__(self, function, args=None, kwargs=None, clear_before=False,
274 277 clear_after=False, retries=0, recovery_task=None, depend=None):
275 278 """
276 279 Create a task based on a function, args and kwargs.
277 280
278 281 This is a simple type of task that consists of calling:
279 282 function(*args, **kwargs) and wrapping the result in a `TaskResult`.
280 283
281 284 The return value of the function, or a `Failure` wrapping an
282 285 exception is the task result for this type of task.
283 286 """
284 287 BaseTask.__init__(self, clear_before, clear_after, retries,
285 288 recovery_task, depend)
286 289 if not isinstance(function, FunctionType):
287 290 raise TypeError('a task function must be a FunctionType')
288 291 self.function = function
289 292 if args is None:
290 293 self.args = ()
291 294 else:
292 295 self.args = args
293 296 if not isinstance(self.args, (list, tuple)):
294 297 raise TypeError('a task args must be a list or tuple')
295 298 if kwargs is None:
296 299 self.kwargs = {}
297 300 else:
298 301 self.kwargs = kwargs
299 302 if not isinstance(self.kwargs, dict):
300 303 raise TypeError('a task kwargs must be a dict')
301 304
302 305 def submit_task(self, d, queued_engine):
303 306 d.addCallback(lambda r: queued_engine.push_function(
304 307 dict(_ipython_task_function=self.function))
305 308 )
306 309 d.addCallback(lambda r: queued_engine.push(
307 310 dict(_ipython_task_args=self.args,_ipython_task_kwargs=self.kwargs))
308 311 )
309 312 d.addCallback(lambda r: queued_engine.execute(
310 313 '_ipython_task_result = _ipython_task_function(*_ipython_task_args,**_ipython_task_kwargs)')
311 314 )
312 315 d.addCallback(lambda r: queued_engine.pull('_ipython_task_result'))
313 316
314 317 def can_task(self):
315 318 self.function = can(self.function)
316 319 BaseTask.can_task(self)
317 320
318 321 def uncan_task(self):
319 322 self.function = uncan(self.function)
320 323 BaseTask.uncan_task(self)
321 324
322 325
323 326 class StringTask(BaseTask):
324 327 """
325 328 A task that consists of a string of Python code to run.
326 329 """
327 330
328 331 def __init__(self, expression, pull=None, push=None,
329 332 clear_before=False, clear_after=False, retries=0,
330 333 recovery_task=None, depend=None):
331 334 """
332 335 Create a task based on a Python expression and variables
333 336
334 337 This type of task lets you push a set of variables to the engines
335 338 namespace, run a Python string in that namespace and then bring back
336 339 a different set of Python variables as the result.
337 340
338 341 Because this type of task can return many results (through the
339 342 `pull` keyword argument) it returns a special `TaskResult` object
340 343 that wraps the pulled variables, statistics about the run and
341 344 any exceptions raised.
342 345 """
343 346 if not isinstance(expression, str):
344 347 raise TypeError('a task expression must be a string')
345 348 self.expression = expression
346 349
347 350 if pull is None:
348 351 self.pull = ()
349 352 elif isinstance(pull, str):
350 353 self.pull = (pull,)
351 354 elif isinstance(pull, (list, tuple)):
352 355 self.pull = pull
353 356 else:
354 357 raise TypeError('pull must be str or a sequence of strs')
355 358
356 359 if push is None:
357 360 self.push = {}
358 361 elif isinstance(push, dict):
359 362 self.push = push
360 363 else:
361 364 raise TypeError('push must be a dict')
362 365
363 366 BaseTask.__init__(self, clear_before, clear_after, retries,
364 367 recovery_task, depend)
365 368
366 369 def submit_task(self, d, queued_engine):
367 370 if self.push is not None:
368 371 d.addCallback(lambda r: queued_engine.push(self.push))
369 372
370 373 d.addCallback(lambda r: queued_engine.execute(self.expression))
371 374
372 375 if self.pull is not None:
373 376 d.addCallback(lambda r: queued_engine.pull(self.pull))
374 377 else:
375 378 d.addCallback(lambda r: None)
376 379
377 380 def process_result(self, result, engine_id):
378 381 if isinstance(result, failure.Failure):
379 382 tr = TaskResult(result, engine_id)
380 383 else:
381 384 if self.pull is None:
382 385 resultDict = {}
383 386 elif len(self.pull) == 1:
384 387 resultDict = {self.pull[0]:result}
385 388 else:
386 389 resultDict = dict(zip(self.pull, result))
387 390 tr = TaskResult(resultDict, engine_id)
388 391 # Assign task attributes
389 392 tr.submitted = self.submitted
390 393 tr.completed = self.completed
391 394 tr.duration = self.duration
392 395 if hasattr(self,'taskid'):
393 396 tr.taskid = self.taskid
394 397 else:
395 398 tr.taskid = None
396 399 if isinstance(result, failure.Failure):
397 400 return (False, tr)
398 401 else:
399 402 return (True, tr)
400 403
401 404 class ResultNS(object):
402 405 """
403 406 A dict like object for holding the results of a task.
404 407
405 408 The result namespace object for use in `TaskResult` objects as tr.ns.
406 409 It builds an object from a dictionary, such that it has attributes
407 410 according to the key,value pairs of the dictionary.
408 411
409 412 This works by calling setattr on ALL key,value pairs in the dict. If a user
410 413 chooses to overwrite the `__repr__` or `__getattr__` attributes, they can.
411 414 This can be a bad idea, as it may corrupt standard behavior of the
412 415 ns object.
413 416
414 417 Example
415 418 --------
416 419
417 420 >>> ns = ResultNS({'a':17,'foo':range(3)})
418 421 >>> print ns
419 422 NS{'a': 17, 'foo': [0, 1, 2]}
420 423 >>> ns.a
421 424 17
422 425 >>> ns['foo']
423 426 [0, 1, 2]
424 427 """
425 428 def __init__(self, dikt):
426 429 for k,v in dikt.iteritems():
427 430 setattr(self,k,v)
428 431
429 432 def __repr__(self):
430 433 l = dir(self)
431 434 d = {}
432 435 for k in l:
433 436 # do not print private objects
434 437 if k[:2] != '__' and k[-2:] != '__':
435 438 d[k] = getattr(self, k)
436 439 return "NS"+repr(d)
437 440
438 441 def __getitem__(self, key):
439 442 return getattr(self, key)
440 443
441 444 class TaskResult(object):
442 445 """
443 446 An object for returning task results for certain types of tasks.
444 447
445 448 This object encapsulates the results of a task. On task
446 449 success it will have a keys attribute that will have a list
447 450 of the variables that have been pulled back. These variables
448 451 are accessible as attributes of this class as well. On
449 452 success the failure attribute will be None.
450 453
451 454 On task failure, keys will be empty, but failure will contain
452 455 the failure object that encapsulates the remote exception.
453 456 One can also simply call the `raise_exception` method of
454 457 this class to re-raise any remote exception in the local
455 458 session.
456 459
457 460 The `TaskResult` has a `.ns` member, which is a property for access
458 461 to the results. If the Task had pull=['a', 'b'], then the
459 462 Task Result will have attributes `tr.ns.a`, `tr.ns.b` for those values.
460 463 Accessing `tr.ns` will raise the remote failure if the task failed.
461 464
462 465 The `engineid` attribute should have the `engineid` of the engine
463 466 that ran the task. But, because engines can come and go,
464 467 the `engineid` may not continue to be
465 468 valid or accurate.
466 469
467 470 The `taskid` attribute simply gives the `taskid` that the task
468 471 is tracked under.
469 472 """
470 473 taskid = None
471 474
472 475 def _getNS(self):
473 476 if isinstance(self.failure, failure.Failure):
474 477 return self.failure.raiseException()
475 478 else:
476 479 return self._ns
477 480
478 481 def _setNS(self, v):
479 482 raise Exception("the ns attribute cannot be changed")
480 483
481 484 ns = property(_getNS, _setNS)
482 485
483 486 def __init__(self, results, engineid):
484 487 self.engineid = engineid
485 488 if isinstance(results, failure.Failure):
486 489 self.failure = results
487 490 self.results = {}
488 491 else:
489 492 self.results = results
490 493 self.failure = None
491 494
492 495 self._ns = ResultNS(self.results)
493 496
494 497 self.keys = self.results.keys()
495 498
496 499 def __repr__(self):
497 500 if self.failure is not None:
498 501 contents = self.failure
499 502 else:
500 503 contents = self.results
501 504 return "TaskResult[ID:%r]:%r"%(self.taskid, contents)
502 505
503 506 def __getitem__(self, key):
504 507 if self.failure is not None:
505 508 self.raise_exception()
506 509 return self.results[key]
507 510
508 511 def raise_exception(self):
509 512 """Re-raise any remote exceptions in the local python session."""
510 513 if self.failure is not None:
511 514 self.failure.raiseException()
512 515
513 516
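For illustration only (not from the patch), an end-to-end sketch assuming `tc` is an object providing ITaskController (defined below):

t = StringTask('b = a**2', push=dict(a=5), pull='b')
d = tc.run(t)                                      # Deferred to the integer taskid
d.addCallback(lambda tid: tc.get_task_result(tid, block=True))
d.addCallback(lambda tr: tr.ns.b)                  # 25 on success; re-raises the
                                                   # remote failure otherwise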
514 517 #-----------------------------------------------------------------------------
515 518 # The controller side of things
516 519 #-----------------------------------------------------------------------------
517 520
518 521 class IWorker(zi.Interface):
519 522 """The Basic Worker Interface.
520 523
521 524 A worker is a representation of an Engine that is ready to run tasks.
522 525 """
523 526
524 527 zi.Attribute("workerid", "the id of the worker")
525 528
526 529 def run(task):
527 530 """Run task in worker's namespace.
528 531
529 532 :Parameters:
530 533 task : a `Task` object
531 534
532 535 :Returns: `Deferred` to a tuple of (success, result) where
533 536 success is a boolean that signifies success or failure
534 537 and result is the task result.
535 538 """
536 539
537 540
538 541 class WorkerFromQueuedEngine(object):
539 542 """Adapt an `IQueuedEngine` to an `IWorker` object"""
540 543
541 544 zi.implements(IWorker)
542 545
543 546 def __init__(self, qe):
544 547 self.queuedEngine = qe
545 548 self.workerid = None
546 549
547 550 def _get_properties(self):
548 551 return self.queuedEngine.properties
549 552
550 553 properties = property(_get_properties, lambda self, _:None)
551 554
552 555 def run(self, task):
553 556 """Run task in worker's namespace.
554 557
555 558 This takes a task and calls methods on the task that actually
556 559 cause `self.queuedEngine` to do the task. See the methods of
557 560 `ITask` for more information about how these methods are called.
558 561
559 562 :Parameters:
560 563 task : a `Task` object
561 564
562 565 :Returns: `Deferred` to a tuple of (success, result) where
563 566 success is a boolean that signifies success or failure
564 567 and result is the task result.
565 568 """
566 569 d = defer.succeed(None)
567 570 d.addCallback(task.start_time)
568 571 task.pre_task(d, self.queuedEngine)
569 572 task.submit_task(d, self.queuedEngine)
570 573 task.post_task(d, self.queuedEngine)
571 574 d.addBoth(task.stop_time)
572 575 d.addBoth(task.process_result, self.queuedEngine.id)
573 576 # At this point, there will be (success, result) coming down the line
574 577 return d
575 578
576 579
577 580 components.registerAdapter(WorkerFromQueuedEngine, es.IEngineQueued, IWorker)
578 581
579 582 class IScheduler(zi.Interface):
580 583 """The interface for a Scheduler.
581 584 """
582 585 zi.Attribute("nworkers", "the number of unassigned workers")
583 586 zi.Attribute("ntasks", "the number of unscheduled tasks")
584 587 zi.Attribute("workerids", "a list of the worker ids")
585 588 zi.Attribute("taskids", "a list of the task ids")
586 589
587 590 def add_task(task, **flags):
588 591 """Add a task to the queue of the Scheduler.
589 592
590 593 :Parameters:
591 594 task : an `ITask` implementer
592 595 The task to be queued.
593 596 flags : dict
594 597 General keywords for more sophisticated scheduling
595 598 """
596 599
597 600 def pop_task(id=None):
598 601 """Pops a task object from the queue.
599 602
600 603 This gets the next task to be run. If no `id` is requested, the highest priority
601 604 task is returned.
602 605
603 606 :Parameters:
604 607 id
605 608 The id of the task to be popped. The default (None) is to return
606 609 the highest priority task.
607 610
608 611 :Returns: an `ITask` implementer
609 612
610 613 :Exceptions:
611 614 IndexError : raised if no taskid in queue
612 615 """
613 616
614 617 def add_worker(worker, **flags):
615 618 """Add a worker to the worker queue.
616 619
617 620 :Parameters:
618 621 worker : an `IWorker` implementer
619 622 flags : dict
620 623 General keywords for more sophisticated scheduling
621 624 """
622 625
623 626 def pop_worker(id=None):
624 627 """Pops an IWorker object that is ready to do work.
625 628
626 629 This gets the next IWorker that is ready to do work.
627 630
628 631 :Parameters:
629 632 id : if specified, will pop worker with workerid=id, else pops
630 633 highest priority worker. Defaults to None.
631 634
632 635 :Returns:
633 636 an IWorker object
634 637
635 638 :Exceptions:
636 639 IndexError : raised if no workerid in queue
637 640 """
638 641
639 642 def ready():
640 643 """Returns True if there is something to do, False otherwise"""
641 644
642 645 def schedule():
643 646 """Returns (worker,task) pair for the next task to be run."""
644 647
645 648
646 649 class FIFOScheduler(object):
647 650 """
648 651 A basic First-In-First-Out (Queue) Scheduler.
649 652
650 653 This is the default Scheduler for the `TaskController`.
651 654 See the docstrings for `IScheduler` for interface details.
652 655 """
653 656
654 657 zi.implements(IScheduler)
655 658
656 659 def __init__(self):
657 660 self.tasks = []
658 661 self.workers = []
659 662
660 663 def _ntasks(self):
661 664 return len(self.tasks)
662 665
663 666 def _nworkers(self):
664 667 return len(self.workers)
665 668
666 669 ntasks = property(_ntasks, lambda self, _:None)
667 670 nworkers = property(_nworkers, lambda self, _:None)
668 671
669 672 def _taskids(self):
670 673 return [t.taskid for t in self.tasks]
671 674
672 675 def _workerids(self):
673 676 return [w.workerid for w in self.workers]
674 677
675 678 taskids = property(_taskids, lambda self,_:None)
676 679 workerids = property(_workerids, lambda self,_:None)
677 680
678 681 def add_task(self, task, **flags):
679 682 self.tasks.append(task)
680 683
681 684 def pop_task(self, id=None):
682 685 if id is None:
683 686 return self.tasks.pop(0)
684 687 else:
685 688 for i in range(len(self.tasks)):
686 689 taskid = self.tasks[i].taskid
687 690 if id == taskid:
688 691 return self.tasks.pop(i)
689 692 raise IndexError("No task #%i"%id)
690 693
691 694 def add_worker(self, worker, **flags):
692 695 self.workers.append(worker)
693 696
694 697 def pop_worker(self, id=None):
695 698 if id is None:
696 699 return self.workers.pop(0)
697 700 else:
698 701 for i in range(len(self.workers)):
699 702 workerid = self.workers[i].workerid
700 703 if id == workerid:
701 704 return self.workers.pop(i)
702 705 raise IndexError("No worker #%i"%id)
703 706
704 707 def schedule(self):
705 708 for t in self.tasks:
706 709 for w in self.workers:
707 710 try:# do not allow exceptions to break this
708 711 # Allow the task to check itself using its
709 712 # check_depend method.
710 713 cando = t.check_depend(w.properties)
711 714 except:
712 715 cando = False
713 716 if cando:
714 717 return self.pop_worker(w.workerid), self.pop_task(t.taskid)
715 718 return None, None
716 719
717 720
718 721
719 722 class LIFOScheduler(FIFOScheduler):
720 723 """
721 724 A Last-In-First-Out (Stack) Scheduler.
722 725
723 726 This scheduler should naively reward fast engines by giving
724 727 them more jobs. This risks starvation, but only in cases with
725 728 low load, where starvation does not really matter.
726 729 """
727 730
728 731 def add_task(self, task, **flags):
729 732 # self.tasks.reverse()
730 733 self.tasks.insert(0, task)
731 734 # self.tasks.reverse()
732 735
733 736 def add_worker(self, worker, **flags):
734 737 # self.workers.reverse()
735 738 self.workers.insert(0, worker)
736 739 # self.workers.reverse()
737 740
738 741
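For illustration only (not from the patch), the scheduler contract in miniature, assuming `worker` and `task` provide IWorker and ITask:

s = FIFOScheduler()
s.add_worker(worker)
s.add_task(task)
w, t = s.schedule()     # a matching (worker, task) pair, or (None, None)
if w is not None:
    d = w.run(t)        # Deferred to (success, result), per IWorker.run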
739 742 class ITaskController(cs.IControllerBase):
740 743 """
741 744 The Task based interface to a `ControllerService` object
742 745
743 746 This adapts a `ControllerService` to the ITaskController interface.
744 747 """
745 748
746 749 def run(task):
747 750 """
748 751 Run a task.
749 752
750 753 :Parameters:
751 754 task : an IPython `Task` object
752 755
753 756 :Returns: the integer ID of the task
754 757 """
755 758
756 759 def get_task_result(taskid, block=False):
757 760 """
758 761 Get the result of a task by its ID.
759 762
760 763 :Parameters:
761 764 taskid : int
762 765 the id of the task whose result is requested
763 766
764 767 :Returns: `Deferred` to the task result if the task is done, and None
765 768 if not.
766 769
767 770 :Exceptions:
768 771 actualResult will be an `IndexError` if no such task has been submitted
769 772 """
770 773
771 774 def abort(taskid):
772 775 """Remove task from queue if task is has not been submitted.
773 776
774 777 If the task has already been submitted, wait for it to finish and discard
775 778 results and prevent resubmission.
776 779
777 780 :Parameters:
778 781 taskid : the id of the task to be aborted
779 782
780 783 :Returns:
781 784 `Deferred` to abort attempt completion. Will be None on success.
782 785
783 786 :Exceptions:
784 787 deferred will fail with `IndexError` if no such task has been submitted
785 788 or the task has already completed.
786 789 """
787 790
788 791 def barrier(taskids):
789 792 """
790 793 Block until the list of taskids are completed.
791 794
792 795 Returns None on success.
793 796 """
794 797
795 798 def spin():
796 799 """
797 800 Touch the scheduler, to resume scheduling without submitting a task.
798 801 """
799 802
800 803 def queue_status(verbose=False):
801 804 """
802 805 Get a dictionary with the current state of the task queue.
803 806
804 807 If verbose is True, then return lists of taskids, otherwise,
805 808 return the number of tasks with each status.
806 809 """
807 810
808 811 def clear():
809 812 """
810 813 Clear all previously run tasks from the task controller.
811 814
812 815 This is needed because the task controller keeps all task results
813 816 in memory. This can be a problem if there are many completed
814 817 tasks. Users should call this periodically to clean out these
815 818 cached task results.
816 819 """
817 820
818 821
819 822 class TaskController(cs.ControllerAdapterBase):
820 823 """The Task based interface to a Controller object.
821 824
822 825 If you want to use a different scheduler, just subclass this and set
823 826 the `SchedulerClass` member to the *class* of your chosen scheduler.
824 827 """
825 828
826 829 zi.implements(ITaskController)
827 830 SchedulerClass = FIFOScheduler
828 831
829 832 timeout = 30
830 833
831 834 def __init__(self, controller):
832 835 self.controller = controller
833 836 self.controller.on_register_engine_do(self.registerWorker, True)
834 837 self.controller.on_unregister_engine_do(self.unregisterWorker, True)
835 838 self.taskid = 0
836 839 self.failurePenalty = 1 # the time in seconds to penalize
837 840 # a worker for failing a task
838 841 self.pendingTasks = {} # dict of {workerid:(taskid, task)}
839 842 self.deferredResults = {} # dict of {taskid:deferred}
840 843 self.finishedResults = {} # dict of {taskid:actualResult}
841 844 self.workers = {} # dict of {workerid:worker}
842 845 self.abortPending = [] # list of taskids with aborts pending
843 846 self.idleLater = None # delayed call object for timeout
844 847 self.scheduler = self.SchedulerClass()
845 848
846 849 for id in self.controller.engines.keys():
847 850 self.workers[id] = IWorker(self.controller.engines[id])
848 851 self.workers[id].workerid = id
849 852 self.scheduler.add_worker(self.workers[id])
850 853
851 854 def registerWorker(self, id):
852 855 """Called by controller.register_engine."""
853 856 if self.workers.get(id):
854 857 raise ValueError("worker with id %s already exists. This should not happen." % id)
855 858 self.workers[id] = IWorker(self.controller.engines[id])
856 859 self.workers[id].workerid = id
857 860 if not self.pendingTasks.has_key(id):# if not working
858 861 self.scheduler.add_worker(self.workers[id])
859 862 self.distributeTasks()
860 863
861 864 def unregisterWorker(self, id):
862 865 """Called by controller.unregister_engine"""
863 866
864 867 if self.workers.has_key(id):
865 868 try:
866 869 self.scheduler.pop_worker(id)
867 870 except IndexError:
868 871 pass
869 872 self.workers.pop(id)
870 873
871 874 def _pendingTaskIDs(self):
872 875 return [t.taskid for t in self.pendingTasks.values()]
873 876
874 877 #---------------------------------------------------------------------------
875 878 # Interface methods
876 879 #---------------------------------------------------------------------------
877 880
878 881 def run(self, task):
879 882 """
880 883 Run a task and return `Deferred` to its taskid.
881 884 """
882 885 task.taskid = self.taskid
883 886 task.start = time.localtime()
884 887 self.taskid += 1
885 888 d = defer.Deferred()
886 889 self.scheduler.add_task(task)
887 890 log.msg('Queuing task: %i' % task.taskid)
888 891
889 892 self.deferredResults[task.taskid] = []
890 893 self.distributeTasks()
891 894 return defer.succeed(task.taskid)
892 895
893 896 def get_task_result(self, taskid, block=False):
894 897 """
895 898 Returns a `Deferred` to the task result, or None.
896 899 """
897 900 log.msg("Getting task result: %i" % taskid)
898 901 if self.finishedResults.has_key(taskid):
899 902 tr = self.finishedResults[taskid]
900 903 return defer.succeed(tr)
901 904 elif self.deferredResults.has_key(taskid):
902 905 if block:
903 906 d = defer.Deferred()
904 907 self.deferredResults[taskid].append(d)
905 908 return d
906 909 else:
907 910 return defer.succeed(None)
908 911 else:
909 912 return defer.fail(IndexError("task ID not registered: %r" % taskid))
910 913
911 914 def abort(self, taskid):
912 915 """
913 916 Remove a task from the queue if it has not been run already.
914 917 """
915 918 if not isinstance(taskid, int):
916 919 return defer.fail(failure.Failure(TypeError("an integer task id expected: %r" % taskid)))
917 920 try:
918 921 self.scheduler.pop_task(taskid)
919 922 except IndexError, e:
920 923 if taskid in self.finishedResults.keys():
921 924 d = defer.fail(IndexError("Task Already Completed"))
922 925 elif taskid in self.abortPending:
923 926 d = defer.fail(IndexError("Task Already Aborted"))
924 927 elif taskid in self._pendingTaskIDs():# task is pending
925 928 self.abortPending.append(taskid)
926 929 d = defer.succeed(None)
927 930 else:
928 931 d = defer.fail(e)
929 932 else:
930 933 d = defer.execute(self._doAbort, taskid)
931 934
932 935 return d
933 936
934 937 def barrier(self, taskids):
935 938 dList = []
936 939 if isinstance(taskids, int):
937 940 taskids = [taskids]
938 941 for id in taskids:
939 942 d = self.get_task_result(id, block=True)
940 943 dList.append(d)
941 944 d = DeferredList(dList, consumeErrors=1)
942 945 d.addCallbacks(lambda r: None)
943 946 return d
944 947
945 948 def spin(self):
946 949 return defer.succeed(self.distributeTasks())
947 950
948 951 def queue_status(self, verbose=False):
949 952 pending = self._pendingTaskIDs()
950 953 failed = []
951 954 succeeded = []
952 955 for k,v in self.finishedResults.iteritems():
953 956 if not isinstance(v, failure.Failure):
954 957 if hasattr(v,'failure'):
955 958 if v.failure is None:
956 959 succeeded.append(k)
957 960 else:
958 961 failed.append(k)
959 962 scheduled = self.scheduler.taskids
960 963 if verbose:
961 964 result = dict(pending=pending, failed=failed,
962 965 succeeded=succeeded, scheduled=scheduled)
963 966 else:
964 967 result = dict(pending=len(pending),failed=len(failed),
965 968 succeeded=len(succeeded),scheduled=len(scheduled))
966 969 return defer.succeed(result)
967 970
968 971 #---------------------------------------------------------------------------
969 972 # Queue methods
970 973 #---------------------------------------------------------------------------
971 974
972 975 def _doAbort(self, taskid):
973 976 """
974 977 Helper function for aborting a pending task.
975 978 """
976 979 log.msg("Task aborted: %i" % taskid)
977 980 result = failure.Failure(error.TaskAborted())
978 981 self._finishTask(taskid, result)
979 982 if taskid in self.abortPending:
980 983 self.abortPending.remove(taskid)
981 984
982 985 def _finishTask(self, taskid, result):
983 986 dlist = self.deferredResults.pop(taskid)
984 987 # result.taskid = taskid # The TaskResult should save the taskid
985 988 self.finishedResults[taskid] = result
986 989 for d in dlist:
987 990 d.callback(result)
988 991
989 992 def distributeTasks(self):
990 993 """
991 994 Distribute tasks while self.scheduler has things to do.
992 995 """
993 996 log.msg("distributing Tasks")
994 997 worker, task = self.scheduler.schedule()
995 998 if not worker and not task:
996 999 if self.idleLater and self.idleLater.called:# we are inside failIdle
997 1000 self.idleLater = None
998 1001 else:
999 1002 self.checkIdle()
1000 1003 return False
1001 1004 # else something to do:
1002 1005 while worker and task:
1003 1006 # get worker and task
1004 1007 # add to pending
1005 1008 self.pendingTasks[worker.workerid] = task
1006 1009 # run/link callbacks
1007 1010 d = worker.run(task)
1008 1011 log.msg("Running task %i on worker %i" %(task.taskid, worker.workerid))
1009 1012 d.addBoth(self.taskCompleted, task.taskid, worker.workerid)
1010 1013 worker, task = self.scheduler.schedule()
1011 1014 # check for idle timeout:
1012 1015 self.checkIdle()
1013 1016 return True
1014 1017
1015 1018 def checkIdle(self):
1016 1019 if self.idleLater and not self.idleLater.called:
1017 1020 self.idleLater.cancel()
1018 1021 if self.scheduler.ntasks and self.workers and \
1019 1022 self.scheduler.nworkers == len(self.workers):
1020 1023 self.idleLater = reactor.callLater(self.timeout, self.failIdle)
1021 1024 else:
1022 1025 self.idleLater = None
1023 1026
1024 1027 def failIdle(self):
1025 1028 if not self.distributeTasks():
1026 1029 while self.scheduler.ntasks:
1027 1030 t = self.scheduler.pop_task()
1028 1031 msg = "task %i failed to execute due to unmet dependencies"%t.taskid
1029 1032 msg += " for %i seconds"%self.timeout
1030 1033 log.msg("Task aborted by timeout: %i" % t.taskid)
1031 1034 f = failure.Failure(error.TaskTimeout(msg))
1032 1035 self._finishTask(t.taskid, f)
1033 1036 self.idleLater = None
1034 1037
1035 1038
1036 1039 def taskCompleted(self, success_and_result, taskid, workerid):
1037 1040 """This is the err/callback for a completed task."""
1038 1041 success, result = success_and_result
1039 1042 try:
1040 1043 task = self.pendingTasks.pop(workerid)
1041 1044 except:
1042 1045 # this should not happen
1043 1046 log.msg("Tried to pop bad pending task %i from worker %i"%(taskid, workerid))
1044 1047 log.msg("Result: %r"%result)
1045 1048 log.msg("Pending tasks: %s"%self.pendingTasks)
1046 1049 return
1047 1050
1048 1051 # Check if aborted while pending
1049 1052 aborted = False
1050 1053 if taskid in self.abortPending:
1051 1054 self._doAbort(taskid)
1052 1055 aborted = True
1053 1056
1054 1057 if not aborted:
1055 1058 if not success:
1056 1059 log.msg("Task %i failed on worker %i"% (taskid, workerid))
1057 1060 if task.retries > 0: # resubmit
1058 1061 task.retries -= 1
1059 1062 self.scheduler.add_task(task)
1060 1063 s = "Resubmitting task %i, %i retries remaining" %(taskid, task.retries)
1061 1064 log.msg(s)
1062 1065 self.distributeTasks()
1063 1066 elif isinstance(task.recovery_task, BaseTask) and \
1064 1067 task.recovery_task.retries > -1:
1065 1068 # retries = -1 is to prevent infinite recovery_task loop
1066 1069 task.retries = -1
1067 1070 task.recovery_task.taskid = taskid
1068 1071 task = task.recovery_task
1069 1072 self.scheduler.add_task(task)
1070 1073 s = "Recovering task %i, %i retries remaining" %(taskid, task.retries)
1071 1074 log.msg(s)
1072 1075 self.distributeTasks()
1073 1076 else: # done trying
1074 1077 self._finishTask(taskid, result)
1075 1078 # wait a second before readmitting a worker that failed
1076 1079 # it may have died, and not yet been unregistered
1077 1080 reactor.callLater(self.failurePenalty, self.readmitWorker, workerid)
1078 1081 else: # we succeeded
1079 1082 log.msg("Task completed: %i"% taskid)
1080 1083 self._finishTask(taskid, result)
1081 1084 self.readmitWorker(workerid)
1082 1085 else: # we aborted the task
1083 1086 if not success:
1084 1087 reactor.callLater(self.failurePenalty, self.readmitWorker, workerid)
1085 1088 else:
1086 1089 self.readmitWorker(workerid)
1087 1090
1088 1091 def readmitWorker(self, workerid):
1089 1092 """
1090 1093 Readmit a worker to the scheduler.
1091 1094
1092 1095 This is outside `taskCompleted` because of the `failurePenalty` being
1093 1096 implemented through `reactor.callLater`.
1094 1097 """
1095 1098
1096 1099 if workerid in self.workers.keys() and workerid not in self.pendingTasks.keys():
1097 1100 self.scheduler.add_worker(self.workers[workerid])
1098 1101 self.distributeTasks()
1099 1102
1100 1103 def clear(self):
1101 1104 """
1102 1105 Clear all previously run tasks from the task controller.
1103 1106
1104 1107 This is needed because the task controller keeps all task results
1105 1108 in memory. This can be a problem if there are many completed
1106 1109 tasks. Users should call this periodically to clean out these
1107 1110 cached task results.
1108 1111 """
1109 1112 self.finishedResults = {}
1110 1113 return defer.succeed(None)
1111 1114
1112 1115
1113 1116 components.registerAdapter(TaskController, cs.IControllerBase, ITaskController)
@@ -1,244 +1,246 b''
1 1 # -*- coding: utf-8 -*-
2 2 """IPython Test Suite Runner.
3 3
4 4 This module provides a main entry point to a user script to test IPython
5 5 itself from the command line. There are two ways of running this script:
6 6
7 7 1. With the syntax `iptest all`. This runs our entire test suite by
8 8 calling this script (with different arguments) or trial recursively. This
9 9 causes modules and package to be tested in different processes, using nose
10 10 or trial where appropriate.
11 11 2. With the regular nose syntax, like `iptest -vvs IPython`. In this form
12 12 the script simply calls nose, but with special command line flags and
13 13 plugins loaded.
14 14
15 15 For now, this script requires that both nose and twisted are installed. This
16 16 will change in the future.
17 17 """
18 18
19 19 #-----------------------------------------------------------------------------
20 20 # Module imports
21 21 #-----------------------------------------------------------------------------
22 22
23 23 import os
24 24 import os.path as path
25 25 import sys
26 26 import subprocess
27 27 import time
28 28 import warnings
29 29
30 30 import nose.plugins.builtin
31 31 from nose.core import TestProgram
32 32
33 33 from IPython.testing.plugin.ipdoctest import IPythonDoctest
34 34
35 35 #-----------------------------------------------------------------------------
36 36 # Globals and constants
37 37 #-----------------------------------------------------------------------------
38 38
39 39 # For the IPythonDoctest plugin, we need to exclude certain patterns that cause
40 40 # testing problems. We should strive to minimize the number of skipped
41 41 # modules, since this means untested code. As the testing machinery
42 42 # solidifies, this list should eventually become empty.
43 43 EXCLUDE = ['IPython/external/',
44 44 'IPython/platutils_win32',
45 45 'IPython/frontend/cocoa',
46 46 'IPython/frontend/process/winprocess.py',
47 47 'IPython_doctest_plugin',
48 48 'IPython/Gnuplot',
49 49 'IPython/Extensions/ipy_',
50 50 'IPython/Extensions/clearcmd',
51 51 'IPython/Extensions/PhysicalQIn',
52 52 'IPython/Extensions/scitedirector',
53 53 'IPython/Extensions/numeric_formats',
54 54 'IPython/testing/attic',
55 55 ]
56 56
57 57 #-----------------------------------------------------------------------------
58 58 # Functions and classes
59 59 #-----------------------------------------------------------------------------
60 60
61 61 def run_iptest():
62 62 """Run the IPython test suite using nose.
63 63
64 64 This function is called when this script is **not** called with the form
65 65 `iptest all`. It simply calls nose with appropriate command line flags
66 66 and accepts all of the standard nose arguments.
67 67 """
68 68
69 69 warnings.filterwarnings('ignore',
70 70 'This will be removed soon. Use IPython.testing.util instead')
71 71
72 72 argv = sys.argv + [
73 73 # Loading ipdoctest causes problems with Twisted.
74 74 # I am removing this as a temporary fix to get the
75 75 # test suite back into working shape. Our nose
76 76 # plugin needs to be gone through with a fine
77 77 # toothed comb to find what is causing the problem.
78 78 '--with-ipdoctest',
79 79 '--ipdoctest-tests','--ipdoctest-extension=txt',
80 80 '--detailed-errors',
81 81
82 82 # We add --exe because of setuptools' imbecility (it
83 83 # blindly does chmod +x on ALL files). Nose does the
84 84 # right thing and it tries to avoid executables,
85 85 # setuptools unfortunately forces our hand here. This
86 86 # has been discussed on the distutils list and the
87 87 # setuptools devs refuse to fix this problem!
88 88 '--exe',
89 89 ]
90 90
91 91 # Detect if any tests were required by explicitly calling an IPython
92 92 # submodule or giving a specific path
93 93 has_tests = False
94 94 for arg in sys.argv:
95 95 if 'IPython' in arg or arg.endswith('.py') or \
96 96 (':' in arg and '.py' in arg):
97 97 has_tests = True
98 98 break
99 99
100 100 # If nothing was specifically requested, test full IPython
101 101 if not has_tests:
102 102 argv.append('IPython')
103 103
104 104 # Construct list of plugins, omitting the existing doctest plugin, which
105 105 # ours replaces (and extends).
106 106 plugins = [IPythonDoctest(EXCLUDE)]
107 107 for p in nose.plugins.builtin.plugins:
108 108 plug = p()
109 109 if plug.name == 'doctest':
110 110 continue
111 111
112 112 #print '*** adding plugin:',plug.name # dbg
113 113 plugins.append(plug)
114 114
115 115 TestProgram(argv=argv,plugins=plugins)
116 116
117 117
118 118 class IPTester(object):
119 119 """Call that calls iptest or trial in a subprocess.
120 120 """
121 121 def __init__(self,runner='iptest',params=None):
122 122 """ """
123 123 if runner == 'iptest':
124 124 self.runner = ['iptest','-v']
125 125 else:
126 126 self.runner = ['trial']
127 127 if params is None:
128 128 params = []
129 129 if isinstance(params,str):
130 130 params = [params]
131 131 self.params = params
132 132
133 133 # Assemble call
134 134 self.call_args = self.runner+self.params
135 135
136 136 def run(self):
137 137 """Run the stored commands"""
138 138 return subprocess.call(self.call_args)
139 139
140 140
141 141 def make_runners():
142 142 """Define the modules and packages that need to be tested.
143 143 """
144 144
145 145 # This omits additional top-level modules that should not be doctested.
146 146 # XXX: Shell.py is also omitted because of a bug in the skip_doctest
147 147 # decorator. See ticket https://bugs.launchpad.net/bugs/366209
148 148 top_mod = \
149 149 ['background_jobs.py', 'ColorANSI.py', 'completer.py', 'ConfigLoader.py',
150 150 'CrashHandler.py', 'Debugger.py', 'deep_reload.py', 'demo.py',
151 151 'DPyGetOpt.py', 'dtutils.py', 'excolors.py', 'FakeModule.py',
152 152 'generics.py', 'genutils.py', 'history.py', 'hooks.py', 'ipapi.py',
153 153 'iplib.py', 'ipmaker.py', 'ipstruct.py', 'irunner.py', 'Itpl.py',
154 154 'Logger.py', 'macro.py', 'Magic.py', 'OInspect.py',
155 155 'OutputTrap.py', 'platutils.py', 'prefilter.py', 'Prompts.py',
156 156 'PyColorize.py', 'Release.py', 'rlineimpl.py', 'shadowns.py',
157 157 'shellglobals.py', 'strdispatch.py', 'twshell.py',
158 158 'ultraTB.py', 'upgrade_dir.py', 'usage.py', 'wildcard.py',
159 159 # See note above for why this is skipped
160 160 # 'Shell.py',
161 161 'winconsole.py']
162 162
163 163 if os.name == 'posix':
164 164 top_mod.append('platutils_posix.py')
165 165 elif sys.platform == 'win32':
166 166 top_mod.append('platutils_win32.py')
167 167 else:
168 168 top_mod.append('platutils_dummy.py')
169 169
170 top_pack = ['config','Extensions','frontend','gui','kernel',
170 # These are tested by nose, so skip IPython.kernel
171 top_pack = ['config','Extensions','frontend','gui',
171 172 'testing','tests','tools','UserConfig']
172 173
173 174 modules = ['IPython.%s' % m[:-3] for m in top_mod ]
174 175 packages = ['IPython.%s' % m for m in top_pack ]
175 176
176 177 # Make runners
177 178 runners = dict(zip(top_pack, [IPTester(params=v) for v in packages]))
178 179
180 # Test IPython.kernel using trial if twisted is installed
179 181 try:
180 182 import zope.interface
181 183 import twisted
182 184 import foolscap
183 185 except ImportError:
184 186 pass
185 187 else:
186 188 runners['trial'] = IPTester('trial',['IPython'])
187 189
188 190 for m in modules:
189 191 runners[m] = IPTester(params=m)
190 192
191 193 return runners
192 194
193 195
194 196 def run_iptestall():
195 197 """Run the entire IPython test suite by calling nose and trial.
196 198
197 199 This function constructs :class:`IPTester` instances for all IPython
198 200 modules and packages and then runs each of them. This causes the modules
199 201 and packages of IPython to be tested each in their own subprocess using
200 202 nose or twisted.trial appropriately.
201 203 """
202 204 runners = make_runners()
203 205 # Run all test runners, tracking execution time
204 206 failed = {}
205 207 t_start = time.time()
206 208 for name,runner in runners.iteritems():
207 209 print '*'*77
208 210 print 'IPython test set:',name
209 211 res = runner.run()
210 212 if res:
211 213 failed[name] = res
212 214 t_end = time.time()
213 215 t_tests = t_end - t_start
214 216 nrunners = len(runners)
215 217 nfail = len(failed)
216 218 # summarize results
217 219 print
218 220 print '*'*77
219 221 print 'Ran %s test sets in %.3fs' % (nrunners, t_tests)
220 222 print
221 223 if not failed:
222 224 print 'OK'
223 225 else:
224 226 # If anything went wrong, point out what command to rerun manually to
225 227 # see the actual errors and individual summary
226 228 print 'ERROR - %s out of %s test sets failed.' % (nfail, nrunners)
227 229 for name in failed:
228 230 failed_runner = runners[name]
229 231 print '-'*40
230 232 print 'Runner failed:',name
231 233 print 'You may wish to rerun this one individually, with:'
232 234 print ' '.join(failed_runner.call_args)
233 235 print
234 236
235 237
236 238 def main():
237 239 if sys.argv[1] == 'all':
238 240 run_iptestall()
239 241 else:
240 242 run_iptest()
241 243
242 244
243 245 if __name__ == '__main__':
244 246 main() No newline at end of file
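For illustration only (not from the patch): each IPTester simply shells out to its runner, so one test set can be run by hand, e.g.:

runner = IPTester(params='IPython.testing')   # call_args == ['iptest', '-v', 'IPython.testing']
exit_code = runner.run()                      # returns the subprocess exit status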
@@ -1,889 +1,908 b''
1 1 """Nose Plugin that supports IPython doctests.
2 2
3 3 Limitations:
4 4
5 5 - When generating examples for use as doctests, make sure that you have
6 6 pretty-printing OFF. This can be done either by starting ipython with the
7 7 flag '--nopprint', by setting pprint to 0 in your ipythonrc file, or by
8 8 interactively disabling it with %Pprint. This is required so that IPython
9 9 output matches that of normal Python, which is used by doctest for internal
10 10 execution.
11 11
12 12 - Do not rely on specific prompt numbers for results (such as using
13 13 '_34==True', for example). For IPython tests run via an external process the
14 14 prompt numbers may be different, and IPython tests run as normal python code
15 15 won't even have these special _NN variables set at all.
16 16 """
17 17
18 18 #-----------------------------------------------------------------------------
19 19 # Module imports
20 20
21 21 # From the standard library
22 22 import __builtin__
23 23 import commands
24 24 import doctest
25 25 import inspect
26 26 import logging
27 27 import os
28 28 import re
29 29 import sys
30 30 import traceback
31 31 import unittest
32 32
33 33 from inspect import getmodule
34 34 from StringIO import StringIO
35 35
36 36 # We are overriding the default doctest runner, so we need to import a few
37 37 # things from doctest directly
38 38 from doctest import (REPORTING_FLAGS, REPORT_ONLY_FIRST_FAILURE,
39 39 _unittest_reportflags, DocTestRunner,
40 40 _extract_future_flags, pdb, _OutputRedirectingPdb,
41 41 _exception_traceback,
42 42 linecache)
43 43
44 44 # Third-party modules
45 45 import nose.core
46 46
47 47 from nose.plugins import doctests, Plugin
48 48 from nose.util import anyp, getpackage, test_address, resolve_name, tolist
49 49
50 50 #-----------------------------------------------------------------------------
51 51 # Module globals and other constants
52 52
53 53 log = logging.getLogger(__name__)
54 54
55 55 ###########################################################################
56 56 # *** HACK ***
57 57 # We must start our own ipython object and heavily muck with it so that all the
58 58 # modifications IPython makes to system behavior don't send the doctest
59 59 # machinery into a fit. This code should be considered a gross hack, but it
60 60 # gets the job done.
61 61
62 62 def default_argv():
63 63 """Return a valid default argv for creating testing instances of ipython"""
64 64
65 65 # Get the install directory for the user configuration and tell ipython to
66 66 # use the default profile from there.
67 67 from IPython import UserConfig
68 68 ipcdir = os.path.dirname(UserConfig.__file__)
69 69 #ipconf = os.path.join(ipcdir,'ipy_user_conf.py')
70 70 ipconf = os.path.join(ipcdir,'ipythonrc')
71 71 #print 'conf:',ipconf # dbg
72 72
73 73 return ['--colors=NoColor','--noterm_title','-rcfile=%s' % ipconf]
74 74
75 75
76 76 # Hack to modify the %run command so we can sync the user's namespace with the
77 77 # test globals. Once we move over to a clean magic system, this will be done
78 78 # with much less ugliness.
79 79
80 80 class py_file_finder(object):
81 81 def __init__(self,test_filename):
82 82 self.test_filename = test_filename
83 83
84 84 def __call__(self,name):
85 85 from IPython.genutils import get_py_filename
86 86 try:
87 87 return get_py_filename(name)
88 88 except IOError:
89 89 test_dir = os.path.dirname(self.test_filename)
90 90 new_path = os.path.join(test_dir,name)
91 91 return get_py_filename(new_path)
92 92
93 93
94 94 def _run_ns_sync(self,arg_s,runner=None):
95 95 """Modified version of %run that syncs testing namespaces.
96 96
97 97 This is strictly needed for running doctests that call %run.
98 98 """
99 99
100 finder = py_file_finder(_run_ns_sync.test_filename)
100 # When tests call %run directly (not via doctest) these function attributes
101 # are not set
102 try:
103 fname = _run_ns_sync.test_filename
104 except AttributeError:
105 fname = arg_s
106
107 finder = py_file_finder(fname)
101 108 out = _ip.IP.magic_run_ori(arg_s,runner,finder)
102 _run_ns_sync.test_globs.update(_ip.user_ns)
109
110 # Similarly, there is no test_globs when a test is NOT a doctest
111 if hasattr(_run_ns_sync,'test_globs'):
112 _run_ns_sync.test_globs.update(_ip.user_ns)
103 113 return out
104 114
105 115
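Reviewer note: the fix above covers the case where %run is invoked outside the doctest machinery, before the runner has attached anything to the function. A toy illustration of the function-attribute pattern it relies on (the names here are made up):

    def _sync_demo(arg_s):
        # Per-test state is parked on the function object itself; fall back to
        # the caller's argument when the doctest runner never attached it.
        try:
            fname = _sync_demo.test_filename
        except AttributeError:
            fname = arg_s
        return fname

    assert _sync_demo('script.py') == 'script.py'    # plain, non-doctest call
    _sync_demo.test_filename = 'doctest_case.py'     # what IPDocTestRunner.run does
    assert _sync_demo('script.py') == 'doctest_case.py'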
106 116 class ipnsdict(dict):
107 117 """A special subclass of dict for use as an IPython namespace in doctests.
108 118
109 119 This subclass adds a simple checkpointing capability so that when testing
110 120 machinery clears it (we use it as the test execution context), it doesn't
111 121 get completely destroyed.
112 122 """
113 123
114 124 def __init__(self,*a):
115 125 dict.__init__(self,*a)
116 126 self._savedict = {}
117 127
118 128 def clear(self):
119 129 dict.clear(self)
120 130 self.update(self._savedict)
121 131
122 132 def _checkpoint(self):
123 133 self._savedict.clear()
124 134 self._savedict.update(self)
125 135
126 136 def update(self,other):
127 137 self._checkpoint()
128 138 dict.update(self,other)
139
129 140 # If '_' is in the namespace, python won't set it when executing code,
130 141 # and we have examples that test it. So we ensure that the namespace
131 142 # is always 'clean' of it before it's used for test code execution.
132 143 self.pop('_',None)
144
145 # The builtins namespace must *always* be the real __builtin__ module,
146 # else weird stuff happens. The main ipython code does have provisions
147 # to ensure this after %run, but since in this class we do some
148 # aggressive low-level cleaning of the execution namespace, we need to
149 # correct for that ourselves, to ensure consitency with the 'real'
150 # ipython.
151 self['__builtins__'] = __builtin__
133 152
134 153
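Reviewer note: a condensed re-implementation of the checkpointing behaviour, for illustration only; it deliberately omits the '_' and __builtins__ housekeeping that ipnsdict also performs:

    class _CheckpointDict(dict):
        """Toy ipnsdict: clear() restores whatever was saved at the last update()."""
        def __init__(self, *a):
            dict.__init__(self, *a)
            self._savedict = {}
        def update(self, other):
            self._savedict.clear()
            self._savedict.update(self)     # checkpoint the current contents
            dict.update(self, other)
        def clear(self):
            dict.clear(self)
            self.update(self._savedict)     # restore the checkpoint

    d = _CheckpointDict()
    d.update({'a': 1})
    d.update({'b': 2})
    d.clear()
    assert d == {'a': 1}   # contents checkpointed at the last update survive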
135 154 def start_ipython():
136 155 """Start a global IPython shell, which we need for IPython-specific syntax.
137 156 """
138 157
139 158 # This function should only ever run once!
140 159 if hasattr(start_ipython,'already_called'):
141 160 return
142 161 start_ipython.already_called = True
143 162
144 163 # Ok, first time we're called, go ahead
145 164 import new
146 165
147 166 import IPython
148 167
149 168 def xsys(cmd):
150 169 """Execute a command and print its output.
151 170
152 171 This is just a convenience function to replace the IPython system call
153 172 with one that is more doctest-friendly.
154 173 """
155 174 cmd = _ip.IP.var_expand(cmd,depth=1)
156 175 sys.stdout.write(commands.getoutput(cmd))
157 176 sys.stdout.flush()
158 177
159 178 # Store certain global objects that IPython modifies
160 179 _displayhook = sys.displayhook
161 180 _excepthook = sys.excepthook
162 181 _main = sys.modules.get('__main__')
163 182
164 183 argv = default_argv()
165 184
166 185 # Start IPython instance. We customize it to start with minimal frills.
167 186 user_ns,global_ns = IPython.ipapi.make_user_namespaces(ipnsdict(),dict())
168 187 IPython.Shell.IPShell(argv,user_ns,global_ns)
169 188
170 189 # Deactivate the various python system hooks added by ipython for
171 190 # interactive convenience so we don't confuse the doctest system
172 191 sys.modules['__main__'] = _main
173 192 sys.displayhook = _displayhook
174 193 sys.excepthook = _excepthook
175 194
176 195 # So that ipython magics and aliases can be doctested (they work by making
177 196 # a call into a global _ip object)
178 197 _ip = IPython.ipapi.get()
179 198 __builtin__._ip = _ip
180 199
181 200 # Modify the IPython system call with one that uses getoutput, so that we
182 201 # can capture subcommands and print them to Python's stdout, otherwise the
183 202 # doctest machinery would miss them.
184 203 _ip.system = xsys
185 204
186 205 # Also patch our %run function in.
187 206 im = new.instancemethod(_run_ns_sync,_ip.IP, _ip.IP.__class__)
188 207 _ip.IP.magic_run_ori = _ip.IP.magic_run
189 208 _ip.IP.magic_run = im
190 209
191 210 # The start call MUST be made here. I'm not sure yet why it doesn't work if
192 211 # it is made later, at plugin initialization time, but in all my tests, that's
193 212 # the case.
194 213 start_ipython()
195 214
196 215 # *** END HACK ***
197 216 ###########################################################################
198 217
199 218 # Classes and functions
200 219
201 220 def is_extension_module(filename):
202 221 """Return whether the given filename is an extension module.
203 222
204 223 This simply checks that the extension is either .so or .pyd.
205 224 """
206 225 return os.path.splitext(filename)[1].lower() in ('.so','.pyd')
207 226
208 227
209 228 class DocTestSkip(object):
210 229 """Object wrapper for doctests to be skipped."""
211 230
212 231 ds_skip = """Doctest to skip.
213 232 >>> 1 #doctest: +SKIP
214 233 """
215 234
216 235 def __init__(self,obj):
217 236 self.obj = obj
218 237
219 238 def __getattribute__(self,key):
220 239 if key == '__doc__':
221 240 return DocTestSkip.ds_skip
222 241 else:
223 242 return getattr(object.__getattribute__(self,'obj'),key)
224 243
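Reviewer note: the wrapper above is triggered purely by a skip_doctest attribute on the target object, so excluding an example-looking docstring from the suite is a one-line change (the function below is a made-up illustration):

    def interactive_helper():
        """Looks like a doctest but must never run during the suite.

        >>> interactive_helper()   # would block waiting for user input
        """
        return raw_input('value: ')

    # DocTestFinder._find() sees this attribute and wraps the object in
    # DocTestSkip, whose __doc__ is a single '#doctest: +SKIP' example.
    interactive_helper.skip_doctest = True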
225 244 # Modified version of the one in the stdlib, that fixes a python bug (doctests
226 245 # not found in extension modules, http://bugs.python.org/issue3158)
227 246 class DocTestFinder(doctest.DocTestFinder):
228 247
229 248 def _from_module(self, module, object):
230 249 """
231 250 Return true if the given object is defined in the given
232 251 module.
233 252 """
234 253 if module is None:
235 254 return True
236 255 elif inspect.isfunction(object):
237 256 return module.__dict__ is object.func_globals
238 257 elif inspect.isbuiltin(object):
239 258 return module.__name__ == object.__module__
240 259 elif inspect.isclass(object):
241 260 return module.__name__ == object.__module__
242 261 elif inspect.ismethod(object):
243 262 # This one may be a bug in cython that fails to correctly set the
244 263 # __module__ attribute of methods, but since the same error is easy
245 264 # to make by extension code writers, having this safety in place
246 265 # isn't such a bad idea
247 266 return module.__name__ == object.im_class.__module__
248 267 elif inspect.getmodule(object) is not None:
249 268 return module is inspect.getmodule(object)
250 269 elif hasattr(object, '__module__'):
251 270 return module.__name__ == object.__module__
252 271 elif isinstance(object, property):
253 272 return True # [XX] no way to be sure.
254 273 else:
255 274 raise ValueError("object must be a class or function")
256 275
257 276 def _find(self, tests, obj, name, module, source_lines, globs, seen):
258 277 """
259 278 Find tests for the given object and any contained objects, and
260 279 add them to `tests`.
261 280 """
262 281
263 282 if hasattr(obj,"skip_doctest"):
264 283 #print 'SKIPPING DOCTEST FOR:',obj # dbg
265 284 obj = DocTestSkip(obj)
266 285
267 286 doctest.DocTestFinder._find(self,tests, obj, name, module,
268 287 source_lines, globs, seen)
269 288
270 289 # Below we re-run pieces of the above method with manual modifications,
271 290 # because the original code is buggy and fails to correctly identify
272 291 # doctests in extension modules.
273 292
274 293 # Local shorthands
275 294 from inspect import isroutine, isclass, ismodule
276 295
277 296 # Look for tests in a module's contained objects.
278 297 if inspect.ismodule(obj) and self._recurse:
279 298 for valname, val in obj.__dict__.items():
280 299 valname1 = '%s.%s' % (name, valname)
281 300 if ( (isroutine(val) or isclass(val))
282 301 and self._from_module(module, val) ):
283 302
284 303 self._find(tests, val, valname1, module, source_lines,
285 304 globs, seen)
286 305
287 306 # Look for tests in a class's contained objects.
288 307 if inspect.isclass(obj) and self._recurse:
289 308 #print 'RECURSE into class:',obj # dbg
290 309 for valname, val in obj.__dict__.items():
291 310 # Special handling for staticmethod/classmethod.
292 311 if isinstance(val, staticmethod):
293 312 val = getattr(obj, valname)
294 313 if isinstance(val, classmethod):
295 314 val = getattr(obj, valname).im_func
296 315
297 316 # Recurse to methods, properties, and nested classes.
298 317 if ((inspect.isfunction(val) or inspect.isclass(val) or
299 318 inspect.ismethod(val) or
300 319 isinstance(val, property)) and
301 320 self._from_module(module, val)):
302 321 valname = '%s.%s' % (name, valname)
303 322 self._find(tests, val, valname, module, source_lines,
304 323 globs, seen)
305 324
306 325
307 326 class IPDoctestOutputChecker(doctest.OutputChecker):
308 327 """Second-chance checker with support for random tests.
309 328
310 329 If the default comparison doesn't pass, this checker looks in the expected
311 330 output string for flags that tell us to ignore the output.
312 331 """
313 332
314 333 random_re = re.compile(r'#\s*random\s+')
315 334
316 335 def check_output(self, want, got, optionflags):
317 336 """Check output, accepting special markers embedded in the output.
318 337
319 338 If the output didn't pass the default validation but the special string
320 339 '#random' is included, we accept it."""
321 340
322 341 # Let the original tester verify first, in case people have valid tests
323 342 # that happen to have a comment saying '#random' embedded in them.
324 343 ret = doctest.OutputChecker.check_output(self, want, got,
325 344 optionflags)
326 345 if not ret and self.random_re.search(want):
327 346 #print >> sys.stderr, 'RANDOM OK:',want # dbg
328 347 return True
329 348
330 349 return ret
331 350
332 351
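Reviewer note: this is the kind of example the second-chance check lets through; the marker sits in the expected output, which is exactly what check_output() searches. The value shown is illustrative:

    def doctest_random_output():
        """Output tagged '# random' is accepted even when it differs.

        In [1]: id(object())
        Out[1]: 139776396381840  # random
        """

    # A whole docstring can instead be tagged '# all-random'; the parser below
    # (_RANDOM_TEST) then appends the marker to every example's output.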
333 352 class DocTestCase(doctests.DocTestCase):
334 353 """Proxy for DocTestCase: provides an address() method that
335 354 returns the correct address for the doctest case. Otherwise
336 355 acts as a proxy to the test case. To provide hints for address(),
337 356 an obj may also be passed -- this will be used as the test object
338 357 for purposes of determining the test address, if it is provided.
339 358 """
340 359
341 360 # Note: this method was taken from numpy's nosetester module.
342 361
343 362 # Subclass nose.plugins.doctests.DocTestCase to work around a bug in
344 363 # its constructor that blocks non-default arguments from being passed
345 364 # down into doctest.DocTestCase
346 365
347 366 def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
348 367 checker=None, obj=None, result_var='_'):
349 368 self._result_var = result_var
350 369 doctests.DocTestCase.__init__(self, test,
351 370 optionflags=optionflags,
352 371 setUp=setUp, tearDown=tearDown,
353 372 checker=checker)
354 373 # Now we must actually copy the original constructor from the stdlib
355 374 # doctest class, because we can't call it directly and a bug in nose
356 375 # means it never gets passed the right arguments.
357 376
358 377 self._dt_optionflags = optionflags
359 378 self._dt_checker = checker
360 379 self._dt_test = test
361 380 self._dt_setUp = setUp
362 381 self._dt_tearDown = tearDown
363 382
364 383 # XXX - store this runner once in the object!
365 384 runner = IPDocTestRunner(optionflags=optionflags,
366 385 checker=checker, verbose=False)
367 386 self._dt_runner = runner
368 387
369 388
370 389 # Each doctest should remember what directory it was loaded from...
371 390 self._ori_dir = os.getcwd()
372 391
373 392 # Modified runTest from the default stdlib
374 393 def runTest(self):
375 394 test = self._dt_test
376 395 runner = self._dt_runner
377 396
378 397 old = sys.stdout
379 398 new = StringIO()
380 399 optionflags = self._dt_optionflags
381 400
382 401 if not (optionflags & REPORTING_FLAGS):
383 402 # The option flags don't include any reporting flags,
384 403 # so add the default reporting flags
385 404 optionflags |= _unittest_reportflags
386 405
387 406 try:
388 407 # Save our current directory and switch out to the one where the
389 408 # test was originally created, in case another doctest did a
390 409 # directory change. We'll restore this in the finally clause.
391 410 curdir = os.getcwd()
392 411 os.chdir(self._ori_dir)
393 412
394 413 runner.DIVIDER = "-"*70
395 414 failures, tries = runner.run(test,out=new.write,
396 415 clear_globs=False)
397 416 finally:
398 417 sys.stdout = old
399 418 os.chdir(curdir)
400 419
401 420 if failures:
402 421 raise self.failureException(self.format_failure(new.getvalue()))
403 422
404 423 def setUp(self):
405 424 """Modified test setup that syncs with ipython namespace"""
406 425
407 426 if isinstance(self._dt_test.examples[0],IPExample):
408 427 # for IPython examples *only*, we swap the globals with the ipython
409 428 # namespace, after updating it with the globals (which doctest
410 429 # fills with the necessary info from the module being tested).
411 430 _ip.IP.user_ns.update(self._dt_test.globs)
412 431 self._dt_test.globs = _ip.IP.user_ns
413 432
414 433 doctests.DocTestCase.setUp(self)
415 434
416 435
417 436 # A simple subclassing of the original with a different class name, so we can
418 437 # distinguish and treat differently IPython examples from pure python ones.
419 438 class IPExample(doctest.Example): pass
420 439
421 440
422 441 class IPExternalExample(doctest.Example):
423 442 """Doctest examples to be run in an external process."""
424 443
425 444 def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
426 445 options=None):
427 446 # Parent constructor
428 447 doctest.Example.__init__(self,source,want,exc_msg,lineno,indent,options)
429 448
430 449 # An EXTRA newline is needed to prevent pexpect hangs
431 450 self.source += '\n'
432 451
433 452
434 453 class IPDocTestParser(doctest.DocTestParser):
435 454 """
436 455 A class used to parse strings containing doctest examples.
437 456
438 457 Note: This is a version modified to properly recognize IPython input and
439 458 convert any IPython examples into valid Python ones.
440 459 """
441 460 # This regular expression is used to find doctest examples in a
442 461 # string. It defines three groups: `source` is the source code
443 462 # (including leading indentation and prompts); `indent` is the
444 463 # indentation of the first (PS1) line of the source code; and
445 464 # `want` is the expected output (including leading indentation).
446 465
447 466 # Classic Python prompts or default IPython ones
448 467 _PS1_PY = r'>>>'
449 468 _PS2_PY = r'\.\.\.'
450 469
451 470 _PS1_IP = r'In\ \[\d+\]:'
452 471 _PS2_IP = r'\ \ \ \.\.\.+:'
453 472
454 473 _RE_TPL = r'''
455 474 # Source consists of a PS1 line followed by zero or more PS2 lines.
456 475 (?P<source>
457 476 (?:^(?P<indent> [ ]*) (?P<ps1> %s) .*) # PS1 line
458 477 (?:\n [ ]* (?P<ps2> %s) .*)*) # PS2 lines
459 478 \n? # a newline
460 479 # Want consists of any non-blank lines that do not start with PS1.
461 480 (?P<want> (?:(?![ ]*$) # Not a blank line
462 481 (?![ ]*%s) # Not a line starting with PS1
463 482 (?![ ]*%s) # Not a line starting with PS2
464 483 .*$\n? # But any other line
465 484 )*)
466 485 '''
467 486
468 487 _EXAMPLE_RE_PY = re.compile( _RE_TPL % (_PS1_PY,_PS2_PY,_PS1_PY,_PS2_PY),
469 488 re.MULTILINE | re.VERBOSE)
470 489
471 490 _EXAMPLE_RE_IP = re.compile( _RE_TPL % (_PS1_IP,_PS2_IP,_PS1_IP,_PS2_IP),
472 491 re.MULTILINE | re.VERBOSE)
473 492
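Reviewer note: a quick sanity check of what the two IPython prompt patterns accept, with the pattern strings copied verbatim from the class attributes above:

    import re

    ps1 = re.compile(r'In\ \[\d+\]:')
    ps2 = re.compile(r'\ \ \ \.\.\.+:')

    assert ps1.match('In [12]: x = 1')       # any prompt number is accepted
    assert ps2.match('   ...: print x')      # three-dot continuation
    assert ps2.match('   ....: return x')    # longer continuations also match
    assert not ps1.match('>>> x = 1')        # classic prompts use _PS1_PY instead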
474 493 # Mark a test as being fully random. In this case, we simply append the
475 494 # random marker ('#random') to each individual example's output. This way
476 495 # we don't need to modify any other code.
477 496 _RANDOM_TEST = re.compile(r'#\s*all-random\s+')
478 497
479 498 # Mark tests to be executed in an external process - currently unsupported.
480 499 _EXTERNAL_IP = re.compile(r'#\s*ipdoctest:\s*EXTERNAL')
481 500
482 501 def ip2py(self,source):
483 502 """Convert input IPython source into valid Python."""
484 503 out = []
485 504 newline = out.append
486 505 #print 'IPSRC:\n',source,'\n###' # dbg
487 506 # The input source must be first stripped of all bracketing whitespace
488 507 # and turned into lines, so it looks to the parser like regular user
489 508 # input
490 509 for lnum,line in enumerate(source.strip().splitlines()):
491 510 newline(_ip.IP.prefilter(line,lnum>0))
492 511 newline('') # ensure a closing newline, needed by doctest
493 512 #print "PYSRC:", '\n'.join(out) # dbg
494 513 return '\n'.join(out)
495 514
496 515 def parse(self, string, name='<string>'):
497 516 """
498 517 Divide the given string into examples and intervening text,
499 518 and return them as a list of alternating Examples and strings.
500 519 Line numbers for the Examples are 0-based. The optional
501 520 argument `name` is a name identifying this string, and is only
502 521 used for error messages.
503 522 """
504 523
505 524 #print 'Parse string:\n',string # dbg
506 525
507 526 string = string.expandtabs()
508 527 # If all lines begin with the same indentation, then strip it.
509 528 min_indent = self._min_indent(string)
510 529 if min_indent > 0:
511 530 string = '\n'.join([l[min_indent:] for l in string.split('\n')])
512 531
513 532 output = []
514 533 charno, lineno = 0, 0
515 534
516 535 # We make 'all random' tests by adding the '# random' mark to every
517 536 # block of output in the test.
518 537 if self._RANDOM_TEST.search(string):
519 538 random_marker = '\n# random'
520 539 else:
521 540 random_marker = ''
522 541
523 542 # Whether to convert the input from ipython to python syntax
524 543 ip2py = False
525 544 # Find all doctest examples in the string. First, try them as Python
526 545 # examples, then as IPython ones
527 546 terms = list(self._EXAMPLE_RE_PY.finditer(string))
528 547 if terms:
529 548 # Normal Python example
530 549 #print '-'*70 # dbg
531 550 #print 'PyExample, Source:\n',string # dbg
532 551 #print '-'*70 # dbg
533 552 Example = doctest.Example
534 553 else:
535 554 # It's an ipython example. Note that IPExamples are run
536 555 # in-process, so their syntax must be turned into valid python.
537 556 # IPExternalExamples are run out-of-process (via pexpect) so they
538 557 # don't need any filtering (a real ipython will be executing them).
539 558 terms = list(self._EXAMPLE_RE_IP.finditer(string))
540 559 if self._EXTERNAL_IP.search(string):
541 560 #print '-'*70 # dbg
542 561 #print 'IPExternalExample, Source:\n',string # dbg
543 562 #print '-'*70 # dbg
544 563 Example = IPExternalExample
545 564 else:
546 565 #print '-'*70 # dbg
547 566 #print 'IPExample, Source:\n',string # dbg
548 567 #print '-'*70 # dbg
549 568 Example = IPExample
550 569 ip2py = True
551 570
552 571 for m in terms:
553 572 # Add the pre-example text to `output`.
554 573 output.append(string[charno:m.start()])
555 574 # Update lineno (lines before this example)
556 575 lineno += string.count('\n', charno, m.start())
557 576 # Extract info from the regexp match.
558 577 (source, options, want, exc_msg) = \
559 578 self._parse_example(m, name, lineno,ip2py)
560 579
561 580 # Append the random-output marker (it defaults to empty in most
562 581 # cases, it's only non-empty for 'all-random' tests):
563 582 want += random_marker
564 583
565 584 if Example is IPExternalExample:
566 585 options[doctest.NORMALIZE_WHITESPACE] = True
567 586 want += '\n'
568 587
569 588 # Create an Example, and add it to the list.
570 589 if not self._IS_BLANK_OR_COMMENT(source):
571 590 output.append(Example(source, want, exc_msg,
572 591 lineno=lineno,
573 592 indent=min_indent+len(m.group('indent')),
574 593 options=options))
575 594 # Update lineno (lines inside this example)
576 595 lineno += string.count('\n', m.start(), m.end())
577 596 # Update charno.
578 597 charno = m.end()
579 598 # Add any remaining post-example text to `output`.
580 599 output.append(string[charno:])
581 600 return output
582 601
583 602 def _parse_example(self, m, name, lineno,ip2py=False):
584 603 """
585 604 Given a regular expression match from `_EXAMPLE_RE` (`m`),
586 605 return a pair `(source, want)`, where `source` is the matched
587 606 example's source code (with prompts and indentation stripped);
588 607 and `want` is the example's expected output (with indentation
589 608 stripped).
590 609
591 610 `name` is the string's name, and `lineno` is the line number
592 611 where the example starts; both are used for error messages.
593 612
594 613 Optional:
595 614 `ip2py`: if true, filter the input via IPython to convert the syntax
596 615 into valid python.
597 616 """
598 617
599 618 # Get the example's indentation level.
600 619 indent = len(m.group('indent'))
601 620
602 621 # Divide source into lines; check that they're properly
603 622 # indented; and then strip their indentation & prompts.
604 623 source_lines = m.group('source').split('\n')
605 624
606 625 # We're using variable-length input prompts
607 626 ps1 = m.group('ps1')
608 627 ps2 = m.group('ps2')
609 628 ps1_len = len(ps1)
610 629
611 630 self._check_prompt_blank(source_lines, indent, name, lineno,ps1_len)
612 631 if ps2:
613 632 self._check_prefix(source_lines[1:], ' '*indent + ps2, name, lineno)
614 633
615 634 source = '\n'.join([sl[indent+ps1_len+1:] for sl in source_lines])
616 635
617 636 if ip2py:
618 637 # Convert source input from IPython into valid Python syntax
619 638 source = self.ip2py(source)
620 639
621 640 # Divide want into lines; check that it's properly indented; and
622 641 # then strip the indentation. Spaces before the last newline should
623 642 # be preserved, so plain rstrip() isn't good enough.
624 643 want = m.group('want')
625 644 want_lines = want.split('\n')
626 645 if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
627 646 del want_lines[-1] # forget final newline & spaces after it
628 647 self._check_prefix(want_lines, ' '*indent, name,
629 648 lineno + len(source_lines))
630 649
631 650 # Remove ipython output prompt that might be present in the first line
632 651 want_lines[0] = re.sub(r'Out\[\d+\]: \s*?\n?','',want_lines[0])
633 652
634 653 want = '\n'.join([wl[indent:] for wl in want_lines])
635 654
636 655 # If `want` contains a traceback message, then extract it.
637 656 m = self._EXCEPTION_RE.match(want)
638 657 if m:
639 658 exc_msg = m.group('msg')
640 659 else:
641 660 exc_msg = None
642 661
643 662 # Extract options from the source.
644 663 options = self._find_options(source, name, lineno)
645 664
646 665 return source, options, want, exc_msg
647 666
648 667 def _check_prompt_blank(self, lines, indent, name, lineno, ps1_len):
649 668 """
650 669 Given the lines of a source string (including prompts and
651 670 leading indentation), check to make sure that every prompt is
652 671 followed by a space character. If any line is not followed by
653 672 a space character, then raise ValueError.
654 673
655 674 Note: IPython-modified version which takes the input prompt length as a
656 675 parameter, so that prompts of variable length can be dealt with.
657 676 """
658 677 space_idx = indent+ps1_len
659 678 min_len = space_idx+1
660 679 for i, line in enumerate(lines):
661 680 if len(line) >= min_len and line[space_idx] != ' ':
662 681 raise ValueError('line %r of the docstring for %s '
663 682 'lacks blank after %s: %r' %
664 683 (lineno+i+1, name,
665 684 line[indent:space_idx], line))
666 685
667 686
668 687 SKIP = doctest.register_optionflag('SKIP')
669 688
670 689
671 690 class IPDocTestRunner(doctest.DocTestRunner,object):
672 691 """Test runner that synchronizes the IPython namespace with test globals.
673 692 """
674 693
675 694 def run(self, test, compileflags=None, out=None, clear_globs=True):
676 695
677 696 # Hack: ipython needs access to the execution context of the example,
678 697 # so that it can propagate user variables loaded by %run into
679 698 # test.globs. We put them here into our modified %run as a function
680 699 # attribute. Our new %run will then only make the namespace update
681 700 # when called (rather than unconditionally updating test.globs here
682 701 # for all examples, most of which won't be calling %run anyway).
683 702 _run_ns_sync.test_globs = test.globs
684 703 _run_ns_sync.test_filename = test.filename
685 704
686 705 return super(IPDocTestRunner,self).run(test,
687 706 compileflags,out,clear_globs)
688 707
689 708
690 709 class DocFileCase(doctest.DocFileCase):
691 710 """Overrides to provide the test filename via address().
692 711 """
693 712 def address(self):
694 713 return (self._dt_test.filename, None, None)
695 714
696 715
697 716 class ExtensionDoctest(doctests.Doctest):
698 717 """Nose Plugin that supports doctests in extension modules.
699 718 """
700 719 name = 'extdoctest' # call nosetests with --with-extdoctest
701 720 enabled = True
702 721
703 722 def __init__(self,exclude_patterns=None):
704 723 """Create a new ExtensionDoctest plugin.
705 724
706 725 Parameters
707 726 ----------
708 727
709 728 exclude_patterns : sequence of strings, optional
710 729 These patterns are compiled as regular expressions, subsequently used
711 730 to exclude any filename which matches them from inclusion in the test
712 731 suite (using pattern.search(), NOT pattern.match() ).
713 732 """
714 733
715 734 if exclude_patterns is None:
716 735 exclude_patterns = []
717 736 self.exclude_patterns = map(re.compile,exclude_patterns)
718 737 doctests.Doctest.__init__(self)
719 738
720 739 def options(self, parser, env=os.environ):
721 740 Plugin.options(self, parser, env)
722 741 parser.add_option('--doctest-tests', action='store_true',
723 742 dest='doctest_tests',
724 743 default=env.get('NOSE_DOCTEST_TESTS',True),
725 744 help="Also look for doctests in test modules. "
726 745 "Note that classes, methods and functions should "
727 746 "have either doctests or non-doctest tests, "
728 747 "not both. [NOSE_DOCTEST_TESTS]")
729 748 parser.add_option('--doctest-extension', action="append",
730 749 dest="doctestExtension",
731 750 help="Also look for doctests in files with "
732 751 "this extension [NOSE_DOCTEST_EXTENSION]")
733 752 # Set the default as a list, if given in env; otherwise
734 753 # an additional value set on the command line will cause
735 754 # an error.
736 755 env_setting = env.get('NOSE_DOCTEST_EXTENSION')
737 756 if env_setting is not None:
738 757 parser.set_defaults(doctestExtension=tolist(env_setting))
739 758
740 759
741 760 def configure(self, options, config):
742 761 Plugin.configure(self, options, config)
743 762 self.doctest_tests = options.doctest_tests
744 763 self.extension = tolist(options.doctestExtension)
745 764
746 765 self.parser = doctest.DocTestParser()
747 766 self.finder = DocTestFinder()
748 767 self.checker = IPDoctestOutputChecker()
749 768 self.globs = None
750 769 self.extraglobs = None
751 770
752 771
753 772 def loadTestsFromExtensionModule(self,filename):
754 773 bpath,mod = os.path.split(filename)
755 774 modname = os.path.splitext(mod)[0]
756 775 try:
757 776 sys.path.append(bpath)
758 777 module = __import__(modname)
759 778 tests = list(self.loadTestsFromModule(module))
760 779 finally:
761 780 sys.path.pop()
762 781 return tests
763 782
764 783 # NOTE: the method below is almost a copy of the original one in nose, with
765 784 # a few modifications to control output checking.
766 785
767 786 def loadTestsFromModule(self, module):
768 787 #print '*** ipdoctest - lTM',module # dbg
769 788
770 789 if not self.matches(module.__name__):
771 790 log.debug("Doctest doesn't want module %s", module)
772 791 return
773 792
774 793 tests = self.finder.find(module,globs=self.globs,
775 794 extraglobs=self.extraglobs)
776 795 if not tests:
777 796 return
778 797
779 798 # always use whitespace and ellipsis options
780 799 optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
781 800
782 801 tests.sort()
783 802 module_file = module.__file__
784 803 if module_file[-4:] in ('.pyc', '.pyo'):
785 804 module_file = module_file[:-1]
786 805 for test in tests:
787 806 if not test.examples:
788 807 continue
789 808 if not test.filename:
790 809 test.filename = module_file
791 810
792 811 yield DocTestCase(test,
793 812 optionflags=optionflags,
794 813 checker=self.checker)
795 814
796 815
797 816 def loadTestsFromFile(self, filename):
798 817 if is_extension_module(filename):
799 818 for t in self.loadTestsFromExtensionModule(filename):
800 819 yield t
801 820 else:
802 821 if self.extension and anyp(filename.endswith, self.extension):
803 822 name = os.path.basename(filename)
804 823 dh = open(filename)
805 824 try:
806 825 doc = dh.read()
807 826 finally:
808 827 dh.close()
809 828 test = self.parser.get_doctest(
810 829 doc, globs={'__file__': filename}, name=name,
811 830 filename=filename, lineno=0)
812 831 if test.examples:
813 832 #print 'FileCase:',test.examples # dbg
814 833 yield DocFileCase(test)
815 834 else:
816 835 yield False # no tests to load
817 836
818 837 def wantFile(self,filename):
819 838 """Return whether the given filename should be scanned for tests.
820 839
821 840 Modified version that accepts extension modules as valid containers for
822 841 doctests.
823 842 """
824 843 #print '*** ipdoctest- wantFile:',filename # dbg
825 844
826 845 for pat in self.exclude_patterns:
827 846 if pat.search(filename):
828 847 #print '###>>> SKIP:',filename # dbg
829 848 return False
830 849
831 850 if is_extension_module(filename):
832 851 return True
833 852 else:
834 853 return doctests.Doctest.wantFile(self,filename)
835 854
836 855
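Reviewer note: the exclusion hook above is driven entirely by the constructor argument; a small usage sketch with placeholder patterns, assuming ExtensionDoctest from this module is in scope:

    # Any filename matching one of these regexes (via .search, as the __init__
    # docstring notes) is refused by wantFile().
    plugin = ExtensionDoctest(exclude_patterns=[r'setup\.py$',
                                                r'[/\\]attic[/\\]'])
    assert plugin.wantFile('/src/ipython/attic/old_magic.py') is False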
837 856 class IPythonDoctest(ExtensionDoctest):
838 857 """Nose Plugin that supports doctests in extension modules.
839 858 """
840 859 name = 'ipdoctest' # call nosetests with --with-ipdoctest
841 860 enabled = True
842 861
843 862 def makeTest(self, obj, parent):
844 863 """Look for doctests in the given object, which will be a
845 864 function, method or class.
846 865 """
847 866 # always use whitespace and ellipsis options
848 867 optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
849 868
850 869 doctests = self.finder.find(obj, module=getmodule(parent))
851 870 if doctests:
852 871 for test in doctests:
853 872 if len(test.examples) == 0:
854 873 continue
855 874
856 875 yield DocTestCase(test, obj=obj,
857 876 optionflags=optionflags,
858 877 checker=self.checker)
859 878
860 879 def options(self, parser, env=os.environ):
861 880 Plugin.options(self, parser, env)
862 881 parser.add_option('--ipdoctest-tests', action='store_true',
863 882 dest='ipdoctest_tests',
864 883 default=env.get('NOSE_IPDOCTEST_TESTS',True),
865 884 help="Also look for doctests in test modules. "
866 885 "Note that classes, methods and functions should "
867 886 "have either doctests or non-doctest tests, "
868 887 "not both. [NOSE_IPDOCTEST_TESTS]")
869 888 parser.add_option('--ipdoctest-extension', action="append",
870 889 dest="ipdoctest_extension",
871 890 help="Also look for doctests in files with "
872 891 "this extension [NOSE_IPDOCTEST_EXTENSION]")
873 892 # Set the default as a list, if given in env; otherwise
874 893 # an additional value set on the command line will cause
875 894 # an error.
876 895 env_setting = env.get('NOSE_IPDOCTEST_EXTENSION')
877 896 if env_setting is not None:
878 897 parser.set_defaults(ipdoctest_extension=tolist(env_setting))
879 898
880 899 def configure(self, options, config):
881 900 Plugin.configure(self, options, config)
882 901 self.doctest_tests = options.ipdoctest_tests
883 902 self.extension = tolist(options.ipdoctest_extension)
884 903
885 904 self.parser = IPDocTestParser()
886 905 self.finder = DocTestFinder(parser=self.parser)
887 906 self.checker = IPDoctestOutputChecker()
888 907 self.globs = None
889 908 self.extraglobs = None
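Reviewer note: for exercising the plugin outside the iptest entry point, a rough invocation sketch. The import path and the addplugins keyword are assumptions (older nose releases expose a plugins argument instead), so adjust for the installed layout and nose version:

    import nose
    from IPython.testing.plugin.ipdoctest import IPythonDoctest

    # '--with-ipdoctest' follows from name = 'ipdoctest' above; addplugins makes
    # the plugin available without installing it as a nose entry point.
    nose.core.TestProgram(argv=['nosetests', '--with-ipdoctest', 'IPython.tests'],
                          addplugins=[IPythonDoctest()])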
@@ -1,151 +1,235 b''
1 1 """Tests for various magic functions.
2 2
3 3 Needs to be run by nose (to make ipython session available).
4 4 """
5 5
6 6 # Standard library imports
7 7 import os
8 8 import sys
9 import tempfile
10 import types
9 11
10 12 # Third-party imports
11 13 import nose.tools as nt
12 14
13 15 # From our own code
14 16 from IPython.testing import decorators as dec
17 from IPython.testing import tools as tt
15 18
16 19 #-----------------------------------------------------------------------------
17 20 # Test functions begin
18 21
19 22 def test_rehashx():
20 23 # clear up everything
21 24 _ip.IP.alias_table.clear()
22 25 del _ip.db['syscmdlist']
23 26
24 27 _ip.magic('rehashx')
25 28 # Practically ALL ipython development systems will have more than 10 aliases
26 29
27 30 assert len(_ip.IP.alias_table) > 10
28 31 for key, val in _ip.IP.alias_table.items():
29 32 # we must strip dots from alias names
30 33 assert '.' not in key
31 34
32 35 # rehashx must fill up syscmdlist
33 36 scoms = _ip.db['syscmdlist']
34 37 assert len(scoms) > 10
35 38
36 39
37 def doctest_run_ns():
38 """Classes declared in %run scripts must be instantiable afterwards.
39
40 In [11]: run tclass foo
41
42 In [12]: isinstance(f(),foo)
43 Out[12]: True
44 """
45
46
47 def doctest_run_ns2():
48 """Classes declared in %run scripts must be instantiable afterwards.
49
50 In [4]: run tclass C-first_pass
51
52 In [5]: run tclass C-second_pass
53 tclass.py: deleting object: C-first_pass
54 """
55
56
57 40 def doctest_hist_f():
58 41 """Test %hist -f with temporary filename.
59 42
60 43 In [9]: import tempfile
61 44
62 45 In [10]: tfile = tempfile.mktemp('.py','tmp-ipython-')
63 46
64 47 In [11]: %history -n -f $tfile 3
65 48 """
66 49
67 50
68 51 def doctest_hist_r():
69 52 """Test %hist -r
70 53
71 54 XXX - This test is not recording the output correctly. Not sure why...
72 55
73 56 In [6]: x=1
74 57
75 58 In [7]: hist -n -r 2
76 59 x=1 # random
77 60 hist -n -r 2 # random
78 61 """
79 62
80 63
81 64 def test_obj_del():
82 65 """Test that object's __del__ methods are called on exit."""
83 66 test_dir = os.path.dirname(__file__)
84 67 del_file = os.path.join(test_dir,'obj_del.py')
85 68 out = _ip.IP.getoutput('ipython %s' % del_file)
86 69 nt.assert_equals(out,'obj_del.py: object A deleted')
87 70
88 71
89 72 def test_shist():
90 73 # Simple tests of ShadowHist class - test generator.
91 74 import os, shutil, tempfile
92 75
93 76 from IPython.Extensions import pickleshare
94 77 from IPython.history import ShadowHist
95 78
96 79 tfile = tempfile.mktemp('','tmp-ipython-')
97 80
98 81 db = pickleshare.PickleShareDB(tfile)
99 82 s = ShadowHist(db)
100 83 s.add('hello')
101 84 s.add('world')
102 85 s.add('hello')
103 86 s.add('hello')
104 87 s.add('karhu')
105 88
106 89 yield nt.assert_equals,s.all(),[(1, 'hello'), (2, 'world'), (3, 'karhu')]
107 90
108 91 yield nt.assert_equal,s.get(2),'world'
109 92
110 93 shutil.rmtree(tfile)
111 94
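Reviewer note: the yield-based style in test_shist (and test_numpy_clear_array_undec below) is ordinary nose test generation; a minimal standalone form of the same pattern:

    import nose.tools as nt

    def test_pairs_generator():
        # Each yielded (callable, arg, arg) tuple is collected and run by nose
        # as its own test case, exactly like the yields in test_shist above.
        for a, b in [(1, 1), (2, 2), (3, 3)]:
            yield nt.assert_equals, a, b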
112 95 @dec.skipif_not_numpy
113 96 def test_numpy_clear_array_undec():
114 97 _ip.ex('import numpy as np')
115 98 _ip.ex('a = np.empty(2)')
116 99
117 100 yield nt.assert_true,'a' in _ip.user_ns
118 101 _ip.magic('clear array')
119 102 yield nt.assert_false,'a' in _ip.user_ns
120 103
121 104
122 105 @dec.skip()
123 106 def test_fail_dec(*a,**k):
124 107 yield nt.assert_true, False
125 108
126 109 @dec.skip('This one should not run')
127 110 def test_fail_dec2(*a,**k):
128 111 yield nt.assert_true, False
129 112
130 113 @dec.skipknownfailure
131 114 def test_fail_dec3(*a,**k):
132 115 yield nt.assert_true, False
133 116
134 117
135 118 def doctest_refbug():
136 119 """Very nasty problem with references held by multiple runs of a script.
137 120 See: https://bugs.launchpad.net/ipython/+bug/269966
138 121
139 122 In [1]: _ip.IP.clear_main_mod_cache()
140 123
141 124 In [2]: run refbug
142 125
143 126 In [3]: call_f()
144 127 lowercased: hello
145 128
146 129 In [4]: run refbug
147 130
148 131 In [5]: call_f()
149 132 lowercased: hello
150 133 lowercased: hello
151 134 """
135
136 #-----------------------------------------------------------------------------
137 # Tests for %run
138 #-----------------------------------------------------------------------------
139
140 # %run is critical enough that it's a good idea to have a solid collection of
141 # tests for it, some as doctests and some as normal tests.
142
143 def doctest_run_ns():
144 """Classes declared in %run scripts must be instantiable afterwards.
145
146 In [11]: run tclass foo
147
148 In [12]: isinstance(f(),foo)
149 Out[12]: True
150 """
151
152
153 def doctest_run_ns2():
154 """Classes declared in %run scripts must be instantiable afterwards.
155
156 In [4]: run tclass C-first_pass
157
158 In [5]: run tclass C-second_pass
159 tclass.py: deleting object: C-first_pass
160 """
161
162 def doctest_run_builtins():
163 """Check that %run doesn't damage __builtins__ via a doctest.
164
165 This is similar to the test_run_builtins, but I want *both* forms of the
166 test to catch any possible glitches in our testing machinery, since that
167 modifies %run somewhat. So for this, we have both a normal test (below)
168 and a doctest (this one).
169
170 In [1]: import tempfile
171
172 In [2]: bid1 = id(__builtins__)
173
174 In [3]: f = tempfile.NamedTemporaryFile()
175
176 In [4]: f.write('pass\\n')
177
178 In [5]: f.flush()
179
180 In [6]: print 'B1:',type(__builtins__)
181 B1: <type 'module'>
182
183 In [7]: %run $f.name
184
185 In [8]: bid2 = id(__builtins__)
186
187 In [9]: print 'B2:',type(__builtins__)
188 B2: <type 'module'>
189
190 In [10]: bid1 == bid2
191 Out[10]: True
192 """
193
194 # For some tests, it will be handy to organize them in a class with a common
195 # setup that makes a temp file
196
197 class TestMagicRun(object):
198
199 def setup(self):
200 """Make a valid python temp file."""
201 f = tempfile.NamedTemporaryFile()
202 f.write('pass\n')
203 f.flush()
204 self.tmpfile = f
205
206 def run_tmpfile(self):
207 _ip.magic('run %s' % self.tmpfile.name)
208
209 def test_builtins_id(self):
210 """Check that %run doesn't damage __builtins__ """
211
212 # Test that the id of __builtins__ is not modified by %run
213 bid1 = id(_ip.user_ns['__builtins__'])
214 self.run_tmpfile()
215 bid2 = id(_ip.user_ns['__builtins__'])
216 tt.assert_equals(bid1, bid2)
217
218 def test_builtins_type(self):
219 """Check that the type of __builtins__ doesn't change with %run.
220
221 However, the above could pass if __builtins__ was already modified to
222 be a dict (it should be a module) by a previous use of %run. So we
223 also check explicitly that it really is a module:
224 """
225 self.run_tmpfile()
226 tt.assert_equals(type(_ip.user_ns['__builtins__']),type(sys))
227
228 def test_prompts(self):
229 """Test that prompts correctly generate after %run"""
230 self.run_tmpfile()
231 p2 = str(_ip.IP.outputcache.prompt2).strip()
232 nt.assert_equals(p2[:3], '...')
233
234 def teardown(self):
235 self.tmpfile.close()