Merge pull request #1870 from minrk/captureio...
Fernando Perez
r7411:024e3846 merge
@@ -0,0 +1,182 b''
1 {
2 "metadata": {
3 "name": "Capturing Output"
4 },
5 "nbformat": 3,
6 "worksheets": [
7 {
8 "cells": [
9 {
10 "cell_type": "heading",
11 "level": 1,
12 "source": [
13 "Capturing Output with <tt>%%capture</tt>"
14 ]
15 },
16 {
17 "cell_type": "markdown",
18 "source": [
19 "One of IPython's new cell magics is `%%capture`, which captures stdout/err for a cell,",
20 "and discards them or stores them in variables in your namespace."
21 ]
22 },
23 {
24 "cell_type": "code",
25 "input": [
26 "import sys"
27 ],
28 "language": "python",
29 "outputs": []
30 },
31 {
32 "cell_type": "markdown",
33 "source": [
34 "By default, it just swallows it up. This is a simple way to suppress unwanted output."
35 ]
36 },
37 {
38 "cell_type": "code",
39 "input": [
40 "%%capture",
41 "print 'hi, stdout'",
42 "print >> sys.stderr, 'hi, stderr'"
43 ],
44 "language": "python",
45 "outputs": []
46 },
47 {
48 "cell_type": "markdown",
49 "source": [
50 "If you specify a name, then stdout and stderr will be stored in an object in your namespace."
51 ]
52 },
53 {
54 "cell_type": "code",
55 "input": [
56 "%%capture captured",
57 "print 'hi, stdout'",
58 "print >> sys.stderr, 'hi, stderr'"
59 ],
60 "language": "python",
61 "outputs": []
62 },
63 {
64 "cell_type": "code",
65 "input": [
66 "captured"
67 ],
68 "language": "python",
69 "outputs": []
70 },
71 {
72 "cell_type": "markdown",
73 "source": [
74 "Calling the object writes the output to stdout/err as appropriate."
75 ]
76 },
77 {
78 "cell_type": "code",
79 "input": [
80 "captured()"
81 ],
82 "language": "python",
83 "outputs": []
84 },
85 {
86 "cell_type": "code",
87 "input": [
88 "captured.stdout"
89 ],
90 "language": "python",
91 "outputs": []
92 },
93 {
94 "cell_type": "code",
95 "input": [
96 "captured.stderr"
97 ],
98 "language": "python",
99 "outputs": []
100 },
101 {
102 "cell_type": "markdown",
103 "source": [
104 "`%%capture` only captures stdout/err, not displaypub, so you can still do plots and use the display protocol inside %%capture"
105 ]
106 },
107 {
108 "cell_type": "code",
109 "input": [
110 "%pylab inline"
111 ],
112 "language": "python",
113 "outputs": []
114 },
115 {
116 "cell_type": "code",
117 "input": [
118 "%%capture wontshutup",
119 "",
120 "print \"setting up X\"",
121 "x = np.linspace(0,5,1000)",
122 "print \"step 2: constructing y-data\"",
123 "y = np.sin(x)",
124 "print \"step 3: display info about y\"",
125 "plt.plot(x,y)",
126 "print \"okay, I'm done now\""
127 ],
128 "language": "python",
129 "outputs": []
130 },
131 {
132 "cell_type": "code",
133 "input": [
134 "wontshutup()"
135 ],
136 "language": "python",
137 "outputs": []
138 },
139 {
140 "cell_type": "markdown",
141 "source": [
142 "And you can selectively disable capturing stdout or stderr by passing `--no-stdout/err`."
143 ]
144 },
145 {
146 "cell_type": "code",
147 "input": [
148 "%%capture cap --no-stderr",
149 "print 'hi, stdout'",
150 "print >> sys.stderr, \"hello, stderr\""
151 ],
152 "language": "python",
153 "outputs": []
154 },
155 {
156 "cell_type": "code",
157 "input": [
158 "cap.stdout"
159 ],
160 "language": "python",
161 "outputs": []
162 },
163 {
164 "cell_type": "code",
165 "input": [
166 "cap.stderr"
167 ],
168 "language": "python",
169 "outputs": []
170 },
171 {
172 "cell_type": "code",
173 "input": [
174 ""
175 ],
176 "language": "python",
177 "outputs": []
178 }
179 ]
180 }
181 ]
182 } No newline at end of file
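
The notebook above walks through the new `%%capture` cell magic interactively. For readers who want the same flow in a plain script, here is a minimal sketch that drives the underlying context manager directly; it assumes the `IPython.utils.io.capture_output` API introduced by this pull request (a context manager yielding an object with `stdout`/`stderr` attributes), as used in the hunks below.

    # Sketch: capture stdout/err outside the Notebook with the machinery this
    # PR adds. Assumes IPython.utils.io.capture_output as shown in the
    # execution.py and parallel test hunks below.
    import sys
    from IPython.utils.io import capture_output

    with capture_output() as io:
        print 'hi, stdout'
        print >> sys.stderr, 'hi, stderr'

    # Both streams are available as plain strings afterwards.
    print 'captured stdout:', repr(io.stdout)
    print 'captured stderr:', repr(io.stderr)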
@@ -1,990 +1,1022 b''
1 1 """Implementation of execution-related magic functions.
2 2 """
3 3 #-----------------------------------------------------------------------------
4 4 # Copyright (c) 2012 The IPython Development Team.
5 5 #
6 6 # Distributed under the terms of the Modified BSD License.
7 7 #
8 8 # The full license is in the file COPYING.txt, distributed with this software.
9 9 #-----------------------------------------------------------------------------
10 10
11 11 #-----------------------------------------------------------------------------
12 12 # Imports
13 13 #-----------------------------------------------------------------------------
14 14
15 15 # Stdlib
16 16 import __builtin__ as builtin_mod
17 17 import bdb
18 18 import os
19 19 import sys
20 20 import time
21 21 from StringIO import StringIO
22 22
23 23 # cProfile was added in Python2.5
24 24 try:
25 25 import cProfile as profile
26 26 import pstats
27 27 except ImportError:
28 28 # profile isn't bundled by default in Debian for license reasons
29 29 try:
30 30 import profile, pstats
31 31 except ImportError:
32 32 profile = pstats = None
33 33
34 34 # Our own packages
35 35 from IPython.core import debugger, oinspect
36 from IPython.core import magic_arguments
36 37 from IPython.core import page
37 38 from IPython.core.error import UsageError
38 39 from IPython.core.macro import Macro
39 from IPython.core.magic import (Magics, magics_class, line_magic,
40 from IPython.core.magic import (Magics, magics_class, line_magic, cell_magic,
40 41 line_cell_magic, on_off, needs_local_scope)
41 42 from IPython.testing.skipdoctest import skip_doctest
42 43 from IPython.utils import py3compat
44 from IPython.utils.io import capture_output
43 45 from IPython.utils.ipstruct import Struct
44 46 from IPython.utils.module_paths import find_mod
45 47 from IPython.utils.path import get_py_filename, unquote_filename
46 48 from IPython.utils.timing import clock, clock2
47 49 from IPython.utils.warn import warn, error
48 50
49 51 #-----------------------------------------------------------------------------
50 52 # Magic implementation classes
51 53 #-----------------------------------------------------------------------------
52 54
53 55 @magics_class
54 56 class ExecutionMagics(Magics):
55 57 """Magics related to code execution, debugging, profiling, etc.
56 58
57 59 """
58 60
59 61 def __init__(self, shell):
60 62 super(ExecutionMagics, self).__init__(shell)
61 63 if profile is None:
62 64 self.prun = self.profile_missing_notice
63 65 # Default execution function used to actually run user code.
64 66 self.default_runner = None
65 67
66 68 def profile_missing_notice(self, *args, **kwargs):
67 69 error("""\
68 70 The profile module could not be found. It has been removed from the standard
69 71 python packages because of its non-free license. To use profiling, install the
70 72 python-profiler package from non-free.""")
71 73
72 74 @skip_doctest
73 75 @line_cell_magic
74 76 def prun(self, parameter_s='', cell=None, user_mode=True,
75 77 opts=None,arg_lst=None,prog_ns=None):
76 78
77 79 """Run a statement through the python code profiler.
78 80
79 81 Usage, in line mode:
80 82 %prun [options] statement
81 83
82 84 Usage, in cell mode:
83 85 %%prun [options] [statement]
84 86 code...
85 87 code...
86 88
87 89 In cell mode, the additional code lines are appended to the (possibly
88 90 empty) statement in the first line. Cell mode allows you to easily
89 91 profile multiline blocks without having to put them in a separate
90 92 function.
91 93
92 94 The given statement (which doesn't require quote marks) is run via the
93 95 python profiler in a manner similar to the profile.run() function.
94 96 Namespaces are internally managed to work correctly; profile.run
95 97 cannot be used in IPython because it makes certain assumptions about
96 98 namespaces which do not hold under IPython.
97 99
98 100 Options:
99 101
100 102 -l <limit>: you can place restrictions on what or how much of the
101 103 profile gets printed. The limit value can be:
102 104
103 105 * A string: only information for function names containing this string
104 106 is printed.
105 107
106 108 * An integer: only these many lines are printed.
107 109
108 110 * A float (between 0 and 1): this fraction of the report is printed
109 111 (for example, use a limit of 0.4 to see the topmost 40% only).
110 112
111 113 You can combine several limits with repeated use of the option. For
112 114 example, '-l __init__ -l 5' will print only the topmost 5 lines of
113 115 information about class constructors.
114 116
115 117 -r: return the pstats.Stats object generated by the profiling. This
116 118 object has all the information about the profile in it, and you can
117 119 later use it for further analysis or in other functions.
118 120
119 121 -s <key>: sort profile by given key. You can provide more than one key
120 122 by using the option several times: '-s key1 -s key2 -s key3...'. The
121 123 default sorting key is 'time'.
122 124
123 125 The following is copied verbatim from the profile documentation
124 126 referenced below:
125 127
126 128 When more than one key is provided, additional keys are used as
127 129 secondary criteria when the there is equality in all keys selected
128 130 before them.
129 131
130 132 Abbreviations can be used for any key names, as long as the
131 133 abbreviation is unambiguous. The following are the keys currently
132 134 defined:
133 135
134 136 Valid Arg Meaning
135 137 "calls" call count
136 138 "cumulative" cumulative time
137 139 "file" file name
138 140 "module" file name
139 141 "pcalls" primitive call count
140 142 "line" line number
141 143 "name" function name
142 144 "nfl" name/file/line
143 145 "stdname" standard name
144 146 "time" internal time
145 147
146 148 Note that all sorts on statistics are in descending order (placing
147 149 most time consuming items first), where as name, file, and line number
148 150 searches are in ascending order (i.e., alphabetical). The subtle
149 151 distinction between "nfl" and "stdname" is that the standard name is a
150 152 sort of the name as printed, which means that the embedded line
151 153 numbers get compared in an odd way. For example, lines 3, 20, and 40
152 154 would (if the file names were the same) appear in the string order
153 155 "20" "3" and "40". In contrast, "nfl" does a numeric compare of the
154 156 line numbers. In fact, sort_stats("nfl") is the same as
155 157 sort_stats("name", "file", "line").
156 158
157 159 -T <filename>: save profile results as shown on screen to a text
158 160 file. The profile is still shown on screen.
159 161
160 162 -D <filename>: save (via dump_stats) profile statistics to given
161 163 filename. This data is in a format understood by the pstats module, and
162 164 is generated by a call to the dump_stats() method of profile
163 165 objects. The profile is still shown on screen.
164 166
165 167 -q: suppress output to the pager. Best used with -T and/or -D above.
166 168
167 169 If you want to run complete programs under the profiler's control, use
168 170 '%run -p [prof_opts] filename.py [args to program]' where prof_opts
169 171 contains profiler specific options as described here.
170 172
171 173 You can read the complete documentation for the profile module with::
172 174
173 175 In [1]: import profile; profile.help()
174 176 """
175 177
176 178 opts_def = Struct(D=[''],l=[],s=['time'],T=[''])
177 179
178 180 if user_mode: # regular user call
179 181 opts,arg_str = self.parse_options(parameter_s,'D:l:rs:T:q',
180 182 list_all=True, posix=False)
181 183 namespace = self.shell.user_ns
182 184 if cell is not None:
183 185 arg_str += '\n' + cell
184 186 else: # called to run a program by %run -p
185 187 try:
186 188 filename = get_py_filename(arg_lst[0])
187 189 except IOError as e:
188 190 try:
189 191 msg = str(e)
190 192 except UnicodeError:
191 193 msg = e.message
192 194 error(msg)
193 195 return
194 196
195 197 arg_str = 'execfile(filename,prog_ns)'
196 198 namespace = {
197 199 'execfile': self.shell.safe_execfile,
198 200 'prog_ns': prog_ns,
199 201 'filename': filename
200 202 }
201 203
202 204 opts.merge(opts_def)
203 205
204 206 prof = profile.Profile()
205 207 try:
206 208 prof = prof.runctx(arg_str,namespace,namespace)
207 209 sys_exit = ''
208 210 except SystemExit:
209 211 sys_exit = """*** SystemExit exception caught in code being profiled."""
210 212
211 213 stats = pstats.Stats(prof).strip_dirs().sort_stats(*opts.s)
212 214
213 215 lims = opts.l
214 216 if lims:
215 217 lims = [] # rebuild lims with ints/floats/strings
216 218 for lim in opts.l:
217 219 try:
218 220 lims.append(int(lim))
219 221 except ValueError:
220 222 try:
221 223 lims.append(float(lim))
222 224 except ValueError:
223 225 lims.append(lim)
224 226
225 227 # Trap output.
226 228 stdout_trap = StringIO()
227 229
228 230 if hasattr(stats,'stream'):
229 231 # In newer versions of python, the stats object has a 'stream'
230 232 # attribute to write into.
231 233 stats.stream = stdout_trap
232 234 stats.print_stats(*lims)
233 235 else:
234 236 # For older versions, we manually redirect stdout during printing
235 237 sys_stdout = sys.stdout
236 238 try:
237 239 sys.stdout = stdout_trap
238 240 stats.print_stats(*lims)
239 241 finally:
240 242 sys.stdout = sys_stdout
241 243
242 244 output = stdout_trap.getvalue()
243 245 output = output.rstrip()
244 246
245 247 if 'q' not in opts:
246 248 page.page(output)
247 249 print sys_exit,
248 250
249 251 dump_file = opts.D[0]
250 252 text_file = opts.T[0]
251 253 if dump_file:
252 254 dump_file = unquote_filename(dump_file)
253 255 prof.dump_stats(dump_file)
254 256 print '\n*** Profile stats marshalled to file',\
255 257 `dump_file`+'.',sys_exit
256 258 if text_file:
257 259 text_file = unquote_filename(text_file)
258 260 pfile = open(text_file,'w')
259 261 pfile.write(output)
260 262 pfile.close()
261 263 print '\n*** Profile printout saved to text file',\
262 264 `text_file`+'.',sys_exit
263 265
264 266 if opts.has_key('r'):
265 267 return stats
266 268 else:
267 269 return None
268 270
269 271 @line_magic
270 272 def pdb(self, parameter_s=''):
271 273 """Control the automatic calling of the pdb interactive debugger.
272 274
273 275 Call as '%pdb on', '%pdb 1', '%pdb off' or '%pdb 0'. If called without
274 276 argument it works as a toggle.
275 277
276 278 When an exception is triggered, IPython can optionally call the
277 279 interactive pdb debugger after the traceback printout. %pdb toggles
278 280 this feature on and off.
279 281
280 282 The initial state of this feature is set in your configuration
281 283 file (the option is ``InteractiveShell.pdb``).
282 284
283 285 If you want to just activate the debugger AFTER an exception has fired,
284 286 without having to type '%pdb on' and rerunning your code, you can use
285 287 the %debug magic."""
286 288
287 289 par = parameter_s.strip().lower()
288 290
289 291 if par:
290 292 try:
291 293 new_pdb = {'off':0,'0':0,'on':1,'1':1}[par]
292 294 except KeyError:
293 295 print ('Incorrect argument. Use on/1, off/0, '
294 296 'or nothing for a toggle.')
295 297 return
296 298 else:
297 299 # toggle
298 300 new_pdb = not self.shell.call_pdb
299 301
300 302 # set on the shell
301 303 self.shell.call_pdb = new_pdb
302 304 print 'Automatic pdb calling has been turned',on_off(new_pdb)
303 305
304 306 @line_magic
305 307 def debug(self, parameter_s=''):
306 308 """Activate the interactive debugger in post-mortem mode.
307 309
308 310 If an exception has just occurred, this lets you inspect its stack
309 311 frames interactively. Note that this will always work only on the last
310 312 traceback that occurred, so you must call this quickly after an
311 313 exception that you wish to inspect has fired, because if another one
312 314 occurs, it clobbers the previous one.
313 315
314 316 If you want IPython to automatically do this on every exception, see
315 317 the %pdb magic for more details.
316 318 """
317 319 self.shell.debugger(force=True)
318 320
319 321 @line_magic
320 322 def tb(self, s):
321 323 """Print the last traceback with the currently active exception mode.
322 324
323 325 See %xmode for changing exception reporting modes."""
324 326 self.shell.showtraceback()
325 327
326 328 @skip_doctest
327 329 @line_magic
328 330 def run(self, parameter_s='', runner=None,
329 331 file_finder=get_py_filename):
330 332 """Run the named file inside IPython as a program.
331 333
332 334 Usage:\\
333 335 %run [-n -i -t [-N<N>] -d [-b<N>] -p [profile options]] file [args]
334 336
335 337 Parameters after the filename are passed as command-line arguments to
336 338 the program (put in sys.argv). Then, control returns to IPython's
337 339 prompt.
338 340
339 341 This is similar to running at a system prompt:\\
340 342 $ python file args\\
341 343 but with the advantage of giving you IPython's tracebacks, and of
342 344 loading all variables into your interactive namespace for further use
343 345 (unless -p is used, see below).
344 346
345 347 The file is executed in a namespace initially consisting only of
346 348 __name__=='__main__' and sys.argv constructed as indicated. It thus
347 349 sees its environment as if it were being run as a stand-alone program
348 350 (except for sharing global objects such as previously imported
349 351 modules). But after execution, the IPython interactive namespace gets
350 352 updated with all variables defined in the program (except for __name__
351 353 and sys.argv). This allows for very convenient loading of code for
352 354 interactive work, while giving each program a 'clean sheet' to run in.
353 355
354 356 Options:
355 357
356 358 -n: __name__ is NOT set to '__main__', but to the running file's name
357 359 without extension (as python does under import). This allows running
358 360 scripts and reloading the definitions in them without calling code
359 361 protected by an ' if __name__ == "__main__" ' clause.
360 362
361 363 -i: run the file in IPython's namespace instead of an empty one. This
362 364 is useful if you are experimenting with code written in a text editor
363 365 which depends on variables defined interactively.
364 366
365 367 -e: ignore sys.exit() calls or SystemExit exceptions in the script
366 368 being run. This is particularly useful if IPython is being used to
367 369 run unittests, which always exit with a sys.exit() call. In such
368 370 cases you are interested in the output of the test results, not in
369 371 seeing a traceback of the unittest module.
370 372
371 373 -t: print timing information at the end of the run. IPython will give
372 374 you an estimated CPU time consumption for your script, which under
373 375 Unix uses the resource module to avoid the wraparound problems of
374 376 time.clock(). Under Unix, an estimate of time spent on system tasks
375 377 is also given (for Windows platforms this is reported as 0.0).
376 378
377 379 If -t is given, an additional -N<N> option can be given, where <N>
378 380 must be an integer indicating how many times you want the script to
379 381 run. The final timing report will include total and per run results.
380 382
381 383 For example (testing the script uniq_stable.py)::
382 384
383 385 In [1]: run -t uniq_stable
384 386
385 387 IPython CPU timings (estimated):\\
386 388 User : 0.19597 s.\\
387 389 System: 0.0 s.\\
388 390
389 391 In [2]: run -t -N5 uniq_stable
390 392
391 393 IPython CPU timings (estimated):\\
392 394 Total runs performed: 5\\
393 395 Times : Total Per run\\
394 396 User : 0.910862 s, 0.1821724 s.\\
395 397 System: 0.0 s, 0.0 s.
396 398
397 399 -d: run your program under the control of pdb, the Python debugger.
398 400 This allows you to execute your program step by step, watch variables,
399 401 etc. Internally, what IPython does is similar to calling:
400 402
401 403 pdb.run('execfile("YOURFILENAME")')
402 404
403 405 with a breakpoint set on line 1 of your file. You can change the line
404 406 number for this automatic breakpoint to be <N> by using the -bN option
405 407 (where N must be an integer). For example::
406 408
407 409 %run -d -b40 myscript
408 410
409 411 will set the first breakpoint at line 40 in myscript.py. Note that
410 412 the first breakpoint must be set on a line which actually does
411 413 something (not a comment or docstring) for it to stop execution.
412 414
413 415 When the pdb debugger starts, you will see a (Pdb) prompt. You must
414 416 first enter 'c' (without quotes) to start execution up to the first
415 417 breakpoint.
416 418
417 419 Entering 'help' gives information about the use of the debugger. You
418 420 can easily see pdb's full documentation with "import pdb;pdb.help()"
419 421 at a prompt.
420 422
421 423 -p: run program under the control of the Python profiler module (which
422 424 prints a detailed report of execution times, function calls, etc).
423 425
424 426 You can pass other options after -p which affect the behavior of the
425 427 profiler itself. See the docs for %prun for details.
426 428
427 429 In this mode, the program's variables do NOT propagate back to the
428 430 IPython interactive namespace (because they remain in the namespace
429 431 where the profiler executes them).
430 432
431 433 Internally this triggers a call to %prun, see its documentation for
432 434 details on the options available specifically for profiling.
433 435
434 436 There is one special usage for which the text above doesn't apply:
435 437 if the filename ends with .ipy, the file is run as ipython script,
436 438 just as if the commands were written on IPython prompt.
437 439
438 440 -m: specify module name to load instead of script path. Similar to
439 441 the -m option for the python interpreter. Use this option last if you
440 442 want to combine with other %run options. Unlike the python interpreter
441 443 only source modules are allowed no .pyc or .pyo files.
442 444 For example::
443 445
444 446 %run -m example
445 447
446 448 will run the example module.
447 449
448 450 """
449 451
450 452 # get arguments and set sys.argv for program to be run.
451 453 opts, arg_lst = self.parse_options(parameter_s, 'nidtN:b:pD:l:rs:T:em:',
452 454 mode='list', list_all=1)
453 455 if "m" in opts:
454 456 modulename = opts["m"][0]
455 457 modpath = find_mod(modulename)
456 458 if modpath is None:
457 459 warn('%r is not a valid modulename on sys.path'%modulename)
458 460 return
459 461 arg_lst = [modpath] + arg_lst
460 462 try:
461 463 filename = file_finder(arg_lst[0])
462 464 except IndexError:
463 465 warn('you must provide at least a filename.')
464 466 print '\n%run:\n', oinspect.getdoc(self.run)
465 467 return
466 468 except IOError as e:
467 469 try:
468 470 msg = str(e)
469 471 except UnicodeError:
470 472 msg = e.message
471 473 error(msg)
472 474 return
473 475
474 476 if filename.lower().endswith('.ipy'):
475 477 self.shell.safe_execfile_ipy(filename)
476 478 return
477 479
478 480 # Control the response to exit() calls made by the script being run
479 481 exit_ignore = 'e' in opts
480 482
481 483 # Make sure that the running script gets a proper sys.argv as if it
482 484 # were run from a system shell.
483 485 save_argv = sys.argv # save it for later restoring
484 486
485 487 # simulate shell expansion on arguments, at least tilde expansion
486 488 args = [ os.path.expanduser(a) for a in arg_lst[1:] ]
487 489
488 490 sys.argv = [filename] + args # put in the proper filename
489 491 # protect sys.argv from potential unicode strings on Python 2:
490 492 if not py3compat.PY3:
491 493 sys.argv = [ py3compat.cast_bytes(a) for a in sys.argv ]
492 494
493 495 if 'i' in opts:
494 496 # Run in user's interactive namespace
495 497 prog_ns = self.shell.user_ns
496 498 __name__save = self.shell.user_ns['__name__']
497 499 prog_ns['__name__'] = '__main__'
498 500 main_mod = self.shell.new_main_mod(prog_ns)
499 501 else:
500 502 # Run in a fresh, empty namespace
501 503 if 'n' in opts:
502 504 name = os.path.splitext(os.path.basename(filename))[0]
503 505 else:
504 506 name = '__main__'
505 507
506 508 main_mod = self.shell.new_main_mod()
507 509 prog_ns = main_mod.__dict__
508 510 prog_ns['__name__'] = name
509 511
510 512 # Since '%run foo' emulates 'python foo.py' at the cmd line, we must
511 513 # set the __file__ global in the script's namespace
512 514 prog_ns['__file__'] = filename
513 515
514 516 # pickle fix. See interactiveshell for an explanation. But we need to
515 517 # make sure that, if we overwrite __main__, we replace it at the end
516 518 main_mod_name = prog_ns['__name__']
517 519
518 520 if main_mod_name == '__main__':
519 521 restore_main = sys.modules['__main__']
520 522 else:
521 523 restore_main = False
522 524
523 525 # This needs to be undone at the end to prevent holding references to
524 526 # every single object ever created.
525 527 sys.modules[main_mod_name] = main_mod
526 528
527 529 try:
528 530 stats = None
529 531 with self.shell.readline_no_record:
530 532 if 'p' in opts:
531 533 stats = self.prun('', None, False, opts, arg_lst, prog_ns)
532 534 else:
533 535 if 'd' in opts:
534 536 deb = debugger.Pdb(self.shell.colors)
535 537 # reset Breakpoint state, which is moronically kept
536 538 # in a class
537 539 bdb.Breakpoint.next = 1
538 540 bdb.Breakpoint.bplist = {}
539 541 bdb.Breakpoint.bpbynumber = [None]
540 542 # Set an initial breakpoint to stop execution
541 543 maxtries = 10
542 544 bp = int(opts.get('b', [1])[0])
543 545 checkline = deb.checkline(filename, bp)
544 546 if not checkline:
545 547 for bp in range(bp + 1, bp + maxtries + 1):
546 548 if deb.checkline(filename, bp):
547 549 break
548 550 else:
549 551 msg = ("\nI failed to find a valid line to set "
550 552 "a breakpoint\n"
551 553 "after trying up to line: %s.\n"
552 554 "Please set a valid breakpoint manually "
553 555 "with the -b option." % bp)
554 556 error(msg)
555 557 return
556 558 # if we find a good linenumber, set the breakpoint
557 559 deb.do_break('%s:%s' % (filename, bp))
558 560 # Start file run
559 561 print "NOTE: Enter 'c' at the",
560 562 print "%s prompt to start your script." % deb.prompt
561 563 ns = {'execfile': py3compat.execfile, 'prog_ns': prog_ns}
562 564 try:
563 565 deb.run('execfile("%s", prog_ns)' % filename, ns)
564 566
565 567 except:
566 568 etype, value, tb = sys.exc_info()
567 569 # Skip three frames in the traceback: the %run one,
568 570 # one inside bdb.py, and the command-line typed by the
569 571 # user (run by exec in pdb itself).
570 572 self.shell.InteractiveTB(etype, value, tb, tb_offset=3)
571 573 else:
572 574 if runner is None:
573 575 runner = self.default_runner
574 576 if runner is None:
575 577 runner = self.shell.safe_execfile
576 578 if 't' in opts:
577 579 # timed execution
578 580 try:
579 581 nruns = int(opts['N'][0])
580 582 if nruns < 1:
581 583 error('Number of runs must be >=1')
582 584 return
583 585 except (KeyError):
584 586 nruns = 1
585 587 twall0 = time.time()
586 588 if nruns == 1:
587 589 t0 = clock2()
588 590 runner(filename, prog_ns, prog_ns,
589 591 exit_ignore=exit_ignore)
590 592 t1 = clock2()
591 593 t_usr = t1[0] - t0[0]
592 594 t_sys = t1[1] - t0[1]
593 595 print "\nIPython CPU timings (estimated):"
594 596 print " User : %10.2f s." % t_usr
595 597 print " System : %10.2f s." % t_sys
596 598 else:
597 599 runs = range(nruns)
598 600 t0 = clock2()
599 601 for nr in runs:
600 602 runner(filename, prog_ns, prog_ns,
601 603 exit_ignore=exit_ignore)
602 604 t1 = clock2()
603 605 t_usr = t1[0] - t0[0]
604 606 t_sys = t1[1] - t0[1]
605 607 print "\nIPython CPU timings (estimated):"
606 608 print "Total runs performed:", nruns
607 609 print " Times : %10.2f %10.2f" % ('Total', 'Per run')
608 610 print " User : %10.2f s, %10.2f s." % (t_usr, t_usr / nruns)
609 611 print " System : %10.2f s, %10.2f s." % (t_sys, t_sys / nruns)
610 612 twall1 = time.time()
611 613 print "Wall time: %10.2f s." % (twall1 - twall0)
612 614
613 615 else:
614 616 # regular execution
615 617 runner(filename, prog_ns, prog_ns, exit_ignore=exit_ignore)
616 618
617 619 if 'i' in opts:
618 620 self.shell.user_ns['__name__'] = __name__save
619 621 else:
620 622 # The shell MUST hold a reference to prog_ns so after %run
621 623 # exits, the python deletion mechanism doesn't zero it out
622 624 # (leaving dangling references).
623 625 self.shell.cache_main_mod(prog_ns, filename)
624 626 # update IPython interactive namespace
625 627
626 628 # Some forms of read errors on the file may mean the
627 629 # __name__ key was never set; using pop we don't have to
628 630 # worry about a possible KeyError.
629 631 prog_ns.pop('__name__', None)
630 632
631 633 self.shell.user_ns.update(prog_ns)
632 634 finally:
633 635 # It's a bit of a mystery why, but __builtins__ can change from
634 636 # being a module to becoming a dict missing some key data after
635 637 # %run. As best I can see, this is NOT something IPython is doing
636 638 # at all, and similar problems have been reported before:
637 639 # http://coding.derkeiler.com/Archive/Python/comp.lang.python/2004-10/0188.html
638 640 # Since this seems to be done by the interpreter itself, the best
639 641 # we can do is to at least restore __builtins__ for the user on
640 642 # exit.
641 643 self.shell.user_ns['__builtins__'] = builtin_mod
642 644
643 645 # Ensure key global structures are restored
644 646 sys.argv = save_argv
645 647 if restore_main:
646 648 sys.modules['__main__'] = restore_main
647 649 else:
648 650 # Remove from sys.modules the reference to main_mod we'd
649 651 # added. Otherwise it will trap references to objects
650 652 # contained therein.
651 653 del sys.modules[main_mod_name]
652 654
653 655 return stats
654 656
655 657 @skip_doctest
656 658 @line_cell_magic
657 659 def timeit(self, line='', cell=None):
658 660 """Time execution of a Python statement or expression
659 661
660 662 Usage, in line mode:
661 663 %timeit [-n<N> -r<R> [-t|-c]] statement
662 664 or in cell mode:
663 665 %%timeit [-n<N> -r<R> [-t|-c]] setup_code
664 666 code
665 667 code...
666 668
667 669 Time execution of a Python statement or expression using the timeit
668 670 module. This function can be used both as a line and cell magic:
669 671
670 672 - In line mode you can time a single-line statement (though multiple
671 673 ones can be chained with using semicolons).
672 674
673 675 - In cell mode, the statement in the first line is used as setup code
674 676 (executed but not timed) and the body of the cell is timed. The cell
675 677 body has access to any variables created in the setup code.
676 678
677 679 Options:
678 680 -n<N>: execute the given statement <N> times in a loop. If this value
679 681 is not given, a fitting value is chosen.
680 682
681 683 -r<R>: repeat the loop iteration <R> times and take the best result.
682 684 Default: 3
683 685
684 686 -t: use time.time to measure the time, which is the default on Unix.
685 687 This function measures wall time.
686 688
687 689 -c: use time.clock to measure the time, which is the default on
688 690 Windows and measures wall time. On Unix, resource.getrusage is used
689 691 instead and returns the CPU user time.
690 692
691 693 -p<P>: use a precision of <P> digits to display the timing result.
692 694 Default: 3
693 695
694 696
695 697 Examples
696 698 --------
697 699 ::
698 700
699 701 In [1]: %timeit pass
700 702 10000000 loops, best of 3: 53.3 ns per loop
701 703
702 704 In [2]: u = None
703 705
704 706 In [3]: %timeit u is None
705 707 10000000 loops, best of 3: 184 ns per loop
706 708
707 709 In [4]: %timeit -r 4 u == None
708 710 1000000 loops, best of 4: 242 ns per loop
709 711
710 712 In [5]: import time
711 713
712 714 In [6]: %timeit -n1 time.sleep(2)
713 715 1 loops, best of 3: 2 s per loop
714 716
715 717
716 718 The times reported by %timeit will be slightly higher than those
717 719 reported by the timeit.py script when variables are accessed. This is
718 720 due to the fact that %timeit executes the statement in the namespace
719 721 of the shell, compared with timeit.py, which uses a single setup
720 722 statement to import function or create variables. Generally, the bias
721 723 does not matter as long as results from timeit.py are not mixed with
722 724 those from %timeit."""
723 725
724 726 import timeit
725 727 import math
726 728
727 729 # XXX: Unfortunately the unicode 'micro' symbol can cause problems in
728 730 # certain terminals. Until we figure out a robust way of
729 731 # auto-detecting if the terminal can deal with it, use plain 'us' for
730 732 # microseconds. I am really NOT happy about disabling the proper
731 733 # 'micro' prefix, but crashing is worse... If anyone knows what the
732 734 # right solution for this is, I'm all ears...
733 735 #
734 736 # Note: using
735 737 #
736 738 # s = u'\xb5'
737 739 # s.encode(sys.getdefaultencoding())
738 740 #
739 741 # is not sufficient, as I've seen terminals where that fails but
740 742 # print s
741 743 #
742 744 # succeeds
743 745 #
744 746 # See bug: https://bugs.launchpad.net/ipython/+bug/348466
745 747
746 748 #units = [u"s", u"ms",u'\xb5',"ns"]
747 749 units = [u"s", u"ms",u'us',"ns"]
748 750
749 751 scaling = [1, 1e3, 1e6, 1e9]
750 752
751 753 opts, stmt = self.parse_options(line,'n:r:tcp:',
752 754 posix=False, strict=False)
753 755 if stmt == "" and cell is None:
754 756 return
755 757 timefunc = timeit.default_timer
756 758 number = int(getattr(opts, "n", 0))
757 759 repeat = int(getattr(opts, "r", timeit.default_repeat))
758 760 precision = int(getattr(opts, "p", 3))
759 761 if hasattr(opts, "t"):
760 762 timefunc = time.time
761 763 if hasattr(opts, "c"):
762 764 timefunc = clock
763 765
764 766 timer = timeit.Timer(timer=timefunc)
765 767 # this code has tight coupling to the inner workings of timeit.Timer,
766 768 # but is there a better way to achieve that the code stmt has access
767 769 # to the shell namespace?
768 770
769 771 if cell is None:
770 772 # called as line magic
771 773 setup = 'pass'
772 774 stmt = timeit.reindent(stmt, 8)
773 775 else:
774 776 setup = timeit.reindent(stmt, 4)
775 777 stmt = timeit.reindent(cell, 8)
776 778
777 779 # From Python 3.3, this template uses new-style string formatting.
778 780 if sys.version_info >= (3, 3):
779 781 src = timeit.template.format(stmt=stmt, setup=setup)
780 782 else:
781 783 src = timeit.template % dict(stmt=stmt, setup=setup)
782 784
783 785 # Track compilation time so it can be reported if too long
784 786 # Minimum time above which compilation time will be reported
785 787 tc_min = 0.1
786 788
787 789 t0 = clock()
788 790 code = compile(src, "<magic-timeit>", "exec")
789 791 tc = clock()-t0
790 792
791 793 ns = {}
792 794 exec code in self.shell.user_ns, ns
793 795 timer.inner = ns["inner"]
794 796
795 797 if number == 0:
796 798 # determine number so that 0.2 <= total time < 2.0
797 799 number = 1
798 800 for i in range(1, 10):
799 801 if timer.timeit(number) >= 0.2:
800 802 break
801 803 number *= 10
802 804
803 805 best = min(timer.repeat(repeat, number)) / number
804 806
805 807 if best > 0.0 and best < 1000.0:
806 808 order = min(-int(math.floor(math.log10(best)) // 3), 3)
807 809 elif best >= 1000.0:
808 810 order = 0
809 811 else:
810 812 order = 3
811 813 print u"%d loops, best of %d: %.*g %s per loop" % (number, repeat,
812 814 precision,
813 815 best * scaling[order],
814 816 units[order])
815 817 if tc > tc_min:
816 818 print "Compiler time: %.2f s" % tc
817 819
818 820 @skip_doctest
819 821 @needs_local_scope
820 822 @line_magic
821 823 def time(self,parameter_s, user_locals):
822 824 """Time execution of a Python statement or expression.
823 825
824 826 The CPU and wall clock times are printed, and the value of the
825 827 expression (if any) is returned. Note that under Win32, system time
826 828 is always reported as 0, since it can not be measured.
827 829
828 830 This function provides very basic timing functionality. In Python
829 831 2.3, the timeit module offers more control and sophistication, so this
830 832 could be rewritten to use it (patches welcome).
831 833
832 834 Examples
833 835 --------
834 836 ::
835 837
836 838 In [1]: time 2**128
837 839 CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
838 840 Wall time: 0.00
839 841 Out[1]: 340282366920938463463374607431768211456L
840 842
841 843 In [2]: n = 1000000
842 844
843 845 In [3]: time sum(range(n))
844 846 CPU times: user 1.20 s, sys: 0.05 s, total: 1.25 s
845 847 Wall time: 1.37
846 848 Out[3]: 499999500000L
847 849
848 850 In [4]: time print 'hello world'
849 851 hello world
850 852 CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
851 853 Wall time: 0.00
852 854
853 855 Note that the time needed by Python to compile the given expression
854 856 will be reported if it is more than 0.1s. In this example, the
855 857 actual exponentiation is done by Python at compilation time, so while
856 858 the expression can take a noticeable amount of time to compute, that
857 859 time is purely due to the compilation:
858 860
859 861 In [5]: time 3**9999;
860 862 CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
861 863 Wall time: 0.00 s
862 864
863 865 In [6]: time 3**999999;
864 866 CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
865 867 Wall time: 0.00 s
866 868 Compiler : 0.78 s
867 869 """
868 870
869 871 # fail immediately if the given expression can't be compiled
870 872
871 873 expr = self.shell.prefilter(parameter_s,False)
872 874
873 875 # Minimum time above which compilation time will be reported
874 876 tc_min = 0.1
875 877
876 878 try:
877 879 mode = 'eval'
878 880 t0 = clock()
879 881 code = compile(expr,'<timed eval>',mode)
880 882 tc = clock()-t0
881 883 except SyntaxError:
882 884 mode = 'exec'
883 885 t0 = clock()
884 886 code = compile(expr,'<timed exec>',mode)
885 887 tc = clock()-t0
886 888 # skew measurement as little as possible
887 889 glob = self.shell.user_ns
888 890 wtime = time.time
889 891 # time execution
890 892 wall_st = wtime()
891 893 if mode=='eval':
892 894 st = clock2()
893 895 out = eval(code, glob, user_locals)
894 896 end = clock2()
895 897 else:
896 898 st = clock2()
897 899 exec code in glob, user_locals
898 900 end = clock2()
899 901 out = None
900 902 wall_end = wtime()
901 903 # Compute actual times and report
902 904 wall_time = wall_end-wall_st
903 905 cpu_user = end[0]-st[0]
904 906 cpu_sys = end[1]-st[1]
905 907 cpu_tot = cpu_user+cpu_sys
906 908 print "CPU times: user %.2f s, sys: %.2f s, total: %.2f s" % \
907 909 (cpu_user,cpu_sys,cpu_tot)
908 910 print "Wall time: %.2f s" % wall_time
909 911 if tc > tc_min:
910 912 print "Compiler : %.2f s" % tc
911 913 return out
912 914
913 915 @skip_doctest
914 916 @line_magic
915 917 def macro(self, parameter_s=''):
916 918 """Define a macro for future re-execution. It accepts ranges of history,
917 919 filenames or string objects.
918 920
919 921 Usage:\\
920 922 %macro [options] name n1-n2 n3-n4 ... n5 .. n6 ...
921 923
922 924 Options:
923 925
924 926 -r: use 'raw' input. By default, the 'processed' history is used,
925 927 so that magics are loaded in their transformed version to valid
926 928 Python. If this option is given, the raw input as typed as the
927 929 command line is used instead.
928 930
929 931 This will define a global variable called `name` which is a string
930 932 made of joining the slices and lines you specify (n1,n2,... numbers
931 933 above) from your input history into a single string. This variable
932 934 acts like an automatic function which re-executes those lines as if
933 935 you had typed them. You just type 'name' at the prompt and the code
934 936 executes.
935 937
936 938 The syntax for indicating input ranges is described in %history.
937 939
938 940 Note: as a 'hidden' feature, you can also use traditional python slice
939 941 notation, where N:M means numbers N through M-1.
940 942
941 943 For example, if your history contains (%hist prints it)::
942 944
943 945 44: x=1
944 946 45: y=3
945 947 46: z=x+y
946 948 47: print x
947 949 48: a=5
948 950 49: print 'x',x,'y',y
949 951
950 952 you can create a macro with lines 44 through 47 (included) and line 49
951 953 called my_macro with::
952 954
953 955 In [55]: %macro my_macro 44-47 49
954 956
955 957 Now, typing `my_macro` (without quotes) will re-execute all this code
956 958 in one pass.
957 959
958 960 You don't need to give the line-numbers in order, and any given line
959 961 number can appear multiple times. You can assemble macros with any
960 962 lines from your input history in any order.
961 963
962 964 The macro is a simple object which holds its value in an attribute,
963 965 but IPython's display system checks for macros and executes them as
964 966 code instead of printing them when you type their name.
965 967
966 968 You can view a macro's contents by explicitly printing it with::
967 969
968 970 print macro_name
969 971
970 972 """
971 973 opts,args = self.parse_options(parameter_s,'r',mode='list')
972 974 if not args: # List existing macros
973 975 return sorted(k for k,v in self.shell.user_ns.iteritems() if\
974 976 isinstance(v, Macro))
975 977 if len(args) == 1:
976 978 raise UsageError(
977 979 "%macro insufficient args; usage '%macro name n1-n2 n3-4...")
978 980 name, codefrom = args[0], " ".join(args[1:])
979 981
980 982 #print 'rng',ranges # dbg
981 983 try:
982 984 lines = self.shell.find_user_code(codefrom, 'r' in opts)
983 985 except (ValueError, TypeError) as e:
984 986 print e.args[0]
985 987 return
986 988 macro = Macro(lines)
987 989 self.shell.define_macro(name, macro)
988 990 print 'Macro `%s` created. To execute, type its name (without quotes).' % name
989 991 print '=== Macro contents: ==='
990 992 print macro,
993
994 @magic_arguments.magic_arguments()
995 @magic_arguments.argument('output', type=str, default='', nargs='?',
996 help="""The name of the variable in which to store output.
997 This is a utils.io.CapturedIO object with stdout/err attributes
998 for the text of the captured output.
999
1000 CapturedOutput also has a show() method for displaying the output,
1001 and __call__ as well, so you can use that to quickly display the
1002 output.
1003
1004 If unspecified, captured output is discarded.
1005 """
1006 )
1007 @magic_arguments.argument('--no-stderr', action="store_true",
1008 help="""Don't capture stderr."""
1009 )
1010 @magic_arguments.argument('--no-stdout', action="store_true",
1011 help="""Don't capture stdout."""
1012 )
1013 @cell_magic
1014 def capture(self, line, cell):
1015 """run the cell, capturing stdout/err"""
1016 args = magic_arguments.parse_argstring(self.capture, line)
1017 out = not args.no_stdout
1018 err = not args.no_stderr
1019 with capture_output(out, err) as io:
1020 self.shell.run_cell(cell)
1021 if args.output:
1022 self.shell.user_ns[args.output] = io
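
The new `capture` cell magic added at the end of this hunk is a thin wrapper: the `--no-stdout`/`--no-stderr` flags toggle the two arguments passed to `capture_output`, and the captured-IO object is bound in the user namespace when a name is given. A short interactive sketch, in the same In[]/Out[] style as the docstrings above (output values are illustrative, not taken from a recorded session):

    In [1]: import sys

    In [2]: %%capture cap
       ...: print 'hi, stdout'
       ...: print >> sys.stderr, 'hi, stderr'
       ...:

    In [3]: cap.stdout        # captured text, available as a string
    Out[3]: 'hi, stdout\n'

    In [4]: cap()             # per the docstring, replays output to stdout/err
    hi, stdout
    hi, stderr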
@@ -1,213 +1,183 b''
1 1 """base class for parallel client tests
2 2
3 3 Authors:
4 4
5 5 * Min RK
6 6 """
7 7
8 8 #-------------------------------------------------------------------------------
9 9 # Copyright (C) 2011 The IPython Development Team
10 10 #
11 11 # Distributed under the terms of the BSD License. The full license is in
12 12 # the file COPYING, distributed as part of this software.
13 13 #-------------------------------------------------------------------------------
14 14
15 15 import sys
16 16 import tempfile
17 17 import time
18 18 from StringIO import StringIO
19 19
20 20 from nose import SkipTest
21 21
22 22 import zmq
23 23 from zmq.tests import BaseZMQTestCase
24 24
25 25 from IPython.external.decorator import decorator
26 26
27 27 from IPython.parallel import error
28 28 from IPython.parallel import Client
29 29
30 30 from IPython.parallel.tests import launchers, add_engines
31 31
32 32 # simple tasks for use in apply tests
33 33
34 34 def segfault():
35 35 """this will segfault"""
36 36 import ctypes
37 37 ctypes.memset(-1,0,1)
38 38
39 39 def crash():
40 40 """from stdlib crashers in the test suite"""
41 41 import types
42 42 if sys.platform.startswith('win'):
43 43 import ctypes
44 44 ctypes.windll.kernel32.SetErrorMode(0x0002);
45 45 args = [ 0, 0, 0, 0, b'\x04\x71\x00\x00', (), (), (), '', '', 1, b'']
46 46 if sys.version_info[0] >= 3:
47 47 # Python3 adds 'kwonlyargcount' as the second argument to Code
48 48 args.insert(1, 0)
49 49
50 50 co = types.CodeType(*args)
51 51 exec(co)
52 52
53 53 def wait(n):
54 54 """sleep for a time"""
55 55 import time
56 56 time.sleep(n)
57 57 return n
58 58
59 59 def raiser(eclass):
60 60 """raise an exception"""
61 61 raise eclass()
62 62
63 63 def generate_output():
64 64 """function for testing output
65 65
66 66 publishes two outputs of each type, and returns
67 67 a rich displayable object.
68 68 """
69 69
70 70 import sys
71 71 from IPython.core.display import display, HTML, Math
72 72
73 73 print "stdout"
74 74 print >> sys.stderr, "stderr"
75 75
76 76 display(HTML("<b>HTML</b>"))
77 77
78 78 print "stdout2"
79 79 print >> sys.stderr, "stderr2"
80 80
81 81 display(Math(r"\alpha=\beta"))
82 82
83 83 return Math("42")
84 84
85 85 # test decorator for skipping tests when libraries are unavailable
86 86 def skip_without(*names):
87 87 """skip a test if some names are not importable"""
88 88 @decorator
89 89 def skip_without_names(f, *args, **kwargs):
90 90 """decorator to skip tests in the absence of numpy."""
91 91 for name in names:
92 92 try:
93 93 __import__(name)
94 94 except ImportError:
95 95 raise SkipTest
96 96 return f(*args, **kwargs)
97 97 return skip_without_names
98 98
99 99 #-------------------------------------------------------------------------------
100 100 # Classes
101 101 #-------------------------------------------------------------------------------
102 102
103 class CapturedIO(object):
104 """Simple object for containing captured stdout/err StringIO objects"""
105
106 def __init__(self, stdout, stderr):
107 self.stdout_io = stdout
108 self.stderr_io = stderr
109
110 @property
111 def stdout(self):
112 return self.stdout_io.getvalue()
113
114 @property
115 def stderr(self):
116 return self.stderr_io.getvalue()
117
118
119 class capture_output(object):
120 """context manager for capturing stdout/err"""
121
122 def __enter__(self):
123 self.sys_stdout = sys.stdout
124 self.sys_stderr = sys.stderr
125 stdout = sys.stdout = StringIO()
126 stderr = sys.stderr = StringIO()
127 return CapturedIO(stdout, stderr)
128
129 def __exit__(self, exc_type, exc_value, traceback):
130 sys.stdout = self.sys_stdout
131 sys.stderr = self.sys_stderr
132
133 103
134 104 class ClusterTestCase(BaseZMQTestCase):
135 105
136 106 def add_engines(self, n=1, block=True):
137 107 """add multiple engines to our cluster"""
138 108 self.engines.extend(add_engines(n))
139 109 if block:
140 110 self.wait_on_engines()
141 111
142 112 def minimum_engines(self, n=1, block=True):
143 113 """add engines until there are at least n connected"""
144 114 self.engines.extend(add_engines(n, total=True))
145 115 if block:
146 116 self.wait_on_engines()
147 117
148 118
149 119 def wait_on_engines(self, timeout=5):
150 120 """wait for our engines to connect."""
151 121 n = len(self.engines)+self.base_engine_count
152 122 tic = time.time()
153 123 while time.time()-tic < timeout and len(self.client.ids) < n:
154 124 time.sleep(0.1)
155 125
156 126 assert not len(self.client.ids) < n, "waiting for engines timed out"
157 127
158 128 def connect_client(self):
159 129 """connect a client with my Context, and track its sockets for cleanup"""
160 130 c = Client(profile='iptest', context=self.context)
161 131 for name in filter(lambda n:n.endswith('socket'), dir(c)):
162 132 s = getattr(c, name)
163 133 s.setsockopt(zmq.LINGER, 0)
164 134 self.sockets.append(s)
165 135 return c
166 136
167 137 def assertRaisesRemote(self, etype, f, *args, **kwargs):
168 138 try:
169 139 try:
170 140 f(*args, **kwargs)
171 141 except error.CompositeError as e:
172 142 e.raise_exception()
173 143 except error.RemoteError as e:
174 144 self.assertEquals(etype.__name__, e.ename, "Should have raised %r, but raised %r"%(etype.__name__, e.ename))
175 145 else:
176 146 self.fail("should have raised a RemoteError")
177 147
178 148 def _wait_for(self, f, timeout=10):
179 149 """wait for a condition"""
180 150 tic = time.time()
181 151 while time.time() <= tic + timeout:
182 152 if f():
183 153 return
184 154 time.sleep(0.1)
185 155 self.client.spin()
186 156 if not f():
187 157 print "Warning: Awaited condition never arrived"
188 158
189 159 def setUp(self):
190 160 BaseZMQTestCase.setUp(self)
191 161 self.client = self.connect_client()
192 162 # start every test with clean engine namespaces:
193 163 self.client.clear(block=True)
194 164 self.base_engine_count=len(self.client.ids)
195 165 self.engines=[]
196 166
197 167 def tearDown(self):
198 168 # self.client.clear(block=True)
199 169 # close fds:
200 170 for e in filter(lambda e: e.poll() is not None, launchers):
201 171 launchers.remove(e)
202 172
203 173 # allow flushing of incoming messages to prevent crash on socket close
204 174 self.client.wait(timeout=2)
205 175 # time.sleep(2)
206 176 self.client.spin()
207 177 self.client.close()
208 178 BaseZMQTestCase.tearDown(self)
209 179 # this will be redundant when pyzmq merges PR #88
210 180 # self.context.term()
211 181 # print tempfile.TemporaryFile().fileno(),
212 182 # sys.stdout.flush()
213 183 No newline at end of file
@@ -1,266 +1,267 b''
1 1 """Tests for asyncresult.py
2 2
3 3 Authors:
4 4
5 5 * Min RK
6 6 """
7 7
8 8 #-------------------------------------------------------------------------------
9 9 # Copyright (C) 2011 The IPython Development Team
10 10 #
11 11 # Distributed under the terms of the BSD License. The full license is in
12 12 # the file COPYING, distributed as part of this software.
13 13 #-------------------------------------------------------------------------------
14 14
15 15 #-------------------------------------------------------------------------------
16 16 # Imports
17 17 #-------------------------------------------------------------------------------
18 18
19 19 import time
20 20
21 from IPython.parallel.error import TimeoutError
21 from IPython.utils.io import capture_output
22 22
23 from IPython.parallel.error import TimeoutError
23 24 from IPython.parallel import error, Client
24 25 from IPython.parallel.tests import add_engines
25 from .clienttest import ClusterTestCase, capture_output
26 from .clienttest import ClusterTestCase
26 27
27 28 def setup():
28 29 add_engines(2, total=True)
29 30
30 31 def wait(n):
31 32 import time
32 33 time.sleep(n)
33 34 return n
34 35
35 36 class AsyncResultTest(ClusterTestCase):
36 37
37 38 def test_single_result_view(self):
38 39 """various one-target views get the right value for single_result"""
39 40 eid = self.client.ids[-1]
40 41 ar = self.client[eid].apply_async(lambda : 42)
41 42 self.assertEquals(ar.get(), 42)
42 43 ar = self.client[[eid]].apply_async(lambda : 42)
43 44 self.assertEquals(ar.get(), [42])
44 45 ar = self.client[-1:].apply_async(lambda : 42)
45 46 self.assertEquals(ar.get(), [42])
46 47
47 48 def test_get_after_done(self):
48 49 ar = self.client[-1].apply_async(lambda : 42)
49 50 ar.wait()
50 51 self.assertTrue(ar.ready())
51 52 self.assertEquals(ar.get(), 42)
52 53 self.assertEquals(ar.get(), 42)
53 54
54 55 def test_get_before_done(self):
55 56 ar = self.client[-1].apply_async(wait, 0.1)
56 57 self.assertRaises(TimeoutError, ar.get, 0)
57 58 ar.wait(0)
58 59 self.assertFalse(ar.ready())
59 60 self.assertEquals(ar.get(), 0.1)
60 61
61 62 def test_get_after_error(self):
62 63 ar = self.client[-1].apply_async(lambda : 1/0)
63 64 ar.wait(10)
64 65 self.assertRaisesRemote(ZeroDivisionError, ar.get)
65 66 self.assertRaisesRemote(ZeroDivisionError, ar.get)
66 67 self.assertRaisesRemote(ZeroDivisionError, ar.get_dict)
67 68
68 69 def test_get_dict(self):
69 70 n = len(self.client)
70 71 ar = self.client[:].apply_async(lambda : 5)
71 72 self.assertEquals(ar.get(), [5]*n)
72 73 d = ar.get_dict()
73 74 self.assertEquals(sorted(d.keys()), sorted(self.client.ids))
74 75 for eid,r in d.iteritems():
75 76 self.assertEquals(r, 5)
76 77
77 78 def test_list_amr(self):
78 79 ar = self.client.load_balanced_view().map_async(wait, [0.1]*5)
79 80 rlist = list(ar)
80 81
81 82 def test_getattr(self):
82 83 ar = self.client[:].apply_async(wait, 0.5)
83 84 self.assertRaises(AttributeError, lambda : ar._foo)
84 85 self.assertRaises(AttributeError, lambda : ar.__length_hint__())
85 86 self.assertRaises(AttributeError, lambda : ar.foo)
86 87 self.assertRaises(AttributeError, lambda : ar.engine_id)
87 88 self.assertFalse(hasattr(ar, '__length_hint__'))
88 89 self.assertFalse(hasattr(ar, 'foo'))
89 90 self.assertFalse(hasattr(ar, 'engine_id'))
90 91 ar.get(5)
91 92 self.assertRaises(AttributeError, lambda : ar._foo)
92 93 self.assertRaises(AttributeError, lambda : ar.__length_hint__())
93 94 self.assertRaises(AttributeError, lambda : ar.foo)
94 95 self.assertTrue(isinstance(ar.engine_id, list))
95 96 self.assertEquals(ar.engine_id, ar['engine_id'])
96 97 self.assertFalse(hasattr(ar, '__length_hint__'))
97 98 self.assertFalse(hasattr(ar, 'foo'))
98 99 self.assertTrue(hasattr(ar, 'engine_id'))
99 100
100 101 def test_getitem(self):
101 102 ar = self.client[:].apply_async(wait, 0.5)
102 103 self.assertRaises(TimeoutError, lambda : ar['foo'])
103 104 self.assertRaises(TimeoutError, lambda : ar['engine_id'])
104 105 ar.get(5)
105 106 self.assertRaises(KeyError, lambda : ar['foo'])
106 107 self.assertTrue(isinstance(ar['engine_id'], list))
107 108 self.assertEquals(ar.engine_id, ar['engine_id'])
108 109
109 110 def test_single_result(self):
110 111 ar = self.client[-1].apply_async(wait, 0.5)
111 112 self.assertRaises(TimeoutError, lambda : ar['foo'])
112 113 self.assertRaises(TimeoutError, lambda : ar['engine_id'])
113 114 self.assertTrue(ar.get(5) == 0.5)
114 115 self.assertTrue(isinstance(ar['engine_id'], int))
115 116 self.assertTrue(isinstance(ar.engine_id, int))
116 117 self.assertEquals(ar.engine_id, ar['engine_id'])
117 118
118 119 def test_abort(self):
119 120 e = self.client[-1]
120 121 ar = e.execute('import time; time.sleep(1)', block=False)
121 122 ar2 = e.apply_async(lambda : 2)
122 123 ar2.abort()
123 124 self.assertRaises(error.TaskAborted, ar2.get)
124 125 ar.get()
125 126
126 127 def test_len(self):
127 128 v = self.client.load_balanced_view()
128 129 ar = v.map_async(lambda x: x, range(10))
129 130 self.assertEquals(len(ar), 10)
130 131 ar = v.apply_async(lambda x: x, range(10))
131 132 self.assertEquals(len(ar), 1)
132 133 ar = self.client[:].apply_async(lambda x: x, range(10))
133 134 self.assertEquals(len(ar), len(self.client.ids))
134 135
135 136 def test_wall_time_single(self):
136 137 v = self.client.load_balanced_view()
137 138 ar = v.apply_async(time.sleep, 0.25)
138 139 self.assertRaises(TimeoutError, getattr, ar, 'wall_time')
139 140 ar.get(2)
140 141 self.assertTrue(ar.wall_time < 1.)
141 142 self.assertTrue(ar.wall_time > 0.2)
142 143
143 144 def test_wall_time_multi(self):
144 145 self.minimum_engines(4)
145 146 v = self.client[:]
146 147 ar = v.apply_async(time.sleep, 0.25)
147 148 self.assertRaises(TimeoutError, getattr, ar, 'wall_time')
148 149 ar.get(2)
149 150 self.assertTrue(ar.wall_time < 1.)
150 151 self.assertTrue(ar.wall_time > 0.2)
151 152
152 153 def test_serial_time_single(self):
153 154 v = self.client.load_balanced_view()
154 155 ar = v.apply_async(time.sleep, 0.25)
155 156 self.assertRaises(TimeoutError, getattr, ar, 'serial_time')
156 157 ar.get(2)
157 158 self.assertTrue(ar.serial_time < 1.)
158 159 self.assertTrue(ar.serial_time > 0.2)
159 160
160 161 def test_serial_time_multi(self):
161 162 self.minimum_engines(4)
162 163 v = self.client[:]
163 164 ar = v.apply_async(time.sleep, 0.25)
164 165 self.assertRaises(TimeoutError, getattr, ar, 'serial_time')
165 166 ar.get(2)
166 167 self.assertTrue(ar.serial_time < 2.)
167 168 self.assertTrue(ar.serial_time > 0.8)
168 169
169 170 def test_elapsed_single(self):
170 171 v = self.client.load_balanced_view()
171 172 ar = v.apply_async(time.sleep, 0.25)
172 173 while not ar.ready():
173 174 time.sleep(0.01)
174 175 self.assertTrue(ar.elapsed < 1)
175 176 self.assertTrue(ar.elapsed < 1)
176 177 ar.get(2)
177 178
178 179 def test_elapsed_multi(self):
179 180 v = self.client[:]
180 181 ar = v.apply_async(time.sleep, 0.25)
181 182 while not ar.ready():
182 183 time.sleep(0.01)
183 184 self.assertTrue(ar.elapsed < 1)
184 185 self.assertTrue(ar.elapsed < 1)
185 186 ar.get(2)
186 187
187 188 def test_hubresult_timestamps(self):
188 189 self.minimum_engines(4)
189 190 v = self.client[:]
190 191 ar = v.apply_async(time.sleep, 0.25)
191 192 ar.get(2)
192 193 rc2 = Client(profile='iptest')
193 194 # must have try/finally to close second Client, otherwise
194 195 # will have dangling sockets causing problems
195 196 try:
196 197 time.sleep(0.25)
197 198 hr = rc2.get_result(ar.msg_ids)
198 199 self.assertTrue(hr.elapsed > 0., "got bad elapsed: %s" % hr.elapsed)
199 200 hr.get(1)
200 201 self.assertTrue(hr.wall_time < ar.wall_time + 0.2, "got bad wall_time: %s > %s" % (hr.wall_time, ar.wall_time))
201 202 self.assertEquals(hr.serial_time, ar.serial_time)
202 203 finally:
203 204 rc2.close()
204 205
205 206 def test_display_empty_streams_single(self):
206 207 """empty stdout/err are not displayed (single result)"""
207 208 self.minimum_engines(1)
208 209
209 210 v = self.client[-1]
210 211 ar = v.execute("print (5555)")
211 212 ar.get(5)
212 213 with capture_output() as io:
213 214 ar.display_outputs()
214 215 self.assertEquals(io.stderr, '')
215 216 self.assertEquals('5555\n', io.stdout)
216 217
217 218 ar = v.execute("a=5")
218 219 ar.get(5)
219 220 with capture_output() as io:
220 221 ar.display_outputs()
221 222 self.assertEquals(io.stderr, '')
222 223 self.assertEquals(io.stdout, '')
223 224
224 225 def test_display_empty_streams_type(self):
225 226 """empty stdout/err are not displayed (groupby type)"""
226 227 self.minimum_engines(1)
227 228
228 229 v = self.client[:]
229 230 ar = v.execute("print (5555)")
230 231 ar.get(5)
231 232 with capture_output() as io:
232 233 ar.display_outputs()
233 234 self.assertEquals(io.stderr, '')
234 235 self.assertEquals(io.stdout.count('5555'), len(v), io.stdout)
235 236 self.assertFalse('\n\n' in io.stdout, io.stdout)
236 237 self.assertEquals(io.stdout.count('[stdout:'), len(v), io.stdout)
237 238
238 239 ar = v.execute("a=5")
239 240 ar.get(5)
240 241 with capture_output() as io:
241 242 ar.display_outputs()
242 243 self.assertEquals(io.stderr, '')
243 244 self.assertEquals(io.stdout, '')
244 245
245 246 def test_display_empty_streams_engine(self):
246 247 """empty stdout/err are not displayed (groupby engine)"""
247 248 self.minimum_engines(1)
248 249
249 250 v = self.client[:]
250 251 ar = v.execute("print (5555)")
251 252 ar.get(5)
252 253 with capture_output() as io:
253 254 ar.display_outputs('engine')
254 255 self.assertEquals(io.stderr, '')
255 256 self.assertEquals(io.stdout.count('5555'), len(v), io.stdout)
256 257 self.assertFalse('\n\n' in io.stdout, io.stdout)
257 258 self.assertEquals(io.stdout.count('[stdout:'), len(v), io.stdout)
258 259
259 260 ar = v.execute("a=5")
260 261 ar.get(5)
261 262 with capture_output() as io:
262 263 ar.display_outputs('engine')
263 264 self.assertEquals(io.stderr, '')
264 265 self.assertEquals(io.stdout, '')
265 266
266 267
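The tests above exercise the AsyncResult timing metadata (wall_time, serial_time, elapsed) and the new display_outputs() method. As a rough sketch of how these are used interactively (an assumption for illustration only: a running IPython.parallel cluster started with ipcluster; Python 2, matching the code under test):

    import time
    from IPython import parallel

    rc = parallel.Client()
    view = rc.load_balanced_view()

    ar = view.apply_async(time.sleep, 0.25)
    ar.get(2)                     # block up to 2s; the timing attributes raise TimeoutError before completion

    print ar.wall_time            # submission -> completion, including queue time
    print ar.serial_time          # computation time summed over engines
    print ar.elapsed              # wall-clock time since submission

    ar = rc[:].execute("print 5555")
    ar.get(5)
    ar.display_outputs()          # replay each engine's stdout/stderr locally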
@@ -1,339 +1,340 b''
1 1 # -*- coding: utf-8 -*-
2 2 """Test Parallel magics
3 3
4 4 Authors:
5 5
6 6 * Min RK
7 7 """
8 8 #-------------------------------------------------------------------------------
9 9 # Copyright (C) 2011 The IPython Development Team
10 10 #
11 11 # Distributed under the terms of the BSD License. The full license is in
12 12 # the file COPYING, distributed as part of this software.
13 13 #-------------------------------------------------------------------------------
14 14
15 15 #-------------------------------------------------------------------------------
16 16 # Imports
17 17 #-------------------------------------------------------------------------------
18 18
19 19 import re
20 20 import sys
21 21 import time
22 22
23 23 import zmq
24 24 from nose import SkipTest
25 25
26 26 from IPython.testing import decorators as dec
27 27 from IPython.testing.ipunittest import ParametricTestCase
28 from IPython.utils.io import capture_output
28 29
29 30 from IPython import parallel as pmod
30 31 from IPython.parallel import error
31 32 from IPython.parallel import AsyncResult
32 33 from IPython.parallel.util import interactive
33 34
34 35 from IPython.parallel.tests import add_engines
35 36
36 from .clienttest import ClusterTestCase, capture_output, generate_output
37 from .clienttest import ClusterTestCase, generate_output
37 38
38 39 def setup():
39 40 add_engines(3, total=True)
40 41
41 42 class TestParallelMagics(ClusterTestCase, ParametricTestCase):
42 43
43 44 def test_px_blocking(self):
44 45 ip = get_ipython()
45 46 v = self.client[-1:]
46 47 v.activate()
47 48 v.block=True
48 49
49 50 ip.magic('px a=5')
50 51 self.assertEquals(v['a'], [5])
51 52 ip.magic('px a=10')
52 53 self.assertEquals(v['a'], [10])
53 54 # just 'print a' works ~99% of the time, but this ensures that
54 55 # the stdout message has arrived when the result is finished:
55 56 with capture_output() as io:
56 57 ip.magic(
57 58 'px import sys,time;print(a);sys.stdout.flush();time.sleep(0.2)'
58 59 )
59 60 out = io.stdout
60 61 self.assertTrue('[stdout:' in out, out)
61 62 self.assertFalse('\n\n' in out)
62 63 self.assertTrue(out.rstrip().endswith('10'))
63 64 self.assertRaisesRemote(ZeroDivisionError, ip.magic, 'px 1/0')
64 65
65 66 def _check_generated_stderr(self, stderr, n):
66 67 expected = [
67 68 r'\[stderr:\d+\]',
68 69 '^stderr$',
69 70 '^stderr2$',
70 71 ] * n
71 72
72 73 self.assertFalse('\n\n' in stderr, stderr)
73 74 lines = stderr.splitlines()
74 75 self.assertEquals(len(lines), len(expected), stderr)
75 76 for line,expect in zip(lines, expected):
76 77 if isinstance(expect, str):
77 78 expect = [expect]
78 79 for ex in expect:
79 80 self.assertTrue(re.search(ex, line) is not None, "Expected %r in %r" % (ex, line))
80 81
81 82 def test_cellpx_block_args(self):
82 83 """%%px --[no]block flags work"""
83 84 ip = get_ipython()
84 85 v = self.client[-1:]
85 86 v.activate()
86 87 v.block=False
87 88
88 89 for block in (True, False):
89 90 v.block = block
90 91
91 92 with capture_output() as io:
92 93 ip.run_cell_magic("px", "", "1")
93 94 if block:
94 95 self.assertTrue(io.stdout.startswith("Parallel"), io.stdout)
95 96 else:
96 97 self.assertTrue(io.stdout.startswith("Async"), io.stdout)
97 98
98 99 with capture_output() as io:
99 100 ip.run_cell_magic("px", "--block", "1")
100 101 self.assertTrue(io.stdout.startswith("Parallel"), io.stdout)
101 102
102 103 with capture_output() as io:
103 104 ip.run_cell_magic("px", "--noblock", "1")
104 105 self.assertTrue(io.stdout.startswith("Async"), io.stdout)
105 106
106 107 def test_cellpx_groupby_engine(self):
107 108 """%%px --group-outputs=engine"""
108 109 ip = get_ipython()
109 110 v = self.client[:]
110 111 v.block = True
111 112 v.activate()
112 113
113 114 v['generate_output'] = generate_output
114 115
115 116 with capture_output() as io:
116 117 ip.run_cell_magic('px', '--group-outputs=engine', 'generate_output()')
117 118
118 119 self.assertFalse('\n\n' in io.stdout)
119 120 lines = io.stdout.splitlines()[1:]
120 121 expected = [
121 122 r'\[stdout:\d+\]',
122 123 'stdout',
123 124 'stdout2',
124 125 r'\[output:\d+\]',
125 126 r'IPython\.core\.display\.HTML',
126 127 r'IPython\.core\.display\.Math',
127 128 r'Out\[\d+:\d+\]:.*IPython\.core\.display\.Math',
128 129 ] * len(v)
129 130
130 131 self.assertEquals(len(lines), len(expected), io.stdout)
131 132 for line,expect in zip(lines, expected):
132 133 if isinstance(expect, str):
133 134 expect = [expect]
134 135 for ex in expect:
135 136 self.assertTrue(re.search(ex, line) is not None, "Expected %r in %r" % (ex, line))
136 137
137 138 self._check_generated_stderr(io.stderr, len(v))
138 139
139 140
140 141 def test_cellpx_groupby_order(self):
141 142 """%%px --group-outputs=order"""
142 143 ip = get_ipython()
143 144 v = self.client[:]
144 145 v.block = True
145 146 v.activate()
146 147
147 148 v['generate_output'] = generate_output
148 149
149 150 with capture_output() as io:
150 151 ip.run_cell_magic('px', '--group-outputs=order', 'generate_output()')
151 152
152 153 self.assertFalse('\n\n' in io.stdout)
153 154 lines = io.stdout.splitlines()[1:]
154 155 expected = []
155 156 expected.extend([
156 157 r'\[stdout:\d+\]',
157 158 'stdout',
158 159 'stdout2',
159 160 ] * len(v))
160 161 expected.extend([
161 162 r'\[output:\d+\]',
162 163 'IPython.core.display.HTML',
163 164 ] * len(v))
164 165 expected.extend([
165 166 r'\[output:\d+\]',
166 167 'IPython.core.display.Math',
167 168 ] * len(v))
168 169 expected.extend([
169 170 r'Out\[\d+:\d+\]:.*IPython\.core\.display\.Math'
170 171 ] * len(v))
171 172
172 173 self.assertEquals(len(lines), len(expected), io.stdout)
173 174 for line,expect in zip(lines, expected):
174 175 if isinstance(expect, str):
175 176 expect = [expect]
176 177 for ex in expect:
177 178 self.assertTrue(re.search(ex, line) is not None, "Expected %r in %r" % (ex, line))
178 179
179 180 self._check_generated_stderr(io.stderr, len(v))
180 181
181 182 def test_cellpx_groupby_type(self):
182 183 """%%px --group-outputs=type"""
183 184 ip = get_ipython()
184 185 v = self.client[:]
185 186 v.block = True
186 187 v.activate()
187 188
188 189 v['generate_output'] = generate_output
189 190
190 191 with capture_output() as io:
191 192 ip.run_cell_magic('px', '--group-outputs=type', 'generate_output()')
192 193
193 194 self.assertFalse('\n\n' in io.stdout)
194 195 lines = io.stdout.splitlines()[1:]
195 196
196 197 expected = []
197 198 expected.extend([
198 199 r'\[stdout:\d+\]',
199 200 'stdout',
200 201 'stdout2',
201 202 ] * len(v))
202 203 expected.extend([
203 204 r'\[output:\d+\]',
204 205 r'IPython\.core\.display\.HTML',
205 206 r'IPython\.core\.display\.Math',
206 207 ] * len(v))
207 208 expected.extend([
208 209 (r'Out\[\d+:\d+\]', r'IPython\.core\.display\.Math')
209 210 ] * len(v))
210 211
211 212 self.assertEquals(len(lines), len(expected), io.stdout)
212 213 for line,expect in zip(lines, expected):
213 214 if isinstance(expect, str):
214 215 expect = [expect]
215 216 for ex in expect:
216 217 self.assertTrue(re.search(ex, line) is not None, "Expected %r in %r" % (ex, line))
217 218
218 219 self._check_generated_stderr(io.stderr, len(v))
219 220
220 221
221 222 def test_px_nonblocking(self):
222 223 ip = get_ipython()
223 224 v = self.client[-1:]
224 225 v.activate()
225 226 v.block=False
226 227
227 228 ip.magic('px a=5')
228 229 self.assertEquals(v['a'], [5])
229 230 ip.magic('px a=10')
230 231 self.assertEquals(v['a'], [10])
231 232 with capture_output() as io:
232 233 ar = ip.magic('px print (a)')
233 234 self.assertTrue(isinstance(ar, AsyncResult))
234 235 self.assertTrue('Async' in io.stdout)
235 236 self.assertFalse('[stdout:' in io.stdout)
236 237 self.assertFalse('\n\n' in io.stdout)
237 238
238 239 ar = ip.magic('px 1/0')
239 240 self.assertRaisesRemote(ZeroDivisionError, ar.get)
240 241
241 242 def test_autopx_blocking(self):
242 243 ip = get_ipython()
243 244 v = self.client[-1]
244 245 v.activate()
245 246 v.block=True
246 247
247 248 with capture_output() as io:
248 249 ip.magic('autopx')
249 250 ip.run_cell('\n'.join(('a=5','b=12345','c=0')))
250 251 ip.run_cell('b*=2')
251 252 ip.run_cell('print (b)')
252 253 ip.run_cell('b')
253 254 ip.run_cell("b/c")
254 255 ip.magic('autopx')
255 256
256 257 output = io.stdout
257 258
258 259 self.assertTrue(output.startswith('%autopx enabled'), output)
259 260 self.assertTrue(output.rstrip().endswith('%autopx disabled'), output)
260 261 self.assertTrue('RemoteError: ZeroDivisionError' in output, output)
261 262 self.assertTrue('\nOut[' in output, output)
262 263 self.assertTrue(': 24690' in output, output)
263 264 ar = v.get_result(-1)
264 265 self.assertEquals(v['a'], 5)
265 266 self.assertEquals(v['b'], 24690)
266 267 self.assertRaisesRemote(ZeroDivisionError, ar.get)
267 268
268 269 def test_autopx_nonblocking(self):
269 270 ip = get_ipython()
270 271 v = self.client[-1]
271 272 v.activate()
272 273 v.block=False
273 274
274 275 with capture_output() as io:
275 276 ip.magic('autopx')
276 277 ip.run_cell('\n'.join(('a=5','b=10','c=0')))
277 278 ip.run_cell('print (b)')
278 279 ip.run_cell('import time; time.sleep(0.1)')
279 280 ip.run_cell("b/c")
280 281 ip.run_cell('b*=2')
281 282 ip.magic('autopx')
282 283
283 284 output = io.stdout.rstrip()
284 285
285 286 self.assertTrue(output.startswith('%autopx enabled'))
286 287 self.assertTrue(output.endswith('%autopx disabled'))
287 288 self.assertFalse('ZeroDivisionError' in output)
288 289 ar = v.get_result(-2)
289 290 self.assertRaisesRemote(ZeroDivisionError, ar.get)
290 291 # prevent TaskAborted on pulls, due to ZeroDivisionError
291 292 time.sleep(0.5)
292 293 self.assertEquals(v['a'], 5)
293 294 # b*=2 will not fire, due to abort
294 295 self.assertEquals(v['b'], 10)
295 296
296 297 def test_result(self):
297 298 ip = get_ipython()
298 299 v = self.client[-1]
299 300 v.activate()
300 301 data = dict(a=111,b=222)
301 302 v.push(data, block=True)
302 303
303 304 ip.magic('px a')
304 305 ip.magic('px b')
305 306 for idx, name in [
306 307 ('', 'b'),
307 308 ('-1', 'b'),
308 309 ('2', 'b'),
309 310 ('1', 'a'),
310 311 ('-2', 'a'),
311 312 ]:
312 313 with capture_output() as io:
313 314 ip.magic('result ' + idx)
314 315 output = io.stdout
315 316 msg = "expected %s output to include %s, but got: %s" % \
316 317 ('%result '+idx, str(data[name]), output)
317 318 self.assertTrue(str(data[name]) in output, msg)
318 319
319 320 @dec.skipif_not_matplotlib
320 321 def test_px_pylab(self):
321 322 """%pylab works on engines"""
322 323 ip = get_ipython()
323 324 v = self.client[-1]
324 325 v.block = True
325 326 v.activate()
326 327
327 328 with capture_output() as io:
328 329 ip.magic("px %pylab inline")
329 330
330 331 self.assertTrue("Welcome to pylab" in io.stdout, io.stdout)
331 332 self.assertTrue("backend_inline" in io.stdout, io.stdout)
332 333
333 334 with capture_output() as io:
334 335 ip.magic("px plot(rand(100))")
335 336
336 337 self.assertTrue('Out[' in io.stdout, io.stdout)
337 338 self.assertTrue('matplotlib.lines' in io.stdout, io.stdout)
338 339
339 340
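The parallel-magics tests above drive %px, %%px and %autopx through ip.magic / ip.run_cell_magic. A minimal sketch of the same calls outside the test harness (assumes an interactive IPython session with a running cluster; get_ipython() only exists inside IPython):

    from IPython import parallel

    ip = get_ipython()                 # IPython's interactive shell instance
    rc = parallel.Client()
    view = rc[:]                       # DirectView over all engines
    view.block = True
    view.activate()                    # make this view the target of %px / %autopx

    ip.magic('px a = 5')               # line-magic form: run the statement on every engine
    ip.run_cell_magic('px', '--group-outputs=engine',
                      'print(a)')      # cell-magic form, outputs grouped per engine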
@@ -1,323 +1,384 b''
1 1 # encoding: utf-8
2 2 """
3 3 IO related utilities.
4 4 """
5 5
6 6 #-----------------------------------------------------------------------------
7 7 # Copyright (C) 2008-2011 The IPython Development Team
8 8 #
9 9 # Distributed under the terms of the BSD License. The full license is in
10 10 # the file COPYING, distributed as part of this software.
11 11 #-----------------------------------------------------------------------------
12 12 from __future__ import print_function
13 13
14 14 #-----------------------------------------------------------------------------
15 15 # Imports
16 16 #-----------------------------------------------------------------------------
17 17 import os
18 18 import sys
19 19 import tempfile
20 from StringIO import StringIO
20 21
21 22 #-----------------------------------------------------------------------------
22 23 # Code
23 24 #-----------------------------------------------------------------------------
24 25
25 26
26 27 class IOStream:
27 28
28 29 def __init__(self,stream, fallback=None):
29 30 if not hasattr(stream,'write') or not hasattr(stream,'flush'):
30 31 if fallback is not None:
31 32 stream = fallback
32 33 else:
33 34 raise ValueError("fallback required, but not specified")
34 35 self.stream = stream
35 36 self._swrite = stream.write
36 37
37 38 # clone all methods not overridden:
38 39 def clone(meth):
39 40 return not hasattr(self, meth) and not meth.startswith('_')
40 41 for meth in filter(clone, dir(stream)):
41 42 setattr(self, meth, getattr(stream, meth))
42 43
43 44 def write(self,data):
44 45 try:
45 46 self._swrite(data)
46 47 except:
47 48 try:
48 49 # print handles some unicode issues which may trip a plain
49 50 # write() call. Emulate write() by using an empty end
50 51 # argument.
51 52 print(data, end='', file=self.stream)
52 53 except:
53 54 # if we get here, something is seriously broken.
54 55 print('ERROR - failed to write data to stream:', self.stream,
55 56 file=sys.stderr)
56 57
57 58 def writelines(self, lines):
58 59 if isinstance(lines, basestring):
59 60 lines = [lines]
60 61 for line in lines:
61 62 self.write(line)
62 63
63 64 # This class used to have a writeln method, but regular files and streams
64 65 # in Python don't have this method. We need to keep this completely
65 66 # compatible so we removed it.
66 67
67 68 @property
68 69 def closed(self):
69 70 return self.stream.closed
70 71
71 72 def close(self):
72 73 pass
73 74
74 75 # setup stdin/stdout/stderr to sys.stdin/sys.stdout/sys.stderr
75 76 devnull = open(os.devnull, 'a')
76 77 stdin = IOStream(sys.stdin, fallback=devnull)
77 78 stdout = IOStream(sys.stdout, fallback=devnull)
78 79 stderr = IOStream(sys.stderr, fallback=devnull)
79 80
80 81 class IOTerm:
81 82 """ Term holds the file or file-like objects for handling I/O operations.
82 83
83 84 These are normally just sys.stdin, sys.stdout and sys.stderr but for
84 85 Windows they can can replaced to allow editing the strings before they are
85 86 displayed."""
86 87
87 88 # In the future, having IPython channel all its I/O operations through
88 89 # this class will make it easier to embed it into other environments which
89 90 # are not a normal terminal (such as a GUI-based shell)
90 91 def __init__(self, stdin=None, stdout=None, stderr=None):
91 92 mymodule = sys.modules[__name__]
92 93 self.stdin = IOStream(stdin, mymodule.stdin)
93 94 self.stdout = IOStream(stdout, mymodule.stdout)
94 95 self.stderr = IOStream(stderr, mymodule.stderr)
95 96
96 97
97 98 class Tee(object):
98 99 """A class to duplicate an output stream to stdout/err.
99 100
100 101 This works in a manner very similar to the Unix 'tee' command.
101 102
102 103 When the object is closed or deleted, it closes the original file given to
103 104 it for duplication.
104 105 """
105 106 # Inspired by:
106 107 # http://mail.python.org/pipermail/python-list/2007-May/442737.html
107 108
108 109 def __init__(self, file_or_name, mode="w", channel='stdout'):
109 110 """Construct a new Tee object.
110 111
111 112 Parameters
112 113 ----------
113 114 file_or_name : filename or open filehandle (writable)
114 115 File that will be duplicated
115 116
116 117 mode : optional, valid mode for open().
116 117 If a filename was given, open with this mode.
118 119
119 120 channel : str, one of ['stdout', 'stderr']
120 121 """
121 122 if channel not in ['stdout', 'stderr']:
122 123 raise ValueError('Invalid channel spec %s' % channel)
123 124
124 125 if hasattr(file_or_name, 'write') and hasattr(file_or_name, 'seek'):
125 126 self.file = file_or_name
126 127 else:
127 128 self.file = open(file_or_name, mode)
128 129 self.channel = channel
129 130 self.ostream = getattr(sys, channel)
130 131 setattr(sys, channel, self)
131 132 self._closed = False
132 133
133 134 def close(self):
134 135 """Close the file and restore the channel."""
135 136 self.flush()
136 137 setattr(sys, self.channel, self.ostream)
137 138 self.file.close()
138 139 self._closed = True
139 140
140 141 def write(self, data):
141 142 """Write data to both channels."""
142 143 self.file.write(data)
143 144 self.ostream.write(data)
144 145 self.ostream.flush()
145 146
146 147 def flush(self):
147 148 """Flush both channels."""
148 149 self.file.flush()
149 150 self.ostream.flush()
150 151
151 152 def __del__(self):
152 153 if not self._closed:
153 154 self.close()
154 155
155 156
156 157 def file_read(filename):
157 158 """Read a file and close it. Returns the file source."""
158 159 fobj = open(filename,'r');
159 160 source = fobj.read();
160 161 fobj.close()
161 162 return source
162 163
163 164
164 165 def file_readlines(filename):
165 166 """Read a file and close it. Returns the file source using readlines()."""
166 167 fobj = open(filename,'r');
167 168 lines = fobj.readlines();
168 169 fobj.close()
169 170 return lines
170 171
171 172
172 173 def raw_input_multi(header='', ps1='==> ', ps2='..> ',terminate_str = '.'):
173 174 """Take multiple lines of input.
174 175
175 176 A list with each line of input as a separate element is returned when a
176 177 termination string is entered (defaults to a single '.'). Input can also
177 178 terminate via EOF (^D in Unix, ^Z-RET in Windows).
178 179
179 180 Lines of input which end in \\ are joined into single entries (and a
180 181 secondary continuation prompt is issued as long as the user terminates
181 182 lines with \\). This allows entering very long strings which are still
182 183 meant to be treated as single entities.
183 184 """
184 185
185 186 try:
186 187 if header:
187 188 header += '\n'
188 189 lines = [raw_input(header + ps1)]
189 190 except EOFError:
190 191 return []
191 192 terminate = [terminate_str]
192 193 try:
193 194 while lines[-1:] != terminate:
194 195 new_line = raw_input(ps1)
195 196 while new_line.endswith('\\'):
196 197 new_line = new_line[:-1] + raw_input(ps2)
197 198 lines.append(new_line)
198 199
199 200 return lines[:-1] # don't return the termination command
200 201 except EOFError:
201 202 print()
202 203 return lines
203 204
204 205
205 206 def raw_input_ext(prompt='', ps2='... '):
206 207 """Similar to raw_input(), but accepts extended lines if input ends with \\."""
207 208
208 209 line = raw_input(prompt)
209 210 while line.endswith('\\'):
210 211 line = line[:-1] + raw_input(ps2)
211 212 return line
212 213
213 214
214 215 def ask_yes_no(prompt,default=None):
215 216 """Asks a question and returns a boolean (y/n) answer.
216 217
217 218 If default is given (one of 'y','n'), it is used if the user input is
218 219 empty. Otherwise the question is repeated until an answer is given.
219 220
220 221 An EOF is treated as the default answer. If there is no default, an
221 222 exception is raised to prevent infinite loops.
222 223
223 224 Valid answers are: y/yes/n/no (match is not case sensitive)."""
224 225
225 226 answers = {'y':True,'n':False,'yes':True,'no':False}
226 227 ans = None
227 228 while ans not in answers.keys():
228 229 try:
229 230 ans = raw_input(prompt+' ').lower()
230 231 if not ans: # response was an empty string
231 232 ans = default
232 233 except KeyboardInterrupt:
233 234 pass
234 235 except EOFError:
235 236 if default in answers.keys():
236 237 ans = default
237 238 print()
238 239 else:
239 240 raise
240 241
241 242 return answers[ans]
242 243
243 244
244 245 class NLprinter:
245 246 """Print an arbitrarily nested list, indicating index numbers.
246 247
247 248 An instance of this class called nlprint is available and callable as a
248 249 function.
249 250
250 251 nlprint(list,indent=' ',sep=': ') -> prints indenting each level by 'indent'
251 252 and using 'sep' to separate the index from the value. """
252 253
253 254 def __init__(self):
254 255 self.depth = 0
255 256
256 257 def __call__(self,lst,pos='',**kw):
257 258 """Prints the nested list numbering levels."""
258 259 kw.setdefault('indent',' ')
259 260 kw.setdefault('sep',': ')
260 261 kw.setdefault('start',0)
261 262 kw.setdefault('stop',len(lst))
262 263 # we need to remove start and stop from kw so they don't propagate
263 264 # into a recursive call for a nested list.
264 265 start = kw['start']; del kw['start']
265 266 stop = kw['stop']; del kw['stop']
266 267 if self.depth == 0 and 'header' in kw.keys():
267 268 print(kw['header'])
268 269
269 270 for idx in range(start,stop):
270 271 elem = lst[idx]
271 272 newpos = pos + str(idx)
272 273 if type(elem)==type([]):
273 274 self.depth += 1
274 275 self.__call__(elem, newpos+",", **kw)
275 276 self.depth -= 1
276 277 else:
277 278 print(kw['indent']*self.depth + newpos + kw["sep"] + repr(elem))
278 279
279 280 nlprint = NLprinter()
280 281
281 282
282 283 def temp_pyfile(src, ext='.py'):
283 284 """Make a temporary python file, return filename and filehandle.
284 285
285 286 Parameters
286 287 ----------
287 288 src : string or list of strings (no need for ending newlines if list)
288 289 Source code to be written to the file.
289 290
290 291 ext : optional, string
291 292 Extension for the generated file.
292 293
293 294 Returns
294 295 -------
295 296 (filename, open filehandle)
296 297 It is the caller's responsibility to close the open file and unlink it.
297 298 """
298 299 fname = tempfile.mkstemp(ext)[1]
299 300 f = open(fname,'w')
300 301 f.write(src)
301 302 f.flush()
302 303 return fname, f
303 304
304 305
305 306 def raw_print(*args, **kw):
306 307 """Raw print to sys.__stdout__, otherwise identical interface to print()."""
307 308
308 309 print(*args, sep=kw.get('sep', ' '), end=kw.get('end', '\n'),
309 310 file=sys.__stdout__)
310 311 sys.__stdout__.flush()
311 312
312 313
313 314 def raw_print_err(*args, **kw):
314 315 """Raw print to sys.__stderr__, otherwise identical interface to print()."""
315 316
316 317 print(*args, sep=kw.get('sep', ' '), end=kw.get('end', '\n'),
317 318 file=sys.__stderr__)
318 319 sys.__stderr__.flush()
319 320
320 321
321 322 # Short aliases for quick debugging, do NOT use these in production code.
322 323 rprint = raw_print
323 324 rprinte = raw_print_err
325
326
327 class CapturedIO(object):
328 """Simple object for containing captured stdout/err StringIO objects"""
329
330 def __init__(self, stdout, stderr):
331 self._stdout = stdout
332 self._stderr = stderr
333
334 def __str__(self):
335 return self.stdout
336
337 @property
338 def stdout(self):
339 if not self._stdout:
340 return ''
341 return self._stdout.getvalue()
342
343 @property
344 def stderr(self):
345 if not self._stderr:
346 return ''
347 return self._stderr.getvalue()
348
349 def show(self):
350 """write my output to sys.stdout/err as appropriate"""
351 sys.stdout.write(self.stdout)
352 sys.stderr.write(self.stderr)
353 sys.stdout.flush()
354 sys.stderr.flush()
355
356 __call__ = show
357
358
359 class capture_output(object):
360 """context manager for capturing stdout/err"""
361 stdout = True
362 stderr = True
363
364 def __init__(self, stdout=True, stderr=True):
365 self.stdout = stdout
366 self.stderr = stderr
367
368 def __enter__(self):
369 self.sys_stdout = sys.stdout
370 self.sys_stderr = sys.stderr
371
372 stdout = stderr = False
373 if self.stdout:
374 stdout = sys.stdout = StringIO()
375 if self.stderr:
376 stderr = sys.stderr = StringIO()
377
378 return CapturedIO(stdout, stderr)
379
380 def __exit__(self, exc_type, exc_value, traceback):
381 sys.stdout = self.sys_stdout
382 sys.stderr = self.sys_stderr
383
384
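A minimal usage sketch of the capture_output context manager and CapturedIO object added above (Python 2, like the module itself):

    import sys
    from IPython.utils.io import capture_output

    with capture_output() as io:
        print 'hi, stdout'
        print >> sys.stderr, 'hi, stderr'

    print repr(io.stdout)     # 'hi, stdout\n'
    print repr(io.stderr)     # 'hi, stderr\n'
    io.show()                 # replay both captured streams on the real stdout/stderr
    io()                      # __call__ is an alias for show()

    # capturing can be disabled per stream:
    with capture_output(stderr=False) as io:
        print >> sys.stderr, 'goes straight to the real stderr'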
@@ -1,75 +1,85 b''
1 1 # encoding: utf-8
2 2 """Tests for io.py"""
3 3
4 4 #-----------------------------------------------------------------------------
5 5 # Copyright (C) 2008-2011 The IPython Development Team
6 6 #
7 7 # Distributed under the terms of the BSD License. The full license is in
8 8 # the file COPYING, distributed as part of this software.
9 9 #-----------------------------------------------------------------------------
10 10
11 11 #-----------------------------------------------------------------------------
12 12 # Imports
13 13 #-----------------------------------------------------------------------------
14 14
15 15 import sys
16 16
17 17 from StringIO import StringIO
18 18 from subprocess import Popen, PIPE
19 19
20 20 import nose.tools as nt
21 21
22 22 from IPython.testing import decorators as dec
23 from IPython.utils.io import Tee
23 from IPython.utils.io import Tee, capture_output
24 24 from IPython.utils.py3compat import doctest_refactor_print
25 25
26 26 #-----------------------------------------------------------------------------
27 27 # Tests
28 28 #-----------------------------------------------------------------------------
29 29
30 30
31 31 def test_tee_simple():
32 32 "Very simple check with stdout only"
33 33 chan = StringIO()
34 34 text = 'Hello'
35 35 tee = Tee(chan, channel='stdout')
36 36 print >> chan, text
37 37 nt.assert_equal(chan.getvalue(), text+"\n")
38 38
39 39
40 40 class TeeTestCase(dec.ParametricTestCase):
41 41
42 42 def tchan(self, channel, check='close'):
43 43 trap = StringIO()
44 44 chan = StringIO()
45 45 text = 'Hello'
46 46
47 47 std_ori = getattr(sys, channel)
48 48 setattr(sys, channel, trap)
49 49
50 50 tee = Tee(chan, channel=channel)
51 51 print >> chan, text,
52 52 setattr(sys, channel, std_ori)
53 53 trap_val = trap.getvalue()
54 54 nt.assert_equals(chan.getvalue(), text)
55 55 if check=='close':
56 56 tee.close()
57 57 else:
58 58 del tee
59 59
60 60 def test(self):
61 61 for chan in ['stdout', 'stderr']:
62 62 for check in ['close', 'del']:
63 63 yield self.tchan(chan, check)
64 64
65 65 def test_io_init():
66 66 """Test that io.stdin/out/err exist at startup"""
67 67 for name in ('stdin', 'stdout', 'stderr'):
68 68 cmd = doctest_refactor_print("from IPython.utils import io;print io.%s.__class__"%name)
69 69 p = Popen([sys.executable, '-c', cmd],
70 70 stdout=PIPE)
71 71 p.wait()
72 72 classname = p.stdout.read().strip().decode('ascii')
73 73 # __class__ is a reference to the class object in Python 3, so we can't
74 74 # just test for string equality.
75 75 assert 'IPython.utils.io.IOStream' in classname, classname
76
77 def test_capture_output():
78 """capture_output() context works"""
79
80 with capture_output() as io:
81 print 'hi, stdout'
82 print >> sys.stderr, 'hi, stderr'
83
84 nt.assert_equals(io.stdout, 'hi, stdout\n')
85 nt.assert_equals(io.stderr, 'hi, stderr\n')
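For completeness, a small sketch of the Tee behaviour covered by TeeTestCase above (Python 2; a StringIO stands in for a log file):

    from StringIO import StringIO
    from IPython.utils.io import Tee

    log = StringIO()
    tee = Tee(log, channel='stdout')   # writes to sys.stdout are now duplicated into log
    print 'Hello'                      # reaches both the real stdout and log
    copied = log.getvalue()            # 'Hello\n' -- read before close(), which closes log
    tee.close()                        # restores sys.stdout and closes the duplicate file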