remove sys_version for Python 3...
Paul Ivanov

The requested changes are too big and content was truncated.
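
Both visible hunks below serve the same cleanup: the debugger hunk unindents a definition that was wrapped in a Python-3-only version guard, and the tab-completion test hunk deletes sys.version_info based skip decorators that are always true once Python 2 support is gone. As a rough, hypothetical illustration of the guard pattern being removed (the function name and bodies here are made up, not taken from this diff):

    import sys

    # Before the cleanup: the definition only exists behind a version check.
    if sys.version_info > (3,):
        def where(context=None):
            print("printing a stack trace with context", context)

    # After dropping Python 2 support, the guard goes away and the definition
    # is simply unindented:
    def where(context=None):
        print("printing a stack trace with context", context)

    where(5)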

@@ -1,631 +1,630 @@
1 1 # -*- coding: utf-8 -*-
2 2 """
3 3 Pdb debugger class.
4 4
5 5 Modified from the standard pdb.Pdb class to avoid including readline, so that
6 6 the command line completion of other programs which include this isn't
7 7 damaged.
8 8
9 9 In the future, this class will be expanded with improvements over the standard
10 10 pdb.
11 11
12 12 The code in this file is mainly lifted out of cmd.py in Python 2.2, with minor
13 13 changes. Licensing should therefore be under the standard Python terms. For
14 14 details on the PSF (Python Software Foundation) standard license, see:
15 15
16 16 http://www.python.org/2.2.3/license.html"""
17 17
18 18 #*****************************************************************************
19 19 #
20 20 # This file is licensed under the PSF license.
21 21 #
22 22 # Copyright (C) 2001 Python Software Foundation, www.python.org
23 23 # Copyright (C) 2005-2006 Fernando Perez. <fperez@colorado.edu>
24 24 #
25 25 #
26 26 #*****************************************************************************
27 27 from __future__ import print_function
28 28
29 29 import bdb
30 30 import functools
31 31 import inspect
32 32 import sys
33 33 import warnings
34 34
35 35 from IPython import get_ipython
36 36 from IPython.utils import PyColorize, ulinecache
37 37 from IPython.utils import coloransi, py3compat
38 38 from IPython.core.excolors import exception_colors
39 39 from IPython.testing.skipdoctest import skip_doctest
40 40
41 41
42 42 prompt = 'ipdb> '
43 43
44 44 #We have to check this directly from sys.argv, config struct not yet available
45 45 from pdb import Pdb as OldPdb
46 46
47 47 # Allow the set_trace code to operate outside of an ipython instance, even if
48 48 # it does so with some limitations. The rest of this support is implemented in
49 49 # the Tracer constructor.
50 50
51 51 def make_arrow(pad):
52 52 """generate the leading arrow in front of traceback or debugger"""
53 53 if pad >= 2:
54 54 return '-'*(pad-2) + '> '
55 55 elif pad == 1:
56 56 return '>'
57 57 return ''
58 58
59 59
60 60 def BdbQuit_excepthook(et, ev, tb, excepthook=None):
61 61 """Exception hook which handles `BdbQuit` exceptions.
62 62
63 63 All other exceptions are processed using the `excepthook`
64 64 parameter.
65 65 """
66 66 warnings.warn("`BdbQuit_excepthook` is deprecated since version 5.1",
67 67 DeprecationWarning)
68 68 if et==bdb.BdbQuit:
69 69 print('Exiting Debugger.')
70 70 elif excepthook is not None:
71 71 excepthook(et, ev, tb)
72 72 else:
73 73 # Backwards compatibility. Raise deprecation warning?
74 74 BdbQuit_excepthook.excepthook_ori(et,ev,tb)
75 75
76 76
77 77 def BdbQuit_IPython_excepthook(self,et,ev,tb,tb_offset=None):
78 78 warnings.warn(
79 79 "`BdbQuit_IPython_excepthook` is deprecated since version 5.1",
80 80 DeprecationWarning)
81 81 print('Exiting Debugger.')
82 82
83 83
84 84 class Tracer(object):
85 85 """
86 86 DEPRECATED
87 87
88 88 Class for local debugging, similar to pdb.set_trace.
89 89
90 90 Instances of this class, when called, behave like pdb.set_trace, but
91 91 providing IPython's enhanced capabilities.
92 92
93 93 This is implemented as a class which must be initialized in your own code
94 94 and not as a standalone function because we need to detect at runtime
95 95 whether IPython is already active or not. That detection is done in the
96 96 constructor, ensuring that this code plays nicely with a running IPython,
97 97 while functioning acceptably (though with limitations) if outside of it.
98 98 """
99 99
100 100 @skip_doctest
101 101 def __init__(self, colors=None):
102 102 """
103 103 DEPRECATED
104 104
105 105 Create a local debugger instance.
106 106
107 107 Parameters
108 108 ----------
109 109
110 110 colors : str, optional
111 111 The name of the color scheme to use, it must be one of IPython's
112 112 valid color schemes. If not given, the function will default to
113 113 the current IPython scheme when running inside IPython, and to
114 114 'NoColor' otherwise.
115 115
116 116 Examples
117 117 --------
118 118 ::
119 119
120 120 from IPython.core.debugger import Tracer; debug_here = Tracer()
121 121
122 122 Later in your code::
123 123
124 124 debug_here() # -> will open up the debugger at that point.
125 125
126 126 Once the debugger activates, you can use all of its regular commands to
127 127 step through code, set breakpoints, etc. See the pdb documentation
128 128 from the Python standard library for usage details.
129 129 """
130 130 warnings.warn("`Tracer` is deprecated since version 5.1, directly use "
131 131 "`IPython.core.debugger.Pdb.set_trace()`",
132 132 DeprecationWarning)
133 133
134 134 ip = get_ipython()
135 135 if ip is None:
136 136 # Outside of ipython, we set our own exception hook manually
137 137 sys.excepthook = functools.partial(BdbQuit_excepthook,
138 138 excepthook=sys.excepthook)
139 139 def_colors = 'NoColor'
140 140 else:
141 141 # In ipython, we use its custom exception handler mechanism
142 142 def_colors = ip.colors
143 143 ip.set_custom_exc((bdb.BdbQuit,), BdbQuit_IPython_excepthook)
144 144
145 145 if colors is None:
146 146 colors = def_colors
147 147
148 148 # The stdlib debugger internally uses a modified repr from the `repr`
149 149 # module, that limits the length of printed strings to a hardcoded
150 150 # limit of 30 characters. That much trimming is too aggressive, let's
151 151 # at least raise that limit to 80 chars, which should be enough for
152 152 # most interactive uses.
153 153 try:
154 154 try:
155 155 from reprlib import aRepr # Py 3
156 156 except ImportError:
157 157 from repr import aRepr # Py 2
158 158 aRepr.maxstring = 80
159 159 except:
160 160 # This is only a user-facing convenience, so any error we encounter
161 161 # here can be warned about but can be otherwise ignored. These
162 162 # printouts will tell us about problems if this API changes
163 163 import traceback
164 164 traceback.print_exc()
165 165
166 166 self.debugger = Pdb(colors)
167 167
168 168 def __call__(self):
169 169 """Starts an interactive debugger at the point where called.
170 170
171 171 This is similar to the pdb.set_trace() function from the std lib, but
172 172 using IPython's enhanced debugger."""
173 173
174 174 self.debugger.set_trace(sys._getframe().f_back)
175 175
176 176
177 177 def decorate_fn_with_doc(new_fn, old_fn, additional_text=""):
178 178 """Make new_fn have old_fn's doc string. This is particularly useful
179 179 for the ``do_...`` commands that hook into the help system.
180 180 Adapted from a comp.lang.python posting
181 181 by Duncan Booth."""
182 182 def wrapper(*args, **kw):
183 183 return new_fn(*args, **kw)
184 184 if old_fn.__doc__:
185 185 wrapper.__doc__ = old_fn.__doc__ + additional_text
186 186 return wrapper
187 187
188 188
189 189 def _file_lines(fname):
190 190 """Return the contents of a named file as a list of lines.
191 191
192 192 This function never raises an IOError exception: if the file can't be
193 193 read, it simply returns an empty list."""
194 194
195 195 try:
196 196 outfile = open(fname)
197 197 except IOError:
198 198 return []
199 199 else:
200 200 out = outfile.readlines()
201 201 outfile.close()
202 202 return out
203 203
204 204
205 205 class Pdb(OldPdb, object):
206 206 """Modified Pdb class, does not load readline.
207 207
208 208 for a standalone version that uses prompt_toolkit, see
209 209 `IPython.terminal.debugger.TerminalPdb` and
210 210 `IPython.terminal.debugger.set_trace()`
211 211 """
212 212
213 213 def __init__(self, color_scheme=None, completekey=None,
214 214 stdin=None, stdout=None, context=5):
215 215
216 216 # Parent constructor:
217 217 try:
218 218 self.context = int(context)
219 219 if self.context <= 0:
220 220 raise ValueError("Context must be a positive integer")
221 221 except (TypeError, ValueError):
222 222 raise ValueError("Context must be a positive integer")
223 223
224 224 OldPdb.__init__(self, completekey, stdin, stdout)
225 225
226 226 # IPython changes...
227 227 self.shell = get_ipython()
228 228
229 229 if self.shell is None:
230 230 save_main = sys.modules['__main__']
231 231 # No IPython instance running, we must create one
232 232 from IPython.terminal.interactiveshell import \
233 233 TerminalInteractiveShell
234 234 self.shell = TerminalInteractiveShell.instance()
235 235 # needed by any code which calls __import__("__main__") after
236 236 # the debugger was entered. See also #9941.
237 237 sys.modules['__main__'] = save_main
238 238
239 239 if color_scheme is not None:
240 240 warnings.warn(
241 241 "The `color_scheme` argument is deprecated since version 5.1",
242 242 DeprecationWarning, stacklevel=2)
243 243 else:
244 244 color_scheme = self.shell.colors
245 245
246 246 self.aliases = {}
247 247
248 248 # Create color table: we copy the default one from the traceback
249 249 # module and add a few attributes needed for debugging
250 250 self.color_scheme_table = exception_colors()
251 251
252 252 # shorthands
253 253 C = coloransi.TermColors
254 254 cst = self.color_scheme_table
255 255
256 256 cst['NoColor'].colors.prompt = C.NoColor
257 257 cst['NoColor'].colors.breakpoint_enabled = C.NoColor
258 258 cst['NoColor'].colors.breakpoint_disabled = C.NoColor
259 259
260 260 cst['Linux'].colors.prompt = C.Green
261 261 cst['Linux'].colors.breakpoint_enabled = C.LightRed
262 262 cst['Linux'].colors.breakpoint_disabled = C.Red
263 263
264 264 cst['LightBG'].colors.prompt = C.Blue
265 265 cst['LightBG'].colors.breakpoint_enabled = C.LightRed
266 266 cst['LightBG'].colors.breakpoint_disabled = C.Red
267 267
268 268 cst['Neutral'].colors.prompt = C.Blue
269 269 cst['Neutral'].colors.breakpoint_enabled = C.LightRed
270 270 cst['Neutral'].colors.breakpoint_disabled = C.Red
271 271
272 272
273 273 # Add a python parser so we can syntax highlight source while
274 274 # debugging.
275 275 self.parser = PyColorize.Parser(style=color_scheme)
276 276 self.set_colors(color_scheme)
277 277
278 278 # Set the prompt - the default prompt is '(Pdb)'
279 279 self.prompt = prompt
280 280
281 281 def set_colors(self, scheme):
282 282 """Shorthand access to the color table scheme selector method."""
283 283 self.color_scheme_table.set_active_scheme(scheme)
284 284 self.parser.style = scheme
285 285
286 286 def interaction(self, frame, traceback):
287 287 try:
288 288 OldPdb.interaction(self, frame, traceback)
289 289 except KeyboardInterrupt:
290 290 sys.stdout.write('\n' + self.shell.get_exception_only())
291 291
292 292 def parseline(self, line):
293 293 if line.startswith("!!"):
294 294 # Force standard behavior.
295 295 return super(Pdb, self).parseline(line[2:])
296 296 # "Smart command mode" from pdb++: don't execute commands if a variable
297 297 # with the same name exists.
298 298 cmd, arg, newline = super(Pdb, self).parseline(line)
299 299 # Fix for #9611: Do not trigger smart command if the command is `exit`
300 300 # or `quit` and it would resolve to their *global* value (the
301 301 # `ExitAutocall` object). Just checking that it is not present in the
302 302 # locals dict is not enough as locals and globals match at the
303 303 # toplevel.
304 304 if ((cmd in self.curframe.f_locals or cmd in self.curframe.f_globals)
305 305 and not (cmd in ["exit", "quit"]
306 306 and (self.curframe.f_locals is self.curframe.f_globals
307 307 or cmd not in self.curframe.f_locals))):
308 308 return super(Pdb, self).parseline("!" + line)
309 309 return super(Pdb, self).parseline(line)
310 310
311 311 def new_do_up(self, arg):
312 312 OldPdb.do_up(self, arg)
313 313 do_u = do_up = decorate_fn_with_doc(new_do_up, OldPdb.do_up)
314 314
315 315 def new_do_down(self, arg):
316 316 OldPdb.do_down(self, arg)
317 317
318 318 do_d = do_down = decorate_fn_with_doc(new_do_down, OldPdb.do_down)
319 319
320 320 def new_do_frame(self, arg):
321 321 OldPdb.do_frame(self, arg)
322 322
323 323 def new_do_quit(self, arg):
324 324
325 325 if hasattr(self, 'old_all_completions'):
326 326 self.shell.Completer.all_completions=self.old_all_completions
327 327
328 328 return OldPdb.do_quit(self, arg)
329 329
330 330 do_q = do_quit = decorate_fn_with_doc(new_do_quit, OldPdb.do_quit)
331 331
332 332 def new_do_restart(self, arg):
333 333 """Restart command. In the context of ipython this is exactly the same
334 334 thing as 'quit'."""
335 335 self.msg("Restart doesn't make sense here. Using 'quit' instead.")
336 336 return self.do_quit(arg)
337 337
338 338 def print_stack_trace(self, context=None):
339 339 if context is None:
340 340 context = self.context
341 341 try:
342 342 context=int(context)
343 343 if context <= 0:
344 344 raise ValueError("Context must be a positive integer")
345 345 except (TypeError, ValueError):
346 346 raise ValueError("Context must be a positive integer")
347 347 try:
348 348 for frame_lineno in self.stack:
349 349 self.print_stack_entry(frame_lineno, context=context)
350 350 except KeyboardInterrupt:
351 351 pass
352 352
353 353 def print_stack_entry(self,frame_lineno, prompt_prefix='\n-> ',
354 354 context=None):
355 355 if context is None:
356 356 context = self.context
357 357 try:
358 358 context=int(context)
359 359 if context <= 0:
360 360 raise ValueError("Context must be a positive integer")
361 361 except (TypeError, ValueError):
362 362 raise ValueError("Context must be a positive integer")
363 363 print(self.format_stack_entry(frame_lineno, '', context))
364 364
365 365 # vds: >>
366 366 frame, lineno = frame_lineno
367 367 filename = frame.f_code.co_filename
368 368 self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
369 369 # vds: <<
370 370
371 371 def format_stack_entry(self, frame_lineno, lprefix=': ', context=None):
372 372 if context is None:
373 373 context = self.context
374 374 try:
375 375 context=int(context)
376 376 if context <= 0:
377 377 print("Context must be a positive integer")
378 378 except (TypeError, ValueError):
379 379 print("Context must be a positive integer")
380 380 try:
381 381 import reprlib # Py 3
382 382 except ImportError:
383 383 import repr as reprlib # Py 2
384 384
385 385 ret = []
386 386
387 387 Colors = self.color_scheme_table.active_colors
388 388 ColorsNormal = Colors.Normal
389 389 tpl_link = u'%s%%s%s' % (Colors.filenameEm, ColorsNormal)
390 390 tpl_call = u'%s%%s%s%%s%s' % (Colors.vName, Colors.valEm, ColorsNormal)
391 391 tpl_line = u'%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
392 392 tpl_line_em = u'%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line,
393 393 ColorsNormal)
394 394
395 395 frame, lineno = frame_lineno
396 396
397 397 return_value = ''
398 398 if '__return__' in frame.f_locals:
399 399 rv = frame.f_locals['__return__']
400 400 #return_value += '->'
401 401 return_value += reprlib.repr(rv) + '\n'
402 402 ret.append(return_value)
403 403
404 404 #s = filename + '(' + `lineno` + ')'
405 405 filename = self.canonic(frame.f_code.co_filename)
406 406 link = tpl_link % py3compat.cast_unicode(filename)
407 407
408 408 if frame.f_code.co_name:
409 409 func = frame.f_code.co_name
410 410 else:
411 411 func = "<lambda>"
412 412
413 413 call = ''
414 414 if func != '?':
415 415 if '__args__' in frame.f_locals:
416 416 args = reprlib.repr(frame.f_locals['__args__'])
417 417 else:
418 418 args = '()'
419 419 call = tpl_call % (func, args)
420 420
421 421 # The level info should be generated in the same format pdb uses, to
422 422 # avoid breaking the pdbtrack functionality of python-mode in *emacs.
423 423 if frame is self.curframe:
424 424 ret.append('> ')
425 425 else:
426 426 ret.append(' ')
427 427 ret.append(u'%s(%s)%s\n' % (link,lineno,call))
428 428
429 429 start = lineno - 1 - context//2
430 430 lines = ulinecache.getlines(filename)
431 431 start = min(start, len(lines) - context)
432 432 start = max(start, 0)
433 433 lines = lines[start : start + context]
434 434
435 435 for i,line in enumerate(lines):
436 436 show_arrow = (start + 1 + i == lineno)
437 437 linetpl = (frame is self.curframe or show_arrow) \
438 438 and tpl_line_em \
439 439 or tpl_line
440 440 ret.append(self.__format_line(linetpl, filename,
441 441 start + 1 + i, line,
442 442 arrow = show_arrow) )
443 443 return ''.join(ret)
444 444
445 445 def __format_line(self, tpl_line, filename, lineno, line, arrow = False):
446 446 bp_mark = ""
447 447 bp_mark_color = ""
448 448
449 449 new_line, err = self.parser.format2(line, 'str')
450 450 if not err:
451 451 line = new_line
452 452
453 453 bp = None
454 454 if lineno in self.get_file_breaks(filename):
455 455 bps = self.get_breaks(filename, lineno)
456 456 bp = bps[-1]
457 457
458 458 if bp:
459 459 Colors = self.color_scheme_table.active_colors
460 460 bp_mark = str(bp.number)
461 461 bp_mark_color = Colors.breakpoint_enabled
462 462 if not bp.enabled:
463 463 bp_mark_color = Colors.breakpoint_disabled
464 464
465 465 numbers_width = 7
466 466 if arrow:
467 467 # This is the line with the error
468 468 pad = numbers_width - len(str(lineno)) - len(bp_mark)
469 469 num = '%s%s' % (make_arrow(pad), str(lineno))
470 470 else:
471 471 num = '%*s' % (numbers_width - len(bp_mark), str(lineno))
472 472
473 473 return tpl_line % (bp_mark_color + bp_mark, num, line)
474 474
475 475
476 476 def print_list_lines(self, filename, first, last):
477 477 """The printing (as opposed to the parsing) part of a 'list'
478 478 command."""
479 479 try:
480 480 Colors = self.color_scheme_table.active_colors
481 481 ColorsNormal = Colors.Normal
482 482 tpl_line = '%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
483 483 tpl_line_em = '%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line, ColorsNormal)
484 484 src = []
485 485 if filename == "<string>" and hasattr(self, "_exec_filename"):
486 486 filename = self._exec_filename
487 487
488 488 for lineno in range(first, last+1):
489 489 line = ulinecache.getline(filename, lineno)
490 490 if not line:
491 491 break
492 492
493 493 if lineno == self.curframe.f_lineno:
494 494 line = self.__format_line(tpl_line_em, filename, lineno, line, arrow = True)
495 495 else:
496 496 line = self.__format_line(tpl_line, filename, lineno, line, arrow = False)
497 497
498 498 src.append(line)
499 499 self.lineno = lineno
500 500
501 501 print(''.join(src))
502 502
503 503 except KeyboardInterrupt:
504 504 pass
505 505
506 506 def do_list(self, arg):
507 507 self.lastcmd = 'list'
508 508 last = None
509 509 if arg:
510 510 try:
511 511 x = eval(arg, {}, {})
512 512 if type(x) == type(()):
513 513 first, last = x
514 514 first = int(first)
515 515 last = int(last)
516 516 if last < first:
517 517 # Assume it's a count
518 518 last = first + last
519 519 else:
520 520 first = max(1, int(x) - 5)
521 521 except:
522 522 print('*** Error in argument:', repr(arg))
523 523 return
524 524 elif self.lineno is None:
525 525 first = max(1, self.curframe.f_lineno - 5)
526 526 else:
527 527 first = self.lineno + 1
528 528 if last is None:
529 529 last = first + 10
530 530 self.print_list_lines(self.curframe.f_code.co_filename, first, last)
531 531
532 532 # vds: >>
533 533 lineno = first
534 534 filename = self.curframe.f_code.co_filename
535 535 self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
536 536 # vds: <<
537 537
538 538 do_l = do_list
539 539
540 540 def getsourcelines(self, obj):
541 541 lines, lineno = inspect.findsource(obj)
542 542 if inspect.isframe(obj) and obj.f_globals is obj.f_locals:
543 543 # must be a module frame: do not try to cut a block out of it
544 544 return lines, 1
545 545 elif inspect.ismodule(obj):
546 546 return lines, 1
547 547 return inspect.getblock(lines[lineno:]), lineno+1
548 548
549 549 def do_longlist(self, arg):
550 550 self.lastcmd = 'longlist'
551 551 try:
552 552 lines, lineno = self.getsourcelines(self.curframe)
553 553 except OSError as err:
554 554 self.error(err)
555 555 return
556 556 last = lineno + len(lines)
557 557 self.print_list_lines(self.curframe.f_code.co_filename, lineno, last)
558 558 do_ll = do_longlist
559 559
560 560 def do_pdef(self, arg):
561 561 """Print the call signature for any callable object.
562 562
563 563 The debugger interface to %pdef"""
564 564 namespaces = [('Locals', self.curframe.f_locals),
565 565 ('Globals', self.curframe.f_globals)]
566 566 self.shell.find_line_magic('pdef')(arg, namespaces=namespaces)
567 567
568 568 def do_pdoc(self, arg):
569 569 """Print the docstring for an object.
570 570
571 571 The debugger interface to %pdoc."""
572 572 namespaces = [('Locals', self.curframe.f_locals),
573 573 ('Globals', self.curframe.f_globals)]
574 574 self.shell.find_line_magic('pdoc')(arg, namespaces=namespaces)
575 575
576 576 def do_pfile(self, arg):
577 577 """Print (or run through pager) the file where an object is defined.
578 578
579 579 The debugger interface to %pfile.
580 580 """
581 581 namespaces = [('Locals', self.curframe.f_locals),
582 582 ('Globals', self.curframe.f_globals)]
583 583 self.shell.find_line_magic('pfile')(arg, namespaces=namespaces)
584 584
585 585 def do_pinfo(self, arg):
586 586 """Provide detailed information about an object.
587 587
588 588 The debugger interface to %pinfo, i.e., obj?."""
589 589 namespaces = [('Locals', self.curframe.f_locals),
590 590 ('Globals', self.curframe.f_globals)]
591 591 self.shell.find_line_magic('pinfo')(arg, namespaces=namespaces)
592 592
593 593 def do_pinfo2(self, arg):
594 594 """Provide extra detailed information about an object.
595 595
596 596 The debugger interface to %pinfo2, i.e., obj??."""
597 597 namespaces = [('Locals', self.curframe.f_locals),
598 598 ('Globals', self.curframe.f_globals)]
599 599 self.shell.find_line_magic('pinfo2')(arg, namespaces=namespaces)
600 600
601 601 def do_psource(self, arg):
602 602 """Print (or run through pager) the source code for an object."""
603 603 namespaces = [('Locals', self.curframe.f_locals),
604 604 ('Globals', self.curframe.f_globals)]
605 605 self.shell.find_line_magic('psource')(arg, namespaces=namespaces)
606 606
607 if sys.version_info > (3, ):
608 def do_where(self, arg):
609 """w(here)
610 Print a stack trace, with the most recent frame at the bottom.
611 An arrow indicates the "current frame", which determines the
612 context of most commands. 'bt' is an alias for this command.
613
614 Take a number as argument as an (optional) number of context line to
615 print"""
616 if arg:
617 context = int(arg)
618 self.print_stack_trace(context)
619 else:
620 self.print_stack_trace()
607 def do_where(self, arg):
608 """w(here)
609 Print a stack trace, with the most recent frame at the bottom.
610 An arrow indicates the "current frame", which determines the
611 context of most commands. 'bt' is an alias for this command.
612
613 Take a number as argument as an (optional) number of context line to
614 print"""
615 if arg:
616 context = int(arg)
617 self.print_stack_trace(context)
618 else:
619 self.print_stack_trace()
621 620
622 do_w = do_where
621 do_w = do_where
623 622
624 623
625 624 def set_trace(frame=None):
626 625 """
627 626 Start debugging from `frame`.
628 627
629 628 If frame is not specified, debugging starts from caller's frame.
630 629 """
631 630 Pdb().set_trace(frame or sys._getframe().f_back)
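
The deprecation warnings in the hunk above steer users away from Tracer and toward the set_trace entry points this module itself provides (Pdb.set_trace and the module-level set_trace defined just above). A minimal usage sketch, assuming an IPython installation; the divide function is a made-up example:

    from IPython.core.debugger import set_trace

    def divide(a, b):
        set_trace()  # drops into the IPython-flavoured debugger ('ipdb> ' prompt) here
        return a / b

    divide(4, 2)  # stepping happens interactively; 'c' continues execution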
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated.
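
In the tab-completion test hunk that follows, the changed lines remove @dec.onlyif(sys.version_info[0] >= 3, ...) markers, since that condition is always true once Python 2 is dropped (dec is IPython.testing.decorators, imported near the top of the hunk). The sketch below uses a simplified stand-in decorator, not IPython's actual implementation, to show what such a guard does:

    import functools
    import sys

    def onlyif(condition, reason):
        """Stand-in 'run only if' decorator: skip the test when the condition is false."""
        def decorator(fn):
            @functools.wraps(fn)
            def wrapper(*args, **kwargs):
                if not condition:
                    print("SKIP:", reason)
                    return None
                return fn(*args, **kwargs)
            return wrapper
        return decorator

    @onlyif(sys.version_info[0] >= 3, "this test only applies on Python 3")
    def test_back_latex_completion():
        # trivial assertion standing in for the real completion check
        assert "\\beta".startswith("\\")

    test_back_latex_completion()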
@@ -1,806 +1,799 @@
1 1 # encoding: utf-8
2 2 """Tests for the IPython tab-completion machinery."""
3 3
4 4 # Copyright (c) IPython Development Team.
5 5 # Distributed under the terms of the Modified BSD License.
6 6
7 7 import os
8 8 import sys
9 9 import unittest
10 10
11 11 from contextlib import contextmanager
12 12
13 13 import nose.tools as nt
14 14
15 15 from traitlets.config.loader import Config
16 16 from IPython import get_ipython
17 17 from IPython.core import completer
18 18 from IPython.external.decorators import knownfailureif
19 19 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
20 20 from IPython.utils.generics import complete_object
21 21 from IPython.utils.py3compat import string_types, unicode_type
22 22 from IPython.testing import decorators as dec
23 23
24 24 #-----------------------------------------------------------------------------
25 25 # Test functions
26 26 #-----------------------------------------------------------------------------
27 27
28 28 @contextmanager
29 29 def greedy_completion():
30 30 ip = get_ipython()
31 31 greedy_original = ip.Completer.greedy
32 32 try:
33 33 ip.Completer.greedy = True
34 34 yield
35 35 finally:
36 36 ip.Completer.greedy = greedy_original
37 37
38 38 def test_protect_filename():
39 39 if sys.platform == 'win32':
40 40 pairs = [('abc','abc'),
41 41 (' abc','" abc"'),
42 42 ('a bc','"a bc"'),
43 43 ('a bc','"a bc"'),
44 44 (' bc','" bc"'),
45 45 ]
46 46 else:
47 47 pairs = [('abc','abc'),
48 48 (' abc',r'\ abc'),
49 49 ('a bc',r'a\ bc'),
50 50 ('a bc',r'a\ \ bc'),
51 51 (' bc',r'\ \ bc'),
52 52 # On posix, we also protect parens and other special characters.
53 53 ('a(bc',r'a\(bc'),
54 54 ('a)bc',r'a\)bc'),
55 55 ('a( )bc',r'a\(\ \)bc'),
56 56 ('a[1]bc', r'a\[1\]bc'),
57 57 ('a{1}bc', r'a\{1\}bc'),
58 58 ('a#bc', r'a\#bc'),
59 59 ('a?bc', r'a\?bc'),
60 60 ('a=bc', r'a\=bc'),
61 61 ('a\\bc', r'a\\bc'),
62 62 ('a|bc', r'a\|bc'),
63 63 ('a;bc', r'a\;bc'),
64 64 ('a:bc', r'a\:bc'),
65 65 ("a'bc", r"a\'bc"),
66 66 ('a*bc', r'a\*bc'),
67 67 ('a"bc', r'a\"bc'),
68 68 ('a^bc', r'a\^bc'),
69 69 ('a&bc', r'a\&bc'),
70 70 ]
71 71 # run the actual tests
72 72 for s1, s2 in pairs:
73 73 s1p = completer.protect_filename(s1)
74 74 nt.assert_equal(s1p, s2)
75 75
76 76
77 77 def check_line_split(splitter, test_specs):
78 78 for part1, part2, split in test_specs:
79 79 cursor_pos = len(part1)
80 80 line = part1+part2
81 81 out = splitter.split_line(line, cursor_pos)
82 82 nt.assert_equal(out, split)
83 83
84 84
85 85 def test_line_split():
86 86 """Basic line splitter test with default specs."""
87 87 sp = completer.CompletionSplitter()
88 88 # The format of the test specs is: part1, part2, expected answer. Parts 1
89 89 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
90 90 # was at the end of part1. So an empty part2 represents someone hitting
91 91 # tab at the end of the line, the most common case.
92 92 t = [('run some/scrip', '', 'some/scrip'),
93 93 ('run scripts/er', 'ror.py foo', 'scripts/er'),
94 94 ('echo $HOM', '', 'HOM'),
95 95 ('print sys.pa', '', 'sys.pa'),
96 96 ('print(sys.pa', '', 'sys.pa'),
97 97 ("execfile('scripts/er", '', 'scripts/er'),
98 98 ('a[x.', '', 'x.'),
99 99 ('a[x.', 'y', 'x.'),
100 100 ('cd "some_file/', '', 'some_file/'),
101 101 ]
102 102 check_line_split(sp, t)
103 103 # Ensure splitting works OK with unicode by re-running the tests with
104 104 # all inputs turned into unicode
105 105 check_line_split(sp, [ map(unicode_type, p) for p in t] )
106 106
107 107
108 108 def test_custom_completion_error():
109 109 """Test that errors from custom attribute completers are silenced."""
110 110 ip = get_ipython()
111 111 class A(object): pass
112 112 ip.user_ns['a'] = A()
113 113
114 114 @complete_object.when_type(A)
115 115 def complete_A(a, existing_completions):
116 116 raise TypeError("this should be silenced")
117 117
118 118 ip.complete("a.")
119 119
120 120
121 121 def test_unicode_completions():
122 122 ip = get_ipython()
123 123 # Some strings that trigger different types of completion. Check them both
124 124 # in str and unicode forms
125 125 s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']
126 126 for t in s + list(map(unicode_type, s)):
127 127 # We don't need to check exact completion values (they may change
128 128 # depending on the state of the namespace, but at least no exceptions
129 129 # should be thrown and the return value should be a pair of text, list
130 130 # values.
131 131 text, matches = ip.complete(t)
132 132 nt.assert_true(isinstance(text, string_types))
133 133 nt.assert_true(isinstance(matches, list))
134 134
135 135 def test_latex_completions():
136 136 from IPython.core.latex_symbols import latex_symbols
137 137 import random
138 138 ip = get_ipython()
139 139 # Test some random unicode symbols
140 140 keys = random.sample(latex_symbols.keys(), 10)
141 141 for k in keys:
142 142 text, matches = ip.complete(k)
143 143 nt.assert_equal(len(matches),1)
144 144 nt.assert_equal(text, k)
145 145 nt.assert_equal(matches[0], latex_symbols[k])
146 146 # Test a more complex line
147 147 text, matches = ip.complete(u'print(\\alpha')
148 148 nt.assert_equals(text, u'\\alpha')
149 149 nt.assert_equals(matches[0], latex_symbols['\\alpha'])
150 150 # Test multiple matching latex symbols
151 151 text, matches = ip.complete(u'\\al')
152 152 nt.assert_in('\\alpha', matches)
153 153 nt.assert_in('\\aleph', matches)
154 154
155 155
156 156
157 157
158 @dec.onlyif(sys.version_info[0] >= 3, 'This test only apply on python3')
159 158 def test_back_latex_completion():
160 159 ip = get_ipython()
161 160
162 161 # do not return more than 1 match for \beta, only the latex one.
163 162 name, matches = ip.complete('\\β')
164 163 nt.assert_equal(len(matches), 1)
165 164 nt.assert_equal(matches[0], '\\beta')
166 165
167 @dec.onlyif(sys.version_info[0] >= 3, 'This test only apply on python3')
168 166 def test_back_unicode_completion():
169 167 ip = get_ipython()
170 168
171 169 name, matches = ip.complete('\\Ⅴ')
172 170 nt.assert_equal(len(matches), 1)
173 171 nt.assert_equal(matches[0], '\\ROMAN NUMERAL FIVE')
174 172
175 173
176 @dec.onlyif(sys.version_info[0] >= 3, 'This test only apply on python3')
177 174 def test_forward_unicode_completion():
178 175 ip = get_ipython()
179 176
180 177 name, matches = ip.complete('\\ROMAN NUMERAL FIVE')
181 178 nt.assert_equal(len(matches), 1)
182 179 nt.assert_equal(matches[0], 'Ⅴ')
183 180
184 @dec.onlyif(sys.version_info[0] >= 3, 'This test only apply on python3')
185 181 @dec.knownfailureif(sys.platform == 'win32', 'Fails if there is a C:\\j... path')
186 182 def test_no_ascii_back_completion():
187 183 ip = get_ipython()
188 184 with TemporaryWorkingDirectory(): # Avoid any filename completions
190 186 # single ascii letters that don't yet have completions
190 186 for letter in 'jJ' :
191 187 name, matches = ip.complete('\\'+letter)
192 188 nt.assert_equal(matches, [])
193 189
194 190
195 191
196 192
197 193 class CompletionSplitterTestCase(unittest.TestCase):
198 194 def setUp(self):
199 195 self.sp = completer.CompletionSplitter()
200 196
201 197 def test_delim_setting(self):
202 198 self.sp.delims = ' '
203 199 nt.assert_equal(self.sp.delims, ' ')
204 200 nt.assert_equal(self.sp._delim_expr, '[\ ]')
205 201
206 202 def test_spaces(self):
207 203 """Test with only spaces as split chars."""
208 204 self.sp.delims = ' '
209 205 t = [('foo', '', 'foo'),
210 206 ('run foo', '', 'foo'),
211 207 ('run foo', 'bar', 'foo'),
212 208 ]
213 209 check_line_split(self.sp, t)
214 210
215 211
216 212 def test_has_open_quotes1():
217 213 for s in ["'", "'''", "'hi' '"]:
218 214 nt.assert_equal(completer.has_open_quotes(s), "'")
219 215
220 216
221 217 def test_has_open_quotes2():
222 218 for s in ['"', '"""', '"hi" "']:
223 219 nt.assert_equal(completer.has_open_quotes(s), '"')
224 220
225 221
226 222 def test_has_open_quotes3():
227 223 for s in ["''", "''' '''", "'hi' 'ipython'"]:
228 224 nt.assert_false(completer.has_open_quotes(s))
229 225
230 226
231 227 def test_has_open_quotes4():
232 228 for s in ['""', '""" """', '"hi" "ipython"']:
233 229 nt.assert_false(completer.has_open_quotes(s))
234 230
235 231
236 232 @knownfailureif(sys.platform == 'win32', "abspath completions fail on Windows")
237 233 def test_abspath_file_completions():
238 234 ip = get_ipython()
239 235 with TemporaryDirectory() as tmpdir:
240 236 prefix = os.path.join(tmpdir, 'foo')
241 237 suffixes = ['1', '2']
242 238 names = [prefix+s for s in suffixes]
243 239 for n in names:
244 240 open(n, 'w').close()
245 241
246 242 # Check simple completion
247 243 c = ip.complete(prefix)[1]
248 244 nt.assert_equal(c, names)
249 245
250 246 # Now check with a function call
251 247 cmd = 'a = f("%s' % prefix
252 248 c = ip.complete(prefix, cmd)[1]
253 249 comp = [prefix+s for s in suffixes]
254 250 nt.assert_equal(c, comp)
255 251
256 252
257 253 def test_local_file_completions():
258 254 ip = get_ipython()
259 255 with TemporaryWorkingDirectory():
260 256 prefix = './foo'
261 257 suffixes = ['1', '2']
262 258 names = [prefix+s for s in suffixes]
263 259 for n in names:
264 260 open(n, 'w').close()
265 261
266 262 # Check simple completion
267 263 c = ip.complete(prefix)[1]
268 264 nt.assert_equal(c, names)
269 265
270 266 # Now check with a function call
271 267 cmd = 'a = f("%s' % prefix
272 268 c = ip.complete(prefix, cmd)[1]
273 269 comp = set(prefix+s for s in suffixes)
274 270 nt.assert_true(comp.issubset(set(c)))
275 271
276 272
277 273 def test_greedy_completions():
278 274 ip = get_ipython()
279 275 ip.ex('a=list(range(5))')
280 276 _,c = ip.complete('.',line='a[0].')
281 277 nt.assert_false('.real' in c,
282 278 "Shouldn't have completed on a[0]: %s"%c)
283 279 with greedy_completion():
284 280 def _(line, cursor_pos, expect, message):
285 281 _,c = ip.complete('.', line=line, cursor_pos=cursor_pos)
286 282 nt.assert_in(expect, c, message%c)
287 283
288 284 yield _, 'a[0].', 5, 'a[0].real', "Should have completed on a[0].: %s"
289 285 yield _, 'a[0].r', 6, 'a[0].real', "Should have completed on a[0].r: %s"
290 286
291 287 if sys.version_info > (3,4):
292 288 yield _, 'a[0].from_', 10, 'a[0].from_bytes', "Should have completed on a[0].from_: %s"
293 289
294 290
295 291
296 292 def test_omit__names():
297 293 # also happens to test IPCompleter as a configurable
298 294 ip = get_ipython()
299 295 ip._hidden_attr = 1
300 296 ip._x = {}
301 297 c = ip.Completer
302 298 ip.ex('ip=get_ipython()')
303 299 cfg = Config()
304 300 cfg.IPCompleter.omit__names = 0
305 301 c.update_config(cfg)
306 302 s,matches = c.complete('ip.')
307 303 nt.assert_in('ip.__str__', matches)
308 304 nt.assert_in('ip._hidden_attr', matches)
309 305 cfg = Config()
310 306 cfg.IPCompleter.omit__names = 1
311 307 c.update_config(cfg)
312 308 s,matches = c.complete('ip.')
313 309 nt.assert_not_in('ip.__str__', matches)
314 310 nt.assert_in('ip._hidden_attr', matches)
315 311 cfg = Config()
316 312 cfg.IPCompleter.omit__names = 2
317 313 c.update_config(cfg)
318 314 s,matches = c.complete('ip.')
319 315 nt.assert_not_in('ip.__str__', matches)
320 316 nt.assert_not_in('ip._hidden_attr', matches)
321 317 s,matches = c.complete('ip._x.')
322 318 nt.assert_in('ip._x.keys', matches)
323 319 del ip._hidden_attr
324 320
325 321
326 322 def test_limit_to__all__False_ok():
327 323 ip = get_ipython()
328 324 c = ip.Completer
329 325 ip.ex('class D: x=24')
330 326 ip.ex('d=D()')
331 327 cfg = Config()
332 328 cfg.IPCompleter.limit_to__all__ = False
333 329 c.update_config(cfg)
334 330 s, matches = c.complete('d.')
335 331 nt.assert_in('d.x', matches)
336 332
337 333
338 334 def test_get__all__entries_ok():
339 335 class A(object):
340 336 __all__ = ['x', 1]
341 337 words = completer.get__all__entries(A())
342 338 nt.assert_equal(words, ['x'])
343 339
344 340
345 341 def test_get__all__entries_no__all__ok():
346 342 class A(object):
347 343 pass
348 344 words = completer.get__all__entries(A())
349 345 nt.assert_equal(words, [])
350 346
351 347
352 348 def test_func_kw_completions():
353 349 ip = get_ipython()
354 350 c = ip.Completer
355 351 ip.ex('def myfunc(a=1,b=2): return a+b')
356 352 s, matches = c.complete(None, 'myfunc(1,b')
357 353 nt.assert_in('b=', matches)
358 354 # Simulate completing with cursor right after b (pos==10):
359 355 s, matches = c.complete(None, 'myfunc(1,b)', 10)
360 356 nt.assert_in('b=', matches)
361 357 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
362 358 nt.assert_in('b=', matches)
363 359 #builtin function
364 360 s, matches = c.complete(None, 'min(k, k')
365 361 nt.assert_in('key=', matches)
366 362
367 363
368 364 def test_default_arguments_from_docstring():
369 365 ip = get_ipython()
370 366 c = ip.Completer
371 367 kwd = c._default_arguments_from_docstring(
372 368 'min(iterable[, key=func]) -> value')
373 369 nt.assert_equal(kwd, ['key'])
374 370 #with cython type etc
375 371 kwd = c._default_arguments_from_docstring(
376 372 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
377 373 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
378 374 #white spaces
379 375 kwd = c._default_arguments_from_docstring(
380 376 '\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
381 377 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
382 378
383 379 def test_line_magics():
384 380 ip = get_ipython()
385 381 c = ip.Completer
386 382 s, matches = c.complete(None, 'lsmag')
387 383 nt.assert_in('%lsmagic', matches)
388 384 s, matches = c.complete(None, '%lsmag')
389 385 nt.assert_in('%lsmagic', matches)
390 386
391 387
392 388 def test_cell_magics():
393 389 from IPython.core.magic import register_cell_magic
394 390
395 391 @register_cell_magic
396 392 def _foo_cellm(line, cell):
397 393 pass
398 394
399 395 ip = get_ipython()
400 396 c = ip.Completer
401 397
402 398 s, matches = c.complete(None, '_foo_ce')
403 399 nt.assert_in('%%_foo_cellm', matches)
404 400 s, matches = c.complete(None, '%%_foo_ce')
405 401 nt.assert_in('%%_foo_cellm', matches)
406 402
407 403
408 404 def test_line_cell_magics():
409 405 from IPython.core.magic import register_line_cell_magic
410 406
411 407 @register_line_cell_magic
412 408 def _bar_cellm(line, cell):
413 409 pass
414 410
415 411 ip = get_ipython()
416 412 c = ip.Completer
417 413
418 414 # The policy here is trickier, see comments in completion code. The
419 415 # returned values depend on whether the user passes %% or not explicitly,
420 416 # and this will show a difference if the same name is both a line and cell
421 417 # magic.
422 418 s, matches = c.complete(None, '_bar_ce')
423 419 nt.assert_in('%_bar_cellm', matches)
424 420 nt.assert_in('%%_bar_cellm', matches)
425 421 s, matches = c.complete(None, '%_bar_ce')
426 422 nt.assert_in('%_bar_cellm', matches)
427 423 nt.assert_in('%%_bar_cellm', matches)
428 424 s, matches = c.complete(None, '%%_bar_ce')
429 425 nt.assert_not_in('%_bar_cellm', matches)
430 426 nt.assert_in('%%_bar_cellm', matches)
431 427
432 428
433 429 def test_magic_completion_order():
434 430
435 431 ip = get_ipython()
436 432 c = ip.Completer
437 433
438 434 # Test ordering of magics and non-magics with the same name
439 435 # We want the non-magic first
440 436
441 437 # Before importing matplotlib, there should only be one option:
442 438
443 439 text, matches = c.complete('mat')
444 440 nt.assert_equal(matches, ["%matplotlib"])
445 441
446 442
447 443 ip.run_cell("matplotlib = 1") # introduce name into namespace
448 444
449 445 # After the import, there should be two options, ordered like this:
450 446 text, matches = c.complete('mat')
451 447 nt.assert_equal(matches, ["matplotlib", "%matplotlib"])
452 448
453 449
454 450 ip.run_cell("timeit = 1") # define a user variable called 'timeit'
455 451
456 452 # Order of user variable and line and cell magics with same name:
457 453 text, matches = c.complete('timeit')
458 454 nt.assert_equal(matches, ["timeit", "%timeit","%%timeit"])
459 455
460 456
461 457 def test_dict_key_completion_string():
462 458 """Test dictionary key completion for string keys"""
463 459 ip = get_ipython()
464 460 complete = ip.Completer.complete
465 461
466 462 ip.user_ns['d'] = {'abc': None}
467 463
468 464 # check completion at different stages
469 465 _, matches = complete(line_buffer="d[")
470 466 nt.assert_in("'abc'", matches)
471 467 nt.assert_not_in("'abc']", matches)
472 468
473 469 _, matches = complete(line_buffer="d['")
474 470 nt.assert_in("abc", matches)
475 471 nt.assert_not_in("abc']", matches)
476 472
477 473 _, matches = complete(line_buffer="d['a")
478 474 nt.assert_in("abc", matches)
479 475 nt.assert_not_in("abc']", matches)
480 476
481 477 # check use of different quoting
482 478 _, matches = complete(line_buffer="d[\"")
483 479 nt.assert_in("abc", matches)
484 480 nt.assert_not_in('abc\"]', matches)
485 481
486 482 _, matches = complete(line_buffer="d[\"a")
487 483 nt.assert_in("abc", matches)
488 484 nt.assert_not_in('abc\"]', matches)
489 485
490 486 # check sensitivity to following context
491 487 _, matches = complete(line_buffer="d[]", cursor_pos=2)
492 488 nt.assert_in("'abc'", matches)
493 489
494 490 _, matches = complete(line_buffer="d['']", cursor_pos=3)
495 491 nt.assert_in("abc", matches)
496 492 nt.assert_not_in("abc'", matches)
497 493 nt.assert_not_in("abc']", matches)
498 494
499 495 # check multiple solutions are correctly returned and that noise is not
500 496 ip.user_ns['d'] = {'abc': None, 'abd': None, 'bad': None, object(): None,
501 497 5: None}
502 498
503 499 _, matches = complete(line_buffer="d['a")
504 500 nt.assert_in("abc", matches)
505 501 nt.assert_in("abd", matches)
506 502 nt.assert_not_in("bad", matches)
507 503 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
508 504
509 505 # check escaping and whitespace
510 506 ip.user_ns['d'] = {'a\nb': None, 'a\'b': None, 'a"b': None, 'a word': None}
511 507 _, matches = complete(line_buffer="d['a")
512 508 nt.assert_in("a\\nb", matches)
513 509 nt.assert_in("a\\'b", matches)
514 510 nt.assert_in("a\"b", matches)
515 511 nt.assert_in("a word", matches)
516 512 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
517 513
518 514 # - can complete on non-initial word of the string
519 515 _, matches = complete(line_buffer="d['a w")
520 516 nt.assert_in("word", matches)
521 517
522 518 # - understands quote escaping
523 519 _, matches = complete(line_buffer="d['a\\'")
524 520 nt.assert_in("b", matches)
525 521
526 522 # - default quoting should work like repr
527 523 _, matches = complete(line_buffer="d[")
528 524 nt.assert_in("\"a'b\"", matches)
529 525
530 526 # - when opening quote with ", possible to match with unescaped apostrophe
531 527 _, matches = complete(line_buffer="d[\"a'")
532 528 nt.assert_in("b", matches)
533 529
534 530 # need to not split at delims that readline won't split at
535 531 if '-' not in ip.Completer.splitter.delims:
536 532 ip.user_ns['d'] = {'before-after': None}
537 533 _, matches = complete(line_buffer="d['before-af")
538 534 nt.assert_in('before-after', matches)
539 535
540 536 def test_dict_key_completion_contexts():
541 537 """Test expression contexts in which dict key completion occurs"""
542 538 ip = get_ipython()
543 539 complete = ip.Completer.complete
544 540 d = {'abc': None}
545 541 ip.user_ns['d'] = d
546 542
547 543 class C:
548 544 data = d
549 545 ip.user_ns['C'] = C
550 546 ip.user_ns['get'] = lambda: d
551 547
552 548 def assert_no_completion(**kwargs):
553 549 _, matches = complete(**kwargs)
554 550 nt.assert_not_in('abc', matches)
555 551 nt.assert_not_in('abc\'', matches)
556 552 nt.assert_not_in('abc\']', matches)
557 553 nt.assert_not_in('\'abc\'', matches)
558 554 nt.assert_not_in('\'abc\']', matches)
559 555
560 556 def assert_completion(**kwargs):
561 557 _, matches = complete(**kwargs)
562 558 nt.assert_in("'abc'", matches)
563 559 nt.assert_not_in("'abc']", matches)
564 560
565 561 # no completion after string closed, even if reopened
566 562 assert_no_completion(line_buffer="d['a'")
567 563 assert_no_completion(line_buffer="d[\"a\"")
568 564 assert_no_completion(line_buffer="d['a' + ")
569 565 assert_no_completion(line_buffer="d['a' + '")
570 566
571 567 # completion in non-trivial expressions
572 568 assert_completion(line_buffer="+ d[")
573 569 assert_completion(line_buffer="(d[")
574 570 assert_completion(line_buffer="C.data[")
575 571
576 572 # greedy flag
577 573 def assert_completion(**kwargs):
578 574 _, matches = complete(**kwargs)
579 575 nt.assert_in("get()['abc']", matches)
580 576
581 577 assert_no_completion(line_buffer="get()[")
582 578 with greedy_completion():
583 579 assert_completion(line_buffer="get()[")
584 580 assert_completion(line_buffer="get()['")
585 581 assert_completion(line_buffer="get()['a")
586 582 assert_completion(line_buffer="get()['ab")
587 583 assert_completion(line_buffer="get()['abc")
588 584
589 585
590 586
591 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
592 587 def test_dict_key_completion_bytes():
593 588 """Test handling of bytes in dict key completion"""
594 589 ip = get_ipython()
595 590 complete = ip.Completer.complete
596 591
597 592 ip.user_ns['d'] = {'abc': None, b'abd': None}
598 593
599 594 _, matches = complete(line_buffer="d[")
600 595 nt.assert_in("'abc'", matches)
601 596 nt.assert_in("b'abd'", matches)
602 597
603 598 if False: # not currently implemented
604 599 _, matches = complete(line_buffer="d[b")
605 600 nt.assert_in("b'abd'", matches)
606 601 nt.assert_not_in("b'abc'", matches)
607 602
608 603 _, matches = complete(line_buffer="d[b'")
609 604 nt.assert_in("abd", matches)
610 605 nt.assert_not_in("abc", matches)
611 606
612 607 _, matches = complete(line_buffer="d[B'")
613 608 nt.assert_in("abd", matches)
614 609 nt.assert_not_in("abc", matches)
615 610
616 611 _, matches = complete(line_buffer="d['")
617 612 nt.assert_in("abc", matches)
618 613 nt.assert_not_in("abd", matches)
619 614
620 615
621 @dec.onlyif(sys.version_info[0] < 3, 'This test only applies in Py<3')
622 616 def test_dict_key_completion_unicode_py2():
623 617 """Test handling of unicode in dict key completion"""
624 618 ip = get_ipython()
625 619 complete = ip.Completer.complete
626 620
627 621 ip.user_ns['d'] = {u'abc': None,
628 622 u'a\u05d0b': None}
629 623
630 624 _, matches = complete(line_buffer="d[")
631 625 nt.assert_in("u'abc'", matches)
632 626 nt.assert_in("u'a\\u05d0b'", matches)
633 627
634 628 _, matches = complete(line_buffer="d['a")
635 629 nt.assert_in("abc", matches)
636 630 nt.assert_not_in("a\\u05d0b", matches)
637 631
638 632 _, matches = complete(line_buffer="d[u'a")
639 633 nt.assert_in("abc", matches)
640 634 nt.assert_in("a\\u05d0b", matches)
641 635
642 636 _, matches = complete(line_buffer="d[U'a")
643 637 nt.assert_in("abc", matches)
644 638 nt.assert_in("a\\u05d0b", matches)
645 639
646 640 # query using escape
647 641 if sys.platform != 'win32':
648 642 # Known failure on Windows
649 643 _, matches = complete(line_buffer=u"d[u'a\\u05d0")
650 644 nt.assert_in("u05d0b", matches) # tokenized after \\
651 645
652 646 # query using character
653 647 _, matches = complete(line_buffer=u"d[u'a\u05d0")
654 648 nt.assert_in(u"a\u05d0b", matches)
655 649
656 650 with greedy_completion():
657 651 _, matches = complete(line_buffer="d[")
658 652 nt.assert_in("d[u'abc']", matches)
659 653 nt.assert_in("d[u'a\\u05d0b']", matches)
660 654
661 655 _, matches = complete(line_buffer="d['a")
662 656 nt.assert_in("d['abc']", matches)
663 657 nt.assert_not_in("d[u'a\\u05d0b']", matches)
664 658
665 659 _, matches = complete(line_buffer="d[u'a")
666 660 nt.assert_in("d[u'abc']", matches)
667 661 nt.assert_in("d[u'a\\u05d0b']", matches)
668 662
669 663 _, matches = complete(line_buffer="d[U'a")
670 664 nt.assert_in("d[U'abc']", matches)
671 665 nt.assert_in("d[U'a\\u05d0b']", matches)
672 666
673 667 # query using escape
674 668 _, matches = complete(line_buffer=u"d[u'a\\u05d0")
675 669 nt.assert_in("d[u'a\\u05d0b']", matches) # tokenized after \\
676 670
677 671 # query using character
678 672 _, matches = complete(line_buffer=u"d[u'a\u05d0")
679 673 nt.assert_in(u"d[u'a\u05d0b']", matches)
680 674
681 675
682 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
683 676 def test_dict_key_completion_unicode_py3():
684 677 """Test handling of unicode in dict key completion"""
685 678 ip = get_ipython()
686 679 complete = ip.Completer.complete
687 680
688 681 ip.user_ns['d'] = {u'a\u05d0': None}
689 682
690 683 # query using escape
691 684 if sys.platform != 'win32':
692 685 # Known failure on Windows
693 686 _, matches = complete(line_buffer="d['a\\u05d0")
694 687 nt.assert_in("u05d0", matches) # tokenized after \\
695 688
696 689 # query using character
697 690 _, matches = complete(line_buffer="d['a\u05d0")
698 691 nt.assert_in(u"a\u05d0", matches)
699 692
700 693 with greedy_completion():
701 694 # query using escape
702 695 _, matches = complete(line_buffer="d['a\\u05d0")
703 696 nt.assert_in("d['a\\u05d0']", matches) # tokenized after \\
704 697
705 698 # query using character
706 699 _, matches = complete(line_buffer="d['a\u05d0")
707 700 nt.assert_in(u"d['a\u05d0']", matches)
708 701
709 702
710 703
711 704 @dec.skip_without('numpy')
712 705 def test_struct_array_key_completion():
713 706 """Test dict key completion applies to numpy struct arrays"""
714 707 import numpy
715 708 ip = get_ipython()
716 709 complete = ip.Completer.complete
717 710 ip.user_ns['d'] = numpy.array([], dtype=[('hello', 'f'), ('world', 'f')])
718 711 _, matches = complete(line_buffer="d['")
719 712 nt.assert_in("hello", matches)
720 713 nt.assert_in("world", matches)
721 714 # complete on the numpy struct itself
722 715 dt = numpy.dtype([('my_head', [('my_dt', '>u4'), ('my_df', '>u4')]),
723 716 ('my_data', '>f4', 5)])
724 717 x = numpy.zeros(2, dtype=dt)
725 718 ip.user_ns['d'] = x[1]
726 719 _, matches = complete(line_buffer="d['")
727 720 nt.assert_in("my_head", matches)
728 721 nt.assert_in("my_data", matches)
729 722 # complete on a nested level
730 723 with greedy_completion():
731 724 ip.user_ns['d'] = numpy.zeros(2, dtype=dt)
732 725 _, matches = complete(line_buffer="d[1]['my_head']['")
733 726 nt.assert_true(any(["my_dt" in m for m in matches]))
734 727 nt.assert_true(any(["my_df" in m for m in matches]))
735 728
736 729
737 730 @dec.skip_without('pandas')
738 731 def test_dataframe_key_completion():
739 732 """Test dict key completion applies to pandas DataFrames"""
740 733 import pandas
741 734 ip = get_ipython()
742 735 complete = ip.Completer.complete
743 736 ip.user_ns['d'] = pandas.DataFrame({'hello': [1], 'world': [2]})
744 737 _, matches = complete(line_buffer="d['")
745 738 nt.assert_in("hello", matches)
746 739 nt.assert_in("world", matches)
747 740
748 741
749 742 def test_dict_key_completion_invalids():
750 743 """Smoke test cases dict key completion can't handle"""
751 744 ip = get_ipython()
752 745 complete = ip.Completer.complete
753 746
754 747 ip.user_ns['no_getitem'] = None
755 748 ip.user_ns['no_keys'] = []
756 749 ip.user_ns['cant_call_keys'] = dict
757 750 ip.user_ns['empty'] = {}
758 751 ip.user_ns['d'] = {'abc': 5}
759 752
760 753 _, matches = complete(line_buffer="no_getitem['")
761 754 _, matches = complete(line_buffer="no_keys['")
762 755 _, matches = complete(line_buffer="cant_call_keys['")
763 756 _, matches = complete(line_buffer="empty['")
764 757 _, matches = complete(line_buffer="name_error['")
765 758 _, matches = complete(line_buffer="d['\\") # incomplete escape
766 759
767 760 class KeyCompletable(object):
768 761 def __init__(self, things=()):
769 762 self.things = things
770 763
771 764 def _ipython_key_completions_(self):
772 765 return list(self.things)
773 766
774 767 def test_object_key_completion():
775 768 ip = get_ipython()
776 769 ip.user_ns['key_completable'] = KeyCompletable(['qwerty', 'qwick'])
777 770
778 771 _, matches = ip.Completer.complete(line_buffer="key_completable['qw")
779 772 nt.assert_in('qwerty', matches)
780 773 nt.assert_in('qwick', matches)
781 774
782 775
783 776 def test_aimport_module_completer():
784 777 ip = get_ipython()
785 778 _, matches = ip.complete('i', '%aimport i')
786 779 nt.assert_in('io', matches)
787 780 nt.assert_not_in('int', matches)
788 781
789 782 def test_nested_import_module_completer():
790 783 ip = get_ipython()
791 784 _, matches = ip.complete(None, 'import IPython.co', 17)
792 785 nt.assert_in('IPython.core', matches)
793 786 nt.assert_not_in('import IPython.core', matches)
794 787 nt.assert_not_in('IPython.display', matches)
795 788
796 789 def test_import_module_completer():
797 790 ip = get_ipython()
798 791 _, matches = ip.complete('i', 'import i')
799 792 nt.assert_in('io', matches)
800 793 nt.assert_not_in('int', matches)
801 794
802 795 def test_from_module_completer():
803 796 ip = get_ipython()
804 797 _, matches = ip.complete('B', 'from io import B', 16)
805 798 nt.assert_in('BytesIO', matches)
806 799 nt.assert_not_in('BaseException', matches)
@@ -1,950 +1,918 @@
1 1 # -*- coding: utf-8 -*-
2 2 """Tests for the key interactiveshell module.
3 3
4 4 Historically the main classes in interactiveshell have been under-tested. This
5 5 module should grow as many single-method tests as possible to trap many of the
6 6 recurring bugs we seem to encounter with high-level interaction.
7 7 """
8 8
9 9 # Copyright (c) IPython Development Team.
10 10 # Distributed under the terms of the Modified BSD License.
11 11
12 12 import ast
13 13 import os
14 14 import signal
15 15 import shutil
16 16 import sys
17 17 import tempfile
18 18 import unittest
19 19 try:
20 20 from unittest import mock
21 21 except ImportError:
22 22 import mock
23 23 from os.path import join
24 24
25 25 import nose.tools as nt
26 26
27 27 from IPython.core.error import InputRejected
28 28 from IPython.core.inputtransformer import InputTransformer
29 29 from IPython.testing.decorators import (
30 30 skipif, skip_win32, onlyif_unicode_paths, onlyif_cmds_exist,
31 31 )
32 32 from IPython.testing import tools as tt
33 33 from IPython.utils.process import find_cmd
34 34 from IPython.utils import py3compat
35 35 from IPython.utils.py3compat import unicode_type, PY3
36 36
37 37 if PY3:
38 38 from io import StringIO
39 39 else:
40 40 from StringIO import StringIO
41 41
42 42 #-----------------------------------------------------------------------------
43 43 # Globals
44 44 #-----------------------------------------------------------------------------
45 45 # This is used by every single test, no point repeating it ad nauseam
46 46 ip = get_ipython()
47 47
48 48 #-----------------------------------------------------------------------------
49 49 # Tests
50 50 #-----------------------------------------------------------------------------
51 51
52 52 class DerivedInterrupt(KeyboardInterrupt):
53 53 pass
54 54
55 55 class InteractiveShellTestCase(unittest.TestCase):
56 56 def test_naked_string_cells(self):
57 57 """Test that cells with only naked strings are fully executed"""
58 58 # First, single-line inputs
59 59 ip.run_cell('"a"\n')
60 60 self.assertEqual(ip.user_ns['_'], 'a')
61 61 # And also multi-line cells
62 62 ip.run_cell('"""a\nb"""\n')
63 63 self.assertEqual(ip.user_ns['_'], 'a\nb')
64 64
65 65 def test_run_empty_cell(self):
66 66 """Just make sure we don't get a horrible error with a blank
67 67 cell of input. Yes, I did overlook that."""
68 68 old_xc = ip.execution_count
69 69 res = ip.run_cell('')
70 70 self.assertEqual(ip.execution_count, old_xc)
71 71 self.assertEqual(res.execution_count, None)
72 72
73 73 def test_run_cell_multiline(self):
74 74 """Multi-block, multi-line cells must execute correctly.
75 75 """
76 76 src = '\n'.join(["x=1",
77 77 "y=2",
78 78 "if 1:",
79 79 " x += 1",
80 80 " y += 1",])
81 81 res = ip.run_cell(src)
82 82 self.assertEqual(ip.user_ns['x'], 2)
83 83 self.assertEqual(ip.user_ns['y'], 3)
84 84 self.assertEqual(res.success, True)
85 85 self.assertEqual(res.result, None)
86 86
87 87 def test_multiline_string_cells(self):
88 88 "Code sprinkled with multiline strings should execute (GH-306)"
89 89 ip.run_cell('tmp=0')
90 90 self.assertEqual(ip.user_ns['tmp'], 0)
91 91 res = ip.run_cell('tmp=1;"""a\nb"""\n')
92 92 self.assertEqual(ip.user_ns['tmp'], 1)
93 93 self.assertEqual(res.success, True)
94 94 self.assertEqual(res.result, "a\nb")
95 95
96 96 def test_dont_cache_with_semicolon(self):
97 97 "Ending a line with semicolon should not cache the returned object (GH-307)"
98 98 oldlen = len(ip.user_ns['Out'])
99 99 for cell in ['1;', '1;1;']:
100 100 res = ip.run_cell(cell, store_history=True)
101 101 newlen = len(ip.user_ns['Out'])
102 102 self.assertEqual(oldlen, newlen)
103 103 self.assertIsNone(res.result)
104 104 i = 0
105 105 #also test the default caching behavior
106 106 for cell in ['1', '1;1']:
107 107 ip.run_cell(cell, store_history=True)
108 108 newlen = len(ip.user_ns['Out'])
109 109 i += 1
110 110 self.assertEqual(oldlen+i, newlen)
111 111
112 112 def test_syntax_error(self):
113 113 res = ip.run_cell("raise = 3")
114 114 self.assertIsInstance(res.error_before_exec, SyntaxError)
115 115
116 116 def test_In_variable(self):
117 117 "Verify that In variable grows with user input (GH-284)"
118 118 oldlen = len(ip.user_ns['In'])
119 119 ip.run_cell('1;', store_history=True)
120 120 newlen = len(ip.user_ns['In'])
121 121 self.assertEqual(oldlen+1, newlen)
122 122 self.assertEqual(ip.user_ns['In'][-1],'1;')
123 123
124 124 def test_magic_names_in_string(self):
125 125 ip.run_cell('a = """\n%exit\n"""')
126 126 self.assertEqual(ip.user_ns['a'], '\n%exit\n')
127 127
128 128 def test_trailing_newline(self):
129 129 """test that running !(command) does not raise a SyntaxError"""
130 130 ip.run_cell('!(true)\n', False)
131 131 ip.run_cell('!(true)\n\n\n', False)
132 132
133 133 def test_gh_597(self):
134 134 """Pretty-printing lists of objects with non-ascii reprs may cause
135 135 problems."""
136 136 class Spam(object):
137 137 def __repr__(self):
138 138 return "\xe9"*50
139 139 import IPython.core.formatters
140 140 f = IPython.core.formatters.PlainTextFormatter()
141 141 f([Spam(),Spam()])
142 142
143 143
144 144 def test_future_flags(self):
145 145 """Check that future flags are used for parsing code (gh-777)"""
146 146 ip.run_cell('from __future__ import print_function')
147 147 try:
148 148 ip.run_cell('prfunc_return_val = print(1,2, sep=" ")')
149 149 assert 'prfunc_return_val' in ip.user_ns
150 150 finally:
151 151 # Reset compiler flags so we don't mess up other tests.
152 152 ip.compile.reset_compiler_flags()
153 153
154 154 def test_future_unicode(self):
155 155 """Check that unicode_literals is imported from __future__ (gh #786)"""
156 156 try:
157 157 ip.run_cell(u'byte_str = "a"')
158 158 assert isinstance(ip.user_ns['byte_str'], str) # string literals are byte strings by default
159 159 ip.run_cell('from __future__ import unicode_literals')
160 160 ip.run_cell(u'unicode_str = "a"')
161 161             assert isinstance(ip.user_ns['unicode_str'], unicode_type) # string literals are now unicode
162 162 finally:
163 163 # Reset compiler flags so we don't mess up other tests.
164 164 ip.compile.reset_compiler_flags()
165 165
166 166 def test_can_pickle(self):
167 167 "Can we pickle objects defined interactively (GH-29)"
168 168 ip = get_ipython()
169 169 ip.reset()
170 170 ip.run_cell(("class Mylist(list):\n"
171 171 " def __init__(self,x=[]):\n"
172 172 " list.__init__(self,x)"))
173 173 ip.run_cell("w=Mylist([1,2,3])")
174 174
175 175 from pickle import dumps
176 176
177 177 # We need to swap in our main module - this is only necessary
178 178 # inside the test framework, because IPython puts the interactive module
179 179 # in place (but the test framework undoes this).
180 180 _main = sys.modules['__main__']
181 181 sys.modules['__main__'] = ip.user_module
182 182 try:
183 183 res = dumps(ip.user_ns["w"])
184 184 finally:
185 185 sys.modules['__main__'] = _main
186 186 self.assertTrue(isinstance(res, bytes))
187 187
188 188 def test_global_ns(self):
189 189 "Code in functions must be able to access variables outside them."
190 190 ip = get_ipython()
191 191 ip.run_cell("a = 10")
192 192 ip.run_cell(("def f(x):\n"
193 193 " return x + a"))
194 194 ip.run_cell("b = f(12)")
195 195 self.assertEqual(ip.user_ns["b"], 22)
196 196
197 197 def test_bad_custom_tb(self):
198 198 """Check that InteractiveShell is protected from bad custom exception handlers"""
199 199 ip.set_custom_exc((IOError,), lambda etype,value,tb: 1/0)
200 200 self.assertEqual(ip.custom_exceptions, (IOError,))
201 201 with tt.AssertPrints("Custom TB Handler failed", channel='stderr'):
202 202 ip.run_cell(u'raise IOError("foo")')
203 203 self.assertEqual(ip.custom_exceptions, ())
204 204
205 205 def test_bad_custom_tb_return(self):
206 206 """Check that InteractiveShell is protected from bad return types in custom exception handlers"""
207 207 ip.set_custom_exc((NameError,),lambda etype,value,tb, tb_offset=None: 1)
208 208 self.assertEqual(ip.custom_exceptions, (NameError,))
209 209 with tt.AssertPrints("Custom TB Handler failed", channel='stderr'):
210 210 ip.run_cell(u'a=abracadabra')
211 211 self.assertEqual(ip.custom_exceptions, ())
212 212
213 213 def test_drop_by_id(self):
214 214 myvars = {"a":object(), "b":object(), "c": object()}
215 215 ip.push(myvars, interactive=False)
216 216 for name in myvars:
217 217 assert name in ip.user_ns, name
218 218 assert name in ip.user_ns_hidden, name
219 219 ip.user_ns['b'] = 12
220 220 ip.drop_by_id(myvars)
221 221 for name in ["a", "c"]:
222 222 assert name not in ip.user_ns, name
223 223 assert name not in ip.user_ns_hidden, name
224 224 assert ip.user_ns['b'] == 12
225 225 ip.reset()
226 226
227 227 def test_var_expand(self):
228 228 ip.user_ns['f'] = u'Ca\xf1o'
229 229 self.assertEqual(ip.var_expand(u'echo $f'), u'echo Ca\xf1o')
230 230 self.assertEqual(ip.var_expand(u'echo {f}'), u'echo Ca\xf1o')
231 231 self.assertEqual(ip.var_expand(u'echo {f[:-1]}'), u'echo Ca\xf1')
232 232 self.assertEqual(ip.var_expand(u'echo {1*2}'), u'echo 2')
233 233
234 234 ip.user_ns['f'] = b'Ca\xc3\xb1o'
235 235 # This should not raise any exception:
236 236 ip.var_expand(u'echo $f')
237 237
238 238 def test_var_expand_local(self):
239 239 """Test local variable expansion in !system and %magic calls"""
240 240 # !system
241 241 ip.run_cell('def test():\n'
242 242 ' lvar = "ttt"\n'
243 243 ' ret = !echo {lvar}\n'
244 244 ' return ret[0]\n')
245 245 res = ip.user_ns['test']()
246 246 nt.assert_in('ttt', res)
247 247
248 248 # %magic
249 249 ip.run_cell('def makemacro():\n'
250 250 ' macroname = "macro_var_expand_locals"\n'
251 251 ' %macro {macroname} codestr\n')
252 252 ip.user_ns['codestr'] = "str(12)"
253 253 ip.run_cell('makemacro()')
254 254 nt.assert_in('macro_var_expand_locals', ip.user_ns)
255 255
256 256 def test_var_expand_self(self):
257 257 """Test variable expansion with the name 'self', which was failing.
258 258
259 259 See https://github.com/ipython/ipython/issues/1878#issuecomment-7698218
260 260 """
261 261 ip.run_cell('class cTest:\n'
262 262 ' classvar="see me"\n'
263 263 ' def test(self):\n'
264 264 ' res = !echo Variable: {self.classvar}\n'
265 265 ' return res[0]\n')
266 266 nt.assert_in('see me', ip.user_ns['cTest']().test())
267 267
268 268 def test_bad_var_expand(self):
269 269 """var_expand on invalid formats shouldn't raise"""
270 270 # SyntaxError
271 271 self.assertEqual(ip.var_expand(u"{'a':5}"), u"{'a':5}")
272 272 # NameError
273 273 self.assertEqual(ip.var_expand(u"{asdf}"), u"{asdf}")
274 274 # ZeroDivisionError
275 275 self.assertEqual(ip.var_expand(u"{1/0}"), u"{1/0}")
276 276
277 277 def test_silent_postexec(self):
278 278 """run_cell(silent=True) doesn't invoke pre/post_run_cell callbacks"""
279 279 pre_explicit = mock.Mock()
280 280 pre_always = mock.Mock()
281 281 post_explicit = mock.Mock()
282 282 post_always = mock.Mock()
283 283
284 284 ip.events.register('pre_run_cell', pre_explicit)
285 285 ip.events.register('pre_execute', pre_always)
286 286 ip.events.register('post_run_cell', post_explicit)
287 287 ip.events.register('post_execute', post_always)
288 288
289 289 try:
290 290 ip.run_cell("1", silent=True)
291 291 assert pre_always.called
292 292 assert not pre_explicit.called
293 293 assert post_always.called
294 294 assert not post_explicit.called
295 295             # double-check that a non-silent exec triggers the callbacks
296 296             # that silent=True avoids
297 297 ip.run_cell("1")
298 298 assert pre_explicit.called
299 299 assert post_explicit.called
300 300 finally:
301 301 # remove post-exec
302 302 ip.events.unregister('pre_run_cell', pre_explicit)
303 303 ip.events.unregister('pre_execute', pre_always)
304 304 ip.events.unregister('post_run_cell', post_explicit)
305 305 ip.events.unregister('post_execute', post_always)
306 306
307 307 def test_silent_noadvance(self):
308 308 """run_cell(silent=True) doesn't advance execution_count"""
309 309 ec = ip.execution_count
310 310 # silent should force store_history=False
311 311 ip.run_cell("1", store_history=True, silent=True)
312 312
313 313 self.assertEqual(ec, ip.execution_count)
314 314         # double-check that non-silent exec advances execution_count,
315 315         # which silent=True avoids
316 316 ip.run_cell("1", store_history=True)
317 317 self.assertEqual(ec+1, ip.execution_count)
318 318
319 319 def test_silent_nodisplayhook(self):
320 320 """run_cell(silent=True) doesn't trigger displayhook"""
321 321 d = dict(called=False)
322 322
323 323 trap = ip.display_trap
324 324 save_hook = trap.hook
325 325
326 326 def failing_hook(*args, **kwargs):
327 327 d['called'] = True
328 328
329 329 try:
330 330 trap.hook = failing_hook
331 331 res = ip.run_cell("1", silent=True)
332 332 self.assertFalse(d['called'])
333 333 self.assertIsNone(res.result)
334 334             # double-check that non-silent exec triggers the displayhook
335 335             # that silent=True avoids
336 336 ip.run_cell("1")
337 337 self.assertTrue(d['called'])
338 338 finally:
339 339 trap.hook = save_hook
340 340
341 @skipif(sys.version_info[0] >= 3, "softspace removed in py3")
342 def test_print_softspace(self):
343 """Verify that softspace is handled correctly when executing multiple
344 statements.
345
346 In [1]: print 1; print 2
347 1
348 2
349
350 In [2]: print 1,; print 2
351 1 2
352 """
353
354 341 def test_ofind_line_magic(self):
355 342 from IPython.core.magic import register_line_magic
356 343
357 344 @register_line_magic
358 345 def lmagic(line):
359 346 "A line magic"
360 347
361 348 # Get info on line magic
362 349 lfind = ip._ofind('lmagic')
363 350 info = dict(found=True, isalias=False, ismagic=True,
364 351 namespace = 'IPython internal', obj= lmagic.__wrapped__,
365 352 parent = None)
366 353 nt.assert_equal(lfind, info)
367 354
368 355 def test_ofind_cell_magic(self):
369 356 from IPython.core.magic import register_cell_magic
370 357
371 358 @register_cell_magic
372 359 def cmagic(line, cell):
373 360 "A cell magic"
374 361
375 362 # Get info on cell magic
376 363 find = ip._ofind('cmagic')
377 364 info = dict(found=True, isalias=False, ismagic=True,
378 365 namespace = 'IPython internal', obj= cmagic.__wrapped__,
379 366 parent = None)
380 367 nt.assert_equal(find, info)
381 368
382 369 def test_ofind_property_with_error(self):
383 370 class A(object):
384 371 @property
385 372 def foo(self):
386 373 raise NotImplementedError()
387 374 a = A()
388 375
389 376 found = ip._ofind('a.foo', [('locals', locals())])
390 377 info = dict(found=True, isalias=False, ismagic=False,
391 378 namespace='locals', obj=A.foo, parent=a)
392 379 nt.assert_equal(found, info)
393 380
394 381 def test_ofind_multiple_attribute_lookups(self):
395 382 class A(object):
396 383 @property
397 384 def foo(self):
398 385 raise NotImplementedError()
399 386
400 387 a = A()
401 388 a.a = A()
402 389 a.a.a = A()
403 390
404 391 found = ip._ofind('a.a.a.foo', [('locals', locals())])
405 392 info = dict(found=True, isalias=False, ismagic=False,
406 393 namespace='locals', obj=A.foo, parent=a.a.a)
407 394 nt.assert_equal(found, info)
408 395
409 396 def test_ofind_slotted_attributes(self):
410 397 class A(object):
411 398 __slots__ = ['foo']
412 399 def __init__(self):
413 400 self.foo = 'bar'
414 401
415 402 a = A()
416 403 found = ip._ofind('a.foo', [('locals', locals())])
417 404 info = dict(found=True, isalias=False, ismagic=False,
418 405 namespace='locals', obj=a.foo, parent=a)
419 406 nt.assert_equal(found, info)
420 407
421 408 found = ip._ofind('a.bar', [('locals', locals())])
422 409 info = dict(found=False, isalias=False, ismagic=False,
423 410 namespace=None, obj=None, parent=a)
424 411 nt.assert_equal(found, info)
425 412
426 413 def test_ofind_prefers_property_to_instance_level_attribute(self):
427 414 class A(object):
428 415 @property
429 416 def foo(self):
430 417 return 'bar'
431 418 a = A()
432 419 a.__dict__['foo'] = 'baz'
433 420 nt.assert_equal(a.foo, 'bar')
434 421 found = ip._ofind('a.foo', [('locals', locals())])
435 422 nt.assert_is(found['obj'], A.foo)
436 423
437 424 def test_custom_syntaxerror_exception(self):
438 425 called = []
439 426 def my_handler(shell, etype, value, tb, tb_offset=None):
440 427 called.append(etype)
441 428 shell.showtraceback((etype, value, tb), tb_offset=tb_offset)
442 429
443 430 ip.set_custom_exc((SyntaxError,), my_handler)
444 431 try:
445 432 ip.run_cell("1f")
446 433 # Check that this was called, and only once.
447 434 self.assertEqual(called, [SyntaxError])
448 435 finally:
449 436 # Reset the custom exception hook
450 437 ip.set_custom_exc((), None)
451 438
452 439 def test_custom_exception(self):
453 440 called = []
454 441 def my_handler(shell, etype, value, tb, tb_offset=None):
455 442 called.append(etype)
456 443 shell.showtraceback((etype, value, tb), tb_offset=tb_offset)
457 444
458 445 ip.set_custom_exc((ValueError,), my_handler)
459 446 try:
460 447 res = ip.run_cell("raise ValueError('test')")
461 448 # Check that this was called, and only once.
462 449 self.assertEqual(called, [ValueError])
463 450 # Check that the error is on the result object
464 451 self.assertIsInstance(res.error_in_exec, ValueError)
465 452 finally:
466 453 # Reset the custom exception hook
467 454 ip.set_custom_exc((), None)
468 455
469 @skipif(sys.version_info[0] >= 3, "no differences with __future__ in py3")
470 def test_future_environment(self):
471 "Can we run code with & without the shell's __future__ imports?"
472 ip.run_cell("from __future__ import division")
473 ip.run_cell("a = 1/2", shell_futures=True)
474 self.assertEqual(ip.user_ns['a'], 0.5)
475 ip.run_cell("b = 1/2", shell_futures=False)
476 self.assertEqual(ip.user_ns['b'], 0)
477
478 ip.compile.reset_compiler_flags()
479 # This shouldn't leak to the shell's compiler
480 ip.run_cell("from __future__ import division \nc=1/2", shell_futures=False)
481 self.assertEqual(ip.user_ns['c'], 0.5)
482 ip.run_cell("d = 1/2", shell_futures=True)
483 self.assertEqual(ip.user_ns['d'], 0)
484
485 456 def test_mktempfile(self):
486 457 filename = ip.mktempfile()
487 458 # Check that we can open the file again on Windows
488 459 with open(filename, 'w') as f:
489 460 f.write('abc')
490 461
491 462 filename = ip.mktempfile(data='blah')
492 463 with open(filename, 'r') as f:
493 464 self.assertEqual(f.read(), 'blah')
494 465
495 466 def test_new_main_mod(self):
496 467 # Smoketest to check that this accepts a unicode module name
497 468 name = u'jiefmw'
498 469 mod = ip.new_main_mod(u'%s.py' % name, name)
499 470 self.assertEqual(mod.__name__, name)
500 471
501 472 def test_get_exception_only(self):
502 473 try:
503 474 raise KeyboardInterrupt
504 475 except KeyboardInterrupt:
505 476 msg = ip.get_exception_only()
506 477 self.assertEqual(msg, 'KeyboardInterrupt\n')
507 478
508 479 try:
509 480 raise DerivedInterrupt("foo")
510 481 except KeyboardInterrupt:
511 482 msg = ip.get_exception_only()
512 if sys.version_info[0] <= 2:
513 self.assertEqual(msg, 'DerivedInterrupt: foo\n')
514 else:
515 self.assertEqual(msg, 'IPython.core.tests.test_interactiveshell.DerivedInterrupt: foo\n')
483 self.assertEqual(msg, 'IPython.core.tests.test_interactiveshell.DerivedInterrupt: foo\n')
516 484
517 485 def test_inspect_text(self):
518 486 ip.run_cell('a = 5')
519 487 text = ip.object_inspect_text('a')
520 488 self.assertIsInstance(text, unicode_type)
521 489
522 490
523 491 class TestSafeExecfileNonAsciiPath(unittest.TestCase):
524 492
525 493 @onlyif_unicode_paths
526 494 def setUp(self):
527 495 self.BASETESTDIR = tempfile.mkdtemp()
528 496 self.TESTDIR = join(self.BASETESTDIR, u"åäö")
529 497 os.mkdir(self.TESTDIR)
530 498 with open(join(self.TESTDIR, u"åäötestscript.py"), "w") as sfile:
531 499 sfile.write("pass\n")
532 500 self.oldpath = py3compat.getcwd()
533 501 os.chdir(self.TESTDIR)
534 502 self.fname = u"åäötestscript.py"
535 503
536 504 def tearDown(self):
537 505 os.chdir(self.oldpath)
538 506 shutil.rmtree(self.BASETESTDIR)
539 507
540 508 @onlyif_unicode_paths
541 509 def test_1(self):
542 510 """Test safe_execfile with non-ascii path
543 511 """
544 512 ip.safe_execfile(self.fname, {}, raise_exceptions=True)
545 513
546 514 class ExitCodeChecks(tt.TempFileMixin):
547 515 def test_exit_code_ok(self):
548 516 self.system('exit 0')
549 517 self.assertEqual(ip.user_ns['_exit_code'], 0)
550 518
551 519 def test_exit_code_error(self):
552 520 self.system('exit 1')
553 521 self.assertEqual(ip.user_ns['_exit_code'], 1)
554 522
555 523 @skipif(not hasattr(signal, 'SIGALRM'))
556 524 def test_exit_code_signal(self):
557 525 self.mktmp("import signal, time\n"
558 526 "signal.setitimer(signal.ITIMER_REAL, 0.1)\n"
559 527 "time.sleep(1)\n")
560 528 self.system("%s %s" % (sys.executable, self.fname))
561 529 self.assertEqual(ip.user_ns['_exit_code'], -signal.SIGALRM)
562 530
563 531 @onlyif_cmds_exist("csh")
564 532 def test_exit_code_signal_csh(self):
565 533 SHELL = os.environ.get('SHELL', None)
566 534 os.environ['SHELL'] = find_cmd("csh")
567 535 try:
568 536 self.test_exit_code_signal()
569 537 finally:
570 538 if SHELL is not None:
571 539 os.environ['SHELL'] = SHELL
572 540 else:
573 541 del os.environ['SHELL']
574 542
575 543 class TestSystemRaw(unittest.TestCase, ExitCodeChecks):
576 544 system = ip.system_raw
577 545
578 546 @onlyif_unicode_paths
579 547 def test_1(self):
580 548 """Test system_raw with non-ascii cmd
581 549 """
582 550 cmd = u'''python -c "'åäö'" '''
583 551 ip.system_raw(cmd)
584 552
585 553 @mock.patch('subprocess.call', side_effect=KeyboardInterrupt)
586 554 @mock.patch('os.system', side_effect=KeyboardInterrupt)
587 555 def test_control_c(self, *mocks):
588 556 try:
589 557 self.system("sleep 1 # wont happen")
590 558 except KeyboardInterrupt:
591 559 self.fail("system call should intercept "
592 560 "keyboard interrupt from subprocess.call")
593 561 self.assertEqual(ip.user_ns['_exit_code'], -signal.SIGINT)
594 562
595 563 # TODO: Exit codes are currently ignored on Windows.
596 564 class TestSystemPipedExitCode(unittest.TestCase, ExitCodeChecks):
597 565 system = ip.system_piped
598 566
599 567 @skip_win32
600 568 def test_exit_code_ok(self):
601 569 ExitCodeChecks.test_exit_code_ok(self)
602 570
603 571 @skip_win32
604 572 def test_exit_code_error(self):
605 573 ExitCodeChecks.test_exit_code_error(self)
606 574
607 575 @skip_win32
608 576 def test_exit_code_signal(self):
609 577 ExitCodeChecks.test_exit_code_signal(self)
610 578
611 579 class TestModules(unittest.TestCase, tt.TempFileMixin):
612 580 def test_extraneous_loads(self):
613 581 """Test we're not loading modules on startup that we shouldn't.
614 582 """
615 583 self.mktmp("import sys\n"
616 584 "print('numpy' in sys.modules)\n"
617 585 "print('ipyparallel' in sys.modules)\n"
618 586 "print('ipykernel' in sys.modules)\n"
619 587 )
620 588 out = "False\nFalse\nFalse\n"
621 589 tt.ipexec_validate(self.fname, out)
622 590
623 591 class Negator(ast.NodeTransformer):
624 592 """Negates all number literals in an AST."""
625 593 def visit_Num(self, node):
626 594 node.n = -node.n
627 595 return node
628 596
629 597 class TestAstTransform(unittest.TestCase):
630 598 def setUp(self):
631 599 self.negator = Negator()
632 600 ip.ast_transformers.append(self.negator)
633 601
634 602 def tearDown(self):
635 603 ip.ast_transformers.remove(self.negator)
636 604
637 605 def test_run_cell(self):
638 606 with tt.AssertPrints('-34'):
639 607 ip.run_cell('print (12 + 22)')
640 608
641 609 # A named reference to a number shouldn't be transformed.
642 610 ip.user_ns['n'] = 55
643 611 with tt.AssertNotPrints('-55'):
644 612 ip.run_cell('print (n)')
645 613
646 614 def test_timeit(self):
647 615 called = set()
648 616 def f(x):
649 617 called.add(x)
650 618 ip.push({'f':f})
651 619
652 620 with tt.AssertPrints("average of "):
653 621 ip.run_line_magic("timeit", "-n1 f(1)")
654 622 self.assertEqual(called, {-1})
655 623 called.clear()
656 624
657 625 with tt.AssertPrints("average of "):
658 626 ip.run_cell_magic("timeit", "-n1 f(2)", "f(3)")
659 627 self.assertEqual(called, {-2, -3})
660 628
661 629 def test_time(self):
662 630 called = []
663 631 def f(x):
664 632 called.append(x)
665 633 ip.push({'f':f})
666 634
667 635 # Test with an expression
668 636 with tt.AssertPrints("Wall time: "):
669 637 ip.run_line_magic("time", "f(5+9)")
670 638 self.assertEqual(called, [-14])
671 639 called[:] = []
672 640
673 641 # Test with a statement (different code path)
674 642 with tt.AssertPrints("Wall time: "):
675 643 ip.run_line_magic("time", "a = f(-3 + -2)")
676 644 self.assertEqual(called, [5])
677 645
678 646 def test_macro(self):
679 647 ip.push({'a':10})
680 648 # The AST transformation makes this do a+=-1
681 649 ip.define_macro("amacro", "a+=1\nprint(a)")
682 650
683 651 with tt.AssertPrints("9"):
684 652 ip.run_cell("amacro")
685 653 with tt.AssertPrints("8"):
686 654 ip.run_cell("amacro")
687 655
688 656 class IntegerWrapper(ast.NodeTransformer):
689 657 """Wraps all integers in a call to Integer()"""
690 658 def visit_Num(self, node):
691 659 if isinstance(node.n, int):
692 660 return ast.Call(func=ast.Name(id='Integer', ctx=ast.Load()),
693 661 args=[node], keywords=[])
694 662 return node
695 663
696 664 class TestAstTransform2(unittest.TestCase):
697 665 def setUp(self):
698 666 self.intwrapper = IntegerWrapper()
699 667 ip.ast_transformers.append(self.intwrapper)
700 668
701 669 self.calls = []
702 670 def Integer(*args):
703 671 self.calls.append(args)
704 672 return args
705 673 ip.push({"Integer": Integer})
706 674
707 675 def tearDown(self):
708 676 ip.ast_transformers.remove(self.intwrapper)
709 677 del ip.user_ns['Integer']
710 678
711 679 def test_run_cell(self):
712 680 ip.run_cell("n = 2")
713 681 self.assertEqual(self.calls, [(2,)])
714 682
715 683 # This shouldn't throw an error
716 684 ip.run_cell("o = 2.0")
717 685 self.assertEqual(ip.user_ns['o'], 2.0)
718 686
719 687 def test_timeit(self):
720 688 called = set()
721 689 def f(x):
722 690 called.add(x)
723 691 ip.push({'f':f})
724 692
725 693 with tt.AssertPrints("average of "):
726 694 ip.run_line_magic("timeit", "-n1 f(1)")
727 695 self.assertEqual(called, {(1,)})
728 696 called.clear()
729 697
730 698 with tt.AssertPrints("average of "):
731 699 ip.run_cell_magic("timeit", "-n1 f(2)", "f(3)")
732 700 self.assertEqual(called, {(2,), (3,)})
733 701
734 702 class ErrorTransformer(ast.NodeTransformer):
735 703 """Throws an error when it sees a number."""
736 704 def visit_Num(self, node):
737 705 raise ValueError("test")
738 706
739 707 class TestAstTransformError(unittest.TestCase):
740 708 def test_unregistering(self):
741 709 err_transformer = ErrorTransformer()
742 710 ip.ast_transformers.append(err_transformer)
743 711
744 712 with tt.AssertPrints("unregister", channel='stderr'):
745 713 ip.run_cell("1 + 2")
746 714
747 715 # This should have been removed.
748 716 nt.assert_not_in(err_transformer, ip.ast_transformers)
749 717
750 718
751 719 class StringRejector(ast.NodeTransformer):
752 720 """Throws an InputRejected when it sees a string literal.
753 721
754 722 Used to verify that NodeTransformers can signal that a piece of code should
755 723 not be executed by throwing an InputRejected.
756 724 """
757 725
758 726 def visit_Str(self, node):
759 727 raise InputRejected("test")
760 728
761 729
762 730 class TestAstTransformInputRejection(unittest.TestCase):
763 731
764 732 def setUp(self):
765 733 self.transformer = StringRejector()
766 734 ip.ast_transformers.append(self.transformer)
767 735
768 736 def tearDown(self):
769 737 ip.ast_transformers.remove(self.transformer)
770 738
771 739 def test_input_rejection(self):
772 740 """Check that NodeTransformers can reject input."""
773 741
774 742 expect_exception_tb = tt.AssertPrints("InputRejected: test")
775 743 expect_no_cell_output = tt.AssertNotPrints("'unsafe'", suppress=False)
776 744
777 745 # Run the same check twice to verify that the transformer is not
778 746 # disabled after raising.
779 747 with expect_exception_tb, expect_no_cell_output:
780 748 ip.run_cell("'unsafe'")
781 749
782 750 with expect_exception_tb, expect_no_cell_output:
783 751 res = ip.run_cell("'unsafe'")
784 752
785 753 self.assertIsInstance(res.error_before_exec, InputRejected)
786 754
787 755 def test__IPYTHON__():
788 756 # This shouldn't raise a NameError, that's all
789 757 __IPYTHON__
790 758
791 759
792 760 class DummyRepr(object):
793 761 def __repr__(self):
794 762 return "DummyRepr"
795 763
796 764 def _repr_html_(self):
797 765 return "<b>dummy</b>"
798 766
799 767 def _repr_javascript_(self):
800 768 return "console.log('hi');", {'key': 'value'}
801 769
802 770
803 771 def test_user_variables():
804 772 # enable all formatters
805 773 ip.display_formatter.active_types = ip.display_formatter.format_types
806 774
807 775 ip.user_ns['dummy'] = d = DummyRepr()
808 776 keys = {'dummy', 'doesnotexist'}
809 777 r = ip.user_expressions({ key:key for key in keys})
810 778
811 779 nt.assert_equal(keys, set(r.keys()))
812 780 dummy = r['dummy']
813 781 nt.assert_equal({'status', 'data', 'metadata'}, set(dummy.keys()))
814 782 nt.assert_equal(dummy['status'], 'ok')
815 783 data = dummy['data']
816 784 metadata = dummy['metadata']
817 785 nt.assert_equal(data.get('text/html'), d._repr_html_())
818 786 js, jsmd = d._repr_javascript_()
819 787 nt.assert_equal(data.get('application/javascript'), js)
820 788 nt.assert_equal(metadata.get('application/javascript'), jsmd)
821 789
822 790 dne = r['doesnotexist']
823 791 nt.assert_equal(dne['status'], 'error')
824 792 nt.assert_equal(dne['ename'], 'NameError')
825 793
826 794 # back to text only
827 795 ip.display_formatter.active_types = ['text/plain']
828 796
829 797 def test_user_expression():
830 798 # enable all formatters
831 799 ip.display_formatter.active_types = ip.display_formatter.format_types
832 800 query = {
833 801 'a' : '1 + 2',
834 802 'b' : '1/0',
835 803 }
836 804 r = ip.user_expressions(query)
837 805 import pprint
838 806 pprint.pprint(r)
839 807 nt.assert_equal(set(r.keys()), set(query.keys()))
840 808 a = r['a']
841 809 nt.assert_equal({'status', 'data', 'metadata'}, set(a.keys()))
842 810 nt.assert_equal(a['status'], 'ok')
843 811 data = a['data']
844 812 metadata = a['metadata']
845 813 nt.assert_equal(data.get('text/plain'), '3')
846 814
847 815 b = r['b']
848 816 nt.assert_equal(b['status'], 'error')
849 817 nt.assert_equal(b['ename'], 'ZeroDivisionError')
850 818
851 819 # back to text only
852 820 ip.display_formatter.active_types = ['text/plain']
853 821
854 822
855 823
856 824
857 825
858 826 class TestSyntaxErrorTransformer(unittest.TestCase):
859 827 """Check that SyntaxError raised by an input transformer is handled by run_cell()"""
860 828
861 829 class SyntaxErrorTransformer(InputTransformer):
862 830
863 831 def push(self, line):
864 832 pos = line.find('syntaxerror')
865 833 if pos >= 0:
866 834 e = SyntaxError('input contains "syntaxerror"')
867 835 e.text = line
868 836 e.offset = pos + 1
869 837 raise e
870 838 return line
871 839
872 840 def reset(self):
873 841 pass
874 842
875 843 def setUp(self):
876 844 self.transformer = TestSyntaxErrorTransformer.SyntaxErrorTransformer()
877 845 ip.input_splitter.python_line_transforms.append(self.transformer)
878 846 ip.input_transformer_manager.python_line_transforms.append(self.transformer)
879 847
880 848 def tearDown(self):
881 849 ip.input_splitter.python_line_transforms.remove(self.transformer)
882 850 ip.input_transformer_manager.python_line_transforms.remove(self.transformer)
883 851
884 852 def test_syntaxerror_input_transformer(self):
885 853 with tt.AssertPrints('1234'):
886 854 ip.run_cell('1234')
887 855 with tt.AssertPrints('SyntaxError: invalid syntax'):
888 856 ip.run_cell('1 2 3') # plain python syntax error
889 857 with tt.AssertPrints('SyntaxError: input contains "syntaxerror"'):
890 858 ip.run_cell('2345 # syntaxerror') # input transformer syntax error
891 859 with tt.AssertPrints('3456'):
892 860 ip.run_cell('3456')
893 861
894 862
895 863
896 864 def test_warning_suppression():
897 865 ip.run_cell("import warnings")
898 866 try:
899 867 with tt.AssertPrints("UserWarning: asdf", channel="stderr"):
900 868 ip.run_cell("warnings.warn('asdf')")
901 869 # Here's the real test -- if we run that again, we should get the
902 870 # warning again. Traditionally, each warning was only issued once per
903 871 # IPython session (approximately), even if the user typed in new and
904 872 # different code that should have also triggered the warning, leading
905 873 # to much confusion.
906 874 with tt.AssertPrints("UserWarning: asdf", channel="stderr"):
907 875 ip.run_cell("warnings.warn('asdf')")
908 876 finally:
909 877 ip.run_cell("del warnings")
910 878
911 879
912 880 def test_deprecation_warning():
913 881 ip.run_cell("""
914 882 import warnings
915 883 def wrn():
916 884 warnings.warn(
917 885 "I AM A WARNING",
918 886 DeprecationWarning
919 887 )
920 888 """)
921 889 try:
922 890 with tt.AssertPrints("I AM A WARNING", channel="stderr"):
923 891 ip.run_cell("wrn()")
924 892 finally:
925 893 ip.run_cell("del warnings")
926 894 ip.run_cell("del wrn")
927 895
928 896
929 897 class TestImportNoDeprecate(tt.TempFileMixin):
930 898
931 899 def setup(self):
932 900 """Make a valid python temp file."""
933 901 self.mktmp("""
934 902 import warnings
935 903 def wrn():
936 904 warnings.warn(
937 905 "I AM A WARNING",
938 906 DeprecationWarning
939 907 )
940 908 """)
941 909
942 910 def test_no_dep(self):
943 911 """
944 912 No deprecation warning should be raised from imported functions
945 913 """
946 914 ip.run_cell("from {} import wrn".format(self.fname))
947 915
948 916 with tt.AssertNotPrints("I AM A WARNING"):
949 917 ip.run_cell("wrn()")
950 918 ip.run_cell("del wrn")
@@ -1,1011 +1,988 b''
1 1 # -*- coding: utf-8 -*-
2 2 """Tests for various magic functions.
3 3
4 4 Needs to be run by nose (to make ipython session available).
5 5 """
6 6 from __future__ import absolute_import
7 7
8 8 import io
9 9 import os
10 10 import sys
11 11 import warnings
12 12 from unittest import TestCase
13 13
14 14 try:
15 15 from importlib import invalidate_caches   # Available from Python 3.3
16 16 except ImportError:
17 17 def invalidate_caches():
18 18 pass
19 19
20 20 import nose.tools as nt
21 21
22 22 from IPython import get_ipython
23 23 from IPython.core import magic
24 24 from IPython.core.error import UsageError
25 25 from IPython.core.magic import (Magics, magics_class, line_magic,
26 26 cell_magic,
27 27 register_line_magic, register_cell_magic)
28 28 from IPython.core.magics import execution, script, code
29 29 from IPython.testing import decorators as dec
30 30 from IPython.testing import tools as tt
31 31 from IPython.utils import py3compat
32 32 from IPython.utils.io import capture_output
33 33 from IPython.utils.tempdir import TemporaryDirectory
34 34 from IPython.utils.process import find_cmd
35 35
36 36 if py3compat.PY3:
37 37 from io import StringIO
38 38 else:
39 39 from StringIO import StringIO
40 40
41 41
42 42 _ip = get_ipython()
43 43
44 44 @magic.magics_class
45 45 class DummyMagics(magic.Magics): pass
46 46
47 47 def test_extract_code_ranges():
48 48 instr = "1 3 5-6 7-9 10:15 17: :10 10- -13 :"
49 49 expected = [(0, 1),
50 50 (2, 3),
51 51 (4, 6),
52 52 (6, 9),
53 53 (9, 14),
54 54 (16, None),
55 55 (None, 9),
56 56 (9, None),
57 57 (None, 13),
58 58 (None, None)]
59 59 actual = list(code.extract_code_ranges(instr))
60 60 nt.assert_equal(actual, expected)
61 61
62 62 def test_extract_symbols():
63 63 source = """import foo\na = 10\ndef b():\n return 42\n\n\nclass A: pass\n\n\n"""
64 64 symbols_args = ["a", "b", "A", "A,b", "A,a", "z"]
65 65 expected = [([], ['a']),
66 66 (["def b():\n return 42\n"], []),
67 67 (["class A: pass\n"], []),
68 68 (["class A: pass\n", "def b():\n return 42\n"], []),
69 69 (["class A: pass\n"], ['a']),
70 70 ([], ['z'])]
71 71 for symbols, exp in zip(symbols_args, expected):
72 72 nt.assert_equal(code.extract_symbols(source, symbols), exp)
73 73
74 74
75 75 def test_extract_symbols_raises_exception_with_non_python_code():
76 76 source = ("=begin A Ruby program :)=end\n"
77 77 "def hello\n"
78 78 "puts 'Hello world'\n"
79 79 "end")
80 80 with nt.assert_raises(SyntaxError):
81 81 code.extract_symbols(source, "hello")
82 82
83 83 def test_config():
84 84 """ test that config magic does not raise
85 85 can happen if Configurable init is moved too early into
86 86 Magics.__init__ as then a Config object will be registerd as a
87 87 magic.
88 88 """
89 89 ## should not raise.
90 90 _ip.magic('config')
91 91
92 92 def test_rehashx():
93 93 # clear up everything
94 94 _ip.alias_manager.clear_aliases()
95 95 del _ip.db['syscmdlist']
96 96
97 97 _ip.magic('rehashx')
98 98 # Practically ALL ipython development systems will have more than 10 aliases
99 99
100 100 nt.assert_true(len(_ip.alias_manager.aliases) > 10)
101 101 for name, cmd in _ip.alias_manager.aliases:
102 102 # we must strip dots from alias names
103 103 nt.assert_not_in('.', name)
104 104
105 105 # rehashx must fill up syscmdlist
106 106 scoms = _ip.db['syscmdlist']
107 107 nt.assert_true(len(scoms) > 10)
108 108
109 109
110 110 def test_magic_parse_options():
111 111 """Test that we don't mangle paths when parsing magic options."""
112 112 ip = get_ipython()
113 113 path = 'c:\\x'
114 114 m = DummyMagics(ip)
115 115 opts = m.parse_options('-f %s' % path,'f:')[0]
116 116 # argv splitting is os-dependent
117 117 if os.name == 'posix':
118 118 expected = 'c:x'
119 119 else:
120 120 expected = path
121 121 nt.assert_equal(opts['f'], expected)
122 122
123 123 def test_magic_parse_long_options():
124 124 """Magic.parse_options can handle --foo=bar long options"""
125 125 ip = get_ipython()
126 126 m = DummyMagics(ip)
127 127 opts, _ = m.parse_options('--foo --bar=bubble', 'a', 'foo', 'bar=')
128 128 nt.assert_in('foo', opts)
129 129 nt.assert_in('bar', opts)
130 130 nt.assert_equal(opts['bar'], "bubble")
131 131
132 132
133 133 @dec.skip_without('sqlite3')
134 134 def doctest_hist_f():
135 135 """Test %hist -f with temporary filename.
136 136
137 137 In [9]: import tempfile
138 138
139 139 In [10]: tfile = tempfile.mktemp('.py','tmp-ipython-')
140 140
141 141 In [11]: %hist -nl -f $tfile 3
142 142
143 143 In [13]: import os; os.unlink(tfile)
144 144 """
145 145
146 146
147 147 @dec.skip_without('sqlite3')
148 148 def doctest_hist_r():
149 149 """Test %hist -r
150 150
151 151 XXX - This test is not recording the output correctly. For some reason, in
152 152 testing mode the raw history isn't getting populated. No idea why.
153 153 Disabling the output checking for now, though at least we do run it.
154 154
155 155 In [1]: 'hist' in _ip.lsmagic()
156 156 Out[1]: True
157 157
158 158 In [2]: x=1
159 159
160 160 In [3]: %hist -rl 2
161 161 x=1 # random
162 162 %hist -r 2
163 163 """
164 164
165 165
166 166 @dec.skip_without('sqlite3')
167 167 def doctest_hist_op():
168 168 """Test %hist -op
169 169
170 170 In [1]: class b(float):
171 171 ...: pass
172 172 ...:
173 173
174 174 In [2]: class s(object):
175 175 ...: def __str__(self):
176 176 ...: return 's'
177 177 ...:
178 178
179 179 In [3]:
180 180
181 181 In [4]: class r(b):
182 182 ...: def __repr__(self):
183 183 ...: return 'r'
184 184 ...:
185 185
186 186 In [5]: class sr(s,r): pass
187 187 ...:
188 188
189 189 In [6]:
190 190
191 191 In [7]: bb=b()
192 192
193 193 In [8]: ss=s()
194 194
195 195 In [9]: rr=r()
196 196
197 197 In [10]: ssrr=sr()
198 198
199 199 In [11]: 4.5
200 200 Out[11]: 4.5
201 201
202 202 In [12]: str(ss)
203 203 Out[12]: 's'
204 204
205 205 In [13]:
206 206
207 207 In [14]: %hist -op
208 208 >>> class b:
209 209 ... pass
210 210 ...
211 211 >>> class s(b):
212 212 ... def __str__(self):
213 213 ... return 's'
214 214 ...
215 215 >>>
216 216 >>> class r(b):
217 217 ... def __repr__(self):
218 218 ... return 'r'
219 219 ...
220 220 >>> class sr(s,r): pass
221 221 >>>
222 222 >>> bb=b()
223 223 >>> ss=s()
224 224 >>> rr=r()
225 225 >>> ssrr=sr()
226 226 >>> 4.5
227 227 4.5
228 228 >>> str(ss)
229 229 's'
230 230 >>>
231 231 """
232 232
233 233 def test_hist_pof():
234 234 ip = get_ipython()
235 235 ip.run_cell(u"1+2", store_history=True)
236 236 #raise Exception(ip.history_manager.session_number)
237 237 #raise Exception(list(ip.history_manager._get_range_session()))
238 238 with TemporaryDirectory() as td:
239 239 tf = os.path.join(td, 'hist.py')
240 240 ip.run_line_magic('history', '-pof %s' % tf)
241 241 assert os.path.isfile(tf)
242 242
243 243
244 244 @dec.skip_without('sqlite3')
245 245 def test_macro():
246 246 ip = get_ipython()
247 247 ip.history_manager.reset() # Clear any existing history.
248 248 cmds = ["a=1", "def b():\n return a**2", "print(a,b())"]
249 249 for i, cmd in enumerate(cmds, start=1):
250 250 ip.history_manager.store_inputs(i, cmd)
251 251 ip.magic("macro test 1-3")
252 252 nt.assert_equal(ip.user_ns["test"].value, "\n".join(cmds)+"\n")
253 253
254 254 # List macros
255 255 nt.assert_in("test", ip.magic("macro"))
256 256
257 257
258 258 @dec.skip_without('sqlite3')
259 259 def test_macro_run():
260 260 """Test that we can run a multi-line macro successfully."""
261 261 ip = get_ipython()
262 262 ip.history_manager.reset()
263 263 cmds = ["a=10", "a+=1", py3compat.doctest_refactor_print("print a"),
264 264 "%macro test 2-3"]
265 265 for cmd in cmds:
266 266 ip.run_cell(cmd, store_history=True)
267 267 nt.assert_equal(ip.user_ns["test"].value,
268 268 py3compat.doctest_refactor_print("a+=1\nprint a\n"))
269 269 with tt.AssertPrints("12"):
270 270 ip.run_cell("test")
271 271 with tt.AssertPrints("13"):
272 272 ip.run_cell("test")
273 273
274 274
275 275 def test_magic_magic():
276 276 """Test %magic"""
277 277 ip = get_ipython()
278 278 with capture_output() as captured:
279 279 ip.magic("magic")
280 280
281 281 stdout = captured.stdout
282 282 nt.assert_in('%magic', stdout)
283 283 nt.assert_in('IPython', stdout)
284 284 nt.assert_in('Available', stdout)
285 285
286 286
287 287 @dec.skipif_not_numpy
288 288 def test_numpy_reset_array_undec():
289 289 "Test '%reset array' functionality"
290 290 _ip.ex('import numpy as np')
291 291 _ip.ex('a = np.empty(2)')
292 292 nt.assert_in('a', _ip.user_ns)
293 293 _ip.magic('reset -f array')
294 294 nt.assert_not_in('a', _ip.user_ns)
295 295
296 296 def test_reset_out():
297 297 "Test '%reset out' magic"
298 298 _ip.run_cell("parrot = 'dead'", store_history=True)
299 299 # test '%reset -f out', make an Out prompt
300 300 _ip.run_cell("parrot", store_history=True)
301 301 nt.assert_true('dead' in [_ip.user_ns[x] for x in ('_','__','___')])
302 302 _ip.magic('reset -f out')
303 303 nt.assert_false('dead' in [_ip.user_ns[x] for x in ('_','__','___')])
304 304 nt.assert_equal(len(_ip.user_ns['Out']), 0)
305 305
306 306 def test_reset_in():
307 307 "Test '%reset in' magic"
308 308 # test '%reset -f in'
309 309 _ip.run_cell("parrot", store_history=True)
310 310 nt.assert_true('parrot' in [_ip.user_ns[x] for x in ('_i','_ii','_iii')])
311 311 _ip.magic('%reset -f in')
312 312 nt.assert_false('parrot' in [_ip.user_ns[x] for x in ('_i','_ii','_iii')])
313 313 nt.assert_equal(len(set(_ip.user_ns['In'])), 1)
314 314
315 315 def test_reset_dhist():
316 316 "Test '%reset dhist' magic"
317 317 _ip.run_cell("tmp = [d for d in _dh]") # copy before clearing
318 318 _ip.magic('cd ' + os.path.dirname(nt.__file__))
319 319 _ip.magic('cd -')
320 320 nt.assert_true(len(_ip.user_ns['_dh']) > 0)
321 321 _ip.magic('reset -f dhist')
322 322 nt.assert_equal(len(_ip.user_ns['_dh']), 0)
323 323 _ip.run_cell("_dh = [d for d in tmp]") #restore
324 324
325 325 def test_reset_in_length():
326 326 "Test that '%reset in' preserves In[] length"
327 327 _ip.run_cell("print 'foo'")
328 328 _ip.run_cell("reset -f in")
329 329 nt.assert_equal(len(_ip.user_ns['In']), _ip.displayhook.prompt_count+1)
330 330
331 331 def test_tb_syntaxerror():
332 332 """test %tb after a SyntaxError"""
333 333 ip = get_ipython()
334 334 ip.run_cell("for")
335 335
336 336 # trap and validate stdout
337 337 save_stdout = sys.stdout
338 338 try:
339 339 sys.stdout = StringIO()
340 340 ip.run_cell("%tb")
341 341 out = sys.stdout.getvalue()
342 342 finally:
343 343 sys.stdout = save_stdout
344 344 # trim output, and only check the last line
345 345 last_line = out.rstrip().splitlines()[-1].strip()
346 346 nt.assert_equal(last_line, "SyntaxError: invalid syntax")
347 347
348 348
349 349 def test_time():
350 350 ip = get_ipython()
351 351
352 352 with tt.AssertPrints("Wall time: "):
353 353 ip.run_cell("%time None")
354 354
355 355 ip.run_cell("def f(kmjy):\n"
356 356 " %time print (2*kmjy)")
357 357
358 358 with tt.AssertPrints("Wall time: "):
359 359 with tt.AssertPrints("hihi", suppress=False):
360 360 ip.run_cell("f('hi')")
361 361
362 362
363 363 @dec.skip_win32
364 364 def test_time2():
365 365 ip = get_ipython()
366 366
367 367 with tt.AssertPrints("CPU times: user "):
368 368 ip.run_cell("%time None")
369 369
370 370 def test_time3():
371 371 """Erroneous magic function calls, issue gh-3334"""
372 372 ip = get_ipython()
373 373 ip.user_ns.pop('run', None)
374 374
375 375 with tt.AssertNotPrints("not found", channel='stderr'):
376 376 ip.run_cell("%%time\n"
377 377 "run = 0\n"
378 378 "run += 1")
379 379
380 @dec.skipif(sys.version_info[0] >= 3, "no differences with __future__ in py3")
381 def test_time_futures():
382 "Test %time with __future__ environments"
383 ip = get_ipython()
384 ip.autocall = 0
385 ip.run_cell("from __future__ import division")
386 with tt.AssertPrints('0.25'):
387 ip.run_line_magic('time', 'print(1/4)')
388 ip.compile.reset_compiler_flags()
389 with tt.AssertNotPrints('0.25'):
390 ip.run_line_magic('time', 'print(1/4)')
391
392 380 def test_doctest_mode():
393 381 "Toggle doctest_mode twice, it should be a no-op and run without error"
394 382 _ip.magic('doctest_mode')
395 383 _ip.magic('doctest_mode')
396 384
397 385
398 386 def test_parse_options():
399 387 """Tests for basic options parsing in magics."""
400 388 # These are only the most minimal of tests, more should be added later. At
401 389 # the very least we check that basic text/unicode calls work OK.
402 390 m = DummyMagics(_ip)
403 391 nt.assert_equal(m.parse_options('foo', '')[1], 'foo')
404 392 nt.assert_equal(m.parse_options(u'foo', '')[1], u'foo')
405 393
406 394
407 395 def test_dirops():
408 396 """Test various directory handling operations."""
409 397 # curpath = lambda :os.path.splitdrive(py3compat.getcwd())[1].replace('\\','/')
410 398 curpath = py3compat.getcwd
411 399 startdir = py3compat.getcwd()
412 400 ipdir = os.path.realpath(_ip.ipython_dir)
413 401 try:
414 402 _ip.magic('cd "%s"' % ipdir)
415 403 nt.assert_equal(curpath(), ipdir)
416 404 _ip.magic('cd -')
417 405 nt.assert_equal(curpath(), startdir)
418 406 _ip.magic('pushd "%s"' % ipdir)
419 407 nt.assert_equal(curpath(), ipdir)
420 408 _ip.magic('popd')
421 409 nt.assert_equal(curpath(), startdir)
422 410 finally:
423 411 os.chdir(startdir)
424 412
425 413
426 414 def test_xmode():
427 415 # Calling xmode three times should be a no-op
428 416 xmode = _ip.InteractiveTB.mode
429 417 for i in range(3):
430 418 _ip.magic("xmode")
431 419 nt.assert_equal(_ip.InteractiveTB.mode, xmode)
432 420
433 421 def test_reset_hard():
434 422 monitor = []
435 423 class A(object):
436 424 def __del__(self):
437 425 monitor.append(1)
438 426 def __repr__(self):
439 427 return "<A instance>"
440 428
441 429 _ip.user_ns["a"] = A()
442 430 _ip.run_cell("a")
443 431
444 432 nt.assert_equal(monitor, [])
445 433 _ip.magic("reset -f")
446 434 nt.assert_equal(monitor, [1])
447 435
448 436 class TestXdel(tt.TempFileMixin):
449 437 def test_xdel(self):
450 438 """Test that references from %run are cleared by xdel."""
451 439 src = ("class A(object):\n"
452 440 " monitor = []\n"
453 441 " def __del__(self):\n"
454 442 " self.monitor.append(1)\n"
455 443 "a = A()\n")
456 444 self.mktmp(src)
457 445 # %run creates some hidden references...
458 446 _ip.magic("run %s" % self.fname)
459 447 # ... as does the displayhook.
460 448 _ip.run_cell("a")
461 449
462 450 monitor = _ip.user_ns["A"].monitor
463 451 nt.assert_equal(monitor, [])
464 452
465 453 _ip.magic("xdel a")
466 454
467 455 # Check that a's __del__ method has been called.
468 456 nt.assert_equal(monitor, [1])
469 457
470 458 def doctest_who():
471 459 """doctest for %who
472 460
473 461 In [1]: %reset -f
474 462
475 463 In [2]: alpha = 123
476 464
477 465 In [3]: beta = 'beta'
478 466
479 467 In [4]: %who int
480 468 alpha
481 469
482 470 In [5]: %who str
483 471 beta
484 472
485 473 In [6]: %whos
486 474 Variable Type Data/Info
487 475 ----------------------------
488 476 alpha int 123
489 477 beta str beta
490 478
491 479 In [7]: %who_ls
492 480 Out[7]: ['alpha', 'beta']
493 481 """
494 482
495 483 def test_whos():
496 484 """Check that whos is protected against objects where repr() fails."""
497 485 class A(object):
498 486 def __repr__(self):
499 487 raise Exception()
500 488 _ip.user_ns['a'] = A()
501 489 _ip.magic("whos")
502 490
503 491 @py3compat.u_format
504 492 def doctest_precision():
505 493 """doctest for %precision
506 494
507 495 In [1]: f = get_ipython().display_formatter.formatters['text/plain']
508 496
509 497 In [2]: %precision 5
510 498 Out[2]: {u}'%.5f'
511 499
512 500 In [3]: f.float_format
513 501 Out[3]: {u}'%.5f'
514 502
515 503 In [4]: %precision %e
516 504 Out[4]: {u}'%e'
517 505
518 506 In [5]: f(3.1415927)
519 507 Out[5]: {u}'3.141593e+00'
520 508 """
521 509
522 510 def test_psearch():
523 511 with tt.AssertPrints("dict.fromkeys"):
524 512 _ip.run_cell("dict.fr*?")
525 513
526 514 def test_timeit_shlex():
527 515 """test shlex issues with timeit (#1109)"""
528 516 _ip.ex("def f(*a,**kw): pass")
529 517 _ip.magic('timeit -n1 "this is a bug".count(" ")')
530 518 _ip.magic('timeit -r1 -n1 f(" ", 1)')
531 519 _ip.magic('timeit -r1 -n1 f(" ", 1, " ", 2, " ")')
532 520 _ip.magic('timeit -r1 -n1 ("a " + "b")')
533 521 _ip.magic('timeit -r1 -n1 f("a " + "b")')
534 522 _ip.magic('timeit -r1 -n1 f("a " + "b ")')
535 523
536 524
537 525 def test_timeit_arguments():
538 526 "Test valid timeit arguments, should not cause SyntaxError (GH #1269)"
539 527 _ip.magic("timeit ('#')")
540 528
541 529
542 530 def test_timeit_special_syntax():
543 531 "Test %%timeit with IPython special syntax"
544 532 @register_line_magic
545 533 def lmagic(line):
546 534 ip = get_ipython()
547 535 ip.user_ns['lmagic_out'] = line
548 536
549 537 # line mode test
550 538 _ip.run_line_magic('timeit', '-n1 -r1 %lmagic my line')
551 539 nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line')
552 540 # cell mode test
553 541 _ip.run_cell_magic('timeit', '-n1 -r1', '%lmagic my line2')
554 542 nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line2')
555 543
556 544 def test_timeit_return():
557 545 """
558 546     test whether timeit -o returns an object
559 547 """
560 548
561 549 res = _ip.run_line_magic('timeit','-n10 -r10 -o 1')
562 550 assert(res is not None)
563 551
564 552 def test_timeit_quiet():
565 553 """
566 554 test quiet option of timeit magic
567 555 """
568 556 with tt.AssertNotPrints("loops"):
569 557 _ip.run_cell("%timeit -n1 -r1 -q 1")
570 558
571 559 def test_timeit_return_quiet():
572 560 with tt.AssertNotPrints("loops"):
573 561 res = _ip.run_line_magic('timeit', '-n1 -r1 -q -o 1')
574 562 assert (res is not None)
575 563
576 @dec.skipif(sys.version_info[0] >= 3, "no differences with __future__ in py3")
577 def test_timeit_futures():
578 "Test %timeit with __future__ environments"
579 ip = get_ipython()
580 ip.run_cell("from __future__ import division")
581 with tt.AssertPrints('0.25'):
582 ip.run_line_magic('timeit', '-n1 -r1 print(1/4)')
583 ip.compile.reset_compiler_flags()
584 with tt.AssertNotPrints('0.25'):
585 ip.run_line_magic('timeit', '-n1 -r1 print(1/4)')
586
587 564 @dec.skipif(execution.profile is None)
588 565 def test_prun_special_syntax():
589 566 "Test %%prun with IPython special syntax"
590 567 @register_line_magic
591 568 def lmagic(line):
592 569 ip = get_ipython()
593 570 ip.user_ns['lmagic_out'] = line
594 571
595 572 # line mode test
596 573 _ip.run_line_magic('prun', '-q %lmagic my line')
597 574 nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line')
598 575 # cell mode test
599 576 _ip.run_cell_magic('prun', '-q', '%lmagic my line2')
600 577 nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line2')
601 578
602 579 @dec.skipif(execution.profile is None)
603 580 def test_prun_quotes():
604 581 "Test that prun does not clobber string escapes (GH #1302)"
605 582 _ip.magic(r"prun -q x = '\t'")
606 583 nt.assert_equal(_ip.user_ns['x'], '\t')
607 584
608 585 def test_extension():
609 586 # Debugging information for failures of this test
610 587 print('sys.path:')
611 588 for p in sys.path:
612 589 print(' ', p)
613 590 print('CWD', os.getcwd())
614 591
615 592 nt.assert_raises(ImportError, _ip.magic, "load_ext daft_extension")
616 593 daft_path = os.path.join(os.path.dirname(__file__), "daft_extension")
617 594 sys.path.insert(0, daft_path)
618 595 try:
619 596 _ip.user_ns.pop('arq', None)
620 597 invalidate_caches() # Clear import caches
621 598 _ip.magic("load_ext daft_extension")
622 599 nt.assert_equal(_ip.user_ns['arq'], 185)
623 600 _ip.magic("unload_ext daft_extension")
624 601 assert 'arq' not in _ip.user_ns
625 602 finally:
626 603 sys.path.remove(daft_path)
627 604
628 605
629 606 def test_notebook_export_json():
630 607 _ip = get_ipython()
631 608 _ip.history_manager.reset() # Clear any existing history.
632 609 cmds = [u"a=1", u"def b():\n return a**2", u"print('noël, été', b())"]
633 610 for i, cmd in enumerate(cmds, start=1):
634 611 _ip.history_manager.store_inputs(i, cmd)
635 612 with TemporaryDirectory() as td:
636 613 outfile = os.path.join(td, "nb.ipynb")
637 614 _ip.magic("notebook -e %s" % outfile)
638 615
639 616
640 617 class TestEnv(TestCase):
641 618
642 619 def test_env(self):
643 620 env = _ip.magic("env")
644 621 self.assertTrue(isinstance(env, dict))
645 622
646 623 def test_env_get_set_simple(self):
647 624 env = _ip.magic("env var val1")
648 625 self.assertEqual(env, None)
649 626 self.assertEqual(os.environ['var'], 'val1')
650 627 self.assertEqual(_ip.magic("env var"), 'val1')
651 628 env = _ip.magic("env var=val2")
652 629 self.assertEqual(env, None)
653 630 self.assertEqual(os.environ['var'], 'val2')
654 631
655 632 def test_env_get_set_complex(self):
656 633 env = _ip.magic("env var 'val1 '' 'val2")
657 634 self.assertEqual(env, None)
658 635 self.assertEqual(os.environ['var'], "'val1 '' 'val2")
659 636 self.assertEqual(_ip.magic("env var"), "'val1 '' 'val2")
660 637 env = _ip.magic('env var=val2 val3="val4')
661 638 self.assertEqual(env, None)
662 639 self.assertEqual(os.environ['var'], 'val2 val3="val4')
663 640
664 641 def test_env_set_bad_input(self):
665 642 self.assertRaises(UsageError, lambda: _ip.magic("set_env var"))
666 643
667 644 def test_env_set_whitespace(self):
668 645 self.assertRaises(UsageError, lambda: _ip.magic("env var A=B"))
669 646
670 647
671 648 class CellMagicTestCase(TestCase):
672 649
673 650 def check_ident(self, magic):
674 651 # Manually called, we get the result
675 652 out = _ip.run_cell_magic(magic, 'a', 'b')
676 653 nt.assert_equal(out, ('a','b'))
677 654 # Via run_cell, it goes into the user's namespace via displayhook
678 655 _ip.run_cell('%%' + magic +' c\nd')
679 656 nt.assert_equal(_ip.user_ns['_'], ('c','d'))
680 657
681 658 def test_cell_magic_func_deco(self):
682 659 "Cell magic using simple decorator"
683 660 @register_cell_magic
684 661 def cellm(line, cell):
685 662 return line, cell
686 663
687 664 self.check_ident('cellm')
688 665
689 666 def test_cell_magic_reg(self):
690 667 "Cell magic manually registered"
691 668 def cellm(line, cell):
692 669 return line, cell
693 670
694 671 _ip.register_magic_function(cellm, 'cell', 'cellm2')
695 672 self.check_ident('cellm2')
696 673
697 674 def test_cell_magic_class(self):
698 675 "Cell magics declared via a class"
699 676 @magics_class
700 677 class MyMagics(Magics):
701 678
702 679 @cell_magic
703 680 def cellm3(self, line, cell):
704 681 return line, cell
705 682
706 683 _ip.register_magics(MyMagics)
707 684 self.check_ident('cellm3')
708 685
709 686 def test_cell_magic_class2(self):
710 687 "Cell magics declared via a class, #2"
711 688 @magics_class
712 689 class MyMagics2(Magics):
713 690
714 691 @cell_magic('cellm4')
715 692 def cellm33(self, line, cell):
716 693 return line, cell
717 694
718 695 _ip.register_magics(MyMagics2)
719 696 self.check_ident('cellm4')
720 697 # Check that nothing is registered as 'cellm33'
721 698 c33 = _ip.find_cell_magic('cellm33')
722 699 nt.assert_equal(c33, None)
723 700
724 701 def test_file():
725 702 """Basic %%file"""
726 703 ip = get_ipython()
727 704 with TemporaryDirectory() as td:
728 705 fname = os.path.join(td, 'file1')
729 706 ip.run_cell_magic("file", fname, u'\n'.join([
730 707 'line1',
731 708 'line2',
732 709 ]))
733 710 with open(fname) as f:
734 711 s = f.read()
735 712 nt.assert_in('line1\n', s)
736 713 nt.assert_in('line2', s)
737 714
738 715 def test_file_var_expand():
739 716 """%%file $filename"""
740 717 ip = get_ipython()
741 718 with TemporaryDirectory() as td:
742 719 fname = os.path.join(td, 'file1')
743 720 ip.user_ns['filename'] = fname
744 721 ip.run_cell_magic("file", '$filename', u'\n'.join([
745 722 'line1',
746 723 'line2',
747 724 ]))
748 725 with open(fname) as f:
749 726 s = f.read()
750 727 nt.assert_in('line1\n', s)
751 728 nt.assert_in('line2', s)
752 729
753 730 def test_file_unicode():
754 731 """%%file with unicode cell"""
755 732 ip = get_ipython()
756 733 with TemporaryDirectory() as td:
757 734 fname = os.path.join(td, 'file1')
758 735 ip.run_cell_magic("file", fname, u'\n'.join([
759 736 u'liné1',
760 737 u'liné2',
761 738 ]))
762 739 with io.open(fname, encoding='utf-8') as f:
763 740 s = f.read()
764 741 nt.assert_in(u'liné1\n', s)
765 742 nt.assert_in(u'liné2', s)
766 743
767 744 def test_file_amend():
768 745 """%%file -a amends files"""
769 746 ip = get_ipython()
770 747 with TemporaryDirectory() as td:
771 748 fname = os.path.join(td, 'file2')
772 749 ip.run_cell_magic("file", fname, u'\n'.join([
773 750 'line1',
774 751 'line2',
775 752 ]))
776 753 ip.run_cell_magic("file", "-a %s" % fname, u'\n'.join([
777 754 'line3',
778 755 'line4',
779 756 ]))
780 757 with open(fname) as f:
781 758 s = f.read()
782 759 nt.assert_in('line1\n', s)
783 760 nt.assert_in('line3\n', s)
784 761
785 762
786 763 def test_script_config():
787 764 ip = get_ipython()
788 765 ip.config.ScriptMagics.script_magics = ['whoda']
789 766 sm = script.ScriptMagics(shell=ip)
790 767 nt.assert_in('whoda', sm.magics['cell'])
791 768
792 769 @dec.skip_win32
793 770 def test_script_out():
794 771 ip = get_ipython()
795 772 ip.run_cell_magic("script", "--out output sh", "echo 'hi'")
796 773 nt.assert_equal(ip.user_ns['output'], 'hi\n')
797 774
798 775 @dec.skip_win32
799 776 def test_script_err():
800 777 ip = get_ipython()
801 778 ip.run_cell_magic("script", "--err error sh", "echo 'hello' >&2")
802 779 nt.assert_equal(ip.user_ns['error'], 'hello\n')
803 780
804 781 @dec.skip_win32
805 782 def test_script_out_err():
806 783 ip = get_ipython()
807 784 ip.run_cell_magic("script", "--out output --err error sh", "echo 'hi'\necho 'hello' >&2")
808 785 nt.assert_equal(ip.user_ns['output'], 'hi\n')
809 786 nt.assert_equal(ip.user_ns['error'], 'hello\n')
810 787
811 788 @dec.skip_win32
812 789 def test_script_bg_out():
813 790 ip = get_ipython()
814 791 ip.run_cell_magic("script", "--bg --out output sh", "echo 'hi'")
815 792 nt.assert_equal(ip.user_ns['output'].read(), b'hi\n')
816 793
817 794 @dec.skip_win32
818 795 def test_script_bg_err():
819 796 ip = get_ipython()
820 797 ip.run_cell_magic("script", "--bg --err error sh", "echo 'hello' >&2")
821 798 nt.assert_equal(ip.user_ns['error'].read(), b'hello\n')
822 799
823 800 @dec.skip_win32
824 801 def test_script_bg_out_err():
825 802 ip = get_ipython()
826 803 ip.run_cell_magic("script", "--bg --out output --err error sh", "echo 'hi'\necho 'hello' >&2")
827 804 nt.assert_equal(ip.user_ns['output'].read(), b'hi\n')
828 805 nt.assert_equal(ip.user_ns['error'].read(), b'hello\n')
829 806
830 807 def test_script_defaults():
831 808 ip = get_ipython()
832 809 for cmd in ['sh', 'bash', 'perl', 'ruby']:
833 810 try:
834 811 find_cmd(cmd)
835 812 except Exception:
836 813 pass
837 814 else:
838 815 nt.assert_in(cmd, ip.magics_manager.magics['cell'])
839 816
840 817
841 818 @magics_class
842 819 class FooFoo(Magics):
843 820 """class with both %foo and %%foo magics"""
844 821 @line_magic('foo')
845 822 def line_foo(self, line):
846 823 "I am line foo"
847 824 pass
848 825
849 826 @cell_magic("foo")
850 827 def cell_foo(self, line, cell):
851 828 "I am cell foo, not line foo"
852 829 pass
853 830
854 831 def test_line_cell_info():
855 832 """%%foo and %foo magics are distinguishable to inspect"""
856 833 ip = get_ipython()
857 834 ip.magics_manager.register(FooFoo)
858 835 oinfo = ip.object_inspect('foo')
859 836 nt.assert_true(oinfo['found'])
860 837 nt.assert_true(oinfo['ismagic'])
861 838
862 839 oinfo = ip.object_inspect('%%foo')
863 840 nt.assert_true(oinfo['found'])
864 841 nt.assert_true(oinfo['ismagic'])
865 842 nt.assert_equal(oinfo['docstring'], FooFoo.cell_foo.__doc__)
866 843
867 844 oinfo = ip.object_inspect('%foo')
868 845 nt.assert_true(oinfo['found'])
869 846 nt.assert_true(oinfo['ismagic'])
870 847 nt.assert_equal(oinfo['docstring'], FooFoo.line_foo.__doc__)
871 848
872 849 def test_multiple_magics():
873 850 ip = get_ipython()
874 851 foo1 = FooFoo(ip)
875 852 foo2 = FooFoo(ip)
876 853 mm = ip.magics_manager
877 854 mm.register(foo1)
878 855 nt.assert_true(mm.magics['line']['foo'].__self__ is foo1)
879 856 mm.register(foo2)
880 857 nt.assert_true(mm.magics['line']['foo'].__self__ is foo2)
881 858
882 859 def test_alias_magic():
883 860 """Test %alias_magic."""
884 861 ip = get_ipython()
885 862 mm = ip.magics_manager
886 863
887 864 # Basic operation: both cell and line magics are created, if possible.
888 865 ip.run_line_magic('alias_magic', 'timeit_alias timeit')
889 866 nt.assert_in('timeit_alias', mm.magics['line'])
890 867 nt.assert_in('timeit_alias', mm.magics['cell'])
891 868
892 869 # --cell is specified, line magic not created.
893 870 ip.run_line_magic('alias_magic', '--cell timeit_cell_alias timeit')
894 871 nt.assert_not_in('timeit_cell_alias', mm.magics['line'])
895 872 nt.assert_in('timeit_cell_alias', mm.magics['cell'])
896 873
897 874 # Test that line alias is created successfully.
898 875 ip.run_line_magic('alias_magic', '--line env_alias env')
899 876 nt.assert_equal(ip.run_line_magic('env', ''),
900 877 ip.run_line_magic('env_alias', ''))
901 878
902 879 def test_save():
903 880 """Test %save."""
904 881 ip = get_ipython()
905 882 ip.history_manager.reset() # Clear any existing history.
906 883 cmds = [u"a=1", u"def b():\n return a**2", u"print(a, b())"]
907 884 for i, cmd in enumerate(cmds, start=1):
908 885 ip.history_manager.store_inputs(i, cmd)
909 886 with TemporaryDirectory() as tmpdir:
910 887 file = os.path.join(tmpdir, "testsave.py")
911 888 ip.run_line_magic("save", "%s 1-10" % file)
912 889 with open(file) as f:
913 890 content = f.read()
914 891 nt.assert_equal(content.count(cmds[0]), 1)
915 892 nt.assert_in('coding: utf-8', content)
916 893 ip.run_line_magic("save", "-a %s 1-10" % file)
917 894 with open(file) as f:
918 895 content = f.read()
919 896 nt.assert_equal(content.count(cmds[0]), 2)
920 897 nt.assert_in('coding: utf-8', content)
921 898
922 899
923 900 def test_store():
924 901 """Test %store."""
925 902 ip = get_ipython()
926 903 ip.run_line_magic('load_ext', 'storemagic')
927 904
928 905 # make sure the storage is empty
929 906 ip.run_line_magic('store', '-z')
930 907 ip.user_ns['var'] = 42
931 908 ip.run_line_magic('store', 'var')
932 909 ip.user_ns['var'] = 39
933 910 ip.run_line_magic('store', '-r')
934 911 nt.assert_equal(ip.user_ns['var'], 42)
935 912
936 913 ip.run_line_magic('store', '-d var')
937 914 ip.user_ns['var'] = 39
938 915 ip.run_line_magic('store' , '-r')
939 916 nt.assert_equal(ip.user_ns['var'], 39)
940 917
941 918
942 919 def _run_edit_test(arg_s, exp_filename=None,
943 920 exp_lineno=-1,
944 921 exp_contents=None,
945 922 exp_is_temp=None):
946 923 ip = get_ipython()
947 924 M = code.CodeMagics(ip)
948 925 last_call = ['','']
949 926 opts,args = M.parse_options(arg_s,'prxn:')
950 927 filename, lineno, is_temp = M._find_edit_target(ip, args, opts, last_call)
951 928
952 929 if exp_filename is not None:
953 930 nt.assert_equal(exp_filename, filename)
954 931 if exp_contents is not None:
955 932 with io.open(filename, 'r', encoding='utf-8') as f:
956 933 contents = f.read()
957 934 nt.assert_equal(exp_contents, contents)
958 935 if exp_lineno != -1:
959 936 nt.assert_equal(exp_lineno, lineno)
960 937 if exp_is_temp is not None:
961 938 nt.assert_equal(exp_is_temp, is_temp)
962 939
963 940
964 941 def test_edit_interactive():
965 942 """%edit on interactively defined objects"""
966 943 ip = get_ipython()
967 944 n = ip.execution_count
968 945 ip.run_cell(u"def foo(): return 1", store_history=True)
969 946
970 947 try:
971 948 _run_edit_test("foo")
972 949 except code.InteractivelyDefined as e:
973 950 nt.assert_equal(e.index, n)
974 951 else:
975 952 raise AssertionError("Should have raised InteractivelyDefined")
976 953
977 954
978 955 def test_edit_cell():
979 956 """%edit [cell id]"""
980 957 ip = get_ipython()
981 958
982 959 ip.run_cell(u"def foo(): return 1", store_history=True)
983 960
984 961 # test
985 962 _run_edit_test("1", exp_contents=ip.user_ns['In'][1], exp_is_temp=True)
986 963
987 964 def test_bookmark():
988 965 ip = get_ipython()
989 966 ip.run_line_magic('bookmark', 'bmname')
990 967 with tt.AssertPrints('bmname'):
991 968 ip.run_line_magic('bookmark', '-l')
992 969 ip.run_line_magic('bookmark', '-d bmname')
993 970
994 971 def test_ls_magic():
995 972 ip = get_ipython()
996 973 json_formatter = ip.display_formatter.formatters['application/json']
997 974 json_formatter.enabled = True
998 975 lsmagic = ip.magic('lsmagic')
999 976 with warnings.catch_warnings(record=True) as w:
1000 977 j = json_formatter(lsmagic)
1001 978 nt.assert_equal(sorted(j), ['cell', 'line'])
1002 979 nt.assert_equal(w, []) # no warnings
1003 980
1004 981 def test_strip_initial_indent():
1005 982 def sii(s):
1006 983 lines = s.splitlines()
1007 984 return '\n'.join(code.strip_initial_indent(lines))
1008 985
1009 986 nt.assert_equal(sii(" a = 1\nb = 2"), "a = 1\nb = 2")
1010 987 nt.assert_equal(sii(" a\n b\nc"), "a\n b\nc")
1011 988 nt.assert_equal(sii("a\n b"), "a\n b")
@@ -1,210 +1,203 b''
1 1 """Tests for various magic functions specific to the terminal frontend.
2 2
3 3 Needs to be run by nose (to make the ipython session available).
4 4 """
5 5 from __future__ import absolute_import
6 6
7 7 #-----------------------------------------------------------------------------
8 8 # Imports
9 9 #-----------------------------------------------------------------------------
10 10
11 11 import sys
12 12 from unittest import TestCase
13 13
14 14 import nose.tools as nt
15 15
16 16 from IPython.testing import tools as tt
17 17 from IPython.utils.py3compat import PY3
18 18
19 19 if PY3:
20 20 from io import StringIO
21 21 else:
22 22 from StringIO import StringIO
23 23
24 24 #-----------------------------------------------------------------------------
25 25 # Globals
26 26 #-----------------------------------------------------------------------------
27 27 ip = get_ipython()
28 28
29 29 #-----------------------------------------------------------------------------
30 30 # Test functions begin
31 31 #-----------------------------------------------------------------------------
32 32
33 33 def check_cpaste(code, should_fail=False):
34 34 """Execute code via 'cpaste' and ensure it was executed, unless
35 35 should_fail is set.
36 36 """
37 37 ip.user_ns['code_ran'] = False
38 38
39 39 src = StringIO()
40 40 if not hasattr(src, 'encoding'):
41 41 # IPython expects stdin to have an encoding attribute
42 42 src.encoding = None
43 43 src.write(code)
44 44 src.write('\n--\n')
45 45 src.seek(0)
46 46
47 47 stdin_save = sys.stdin
48 48 sys.stdin = src
49 49
50 50 try:
51 51 context = tt.AssertPrints if should_fail else tt.AssertNotPrints
52 52 with context("Traceback (most recent call last)"):
53 53 ip.magic('cpaste')
54 54
55 55 if not should_fail:
56 56 assert ip.user_ns['code_ran'], "%r failed" % code
57 57 finally:
58 58 sys.stdin = stdin_save
59 59
60 PY31 = sys.version_info[:2] == (3,1)
61
62 60 def test_cpaste():
63 61 """Test cpaste magic"""
64 62
65 63 def runf():
66 64 """Marker function: sets a flag when executed.
67 65 """
68 66 ip.user_ns['code_ran'] = True
69 67 return 'runf' # return string so '+ runf()' doesn't result in success
70 68
71 69 tests = {'pass': ["runf()",
72 70 "In [1]: runf()",
73 71 "In [1]: if 1:\n ...: runf()",
74 72 "> > > runf()",
75 73 ">>> runf()",
76 74 " >>> runf()",
77 75 ],
78 76
79 77 'fail': ["1 + runf()",
78 "++ runf()",
80 79 ]}
81
82 # I don't know why this is failing specifically on Python 3.1. I've
83 # checked it manually interactively, but we don't care enough about 3.1
84 # to spend time fiddling with the tests, so we just skip it.
85 if not PY31:
86 tests['fail'].append("++ runf()")
87 80
88 81 ip.user_ns['runf'] = runf
89 82
90 83 for code in tests['pass']:
91 84 check_cpaste(code)
92 85
93 86 for code in tests['fail']:
94 87 check_cpaste(code, should_fail=True)
95 88
96 89
97 90 class PasteTestCase(TestCase):
98 91 """Multiple tests for clipboard pasting"""
99 92
100 93 def paste(self, txt, flags='-q'):
101 94 """Paste input text, by default in quiet mode"""
102 95 ip.hooks.clipboard_get = lambda : txt
103 96 ip.magic('paste '+flags)
104 97
105 98 def setUp(self):
106 99 # Inject fake clipboard hook but save original so we can restore it later
107 100 self.original_clip = ip.hooks.clipboard_get
108 101
109 102 def tearDown(self):
110 103 # Restore original hook
111 104 ip.hooks.clipboard_get = self.original_clip
112 105
113 106 def test_paste(self):
114 107 ip.user_ns.pop('x', None)
115 108 self.paste('x = 1')
116 109 nt.assert_equal(ip.user_ns['x'], 1)
117 110 ip.user_ns.pop('x')
118 111
119 112 def test_paste_pyprompt(self):
120 113 ip.user_ns.pop('x', None)
121 114 self.paste('>>> x=2')
122 115 nt.assert_equal(ip.user_ns['x'], 2)
123 116 ip.user_ns.pop('x')
124 117
125 118 def test_paste_py_multi(self):
126 119 self.paste("""
127 120 >>> x = [1,2,3]
128 121 >>> y = []
129 122 >>> for i in x:
130 123 ... y.append(i**2)
131 124 ...
132 125 """)
133 126 nt.assert_equal(ip.user_ns['x'], [1,2,3])
134 127 nt.assert_equal(ip.user_ns['y'], [1,4,9])
135 128
136 129 def test_paste_py_multi_r(self):
137 130 "Now, test that self.paste -r works"
138 131 self.test_paste_py_multi()
139 132 nt.assert_equal(ip.user_ns.pop('x'), [1,2,3])
140 133 nt.assert_equal(ip.user_ns.pop('y'), [1,4,9])
141 134 nt.assert_false('x' in ip.user_ns)
142 135 ip.magic('paste -r')
143 136 nt.assert_equal(ip.user_ns['x'], [1,2,3])
144 137 nt.assert_equal(ip.user_ns['y'], [1,4,9])
145 138
146 139 def test_paste_email(self):
147 140 "Test pasting of email-quoted contents"
148 141 self.paste("""\
149 142 >> def foo(x):
150 143 >> return x + 1
151 144 >> xx = foo(1.1)""")
152 145 nt.assert_equal(ip.user_ns['xx'], 2.1)
153 146
154 147 def test_paste_email2(self):
155 148 "Email again; some programs add a space also at each quoting level"
156 149 self.paste("""\
157 150 > > def foo(x):
158 151 > > return x + 1
159 152 > > yy = foo(2.1) """)
160 153 nt.assert_equal(ip.user_ns['yy'], 3.1)
161 154
162 155 def test_paste_email_py(self):
163 156 "Email quoting of interactive input"
164 157 self.paste("""\
165 158 >> >>> def f(x):
166 159 >> ... return x+1
167 160 >> ...
168 161 >> >>> zz = f(2.5) """)
169 162 nt.assert_equal(ip.user_ns['zz'], 3.5)
170 163
171 164 def test_paste_echo(self):
172 165 "Also test self.paste echoing, by temporarily faking the writer"
173 166 w = StringIO()
174 167 writer = ip.write
175 168 ip.write = w.write
176 169 code = """
177 170 a = 100
178 171 b = 200"""
179 172 try:
180 173 self.paste(code,'')
181 174 out = w.getvalue()
182 175 finally:
183 176 ip.write = writer
184 177 nt.assert_equal(ip.user_ns['a'], 100)
185 178 nt.assert_equal(ip.user_ns['b'], 200)
186 179 nt.assert_equal(out, code+"\n## -- End pasted text --\n")
187 180
188 181 def test_paste_leading_commas(self):
189 182 "Test multiline strings with leading commas"
190 183 tm = ip.magics_manager.registry['TerminalMagics']
191 184 s = '''\
192 185 a = """
193 186 ,1,2,3
194 187 """'''
195 188 ip.user_ns.pop('foo', None)
196 189 tm.store_or_execute(s, 'foo')
197 190 nt.assert_in('foo', ip.user_ns)
198 191
199 192
200 193 def test_paste_trailing_question(self):
201 194 "Test pasting sources with trailing question marks"
202 195 tm = ip.magics_manager.registry['TerminalMagics']
203 196 s = '''\
204 197 def funcfoo():
205 198 if True: #am i true?
206 199 return 'fooresult'
207 200 '''
208 201 ip.user_ns.pop('funcfoo', None)
209 202 self.paste(s)
210 203 nt.assert_equal(ip.user_ns['funcfoo'](), 'fooresult')
@@ -1,456 +1,455 b''
1 1 """Tests for the object inspection functionality.
2 2 """
3 3
4 4 # Copyright (c) IPython Development Team.
5 5 # Distributed under the terms of the Modified BSD License.
6 6
7 7 from __future__ import print_function
8 8
9 9 import os
10 10 import re
11 11 import sys
12 12
13 13 import nose.tools as nt
14 14
15 15 from .. import oinspect
16 16 from IPython.core.magic import (Magics, magics_class, line_magic,
17 17 cell_magic, line_cell_magic,
18 18 register_line_magic, register_cell_magic,
19 19 register_line_cell_magic)
20 20 from decorator import decorator
21 21 from IPython.testing.decorators import skipif
22 22 from IPython.testing.tools import AssertPrints
23 23 from IPython.utils.path import compress_user
24 24 from IPython.utils import py3compat
25 25 from IPython.utils.signatures import Signature, Parameter
26 26
27 27
28 28 #-----------------------------------------------------------------------------
29 29 # Globals and constants
30 30 #-----------------------------------------------------------------------------
31 31
32 32 inspector = oinspect.Inspector()
33 33 ip = get_ipython()
34 34
35 35 #-----------------------------------------------------------------------------
36 36 # Local utilities
37 37 #-----------------------------------------------------------------------------
38 38
39 39 # WARNING: since this test checks the line number where a function is
40 40 # defined, if any code is inserted above, the following line will need to be
41 41 # updated. Do NOT insert any whitespace between the next line and the function
42 42 # definition below.
43 43 THIS_LINE_NUMBER = 43 # Put here the actual number of this line
44 44
45 45 from unittest import TestCase
46 46
47 47 class Test(TestCase):
48 48
49 49 def test_find_source_lines(self):
50 50 self.assertEqual(oinspect.find_source_lines(Test.test_find_source_lines),
51 51 THIS_LINE_NUMBER+6)
52 52
53 53
54 54 # A couple of utilities to ensure these tests work the same from a source or a
55 55 # binary install
56 56 def pyfile(fname):
57 57 return os.path.normcase(re.sub('.py[co]$', '.py', fname))
58 58
59 59
60 60 def match_pyfiles(f1, f2):
61 61 nt.assert_equal(pyfile(f1), pyfile(f2))
62 62
63 63
64 64 def test_find_file():
65 65 match_pyfiles(oinspect.find_file(test_find_file), os.path.abspath(__file__))
66 66
67 67
68 68 def test_find_file_decorated1():
69 69
70 70 @decorator
71 71 def noop1(f):
72 72 def wrapper():
73 73 return f(*a, **kw)
74 74 return wrapper
75 75
76 76 @noop1
77 77 def f(x):
78 78 "My docstring"
79 79
80 80 match_pyfiles(oinspect.find_file(f), os.path.abspath(__file__))
81 81 nt.assert_equal(f.__doc__, "My docstring")
82 82
83 83
84 84 def test_find_file_decorated2():
85 85
86 86 @decorator
87 87 def noop2(f, *a, **kw):
88 88 return f(*a, **kw)
89 89
90 90 @noop2
91 91 @noop2
92 92 @noop2
93 93 def f(x):
94 94 "My docstring 2"
95 95
96 96 match_pyfiles(oinspect.find_file(f), os.path.abspath(__file__))
97 97 nt.assert_equal(f.__doc__, "My docstring 2")
98 98
99 99
100 100 def test_find_file_magic():
101 101 run = ip.find_line_magic('run')
102 102 nt.assert_not_equal(oinspect.find_file(run), None)
103 103
104 104
105 105 # A few generic objects we can then inspect in the tests below
106 106
107 107 class Call(object):
108 108 """This is the class docstring."""
109 109
110 110 def __init__(self, x, y=1):
111 111 """This is the constructor docstring."""
112 112
113 113 def __call__(self, *a, **kw):
114 114 """This is the call docstring."""
115 115
116 116 def method(self, x, z=2):
117 117 """Some method's docstring"""
118 118
119 119 class HasSignature(object):
120 120 """This is the class docstring."""
121 121 __signature__ = Signature([Parameter('test', Parameter.POSITIONAL_OR_KEYWORD)])
122 122
123 123 def __init__(self, *args):
124 124 """This is the init docstring"""
125 125
126 126
127 127 class SimpleClass(object):
128 128 def method(self, x, z=2):
129 129 """Some method's docstring"""
130 130
131 131
132 132 class OldStyle:
133 133 """An old-style class for testing."""
134 134 pass
135 135
136 136
137 137 def f(x, y=2, *a, **kw):
138 138 """A simple function."""
139 139
140 140
141 141 def g(y, z=3, *a, **kw):
142 142 pass # no docstring
143 143
144 144
145 145 @register_line_magic
146 146 def lmagic(line):
147 147 "A line magic"
148 148
149 149
150 150 @register_cell_magic
151 151 def cmagic(line, cell):
152 152 "A cell magic"
153 153
154 154
155 155 @register_line_cell_magic
156 156 def lcmagic(line, cell=None):
157 157 "A line/cell magic"
158 158
159 159
160 160 @magics_class
161 161 class SimpleMagics(Magics):
162 162 @line_magic
163 163 def Clmagic(self, cline):
164 164 "A class-based line magic"
165 165
166 166 @cell_magic
167 167 def Ccmagic(self, cline, ccell):
168 168 "A class-based cell magic"
169 169
170 170 @line_cell_magic
171 171 def Clcmagic(self, cline, ccell=None):
172 172 "A class-based line/cell magic"
173 173
174 174
175 175 class Awkward(object):
176 176 def __getattr__(self, name):
177 177 raise Exception(name)
178 178
179 179 class NoBoolCall:
180 180 """
181 181 callable with `__bool__` raising should still be inspect-able.
182 182 """
183 183
184 184 def __call__(self):
185 185 """does nothing"""
186 186 pass
187 187
188 188 def __bool__(self):
189 189 """just raise NotImplemented"""
190 190 raise NotImplementedError('Must be implemented')
191 191
192 192
193 193 class SerialLiar(object):
194 194 """Attribute accesses always get another copy of the same class.
195 195
196 196 unittest.mock.call does something similar, but it's not ideal for testing
197 197 as the failure mode is to eat all your RAM. This gives up after 10k levels.
198 198 """
199 199 def __init__(self, max_fibbing_twig, lies_told=0):
200 200 if lies_told > 10000:
201 201 raise RuntimeError('Nose too long, honesty is the best policy')
202 202 self.max_fibbing_twig = max_fibbing_twig
203 203 self.lies_told = lies_told
204 204 max_fibbing_twig[0] = max(max_fibbing_twig[0], lies_told)
205 205
206 206 def __getattr__(self, item):
207 207 return SerialLiar(self.max_fibbing_twig, self.lies_told + 1)
208 208
209 209
210 210 def check_calltip(obj, name, call, docstring):
211 211 """Generic check pattern all calltip tests will use"""
212 212 info = inspector.info(obj, name)
213 213 call_line, ds = oinspect.call_tip(info)
214 214 nt.assert_equal(call_line, call)
215 215 nt.assert_equal(ds, docstring)
216 216
217 217 #-----------------------------------------------------------------------------
218 218 # Tests
219 219 #-----------------------------------------------------------------------------
220 220
221 221 def test_calltip_class():
222 222 check_calltip(Call, 'Call', 'Call(x, y=1)', Call.__init__.__doc__)
223 223
224 224
225 225 def test_calltip_instance():
226 226 c = Call(1)
227 227 check_calltip(c, 'c', 'c(*a, **kw)', c.__call__.__doc__)
228 228
229 229
230 230 def test_calltip_method():
231 231 c = Call(1)
232 232 check_calltip(c.method, 'c.method', 'c.method(x, z=2)', c.method.__doc__)
233 233
234 234
235 235 def test_calltip_function():
236 236 check_calltip(f, 'f', 'f(x, y=2, *a, **kw)', f.__doc__)
237 237
238 238
239 239 def test_calltip_function2():
240 240 check_calltip(g, 'g', 'g(y, z=3, *a, **kw)', '<no docstring>')
241 241
242 242
243 243 @skipif(sys.version_info >= (3, 5))
244 244 def test_calltip_builtin():
245 245 check_calltip(sum, 'sum', None, sum.__doc__)
246 246
247 247
248 248 def test_calltip_line_magic():
249 249 check_calltip(lmagic, 'lmagic', 'lmagic(line)', "A line magic")
250 250
251 251
252 252 def test_calltip_cell_magic():
253 253 check_calltip(cmagic, 'cmagic', 'cmagic(line, cell)', "A cell magic")
254 254
255 255
256 256 def test_calltip_line_cell_magic():
257 257 check_calltip(lcmagic, 'lcmagic', 'lcmagic(line, cell=None)',
258 258 "A line/cell magic")
259 259
260 260
261 261 def test_class_magics():
262 262 cm = SimpleMagics(ip)
263 263 ip.register_magics(cm)
264 264 check_calltip(cm.Clmagic, 'Clmagic', 'Clmagic(cline)',
265 265 "A class-based line magic")
266 266 check_calltip(cm.Ccmagic, 'Ccmagic', 'Ccmagic(cline, ccell)',
267 267 "A class-based cell magic")
268 268 check_calltip(cm.Clcmagic, 'Clcmagic', 'Clcmagic(cline, ccell=None)',
269 269 "A class-based line/cell magic")
270 270
271 271
272 272 def test_info():
273 273 "Check that Inspector.info fills out various fields as expected."
274 274 i = inspector.info(Call, oname='Call')
275 275 nt.assert_equal(i['type_name'], 'type')
276 276 expected_class = str(type(type)) # <class 'type'> (Python 3) or <type 'type'>
277 277 nt.assert_equal(i['base_class'], expected_class)
278 if sys.version_info > (3,):
279 nt.assert_regex(i['string_form'], "<class 'IPython.core.tests.test_oinspect.Call'( at 0x[0-9a-f]{1,9})?>")
278 nt.assert_regex(i['string_form'], "<class 'IPython.core.tests.test_oinspect.Call'( at 0x[0-9a-f]{1,9})?>")
280 279 fname = __file__
281 280 if fname.endswith(".pyc"):
282 281 fname = fname[:-1]
283 282 # case-insensitive comparison needed on some filesystems
284 283 # e.g. Windows:
285 284 nt.assert_equal(i['file'].lower(), compress_user(fname).lower())
286 285 nt.assert_equal(i['definition'], None)
287 286 nt.assert_equal(i['docstring'], Call.__doc__)
288 287 nt.assert_equal(i['source'], None)
289 288 nt.assert_true(i['isclass'])
290 289 _self_py2 = '' if py3compat.PY3 else 'self, '
291 290 nt.assert_equal(i['init_definition'], "Call(%sx, y=1)" % _self_py2)
292 291 nt.assert_equal(i['init_docstring'], Call.__init__.__doc__)
293 292
294 293 i = inspector.info(Call, detail_level=1)
295 294 nt.assert_not_equal(i['source'], None)
296 295 nt.assert_equal(i['docstring'], None)
297 296
298 297 c = Call(1)
299 298 c.__doc__ = "Modified instance docstring"
300 299 i = inspector.info(c)
301 300 nt.assert_equal(i['type_name'], 'Call')
302 301 nt.assert_equal(i['docstring'], "Modified instance docstring")
303 302 nt.assert_equal(i['class_docstring'], Call.__doc__)
304 303 nt.assert_equal(i['init_docstring'], Call.__init__.__doc__)
305 304 nt.assert_equal(i['call_docstring'], Call.__call__.__doc__)
306 305
307 306 # Test old-style classes, which for example may not have an __init__ method.
308 307 if not py3compat.PY3:
309 308 i = inspector.info(OldStyle)
310 309 nt.assert_equal(i['type_name'], 'classobj')
311 310
312 311 i = inspector.info(OldStyle())
313 312 nt.assert_equal(i['type_name'], 'instance')
314 313 nt.assert_equal(i['docstring'], OldStyle.__doc__)
315 314
316 315 def test_class_signature():
317 316 info = inspector.info(HasSignature, 'HasSignature')
318 317 nt.assert_equal(info['init_definition'], "HasSignature(test)")
319 318 nt.assert_equal(info['init_docstring'], HasSignature.__init__.__doc__)
320 319
321 320 def test_info_awkward():
322 321 # Just test that this doesn't throw an error.
323 322 inspector.info(Awkward())
324 323
325 324 def test_bool_raise():
326 325 inspector.info(NoBoolCall())
327 326
328 327 def test_info_serialliar():
329 328 fib_tracker = [0]
330 329 inspector.info(SerialLiar(fib_tracker))
331 330
332 331 # Nested attribute access should be cut off at 100 levels deep to avoid
333 332 # infinite loops: https://github.com/ipython/ipython/issues/9122
334 333 nt.assert_less(fib_tracker[0], 9000)
335 334
336 335 def test_calldef_none():
337 336 # We should ignore __call__ for all of these.
338 337 for obj in [f, SimpleClass().method, any, str.upper]:
339 338 print(obj)
340 339 i = inspector.info(obj)
341 340 nt.assert_is(i['call_def'], None)
342 341
343 342 def f_kwarg(pos, *, kwonly):
344 343 pass
345 344
346 345 def test_definition_kwonlyargs():
347 346 i = inspector.info(f_kwarg, oname='f_kwarg') # analysis:ignore
348 347 nt.assert_equal(i['definition'], "f_kwarg(pos, *, kwonly)")
349 348
350 349 def test_getdoc():
351 350 class A(object):
352 351 """standard docstring"""
353 352 pass
354 353
355 354 class B(object):
356 355 """standard docstring"""
357 356 def getdoc(self):
358 357 return "custom docstring"
359 358
360 359 class C(object):
361 360 """standard docstring"""
362 361 def getdoc(self):
363 362 return None
364 363
365 364 a = A()
366 365 b = B()
367 366 c = C()
368 367
369 368 nt.assert_equal(oinspect.getdoc(a), "standard docstring")
370 369 nt.assert_equal(oinspect.getdoc(b), "custom docstring")
371 370 nt.assert_equal(oinspect.getdoc(c), "standard docstring")
372 371
373 372
374 373 def test_empty_property_has_no_source():
375 374 i = inspector.info(property(), detail_level=1)
376 375 nt.assert_is(i['source'], None)
377 376
378 377
379 378 def test_property_sources():
380 379 import zlib
381 380
382 381 class A(object):
383 382 @property
384 383 def foo(self):
385 384 return 'bar'
386 385
387 386 foo = foo.setter(lambda self, v: setattr(self, 'bar', v))
388 387
389 388 id = property(id)
390 389 compress = property(zlib.compress)
391 390
392 391 i = inspector.info(A.foo, detail_level=1)
393 392 nt.assert_in('def foo(self):', i['source'])
394 393 nt.assert_in('lambda self, v:', i['source'])
395 394
396 395 i = inspector.info(A.id, detail_level=1)
397 396 nt.assert_in('fget = <function id>', i['source'])
398 397
399 398 i = inspector.info(A.compress, detail_level=1)
400 399 nt.assert_in('fget = <function zlib.compress>', i['source'])
401 400
402 401
403 402 def test_property_docstring_is_in_info_for_detail_level_0():
404 403 class A(object):
405 404 @property
406 405 def foobar(self):
407 406 """This is `foobar` property."""
408 407 pass
409 408
410 409 ip.user_ns['a_obj'] = A()
411 410 nt.assert_equals(
412 411 'This is `foobar` property.',
413 412 ip.object_inspect('a_obj.foobar', detail_level=0)['docstring'])
414 413
415 414 ip.user_ns['a_cls'] = A
416 415 nt.assert_equals(
417 416 'This is `foobar` property.',
418 417 ip.object_inspect('a_cls.foobar', detail_level=0)['docstring'])
419 418
420 419
421 420 def test_pdef():
422 421 # See gh-1914
423 422 def foo(): pass
424 423 inspector.pdef(foo, 'foo')
425 424
426 425
427 426 def test_pinfo_nonascii():
428 427 # See gh-1177
429 428 from . import nonascii2
430 429 ip.user_ns['nonascii2'] = nonascii2
431 430 ip._inspect('pinfo', 'nonascii2', detail_level=1)
432 431
433 432
434 433 def test_pinfo_magic():
435 434 with AssertPrints('Docstring:'):
436 435 ip._inspect('pinfo', 'lsmagic', detail_level=0)
437 436
438 437 with AssertPrints('Source:'):
439 438 ip._inspect('pinfo', 'lsmagic', detail_level=1)
440 439
441 440
442 441 def test_init_colors():
443 442 # ensure colors are not present in signature info
444 443 info = inspector.info(HasSignature)
445 444 init_def = info['init_definition']
446 445 nt.assert_not_in('[0m', init_def)
447 446
448 447
449 448 def test_builtin_init():
450 449 info = inspector.info(list)
451 450 init_def = info['init_definition']
452 451 # Python < 3.4 can't get init definition from builtins,
453 452 # but still exercise the inspection in case of error-raising bugs.
454 453 if sys.version_info >= (3,4):
455 454 nt.assert_is_not_none(init_def)
456 455
@@ -1,356 +1,355 b''
1 1 # encoding: utf-8
2 2 """Tests for IPython.core.ultratb
3 3 """
4 4 import io
5 5 import sys
6 6 import os.path
7 7 from textwrap import dedent
8 8 import traceback
9 9 import unittest
10 10
11 11 try:
12 12 from unittest import mock
13 13 except ImportError:
14 14 import mock # Python 2
15 15
16 16 from ..ultratb import ColorTB, VerboseTB, find_recursion
17 17
18 18
19 19 from IPython.testing import tools as tt
20 20 from IPython.testing.decorators import onlyif_unicode_paths
21 21 from IPython.utils.syspathcontext import prepended_to_syspath
22 22 from IPython.utils.tempdir import TemporaryDirectory
23 23 from IPython.utils.py3compat import PY3
24 24
25 25 ip = get_ipython()
26 26
27 27 file_1 = """1
28 28 2
29 29 3
30 30 def f():
31 31 1/0
32 32 """
33 33
34 34 file_2 = """def f():
35 35 1/0
36 36 """
37 37
38 38 class ChangedPyFileTest(unittest.TestCase):
39 39 def test_changing_py_file(self):
40 40 """Traceback produced if the line where the error occurred is missing?
41 41
42 42 https://github.com/ipython/ipython/issues/1456
43 43 """
44 44 with TemporaryDirectory() as td:
45 45 fname = os.path.join(td, "foo.py")
46 46 with open(fname, "w") as f:
47 47 f.write(file_1)
48 48
49 49 with prepended_to_syspath(td):
50 50 ip.run_cell("import foo")
51 51
52 52 with tt.AssertPrints("ZeroDivisionError"):
53 53 ip.run_cell("foo.f()")
54 54
55 55 # Make the file shorter, so the line of the error is missing.
56 56 with open(fname, "w") as f:
57 57 f.write(file_2)
58 58
59 59 # For some reason, this was failing on the *second* call after
60 60 # changing the file, so we call f() twice.
61 61 with tt.AssertNotPrints("Internal Python error", channel='stderr'):
62 62 with tt.AssertPrints("ZeroDivisionError"):
63 63 ip.run_cell("foo.f()")
64 64 with tt.AssertPrints("ZeroDivisionError"):
65 65 ip.run_cell("foo.f()")
66 66
67 67 iso_8859_5_file = u'''# coding: iso-8859-5
68 68
69 69 def fail():
70 70 """дбИЖ"""
71 71 1/0 # дбИЖ
72 72 '''
73 73
74 74 class NonAsciiTest(unittest.TestCase):
75 75 @onlyif_unicode_paths
76 76 def test_nonascii_path(self):
77 77 # Non-ascii directory name as well.
78 78 with TemporaryDirectory(suffix=u'é') as td:
79 79 fname = os.path.join(td, u"fooé.py")
80 80 with open(fname, "w") as f:
81 81 f.write(file_1)
82 82
83 83 with prepended_to_syspath(td):
84 84 ip.run_cell("import foo")
85 85
86 86 with tt.AssertPrints("ZeroDivisionError"):
87 87 ip.run_cell("foo.f()")
88 88
89 89 def test_iso8859_5(self):
90 90 with TemporaryDirectory() as td:
91 91 fname = os.path.join(td, 'dfghjkl.py')
92 92
93 93 with io.open(fname, 'w', encoding='iso-8859-5') as f:
94 94 f.write(iso_8859_5_file)
95 95
96 96 with prepended_to_syspath(td):
97 97 ip.run_cell("from dfghjkl import fail")
98 98
99 99 with tt.AssertPrints("ZeroDivisionError"):
100 100 with tt.AssertPrints(u'дбИЖ', suppress=False):
101 101 ip.run_cell('fail()')
102 102
103 103 def test_nonascii_msg(self):
104 104 cell = u"raise Exception('é')"
105 105 expected = u"Exception('é')"
106 106 ip.run_cell("%xmode plain")
107 107 with tt.AssertPrints(expected):
108 108 ip.run_cell(cell)
109 109
110 110 ip.run_cell("%xmode verbose")
111 111 with tt.AssertPrints(expected):
112 112 ip.run_cell(cell)
113 113
114 114 ip.run_cell("%xmode context")
115 115 with tt.AssertPrints(expected):
116 116 ip.run_cell(cell)
117 117
118 118
119 119 class NestedGenExprTestCase(unittest.TestCase):
120 120 """
121 121 Regression test for the following issues:
122 122 https://github.com/ipython/ipython/issues/8293
123 123 https://github.com/ipython/ipython/issues/8205
124 124 """
125 125 def test_nested_genexpr(self):
126 126 code = dedent(
127 127 """\
128 128 class SpecificException(Exception):
129 129 pass
130 130
131 131 def foo(x):
132 132 raise SpecificException("Success!")
133 133
134 134 sum(sum(foo(x) for _ in [0]) for x in [0])
135 135 """
136 136 )
137 137 with tt.AssertPrints('SpecificException: Success!', suppress=False):
138 138 ip.run_cell(code)
139 139
140 140
141 141 indentationerror_file = """if True:
142 142 zoon()
143 143 """
144 144
145 145 class IndentationErrorTest(unittest.TestCase):
146 146 def test_indentationerror_shows_line(self):
147 147 # See issue gh-2398
148 148 with tt.AssertPrints("IndentationError"):
149 149 with tt.AssertPrints("zoon()", suppress=False):
150 150 ip.run_cell(indentationerror_file)
151 151
152 152 with TemporaryDirectory() as td:
153 153 fname = os.path.join(td, "foo.py")
154 154 with open(fname, "w") as f:
155 155 f.write(indentationerror_file)
156 156
157 157 with tt.AssertPrints("IndentationError"):
158 158 with tt.AssertPrints("zoon()", suppress=False):
159 159 ip.magic('run %s' % fname)
160 160
161 161 se_file_1 = """1
162 162 2
163 163 7/
164 164 """
165 165
166 166 se_file_2 = """7/
167 167 """
168 168
169 169 class SyntaxErrorTest(unittest.TestCase):
170 170 def test_syntaxerror_without_lineno(self):
171 171 with tt.AssertNotPrints("TypeError"):
172 172 with tt.AssertPrints("line unknown"):
173 173 ip.run_cell("raise SyntaxError()")
174 174
175 175 def test_changing_py_file(self):
176 176 with TemporaryDirectory() as td:
177 177 fname = os.path.join(td, "foo.py")
178 178 with open(fname, 'w') as f:
179 179 f.write(se_file_1)
180 180
181 181 with tt.AssertPrints(["7/", "SyntaxError"]):
182 182 ip.magic("run " + fname)
183 183
184 184 # Modify the file
185 185 with open(fname, 'w') as f:
186 186 f.write(se_file_2)
187 187
188 188 # The SyntaxError should point to the correct line
189 189 with tt.AssertPrints(["7/", "SyntaxError"]):
190 190 ip.magic("run " + fname)
191 191
192 192 def test_non_syntaxerror(self):
193 193 # SyntaxTB may be called with an error other than a SyntaxError
194 194 # See e.g. gh-4361
195 195 try:
196 196 raise ValueError('QWERTY')
197 197 except ValueError:
198 198 with tt.AssertPrints('QWERTY'):
199 199 ip.showsyntaxerror()
200 200
201 201
202 202 class Python3ChainedExceptionsTest(unittest.TestCase):
203 203 DIRECT_CAUSE_ERROR_CODE = """
204 204 try:
205 205 x = 1 + 2
206 206 print(not_defined_here)
207 207 except Exception as e:
208 208 x += 55
209 209 x - 1
210 210 y = {}
211 211 raise KeyError('uh') from e
212 212 """
213 213
214 214 EXCEPTION_DURING_HANDLING_CODE = """
215 215 try:
216 216 x = 1 + 2
217 217 print(not_defined_here)
218 218 except Exception as e:
219 219 x += 55
220 220 x - 1
221 221 y = {}
222 222 raise KeyError('uh')
223 223 """
224 224
225 225 SUPPRESS_CHAINING_CODE = """
226 226 try:
227 227 1/0
228 228 except Exception:
229 229 raise ValueError("Yikes") from None
230 230 """
231 231
232 232 def test_direct_cause_error(self):
233 233 if PY3:
234 234 with tt.AssertPrints(["KeyError", "NameError", "direct cause"]):
235 235 ip.run_cell(self.DIRECT_CAUSE_ERROR_CODE)
236 236
237 237 def test_exception_during_handling_error(self):
238 238 if PY3:
239 239 with tt.AssertPrints(["KeyError", "NameError", "During handling"]):
240 240 ip.run_cell(self.EXCEPTION_DURING_HANDLING_CODE)
241 241
242 242 def test_suppress_exception_chaining(self):
243 243 if PY3:
244 244 with tt.AssertNotPrints("ZeroDivisionError"), \
245 245 tt.AssertPrints("ValueError", suppress=False):
246 246 ip.run_cell(self.SUPPRESS_CHAINING_CODE)
247 247
248 248
249 249 class RecursionTest(unittest.TestCase):
250 250 DEFINITIONS = """
251 251 def non_recurs():
252 252 1/0
253 253
254 254 def r1():
255 255 r1()
256 256
257 257 def r3a():
258 258 r3b()
259 259
260 260 def r3b():
261 261 r3c()
262 262
263 263 def r3c():
264 264 r3a()
265 265
266 266 def r3o1():
267 267 r3a()
268 268
269 269 def r3o2():
270 270 r3o1()
271 271 """
272 272 def setUp(self):
273 273 ip.run_cell(self.DEFINITIONS)
274 274
275 275 def test_no_recursion(self):
276 276 with tt.AssertNotPrints("frames repeated"):
277 277 ip.run_cell("non_recurs()")
278 278
279 279 def test_recursion_one_frame(self):
280 280 with tt.AssertPrints("1 frames repeated"):
281 281 ip.run_cell("r1()")
282 282
283 283 def test_recursion_three_frames(self):
284 284 with tt.AssertPrints("3 frames repeated"):
285 285 ip.run_cell("r3o2()")
286 286
287 287 def test_find_recursion(self):
288 288 captured = []
289 289 def capture_exc(*args, **kwargs):
290 290 captured.append(sys.exc_info())
291 291 with mock.patch.object(ip, 'showtraceback', capture_exc):
292 292 ip.run_cell("r3o2()")
293 293
294 294 self.assertEqual(len(captured), 1)
295 295 etype, evalue, tb = captured[0]
296 296 self.assertIn("recursion", str(evalue))
297 297
298 298 records = ip.InteractiveTB.get_records(tb, 3, ip.InteractiveTB.tb_offset)
299 299 for r in records[:10]:
300 300 print(r[1:4])
301 301
302 302 # The outermost frames should be:
303 303 # 0: the 'cell' that was running when the exception came up
304 304 # 1: r3o2()
305 305 # 2: r3o1()
306 306 # 3: r3a()
307 307 # Then repeating r3b, r3c, r3a
308 308 last_unique, repeat_length = find_recursion(etype, evalue, records)
309 309 self.assertEqual(last_unique, 2)
310 310 self.assertEqual(repeat_length, 3)
311 311
312 312
313 313 #----------------------------------------------------------------------------
314 314
315 315 # module testing (minimal)
316 if sys.version_info > (3,):
317 def test_handlers():
318 def spam(c, d_e):
319 (d, e) = d_e
320 x = c + d
321 y = c * d
322 foo(x, y)
323
324 def foo(a, b, bar=1):
325 eggs(a, b + bar)
326
327 def eggs(f, g, z=globals()):
328 h = f + g
329 i = f - g
330 return h / i
331
332 buff = io.StringIO()
333
334 buff.write('')
335 buff.write('*** Before ***')
336 try:
337 buff.write(spam(1, (2, 3)))
338 except:
339 traceback.print_exc(file=buff)
340
341 handler = ColorTB(ostream=buff)
342 buff.write('*** ColorTB ***')
343 try:
344 buff.write(spam(1, (2, 3)))
345 except:
346 handler(*sys.exc_info())
347 buff.write('')
348
349 handler = VerboseTB(ostream=buff)
350 buff.write('*** VerboseTB ***')
351 try:
352 buff.write(spam(1, (2, 3)))
353 except:
354 handler(*sys.exc_info())
355 buff.write('')
316 def test_handlers():
317 def spam(c, d_e):
318 (d, e) = d_e
319 x = c + d
320 y = c * d
321 foo(x, y)
322
323 def foo(a, b, bar=1):
324 eggs(a, b + bar)
325
326 def eggs(f, g, z=globals()):
327 h = f + g
328 i = f - g
329 return h / i
330
331 buff = io.StringIO()
332
333 buff.write('')
334 buff.write('*** Before ***')
335 try:
336 buff.write(spam(1, (2, 3)))
337 except:
338 traceback.print_exc(file=buff)
339
340 handler = ColorTB(ostream=buff)
341 buff.write('*** ColorTB ***')
342 try:
343 buff.write(spam(1, (2, 3)))
344 except:
345 handler(*sys.exc_info())
346 buff.write('')
347
348 handler = VerboseTB(ostream=buff)
349 buff.write('*** VerboseTB ***')
350 try:
351 buff.write(spam(1, (2, 3)))
352 except:
353 handler(*sys.exc_info())
354 buff.write('')
356 355
@@ -1,433 +1,432 b''
1 1 # -*- coding: utf-8 -*-
2 2 """IPython Test Suite Runner.
3 3
4 4 This module provides a main entry point to a user script to test IPython
5 5 itself from the command line. There are two ways of running this script:
6 6
7 7 1. With the syntax `iptest all`. This runs our entire test suite by
8 8 calling this script (with different arguments) recursively. This
9 9 causes modules and packages to be tested in different processes, using nose
10 10 or trial where appropriate.
11 11 2. With the regular nose syntax, like `iptest -vvs IPython`. In this form
12 12 the script simply calls nose, but with special command line flags and
13 13 plugins loaded.
14 14
15 15 """
16 16
17 17 # Copyright (c) IPython Development Team.
18 18 # Distributed under the terms of the Modified BSD License.
19 19
20 20 from __future__ import print_function
21 21
22 22 import glob
23 23 from io import BytesIO
24 24 import os
25 25 import os.path as path
26 26 import sys
27 27 from threading import Thread, Lock, Event
28 28 import warnings
29 29
30 30 import nose.plugins.builtin
31 31 from nose.plugins.xunit import Xunit
32 32 from nose import SkipTest
33 33 from nose.core import TestProgram
34 34 from nose.plugins import Plugin
35 35 from nose.util import safe_str
36 36
37 37 from IPython import version_info
38 38 from IPython.utils.py3compat import bytes_to_str
39 39 from IPython.utils.importstring import import_item
40 40 from IPython.testing.plugin.ipdoctest import IPythonDoctest
41 41 from IPython.external.decorators import KnownFailure, knownfailureif
42 42
43 43 pjoin = path.join
44 44
45 45
46 46 # Enable printing all warnings raised by IPython's modules
47 47 warnings.filterwarnings('ignore', message='.*Matplotlib is building the font cache.*', category=UserWarning, module='.*')
48 if sys.version_info > (3,0):
49 warnings.filterwarnings('error', message='.*', category=ResourceWarning, module='.*')
48 warnings.filterwarnings('error', message='.*', category=ResourceWarning, module='.*')
50 49 warnings.filterwarnings('error', message=".*{'config': True}.*", category=DeprecationWarning, module='IPy.*')
51 50 warnings.filterwarnings('default', message='.*', category=Warning, module='IPy.*')
52 51
53 52 if version_info < (6,):
54 53 # nose.tools renames all things from `camelCase` to `snake_case`, which raises a
55 54 # warning with the runner it also imports from the standard library (as of Dec 2015).
56 55 # Ignore it; let's revisit this in a couple of years for IPython 6.
57 56 warnings.filterwarnings('ignore', message='.*Please use assertEqual instead', category=Warning, module='IPython.*')
58 57
59 58
60 59 # ------------------------------------------------------------------------------
61 60 # Monkeypatch Xunit to count known failures as skipped.
62 61 # ------------------------------------------------------------------------------
63 62 def monkeypatch_xunit():
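# Deliberately trigger one known failure so we can capture the exception class
# raised by knownfailureif, then wrap Xunit.addError so that class is reported
# as a skip instead of an error.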
64 63 try:
65 64 knownfailureif(True)(lambda: None)()
66 65 except Exception as e:
67 66 KnownFailureTest = type(e)
68 67
69 68 def addError(self, test, err, capt=None):
70 69 if issubclass(err[0], KnownFailureTest):
71 70 err = (SkipTest,) + err[1:]
72 71 return self.orig_addError(test, err, capt)
73 72
74 73 Xunit.orig_addError = Xunit.addError
75 74 Xunit.addError = addError
76 75
77 76 #-----------------------------------------------------------------------------
78 77 # Check which dependencies are installed and greater than minimum version.
79 78 #-----------------------------------------------------------------------------
80 79 def extract_version(mod):
81 80 return mod.__version__
82 81
83 82 def test_for(item, min_version=None, callback=extract_version):
84 83 """Test to see if item is importable, and optionally check against a minimum
85 84 version.
86 85
87 86 If min_version is given, the default behavior is to check against the
88 87 `__version__` attribute of the item, but specifying `callback` allows you to
89 88 extract the value you are interested in. e.g::
90 89
91 90 In [1]: import sys
92 91
93 92 In [2]: from IPython.testing.iptest import test_for
94 93
95 94 In [3]: test_for('sys', (2,6), callback=lambda sys: sys.version_info)
96 95 Out[3]: True
97 96
98 97 """
99 98 try:
100 99 check = import_item(item)
101 100 except (ImportError, RuntimeError):
102 101 # GTK reports Runtime error if it can't be initialized even if it's
103 102 # importable.
104 103 return False
105 104 else:
106 105 if min_version:
107 106 if callback:
108 107 # extra processing step to get version to compare
109 108 check = callback(check)
110 109
111 110 return check >= min_version
112 111 else:
113 112 return True
114 113
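# In the simplest form, with no min_version, this is just an importability
# check; e.g. (sketch) test_for('sqlite3') returns True iff the module imports,
# which is how the `have` dict below is populated.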
115 114 # Global dict where we can store information on what we have and what we don't
116 115 # have available at test run time
117 116 have = {'matplotlib': test_for('matplotlib'),
118 117 'pygments': test_for('pygments'),
119 118 'sqlite3': test_for('sqlite3')}
120 119
121 120 #-----------------------------------------------------------------------------
122 121 # Test suite definitions
123 122 #-----------------------------------------------------------------------------
124 123
125 124 test_group_names = ['core',
126 125 'extensions', 'lib', 'terminal', 'testing', 'utils',
127 126 ]
128 127
129 128 class TestSection(object):
130 129 def __init__(self, name, includes):
131 130 self.name = name
132 131 self.includes = includes
133 132 self.excludes = []
134 133 self.dependencies = []
135 134 self.enabled = True
136 135
137 136 def exclude(self, module):
138 137 if not module.startswith('IPython'):
139 138 module = self.includes[0] + "." + module
140 139 self.excludes.append(module.replace('.', os.sep))
141 140
142 141 def requires(self, *packages):
143 142 self.dependencies.extend(packages)
144 143
145 144 @property
146 145 def will_run(self):
147 146 return self.enabled and all(have[p] for p in self.dependencies)
148 147
149 148 # Name -> (include, exclude, dependencies_met)
150 149 test_sections = {n:TestSection(n, ['IPython.%s' % n]) for n in test_group_names}
151 150
152 151
153 152 # Exclusions and dependencies
154 153 # ---------------------------
155 154
156 155 # core:
157 156 sec = test_sections['core']
158 157 if not have['sqlite3']:
159 158 sec.exclude('tests.test_history')
160 159 sec.exclude('history')
161 160 if not have['matplotlib']:
162 161 sec.exclude('pylabtools')
163 162 sec.exclude('tests.test_pylabtools')
164 163
165 164 # lib:
166 165 sec = test_sections['lib']
167 166 sec.exclude('kernel')
168 167 if not have['pygments']:
169 168 sec.exclude('tests.test_lexers')
170 169 # We do this unconditionally, so that the test suite doesn't import
171 170 # gtk, changing the default encoding and masking some unicode bugs.
172 171 sec.exclude('inputhookgtk')
173 172 # We also do this unconditionally, because wx can interfere with Unix signals.
174 173 # There are currently no tests for it anyway.
175 174 sec.exclude('inputhookwx')
176 175 # Testing inputhook will need a lot of thought, to figure out
177 176 # how to have tests that don't lock up with the gui event
178 177 # loops in the picture
179 178 sec.exclude('inputhook')
180 179
181 180 # testing:
182 181 sec = test_sections['testing']
183 182 # These have to be skipped on win32 because they use echo, rm, cd, etc.
184 183 # See ticket https://github.com/ipython/ipython/issues/87
185 184 if sys.platform == 'win32':
186 185 sec.exclude('plugin.test_exampleip')
187 186 sec.exclude('plugin.dtexample')
188 187
189 188 # don't run jupyter_console tests found via shim
190 189 test_sections['terminal'].exclude('console')
191 190
192 191 # extensions:
193 192 sec = test_sections['extensions']
194 193 # This is deprecated in favour of rpy2
195 194 sec.exclude('rmagic')
196 195 # autoreload does some strange stuff, so move it to its own test section
197 196 sec.exclude('autoreload')
198 197 sec.exclude('tests.test_autoreload')
199 198 test_sections['autoreload'] = TestSection('autoreload',
200 199 ['IPython.extensions.autoreload', 'IPython.extensions.tests.test_autoreload'])
201 200 test_group_names.append('autoreload')
202 201
203 202
204 203 #-----------------------------------------------------------------------------
205 204 # Functions and classes
206 205 #-----------------------------------------------------------------------------
207 206
208 207 def check_exclusions_exist():
209 208 from IPython.paths import get_ipython_package_dir
210 209 from warnings import warn
211 210 parent = os.path.dirname(get_ipython_package_dir())
213 212 for sec in test_sections.values():
214 213 for pattern in sec.excludes:
214 213 fullpath = pjoin(parent, pattern)
215 214 if not os.path.exists(fullpath) and not glob.glob(fullpath + '.*'):
216 215 warn("Excluding nonexistent file: %r" % pattern)
217 216
218 217
219 218 class ExclusionPlugin(Plugin):
220 219 """A nose plugin to effect our exclusions of files and directories.
221 220 """
222 221 name = 'exclusions'
223 222 score = 3000 # Should come before any other plugins
224 223
225 224 def __init__(self, exclude_patterns=None):
226 225 """
227 226 Parameters
228 227 ----------
229 228
230 229 exclude_patterns : sequence of strings, optional
231 230 Filenames containing these patterns (as raw strings, not as regular
232 231 expressions) are excluded from the tests.
233 232 """
234 233 self.exclude_patterns = exclude_patterns or []
235 234 super(ExclusionPlugin, self).__init__()
236 235
237 236 def options(self, parser, env=os.environ):
238 237 Plugin.options(self, parser, env)
239 238
240 239 def configure(self, options, config):
241 240 Plugin.configure(self, options, config)
242 241 # Override nose trying to disable plugin.
243 242 self.enabled = True
244 243
245 244 def wantFile(self, filename):
246 245 """Return whether the given filename should be scanned for tests.
247 246 """
248 247 if any(pat in filename for pat in self.exclude_patterns):
249 248 return False
250 249 return None
251 250
252 251 def wantDirectory(self, directory):
253 252 """Return whether the given directory should be scanned for tests.
254 253 """
255 254 if any(pat in directory for pat in self.exclude_patterns):
256 255 return False
257 256 return None
258 257
259 258
260 259 class StreamCapturer(Thread):
261 260 daemon = True # Don't hang if main thread crashes
262 261 started = False
263 262 def __init__(self, echo=False):
264 263 super(StreamCapturer, self).__init__()
265 264 self.echo = echo
266 265 self.streams = []
267 266 self.buffer = BytesIO()
268 267 self.readfd, self.writefd = os.pipe()
269 268 self.buffer_lock = Lock()
270 269 self.stop = Event()
271 270
272 271 def run(self):
273 272 self.started = True
274 273
275 274 while not self.stop.is_set():
276 275 chunk = os.read(self.readfd, 1024)
277 276
278 277 with self.buffer_lock:
279 278 self.buffer.write(chunk)
280 279 if self.echo:
281 280 sys.stdout.write(bytes_to_str(chunk))
282 281
283 282 os.close(self.readfd)
284 283 os.close(self.writefd)
285 284
286 285 def reset_buffer(self):
287 286 with self.buffer_lock:
288 287 self.buffer.truncate(0)
289 288 self.buffer.seek(0)
290 289
291 290 def get_buffer(self):
292 291 with self.buffer_lock:
293 292 return self.buffer.getvalue()
294 293
295 294 def ensure_started(self):
296 295 if not self.started:
297 296 self.start()
298 297
299 298 def halt(self):
300 299 """Safely stop the thread."""
301 300 if not self.started:
302 301 return
303 302
304 303 self.stop.set()
305 304 os.write(self.writefd, b'\0') # Ensure we're not locked in a read()
306 305 self.join()
307 306
308 307 class SubprocessStreamCapturePlugin(Plugin):
309 308 name='subprocstreams'
310 309 def __init__(self):
311 310 Plugin.__init__(self)
312 311 self.stream_capturer = StreamCapturer()
313 312 self.destination = os.environ.get('IPTEST_SUBPROC_STREAMS', 'capture')
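# 'capture' buffers subprocess output through the StreamCapturer above,
# 'discard' routes it to os.devnull, and any other value falls back to the
# real stdout (see get_write_fileno below).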
314 313 # This is ugly, but distant parts of the test machinery need to be able
315 314 # to redirect streams, so we make the object globally accessible.
316 315 nose.iptest_stdstreams_fileno = self.get_write_fileno
317 316
318 317 def get_write_fileno(self):
319 318 if self.destination == 'capture':
320 319 self.stream_capturer.ensure_started()
321 320 return self.stream_capturer.writefd
322 321 elif self.destination == 'discard':
323 322 return os.open(os.devnull, os.O_WRONLY)
324 323 else:
325 324 return sys.__stdout__.fileno()
326 325
327 326 def configure(self, options, config):
328 327 Plugin.configure(self, options, config)
329 328 # Override nose trying to disable plugin.
330 329 if self.destination == 'capture':
331 330 self.enabled = True
332 331
333 332 def startTest(self, test):
334 333 # Reset log capture
335 334 self.stream_capturer.reset_buffer()
336 335
337 336 def formatFailure(self, test, err):
338 337 # Show output
339 338 ec, ev, tb = err
340 339 captured = self.stream_capturer.get_buffer().decode('utf-8', 'replace')
341 340 if captured.strip():
342 341 ev = safe_str(ev)
343 342 out = [ev, '>> begin captured subprocess output <<',
344 343 captured,
345 344 '>> end captured subprocess output <<']
346 345 return ec, '\n'.join(out), tb
347 346
348 347 return err
349 348
350 349 formatError = formatFailure
351 350
352 351 def finalize(self, result):
353 352 self.stream_capturer.halt()
354 353
355 354
356 355 def run_iptest():
357 356 """Run the IPython test suite using nose.
358 357
359 358 This function is called when this script is **not** called with the form
360 359 `iptest all`. It simply calls nose with appropriate command line flags
361 360 and accepts all of the standard nose arguments.
362 361 """
363 362 # Apply our monkeypatch to Xunit
364 363 if '--with-xunit' in sys.argv and not hasattr(Xunit, 'orig_addError'):
365 364 monkeypatch_xunit()
366 365
367 366 arg1 = sys.argv[1]
368 367 if arg1 in test_sections:
369 368 section = test_sections[arg1]
370 369 sys.argv[1:2] = section.includes
371 370 elif arg1.startswith('IPython.') and arg1[8:] in test_sections:
372 371 section = test_sections[arg1[8:]]
373 372 sys.argv[1:2] = section.includes
374 373 else:
375 374 section = TestSection(arg1, includes=[arg1])
376 375
377 376
378 377 argv = sys.argv + [ '--detailed-errors', # extra info in tracebacks
379 378 # We add --exe because of setuptools' imbecility (it
380 379 # blindly does chmod +x on ALL files). Nose does the
381 380 # right thing and it tries to avoid executables,
382 381 # setuptools unfortunately forces our hand here. This
383 382 # has been discussed on the distutils list and the
384 383 # setuptools devs refuse to fix this problem!
385 384 '--exe',
386 385 ]
387 386 if '-a' not in argv and '-A' not in argv:
388 387 argv = argv + ['-a', '!crash']
389 388
390 389 if nose.__version__ >= '0.11':
391 390 # I don't fully understand why we need this one, but depending on what
392 391 # directory the test suite is run from, if we don't give it, 0 tests
393 392 # get run. Specifically, if the test suite is run from the source dir
394 393 # with an argument (like 'iptest.py IPython.core', 0 tests are run,
395 394 # even if the same call done in this directory works fine). It appears
396 395 # that if the requested package is in the current dir, nose bails early
397 396 # by default. Since it's otherwise harmless, leave it in by default
398 397 # for nose >= 0.11, though unfortunately nose 0.10 doesn't support it.
399 398 argv.append('--traverse-namespace')
400 399
401 400 plugins = [ ExclusionPlugin(section.excludes), KnownFailure(),
402 401 SubprocessStreamCapturePlugin() ]
403 402
404 403 # we still have some vestigial doctests in core
405 404 if (section.name.startswith(('core', 'IPython.core'))):
406 405 plugins.append(IPythonDoctest())
407 406 argv.extend([
408 407 '--with-ipdoctest',
409 408 '--ipdoctest-tests',
410 409 '--ipdoctest-extension=txt',
411 410 ])
412 411
413 412
414 413 # Use working directory set by parent process (see iptestcontroller)
415 414 if 'IPTEST_WORKING_DIR' in os.environ:
416 415 os.chdir(os.environ['IPTEST_WORKING_DIR'])
417 416
418 417 # We need a global ipython running in this process, but the special
419 418 # in-process group spawns its own IPython kernels, so for *that* group we
420 419 # must avoid also opening the global one (otherwise there's a conflict of
421 420 # singletons). Ultimately the solution to this problem is to refactor our
422 421 # assumptions about what needs to be a singleton and what doesn't (app
423 422 # objects should, individual shells shouldn't). But for now, this
424 423 # workaround allows the test suite for the inprocess module to complete.
425 424 if 'kernel.inprocess' not in section.name:
426 425 from IPython.testing import globalipapp
427 426 globalipapp.start_ipython()
428 427
429 428 # Now nose can run
430 429 TestProgram(argv=argv, addplugins=plugins)
431 430
432 431 if __name__ == '__main__':
433 432 run_iptest()
@@ -1,43 +1,30 b''
1 """Decorators marks that a doctest should be skipped, for both python 2 and 3.
1 """Decorators marks that a doctest should be skipped.
2 2
3 3 The IPython.testing.decorators module triggers various extra imports, including
4 4 numpy and sympy if they're present. Since this decorator is used in core parts
5 5 of IPython, it's in a separate module so that running IPython doesn't trigger
6 6 those imports."""
7 7
8 #-----------------------------------------------------------------------------
9 # Copyright (C) 2009-2011 The IPython Development Team
10 #
11 # Distributed under the terms of the BSD License. The full license is in
12 # the file COPYING, distributed as part of this software.
13 #-----------------------------------------------------------------------------
8 # Copyright (C) IPython Development Team
9 # Distributed under the terms of the Modified BSD License.
14 10
15 #-----------------------------------------------------------------------------
16 # Imports
17 #-----------------------------------------------------------------------------
18
19 import sys
20
21 #-----------------------------------------------------------------------------
22 # Decorators
23 #-----------------------------------------------------------------------------
24 11
25 12 def skip_doctest(f):
26 13 """Decorator - mark a function or method for skipping its doctest.
27 14
28 15 This decorator allows you to mark a function whose docstring you wish to
29 16 omit from testing, while preserving the docstring for introspection, help,
30 17 etc."""
31 18 f.skip_doctest = True
32 19 return f
33 20
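# For instance (a sketch with a hypothetical function), the example below is
# not collected as a doctest but remains visible via help(example):
#
#     @skip_doctest
#     def example():
#         """Return one.
#
#         >>> example()
#         1
#         """
#         return 1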
34 21
35 22 def skip_doctest_py3(f):
36 23 """Decorator - skip the doctest under Python 3."""
37 24 f.skip_doctest = (sys.version_info[0] >= 3)
38 25 return f
39 26
40 27 def skip_doctest_py2(f):
41 28 """Decorator - skip the doctest under Python 3."""
42 29 f.skip_doctest = (sys.version_info[0] < 3)
43 30 return f
This diff has been collapsed as it changes many lines (594 lines changed).
@@ -1,9 +1,595 b''
1 """Load our patched versions of tokenize.
1 """Patched version of standard library tokenize, to deal with various bugs.
2
3 Based on Python 3.2 code.
4
5 Patches:
6
7 - Gareth Rees' patch for Python issue #12691 (untokenizing)
8   - Except we don't encode the output of untokenize
9 - Python 2 compatible syntax, so that it can be byte-compiled at installation
10 - Newlines in comments and blank lines should be either NL or NEWLINE, depending
11 on whether they are in a multi-line statement. Filed as Python issue #17061.
12 - Export generate_tokens & TokenError
13 - u and rb literals are allowed under Python 3.3 and above.
14
15 ------------------------------------------------------------------------------
16 Tokenization help for Python programs.
17
18 tokenize(readline) is a generator that breaks a stream of bytes into
19 Python tokens. It decodes the bytes according to PEP-0263 for
20 determining source file encoding.
21
22 It accepts a readline-like method which is called repeatedly to get the
23 next line of input (or b"" for EOF). It generates 5-tuples with these
24 members:
25
26 the token type (see token.py)
27 the token (a string)
28 the starting (row, column) indices of the token (a 2-tuple of ints)
29 the ending (row, column) indices of the token (a 2-tuple of ints)
30 the original line (string)
31
32 It is designed to match the working of the Python tokenizer exactly, except
33 that it produces COMMENT tokens for comments and gives type OP for all
34 operators. Additionally, all token lists start with an ENCODING token
35 which tells you which encoding was used to decode the bytes stream.
2 36 """
37 from __future__ import absolute_import
3 38
39 __author__ = 'Ka-Ping Yee <ping@lfw.org>'
40 __credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
41 'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
42 'Michael Foord')
43 import builtins
44 import re
4 45 import sys
46 from token import *
47 from codecs import lookup, BOM_UTF8
48 import collections
49 from io import TextIOWrapper
50 cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")
51
52 import token
53 __all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
54 "NL", "untokenize", "ENCODING", "TokenInfo"]
55 del token
56
57 __all__ += ["generate_tokens", "TokenError"]
5 58
6 if sys.version_info[0] >= 3:
7 from ._tokenize_py3 import *
59 COMMENT = N_TOKENS
60 tok_name[COMMENT] = 'COMMENT'
61 NL = N_TOKENS + 1
62 tok_name[NL] = 'NL'
63 ENCODING = N_TOKENS + 2
64 tok_name[ENCODING] = 'ENCODING'
65 N_TOKENS += 3
66
67 class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
68 def __repr__(self):
69 annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
70 return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
71 self._replace(type=annotated_type))
72
73 def group(*choices): return '(' + '|'.join(choices) + ')'
74 def any(*choices): return group(*choices) + '*'
75 def maybe(*choices): return group(*choices) + '?'
76
77 # Note: we use unicode matching for names ("\w") but ascii matching for
78 # number literals.
79 Whitespace = r'[ \f\t]*'
80 Comment = r'#[^\r\n]*'
81 Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
82 Name = r'\w+'
83
84 Hexnumber = r'0[xX][0-9a-fA-F]+'
85 Binnumber = r'0[bB][01]+'
86 Octnumber = r'0[oO][0-7]+'
87 Decnumber = r'(?:0+|[1-9][0-9]*)'
88 Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
89 Exponent = r'[eE][-+]?[0-9]+'
90 Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
91 Expfloat = r'[0-9]+' + Exponent
92 Floatnumber = group(Pointfloat, Expfloat)
93 Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]')
94 Number = group(Imagnumber, Floatnumber, Intnumber)
95
96 if sys.version_info.minor >= 3:
97 StringPrefix = r'(?:[bB][rR]?|[rR][bB]?|[uU])?'
8 98 else:
9 from ._tokenize_py2 import *
99 StringPrefix = r'(?:[bB]?[rR]?)?'
100
101 # Tail end of ' string.
102 Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
103 # Tail end of " string.
104 Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
105 # Tail end of ''' string.
106 Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
107 # Tail end of """ string.
108 Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
109 Triple = group(StringPrefix + "'''", StringPrefix + '"""')
110 # Single-line ' or " string.
111 String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
112 StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
113
114 # Because of leftmost-then-longest match semantics, be sure to put the
115 # longest operators first (e.g., if = came before ==, == would get
116 # recognized as two instances of =).
117 Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
118 r"//=?", r"->",
119 r"[+\-*/%&|^=<>]=?",
120 r"~")
121
122 Bracket = '[][(){}]'
123 Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
124 Funny = group(Operator, Bracket, Special)
125
126 PlainToken = group(Number, Funny, String, Name)
127 Token = Ignore + PlainToken
128
129 # First (or only) line of ' or " string.
130 ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
131 group("'", r'\\\r?\n'),
132 StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
133 group('"', r'\\\r?\n'))
134 PseudoExtras = group(r'\\\r?\n', Comment, Triple)
135 PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
136
137 def _compile(expr):
138 return re.compile(expr, re.UNICODE)
139
140 tokenprog, pseudoprog, single3prog, double3prog = map(
141 _compile, (Token, PseudoToken, Single3, Double3))
142 endprogs = {"'": _compile(Single), '"': _compile(Double),
143 "'''": single3prog, '"""': double3prog,
144 "r'''": single3prog, 'r"""': double3prog,
145 "b'''": single3prog, 'b"""': double3prog,
146 "R'''": single3prog, 'R"""': double3prog,
147 "B'''": single3prog, 'B"""': double3prog,
148 "br'''": single3prog, 'br"""': double3prog,
149 "bR'''": single3prog, 'bR"""': double3prog,
150 "Br'''": single3prog, 'Br"""': double3prog,
151 "BR'''": single3prog, 'BR"""': double3prog,
152 'r': None, 'R': None, 'b': None, 'B': None}
153
154 triple_quoted = {}
155 for t in ("'''", '"""',
156 "r'''", 'r"""', "R'''", 'R"""',
157 "b'''", 'b"""', "B'''", 'B"""',
158 "br'''", 'br"""', "Br'''", 'Br"""',
159 "bR'''", 'bR"""', "BR'''", 'BR"""'):
160 triple_quoted[t] = t
161 single_quoted = {}
162 for t in ("'", '"',
163 "r'", 'r"', "R'", 'R"',
164 "b'", 'b"', "B'", 'B"',
165 "br'", 'br"', "Br'", 'Br"',
166 "bR'", 'bR"', "BR'", 'BR"' ):
167 single_quoted[t] = t
168
169 if sys.version_info.minor >= 3:
170 # Python 3.3
171 for _prefix in ['rb', 'rB', 'Rb', 'RB', 'u', 'U']:
172 _t2 = _prefix+'"""'
173 endprogs[_t2] = double3prog
174 triple_quoted[_t2] = _t2
175 _t1 = _prefix + "'''"
176 endprogs[_t1] = single3prog
177 triple_quoted[_t1] = _t1
178 single_quoted[_prefix+'"'] = _prefix+'"'
179 single_quoted[_prefix+"'"] = _prefix+"'"
180 del _prefix, _t2, _t1
181 endprogs['u'] = None
182 endprogs['U'] = None
183
184 del _compile
185
186 tabsize = 8
187
188 class TokenError(Exception): pass
189
190 class StopTokenizing(Exception): pass
191
192
193 class Untokenizer:
194
195 def __init__(self):
196 self.tokens = []
197 self.prev_row = 1
198 self.prev_col = 0
199 self.encoding = 'utf-8'
200
201 def add_whitespace(self, tok_type, start):
202 row, col = start
203 assert row >= self.prev_row
204 col_offset = col - self.prev_col
205 if col_offset > 0:
206 self.tokens.append(" " * col_offset)
207 elif row > self.prev_row and tok_type not in (NEWLINE, NL, ENDMARKER):
208 # Line was backslash-continued.
209 self.tokens.append(" ")
210
211 def untokenize(self, tokens):
212 iterable = iter(tokens)
213 for t in iterable:
214 if len(t) == 2:
215 self.compat(t, iterable)
216 break
217 tok_type, token, start, end = t[:4]
218 if tok_type == ENCODING:
219 self.encoding = token
220 continue
221 self.add_whitespace(tok_type, start)
222 self.tokens.append(token)
223 self.prev_row, self.prev_col = end
224 if tok_type in (NEWLINE, NL):
225 self.prev_row += 1
226 self.prev_col = 0
227 return "".join(self.tokens)
228
229 def compat(self, token, iterable):
230 # This import is here to avoid problems when the itertools
231 # module is not built yet and tokenize is imported.
232 from itertools import chain
233 startline = False
234 prevstring = False
235 indents = []
236 toks_append = self.tokens.append
237
238 for tok in chain([token], iterable):
239 toknum, tokval = tok[:2]
240 if toknum == ENCODING:
241 self.encoding = tokval
242 continue
243
244 if toknum in (NAME, NUMBER):
245 tokval += ' '
246
247 # Insert a space between two consecutive strings
248 if toknum == STRING:
249 if prevstring:
250 tokval = ' ' + tokval
251 prevstring = True
252 else:
253 prevstring = False
254
255 if toknum == INDENT:
256 indents.append(tokval)
257 continue
258 elif toknum == DEDENT:
259 indents.pop()
260 continue
261 elif toknum in (NEWLINE, NL):
262 startline = True
263 elif startline and indents:
264 toks_append(indents[-1])
265 startline = False
266 toks_append(tokval)
267
268
269 def untokenize(tokens):
270 """
271 Convert ``tokens`` (an iterable) back into Python source code. Unlike
272 the standard-library version, this patched untokenize() returns a str
273 and does not encode the output.
274
275 The result is guaranteed to tokenize back to match the input so that
276 the conversion is lossless and round-trips are assured. The
277 guarantee applies only to the token type and token string as the
278 spacing between tokens (column positions) may change.
279
280 :func:`untokenize` has two modes. If the input tokens are sequences
281 of length 2 (``type``, ``string``) then spaces are added as necessary to
282 preserve the round-trip property.
283
284 If the input tokens are sequences of length 4 or more (``type``,
285 ``string``, ``start``, ``end``), as returned by :func:`tokenize`, then
286 spaces are added so that each token appears in the result at the
287 position indicated by ``start`` and ``end``, if possible.
288 """
289 return Untokenizer().untokenize(tokens)
290
291
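To illustrate the round-trip property described above (an editor's sketch, assuming generate_tokens and untokenize from this patched module are in scope; note this version returns a str rather than encoded bytes):

```python
import io

# Tokenize a small snippet into full 5-tuples, then rebuild the source.
# Because start/end positions are available, the original spacing is
# reproduced exactly in this example.
src = "a = (1 +\n     2)\n"
toks = list(generate_tokens(io.StringIO(src).readline))
assert untokenize(toks) == src
```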
292 def _get_normal_name(orig_enc):
293 """Imitates get_normal_name in tokenizer.c."""
294 # Only care about the first 12 characters.
295 enc = orig_enc[:12].lower().replace("_", "-")
296 if enc == "utf-8" or enc.startswith("utf-8-"):
297 return "utf-8"
298 if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
299 enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
300 return "iso-8859-1"
301 return orig_enc
302
303 def detect_encoding(readline):
304 """
305 The detect_encoding() function is used to detect the encoding that should
306 be used to decode a Python source file. It requires one argument, readline,
307 in the same way as the tokenize() generator.
308
309 It will call readline a maximum of twice, and return the encoding used
310 (as a string) and a list of any lines (left as bytes) it has read in.
311
312 It detects the encoding from the presence of a utf-8 bom or an encoding
313 cookie as specified in pep-0263. If both a bom and a cookie are present,
314 but disagree, a SyntaxError will be raised. If the encoding cookie is an
315 invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
316 'utf-8-sig' is returned.
317
318 If no encoding is specified, then the default of 'utf-8' will be returned.
319 """
320 bom_found = False
321 encoding = None
322 default = 'utf-8'
323 def read_or_stop():
324 try:
325 return readline()
326 except StopIteration:
327 return b''
328
329 def find_cookie(line):
330 try:
331 # Decode as UTF-8. Either the line is an encoding declaration,
332 # in which case it should be pure ASCII, or it must be UTF-8
333 # per default encoding.
334 line_string = line.decode('utf-8')
335 except UnicodeDecodeError:
336 raise SyntaxError("invalid or missing encoding declaration")
337
338 matches = cookie_re.findall(line_string)
339 if not matches:
340 return None
341 encoding = _get_normal_name(matches[0])
342 try:
343 codec = lookup(encoding)
344 except LookupError:
345 # This behaviour mimics the Python interpreter
346 raise SyntaxError("unknown encoding: " + encoding)
347
348 if bom_found:
349 if encoding != 'utf-8':
350 # This behaviour mimics the Python interpreter
351 raise SyntaxError('encoding problem: utf-8')
352 encoding += '-sig'
353 return encoding
354
355 first = read_or_stop()
356 if first.startswith(BOM_UTF8):
357 bom_found = True
358 first = first[3:]
359 default = 'utf-8-sig'
360 if not first:
361 return default, []
362
363 encoding = find_cookie(first)
364 if encoding:
365 return encoding, [first]
366
367 second = read_or_stop()
368 if not second:
369 return default, [first]
370
371 encoding = find_cookie(second)
372 if encoding:
373 return encoding, [first, second]
374
375 return default, [first, second]
376
377
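A small sketch of detect_encoding() on a source with a PEP 263 cookie (editor's example, assuming the function is in scope):

```python
import io

# detect_encoding() reads at most two lines and returns the declared codec,
# normalized by _get_normal_name() -- here 'latin-1' becomes 'iso-8859-1'.
src = b"# -*- coding: latin-1 -*-\nx = 1\n"
encoding, consumed = detect_encoding(io.BytesIO(src).readline)
print(encoding)   # 'iso-8859-1'
print(consumed)   # the raw byte line(s) read while looking for the cookie
```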
378 def open(filename):
379 """Open a file in read only mode using the encoding detected by
380 detect_encoding().
381 """
382 buffer = builtins.open(filename, 'rb')
383 encoding, lines = detect_encoding(buffer.readline)
384 buffer.seek(0)
385 text = TextIOWrapper(buffer, encoding, line_buffering=True)
386 text.mode = 'r'
387 return text
388
389
390 def tokenize(readline):
391 """
392 The tokenize() generator requires one argument, readline, which
393 must be a callable object which provides the same interface as the
394 readline() method of built-in file objects. Each call to the function
395 should return one line of input as bytes. Alternately, readline
396 can be a callable function terminating with StopIteration:
397 readline = open(myfile, 'rb').__next__ # Example of alternate readline
398
399 The generator produces 5-tuples with these members: the token type; the
400 token string; a 2-tuple (srow, scol) of ints specifying the row and
401 column where the token begins in the source; a 2-tuple (erow, ecol) of
402 ints specifying the row and column where the token ends in the source;
403 and the line on which the token was found. The line passed is the
404 logical line; continuation lines are included.
405
406 The first token sequence will always be an ENCODING token
407 which tells you which encoding was used to decode the bytes stream.
408 """
409 # This import is here to avoid problems when the itertools module is not
410 # built yet and tokenize is imported.
411 from itertools import chain, repeat
412 encoding, consumed = detect_encoding(readline)
413 rl_gen = iter(readline, b"")
414 empty = repeat(b"")
415 return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)
416
417
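For example (editor's sketch, assuming this module's tokenize() is in scope), the generator can be fed from an in-memory bytes buffer:

```python
import io

# tokenize() takes a readline callable that yields bytes; the first token it
# produces is always ENCODING, followed by the tokens of the source.
src = b"x = 1\n# a comment\n"
for tok in tokenize(io.BytesIO(src).readline):
    print(tok)
```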
418 def _tokenize(readline, encoding):
419 lnum = parenlev = continued = 0
420 numchars = '0123456789'
421 contstr, needcont = '', 0
422 contline = None
423 indents = [0]
424
425 if encoding is not None:
426 if encoding == "utf-8-sig":
427 # BOM will already have been stripped.
428 encoding = "utf-8"
429 yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
430 while True: # loop over lines in stream
431 try:
432 line = readline()
433 except StopIteration:
434 line = b''
435
436 if encoding is not None:
437 line = line.decode(encoding)
438 lnum += 1
439 pos, max = 0, len(line)
440
441 if contstr: # continued string
442 if not line:
443 raise TokenError("EOF in multi-line string", strstart)
444 endmatch = endprog.match(line)
445 if endmatch:
446 pos = end = endmatch.end(0)
447 yield TokenInfo(STRING, contstr + line[:end],
448 strstart, (lnum, end), contline + line)
449 contstr, needcont = '', 0
450 contline = None
451 elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
452 yield TokenInfo(ERRORTOKEN, contstr + line,
453 strstart, (lnum, len(line)), contline)
454 contstr = ''
455 contline = None
456 continue
457 else:
458 contstr = contstr + line
459 contline = contline + line
460 continue
461
462 elif parenlev == 0 and not continued: # new statement
463 if not line: break
464 column = 0
465 while pos < max: # measure leading whitespace
466 if line[pos] == ' ':
467 column += 1
468 elif line[pos] == '\t':
469 column = (column//tabsize + 1)*tabsize
470 elif line[pos] == '\f':
471 column = 0
472 else:
473 break
474 pos += 1
475 if pos == max:
476 break
477
478 if line[pos] in '#\r\n': # skip comments or blank lines
479 if line[pos] == '#':
480 comment_token = line[pos:].rstrip('\r\n')
481 nl_pos = pos + len(comment_token)
482 yield TokenInfo(COMMENT, comment_token,
483 (lnum, pos), (lnum, pos + len(comment_token)), line)
484 yield TokenInfo(NEWLINE, line[nl_pos:],
485 (lnum, nl_pos), (lnum, len(line)), line)
486 else:
487 yield TokenInfo(NEWLINE, line[pos:],
488 (lnum, pos), (lnum, len(line)), line)
489 continue
490
491 if column > indents[-1]: # count indents or dedents
492 indents.append(column)
493 yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
494 while column < indents[-1]:
495 if column not in indents:
496 raise IndentationError(
497 "unindent does not match any outer indentation level",
498 ("<tokenize>", lnum, pos, line))
499 indents = indents[:-1]
500 yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)
501
502 else: # continued statement
503 if not line:
504 raise TokenError("EOF in multi-line statement", (lnum, 0))
505 continued = 0
506
507 while pos < max:
508 pseudomatch = pseudoprog.match(line, pos)
509 if pseudomatch: # scan for tokens
510 start, end = pseudomatch.span(1)
511 spos, epos, pos = (lnum, start), (lnum, end), end
512 token, initial = line[start:end], line[start]
513
514 if (initial in numchars or # ordinary number
515 (initial == '.' and token != '.' and token != '...')):
516 yield TokenInfo(NUMBER, token, spos, epos, line)
517 elif initial in '\r\n':
518 yield TokenInfo(NL if parenlev > 0 else NEWLINE,
519 token, spos, epos, line)
520 elif initial == '#':
521 assert not token.endswith("\n")
522 yield TokenInfo(COMMENT, token, spos, epos, line)
523 elif token in triple_quoted:
524 endprog = endprogs[token]
525 endmatch = endprog.match(line, pos)
526 if endmatch: # all on one line
527 pos = endmatch.end(0)
528 token = line[start:pos]
529 yield TokenInfo(STRING, token, spos, (lnum, pos), line)
530 else:
531 strstart = (lnum, start) # multiple lines
532 contstr = line[start:]
533 contline = line
534 break
535 elif initial in single_quoted or \
536 token[:2] in single_quoted or \
537 token[:3] in single_quoted:
538 if token[-1] == '\n': # continued string
539 strstart = (lnum, start)
540 endprog = (endprogs[initial] or endprogs[token[1]] or
541 endprogs[token[2]])
542 contstr, needcont = line[start:], 1
543 contline = line
544 break
545 else: # ordinary string
546 yield TokenInfo(STRING, token, spos, epos, line)
547 elif initial.isidentifier(): # ordinary name
548 yield TokenInfo(NAME, token, spos, epos, line)
549 elif initial == '\\': # continued stmt
550 continued = 1
551 else:
552 if initial in '([{':
553 parenlev += 1
554 elif initial in ')]}':
555 parenlev -= 1
556 yield TokenInfo(OP, token, spos, epos, line)
557 else:
558 yield TokenInfo(ERRORTOKEN, line[pos],
559 (lnum, pos), (lnum, pos+1), line)
560 pos += 1
561
562 for indent in indents[1:]: # pop remaining indent levels
563 yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
564 yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
565
566
567 # An undocumented, backwards compatible, API for all the places in the standard
568 # library that expect to be able to use tokenize with strings
569 def generate_tokens(readline):
570 return _tokenize(readline, None)
571
572 if __name__ == "__main__":
573 # Quick sanity check
574 s = b'''def parseline(self, line):
575 """Parse the line into a command name and a string containing
576 the arguments. Returns a tuple containing (command, args, line).
577 'command' and 'args' may be None if the line couldn't be parsed.
578 """
579 line = line.strip()
580 if not line:
581 return None, None, line
582 elif line[0] == '?':
583 line = 'help ' + line[1:]
584 elif line[0] == '!':
585 if hasattr(self, 'do_shell'):
586 line = 'shell ' + line[1:]
587 else:
588 return None, None, line
589 i, n = 0, len(line)
590 while i < n and line[i] in self.identchars: i = i+1
591 cmd, arg = line[:i], line[i:].strip()
592 return cmd, arg, line
593 '''
594 for tok in tokenize(iter(s.splitlines()).__next__):
595 print(tok)
@@ -1,299 +1,297 b''
1 1 #!/usr/bin/env python
2 2 # -*- coding: utf-8 -*-
3 3 """Setup script for IPython.
4 4
5 5 Under Posix environments it works like a typical setup.py script.
6 6 Under Windows, the command sdist is not supported, since IPython
7 7 requires utilities which are not available under Windows."""
8 8
9 9 #-----------------------------------------------------------------------------
10 10 # Copyright (c) 2008-2011, IPython Development Team.
11 11 # Copyright (c) 2001-2007, Fernando Perez <fernando.perez@colorado.edu>
12 12 # Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
13 13 # Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
14 14 #
15 15 # Distributed under the terms of the Modified BSD License.
16 16 #
17 17 # The full license is in the file COPYING.rst, distributed with this software.
18 18 #-----------------------------------------------------------------------------
19 19
20 20 #-----------------------------------------------------------------------------
21 21 # Minimal Python version sanity check
22 22 #-----------------------------------------------------------------------------
23 23 from __future__ import print_function
24 24
25 25 import sys
26 26
27 27 # This check is also made in IPython/__init__, don't forget to update both when
28 28 # changing Python version requirements.
29 29 if sys.version_info < (3,3):
30 30 error = """
31 31 IPython 6.0+ does not support Python 2.6, 2.7, 3.0, 3.1, or 3.2.
32 32 When using Python 2.7, please install IPython 5.x LTS Long Term Support version.
33 33 Beginning with IPython 6.0, Python 3.3 and above is required.
34 34
35 35 See IPython `README.rst` file for more information:
36 36
37 37 https://github.com/ipython/ipython/blob/master/README.rst
38 38
39 39 """
40 40
41 41 print(error, file=sys.stderr)
42 42 sys.exit(1)
43 43
44 PY3 = (sys.version_info[0] >= 3)
45
46 44 # At least we're on the python version we need, move on.
47 45
48 46 #-------------------------------------------------------------------------------
49 47 # Imports
50 48 #-------------------------------------------------------------------------------
51 49
52 50 # Stdlib imports
53 51 import os
54 52
55 53 from glob import glob
56 54
57 55 # BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
58 56 # update it when the contents of directories change.
59 57 if os.path.exists('MANIFEST'): os.remove('MANIFEST')
60 58
61 59 from distutils.core import setup
62 60
63 61 # Our own imports
64 62 from setupbase import target_update
65 63
66 64 from setupbase import (
67 65 setup_args,
68 66 find_packages,
69 67 find_package_data,
70 68 check_package_data_first,
71 69 find_entry_points,
72 70 build_scripts_entrypt,
73 71 find_data_files,
74 72 git_prebuild,
75 73 install_symlinked,
76 74 install_lib_symlink,
77 75 install_scripts_for_symlink,
78 76 unsymlink,
79 77 )
80 78
81 79 isfile = os.path.isfile
82 80 pjoin = os.path.join
83 81
84 82 #-------------------------------------------------------------------------------
85 83 # Handle OS specific things
86 84 #-------------------------------------------------------------------------------
87 85
88 86 if os.name in ('nt','dos'):
89 87 os_name = 'windows'
90 88 else:
91 89 os_name = os.name
92 90
93 91 # Under Windows, 'sdist' has not been supported. Now that the docs build with
94 92 # Sphinx it might work, but let's not turn it on until someone confirms that it
95 93 # actually works.
96 94 if os_name == 'windows' and 'sdist' in sys.argv:
97 95 print('The sdist command is not available under Windows. Exiting.')
98 96 sys.exit(1)
99 97
100 98
101 99 #-------------------------------------------------------------------------------
102 100 # Things related to the IPython documentation
103 101 #-------------------------------------------------------------------------------
104 102
105 103 # update the manuals when building a source dist
106 104 if len(sys.argv) >= 2 and sys.argv[1] in ('sdist','bdist_rpm'):
107 105
108 106 # List of things to be updated. Each entry is a triplet of args for
109 107 # target_update()
110 108 to_update = [
111 109 ('docs/man/ipython.1.gz',
112 110 ['docs/man/ipython.1'],
113 111 'cd docs/man && gzip -9c ipython.1 > ipython.1.gz'),
114 112 ]
115 113
116 114
117 115 [ target_update(*t) for t in to_update ]
118 116
119 117 #---------------------------------------------------------------------------
120 118 # Find all the packages, package data, and data_files
121 119 #---------------------------------------------------------------------------
122 120
123 121 packages = find_packages()
124 122 package_data = find_package_data()
125 123
126 124 data_files = find_data_files()
127 125
128 126 setup_args['packages'] = packages
129 127 setup_args['package_data'] = package_data
130 128 setup_args['data_files'] = data_files
131 129
132 130 #---------------------------------------------------------------------------
133 131 # custom distutils commands
134 132 #---------------------------------------------------------------------------
135 133 # imports here, so they are after setuptools import if there was one
136 134 from distutils.command.sdist import sdist
137 135 from distutils.command.upload import upload
138 136
139 137 class UploadWindowsInstallers(upload):
140 138
141 139 description = "Upload Windows installers to PyPI (only used from tools/release_windows.py)"
142 140 user_options = upload.user_options + [
143 141 ('files=', 'f', 'exe file (or glob) to upload')
144 142 ]
145 143 def initialize_options(self):
146 144 upload.initialize_options(self)
147 145 meta = self.distribution.metadata
148 146 base = '{name}-{version}'.format(
149 147 name=meta.get_name(),
150 148 version=meta.get_version()
151 149 )
152 150 self.files = os.path.join('dist', '%s.*.exe' % base)
153 151
154 152 def run(self):
155 153 for dist_file in glob(self.files):
156 154 self.upload_file('bdist_wininst', 'any', dist_file)
157 155
158 156 setup_args['cmdclass'] = {
159 157 'build_py': \
160 158 check_package_data_first(git_prebuild('IPython')),
161 159 'sdist' : git_prebuild('IPython', sdist),
162 160 'upload_wininst' : UploadWindowsInstallers,
163 161 'symlink': install_symlinked,
164 162 'install_lib_symlink': install_lib_symlink,
165 163 'install_scripts_sym': install_scripts_for_symlink,
166 164 'unsymlink': unsymlink,
167 165 }
168 166
169 167
170 168 #---------------------------------------------------------------------------
171 169 # Handle scripts, dependencies, and setuptools specific things
172 170 #---------------------------------------------------------------------------
173 171
174 172 # For some commands, use setuptools. Note that we do NOT list install here!
175 173 # If you want a setuptools-enhanced install, just run 'setupegg.py install'
176 174 needs_setuptools = set(('develop', 'release', 'bdist_egg', 'bdist_rpm',
177 175 'bdist', 'bdist_dumb', 'bdist_wininst', 'bdist_wheel',
178 176 'egg_info', 'easy_install', 'upload', 'install_egg_info',
179 177 ))
180 178
181 179 if len(needs_setuptools.intersection(sys.argv)) > 0:
182 180 import setuptools
183 181
184 182 # This dict is used for passing extra arguments that are setuptools
185 183 # specific to setup
186 184 setuptools_extra_args = {}
187 185
188 186 # setuptools requirements
189 187
190 188 extras_require = dict(
191 189 parallel = ['ipyparallel'],
192 190 qtconsole = ['qtconsole'],
193 191 doc = ['Sphinx>=1.3'],
194 192 test = ['nose>=0.10.1', 'requests', 'testpath', 'pygments', 'nbformat', 'ipykernel', 'numpy'],
195 193 terminal = [],
196 194 kernel = ['ipykernel'],
197 195 nbformat = ['nbformat'],
198 196 notebook = ['notebook', 'ipywidgets'],
199 197 nbconvert = ['nbconvert'],
200 198 )
201 199
202 200 install_requires = [
203 201 'setuptools>=18.5',
204 202 'decorator',
205 203 'pickleshare',
206 204 'simplegeneric>0.8',
207 205 'traitlets>=4.2',
208 206 'prompt_toolkit>=1.0.3,<2.0.0',
209 207 'pygments',
210 208 ]
211 209
212 210 # Platform-specific dependencies:
213 211 # This is the correct way to specify these,
214 212 # but requires pip >= 6. pip < 6 ignores these.
215 213
216 214 extras_require.update({
217 215 ':python_version == "2.7"': ['backports.shutil_get_terminal_size'],
218 216 ':python_version == "2.7" or python_version == "3.3"': ['pathlib2'],
219 217 ':sys_platform != "win32"': ['pexpect'],
220 218 ':sys_platform == "darwin"': ['appnope'],
221 219 ':sys_platform == "win32"': ['colorama'],
222 220 ':sys_platform == "win32" and python_version < "3.6"': ['win_unicode_console>=0.5'],
223 221 'test:python_version == "2.7"': ['mock'],
224 222 })
225 223 # FIXME: re-specify above platform dependencies for pip < 6
226 224 # These would result in non-portable bdists.
227 225 if not any(arg.startswith('bdist') for arg in sys.argv):
228 226 if sys.version_info < (3, 3):
229 227 extras_require['test'].append('mock')
230 228
231 229 if sys.platform == 'darwin':
232 230 install_requires.extend(['appnope'])
233 231
234 232 if not sys.platform.startswith('win'):
235 233 install_requires.append('pexpect')
236 234
237 235 # workaround pypa/setuptools#147, where setuptools misspells
238 236 # platform_python_implementation as python_implementation
239 237 if 'setuptools' in sys.modules:
240 238 for key in list(extras_require):
241 239 if 'platform_python_implementation' in key:
242 240 new_key = key.replace('platform_python_implementation', 'python_implementation')
243 241 extras_require[new_key] = extras_require.pop(key)
244 242
245 243 everything = set()
246 244 for key, deps in extras_require.items():
247 245 if ':' not in key:
248 246 everything.update(deps)
249 247 extras_require['all'] = everything
250 248
251 249 if 'setuptools' in sys.modules:
252 250 setuptools_extra_args['python_requires'] = '>=3.3'
253 251 setuptools_extra_args['zip_safe'] = False
254 252 setuptools_extra_args['entry_points'] = {
255 253 'console_scripts': find_entry_points(),
256 254 'pygments.lexers': [
257 255 'ipythonconsole = IPython.lib.lexers:IPythonConsoleLexer',
258 256 'ipython = IPython.lib.lexers:IPythonLexer',
259 257 'ipython3 = IPython.lib.lexers:IPython3Lexer',
260 258 ],
261 259 }
262 260 setup_args['extras_require'] = extras_require
263 261 requires = setup_args['install_requires'] = install_requires
264 262
265 263 # Script to be run by the windows binary installer after the default setup
266 264 # routine, to add shortcuts and similar windows-only things. Windows
267 265 # post-install scripts MUST reside in the scripts/ dir, otherwise distutils
268 266 # doesn't find them.
269 267 if 'bdist_wininst' in sys.argv:
270 268 if len(sys.argv) > 2 and \
271 269 ('sdist' in sys.argv or 'bdist_rpm' in sys.argv):
272 270 print("ERROR: bdist_wininst must be run alone. Exiting.", file=sys.stderr)
273 271 sys.exit(1)
274 272 setup_args['data_files'].append(
275 273 ['Scripts', ('scripts/ipython.ico', 'scripts/ipython_nb.ico')])
276 274 setup_args['scripts'] = [pjoin('scripts','ipython_win_post_install.py')]
277 275 setup_args['options'] = {"bdist_wininst":
278 276 {"install_script":
279 277 "ipython_win_post_install.py"}}
280 278
281 279 else:
282 280 # scripts has to be a non-empty list, or install_scripts isn't called
283 281 setup_args['scripts'] = [e.split('=')[0].strip() for e in find_entry_points()]
284 282
285 283 setup_args['cmdclass']['build_scripts'] = build_scripts_entrypt
286 284
287 285 #---------------------------------------------------------------------------
288 286 # Do the actual setup now
289 287 #---------------------------------------------------------------------------
290 288
291 289 setup_args.update(setuptools_extra_args)
292 290
293 291
294 292
295 293 def main():
296 294 setup(**setup_args)
297 295
298 296 if __name__ == '__main__':
299 297 main()
@@ -1,235 +1,232 b''
1 1 #!/usr/bin/env python
2 2 """Simple tools to query github.com and gather stats about issues.
3 3
4 4 To generate a report for IPython 2.0, run:
5 5
6 6 python github_stats.py --milestone 2.0 --since-tag rel-1.0.0
7 7 """
8 8 #-----------------------------------------------------------------------------
9 9 # Imports
10 10 #-----------------------------------------------------------------------------
11 11
12 12 from __future__ import print_function
13 13
14 14 import codecs
15 15 import sys
16 16
17 17 from argparse import ArgumentParser
18 18 from datetime import datetime, timedelta
19 19 from subprocess import check_output
20 20
21 21 from gh_api import (
22 22 get_paged_request, make_auth_header, get_pull_request, is_pull_request,
23 23 get_milestone_id, get_issues_list, get_authors,
24 24 )
25 25 #-----------------------------------------------------------------------------
26 26 # Globals
27 27 #-----------------------------------------------------------------------------
28 28
29 29 ISO8601 = "%Y-%m-%dT%H:%M:%SZ"
30 30 PER_PAGE = 100
31 31
32 32 #-----------------------------------------------------------------------------
33 33 # Functions
34 34 #-----------------------------------------------------------------------------
35 35
36 36 def round_hour(dt):
37 37 return dt.replace(minute=0,second=0,microsecond=0)
38 38
39 39 def _parse_datetime(s):
40 40 """Parse dates in the format returned by the Github API."""
41 41 if s:
42 42 return datetime.strptime(s, ISO8601)
43 43 else:
44 44 return datetime.fromtimestamp(0)
45 45
46 46 def issues2dict(issues):
47 47 """Convert a list of issues to a dict, keyed by issue number."""
48 48 idict = {}
49 49 for i in issues:
50 50 idict[i['number']] = i
51 51 return idict
52 52
53 53 def split_pulls(all_issues, project="ipython/ipython"):
54 54 """split a list of closed issues into non-PR Issues and Pull Requests"""
55 55 pulls = []
56 56 issues = []
57 57 for i in all_issues:
58 58 if is_pull_request(i):
59 59 pull = get_pull_request(project, i['number'], auth=True)
60 60 pulls.append(pull)
61 61 else:
62 62 issues.append(i)
63 63 return issues, pulls
64 64
65 65
66 66 def issues_closed_since(period=timedelta(days=365), project="ipython/ipython", pulls=False):
67 67 """Get all issues closed since a particular point in time. period
68 68 can either be a datetime object, or a timedelta object. In the
69 69 latter case, it is used as a time before the present.
70 70 """
71 71
72 72 which = 'pulls' if pulls else 'issues'
73 73
74 74 if isinstance(period, timedelta):
75 75 since = round_hour(datetime.utcnow() - period)
76 76 else:
77 77 since = period
78 78 url = "https://api.github.com/repos/%s/%s?state=closed&sort=updated&since=%s&per_page=%i" % (project, which, since.strftime(ISO8601), PER_PAGE)
79 79 allclosed = get_paged_request(url, headers=make_auth_header())
80 80
81 81 filtered = [ i for i in allclosed if _parse_datetime(i['closed_at']) > since ]
82 82 if pulls:
83 83 filtered = [ i for i in filtered if _parse_datetime(i['merged_at']) > since ]
84 84 # filter out PRs not against master (backports)
85 85 filtered = [ i for i in filtered if i['base']['ref'] == 'master' ]
86 86 else:
87 87 filtered = [ i for i in filtered if not is_pull_request(i) ]
88 88
89 89 return filtered
90 90
91 91
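For instance (editor's sketch; this requires the local gh_api helpers, network access and, ideally, GitHub authentication for higher rate limits):

```python
from datetime import timedelta

# Closed issues and merged master-bound pull requests from the last 30 days.
issues = issues_closed_since(timedelta(days=30), project="ipython/ipython")
pulls = issues_closed_since(timedelta(days=30), project="ipython/ipython", pulls=True)
print("%d issues, %d pull requests" % (len(issues), len(pulls)))
```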
92 92 def sorted_by_field(issues, field='closed_at', reverse=False):
93 93 """Return a list of issues sorted by closing date date."""
94 94 return sorted(issues, key = lambda i:i[field], reverse=reverse)
95 95
96 96
97 97 def report(issues, show_urls=False):
98 98 """Summary report about a list of issues, printing number and title."""
99 99 if show_urls:
100 100 for i in issues:
101 101 role = 'ghpull' if 'merged_at' in i else 'ghissue'
102 102 print(u'* :%s:`%d`: %s' % (role, i['number'],
103 103 i['title'].replace(u'`', u'``')))
104 104 else:
105 105 for i in issues:
106 106 print(u'* %d: %s' % (i['number'], i['title'].replace(u'`', u'``')))
107 107
108 108 #-----------------------------------------------------------------------------
109 109 # Main script
110 110 #-----------------------------------------------------------------------------
111 111
112 112 if __name__ == "__main__":
113 113
114 114 print("DEPRECATE: backport_pr.py is deprecated and is is now recommended"
115 115 "to install `ghpro` from PyPI.", file=sys.stderr)
116 116
117 # deal with unicode
118 if sys.version_info < (3,):
119 sys.stdout = codecs.getwriter('utf8')(sys.stdout)
120 117
121 118 # Whether to add reST urls for all issues in printout.
122 119 show_urls = True
123 120
124 121 parser = ArgumentParser()
125 122 parser.add_argument('--since-tag', type=str,
126 123 help="The git tag to use for the starting point (typically the last major release)."
127 124 )
128 125 parser.add_argument('--milestone', type=str,
129 126 help="The GitHub milestone to use for filtering issues [optional]."
130 127 )
131 128 parser.add_argument('--days', type=int,
132 129 help="The number of days of data to summarize (use this or --since-tag)."
133 130 )
134 131 parser.add_argument('--project', type=str, default="ipython/ipython",
135 132 help="The project to summarize."
136 133 )
137 134 parser.add_argument('--links', action='store_true', default=False,
138 135 help="Include links to all closed Issues and PRs in the output."
139 136 )
140 137
141 138 opts = parser.parse_args()
142 139 tag = opts.since_tag
143 140
144 141 # set `since` from days or git tag
145 142 if opts.days:
146 143 since = datetime.utcnow() - timedelta(days=opts.days)
147 144 else:
148 145 if not tag:
149 146 tag = check_output(['git', 'describe', '--abbrev=0']).strip().decode('utf8')
150 147 cmd = ['git', 'log', '-1', '--format=%ai', tag]
151 148 tagday, tz = check_output(cmd).strip().decode('utf8').rsplit(' ', 1)
152 149 since = datetime.strptime(tagday, "%Y-%m-%d %H:%M:%S")
153 150 h = int(tz[1:3])
154 151 m = int(tz[3:])
155 152 td = timedelta(hours=h, minutes=m)
156 153 if tz[0] == '-':
157 154 since += td
158 155 else:
159 156 since -= td
160 157
161 158 since = round_hour(since)
162 159
163 160 milestone = opts.milestone
164 161 project = opts.project
165 162
166 163 print("fetching GitHub stats since %s (tag: %s, milestone: %s)" % (since, tag, milestone), file=sys.stderr)
167 164 if milestone:
168 165 milestone_id = get_milestone_id(project=project, milestone=milestone,
169 166 auth=True)
170 167 issues_and_pulls = get_issues_list(project=project,
171 168 milestone=milestone_id,
172 169 state='closed',
173 170 auth=True,
174 171 )
175 172 issues, pulls = split_pulls(issues_and_pulls, project=project)
176 173 else:
177 174 issues = issues_closed_since(since, project=project, pulls=False)
178 175 pulls = issues_closed_since(since, project=project, pulls=True)
179 176
180 177 # For regular reports, it's nice to show them in reverse chronological order
181 178 issues = sorted_by_field(issues, reverse=True)
182 179 pulls = sorted_by_field(pulls, reverse=True)
183 180
184 181 n_issues, n_pulls = map(len, (issues, pulls))
185 182 n_total = n_issues + n_pulls
186 183
187 184 # Print summary report we can directly include into release notes.
188 185
189 186 print()
190 187 since_day = since.strftime("%Y/%m/%d")
191 188 today = datetime.today().strftime("%Y/%m/%d")
192 189 print("GitHub stats for %s - %s (tag: %s)" % (since_day, today, tag))
193 190 print()
194 191 print("These lists are automatically generated, and may be incomplete or contain duplicates.")
195 192 print()
196 193
197 194 ncommits = 0
198 195 all_authors = []
199 196 if tag:
200 197 # print git info, in addition to GitHub info:
201 198 since_tag = tag+'..'
202 199 cmd = ['git', 'log', '--oneline', since_tag]
203 200 ncommits += len(check_output(cmd).splitlines())
204 201
205 202 author_cmd = ['git', 'log', '--use-mailmap', "--format=* %aN", since_tag]
206 203 all_authors.extend(check_output(author_cmd).decode('utf-8', 'replace').splitlines())
207 204
208 205 pr_authors = []
209 206 for pr in pulls:
210 207 pr_authors.extend(get_authors(pr))
211 208 ncommits = len(pr_authors) + ncommits - len(pulls)
212 209 author_cmd = ['git', 'check-mailmap'] + pr_authors
213 210 with_email = check_output(author_cmd).decode('utf-8', 'replace').splitlines()
214 211 all_authors.extend([ u'* ' + a.split(' <')[0] for a in with_email ])
215 212 unique_authors = sorted(set(all_authors), key=lambda s: s.lower())
216 213
217 214 print("We closed %d issues and merged %d pull requests." % (n_issues, n_pulls))
218 215 if milestone:
219 216 print("The full list can be seen `on GitHub <https://github.com/{project}/issues?q=milestone%3A{milestone}+>`__".format(project=project,milestone=milestone)
220 217 )
221 218
222 219 print()
223 220 print("The following %i authors contributed %i commits." % (len(unique_authors), ncommits))
224 221 print()
225 222 print('\n'.join(unique_authors))
226 223
227 224 if opts.links:
228 225 print()
229 226 print("GitHub issues and pull requests:")
230 227 print()
231 228 print('Pull Requests (%d):\n' % n_pulls)
232 229 report(pulls, show_urls)
233 230 print()
234 231 print('Issues (%d):\n' % n_issues)
235 232 report(issues, show_urls)
1 NO CONTENT: file was removed
1 NO CONTENT: file was removed
This diff has been collapsed as it changes many lines (595 lines changed).