Merge pull request #10021 from ivanov/remove-sys-version-checks...
Thomas Kluyver -
r22981:6a5220d0 merge
@@ -0,0 +1,5 b''
1 """This tests that future compiler flags are passed to the embedded IPython."""
2 from __future__ import barry_as_FLUFL
3 from IPython import embed
4 embed(banner1='', header='check 1 <> 2 == True')
5 embed(banner1='', header='check 1 <> 2 cause SyntaxError', compile_flags=0)
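The test above relies on the embedded shell picking up the caller's `__future__` compiler flags. The underlying mechanism can be sketched with the stdlib alone; the helper name below is illustrative, not IPython's internal API:

    import __future__
    import functools
    import operator
    import sys

    # Combined bitmask of every __future__ feature's compiler flag
    # (the same construction as the PyCF_MASK constant further down in this diff).
    PyCF_MASK = functools.reduce(
        operator.or_,
        (getattr(__future__, name).compiler_flag
         for name in __future__.all_feature_names))

    def inherited_future_flags():
        """Return the __future__ flags active in the caller's code object."""
        return sys._getframe(1).f_code.co_flags & PyCF_MASK

    # Passing these flags to compile() makes the caller's future imports
    # (e.g. barry_as_FLUFL in the test above) apply to dynamically compiled code.
    code = compile("x = 1", "<example>", "exec", inherited_future_flags())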
@@ -1,152 +1,151 b''
1 1 # encoding: utf-8
2 2 """
3 3 IPython: tools for interactive and parallel computing in Python.
4 4
5 5 http://ipython.org
6 6 """
7 7 #-----------------------------------------------------------------------------
8 8 # Copyright (c) 2008-2011, IPython Development Team.
9 9 # Copyright (c) 2001-2007, Fernando Perez <fernando.perez@colorado.edu>
10 10 # Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
11 11 # Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
12 12 #
13 13 # Distributed under the terms of the Modified BSD License.
14 14 #
15 15 # The full license is in the file COPYING.txt, distributed with this software.
16 16 #-----------------------------------------------------------------------------
17 17
18 18 #-----------------------------------------------------------------------------
19 19 # Imports
20 20 #-----------------------------------------------------------------------------
21 from __future__ import absolute_import
22 21
23 22 import os
24 23 import sys
25 24
26 25 #-----------------------------------------------------------------------------
27 26 # Setup everything
28 27 #-----------------------------------------------------------------------------
29 28
30 29 # Don't forget to also update setup.py when this changes!
31 30 if sys.version_info < (3,3):
32 31 raise ImportError(
33 32 """
34 33 IPython 6.0+ does not support Python 2.6, 2.7, 3.0, 3.1, or 3.2.
35 34 When using Python 2.7, please install IPython 5.x LTS Long Term Support version.
36 35 Beginning with IPython 6.0, Python 3.3 and above is required.
37 36
38 37 See IPython `README.rst` file for more information:
39 38
40 39 https://github.com/ipython/ipython/blob/master/README.rst
41 40
42 41 """)
43 42
44 43 # Make it easy to import extensions - they are always directly on pythonpath.
45 44 # Therefore, non-IPython modules can be added to the extensions directory.
46 45 # This should probably be in ipapp.py.
47 46 sys.path.append(os.path.join(os.path.dirname(__file__), "extensions"))
48 47
49 48 #-----------------------------------------------------------------------------
50 49 # Setup the top level names
51 50 #-----------------------------------------------------------------------------
52 51
53 52 from .core.getipython import get_ipython
54 53 from .core import release
55 54 from .core.application import Application
56 55 from .terminal.embed import embed
57 56
58 57 from .core.interactiveshell import InteractiveShell
59 58 from .testing import test
60 59 from .utils.sysinfo import sys_info
61 60 from .utils.frame import extract_module_locals
62 61
63 62 # Release data
64 63 __author__ = '%s <%s>' % (release.author, release.author_email)
65 64 __license__ = release.license
66 65 __version__ = release.version
67 66 version_info = release.version_info
68 67
69 68 def embed_kernel(module=None, local_ns=None, **kwargs):
70 69 """Embed and start an IPython kernel in a given scope.
71 70
72 71 If you don't want the kernel to initialize the namespace
73 72 from the scope of the surrounding function,
74 73 and/or you want to load full IPython configuration,
75 74 you probably want `IPython.start_kernel()` instead.
76 75
77 76 Parameters
78 77 ----------
79 78 module : ModuleType, optional
80 79 The module to load into IPython globals (default: caller)
81 80 local_ns : dict, optional
82 81 The namespace to load into IPython user namespace (default: caller)
83 82
84 83 kwargs : various, optional
85 84 Further keyword args are relayed to the IPKernelApp constructor,
86 85 allowing configuration of the Kernel. Will only have an effect
87 86 on the first embed_kernel call for a given process.
88 87 """
89 88
90 89 (caller_module, caller_locals) = extract_module_locals(1)
91 90 if module is None:
92 91 module = caller_module
93 92 if local_ns is None:
94 93 local_ns = caller_locals
95 94
96 95 # Only import .zmq when we really need it
97 96 from ipykernel.embed import embed_kernel as real_embed_kernel
98 97 real_embed_kernel(module=module, local_ns=local_ns, **kwargs)
99 98
100 99 def start_ipython(argv=None, **kwargs):
101 100 """Launch a normal IPython instance (as opposed to embedded)
102 101
103 102 `IPython.embed()` puts a shell in a particular calling scope,
104 103 such as a function or method for debugging purposes,
105 104 which is often not desirable.
106 105
107 106 `start_ipython()` does full, regular IPython initialization,
108 107 including loading startup files, configuration, etc.,
109 108 much of which is skipped by `embed()`.
110 109
111 110 This is a public API method, and will survive implementation changes.
112 111
113 112 Parameters
114 113 ----------
115 114
116 115 argv : list or None, optional
117 116 If unspecified or None, IPython will parse command-line options from sys.argv.
118 117 To prevent any command-line parsing, pass an empty list: `argv=[]`.
119 118 user_ns : dict, optional
120 119 specify this dictionary to initialize the IPython user namespace with particular values.
121 120 kwargs : various, optional
122 121 Any other kwargs will be passed to the Application constructor,
123 122 such as `config`.
124 123 """
125 124 from IPython.terminal.ipapp import launch_new_instance
126 125 return launch_new_instance(argv=argv, **kwargs)
127 126
128 127 def start_kernel(argv=None, **kwargs):
129 128 """Launch a normal IPython kernel instance (as opposed to embedded)
130 129
131 130 `IPython.embed_kernel()` puts a shell in a particular calling scope,
132 131 such as a function or method for debugging purposes,
133 132 which is often not desirable.
134 133
135 134 `start_kernel()` does full, regular IPython initialization,
136 135 including loading startup files, configuration, etc.,
137 136 much of which is skipped by `embed()`.
138 137
139 138 Parameters
140 139 ----------
141 140
142 141 argv : list or None, optional
143 142 If unspecified or None, IPython will parse command-line options from sys.argv.
144 143 To prevent any command-line parsing, pass an empty list: `argv=[]`.
145 144 user_ns : dict, optional
146 145 specify this dictionary to initialize the IPython user namespace with particular values.
147 146 kwargs : various, optional
148 147 Any other kwargs will be passed to the Application constructor,
149 148 such as `config`.
150 149 """
151 150 from IPython.kernel.zmq.kernelapp import launch_new_instance
152 151 return launch_new_instance(argv=argv, **kwargs)
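A brief usage sketch of the public entry points defined above; both kernel/shell-starting calls block until shut down, and the values passed here are only examples:

    import IPython

    def inspect_state(data):
        # Expose this function's locals to a connected Jupyter console;
        # blocks until the kernel is stopped.
        IPython.embed_kernel(local_ns={'data': data})

    # Full IPython startup (config files, profiles, ...), ignoring sys.argv
    # and pre-seeding the user namespace.
    IPython.start_ipython(argv=[], user_ns={'answer': 42})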
@@ -1,144 +1,143 b''
1 1 """Compiler tools with improved interactive support.
2 2
3 3 Provides compilation machinery similar to codeop, but with caching support so
4 4 we can provide interactive tracebacks.
5 5
6 6 Authors
7 7 -------
8 8 * Robert Kern
9 9 * Fernando Perez
10 10 * Thomas Kluyver
11 11 """
12 12
13 13 # Note: though it might be more natural to name this module 'compiler', that
14 14 # name is in the stdlib and name collisions with the stdlib tend to produce
15 15 # weird problems (often with third-party tools).
16 16
17 17 #-----------------------------------------------------------------------------
18 18 # Copyright (C) 2010-2011 The IPython Development Team.
19 19 #
20 20 # Distributed under the terms of the BSD License.
21 21 #
22 22 # The full license is in the file COPYING.txt, distributed with this software.
23 23 #-----------------------------------------------------------------------------
24 24
25 25 #-----------------------------------------------------------------------------
26 26 # Imports
27 27 #-----------------------------------------------------------------------------
28 from __future__ import print_function
29 28
30 29 # Stdlib imports
31 30 import __future__
32 31 from ast import PyCF_ONLY_AST
33 32 import codeop
34 33 import functools
35 34 import hashlib
36 35 import linecache
37 36 import operator
38 37 import time
39 38
40 39 #-----------------------------------------------------------------------------
41 40 # Constants
42 41 #-----------------------------------------------------------------------------
43 42
44 43 # Roughly equal to PyCF_MASK | PyCF_MASK_OBSOLETE as defined in pythonrun.h,
45 44 # this is used as a bitmask to extract future-related code flags.
46 45 PyCF_MASK = functools.reduce(operator.or_,
47 46 (getattr(__future__, fname).compiler_flag
48 47 for fname in __future__.all_feature_names))
49 48
50 49 #-----------------------------------------------------------------------------
51 50 # Local utilities
52 51 #-----------------------------------------------------------------------------
53 52
54 53 def code_name(code, number=0):
55 54 """ Compute a (probably) unique name for code for caching.
56 55
57 56 This now expects code to be unicode.
58 57 """
59 58 hash_digest = hashlib.md5(code.encode("utf-8")).hexdigest()
60 59 # Include the number and 12 characters of the hash in the name. It's
61 60 # pretty much impossible that in a single session we'll have collisions
62 61 # even with truncated hashes, and the full one makes tracebacks too long
63 62 return '<ipython-input-{0}-{1}>'.format(number, hash_digest[:12])
64 63
65 64 #-----------------------------------------------------------------------------
66 65 # Classes and functions
67 66 #-----------------------------------------------------------------------------
68 67
69 68 class CachingCompiler(codeop.Compile):
70 69 """A compiler that caches code compiled from interactive statements.
71 70 """
72 71
73 72 def __init__(self):
74 73 codeop.Compile.__init__(self)
75 74
76 75 # This is ugly, but it must be done this way to allow multiple
77 76 # simultaneous ipython instances to coexist. Since Python itself
78 77 # directly accesses the data structures in the linecache module, and
79 78 # the cache therein is global, we must work with that data structure.
80 79 # We must hold a reference to the original checkcache routine and call
81 80 # that in our own check_cache() below, but the special IPython cache
82 81 # must also be shared by all IPython instances. If we were to hold
83 82 # separate caches (one in each CachingCompiler instance), any call made
84 83 # by Python itself to linecache.checkcache() would obliterate the
85 84 # cached data from the other IPython instances.
86 85 if not hasattr(linecache, '_ipython_cache'):
87 86 linecache._ipython_cache = {}
88 87 if not hasattr(linecache, '_checkcache_ori'):
89 88 linecache._checkcache_ori = linecache.checkcache
90 89 # Now, we must monkeypatch the linecache directly so that parts of the
91 90 # stdlib that call it outside our control go through our codepath
92 91 # (otherwise we'd lose our tracebacks).
93 92 linecache.checkcache = check_linecache_ipython
94 93
95 94 def ast_parse(self, source, filename='<unknown>', symbol='exec'):
96 95 """Parse code to an AST with the current compiler flags active.
97 96
98 97 Arguments are exactly the same as ast.parse (in the standard library),
99 98 and are passed to the built-in compile function."""
100 99 return compile(source, filename, symbol, self.flags | PyCF_ONLY_AST, 1)
101 100
102 101 def reset_compiler_flags(self):
103 102 """Reset compiler flags to default state."""
104 103 # This value is copied from codeop.Compile.__init__, so if that ever
105 104 # changes, it will need to be updated.
106 105 self.flags = codeop.PyCF_DONT_IMPLY_DEDENT
107 106
108 107 @property
109 108 def compiler_flags(self):
110 109 """Flags currently active in the compilation process.
111 110 """
112 111 return self.flags
113 112
114 113 def cache(self, code, number=0):
115 114 """Make a name for a block of code, and cache the code.
116 115
117 116 Parameters
118 117 ----------
119 118 code : str
120 119 The Python source code to cache.
121 120 number : int
122 121 A number which forms part of the code's name. Used for the execution
123 122 counter.
124 123
125 124 Returns
126 125 -------
127 126 The name of the cached code (as a string). Pass this as the filename
128 127 argument to compilation, so that tracebacks are correctly hooked up.
129 128 """
130 129 name = code_name(code, number)
131 130 entry = (len(code), time.time(),
132 131 [line+'\n' for line in code.splitlines()], name)
133 132 linecache.cache[name] = entry
134 133 linecache._ipython_cache[name] = entry
135 134 return name
136 135
137 136 def check_linecache_ipython(*args):
138 137 """Call linecache.checkcache() safely protecting our cached values.
139 138 """
140 139 # First call the original checkcache as intended
141 140 linecache._checkcache_ori(*args)
142 141 # Then, update back the cache with our data, so that tracebacks related
143 142 # to our compiled codes can be produced.
144 143 linecache.cache.update(linecache._ipython_cache)
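Roughly, the caching compiler is used as in the sketch below (the import path IPython.core.compilerop is assumed): a cell's source is cached under a synthetic filename, the code is compiled against that filename, and linecache can then resolve it when building tracebacks.

    import linecache
    from IPython.core.compilerop import CachingCompiler  # import path assumed

    compiler = CachingCompiler()
    source = "def f():\n    return 1 / 0\n"

    # Store the source under a '<ipython-input-N-hash>' name...
    name = compiler.cache(source, number=1)
    # ...and compile against that name so tracebacks point at the cached cell.
    code = compiler(source, name, 'exec')

    ns = {}
    exec(code, ns)
    print(''.join(linecache.getlines(name)))  # the cached cell source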
@@ -1,1237 +1,1236 b''
1 1 # encoding: utf-8
2 2 """Word completion for IPython.
3 3
4 4 This module started as a fork of the rlcompleter module in the Python standard
5 5 library. The original enhancements made to rlcompleter have been sent
6 6 upstream and were accepted as of Python 2.3.
7 7
8 8 """
9 9
10 10 # Copyright (c) IPython Development Team.
11 11 # Distributed under the terms of the Modified BSD License.
12 12 #
13 13 # Some of this code originated from rlcompleter in the Python standard library
14 14 # Copyright (C) 2001 Python Software Foundation, www.python.org
15 15
16 from __future__ import print_function
17 16
18 17 import __main__
19 18 import glob
20 19 import inspect
21 20 import itertools
22 21 import keyword
23 22 import os
24 23 import re
25 24 import sys
26 25 import unicodedata
27 26 import string
28 27 import warnings
29 28 from importlib import import_module
30 29
31 30 from traitlets.config.configurable import Configurable
32 31 from IPython.core.error import TryNext
33 32 from IPython.core.inputsplitter import ESC_MAGIC
34 33 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
35 34 from IPython.utils import generics
36 35 from IPython.utils.decorators import undoc
37 36 from IPython.utils.dir2 import dir2, get_real_method
38 37 from IPython.utils.process import arg_split
39 38 from IPython.utils.py3compat import builtin_mod, string_types, PY3, cast_unicode_py2
40 39 from traitlets import Bool, Enum, observe
41 40
42 41 from functools import wraps
43 42
44 43 #-----------------------------------------------------------------------------
45 44 # Globals
46 45 #-----------------------------------------------------------------------------
47 46
48 47 # Public API
49 48 __all__ = ['Completer','IPCompleter']
50 49
51 50 if sys.platform == 'win32':
52 51 PROTECTABLES = ' '
53 52 else:
54 53 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
55 54
56 55
57 56 #-----------------------------------------------------------------------------
58 57 # Work around BUG decorators.
59 58 #-----------------------------------------------------------------------------
60 59
61 60 def _strip_single_trailing_space(complete):
62 61 """
63 62 This is a workaround for a weird IPython/Prompt_toolkit behavior,
64 63 that can be removed once we rely on a slightly more recent prompt_toolkit
65 64 version (likely > 1.0.3). So this can likely be removed in IPython 6.0
66 65
67 66 cf https://github.com/ipython/ipython/issues/9658
68 67 and https://github.com/jonathanslenders/python-prompt-toolkit/pull/328
69 68
70 69 The bug is due to the fact that in PTK the completer will reinvoke itself
71 70 after trying to complete to the longest common prefix of all the
72 71 completions, unless only one completion is available.
73 72
74 73 This logic is faulty if the completion ends with a space, which can happen in
75 74 cases like::
76 75
77 76 from foo import im<ta>
78 77
79 78 for which the only matching completion is `import `. Note the trailing space at the
80 79 end. So leaving a space at the end is a reasonable request, but for now
81 80 we'll strip it.
82 81 """
83 82
84 83 @wraps(complete)
85 84 def comp(*args, **kwargs):
86 85 text, matches = complete(*args, **kwargs)
87 86 if len(matches) == 1:
88 87 return text, [matches[0].rstrip()]
89 88 return text, matches
90 89
91 90 return comp
92 91
93 92
94 93
95 94 #-----------------------------------------------------------------------------
96 95 # Main functions and classes
97 96 #-----------------------------------------------------------------------------
98 97
99 98 def has_open_quotes(s):
100 99 """Return whether a string has open quotes.
101 100
102 101 This simply counts whether the number of quote characters of either type in
103 102 the string is odd.
104 103
105 104 Returns
106 105 -------
107 106 If there is an open quote, the quote character is returned. Else, return
108 107 False.
109 108 """
110 109 # We check " first, then ', so complex cases with nested quotes will get
111 110 # the " to take precedence.
112 111 if s.count('"') % 2:
113 112 return '"'
114 113 elif s.count("'") % 2:
115 114 return "'"
116 115 else:
117 116 return False
118 117
119 118
120 119 def protect_filename(s):
121 120 """Escape a string to protect certain characters."""
122 121 if set(s) & set(PROTECTABLES):
123 122 if sys.platform == "win32":
124 123 return '"' + s + '"'
125 124 else:
126 125 return "".join(("\\" + c if c in PROTECTABLES else c) for c in s)
127 126 else:
128 127 return s
129 128
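For instance, with the two helpers above (the exact escaping is platform dependent, as the code shows):

    print(has_open_quotes('print("foo'))    # -> '"'
    print(has_open_quotes('print("foo")'))  # -> False

    # On POSIX the space is backslash-escaped; on Windows the whole name is quoted.
    print(protect_filename('My Documents/notes.txt'))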
130 129
131 130 def expand_user(path):
132 131 """Expand '~'-style usernames in strings.
133 132
134 133 This is similar to :func:`os.path.expanduser`, but it computes and returns
135 134 extra information that will be useful if the input was being used in
136 135 computing completions, and you wish to return the completions with the
137 136 original '~' instead of its expanded value.
138 137
139 138 Parameters
140 139 ----------
141 140 path : str
142 141 String to be expanded. If no ~ is present, the output is the same as the
143 142 input.
144 143
145 144 Returns
146 145 -------
147 146 newpath : str
148 147 Result of ~ expansion in the input path.
149 148 tilde_expand : bool
150 149 Whether any expansion was performed or not.
151 150 tilde_val : str
152 151 The value that ~ was replaced with.
153 152 """
154 153 # Default values
155 154 tilde_expand = False
156 155 tilde_val = ''
157 156 newpath = path
158 157
159 158 if path.startswith('~'):
160 159 tilde_expand = True
161 160 rest = len(path)-1
162 161 newpath = os.path.expanduser(path)
163 162 if rest:
164 163 tilde_val = newpath[:-rest]
165 164 else:
166 165 tilde_val = newpath
167 166
168 167 return newpath, tilde_expand, tilde_val
169 168
170 169
171 170 def compress_user(path, tilde_expand, tilde_val):
172 171 """Does the opposite of expand_user, with its outputs.
173 172 """
174 173 if tilde_expand:
175 174 return path.replace(tilde_val, '~')
176 175 else:
177 176 return path
178 177
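A small example of the round trip (the expanded value naturally depends on the local home directory):

    path, expanded, tilde_val = expand_user('~/projects/demo')
    # e.g. ('/home/alice/projects/demo', True, '/home/alice')
    print(compress_user(path, expanded, tilde_val))  # -> '~/projects/demo'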
179 178
180 179 def completions_sorting_key(word):
181 180 """key for sorting completions
182 181
183 182 This does several things:
184 183
185 184 - Lowercase all completions, so they are sorted alphabetically with
186 185 upper and lower case words mingled
187 186 - Demote any completions starting with underscores to the end
188 187 - Insert any %magic and %%cellmagic completions in the alphabetical order
189 188 by their name
190 189 """
191 190 # Case insensitive sort
192 191 word = word.lower()
193 192
194 193 prio1, prio2 = 0, 0
195 194
196 195 if word.startswith('__'):
197 196 prio1 = 2
198 197 elif word.startswith('_'):
199 198 prio1 = 1
200 199
201 200 if word.endswith('='):
202 201 prio1 = -1
203 202
204 203 if word.startswith('%%'):
205 204 # If there's another % in there, this is something else, so leave it alone
206 205 if not "%" in word[2:]:
207 206 word = word[2:]
208 207 prio2 = 2
209 208 elif word.startswith('%'):
210 209 if not "%" in word[1:]:
211 210 word = word[1:]
212 211 prio2 = 1
213 212
214 213 return prio1, word, prio2
215 214
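The resulting order can be seen with a handful of sample completions:

    words = ['zip', '_private', '__dunder__', '%%timeit', '%run', 'abs']
    print(sorted(words, key=completions_sorting_key))
    # -> ['abs', '%run', '%%timeit', 'zip', '_private', '__dunder__']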
216 215
217 216 @undoc
218 217 class Bunch(object): pass
219 218
220 219
221 220 if sys.platform == 'win32':
222 221 DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
223 222 else:
224 223 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
225 224
226 225 GREEDY_DELIMS = ' =\r\n'
227 226
228 227
229 228 class CompletionSplitter(object):
230 229 """An object to split an input line in a manner similar to readline.
231 230
232 231 By having our own implementation, we can expose readline-like completion in
233 232 a uniform manner to all frontends. This object only needs to be given the
234 233 line of text to be split and the cursor position on said line, and it
235 234 returns the 'word' to be completed on at the cursor after splitting the
236 235 entire line.
237 236
238 237 What characters are used as splitting delimiters can be controlled by
239 238 setting the `delims` attribute (this is a property that internally
240 239 automatically builds the necessary regular expression)"""
241 240
242 241 # Private interface
243 242
244 243 # A string of delimiter characters. The default value makes sense for
245 244 # IPython's most typical usage patterns.
246 245 _delims = DELIMS
247 246
248 247 # The expression (a normal string) to be compiled into a regular expression
249 248 # for actual splitting. We store it as an attribute mostly for ease of
250 249 # debugging, since this type of code can be so tricky to debug.
251 250 _delim_expr = None
252 251
253 252 # The regular expression that does the actual splitting
254 253 _delim_re = None
255 254
256 255 def __init__(self, delims=None):
257 256 delims = CompletionSplitter._delims if delims is None else delims
258 257 self.delims = delims
259 258
260 259 @property
261 260 def delims(self):
262 261 """Return the string of delimiter characters."""
263 262 return self._delims
264 263
265 264 @delims.setter
266 265 def delims(self, delims):
267 266 """Set the delimiters for line splitting."""
268 267 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
269 268 self._delim_re = re.compile(expr)
270 269 self._delims = delims
271 270 self._delim_expr = expr
272 271
273 272 def split_line(self, line, cursor_pos=None):
274 273 """Split a line of text with a cursor at the given position.
275 274 """
276 275 l = line if cursor_pos is None else line[:cursor_pos]
277 276 return self._delim_re.split(l)[-1]
278 277
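For example, the splitter returns only the word under the cursor:

    splitter = CompletionSplitter()
    print(splitter.split_line('print(os.pa'))    # -> 'os.pa'
    print(splitter.split_line('a = b + np.ar'))  # -> 'np.ar'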
279 278
280 279 class Completer(Configurable):
281 280
282 281 greedy = Bool(False,
283 282 help="""Activate greedy completion
284 283 PENDING DEPRECATION. This is now mostly taken care of with Jedi.
285 284
286 285 This will enable completion on elements of lists, results of function calls, etc.,
287 286 but can be unsafe because the code is actually evaluated on TAB.
288 287 """
289 288 ).tag(config=True)
290 289
291 290
292 291 def __init__(self, namespace=None, global_namespace=None, **kwargs):
293 292 """Create a new completer for the command line.
294 293
295 294 Completer(namespace=ns, global_namespace=ns2) -> completer instance.
296 295
297 296 If unspecified, the default namespace where completions are performed
298 297 is __main__ (technically, __main__.__dict__). Namespaces should be
299 298 given as dictionaries.
300 299
301 300 An optional second namespace can be given. This allows the completer
302 301 to handle cases where both the local and global scopes need to be
303 302 distinguished.
304 303
305 304 Completer instances should be used as the completion mechanism of
306 305 readline via the set_completer() call:
307 306
308 307 readline.set_completer(Completer(my_namespace).complete)
309 308 """
310 309
311 310 # Don't bind to namespace quite yet, but flag whether the user wants a
312 311 # specific namespace or to use __main__.__dict__. This will allow us
313 312 # to bind to __main__.__dict__ at completion time, not now.
314 313 if namespace is None:
315 314 self.use_main_ns = 1
316 315 else:
317 316 self.use_main_ns = 0
318 317 self.namespace = namespace
319 318
320 319 # The global namespace, if given, can be bound directly
321 320 if global_namespace is None:
322 321 self.global_namespace = {}
323 322 else:
324 323 self.global_namespace = global_namespace
325 324
326 325 super(Completer, self).__init__(**kwargs)
327 326
328 327 def complete(self, text, state):
329 328 """Return the next possible completion for 'text'.
330 329
331 330 This is called successively with state == 0, 1, 2, ... until it
332 331 returns None. The completion should begin with 'text'.
333 332
334 333 """
335 334 if self.use_main_ns:
336 335 self.namespace = __main__.__dict__
337 336
338 337 if state == 0:
339 338 if "." in text:
340 339 self.matches = self.attr_matches(text)
341 340 else:
342 341 self.matches = self.global_matches(text)
343 342 try:
344 343 return self.matches[state]
345 344 except IndexError:
346 345 return None
347 346
348 347 def global_matches(self, text):
349 348 """Compute matches when text is a simple name.
350 349
351 350 Return a list of all keywords, built-in functions and names currently
352 351 defined in self.namespace or self.global_namespace that match.
353 352
354 353 """
355 354 matches = []
356 355 match_append = matches.append
357 356 n = len(text)
358 357 for lst in [keyword.kwlist,
359 358 builtin_mod.__dict__.keys(),
360 359 self.namespace.keys(),
361 360 self.global_namespace.keys()]:
362 361 for word in lst:
363 362 if word[:n] == text and word != "__builtins__":
364 363 match_append(word)
365 364 return [cast_unicode_py2(m) for m in matches]
366 365
367 366 def attr_matches(self, text):
368 367 """Compute matches when text contains a dot.
369 368
370 369 Assuming the text is of the form NAME.NAME....[NAME], and is
371 370 evaluatable in self.namespace or self.global_namespace, it will be
372 371 evaluated and its attributes (as revealed by dir()) are used as
373 372 possible completions. (For class instances, class members are
374 373 also considered.)
375 374
376 375 WARNING: this can still invoke arbitrary C code, if an object
377 376 with a __getattr__ hook is evaluated.
378 377
379 378 """
380 379
381 380 # Another option, seems to work great. Catches things like ''.<tab>
382 381 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
383 382
384 383 if m:
385 384 expr, attr = m.group(1, 3)
386 385 elif self.greedy:
387 386 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
388 387 if not m2:
389 388 return []
390 389 expr, attr = m2.group(1,2)
391 390 else:
392 391 return []
393 392
394 393 try:
395 394 obj = eval(expr, self.namespace)
396 395 except:
397 396 try:
398 397 obj = eval(expr, self.global_namespace)
399 398 except:
400 399 return []
401 400
402 401 if self.limit_to__all__ and hasattr(obj, '__all__'):
403 402 words = get__all__entries(obj)
404 403 else:
405 404 words = dir2(obj)
406 405
407 406 try:
408 407 words = generics.complete_object(obj, words)
409 408 except TryNext:
410 409 pass
411 410 except Exception:
412 411 # Silence errors from completion function
413 412 #raise # dbg
414 413 pass
415 414 # Build match list to return
416 415 n = len(attr)
417 416 return [u"%s.%s" % (expr, w) for w in words if w[:n] == attr ]
418 417
419 418
420 419 def get__all__entries(obj):
421 420 """returns the strings in the __all__ attribute"""
422 421 try:
423 422 words = getattr(obj, '__all__')
424 423 except:
425 424 return []
426 425
427 426 return [cast_unicode_py2(w) for w in words if isinstance(w, string_types)]
428 427
429 428
430 429 def match_dict_keys(keys, prefix, delims):
431 430 """Used by dict_key_matches, matching the prefix to a list of keys"""
432 431 if not prefix:
433 432 return None, 0, [repr(k) for k in keys
434 433 if isinstance(k, (string_types, bytes))]
435 434 quote_match = re.search('["\']', prefix)
436 435 quote = quote_match.group()
437 436 try:
438 437 prefix_str = eval(prefix + quote, {})
439 438 except Exception:
440 439 return None, 0, []
441 440
442 441 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
443 442 token_match = re.search(pattern, prefix, re.UNICODE)
444 443 token_start = token_match.start()
445 444 token_prefix = token_match.group()
446 445
447 446 # TODO: support bytes in Py3k
448 447 matched = []
449 448 for key in keys:
450 449 try:
451 450 if not key.startswith(prefix_str):
452 451 continue
453 452 except (AttributeError, TypeError, UnicodeError):
454 453 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
455 454 continue
456 455
457 456 # reformat remainder of key to begin with prefix
458 457 rem = key[len(prefix_str):]
459 458 # force repr wrapped in '
460 459 rem_repr = repr(rem + '"')
461 460 if rem_repr.startswith('u') and prefix[0] not in 'uU':
462 461 # Found key is unicode, but prefix is Py2 string.
463 462 # Therefore attempt to interpret key as string.
464 463 try:
465 464 rem_repr = repr(rem.encode('ascii') + '"')
466 465 except UnicodeEncodeError:
467 466 continue
468 467
469 468 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
470 469 if quote == '"':
471 470 # The entered prefix is quoted with ",
472 471 # but the match is quoted with '.
473 472 # A contained " hence needs escaping for comparison:
474 473 rem_repr = rem_repr.replace('"', '\\"')
475 474
476 475 # then reinsert prefix from start of token
477 476 matched.append('%s%s' % (token_prefix, rem_repr))
478 477 return quote, token_start, matched
479 478
480 479
481 480 def _safe_isinstance(obj, module, class_name):
482 481 """Checks if obj is an instance of module.class_name if loaded
483 482 """
484 483 return (module in sys.modules and
485 484 isinstance(obj, getattr(import_module(module), class_name)))
486 485
487 486
488 487 def back_unicode_name_matches(text):
489 488 u"""Match unicode characters back to unicode name
490 489
491 490 This does ☃ -> \\snowman
492 491
493 492 Note that snowman is not a valid python3 combining character, but it will still be expanded.
494 493 The completion machinery will not, however, recombine it back into the snowman character.
495 494
496 495 Nor will this back-complete standard escape sequences like \\n, \\b ...
497 496
498 497 Used on Python 3 only.
499 498 """
500 499 if len(text)<2:
501 500 return u'', ()
502 501 maybe_slash = text[-2]
503 502 if maybe_slash != '\\':
504 503 return u'', ()
505 504
506 505 char = text[-1]
507 506 # no expand on quote for completion in strings.
508 507 # nor backcomplete standard ascii keys
509 508 if char in string.ascii_letters or char in ['"',"'"]:
510 509 return u'', ()
511 510 try :
512 511 unic = unicodedata.name(char)
513 512 return '\\'+char,['\\'+unic]
514 513 except KeyError:
515 514 pass
516 515 return u'', ()
517 516
518 517 def back_latex_name_matches(text):
519 518 u"""Match unicode characters back to their latex name
520 519
521 520 This does √ -> \\sqrt
522 521
523 522 Used on Python 3 only.
524 523 """
525 524 if len(text)<2:
526 525 return u'', ()
527 526 maybe_slash = text[-2]
528 527 if maybe_slash != '\\':
529 528 return u'', ()
530 529
531 530
532 531 char = text[-1]
533 532 # no expand on quote for completion in strings.
534 533 # nor backcomplete standard ascii keys
535 534 if char in string.ascii_letters or char in ['"',"'"]:
536 535 return u'', ()
537 536 try :
538 537 latex = reverse_latex_symbol[char]
539 538 # '\\' replace the \ as well
540 539 return '\\'+char,[latex]
541 540 except KeyError:
542 541 pass
543 542 return u'', ()
544 543
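For instance, with the two back-matching helpers above:

    print(back_unicode_name_matches('print("\\☃'))  # -> ('\\☃', ['\\SNOWMAN'])
    print(back_latex_name_matches('x = \\α'))       # -> ('\\α', ['\\alpha'])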
545 544
546 545 class IPCompleter(Completer):
547 546 """Extension of the completer class with IPython-specific features"""
548 547
549 548 @observe('greedy')
550 549 def _greedy_changed(self, change):
551 550 """update the splitter and readline delims when greedy is changed"""
552 551 if change['new']:
553 552 self.splitter.delims = GREEDY_DELIMS
554 553 else:
555 554 self.splitter.delims = DELIMS
556 555
557 556 merge_completions = Bool(True,
558 557 help="""Whether to merge completion results into a single list
559 558
560 559 If False, only the completion results from the first non-empty
561 560 completer will be returned.
562 561 """
563 562 ).tag(config=True)
564 563 omit__names = Enum((0,1,2), default_value=2,
565 564 help="""Instruct the completer to omit private method names
566 565
567 566 Specifically, when completing on ``object.<tab>``.
568 567
569 568 When 2 [default]: all names that start with '_' will be excluded.
570 569
571 570 When 1: all 'magic' names (``__foo__``) will be excluded.
572 571
573 572 When 0: nothing will be excluded.
574 573 """
575 574 ).tag(config=True)
576 575 limit_to__all__ = Bool(False,
577 576 help="""
578 577 DEPRECATED as of version 5.0.
579 578
580 579 Instruct the completer to use __all__ for the completion
581 580
582 581 Specifically, when completing on ``object.<tab>``.
583 582
584 583 When True: only those names in obj.__all__ will be included.
585 584
586 585 When False [default]: the __all__ attribute is ignored
587 586 """,
588 587 ).tag(config=True)
589 588
590 589 def __init__(self, shell=None, namespace=None, global_namespace=None,
591 590 use_readline=False, config=None, **kwargs):
592 591 """IPCompleter() -> completer
593 592
594 593 Return a completer object suitable for use by the readline library
595 594 via readline.set_completer().
596 595
597 596 Inputs:
598 597
599 598 - shell: a pointer to the ipython shell itself. This is needed
600 599 because this completer knows about magic functions, and those can
601 600 only be accessed via the ipython instance.
602 601
603 602 - namespace: an optional dict where completions are performed.
604 603
605 604 - global_namespace: secondary optional dict for completions, to
606 605 handle cases (such as IPython embedded inside functions) where
607 606 both Python scopes are visible.
608 607
609 608 use_readline : bool, optional
610 609 DEPRECATED, ignored.
611 610 """
612 611
613 612 self.magic_escape = ESC_MAGIC
614 613 self.splitter = CompletionSplitter()
615 614
616 615 if use_readline:
617 616 warnings.warn('The use_readline parameter is deprecated and ignored since IPython 6.0.',
618 617 DeprecationWarning, stacklevel=2)
619 618
620 619 # _greedy_changed() depends on splitter and readline being defined:
621 620 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
622 621 config=config, **kwargs)
623 622
624 623 # List where completion matches will be stored
625 624 self.matches = []
626 625 self.shell = shell
627 626 # Regexp to split filenames with spaces in them
628 627 self.space_name_re = re.compile(r'([^\\] )')
629 628 # Hold a local ref. to glob.glob for speed
630 629 self.glob = glob.glob
631 630
632 631 # Determine if we are running on 'dumb' terminals, like (X)Emacs
633 632 # buffers, to avoid completion problems.
634 633 term = os.environ.get('TERM','xterm')
635 634 self.dumb_terminal = term in ['dumb','emacs']
636 635
637 636 # Special handling of backslashes needed in win32 platforms
638 637 if sys.platform == "win32":
639 638 self.clean_glob = self._clean_glob_win32
640 639 else:
641 640 self.clean_glob = self._clean_glob
642 641
643 642 #regexp to parse docstring for function signature
644 643 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
645 644 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
646 645 #use this if positional argument name is also needed
647 646 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
648 647
649 648 # All active matcher routines for completion
650 649 self.matchers = [
651 650 self.python_matches,
652 651 self.file_matches,
653 652 self.magic_matches,
654 653 self.python_func_kw_matches,
655 654 self.dict_key_matches,
656 655 ]
657 656
658 657 # This is set externally by InteractiveShell
659 658 self.custom_completers = None
660 659
661 660 def all_completions(self, text):
662 661 """
663 662 Wrapper around the complete method for the benefit of emacs.
664 663 """
665 664 return self.complete(text)[1]
666 665
667 666 def _clean_glob(self, text):
668 667 return self.glob("%s*" % text)
669 668
670 669 def _clean_glob_win32(self,text):
671 670 return [f.replace("\\","/")
672 671 for f in self.glob("%s*" % text)]
673 672
674 673 def file_matches(self, text):
675 674 """Match filenames, expanding ~USER type strings.
676 675
677 676 Most of the seemingly convoluted logic in this completer is an
678 677 attempt to handle filenames with spaces in them. And yet it's not
679 678 quite perfect, because Python's readline doesn't expose all of the
680 679 GNU readline details needed for this to be done correctly.
681 680
682 681 For a filename with a space in it, the printed completions will be
683 682 only the parts after what's already been typed (instead of the
684 683 full completions, as is normally done). I don't think with the
685 684 current (as of Python 2.3) Python readline it's possible to do
686 685 better."""
687 686
688 687 # chars that require escaping with backslash - i.e. chars
689 688 # that readline treats incorrectly as delimiters, but we
690 689 # don't want to treat as delimiters in filename matching
691 690 # when escaped with backslash
692 691 if text.startswith('!'):
693 692 text = text[1:]
694 693 text_prefix = u'!'
695 694 else:
696 695 text_prefix = u''
697 696
698 697 text_until_cursor = self.text_until_cursor
699 698 # track strings with open quotes
700 699 open_quotes = has_open_quotes(text_until_cursor)
701 700
702 701 if '(' in text_until_cursor or '[' in text_until_cursor:
703 702 lsplit = text
704 703 else:
705 704 try:
706 705 # arg_split ~ shlex.split, but with unicode bugs fixed by us
707 706 lsplit = arg_split(text_until_cursor)[-1]
708 707 except ValueError:
709 708 # typically an unmatched ", or backslash without escaped char.
710 709 if open_quotes:
711 710 lsplit = text_until_cursor.split(open_quotes)[-1]
712 711 else:
713 712 return []
714 713 except IndexError:
715 714 # tab pressed on empty line
716 715 lsplit = ""
717 716
718 717 if not open_quotes and lsplit != protect_filename(lsplit):
719 718 # if protectables are found, do matching on the whole escaped name
720 719 has_protectables = True
721 720 text0,text = text,lsplit
722 721 else:
723 722 has_protectables = False
724 723 text = os.path.expanduser(text)
725 724
726 725 if text == "":
727 726 return [text_prefix + cast_unicode_py2(protect_filename(f)) for f in self.glob("*")]
728 727
729 728 # Compute the matches from the filesystem
730 729 if sys.platform == 'win32':
731 730 m0 = self.clean_glob(text)
732 731 else:
733 732 m0 = self.clean_glob(text.replace('\\', ''))
734 733
735 734 if has_protectables:
736 735 # If we had protectables, we need to revert our changes to the
737 736 # beginning of filename so that we don't double-write the part
738 737 # of the filename we have so far
739 738 len_lsplit = len(lsplit)
740 739 matches = [text_prefix + text0 +
741 740 protect_filename(f[len_lsplit:]) for f in m0]
742 741 else:
743 742 if open_quotes:
744 743 # if we have a string with an open quote, we don't need to
745 744 # protect the names at all (and we _shouldn't_, as it
746 745 # would cause bugs when the filesystem call is made).
747 746 matches = m0
748 747 else:
749 748 matches = [text_prefix +
750 749 protect_filename(f) for f in m0]
751 750
752 751 # Mark directories in input list by appending '/' to their names.
753 752 return [cast_unicode_py2(x+'/') if os.path.isdir(x) else x for x in matches]
754 753
755 754 def magic_matches(self, text):
756 755 """Match magics"""
757 756 # Get all shell magics now rather than statically, so magics loaded at
758 757 # runtime show up too.
759 758 lsm = self.shell.magics_manager.lsmagic()
760 759 line_magics = lsm['line']
761 760 cell_magics = lsm['cell']
762 761 pre = self.magic_escape
763 762 pre2 = pre+pre
764 763
765 764 # Completion logic:
766 765 # - user gives %%: only do cell magics
767 766 # - user gives %: do both line and cell magics
768 767 # - no prefix: do both
769 768 # In other words, line magics are skipped if the user gives %% explicitly
770 769 bare_text = text.lstrip(pre)
771 770 comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
772 771 if not text.startswith(pre2):
773 772 comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
774 773 return [cast_unicode_py2(c) for c in comp]
775 774
776 775
777 776 def python_matches(self, text):
778 777 """Match attributes or global python names"""
779 778 if "." in text:
780 779 try:
781 780 matches = self.attr_matches(text)
782 781 if text.endswith('.') and self.omit__names:
783 782 if self.omit__names == 1:
784 783 # true if txt is _not_ a __ name, false otherwise:
785 784 no__name = (lambda txt:
786 785 re.match(r'.*\.__.*?__',txt) is None)
787 786 else:
788 787 # true if txt is _not_ a _ name, false otherwise:
789 788 no__name = (lambda txt:
790 789 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
791 790 matches = filter(no__name, matches)
792 791 except NameError:
793 792 # catches <undefined attributes>.<tab>
794 793 matches = []
795 794 else:
796 795 matches = self.global_matches(text)
797 796 return matches
798 797
799 798 def _default_arguments_from_docstring(self, doc):
800 799 """Parse the first line of docstring for call signature.
801 800
802 801 Docstring should be of the form 'min(iterable[, key=func])\n'.
803 802 It can also parse a cython docstring of the form
804 803 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
805 804 """
806 805 if doc is None:
807 806 return []
808 807
809 808 # care only about the first line
810 809 line = doc.lstrip().splitlines()[0]
811 810
812 811 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
813 812 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
814 813 sig = self.docstring_sig_re.search(line)
815 814 if sig is None:
816 815 return []
817 816 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
818 817 sig = sig.groups()[0].split(',')
819 818 ret = []
820 819 for s in sig:
821 820 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
822 821 ret += self.docstring_kwd_re.findall(s)
823 822 return ret
824 823
825 824 def _default_arguments(self, obj):
826 825 """Return the list of default arguments of obj if it is callable,
827 826 or empty list otherwise."""
828 827 call_obj = obj
829 828 ret = []
830 829 if inspect.isbuiltin(obj):
831 830 pass
832 831 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
833 832 if inspect.isclass(obj):
834 833 # for cython embedsignature=True the constructor docstring
835 834 # belongs to the object itself, not __init__
836 835 ret += self._default_arguments_from_docstring(
837 836 getattr(obj, '__doc__', ''))
838 837 # for classes, check for __init__,__new__
839 838 call_obj = (getattr(obj, '__init__', None) or
840 839 getattr(obj, '__new__', None))
841 840 # for all others, check if they are __call__able
842 841 elif hasattr(obj, '__call__'):
843 842 call_obj = obj.__call__
844 843 ret += self._default_arguments_from_docstring(
845 844 getattr(call_obj, '__doc__', ''))
846 845
847 846 if PY3:
848 847 _keeps = (inspect.Parameter.KEYWORD_ONLY,
849 848 inspect.Parameter.POSITIONAL_OR_KEYWORD)
850 849 signature = inspect.signature
851 850 else:
852 851 import IPython.utils.signatures
853 852 _keeps = (IPython.utils.signatures.Parameter.KEYWORD_ONLY,
854 853 IPython.utils.signatures.Parameter.POSITIONAL_OR_KEYWORD)
855 854 signature = IPython.utils.signatures.signature
856 855
857 856 try:
858 857 sig = signature(call_obj)
859 858 ret.extend(k for k, v in sig.parameters.items() if
860 859 v.kind in _keeps)
861 860 except ValueError:
862 861 pass
863 862
864 863 return list(set(ret))
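The signature-based branch boils down to the following stdlib pattern (a standalone sketch, separate from the docstring parsing above):

    import inspect

    def completable_kwargs(func):
        """Parameter names a completer could offer as 'name=' for func."""
        keep = (inspect.Parameter.KEYWORD_ONLY,
                inspect.Parameter.POSITIONAL_OR_KEYWORD)
        return [name for name, p in inspect.signature(func).parameters.items()
                if p.kind in keep]

    def example(a, b=1, *args, c=2, **kwargs):
        pass

    print(completable_kwargs(example))  # -> ['a', 'b', 'c']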
865 864
866 865 def python_func_kw_matches(self,text):
867 866 """Match named parameters (kwargs) of the last open function"""
868 867
869 868 if "." in text: # a parameter cannot be dotted
870 869 return []
871 870 try: regexp = self.__funcParamsRegex
872 871 except AttributeError:
873 872 regexp = self.__funcParamsRegex = re.compile(r'''
874 873 '.*?(?<!\\)' | # single quoted strings or
875 874 ".*?(?<!\\)" | # double quoted strings or
876 875 \w+ | # identifier
877 876 \S # other characters
878 877 ''', re.VERBOSE | re.DOTALL)
879 878 # 1. find the nearest identifier that comes before an unclosed
880 879 # parenthesis before the cursor
881 880 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
882 881 tokens = regexp.findall(self.text_until_cursor)
883 882 iterTokens = reversed(tokens); openPar = 0
884 883
885 884 for token in iterTokens:
886 885 if token == ')':
887 886 openPar -= 1
888 887 elif token == '(':
889 888 openPar += 1
890 889 if openPar > 0:
891 890 # found the last unclosed parenthesis
892 891 break
893 892 else:
894 893 return []
895 894 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
896 895 ids = []
897 896 isId = re.compile(r'\w+$').match
898 897
899 898 while True:
900 899 try:
901 900 ids.append(next(iterTokens))
902 901 if not isId(ids[-1]):
903 902 ids.pop(); break
904 903 if not next(iterTokens) == '.':
905 904 break
906 905 except StopIteration:
907 906 break
908 907
909 908 # Find all named arguments already assigned to, so as to avoid suggesting
910 909 # them again
911 910 usedNamedArgs = set()
912 911 par_level = -1
913 912 for token, next_token in zip(tokens, tokens[1:]):
914 913 if token == '(':
915 914 par_level += 1
916 915 elif token == ')':
917 916 par_level -= 1
918 917
919 918 if par_level != 0:
920 919 continue
921 920
922 921 if next_token != '=':
923 922 continue
924 923
925 924 usedNamedArgs.add(token)
926 925
927 926 # lookup the candidate callable matches either using global_matches
928 927 # or attr_matches for dotted names
929 928 if len(ids) == 1:
930 929 callableMatches = self.global_matches(ids[0])
931 930 else:
932 931 callableMatches = self.attr_matches('.'.join(ids[::-1]))
933 932 argMatches = []
934 933 for callableMatch in callableMatches:
935 934 try:
936 935 namedArgs = self._default_arguments(eval(callableMatch,
937 936 self.namespace))
938 937 except:
939 938 continue
940 939
941 940 # Remove used named arguments from the list, no need to show twice
942 941 for namedArg in set(namedArgs) - usedNamedArgs:
943 942 if namedArg.startswith(text):
944 943 argMatches.append(u"%s=" %namedArg)
945 944 return argMatches
946 945
947 946 def dict_key_matches(self, text):
948 947 "Match string keys in a dictionary, after e.g. 'foo[' "
949 948 def get_keys(obj):
950 949 # Objects can define their own completions by defining an
951 950 # _ipython_key_completions_() method.
952 951 method = get_real_method(obj, '_ipython_key_completions_')
953 952 if method is not None:
954 953 return method()
955 954
956 955 # Special case some common in-memory dict-like types
957 956 if isinstance(obj, dict) or\
958 957 _safe_isinstance(obj, 'pandas', 'DataFrame'):
959 958 try:
960 959 return list(obj.keys())
961 960 except Exception:
962 961 return []
963 962 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
964 963 _safe_isinstance(obj, 'numpy', 'void'):
965 964 return obj.dtype.names or []
966 965 return []
967 966
968 967 try:
969 968 regexps = self.__dict_key_regexps
970 969 except AttributeError:
971 970 dict_key_re_fmt = r'''(?x)
972 971 ( # match dict-referring expression wrt greedy setting
973 972 %s
974 973 )
975 974 \[ # open bracket
976 975 \s* # and optional whitespace
977 976 ([uUbB]? # string prefix (r not handled)
978 977 (?: # unclosed string
979 978 '(?:[^']|(?<!\\)\\')*
980 979 |
981 980 "(?:[^"]|(?<!\\)\\")*
982 981 )
983 982 )?
984 983 $
985 984 '''
986 985 regexps = self.__dict_key_regexps = {
987 986 False: re.compile(dict_key_re_fmt % '''
988 987 # identifiers separated by .
989 988 (?!\d)\w+
990 989 (?:\.(?!\d)\w+)*
991 990 '''),
992 991 True: re.compile(dict_key_re_fmt % '''
993 992 .+
994 993 ''')
995 994 }
996 995
997 996 match = regexps[self.greedy].search(self.text_until_cursor)
998 997 if match is None:
999 998 return []
1000 999
1001 1000 expr, prefix = match.groups()
1002 1001 try:
1003 1002 obj = eval(expr, self.namespace)
1004 1003 except Exception:
1005 1004 try:
1006 1005 obj = eval(expr, self.global_namespace)
1007 1006 except Exception:
1008 1007 return []
1009 1008
1010 1009 keys = get_keys(obj)
1011 1010 if not keys:
1012 1011 return keys
1013 1012 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
1014 1013 if not matches:
1015 1014 return matches
1016 1015
1017 1016 # get the cursor position of
1018 1017 # - the text being completed
1019 1018 # - the start of the key text
1020 1019 # - the start of the completion
1021 1020 text_start = len(self.text_until_cursor) - len(text)
1022 1021 if prefix:
1023 1022 key_start = match.start(2)
1024 1023 completion_start = key_start + token_offset
1025 1024 else:
1026 1025 key_start = completion_start = match.end()
1027 1026
1028 1027 # grab the leading prefix, to make sure all completions start with `text`
1029 1028 if text_start > key_start:
1030 1029 leading = ''
1031 1030 else:
1032 1031 leading = text[text_start:completion_start]
1033 1032
1034 1033 # the index of the `[` character
1035 1034 bracket_idx = match.end(1)
1036 1035
1037 1036 # append closing quote and bracket as appropriate
1038 1037 # this is *not* appropriate if the opening quote or bracket is outside
1039 1038 # the text given to this method
1040 1039 suf = ''
1041 1040 continuation = self.line_buffer[len(self.text_until_cursor):]
1042 1041 if key_start > text_start and closing_quote:
1043 1042 # quotes were opened inside text, maybe close them
1044 1043 if continuation.startswith(closing_quote):
1045 1044 continuation = continuation[len(closing_quote):]
1046 1045 else:
1047 1046 suf += closing_quote
1048 1047 if bracket_idx > text_start:
1049 1048 # brackets were opened inside text, maybe close them
1050 1049 if not continuation.startswith(']'):
1051 1050 suf += ']'
1052 1051
1053 1052 return [leading + k + suf for k in matches]
1054 1053
1055 1054 def unicode_name_matches(self, text):
1056 1055 u"""Match Latex-like syntax for unicode characters based
1057 1056 on the name of the character.
1058 1057
1059 1058 This does \\GREEK SMALL LETTER ETA -> η
1060 1059
1061 1060 Works only on valid python 3 identifiers, or on combining characters that
1062 1061 will combine to form a valid identifier.
1063 1062
1064 1063 Used on Python 3 only.
1065 1064 """
1066 1065 slashpos = text.rfind('\\')
1067 1066 if slashpos > -1:
1068 1067 s = text[slashpos+1:]
1069 1068 try :
1070 1069 unic = unicodedata.lookup(s)
1071 1070 # allow combining chars
1072 1071 if ('a'+unic).isidentifier():
1073 1072 return '\\'+s,[unic]
1074 1073 except KeyError:
1075 1074 pass
1076 1075 return u'', []
1077 1076
1078 1077
1079 1078
1080 1079
1081 1080 def latex_matches(self, text):
1082 1081 u"""Match Latex syntax for unicode characters.
1083 1082
1084 1083 This does both \\alp -> \\alpha and \\alpha -> α
1085 1084
1086 1085 Used on Python 3 only.
1087 1086 """
1088 1087 slashpos = text.rfind('\\')
1089 1088 if slashpos > -1:
1090 1089 s = text[slashpos:]
1091 1090 if s in latex_symbols:
1092 1091 # Try to complete a full latex symbol to unicode
1093 1092 # \\alpha -> α
1094 1093 return s, [latex_symbols[s]]
1095 1094 else:
1096 1095 # If a user has partially typed a latex symbol, give them
1097 1096 # a full list of options \al -> [\aleph, \alpha]
1098 1097 matches = [k for k in latex_symbols if k.startswith(s)]
1099 1098 return s, matches
1100 1099 return u'', []
1101 1100
1102 1101 def dispatch_custom_completer(self, text):
1103 1102 if not self.custom_completers:
1104 1103 return
1105 1104
1106 1105 line = self.line_buffer
1107 1106 if not line.strip():
1108 1107 return None
1109 1108
1110 1109 # Create a little structure to pass all the relevant information about
1111 1110 # the current completion to any custom completer.
1112 1111 event = Bunch()
1113 1112 event.line = line
1114 1113 event.symbol = text
1115 1114 cmd = line.split(None,1)[0]
1116 1115 event.command = cmd
1117 1116 event.text_until_cursor = self.text_until_cursor
1118 1117
1119 1118 # for foo etc, try also to find completer for %foo
1120 1119 if not cmd.startswith(self.magic_escape):
1121 1120 try_magic = self.custom_completers.s_matches(
1122 1121 self.magic_escape + cmd)
1123 1122 else:
1124 1123 try_magic = []
1125 1124
1126 1125 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1127 1126 try_magic,
1128 1127 self.custom_completers.flat_matches(self.text_until_cursor)):
1129 1128 try:
1130 1129 res = c(event)
1131 1130 if res:
1132 1131 # first, try case sensitive match
1133 1132 withcase = [cast_unicode_py2(r) for r in res if r.startswith(text)]
1134 1133 if withcase:
1135 1134 return withcase
1136 1135 # if none, then case insensitive ones are ok too
1137 1136 text_low = text.lower()
1138 1137 return [cast_unicode_py2(r) for r in res if r.lower().startswith(text_low)]
1139 1138 except TryNext:
1140 1139 pass
1141 1140
1142 1141 return None
1143 1142
1144 1143 @_strip_single_trailing_space
1145 1144 def complete(self, text=None, line_buffer=None, cursor_pos=None):
1146 1145 """Find completions for the given text and line context.
1147 1146
1148 1147 Note that both the text and the line_buffer are optional, but at least
1149 1148 one of them must be given.
1150 1149
1151 1150 Parameters
1152 1151 ----------
1153 1152 text : string, optional
1154 1153 Text to perform the completion on. If not given, the line buffer
1155 1154 is split using the instance's CompletionSplitter object.
1156 1155
1157 1156 line_buffer : string, optional
1158 1157 If not given, the completer attempts to obtain the current line
1159 1158 buffer via readline. This keyword allows clients which are
1160 1159 requesting text completions in non-readline contexts to inform
1161 1160 the completer of the entire text.
1162 1161
1163 1162 cursor_pos : int, optional
1164 1163 Index of the cursor in the full line buffer. Should be provided by
1165 1164 remote frontends where the kernel has no access to frontend state.
1166 1165
1167 1166 Returns
1168 1167 -------
1169 1168 text : str
1170 1169 Text that was actually used in the completion.
1171 1170
1172 1171 matches : list
1173 1172 A list of completion matches.
1174 1173 """
1175 1174 # if the cursor position isn't given, the only sane assumption we can
1176 1175 # make is that it's at the end of the line (the common case)
1177 1176 if cursor_pos is None:
1178 1177 cursor_pos = len(line_buffer) if text is None else len(text)
1179 1178
1180 1179 if self.use_main_ns:
1181 1180 self.namespace = __main__.__dict__
1182 1181
1183 1182 if PY3:
1184 1183
1185 1184 base_text = text if not line_buffer else line_buffer[:cursor_pos]
1186 1185 latex_text, latex_matches = self.latex_matches(base_text)
1187 1186 if latex_matches:
1188 1187 return latex_text, latex_matches
1189 1188 name_text = ''
1190 1189 name_matches = []
1191 1190 for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
1192 1191 name_text, name_matches = meth(base_text)
1193 1192 if name_text:
1194 1193 return name_text, name_matches
1195 1194
1196 1195 # if text is either None or an empty string, rely on the line buffer
1197 1196 if not text:
1198 1197 text = self.splitter.split_line(line_buffer, cursor_pos)
1199 1198
1200 1199 # If no line buffer is given, assume the input text is all there was
1201 1200 if line_buffer is None:
1202 1201 line_buffer = text
1203 1202
1204 1203 self.line_buffer = line_buffer
1205 1204 self.text_until_cursor = self.line_buffer[:cursor_pos]
1206 1205
1207 1206 # Start with a clean slate of completions
1208 1207 self.matches[:] = []
1209 1208 custom_res = self.dispatch_custom_completer(text)
1210 1209 if custom_res is not None:
1211 1210 # did custom completers produce something?
1212 1211 self.matches = custom_res
1213 1212 else:
1214 1213 # Extend the list of completions with the results of each
1215 1214 # matcher, so we return results to the user from all
1216 1215 # namespaces.
1217 1216 if self.merge_completions:
1218 1217 self.matches = []
1219 1218 for matcher in self.matchers:
1220 1219 try:
1221 1220 self.matches.extend(matcher(text))
1222 1221 except:
1223 1222 # Show the ugly traceback if the matcher causes an
1224 1223 # exception, but do NOT crash the kernel!
1225 1224 sys.excepthook(*sys.exc_info())
1226 1225 else:
1227 1226 for matcher in self.matchers:
1228 1227 self.matches = matcher(text)
1229 1228 if self.matches:
1230 1229 break
1231 1230 # FIXME: we should extend our api to return a dict with completions for
1232 1231 # different types of objects. The rlcomplete() method could then
1233 1232 # simply collapse the dict into a list for readline, but we'd have
1234 1233 # richer completion semantics in other environments.
1235 1234 self.matches = sorted(set(self.matches), key=completions_sorting_key)
1236 1235
1237 1236 return text, self.matches
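In a running session the whole pipeline is reached through the shell's completer attribute, as in this hedged sketch (outside IPython, get_ipython() returns None):

    from IPython import get_ipython

    ip = get_ipython()
    if ip is not None:
        text, matches = ip.Completer.complete(line_buffer='import o', cursor_pos=8)
        print(text, matches[:5])  # e.g. 'o' plus module names starting with 'o'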
@@ -1,349 +1,348 b''
1 1 # encoding: utf-8
2 2 """Implementations for various useful completers.
3 3
4 4 These are all loaded by default by IPython.
5 5 """
6 6 #-----------------------------------------------------------------------------
7 7 # Copyright (C) 2010-2011 The IPython Development Team.
8 8 #
9 9 # Distributed under the terms of the BSD License.
10 10 #
11 11 # The full license is in the file COPYING.txt, distributed with this software.
12 12 #-----------------------------------------------------------------------------
13 13
14 14 #-----------------------------------------------------------------------------
15 15 # Imports
16 16 #-----------------------------------------------------------------------------
17 from __future__ import print_function
18 17
19 18 # Stdlib imports
20 19 import glob
21 20 import inspect
22 21 import os
23 22 import re
24 23 import sys
25 24 from importlib import import_module
26 25
27 26 try:
28 27 # Python >= 3.3
29 28 from importlib.machinery import all_suffixes
30 29 _suffixes = all_suffixes()
31 30 except ImportError:
32 31 from imp import get_suffixes
33 32 _suffixes = [ s[0] for s in get_suffixes() ]
34 33
35 34 # Third-party imports
36 35 from time import time
37 36 from zipimport import zipimporter
38 37
39 38 # Our own imports
40 39 from IPython.core.completer import expand_user, compress_user
41 40 from IPython.core.error import TryNext
42 41 from IPython.utils._process_common import arg_split
43 42 from IPython.utils.py3compat import string_types
44 43
45 44 # FIXME: this should be pulled in with the right call via the component system
46 45 from IPython import get_ipython
47 46
48 47 #-----------------------------------------------------------------------------
49 48 # Globals and constants
50 49 #-----------------------------------------------------------------------------
51 50
52 51 # Time in seconds after which the rootmodules will be stored permanently in the
53 52 # ipython ip.db database (kept in the user's .ipython dir).
54 53 TIMEOUT_STORAGE = 2
55 54
56 55 # Time in seconds after which we give up
57 56 TIMEOUT_GIVEUP = 20
58 57
59 58 # Regular expression for the python import statement
60 59 import_re = re.compile(r'(?P<name>[a-zA-Z_][a-zA-Z0-9_]*?)'
61 60 r'(?P<package>[/\\]__init__)?'
62 61 r'(?P<suffix>%s)$' %
63 62 r'|'.join(re.escape(s) for s in _suffixes))
64 63
65 64 # RE for the ipython %run command (python + ipython scripts)
66 65 magic_run_re = re.compile(r'.*(\.ipy|\.ipynb|\.py[w]?)$')
67 66
68 67 #-----------------------------------------------------------------------------
69 68 # Local utilities
70 69 #-----------------------------------------------------------------------------
71 70
72 71 def module_list(path):
73 72 """
74 73 Return the list containing the names of the modules available in the given
75 74 folder.
76 75 """
77 76 # sys.path has the cwd as an empty string, but isdir/listdir need it as '.'
78 77 if path == '':
79 78 path = '.'
80 79
81 80 # A few local constants to be used in loops below
82 81 pjoin = os.path.join
83 82
84 83 if os.path.isdir(path):
85 84 # Build a list of all files in the directory and all files
86 85 # in its subdirectories. For performance reasons, do not
87 86 # recurse more than one level into subdirectories.
88 87 files = []
89 88 for root, dirs, nondirs in os.walk(path, followlinks=True):
90 89 subdir = root[len(path)+1:]
91 90 if subdir:
92 91 files.extend(pjoin(subdir, f) for f in nondirs)
93 92 dirs[:] = [] # Do not recurse into additional subdirectories.
94 93 else:
95 94 files.extend(nondirs)
96 95
97 96 else:
98 97 try:
99 98 files = list(zipimporter(path)._files.keys())
100 99 except:
101 100 files = []
102 101
103 102 # Build a list of modules which match the import_re regex.
104 103 modules = []
105 104 for f in files:
106 105 m = import_re.match(f)
107 106 if m:
108 107 modules.append(m.group('name'))
109 108 return list(set(modules))
110 109
111 110
112 111 def get_root_modules():
113 112 """
114 113 Returns a list containing the names of all the modules available in the
115 114 folders of the pythonpath.
116 115
117 116 ip.db['rootmodules_cache'] maps sys.path entries to list of modules.
118 117 """
119 118 ip = get_ipython()
120 119 rootmodules_cache = ip.db.get('rootmodules_cache', {})
121 120 rootmodules = list(sys.builtin_module_names)
122 121 start_time = time()
123 122 store = False
124 123 for path in sys.path:
125 124 try:
126 125 modules = rootmodules_cache[path]
127 126 except KeyError:
128 127 modules = module_list(path)
129 128 try:
130 129 modules.remove('__init__')
131 130 except ValueError:
132 131 pass
133 132 if path not in ('', '.'): # cwd modules should not be cached
134 133 rootmodules_cache[path] = modules
135 134 if time() - start_time > TIMEOUT_STORAGE and not store:
136 135 store = True
137 136 print("\nCaching the list of root modules, please wait!")
138 137 print("(This will only be done once - type '%rehashx' to "
139 138 "reset cache!)\n")
140 139 sys.stdout.flush()
141 140 if time() - start_time > TIMEOUT_GIVEUP:
142 141 print("This is taking too long, we give up.\n")
143 142 return []
144 143 rootmodules.extend(modules)
145 144 if store:
146 145 ip.db['rootmodules_cache'] = rootmodules_cache
147 146 rootmodules = list(set(rootmodules))
148 147 return rootmodules
149 148
150 149
151 150 def is_importable(module, attr, only_modules):
152 151 if only_modules:
153 152 return inspect.ismodule(getattr(module, attr))
154 153 else:
155 154 return not(attr[:2] == '__' and attr[-2:] == '__')
156 155
157 156 def try_import(mod, only_modules=False):
158 157 try:
159 158 m = import_module(mod)
160 159 except:
161 160 return []
162 161
163 162 m_is_init = hasattr(m, '__file__') and '__init__' in m.__file__
164 163
165 164 completions = []
166 165 if (not hasattr(m, '__file__')) or (not only_modules) or m_is_init:
167 166 completions.extend( [attr for attr in dir(m) if
168 167 is_importable(m, attr, only_modules)])
169 168
170 169 completions.extend(getattr(m, '__all__', []))
171 170 if m_is_init:
172 171 completions.extend(module_list(os.path.dirname(m.__file__)))
173 172 completions = {c for c in completions if isinstance(c, string_types)}
174 173 completions.discard('__init__')
175 174 return list(completions)
176 175
177 176
178 177 #-----------------------------------------------------------------------------
179 178 # Completion-related functions.
180 179 #-----------------------------------------------------------------------------
181 180
182 181 def quick_completer(cmd, completions):
183 182 """ Easily create a trivial completer for a command.
184 183
185 184 Takes either a list of completions, or a single string of completions (which
186 185 will be split on whitespace).
187 186
188 187 Example::
189 188
190 189 [d:\ipython]|1> import ipy_completers
191 190 [d:\ipython]|2> ipy_completers.quick_completer('foo', ['bar','baz'])
192 191 [d:\ipython]|3> foo b<TAB>
193 192 bar baz
194 193 [d:\ipython]|3> foo ba
195 194 """
196 195
197 196 if isinstance(completions, string_types):
198 197 completions = completions.split()
199 198
200 199 def do_complete(self, event):
201 200 return completions
202 201
203 202 get_ipython().set_hook('complete_command',do_complete, str_key = cmd)
204 203
205 204 def module_completion(line):
206 205 """
207 206 Returns a list containing the completion possibilities for an import line.
208 207
209 208 The line looks like this:
210 209 'import xml.d'
211 210 'from xml.dom import'
212 211 """
213 212
214 213 words = line.split(' ')
215 214 nwords = len(words)
216 215
217 216 # from whatever <tab> -> 'import '
218 217 if nwords == 3 and words[0] == 'from':
219 218 return ['import ']
220 219
221 220 # 'from xy<tab>' or 'import xy<tab>'
222 221 if nwords < 3 and (words[0] in {'%aimport', 'import', 'from'}) :
223 222 if nwords == 1:
224 223 return get_root_modules()
225 224 mod = words[1].split('.')
226 225 if len(mod) < 2:
227 226 return get_root_modules()
228 227 completion_list = try_import('.'.join(mod[:-1]), True)
229 228 return ['.'.join(mod[:-1] + [el]) for el in completion_list]
230 229
231 230 # 'from xyz import abc<tab>'
232 231 if nwords >= 3 and words[0] == 'from':
233 232 mod = words[1]
234 233 return try_import(mod)
235 234
236 235 #-----------------------------------------------------------------------------
237 236 # Completers
238 237 #-----------------------------------------------------------------------------
239 238 # These all have the func(self, event) signature to be used as custom
240 239 # completers
241 240
242 241 def module_completer(self,event):
243 242 """Give completions after user has typed 'import ...' or 'from ...'"""
244 243
245 244 # This works in all versions of python. While 2.5 has
246 245 # pkgutil.walk_packages(), that particular routine is fairly dangerous,
247 246 # since it imports *EVERYTHING* on sys.path. That is: a) very slow b) full
248 247 # of possibly problematic side effects.
249 248 # This searches the folders on sys.path for available modules.
250 249
251 250 return module_completion(event.line)
252 251
253 252 # FIXME: there's a lot of logic common to the run, cd and builtin file
254 253 # completers, that is currently reimplemented in each.
255 254
256 255 def magic_run_completer(self, event):
257 256 """Complete files that end in .py or .ipy or .ipynb for the %run command.
258 257 """
259 258 comps = arg_split(event.line, strict=False)
260 259 # relpath should be the current token that we need to complete.
261 260 if (len(comps) > 1) and (not event.line.endswith(' ')):
262 261 relpath = comps[-1].strip("'\"")
263 262 else:
264 263 relpath = ''
265 264
266 265 #print("\nev=", event) # dbg
267 266 #print("rp=", relpath) # dbg
268 267 #print('comps=', comps) # dbg
269 268
270 269 lglob = glob.glob
271 270 isdir = os.path.isdir
272 271 relpath, tilde_expand, tilde_val = expand_user(relpath)
273 272
274 273 # Find if the user has already typed the first filename, after which we
275 274 # should complete on all files, since after the first one other files may
276 275 # be arguments to the input script.
277 276
278 277 if any(magic_run_re.match(c) for c in comps):
279 278 matches = [f.replace('\\','/') + ('/' if isdir(f) else '')
280 279 for f in lglob(relpath+'*')]
281 280 else:
282 281 dirs = [f.replace('\\','/') + "/" for f in lglob(relpath+'*') if isdir(f)]
283 282 pys = [f.replace('\\','/')
284 283 for f in lglob(relpath+'*.py') + lglob(relpath+'*.ipy') +
285 284 lglob(relpath+'*.ipynb') + lglob(relpath + '*.pyw')]
286 285
287 286 matches = dirs + pys
288 287
289 288 #print('run comp:', dirs+pys) # dbg
290 289 return [compress_user(p, tilde_expand, tilde_val) for p in matches]
291 290
292 291
293 292 def cd_completer(self, event):
294 293 """Completer function for cd, which only returns directories."""
295 294 ip = get_ipython()
296 295 relpath = event.symbol
297 296
298 297 #print(event) # dbg
299 298 if event.line.endswith('-b') or ' -b ' in event.line:
300 299 # return only bookmark completions
301 300 bkms = self.db.get('bookmarks', None)
302 301 if bkms:
303 302 return bkms.keys()
304 303 else:
305 304 return []
306 305
307 306 if event.symbol == '-':
308 307 width_dh = str(len(str(len(ip.user_ns['_dh']) + 1)))
309 308 # jump in directory history by number
310 309 fmt = '-%0' + width_dh +'d [%s]'
311 310 ents = [ fmt % (i,s) for i,s in enumerate(ip.user_ns['_dh'])]
312 311 if len(ents) > 1:
313 312 return ents
314 313 return []
315 314
316 315 if event.symbol.startswith('--'):
317 316 return ["--" + os.path.basename(d) for d in ip.user_ns['_dh']]
318 317
319 318 # Expand ~ in path and normalize directory separators.
320 319 relpath, tilde_expand, tilde_val = expand_user(relpath)
321 320 relpath = relpath.replace('\\','/')
322 321
323 322 found = []
324 323 for d in [f.replace('\\','/') + '/' for f in glob.glob(relpath+'*')
325 324 if os.path.isdir(f)]:
326 325 if ' ' in d:
327 326 # we don't want to deal with any of that; the complex code
328 327 # for handling it lives elsewhere
329 328 raise TryNext
330 329
331 330 found.append(d)
332 331
333 332 if not found:
334 333 if os.path.isdir(relpath):
335 334 return [compress_user(relpath, tilde_expand, tilde_val)]
336 335
337 336 # if no completions so far, try bookmarks
338 337 bks = self.db.get('bookmarks',{})
339 338 bkmatches = [s for s in bks if s.startswith(event.symbol)]
340 339 if bkmatches:
341 340 return bkmatches
342 341
343 342 raise TryNext
344 343
345 344 return [compress_user(p, tilde_expand, tilde_val) for p in found]
346 345
347 346 def reset_completer(self, event):
348 347 "A completer for %reset magic"
349 348 return '-f -s in out array dhist'.split()
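# Editor's note: a hedged sketch of how completers with the func(self, event)
# signature above are typically registered; in practice this wiring happens
# inside InteractiveShell's completer setup, so treat the snippet as
# illustrative only.
ip = get_ipython()
ip.set_hook('complete_command', module_completer, str_key='import')
ip.set_hook('complete_command', magic_run_completer, str_key='%run')
ip.set_hook('complete_command', cd_completer, str_key='%cd')
ip.set_hook('complete_command', reset_completer, str_key='%reset')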
@@ -1,216 +1,215 b''
1 1 # encoding: utf-8
2 2 """sys.excepthook for IPython itself, leaves a detailed report on disk.
3 3
4 4 Authors:
5 5
6 6 * Fernando Perez
7 7 * Brian E. Granger
8 8 """
9 9
10 10 #-----------------------------------------------------------------------------
11 11 # Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
12 12 # Copyright (C) 2008-2011 The IPython Development Team
13 13 #
14 14 # Distributed under the terms of the BSD License. The full license is in
15 15 # the file COPYING, distributed as part of this software.
16 16 #-----------------------------------------------------------------------------
17 17
18 18 #-----------------------------------------------------------------------------
19 19 # Imports
20 20 #-----------------------------------------------------------------------------
21 from __future__ import print_function
22 21
23 22 import os
24 23 import sys
25 24 import traceback
26 25 from pprint import pformat
27 26
28 27 from IPython.core import ultratb
29 28 from IPython.core.release import author_email
30 29 from IPython.utils.sysinfo import sys_info
31 30 from IPython.utils.py3compat import input, getcwd
32 31
33 32 #-----------------------------------------------------------------------------
34 33 # Code
35 34 #-----------------------------------------------------------------------------
36 35
37 36 # Template for the user message.
38 37 _default_message_template = """\
39 38 Oops, {app_name} crashed. We do our best to make it stable, but...
40 39
41 40 A crash report was automatically generated with the following information:
42 41 - A verbatim copy of the crash traceback.
43 42 - A copy of your input history during this session.
44 43 - Data on your current {app_name} configuration.
45 44
46 45 It was left in the file named:
47 46 \t'{crash_report_fname}'
48 47 If you can email this file to the developers, the information in it will help
49 48 them in understanding and correcting the problem.
50 49
51 50 You can mail it to: {contact_name} at {contact_email}
52 51 with the subject '{app_name} Crash Report'.
53 52
54 53 If you want to do it now, the following command will work (under Unix):
55 54 mail -s '{app_name} Crash Report' {contact_email} < {crash_report_fname}
56 55
57 56 To ensure accurate tracking of this issue, please file a report about it at:
58 57 {bug_tracker}
59 58 """
60 59
61 60 _lite_message_template = """
62 61 If you suspect this is an IPython bug, please report it at:
63 62 https://github.com/ipython/ipython/issues
64 63 or send an email to the mailing list at {email}
65 64
66 65 You can print a more detailed traceback right now with "%tb", or use "%debug"
67 66 to interactively debug it.
68 67
69 68 Extra-detailed tracebacks for bug-reporting purposes can be enabled via:
70 69 {config}Application.verbose_crash=True
71 70 """
72 71
73 72
74 73 class CrashHandler(object):
75 74 """Customizable crash handlers for IPython applications.
76 75
77 76 Instances of this class provide a :meth:`__call__` method which can be
78 77 used as a ``sys.excepthook``. The :meth:`__call__` signature is::
79 78
80 79 def __call__(self, etype, evalue, etb)
81 80 """
82 81
83 82 message_template = _default_message_template
84 83 section_sep = '\n\n'+'*'*75+'\n\n'
85 84
86 85 def __init__(self, app, contact_name=None, contact_email=None,
87 86 bug_tracker=None, show_crash_traceback=True, call_pdb=False):
88 87 """Create a new crash handler
89 88
90 89 Parameters
91 90 ----------
92 91 app : Application
93 92 A running :class:`Application` instance, which will be queried at
94 93 crash time for internal information.
95 94
96 95 contact_name : str
97 96 A string with the name of the person to contact.
98 97
99 98 contact_email : str
100 99 A string with the email address of the contact.
101 100
102 101 bug_tracker : str
103 102 A string with the URL for your project's bug tracker.
104 103
105 104 show_crash_traceback : bool
106 105 If false, don't print the crash traceback on stderr, only generate
107 106 the on-disk report
108 107
109 108 Non-argument instance attributes:
110 109
111 110 These instances contain some non-argument attributes which allow for
112 111 further customization of the crash handler's behavior. Please see the
113 112 source for further details.
114 113 """
115 114 self.crash_report_fname = "Crash_report_%s.txt" % app.name
116 115 self.app = app
117 116 self.call_pdb = call_pdb
118 117 #self.call_pdb = True # dbg
119 118 self.show_crash_traceback = show_crash_traceback
120 119 self.info = dict(app_name = app.name,
121 120 contact_name = contact_name,
122 121 contact_email = contact_email,
123 122 bug_tracker = bug_tracker,
124 123 crash_report_fname = self.crash_report_fname)
125 124
126 125
127 126 def __call__(self, etype, evalue, etb):
128 127 """Handle an exception, call for compatible with sys.excepthook"""
129 128
130 129 # do not allow the crash handler to be called twice without reinstalling it
131 130 # this prevents unlikely errors in the crash handling from entering an
132 131 # infinite loop.
133 132 sys.excepthook = sys.__excepthook__
134 133
135 134 # Report tracebacks shouldn't use color in general (safer for users)
136 135 color_scheme = 'NoColor'
137 136
138 137 # Use this ONLY for developer debugging (keep commented out for release)
139 138 #color_scheme = 'Linux' # dbg
140 139 try:
141 140 rptdir = self.app.ipython_dir
142 141 except:
143 142 rptdir = getcwd()
144 143 if rptdir is None or not os.path.isdir(rptdir):
145 144 rptdir = getcwd()
146 145 report_name = os.path.join(rptdir,self.crash_report_fname)
147 146 # write the report filename into the instance dict so it can get
148 147 # properly expanded out in the user message template
149 148 self.crash_report_fname = report_name
150 149 self.info['crash_report_fname'] = report_name
151 150 TBhandler = ultratb.VerboseTB(
152 151 color_scheme=color_scheme,
153 152 long_header=1,
154 153 call_pdb=self.call_pdb,
155 154 )
156 155 if self.call_pdb:
157 156 TBhandler(etype,evalue,etb)
158 157 return
159 158 else:
160 159 traceback = TBhandler.text(etype,evalue,etb,context=31)
161 160
162 161 # print traceback to screen
163 162 if self.show_crash_traceback:
164 163 print(traceback, file=sys.stderr)
165 164
166 165 # and generate a complete report on disk
167 166 try:
168 167 report = open(report_name,'w')
169 168 except:
170 169 print('Could not create crash report on disk.', file=sys.stderr)
171 170 return
172 171
173 172 # Inform user on stderr of what happened
174 173 print('\n'+'*'*70+'\n', file=sys.stderr)
175 174 print(self.message_template.format(**self.info), file=sys.stderr)
176 175
177 176 # Construct report on disk
178 177 report.write(self.make_report(traceback))
179 178 report.close()
180 179 input("Hit <Enter> to quit (your terminal may close):")
181 180
182 181 def make_report(self,traceback):
183 182 """Return a string containing a crash report."""
184 183
185 184 sec_sep = self.section_sep
186 185
187 186 report = ['*'*75+'\n\n'+'IPython post-mortem report\n\n']
188 187 rpt_add = report.append
189 188 rpt_add(sys_info())
190 189
191 190 try:
192 191 config = pformat(self.app.config)
193 192 rpt_add(sec_sep)
194 193 rpt_add('Application name: %s\n\n' % self.app_name)
195 194 rpt_add('Current user configuration structure:\n\n')
196 195 rpt_add(config)
197 196 except:
198 197 pass
199 198 rpt_add(sec_sep+'Crash traceback:\n\n' + traceback)
200 199
201 200 return ''.join(report)
202 201
203 202
204 203 def crash_handler_lite(etype, evalue, tb):
205 204 """a light excepthook, adding a small message to the usual traceback"""
206 205 traceback.print_exception(etype, evalue, tb)
207 206
208 207 from IPython.core.interactiveshell import InteractiveShell
209 208 if InteractiveShell.initialized():
210 209 # we are in a Shell environment, give %magic example
211 210 config = "%config "
212 211 else:
213 212 # we are not in a shell, show generic config
214 213 config = "c."
215 214 print(_lite_message_template.format(email=author_email, config=config), file=sys.stderr)
216 215
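# Editor's note: a minimal sketch (an editor's assumption, not part of the
# diff) of installing the light-weight handler above as the process-wide
# exception hook outside of IPython's Application machinery.
import sys
from IPython.core.crashhandler import crash_handler_lite

sys.excepthook = crash_handler_lite
# Any uncaught exception now prints the normal traceback followed by the short
# bug-report notice rendered from _lite_message_template.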
@@ -1,631 +1,629 b''
1 1 # -*- coding: utf-8 -*-
2 2 """
3 3 Pdb debugger class.
4 4
5 5 Modified from the standard pdb.Pdb class to avoid including readline, so that
6 6 the command line completion of other programs which include this isn't
7 7 damaged.
8 8
9 9 In the future, this class will be expanded with improvements over the standard
10 10 pdb.
11 11
12 12 The code in this file is mainly lifted out of cmd.py in Python 2.2, with minor
13 13 changes. Licensing should therefore be under the standard Python terms. For
14 14 details on the PSF (Python Software Foundation) standard license, see:
15 15
16 16 http://www.python.org/2.2.3/license.html"""
17 17
18 18 #*****************************************************************************
19 19 #
20 20 # This file is licensed under the PSF license.
21 21 #
22 22 # Copyright (C) 2001 Python Software Foundation, www.python.org
23 23 # Copyright (C) 2005-2006 Fernando Perez. <fperez@colorado.edu>
24 24 #
25 25 #
26 26 #*****************************************************************************
27 from __future__ import print_function
28 27
29 28 import bdb
30 29 import functools
31 30 import inspect
32 31 import sys
33 32 import warnings
34 33
35 34 from IPython import get_ipython
36 35 from IPython.utils import PyColorize, ulinecache
37 36 from IPython.utils import coloransi, py3compat
38 37 from IPython.core.excolors import exception_colors
39 38 from IPython.testing.skipdoctest import skip_doctest
40 39
41 40
42 41 prompt = 'ipdb> '
43 42
44 43 # We have to check this directly from sys.argv, config struct not yet available
45 44 from pdb import Pdb as OldPdb
46 45
47 46 # Allow the set_trace code to operate outside of an ipython instance, even if
48 47 # it does so with some limitations. The rest of this support is implemented in
49 48 # the Tracer constructor.
50 49
51 50 def make_arrow(pad):
52 51 """generate the leading arrow in front of traceback or debugger"""
53 52 if pad >= 2:
54 53 return '-'*(pad-2) + '> '
55 54 elif pad == 1:
56 55 return '>'
57 56 return ''
58 57
59 58
60 59 def BdbQuit_excepthook(et, ev, tb, excepthook=None):
61 60 """Exception hook which handles `BdbQuit` exceptions.
62 61
63 62 All other exceptions are processed using the `excepthook`
64 63 parameter.
65 64 """
66 65 warnings.warn("`BdbQuit_excepthook` is deprecated since version 5.1",
67 66 DeprecationWarning)
68 67 if et==bdb.BdbQuit:
69 68 print('Exiting Debugger.')
70 69 elif excepthook is not None:
71 70 excepthook(et, ev, tb)
72 71 else:
73 72 # Backwards compatibility. Raise deprecation warning?
74 73 BdbQuit_excepthook.excepthook_ori(et,ev,tb)
75 74
76 75
77 76 def BdbQuit_IPython_excepthook(self,et,ev,tb,tb_offset=None):
78 77 warnings.warn(
79 78 "`BdbQuit_IPython_excepthook` is deprecated since version 5.1",
80 79 DeprecationWarning)
81 80 print('Exiting Debugger.')
82 81
83 82
84 83 class Tracer(object):
85 84 """
86 85 DEPRECATED
87 86
88 87 Class for local debugging, similar to pdb.set_trace.
89 88
90 89 Instances of this class, when called, behave like pdb.set_trace, but
91 90 providing IPython's enhanced capabilities.
92 91
93 92 This is implemented as a class which must be initialized in your own code
94 93 and not as a standalone function because we need to detect at runtime
95 94 whether IPython is already active or not. That detection is done in the
96 95 constructor, ensuring that this code plays nicely with a running IPython,
97 96 while functioning acceptably (though with limitations) if outside of it.
98 97 """
99 98
100 99 @skip_doctest
101 100 def __init__(self, colors=None):
102 101 """
103 102 DEPRECATED
104 103
105 104 Create a local debugger instance.
106 105
107 106 Parameters
108 107 ----------
109 108
110 109 colors : str, optional
111 110 The name of the color scheme to use, it must be one of IPython's
112 111 valid color schemes. If not given, the function will default to
113 112 the current IPython scheme when running inside IPython, and to
114 113 'NoColor' otherwise.
115 114
116 115 Examples
117 116 --------
118 117 ::
119 118
120 119 from IPython.core.debugger import Tracer; debug_here = Tracer()
121 120
122 121 Later in your code::
123 122
124 123 debug_here() # -> will open up the debugger at that point.
125 124
126 125 Once the debugger activates, you can use all of its regular commands to
127 126 step through code, set breakpoints, etc. See the pdb documentation
128 127 from the Python standard library for usage details.
129 128 """
130 129 warnings.warn("`Tracer` is deprecated since version 5.1, directly use "
131 130 "`IPython.core.debugger.Pdb.set_trace()`",
132 131 DeprecationWarning)
133 132
134 133 ip = get_ipython()
135 134 if ip is None:
136 135 # Outside of ipython, we set our own exception hook manually
137 136 sys.excepthook = functools.partial(BdbQuit_excepthook,
138 137 excepthook=sys.excepthook)
139 138 def_colors = 'NoColor'
140 139 else:
141 140 # In ipython, we use its custom exception handler mechanism
142 141 def_colors = ip.colors
143 142 ip.set_custom_exc((bdb.BdbQuit,), BdbQuit_IPython_excepthook)
144 143
145 144 if colors is None:
146 145 colors = def_colors
147 146
148 147 # The stdlib debugger internally uses a modified repr from the `repr`
149 148 # module, that limits the length of printed strings to a hardcoded
150 149 # limit of 30 characters. That much trimming is too aggressive, let's
151 150 # at least raise that limit to 80 chars, which should be enough for
152 151 # most interactive uses.
153 152 try:
154 153 try:
155 154 from reprlib import aRepr # Py 3
156 155 except ImportError:
157 156 from repr import aRepr # Py 2
158 157 aRepr.maxstring = 80
159 158 except:
160 159 # This is only a user-facing convenience, so any error we encounter
161 160 # here can be warned about but can be otherwise ignored. These
162 161 # printouts will tell us about problems if this API changes
163 162 import traceback
164 163 traceback.print_exc()
165 164
166 165 self.debugger = Pdb(colors)
167 166
168 167 def __call__(self):
169 168 """Starts an interactive debugger at the point where called.
170 169
171 170 This is similar to the pdb.set_trace() function from the std lib, but
172 171 using IPython's enhanced debugger."""
173 172
174 173 self.debugger.set_trace(sys._getframe().f_back)
175 174
176 175
177 176 def decorate_fn_with_doc(new_fn, old_fn, additional_text=""):
178 177 """Make new_fn have old_fn's doc string. This is particularly useful
179 178 for the ``do_...`` commands that hook into the help system.
180 179 Adapted from a comp.lang.python posting
181 180 by Duncan Booth."""
182 181 def wrapper(*args, **kw):
183 182 return new_fn(*args, **kw)
184 183 if old_fn.__doc__:
185 184 wrapper.__doc__ = old_fn.__doc__ + additional_text
186 185 return wrapper
187 186
188 187
189 188 def _file_lines(fname):
190 189 """Return the contents of a named file as a list of lines.
191 190
192 191 This function never raises an IOError exception: if the file can't be
193 192 read, it simply returns an empty list."""
194 193
195 194 try:
196 195 outfile = open(fname)
197 196 except IOError:
198 197 return []
199 198 else:
200 199 out = outfile.readlines()
201 200 outfile.close()
202 201 return out
203 202
204 203
205 204 class Pdb(OldPdb, object):
206 205 """Modified Pdb class, does not load readline.
207 206
208 207 for a standalone version that uses prompt_toolkit, see
209 208 `IPython.terminal.debugger.TerminalPdb` and
210 209 `IPython.terminal.debugger.set_trace()`
211 210 """
212 211
213 212 def __init__(self, color_scheme=None, completekey=None,
214 213 stdin=None, stdout=None, context=5):
215 214
216 215 # Parent constructor:
217 216 try:
218 217 self.context = int(context)
219 218 if self.context <= 0:
220 219 raise ValueError("Context must be a positive integer")
221 220 except (TypeError, ValueError):
222 221 raise ValueError("Context must be a positive integer")
223 222
224 223 OldPdb.__init__(self, completekey, stdin, stdout)
225 224
226 225 # IPython changes...
227 226 self.shell = get_ipython()
228 227
229 228 if self.shell is None:
230 229 save_main = sys.modules['__main__']
231 230 # No IPython instance running, we must create one
232 231 from IPython.terminal.interactiveshell import \
233 232 TerminalInteractiveShell
234 233 self.shell = TerminalInteractiveShell.instance()
235 234 # needed by any code which calls __import__("__main__") after
236 235 # the debugger was entered. See also #9941.
237 236 sys.modules['__main__'] = save_main
238 237
239 238 if color_scheme is not None:
240 239 warnings.warn(
241 240 "The `color_scheme` argument is deprecated since version 5.1",
242 241 DeprecationWarning, stacklevel=2)
243 242 else:
244 243 color_scheme = self.shell.colors
245 244
246 245 self.aliases = {}
247 246
248 247 # Create color table: we copy the default one from the traceback
249 248 # module and add a few attributes needed for debugging
250 249 self.color_scheme_table = exception_colors()
251 250
252 251 # shorthands
253 252 C = coloransi.TermColors
254 253 cst = self.color_scheme_table
255 254
256 255 cst['NoColor'].colors.prompt = C.NoColor
257 256 cst['NoColor'].colors.breakpoint_enabled = C.NoColor
258 257 cst['NoColor'].colors.breakpoint_disabled = C.NoColor
259 258
260 259 cst['Linux'].colors.prompt = C.Green
261 260 cst['Linux'].colors.breakpoint_enabled = C.LightRed
262 261 cst['Linux'].colors.breakpoint_disabled = C.Red
263 262
264 263 cst['LightBG'].colors.prompt = C.Blue
265 264 cst['LightBG'].colors.breakpoint_enabled = C.LightRed
266 265 cst['LightBG'].colors.breakpoint_disabled = C.Red
267 266
268 267 cst['Neutral'].colors.prompt = C.Blue
269 268 cst['Neutral'].colors.breakpoint_enabled = C.LightRed
270 269 cst['Neutral'].colors.breakpoint_disabled = C.Red
271 270
272 271
273 272 # Add a python parser so we can syntax highlight source while
274 273 # debugging.
275 274 self.parser = PyColorize.Parser(style=color_scheme)
276 275 self.set_colors(color_scheme)
277 276
278 277 # Set the prompt - the default prompt is '(Pdb)'
279 278 self.prompt = prompt
280 279
281 280 def set_colors(self, scheme):
282 281 """Shorthand access to the color table scheme selector method."""
283 282 self.color_scheme_table.set_active_scheme(scheme)
284 283 self.parser.style = scheme
285 284
286 285 def interaction(self, frame, traceback):
287 286 try:
288 287 OldPdb.interaction(self, frame, traceback)
289 288 except KeyboardInterrupt:
290 289 sys.stdout.write('\n' + self.shell.get_exception_only())
291 290
292 291 def parseline(self, line):
293 292 if line.startswith("!!"):
294 293 # Force standard behavior.
295 294 return super(Pdb, self).parseline(line[2:])
296 295 # "Smart command mode" from pdb++: don't execute commands if a variable
297 296 # with the same name exists.
298 297 cmd, arg, newline = super(Pdb, self).parseline(line)
299 298 # Fix for #9611: Do not trigger smart command if the command is `exit`
300 299 # or `quit` and it would resolve to their *global* value (the
301 300 # `ExitAutocall` object). Just checking that it is not present in the
302 301 # locals dict is not enough as locals and globals match at the
303 302 # toplevel.
304 303 if ((cmd in self.curframe.f_locals or cmd in self.curframe.f_globals)
305 304 and not (cmd in ["exit", "quit"]
306 305 and (self.curframe.f_locals is self.curframe.f_globals
307 306 or cmd not in self.curframe.f_locals))):
308 307 return super(Pdb, self).parseline("!" + line)
309 308 return super(Pdb, self).parseline(line)
310 309
311 310 def new_do_up(self, arg):
312 311 OldPdb.do_up(self, arg)
313 312 do_u = do_up = decorate_fn_with_doc(new_do_up, OldPdb.do_up)
314 313
315 314 def new_do_down(self, arg):
316 315 OldPdb.do_down(self, arg)
317 316
318 317 do_d = do_down = decorate_fn_with_doc(new_do_down, OldPdb.do_down)
319 318
320 319 def new_do_frame(self, arg):
321 320 OldPdb.do_frame(self, arg)
322 321
323 322 def new_do_quit(self, arg):
324 323
325 324 if hasattr(self, 'old_all_completions'):
326 325 self.shell.Completer.all_completions=self.old_all_completions
327 326
328 327 return OldPdb.do_quit(self, arg)
329 328
330 329 do_q = do_quit = decorate_fn_with_doc(new_do_quit, OldPdb.do_quit)
331 330
332 331 def new_do_restart(self, arg):
333 332 """Restart command. In the context of ipython this is exactly the same
334 333 thing as 'quit'."""
335 334 self.msg("Restart doesn't make sense here. Using 'quit' instead.")
336 335 return self.do_quit(arg)
337 336
338 337 def print_stack_trace(self, context=None):
339 338 if context is None:
340 339 context = self.context
341 340 try:
342 341 context=int(context)
343 342 if context <= 0:
344 343 raise ValueError("Context must be a positive integer")
345 344 except (TypeError, ValueError):
346 345 raise ValueError("Context must be a positive integer")
347 346 try:
348 347 for frame_lineno in self.stack:
349 348 self.print_stack_entry(frame_lineno, context=context)
350 349 except KeyboardInterrupt:
351 350 pass
352 351
353 352 def print_stack_entry(self,frame_lineno, prompt_prefix='\n-> ',
354 353 context=None):
355 354 if context is None:
356 355 context = self.context
357 356 try:
358 357 context=int(context)
359 358 if context <= 0:
360 359 raise ValueError("Context must be a positive integer")
361 360 except (TypeError, ValueError):
362 361 raise ValueError("Context must be a positive integer")
363 362 print(self.format_stack_entry(frame_lineno, '', context))
364 363
365 364 # vds: >>
366 365 frame, lineno = frame_lineno
367 366 filename = frame.f_code.co_filename
368 367 self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
369 368 # vds: <<
370 369
371 370 def format_stack_entry(self, frame_lineno, lprefix=': ', context=None):
372 371 if context is None:
373 372 context = self.context
374 373 try:
375 374 context=int(context)
376 375 if context <= 0:
377 376 print("Context must be a positive integer")
378 377 except (TypeError, ValueError):
379 378 print("Context must be a positive integer")
380 379 try:
381 380 import reprlib # Py 3
382 381 except ImportError:
383 382 import repr as reprlib # Py 2
384 383
385 384 ret = []
386 385
387 386 Colors = self.color_scheme_table.active_colors
388 387 ColorsNormal = Colors.Normal
389 388 tpl_link = u'%s%%s%s' % (Colors.filenameEm, ColorsNormal)
390 389 tpl_call = u'%s%%s%s%%s%s' % (Colors.vName, Colors.valEm, ColorsNormal)
391 390 tpl_line = u'%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
392 391 tpl_line_em = u'%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line,
393 392 ColorsNormal)
394 393
395 394 frame, lineno = frame_lineno
396 395
397 396 return_value = ''
398 397 if '__return__' in frame.f_locals:
399 398 rv = frame.f_locals['__return__']
400 399 #return_value += '->'
401 400 return_value += reprlib.repr(rv) + '\n'
402 401 ret.append(return_value)
403 402
404 403 #s = filename + '(' + `lineno` + ')'
405 404 filename = self.canonic(frame.f_code.co_filename)
406 405 link = tpl_link % py3compat.cast_unicode(filename)
407 406
408 407 if frame.f_code.co_name:
409 408 func = frame.f_code.co_name
410 409 else:
411 410 func = "<lambda>"
412 411
413 412 call = ''
414 413 if func != '?':
415 414 if '__args__' in frame.f_locals:
416 415 args = reprlib.repr(frame.f_locals['__args__'])
417 416 else:
418 417 args = '()'
419 418 call = tpl_call % (func, args)
420 419
421 420 # The level info should be generated in the same format pdb uses, to
422 421 # avoid breaking the pdbtrack functionality of python-mode in *emacs.
423 422 if frame is self.curframe:
424 423 ret.append('> ')
425 424 else:
426 425 ret.append(' ')
427 426 ret.append(u'%s(%s)%s\n' % (link,lineno,call))
428 427
429 428 start = lineno - 1 - context//2
430 429 lines = ulinecache.getlines(filename)
431 430 start = min(start, len(lines) - context)
432 431 start = max(start, 0)
433 432 lines = lines[start : start + context]
434 433
435 434 for i,line in enumerate(lines):
436 435 show_arrow = (start + 1 + i == lineno)
437 436 linetpl = (frame is self.curframe or show_arrow) \
438 437 and tpl_line_em \
439 438 or tpl_line
440 439 ret.append(self.__format_line(linetpl, filename,
441 440 start + 1 + i, line,
442 441 arrow = show_arrow) )
443 442 return ''.join(ret)
444 443
445 444 def __format_line(self, tpl_line, filename, lineno, line, arrow = False):
446 445 bp_mark = ""
447 446 bp_mark_color = ""
448 447
449 448 new_line, err = self.parser.format2(line, 'str')
450 449 if not err:
451 450 line = new_line
452 451
453 452 bp = None
454 453 if lineno in self.get_file_breaks(filename):
455 454 bps = self.get_breaks(filename, lineno)
456 455 bp = bps[-1]
457 456
458 457 if bp:
459 458 Colors = self.color_scheme_table.active_colors
460 459 bp_mark = str(bp.number)
461 460 bp_mark_color = Colors.breakpoint_enabled
462 461 if not bp.enabled:
463 462 bp_mark_color = Colors.breakpoint_disabled
464 463
465 464 numbers_width = 7
466 465 if arrow:
467 466 # This is the line with the error
468 467 pad = numbers_width - len(str(lineno)) - len(bp_mark)
469 468 num = '%s%s' % (make_arrow(pad), str(lineno))
470 469 else:
471 470 num = '%*s' % (numbers_width - len(bp_mark), str(lineno))
472 471
473 472 return tpl_line % (bp_mark_color + bp_mark, num, line)
474 473
475 474
476 475 def print_list_lines(self, filename, first, last):
477 476 """The printing (as opposed to the parsing part of a 'list'
478 477 command."""
479 478 try:
480 479 Colors = self.color_scheme_table.active_colors
481 480 ColorsNormal = Colors.Normal
482 481 tpl_line = '%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
483 482 tpl_line_em = '%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line, ColorsNormal)
484 483 src = []
485 484 if filename == "<string>" and hasattr(self, "_exec_filename"):
486 485 filename = self._exec_filename
487 486
488 487 for lineno in range(first, last+1):
489 488 line = ulinecache.getline(filename, lineno)
490 489 if not line:
491 490 break
492 491
493 492 if lineno == self.curframe.f_lineno:
494 493 line = self.__format_line(tpl_line_em, filename, lineno, line, arrow = True)
495 494 else:
496 495 line = self.__format_line(tpl_line, filename, lineno, line, arrow = False)
497 496
498 497 src.append(line)
499 498 self.lineno = lineno
500 499
501 500 print(''.join(src))
502 501
503 502 except KeyboardInterrupt:
504 503 pass
505 504
506 505 def do_list(self, arg):
507 506 self.lastcmd = 'list'
508 507 last = None
509 508 if arg:
510 509 try:
511 510 x = eval(arg, {}, {})
512 511 if type(x) == type(()):
513 512 first, last = x
514 513 first = int(first)
515 514 last = int(last)
516 515 if last < first:
517 516 # Assume it's a count
518 517 last = first + last
519 518 else:
520 519 first = max(1, int(x) - 5)
521 520 except:
522 521 print('*** Error in argument:', repr(arg))
523 522 return
524 523 elif self.lineno is None:
525 524 first = max(1, self.curframe.f_lineno - 5)
526 525 else:
527 526 first = self.lineno + 1
528 527 if last is None:
529 528 last = first + 10
530 529 self.print_list_lines(self.curframe.f_code.co_filename, first, last)
531 530
532 531 # vds: >>
533 532 lineno = first
534 533 filename = self.curframe.f_code.co_filename
535 534 self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
536 535 # vds: <<
537 536
538 537 do_l = do_list
539 538
540 539 def getsourcelines(self, obj):
541 540 lines, lineno = inspect.findsource(obj)
542 541 if inspect.isframe(obj) and obj.f_globals is obj.f_locals:
543 542 # must be a module frame: do not try to cut a block out of it
544 543 return lines, 1
545 544 elif inspect.ismodule(obj):
546 545 return lines, 1
547 546 return inspect.getblock(lines[lineno:]), lineno+1
548 547
549 548 def do_longlist(self, arg):
550 549 self.lastcmd = 'longlist'
551 550 try:
552 551 lines, lineno = self.getsourcelines(self.curframe)
553 552 except OSError as err:
554 553 self.error(err)
555 554 return
556 555 last = lineno + len(lines)
557 556 self.print_list_lines(self.curframe.f_code.co_filename, lineno, last)
558 557 do_ll = do_longlist
559 558
560 559 def do_pdef(self, arg):
561 560 """Print the call signature for any callable object.
562 561
563 562 The debugger interface to %pdef"""
564 563 namespaces = [('Locals', self.curframe.f_locals),
565 564 ('Globals', self.curframe.f_globals)]
566 565 self.shell.find_line_magic('pdef')(arg, namespaces=namespaces)
567 566
568 567 def do_pdoc(self, arg):
569 568 """Print the docstring for an object.
570 569
571 570 The debugger interface to %pdoc."""
572 571 namespaces = [('Locals', self.curframe.f_locals),
573 572 ('Globals', self.curframe.f_globals)]
574 573 self.shell.find_line_magic('pdoc')(arg, namespaces=namespaces)
575 574
576 575 def do_pfile(self, arg):
577 576 """Print (or run through pager) the file where an object is defined.
578 577
579 578 The debugger interface to %pfile.
580 579 """
581 580 namespaces = [('Locals', self.curframe.f_locals),
582 581 ('Globals', self.curframe.f_globals)]
583 582 self.shell.find_line_magic('pfile')(arg, namespaces=namespaces)
584 583
585 584 def do_pinfo(self, arg):
586 585 """Provide detailed information about an object.
587 586
588 587 The debugger interface to %pinfo, i.e., obj?."""
589 588 namespaces = [('Locals', self.curframe.f_locals),
590 589 ('Globals', self.curframe.f_globals)]
591 590 self.shell.find_line_magic('pinfo')(arg, namespaces=namespaces)
592 591
593 592 def do_pinfo2(self, arg):
594 593 """Provide extra detailed information about an object.
595 594
596 595 The debugger interface to %pinfo2, i.e., obj??."""
597 596 namespaces = [('Locals', self.curframe.f_locals),
598 597 ('Globals', self.curframe.f_globals)]
599 598 self.shell.find_line_magic('pinfo2')(arg, namespaces=namespaces)
600 599
601 600 def do_psource(self, arg):
602 601 """Print (or run through pager) the source code for an object."""
603 602 namespaces = [('Locals', self.curframe.f_locals),
604 603 ('Globals', self.curframe.f_globals)]
605 604 self.shell.find_line_magic('psource')(arg, namespaces=namespaces)
606 605
607 if sys.version_info > (3, ):
608 def do_where(self, arg):
609 """w(here)
610 Print a stack trace, with the most recent frame at the bottom.
611 An arrow indicates the "current frame", which determines the
612 context of most commands. 'bt' is an alias for this command.
613
614 Take a number as argument as an (optional) number of context line to
615 print"""
616 if arg:
617 context = int(arg)
618 self.print_stack_trace(context)
619 else:
620 self.print_stack_trace()
606 def do_where(self, arg):
607 """w(here)
608 Print a stack trace, with the most recent frame at the bottom.
609 An arrow indicates the "current frame", which determines the
610 context of most commands. 'bt' is an alias for this command.
611
612 Take a number as an (optional) argument: the number of context lines
613 to print."""
614 if arg:
615 context = int(arg)
616 self.print_stack_trace(context)
617 else:
618 self.print_stack_trace()
621 619
622 do_w = do_where
620 do_w = do_where
623 621
624 622
625 623 def set_trace(frame=None):
626 624 """
627 625 Start debugging from `frame`.
628 626
629 627 If frame is not specified, debugging starts from caller's frame.
630 628 """
631 629 Pdb().set_trace(frame or sys._getframe().f_back)
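# Editor's note: a hedged usage sketch. set_trace() above is the
# non-deprecated replacement for the Tracer class earlier in this file.
from IPython.core.debugger import set_trace

def divide(a, b):
    set_trace()        # opens the IPython-flavoured Pdb in this frame
    return a / b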
@@ -1,1031 +1,1030 b''
1 1 # -*- coding: utf-8 -*-
2 2 """Top-level display functions for displaying object in different formats."""
3 3
4 4 # Copyright (c) IPython Development Team.
5 5 # Distributed under the terms of the Modified BSD License.
6 6
7 from __future__ import print_function
8 7
9 8 try:
10 9 from base64 import encodebytes as base64_encode
11 10 except ImportError:
12 11 from base64 import encodestring as base64_encode
13 12
14 13 import json
15 14 import mimetypes
16 15 import os
17 16 import struct
18 17 import sys
19 18 import warnings
20 19
21 20 from IPython.utils.py3compat import (string_types, cast_bytes_py2, cast_unicode,
22 21 unicode_type)
23 22 from IPython.testing.skipdoctest import skip_doctest
24 23
25 24 __all__ = ['display', 'display_pretty', 'display_html', 'display_markdown',
26 25 'display_svg', 'display_png', 'display_jpeg', 'display_latex', 'display_json',
27 26 'display_javascript', 'display_pdf', 'DisplayObject', 'TextDisplayObject',
28 27 'Pretty', 'HTML', 'Markdown', 'Math', 'Latex', 'SVG', 'JSON', 'Javascript',
29 28 'Image', 'clear_output', 'set_matplotlib_formats', 'set_matplotlib_close',
30 29 'publish_display_data']
31 30
32 31 #-----------------------------------------------------------------------------
33 32 # utility functions
34 33 #-----------------------------------------------------------------------------
35 34
36 35 def _safe_exists(path):
37 36 """Check path, but don't let exceptions raise"""
38 37 try:
39 38 return os.path.exists(path)
40 39 except Exception:
41 40 return False
42 41
43 42 def _merge(d1, d2):
44 43 """Like update, but merges sub-dicts instead of clobbering at the top level.
45 44
46 45 Updates d1 in-place
47 46 """
48 47
49 48 if not isinstance(d2, dict) or not isinstance(d1, dict):
50 49 return d2
51 50 for key, value in d2.items():
52 51 d1[key] = _merge(d1.get(key), value)
53 52 return d1
54 53
55 54 def _display_mimetype(mimetype, objs, raw=False, metadata=None):
56 55 """internal implementation of all display_foo methods
57 56
58 57 Parameters
59 58 ----------
60 59 mimetype : str
61 60 The mimetype to be published (e.g. 'image/png')
62 61 objs : tuple of objects
63 62 The Python objects to display, or if raw=True raw text data to
64 63 display.
65 64 raw : bool
66 65 Are the data objects raw data or Python objects that need to be
67 66 formatted before display? [default: False]
68 67 metadata : dict (optional)
69 68 Metadata to be associated with the specific mimetype output.
70 69 """
71 70 if metadata:
72 71 metadata = {mimetype: metadata}
73 72 if raw:
74 73 # turn list of pngdata into list of { 'image/png': pngdata }
75 74 objs = [ {mimetype: obj} for obj in objs ]
76 75 display(*objs, raw=raw, metadata=metadata, include=[mimetype])
77 76
78 77 #-----------------------------------------------------------------------------
79 78 # Main functions
80 79 #-----------------------------------------------------------------------------
81 80
82 81 def publish_display_data(data, metadata=None, source=None):
83 82 """Publish data and metadata to all frontends.
84 83
85 84 See the ``display_data`` message in the messaging documentation for
86 85 more details about this message type.
87 86
88 87 The following MIME types are currently implemented:
89 88
90 89 * text/plain
91 90 * text/html
92 91 * text/markdown
93 92 * text/latex
94 93 * application/json
95 94 * application/javascript
96 95 * image/png
97 96 * image/jpeg
98 97 * image/svg+xml
99 98
100 99 Parameters
101 100 ----------
102 101 data : dict
103 102 A dictionary having keys that are valid MIME types (like
104 103 'text/plain' or 'image/svg+xml') and values that are the data for
105 104 that MIME type. The data itself must be a JSON'able data
106 105 structure. Minimally all data should have the 'text/plain' data,
107 106 which can be displayed by all frontends. If more than the plain
108 107 text is given, it is up to the frontend to decide which
109 108 representation to use.
110 109 metadata : dict
111 110 A dictionary for metadata related to the data. This can contain
112 111 arbitrary key, value pairs that frontends can use to interpret
113 112 the data. mime-type keys matching those in data can be used
114 113 to specify metadata about particular representations.
115 114 source : str, deprecated
116 115 Unused.
117 116 """
118 117 from IPython.core.interactiveshell import InteractiveShell
119 118 InteractiveShell.instance().display_pub.publish(
120 119 data=data,
121 120 metadata=metadata,
122 121 )
123 122
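# Editor's note: an illustrative call (editor's sketch, not from the diff)
# sending the minimal MIME bundle the docstring above recommends; it assumes a
# frontend that can render at least the 'text/plain' entry.
publish_display_data({
    'text/plain': 'a result',
    'text/html': '<b>a result</b>',
})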
124 123 def display(*objs, **kwargs):
125 124 """Display a Python object in all frontends.
126 125
127 126 By default all representations will be computed and sent to the frontends.
128 127 Frontends can decide which representation is used and how.
129 128
130 129 Parameters
131 130 ----------
132 131 objs : tuple of objects
133 132 The Python objects to display.
134 133 raw : bool, optional
135 134 Are the objects to be displayed already mimetype-keyed dicts of raw display data,
136 135 or Python objects that need to be formatted before display? [default: False]
137 136 include : list or tuple, optional
138 137 A list of format type strings (MIME types) to include in the
139 138 format data dict. If this is set *only* the format types included
140 139 in this list will be computed.
141 140 exclude : list or tuple, optional
142 141 A list of format type strings (MIME types) to exclude in the format
143 142 data dict. If this is set all format types will be computed,
144 143 except for those included in this argument.
145 144 metadata : dict, optional
146 145 A dictionary of metadata to associate with the output.
147 146 mime-type keys in this dictionary will be associated with the individual
148 147 representation formats, if they exist.
149 148 """
150 149 raw = kwargs.get('raw', False)
151 150 include = kwargs.get('include')
152 151 exclude = kwargs.get('exclude')
153 152 metadata = kwargs.get('metadata')
154 153
155 154 from IPython.core.interactiveshell import InteractiveShell
156 155
157 156 if not raw:
158 157 format = InteractiveShell.instance().display_formatter.format
159 158
160 159 for obj in objs:
161 160 if raw:
162 161 publish_display_data(data=obj, metadata=metadata)
163 162 else:
164 163 format_dict, md_dict = format(obj, include=include, exclude=exclude)
165 164 if not format_dict:
166 165 # nothing to display (e.g. _ipython_display_ took over)
167 166 continue
168 167 if metadata:
169 168 # kwarg-specified metadata gets precedence
170 169 _merge(md_dict, metadata)
171 170 publish_display_data(data=format_dict, metadata=md_dict)
172 171
173 172
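# Editor's note: hedged examples of the keyword arguments documented above;
# which MIME types a given frontend actually renders will vary.
from IPython.display import display
display({'text/html': '<b>already formatted</b>'}, raw=True)   # raw, mimetype-keyed bundle
display(3.14159, include=['text/plain'])                       # compute only the plain-text form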
174 173 def display_pretty(*objs, **kwargs):
175 174 """Display the pretty (default) representation of an object.
176 175
177 176 Parameters
178 177 ----------
179 178 objs : tuple of objects
180 179 The Python objects to display, or if raw=True raw text data to
181 180 display.
182 181 raw : bool
183 182 Are the data objects raw data or Python objects that need to be
184 183 formatted before display? [default: False]
185 184 metadata : dict (optional)
186 185 Metadata to be associated with the specific mimetype output.
187 186 """
188 187 _display_mimetype('text/plain', objs, **kwargs)
189 188
190 189
191 190 def display_html(*objs, **kwargs):
192 191 """Display the HTML representation of an object.
193 192
194 193 Note: If raw=False and the object does not have an HTML
195 194 representation, no HTML will be shown.
196 195
197 196 Parameters
198 197 ----------
199 198 objs : tuple of objects
200 199 The Python objects to display, or if raw=True raw HTML data to
201 200 display.
202 201 raw : bool
203 202 Are the data objects raw data or Python objects that need to be
204 203 formatted before display? [default: False]
205 204 metadata : dict (optional)
206 205 Metadata to be associated with the specific mimetype output.
207 206 """
208 207 _display_mimetype('text/html', objs, **kwargs)
209 208
210 209
211 210 def display_markdown(*objs, **kwargs):
212 211 """Displays the Markdown representation of an object.
213 212
214 213 Parameters
215 214 ----------
216 215 objs : tuple of objects
217 216 The Python objects to display, or if raw=True raw markdown data to
218 217 display.
219 218 raw : bool
220 219 Are the data objects raw data or Python objects that need to be
221 220 formatted before display? [default: False]
222 221 metadata : dict (optional)
223 222 Metadata to be associated with the specific mimetype output.
224 223 """
225 224
226 225 _display_mimetype('text/markdown', objs, **kwargs)
227 226
228 227
229 228 def display_svg(*objs, **kwargs):
230 229 """Display the SVG representation of an object.
231 230
232 231 Parameters
233 232 ----------
234 233 objs : tuple of objects
235 234 The Python objects to display, or if raw=True raw svg data to
236 235 display.
237 236 raw : bool
238 237 Are the data objects raw data or Python objects that need to be
239 238 formatted before display? [default: False]
240 239 metadata : dict (optional)
241 240 Metadata to be associated with the specific mimetype output.
242 241 """
243 242 _display_mimetype('image/svg+xml', objs, **kwargs)
244 243
245 244
246 245 def display_png(*objs, **kwargs):
247 246 """Display the PNG representation of an object.
248 247
249 248 Parameters
250 249 ----------
251 250 objs : tuple of objects
252 251 The Python objects to display, or if raw=True raw png data to
253 252 display.
254 253 raw : bool
255 254 Are the data objects raw data or Python objects that need to be
256 255 formatted before display? [default: False]
257 256 metadata : dict (optional)
258 257 Metadata to be associated with the specific mimetype output.
259 258 """
260 259 _display_mimetype('image/png', objs, **kwargs)
261 260
262 261
263 262 def display_jpeg(*objs, **kwargs):
264 263 """Display the JPEG representation of an object.
265 264
266 265 Parameters
267 266 ----------
268 267 objs : tuple of objects
269 268 The Python objects to display, or if raw=True raw JPEG data to
270 269 display.
271 270 raw : bool
272 271 Are the data objects raw data or Python objects that need to be
273 272 formatted before display? [default: False]
274 273 metadata : dict (optional)
275 274 Metadata to be associated with the specific mimetype output.
276 275 """
277 276 _display_mimetype('image/jpeg', objs, **kwargs)
278 277
279 278
280 279 def display_latex(*objs, **kwargs):
281 280 """Display the LaTeX representation of an object.
282 281
283 282 Parameters
284 283 ----------
285 284 objs : tuple of objects
286 285 The Python objects to display, or if raw=True raw latex data to
287 286 display.
288 287 raw : bool
289 288 Are the data objects raw data or Python objects that need to be
290 289 formatted before display? [default: False]
291 290 metadata : dict (optional)
292 291 Metadata to be associated with the specific mimetype output.
293 292 """
294 293 _display_mimetype('text/latex', objs, **kwargs)
295 294
296 295
297 296 def display_json(*objs, **kwargs):
298 297 """Display the JSON representation of an object.
299 298
300 299 Note that not many frontends support displaying JSON.
301 300
302 301 Parameters
303 302 ----------
304 303 objs : tuple of objects
305 304 The Python objects to display, or if raw=True raw json data to
306 305 display.
307 306 raw : bool
308 307 Are the data objects raw data or Python objects that need to be
309 308 formatted before display? [default: False]
310 309 metadata : dict (optional)
311 310 Metadata to be associated with the specific mimetype output.
312 311 """
313 312 _display_mimetype('application/json', objs, **kwargs)
314 313
315 314
316 315 def display_javascript(*objs, **kwargs):
317 316 """Display the Javascript representation of an object.
318 317
319 318 Parameters
320 319 ----------
321 320 objs : tuple of objects
322 321 The Python objects to display, or if raw=True raw javascript data to
323 322 display.
324 323 raw : bool
325 324 Are the data objects raw data or Python objects that need to be
326 325 formatted before display? [default: False]
327 326 metadata : dict (optional)
328 327 Metadata to be associated with the specific mimetype output.
329 328 """
330 329 _display_mimetype('application/javascript', objs, **kwargs)
331 330
332 331
333 332 def display_pdf(*objs, **kwargs):
334 333 """Display the PDF representation of an object.
335 334
336 335 Parameters
337 336 ----------
338 337 objs : tuple of objects
339 338 The Python objects to display, or if raw=True raw javascript data to
340 339 display.
341 340 raw : bool
342 341 Are the data objects raw data or Python objects that need to be
343 342 formatted before display? [default: False]
344 343 metadata : dict (optional)
345 344 Metadata to be associated with the specific mimetype output.
346 345 """
347 346 _display_mimetype('application/pdf', objs, **kwargs)
348 347
349 348
350 349 #-----------------------------------------------------------------------------
351 350 # Smart classes
352 351 #-----------------------------------------------------------------------------
353 352
354 353
355 354 class DisplayObject(object):
356 355 """An object that wraps data to be displayed."""
357 356
358 357 _read_flags = 'r'
359 358 _show_mem_addr = False
360 359
361 360 def __init__(self, data=None, url=None, filename=None):
362 361 """Create a display object given raw data.
363 362
364 363 When this object is returned by an expression or passed to the
365 364 display function, it will result in the data being displayed
366 365 in the frontend. The MIME type of the data should match the
367 366 subclass used, so the Png subclass should be used for 'image/png'
368 367 data. If the data is a URL, the data will first be downloaded
369 368 and then displayed.
370 369
371 370 Parameters
372 371 ----------
373 372 data : unicode, str or bytes
374 373 The raw data or a URL or file to load the data from
375 374 url : unicode
376 375 A URL to download the data from.
377 376 filename : unicode
378 377 Path to a local file to load the data from.
379 378 """
380 379 if data is not None and isinstance(data, string_types):
381 380 if data.startswith('http') and url is None:
382 381 url = data
383 382 filename = None
384 383 data = None
385 384 elif _safe_exists(data) and filename is None:
386 385 url = None
387 386 filename = data
388 387 data = None
389 388
390 389 self.data = data
391 390 self.url = url
392 391 self.filename = None if filename is None else unicode_type(filename)
393 392
394 393 self.reload()
395 394 self._check_data()
396 395
397 396 def __repr__(self):
398 397 if not self._show_mem_addr:
399 398 cls = self.__class__
400 399 r = "<%s.%s object>" % (cls.__module__, cls.__name__)
401 400 else:
402 401 r = super(DisplayObject, self).__repr__()
403 402 return r
404 403
405 404 def _check_data(self):
406 405 """Override in subclasses if there's something to check."""
407 406 pass
408 407
409 408 def reload(self):
410 409 """Reload the raw data from file or URL."""
411 410 if self.filename is not None:
412 411 with open(self.filename, self._read_flags) as f:
413 412 self.data = f.read()
414 413 elif self.url is not None:
415 414 try:
416 415 try:
417 416 from urllib.request import urlopen # Py3
418 417 except ImportError:
419 418 from urllib2 import urlopen
420 419 response = urlopen(self.url)
421 420 self.data = response.read()
422 421 # extract encoding from header, if there is one:
423 422 encoding = None
424 423 for sub in response.headers['content-type'].split(';'):
425 424 sub = sub.strip()
426 425 if sub.startswith('charset'):
427 426 encoding = sub.split('=')[-1].strip()
428 427 break
429 428 # decode data, if an encoding was specified
430 429 if encoding:
431 430 self.data = self.data.decode(encoding, 'replace')
432 431 except:
433 432 self.data = None
434 433
435 434 class TextDisplayObject(DisplayObject):
436 435 """Validate that display data is text"""
437 436 def _check_data(self):
438 437 if self.data is not None and not isinstance(self.data, string_types):
439 438 raise TypeError("%s expects text, not %r" % (self.__class__.__name__, self.data))
440 439
441 440 class Pretty(TextDisplayObject):
442 441
443 442 def _repr_pretty_(self, pp, cycle):
444 443 return pp.text(self.data)
445 444
446 445
447 446 class HTML(TextDisplayObject):
448 447
449 448 def _repr_html_(self):
450 449 return self.data
451 450
452 451 def __html__(self):
453 452 """
454 453 This method exists to inform other HTML-using modules (e.g. Markupsafe,
455 454 htmltag, etc) that this object is HTML and does not need things like
456 455 special characters (<>&) escaped.
457 456 """
458 457 return self._repr_html_()
459 458
460 459
461 460 class Markdown(TextDisplayObject):
462 461
463 462 def _repr_markdown_(self):
464 463 return self.data
465 464
466 465
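A short sketch of how these text display objects are used; returning one from a notebook cell, or passing it to display(), triggers the matching _repr_*_ method:

from IPython.display import display, HTML, Markdown, Pretty

display(HTML("<b>rendered as bold HTML</b>"))
display(Markdown("*rendered* via the **Markdown** mimetype"))
display(Pretty("shown through the text/plain pretty printer"))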
467 466 class Math(TextDisplayObject):
468 467
469 468 def _repr_latex_(self):
470 469 s = self.data.strip('$')
471 470 return "$$%s$$" % s
472 471
473 472
474 473 class Latex(TextDisplayObject):
475 474
476 475 def _repr_latex_(self):
477 476 return self.data
478 477
479 478
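A brief illustration of the difference between Math and Latex; actual rendering depends on the frontend's MathJax/LaTeX support:

from IPython.display import Math, Latex

Math(r'\int_0^1 x^2\,dx = \frac{1}{3}')    # wrapped in $$ ... $$ by _repr_latex_
Latex(r'The value is $\frac{1}{3}$.')      # passed through verbatim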
480 479 class SVG(DisplayObject):
481 480
482 481 # wrap data in a property, which extracts the <svg> tag, discarding
483 482 # document headers
484 483 _data = None
485 484
486 485 @property
487 486 def data(self):
488 487 return self._data
489 488
490 489 @data.setter
491 490 def data(self, svg):
492 491 if svg is None:
493 492 self._data = None
494 493 return
495 494 # parse into dom object
496 495 from xml.dom import minidom
497 496 svg = cast_bytes_py2(svg)
498 497 x = minidom.parseString(svg)
499 498 # get svg tag (should be 1)
500 499 found_svg = x.getElementsByTagName('svg')
501 500 if found_svg:
502 501 svg = found_svg[0].toxml()
503 502 else:
504 503 # fallback on the input, trust the user
505 504 # but this is probably an error.
506 505 pass
507 506 svg = cast_unicode(svg)
508 507 self._data = svg
509 508
510 509 def _repr_svg_(self):
511 510 return self.data
512 511
513 512
514 513 class JSON(DisplayObject):
515 514 """JSON expects a JSON-able dict or list
516 515
517 516 not an already-serialized JSON string.
518 517
519 518 Scalar types (None, number, string) are not allowed, only dict or list containers.
520 519 """
521 520 # wrap data in a property, which warns about passing already-serialized JSON
522 521 _data = None
523 522 def __init__(self, data=None, url=None, filename=None, expanded=False, metadata=None):
524 523 """Create a JSON display object given raw data.
525 524
526 525 Parameters
527 526 ----------
528 527 data : dict or list
529 528 JSON data to display. Not an already-serialized JSON string.
530 529 Scalar types (None, number, string) are not allowed, only dict
531 530 or list containers.
532 531 url : unicode
533 532 A URL to download the data from.
534 533 filename : unicode
535 534 Path to a local file to load the data from.
536 535 expanded : boolean
537 536 Metadata to control whether a JSON display component is expanded.
538 537 metadata : dict
539 538 Specify extra metadata to attach to the json display object.
540 539 """
541 540 self.expanded = expanded
542 541 self.metadata = metadata
543 542 super(JSON, self).__init__(data=data, url=url, filename=filename)
544 543
545 544 def _check_data(self):
546 545 if self.data is not None and not isinstance(self.data, (dict, list)):
547 546 raise TypeError("%s expects JSONable dict or list, not %r" % (self.__class__.__name__, self.data))
548 547
549 548 @property
550 549 def data(self):
551 550 return self._data
552 551
553 552 @data.setter
554 553 def data(self, data):
555 554 if isinstance(data, string_types):
556 555 warnings.warn("JSON expects JSONable dict or list, not JSON strings")
557 556 data = json.loads(data)
558 557 self._data = data
559 558
560 559 def _data_and_metadata(self):
561 560 md = {'expanded': self.expanded}
562 561 if self.metadata:
563 562 md.update(self.metadata)
564 563 return self.data, md
565 564
566 565 def _repr_json_(self):
567 566 return self._data_and_metadata()
568 567
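A usage sketch for the JSON display object; passing an already-serialized string is accepted but triggers the warning above and the string is parsed back into a container:

from IPython.display import JSON

JSON({"name": "ipython", "tags": ["repl", "kernel"]}, expanded=True)
JSON('{"name": "ipython"}')   # discouraged: warns, then re-parses the string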
569 568 css_t = """$("head").append($("<link/>").attr({
570 569 rel: "stylesheet",
571 570 type: "text/css",
572 571 href: "%s"
573 572 }));
574 573 """
575 574
576 575 lib_t1 = """$.getScript("%s", function () {
577 576 """
578 577 lib_t2 = """});
579 578 """
580 579
581 580 class Javascript(TextDisplayObject):
582 581
583 582 def __init__(self, data=None, url=None, filename=None, lib=None, css=None):
584 583 """Create a Javascript display object given raw data.
585 584
586 585 When this object is returned by an expression or passed to the
587 586 display function, it will result in the data being displayed
588 587 in the frontend. If the data is a URL, the data will first be
589 588 downloaded and then displayed.
590 589
591 590 In the Notebook, the containing element will be available as `element`,
592 591 and jQuery will be available. Content appended to `element` will be
593 592 visible in the output area.
594 593
595 594 Parameters
596 595 ----------
597 596 data : unicode, str or bytes
598 597 The Javascript source code or a URL to download it from.
599 598 url : unicode
600 599 A URL to download the data from.
601 600 filename : unicode
602 601 Path to a local file to load the data from.
603 602 lib : list or str
604 603 A sequence of Javascript library URLs to load asynchronously before
605 604 running the source code. The full URLs of the libraries should
606 605 be given. A single Javascript library URL can also be given as a
607 606 string.
608 607 css : list or str
609 608 A sequence of css files to load before running the source code.
610 609 The full URLs of the css files should be given. A single css URL
611 610 can also be given as a string.
612 611 """
613 612 if isinstance(lib, string_types):
614 613 lib = [lib]
615 614 elif lib is None:
616 615 lib = []
617 616 if isinstance(css, string_types):
618 617 css = [css]
619 618 elif css is None:
620 619 css = []
621 620 if not isinstance(lib, (list,tuple)):
622 621 raise TypeError('expected sequence, got: %r' % lib)
623 622 if not isinstance(css, (list,tuple)):
624 623 raise TypeError('expected sequence, got: %r' % css)
625 624 self.lib = lib
626 625 self.css = css
627 626 super(Javascript, self).__init__(data=data, url=url, filename=filename)
628 627
629 628 def _repr_javascript_(self):
630 629 r = ''
631 630 for c in self.css:
632 631 r += css_t % c
633 632 for l in self.lib:
634 633 r += lib_t1 % l
635 634 r += self.data
636 635 r += lib_t2*len(self.lib)
637 636 return r
638 637
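A sketch of how the css/lib templates above are stitched around the source; the URLs here are placeholders, not real assets:

from IPython.display import Javascript

Javascript("element.text('loaded');",
           lib="https://example.org/lib.js",      # hypothetical library URL
           css="https://example.org/style.css")   # hypothetical stylesheet URL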
639 638 # constants for identifying png/jpeg data
640 639 _PNG = b'\x89PNG\r\n\x1a\n'
641 640 _JPEG = b'\xff\xd8'
642 641
643 642 def _pngxy(data):
644 643 """read the (width, height) from a PNG header"""
645 644 ihdr = data.index(b'IHDR')
646 645 # next 8 bytes are width/height
647 646 w4h4 = data[ihdr+4:ihdr+12]
648 647 return struct.unpack('>ii', w4h4)
649 648
650 649 def _jpegxy(data):
651 650 """read the (width, height) from a JPEG header"""
652 651 # adapted from http://www.64lines.com/jpeg-width-height
653 652
654 653 idx = 4
655 654 while True:
656 655 block_size = struct.unpack('>H', data[idx:idx+2])[0]
657 656 idx = idx + block_size
658 657 if data[idx:idx+2] == b'\xFF\xC0':
659 658 # found Start of Frame
660 659 iSOF = idx
661 660 break
662 661 else:
663 662 # read another block
664 663 idx += 2
665 664
666 665 h, w = struct.unpack('>HH', data[iSOF+5:iSOF+9])
667 666 return w, h
668 667
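For example, the PNG helper defined just above can be exercised directly on raw file bytes (a sketch assuming a local file named figure.png):

with open('figure.png', 'rb') as f:    # hypothetical local file
    data = f.read()
assert data[:8] == _PNG                # check the PNG signature first
width, height = _pngxy(data)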
669 668 class Image(DisplayObject):
670 669
671 670 _read_flags = 'rb'
672 671 _FMT_JPEG = u'jpeg'
673 672 _FMT_PNG = u'png'
674 673 _ACCEPTABLE_EMBEDDINGS = [_FMT_JPEG, _FMT_PNG]
675 674
676 675 def __init__(self, data=None, url=None, filename=None, format=None,
677 676 embed=None, width=None, height=None, retina=False,
678 677 unconfined=False, metadata=None):
679 678 """Create a PNG/JPEG image object given raw data.
680 679
681 680 When this object is returned by an input cell or passed to the
682 681 display function, it will result in the image being displayed
683 682 in the frontend.
684 683
685 684 Parameters
686 685 ----------
687 686 data : unicode, str or bytes
688 687 The raw image data or a URL or filename to load the data from.
689 688 This always results in embedded image data.
690 689 url : unicode
691 690 A URL to download the data from. If you specify `url=`,
692 691 the image data will not be embedded unless you also specify `embed=True`.
693 692 filename : unicode
694 693 Path to a local file to load the data from.
695 694 Images from a file are always embedded.
696 695 format : unicode
697 696 The format of the image data (png/jpeg/jpg). If a filename or URL is given,
698 697 the format will be inferred from the filename extension.
699 698 embed : bool
700 699 Should the image data be embedded using a data URI (True) or be
701 700 loaded using an <img> tag. Set this to True if you want the image
702 701 to be viewable later with no internet connection in the notebook.
703 702
704 703 Default is `True`, unless the keyword argument `url` is set; in that
705 704 case the default is `False`.
706 705
707 706 Note that the QtConsole is not able to display images if `embed` is set to `False`.
708 707 width : int
709 708 Width in pixels to which to constrain the image in html
710 709 height : int
711 710 Height in pixels to which to constrain the image in html
712 711 retina : bool
713 712 Automatically set the width and height to half of the measured
714 713 width and height.
715 714 This only works for embedded images because it reads the width/height
716 715 from image data.
717 716 For non-embedded images, you can just set the desired display width
718 717 and height directly.
719 718 unconfined : bool
720 719 Set unconfined=True to disable max-width confinement of the image.
721 720 metadata : dict
722 721 Specify extra metadata to attach to the image.
723 722
724 723 Examples
725 724 --------
726 725 # embedded image data, works in qtconsole and notebook
727 726 # when passed positionally, the first arg can be any of raw image data,
728 727 # a URL, or a filename from which to load image data.
729 728 # The result is always embedding image data for inline images.
730 729 Image('http://www.google.fr/images/srpr/logo3w.png')
731 730 Image('/path/to/image.jpg')
732 731 Image(b'RAW_PNG_DATA...')
733 732
734 733 # Specifying Image(url=...) does not embed the image data,
735 734 # it only generates `<img>` tag with a link to the source.
736 735 # This will not work in the qtconsole or offline.
737 736 Image(url='http://www.google.fr/images/srpr/logo3w.png')
738 737
739 738 """
740 739 if filename is not None:
741 740 ext = self._find_ext(filename)
742 741 elif url is not None:
743 742 ext = self._find_ext(url)
744 743 elif data is None:
745 744 raise ValueError("No image data found. Expecting filename, url, or data.")
746 745 elif isinstance(data, string_types) and (
747 746 data.startswith('http') or _safe_exists(data)
748 747 ):
749 748 ext = self._find_ext(data)
750 749 else:
751 750 ext = None
752 751
753 752 if format is None:
754 753 if ext is not None:
755 754 if ext == u'jpg' or ext == u'jpeg':
756 755 format = self._FMT_JPEG
757 756 elif ext == u'png':
758 757 format = self._FMT_PNG
759 758 else:
760 759 format = ext.lower()
761 760 elif isinstance(data, bytes):
762 761 # infer image type from image data header,
763 762 # only if format has not been specified.
764 763 if data[:2] == _JPEG:
765 764 format = self._FMT_JPEG
766 765
767 766 # failed to detect format, default png
768 767 if format is None:
769 768 format = 'png'
770 769
771 770 if format.lower() == 'jpg':
772 771 # jpg->jpeg
773 772 format = self._FMT_JPEG
774 773
775 774 self.format = unicode_type(format).lower()
776 775 self.embed = embed if embed is not None else (url is None)
777 776
778 777 if self.embed and self.format not in self._ACCEPTABLE_EMBEDDINGS:
779 778 raise ValueError("Cannot embed the '%s' image format" % (self.format))
780 779 self.width = width
781 780 self.height = height
782 781 self.retina = retina
783 782 self.unconfined = unconfined
784 783 self.metadata = metadata
785 784 super(Image, self).__init__(data=data, url=url, filename=filename)
786 785
787 786 if retina:
788 787 self._retina_shape()
789 788
790 789 def _retina_shape(self):
791 790 """load pixel-doubled width and height from image data"""
792 791 if not self.embed:
793 792 return
794 793 if self.format == 'png':
795 794 w, h = _pngxy(self.data)
796 795 elif self.format == 'jpeg':
797 796 w, h = _jpegxy(self.data)
798 797 else:
799 798 # retina only supports png
800 799 return
801 800 self.width = w // 2
802 801 self.height = h // 2
803 802
804 803 def reload(self):
805 804 """Reload the raw data from file or URL."""
806 805 if self.embed:
807 806 super(Image,self).reload()
808 807 if self.retina:
809 808 self._retina_shape()
810 809
811 810 def _repr_html_(self):
812 811 if not self.embed:
813 812 width = height = klass = ''
814 813 if self.width:
815 814 width = ' width="%d"' % self.width
816 815 if self.height:
817 816 height = ' height="%d"' % self.height
818 817 if self.unconfined:
819 818 klass = ' class="unconfined"'
820 819 return u'<img src="{url}"{width}{height}{klass}/>'.format(
821 820 url=self.url,
822 821 width=width,
823 822 height=height,
824 823 klass=klass,
825 824 )
826 825
827 826 def _data_and_metadata(self):
828 827 """shortcut for returning metadata with shape information, if defined"""
829 828 md = {}
830 829 if self.width:
831 830 md['width'] = self.width
832 831 if self.height:
833 832 md['height'] = self.height
834 833 if self.unconfined:
835 834 md['unconfined'] = self.unconfined
836 835 if self.metadata:
837 836 md.update(self.metadata)
838 837 if md:
839 838 return self.data, md
840 839 else:
841 840 return self.data
842 841
843 842 def _repr_png_(self):
844 843 if self.embed and self.format == u'png':
845 844 return self._data_and_metadata()
846 845
847 846 def _repr_jpeg_(self):
848 847 if self.embed and (self.format == u'jpeg' or self.format == u'jpg'):
849 848 return self._data_and_metadata()
850 849
851 850 def _find_ext(self, s):
852 851 return unicode_type(s.split('.')[-1].lower())
853 852
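A brief Image usage sketch; the file path and URL below are placeholders:

from IPython.display import Image

# Embedded from a local file; retina=True halves the reported width/height.
Image(filename='plot.png', retina=True)

# Linked rather than embedded: emits an <img> tag pointing at the URL.
Image(url='https://example.org/logo.png', width=200)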
854 853 class Video(DisplayObject):
855 854
856 855 def __init__(self, data=None, url=None, filename=None, embed=False, mimetype=None):
857 856 """Create a video object given raw data or an URL.
858 857
859 858 When this object is returned by an input cell or passed to the
860 859 display function, it will result in the video being displayed
861 860 in the frontend.
862 861
863 862 Parameters
864 863 ----------
865 864 data : unicode, str or bytes
866 865 The raw video data or a URL or filename to load the data from.
867 866 Raw data will require passing `embed=True`.
868 867 url : unicode
869 868 A URL for the video. If you specify `url=`,
870 869 the video data will not be embedded.
871 870 filename : unicode
872 871 Path to a local file containing the video.
873 872 Will be interpreted as a local URL unless `embed=True`.
874 873 embed : bool
875 874 Should the video be embedded using a data URI (True) or be
876 875 loaded using a <video> tag (False).
877 876
878 877 Since videos are large, embedding them should be avoided if possible.
879 878 You must confirm embedding as your intention by passing `embed=True`.
880 879
881 880 Local files can be displayed with URLs without embedding the content, via::
882 881
883 882 Video('./video.mp4')
884 883
885 884 mimetype : unicode
886 885 Specify the mimetype for embedded videos.
887 886 Default will be guessed from file extension, if available.
888 887
889 888 Examples
890 889 --------
891 890
892 891 Video('https://archive.org/download/Sita_Sings_the_Blues/Sita_Sings_the_Blues_small.mp4')
893 892 Video('path/to/video.mp4')
894 893 Video('path/to/video.mp4', embed=True)
895 894 Video(b'raw-videodata', embed=True)
896 895 """
897 896 if url is None and isinstance(data, string_types) and data.startswith(('http:', 'https:')):
898 897 url = data
899 898 data = None
900 899 elif os.path.exists(data):
901 900 filename = data
902 901 data = None
903 902
904 903 if data and not embed:
905 904 msg = ''.join([
906 905 "To embed videos, you must pass embed=True ",
907 906 "(this may make your notebook files huge)\n",
908 907 "Consider passing Video(url='...')",
909 908 ])
910 909 raise ValueError(msg)
911 910
912 911 self.mimetype = mimetype
913 912 self.embed = embed
914 913 super(Video, self).__init__(data=data, url=url, filename=filename)
915 914
916 915 def _repr_html_(self):
917 916 # External URLs and potentially local files are not embedded into the
918 917 # notebook output.
919 918 if not self.embed:
920 919 url = self.url if self.url is not None else self.filename
921 920 output = """<video src="{0}" controls>
922 921 Your browser does not support the <code>video</code> element.
923 922 </video>""".format(url)
924 923 return output
925 924
926 925 # Embedded videos are base64-encoded.
927 926 mimetype = self.mimetype
928 927 if self.filename is not None:
929 928 if not mimetype:
930 929 mimetype, _ = mimetypes.guess_type(self.filename)
931 930
932 931 with open(self.filename, 'rb') as f:
933 932 video = f.read()
934 933 else:
935 934 video = self.data
936 935 if isinstance(video, unicode_type):
937 936 # unicode input is already b64-encoded
938 937 b64_video = video
939 938 else:
940 939 b64_video = base64_encode(video).decode('ascii').rstrip()
941 940
942 941 output = """<video controls>
943 942 <source src="data:{0};base64,{1}" type="{0}">
944 943 Your browser does not support the video tag.
945 944 </video>""".format(mimetype, b64_video)
946 945 return output
947 946
948 947 def reload(self):
949 948 # TODO
950 949 pass
951 950
952 951 def _repr_png_(self):
953 952 # TODO
954 953 pass
955 954 def _repr_jpeg_(self):
956 955 # TODO
957 956 pass
958 957
959 958 def clear_output(wait=False):
960 959 """Clear the output of the current cell receiving output.
961 960
962 961 Parameters
963 962 ----------
964 963 wait : bool [default: false]
965 964 Wait to clear the output until new output is available to replace it."""
966 965 from IPython.core.interactiveshell import InteractiveShell
967 966 if InteractiveShell.initialized():
968 967 InteractiveShell.instance().display_pub.clear_output(wait)
969 968 else:
970 969 print('\033[2K\r', end='')
971 970 sys.stdout.flush()
972 971 print('\033[2K\r', end='')
973 972 sys.stderr.flush()
974 973
975 974
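A common pattern built on clear_output is a simple in-place progress display (a sketch; it works in the notebook and, via the ANSI fallback above, in a plain terminal):

import time
from IPython.display import clear_output

for i in range(5):
    clear_output(wait=True)      # defer clearing until new output arrives
    print('step %d/5' % (i + 1))
    time.sleep(0.2)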
976 975 @skip_doctest
977 976 def set_matplotlib_formats(*formats, **kwargs):
978 977 """Select figure formats for the inline backend. Optionally pass quality for JPEG.
979 978
980 979 For example, this enables PNG and JPEG output with a JPEG quality of 90%::
981 980
982 981 In [1]: set_matplotlib_formats('png', 'jpeg', quality=90)
983 982
984 983 To set this in your config files use the following::
985 984
986 985 c.InlineBackend.figure_formats = {'png', 'jpeg'}
987 986 c.InlineBackend.print_figure_kwargs.update({'quality' : 90})
988 987
989 988 Parameters
990 989 ----------
991 990 *formats : strs
992 991 One or more figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
993 992 **kwargs :
994 993 Keyword args will be relayed to ``figure.canvas.print_figure``.
995 994 """
996 995 from IPython.core.interactiveshell import InteractiveShell
997 996 from IPython.core.pylabtools import select_figure_formats
998 997 # build kwargs, starting with InlineBackend config
999 998 kw = {}
1000 999 from ipykernel.pylab.config import InlineBackend
1001 1000 cfg = InlineBackend.instance()
1002 1001 kw.update(cfg.print_figure_kwargs)
1003 1002 kw.update(**kwargs)
1004 1003 shell = InteractiveShell.instance()
1005 1004 select_figure_formats(shell, formats, **kw)
1006 1005
1007 1006 @skip_doctest
1008 1007 def set_matplotlib_close(close=True):
1009 1008 """Set whether the inline backend closes all figures automatically or not.
1010 1009
1011 1010 By default, the inline backend used in the IPython Notebook will close all
1012 1011 matplotlib figures automatically after each cell is run. This means that
1013 1012 plots in different cells won't interfere. Sometimes, you may want to make
1014 1013 a plot in one cell and then refine it in later cells. This can be accomplished
1015 1014 by::
1016 1015
1017 1016 In [1]: set_matplotlib_close(False)
1018 1017
1019 1018 To set this in your config files use the following::
1020 1019
1021 1020 c.InlineBackend.close_figures = False
1022 1021
1023 1022 Parameters
1024 1023 ----------
1025 1024 close : bool
1026 1025 Should all matplotlib figures be automatically closed after each cell is
1027 1026 run?
1028 1027 """
1029 1028 from ipykernel.pylab.config import InlineBackend
1030 1029 cfg = InlineBackend.instance()
1031 1030 cfg.close_figures = close
@@ -1,322 +1,321 b''
1 1 # -*- coding: utf-8 -*-
2 2 """Displayhook for IPython.
3 3
4 4 This defines a callable class that IPython uses for `sys.displayhook`.
5 5 """
6 6
7 7 # Copyright (c) IPython Development Team.
8 8 # Distributed under the terms of the Modified BSD License.
9 9
10 from __future__ import print_function
11 10
12 11 import sys
13 12 import io as _io
14 13 import tokenize
15 14
16 15 from traitlets.config.configurable import Configurable
17 16 from IPython.utils.py3compat import builtin_mod, cast_unicode_py2
18 17 from traitlets import Instance, Float
19 18 from warnings import warn
20 19
21 20 # TODO: Move the various attributes (cache_size, [others now moved]). Some
22 21 # of these are also attributes of InteractiveShell. They should be on ONE object
23 22 # only and the other objects should ask that one object for their values.
24 23
25 24 class DisplayHook(Configurable):
26 25 """The custom IPython displayhook to replace sys.displayhook.
27 26
28 27 This class does many things, but the basic idea is that it is a callable
29 28 that gets called anytime user code returns a value.
30 29 """
31 30
32 31 shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
33 32 allow_none=True)
34 33 exec_result = Instance('IPython.core.interactiveshell.ExecutionResult',
35 34 allow_none=True)
36 35 cull_fraction = Float(0.2)
37 36
38 37 def __init__(self, shell=None, cache_size=1000, **kwargs):
39 38 super(DisplayHook, self).__init__(shell=shell, **kwargs)
40 39 cache_size_min = 3
41 40 if cache_size <= 0:
42 41 self.do_full_cache = 0
43 42 cache_size = 0
44 43 elif cache_size < cache_size_min:
45 44 self.do_full_cache = 0
46 45 cache_size = 0
47 46 warn('caching was disabled (min value for cache size is %s).' %
48 47 cache_size_min,stacklevel=3)
49 48 else:
50 49 self.do_full_cache = 1
51 50
52 51 self.cache_size = cache_size
53 52
54 53 # we need a reference to the user-level namespace
55 54 self.shell = shell
56 55
57 56 self._,self.__,self.___ = '','',''
58 57
59 58 # these are deliberately global:
60 59 to_user_ns = {'_':self._,'__':self.__,'___':self.___}
61 60 self.shell.user_ns.update(to_user_ns)
62 61
63 62 @property
64 63 def prompt_count(self):
65 64 return self.shell.execution_count
66 65
67 66 #-------------------------------------------------------------------------
68 67 # Methods used in __call__. Override these methods to modify the behavior
69 68 # of the displayhook.
70 69 #-------------------------------------------------------------------------
71 70
72 71 def check_for_underscore(self):
73 72 """Check if the user has set the '_' variable by hand."""
74 73 # If something injected a '_' variable in __builtin__, delete
75 74 # ipython's automatic one so we don't clobber that. gettext() in
76 75 # particular uses _, so we need to stay away from it.
77 76 if '_' in builtin_mod.__dict__:
78 77 try:
79 78 user_value = self.shell.user_ns['_']
80 79 if user_value is not self._:
81 80 return
82 81 del self.shell.user_ns['_']
83 82 except KeyError:
84 83 pass
85 84
86 85 def quiet(self):
87 86 """Should we silence the display hook because of ';'?"""
88 87 # do not print output if input ends in ';'
89 88
90 89 try:
91 90 cell = cast_unicode_py2(self.shell.history_manager.input_hist_parsed[-1])
92 91 except IndexError:
93 92 # some uses of ipshellembed may fail here
94 93 return False
95 94
96 95 sio = _io.StringIO(cell)
97 96 tokens = list(tokenize.generate_tokens(sio.readline))
98 97
99 98 for token in reversed(tokens):
100 99 if token[0] in (tokenize.ENDMARKER, tokenize.NL, tokenize.NEWLINE, tokenize.COMMENT):
101 100 continue
102 101 if (token[0] == tokenize.OP) and (token[1] == ';'):
103 102 return True
104 103 else:
105 104 return False
106 105
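The semicolon test above can be seen in isolation; this is a standalone sketch of the same tokenize-based check, outside the shell:

import io
import tokenize

def ends_with_semicolon(cell):
    tokens = list(tokenize.generate_tokens(io.StringIO(cell).readline))
    for tok in reversed(tokens):
        if tok[0] in (tokenize.ENDMARKER, tokenize.NL, tokenize.NEWLINE, tokenize.COMMENT):
            continue
        return tok[0] == tokenize.OP and tok[1] == ';'
    return False

ends_with_semicolon('1 + 1;')   # True  -> displayhook stays quiet
ends_with_semicolon('1 + 1')    # False -> Out[n] is printed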
107 106 def start_displayhook(self):
108 107 """Start the displayhook, initializing resources."""
109 108 pass
110 109
111 110 def write_output_prompt(self):
112 111 """Write the output prompt.
113 112
114 113 The default implementation simply writes the prompt to
115 114 ``sys.stdout``.
116 115 """
117 116 # Use write, not print which adds an extra space.
118 117 sys.stdout.write(self.shell.separate_out)
119 118 outprompt = 'Out[{}]: '.format(self.shell.execution_count)
120 119 if self.do_full_cache:
121 120 sys.stdout.write(outprompt)
122 121
123 122 def compute_format_data(self, result):
124 123 """Compute format data of the object to be displayed.
125 124
126 125 The format data is a generalization of the :func:`repr` of an object.
127 126 In the default implementation the format data is a :class:`dict` of
128 127 key/value pairs where the keys are valid MIME types and the values
129 128 are JSON'able data structures containing the raw data for that MIME
130 129 type. It is up to frontends to pick a MIME type to use and
131 130 display that data in an appropriate manner.
132 131
133 132 This method only computes the format data for the object and should
134 133 NOT actually print or write that to a stream.
135 134
136 135 Parameters
137 136 ----------
138 137 result : object
139 138 The Python object passed to the display hook, whose format will be
140 139 computed.
141 140
142 141 Returns
143 142 -------
144 143 (format_dict, md_dict) : dict
145 144 format_dict is a :class:`dict` whose keys are valid MIME types and values are
146 145 JSON'able raw data for that MIME type. It is recommended that
147 146 the return value always include the "text/plain"
148 147 MIME type representation of the object.
149 148 md_dict is a :class:`dict` with the same MIME type keys
150 149 of metadata associated with each output.
151 150
152 151 """
153 152 return self.shell.display_formatter.format(result)
154 153
155 154 # This can be set to True by the write_output_prompt method in a subclass
156 155 prompt_end_newline = False
157 156
158 157 def write_format_data(self, format_dict, md_dict=None):
159 158 """Write the format data dict to the frontend.
160 159
161 160 The default version of this method simply writes the plain text
162 161 representation of the object to ``sys.stdout``. Subclasses should
163 162 override this method to send the entire `format_dict` to the
164 163 frontends.
165 164
166 165 Parameters
167 166 ----------
168 167 format_dict : dict
169 168 The format dict for the object passed to `sys.displayhook`.
170 169 md_dict : dict (optional)
171 170 The metadata dict to be associated with the display data.
172 171 """
173 172 if 'text/plain' not in format_dict:
174 173 # nothing to do
175 174 return
176 175 # We want to print because we want to always make sure we have a
177 176 # newline, even if all the prompt separators are ''. This is the
178 177 # standard IPython behavior.
179 178 result_repr = format_dict['text/plain']
180 179 if '\n' in result_repr:
181 180 # So that multi-line strings line up with the left column of
182 181 # the screen, instead of having the output prompt mess up
183 182 # their first line.
184 183 # We use the prompt template instead of the expanded prompt
185 184 # because the expansion may add ANSI escapes that will interfere
186 185 # with our ability to determine whether or not we should add
187 186 # a newline.
188 187 if not self.prompt_end_newline:
189 188 # But avoid extraneous empty lines.
190 189 result_repr = '\n' + result_repr
191 190
192 191 print(result_repr)
193 192
194 193 def update_user_ns(self, result):
195 194 """Update user_ns with various things like _, __, _1, etc."""
196 195
197 196 # Avoid recursive reference when displaying _oh/Out
198 197 if result is not self.shell.user_ns['_oh']:
199 198 if len(self.shell.user_ns['_oh']) >= self.cache_size and self.do_full_cache:
200 199 self.cull_cache()
201 200
202 201 # Don't overwrite '_' and friends if '_' is in __builtin__
203 202 # (otherwise we cause buggy behavior for things like gettext), and
204 203 # do not overwrite _, __ or ___ if one of these has been assigned
205 204 # by the user.
206 205 update_unders = True
207 206 for unders in ['_'*i for i in range(1,4)]:
208 207 if unders not in self.shell.user_ns:
209 208 continue
210 209 if getattr(self, unders) is not self.shell.user_ns.get(unders):
211 210 update_unders = False
212 211
213 212 self.___ = self.__
214 213 self.__ = self._
215 214 self._ = result
216 215
217 216 if ('_' not in builtin_mod.__dict__) and (update_unders):
218 217 self.shell.push({'_':self._,
219 218 '__':self.__,
220 219 '___':self.___}, interactive=False)
221 220
222 221 # hackish access to top-level namespace to create _1,_2... dynamically
223 222 to_main = {}
224 223 if self.do_full_cache:
225 224 new_result = '_%s' % self.prompt_count
226 225 to_main[new_result] = result
227 226 self.shell.push(to_main, interactive=False)
228 227 self.shell.user_ns['_oh'][self.prompt_count] = result
229 228
230 229 def fill_exec_result(self, result):
231 230 if self.exec_result is not None:
232 231 self.exec_result.result = result
233 232
234 233 def log_output(self, format_dict):
235 234 """Log the output."""
236 235 if 'text/plain' not in format_dict:
237 236 # nothing to do
238 237 return
239 238 if self.shell.logger.log_output:
240 239 self.shell.logger.log_write(format_dict['text/plain'], 'output')
241 240 self.shell.history_manager.output_hist_reprs[self.prompt_count] = \
242 241 format_dict['text/plain']
243 242
244 243 def finish_displayhook(self):
245 244 """Finish up all displayhook activities."""
246 245 sys.stdout.write(self.shell.separate_out2)
247 246 sys.stdout.flush()
248 247
249 248 def __call__(self, result=None):
250 249 """Printing with history cache management.
251 250
252 251 This is invoked every time the interpreter needs to print, and is
253 252 activated by setting the variable sys.displayhook to it.
254 253 """
255 254 self.check_for_underscore()
256 255 if result is not None and not self.quiet():
257 256 self.start_displayhook()
258 257 self.write_output_prompt()
259 258 format_dict, md_dict = self.compute_format_data(result)
260 259 self.update_user_ns(result)
261 260 self.fill_exec_result(result)
262 261 if format_dict:
263 262 self.write_format_data(format_dict, md_dict)
264 263 self.log_output(format_dict)
265 264 self.finish_displayhook()
266 265
267 266 def cull_cache(self):
268 267 """Output cache is full, cull the oldest entries"""
269 268 oh = self.shell.user_ns.get('_oh', {})
270 269 sz = len(oh)
271 270 cull_count = max(int(sz * self.cull_fraction), 2)
272 271 warn('Output cache limit (currently {sz} entries) hit.\n'
273 272 'Flushing oldest {cull_count} entries.'.format(sz=sz, cull_count=cull_count))
274 273
275 274 for i, n in enumerate(sorted(oh)):
276 275 if i >= cull_count:
277 276 break
278 277 self.shell.user_ns.pop('_%i' % n, None)
279 278 oh.pop(n, None)
280 279
281 280
282 281 def flush(self):
283 282 if not self.do_full_cache:
284 283 raise ValueError("You shouldn't have reached the cache flush "
285 284 "if full caching is not enabled!")
286 285 # delete auto-generated vars from global namespace
287 286
288 287 for n in range(1,self.prompt_count + 1):
289 288 key = '_'+repr(n)
290 289 try:
291 290 del self.shell.user_ns[key]
292 291 except: pass
293 292 # In some embedded circumstances, the user_ns doesn't have the
294 293 # '_oh' key set up.
295 294 oh = self.shell.user_ns.get('_oh', None)
296 295 if oh is not None:
297 296 oh.clear()
298 297
299 298 # Release our own references to objects:
300 299 self._, self.__, self.___ = '', '', ''
301 300
302 301 if '_' not in builtin_mod.__dict__:
303 302 self.shell.user_ns.update({'_':None,'__':None, '___':None})
304 303 import gc
305 304 # TODO: Is this really needed?
306 305 # IronPython blocks here forever
307 306 if sys.platform != "cli":
308 307 gc.collect()
309 308
310 309
311 310 class CapturingDisplayHook(object):
312 311 def __init__(self, shell, outputs=None):
313 312 self.shell = shell
314 313 if outputs is None:
315 314 outputs = []
316 315 self.outputs = outputs
317 316
318 317 def __call__(self, result=None):
319 318 if result is None:
320 319 return
321 320 format_dict, md_dict = self.shell.display_formatter.format(result)
322 321 self.outputs.append((format_dict, md_dict))
@@ -1,117 +1,116 b''
1 1 """An interface for publishing rich data to frontends.
2 2
3 3 There are two components of the display system:
4 4
5 5 * Display formatters, which take a Python object and compute the
6 6 representation of the object in various formats (text, HTML, SVG, etc.).
7 7 * The display publisher that is used to send the representation data to the
8 8 various frontends.
9 9
10 10 This module defines the logic for display publishing. The display publisher uses
11 11 the ``display_data`` message type that is defined in the IPython messaging
12 12 spec.
13 13 """
14 14
15 15 # Copyright (c) IPython Development Team.
16 16 # Distributed under the terms of the Modified BSD License.
17 17
18 from __future__ import print_function
19 18
20 19 import sys
21 20
22 21 from traitlets.config.configurable import Configurable
23 22 from traitlets import List
24 23
25 24 # This used to be defined here - it is imported for backwards compatibility
26 25 from .display import publish_display_data
27 26
28 27 #-----------------------------------------------------------------------------
29 28 # Main payload class
30 29 #-----------------------------------------------------------------------------
31 30
32 31 class DisplayPublisher(Configurable):
33 32 """A traited class that publishes display data to frontends.
34 33
35 34 Instances of this class are created by the main IPython object and should
36 35 be accessed there.
37 36 """
38 37
39 38 def _validate_data(self, data, metadata=None):
40 39 """Validate the display data.
41 40
42 41 Parameters
43 42 ----------
44 43 data : dict
45 44 The format data dictionary.
46 45 metadata : dict
47 46 Any metadata for the data.
48 47 """
49 48
50 49 if not isinstance(data, dict):
51 50 raise TypeError('data must be a dict, got: %r' % data)
52 51 if metadata is not None:
53 52 if not isinstance(metadata, dict):
54 53 raise TypeError('metadata must be a dict, got: %r' % metadata)
55 54
56 55 def publish(self, data, metadata=None, source=None):
57 56 """Publish data and metadata to all frontends.
58 57
59 58 See the ``display_data`` message in the messaging documentation for
60 59 more details about this message type.
61 60
62 61 The following MIME types are currently implemented:
63 62
64 63 * text/plain
65 64 * text/html
66 65 * text/markdown
67 66 * text/latex
68 67 * application/json
69 68 * application/javascript
70 69 * image/png
71 70 * image/jpeg
72 71 * image/svg+xml
73 72
74 73 Parameters
75 74 ----------
76 75 data : dict
77 76 A dictionary having keys that are valid MIME types (like
78 77 'text/plain' or 'image/svg+xml') and values that are the data for
79 78 that MIME type. The data itself must be a JSON'able data
80 79 structure. Minimally all data should have the 'text/plain' data,
81 80 which can be displayed by all frontends. If more than the plain
82 81 text is given, it is up to the frontend to decide which
83 82 representation to use.
84 83 metadata : dict
85 84 A dictionary for metadata related to the data. This can contain
86 85 arbitrary key, value pairs that frontends can use to interpret
87 86 the data. Metadata specific to each mime-type can be specified
88 87 in the metadata dict with the same mime-type keys as
89 88 the data itself.
90 89 source : str, deprecated
91 90 Unused.
92 91 """
93 92
94 93 # The default is to simply write the plain text data using sys.stdout.
95 94 if 'text/plain' in data:
96 95 print(data['text/plain'])
97 96
98 97 def clear_output(self, wait=False):
99 98 """Clear the output of the cell receiving output."""
100 99 print('\033[2K\r', end='')
101 100 sys.stdout.flush()
102 101 print('\033[2K\r', end='')
103 102 sys.stderr.flush()
104 103
105 104
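A sketch of publishing rich data through this interface from user code; in practice one calls IPython.display.publish_display_data rather than instantiating the publisher directly:

from IPython.display import publish_display_data

publish_display_data(
    data={
        'text/plain': 'fallback representation',
        'text/html': '<b>rich representation</b>',
    },
    metadata={'text/html': {'isolated': False}},
)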
106 105 class CapturingDisplayPublisher(DisplayPublisher):
107 106 """A DisplayPublisher that stores"""
108 107 outputs = List()
109 108
110 109 def publish(self, data, metadata=None, source=None):
111 110 self.outputs.append((data, metadata))
112 111
113 112 def clear_output(self, wait=False):
114 113 super(CapturingDisplayPublisher, self).clear_output(wait)
115 114
116 115 # empty the list, *do not* reassign a new list
117 116 del self.outputs[:]
@@ -1,131 +1,130 b''
1 1 """Infrastructure for registering and firing callbacks on application events.
2 2
3 3 Unlike :mod:`IPython.core.hooks`, which lets end users set single functions to
4 4 be called at specific times, or a collection of alternative methods to try,
5 5 callbacks are designed to be used by extension authors. A number of callbacks
6 6 can be registered for the same event without needing to be aware of one another.
7 7
8 8 The functions defined in this module are no-ops indicating the names of available
9 9 events and the arguments which will be passed to them.
10 10
11 11 .. note::
12 12
13 13 This API is experimental in IPython 2.0, and may be revised in future versions.
14 14 """
15 from __future__ import print_function
16 15
17 16 class EventManager(object):
18 17 """Manage a collection of events and a sequence of callbacks for each.
19 18
20 19 This is attached to :class:`~IPython.core.interactiveshell.InteractiveShell`
21 20 instances as an ``events`` attribute.
22 21
23 22 .. note::
24 23
25 24 This API is experimental in IPython 2.0, and may be revised in future versions.
26 25 """
27 26 def __init__(self, shell, available_events):
28 27 """Initialise the :class:`CallbackManager`.
29 28
30 29 Parameters
31 30 ----------
32 31 shell
33 32 The :class:`~IPython.core.interactiveshell.InteractiveShell` instance
34 33 available_callbacks
35 34 An iterable of names for callback events.
36 35 """
37 36 self.shell = shell
38 37 self.callbacks = {n:[] for n in available_events}
39 38
40 39 def register(self, event, function):
41 40 """Register a new event callback
42 41
43 42 Parameters
44 43 ----------
45 44 event : str
46 45 The event for which to register this callback.
47 46 function : callable
48 47 A function to be called on the given event. It should take the same
49 48 parameters as the appropriate callback prototype.
50 49
51 50 Raises
52 51 ------
53 52 TypeError
54 53 If ``function`` is not callable.
55 54 KeyError
56 55 If ``event`` is not one of the known events.
57 56 """
58 57 if not callable(function):
59 58 raise TypeError('Need a callable, got %r' % function)
60 59 self.callbacks[event].append(function)
61 60
62 61 def unregister(self, event, function):
63 62 """Remove a callback from the given event."""
64 63 self.callbacks[event].remove(function)
65 64
66 65 def trigger(self, event, *args, **kwargs):
67 66 """Call callbacks for ``event``.
68 67
69 68 Any additional arguments are passed to all callbacks registered for this
70 69 event. Exceptions raised by callbacks are caught, and a message printed.
71 70 """
72 71 for func in self.callbacks[event][:]:
73 72 try:
74 73 func(*args, **kwargs)
75 74 except Exception:
76 75 print("Error in callback {} (for {}):".format(func, event))
77 76 self.shell.showtraceback()
78 77
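For example, an extension might register a callback like this (a sketch assuming an active InteractiveShell; the callback takes no arguments, matching the pre_run_cell prototype below):

from IPython import get_ipython

def announce_cell():
    print('about to run a cell')

ip = get_ipython()
if ip is not None:
    ip.events.register('pre_run_cell', announce_cell)
    # later: ip.events.unregister('pre_run_cell', announce_cell)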
79 78 # event_name -> prototype mapping
80 79 available_events = {}
81 80
82 81 def _define_event(callback_proto):
83 82 available_events[callback_proto.__name__] = callback_proto
84 83 return callback_proto
85 84
86 85 # ------------------------------------------------------------------------------
87 86 # Callback prototypes
88 87 #
89 88 # No-op functions which describe the names of available events and the
90 89 # signatures of callbacks for those events.
91 90 # ------------------------------------------------------------------------------
92 91
93 92 @_define_event
94 93 def pre_execute():
95 94 """Fires before code is executed in response to user/frontend action.
96 95
97 96 This includes comm and widget messages and silent execution, as well as user
98 97 code cells."""
99 98 pass
100 99
101 100 @_define_event
102 101 def pre_run_cell():
103 102 """Fires before user-entered code runs."""
104 103 pass
105 104
106 105 @_define_event
107 106 def post_execute():
108 107 """Fires after code is executed in response to user/frontend action.
109 108
110 109 This includes comm and widget messages and silent execution, as well as user
111 110 code cells."""
112 111 pass
113 112
114 113 @_define_event
115 114 def post_run_cell():
116 115 """Fires after user-entered code runs."""
117 116 pass
118 117
119 118 @_define_event
120 119 def shell_initialized(ip):
121 120 """Fires after initialisation of :class:`~IPython.core.interactiveshell.InteractiveShell`.
122 121
123 122 This is before extensions and startup scripts are loaded, so it can only be
124 123 set by subclassing.
125 124
126 125 Parameters
127 126 ----------
128 127 ip : :class:`~IPython.core.interactiveshell.InteractiveShell`
129 128 The newly initialised shell.
130 129 """
131 130 pass
@@ -1,911 +1,910 b''
1 1 """ History related magics and functionality """
2 2
3 3 # Copyright (c) IPython Development Team.
4 4 # Distributed under the terms of the Modified BSD License.
5 5
6 from __future__ import print_function
7 6
8 7 import atexit
9 8 import datetime
10 9 import os
11 10 import re
12 11 try:
13 12 import sqlite3
14 13 except ImportError:
15 14 try:
16 15 from pysqlite2 import dbapi2 as sqlite3
17 16 except ImportError:
18 17 sqlite3 = None
19 18 import threading
20 19
21 20 from traitlets.config.configurable import LoggingConfigurable
22 21 from decorator import decorator
23 22 from IPython.utils.decorators import undoc
24 23 from IPython.utils.path import locate_profile
25 24 from IPython.utils import py3compat
26 25 from traitlets import (
27 26 Any, Bool, Dict, Instance, Integer, List, Unicode, TraitError,
28 27 default, observe,
29 28 )
30 29 from warnings import warn
31 30
32 31 #-----------------------------------------------------------------------------
33 32 # Classes and functions
34 33 #-----------------------------------------------------------------------------
35 34
36 35 @undoc
37 36 class DummyDB(object):
38 37 """Dummy DB that will act as a black hole for history.
39 38
40 39 Only used in the absence of sqlite"""
41 40 def execute(*args, **kwargs):
42 41 return []
43 42
44 43 def commit(self, *args, **kwargs):
45 44 pass
46 45
47 46 def __enter__(self, *args, **kwargs):
48 47 pass
49 48
50 49 def __exit__(self, *args, **kwargs):
51 50 pass
52 51
53 52
54 53 @decorator
55 54 def needs_sqlite(f, self, *a, **kw):
56 55 """Decorator: return an empty list in the absence of sqlite."""
57 56 if sqlite3 is None or not self.enabled:
58 57 return []
59 58 else:
60 59 return f(self, *a, **kw)
61 60
62 61
63 62 if sqlite3 is not None:
64 63 DatabaseError = sqlite3.DatabaseError
65 64 OperationalError = sqlite3.OperationalError
66 65 else:
67 66 @undoc
68 67 class DatabaseError(Exception):
69 68 "Dummy exception when sqlite could not be imported. Should never occur."
70 69
71 70 @undoc
72 71 class OperationalError(Exception):
73 72 "Dummy exception when sqlite could not be imported. Should never occur."
74 73
75 74 # use 16kB as threshold for whether a corrupt history db should be saved
76 75 # that should be at least 100 entries or so
77 76 _SAVE_DB_SIZE = 16384
78 77
79 78 @decorator
80 79 def catch_corrupt_db(f, self, *a, **kw):
81 80 """A decorator which wraps HistoryAccessor method calls to catch errors from
82 81 a corrupt SQLite database, move the old database out of the way, and create
83 82 a new one.
84 83
85 84 We avoid clobbering larger databases because this may be triggered due to filesystem issues,
86 85 not just a corrupt file.
87 86 """
88 87 try:
89 88 return f(self, *a, **kw)
90 89 except (DatabaseError, OperationalError) as e:
91 90 self._corrupt_db_counter += 1
92 91 self.log.error("Failed to open SQLite history %s (%s).", self.hist_file, e)
93 92 if self.hist_file != ':memory:':
94 93 if self._corrupt_db_counter > self._corrupt_db_limit:
95 94 self.hist_file = ':memory:'
96 95 self.log.error("Failed to load history too many times, history will not be saved.")
97 96 elif os.path.isfile(self.hist_file):
98 97 # move the file out of the way
99 98 base, ext = os.path.splitext(self.hist_file)
100 99 size = os.stat(self.hist_file).st_size
101 100 if size >= _SAVE_DB_SIZE:
102 101 # if there's significant content, avoid clobbering
103 102 now = datetime.datetime.now().isoformat().replace(':', '.')
104 103 newpath = base + '-corrupt-' + now + ext
105 104 # don't clobber previous corrupt backups
106 105 for i in range(100):
107 106 if not os.path.isfile(newpath):
108 107 break
109 108 else:
110 109 newpath = base + '-corrupt-' + now + (u'-%i' % i) + ext
111 110 else:
112 111 # not much content, possibly empty; don't worry about clobbering
113 112 # maybe we should just delete it?
114 113 newpath = base + '-corrupt' + ext
115 114 os.rename(self.hist_file, newpath)
116 115 self.log.error("History file was moved to %s and a new file created.", newpath)
117 116 self.init_db()
118 117 return []
119 118 else:
120 119 # Failed with :memory:, something serious is wrong
121 120 raise
122 121
123 122 class HistoryAccessorBase(LoggingConfigurable):
124 123 """An abstract class for History Accessors """
125 124
126 125 def get_tail(self, n=10, raw=True, output=False, include_latest=False):
127 126 raise NotImplementedError
128 127
129 128 def search(self, pattern="*", raw=True, search_raw=True,
130 129 output=False, n=None, unique=False):
131 130 raise NotImplementedError
132 131
133 132 def get_range(self, session, start=1, stop=None, raw=True,output=False):
134 133 raise NotImplementedError
135 134
136 135 def get_range_by_str(self, rangestr, raw=True, output=False):
137 136 raise NotImplementedError
138 137
139 138
140 139 class HistoryAccessor(HistoryAccessorBase):
141 140 """Access the history database without adding to it.
142 141
143 142 This is intended for use by standalone history tools. IPython shells use
144 143 HistoryManager, below, which is a subclass of this."""
145 144
146 145 # counter for init_db retries, so we don't keep trying over and over
147 146 _corrupt_db_counter = 0
148 147 # after two failures, fallback on :memory:
149 148 _corrupt_db_limit = 2
150 149
151 150 # String holding the path to the history file
152 151 hist_file = Unicode(
153 152 help="""Path to file to use for SQLite history database.
154 153
155 154 By default, IPython will put the history database in the IPython
156 155 profile directory. If you would rather share one history among
157 156 profiles, you can set this value in each, so that they are consistent.
158 157
159 158 Due to an issue with fcntl, SQLite is known to misbehave on some NFS
160 159 mounts. If you see IPython hanging, try setting this to something on a
161 160 local disk, e.g::
162 161
163 162 ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
164 163
165 164 you can also use the specific value `:memory:` (including the colon
166 165 at both ends but not the backticks) to avoid creating a history file.
167 166
168 167 """).tag(config=True)
169 168
170 169 enabled = Bool(True,
171 170 help="""enable the SQLite history
172 171
173 172 set enabled=False to disable the SQLite history,
174 173 in which case there will be no stored history, no SQLite connection,
175 174 and no background saving thread. This may be necessary in some
176 175 threaded environments where IPython is embedded.
177 176 """
178 177 ).tag(config=True)
179 178
180 179 connection_options = Dict(
181 180 help="""Options for configuring the SQLite connection
182 181
183 182 These options are passed as keyword args to sqlite3.connect
184 183 when establishing database connections.
185 184 """
186 185 ).tag(config=True)
187 186
188 187 # The SQLite database
189 188 db = Any()
190 189 @observe('db')
191 190 def _db_changed(self, change):
192 191 """validate the db, since it can be an Instance of two different types"""
193 192 new = change['new']
194 193 connection_types = (DummyDB,)
195 194 if sqlite3 is not None:
196 195 connection_types = (DummyDB, sqlite3.Connection)
197 196 if not isinstance(new, connection_types):
198 197 msg = "%s.db must be sqlite3 Connection or DummyDB, not %r" % \
199 198 (self.__class__.__name__, new)
200 199 raise TraitError(msg)
201 200
202 201 def __init__(self, profile='default', hist_file=u'', **traits):
203 202 """Create a new history accessor.
204 203
205 204 Parameters
206 205 ----------
207 206 profile : str
208 207 The name of the profile from which to open history.
209 208 hist_file : str
210 209 Path to an SQLite history database stored by IPython. If specified,
211 210 hist_file overrides profile.
212 211 config : :class:`~traitlets.config.loader.Config`
213 212 Config object. hist_file can also be set through this.
214 213 """
215 214 # We need a pointer back to the shell for various tasks.
216 215 super(HistoryAccessor, self).__init__(**traits)
217 216 # defer setting hist_file from kwarg until after init,
218 217 # otherwise the default kwarg value would clobber any value
219 218 # set by config
220 219 if hist_file:
221 220 self.hist_file = hist_file
222 221
223 222 if self.hist_file == u'':
224 223 # No one has set the hist_file, yet.
225 224 self.hist_file = self._get_hist_file_name(profile)
226 225
227 226 if sqlite3 is None and self.enabled:
228 227 warn("IPython History requires SQLite, your history will not be saved")
229 228 self.enabled = False
230 229
231 230 self.init_db()
232 231
233 232 def _get_hist_file_name(self, profile='default'):
234 233 """Find the history file for the given profile name.
235 234
236 235 This is overridden by the HistoryManager subclass, to use the shell's
237 236 active profile.
238 237
239 238 Parameters
240 239 ----------
241 240 profile : str
242 241 The name of a profile which has a history file.
243 242 """
244 243 return os.path.join(locate_profile(profile), 'history.sqlite')
245 244
246 245 @catch_corrupt_db
247 246 def init_db(self):
248 247 """Connect to the database, and create tables if necessary."""
249 248 if not self.enabled:
250 249 self.db = DummyDB()
251 250 return
252 251
253 252 # use detect_types so that timestamps return datetime objects
254 253 kwargs = dict(detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
255 254 kwargs.update(self.connection_options)
256 255 self.db = sqlite3.connect(self.hist_file, **kwargs)
257 256 self.db.execute("""CREATE TABLE IF NOT EXISTS sessions (session integer
258 257 primary key autoincrement, start timestamp,
259 258 end timestamp, num_cmds integer, remark text)""")
260 259 self.db.execute("""CREATE TABLE IF NOT EXISTS history
261 260 (session integer, line integer, source text, source_raw text,
262 261 PRIMARY KEY (session, line))""")
263 262 # Output history is optional, but ensure the table's there so it can be
264 263 # enabled later.
265 264 self.db.execute("""CREATE TABLE IF NOT EXISTS output_history
266 265 (session integer, line integer, output text,
267 266 PRIMARY KEY (session, line))""")
268 267 self.db.commit()
269 268 # success! reset corrupt db count
270 269 self._corrupt_db_counter = 0
271 270
272 271 def writeout_cache(self):
273 272 """Overridden by HistoryManager to dump the cache before certain
274 273 database lookups."""
275 274 pass
276 275
277 276 ## -------------------------------
278 277 ## Methods for retrieving history:
279 278 ## -------------------------------
280 279 def _run_sql(self, sql, params, raw=True, output=False):
281 280 """Prepares and runs an SQL query for the history database.
282 281
283 282 Parameters
284 283 ----------
285 284 sql : str
286 285 Any filtering expressions to go after SELECT ... FROM ...
287 286 params : tuple
288 287 Parameters passed to the SQL query (to replace "?")
289 288 raw, output : bool
290 289 See :meth:`get_range`
291 290
292 291 Returns
293 292 -------
294 293 Tuples as :meth:`get_range`
295 294 """
296 295 toget = 'source_raw' if raw else 'source'
297 296 sqlfrom = "history"
298 297 if output:
299 298 sqlfrom = "history LEFT JOIN output_history USING (session, line)"
300 299 toget = "history.%s, output_history.output" % toget
301 300 cur = self.db.execute("SELECT session, line, %s FROM %s " %\
302 301 (toget, sqlfrom) + sql, params)
303 302 if output: # Regroup into 3-tuples, and parse JSON
304 303 return ((ses, lin, (inp, out)) for ses, lin, inp, out in cur)
305 304 return cur
306 305
307 306 @needs_sqlite
308 307 @catch_corrupt_db
309 308 def get_session_info(self, session):
310 309 """Get info about a session.
311 310
312 311 Parameters
313 312 ----------
314 313
315 314 session : int
316 315 Session number to retrieve.
317 316
318 317 Returns
319 318 -------
320 319
321 320 session_id : int
322 321 Session ID number
323 322 start : datetime
324 323 Timestamp for the start of the session.
325 324 end : datetime
326 325 Timestamp for the end of the session, or None if IPython crashed.
327 326 num_cmds : int
328 327 Number of commands run, or None if IPython crashed.
329 328 remark : unicode
330 329 A manually set description.
331 330 """
332 331 query = "SELECT * from sessions where session == ?"
333 332 return self.db.execute(query, (session,)).fetchone()
334 333
335 334 @catch_corrupt_db
336 335 def get_last_session_id(self):
337 336 """Get the last session ID currently in the database.
338 337
339 338 Within IPython, this should be the same as the value stored in
340 339 :attr:`HistoryManager.session_number`.
341 340 """
342 341 for record in self.get_tail(n=1, include_latest=True):
343 342 return record[0]
344 343
345 344 @catch_corrupt_db
346 345 def get_tail(self, n=10, raw=True, output=False, include_latest=False):
347 346 """Get the last n lines from the history database.
348 347
349 348 Parameters
350 349 ----------
351 350 n : int
352 351 The number of lines to get
353 352 raw, output : bool
354 353 See :meth:`get_range`
355 354 include_latest : bool
356 355 If False (default), n+1 lines are fetched, and the latest one
357 356 is discarded. This is intended for use by user commands, so that
358 357 the command which triggered the lookup is not itself returned.
359 358
360 359 Returns
361 360 -------
362 361 Tuples as :meth:`get_range`
363 362 """
364 363 self.writeout_cache()
365 364 if not include_latest:
366 365 n += 1
367 366 cur = self._run_sql("ORDER BY session DESC, line DESC LIMIT ?",
368 367 (n,), raw=raw, output=output)
369 368 if not include_latest:
370 369 return reversed(list(cur)[1:])
371 370 return reversed(list(cur))
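# Example: a minimal sketch of reading a history database directly with
# HistoryAccessor; the path below is hypothetical, and an empty database is
# simply created if the file does not exist yet.
from IPython.core.history import HistoryAccessor

hist = HistoryAccessor(hist_file='/tmp/example_history.sqlite')
print(hist.get_last_session_id())
for session, line, source in hist.get_tail(5, raw=True, include_latest=True):
    print("%s/%s: %s" % (session, line, source))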
372 371
373 372 @catch_corrupt_db
374 373 def search(self, pattern="*", raw=True, search_raw=True,
375 374 output=False, n=None, unique=False):
376 375 """Search the database using unix glob-style matching (wildcards
377 376 * and ?).
378 377
379 378 Parameters
380 379 ----------
381 380 pattern : str
382 381 The wildcarded pattern to match when searching
383 382 search_raw : bool
384 383 If True, search the raw input; otherwise, search the parsed input
385 384 raw, output : bool
386 385 See :meth:`get_range`
387 386 n : None or int
388 387 If an integer is given, it defines the limit of
389 388 returned entries.
390 389 unique : bool
391 390 If True, return only unique entries.
392 391
393 392 Returns
394 393 -------
395 394 Tuples as :meth:`get_range`
396 395 """
397 396 tosearch = "source_raw" if search_raw else "source"
398 397 if output:
399 398 tosearch = "history." + tosearch
400 399 self.writeout_cache()
401 400 sqlform = "WHERE %s GLOB ?" % tosearch
402 401 params = (pattern,)
403 402 if unique:
404 403 sqlform += ' GROUP BY {0}'.format(tosearch)
405 404 if n is not None:
406 405 sqlform += " ORDER BY session DESC, line DESC LIMIT ?"
407 406 params += (n,)
408 407 elif unique:
409 408 sqlform += " ORDER BY session, line"
410 409 cur = self._run_sql(sqlform, params, raw=raw, output=output)
411 410 if n is not None:
412 411 return reversed(list(cur))
413 412 return cur
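# Example: a sketch of glob-style searching against the default profile's
# history.sqlite (the profile must already exist). Matching a substring
# needs * on both sides of the pattern.
from IPython.core.history import HistoryAccessor

hist = HistoryAccessor()
for session, line, source in hist.search('*plot*', n=10, unique=True):
    print("%s/%s: %s" % (session, line, source))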
414 413
415 414 @catch_corrupt_db
416 415 def get_range(self, session, start=1, stop=None, raw=True,output=False):
417 416 """Retrieve input by session.
418 417
419 418 Parameters
420 419 ----------
421 420 session : int
422 421 Session number to retrieve.
423 422 start : int
424 423 First line to retrieve.
425 424 stop : int
426 425 End of line range (excluded from output itself). If None, retrieve
427 426 to the end of the session.
428 427 raw : bool
429 428 If True, return untranslated input
430 429 output : bool
431 430 If True, attempt to include output. This will be 'real' Python
432 431 objects for the current session, or text reprs from previous
433 432 sessions if db_log_output was enabled at the time. Where no output
434 433 is found, None is used.
435 434
436 435 Returns
437 436 -------
438 437 entries
439 438 An iterator over the desired lines. Each line is a 3-tuple, either
440 439 (session, line, input) if output is False, or
441 440 (session, line, (input, output)) if output is True.
442 441 """
443 442 if stop:
444 443 lineclause = "line >= ? AND line < ?"
445 444 params = (session, start, stop)
446 445 else:
447 446 lineclause = "line>=?"
448 447 params = (session, start)
449 448
450 449 return self._run_sql("WHERE session==? AND %s" % lineclause,
451 450 params, raw=raw, output=output)
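# Example: a sketch of fetching lines 1-5 of session 1 together with any
# logged output; with output=True each row is (session, line, (input, output))
# and output is None where nothing was stored.
from IPython.core.history import HistoryAccessor

hist = HistoryAccessor()
for session, line, (source, output) in hist.get_range(1, 1, 6, output=True):
    print(line, source, output)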
452 451
453 452 def get_range_by_str(self, rangestr, raw=True, output=False):
454 453 """Get lines of history from a string of ranges, as used by magic
455 454 commands %hist, %save, %macro, etc.
456 455
457 456 Parameters
458 457 ----------
459 458 rangestr : str
460 459 A string specifying ranges, e.g. "5 ~2/1-4". See
461 460 :func:`magic_history` for full details.
462 461 raw, output : bool
463 462 As :meth:`get_range`
464 463
465 464 Returns
466 465 -------
467 466 Tuples as :meth:`get_range`
468 467 """
469 468 for sess, s, e in extract_hist_ranges(rangestr):
470 469 for line in self.get_range(sess, s, e, raw=raw, output=output):
471 470 yield line
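# Example: the same range syntax accepted by %history and %save, run against
# the live shell's HistoryManager (get_ipython() only returns a shell inside
# an IPython session). "~1/1-5" is lines 1-5 of the previous session; a bare
# "3-6" refers to the current session, and the "-" form includes the end line.
from IPython import get_ipython

hm = get_ipython().history_manager
for session, line, source in hm.get_range_by_str("~1/1-5 3-6"):
    print("%s/%s: %s" % (session, line, source))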
472 471
473 472
474 473 class HistoryManager(HistoryAccessor):
475 474 """A class to organize all history-related functionality in one place.
476 475 """
477 476 # Public interface
478 477
479 478 # An instance of the IPython shell we are attached to
480 479 shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
481 480 allow_none=True)
482 481 # Lists to hold processed and raw history. These start with a blank entry
483 482 # so that we can index them starting from 1
484 483 input_hist_parsed = List([""])
485 484 input_hist_raw = List([""])
486 485 # A list of directories visited during session
487 486 dir_hist = List()
488 487 @default('dir_hist')
489 488 def _dir_hist_default(self):
490 489 try:
491 490 return [py3compat.getcwd()]
492 491 except OSError:
493 492 return []
494 493
495 494 # A dict of output history, keyed with ints from the shell's
496 495 # execution count.
497 496 output_hist = Dict()
498 497 # The text/plain repr of outputs.
499 498 output_hist_reprs = Dict()
500 499
501 500 # The number of the current session in the history database
502 501 session_number = Integer()
503 502
504 503 db_log_output = Bool(False,
505 504 help="Should the history database include output? (default: no)"
506 505 ).tag(config=True)
507 506 db_cache_size = Integer(0,
508 507 help="Write to database every x commands (higher values save disk access & power).\n"
509 508 "Values of 1 or less effectively disable caching."
510 509 ).tag(config=True)
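# Example: both options are ordinary configurables, so they are usually set
# in the profile's ipython_config.py rather than in code, e.g.:
#
# c.HistoryManager.db_log_output = True   # also store text reprs of outputs
# c.HistoryManager.db_cache_size = 10     # flush to sqlite every 10 commands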
511 510 # The input and output caches
512 511 db_input_cache = List()
513 512 db_output_cache = List()
514 513
515 514 # History saving in separate thread
516 515 save_thread = Instance('IPython.core.history.HistorySavingThread',
517 516 allow_none=True)
518 517 try: # Event is a function returning an instance of _Event...
519 518 save_flag = Instance(threading._Event, allow_none=True)
520 519 except AttributeError: # ...until Python 3.3, when it's a class.
521 520 save_flag = Instance(threading.Event, allow_none=True)
522 521
523 522 # Private interface
524 523 # Variables used to store the three last inputs from the user. On each new
525 524 # history update, we populate the user's namespace with these, shifted as
526 525 # necessary.
527 526 _i00 = Unicode(u'')
528 527 _i = Unicode(u'')
529 528 _ii = Unicode(u'')
530 529 _iii = Unicode(u'')
531 530
532 531 # A regex matching all forms of the exit command, so that we don't store
533 532 # them in the history (it's annoying to rewind the first entry and land on
534 533 # an exit call).
535 534 _exit_re = re.compile(r"(exit|quit)(\s*\(.*\))?$")
536 535
537 536 def __init__(self, shell=None, config=None, **traits):
538 537 """Create a new history manager associated with a shell instance.
539 538 """
540 539 # We need a pointer back to the shell for various tasks.
541 540 super(HistoryManager, self).__init__(shell=shell, config=config,
542 541 **traits)
543 542 self.save_flag = threading.Event()
544 543 self.db_input_cache_lock = threading.Lock()
545 544 self.db_output_cache_lock = threading.Lock()
546 545
547 546 try:
548 547 self.new_session()
549 548 except OperationalError:
550 549 self.log.error("Failed to create history session in %s. History will not be saved.",
551 550 self.hist_file, exc_info=True)
552 551 self.hist_file = ':memory:'
553 552
554 553 if self.enabled and self.hist_file != ':memory:':
555 554 self.save_thread = HistorySavingThread(self)
556 555 self.save_thread.start()
557 556
558 557 def _get_hist_file_name(self, profile=None):
559 558 """Get default history file name based on the Shell's profile.
560 559
561 560 The profile parameter is ignored, but must exist for compatibility with
562 561 the parent class."""
563 562 profile_dir = self.shell.profile_dir.location
564 563 return os.path.join(profile_dir, 'history.sqlite')
565 564
566 565 @needs_sqlite
567 566 def new_session(self, conn=None):
568 567 """Get a new session number."""
569 568 if conn is None:
570 569 conn = self.db
571 570
572 571 with conn:
573 572 cur = conn.execute("""INSERT INTO sessions VALUES (NULL, ?, NULL,
574 573 NULL, "") """, (datetime.datetime.now(),))
575 574 self.session_number = cur.lastrowid
576 575
577 576 def end_session(self):
578 577 """Close the database session, filling in the end time and line count."""
579 578 self.writeout_cache()
580 579 with self.db:
581 580 self.db.execute("""UPDATE sessions SET end=?, num_cmds=? WHERE
582 581 session==?""", (datetime.datetime.now(),
583 582 len(self.input_hist_parsed)-1, self.session_number))
584 583 self.session_number = 0
585 584
586 585 def name_session(self, name):
587 586 """Give the current session a name in the history database."""
588 587 with self.db:
589 588 self.db.execute("UPDATE sessions SET remark=? WHERE session==?",
590 589 (name, self.session_number))
591 590
592 591 def reset(self, new_session=True):
593 592 """Clear the session history, releasing all object references, and
594 593 optionally open a new session."""
595 594 self.output_hist.clear()
596 595 # The directory history can't be completely empty
597 596 self.dir_hist[:] = [py3compat.getcwd()]
598 597
599 598 if new_session:
600 599 if self.session_number:
601 600 self.end_session()
602 601 self.input_hist_parsed[:] = [""]
603 602 self.input_hist_raw[:] = [""]
604 603 self.new_session()
605 604
606 605 # ------------------------------
607 606 # Methods for retrieving history
608 607 # ------------------------------
609 608 def get_session_info(self, session=0):
610 609 """Get info about a session.
611 610
612 611 Parameters
613 612 ----------
614 613
615 614 session : int
616 615 Session number to retrieve. The current session is 0, and negative
617 616 numbers count back from current session, so -1 is the previous session.
618 617
619 618 Returns
620 619 -------
621 620
622 621 session_id : int
623 622 Session ID number
624 623 start : datetime
625 624 Timestamp for the start of the session.
626 625 end : datetime
627 626 Timestamp for the end of the session, or None if IPython crashed.
628 627 num_cmds : int
629 628 Number of commands run, or None if IPython crashed.
630 629 remark : unicode
631 630 A manually set description.
632 631 """
633 632 if session <= 0:
634 633 session += self.session_number
635 634
636 635 return super(HistoryManager, self).get_session_info(session=session)
637 636
638 637 def _get_range_session(self, start=1, stop=None, raw=True, output=False):
639 638 """Get input and output history from the current session. Called by
640 639 get_range, and takes similar parameters."""
641 640 input_hist = self.input_hist_raw if raw else self.input_hist_parsed
642 641
643 642 n = len(input_hist)
644 643 if start < 0:
645 644 start += n
646 645 if not stop or (stop > n):
647 646 stop = n
648 647 elif stop < 0:
649 648 stop += n
650 649
651 650 for i in range(start, stop):
652 651 if output:
653 652 line = (input_hist[i], self.output_hist_reprs.get(i))
654 653 else:
655 654 line = input_hist[i]
656 655 yield (0, i, line)
657 656
658 657 def get_range(self, session=0, start=1, stop=None, raw=True,output=False):
659 658 """Retrieve input by session.
660 659
661 660 Parameters
662 661 ----------
663 662 session : int
664 663 Session number to retrieve. The current session is 0, and negative
665 664 numbers count back from current session, so -1 is previous session.
666 665 start : int
667 666 First line to retrieve.
668 667 stop : int
669 668 End of line range (excluded from output itself). If None, retrieve
670 669 to the end of the session.
671 670 raw : bool
672 671 If True, return untranslated input
673 672 output : bool
674 673 If True, attempt to include output. This will be 'real' Python
675 674 objects for the current session, or text reprs from previous
676 675 sessions if db_log_output was enabled at the time. Where no output
677 676 is found, None is used.
678 677
679 678 Returns
680 679 -------
681 680 entries
682 681 An iterator over the desired lines. Each line is a 3-tuple, either
683 682 (session, line, input) if output is False, or
684 683 (session, line, (input, output)) if output is True.
685 684 """
686 685 if session <= 0:
687 686 session += self.session_number
688 687 if session==self.session_number: # Current session
689 688 return self._get_range_session(start, stop, raw, output)
690 689 return super(HistoryManager, self).get_range(session, start, stop, raw,
691 690 output)
692 691
693 692 ## ----------------------------
694 693 ## Methods for storing history:
695 694 ## ----------------------------
696 695 def store_inputs(self, line_num, source, source_raw=None):
697 696 """Store source and raw input in history and create input cache
698 697 variables ``_i*``.
699 698
700 699 Parameters
701 700 ----------
702 701 line_num : int
703 702 The prompt number of this input.
704 703
705 704 source : str
706 705 Python input.
707 706
708 707 source_raw : str, optional
709 708 If given, this is the raw input without any IPython transformations
710 709 applied to it. If not given, ``source`` is used.
711 710 """
712 711 if source_raw is None:
713 712 source_raw = source
714 713 source = source.rstrip('\n')
715 714 source_raw = source_raw.rstrip('\n')
716 715
717 716 # do not store exit/quit commands
718 717 if self._exit_re.match(source_raw.strip()):
719 718 return
720 719
721 720 self.input_hist_parsed.append(source)
722 721 self.input_hist_raw.append(source_raw)
723 722
724 723 with self.db_input_cache_lock:
725 724 self.db_input_cache.append((line_num, source, source_raw))
726 725 # Trigger to flush cache and write to DB.
727 726 if len(self.db_input_cache) >= self.db_cache_size:
728 727 self.save_flag.set()
729 728
730 729 # update the auto _i variables
731 730 self._iii = self._ii
732 731 self._ii = self._i
733 732 self._i = self._i00
734 733 self._i00 = source_raw
735 734
736 735 # hackish access to user namespace to create _i1,_i2... dynamically
737 736 new_i = '_i%s' % line_num
738 737 to_main = {'_i': self._i,
739 738 '_ii': self._ii,
740 739 '_iii': self._iii,
741 740 new_i : self._i00 }
742 741
743 742 if self.shell is not None:
744 743 self.shell.push(to_main, interactive=False)
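# Example: a minimal sketch of the cache variables created above. Running it
# instantiates the real InteractiveShell singleton and appends to the active
# profile's history database.
from IPython.core.interactiveshell import InteractiveShell

ip = InteractiveShell.instance()
ip.run_cell("x = 1")
ip.run_cell("x + 1")
print(ip.user_ns['_i1'], '|', ip.user_ns['_i2'])   # per-line input variables
print(repr(ip.user_ns['_i']))                      # the input before the latest one
print(ip.history_manager.input_hist_raw[1:])       # raw history, 1-indexed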
745 744
746 745 def store_output(self, line_num):
747 746 """If database output logging is enabled, this saves all the
748 747 outputs from the indicated prompt number to the database. It's
749 748 called by run_cell after code has been executed.
750 749
751 750 Parameters
752 751 ----------
753 752 line_num : int
754 753 The line number from which to save outputs
755 754 """
756 755 if (not self.db_log_output) or (line_num not in self.output_hist_reprs):
757 756 return
758 757 output = self.output_hist_reprs[line_num]
759 758
760 759 with self.db_output_cache_lock:
761 760 self.db_output_cache.append((line_num, output))
762 761 if self.db_cache_size <= 1:
763 762 self.save_flag.set()
764 763
765 764 def _writeout_input_cache(self, conn):
766 765 with conn:
767 766 for line in self.db_input_cache:
768 767 conn.execute("INSERT INTO history VALUES (?, ?, ?, ?)",
769 768 (self.session_number,)+line)
770 769
771 770 def _writeout_output_cache(self, conn):
772 771 with conn:
773 772 for line in self.db_output_cache:
774 773 conn.execute("INSERT INTO output_history VALUES (?, ?, ?)",
775 774 (self.session_number,)+line)
776 775
777 776 @needs_sqlite
778 777 def writeout_cache(self, conn=None):
779 778 """Write any entries in the cache to the database."""
780 779 if conn is None:
781 780 conn = self.db
782 781
783 782 with self.db_input_cache_lock:
784 783 try:
785 784 self._writeout_input_cache(conn)
786 785 except sqlite3.IntegrityError:
787 786 self.new_session(conn)
788 787 print("ERROR! Session/line number was not unique in",
789 788 "database. History logging moved to new session",
790 789 self.session_number)
791 790 try:
792 791 # Try writing to the new session. If this fails, don't
793 792 # recurse
794 793 self._writeout_input_cache(conn)
795 794 except sqlite3.IntegrityError:
796 795 pass
797 796 finally:
798 797 self.db_input_cache = []
799 798
800 799 with self.db_output_cache_lock:
801 800 try:
802 801 self._writeout_output_cache(conn)
803 802 except sqlite3.IntegrityError:
804 803 print("!! Session/line number for output was not unique",
805 804 "in database. Output will not be stored.")
806 805 finally:
807 806 self.db_output_cache = []
808 807
809 808
810 809 class HistorySavingThread(threading.Thread):
811 810 """This thread takes care of writing history to the database, so that
812 811 the UI isn't held up while that happens.
813 812
814 813 It waits for the HistoryManager's save_flag to be set, then writes out
815 814 the history cache. The main thread is responsible for setting the flag when
816 815 the cache size reaches a defined threshold."""
817 816 daemon = True
818 817 stop_now = False
819 818 enabled = True
820 819 def __init__(self, history_manager):
821 820 super(HistorySavingThread, self).__init__(name="IPythonHistorySavingThread")
822 821 self.history_manager = history_manager
823 822 self.enabled = history_manager.enabled
824 823 atexit.register(self.stop)
825 824
826 825 @needs_sqlite
827 826 def run(self):
828 827 # We need a separate db connection per thread:
829 828 try:
830 829 self.db = sqlite3.connect(self.history_manager.hist_file,
831 830 **self.history_manager.connection_options
832 831 )
833 832 while True:
834 833 self.history_manager.save_flag.wait()
835 834 if self.stop_now:
836 835 self.db.close()
837 836 return
838 837 self.history_manager.save_flag.clear()
839 838 self.history_manager.writeout_cache(self.db)
840 839 except Exception as e:
841 840 print(("The history saving thread hit an unexpected error (%s). "
842 841 "History will not be written to the database.") % repr(e))
843 842
844 843 def stop(self):
845 844 """This can be called from the main thread to safely stop this thread.
846 845
847 846 Note that it does not attempt to write out remaining history before
848 847 exiting. That should be done by calling the HistoryManager's
849 848 end_session method."""
850 849 self.stop_now = True
851 850 self.history_manager.save_flag.set()
852 851 self.join()
853 852
854 853
855 854 # To match, e.g. ~5/8-~2/3
856 855 range_re = re.compile(r"""
857 856 ((?P<startsess>~?\d+)/)?
858 857 (?P<start>\d+)?
859 858 ((?P<sep>[\-:])
860 859 ((?P<endsess>~?\d+)/)?
861 860 (?P<end>\d+))?
862 861 $""", re.VERBOSE)
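# Example: what the pattern captures for a typical range string.
from IPython.core.history import range_re

m = range_re.match("~2/1-5")
print(m.groupdict())
# {'startsess': '~2', 'start': '1', 'sep': '-', 'endsess': None, 'end': '5'}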
863 862
864 863
865 864 def extract_hist_ranges(ranges_str):
866 865 """Turn a string of history ranges into 3-tuples of (session, start, stop).
867 866
868 867 Examples
869 868 --------
870 869 >>> list(extract_hist_ranges("~8/5-~7/4 2"))
871 870 [(-8, 5, None), (-7, 1, 5), (0, 2, 3)]
872 871 """
873 872 for range_str in ranges_str.split():
874 873 rmatch = range_re.match(range_str)
875 874 if not rmatch:
876 875 continue
877 876 start = rmatch.group("start")
878 877 if start:
879 878 start = int(start)
880 879 end = rmatch.group("end")
881 880 # If no end specified, get (a, a + 1)
882 881 end = int(end) if end else start + 1
883 882 else: # start not specified
884 883 if not rmatch.group('startsess'): # no startsess
885 884 continue
886 885 start = 1
887 886 end = None # provide the entire session hist
888 887
889 888 if rmatch.group("sep") == "-": # 1-3 == 1:4 --> [1, 2, 3]
890 889 end += 1
891 890 startsess = rmatch.group("startsess") or "0"
892 891 endsess = rmatch.group("endsess") or startsess
893 892 startsess = int(startsess.replace("~","-"))
894 893 endsess = int(endsess.replace("~","-"))
895 894 assert endsess >= startsess, "end session must not be earlier than start session"
896 895
897 896 if endsess == startsess:
898 897 yield (startsess, start, end)
899 898 continue
900 899 # Multiple sessions in one range:
901 900 yield (startsess, start, None)
902 901 for sess in range(startsess+1, endsess):
903 902 yield (sess, 1, None)
904 903 yield (endsess, 1, end)
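# Example: a range spanning several sessions is expanded into one
# (session, start, stop) triple per session, with stop=None meaning
# "to the end of that session".
from IPython.core.history import extract_hist_ranges

print(list(extract_hist_ranges("~3/7-~1/5")))
# [(-3, 7, None), (-2, 1, None), (-1, 1, 6)]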
905 904
906 905
907 906 def _format_lineno(session, line):
908 907 """Helper function to format line numbers properly."""
909 908 if session == 0:
910 909 return str(line)
911 910 return "%s#%s" % (session, line)
@@ -1,162 +1,161 b''
1 1 # encoding: utf-8
2 2 """
3 3 An application for managing IPython history.
4 4
5 5 To be invoked as the `ipython history` subcommand.
6 6 """
7 from __future__ import print_function
8 7
9 8 import os
10 9 import sqlite3
11 10
12 11 from traitlets.config.application import Application
13 12 from IPython.core.application import BaseIPythonApplication
14 13 from traitlets import Bool, Int, Dict
15 14 from IPython.utils.io import ask_yes_no
16 15
17 16 trim_hist_help = """Trim the IPython history database to the last 1000 entries.
18 17
19 18 This actually copies the last 1000 entries to a new database, and then replaces
20 19 the old file with the new. Use the `--keep=` argument to specify a number
21 20 other than 1000.
22 21 """
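# Example: the trim subcommand is normally invoked from the shell as
#     ipython history trim --keep=500
# but, as a sketch, it can also be launched programmatically; either way the
# active profile's history.sqlite really is rewritten.
from IPython.core.historyapp import HistoryApp

HistoryApp.launch_instance(argv=['trim', '--keep=500'])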
23 22
24 23 clear_hist_help = """Clear the IPython history database, deleting all entries.
25 24
26 25 Because this is a destructive operation, IPython will prompt the user if they
27 26 really want to do this. Passing a `-f` flag will force clearing without a
28 27 prompt.
29 28
30 29 This is a handy alias for `ipython history trim --keep=0`.
31 30 """
32 31
33 32
34 33 class HistoryTrim(BaseIPythonApplication):
35 34 description = trim_hist_help
36 35
37 36 backup = Bool(False,
38 37 help="Keep the old history file as history.sqlite.<N>"
39 38 ).tag(config=True)
40 39
41 40 keep = Int(1000,
42 41 help="Number of recent lines to keep in the database."
43 42 ).tag(config=True)
44 43
45 44 flags = Dict(dict(
46 45 backup = ({'HistoryTrim' : {'backup' : True}},
47 46 backup.help
48 47 )
49 48 ))
50 49
51 50 aliases=Dict(dict(
52 51 keep = 'HistoryTrim.keep'
53 52 ))
54 53
55 54 def start(self):
56 55 profile_dir = self.profile_dir.location
57 56 hist_file = os.path.join(profile_dir, 'history.sqlite')
58 57 con = sqlite3.connect(hist_file)
59 58
60 59 # Grab the recent history from the current database.
61 60 inputs = list(con.execute('SELECT session, line, source, source_raw FROM '
62 61 'history ORDER BY session DESC, line DESC LIMIT ?', (self.keep+1,)))
63 62 if len(inputs) <= self.keep:
64 63 print("There are already at most %d entries in the history database." % self.keep)
65 64 print("Not doing anything. Use the --keep= argument to keep fewer entries.")
66 65 return
67 66
68 67 print("Trimming history to the most recent %d entries." % self.keep)
69 68
70 69 inputs.pop() # Remove the extra element we got to check the length.
71 70 inputs.reverse()
72 71 if inputs:
73 72 first_session = inputs[0][0]
74 73 outputs = list(con.execute('SELECT session, line, output FROM '
75 74 'output_history WHERE session >= ?', (first_session,)))
76 75 sessions = list(con.execute('SELECT session, start, end, num_cmds, remark FROM '
77 76 'sessions WHERE session >= ?', (first_session,)))
78 77 con.close()
79 78
80 79 # Create the new history database.
81 80 new_hist_file = os.path.join(profile_dir, 'history.sqlite.new')
82 81 i = 0
83 82 while os.path.exists(new_hist_file):
84 83 # Make sure we don't interfere with an existing file.
85 84 i += 1
86 85 new_hist_file = os.path.join(profile_dir, 'history.sqlite.new'+str(i))
87 86 new_db = sqlite3.connect(new_hist_file)
88 87 new_db.execute("""CREATE TABLE IF NOT EXISTS sessions (session integer
89 88 primary key autoincrement, start timestamp,
90 89 end timestamp, num_cmds integer, remark text)""")
91 90 new_db.execute("""CREATE TABLE IF NOT EXISTS history
92 91 (session integer, line integer, source text, source_raw text,
93 92 PRIMARY KEY (session, line))""")
94 93 new_db.execute("""CREATE TABLE IF NOT EXISTS output_history
95 94 (session integer, line integer, output text,
96 95 PRIMARY KEY (session, line))""")
97 96 new_db.commit()
98 97
99 98
100 99 if inputs:
101 100 with new_db:
102 101 # Add the recent history into the new database.
103 102 new_db.executemany('insert into sessions values (?,?,?,?,?)', sessions)
104 103 new_db.executemany('insert into history values (?,?,?,?)', inputs)
105 104 new_db.executemany('insert into output_history values (?,?,?)', outputs)
106 105 new_db.close()
107 106
108 107 if self.backup:
109 108 i = 1
110 109 backup_hist_file = os.path.join(profile_dir, 'history.sqlite.old.%d' % i)
111 110 while os.path.exists(backup_hist_file):
112 111 i += 1
113 112 backup_hist_file = os.path.join(profile_dir, 'history.sqlite.old.%d' % i)
114 113 os.rename(hist_file, backup_hist_file)
115 114 print("Backed up longer history file to", backup_hist_file)
116 115 else:
117 116 os.remove(hist_file)
118 117
119 118 os.rename(new_hist_file, hist_file)
120 119
121 120 class HistoryClear(HistoryTrim):
122 121 description = clear_hist_help
123 122 keep = Int(0,
124 123 help="Number of recent lines to keep in the database.")
125 124
126 125 force = Bool(False,
127 126 help="Don't prompt user for confirmation"
128 127 ).tag(config=True)
129 128
130 129 flags = Dict(dict(
131 130 force = ({'HistoryClear' : {'force' : True}},
132 131 force.help),
133 132 f = ({'HistoryTrim' : {'force' : True}},
134 133 force.help
135 134 )
136 135 ))
137 136 aliases = Dict()
138 137
139 138 def start(self):
140 139 if self.force or ask_yes_no("Really delete all ipython history? ",
141 140 default="no", interrupt="no"):
142 141 HistoryTrim.start(self)
143 142
144 143 class HistoryApp(Application):
145 144 name = u'ipython-history'
146 145 description = "Manage the IPython history database."
147 146
148 147 subcommands = Dict(dict(
149 148 trim = (HistoryTrim, HistoryTrim.description.splitlines()[0]),
150 149 clear = (HistoryClear, HistoryClear.description.splitlines()[0]),
151 150 ))
152 151
153 152 def start(self):
154 153 if self.subapp is None:
155 154 print("No subcommand specified. Must specify one of: %s" % \
156 155 (list(self.subcommands.keys())))
157 156 print()
158 157 self.print_description()
159 158 self.print_subcommands()
160 159 self.exit(1)
161 160 else:
162 161 return self.subapp.start()
@@ -1,3230 +1,3225 b''
1 1 # -*- coding: utf-8 -*-
2 2 """Main IPython class."""
3 3
4 4 #-----------------------------------------------------------------------------
5 5 # Copyright (C) 2001 Janko Hauser <jhauser@zscout.de>
6 6 # Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
7 7 # Copyright (C) 2008-2011 The IPython Development Team
8 8 #
9 9 # Distributed under the terms of the BSD License. The full license is in
10 10 # the file COPYING, distributed as part of this software.
11 11 #-----------------------------------------------------------------------------
12 12
13 from __future__ import absolute_import, print_function
14 13
15 14 import __future__
16 15 import abc
17 16 import ast
18 17 import atexit
19 18 import functools
20 19 import os
21 20 import re
22 21 import runpy
23 22 import sys
24 23 import tempfile
25 24 import traceback
26 25 import types
27 26 import subprocess
28 27 import warnings
29 28 from io import open as io_open
30 29
31 30 from pickleshare import PickleShareDB
32 31
33 32 from traitlets.config.configurable import SingletonConfigurable
34 33 from IPython.core import oinspect
35 34 from IPython.core import magic
36 35 from IPython.core import page
37 36 from IPython.core import prefilter
38 37 from IPython.core import shadowns
39 38 from IPython.core import ultratb
40 39 from IPython.core.alias import Alias, AliasManager
41 40 from IPython.core.autocall import ExitAutocall
42 41 from IPython.core.builtin_trap import BuiltinTrap
43 42 from IPython.core.events import EventManager, available_events
44 43 from IPython.core.compilerop import CachingCompiler, check_linecache_ipython
45 44 from IPython.core.debugger import Pdb
46 45 from IPython.core.display_trap import DisplayTrap
47 46 from IPython.core.displayhook import DisplayHook
48 47 from IPython.core.displaypub import DisplayPublisher
49 48 from IPython.core.error import InputRejected, UsageError
50 49 from IPython.core.extensions import ExtensionManager
51 50 from IPython.core.formatters import DisplayFormatter
52 51 from IPython.core.history import HistoryManager
53 52 from IPython.core.inputsplitter import ESC_MAGIC, ESC_MAGIC2
54 53 from IPython.core.logger import Logger
55 54 from IPython.core.macro import Macro
56 55 from IPython.core.payload import PayloadManager
57 56 from IPython.core.prefilter import PrefilterManager
58 57 from IPython.core.profiledir import ProfileDir
59 58 from IPython.core.usage import default_banner
60 from IPython.testing.skipdoctest import skip_doctest_py2, skip_doctest
59 from IPython.testing.skipdoctest import skip_doctest
61 60 from IPython.utils import PyColorize
62 61 from IPython.utils import io
63 62 from IPython.utils import py3compat
64 63 from IPython.utils import openpy
65 64 from IPython.utils.decorators import undoc
66 65 from IPython.utils.io import ask_yes_no
67 66 from IPython.utils.ipstruct import Struct
68 67 from IPython.paths import get_ipython_dir
69 68 from IPython.utils.path import get_home_dir, get_py_filename, ensure_dir_exists
70 69 from IPython.utils.process import system, getoutput
71 70 from IPython.utils.py3compat import (builtin_mod, unicode_type, string_types,
72 71 with_metaclass, iteritems)
73 72 from IPython.utils.strdispatch import StrDispatch
74 73 from IPython.utils.syspathcontext import prepended_to_syspath
75 74 from IPython.utils.text import format_screen, LSString, SList, DollarFormatter
76 75 from IPython.utils.tempdir import TemporaryDirectory
77 76 from traitlets import (
78 77 Integer, Bool, CaselessStrEnum, Enum, List, Dict, Unicode, Instance, Type,
79 78 observe, default,
80 79 )
81 80 from warnings import warn
82 81 from logging import error
83 82 import IPython.core.hooks
84 83
85 84 # NoOpContext is deprecated, but ipykernel imports it from here.
86 85 # See https://github.com/ipython/ipykernel/issues/157
87 86 from IPython.utils.contexts import NoOpContext
88 87
89 88 try:
90 89 import docrepr.sphinxify as sphx
91 90
92 91 def sphinxify(doc):
93 92 with TemporaryDirectory() as dirname:
94 93 return {
95 94 'text/html': sphx.sphinxify(doc, dirname),
96 95 'text/plain': doc
97 96 }
98 97 except ImportError:
99 98 sphinxify = None
100 99
101 100
102 101 class ProvisionalWarning(DeprecationWarning):
103 102 """
104 103 Warning class for unstable features
105 104 """
106 105 pass
107 106
108 107 #-----------------------------------------------------------------------------
109 108 # Globals
110 109 #-----------------------------------------------------------------------------
111 110
112 111 # compiled regexps for autoindent management
113 112 dedent_re = re.compile(r'^\s+raise|^\s+return|^\s+pass')
114 113
115 114 #-----------------------------------------------------------------------------
116 115 # Utilities
117 116 #-----------------------------------------------------------------------------
118 117
119 118 @undoc
120 119 def softspace(file, newvalue):
121 120 """Copied from code.py, to remove the dependency"""
122 121
123 122 oldvalue = 0
124 123 try:
125 124 oldvalue = file.softspace
126 125 except AttributeError:
127 126 pass
128 127 try:
129 128 file.softspace = newvalue
130 129 except (AttributeError, TypeError):
131 130 # "attribute-less object" or "read-only attributes"
132 131 pass
133 132 return oldvalue
134 133
135 134 @undoc
136 135 def no_op(*a, **kw): pass
137 136
138 137
139 138 class SpaceInInput(Exception): pass
140 139
141 140
142 141 def get_default_colors():
143 142 "DEPRECATED"
144 143 warn('get_default_colors is deprecated, and is `Neutral` on all platforms.',
145 144 DeprecationWarning, stacklevel=2)
146 145 return 'Neutral'
147 146
148 147
149 148 class SeparateUnicode(Unicode):
150 149 r"""A Unicode subclass to validate separate_in, separate_out, etc.
151 150
152 151 This is a Unicode based trait that converts '0'->'' and ``'\\n'->'\n'``.
153 152 """
154 153
155 154 def validate(self, obj, value):
156 155 if value == '0': value = ''
157 156 value = value.replace('\\n','\n')
158 157 return super(SeparateUnicode, self).validate(obj, value)
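# Example: a throwaway HasTraits class (the name Demo is ours) showing the
# two conversions performed by validate().
from traitlets import HasTraits
from IPython.core.interactiveshell import SeparateUnicode

class Demo(HasTraits):
    sep = SeparateUnicode('\n')

d = Demo()
d.sep = '0'
print(repr(d.sep))      # '' -- '0' means "no separator"
d.sep = '\\n\\n'
print(repr(d.sep))      # '\n\n' -- escaped newlines become real ones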
159 158
160 159
161 160 @undoc
162 161 class DummyMod(object):
163 162 """A dummy module used for IPython's interactive module when
164 163 a namespace must be assigned to the module's __dict__."""
165 164 pass
166 165
167 166
168 167 class ExecutionResult(object):
169 168 """The result of a call to :meth:`InteractiveShell.run_cell`
170 169
171 170 Stores information about what took place.
172 171 """
173 172 execution_count = None
174 173 error_before_exec = None
175 174 error_in_exec = None
176 175 result = None
177 176
178 177 @property
179 178 def success(self):
180 179 return (self.error_before_exec is None) and (self.error_in_exec is None)
181 180
182 181 def raise_error(self):
183 182 """Reraises error if `success` is `False`, otherwise does nothing"""
184 183 if self.error_before_exec is not None:
185 184 raise self.error_before_exec
186 185 if self.error_in_exec is not None:
187 186 raise self.error_in_exec
188 187
189 188 def __repr__(self):
190 if sys.version_info > (3,):
191 name = self.__class__.__qualname__
192 else:
193 name = self.__class__.__name__
189 name = self.__class__.__qualname__
194 190 return '<%s object at %x, execution_count=%s error_before_exec=%s error_in_exec=%s result=%s>' %\
195 191 (name, id(self), self.execution_count, self.error_before_exec, self.error_in_exec, repr(self.result))
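# Example: run_cell returns an ExecutionResult; as a sketch, instantiating
# the real shell below also writes to the active profile's history database.
from IPython.core.interactiveshell import InteractiveShell

ip = InteractiveShell.instance()
result = ip.run_cell("1 / 0")
print(result.success)          # False
print(result.error_in_exec)    # the ZeroDivisionError instance
# result.raise_error() would re-raise that ZeroDivisionError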
196 192
197 193
198 194 class InteractiveShell(SingletonConfigurable):
199 195 """An enhanced, interactive shell for Python."""
200 196
201 197 _instance = None
202 198
203 199 ast_transformers = List([], help=
204 200 """
205 201 A list of ast.NodeTransformer subclass instances, which will be applied
206 202 to user input before code is run.
207 203 """
208 204 ).tag(config=True)
209 205
210 206 autocall = Enum((0,1,2), default_value=0, help=
211 207 """
212 208 Make IPython automatically call any callable object even if you didn't
213 209 type explicit parentheses. For example, 'str 43' becomes 'str(43)'
214 210 automatically. The value can be '0' to disable the feature, '1' for
215 211 'smart' autocall, where it is not applied if there are no more
216 212 arguments on the line, and '2' for 'full' autocall, where all callable
217 213 objects are automatically called (even if no arguments are present).
218 214 """
219 215 ).tag(config=True)
220 216 # TODO: remove all autoindent logic and put into frontends.
221 217 # We can't do this yet because even runlines uses the autoindent.
222 218 autoindent = Bool(True, help=
223 219 """
224 220 Autoindent IPython code entered interactively.
225 221 """
226 222 ).tag(config=True)
227 223
228 224 automagic = Bool(True, help=
229 225 """
230 226 Enable magic commands to be called without the leading %.
231 227 """
232 228 ).tag(config=True)
233 229
234 230 banner1 = Unicode(default_banner,
235 231 help="""The part of the banner to be printed before the profile"""
236 232 ).tag(config=True)
237 233 banner2 = Unicode('',
238 234 help="""The part of the banner to be printed after the profile"""
239 235 ).tag(config=True)
240 236
241 237 cache_size = Integer(1000, help=
242 238 """
243 239 Set the size of the output cache. The default is 1000, you can
244 240 change it permanently in your config file. Setting it to 0 completely
245 241 disables the caching system, and the minimum value accepted is 20 (if
246 242 you provide a value less than 20, it is reset to 0 and a warning is
247 243 issued). This limit is defined because otherwise you'll spend more
248 244 time re-flushing a cache that is too small than doing real work.
249 245 """
250 246 ).tag(config=True)
251 247 color_info = Bool(True, help=
252 248 """
253 249 Use colors for displaying information about objects. Because this
254 250 information is passed through a pager (like 'less'), and some pagers
255 251 get confused with color codes, this capability can be turned off.
256 252 """
257 253 ).tag(config=True)
258 254 colors = CaselessStrEnum(('Neutral', 'NoColor','LightBG','Linux'),
259 255 default_value='Neutral',
260 256 help="Set the color scheme (NoColor, Neutral, Linux, or LightBG)."
261 257 ).tag(config=True)
262 258 debug = Bool(False).tag(config=True)
263 259 disable_failing_post_execute = Bool(False,
264 260 help="Don't call post-execute functions that have failed in the past."
265 261 ).tag(config=True)
266 262 display_formatter = Instance(DisplayFormatter, allow_none=True)
267 263 displayhook_class = Type(DisplayHook)
268 264 display_pub_class = Type(DisplayPublisher)
269 265
270 266 sphinxify_docstring = Bool(False, help=
271 267 """
272 268 Enables rich html representation of docstrings. (This requires the
273 269 docrepr module).
274 270 """).tag(config=True)
275 271
276 272 @observe("sphinxify_docstring")
277 273 def _sphinxify_docstring_changed(self, change):
278 274 if change['new']:
279 275 warn("`sphinxify_docstring` is provisional since IPython 5.0 and might change in future versions." , ProvisionalWarning)
280 276
281 277 enable_html_pager = Bool(False, help=
282 278 """
283 279 (Provisional API) enables html representation in mime bundles sent
284 280 to pagers.
285 281 """).tag(config=True)
286 282
287 283 @observe("enable_html_pager")
288 284 def _enable_html_pager_changed(self, change):
289 285 if change['new']:
290 286 warn("`enable_html_pager` is provisional since IPython 5.0 and might change in future versions.", ProvisionalWarning)
291 287
292 288 data_pub_class = None
293 289
294 290 exit_now = Bool(False)
295 291 exiter = Instance(ExitAutocall)
296 292 @default('exiter')
297 293 def _exiter_default(self):
298 294 return ExitAutocall(self)
299 295 # Monotonically increasing execution counter
300 296 execution_count = Integer(1)
301 297 filename = Unicode("<ipython console>")
302 298 ipython_dir= Unicode('').tag(config=True) # Set to get_ipython_dir() in __init__
303 299
304 300 # Input splitter, to transform input line by line and detect when a block
305 301 # is ready to be executed.
306 302 input_splitter = Instance('IPython.core.inputsplitter.IPythonInputSplitter',
307 303 (), {'line_input_checker': True})
308 304
309 305 # This InputSplitter instance is used to transform completed cells before
310 306 # running them. It allows cell magics to contain blank lines.
311 307 input_transformer_manager = Instance('IPython.core.inputsplitter.IPythonInputSplitter',
312 308 (), {'line_input_checker': False})
313 309
314 310 logstart = Bool(False, help=
315 311 """
316 312 Start logging to the default log file in overwrite mode.
317 313 Use `logappend` to specify a log file to **append** logs to.
318 314 """
319 315 ).tag(config=True)
320 316 logfile = Unicode('', help=
321 317 """
322 318 The name of the logfile to use.
323 319 """
324 320 ).tag(config=True)
325 321 logappend = Unicode('', help=
326 322 """
327 323 Start logging to the given file in append mode.
328 324 Use `logfile` to specify a log file to **overwrite** logs to.
329 325 """
330 326 ).tag(config=True)
331 327 object_info_string_level = Enum((0,1,2), default_value=0,
332 328 ).tag(config=True)
333 329 pdb = Bool(False, help=
334 330 """
335 331 Automatically call the pdb debugger after every exception.
336 332 """
337 333 ).tag(config=True)
338 334 display_page = Bool(False,
339 335 help="""If True, anything that would be passed to the pager
340 336 will be displayed as regular output instead."""
341 337 ).tag(config=True)
342 338
343 339 # deprecated prompt traits:
344 340
345 341 prompt_in1 = Unicode('In [\\#]: ',
346 342 help="Deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly."
347 343 ).tag(config=True)
348 344 prompt_in2 = Unicode(' .\\D.: ',
349 345 help="Deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly."
350 346 ).tag(config=True)
351 347 prompt_out = Unicode('Out[\\#]: ',
352 348 help="Deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly."
353 349 ).tag(config=True)
354 350 prompts_pad_left = Bool(True,
355 351 help="Deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly."
356 352 ).tag(config=True)
357 353
358 354 @observe('prompt_in1', 'prompt_in2', 'prompt_out', 'prompts_pad_left')
359 355 def _prompt_trait_changed(self, change):
360 356 name = change['name']
361 357 warn("InteractiveShell.{name} is deprecated since IPython 4.0 and ignored since 5.0, set TerminalInteractiveShell.prompts object directly.".format(
362 358 name=name)
363 359 )
364 360 # protect against weird cases where self.config may not exist:
365 361
366 362 show_rewritten_input = Bool(True,
367 363 help="Show rewritten input, e.g. for autocall."
368 364 ).tag(config=True)
369 365
370 366 quiet = Bool(False).tag(config=True)
371 367
372 368 history_length = Integer(10000,
373 369 help='Total length of command history'
374 370 ).tag(config=True)
375 371
376 372 history_load_length = Integer(1000, help=
377 373 """
378 374 The number of saved history entries to be loaded
379 375 into the history buffer at startup.
380 376 """
381 377 ).tag(config=True)
382 378
383 379 ast_node_interactivity = Enum(['all', 'last', 'last_expr', 'none'],
384 380 default_value='last_expr',
385 381 help="""
386 382 'all', 'last', 'last_expr' or 'none', specifying which nodes should be
387 383 run interactively (displaying output from expressions)."""
388 384 ).tag(config=True)
389 385
390 386 # TODO: this part of prompt management should be moved to the frontends.
391 387 # Use custom TraitTypes that convert '0'->'' and '\\n'->'\n'
392 388 separate_in = SeparateUnicode('\n').tag(config=True)
393 389 separate_out = SeparateUnicode('').tag(config=True)
394 390 separate_out2 = SeparateUnicode('').tag(config=True)
395 391 wildcards_case_sensitive = Bool(True).tag(config=True)
396 392 xmode = CaselessStrEnum(('Context','Plain', 'Verbose'),
397 393 default_value='Context').tag(config=True)
398 394
399 395 # Subcomponents of InteractiveShell
400 396 alias_manager = Instance('IPython.core.alias.AliasManager', allow_none=True)
401 397 prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
402 398 builtin_trap = Instance('IPython.core.builtin_trap.BuiltinTrap', allow_none=True)
403 399 display_trap = Instance('IPython.core.display_trap.DisplayTrap', allow_none=True)
404 400 extension_manager = Instance('IPython.core.extensions.ExtensionManager', allow_none=True)
405 401 payload_manager = Instance('IPython.core.payload.PayloadManager', allow_none=True)
406 402 history_manager = Instance('IPython.core.history.HistoryAccessorBase', allow_none=True)
407 403 magics_manager = Instance('IPython.core.magic.MagicsManager', allow_none=True)
408 404
409 405 profile_dir = Instance('IPython.core.application.ProfileDir', allow_none=True)
410 406 @property
411 407 def profile(self):
412 408 if self.profile_dir is not None:
413 409 name = os.path.basename(self.profile_dir.location)
414 410 return name.replace('profile_','')
415 411
416 412
417 413 # Private interface
418 414 _post_execute = Dict()
419 415
420 416 # Tracks any GUI loop loaded for pylab
421 417 pylab_gui_select = None
422 418
423 419 last_execution_succeeded = Bool(True, help='Did the last executed command succeed')
424 420
425 421 def __init__(self, ipython_dir=None, profile_dir=None,
426 422 user_module=None, user_ns=None,
427 423 custom_exceptions=((), None), **kwargs):
428 424
429 425 # This is where traits with a config_key argument are updated
430 426 # from the values on config.
431 427 super(InteractiveShell, self).__init__(**kwargs)
432 428 if 'PromptManager' in self.config:
433 429 warn('As of IPython 5.0 `PromptManager` config will have no effect'
434 430 ' and has been replaced by TerminalInteractiveShell.prompts_class')
435 431 self.configurables = [self]
436 432
437 433 # These are relatively independent and stateless
438 434 self.init_ipython_dir(ipython_dir)
439 435 self.init_profile_dir(profile_dir)
440 436 self.init_instance_attrs()
441 437 self.init_environment()
442 438
443 439 # Check if we're in a virtualenv, and set up sys.path.
444 440 self.init_virtualenv()
445 441
446 442 # Create namespaces (user_ns, user_global_ns, etc.)
447 443 self.init_create_namespaces(user_module, user_ns)
448 444 # This has to be done after init_create_namespaces because it uses
449 445 # something in self.user_ns, but before init_sys_modules, which
450 446 # is the first thing to modify sys.
451 447 # TODO: When we override sys.stdout and sys.stderr before this class
452 448 # is created, we are saving the overridden ones here. Not sure if this
453 449 # is what we want to do.
454 450 self.save_sys_module_state()
455 451 self.init_sys_modules()
456 452
457 453 # While we're trying to have each part of the code directly access what
458 454 # it needs without keeping redundant references to objects, we have too
459 455 # much legacy code that expects ip.db to exist.
460 456 self.db = PickleShareDB(os.path.join(self.profile_dir.location, 'db'))
461 457
462 458 self.init_history()
463 459 self.init_encoding()
464 460 self.init_prefilter()
465 461
466 462 self.init_syntax_highlighting()
467 463 self.init_hooks()
468 464 self.init_events()
469 465 self.init_pushd_popd_magic()
470 466 self.init_user_ns()
471 467 self.init_logger()
472 468 self.init_builtins()
473 469
474 470 # The following was in post_config_initialization
475 471 self.init_inspector()
476 472 self.raw_input_original = input
477 473 self.init_completer()
478 474 # TODO: init_io() needs to happen before init_traceback handlers
479 475 # because the traceback handlers hardcode the stdout/stderr streams.
480 476 # This logic is in debugger.Pdb and should eventually be changed.
481 477 self.init_io()
482 478 self.init_traceback_handlers(custom_exceptions)
483 479 self.init_prompts()
484 480 self.init_display_formatter()
485 481 self.init_display_pub()
486 482 self.init_data_pub()
487 483 self.init_displayhook()
488 484 self.init_magics()
489 485 self.init_alias()
490 486 self.init_logstart()
491 487 self.init_pdb()
492 488 self.init_extension_manager()
493 489 self.init_payload()
494 490 self.init_deprecation_warnings()
495 491 self.hooks.late_startup_hook()
496 492 self.events.trigger('shell_initialized', self)
497 493 atexit.register(self.atexit_operations)
498 494
499 495 def get_ipython(self):
500 496 """Return the currently running IPython instance."""
501 497 return self
502 498
503 499 #-------------------------------------------------------------------------
504 500 # Trait changed handlers
505 501 #-------------------------------------------------------------------------
506 502 @observe('ipython_dir')
507 503 def _ipython_dir_changed(self, change):
508 504 ensure_dir_exists(change['new'])
509 505
510 506 def set_autoindent(self,value=None):
511 507 """Set the autoindent flag.
512 508
513 509 If called with no arguments, it acts as a toggle."""
514 510 if value is None:
515 511 self.autoindent = not self.autoindent
516 512 else:
517 513 self.autoindent = value
518 514
519 515 #-------------------------------------------------------------------------
520 516 # init_* methods called by __init__
521 517 #-------------------------------------------------------------------------
522 518
523 519 def init_ipython_dir(self, ipython_dir):
524 520 if ipython_dir is not None:
525 521 self.ipython_dir = ipython_dir
526 522 return
527 523
528 524 self.ipython_dir = get_ipython_dir()
529 525
530 526 def init_profile_dir(self, profile_dir):
531 527 if profile_dir is not None:
532 528 self.profile_dir = profile_dir
533 529 return
534 530 self.profile_dir =\
535 531 ProfileDir.create_profile_dir_by_name(self.ipython_dir, 'default')
536 532
537 533 def init_instance_attrs(self):
538 534 self.more = False
539 535
540 536 # command compiler
541 537 self.compile = CachingCompiler()
542 538
543 539 # Make an empty namespace, which extension writers can rely on both
544 540 # existing and NEVER being used by ipython itself. This gives them a
545 541 # convenient location for storing additional information and state
546 542 # their extensions may require, without fear of collisions with other
547 543 # ipython names that may develop later.
548 544 self.meta = Struct()
549 545
550 546 # Temporary files used for various purposes. Deleted at exit.
551 547 self.tempfiles = []
552 548 self.tempdirs = []
553 549
554 550 # keep track of where we started running (mainly for crash post-mortem)
555 551 # This is not being used anywhere currently.
556 552 self.starting_dir = py3compat.getcwd()
557 553
558 554 # Indentation management
559 555 self.indent_current_nsp = 0
560 556
561 557 # Dict to track post-execution functions that have been registered
562 558 self._post_execute = {}
563 559
564 560 def init_environment(self):
565 561 """Any changes we need to make to the user's environment."""
566 562 pass
567 563
568 564 def init_encoding(self):
569 565 # Get system encoding at startup time. Certain terminals (like Emacs
570 566 # under Win32) have it set to None, and we need to have a known valid
571 567 # encoding to use in the raw_input() method
572 568 try:
573 569 self.stdin_encoding = sys.stdin.encoding or 'ascii'
574 570 except AttributeError:
575 571 self.stdin_encoding = 'ascii'
576 572
577 573
578 574 @observe('colors')
579 575 def init_syntax_highlighting(self, changes=None):
580 576 # Python source parser/formatter for syntax highlighting
581 577 pyformat = PyColorize.Parser(style=self.colors, parent=self).format
582 578 self.pycolorize = lambda src: pyformat(src,'str')
583 579
584 580 def refresh_style(self):
585 581 # No-op here, used in subclass
586 582 pass
587 583
588 584 def init_pushd_popd_magic(self):
589 585 # for pushd/popd management
590 586 self.home_dir = get_home_dir()
591 587
592 588 self.dir_stack = []
593 589
594 590 def init_logger(self):
595 591 self.logger = Logger(self.home_dir, logfname='ipython_log.py',
596 592 logmode='rotate')
597 593
598 594 def init_logstart(self):
599 595 """Initialize logging in case it was requested at the command line.
600 596 """
601 597 if self.logappend:
602 598 self.magic('logstart %s append' % self.logappend)
603 599 elif self.logfile:
604 600 self.magic('logstart %s' % self.logfile)
605 601 elif self.logstart:
606 602 self.magic('logstart')
607 603
608 604 def init_deprecation_warnings(self):
609 605 """
610 606 Register the default filter for deprecation warnings.
611 607
612 608 This will allow deprecation warnings for functions used interactively to be
613 609 shown to users, while still hiding deprecation warnings from library imports.
614 610 """
615 611 warnings.filterwarnings("default", category=DeprecationWarning, module=self.user_ns.get("__name__"))
616 612
617 613 def init_builtins(self):
618 614 # A single, static flag that we set to True. Its presence indicates
619 615 # that an IPython shell has been created, and we make no attempts at
620 616 # removing on exit or representing the existence of more than one
621 617 # IPython at a time.
622 618 builtin_mod.__dict__['__IPYTHON__'] = True
623 619
624 620 self.builtin_trap = BuiltinTrap(shell=self)
625 621
626 622 def init_inspector(self):
627 623 # Object inspector
628 624 self.inspector = oinspect.Inspector(oinspect.InspectColors,
629 625 PyColorize.ANSICodeColors,
630 626 'NoColor',
631 627 self.object_info_string_level)
632 628
633 629 def init_io(self):
634 630 # This will just use sys.stdout and sys.stderr. If you want to
635 631 # override sys.stdout and sys.stderr themselves, you need to do that
636 632 # *before* instantiating this class, because io holds onto
637 633 # references to the underlying streams.
638 634 # io.std* are deprecated, but don't show our own deprecation warnings
639 635 # during initialization of the deprecated API.
640 636 with warnings.catch_warnings():
641 637 warnings.simplefilter('ignore', DeprecationWarning)
642 638 io.stdout = io.IOStream(sys.stdout)
643 639 io.stderr = io.IOStream(sys.stderr)
644 640
645 641 def init_prompts(self):
646 642 # Set system prompts, so that scripts can decide if they are running
647 643 # interactively.
648 644 sys.ps1 = 'In : '
649 645 sys.ps2 = '...: '
650 646 sys.ps3 = 'Out: '
651 647
652 648 def init_display_formatter(self):
653 649 self.display_formatter = DisplayFormatter(parent=self)
654 650 self.configurables.append(self.display_formatter)
655 651
656 652 def init_display_pub(self):
657 653 self.display_pub = self.display_pub_class(parent=self)
658 654 self.configurables.append(self.display_pub)
659 655
660 656 def init_data_pub(self):
661 657 if not self.data_pub_class:
662 658 self.data_pub = None
663 659 return
664 660 self.data_pub = self.data_pub_class(parent=self)
665 661 self.configurables.append(self.data_pub)
666 662
667 663 def init_displayhook(self):
668 664 # Initialize displayhook, set in/out prompts and printing system
669 665 self.displayhook = self.displayhook_class(
670 666 parent=self,
671 667 shell=self,
672 668 cache_size=self.cache_size,
673 669 )
674 670 self.configurables.append(self.displayhook)
675 671 # This is a context manager that installs/removes the displayhook at
676 672 # the appropriate time.
677 673 self.display_trap = DisplayTrap(hook=self.displayhook)
678 674
679 675 def init_virtualenv(self):
680 676 """Add a virtualenv to sys.path so the user can import modules from it.
681 677 This isn't perfect: it doesn't use the Python interpreter with which the
682 678 virtualenv was built, and it ignores the --no-site-packages option. A
683 679 warning will appear suggesting that the user install IPython in the
684 680 virtualenv, but for many cases it probably works well enough.
685 681
686 682 Adapted from code snippets online.
687 683
688 684 http://blog.ufsoft.org/2009/1/29/ipython-and-virtualenv
689 685 """
690 686 if 'VIRTUAL_ENV' not in os.environ:
691 687 # Not in a virtualenv
692 688 return
693 689
694 690 # venv detection:
695 691 # stdlib venv may symlink sys.executable, so we can't use realpath.
696 692 # but others can symlink *to* the venv Python, so we can't just use sys.executable.
697 693 # So we just check every item in the symlink tree (generally <= 3)
698 694 p = os.path.normcase(sys.executable)
699 695 paths = [p]
700 696 while os.path.islink(p):
701 697 p = os.path.normcase(os.path.join(os.path.dirname(p), os.readlink(p)))
702 698 paths.append(p)
703 699 p_venv = os.path.normcase(os.environ['VIRTUAL_ENV'])
704 700 if any(p.startswith(p_venv) for p in paths):
705 701 # Running properly in the virtualenv, don't need to do anything
706 702 return
707 703
708 704 warn("Attempting to work in a virtualenv. If you encounter problems, please "
709 705 "install IPython inside the virtualenv.")
710 706 if sys.platform == "win32":
711 707 virtual_env = os.path.join(os.environ['VIRTUAL_ENV'], 'Lib', 'site-packages')
712 708 else:
713 709 virtual_env = os.path.join(os.environ['VIRTUAL_ENV'], 'lib',
714 710 'python%d.%d' % sys.version_info[:2], 'site-packages')
715 711
716 712 import site
717 713 sys.path.insert(0, virtual_env)
718 714 site.addsitedir(virtual_env)
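# Example: the detection logic above as a standalone helper (the function
# name is ours), so it can be read or tested outside the class.
import os
import sys

def _running_inside_virtualenv():
    venv = os.environ.get('VIRTUAL_ENV')
    if venv is None:
        return False
    # Walk the interpreter's symlink chain, as above.
    p = os.path.normcase(sys.executable)
    paths = [p]
    while os.path.islink(p):
        p = os.path.normcase(os.path.join(os.path.dirname(p), os.readlink(p)))
        paths.append(p)
    return any(path.startswith(os.path.normcase(venv)) for path in paths)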
719 715
720 716 #-------------------------------------------------------------------------
721 717 # Things related to injections into the sys module
722 718 #-------------------------------------------------------------------------
723 719
724 720 def save_sys_module_state(self):
725 721 """Save the state of hooks in the sys module.
726 722
727 723 This has to be called after self.user_module is created.
728 724 """
729 725 self._orig_sys_module_state = {'stdin': sys.stdin,
730 726 'stdout': sys.stdout,
731 727 'stderr': sys.stderr,
732 728 'excepthook': sys.excepthook}
733 729 self._orig_sys_modules_main_name = self.user_module.__name__
734 730 self._orig_sys_modules_main_mod = sys.modules.get(self.user_module.__name__)
735 731
736 732 def restore_sys_module_state(self):
737 733 """Restore the state of the sys module."""
738 734 try:
739 735 for k, v in iteritems(self._orig_sys_module_state):
740 736 setattr(sys, k, v)
741 737 except AttributeError:
742 738 pass
743 739 # Reset what was done in self.init_sys_modules
744 740 if self._orig_sys_modules_main_mod is not None:
745 741 sys.modules[self._orig_sys_modules_main_name] = self._orig_sys_modules_main_mod
746 742
747 743 #-------------------------------------------------------------------------
748 744 # Things related to the banner
749 745 #-------------------------------------------------------------------------
750 746
751 747 @property
752 748 def banner(self):
753 749 banner = self.banner1
754 750 if self.profile and self.profile != 'default':
755 751 banner += '\nIPython profile: %s\n' % self.profile
756 752 if self.banner2:
757 753 banner += '\n' + self.banner2
758 754 return banner
759 755
760 756 def show_banner(self, banner=None):
761 757 if banner is None:
762 758 banner = self.banner
763 759 sys.stdout.write(banner)
764 760
765 761 #-------------------------------------------------------------------------
766 762 # Things related to hooks
767 763 #-------------------------------------------------------------------------
768 764
769 765 def init_hooks(self):
770 766 # hooks holds pointers used for user-side customizations
771 767 self.hooks = Struct()
772 768
773 769 self.strdispatchers = {}
774 770
775 771 # Set all default hooks, defined in the IPython.hooks module.
776 772 hooks = IPython.core.hooks
777 773 for hook_name in hooks.__all__:
778 774 # default hooks have priority 100, i.e. low; user hooks should have
779 775 # 0-100 priority
780 776 self.set_hook(hook_name,getattr(hooks,hook_name), 100, _warn_deprecated=False)
781 777
782 778 if self.display_page:
783 779 self.set_hook('show_in_pager', page.as_hook(page.display_page), 90)
784 780
785 781 def set_hook(self,name,hook, priority=50, str_key=None, re_key=None,
786 782 _warn_deprecated=True):
787 783 """set_hook(name,hook) -> sets an internal IPython hook.
788 784
789 785 IPython exposes some of its internal API as user-modifiable hooks. By
790 786 adding your function to one of these hooks, you can modify IPython's
791 787 behavior to call your own routines at runtime."""
792 788
793 789 # At some point in the future, this should validate the hook before it
794 790 # accepts it. Probably at least check that the hook takes the number
795 791 # of args it's supposed to.
796 792
797 793 f = types.MethodType(hook,self)
798 794
799 795 # check if the hook is for strdispatcher first
800 796 if str_key is not None:
801 797 sdp = self.strdispatchers.get(name, StrDispatch())
802 798 sdp.add_s(str_key, f, priority )
803 799 self.strdispatchers[name] = sdp
804 800 return
805 801 if re_key is not None:
806 802 sdp = self.strdispatchers.get(name, StrDispatch())
807 803 sdp.add_re(re.compile(re_key), f, priority )
808 804 self.strdispatchers[name] = sdp
809 805 return
810 806
811 807 dp = getattr(self.hooks, name, None)
812 808 if name not in IPython.core.hooks.__all__:
813 809 print("Warning! Hook '%s' is not one of %s" % \
814 810 (name, IPython.core.hooks.__all__ ))
815 811
816 812 if _warn_deprecated and (name in IPython.core.hooks.deprecated):
817 813 alternative = IPython.core.hooks.deprecated[name]
818 814 warn("Hook {} is deprecated. Use {} instead.".format(name, alternative))
819 815
820 816 if not dp:
821 817 dp = IPython.core.hooks.CommandChainDispatcher()
822 818
823 819 try:
824 820 dp.add(f,priority)
825 821 except AttributeError:
826 822 # it was not commandchain, plain old func - replace
827 823 dp = f
828 824
829 825 setattr(self.hooks,name, dp)
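# Usage sketch for set_hook, mirroring the complete_command registrations made
# in init_completer further below (``ip`` is an active shell instance and
# ``my_completer(shell, event)`` a user-supplied callable returning a list of
# completion strings):
#   ip.set_hook('complete_command', my_completer, str_key='%mycmd')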
830 826
831 827 #-------------------------------------------------------------------------
832 828 # Things related to events
833 829 #-------------------------------------------------------------------------
834 830
835 831 def init_events(self):
836 832 self.events = EventManager(self, available_events)
837 833
838 834 self.events.register("pre_execute", self._clear_warning_registry)
839 835
840 836 def register_post_execute(self, func):
841 837 """DEPRECATED: Use ip.events.register('post_run_cell', func)
842 838
843 839 Register a function for calling after code execution.
844 840 """
845 841 warn("ip.register_post_execute is deprecated, use "
846 842 "ip.events.register('post_run_cell', func) instead.")
847 843 self.events.register('post_run_cell', func)
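# The replacement recommended by the warning above (``ip`` is an active shell
# instance, ``func`` a callable to run after each executed cell):
#   ip.events.register('post_run_cell', func)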
848 844
849 845 def _clear_warning_registry(self):
850 846 # clear the warning registry, so that different code blocks with
851 847 # overlapping line number ranges don't cause spurious suppression of
852 848 # warnings (see gh-6611 for details)
853 849 if "__warningregistry__" in self.user_global_ns:
854 850 del self.user_global_ns["__warningregistry__"]
855 851
856 852 #-------------------------------------------------------------------------
857 853 # Things related to the "main" module
858 854 #-------------------------------------------------------------------------
859 855
860 856 def new_main_mod(self, filename, modname):
861 857 """Return a new 'main' module object for user code execution.
862 858
863 859 ``filename`` should be the path of the script which will be run in the
864 860 module. Requests with the same filename will get the same module, with
865 861 its namespace cleared.
866 862
867 863 ``modname`` should be the module name - normally either '__main__' or
868 864 the basename of the file without the extension.
869 865
870 866 When scripts are executed via %run, we must keep a reference to their
871 867 __main__ module around so that Python doesn't
872 868 clear it, rendering references to module globals useless.
873 869
874 870 This method keeps said reference in a private dict, keyed by the
875 871 absolute path of the script. This way, for multiple executions of the
876 872 same script we only keep one copy of the namespace (the last one),
877 873 thus preventing memory leaks from old references while allowing the
878 874 objects from the last execution to be accessible.
879 875 """
880 876 filename = os.path.abspath(filename)
881 877 try:
882 878 main_mod = self._main_mod_cache[filename]
883 879 except KeyError:
884 880 main_mod = self._main_mod_cache[filename] = types.ModuleType(
885 881 py3compat.cast_bytes_py2(modname),
886 882 doc="Module created for script run in IPython")
887 883 else:
888 884 main_mod.__dict__.clear()
889 885 main_mod.__name__ = modname
890 886
891 887 main_mod.__file__ = filename
892 888 # It seems pydoc (and perhaps others) needs any module instance to
893 889 # implement a __nonzero__ method
894 890 main_mod.__nonzero__ = lambda : True
895 891
896 892 return main_mod
897 893
898 894 def clear_main_mod_cache(self):
899 895 """Clear the cache of main modules.
900 896
901 897 Mainly for use by utilities like %reset.
902 898
903 899 Examples
904 900 --------
905 901
906 902 In [15]: import IPython
907 903
908 904 In [16]: m = _ip.new_main_mod(IPython.__file__, 'IPython')
909 905
910 906 In [17]: len(_ip._main_mod_cache) > 0
911 907 Out[17]: True
912 908
913 909 In [18]: _ip.clear_main_mod_cache()
914 910
915 911 In [19]: len(_ip._main_mod_cache) == 0
916 912 Out[19]: True
917 913 """
918 914 self._main_mod_cache.clear()
919 915
920 916 #-------------------------------------------------------------------------
921 917 # Things related to debugging
922 918 #-------------------------------------------------------------------------
923 919
924 920 def init_pdb(self):
925 921 # Set calling of pdb on exceptions
926 922 # self.call_pdb is a property
927 923 self.call_pdb = self.pdb
928 924
929 925 def _get_call_pdb(self):
930 926 return self._call_pdb
931 927
932 928 def _set_call_pdb(self,val):
933 929
934 930 if val not in (0,1,False,True):
935 931 raise ValueError('new call_pdb value must be boolean')
936 932
937 933 # store value in instance
938 934 self._call_pdb = val
939 935
940 936 # notify the actual exception handlers
941 937 self.InteractiveTB.call_pdb = val
942 938
943 939 call_pdb = property(_get_call_pdb,_set_call_pdb,None,
944 940 'Control auto-activation of pdb at exceptions')
945 941
946 942 def debugger(self,force=False):
947 943 """Call the pdb debugger.
948 944
949 945 Keywords:
950 946
951 947 - force(False): by default, this routine checks the instance call_pdb
952 948 flag and does not actually invoke the debugger if the flag is false.
953 949 The 'force' option forces the debugger to activate even if the flag
954 950 is false.
955 951 """
956 952
957 953 if not (force or self.call_pdb):
958 954 return
959 955
960 956 if not hasattr(sys,'last_traceback'):
961 957 error('No traceback has been produced, nothing to debug.')
962 958 return
963 959
964 960 self.InteractiveTB.debugger(force=True)
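# Usage sketch: after an exception has populated sys.last_traceback,
#   ip.debugger(force=True)
# drops into pdb regardless of the call_pdb flag.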
965 961
966 962 #-------------------------------------------------------------------------
967 963 # Things related to IPython's various namespaces
968 964 #-------------------------------------------------------------------------
969 965 default_user_namespaces = True
970 966
971 967 def init_create_namespaces(self, user_module=None, user_ns=None):
972 968 # Create the namespace where the user will operate. user_ns is
973 969 # normally the only one used, and it is passed to the exec calls as
974 970 # the locals argument. But we do carry a user_global_ns namespace
975 971 # given as the exec 'globals' argument. This is useful in embedding
976 972 # situations where the ipython shell opens in a context where the
977 973 # distinction between locals and globals is meaningful. For
978 974 # non-embedded contexts, it is just the same object as the user_ns dict.
979 975
980 976 # FIXME. For some strange reason, __builtins__ is showing up at user
981 977 # level as a dict instead of a module. This is a manual fix, but I
982 978 # should really track down where the problem is coming from. Alex
983 979 # Schmolck reported this problem first.
984 980
985 981 # A useful post by Alex Martelli on this topic:
986 982 # Re: inconsistent value from __builtins__
987 983 # Von: Alex Martelli <aleaxit@yahoo.com>
988 984 # Datum: Freitag 01 Oktober 2004 04:45:34 nachmittags/abends
989 985 # Gruppen: comp.lang.python
990 986
991 987 # Michael Hohn <hohn@hooknose.lbl.gov> wrote:
992 988 # > >>> print type(builtin_check.get_global_binding('__builtins__'))
993 989 # > <type 'dict'>
994 990 # > >>> print type(__builtins__)
995 991 # > <type 'module'>
996 992 # > Is this difference in return value intentional?
997 993
998 994 # Well, it's documented that '__builtins__' can be either a dictionary
999 995 # or a module, and it's been that way for a long time. Whether it's
1000 996 # intentional (or sensible), I don't know. In any case, the idea is
1001 997 # that if you need to access the built-in namespace directly, you
1002 998 # should start with "import __builtin__" (note, no 's') which will
1003 999 # definitely give you a module. Yeah, it's somewhat confusing:-(.
1004 1000
1005 1001 # These routines return a properly built module and dict as needed by
1006 1002 # the rest of the code, and can also be used by extension writers to
1007 1003 # generate properly initialized namespaces.
1008 1004 if (user_ns is not None) or (user_module is not None):
1009 1005 self.default_user_namespaces = False
1010 1006 self.user_module, self.user_ns = self.prepare_user_module(user_module, user_ns)
1011 1007
1012 1008 # A record of hidden variables we have added to the user namespace, so
1013 1009 # we can list later only variables defined in actual interactive use.
1014 1010 self.user_ns_hidden = {}
1015 1011
1016 1012 # Now that FakeModule produces a real module, we've run into a nasty
1017 1013 # problem: after script execution (via %run), the module where the user
1018 1014 # code ran is deleted. Now that this object is a true module (needed
1019 1015 # so doctest and other tools work correctly), the Python module
1020 1016 # teardown mechanism runs over it, and sets to None every variable
1021 1017 # present in that module. Top-level references to objects from the
1022 1018 # script survive, because the user_ns is updated with them. However,
1023 1019 # calling functions defined in the script that use other things from
1024 1020 # the script will fail, because the function's closure had references
1025 1021 # to the original objects, which are now all None. So we must protect
1026 1022 # these modules from deletion by keeping a cache.
1027 1023 #
1028 1024 # To avoid keeping stale modules around (we only need the one from the
1029 1025 # last run), we use a dict keyed with the full path to the script, so
1030 1026 # only the last version of the module is held in the cache. Note,
1031 1027 # however, that we must cache the module *namespace contents* (their
1032 1028 # __dict__). Because if we try to cache the actual modules, old ones
1033 1029 # (uncached) could be destroyed while still holding references (such as
1034 1030 # those held by GUI objects that tend to be long-lived).
1035 1031 #
1036 1032 # The %reset command will flush this cache. See the cache_main_mod()
1037 1033 # and clear_main_mod_cache() methods for details on use.
1038 1034
1039 1035 # This is the cache used for 'main' namespaces
1040 1036 self._main_mod_cache = {}
1041 1037
1042 1038 # A table holding all the namespaces IPython deals with, so that
1043 1039 # introspection facilities can search easily.
1044 1040 self.ns_table = {'user_global':self.user_module.__dict__,
1045 1041 'user_local':self.user_ns,
1046 1042 'builtin':builtin_mod.__dict__
1047 1043 }
1048 1044
1049 1045 @property
1050 1046 def user_global_ns(self):
1051 1047 return self.user_module.__dict__
1052 1048
1053 1049 def prepare_user_module(self, user_module=None, user_ns=None):
1054 1050 """Prepare the module and namespace in which user code will be run.
1055 1051
1056 1052 When IPython is started normally, both parameters are None: a new module
1057 1053 is created automatically, and its __dict__ used as the namespace.
1058 1054
1059 1055 If only user_module is provided, its __dict__ is used as the namespace.
1060 1056 If only user_ns is provided, a dummy module is created, and user_ns
1061 1057 becomes the global namespace. If both are provided (as they may be
1062 1058 when embedding), user_ns is the local namespace, and user_module
1063 1059 provides the global namespace.
1064 1060
1065 1061 Parameters
1066 1062 ----------
1067 1063 user_module : module, optional
1068 1064 The current user module in which IPython is being run. If None,
1069 1065 a clean module will be created.
1070 1066 user_ns : dict, optional
1071 1067 A namespace in which to run interactive commands.
1072 1068
1073 1069 Returns
1074 1070 -------
1075 1071 A tuple of user_module and user_ns, each properly initialised.
1076 1072 """
1077 1073 if user_module is None and user_ns is not None:
1078 1074 user_ns.setdefault("__name__", "__main__")
1079 1075 user_module = DummyMod()
1080 1076 user_module.__dict__ = user_ns
1081 1077
1082 1078 if user_module is None:
1083 1079 user_module = types.ModuleType("__main__",
1084 1080 doc="Automatically created module for IPython interactive environment")
1085 1081
1086 1082 # We must ensure that __builtin__ (without the final 's') is always
1087 1083 # available and pointing to the __builtin__ *module*. For more details:
1088 1084 # http://mail.python.org/pipermail/python-dev/2001-April/014068.html
1089 1085 user_module.__dict__.setdefault('__builtin__', builtin_mod)
1090 1086 user_module.__dict__.setdefault('__builtins__', builtin_mod)
1091 1087
1092 1088 if user_ns is None:
1093 1089 user_ns = user_module.__dict__
1094 1090
1095 1091 return user_module, user_ns
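# Behaviour sketch for prepare_user_module when embedding (illustrative only):
# passing an explicit namespace makes it the __dict__ of a fresh dummy module.
#   ns = {'x': 1}
#   mod, ns = shell.prepare_user_module(user_ns=ns)
#   mod.__dict__ is ns                 # -> True
#   ns['__builtin__'] is builtin_mod   # -> True, added by the setdefault above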
1096 1092
1097 1093 def init_sys_modules(self):
1098 1094 # We need to insert into sys.modules something that looks like a
1099 1095 # module but which accesses the IPython namespace, for shelve and
1100 1096 # pickle to work interactively. Normally they rely on getting
1101 1097 # everything out of __main__, but for embedding purposes each IPython
1102 1098 # instance has its own private namespace, so we can't go shoving
1103 1099 # everything into __main__.
1104 1100
1105 1101 # note, however, that we should only do this for non-embedded
1106 1102 # ipythons, which really mimic the __main__.__dict__ with their own
1107 1103 # namespace. Embedded instances, on the other hand, should not do
1108 1104 # this because they need to manage the user local/global namespaces
1109 1105 # only, but they live within a 'normal' __main__ (meaning, they
1110 1106 # shouldn't overtake the execution environment of the script they're
1111 1107 # embedded in).
1112 1108
1113 1109 # This is overridden in the InteractiveShellEmbed subclass to a no-op.
1114 1110 main_name = self.user_module.__name__
1115 1111 sys.modules[main_name] = self.user_module
1116 1112
1117 1113 def init_user_ns(self):
1118 1114 """Initialize all user-visible namespaces to their minimum defaults.
1119 1115
1120 1116 Certain history lists are also initialized here, as they effectively
1121 1117 act as user namespaces.
1122 1118
1123 1119 Notes
1124 1120 -----
1125 1121 All data structures here are only filled in; they are NOT reset by this
1126 1122 method. If they were not empty before, data will simply be added to
1127 1123 them.
1128 1124 """
1129 1125 # This function works in two parts: first we put a few things in
1130 1126 # user_ns, and we sync that contents into user_ns_hidden so that these
1131 1127 # initial variables aren't shown by %who. After the sync, we add the
1132 1128 # rest of what we *do* want the user to see with %who even on a new
1133 1129 # session (probably nothing, so they really only see their own stuff)
1134 1130
1135 1131 # The user dict must *always* have a __builtin__ reference to the
1136 1132 # Python standard __builtin__ namespace, which must be imported.
1137 1133 # This is so that certain operations in prompt evaluation can be
1138 1134 # reliably executed with builtins. Note that we can NOT use
1139 1135 # __builtins__ (note the 's'), because that can either be a dict or a
1140 1136 # module, and can even mutate at runtime, depending on the context
1141 1137 # (Python makes no guarantees on it). In contrast, __builtin__ is
1142 1138 # always a module object, though it must be explicitly imported.
1143 1139
1144 1140 # For more details:
1145 1141 # http://mail.python.org/pipermail/python-dev/2001-April/014068.html
1146 1142 ns = dict()
1147 1143
1148 1144 # make global variables for user access to the histories
1149 1145 ns['_ih'] = self.history_manager.input_hist_parsed
1150 1146 ns['_oh'] = self.history_manager.output_hist
1151 1147 ns['_dh'] = self.history_manager.dir_hist
1152 1148
1153 1149 ns['_sh'] = shadowns
1154 1150
1155 1151 # user aliases to input and output histories. These shouldn't show up
1156 1152 # in %who, as they can have very large reprs.
1157 1153 ns['In'] = self.history_manager.input_hist_parsed
1158 1154 ns['Out'] = self.history_manager.output_hist
1159 1155
1160 1156 # Store myself as the public api!!!
1161 1157 ns['get_ipython'] = self.get_ipython
1162 1158
1163 1159 ns['exit'] = self.exiter
1164 1160 ns['quit'] = self.exiter
1165 1161
1166 1162 # Sync what we've added so far to user_ns_hidden so these aren't seen
1167 1163 # by %who
1168 1164 self.user_ns_hidden.update(ns)
1169 1165
1170 1166 # Anything put into ns now would show up in %who. Think twice before
1171 1167 # putting anything here, as we really want %who to show the user their
1172 1168 # stuff, not our variables.
1173 1169
1174 1170 # Finally, update the real user's namespace
1175 1171 self.user_ns.update(ns)
1176 1172
1177 1173 @property
1178 1174 def all_ns_refs(self):
1179 1175 """Get a list of references to all the namespace dictionaries in which
1180 1176 IPython might store a user-created object.
1181 1177
1182 1178 Note that this does not include the displayhook, which also caches
1183 1179 objects from the output."""
1184 1180 return [self.user_ns, self.user_global_ns, self.user_ns_hidden] + \
1185 1181 [m.__dict__ for m in self._main_mod_cache.values()]
1186 1182
1187 1183 def reset(self, new_session=True):
1188 1184 """Clear all internal namespaces, and attempt to release references to
1189 1185 user objects.
1190 1186
1191 1187 If new_session is True, a new history session will be opened.
1192 1188 """
1193 1189 # Clear histories
1194 1190 self.history_manager.reset(new_session)
1195 1191 # Reset counter used to index all histories
1196 1192 if new_session:
1197 1193 self.execution_count = 1
1198 1194
1199 1195 # Flush cached output items
1200 1196 if self.displayhook.do_full_cache:
1201 1197 self.displayhook.flush()
1202 1198
1203 1199 # The main execution namespaces must be cleared very carefully,
1204 1200 # skipping the deletion of the builtin-related keys, because doing so
1205 1201 # would cause errors in many object's __del__ methods.
1206 1202 if self.user_ns is not self.user_global_ns:
1207 1203 self.user_ns.clear()
1208 1204 ns = self.user_global_ns
1209 1205 drop_keys = set(ns.keys())
1210 1206 drop_keys.discard('__builtin__')
1211 1207 drop_keys.discard('__builtins__')
1212 1208 drop_keys.discard('__name__')
1213 1209 for k in drop_keys:
1214 1210 del ns[k]
1215 1211
1216 1212 self.user_ns_hidden.clear()
1217 1213
1218 1214 # Restore the user namespaces to minimal usability
1219 1215 self.init_user_ns()
1220 1216
1221 1217 # Restore the default and user aliases
1222 1218 self.alias_manager.clear_aliases()
1223 1219 self.alias_manager.init_aliases()
1224 1220
1225 1221 # Flush the private list of module references kept for script
1226 1222 # execution protection
1227 1223 self.clear_main_mod_cache()
1228 1224
1229 1225 def del_var(self, varname, by_name=False):
1230 1226 """Delete a variable from the various namespaces, so that, as
1231 1227 far as possible, we're not keeping any hidden references to it.
1232 1228
1233 1229 Parameters
1234 1230 ----------
1235 1231 varname : str
1236 1232 The name of the variable to delete.
1237 1233 by_name : bool
1238 1234 If True, delete variables with the given name in each
1239 1235 namespace. If False (default), find the variable in the user
1240 1236 namespace, and delete references to it.
1241 1237 """
1242 1238 if varname in ('__builtin__', '__builtins__'):
1243 1239 raise ValueError("Refusing to delete %s" % varname)
1244 1240
1245 1241 ns_refs = self.all_ns_refs
1246 1242
1247 1243 if by_name: # Delete by name
1248 1244 for ns in ns_refs:
1249 1245 try:
1250 1246 del ns[varname]
1251 1247 except KeyError:
1252 1248 pass
1253 1249 else: # Delete by object
1254 1250 try:
1255 1251 obj = self.user_ns[varname]
1256 1252 except KeyError:
1257 1253 raise NameError("name '%s' is not defined" % varname)
1258 1254 # Also check in output history
1259 1255 ns_refs.append(self.history_manager.output_hist)
1260 1256 for ns in ns_refs:
1261 1257 to_delete = [n for n, o in iteritems(ns) if o is obj]
1262 1258 for name in to_delete:
1263 1259 del ns[name]
1264 1260
1265 1261 # displayhook keeps extra references, but not in a dictionary
1266 1262 for name in ('_', '__', '___'):
1267 1263 if getattr(self.displayhook, name) is obj:
1268 1264 setattr(self.displayhook, name, None)
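# Usage sketch (``ip`` is an active shell instance):
#   ip.user_ns['tmp'] = [1, 2, 3]
#   ip.del_var('tmp')   # drops every reference found, including the Out history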
1269 1265
1270 1266 def reset_selective(self, regex=None):
1271 1267 """Clear selective variables from internal namespaces based on a
1272 1268 specified regular expression.
1273 1269
1274 1270 Parameters
1275 1271 ----------
1276 1272 regex : string or compiled pattern, optional
1277 1273 A regular expression pattern that will be used in searching
1278 1274 variable names in the user's namespaces.
1279 1275 """
1280 1276 if regex is not None:
1281 1277 try:
1282 1278 m = re.compile(regex)
1283 1279 except TypeError:
1284 1280 raise TypeError('regex must be a string or compiled pattern')
1285 1281 # Search for keys in each namespace that match the given regex
1286 1282 # If a match is found, delete the key/value pair.
1287 1283 for ns in self.all_ns_refs:
1288 1284 for var in list(ns):  # iterate over a copy, since we delete from ns below
1289 1285 if m.search(var):
1290 1286 del ns[var]
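# Usage sketch: remove every user variable whose name starts with 'tmp_'
# (the pattern is applied with re.search, as above):
#   ip.reset_selective(r'^tmp_')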
1291 1287
1292 1288 def push(self, variables, interactive=True):
1293 1289 """Inject a group of variables into the IPython user namespace.
1294 1290
1295 1291 Parameters
1296 1292 ----------
1297 1293 variables : dict, str or list/tuple of str
1298 1294 The variables to inject into the user's namespace. If a dict, a
1299 1295 simple update is done. If a str, the string is assumed to have
1300 1296 variable names separated by spaces. A list/tuple of str can also
1301 1297 be used to give the variable names. If just the variable names are
1302 1298 given (list/tuple/str), then the variable values are looked up in the
1303 1299 caller's frame.
1304 1300 interactive : bool
1305 1301 If True (default), the variables will be listed with the ``who``
1306 1302 magic.
1307 1303 """
1308 1304 vdict = None
1309 1305
1310 1306 # We need a dict of name/value pairs to do namespace updates.
1311 1307 if isinstance(variables, dict):
1312 1308 vdict = variables
1313 1309 elif isinstance(variables, string_types+(list, tuple)):
1314 1310 if isinstance(variables, string_types):
1315 1311 vlist = variables.split()
1316 1312 else:
1317 1313 vlist = variables
1318 1314 vdict = {}
1319 1315 cf = sys._getframe(1)
1320 1316 for name in vlist:
1321 1317 try:
1322 1318 vdict[name] = eval(name, cf.f_globals, cf.f_locals)
1323 1319 except:
1324 1320 print('Could not get variable %s from %s' %
1325 1321 (name,cf.f_code.co_name))
1326 1322 else:
1327 1323 raise ValueError('variables must be a dict/str/list/tuple')
1328 1324
1329 1325 # Propagate variables to user namespace
1330 1326 self.user_ns.update(vdict)
1331 1327
1332 1328 # And configure interactive visibility
1333 1329 user_ns_hidden = self.user_ns_hidden
1334 1330 if interactive:
1335 1331 for name in vdict:
1336 1332 user_ns_hidden.pop(name, None)
1337 1333 else:
1338 1334 user_ns_hidden.update(vdict)
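# Usage sketch (``ip`` is an active shell instance):
#   ip.push({'alpha': 1})    # dict form: plain namespace update
#   beta = 2
#   ip.push('beta')          # name form: value looked up in the caller's frame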
1339 1335
1340 1336 def drop_by_id(self, variables):
1341 1337 """Remove a dict of variables from the user namespace, if they are the
1342 1338 same as the values in the dictionary.
1343 1339
1344 1340 This is intended for use by extensions: variables that they've added can
1345 1341 be taken back out if they are unloaded, without removing any that the
1346 1342 user has overwritten.
1347 1343
1348 1344 Parameters
1349 1345 ----------
1350 1346 variables : dict
1351 1347 A dictionary mapping object names (as strings) to the objects.
1352 1348 """
1353 1349 for name, obj in iteritems(variables):
1354 1350 if name in self.user_ns and self.user_ns[name] is obj:
1355 1351 del self.user_ns[name]
1356 1352 self.user_ns_hidden.pop(name, None)
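# Usage sketch, e.g. from an extension's unload path (``helper`` is an
# illustrative object the extension injected earlier with push):
#   injected = {'helper': helper}
#   ip.push(injected)
#   ip.drop_by_id(injected)  # removed only if user_ns['helper'] is still that object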
1357 1353
1358 1354 #-------------------------------------------------------------------------
1359 1355 # Things related to object introspection
1360 1356 #-------------------------------------------------------------------------
1361 1357
1362 1358 def _ofind(self, oname, namespaces=None):
1363 1359 """Find an object in the available namespaces.
1364 1360
1365 1361 self._ofind(oname) -> dict with keys: found,obj,ospace,ismagic
1366 1362
1367 1363 Has special code to detect magic functions.
1368 1364 """
1369 1365 oname = oname.strip()
1370 1366 #print '1- oname: <%r>' % oname # dbg
1371 1367 if not oname.startswith(ESC_MAGIC) and \
1372 1368 not oname.startswith(ESC_MAGIC2) and \
1373 1369 not py3compat.isidentifier(oname, dotted=True):
1374 1370 return dict(found=False)
1375 1371
1376 1372 if namespaces is None:
1377 1373 # Namespaces to search in:
1378 1374 # Put them in a list. The order is important so that we
1379 1375 # find things in the same order that Python finds them.
1380 1376 namespaces = [ ('Interactive', self.user_ns),
1381 1377 ('Interactive (global)', self.user_global_ns),
1382 1378 ('Python builtin', builtin_mod.__dict__),
1383 1379 ]
1384 1380
1385 1381 # initialize results to 'null'
1386 1382 found = False; obj = None; ospace = None;
1387 1383 ismagic = False; isalias = False; parent = None
1388 1384
1389 1385 # We need to special-case 'print', which as of python2.6 registers as a
1390 1386 # function but should only be treated as one if print_function was
1391 1387 # loaded with a future import. In this case, just bail.
1392 1388 if (oname == 'print' and not py3compat.PY3 and not \
1393 1389 (self.compile.compiler_flags & __future__.CO_FUTURE_PRINT_FUNCTION)):
1394 1390 return {'found':found, 'obj':obj, 'namespace':ospace,
1395 1391 'ismagic':ismagic, 'isalias':isalias, 'parent':parent}
1396 1392
1397 1393 # Look for the given name by splitting it in parts. If the head is
1398 1394 # found, then we look for all the remaining parts as members, and only
1399 1395 # declare success if we can find them all.
1400 1396 oname_parts = oname.split('.')
1401 1397 oname_head, oname_rest = oname_parts[0],oname_parts[1:]
1402 1398 for nsname,ns in namespaces:
1403 1399 try:
1404 1400 obj = ns[oname_head]
1405 1401 except KeyError:
1406 1402 continue
1407 1403 else:
1408 1404 #print 'oname_rest:', oname_rest # dbg
1409 1405 for idx, part in enumerate(oname_rest):
1410 1406 try:
1411 1407 parent = obj
1412 1408 # The last part is looked up in a special way to avoid
1413 1409 # descriptor invocation as it may raise or have side
1414 1410 # effects.
1415 1411 if idx == len(oname_rest) - 1:
1416 1412 obj = self._getattr_property(obj, part)
1417 1413 else:
1418 1414 obj = getattr(obj, part)
1419 1415 except:
1420 1416 # Blanket except b/c some badly implemented objects
1421 1417 # allow __getattr__ to raise exceptions other than
1422 1418 # AttributeError, which then crashes IPython.
1423 1419 break
1424 1420 else:
1425 1421 # If we finish the for loop (no break), we got all members
1426 1422 found = True
1427 1423 ospace = nsname
1428 1424 break # namespace loop
1429 1425
1430 1426 # Try to see if it's magic
1431 1427 if not found:
1432 1428 obj = None
1433 1429 if oname.startswith(ESC_MAGIC2):
1434 1430 oname = oname.lstrip(ESC_MAGIC2)
1435 1431 obj = self.find_cell_magic(oname)
1436 1432 elif oname.startswith(ESC_MAGIC):
1437 1433 oname = oname.lstrip(ESC_MAGIC)
1438 1434 obj = self.find_line_magic(oname)
1439 1435 else:
1440 1436 # search without prefix, so run? will find %run?
1441 1437 obj = self.find_line_magic(oname)
1442 1438 if obj is None:
1443 1439 obj = self.find_cell_magic(oname)
1444 1440 if obj is not None:
1445 1441 found = True
1446 1442 ospace = 'IPython internal'
1447 1443 ismagic = True
1448 1444 isalias = isinstance(obj, Alias)
1449 1445
1450 1446 # Last try: special-case some literals like '', [], {}, etc:
1451 1447 if not found and oname_head in ["''",'""','[]','{}','()']:
1452 1448 obj = eval(oname_head)
1453 1449 found = True
1454 1450 ospace = 'Interactive'
1455 1451
1456 1452 return {'found':found, 'obj':obj, 'namespace':ospace,
1457 1453 'ismagic':ismagic, 'isalias':isalias, 'parent':parent}
1458 1454
1459 1455 @staticmethod
1460 1456 def _getattr_property(obj, attrname):
1461 1457 """Property-aware getattr to use in object finding.
1462 1458
1463 1459 If attrname represents a property, return it unevaluated (in case it has
1464 1460 side effects or raises an error).
1465 1461
1466 1462 """
1467 1463 if not isinstance(obj, type):
1468 1464 try:
1469 1465 # `getattr(type(obj), attrname)` is not guaranteed to return
1470 1466 # `obj`, but does so for property:
1471 1467 #
1472 1468 # property.__get__(self, None, cls) -> self
1473 1469 #
1474 1470 # The universal alternative is to traverse the mro manually
1475 1471 # searching for attrname in class dicts.
1476 1472 attr = getattr(type(obj), attrname)
1477 1473 except AttributeError:
1478 1474 pass
1479 1475 else:
1480 1476 # This relies on the fact that data descriptors (with both
1481 1477 # __get__ & __set__ magic methods) take precedence over
1482 1478 # instance-level attributes:
1483 1479 #
1484 1480 # class A(object):
1485 1481 # @property
1486 1482 # def foobar(self): return 123
1487 1483 # a = A()
1488 1484 # a.__dict__['foobar'] = 345
1489 1485 # a.foobar # == 123
1490 1486 #
1491 1487 # So, a property may be returned right away.
1492 1488 if isinstance(attr, property):
1493 1489 return attr
1494 1490
1495 1491 # Nothing helped, fall back.
1496 1492 return getattr(obj, attrname)
1497 1493
1498 1494 def _object_find(self, oname, namespaces=None):
1499 1495 """Find an object and return a struct with info about it."""
1500 1496 return Struct(self._ofind(oname, namespaces))
1501 1497
1502 1498 def _inspect(self, meth, oname, namespaces=None, **kw):
1503 1499 """Generic interface to the inspector system.
1504 1500
1505 1501 This function is meant to be called by pdef, pdoc & friends.
1506 1502 """
1507 1503 info = self._object_find(oname, namespaces)
1508 1504 docformat = sphinxify if self.sphinxify_docstring else None
1509 1505 if info.found:
1510 1506 pmethod = getattr(self.inspector, meth)
1511 1507 # TODO: only apply format_screen to the plain/text repr of the mime
1512 1508 # bundle.
1513 1509 formatter = format_screen if info.ismagic else docformat
1514 1510 if meth == 'pdoc':
1515 1511 pmethod(info.obj, oname, formatter)
1516 1512 elif meth == 'pinfo':
1517 1513 pmethod(info.obj, oname, formatter, info,
1518 1514 enable_html_pager=self.enable_html_pager, **kw)
1519 1515 else:
1520 1516 pmethod(info.obj, oname)
1521 1517 else:
1522 1518 print('Object `%s` not found.' % oname)
1523 1519 return 'not found' # so callers can take other action
1524 1520
1525 1521 def object_inspect(self, oname, detail_level=0):
1526 1522 """Get object info about oname"""
1527 1523 with self.builtin_trap:
1528 1524 info = self._object_find(oname)
1529 1525 if info.found:
1530 1526 return self.inspector.info(info.obj, oname, info=info,
1531 1527 detail_level=detail_level
1532 1528 )
1533 1529 else:
1534 1530 return oinspect.object_info(name=oname, found=False)
1535 1531
1536 1532 def object_inspect_text(self, oname, detail_level=0):
1537 1533 """Get object info as formatted text"""
1538 1534 return self.object_inspect_mime(oname, detail_level)['text/plain']
1539 1535
1540 1536 def object_inspect_mime(self, oname, detail_level=0):
1541 1537 """Get object info as a mimebundle of formatted representations.
1542 1538
1543 1539 A mimebundle is a dictionary, keyed by mime-type.
1544 1540 It must always have the key `'text/plain'`.
1545 1541 """
1546 1542 with self.builtin_trap:
1547 1543 info = self._object_find(oname)
1548 1544 if info.found:
1549 1545 return self.inspector._get_info(info.obj, oname, info=info,
1550 1546 detail_level=detail_level
1551 1547 )
1552 1548 else:
1553 1549 raise KeyError(oname)
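# Usage sketch: the returned mimebundle is a plain dict keyed by mime type and
# always contains 'text/plain' ('get_ipython' is defined in every user namespace):
#   bundle = ip.object_inspect_mime('get_ipython')
#   print(bundle['text/plain'])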
1554 1550
1555 1551 #-------------------------------------------------------------------------
1556 1552 # Things related to history management
1557 1553 #-------------------------------------------------------------------------
1558 1554
1559 1555 def init_history(self):
1560 1556 """Sets up the command history, and starts regular autosaves."""
1561 1557 self.history_manager = HistoryManager(shell=self, parent=self)
1562 1558 self.configurables.append(self.history_manager)
1563 1559
1564 1560 #-------------------------------------------------------------------------
1565 1561 # Things related to exception handling and tracebacks (not debugging)
1566 1562 #-------------------------------------------------------------------------
1567 1563
1568 1564 debugger_cls = Pdb
1569 1565
1570 1566 def init_traceback_handlers(self, custom_exceptions):
1571 1567 # Syntax error handler.
1572 1568 self.SyntaxTB = ultratb.SyntaxTB(color_scheme='NoColor', parent=self)
1573 1569
1574 1570 # The interactive one is initialized with an offset, meaning we always
1575 1571 # want to remove the topmost item in the traceback, which is our own
1576 1572 # internal code. Valid modes: ['Plain','Context','Verbose']
1577 1573 self.InteractiveTB = ultratb.AutoFormattedTB(mode = 'Plain',
1578 1574 color_scheme='NoColor',
1579 1575 tb_offset = 1,
1580 1576 check_cache=check_linecache_ipython,
1581 1577 debugger_cls=self.debugger_cls, parent=self)
1582 1578
1583 1579 # The instance will store a pointer to the system-wide exception hook,
1584 1580 # so that runtime code (such as magics) can access it. This is because
1585 1581 # during the read-eval loop, it may get temporarily overwritten.
1586 1582 self.sys_excepthook = sys.excepthook
1587 1583
1588 1584 # and add any custom exception handlers the user may have specified
1589 1585 self.set_custom_exc(*custom_exceptions)
1590 1586
1591 1587 # Set the exception mode
1592 1588 self.InteractiveTB.set_mode(mode=self.xmode)
1593 1589
1594 1590 def set_custom_exc(self, exc_tuple, handler):
1595 1591 """set_custom_exc(exc_tuple, handler)
1596 1592
1597 1593 Set a custom exception handler, which will be called if any of the
1598 1594 exceptions in exc_tuple occur in the mainloop (specifically, in the
1599 1595 run_code() method).
1600 1596
1601 1597 Parameters
1602 1598 ----------
1603 1599
1604 1600 exc_tuple : tuple of exception classes
1605 1601 A *tuple* of exception classes, for which to call the defined
1606 1602 handler. It is very important that you use a tuple, and NOT A
1607 1603 LIST here, because of the way Python's except statement works. If
1608 1604 you only want to trap a single exception, use a singleton tuple::
1609 1605
1610 1606 exc_tuple = (MyCustomException,)
1611 1607
1612 1608 handler : callable
1613 1609 handler must have the following signature::
1614 1610
1615 1611 def my_handler(self, etype, value, tb, tb_offset=None):
1616 1612 ...
1617 1613 return structured_traceback
1618 1614
1619 1615 Your handler must return a structured traceback (a list of strings),
1620 1616 or None.
1621 1617
1622 1618 This will be made into an instance method (via types.MethodType)
1623 1619 of IPython itself, and it will be called if any of the exceptions
1624 1620 listed in the exc_tuple are caught. If the handler is None, an
1625 1621 internal basic one is used, which just prints basic info.
1626 1622
1627 1623 To protect IPython from crashes, if your handler ever raises an
1628 1624 exception or returns an invalid result, it will be immediately
1629 1625 disabled.
1630 1626
1631 1627 WARNING: by putting in your own exception handler into IPython's main
1632 1628 execution loop, you run a very good chance of nasty crashes. This
1633 1629 facility should only be used if you really know what you are doing."""
1634 1630
1635 1631 assert type(exc_tuple)==type(()) , \
1636 1632 "The custom exceptions must be given AS A TUPLE."
1637 1633
1638 1634 def dummy_handler(self, etype, value, tb, tb_offset=None):
1639 1635 print('*** Simple custom exception handler ***')
1640 1636 print('Exception type :',etype)
1641 1637 print('Exception value:',value)
1642 1638 print('Traceback :',tb)
1643 1639 #print 'Source code :','\n'.join(self.buffer)
1644 1640
1645 1641 def validate_stb(stb):
1646 1642 """validate structured traceback return type
1647 1643
1648 1644 return type of CustomTB *should* be a list of strings, but allow
1649 1645 single strings or None, which are harmless.
1650 1646
1651 1647 This function will *always* return a list of strings,
1652 1648 and will raise a TypeError if stb is inappropriate.
1653 1649 """
1654 1650 msg = "CustomTB must return list of strings, not %r" % stb
1655 1651 if stb is None:
1656 1652 return []
1657 1653 elif isinstance(stb, string_types):
1658 1654 return [stb]
1659 1655 elif not isinstance(stb, list):
1660 1656 raise TypeError(msg)
1661 1657 # it's a list
1662 1658 for line in stb:
1663 1659 # check every element
1664 1660 if not isinstance(line, string_types):
1665 1661 raise TypeError(msg)
1666 1662 return stb
1667 1663
1668 1664 if handler is None:
1669 1665 wrapped = dummy_handler
1670 1666 else:
1671 1667 def wrapped(self,etype,value,tb,tb_offset=None):
1672 1668 """wrap CustomTB handler, to protect IPython from user code
1673 1669
1674 1670 This makes it harder (but not impossible) for custom exception
1675 1671 handlers to crash IPython.
1676 1672 """
1677 1673 try:
1678 1674 stb = handler(self,etype,value,tb,tb_offset=tb_offset)
1679 1675 return validate_stb(stb)
1680 1676 except:
1681 1677 # clear custom handler immediately
1682 1678 self.set_custom_exc((), None)
1683 1679 print("Custom TB Handler failed, unregistering", file=sys.stderr)
1684 1680 # show the exception in handler first
1685 1681 stb = self.InteractiveTB.structured_traceback(*sys.exc_info())
1686 1682 print(self.InteractiveTB.stb2text(stb))
1687 1683 print("The original exception:")
1688 1684 stb = self.InteractiveTB.structured_traceback(
1689 1685 (etype,value,tb), tb_offset=tb_offset
1690 1686 )
1691 1687 return stb
1692 1688
1693 1689 self.CustomTB = types.MethodType(wrapped,self)
1694 1690 self.custom_exceptions = exc_tuple
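# Usage sketch (``ip`` is an active shell instance); the handler follows the
# signature documented above and returns a structured traceback:
#   def my_handler(self, etype, value, tb, tb_offset=None):
#       print('caught:', etype.__name__)
#       return self.InteractiveTB.structured_traceback(
#           etype, value, tb, tb_offset=tb_offset)
#   ip.set_custom_exc((ZeroDivisionError,), my_handler)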
1695 1691
1696 1692 def excepthook(self, etype, value, tb):
1697 1693 """One more defense for GUI apps that call sys.excepthook.
1698 1694
1699 1695 GUI frameworks like wxPython trap exceptions and call
1700 1696 sys.excepthook themselves. I guess this is a feature that
1701 1697 enables them to keep running after exceptions that would
1702 1698 otherwise kill their mainloop. This is a bother for IPython
1703 1699 which expects to catch all of the program exceptions with a try:
1704 1700 except: statement.
1705 1701
1706 1702 Normally, IPython sets sys.excepthook to a CrashHandler instance, so if
1707 1703 any app directly invokes sys.excepthook, it will look to the user like
1708 1704 IPython crashed. In order to work around this, we can disable the
1709 1705 CrashHandler and replace it with this excepthook instead, which prints a
1710 1706 regular traceback using our InteractiveTB. In this fashion, apps which
1711 1707 call sys.excepthook will generate a regular-looking exception from
1712 1708 IPython, and the CrashHandler will only be triggered by real IPython
1713 1709 crashes.
1714 1710
1715 1711 This hook should be used sparingly, only in places which are not likely
1716 1712 to be true IPython errors.
1717 1713 """
1718 1714 self.showtraceback((etype, value, tb), tb_offset=0)
1719 1715
1720 1716 def _get_exc_info(self, exc_tuple=None):
1721 1717 """get exc_info from a given tuple, sys.exc_info() or sys.last_type etc.
1722 1718
1723 1719 Ensures sys.last_type,value,traceback hold the exc_info we found,
1724 1720 from whichever source.
1725 1721
1726 1722 raises ValueError if none of these contain any information
1727 1723 """
1728 1724 if exc_tuple is None:
1729 1725 etype, value, tb = sys.exc_info()
1730 1726 else:
1731 1727 etype, value, tb = exc_tuple
1732 1728
1733 1729 if etype is None:
1734 1730 if hasattr(sys, 'last_type'):
1735 1731 etype, value, tb = sys.last_type, sys.last_value, \
1736 1732 sys.last_traceback
1737 1733
1738 1734 if etype is None:
1739 1735 raise ValueError("No exception to find")
1740 1736
1741 1737 # Now store the exception info in sys.last_type etc.
1742 1738 # WARNING: these variables are somewhat deprecated and not
1743 1739 # necessarily safe to use in a threaded environment, but tools
1744 1740 # like pdb depend on their existence, so let's set them. If we
1745 1741 # find problems in the field, we'll need to revisit their use.
1746 1742 sys.last_type = etype
1747 1743 sys.last_value = value
1748 1744 sys.last_traceback = tb
1749 1745
1750 1746 return etype, value, tb
1751 1747
1752 1748 def show_usage_error(self, exc):
1753 1749 """Show a short message for UsageErrors
1754 1750
1755 1751 These are special exceptions that shouldn't show a traceback.
1756 1752 """
1757 1753 print("UsageError: %s" % exc, file=sys.stderr)
1758 1754
1759 1755 def get_exception_only(self, exc_tuple=None):
1760 1756 """
1761 1757 Return as a string (ending with a newline) the exception that
1762 1758 just occurred, without any traceback.
1763 1759 """
1764 1760 etype, value, tb = self._get_exc_info(exc_tuple)
1765 1761 msg = traceback.format_exception_only(etype, value)
1766 1762 return ''.join(msg)
1767 1763
1768 1764 def showtraceback(self, exc_tuple=None, filename=None, tb_offset=None,
1769 1765 exception_only=False):
1770 1766 """Display the exception that just occurred.
1771 1767
1772 1768 If nothing is known about the exception, this is the method which
1773 1769 should be used throughout the code for presenting user tracebacks,
1774 1770 rather than directly invoking the InteractiveTB object.
1775 1771
1776 1772 A specific showsyntaxerror() also exists, but this method can take
1777 1773 care of calling it if needed, so unless you are explicitly catching a
1778 1774 SyntaxError exception, don't try to analyze the stack manually and
1779 1775 simply call this method."""
1780 1776
1781 1777 try:
1782 1778 try:
1783 1779 etype, value, tb = self._get_exc_info(exc_tuple)
1784 1780 except ValueError:
1785 1781 print('No traceback available to show.', file=sys.stderr)
1786 1782 return
1787 1783
1788 1784 if issubclass(etype, SyntaxError):
1789 1785 # Though this won't be called by syntax errors in the input
1790 1786 # line, there may be SyntaxError cases with imported code.
1791 1787 self.showsyntaxerror(filename)
1792 1788 elif etype is UsageError:
1793 1789 self.show_usage_error(value)
1794 1790 else:
1795 1791 if exception_only:
1796 1792 stb = ['An exception has occurred, use %tb to see '
1797 1793 'the full traceback.\n']
1798 1794 stb.extend(self.InteractiveTB.get_exception_only(etype,
1799 1795 value))
1800 1796 else:
1801 1797 try:
1802 1798 # Exception classes can customise their traceback - we
1803 1799 # use this in IPython.parallel for exceptions occurring
1804 1800 # in the engines. This should return a list of strings.
1805 1801 stb = value._render_traceback_()
1806 1802 except Exception:
1807 1803 stb = self.InteractiveTB.structured_traceback(etype,
1808 1804 value, tb, tb_offset=tb_offset)
1809 1805
1810 1806 self._showtraceback(etype, value, stb)
1811 1807 if self.call_pdb:
1812 1808 # drop into debugger
1813 1809 self.debugger(force=True)
1814 1810 return
1815 1811
1816 1812 # Actually show the traceback
1817 1813 self._showtraceback(etype, value, stb)
1818 1814
1819 1815 except KeyboardInterrupt:
1820 1816 print('\n' + self.get_exception_only(), file=sys.stderr)
1821 1817
1822 1818 def _showtraceback(self, etype, evalue, stb):
1823 1819 """Actually show a traceback.
1824 1820
1825 1821 Subclasses may override this method to put the traceback on a different
1826 1822 place, like a side channel.
1827 1823 """
1828 1824 print(self.InteractiveTB.stb2text(stb))
1829 1825
1830 1826 def showsyntaxerror(self, filename=None):
1831 1827 """Display the syntax error that just occurred.
1832 1828
1833 1829 This doesn't display a stack trace because there isn't one.
1834 1830
1835 1831 If a filename is given, it is stuffed in the exception instead
1836 1832 of what was there before (because Python's parser always uses
1837 1833 "<string>" when reading from a string).
1838 1834 """
1839 1835 etype, value, last_traceback = self._get_exc_info()
1840 1836
1841 1837 if filename and issubclass(etype, SyntaxError):
1842 1838 try:
1843 1839 value.filename = filename
1844 1840 except:
1845 1841 # Not the format we expect; leave it alone
1846 1842 pass
1847 1843
1848 1844 stb = self.SyntaxTB.structured_traceback(etype, value, [])
1849 1845 self._showtraceback(etype, value, stb)
1850 1846
1851 1847 # This is overridden in TerminalInteractiveShell to show a message about
1852 1848 # the %paste magic.
1853 1849 def showindentationerror(self):
1854 1850 """Called by run_cell when there's an IndentationError in code entered
1855 1851 at the prompt.
1856 1852
1857 1853 This is overridden in TerminalInteractiveShell to show a message about
1858 1854 the %paste magic."""
1859 1855 self.showsyntaxerror()
1860 1856
1861 1857 #-------------------------------------------------------------------------
1862 1858 # Things related to readline
1863 1859 #-------------------------------------------------------------------------
1864 1860
1865 1861 def init_readline(self):
1866 1862 """DEPRECATED
1867 1863
1868 1864 Moved to terminal subclass, here only to simplify the init logic."""
1869 1865 # Set a number of methods that depend on readline to be no-op
1870 1866 warnings.warn('`init_readline` is no-op since IPython 5.0 and is Deprecated',
1871 1867 DeprecationWarning, stacklevel=2)
1872 1868 self.set_custom_completer = no_op
1873 1869
1874 1870 @skip_doctest
1875 1871 def set_next_input(self, s, replace=False):
1876 1872 """ Sets the 'default' input string for the next command line.
1877 1873
1878 1874 Example::
1879 1875
1880 1876 In [1]: _ip.set_next_input("Hello World")
1881 1877 In [2]: Hello World_ # cursor is here
1882 1878 """
1883 1879 self.rl_next_input = py3compat.cast_bytes_py2(s)
1884 1880
1885 1881 def _indent_current_str(self):
1886 1882 """return the current level of indentation as a string"""
1887 1883 return self.input_splitter.indent_spaces * ' '
1888 1884
1889 1885 #-------------------------------------------------------------------------
1890 1886 # Things related to text completion
1891 1887 #-------------------------------------------------------------------------
1892 1888
1893 1889 def init_completer(self):
1894 1890 """Initialize the completion machinery.
1895 1891
1896 1892 This creates completion machinery that can be used by client code,
1897 1893 either interactively in-process (typically triggered by the readline
1898 1894 library), programmatically (such as in test suites) or out-of-process
1899 1895 (typically over the network by remote frontends).
1900 1896 """
1901 1897 from IPython.core.completer import IPCompleter
1902 1898 from IPython.core.completerlib import (module_completer,
1903 1899 magic_run_completer, cd_completer, reset_completer)
1904 1900
1905 1901 self.Completer = IPCompleter(shell=self,
1906 1902 namespace=self.user_ns,
1907 1903 global_namespace=self.user_global_ns,
1908 1904 parent=self,
1909 1905 )
1910 1906 self.configurables.append(self.Completer)
1911 1907
1912 1908 # Add custom completers to the basic ones built into IPCompleter
1913 1909 sdisp = self.strdispatchers.get('complete_command', StrDispatch())
1914 1910 self.strdispatchers['complete_command'] = sdisp
1915 1911 self.Completer.custom_completers = sdisp
1916 1912
1917 1913 self.set_hook('complete_command', module_completer, str_key = 'import')
1918 1914 self.set_hook('complete_command', module_completer, str_key = 'from')
1919 1915 self.set_hook('complete_command', module_completer, str_key = '%aimport')
1920 1916 self.set_hook('complete_command', magic_run_completer, str_key = '%run')
1921 1917 self.set_hook('complete_command', cd_completer, str_key = '%cd')
1922 1918 self.set_hook('complete_command', reset_completer, str_key = '%reset')
1923 1919
1924 1920
1925 @skip_doctest_py2
1926 1921 def complete(self, text, line=None, cursor_pos=None):
1927 1922 """Return the completed text and a list of completions.
1928 1923
1929 1924 Parameters
1930 1925 ----------
1931 1926
1932 1927 text : string
1933 1928 A string of text to be completed on. It can be given as empty and
1934 1929 instead a line/position pair are given. In this case, the
1935 1930 completer itself will split the line like readline does.
1936 1931
1937 1932 line : string, optional
1938 1933 The complete line that text is part of.
1939 1934
1940 1935 cursor_pos : int, optional
1941 1936 The position of the cursor on the input line.
1942 1937
1943 1938 Returns
1944 1939 -------
1945 1940 text : string
1946 1941 The actual text that was completed.
1947 1942
1948 1943 matches : list
1949 1944 A sorted list with all possible completions.
1950 1945
1951 1946 The optional arguments allow the completion to take more context into
1952 1947 account, and are part of the low-level completion API.
1953 1948
1954 1949 This is a wrapper around the completion mechanism, similar to what
1955 1950 readline does at the command line when the TAB key is hit. By
1956 1951 exposing it as a method, it can be used by other non-readline
1957 1952 environments (such as GUIs) for text completion.
1958 1953
1959 1954 Simple usage example:
1960 1955
1961 1956 In [1]: x = 'hello'
1962 1957
1963 1958 In [2]: _ip.complete('x.l')
1964 1959 Out[2]: ('x.l', ['x.ljust', 'x.lower', 'x.lstrip'])
1965 1960 """
1966 1961
1967 1962 # Inject names into __builtin__ so we can complete on the added names.
1968 1963 with self.builtin_trap:
1969 1964 return self.Completer.complete(text, line, cursor_pos)
1970 1965
1971 1966 def set_custom_completer(self, completer, pos=0):
1972 1967 """Adds a new custom completer function.
1973 1968
1974 1969 The position argument (defaults to 0) is the index in the completers
1975 1970 list where you want the completer to be inserted."""
1976 1971
1977 1972 newcomp = types.MethodType(completer,self.Completer)
1978 1973 self.Completer.matchers.insert(pos,newcomp)
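# Usage sketch: the completer is bound to the Completer instance and called
# with the text fragment being completed; it should return a list of strings
# (illustrative matcher only):
#   def color_matcher(self, text):
#       return [c for c in ('red', 'green', 'blue') if c.startswith(text)]
#   ip.set_custom_completer(color_matcher)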
1979 1974
1980 1975 def set_completer_frame(self, frame=None):
1981 1976 """Set the frame of the completer."""
1982 1977 if frame:
1983 1978 self.Completer.namespace = frame.f_locals
1984 1979 self.Completer.global_namespace = frame.f_globals
1985 1980 else:
1986 1981 self.Completer.namespace = self.user_ns
1987 1982 self.Completer.global_namespace = self.user_global_ns
1988 1983
1989 1984 #-------------------------------------------------------------------------
1990 1985 # Things related to magics
1991 1986 #-------------------------------------------------------------------------
1992 1987
1993 1988 def init_magics(self):
1994 1989 from IPython.core import magics as m
1995 1990 self.magics_manager = magic.MagicsManager(shell=self,
1996 1991 parent=self,
1997 1992 user_magics=m.UserMagics(self))
1998 1993 self.configurables.append(self.magics_manager)
1999 1994
2000 1995 # Expose as public API from the magics manager
2001 1996 self.register_magics = self.magics_manager.register
2002 1997
2003 1998 self.register_magics(m.AutoMagics, m.BasicMagics, m.CodeMagics,
2004 1999 m.ConfigMagics, m.DisplayMagics, m.ExecutionMagics,
2005 2000 m.ExtensionMagics, m.HistoryMagics, m.LoggingMagics,
2006 2001 m.NamespaceMagics, m.OSMagics, m.PylabMagics, m.ScriptMagics,
2007 2002 )
2008 2003
2009 2004 # Register Magic Aliases
2010 2005 mman = self.magics_manager
2011 2006 # FIXME: magic aliases should be defined by the Magics classes
2012 2007 # or in MagicsManager, not here
2013 2008 mman.register_alias('ed', 'edit')
2014 2009 mman.register_alias('hist', 'history')
2015 2010 mman.register_alias('rep', 'recall')
2016 2011 mman.register_alias('SVG', 'svg', 'cell')
2017 2012 mman.register_alias('HTML', 'html', 'cell')
2018 2013 mman.register_alias('file', 'writefile', 'cell')
2019 2014
2020 2015 # FIXME: Move the color initialization to the DisplayHook, which
2021 2016 # should be split into a prompt manager and displayhook. We probably
2022 2017 # even need a centralized colors management object.
2023 2018 self.magic('colors %s' % self.colors)
2024 2019
2025 2020 # Defined here so that it's included in the documentation
2026 2021 @functools.wraps(magic.MagicsManager.register_function)
2027 2022 def register_magic_function(self, func, magic_kind='line', magic_name=None):
2028 2023 self.magics_manager.register_function(func,
2029 2024 magic_kind=magic_kind, magic_name=magic_name)
2030 2025
2031 2026 def run_line_magic(self, magic_name, line):
2032 2027 """Execute the given line magic.
2033 2028
2034 2029 Parameters
2035 2030 ----------
2036 2031 magic_name : str
2037 2032 Name of the desired magic function, without '%' prefix.
2038 2033
2039 2034 line : str
2040 2035 The rest of the input line as a single string.
2041 2036 """
2042 2037 fn = self.find_line_magic(magic_name)
2043 2038 if fn is None:
2044 2039 cm = self.find_cell_magic(magic_name)
2045 2040 etpl = "Line magic function `%%%s` not found%s."
2046 2041 extra = '' if cm is None else (' (But cell magic `%%%%%s` exists, '
2047 2042 'did you mean that instead?)' % magic_name )
2048 2043 error(etpl % (magic_name, extra))
2049 2044 else:
2050 2045 # Note: this is the distance in the stack to the user's frame.
2051 2046 # This will need to be updated if the internal calling logic gets
2052 2047 # refactored, or else we'll be expanding the wrong variables.
2053 2048 stack_depth = 2
2054 2049 magic_arg_s = self.var_expand(line, stack_depth)
2055 2050 # Put magic args in a list so we can call with f(*a) syntax
2056 2051 args = [magic_arg_s]
2057 2052 kwargs = {}
2058 2053 # Grab local namespace if we need it:
2059 2054 if getattr(fn, "needs_local_scope", False):
2060 2055 kwargs['local_ns'] = sys._getframe(stack_depth).f_locals
2061 2056 with self.builtin_trap:
2062 2057 result = fn(*args,**kwargs)
2063 2058 return result
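# Usage sketch, equivalent to typing ``%who_ls`` at the prompt:
#   ip.run_line_magic('who_ls', '')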
2064 2059
2065 2060 def run_cell_magic(self, magic_name, line, cell):
2066 2061 """Execute the given cell magic.
2067 2062
2068 2063 Parameters
2069 2064 ----------
2070 2065 magic_name : str
2071 2066 Name of the desired magic function, without '%' prefix.
2072 2067
2073 2068 line : str
2074 2069 The rest of the first input line as a single string.
2075 2070
2076 2071 cell : str
2077 2072 The body of the cell as a (possibly multiline) string.
2078 2073 """
2079 2074 fn = self.find_cell_magic(magic_name)
2080 2075 if fn is None:
2081 2076 lm = self.find_line_magic(magic_name)
2082 2077 etpl = "Cell magic `%%{0}` not found{1}."
2083 2078 extra = '' if lm is None else (' (But line magic `%{0}` exists, '
2084 2079 'did you mean that instead?)'.format(magic_name))
2085 2080 error(etpl.format(magic_name, extra))
2086 2081 elif cell == '':
2087 2082 message = '%%{0} is a cell magic, but the cell body is empty.'.format(magic_name)
2088 2083 if self.find_line_magic(magic_name) is not None:
2089 2084 message += ' Did you mean the line magic %{0} (single %)?'.format(magic_name)
2090 2085 raise UsageError(message)
2091 2086 else:
2092 2087 # Note: this is the distance in the stack to the user's frame.
2093 2088 # This will need to be updated if the internal calling logic gets
2094 2089 # refactored, or else we'll be expanding the wrong variables.
2095 2090 stack_depth = 2
2096 2091 magic_arg_s = self.var_expand(line, stack_depth)
2097 2092 with self.builtin_trap:
2098 2093 result = fn(magic_arg_s, cell)
2099 2094 return result
2100 2095
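A matching sketch for cell magics, under the same assumption of an active shell; the second argument plays the role of the magic's option line and the third the cell body:

    from IPython import get_ipython

    ip = get_ipython()
    # Equivalent to a "%%timeit -n 10 -r 3" cell with a one-line body.
    ip.run_cell_magic('timeit', '-n 10 -r 3', 'total = sum(range(1000))')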
2101 2096 def find_line_magic(self, magic_name):
2102 2097 """Find and return a line magic by name.
2103 2098
2104 2099 Returns None if the magic isn't found."""
2105 2100 return self.magics_manager.magics['line'].get(magic_name)
2106 2101
2107 2102 def find_cell_magic(self, magic_name):
2108 2103 """Find and return a cell magic by name.
2109 2104
2110 2105 Returns None if the magic isn't found."""
2111 2106 return self.magics_manager.magics['cell'].get(magic_name)
2112 2107
2113 2108 def find_magic(self, magic_name, magic_kind='line'):
2114 2109 """Find and return a magic of the given type by name.
2115 2110
2116 2111 Returns None if the magic isn't found."""
2117 2112 return self.magics_manager.magics[magic_kind].get(magic_name)
2118 2113
2119 2114 def magic(self, arg_s):
2120 2115 """DEPRECATED. Use run_line_magic() instead.
2121 2116
2122 2117 Call a magic function by name.
2123 2118
2124 2119 Input: a string containing the name of the magic function to call and
2125 2120 any additional arguments to be passed to the magic.
2126 2121
2127 2122 magic('name -opt foo bar') is equivalent to typing at the ipython
2128 2123 prompt:
2129 2124
2130 2125 In[1]: %name -opt foo bar
2131 2126
2132 2127 To call a magic without arguments, simply use magic('name').
2133 2128
2134 2129 This provides a proper Python function to call IPython's magics in any
2135 2130 valid Python code you can type at the interpreter, including loops and
2136 2131 compound statements.
2137 2132 """
2138 2133 # TODO: should we issue a loud deprecation warning here?
2139 2134 magic_name, _, magic_arg_s = arg_s.partition(' ')
2140 2135 magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)
2141 2136 return self.run_line_magic(magic_name, magic_arg_s)
2142 2137
2143 2138 #-------------------------------------------------------------------------
2144 2139 # Things related to macros
2145 2140 #-------------------------------------------------------------------------
2146 2141
2147 2142 def define_macro(self, name, themacro):
2148 2143 """Define a new macro
2149 2144
2150 2145 Parameters
2151 2146 ----------
2152 2147 name : str
2153 2148 The name of the macro.
2154 2149 themacro : str or Macro
2155 2150 The action to do upon invoking the macro. If a string, a new
2156 2151 Macro object is created by passing the string to it.
2157 2152 """
2158 2153
2159 2154 from IPython.core import macro
2160 2155
2161 2156 if isinstance(themacro, string_types):
2162 2157 themacro = macro.Macro(themacro)
2163 2158 if not isinstance(themacro, macro.Macro):
2164 2159 raise ValueError('A macro must be a string or a Macro instance.')
2165 2160 self.user_ns[name] = themacro
2166 2161
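A short sketch of define_macro with a hypothetical name and body, assuming an active shell; the string is wrapped in a Macro object that lands in user_ns and can then be invoked by name at the prompt:

    from IPython import get_ipython

    ip = get_ipython()
    ip.define_macro('greet', 'print("hello from a macro")\n')
    ip.user_ns['greet']    # an IPython.core.macro.Macro instance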
2167 2162 #-------------------------------------------------------------------------
2168 2163 # Things related to the running of system commands
2169 2164 #-------------------------------------------------------------------------
2170 2165
2171 2166 def system_piped(self, cmd):
2172 2167 """Call the given cmd in a subprocess, piping stdout/err
2173 2168
2174 2169 Parameters
2175 2170 ----------
2176 2171 cmd : str
2177 2172 Command to execute (cannot end in '&', as background processes are
2178 2173 not supported). Should not be a command that expects input
2179 2174 other than simple text.
2180 2175 """
2181 2176 if cmd.rstrip().endswith('&'):
2182 2177 # this is *far* from a rigorous test
2183 2178 # We do not support backgrounding processes because we either use
2184 2179 # pexpect or pipes to read from. Users can always just call
2185 2180 # os.system() or use ip.system=ip.system_raw
2186 2181 # if they really want a background process.
2187 2182 raise OSError("Background processes not supported.")
2188 2183
2189 2184 # we explicitly do NOT return the subprocess status code, because
2190 2185 # a non-None value would trigger :func:`sys.displayhook` calls.
2191 2186 # Instead, we store the exit_code in user_ns.
2192 2187 self.user_ns['_exit_code'] = system(self.var_expand(cmd, depth=1))
2193 2188
2194 2189 def system_raw(self, cmd):
2195 2190 """Call the given cmd in a subprocess using os.system on Windows or
2196 2191 subprocess.call using the system shell on other platforms.
2197 2192
2198 2193 Parameters
2199 2194 ----------
2200 2195 cmd : str
2201 2196 Command to execute.
2202 2197 """
2203 2198 cmd = self.var_expand(cmd, depth=1)
2204 2199 # protect os.system from UNC paths on Windows, which it can't handle:
2205 2200 if sys.platform == 'win32':
2206 2201 from IPython.utils._process_win32 import AvoidUNCPath
2207 2202 with AvoidUNCPath() as path:
2208 2203 if path is not None:
2209 2204 cmd = '"pushd %s &&"%s' % (path, cmd)
2210 2205 cmd = py3compat.unicode_to_str(cmd)
2211 2206 try:
2212 2207 ec = os.system(cmd)
2213 2208 except KeyboardInterrupt:
2214 2209 print('\n' + self.get_exception_only(), file=sys.stderr)
2215 2210 ec = -2
2216 2211 else:
2217 2212 cmd = py3compat.unicode_to_str(cmd)
2218 2213 # For posix the result of the subprocess.call() below is an exit
2219 2214 # code, which by convention is zero for success, positive for
2220 2215 # program failure. Exit codes above 128 are reserved for signals,
2221 2216 # and the formula for converting a signal to an exit code is usually
2222 2217 # signal_number+128. To more easily differentiate between exit
2223 2218 # codes and signals, ipython uses negative numbers. For instance
2224 2219 # since control-c is signal 2 but exit code 130, ipython's
2225 2220 # _exit_code variable will read -2. Note that some shells like
2226 2221 # csh and fish don't follow sh/bash conventions for exit codes.
2227 2222 executable = os.environ.get('SHELL', None)
2228 2223 try:
2229 2224 # Use env shell instead of default /bin/sh
2230 2225 ec = subprocess.call(cmd, shell=True, executable=executable)
2231 2226 except KeyboardInterrupt:
2232 2227 # intercept control-C; a long traceback is not useful here
2233 2228 print('\n' + self.get_exception_only(), file=sys.stderr)
2234 2229 ec = 130
2235 2230 if ec > 128:
2236 2231 ec = -(ec - 128)
2237 2232
2238 2233 # We explicitly do NOT return the subprocess status code, because
2239 2234 # a non-None value would trigger :func:`sys.displayhook` calls.
2240 2235 # Instead, we store the exit_code in user_ns. Note the semantics
2241 2236 # of _exit_code: for control-c, _exit_code == -signal.SIGINT,
2242 2237 # but raising SystemExit(_exit_code) will give status 254!
2243 2238 self.user_ns['_exit_code'] = ec
2244 2239
2245 2240 # use piped system by default, because it is better behaved
2246 2241 system = system_piped
2247 2242
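A sketch of the exit-code bookkeeping described above, assuming an active shell on a POSIX system where the `true` and `false` commands exist:

    from IPython import get_ipython

    ip = get_ipython()
    ip.system('true')                   # runs through system_piped
    print(ip.user_ns['_exit_code'])     # 0 on success
    ip.system('false')
    print(ip.user_ns['_exit_code'])     # non-zero exit status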
2248 2243 def getoutput(self, cmd, split=True, depth=0):
2249 2244 """Get output (possibly including stderr) from a subprocess.
2250 2245
2251 2246 Parameters
2252 2247 ----------
2253 2248 cmd : str
2254 2249 Command to execute (cannot end in '&', as background processes are
2255 2250 not supported).
2256 2251 split : bool, optional
2257 2252 If True, split the output into an IPython SList. Otherwise, an
2258 2253 IPython LSString is returned. These are objects similar to normal
2259 2254 lists and strings, with a few convenience attributes for easier
2260 2255 manipulation of line-based output. You can use '?' on them for
2261 2256 details.
2262 2257 depth : int, optional
2263 2258 How many frames above the caller are the local variables which should
2264 2259 be expanded in the command string? The default (0) assumes that the
2265 2260 expansion variables are in the stack frame calling this function.
2266 2261 """
2267 2262 if cmd.rstrip().endswith('&'):
2268 2263 # this is *far* from a rigorous test
2269 2264 raise OSError("Background processes not supported.")
2270 2265 out = getoutput(self.var_expand(cmd, depth=depth+1))
2271 2266 if split:
2272 2267 out = SList(out.splitlines())
2273 2268 else:
2274 2269 out = LSString(out)
2275 2270 return out
2276 2271
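To illustrate the SList conveniences mentioned in the docstring (a sketch assuming a Unix-like system where `ls` is available):

    from IPython import get_ipython

    ip = get_ipython()
    files = ip.getoutput('ls -l')     # SList: one element per output line
    py_only = files.grep(r'\.py$')    # keep only lines matching a regex
    names = files.fields(-1)          # last whitespace-separated column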
2277 2272 #-------------------------------------------------------------------------
2278 2273 # Things related to aliases
2279 2274 #-------------------------------------------------------------------------
2280 2275
2281 2276 def init_alias(self):
2282 2277 self.alias_manager = AliasManager(shell=self, parent=self)
2283 2278 self.configurables.append(self.alias_manager)
2284 2279
2285 2280 #-------------------------------------------------------------------------
2286 2281 # Things related to extensions
2287 2282 #-------------------------------------------------------------------------
2288 2283
2289 2284 def init_extension_manager(self):
2290 2285 self.extension_manager = ExtensionManager(shell=self, parent=self)
2291 2286 self.configurables.append(self.extension_manager)
2292 2287
2293 2288 #-------------------------------------------------------------------------
2294 2289 # Things related to payloads
2295 2290 #-------------------------------------------------------------------------
2296 2291
2297 2292 def init_payload(self):
2298 2293 self.payload_manager = PayloadManager(parent=self)
2299 2294 self.configurables.append(self.payload_manager)
2300 2295
2301 2296 #-------------------------------------------------------------------------
2302 2297 # Things related to the prefilter
2303 2298 #-------------------------------------------------------------------------
2304 2299
2305 2300 def init_prefilter(self):
2306 2301 self.prefilter_manager = PrefilterManager(shell=self, parent=self)
2307 2302 self.configurables.append(self.prefilter_manager)
2308 2303 # Ultimately this will be refactored in the new interpreter code, but
2309 2304 # for now, we should expose the main prefilter method (there's legacy
2310 2305 # code out there that may rely on this).
2311 2306 self.prefilter = self.prefilter_manager.prefilter_lines
2312 2307
2313 2308 def auto_rewrite_input(self, cmd):
2314 2309 """Print to the screen the rewritten form of the user's command.
2315 2310
2316 2311 This shows visual feedback by rewriting input lines that cause
2317 2312 automatic calling to kick in, like::
2318 2313
2319 2314 /f x
2320 2315
2321 2316 into::
2322 2317
2323 2318 ------> f(x)
2324 2319
2325 2320 after the user's input prompt. This helps the user understand that the
2326 2321 input line was transformed automatically by IPython.
2327 2322 """
2328 2323 if not self.show_rewritten_input:
2329 2324 return
2330 2325
2331 2326 # This is overridden in TerminalInteractiveShell to use fancy prompts
2332 2327 print("------> " + cmd)
2333 2328
2334 2329 #-------------------------------------------------------------------------
2335 2330 # Things related to extracting values/expressions from kernel and user_ns
2336 2331 #-------------------------------------------------------------------------
2337 2332
2338 2333 def _user_obj_error(self):
2339 2334 """return simple exception dict
2340 2335
2341 2336 for use in user_expressions
2342 2337 """
2343 2338
2344 2339 etype, evalue, tb = self._get_exc_info()
2345 2340 stb = self.InteractiveTB.get_exception_only(etype, evalue)
2346 2341
2347 2342 exc_info = {
2348 2343 u'status' : 'error',
2349 2344 u'traceback' : stb,
2350 2345 u'ename' : unicode_type(etype.__name__),
2351 2346 u'evalue' : py3compat.safe_unicode(evalue),
2352 2347 }
2353 2348
2354 2349 return exc_info
2355 2350
2356 2351 def _format_user_obj(self, obj):
2357 2352 """format a user object to display dict
2358 2353
2359 2354 for use in user_expressions
2360 2355 """
2361 2356
2362 2357 data, md = self.display_formatter.format(obj)
2363 2358 value = {
2364 2359 'status' : 'ok',
2365 2360 'data' : data,
2366 2361 'metadata' : md,
2367 2362 }
2368 2363 return value
2369 2364
2370 2365 def user_expressions(self, expressions):
2371 2366 """Evaluate a dict of expressions in the user's namespace.
2372 2367
2373 2368 Parameters
2374 2369 ----------
2375 2370 expressions : dict
2376 2371 A dict with string keys and string values. The expression values
2377 2372 should be valid Python expressions, each of which will be evaluated
2378 2373 in the user namespace.
2379 2374
2380 2375 Returns
2381 2376 -------
2382 2377 A dict, keyed like the input expressions dict, with the rich mime-typed
2383 2378 display_data of each value.
2384 2379 """
2385 2380 out = {}
2386 2381 user_ns = self.user_ns
2387 2382 global_ns = self.user_global_ns
2388 2383
2389 2384 for key, expr in iteritems(expressions):
2390 2385 try:
2391 2386 value = self._format_user_obj(eval(expr, global_ns, user_ns))
2392 2387 except:
2393 2388 value = self._user_obj_error()
2394 2389 out[key] = value
2395 2390 return out
2396 2391
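A brief sketch of the shape of the result, assuming an active shell; expressions that fail come back with status 'error' instead of raising:

    from IPython import get_ipython

    ip = get_ipython()
    ip.user_ns['x'] = 3
    out = ip.user_expressions({'double': 'x * 2', 'broken': '1 /'})
    out['double']['status']   # 'ok', with rich 'data' and 'metadata' entries
    out['broken']['status']   # 'error', with 'ename', 'evalue', 'traceback'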
2397 2392 #-------------------------------------------------------------------------
2398 2393 # Things related to the running of code
2399 2394 #-------------------------------------------------------------------------
2400 2395
2401 2396 def ex(self, cmd):
2402 2397 """Execute a normal python statement in user namespace."""
2403 2398 with self.builtin_trap:
2404 2399 exec(cmd, self.user_global_ns, self.user_ns)
2405 2400
2406 2401 def ev(self, expr):
2407 2402 """Evaluate python expression expr in user namespace.
2408 2403
2409 2404 Returns the result of evaluation
2410 2405 """
2411 2406 with self.builtin_trap:
2412 2407 return eval(expr, self.user_global_ns, self.user_ns)
2413 2408
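For instance (assuming an active shell), ex() runs a statement for its side effects in the user namespace, while ev() returns the value of an expression evaluated there:

    from IPython import get_ipython

    ip = get_ipython()
    ip.ex('counter = 41')          # statement: binds counter in user_ns
    print(ip.ev('counter + 1'))    # expression: prints 42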
2414 2409 def safe_execfile(self, fname, *where, **kw):
2415 2410 """A safe version of the builtin execfile().
2416 2411
2417 2412 This version will never throw an exception, but instead print
2418 2413 helpful error messages to the screen. This only works on pure
2419 2414 Python files with the .py extension.
2420 2415
2421 2416 Parameters
2422 2417 ----------
2423 2418 fname : string
2424 2419 The name of the file to be executed.
2425 2420 where : tuple
2426 2421 One or two namespaces, passed to execfile() as (globals,locals).
2427 2422 If only one is given, it is passed as both.
2428 2423 exit_ignore : bool (False)
2429 2424 If True, then silence SystemExit for non-zero status (it is always
2430 2425 silenced for zero status, as it is so common).
2431 2426 raise_exceptions : bool (False)
2432 2427 If True raise exceptions everywhere. Meant for testing.
2433 2428 shell_futures : bool (False)
2434 2429 If True, the code will share future statements with the interactive
2435 2430 shell. It will both be affected by previous __future__ imports, and
2436 2431 any __future__ imports in the code will affect the shell. If False,
2437 2432 __future__ imports are not shared in either direction.
2438 2433
2439 2434 """
2440 2435 kw.setdefault('exit_ignore', False)
2441 2436 kw.setdefault('raise_exceptions', False)
2442 2437 kw.setdefault('shell_futures', False)
2443 2438
2444 2439 fname = os.path.abspath(os.path.expanduser(fname))
2445 2440
2446 2441 # Make sure we can open the file
2447 2442 try:
2448 2443 with open(fname):
2449 2444 pass
2450 2445 except:
2451 2446 warn('Could not open file <%s> for safe execution.' % fname)
2452 2447 return
2453 2448
2454 2449 # Find things also in current directory. This is needed to mimic the
2455 2450 # behavior of running a script from the system command line, where
2456 2451 # Python inserts the script's directory into sys.path
2457 2452 dname = os.path.dirname(fname)
2458 2453
2459 2454 with prepended_to_syspath(dname), self.builtin_trap:
2460 2455 try:
2461 2456 glob, loc = (where + (None, ))[:2]
2462 2457 py3compat.execfile(
2463 2458 fname, glob, loc,
2464 2459 self.compile if kw['shell_futures'] else None)
2465 2460 except SystemExit as status:
2466 2461 # If the call was made with 0 or None exit status (sys.exit(0)
2467 2462 # or sys.exit() ), don't bother showing a traceback, as both of
2468 2463 # these are considered normal by the OS:
2469 2464 # > python -c'import sys;sys.exit(0)'; echo $?
2470 2465 # 0
2471 2466 # > python -c'import sys;sys.exit()'; echo $?
2472 2467 # 0
2473 2468 # For other exit status, we show the exception unless
2474 2469 # explicitly silenced, but only in short form.
2475 2470 if status.code:
2476 2471 if kw['raise_exceptions']:
2477 2472 raise
2478 2473 if not kw['exit_ignore']:
2479 2474 self.showtraceback(exception_only=True)
2480 2475 except:
2481 2476 if kw['raise_exceptions']:
2482 2477 raise
2483 2478 # tb offset is 2 because we wrap execfile
2484 2479 self.showtraceback(tb_offset=2)
2485 2480
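A sketch of typical usage with a hypothetical script path; passing the user namespace makes the script's assignments visible interactively, and raise_exceptions is mainly useful in tests:

    from IPython import get_ipython

    ip = get_ipython()
    # Run a plain .py file in the user namespace; SystemExit(0) is silenced.
    ip.safe_execfile('setup_env.py', ip.user_ns, exit_ignore=True)
    # In test code, surface errors instead of just printing a traceback:
    ip.safe_execfile('setup_env.py', ip.user_ns, raise_exceptions=True)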
2486 2481 def safe_execfile_ipy(self, fname, shell_futures=False, raise_exceptions=False):
2487 2482 """Like safe_execfile, but for .ipy or .ipynb files with IPython syntax.
2488 2483
2489 2484 Parameters
2490 2485 ----------
2491 2486 fname : str
2492 2487 The name of the file to execute. The filename must have a
2493 2488 .ipy or .ipynb extension.
2494 2489 shell_futures : bool (False)
2495 2490 If True, the code will share future statements with the interactive
2496 2491 shell. It will both be affected by previous __future__ imports, and
2497 2492 any __future__ imports in the code will affect the shell. If False,
2498 2493 __future__ imports are not shared in either direction.
2499 2494 raise_exceptions : bool (False)
2500 2495 If True raise exceptions everywhere. Meant for testing.
2501 2496 """
2502 2497 fname = os.path.abspath(os.path.expanduser(fname))
2503 2498
2504 2499 # Make sure we can open the file
2505 2500 try:
2506 2501 with open(fname):
2507 2502 pass
2508 2503 except:
2509 2504 warn('Could not open file <%s> for safe execution.' % fname)
2510 2505 return
2511 2506
2512 2507 # Find things also in current directory. This is needed to mimic the
2513 2508 # behavior of running a script from the system command line, where
2514 2509 # Python inserts the script's directory into sys.path
2515 2510 dname = os.path.dirname(fname)
2516 2511
2517 2512 def get_cells():
2518 2513 """generator for sequence of code blocks to run"""
2519 2514 if fname.endswith('.ipynb'):
2520 2515 from nbformat import read
2521 2516 with io_open(fname) as f:
2522 2517 nb = read(f, as_version=4)
2523 2518 if not nb.cells:
2524 2519 return
2525 2520 for cell in nb.cells:
2526 2521 if cell.cell_type == 'code':
2527 2522 yield cell.source
2528 2523 else:
2529 2524 with open(fname) as f:
2530 2525 yield f.read()
2531 2526
2532 2527 with prepended_to_syspath(dname):
2533 2528 try:
2534 2529 for cell in get_cells():
2535 2530 result = self.run_cell(cell, silent=True, shell_futures=shell_futures)
2536 2531 if raise_exceptions:
2537 2532 result.raise_error()
2538 2533 elif not result.success:
2539 2534 break
2540 2535 except:
2541 2536 if raise_exceptions:
2542 2537 raise
2543 2538 self.showtraceback()
2544 2539 warn('Unknown failure executing file: <%s>' % fname)
2545 2540
2546 2541 def safe_run_module(self, mod_name, where):
2547 2542 """A safe version of runpy.run_module().
2548 2543
2549 2544 This version will never throw an exception, but instead print
2550 2545 helpful error messages to the screen.
2551 2546
2552 2547 `SystemExit` exceptions with status code 0 or None are ignored.
2553 2548
2554 2549 Parameters
2555 2550 ----------
2556 2551 mod_name : string
2557 2552 The name of the module to be executed.
2558 2553 where : dict
2559 2554 The globals namespace.
2560 2555 """
2561 2556 try:
2562 2557 try:
2563 2558 where.update(
2564 2559 runpy.run_module(str(mod_name), run_name="__main__",
2565 2560 alter_sys=True)
2566 2561 )
2567 2562 except SystemExit as status:
2568 2563 if status.code:
2569 2564 raise
2570 2565 except:
2571 2566 self.showtraceback()
2572 2567 warn('Unknown failure executing module: <%s>' % mod_name)
2573 2568
2574 2569 def run_cell(self, raw_cell, store_history=False, silent=False, shell_futures=True):
2575 2570 """Run a complete IPython cell.
2576 2571
2577 2572 Parameters
2578 2573 ----------
2579 2574 raw_cell : str
2580 2575 The code (including IPython code such as %magic functions) to run.
2581 2576 store_history : bool
2582 2577 If True, the raw and translated cell will be stored in IPython's
2583 2578 history. For user code calling back into IPython's machinery, this
2584 2579 should be set to False.
2585 2580 silent : bool
2586 2581 If True, avoid side-effects, such as implicit displayhooks and
2587 2582 logging. silent=True forces store_history=False.
2588 2583 shell_futures : bool
2589 2584 If True, the code will share future statements with the interactive
2590 2585 shell. It will both be affected by previous __future__ imports, and
2591 2586 any __future__ imports in the code will affect the shell. If False,
2592 2587 __future__ imports are not shared in either direction.
2593 2588
2594 2589 Returns
2595 2590 -------
2596 2591 result : :class:`ExecutionResult`
2597 2592 """
2598 2593 result = ExecutionResult()
2599 2594
2600 2595 if (not raw_cell) or raw_cell.isspace():
2601 2596 self.last_execution_succeeded = True
2602 2597 return result
2603 2598
2604 2599 if silent:
2605 2600 store_history = False
2606 2601
2607 2602 if store_history:
2608 2603 result.execution_count = self.execution_count
2609 2604
2610 2605 def error_before_exec(value):
2611 2606 result.error_before_exec = value
2612 2607 self.last_execution_succeeded = False
2613 2608 return result
2614 2609
2615 2610 self.events.trigger('pre_execute')
2616 2611 if not silent:
2617 2612 self.events.trigger('pre_run_cell')
2618 2613
2619 2614 # If any of our input transformations (input_transformer_manager or
2620 2615 # prefilter_manager) raises an exception, we store it in this variable
2621 2616 # so that we can display the error after logging the input and storing
2622 2617 # it in the history.
2623 2618 preprocessing_exc_tuple = None
2624 2619 try:
2625 2620 # Static input transformations
2626 2621 cell = self.input_transformer_manager.transform_cell(raw_cell)
2627 2622 except SyntaxError:
2628 2623 preprocessing_exc_tuple = sys.exc_info()
2629 2624 cell = raw_cell # cell has to exist so it can be stored/logged
2630 2625 else:
2631 2626 if len(cell.splitlines()) == 1:
2632 2627 # Dynamic transformations - only applied for single line commands
2633 2628 with self.builtin_trap:
2634 2629 try:
2635 2630 # use prefilter_lines to handle trailing newlines
2636 2631 # restore trailing newline for ast.parse
2637 2632 cell = self.prefilter_manager.prefilter_lines(cell) + '\n'
2638 2633 except Exception:
2639 2634 # don't allow prefilter errors to crash IPython
2640 2635 preprocessing_exc_tuple = sys.exc_info()
2641 2636
2642 2637 # Store raw and processed history
2643 2638 if store_history:
2644 2639 self.history_manager.store_inputs(self.execution_count,
2645 2640 cell, raw_cell)
2646 2641 if not silent:
2647 2642 self.logger.log(cell, raw_cell)
2648 2643
2649 2644 # Display the exception if input processing failed.
2650 2645 if preprocessing_exc_tuple is not None:
2651 2646 self.showtraceback(preprocessing_exc_tuple)
2652 2647 if store_history:
2653 2648 self.execution_count += 1
2654 2649 return error_before_exec(preprocessing_exc_tuple[2])
2655 2650
2656 2651 # Our own compiler remembers the __future__ environment. If we want to
2657 2652 # run code with a separate __future__ environment, use the default
2658 2653 # compiler
2659 2654 compiler = self.compile if shell_futures else CachingCompiler()
2660 2655
2661 2656 with self.builtin_trap:
2662 2657 cell_name = self.compile.cache(cell, self.execution_count)
2663 2658
2664 2659 with self.display_trap:
2665 2660 # Compile to bytecode
2666 2661 try:
2667 2662 code_ast = compiler.ast_parse(cell, filename=cell_name)
2668 2663 except self.custom_exceptions as e:
2669 2664 etype, value, tb = sys.exc_info()
2670 2665 self.CustomTB(etype, value, tb)
2671 2666 return error_before_exec(e)
2672 2667 except IndentationError as e:
2673 2668 self.showindentationerror()
2674 2669 if store_history:
2675 2670 self.execution_count += 1
2676 2671 return error_before_exec(e)
2677 2672 except (OverflowError, SyntaxError, ValueError, TypeError,
2678 2673 MemoryError) as e:
2679 2674 self.showsyntaxerror()
2680 2675 if store_history:
2681 2676 self.execution_count += 1
2682 2677 return error_before_exec(e)
2683 2678
2684 2679 # Apply AST transformations
2685 2680 try:
2686 2681 code_ast = self.transform_ast(code_ast)
2687 2682 except InputRejected as e:
2688 2683 self.showtraceback()
2689 2684 if store_history:
2690 2685 self.execution_count += 1
2691 2686 return error_before_exec(e)
2692 2687
2693 2688 # Give the displayhook a reference to our ExecutionResult so it
2694 2689 # can fill in the output value.
2695 2690 self.displayhook.exec_result = result
2696 2691
2697 2692 # Execute the user code
2698 2693 interactivity = "none" if silent else self.ast_node_interactivity
2699 2694 has_raised = self.run_ast_nodes(code_ast.body, cell_name,
2700 2695 interactivity=interactivity, compiler=compiler, result=result)
2701 2696
2702 2697 self.last_execution_succeeded = not has_raised
2703 2698
2704 2699 # Reset this so later displayed values do not modify the
2705 2700 # ExecutionResult
2706 2701 self.displayhook.exec_result = None
2707 2702
2708 2703 self.events.trigger('post_execute')
2709 2704 if not silent:
2710 2705 self.events.trigger('post_run_cell')
2711 2706
2712 2707 if store_history:
2713 2708 # Write output to the database. Does nothing unless
2714 2709 # history output logging is enabled.
2715 2710 self.history_manager.store_output(self.execution_count)
2716 2711 # Each cell is a *single* input, regardless of how many lines it has
2717 2712 self.execution_count += 1
2718 2713
2719 2714 return result
2720 2715
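A small sketch of driving the shell programmatically, assuming an active instance; the returned ExecutionResult records what, if anything, went wrong:

    from IPython import get_ipython

    ip = get_ipython()
    res = ip.run_cell('a = 10\na * 2', store_history=False)
    res.success             # True if nothing raised
    res.error_in_exec       # exception raised while running the code, else None
    res.error_before_exec   # syntax/transformation error caught earlier, else None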
2721 2716 def transform_ast(self, node):
2722 2717 """Apply the AST transformations from self.ast_transformers
2723 2718
2724 2719 Parameters
2725 2720 ----------
2726 2721 node : ast.Node
2727 2722 The root node to be transformed. Typically called with the ast.Module
2728 2723 produced by parsing user input.
2729 2724
2730 2725 Returns
2731 2726 -------
2732 2727 An ast.Node corresponding to the node it was called with. Note that it
2733 2728 may also modify the passed object, so don't rely on references to the
2734 2729 original AST.
2735 2730 """
2736 2731 for transformer in self.ast_transformers:
2737 2732 try:
2738 2733 node = transformer.visit(node)
2739 2734 except InputRejected:
2740 2735 # User-supplied AST transformers can reject an input by raising
2741 2736 # an InputRejected. Short-circuit in this case so that we
2742 2737 # don't unregister the transform.
2743 2738 raise
2744 2739 except Exception:
2745 2740 warn("AST transformer %r threw an error. It will be unregistered." % transformer)
2746 2741 self.ast_transformers.remove(transformer)
2747 2742
2748 2743 if self.ast_transformers:
2749 2744 ast.fix_missing_locations(node)
2750 2745 return node
2751 2746
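To illustrate the hook that transform_ast drives, a toy (hypothetical) transformer registered on the shell's ast_transformers list; it uses the pre-3.8 ast.Num node, matching the Python versions this code targets:

    import ast
    from IPython import get_ipython

    class NegateIntegers(ast.NodeTransformer):
        """Toy example: replace every integer literal N with -N."""
        def visit_Num(self, node):
            if isinstance(node.n, int):
                return ast.copy_location(ast.Num(n=-node.n), node)
            return node

    ip = get_ipython()
    ip.ast_transformers.append(NegateIntegers())
    # From now on, run_cell('2 + 2') evaluates (-2) + (-2), i.e. -4.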
2752 2747
2753 2748 def run_ast_nodes(self, nodelist, cell_name, interactivity='last_expr',
2754 2749 compiler=compile, result=None):
2755 2750 """Run a sequence of AST nodes. The execution mode depends on the
2756 2751 interactivity parameter.
2757 2752
2758 2753 Parameters
2759 2754 ----------
2760 2755 nodelist : list
2761 2756 A sequence of AST nodes to run.
2762 2757 cell_name : str
2763 2758 Will be passed to the compiler as the filename of the cell. Typically
2764 2759 the value returned by ip.compile.cache(cell).
2765 2760 interactivity : str
2766 2761 'all', 'last', 'last_expr' or 'none', specifying which nodes should be
2767 2762 run interactively (displaying output from expressions). 'last_expr'
2768 2763 will run the last node interactively only if it is an expression (i.e.
2769 2764 expressions in loops or other blocks are not displayed). Other values
2770 2765 for this parameter will raise a ValueError.
2771 2766 compiler : callable
2772 2767 A function with the same interface as the built-in compile(), to turn
2773 2768 the AST nodes into code objects. Default is the built-in compile().
2774 2769 result : ExecutionResult, optional
2775 2770 An object to store exceptions that occur during execution.
2776 2771
2777 2772 Returns
2778 2773 -------
2779 2774 True if an exception occurred while running code, False if it finished
2780 2775 running.
2781 2776 """
2782 2777 if not nodelist:
2783 2778 return
2784 2779
2785 2780 if interactivity == 'last_expr':
2786 2781 if isinstance(nodelist[-1], ast.Expr):
2787 2782 interactivity = "last"
2788 2783 else:
2789 2784 interactivity = "none"
2790 2785
2791 2786 if interactivity == 'none':
2792 2787 to_run_exec, to_run_interactive = nodelist, []
2793 2788 elif interactivity == 'last':
2794 2789 to_run_exec, to_run_interactive = nodelist[:-1], nodelist[-1:]
2795 2790 elif interactivity == 'all':
2796 2791 to_run_exec, to_run_interactive = [], nodelist
2797 2792 else:
2798 2793 raise ValueError("Interactivity was %r" % interactivity)
2799 2794
2800 2795 try:
2801 2796 for i, node in enumerate(to_run_exec):
2802 2797 mod = ast.Module([node])
2803 2798 code = compiler(mod, cell_name, "exec")
2804 2799 if self.run_code(code, result):
2805 2800 return True
2806 2801
2807 2802 for i, node in enumerate(to_run_interactive):
2808 2803 mod = ast.Interactive([node])
2809 2804 code = compiler(mod, cell_name, "single")
2810 2805 if self.run_code(code, result):
2811 2806 return True
2812 2807
2813 2808 # Flush softspace
2814 2809 if softspace(sys.stdout, 0):
2815 2810 print()
2816 2811
2817 2812 except:
2818 2813 # It's possible to have exceptions raised here, typically by
2819 2814 # compilation of odd code (such as a naked 'return' outside a
2820 2815 # function) that did parse but isn't valid. Typically the exception
2821 2816 # is a SyntaxError, but it's safest just to catch anything and show
2822 2817 # the user a traceback.
2823 2818
2824 2819 # We do only one try/except outside the loop to minimize the impact
2825 2820 # on runtime, and also because if any node in the node list is
2826 2821 # broken, we should stop execution completely.
2827 2822 if result:
2828 2823 result.error_before_exec = sys.exc_info()[1]
2829 2824 self.showtraceback()
2830 2825 return True
2831 2826
2832 2827 return False
2833 2828
2834 2829 def run_code(self, code_obj, result=None):
2835 2830 """Execute a code object.
2836 2831
2837 2832 When an exception occurs, self.showtraceback() is called to display a
2838 2833 traceback.
2839 2834
2840 2835 Parameters
2841 2836 ----------
2842 2837 code_obj : code object
2843 2838 A compiled code object, to be executed
2844 2839 result : ExecutionResult, optional
2845 2840 An object to store exceptions that occur during execution.
2846 2841
2847 2842 Returns
2848 2843 -------
2849 2844 False : successful execution.
2850 2845 True : an error occurred.
2851 2846 """
2852 2847 # Set our own excepthook in case the user code tries to call it
2853 2848 # directly, so that the IPython crash handler doesn't get triggered
2854 2849 old_excepthook, sys.excepthook = sys.excepthook, self.excepthook
2855 2850
2856 2851 # we save the original sys.excepthook in the instance, in case config
2857 2852 # code (such as magics) needs access to it.
2858 2853 self.sys_excepthook = old_excepthook
2859 2854 outflag = 1 # happens in more places, so it's easier as default
2860 2855 try:
2861 2856 try:
2862 2857 self.hooks.pre_run_code_hook()
2863 2858 #rprint('Running code', repr(code_obj)) # dbg
2864 2859 exec(code_obj, self.user_global_ns, self.user_ns)
2865 2860 finally:
2866 2861 # Reset our crash handler in place
2867 2862 sys.excepthook = old_excepthook
2868 2863 except SystemExit as e:
2869 2864 if result is not None:
2870 2865 result.error_in_exec = e
2871 2866 self.showtraceback(exception_only=True)
2872 2867 warn("To exit: use 'exit', 'quit', or Ctrl-D.", stacklevel=1)
2873 2868 except self.custom_exceptions:
2874 2869 etype, value, tb = sys.exc_info()
2875 2870 if result is not None:
2876 2871 result.error_in_exec = value
2877 2872 self.CustomTB(etype, value, tb)
2878 2873 except:
2879 2874 if result is not None:
2880 2875 result.error_in_exec = sys.exc_info()[1]
2881 2876 self.showtraceback()
2882 2877 else:
2883 2878 outflag = 0
2884 2879 return outflag
2885 2880
2886 2881 # For backwards compatibility
2887 2882 runcode = run_code
2888 2883
2889 2884 #-------------------------------------------------------------------------
2890 2885 # Things related to GUI support and pylab
2891 2886 #-------------------------------------------------------------------------
2892 2887
2893 2888 active_eventloop = None
2894 2889
2895 2890 def enable_gui(self, gui=None):
2896 2891 raise NotImplementedError('Implement enable_gui in a subclass')
2897 2892
2898 2893 def enable_matplotlib(self, gui=None):
2899 2894 """Enable interactive matplotlib and inline figure support.
2900 2895
2901 2896 This takes the following steps:
2902 2897
2903 2898 1. select the appropriate eventloop and matplotlib backend
2904 2899 2. set up matplotlib for interactive use with that backend
2905 2900 3. configure formatters for inline figure display
2906 2901 4. enable the selected gui eventloop
2907 2902
2908 2903 Parameters
2909 2904 ----------
2910 2905 gui : optional, string
2911 2906 If given, dictates the choice of matplotlib GUI backend to use
2912 2907 (should be one of IPython's supported backends, 'qt', 'osx', 'tk',
2913 2908 'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
2914 2909 matplotlib (as dictated by the matplotlib build-time options plus the
2915 2910 user's matplotlibrc configuration file). Note that not all backends
2916 2911 make sense in all contexts, for example a terminal ipython can't
2917 2912 display figures inline.
2918 2913 """
2919 2914 from IPython.core import pylabtools as pt
2920 2915 gui, backend = pt.find_gui_and_backend(gui, self.pylab_gui_select)
2921 2916
2922 2917 if gui != 'inline':
2923 2918 # If we have our first gui selection, store it
2924 2919 if self.pylab_gui_select is None:
2925 2920 self.pylab_gui_select = gui
2926 2921 # Otherwise if they are different
2927 2922 elif gui != self.pylab_gui_select:
2928 2923 print ('Warning: Cannot change to a different GUI toolkit: %s.'
2929 2924 ' Using %s instead.' % (gui, self.pylab_gui_select))
2930 2925 gui, backend = pt.find_gui_and_backend(self.pylab_gui_select)
2931 2926
2932 2927 pt.activate_matplotlib(backend)
2933 2928 pt.configure_inline_support(self, backend)
2934 2929
2935 2930 # Now we must activate the gui pylab wants to use, and fix %run to take
2936 2931 # plot updates into account
2937 2932 self.enable_gui(gui)
2938 2933 self.magics_manager.registry['ExecutionMagics'].default_runner = \
2939 2934 pt.mpl_runner(self.safe_execfile)
2940 2935
2941 2936 return gui, backend
2942 2937
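A minimal sketch, assuming matplotlib is installed and the call is made from a frontend whose enable_gui() supports the chosen event loop:

    from IPython import get_ipython

    ip = get_ipython()
    # Let matplotlib pick its default backend; a name such as 'qt', 'tk'
    # or 'inline' could be passed explicitly instead.
    gui, backend = ip.enable_matplotlib()
    print(gui, backend)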
2943 2938 def enable_pylab(self, gui=None, import_all=True, welcome_message=False):
2944 2939 """Activate pylab support at runtime.
2945 2940
2946 2941 This turns on support for matplotlib, preloads into the interactive
2947 2942 namespace all of numpy and pylab, and configures IPython to correctly
2948 2943 interact with the GUI event loop. The GUI backend to be used can be
2949 2944 selected with the optional ``gui`` argument.
2950 2945
2951 2946 This method only adds namespace preloading on top of InteractiveShell.enable_matplotlib.
2952 2947
2953 2948 Parameters
2954 2949 ----------
2955 2950 gui : optional, string
2956 2951 If given, dictates the choice of matplotlib GUI backend to use
2957 2952 (should be one of IPython's supported backends, 'qt', 'osx', 'tk',
2958 2953 'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
2959 2954 matplotlib (as dictated by the matplotlib build-time options plus the
2960 2955 user's matplotlibrc configuration file). Note that not all backends
2961 2956 make sense in all contexts, for example a terminal ipython can't
2962 2957 display figures inline.
2963 2958 import_all : optional, bool, default: True
2964 2959 Whether to do `from numpy import *` and `from pylab import *`
2965 2960 in addition to module imports.
2966 2961 welcome_message : deprecated
2967 2962 This argument is ignored, no welcome message will be displayed.
2968 2963 """
2969 2964 from IPython.core.pylabtools import import_pylab
2970 2965
2971 2966 gui, backend = self.enable_matplotlib(gui)
2972 2967
2973 2968 # We want to prevent the loading of pylab from polluting the user's
2974 2969 # namespace as shown by the %who* magics, so we execute the activation
2975 2970 # code in an empty namespace, and we update *both* user_ns and
2976 2971 # user_ns_hidden with this information.
2977 2972 ns = {}
2978 2973 import_pylab(ns, import_all)
2979 2974 # warn about clobbered names
2980 2975 ignored = {"__builtins__"}
2981 2976 both = set(ns).intersection(self.user_ns).difference(ignored)
2982 2977 clobbered = [ name for name in both if self.user_ns[name] is not ns[name] ]
2983 2978 self.user_ns.update(ns)
2984 2979 self.user_ns_hidden.update(ns)
2985 2980 return gui, backend, clobbered
2986 2981
2987 2982 #-------------------------------------------------------------------------
2988 2983 # Utilities
2989 2984 #-------------------------------------------------------------------------
2990 2985
2991 2986 def var_expand(self, cmd, depth=0, formatter=DollarFormatter()):
2992 2987 """Expand python variables in a string.
2993 2988
2994 2989 The depth argument indicates how many frames above the caller should
2995 2990 be walked to look for the local namespace in which to expand variables.
2996 2991
2997 2992 The global namespace for expansion is always the user's interactive
2998 2993 namespace.
2999 2994 """
3000 2995 ns = self.user_ns.copy()
3001 2996 try:
3002 2997 frame = sys._getframe(depth+1)
3003 2998 except ValueError:
3004 2999 # This is thrown if there aren't that many frames on the stack,
3005 3000 # e.g. if a script called run_line_magic() directly.
3006 3001 pass
3007 3002 else:
3008 3003 ns.update(frame.f_locals)
3009 3004
3010 3005 try:
3011 3006 # We have to use .vformat() here, because 'self' is a valid and common
3012 3007 # name, and expanding **ns for .format() would make it collide with
3013 3008 # the 'self' argument of the method.
3014 3009 cmd = formatter.vformat(cmd, args=[], kwargs=ns)
3015 3010 except Exception:
3016 3011 # if formatter couldn't format, just let it go untransformed
3017 3012 pass
3018 3013 return cmd
3019 3014
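A quick sketch of the expansion, assuming an active shell; both the $name form and the brace form are handled by the DollarFormatter:

    from IPython import get_ipython

    ip = get_ipython()
    ip.user_ns['fname'] = 'data.csv'
    ip.var_expand('head -n 5 $fname')   # -> 'head -n 5 data.csv'
    ip.var_expand('wc -l {fname}')      # -> 'wc -l data.csv'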
3020 3015 def mktempfile(self, data=None, prefix='ipython_edit_'):
3021 3016 """Make a new tempfile and return its filename.
3022 3017
3023 3018 This makes a call to tempfile.mkstemp (in a directory created with
3024 3019 tempfile.mkdtemp), and registers the created filename internally so
3025 3020 IPython cleans it up at exit time.
3026 3021
3027 3022 Optional inputs:
3028 3023
3029 3024 - data(None): if data is given, it gets written out to the temp file
3030 3025 immediately, and the file is closed again."""
3031 3026
3032 3027 dirname = tempfile.mkdtemp(prefix=prefix)
3033 3028 self.tempdirs.append(dirname)
3034 3029
3035 3030 handle, filename = tempfile.mkstemp('.py', prefix, dir=dirname)
3036 3031 os.close(handle) # On Windows, there can only be one open handle on a file
3037 3032 self.tempfiles.append(filename)
3038 3033
3039 3034 if data:
3040 3035 tmp_file = open(filename,'w')
3041 3036 tmp_file.write(data)
3042 3037 tmp_file.close()
3043 3038 return filename
3044 3039
3045 3040 @undoc
3046 3041 def write(self,data):
3047 3042 """DEPRECATED: Write a string to the default output"""
3048 3043 warn('InteractiveShell.write() is deprecated, use sys.stdout instead',
3049 3044 DeprecationWarning, stacklevel=2)
3050 3045 sys.stdout.write(data)
3051 3046
3052 3047 @undoc
3053 3048 def write_err(self,data):
3054 3049 """DEPRECATED: Write a string to the default error output"""
3055 3050 warn('InteractiveShell.write_err() is deprecated, use sys.stderr instead',
3056 3051 DeprecationWarning, stacklevel=2)
3057 3052 sys.stderr.write(data)
3058 3053
3059 3054 def ask_yes_no(self, prompt, default=None, interrupt=None):
3060 3055 if self.quiet:
3061 3056 return True
3062 3057 return ask_yes_no(prompt,default,interrupt)
3063 3058
3064 3059 def show_usage(self):
3065 3060 """Show a usage message"""
3066 3061 page.page(IPython.core.usage.interactive_usage)
3067 3062
3068 3063 def extract_input_lines(self, range_str, raw=False):
3069 3064 """Return as a string a set of input history slices.
3070 3065
3071 3066 Parameters
3072 3067 ----------
3073 3068 range_str : string
3074 3069 The set of slices is given as a string, like "~5/6-~4/2 4:8 9",
3075 3070 since this function is for use by magic functions which get their
3076 3071 arguments as strings. The number before the / is the session
3077 3072 number: ~n goes n back from the current session.
3078 3073
3079 3074 raw : bool, optional
3080 3075 By default, the processed input is used. If this is true, the raw
3081 3076 input history is used instead.
3082 3077
3083 3078 Notes
3084 3079 -----
3085 3080
3086 3081 Slices can be described with two notations:
3087 3082
3088 3083 * ``N:M`` -> standard python form, means including items N...(M-1).
3089 3084 * ``N-M`` -> include items N..M (closed endpoint).
3090 3085 """
3091 3086 lines = self.history_manager.get_range_by_str(range_str, raw=raw)
3092 3087 return "\n".join(x for _, _, x in lines)
3093 3088
3094 3089 def find_user_code(self, target, raw=True, py_only=False, skip_encoding_cookie=True, search_ns=False):
3095 3090 """Get a code string from history, file, url, or a string or macro.
3096 3091
3097 3092 This is mainly used by magic functions.
3098 3093
3099 3094 Parameters
3100 3095 ----------
3101 3096
3102 3097 target : str
3103 3098
3104 3099 A string specifying code to retrieve. This will be tried respectively
3105 3100 as: ranges of input history (see %history for syntax), url,
3106 3101 corresponding .py file, filename, or an expression evaluating to a
3107 3102 string or Macro in the user namespace.
3108 3103
3109 3104 raw : bool
3110 3105 If true (default), retrieve raw history. Has no effect on the other
3111 3106 retrieval mechanisms.
3112 3107
3113 3108 py_only : bool (default False)
3114 3109 Only try to fetch python code, do not try alternative methods to decode file
3115 3110 if unicode fails.
3116 3111
3117 3112 Returns
3118 3113 -------
3119 3114 A string of code.
3120 3115
3121 3116 ValueError is raised if nothing is found, and TypeError if it evaluates
3122 3117 to an object of another type. In each case, .args[0] is a printable
3123 3118 message.
3124 3119 """
3125 3120 code = self.extract_input_lines(target, raw=raw) # Grab history
3126 3121 if code:
3127 3122 return code
3128 3123 try:
3129 3124 if target.startswith(('http://', 'https://')):
3130 3125 return openpy.read_py_url(target, skip_encoding_cookie=skip_encoding_cookie)
3131 3126 except UnicodeDecodeError:
3132 3127 if not py_only :
3133 3128 # Deferred import
3134 3129 try:
3135 3130 from urllib.request import urlopen # Py3
3136 3131 except ImportError:
3137 3132 from urllib import urlopen
3138 3133 response = urlopen(target)
3139 3134 return response.read().decode('latin1')
2140 2135 raise ValueError(("'%s' seems to be unreadable.") % target)
3141 3136
3142 3137 potential_target = [target]
3143 3138 try :
3144 3139 potential_target.insert(0,get_py_filename(target))
3145 3140 except IOError:
3146 3141 pass
3147 3142
3148 3143 for tgt in potential_target :
3149 3144 if os.path.isfile(tgt): # Read file
3150 3145 try :
3151 3146 return openpy.read_py_file(tgt, skip_encoding_cookie=skip_encoding_cookie)
3152 3147 except UnicodeDecodeError :
3153 3148 if not py_only :
3154 3149 with io_open(tgt,'r', encoding='latin1') as f :
3155 3150 return f.read()
3156 3151 raise ValueError(("'%s' seems to be unreadable.") % target)
3157 3152 elif os.path.isdir(os.path.expanduser(tgt)):
3158 3153 raise ValueError("'%s' is a directory, not a regular file." % target)
3159 3154
3160 3155 if search_ns:
3161 3156 # Inspect namespace to load object source
3162 3157 object_info = self.object_inspect(target, detail_level=1)
3163 3158 if object_info['found'] and object_info['source']:
3164 3159 return object_info['source']
3165 3160
3166 3161 try: # User namespace
3167 3162 codeobj = eval(target, self.user_ns)
3168 3163 except Exception:
3169 3164 raise ValueError(("'%s' was not found in history, as a file, url, "
3170 3165 "nor in the user namespace.") % target)
3171 3166
3172 3167 if isinstance(codeobj, string_types):
3173 3168 return codeobj
3174 3169 elif isinstance(codeobj, Macro):
3175 3170 return codeobj.value
3176 3171
3177 3172 raise TypeError("%s is neither a string nor a macro." % target,
3178 3173 codeobj)
3179 3174
3180 3175 #-------------------------------------------------------------------------
3181 3176 # Things related to IPython exiting
3182 3177 #-------------------------------------------------------------------------
3183 3178 def atexit_operations(self):
3184 3179 """This will be executed at the time of exit.
3185 3180
3186 3181 Cleanup operations and saving of persistent data that is done
3187 3182 unconditionally by IPython should be performed here.
3188 3183
3189 3184 For things that may depend on startup flags or platform specifics (such
3190 3185 as having readline or not), register a separate atexit function in the
3191 3186 code that has the appropriate information, rather than trying to
3192 3187 clutter this method.
3193 3188 """
3194 3189 # Close the history session (this stores the end time and line count)
3195 3190 # this must be *before* the tempfile cleanup, in case of temporary
3196 3191 # history db
3197 3192 self.history_manager.end_session()
3198 3193
3199 3194 # Cleanup all tempfiles and folders left around
3200 3195 for tfile in self.tempfiles:
3201 3196 try:
3202 3197 os.unlink(tfile)
3203 3198 except OSError:
3204 3199 pass
3205 3200
3206 3201 for tdir in self.tempdirs:
3207 3202 try:
3208 3203 os.rmdir(tdir)
3209 3204 except OSError:
3210 3205 pass
3211 3206
3212 3207 # Clear all user namespaces to release all references cleanly.
3213 3208 self.reset(new_session=False)
3214 3209
3215 3210 # Run user hooks
3216 3211 self.hooks.shutdown_hook()
3217 3212
3218 3213 def cleanup(self):
3219 3214 self.restore_sys_module_state()
3220 3215
3221 3216
3222 3217 # Overridden in terminal subclass to change prompts
3223 3218 def switch_doctest_mode(self, mode):
3224 3219 pass
3225 3220
3226 3221
3227 3222 class InteractiveShellABC(with_metaclass(abc.ABCMeta, object)):
3228 3223 """An abstract base class for InteractiveShell."""
3229 3224
3230 3225 InteractiveShellABC.register(InteractiveShell)
@@ -1,221 +1,220 b''
1 1 """Logger class for IPython's logging facilities.
2 2 """
3 from __future__ import print_function
4 3
5 4 #*****************************************************************************
6 5 # Copyright (C) 2001 Janko Hauser <jhauser@zscout.de> and
7 6 # Copyright (C) 2001-2006 Fernando Perez <fperez@colorado.edu>
8 7 #
9 8 # Distributed under the terms of the BSD License. The full license is in
10 9 # the file COPYING, distributed as part of this software.
11 10 #*****************************************************************************
12 11
13 12 #****************************************************************************
14 13 # Modules and globals
15 14
16 15 # Python standard modules
17 16 import glob
18 17 import io
19 18 import os
20 19 import time
21 20
22 21 from IPython.utils.py3compat import str_to_unicode
23 22
24 23 #****************************************************************************
25 24 # FIXME: This class isn't a mixin anymore, but it still needs attributes from
26 25 # ipython and does input cache management. Finish cleanup later...
27 26
28 27 class Logger(object):
29 28 """A Logfile class with different policies for file creation"""
30 29
31 30 def __init__(self, home_dir, logfname='Logger.log', loghead=u'',
32 31 logmode='over'):
33 32
34 33 # this is the full ipython instance, we need some attributes from it
35 34 # which won't exist until later. What a mess, clean up later...
36 35 self.home_dir = home_dir
37 36
38 37 self.logfname = logfname
39 38 self.loghead = loghead
40 39 self.logmode = logmode
41 40 self.logfile = None
42 41
43 42 # Whether to log raw or processed input
44 43 self.log_raw_input = False
45 44
46 45 # whether to also log output
47 46 self.log_output = False
48 47
49 48 # whether to put timestamps before each log entry
50 49 self.timestamp = False
51 50
52 51 # activity control flags
53 52 self.log_active = False
54 53
55 54 # logmode is a validated property
56 55 def _set_mode(self,mode):
57 56 if mode not in ['append','backup','global','over','rotate']:
58 57 raise ValueError('invalid log mode %s given' % mode)
59 58 self._logmode = mode
60 59
61 60 def _get_mode(self):
62 61 return self._logmode
63 62
64 63 logmode = property(_get_mode,_set_mode)
65 64
66 65 def logstart(self, logfname=None, loghead=None, logmode=None,
67 66 log_output=False, timestamp=False, log_raw_input=False):
68 67 """Generate a new log-file with a default header.
69 68
70 69 Raises RuntimeError if the log has already been started"""
71 70
72 71 if self.logfile is not None:
73 72 raise RuntimeError('Log file is already active: %s' %
74 73 self.logfname)
75 74
76 75 # The parameters can override constructor defaults
77 76 if logfname is not None: self.logfname = logfname
78 77 if loghead is not None: self.loghead = loghead
79 78 if logmode is not None: self.logmode = logmode
80 79
81 80 # Parameters not part of the constructor
82 81 self.timestamp = timestamp
83 82 self.log_output = log_output
84 83 self.log_raw_input = log_raw_input
85 84
86 85 # init depending on the log mode requested
87 86 isfile = os.path.isfile
88 87 logmode = self.logmode
89 88
90 89 if logmode == 'append':
91 90 self.logfile = io.open(self.logfname, 'a', encoding='utf-8')
92 91
93 92 elif logmode == 'backup':
94 93 if isfile(self.logfname):
95 94 backup_logname = self.logfname+'~'
96 95 # Manually remove any old backup, since os.rename may fail
97 96 # under Windows.
98 97 if isfile(backup_logname):
99 98 os.remove(backup_logname)
100 99 os.rename(self.logfname,backup_logname)
101 100 self.logfile = io.open(self.logfname, 'w', encoding='utf-8')
102 101
103 102 elif logmode == 'global':
104 103 self.logfname = os.path.join(self.home_dir,self.logfname)
105 104 self.logfile = io.open(self.logfname, 'a', encoding='utf-8')
106 105
107 106 elif logmode == 'over':
108 107 if isfile(self.logfname):
109 108 os.remove(self.logfname)
110 109 self.logfile = io.open(self.logfname,'w', encoding='utf-8')
111 110
112 111 elif logmode == 'rotate':
113 112 if isfile(self.logfname):
114 113 if isfile(self.logfname+'.001~'):
115 114 old = glob.glob(self.logfname+'.*~')
116 115 old.sort()
117 116 old.reverse()
118 117 for f in old:
119 118 root, ext = os.path.splitext(f)
120 119 num = int(ext[1:-1])+1
121 120 os.rename(f, root+'.'+repr(num).zfill(3)+'~')
122 121 os.rename(self.logfname, self.logfname+'.001~')
123 122 self.logfile = io.open(self.logfname, 'w', encoding='utf-8')
124 123
125 124 if logmode != 'append':
126 125 self.logfile.write(self.loghead)
127 126
128 127 self.logfile.flush()
129 128 self.log_active = True
130 129
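A short sketch of driving the class directly, with hypothetical file names; inside IPython the same machinery is normally reached through the %logstart magic:

    logger = Logger(home_dir='.', logfname='session.log',
                    loghead=u'# IPython log file\n', logmode='rotate')
    logger.logstart(log_output=True, timestamp=True)
    logger.log_write(u'x = 1\n')              # input entry, timestamped
    logger.log_write(u'1', kind='output')     # written only because log_output is True
    logger.logstop()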
131 130 def switch_log(self,val):
132 131 """Switch logging on/off. val should be ONLY a boolean."""
133 132
134 133 if val not in [False,True,0,1]:
135 134 raise ValueError('Call switch_log ONLY with a boolean argument, '
136 135 'not with: %s' % val)
137 136
138 137 label = {0:'OFF',1:'ON',False:'OFF',True:'ON'}
139 138
140 139 if self.logfile is None:
141 140 print("""
142 141 Logging hasn't been started yet (use logstart for that).
143 142
144 143 %logon/%logoff are for temporarily starting and stopping logging for a logfile
145 144 which already exists. But you must first start the logging process with
146 145 %logstart (optionally giving a logfile name).""")
147 146
148 147 else:
149 148 if self.log_active == val:
150 149 print('Logging is already',label[val])
151 150 else:
152 151 print('Switching logging',label[val])
153 152 self.log_active = not self.log_active
154 153 self.log_active_out = self.log_active
155 154
156 155 def logstate(self):
157 156 """Print a status message about the logger."""
158 157 if self.logfile is None:
159 158 print('Logging has not been activated.')
160 159 else:
161 160 state = self.log_active and 'active' or 'temporarily suspended'
162 161 print('Filename :', self.logfname)
163 162 print('Mode :', self.logmode)
164 163 print('Output logging :', self.log_output)
165 164 print('Raw input log :', self.log_raw_input)
166 165 print('Timestamping :', self.timestamp)
167 166 print('State :', state)
168 167
169 168 def log(self, line_mod, line_ori):
170 169 """Write the sources to a log.
171 170
172 171 Inputs:
173 172
174 173 - line_mod: possibly modified input, such as the transformations made
175 174 by input prefilters or input handlers of various kinds. This should
176 175 always be valid Python.
177 176
178 177 - line_ori: unmodified input line from the user. This is not
179 178 necessarily valid Python.
180 179 """
181 180
182 181 # Write the log line, but decide which one according to the
183 182 # log_raw_input flag, set when the log is started.
184 183 if self.log_raw_input:
185 184 self.log_write(line_ori)
186 185 else:
187 186 self.log_write(line_mod)
188 187
189 188 def log_write(self, data, kind='input'):
190 189 """Write data to the log file, if active"""
191 190
192 191 #print 'data: %r' % data # dbg
193 192 if self.log_active and data:
194 193 write = self.logfile.write
195 194 if kind=='input':
196 195 if self.timestamp:
197 196 write(str_to_unicode(time.strftime('# %a, %d %b %Y %H:%M:%S\n',
198 197 time.localtime())))
199 198 write(data)
200 199 elif kind=='output' and self.log_output:
201 200 odata = u'\n'.join([u'#[Out]# %s' % s
202 201 for s in data.splitlines()])
203 202 write(u'%s\n' % odata)
204 203 self.logfile.flush()
205 204
206 205 def logstop(self):
207 206 """Fully stop logging and close log file.
208 207
209 208 In order to start logging again, a new logstart() call needs to be
210 209 made, possibly (though not necessarily) with a new filename, mode and
211 210 other options."""
212 211
213 212 if self.logfile is not None:
214 213 self.logfile.close()
215 214 self.logfile = None
216 215 else:
217 216 print("Logging hadn't been started.")
218 217 self.log_active = False
219 218
220 219 # For backwards compatibility, in case anyone was using this.
221 220 close_log = logstop
@@ -1,680 +1,679 b''
1 1 # encoding: utf-8
2 2 """Magic functions for InteractiveShell.
3 3 """
4 from __future__ import print_function
5 4
6 5 #-----------------------------------------------------------------------------
7 6 # Copyright (C) 2001 Janko Hauser <jhauser@zscout.de> and
8 7 # Copyright (C) 2001 Fernando Perez <fperez@colorado.edu>
9 8 # Copyright (C) 2008 The IPython Development Team
10 9
11 10 # Distributed under the terms of the BSD License. The full license is in
12 11 # the file COPYING, distributed as part of this software.
13 12 #-----------------------------------------------------------------------------
14 13
15 14 import os
16 15 import re
17 16 import sys
18 17 import types
19 18 from getopt import getopt, GetoptError
20 19
21 20 from traitlets.config.configurable import Configurable
22 21 from IPython.core import oinspect
23 22 from IPython.core.error import UsageError
24 23 from IPython.core.inputsplitter import ESC_MAGIC, ESC_MAGIC2
25 24 from decorator import decorator
26 25 from IPython.utils.ipstruct import Struct
27 26 from IPython.utils.process import arg_split
28 27 from IPython.utils.py3compat import string_types, iteritems
29 28 from IPython.utils.text import dedent
30 29 from traitlets import Bool, Dict, Instance, observe
31 30 from logging import error
32 31
33 32 #-----------------------------------------------------------------------------
34 33 # Globals
35 34 #-----------------------------------------------------------------------------
36 35
37 36 # A dict we'll use for each class that has magics, used as temporary storage to
38 37 # pass information between the @line/cell_magic method decorators and the
39 38 # @magics_class class decorator, because the method decorators have no
40 39 # access to the class when they run. See for more details:
41 40 # http://stackoverflow.com/questions/2366713/can-a-python-decorator-of-an-instance-method-access-the-class
42 41
43 42 magics = dict(line={}, cell={})
44 43
45 44 magic_kinds = ('line', 'cell')
46 45 magic_spec = ('line', 'cell', 'line_cell')
47 46 magic_escapes = dict(line=ESC_MAGIC, cell=ESC_MAGIC2)
48 47
49 48 #-----------------------------------------------------------------------------
50 49 # Utility classes and functions
51 50 #-----------------------------------------------------------------------------
52 51
53 52 class Bunch: pass
54 53
55 54
56 55 def on_off(tag):
57 56 """Return an ON/OFF string for a 1/0 input. Simple utility function."""
58 57 return ['OFF','ON'][tag]
59 58
60 59
61 60 def compress_dhist(dh):
62 61 """Compress a directory history into a new one with at most 20 entries.
63 62
64 63 Return a new list made from the first and last 10 elements of dhist after
65 64 removal of duplicates.
66 65 """
67 66 head, tail = dh[:-10], dh[-10:]
68 67
69 68 newhead = []
70 69 done = set()
71 70 for h in head:
72 71 if h in done:
73 72 continue
74 73 newhead.append(h)
75 74 done.add(h)
76 75
77 76 return newhead + tail
78 77
79 78
80 79 def needs_local_scope(func):
81 80 """Decorator to mark magic functions which need to local scope to run."""
82 81 func.needs_local_scope = True
83 82 return func
84 83
85 84 #-----------------------------------------------------------------------------
86 85 # Class and method decorators for registering magics
87 86 #-----------------------------------------------------------------------------
88 87
89 88 def magics_class(cls):
90 89 """Class decorator for all subclasses of the main Magics class.
91 90
92 91 Any class that subclasses Magics *must* also apply this decorator, to
93 92 ensure that all the methods that have been decorated as line/cell magics
94 93 get correctly registered in the class instance. This is necessary because
95 94 when method decorators run, the class does not exist yet, so they
96 95 temporarily store their information into a module global. Application of
97 96 this class decorator copies that global data to the class instance and
98 97 clears the global.
99 98
100 99 Obviously, this mechanism is not thread-safe, which means that the
101 100 *creation* of subclasses of Magic should only be done in a single-thread
102 101 context. Instantiation of the classes has no restrictions. Given that
103 102 these classes are typically created at IPython startup time and before user
104 103 application code becomes active, in practice this should not pose any
105 104 problems.
106 105 """
107 106 cls.registered = True
108 107 cls.magics = dict(line = magics['line'],
109 108 cell = magics['cell'])
110 109 magics['line'] = {}
111 110 magics['cell'] = {}
112 111 return cls
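# --- Editor's note: hedged usage sketch, not part of this changeset. ---
# How the @magics_class / @line_magic / @cell_magic machinery described in the
# docstring above is typically used. The final registration step assumes an
# active IPython session, so it is shown as a comment.
from IPython.core.magic import Magics, magics_class, line_magic, cell_magic

@magics_class                      # copies the temporary global table onto the class
class MyMagics(Magics):

    @line_magic                    # recorded while the class does not exist yet
    def shout(self, line):
        return line.upper()

    @cell_magic
    def countlines(self, line, cell):
        return len(cell.splitlines())

# get_ipython().register_magics(MyMagics)   # resolves recorded names into bound methods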
113 112
114 113
115 114 def record_magic(dct, magic_kind, magic_name, func):
116 115 """Utility function to store a function as a magic of a specific kind.
117 116
118 117 Parameters
119 118 ----------
120 119 dct : dict
121 120 A dictionary with 'line' and 'cell' subdicts.
122 121
123 122 magic_kind : str
124 123 Kind of magic to be stored.
125 124
126 125 magic_name : str
127 126 Key to store the magic as.
128 127
129 128 func : function
130 129 Callable object to store.
131 130 """
132 131 if magic_kind == 'line_cell':
133 132 dct['line'][magic_name] = dct['cell'][magic_name] = func
134 133 else:
135 134 dct[magic_kind][magic_name] = func
136 135
137 136
138 137 def validate_type(magic_kind):
139 138 """Ensure that the given magic_kind is valid.
140 139
141 140 Check that the given magic_kind is one of the accepted spec types (stored
142 141 in the global `magic_spec`), raise ValueError otherwise.
143 142 """
144 143 if magic_kind not in magic_spec:
145 144 raise ValueError('magic_kind must be one of %s, %s given' %
145 144 (magic_kinds, magic_kind))
147 146
148 147
149 148 # The docstrings for the decorator below will be fairly similar for the two
150 149 # types (method and function), so we generate them here once and reuse the
151 150 # templates below.
152 151 _docstring_template = \
153 152 """Decorate the given {0} as {1} magic.
154 153
155 154 The decorator can be used with or without arguments, as follows.
156 155
157 156 i) without arguments: it will create a {1} magic named as the {0} being
158 157 decorated::
159 158
160 159 @deco
161 160 def foo(...)
162 161
163 162 will create a {1} magic named `foo`.
164 163
165 164 ii) with one string argument: which will be used as the actual name of the
166 165 resulting magic::
167 166
168 167 @deco('bar')
169 168 def foo(...)
170 169
171 170 will create a {1} magic named `bar`.
172 171 """
173 172
174 173 # These two are decorator factories. While they are conceptually very similar,
175 174 # there are enough differences in the details that it's simpler to have them
176 175 # written as completely standalone functions rather than trying to share code
177 176 # and make a single one with convoluted logic.
178 177
179 178 def _method_magic_marker(magic_kind):
180 179 """Decorator factory for methods in Magics subclasses.
181 180 """
182 181
183 182 validate_type(magic_kind)
184 183
185 184 # This is a closure to capture the magic_kind. We could also use a class,
186 185 # but it's overkill for just that one bit of state.
187 186 def magic_deco(arg):
188 187 call = lambda f, *a, **k: f(*a, **k)
189 188
190 189 if callable(arg):
191 190 # "Naked" decorator call (just @foo, no args)
192 191 func = arg
193 192 name = func.__name__
194 193 retval = decorator(call, func)
195 194 record_magic(magics, magic_kind, name, name)
196 195 elif isinstance(arg, string_types):
197 196 # Decorator called with arguments (@foo('bar'))
198 197 name = arg
199 198 def mark(func, *a, **kw):
200 199 record_magic(magics, magic_kind, name, func.__name__)
201 200 return decorator(call, func)
202 201 retval = mark
203 202 else:
204 203 raise TypeError("Decorator can only be called with "
205 204 "string or function")
206 205 return retval
207 206
208 207 # Ensure the resulting decorator has a usable docstring
209 208 magic_deco.__doc__ = _docstring_template.format('method', magic_kind)
210 209 return magic_deco
211 210
212 211
213 212 def _function_magic_marker(magic_kind):
214 213 """Decorator factory for standalone functions.
215 214 """
216 215 validate_type(magic_kind)
217 216
218 217 # This is a closure to capture the magic_kind. We could also use a class,
219 218 # but it's overkill for just that one bit of state.
220 219 def magic_deco(arg):
221 220 call = lambda f, *a, **k: f(*a, **k)
222 221
223 222 # Find get_ipython() in the caller's namespace
224 223 caller = sys._getframe(1)
225 224 for ns in ['f_locals', 'f_globals', 'f_builtins']:
226 225 get_ipython = getattr(caller, ns).get('get_ipython')
227 226 if get_ipython is not None:
228 227 break
229 228 else:
230 229 raise NameError('Decorator can only run in context where '
231 230 '`get_ipython` exists')
232 231
233 232 ip = get_ipython()
234 233
235 234 if callable(arg):
236 235 # "Naked" decorator call (just @foo, no args)
237 236 func = arg
238 237 name = func.__name__
239 238 ip.register_magic_function(func, magic_kind, name)
240 239 retval = decorator(call, func)
241 240 elif isinstance(arg, string_types):
242 241 # Decorator called with arguments (@foo('bar'))
243 242 name = arg
244 243 def mark(func, *a, **kw):
245 244 ip.register_magic_function(func, magic_kind, name)
246 245 return decorator(call, func)
247 246 retval = mark
248 247 else:
249 248 raise TypeError("Decorator can only be called with "
250 249 "string or function")
251 250 return retval
252 251
253 252 # Ensure the resulting decorator has a usable docstring
254 253 ds = _docstring_template.format('function', magic_kind)
255 254
256 255 ds += dedent("""
257 256 Note: this decorator can only be used in a context where IPython is already
258 257 active, so that the `get_ipython()` call succeeds. You can therefore use
259 258 it in your startup files loaded after IPython initializes, but *not* in the
260 259 IPython configuration file itself, which is executed before IPython is
261 260 fully up and running. Any file located in the `startup` subdirectory of
262 261 your configuration profile will be OK in this sense.
263 262 """)
264 263
265 264 magic_deco.__doc__ = ds
266 265 return magic_deco
267 266
268 267
269 268 # Create the actual decorators for public use
270 269
271 270 # These three are used to decorate methods in class definitions
272 271 line_magic = _method_magic_marker('line')
273 272 cell_magic = _method_magic_marker('cell')
274 273 line_cell_magic = _method_magic_marker('line_cell')
275 274
276 275 # These three decorate standalone functions and perform the decoration
277 276 # immediately. They can only run where get_ipython() works
278 277 register_line_magic = _function_magic_marker('line')
279 278 register_cell_magic = _function_magic_marker('cell')
280 279 register_line_cell_magic = _function_magic_marker('line_cell')
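# --- Editor's note: hedged illustration, not part of this changeset. ---
# The standalone decorators created above register immediately, so they only work
# where get_ipython() is resolvable (e.g. a profile startup file), as the docstring
# appended in _function_magic_marker explains. Shown as comments for that reason:
#
#     from IPython.core.magic import register_line_magic, register_cell_magic
#
#     @register_line_magic
#     def reverse(line):
#         "Return the line reversed."
#         return line[::-1]
#
#     @register_cell_magic("wordcount")       # explicit-name form
#     def count_words(line, cell):
#         return len(cell.split())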
281 280
282 281 #-----------------------------------------------------------------------------
283 282 # Core Magic classes
284 283 #-----------------------------------------------------------------------------
285 284
286 285 class MagicsManager(Configurable):
287 286 """Object that handles all magic-related functionality for IPython.
288 287 """
289 288 # Non-configurable class attributes
290 289
291 290 # A two-level dict, first keyed by magic type, then by magic function, and
292 291 # holding the actual callable object as value. This is the dict used for
293 292 # magic function dispatch
294 293 magics = Dict()
295 294
296 295 # A registry of the original objects that we've been given holding magics.
297 296 registry = Dict()
298 297
299 298 shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
300 299
301 300 auto_magic = Bool(True, help=
302 301 "Automatically call line magics without requiring explicit % prefix"
303 302 ).tag(config=True)
304 303 @observe('auto_magic')
305 304 def _auto_magic_changed(self, change):
306 305 self.shell.automagic = change['new']
307 306
308 307 _auto_status = [
309 308 'Automagic is OFF, % prefix IS needed for line magics.',
310 309 'Automagic is ON, % prefix IS NOT needed for line magics.']
311 310
312 311 user_magics = Instance('IPython.core.magics.UserMagics', allow_none=True)
313 312
314 313 def __init__(self, shell=None, config=None, user_magics=None, **traits):
315 314
316 315 super(MagicsManager, self).__init__(shell=shell, config=config,
317 316 user_magics=user_magics, **traits)
318 317 self.magics = dict(line={}, cell={})
319 318 # Let's add the user_magics to the registry for uniformity, so *all*
320 319 # registered magic containers can be found there.
321 320 self.registry[user_magics.__class__.__name__] = user_magics
322 321
323 322 def auto_status(self):
324 323 """Return descriptive string with automagic status."""
325 324 return self._auto_status[self.auto_magic]
326 325
327 326 def lsmagic(self):
328 327 """Return a dict of currently available magic functions.
329 328
330 329 The return dict has the keys 'line' and 'cell', corresponding to the
331 330 two types of magics we support. Each value is a list of names.
332 331 """
333 332 return self.magics
334 333
335 334 def lsmagic_docs(self, brief=False, missing=''):
336 335 """Return dict of documentation of magic functions.
337 336
338 337 The return dict has the keys 'line' and 'cell', corresponding to the
339 338 two types of magics we support. Each value is a dict keyed by magic
340 339 name whose value is the function docstring. If a docstring is
341 340 unavailable, the value of `missing` is used instead.
342 341
343 342 If brief is True, only the first line of each docstring will be returned.
344 343 """
345 344 docs = {}
346 345 for m_type in self.magics:
347 346 m_docs = {}
348 347 for m_name, m_func in iteritems(self.magics[m_type]):
349 348 if m_func.__doc__:
350 349 if brief:
351 350 m_docs[m_name] = m_func.__doc__.split('\n', 1)[0]
352 351 else:
353 352 m_docs[m_name] = m_func.__doc__.rstrip()
354 353 else:
355 354 m_docs[m_name] = missing
356 355 docs[m_type] = m_docs
357 356 return docs
358 357
359 358 def register(self, *magic_objects):
360 359 """Register one or more instances of Magics.
361 360
362 361 Take one or more classes or instances of classes that subclass the main
363 362 `core.Magic` class, and register them with IPython to use the magic
364 363 functions they provide. The registration process will then ensure that
365 364 any methods that have been decorated to provide line and/or cell magics will
366 365 be recognized with the `%x`/`%%x` syntax as a line/cell magic
367 366 respectively.
368 367
369 368 If classes are given, they will be instantiated with the default
370 369 constructor. If your classes need a custom constructor, you should
371 370 instantiate them first and pass the instance.
372 371
373 372 The provided arguments can be an arbitrary mix of classes and instances.
374 373
375 374 Parameters
376 375 ----------
377 376 magic_objects : one or more classes or instances
378 377 """
379 378 # Start by validating them to ensure they have all had their magic
380 379 # methods registered at the instance level
381 380 for m in magic_objects:
382 381 if not m.registered:
383 382 raise ValueError("Class of magics %r was constructed without "
384 383 "the @register_magics class decorator")
385 384 if isinstance(m, type):
386 385 # If we're given an uninstantiated class
387 386 m = m(shell=self.shell)
388 387
389 388 # Now that we have an instance, we can register it and update the
390 389 # table of callables
391 390 self.registry[m.__class__.__name__] = m
392 391 for mtype in magic_kinds:
393 392 self.magics[mtype].update(m.magics[mtype])
394 393
395 394 def register_function(self, func, magic_kind='line', magic_name=None):
396 395 """Expose a standalone function as magic function for IPython.
397 396
398 397 This will create an IPython magic (line, cell or both) from a
399 398 standalone function. The functions should have the following
400 399 signatures:
401 400
402 401 * For line magics: `def f(line)`
403 402 * For cell magics: `def f(line, cell)`
404 403 * For a function that does both: `def f(line, cell=None)`
405 404
406 405 In the latter case, the function will be called with `cell==None` when
407 406 invoked as `%f`, and with cell as a string when invoked as `%%f`.
408 407
409 408 Parameters
410 409 ----------
411 410 func : callable
412 411 Function to be registered as a magic.
413 412
414 413 magic_kind : str
415 414 Kind of magic, one of 'line', 'cell' or 'line_cell'
416 415
417 416 magic_name : optional str
418 417 If given, the name the magic will have in the IPython namespace. By
419 418 default, the name of the function itself is used.
420 419 """
421 420
422 421 # Create the new method in the user_magics and register it in the
423 422 # global table
424 423 validate_type(magic_kind)
425 424 magic_name = func.__name__ if magic_name is None else magic_name
426 425 setattr(self.user_magics, magic_name, func)
427 426 record_magic(self.magics, magic_kind, magic_name, func)
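# --- Editor's note: hedged sketch, not part of this changeset. ---
# The three call signatures register_function() accepts, per the docstring above.
# Registration lines are comments because they need a live shell
# (mman = get_ipython().magics_manager).
def upper(line):                        # line magic: %upper some text
    return line.upper()

def cellcount(line, cell):              # cell magic: %%cellcount
    return len(cell.splitlines())

def both(line, cell=None):              # line_cell: cell is None when invoked as %both
    return line if cell is None else cell

# mman.register_function(upper)                                        # kind defaults to 'line'
# mman.register_function(cellcount, magic_kind='cell')
# mman.register_function(both, magic_kind='line_cell', magic_name='either')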
428 427
429 428 def register_alias(self, alias_name, magic_name, magic_kind='line'):
430 429 """Register an alias to a magic function.
431 430
432 431 The alias is an instance of :class:`MagicAlias`, which holds the
433 432 name and kind of the magic it should call. Binding is done at
434 433 call time, so if the underlying magic function is changed the alias
435 434 will call the new function.
436 435
437 436 Parameters
438 437 ----------
439 438 alias_name : str
440 439 The name of the magic to be registered.
441 440
442 441 magic_name : str
443 442 The name of an existing magic.
444 443
445 444 magic_kind : str
446 445 Kind of magic, one of 'line' or 'cell'
447 446 """
448 447
449 448 # `validate_type` is too permissive, as it allows 'line_cell'
450 449 # which we do not handle.
451 450 if magic_kind not in magic_kinds:
452 451 raise ValueError('magic_kind must be one of %s, %s given' %
453 452 (magic_kinds, magic_kind))
454 453
455 454 alias = MagicAlias(self.shell, magic_name, magic_kind)
456 455 setattr(self.user_magics, alias_name, alias)
457 456 record_magic(self.magics, magic_kind, alias_name, alias)
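# --- Editor's note: hedged sketch, not part of this changeset. ---
# register_alias() binds by *name*, not by function object, so the alias follows
# any later redefinition of the target magic. In a live session:
#
#     mman = get_ipython().magics_manager
#     mman.register_alias('t', 'timeit')            # %t   -> %timeit
#     mman.register_alias('tt', 'timeit', 'cell')   # %%tt -> %%timeit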
458 457
459 458 # Key base class that provides the central functionality for magics.
460 459
461 460
462 461 class Magics(Configurable):
463 462 """Base class for implementing magic functions.
464 463
465 464 Shell functions which can be reached as %function_name. All magic
466 465 functions should accept a string, which they can parse for their own
467 466 needs. This can make some functions easier to type, eg `%cd ../`
468 467 vs. `%cd("../")`
469 468
470 469 Classes providing magic functions need to subclass this class, and they
471 470 MUST:
472 471
473 472 - Use the method decorators `@line_magic` and `@cell_magic` to decorate
474 473 individual methods as magic functions, AND
475 474
476 475 - Use the class decorator `@magics_class` to ensure that the magic
477 476 methods are properly registered at the instance level upon instance
478 477 initialization.
479 478
480 479 See :mod:`magic_functions` for examples of actual implementation classes.
481 480 """
482 481 # Dict holding all command-line options for each magic.
483 482 options_table = None
484 483 # Dict for the mapping of magic names to methods, set by class decorator
485 484 magics = None
486 485 # Flag to check that the class decorator was properly applied
487 486 registered = False
488 487 # Instance of IPython shell
489 488 shell = None
490 489
491 490 def __init__(self, shell=None, **kwargs):
492 491 if not(self.__class__.registered):
493 492 raise ValueError('Magics subclass without registration - '
494 493 'did you forget to apply @magics_class?')
495 494 if shell is not None:
496 495 if hasattr(shell, 'configurables'):
497 496 shell.configurables.append(self)
498 497 if hasattr(shell, 'config'):
499 498 kwargs.setdefault('parent', shell)
500 499
501 500 self.shell = shell
502 501 self.options_table = {}
503 502 # The method decorators are run when the instance doesn't exist yet, so
504 503 # they can only record the names of the methods they are supposed to
505 504 # grab. Only now, that the instance exists, can we create the proper
506 505 # mapping to bound methods. So we read the info off the original names
507 506 # table and replace each method name by the actual bound method.
508 507 # But we mustn't clobber the *class* mapping, in case of multiple instances.
509 508 class_magics = self.magics
510 509 self.magics = {}
511 510 for mtype in magic_kinds:
512 511 tab = self.magics[mtype] = {}
513 512 cls_tab = class_magics[mtype]
514 513 for magic_name, meth_name in iteritems(cls_tab):
515 514 if isinstance(meth_name, string_types):
516 515 # it's a method name, grab it
517 516 tab[magic_name] = getattr(self, meth_name)
518 517 else:
519 518 # it's the real thing
520 519 tab[magic_name] = meth_name
521 520 # Configurable **needs** to be initialized at the end or the config
522 521 # magics get screwed up.
523 522 super(Magics, self).__init__(**kwargs)
524 523
525 524 def arg_err(self,func):
526 525 """Print docstring if incorrect arguments were passed"""
527 526 print('Error in arguments:')
528 527 print(oinspect.getdoc(func))
529 528
530 529 def format_latex(self, strng):
531 530 """Format a string for latex inclusion."""
532 531
533 532 # Characters that need to be escaped for latex:
534 533 escape_re = re.compile(r'(%|_|\$|#|&)',re.MULTILINE)
535 534 # Magic command names as headers:
536 535 cmd_name_re = re.compile(r'^(%s.*?):' % ESC_MAGIC,
537 536 re.MULTILINE)
538 537 # Magic commands
539 538 cmd_re = re.compile(r'(?P<cmd>%s.+?\b)(?!\}\}:)' % ESC_MAGIC,
540 539 re.MULTILINE)
541 540 # Paragraph continue
542 541 par_re = re.compile(r'\\$',re.MULTILINE)
543 542
544 543 # The "\n" symbol
545 544 newline_re = re.compile(r'\\n')
546 545
547 546 # Now build the string for output:
548 547 #strng = cmd_name_re.sub(r'\n\\texttt{\\textsl{\\large \1}}:',strng)
549 548 strng = cmd_name_re.sub(r'\n\\bigskip\n\\texttt{\\textbf{ \1}}:',
550 549 strng)
551 550 strng = cmd_re.sub(r'\\texttt{\g<cmd>}',strng)
552 551 strng = par_re.sub(r'\\\\',strng)
553 552 strng = escape_re.sub(r'\\\1',strng)
554 553 strng = newline_re.sub(r'\\textbackslash{}n',strng)
555 554 return strng
556 555
557 556 def parse_options(self, arg_str, opt_str, *long_opts, **kw):
558 557 """Parse options passed to an argument string.
559 558
560 559 The interface is similar to that of :func:`getopt.getopt`, but it
561 560 returns a :class:`~IPython.utils.struct.Struct` with the options as keys
562 561 and the stripped argument string still as a string.
563 562
564 563 arg_str is split into a true sys.argv vector using shlex.split.
565 564 This allows us to easily expand variables, glob files, quote
566 565 arguments, etc.
567 566
568 567 Parameters
569 568 ----------
570 569
571 570 arg_str : str
572 571 The arguments to parse.
573 572
574 573 opt_str : str
575 574 The options specification.
576 575
577 576 mode : str, default 'string'
578 577 If given as 'list', the argument string is returned as a list (split
579 578 on whitespace) instead of a string.
580 579
581 580 list_all : bool, default False
582 581 Put all option values in lists. Normally only options
583 582 appearing more than once are put in a list.
584 583
585 584 posix : bool, default True
586 585 Whether to split the input line in POSIX mode or not, as per the
587 586 conventions outlined in the :mod:`shlex` module from the standard
588 587 library.
589 588 """
590 589
591 590 # inject default options at the beginning of the input line
592 591 caller = sys._getframe(1).f_code.co_name
593 592 arg_str = '%s %s' % (self.options_table.get(caller,''),arg_str)
594 593
595 594 mode = kw.get('mode','string')
596 595 if mode not in ['string','list']:
597 596 raise ValueError('incorrect mode given: %s' % mode)
598 597 # Get options
599 598 list_all = kw.get('list_all',0)
600 599 posix = kw.get('posix', os.name == 'posix')
601 600 strict = kw.get('strict', True)
602 601
603 602 # Check if we have more than one argument to warrant extra processing:
604 603 odict = {} # Dictionary with options
605 604 args = arg_str.split()
606 605 if len(args) >= 1:
607 606 # If the list of inputs only has 0 or 1 thing in it, there's no
608 607 # need to look for options
609 608 argv = arg_split(arg_str, posix, strict)
610 609 # Do regular option processing
611 610 try:
612 611 opts,args = getopt(argv, opt_str, long_opts)
613 612 except GetoptError as e:
614 613 raise UsageError('%s ( allowed: "%s" %s)' % (e.msg,opt_str,
615 614 " ".join(long_opts)))
616 615 for o,a in opts:
617 616 if o.startswith('--'):
618 617 o = o[2:]
619 618 else:
620 619 o = o[1:]
621 620 try:
622 621 odict[o].append(a)
623 622 except AttributeError:
624 623 odict[o] = [odict[o],a]
625 624 except KeyError:
626 625 if list_all:
627 626 odict[o] = [a]
628 627 else:
629 628 odict[o] = a
630 629
631 630 # Prepare opts,args for return
632 631 opts = Struct(odict)
633 632 if mode == 'string':
634 633 args = ' '.join(args)
635 634
636 635 return opts,args
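# --- Editor's note: hedged sketch, not part of this changeset. ---
# Typical use of parse_options() from inside a magic method (compare %save below):
# 'opts' acts like a dict of parsed flags, 'args' is the remaining argument string
# (or a list when mode='list').
#
#     opts, args = self.parse_options(parameter_s, 'fra', mode='list')
#     raw, force, append = 'r' in opts, 'f' in opts, 'a' in opts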
637 636
638 637 def default_option(self, fn, optstr):
639 638 """Make an entry in the options_table for fn, with value optstr"""
640 639
641 640 if fn not in self.lsmagic():
642 641 error("%s is not a magic function" % fn)
643 642 self.options_table[fn] = optstr
644 643
645 644
646 645 class MagicAlias(object):
647 646 """An alias to another magic function.
648 647
649 648 An alias is determined by its magic name and magic kind. Lookup
650 649 is done at call time, so if the underlying magic changes the alias
651 650 will call the new function.
652 651
653 652 Use the :meth:`MagicsManager.register_alias` method or the
654 653 `%alias_magic` magic function to create and register a new alias.
655 654 """
656 655 def __init__(self, shell, magic_name, magic_kind):
657 656 self.shell = shell
658 657 self.magic_name = magic_name
659 658 self.magic_kind = magic_kind
660 659
661 660 self.pretty_target = '%s%s' % (magic_escapes[self.magic_kind], self.magic_name)
662 661 self.__doc__ = "Alias for `%s`." % self.pretty_target
663 662
664 663 self._in_call = False
665 664
666 665 def __call__(self, *args, **kwargs):
667 666 """Call the magic alias."""
668 667 fn = self.shell.find_magic(self.magic_name, self.magic_kind)
669 668 if fn is None:
670 669 raise UsageError("Magic `%s` not found." % self.pretty_target)
671 670
672 671 # Protect against infinite recursion.
673 672 if self._in_call:
674 673 raise UsageError("Infinite recursion detected; "
675 674 "magic aliases cannot call themselves.")
676 675 self._in_call = True
677 676 try:
678 677 return fn(*args, **kwargs)
679 678 finally:
680 679 self._in_call = False
@@ -1,130 +1,128 b''
1 1 """Implementation of magic functions that control various automatic behaviors.
2 2 """
3 from __future__ import print_function
4 from __future__ import absolute_import
5 3 #-----------------------------------------------------------------------------
6 4 # Copyright (c) 2012 The IPython Development Team.
7 5 #
8 6 # Distributed under the terms of the Modified BSD License.
9 7 #
10 8 # The full license is in the file COPYING.txt, distributed with this software.
11 9 #-----------------------------------------------------------------------------
12 10
13 11 #-----------------------------------------------------------------------------
14 12 # Imports
15 13 #-----------------------------------------------------------------------------
16 14
17 15 # Our own packages
18 16 from IPython.core.magic import Bunch, Magics, magics_class, line_magic
19 17 from IPython.testing.skipdoctest import skip_doctest
20 18 from logging import error
21 19
22 20 #-----------------------------------------------------------------------------
23 21 # Magic implementation classes
24 22 #-----------------------------------------------------------------------------
25 23
26 24 @magics_class
27 25 class AutoMagics(Magics):
28 26 """Magics that control various autoX behaviors."""
29 27
30 28 def __init__(self, shell):
31 29 super(AutoMagics, self).__init__(shell)
32 30 # namespace for holding state we may need
33 31 self._magic_state = Bunch()
34 32
35 33 @line_magic
36 34 def automagic(self, parameter_s=''):
37 35 """Make magic functions callable without having to type the initial %.
38 36
39 37 Without arguments, toggles on/off (when off, you must call it as
40 38 %automagic, of course). With arguments it sets the value, and you can
41 39 use any of (case insensitive):
42 40
43 41 - on, 1, True: to activate
44 42
45 43 - off, 0, False: to deactivate.
46 44
47 45 Note that magic functions have lowest priority, so if there's a
48 46 variable whose name collides with that of a magic fn, automagic won't
49 47 work for that function (you get the variable instead). However, if you
50 48 delete the variable (del var), the previously shadowed magic function
51 49 becomes visible to automagic again."""
52 50
53 51 arg = parameter_s.lower()
54 52 mman = self.shell.magics_manager
55 53 if arg in ('on', '1', 'true'):
56 54 val = True
57 55 elif arg in ('off', '0', 'false'):
58 56 val = False
59 57 else:
60 58 val = not mman.auto_magic
61 59 mman.auto_magic = val
62 60 print('\n' + self.shell.magics_manager.auto_status())
63 61
64 62 @skip_doctest
65 63 @line_magic
66 64 def autocall(self, parameter_s=''):
67 65 """Make functions callable without having to type parentheses.
68 66
69 67 Usage:
70 68
71 69 %autocall [mode]
72 70
73 71 The mode can be one of: 0->Off, 1->Smart, 2->Full. If not given, the
74 72 value is toggled on and off (remembering the previous state).
75 73
76 74 In more detail, these values mean:
77 75
78 76 0 -> fully disabled
79 77
80 78 1 -> active, but do not apply if there are no arguments on the line.
81 79
82 80 In this mode, you get::
83 81
84 82 In [1]: callable
85 83 Out[1]: <built-in function callable>
86 84
87 85 In [2]: callable 'hello'
88 86 ------> callable('hello')
89 87 Out[2]: False
90 88
91 89 2 -> Active always. Even if no arguments are present, the callable
92 90 object is called::
93 91
94 92 In [2]: float
95 93 ------> float()
96 94 Out[2]: 0.0
97 95
98 96 Note that even with autocall off, you can still use '/' at the start of
99 97 a line to treat the first argument on the command line as a function
100 98 and add parentheses to it::
101 99
102 100 In [8]: /str 43
103 101 ------> str(43)
104 102 Out[8]: '43'
105 103
106 104 # all-random (note for auto-testing)
107 105 """
108 106
109 107 if parameter_s:
110 108 arg = int(parameter_s)
111 109 else:
112 110 arg = 'toggle'
113 111
114 112 if arg not in (0, 1, 2, 'toggle'):
115 113 error('Valid modes: (0->Off, 1->Smart, 2->Full)')
116 114 return
117 115
118 116 if arg in (0, 1, 2):
119 117 self.shell.autocall = arg
120 118 else: # toggle
121 119 if self.shell.autocall:
122 120 self._magic_state.autocall_save = self.shell.autocall
123 121 self.shell.autocall = 0
124 122 else:
125 123 try:
126 124 self.shell.autocall = self._magic_state.autocall_save
127 125 except AttributeError:
128 126 self.shell.autocall = self._magic_state.autocall_save = 1
129 127
130 128 print("Automatic calling is:",['OFF','Smart','Full'][self.shell.autocall])
@@ -1,583 +1,581 b''
1 1 """Implementation of basic magic functions."""
2 2
3 from __future__ import print_function
4 from __future__ import absolute_import
5 3
6 4 import argparse
7 5 import io
8 6 import sys
9 7 from pprint import pformat
10 8
11 9 from IPython.core import magic_arguments, page
12 10 from IPython.core.error import UsageError
13 11 from IPython.core.magic import Magics, magics_class, line_magic, magic_escapes
14 12 from IPython.utils.text import format_screen, dedent, indent
15 13 from IPython.testing.skipdoctest import skip_doctest
16 14 from IPython.utils.ipstruct import Struct
17 15 from IPython.utils.py3compat import unicode_type
18 16 from warnings import warn
19 17 from logging import error
20 18
21 19
22 20 class MagicsDisplay(object):
23 21 def __init__(self, magics_manager):
24 22 self.magics_manager = magics_manager
25 23
26 24 def _lsmagic(self):
27 25 """The main implementation of the %lsmagic"""
28 26 mesc = magic_escapes['line']
29 27 cesc = magic_escapes['cell']
30 28 mman = self.magics_manager
31 29 magics = mman.lsmagic()
32 30 out = ['Available line magics:',
33 31 mesc + (' '+mesc).join(sorted(magics['line'])),
34 32 '',
35 33 'Available cell magics:',
36 34 cesc + (' '+cesc).join(sorted(magics['cell'])),
37 35 '',
38 36 mman.auto_status()]
39 37 return '\n'.join(out)
40 38
41 39 def _repr_pretty_(self, p, cycle):
42 40 p.text(self._lsmagic())
43 41
44 42 def __str__(self):
45 43 return self._lsmagic()
46 44
47 45 def _jsonable(self):
48 46 """turn magics dict into jsonable dict of the same structure
49 47
50 48 replaces object instances with their class names as strings
51 49 """
52 50 magic_dict = {}
53 51 mman = self.magics_manager
54 52 magics = mman.lsmagic()
55 53 for key, subdict in magics.items():
56 54 d = {}
57 55 magic_dict[key] = d
58 56 for name, obj in subdict.items():
59 57 try:
60 58 classname = obj.__self__.__class__.__name__
61 59 except AttributeError:
62 60 classname = 'Other'
63 61
64 62 d[name] = classname
65 63 return magic_dict
66 64
67 65 def _repr_json_(self):
68 66 return self._jsonable()
69 67
70 68
71 69 @magics_class
72 70 class BasicMagics(Magics):
73 71 """Magics that provide central IPython functionality.
74 72
75 73 These are various magics that don't fit into specific categories but that
76 74 are all part of the base 'IPython experience'."""
77 75
78 76 @magic_arguments.magic_arguments()
79 77 @magic_arguments.argument(
80 78 '-l', '--line', action='store_true',
81 79 help="""Create a line magic alias."""
82 80 )
83 81 @magic_arguments.argument(
84 82 '-c', '--cell', action='store_true',
85 83 help="""Create a cell magic alias."""
86 84 )
87 85 @magic_arguments.argument(
88 86 'name',
89 87 help="""Name of the magic to be created."""
90 88 )
91 89 @magic_arguments.argument(
92 90 'target',
93 91 help="""Name of the existing line or cell magic."""
94 92 )
95 93 @line_magic
96 94 def alias_magic(self, line=''):
97 95 """Create an alias for an existing line or cell magic.
98 96
99 97 Examples
100 98 --------
101 99 ::
102 100
103 101 In [1]: %alias_magic t timeit
104 102 Created `%t` as an alias for `%timeit`.
105 103 Created `%%t` as an alias for `%%timeit`.
106 104
107 105 In [2]: %t -n1 pass
108 106 1 loops, best of 3: 954 ns per loop
109 107
110 108 In [3]: %%t -n1
111 109 ...: pass
112 110 ...:
113 111 1 loops, best of 3: 954 ns per loop
114 112
115 113 In [4]: %alias_magic --cell whereami pwd
116 114 UsageError: Cell magic function `%%pwd` not found.
117 115 In [5]: %alias_magic --line whereami pwd
118 116 Created `%whereami` as an alias for `%pwd`.
119 117
120 118 In [6]: %whereami
121 119 Out[6]: u'/home/testuser'
122 120 """
123 121 args = magic_arguments.parse_argstring(self.alias_magic, line)
124 122 shell = self.shell
125 123 mman = self.shell.magics_manager
126 124 escs = ''.join(magic_escapes.values())
127 125
128 126 target = args.target.lstrip(escs)
129 127 name = args.name.lstrip(escs)
130 128
131 129 # Find the requested magics.
132 130 m_line = shell.find_magic(target, 'line')
133 131 m_cell = shell.find_magic(target, 'cell')
134 132 if args.line and m_line is None:
135 133 raise UsageError('Line magic function `%s%s` not found.' %
136 134 (magic_escapes['line'], target))
137 135 if args.cell and m_cell is None:
138 136 raise UsageError('Cell magic function `%s%s` not found.' %
139 137 (magic_escapes['cell'], target))
140 138
141 139 # If --line and --cell are not specified, default to the ones
142 140 # that are available.
143 141 if not args.line and not args.cell:
144 142 if not m_line and not m_cell:
145 143 raise UsageError(
146 144 'No line or cell magic with name `%s` found.' % target
147 145 )
148 146 args.line = bool(m_line)
149 147 args.cell = bool(m_cell)
150 148
151 149 if args.line:
152 150 mman.register_alias(name, target, 'line')
153 151 print('Created `%s%s` as an alias for `%s%s`.' % (
154 152 magic_escapes['line'], name,
155 153 magic_escapes['line'], target))
156 154
157 155 if args.cell:
158 156 mman.register_alias(name, target, 'cell')
159 157 print('Created `%s%s` as an alias for `%s%s`.' % (
160 158 magic_escapes['cell'], name,
161 159 magic_escapes['cell'], target))
162 160
163 161 @line_magic
164 162 def lsmagic(self, parameter_s=''):
165 163 """List currently available magic functions."""
166 164 return MagicsDisplay(self.shell.magics_manager)
167 165
168 166 def _magic_docs(self, brief=False, rest=False):
169 167 """Return docstrings from magic functions."""
170 168 mman = self.shell.magics_manager
171 169 docs = mman.lsmagic_docs(brief, missing='No documentation')
172 170
173 171 if rest:
174 172 format_string = '**%s%s**::\n\n%s\n\n'
175 173 else:
176 174 format_string = '%s%s:\n%s\n'
177 175
178 176 return ''.join(
179 177 [format_string % (magic_escapes['line'], fname,
180 178 indent(dedent(fndoc)))
181 179 for fname, fndoc in sorted(docs['line'].items())]
182 180 +
183 181 [format_string % (magic_escapes['cell'], fname,
184 182 indent(dedent(fndoc)))
185 183 for fname, fndoc in sorted(docs['cell'].items())]
186 184 )
187 185
188 186 @line_magic
189 187 def magic(self, parameter_s=''):
190 188 """Print information about the magic function system.
191 189
192 190 Supported formats: -latex, -brief, -rest
193 191 """
194 192
195 193 mode = ''
196 194 try:
197 195 mode = parameter_s.split()[0][1:]
198 196 except IndexError:
199 197 pass
200 198
201 199 brief = (mode == 'brief')
202 200 rest = (mode == 'rest')
203 201 magic_docs = self._magic_docs(brief, rest)
204 202
205 203 if mode == 'latex':
206 204 print(self.format_latex(magic_docs))
207 205 return
208 206 else:
209 207 magic_docs = format_screen(magic_docs)
210 208
211 209 out = ["""
212 210 IPython's 'magic' functions
213 211 ===========================
214 212
215 213 The magic function system provides a series of functions which allow you to
216 214 control the behavior of IPython itself, plus a lot of system-type
217 215 features. There are two kinds of magics, line-oriented and cell-oriented.
218 216
219 217 Line magics are prefixed with the % character and work much like OS
220 218 command-line calls: they get as an argument the rest of the line, where
221 219 arguments are passed without parentheses or quotes. For example, this will
222 220 time the given statement::
223 221
224 222 %timeit range(1000)
225 223
226 224 Cell magics are prefixed with a double %%, and they are functions that get as
227 225 an argument not only the rest of the line, but also the lines below it in a
228 226 separate argument. These magics are called with two arguments: the rest of the
229 227 call line and the body of the cell, consisting of the lines below the first.
230 228 For example::
231 229
232 230 %%timeit x = numpy.random.randn((100, 100))
233 231 numpy.linalg.svd(x)
234 232
235 233 will time the execution of the numpy svd routine, running the assignment of x
236 234 as part of the setup phase, which is not timed.
237 235
238 236 In a line-oriented client (the terminal or Qt console IPython), starting a new
239 237 input with %% will automatically enter cell mode, and IPython will continue
240 238 reading input until a blank line is given. In the notebook, simply type the
241 239 whole cell as one entity, but keep in mind that the %% escape can only be at
242 240 the very start of the cell.
243 241
244 242 NOTE: If you have 'automagic' enabled (via the command line option or with the
245 243 %automagic function), you don't need to type in the % explicitly for line
246 244 magics; cell magics always require an explicit '%%' escape. By default,
247 245 IPython ships with automagic on, so you should only rarely need the % escape.
248 246
249 247 Example: typing '%cd mydir' (without the quotes) changes your working directory
250 248 to 'mydir', if it exists.
251 249
252 250 For a list of the available magic functions, use %lsmagic. For a description
253 251 of any of them, type %magic_name?, e.g. '%cd?'.
254 252
255 253 Currently the magic system has the following functions:""",
256 254 magic_docs,
257 255 "Summary of magic functions (from %slsmagic):" % magic_escapes['line'],
258 256 str(self.lsmagic()),
259 257 ]
260 258 page.page('\n'.join(out))
261 259
262 260
263 261 @line_magic
264 262 def page(self, parameter_s=''):
265 263 """Pretty print the object and display it through a pager.
266 264
267 265 %page [options] OBJECT
268 266
269 267 If no object is given, use _ (last output).
270 268
271 269 Options:
272 270
273 271 -r: page str(object), don't pretty-print it."""
274 272
275 273 # After a function contributed by Olivier Aubert, slightly modified.
276 274
277 275 # Process options/args
278 276 opts, args = self.parse_options(parameter_s, 'r')
279 277 raw = 'r' in opts
280 278
281 279 oname = args and args or '_'
282 280 info = self.shell._ofind(oname)
283 281 if info['found']:
284 282 txt = (raw and str or pformat)( info['obj'] )
285 283 page.page(txt)
286 284 else:
287 285 print('Object `%s` not found' % oname)
288 286
289 287 @line_magic
290 288 def profile(self, parameter_s=''):
291 289 """Print your currently active IPython profile.
292 290
293 291 See Also
294 292 --------
295 293 prun : run code using the Python profiler
296 294 (:meth:`~IPython.core.magics.execution.ExecutionMagics.prun`)
297 295 """
298 296 warn("%profile is now deprecated. Please use get_ipython().profile instead.")
299 297 from IPython.core.application import BaseIPythonApplication
300 298 if BaseIPythonApplication.initialized():
301 299 print(BaseIPythonApplication.instance().profile)
302 300 else:
303 301 error("profile is an application-level value, but you don't appear to be in an IPython application")
304 302
305 303 @line_magic
306 304 def pprint(self, parameter_s=''):
307 305 """Toggle pretty printing on/off."""
308 306 ptformatter = self.shell.display_formatter.formatters['text/plain']
309 307 ptformatter.pprint = bool(1 - ptformatter.pprint)
310 308 print('Pretty printing has been turned',
311 309 ['OFF','ON'][ptformatter.pprint])
312 310
313 311 @line_magic
314 312 def colors(self, parameter_s=''):
315 313 """Switch color scheme for prompts, info system and exception handlers.
316 314
317 315 Currently implemented schemes: NoColor, Linux, LightBG.
318 316
319 317 Color scheme names are not case-sensitive.
320 318
321 319 Examples
322 320 --------
323 321 To get a plain black and white terminal::
324 322
325 323 %colors nocolor
326 324 """
327 325 def color_switch_err(name):
328 326 warn('Error changing %s color schemes.\n%s' %
329 327 (name, sys.exc_info()[1]), stacklevel=2)
330 328
331 329
332 330 new_scheme = parameter_s.strip()
333 331 if not new_scheme:
334 332 raise UsageError(
335 333 "%colors: you must specify a color scheme. See '%colors?'")
336 334 # local shortcut
337 335 shell = self.shell
338 336
339 337 # Set shell colour scheme
340 338 try:
341 339 shell.colors = new_scheme
342 340 shell.refresh_style()
343 341 except:
344 342 color_switch_err('shell')
345 343
346 344 # Set exception colors
347 345 try:
348 346 shell.InteractiveTB.set_colors(scheme = new_scheme)
349 347 shell.SyntaxTB.set_colors(scheme = new_scheme)
350 348 except:
351 349 color_switch_err('exception')
352 350
353 351 # Set info (for 'object?') colors
354 352 if shell.color_info:
355 353 try:
356 354 shell.inspector.set_active_scheme(new_scheme)
357 355 except:
358 356 color_switch_err('object inspector')
359 357 else:
360 358 shell.inspector.set_active_scheme('NoColor')
361 359
362 360 @line_magic
363 361 def xmode(self, parameter_s=''):
364 362 """Switch modes for the exception handlers.
365 363
366 364 Valid modes: Plain, Context and Verbose.
367 365
368 366 If called without arguments, acts as a toggle."""
369 367
370 368 def xmode_switch_err(name):
371 369 warn('Error changing %s exception modes.\n%s' %
372 370 (name,sys.exc_info()[1]))
373 371
374 372 shell = self.shell
375 373 new_mode = parameter_s.strip().capitalize()
376 374 try:
377 375 shell.InteractiveTB.set_mode(mode=new_mode)
378 376 print('Exception reporting mode:',shell.InteractiveTB.mode)
379 377 except:
380 378 xmode_switch_err('user')
381 379
382 380 @line_magic
383 381 def quickref(self,arg):
384 382 """ Show a quick reference sheet """
385 383 from IPython.core.usage import quick_reference
386 384 qr = quick_reference + self._magic_docs(brief=True)
387 385 page.page(qr)
388 386
389 387 @line_magic
390 388 def doctest_mode(self, parameter_s=''):
391 389 """Toggle doctest mode on and off.
392 390
393 391 This mode is intended to make IPython behave as much as possible like a
394 392 plain Python shell, from the perspective of how its prompts, exceptions
395 393 and output look. This makes it easy to copy and paste parts of a
396 394 session into doctests. It does so by:
397 395
398 396 - Changing the prompts to the classic ``>>>`` ones.
399 397 - Changing the exception reporting mode to 'Plain'.
400 398 - Disabling pretty-printing of output.
401 399
402 400 Note that IPython also supports the pasting of code snippets that have
403 401 leading '>>>' and '...' prompts in them. This means that you can paste
404 402 doctests from files or docstrings (even if they have leading
405 403 whitespace), and the code will execute correctly. You can then use
406 404 '%history -t' to see the translated history; this will give you the
407 405 input after removal of all the leading prompts and whitespace, which
408 406 can be pasted back into an editor.
409 407
410 408 With these features, you can switch into this mode easily whenever you
411 409 need to do testing and changes to doctests, without having to leave
412 410 your existing IPython session.
413 411 """
414 412
415 413 # Shorthands
416 414 shell = self.shell
417 415 meta = shell.meta
418 416 disp_formatter = self.shell.display_formatter
419 417 ptformatter = disp_formatter.formatters['text/plain']
420 418 # dstore is a data store kept in the instance metadata bag to track any
421 419 # changes we make, so we can undo them later.
422 420 dstore = meta.setdefault('doctest_mode',Struct())
423 421 save_dstore = dstore.setdefault
424 422
425 423 # save a few values we'll need to recover later
426 424 mode = save_dstore('mode',False)
427 425 save_dstore('rc_pprint',ptformatter.pprint)
428 426 save_dstore('xmode',shell.InteractiveTB.mode)
429 427 save_dstore('rc_separate_out',shell.separate_out)
430 428 save_dstore('rc_separate_out2',shell.separate_out2)
431 429 save_dstore('rc_separate_in',shell.separate_in)
432 430 save_dstore('rc_active_types',disp_formatter.active_types)
433 431
434 432 if not mode:
435 433 # turn on
436 434
437 435 # Prompt separators like plain python
438 436 shell.separate_in = ''
439 437 shell.separate_out = ''
440 438 shell.separate_out2 = ''
441 439
442 440
443 441 ptformatter.pprint = False
444 442 disp_formatter.active_types = ['text/plain']
445 443
446 444 shell.magic('xmode Plain')
447 445 else:
448 446 # turn off
449 447 shell.separate_in = dstore.rc_separate_in
450 448
451 449 shell.separate_out = dstore.rc_separate_out
452 450 shell.separate_out2 = dstore.rc_separate_out2
453 451
454 452 ptformatter.pprint = dstore.rc_pprint
455 453 disp_formatter.active_types = dstore.rc_active_types
456 454
457 455 shell.magic('xmode ' + dstore.xmode)
458 456
459 457 # mode here is the state before we switch; switch_doctest_mode takes
460 458 # the mode we're switching to.
461 459 shell.switch_doctest_mode(not mode)
462 460
463 461 # Store new mode and inform
464 462 dstore.mode = bool(not mode)
465 463 mode_label = ['OFF','ON'][dstore.mode]
466 464 print('Doctest mode is:', mode_label)
467 465
468 466 @line_magic
469 467 def gui(self, parameter_s=''):
470 468 """Enable or disable IPython GUI event loop integration.
471 469
472 470 %gui [GUINAME]
473 471
474 472 This magic replaces IPython's threaded shells that were activated
475 473 using the (pylab/wthread/etc.) command line flags. GUI toolkits
476 474 can now be enabled at runtime and keyboard
477 475 interrupts should work without any problems. The following toolkits
478 476 are supported: wxPython, PyQt4, PyGTK, Tk and Cocoa (OSX)::
479 477
480 478 %gui wx # enable wxPython event loop integration
481 479 %gui qt4|qt # enable PyQt4 event loop integration
482 480 %gui qt5 # enable PyQt5 event loop integration
483 481 %gui gtk # enable PyGTK event loop integration
484 482 %gui gtk3 # enable Gtk3 event loop integration
485 483 %gui tk # enable Tk event loop integration
486 484 %gui osx # enable Cocoa event loop integration
487 485 # (requires %matplotlib 1.1)
488 486 %gui # disable all event loop integration
489 487
490 488 WARNING: after any of these has been called you can simply create
491 489 an application object, but DO NOT start the event loop yourself, as
492 490 we have already handled that.
493 491 """
494 492 opts, arg = self.parse_options(parameter_s, '')
495 493 if arg=='': arg = None
496 494 try:
497 495 return self.shell.enable_gui(arg)
498 496 except Exception as e:
499 497 # print simple error message, rather than traceback if we can't
500 498 # hook up the GUI
501 499 error(str(e))
502 500
503 501 @skip_doctest
504 502 @line_magic
505 503 def precision(self, s=''):
506 504 """Set floating point precision for pretty printing.
507 505
508 506 Can set either integer precision or a format string.
509 507
510 508 If numpy has been imported and precision is an int,
511 509 numpy display precision will also be set, via ``numpy.set_printoptions``.
512 510
513 511 If no argument is given, defaults will be restored.
514 512
515 513 Examples
516 514 --------
517 515 ::
518 516
519 517 In [1]: from math import pi
520 518
521 519 In [2]: %precision 3
522 520 Out[2]: u'%.3f'
523 521
524 522 In [3]: pi
525 523 Out[3]: 3.142
526 524
527 525 In [4]: %precision %i
528 526 Out[4]: u'%i'
529 527
530 528 In [5]: pi
531 529 Out[5]: 3
532 530
533 531 In [6]: %precision %e
534 532 Out[6]: u'%e'
535 533
536 534 In [7]: pi**10
537 535 Out[7]: 9.364805e+04
538 536
539 537 In [8]: %precision
540 538 Out[8]: u'%r'
541 539
542 540 In [9]: pi**10
543 541 Out[9]: 93648.047476082982
544 542 """
545 543 ptformatter = self.shell.display_formatter.formatters['text/plain']
546 544 ptformatter.float_precision = s
547 545 return ptformatter.float_format
548 546
549 547 @magic_arguments.magic_arguments()
550 548 @magic_arguments.argument(
551 549 '-e', '--export', action='store_true', default=False,
552 550 help=argparse.SUPPRESS
553 551 )
554 552 @magic_arguments.argument(
555 553 'filename', type=unicode_type,
556 554 help='Notebook name or filename'
557 555 )
558 556 @line_magic
559 557 def notebook(self, s):
560 558 """Export and convert IPython notebooks.
561 559
562 560 This function can export the current IPython history to a notebook file.
563 561 For example, to export the history to "foo.ipynb" do "%notebook foo.ipynb".
564 562
565 563 The -e or --export flag is deprecated in IPython 5.2, and will be
566 564 removed in the future.
567 565 """
568 566 args = magic_arguments.parse_argstring(self.notebook, s)
569 567
570 568 from nbformat import write, v4
571 569
572 570 cells = []
573 571 hist = list(self.shell.history_manager.get_range())
574 572 if len(hist) <= 1:
575 573 raise ValueError('History is empty, cannot export')
576 574 for session, execution_count, source in hist[:-1]:
577 575 cells.append(v4.new_code_cell(
578 576 execution_count=execution_count,
579 577 source=source
580 578 ))
581 579 nb = v4.new_notebook(cells=cells)
582 580 with io.open(args.filename, 'w', encoding='utf-8') as f:
583 581 write(nb, f, version=4)
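# --- Editor's note: hedged follow-up, not part of this changeset. ---
# Reading back a notebook produced by %notebook, assuming the "foo.ipynb" from
# the docstring example exists on disk.
import nbformat

nb = nbformat.read("foo.ipynb", as_version=4)
print(len(nb.cells), "code cells exported")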
@@ -1,746 +1,744 b''
1 1 """Implementation of code management magic functions.
2 2 """
3 from __future__ import print_function
4 from __future__ import absolute_import
5 3 #-----------------------------------------------------------------------------
6 4 # Copyright (c) 2012 The IPython Development Team.
7 5 #
8 6 # Distributed under the terms of the Modified BSD License.
9 7 #
10 8 # The full license is in the file COPYING.txt, distributed with this software.
11 9 #-----------------------------------------------------------------------------
12 10
13 11 #-----------------------------------------------------------------------------
14 12 # Imports
15 13 #-----------------------------------------------------------------------------
16 14
17 15 # Stdlib
18 16 import inspect
19 17 import io
20 18 import os
21 19 import re
22 20 import sys
23 21 import ast
24 22 from itertools import chain
25 23
26 24 # Our own packages
27 25 from IPython.core.error import TryNext, StdinNotImplementedError, UsageError
28 26 from IPython.core.macro import Macro
29 27 from IPython.core.magic import Magics, magics_class, line_magic
30 28 from IPython.core.oinspect import find_file, find_source_lines
31 29 from IPython.testing.skipdoctest import skip_doctest
32 30 from IPython.utils import py3compat
33 31 from IPython.utils.py3compat import string_types
34 32 from IPython.utils.contexts import preserve_keys
35 33 from IPython.utils.path import get_py_filename
36 34 from warnings import warn
37 35 from logging import error
38 36 from IPython.utils.text import get_text_list
39 37
40 38 #-----------------------------------------------------------------------------
41 39 # Magic implementation classes
42 40 #-----------------------------------------------------------------------------
43 41
44 42 # Used for exception handling in magic_edit
45 43 class MacroToEdit(ValueError): pass
46 44
47 45 ipython_input_pat = re.compile(r"<ipython\-input\-(\d+)-[a-z\d]+>$")
48 46
49 47 # To match, e.g. 8-10 1:5 :10 3-
50 48 range_re = re.compile(r"""
51 49 (?P<start>\d+)?
52 50 ((?P<sep>[\-:])
53 51 (?P<end>\d+)?)?
54 52 $""", re.VERBOSE)
55 53
56 54
57 55 def extract_code_ranges(ranges_str):
58 56 """Turn a string of range for %%load into 2-tuples of (start, stop)
59 57 ready to use as a slice of the content split into lines.
60 58
61 59 Examples
62 60 --------
63 61 list(extract_code_ranges("5-10 2"))
64 62 [(4, 10), (1, 2)]
65 63 """
66 64 for range_str in ranges_str.split():
67 65 rmatch = range_re.match(range_str)
68 66 if not rmatch:
69 67 continue
70 68 sep = rmatch.group("sep")
71 69 start = rmatch.group("start")
72 70 end = rmatch.group("end")
73 71
74 72 if sep == '-':
75 73 start = int(start) - 1 if start else None
76 74 end = int(end) if end else None
77 75 elif sep == ':':
78 76 start = int(start) - 1 if start else None
79 77 end = int(end) - 1 if end else None
80 78 else:
81 79 end = int(start)
82 80 start = int(start) - 1
83 81 yield (start, end)
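# --- Editor's note: hedged check, not part of this changeset. ---
# extract_code_ranges() in action (module path assumed to be
# IPython.core.magics.code); '-' ranges are inclusive, ':' ranges are python-style.
from IPython.core.magics.code import extract_code_ranges

assert list(extract_code_ranges("5-10 2")) == [(4, 10), (1, 2)]
assert list(extract_code_ranges(":3 8-")) == [(None, 2), (7, None)]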
84 82
85 83
86 84 @skip_doctest
87 85 def extract_symbols(code, symbols):
88 86 """
89 87 Return a tuple (blocks, not_found)
90 88 where ``blocks`` is a list of code fragments
91 89 for each symbol parsed from code, and ``not_found`` are
92 90 symbols not found in the code.
93 91
94 92 For example::
95 93
96 94 >>> code = '''a = 10
97 95
98 96 def b(): return 42
99 97
100 98 class A: pass'''
101 99
102 100 >>> extract_symbols(code, 'A,b,z')
103 101 (["class A: pass", "def b(): return 42"], ['z'])
104 102 """
105 103 symbols = symbols.split(',')
106 104
107 105 # this will raise SyntaxError if code isn't valid Python
108 106 py_code = ast.parse(code)
109 107
110 108 marks = [(getattr(s, 'name', None), s.lineno) for s in py_code.body]
111 109 code = code.split('\n')
112 110
113 111 symbols_lines = {}
114 112
115 113 # we already know the start_lineno of each symbol (marks).
116 114 # To find each end_lineno, we traverse in reverse order until each
117 115 # non-blank line
118 116 end = len(code)
119 117 for name, start in reversed(marks):
120 118 while not code[end - 1].strip():
121 119 end -= 1
122 120 if name:
123 121 symbols_lines[name] = (start - 1, end)
124 122 end = start - 1
125 123
126 124 # Now symbols_lines is a map
127 125 # {'symbol_name': (start_lineno, end_lineno), ...}
128 126
129 127 # fill a list with chunks of codes for each requested symbol
130 128 blocks = []
131 129 not_found = []
132 130 for symbol in symbols:
133 131 if symbol in symbols_lines:
134 132 start, end = symbols_lines[symbol]
135 133 blocks.append('\n'.join(code[start:end]) + '\n')
136 134 else:
137 135 not_found.append(symbol)
138 136
139 137 return blocks, not_found
140 138
141 139 def strip_initial_indent(lines):
142 140 """For %load, strip indent from lines until finding an unindented line.
143 141
144 142 https://github.com/ipython/ipython/issues/9775
145 143 """
146 144 indent_re = re.compile(r'\s+')
147 145
148 146 it = iter(lines)
149 147 first_line = next(it)
150 148 indent_match = indent_re.match(first_line)
151 149
152 150 if indent_match:
153 151 # First line was indented
154 152 indent = indent_match.group()
155 153 yield first_line[len(indent):]
156 154
157 155 for line in it:
158 156 if line.startswith(indent):
159 157 yield line[len(indent):]
160 158 else:
161 159 # Less indented than the first line - stop dedenting
162 160 yield line
163 161 break
164 162 else:
165 163 yield first_line
166 164
167 165 # Pass the remaining lines through without dedenting
168 166 for line in it:
169 167 yield line
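# --- Editor's note: hedged illustration, not part of this changeset. ---
# strip_initial_indent() removes the first line's indent from the leading block
# and stops dedenting at the first less-indented line (cf. issue 9775).
# Module path assumed to be IPython.core.magics.code.
from IPython.core.magics.code import strip_initial_indent

lines = ["    def f():", "        return 1", "print('top level')"]
assert list(strip_initial_indent(lines)) == ["def f():", "    return 1", "print('top level')"]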
170 168
171 169
172 170 class InteractivelyDefined(Exception):
173 171 """Exception for interactively defined variable in magic_edit"""
174 172 def __init__(self, index):
175 173 self.index = index
176 174
177 175
178 176 @magics_class
179 177 class CodeMagics(Magics):
180 178 """Magics related to code management (loading, saving, editing, ...)."""
181 179
182 180 def __init__(self, *args, **kwargs):
183 181 self._knowntemps = set()
184 182 super(CodeMagics, self).__init__(*args, **kwargs)
185 183
186 184 @line_magic
187 185 def save(self, parameter_s=''):
188 186 """Save a set of lines or a macro to a given filename.
189 187
190 188 Usage:\\
191 189 %save [options] filename n1-n2 n3-n4 ... n5 .. n6 ...
192 190
193 191 Options:
194 192
195 193 -r: use 'raw' input. By default, the 'processed' history is used,
196 194 so that magics are loaded in their transformed version to valid
197 195 Python. If this option is given, the raw input as typed at the
198 196 command line is used instead.
199 197
200 198 -f: force overwrite. If file exists, %save will prompt for overwrite
201 199 unless -f is given.
202 200
203 201 -a: append to the file instead of overwriting it.
204 202
205 203 This function uses the same syntax as %history for input ranges,
206 204 then saves the lines to the filename you specify.
207 205
208 206 It adds a '.py' extension to the file if you don't do so yourself, and
209 207 it asks for confirmation before overwriting existing files.
210 208
211 209 If `-r` option is used, the default extension is `.ipy`.
212 210 """
213 211
214 212 opts,args = self.parse_options(parameter_s,'fra',mode='list')
215 213 if not args:
216 214 raise UsageError('Missing filename.')
217 215 raw = 'r' in opts
218 216 force = 'f' in opts
219 217 append = 'a' in opts
220 218 mode = 'a' if append else 'w'
221 219 ext = u'.ipy' if raw else u'.py'
222 220 fname, codefrom = args[0], " ".join(args[1:])
223 221 if not fname.endswith((u'.py',u'.ipy')):
224 222 fname += ext
225 223 file_exists = os.path.isfile(fname)
226 224 if file_exists and not force and not append:
227 225 try:
228 226 overwrite = self.shell.ask_yes_no('File `%s` exists. Overwrite (y/[N])? ' % fname, default='n')
229 227 except StdinNotImplementedError:
230 228 print("File `%s` exists. Use `%%save -f %s` to force overwrite" % (fname, parameter_s))
231 229 return
232 230 if not overwrite :
233 231 print('Operation cancelled.')
234 232 return
235 233 try:
236 234 cmds = self.shell.find_user_code(codefrom,raw)
237 235 except (TypeError, ValueError) as e:
238 236 print(e.args[0])
239 237 return
240 238 out = py3compat.cast_unicode(cmds)
241 239 with io.open(fname, mode, encoding="utf-8") as f:
242 240 if not file_exists or not append:
243 241 f.write(u"# coding: utf-8\n")
244 242 f.write(out)
245 243 # make sure we end on a newline
246 244 if not out.endswith(u'\n'):
247 245 f.write(u'\n')
248 246 print('The following commands were written to file `%s`:' % fname)
249 247 print(cmds)
250 248
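# Illustrative %save usage (hypothetical file names and history ranges):
#   %save my_session 1-10          # write input lines 1-10 to my_session.py
#   %save -a my_session 12 15-17   # append further lines to the same file
#   %save -r raw_session 1-10      # save raw (untransformed) input as .ipy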
251 249 @line_magic
252 250 def pastebin(self, parameter_s=''):
253 251 """Upload code to Github's Gist paste bin, returning the URL.
254 252
255 253 Usage:\\
256 254 %pastebin [-d "Custom description"] 1-7
257 255
258 256 The argument can be an input history range, a filename, or the name of a
259 257 string or macro.
260 258
261 259 Options:
262 260
263 261 -d: Pass a custom description for the gist. The default will say
264 262 "Pasted from IPython".
265 263 """
266 264 opts, args = self.parse_options(parameter_s, 'd:')
267 265
268 266 try:
269 267 code = self.shell.find_user_code(args)
270 268 except (ValueError, TypeError) as e:
271 269 print(e.args[0])
272 270 return
273 271
274 272 # Deferred import
275 273 try:
276 274 from urllib.request import urlopen # Py 3
277 275 except ImportError:
278 276 from urllib2 import urlopen
279 277 import json
280 278 post_data = json.dumps({
281 279 "description": opts.get('d', "Pasted from IPython"),
282 280 "public": True,
283 281 "files": {
284 282 "file1.py": {
285 283 "content": code
286 284 }
287 285 }
288 286 }).encode('utf-8')
289 287
290 288 response = urlopen("https://api.github.com/gists", post_data)
291 289 response_data = json.loads(response.read().decode('utf-8'))
292 290 return response_data['html_url']
293 291
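# Illustrative %pastebin usage (editor's sketch; the description is made up,
# and the value returned is the html_url of the created public gist):
#   url = %pastebin -d "scratch work" 1-7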
294 292 @line_magic
295 293 def loadpy(self, arg_s):
296 294 """Alias of `%load`
297 295
298 296 `%loadpy` has gained some flexibility and dropped the requirement of a `.py`
299 297 extension, so it has been renamed simply to `%load`. See `%load`'s
300 298 docstring for more info.
301 299 """
302 300 self.load(arg_s)
303 301
304 302 @line_magic
305 303 def load(self, arg_s):
306 304 """Load code into the current frontend.
307 305
308 306 Usage:\\
309 307 %load [options] source
310 308
311 309 where source can be a filename, URL, input history range, macro, or
312 310 element in the user namespace
313 311
314 312 Options:
315 313
316 314 -r <lines>: Specify lines or ranges of lines to load from the source.
317 315 Ranges could be specified as x-y (x..y) or in python-style x:y
318 316 (x..(y-1)). Both limits x and y can be left blank (meaning the
319 317 beginning and end of the file, respectively).
320 318
321 319 -s <symbols>: Specify function or classes to load from python source.
322 320
323 321 -y : Don't ask for confirmation when loading source above 200 000 characters.
324 322
325 323 -n : Include the user's namespace when searching for source code.
326 324
327 325 This magic command can take a local filename, a URL, an input history
328 326 range (see %history) or a macro as argument. It will prompt for
329 327 confirmation before loading source with more than 200 000 characters, unless
330 328 the -y flag is passed or the frontend does not support raw_input::
331 329
332 330 %load myscript.py
333 331 %load 7-27
334 332 %load myMacro
335 333 %load http://www.example.com/myscript.py
336 334 %load -r 5-10 myscript.py
337 335 %load -r 10-20,30,40: foo.py
338 336 %load -s MyClass,wonder_function myscript.py
339 337 %load -n MyClass
340 338 %load -n my_module.wonder_function
341 339 """
342 340 opts,args = self.parse_options(arg_s,'yns:r:')
343 341
344 342 if not args:
345 343 raise UsageError('Missing filename, URL, input history range, '
346 344 'macro, or element in the user namespace.')
347 345
348 346 search_ns = 'n' in opts
349 347
350 348 contents = self.shell.find_user_code(args, search_ns=search_ns)
351 349
352 350 if 's' in opts:
353 351 try:
354 352 blocks, not_found = extract_symbols(contents, opts['s'])
355 353 except SyntaxError:
356 354 # non python code
357 355 error("Unable to parse the input as valid Python code")
358 356 return
359 357
360 358 if len(not_found) == 1:
361 359 warn('The symbol `%s` was not found' % not_found[0])
362 360 elif len(not_found) > 1:
363 361 warn('The symbols %s were not found' % get_text_list(not_found,
364 362 wrap_item_with='`')
365 363 )
366 364
367 365 contents = '\n'.join(blocks)
368 366
369 367 if 'r' in opts:
370 368 ranges = opts['r'].replace(',', ' ')
371 369 lines = contents.split('\n')
372 370 slices = extract_code_ranges(ranges)
373 371 contents = [lines[slice(*slc)] for slc in slices]
374 372 contents = '\n'.join(strip_initial_indent(chain.from_iterable(contents)))
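# Illustrative mapping (assumed behaviour of extract_code_ranges):
# ``-r 5-10,2`` yields the slices (4, 10) and (1, 2), i.e. 1-based lines
# 5..10 and line 2, which are then dedented and re-joined above.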
375 373
376 374 l = len(contents)
377 375
378 376 # 200 000 is ~ 2500 full 80-character lines,
379 377 # so, on average, more than 5000 lines
380 378 if l > 200000 and 'y' not in opts:
381 379 try:
382 380 ans = self.shell.ask_yes_no(("The text you're trying to load seems pretty big"\
383 381 " (%d characters). Continue (y/[N]) ?" % l), default='n' )
384 382 except StdinNotImplementedError:
385 383 # assume yes if raw input is not implemented
386 384 ans = True
387 385
388 386 if ans is False :
389 387 print('Operation cancelled.')
390 388 return
391 389
392 390 contents = "# %load {}\n".format(arg_s) + contents
393 391
394 392 self.shell.set_next_input(contents, replace=True)
395 393
396 394 @staticmethod
397 395 def _find_edit_target(shell, args, opts, last_call):
398 396 """Utility method used by magic_edit to find what to edit."""
399 397
400 398 def make_filename(arg):
401 399 "Make a filename from the given args"
402 400 try:
403 401 filename = get_py_filename(arg)
404 402 except IOError:
405 403 # If it ends with .py but doesn't already exist, assume we want
406 404 # a new file.
407 405 if arg.endswith('.py'):
408 406 filename = arg
409 407 else:
410 408 filename = None
411 409 return filename
412 410
413 411 # Set a few locals from the options for convenience:
414 412 opts_prev = 'p' in opts
415 413 opts_raw = 'r' in opts
416 414
417 415 # custom exceptions
418 416 class DataIsObject(Exception): pass
419 417
420 418 # Default line number value
421 419 lineno = opts.get('n',None)
422 420
423 421 if opts_prev:
424 422 args = '_%s' % last_call[0]
425 423 if args not in shell.user_ns:
426 424 args = last_call[1]
427 425
428 426 # by default this is done with temp files, except when the given
429 427 # arg is a filename
430 428 use_temp = True
431 429
432 430 data = ''
433 431
434 432 # First, see if the arguments should be a filename.
435 433 filename = make_filename(args)
436 434 if filename:
437 435 use_temp = False
438 436 elif args:
439 437 # Mode where user specifies ranges of lines, like in %macro.
440 438 data = shell.extract_input_lines(args, opts_raw)
441 439 if not data:
442 440 try:
443 441 # Load the parameter given as a variable. If not a string,
444 442 # process it as an object instead (below)
445 443
446 444 #print '*** args',args,'type',type(args) # dbg
447 445 data = eval(args, shell.user_ns)
448 446 if not isinstance(data, string_types):
449 447 raise DataIsObject
450 448
451 449 except (NameError,SyntaxError):
452 450 # given argument is not a variable, try as a filename
453 451 filename = make_filename(args)
454 452 if filename is None:
455 453 warn("Argument given (%s) can't be found as a variable "
456 454 "or as a filename." % args)
457 455 return (None, None, None)
458 456 use_temp = False
459 457
460 458 except DataIsObject:
461 459 # macros have a special edit function
462 460 if isinstance(data, Macro):
463 461 raise MacroToEdit(data)
464 462
465 463 # For objects, try to edit the file where they are defined
466 464 filename = find_file(data)
467 465 if filename:
468 466 if 'fakemodule' in filename.lower() and \
469 467 inspect.isclass(data):
470 468 # class created by %edit? Try to find source
471 469 # by looking for method definitions instead, the
472 470 # __module__ in those classes is FakeModule.
473 471 attrs = [getattr(data, aname) for aname in dir(data)]
474 472 for attr in attrs:
475 473 if not inspect.ismethod(attr):
476 474 continue
477 475 filename = find_file(attr)
478 476 if filename and \
479 477 'fakemodule' not in filename.lower():
480 478 # change the attribute to be the edit
481 479 # target instead
482 480 data = attr
483 481 break
484 482
485 483 m = ipython_input_pat.match(os.path.basename(filename))
486 484 if m:
487 485 raise InteractivelyDefined(int(m.groups()[0]))
488 486
489 487 datafile = 1
490 488 if filename is None:
491 489 filename = make_filename(args)
492 490 datafile = 1
493 491 if filename is not None:
494 492 # only warn about this if we get a real name
495 493 warn('Could not find file where `%s` is defined.\n'
496 494 'Opening a file named `%s`' % (args, filename))
497 495 # Now, make sure we can actually read the source (if it was
498 496 # in a temp file it's gone by now).
499 497 if datafile:
500 498 if lineno is None:
501 499 lineno = find_source_lines(data)
502 500 if lineno is None:
503 501 filename = make_filename(args)
504 502 if filename is None:
505 503 warn('The file where `%s` was defined '
506 504 'cannot be read or found.' % data)
507 505 return (None, None, None)
508 506 use_temp = False
509 507
510 508 if use_temp:
511 509 filename = shell.mktempfile(data)
512 510 print('IPython will make a temporary file named:',filename)
513 511
514 512 # use last_call to remember the state of the previous call, but don't
515 513 # let it be clobbered by successive '-p' calls.
516 514 try:
517 515 last_call[0] = shell.displayhook.prompt_count
518 516 if not opts_prev:
519 517 last_call[1] = args
520 518 except:
521 519 pass
522 520
523 521
524 522 return filename, lineno, use_temp
525 523
526 524 def _edit_macro(self,mname,macro):
527 525 """open an editor with the macro data in a file"""
528 526 filename = self.shell.mktempfile(macro.value)
529 527 self.shell.hooks.editor(filename)
530 528
531 529 # and make a new macro object, to replace the old one
532 530 with open(filename) as mfile:
533 531 mvalue = mfile.read()
534 532 self.shell.user_ns[mname] = Macro(mvalue)
535 533
536 534 @skip_doctest
537 535 @line_magic
538 536 def edit(self, parameter_s='',last_call=['','']):
539 537 """Bring up an editor and execute the resulting code.
540 538
541 539 Usage:
542 540 %edit [options] [args]
543 541
544 542 %edit runs IPython's editor hook. The default version of this hook is
545 543 set to call the editor specified by your $EDITOR environment variable.
546 544 If this isn't found, it will default to vi under Linux/Unix and to
547 545 notepad under Windows. See the end of this docstring for how to change
548 546 the editor hook.
549 547
550 548 You can also set the value of this editor via the
551 549 ``TerminalInteractiveShell.editor`` option in your configuration file.
552 550 This is useful if you wish to use a different editor from your typical
553 551 default with IPython (and for Windows users who typically don't set
554 552 environment variables).
555 553
556 554 This command allows you to conveniently edit multi-line code right in
557 555 your IPython session.
558 556
559 557 If called without arguments, %edit opens up an empty editor with a
560 558 temporary file and will execute the contents of this file when you
561 559 close it (don't forget to save it!).
562 560
563 561
564 562 Options:
565 563
566 564 -n <number>: open the editor at a specified line number. By default,
567 565 the IPython editor hook uses the unix syntax 'editor +N filename', but
568 566 you can configure this by providing your own modified hook if your
569 567 favorite editor supports line-number specifications with a different
570 568 syntax.
571 569
572 570 -p: this will call the editor with the same data as the previous time
573 571 it was used, regardless of how long ago (in your current session) it
574 572 was.
575 573
576 574 -r: use 'raw' input. This option only applies to input taken from the
577 575 user's history. By default, the 'processed' history is used, so that
578 576 magics are loaded in their transformed version as valid Python. If
579 577 this option is given, the raw input as typed at the command line is
580 578 used instead. When you exit the editor, it will be executed by
581 579 IPython's own processor.
582 580
583 581 -x: do not execute the edited code immediately upon exit. This is
584 582 mainly useful if you are editing programs which need to be called with
585 583 command line arguments, which you can then do using %run.
586 584
587 585
588 586 Arguments:
589 587
590 588 If arguments are given, the following possibilities exist:
591 589
592 590 - If the argument is a filename, IPython will load that into the
593 591 editor. It will execute its contents with execfile() when you exit,
594 592 loading any code in the file into your interactive namespace.
595 593
596 594 - The arguments are ranges of input history, e.g. "7 ~1/4-6".
597 595 The syntax is the same as in the %history magic.
598 596
599 597 - If the argument is a string variable, its contents are loaded
600 598 into the editor. You can thus edit any string which contains
601 599 python code (including the result of previous edits).
602 600
603 601 - If the argument is the name of an object (other than a string),
604 602 IPython will try to locate the file where it was defined and open the
605 603 editor at the point where it is defined. You can use `%edit function`
606 604 to load an editor exactly at the point where 'function' is defined,
607 605 edit it and have the file be executed automatically.
608 606
609 607 - If the object is a macro (see %macro for details), this opens up your
610 608 specified editor with a temporary file containing the macro's data.
611 609 Upon exit, the macro is reloaded with the contents of the file.
612 610
613 611 Note: opening at an exact line is only supported under Unix, and some
614 612 editors (like kedit and gedit up to Gnome 2.8) do not understand the
615 613 '+NUMBER' parameter necessary for this feature. Good editors like
616 614 (X)Emacs, vi, jed, pico and joe all do.
617 615
618 616 After executing your code, %edit will return as output the code you
619 617 typed in the editor (except when it was an existing file). This way
620 618 you can reload the code in further invocations of %edit as a variable,
621 619 via _<NUMBER> or Out[<NUMBER>], where <NUMBER> is the prompt number of
622 620 the output.
623 621
624 622 Note that %edit is also available through the alias %ed.
625 623
626 624 This is an example of creating a simple function inside the editor and
627 625 then modifying it. First, start up the editor::
628 626
629 627 In [1]: edit
630 628 Editing... done. Executing edited code...
631 629 Out[1]: 'def foo():\\n print "foo() was defined in an editing
632 630 session"\\n'
633 631
634 632 We can then call the function foo()::
635 633
636 634 In [2]: foo()
637 635 foo() was defined in an editing session
638 636
639 637 Now we edit foo. IPython automatically loads the editor with the
640 638 (temporary) file where foo() was previously defined::
641 639
642 640 In [3]: edit foo
643 641 Editing... done. Executing edited code...
644 642
645 643 And if we call foo() again we get the modified version::
646 644
647 645 In [4]: foo()
648 646 foo() has now been changed!
649 647
650 648 Here is an example of how to edit a code snippet successive
651 649 times. First we call the editor::
652 650
653 651 In [5]: edit
654 652 Editing... done. Executing edited code...
655 653 hello
656 654 Out[5]: "print 'hello'\\n"
657 655
658 656 Now we call it again with the previous output (stored in _)::
659 657
660 658 In [6]: edit _
661 659 Editing... done. Executing edited code...
662 660 hello world
663 661 Out[6]: "print 'hello world'\\n"
664 662
665 663 Now we call it with the output #8 (stored in _8, also as Out[8])::
666 664
667 665 In [7]: edit _8
668 666 Editing... done. Executing edited code...
669 667 hello again
670 668 Out[7]: "print 'hello again'\\n"
671 669
672 670
673 671 Changing the default editor hook:
674 672
675 673 If you wish to write your own editor hook, you can put it in a
676 674 configuration file which you load at startup time. The default hook
677 675 is defined in the IPython.core.hooks module, and you can use that as a
678 676 starting example for further modifications. That file also has
679 677 general instructions on how to set a new hook for use once you've
680 678 defined it."""
681 679 opts,args = self.parse_options(parameter_s,'prxn:')
682 680
683 681 try:
684 682 filename, lineno, is_temp = self._find_edit_target(self.shell,
685 683 args, opts, last_call)
686 684 except MacroToEdit as e:
687 685 self._edit_macro(args, e.args[0])
688 686 return
689 687 except InteractivelyDefined as e:
690 688 print("Editing In[%i]" % e.index)
691 689 args = str(e.index)
692 690 filename, lineno, is_temp = self._find_edit_target(self.shell,
693 691 args, opts, last_call)
694 692 if filename is None:
695 693 # nothing was found, warnings have already been issued,
696 694 # just give up.
697 695 return
698 696
699 697 if is_temp:
700 698 self._knowntemps.add(filename)
701 699 elif (filename in self._knowntemps):
702 700 is_temp = True
703 701
704 702
705 703 # do actual editing here
706 704 print('Editing...', end=' ')
707 705 sys.stdout.flush()
708 706 try:
709 707 # Quote filenames that may have spaces in them
710 708 if ' ' in filename:
711 709 filename = "'%s'" % filename
712 710 self.shell.hooks.editor(filename,lineno)
713 711 except TryNext:
714 712 warn('Could not open editor')
715 713 return
716 714
717 715 # XXX TODO: should this be generalized for all string vars?
718 716 # For now, this is special-cased to blocks created by cpaste
719 717 if args.strip() == 'pasted_block':
720 718 with open(filename, 'r') as f:
721 719 self.shell.user_ns['pasted_block'] = f.read()
722 720
723 721 if 'x' in opts: # -x prevents actual execution
724 722 print()
725 723 else:
726 724 print('done. Executing edited code...')
727 725 with preserve_keys(self.shell.user_ns, '__file__'):
728 726 if not is_temp:
729 727 self.shell.user_ns['__file__'] = filename
730 728 if 'r' in opts: # Untranslated IPython code
731 729 with open(filename, 'r') as f:
732 730 source = f.read()
733 731 self.shell.run_cell(source, store_history=False)
734 732 else:
735 733 self.shell.safe_execfile(filename, self.shell.user_ns,
736 734 self.shell.user_ns)
737 735
738 736 if is_temp:
739 737 try:
740 738 return open(filename).read()
741 739 except IOError as msg:
742 740 if msg.filename == filename:
743 741 warn('File not found. Did you forget to save?')
744 742 return
745 743 else:
746 744 self.shell.showtraceback()
@@ -1,159 +1,157 b''
1 1 """Implementation of configuration-related magic functions.
2 2 """
3 from __future__ import print_function
4 from __future__ import absolute_import
5 3 #-----------------------------------------------------------------------------
6 4 # Copyright (c) 2012 The IPython Development Team.
7 5 #
8 6 # Distributed under the terms of the Modified BSD License.
9 7 #
10 8 # The full license is in the file COPYING.txt, distributed with this software.
11 9 #-----------------------------------------------------------------------------
12 10
13 11 #-----------------------------------------------------------------------------
14 12 # Imports
15 13 #-----------------------------------------------------------------------------
16 14
17 15 # Stdlib
18 16 import re
19 17
20 18 # Our own packages
21 19 from IPython.core.error import UsageError
22 20 from IPython.core.magic import Magics, magics_class, line_magic
23 21 from logging import error
24 22
25 23 #-----------------------------------------------------------------------------
26 24 # Magic implementation classes
27 25 #-----------------------------------------------------------------------------
28 26
29 27 reg = re.compile(r'^\w+\.\w+$')
30 28 @magics_class
31 29 class ConfigMagics(Magics):
32 30
33 31 def __init__(self, shell):
34 32 super(ConfigMagics, self).__init__(shell)
35 33 self.configurables = []
36 34
37 35 @line_magic
38 36 def config(self, s):
39 37 """configure IPython
40 38
41 39 %config Class[.trait=value]
42 40
43 41 This magic exposes most of the IPython config system. Any
44 42 Configurable class should be able to be configured with the simple
45 43 line::
46 44
47 45 %config Class.trait=value
48 46
49 47 Where `value` will be resolved in the user's namespace, if it is an
50 48 expression or variable name.
51 49
52 50 Examples
53 51 --------
54 52
55 53 To see what classes are available for config, pass no arguments::
56 54
57 55 In [1]: %config
58 56 Available objects for config:
59 57 TerminalInteractiveShell
60 58 HistoryManager
61 59 PrefilterManager
62 60 AliasManager
63 61 IPCompleter
64 62 DisplayFormatter
65 63
66 64 To view what is configurable on a given class, just pass the class
67 65 name::
68 66
69 67 In [2]: %config IPCompleter
70 68 IPCompleter options
71 69 -----------------
72 70 IPCompleter.omit__names=<Enum>
73 71 Current: 2
74 72 Choices: (0, 1, 2)
75 73 Instruct the completer to omit private method names
76 74 Specifically, when completing on ``object.<tab>``.
77 75 When 2 [default]: all names that start with '_' will be excluded.
78 76 When 1: all 'magic' names (``__foo__``) will be excluded.
79 77 When 0: nothing will be excluded.
80 78 IPCompleter.merge_completions=<CBool>
81 79 Current: True
82 80 Whether to merge completion results into a single list
83 81 If False, only the completion results from the first non-empty
84 82 completer will be returned.
85 83 IPCompleter.limit_to__all__=<CBool>
86 84 Current: False
87 85 Instruct the completer to use __all__ for the completion
88 86 Specifically, when completing on ``object.<tab>``.
89 87 When True: only those names in obj.__all__ will be included.
90 88 When False [default]: the __all__ attribute is ignored
91 89 IPCompleter.greedy=<CBool>
92 90 Current: False
93 91 Activate greedy completion
94 92 This will enable completion on elements of lists, results of
95 93 function calls, etc., but can be unsafe because the code is
96 94 actually evaluated on TAB.
97 95
98 96 but the real use is in setting values::
99 97
100 98 In [3]: %config IPCompleter.greedy = True
101 99
102 100 and these values are read from the user_ns if they are variables::
103 101
104 102 In [4]: feeling_greedy=False
105 103
106 104 In [5]: %config IPCompleter.greedy = feeling_greedy
107 105
108 106 """
109 107 from traitlets.config.loader import Config
110 108 # some IPython objects are Configurable, but do not yet have
111 109 # any configurable traits. Exclude them from the effects of
112 110 # this magic, as their presence is just noise:
113 111 configurables = [ c for c in self.shell.configurables
114 112 if c.__class__.class_traits(config=True) ]
115 113 classnames = [ c.__class__.__name__ for c in configurables ]
116 114
117 115 line = s.strip()
118 116 if not line:
119 117 # print available configurable names
120 118 print("Available objects for config:")
121 119 for name in classnames:
122 120 print(" ", name)
123 121 return
124 122 elif line in classnames:
125 123 # `%config TerminalInteractiveShell` will print trait info for
126 124 # TerminalInteractiveShell
127 125 c = configurables[classnames.index(line)]
128 126 cls = c.__class__
129 127 help = cls.class_get_help(c)
130 128 # strip leading '--' from cl-args:
131 129 help = re.sub(re.compile(r'^--', re.MULTILINE), '', help)
132 130 print(help)
133 131 return
134 132 elif reg.match(line):
135 133 cls, attr = line.split('.')
136 134 return getattr(configurables[classnames.index(cls)],attr)
137 135 elif '=' not in line:
138 136 msg = "Invalid config statement: %r, "\
139 137 "should be `Class.trait = value`."
140 138
141 139 ll = line.lower()
142 140 for classname in classnames:
143 141 if ll == classname.lower():
144 142 msg = msg + '\nDid you mean %s (note the case)?' % classname
145 143 break
146 144
147 145 raise UsageError( msg % line)
148 146
149 147 # otherwise, assume we are setting configurables.
150 148 # leave quotes on args when splitting, because we want
151 149 # unquoted args to eval in user_ns
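# e.g. ``%config IPCompleter.greedy = feeling_greedy`` executes, roughly,
# ``cfg.IPCompleter.greedy = feeling_greedy`` with names resolved in user_ns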
152 150 cfg = Config()
153 151 exec("cfg."+line, locals(), self.shell.user_ns)
154 152
155 153 for configurable in configurables:
156 154 try:
157 155 configurable.update_config(cfg)
158 156 except Exception as e:
159 157 error(e)
@@ -1,1370 +1,1368 b''
1 1 # -*- coding: utf-8 -*-
2 2 """Implementation of execution-related magic functions."""
3 3
4 4 # Copyright (c) IPython Development Team.
5 5 # Distributed under the terms of the Modified BSD License.
6 6
7 from __future__ import print_function
8 from __future__ import absolute_import
9 7
10 8 import ast
11 9 import bdb
12 10 import gc
13 11 import itertools
14 12 import os
15 13 import sys
16 14 import time
17 15 import timeit
18 16 import math
19 17 from pdb import Restart
20 18
21 19 # cProfile was added in Python2.5
22 20 try:
23 21 import cProfile as profile
24 22 import pstats
25 23 except ImportError:
26 24 # profile isn't bundled by default in Debian for license reasons
27 25 try:
28 26 import profile, pstats
29 27 except ImportError:
30 28 profile = pstats = None
31 29
32 30 from IPython.core import oinspect
33 31 from IPython.core import magic_arguments
34 32 from IPython.core import page
35 33 from IPython.core.error import UsageError
36 34 from IPython.core.macro import Macro
37 35 from IPython.core.magic import (Magics, magics_class, line_magic, cell_magic,
38 36 line_cell_magic, on_off, needs_local_scope)
39 37 from IPython.testing.skipdoctest import skip_doctest
40 38 from IPython.utils import py3compat
41 39 from IPython.utils.py3compat import builtin_mod, iteritems, PY3
42 40 from IPython.utils.contexts import preserve_keys
43 41 from IPython.utils.capture import capture_output
44 42 from IPython.utils.ipstruct import Struct
45 43 from IPython.utils.module_paths import find_mod
46 44 from IPython.utils.path import get_py_filename, shellglob
47 45 from IPython.utils.timing import clock, clock2
48 46 from warnings import warn
49 47 from logging import error
50 48
51 49 if PY3:
52 50 from io import StringIO
53 51 else:
54 52 from StringIO import StringIO
55 53
56 54 #-----------------------------------------------------------------------------
57 55 # Magic implementation classes
58 56 #-----------------------------------------------------------------------------
59 57
60 58
61 59 class TimeitResult(object):
62 60 """
63 61 Object returned by the timeit magic with info about the run.
64 62
65 63 Contains the following attributes:
66 64
67 65 loops: (int) number of loops done per measurement
68 66 repeat: (int) number of times the measurement has been repeated
69 67 best, worst: (float) best / worst execution time / number (in s)
70 68 all_runs: (list of float) execution time of each run (in s)
71 69 compile_time: (float) time of statement compilation (s)
72 70
73 71 """
74 72 def __init__(self, loops, repeat, best, worst, all_runs, compile_time, precision):
75 73 self.loops = loops
76 74 self.repeat = repeat
77 75 self.best = best
78 76 self.worst = worst
79 77 self.all_runs = all_runs
80 78 self.compile_time = compile_time
81 79 self._precision = precision
82 80 self.timings = [ dt / self.loops for dt in all_runs]
83 81
84 82 @property
85 83 def average(self):
86 84 return math.fsum(self.timings) / len(self.timings)
87 85
88 86 @property
89 87 def stdev(self):
90 88 mean = self.average
91 89 return (math.fsum([(x - mean) ** 2 for x in self.timings]) / len(self.timings)) ** 0.5
92 90
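# Note (editor's sketch): `timings` holds the per-loop time of each run
# (all_runs[i] / loops); `average` is their mean and `stdev` their population
# standard deviation. For timings of [1.0, 2.0, 3.0] s this gives an average
# of 2.0 s and a stdev of ((1 + 0 + 1) / 3) ** 0.5 ~= 0.816 s.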
93 91 def __str__(self):
94 92 return (u"%s loop%s, average of %d: %s +- %s per loop (using standard deviation)"
95 93 % (self.loops,"" if self.loops == 1 else "s", self.repeat,
96 94 _format_time(self.average, self._precision),
97 95 _format_time(self.stdev, self._precision)))
98 96
99 97 def _repr_pretty_(self, p, cycle):
100 98 unic = self.__str__()
101 99 p.text(u'<TimeitResult : '+unic+u'>')
102 100
103 101
104 102
105 103 class TimeitTemplateFiller(ast.NodeTransformer):
106 104 """Fill in the AST template for timing execution.
107 105
108 106 This is quite closely tied to the template definition, which is in
109 107 :meth:`ExecutionMagics.timeit`.
110 108 """
111 109 def __init__(self, ast_setup, ast_stmt):
112 110 self.ast_setup = ast_setup
113 111 self.ast_stmt = ast_stmt
114 112
115 113 def visit_FunctionDef(self, node):
116 114 "Fill in the setup statement"
117 115 self.generic_visit(node)
118 116 if node.name == "inner":
119 117 node.body[:1] = self.ast_setup.body
120 118
121 119 return node
122 120
123 121 def visit_For(self, node):
124 122 "Fill in the statement to be timed"
125 123 if getattr(getattr(node.body[0], 'value', None), 'id', None) == 'stmt':
126 124 node.body = self.ast_stmt.body
127 125 return node
128 126
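# The AST template being filled is roughly of this shape (a sketch, not the
# literal source; see ExecutionMagics.timeit for the real template):
#   def inner(_it, _timer):
#       <setup>                # substituted by visit_FunctionDef
#       _t0 = _timer()
#       for _i in _it:
#           <stmt>             # substituted by visit_For
#       _t1 = _timer()
#       return _t1 - _t0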
129 127
130 128 class Timer(timeit.Timer):
131 129 """Timer class that explicitly uses self.inner
132 130
133 131 which is an undocumented implementation detail of CPython,
134 132 not shared by PyPy.
135 133 """
136 134 # Timer.timeit copied from CPython 3.4.2
137 135 def timeit(self, number=timeit.default_number):
138 136 """Time 'number' executions of the main statement.
139 137
140 138 To be precise, this executes the setup statement once, and
141 139 then returns the time it takes to execute the main statement
142 140 a number of times, as a float measured in seconds. The
143 141 argument is the number of times through the loop, defaulting
144 142 to one million. The main statement, the setup statement and
145 143 the timer function to be used are passed to the constructor.
146 144 """
147 145 it = itertools.repeat(None, number)
148 146 gcold = gc.isenabled()
149 147 gc.disable()
150 148 try:
151 149 timing = self.inner(it, self.timer)
152 150 finally:
153 151 if gcold:
154 152 gc.enable()
155 153 return timing
156 154
157 155
158 156 @magics_class
159 157 class ExecutionMagics(Magics):
160 158 """Magics related to code execution, debugging, profiling, etc.
161 159
162 160 """
163 161
164 162 def __init__(self, shell):
165 163 super(ExecutionMagics, self).__init__(shell)
166 164 if profile is None:
167 165 self.prun = self.profile_missing_notice
168 166 # Default execution function used to actually run user code.
169 167 self.default_runner = None
170 168
171 169 def profile_missing_notice(self, *args, **kwargs):
172 170 error("""\
173 171 The profile module could not be found. It has been removed from the standard
174 172 python packages because of its non-free license. To use profiling, install the
175 173 python-profiler package from non-free.""")
176 174
177 175 @skip_doctest
178 176 @line_cell_magic
179 177 def prun(self, parameter_s='', cell=None):
180 178
181 179 """Run a statement through the python code profiler.
182 180
183 181 Usage, in line mode:
184 182 %prun [options] statement
185 183
186 184 Usage, in cell mode:
187 185 %%prun [options] [statement]
188 186 code...
189 187 code...
190 188
191 189 In cell mode, the additional code lines are appended to the (possibly
192 190 empty) statement in the first line. Cell mode allows you to easily
193 191 profile multiline blocks without having to put them in a separate
194 192 function.
195 193
196 194 The given statement (which doesn't require quote marks) is run via the
197 195 python profiler in a manner similar to the profile.run() function.
198 196 Namespaces are internally managed to work correctly; profile.run
199 197 cannot be used in IPython because it makes certain assumptions about
200 198 namespaces which do not hold under IPython.
201 199
202 200 Options:
203 201
204 202 -l <limit>
205 203 you can place restrictions on what or how much of the
206 204 profile gets printed. The limit value can be:
207 205
208 206 * A string: only information for function names containing this string
209 207 is printed.
210 208
211 209 * An integer: only this many lines are printed.
212 210
213 211 * A float (between 0 and 1): this fraction of the report is printed
214 212 (for example, use a limit of 0.4 to see the topmost 40% only).
215 213
216 214 You can combine several limits with repeated use of the option. For
217 215 example, ``-l __init__ -l 5`` will print only the topmost 5 lines of
218 216 information about class constructors.
219 217
220 218 -r
221 219 return the pstats.Stats object generated by the profiling. This
222 220 object has all the information about the profile in it, and you can
223 221 later use it for further analysis or in other functions.
224 222
225 223 -s <key>
226 224 sort profile by given key. You can provide more than one key
227 225 by using the option several times: '-s key1 -s key2 -s key3...'. The
228 226 default sorting key is 'time'.
229 227
230 228 The following is copied verbatim from the profile documentation
231 229 referenced below:
232 230
233 231 When more than one key is provided, additional keys are used as
234 232 secondary criteria when there is equality in all keys selected
235 233 before them.
236 234
237 235 Abbreviations can be used for any key names, as long as the
238 236 abbreviation is unambiguous. The following are the keys currently
239 237 defined:
240 238
241 239 ============ =====================
242 240 Valid Arg Meaning
243 241 ============ =====================
244 242 "calls" call count
245 243 "cumulative" cumulative time
246 244 "file" file name
247 245 "module" file name
248 246 "pcalls" primitive call count
249 247 "line" line number
250 248 "name" function name
251 249 "nfl" name/file/line
252 250 "stdname" standard name
253 251 "time" internal time
254 252 ============ =====================
255 253
256 254 Note that all sorts on statistics are in descending order (placing
257 255 most time-consuming items first), whereas name, file, and line number
258 256 searches are in ascending order (i.e., alphabetical). The subtle
259 257 distinction between "nfl" and "stdname" is that the standard name is a
260 258 sort of the name as printed, which means that the embedded line
261 259 numbers get compared in an odd way. For example, lines 3, 20, and 40
262 260 would (if the file names were the same) appear in the string order
263 261 "20" "3" and "40". In contrast, "nfl" does a numeric compare of the
264 262 line numbers. In fact, sort_stats("nfl") is the same as
265 263 sort_stats("name", "file", "line").
266 264
267 265 -T <filename>
268 266 save profile results as shown on screen to a text
269 267 file. The profile is still shown on screen.
270 268
271 269 -D <filename>
272 270 save (via dump_stats) profile statistics to given
273 271 filename. This data is in a format understood by the pstats module, and
274 272 is generated by a call to the dump_stats() method of profile
275 273 objects. The profile is still shown on screen.
276 274
277 275 -q
278 276 suppress output to the pager. Best used with -T and/or -D above.
279 277
280 278 If you want to run complete programs under the profiler's control, use
281 279 ``%run -p [prof_opts] filename.py [args to program]`` where prof_opts
282 280 contains profiler specific options as described here.
283 281
284 282 You can read the complete documentation for the profile module with::
285 283
286 284 In [1]: import profile; profile.help()
287 285 """
288 286 opts, arg_str = self.parse_options(parameter_s, 'D:l:rs:T:q',
289 287 list_all=True, posix=False)
290 288 if cell is not None:
291 289 arg_str += '\n' + cell
292 290 arg_str = self.shell.input_splitter.transform_cell(arg_str)
293 291 return self._run_with_profiler(arg_str, opts, self.shell.user_ns)
294 292
295 293 def _run_with_profiler(self, code, opts, namespace):
296 294 """
297 295 Run `code` with profiler. Used by ``%prun`` and ``%run -p``.
298 296
299 297 Parameters
300 298 ----------
301 299 code : str
302 300 Code to be executed.
303 301 opts : Struct
304 302 Options parsed by `self.parse_options`.
305 303 namespace : dict
306 304 A dictionary for Python namespace (e.g., `self.shell.user_ns`).
307 305
308 306 """
309 307
310 308 # Fill default values for unspecified options:
311 309 opts.merge(Struct(D=[''], l=[], s=['time'], T=['']))
312 310
313 311 prof = profile.Profile()
314 312 try:
315 313 prof = prof.runctx(code, namespace, namespace)
316 314 sys_exit = ''
317 315 except SystemExit:
318 316 sys_exit = """*** SystemExit exception caught in code being profiled."""
319 317
320 318 stats = pstats.Stats(prof).strip_dirs().sort_stats(*opts.s)
321 319
322 320 lims = opts.l
323 321 if lims:
324 322 lims = [] # rebuild lims with ints/floats/strings
325 323 for lim in opts.l:
326 324 try:
327 325 lims.append(int(lim))
328 326 except ValueError:
329 327 try:
330 328 lims.append(float(lim))
331 329 except ValueError:
332 330 lims.append(lim)
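# e.g. ``-l __init__ -l 5 -l 0.4`` gives lims == ['__init__', 5, 0.4]:
# strings filter by function name, ints cap the number of lines printed,
# and floats select a fraction of the report.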
333 331
334 332 # Trap output.
335 333 stdout_trap = StringIO()
336 334 stats_stream = stats.stream
337 335 try:
338 336 stats.stream = stdout_trap
339 337 stats.print_stats(*lims)
340 338 finally:
341 339 stats.stream = stats_stream
342 340
343 341 output = stdout_trap.getvalue()
344 342 output = output.rstrip()
345 343
346 344 if 'q' not in opts:
347 345 page.page(output)
348 346 print(sys_exit, end=' ')
349 347
350 348 dump_file = opts.D[0]
351 349 text_file = opts.T[0]
352 350 if dump_file:
353 351 prof.dump_stats(dump_file)
354 352 print('\n*** Profile stats marshalled to file',\
355 353 repr(dump_file)+'.',sys_exit)
356 354 if text_file:
357 355 pfile = open(text_file,'w')
358 356 pfile.write(output)
359 357 pfile.close()
360 358 print('\n*** Profile printout saved to text file',\
361 359 repr(text_file)+'.',sys_exit)
362 360
363 361 if 'r' in opts:
364 362 return stats
365 363 else:
366 364 return None
367 365
368 366 @line_magic
369 367 def pdb(self, parameter_s=''):
370 368 """Control the automatic calling of the pdb interactive debugger.
371 369
372 370 Call as '%pdb on', '%pdb 1', '%pdb off' or '%pdb 0'. If called without
373 371 argument it works as a toggle.
374 372
375 373 When an exception is triggered, IPython can optionally call the
376 374 interactive pdb debugger after the traceback printout. %pdb toggles
377 375 this feature on and off.
378 376
379 377 The initial state of this feature is set in your configuration
380 378 file (the option is ``InteractiveShell.pdb``).
381 379
382 380 If you want to just activate the debugger AFTER an exception has fired,
383 381 without having to type '%pdb on' and rerun your code, you can use
384 382 the %debug magic."""
385 383
386 384 par = parameter_s.strip().lower()
387 385
388 386 if par:
389 387 try:
390 388 new_pdb = {'off':0,'0':0,'on':1,'1':1}[par]
391 389 except KeyError:
392 390 print ('Incorrect argument. Use on/1, off/0, '
393 391 'or nothing for a toggle.')
394 392 return
395 393 else:
396 394 # toggle
397 395 new_pdb = not self.shell.call_pdb
398 396
399 397 # set on the shell
400 398 self.shell.call_pdb = new_pdb
401 399 print('Automatic pdb calling has been turned',on_off(new_pdb))
402 400
403 401 @skip_doctest
404 402 @magic_arguments.magic_arguments()
405 403 @magic_arguments.argument('--breakpoint', '-b', metavar='FILE:LINE',
406 404 help="""
407 405 Set break point at LINE in FILE.
408 406 """
409 407 )
410 408 @magic_arguments.argument('statement', nargs='*',
411 409 help="""
412 410 Code to run in debugger.
413 411 You can omit this in cell magic mode.
414 412 """
415 413 )
416 414 @line_cell_magic
417 415 def debug(self, line='', cell=None):
418 416 """Activate the interactive debugger.
419 417
420 418 This magic command supports two ways of activating the debugger.
421 419 One is to activate the debugger before executing code. This way, you
422 420 can set a break point and step through the code from that point.
423 421 You can use this mode by giving statements to execute and, optionally,
424 422 a breakpoint.
425 423
426 424 The other is to activate the debugger in post-mortem mode, which you
427 425 do simply by running %debug without any argument.
428 426 If an exception has just occurred, this lets you inspect its stack
429 427 frames interactively. Note that this will always work only on the last
430 428 traceback that occurred, so you must call this quickly after an
431 429 exception that you wish to inspect has fired, because if another one
432 430 occurs, it clobbers the previous one.
433 431
434 432 If you want IPython to automatically do this on every exception, see
435 433 the %pdb magic for more details.
436 434 """
437 435 args = magic_arguments.parse_argstring(self.debug, line)
438 436
439 437 if not (args.breakpoint or args.statement or cell):
440 438 self._debug_post_mortem()
441 439 else:
442 440 code = "\n".join(args.statement)
443 441 if cell:
444 442 code += "\n" + cell
445 443 self._debug_exec(code, args.breakpoint)
446 444
447 445 def _debug_post_mortem(self):
448 446 self.shell.debugger(force=True)
449 447
450 448 def _debug_exec(self, code, breakpoint):
451 449 if breakpoint:
452 450 (filename, bp_line) = breakpoint.rsplit(':', 1)
453 451 bp_line = int(bp_line)
454 452 else:
455 453 (filename, bp_line) = (None, None)
456 454 self._run_with_debugger(code, self.shell.user_ns, filename, bp_line)
457 455
458 456 @line_magic
459 457 def tb(self, s):
460 458 """Print the last traceback with the currently active exception mode.
461 459
462 460 See %xmode for changing exception reporting modes."""
463 461 self.shell.showtraceback()
464 462
465 463 @skip_doctest
466 464 @line_magic
467 465 def run(self, parameter_s='', runner=None,
468 466 file_finder=get_py_filename):
469 467 """Run the named file inside IPython as a program.
470 468
471 469 Usage::
472 470
473 471 %run [-n -i -e -G]
474 472 [( -t [-N<N>] | -d [-b<N>] | -p [profile options] )]
475 473 ( -m mod | file ) [args]
476 474
477 475 Parameters after the filename are passed as command-line arguments to
478 476 the program (put in sys.argv). Then, control returns to IPython's
479 477 prompt.
480 478
481 479 This is similar to running at a system prompt ``python file args``,
482 480 but with the advantage of giving you IPython's tracebacks, and of
483 481 loading all variables into your interactive namespace for further use
484 482 (unless -p is used, see below).
485 483
486 484 The file is executed in a namespace initially consisting only of
487 485 ``__name__=='__main__'`` and sys.argv constructed as indicated. It thus
488 486 sees its environment as if it were being run as a stand-alone program
489 487 (except for sharing global objects such as previously imported
490 488 modules). But after execution, the IPython interactive namespace gets
491 489 updated with all variables defined in the program (except for __name__
492 490 and sys.argv). This allows for very convenient loading of code for
493 491 interactive work, while giving each program a 'clean sheet' to run in.
494 492
495 493 Arguments are expanded using shell-like glob match. Patterns
496 494 '*', '?', '[seq]' and '[!seq]' can be used. Additionally,
497 495 tilde '~' will be expanded into user's home directory. Unlike
498 496 real shells, quotation does not suppress expansions. Use
499 497 *two* back slashes (e.g. ``\\\\*``) to suppress expansions.
500 498 To completely disable these expansions, you can use -G flag.
501 499
502 500 Options:
503 501
504 502 -n
505 503 __name__ is NOT set to '__main__', but to the running file's name
506 504 without extension (as python does under import). This allows running
507 505 scripts and reloading the definitions in them without calling code
508 506 protected by an ``if __name__ == "__main__"`` clause.
509 507
510 508 -i
511 509 run the file in IPython's namespace instead of an empty one. This
512 510 is useful if you are experimenting with code written in a text editor
513 511 which depends on variables defined interactively.
514 512
515 513 -e
516 514 ignore sys.exit() calls or SystemExit exceptions in the script
517 515 being run. This is particularly useful if IPython is being used to
518 516 run unittests, which always exit with a sys.exit() call. In such
519 517 cases you are interested in the output of the test results, not in
520 518 seeing a traceback of the unittest module.
521 519
522 520 -t
523 521 print timing information at the end of the run. IPython will give
524 522 you an estimated CPU time consumption for your script, which under
525 523 Unix uses the resource module to avoid the wraparound problems of
526 524 time.clock(). Under Unix, an estimate of time spent on system tasks
527 525 is also given (for Windows platforms this is reported as 0.0).
528 526
529 527 If -t is given, an additional ``-N<N>`` option can be given, where <N>
530 528 must be an integer indicating how many times you want the script to
531 529 run. The final timing report will include total and per run results.
532 530
533 531 For example (testing the script uniq_stable.py)::
534 532
535 533 In [1]: run -t uniq_stable
536 534
537 535 IPython CPU timings (estimated):
538 536 User : 0.19597 s.
539 537 System: 0.0 s.
540 538
541 539 In [2]: run -t -N5 uniq_stable
542 540
543 541 IPython CPU timings (estimated):
544 542 Total runs performed: 5
545 543 Times : Total Per run
546 544 User : 0.910862 s, 0.1821724 s.
547 545 System: 0.0 s, 0.0 s.
548 546
549 547 -d
550 548 run your program under the control of pdb, the Python debugger.
551 549 This allows you to execute your program step by step, watch variables,
552 550 etc. Internally, what IPython does is similar to calling::
553 551
554 552 pdb.run('execfile("YOURFILENAME")')
555 553
556 554 with a breakpoint set on line 1 of your file. You can change the line
557 555 number for this automatic breakpoint to be <N> by using the -bN option
558 556 (where N must be an integer). For example::
559 557
560 558 %run -d -b40 myscript
561 559
562 560 will set the first breakpoint at line 40 in myscript.py. Note that
563 561 the first breakpoint must be set on a line which actually does
564 562 something (not a comment or docstring) for it to stop execution.
565 563
566 564 Or you can specify a breakpoint in a different file::
567 565
568 566 %run -d -b myotherfile.py:20 myscript
569 567
570 568 When the pdb debugger starts, you will see a (Pdb) prompt. You must
571 569 first enter 'c' (without quotes) to start execution up to the first
572 570 breakpoint.
573 571
574 572 Entering 'help' gives information about the use of the debugger. You
575 573 can easily see pdb's full documentation with "import pdb;pdb.help()"
576 574 at a prompt.
577 575
578 576 -p
579 577 run program under the control of the Python profiler module (which
580 578 prints a detailed report of execution times, function calls, etc).
581 579
582 580 You can pass other options after -p which affect the behavior of the
583 581 profiler itself. See the docs for %prun for details.
584 582
585 583 In this mode, the program's variables do NOT propagate back to the
586 584 IPython interactive namespace (because they remain in the namespace
587 585 where the profiler executes them).
588 586
589 587 Internally this triggers a call to %prun, see its documentation for
590 588 details on the options available specifically for profiling.
591 589
592 590 There is one special usage for which the text above doesn't apply:
593 591 if the filename ends with .ipy[nb], the file is run as an IPython script,
594 592 just as if the commands had been typed at the IPython prompt.
595 593
596 594 -m
597 595 specify module name to load instead of script path. Similar to
598 596 the -m option for the python interpreter. Use this option last if you
599 597 want to combine it with other %run options. Unlike the python interpreter,
600 598 only source modules are allowed; no .pyc or .pyo files.
601 599 For example::
602 600
603 601 %run -m example
604 602
605 603 will run the example module.
606 604
607 605 -G
608 606 disable shell-like glob expansion of arguments.
609 607
610 608 """
611 609
612 610 # get arguments and set sys.argv for program to be run.
613 611 opts, arg_lst = self.parse_options(parameter_s,
614 612 'nidtN:b:pD:l:rs:T:em:G',
615 613 mode='list', list_all=1)
616 614 if "m" in opts:
617 615 modulename = opts["m"][0]
618 616 modpath = find_mod(modulename)
619 617 if modpath is None:
620 618 warn('%r is not a valid modulename on sys.path'%modulename)
621 619 return
622 620 arg_lst = [modpath] + arg_lst
623 621 try:
624 622 filename = file_finder(arg_lst[0])
625 623 except IndexError:
626 624 warn('you must provide at least a filename.')
627 625 print('\n%run:\n', oinspect.getdoc(self.run))
628 626 return
629 627 except IOError as e:
630 628 try:
631 629 msg = str(e)
632 630 except UnicodeError:
633 631 msg = e.message
634 632 error(msg)
635 633 return
636 634
637 635 if filename.lower().endswith(('.ipy', '.ipynb')):
638 636 with preserve_keys(self.shell.user_ns, '__file__'):
639 637 self.shell.user_ns['__file__'] = filename
640 638 self.shell.safe_execfile_ipy(filename)
641 639 return
642 640
643 641 # Control the response to exit() calls made by the script being run
644 642 exit_ignore = 'e' in opts
645 643
646 644 # Make sure that the running script gets a proper sys.argv as if it
647 645 # were run from a system shell.
648 646 save_argv = sys.argv # save it for later restoring
649 647
650 648 if 'G' in opts:
651 649 args = arg_lst[1:]
652 650 else:
653 651 # tilde and glob expansion
654 652 args = shellglob(map(os.path.expanduser, arg_lst[1:]))
655 653
656 654 sys.argv = [filename] + args # put in the proper filename
657 655 # protect sys.argv from potential unicode strings on Python 2:
658 656 if not py3compat.PY3:
659 657 sys.argv = [ py3compat.cast_bytes(a) for a in sys.argv ]
660 658
661 659 if 'i' in opts:
662 660 # Run in user's interactive namespace
663 661 prog_ns = self.shell.user_ns
664 662 __name__save = self.shell.user_ns['__name__']
665 663 prog_ns['__name__'] = '__main__'
666 664 main_mod = self.shell.user_module
667 665
668 666 # Since '%run foo' emulates 'python foo.py' at the cmd line, we must
669 667 # set the __file__ global in the script's namespace
670 668 # TK: Is this necessary in interactive mode?
671 669 prog_ns['__file__'] = filename
672 670 else:
673 671 # Run in a fresh, empty namespace
674 672 if 'n' in opts:
675 673 name = os.path.splitext(os.path.basename(filename))[0]
676 674 else:
677 675 name = '__main__'
678 676
679 677 # The shell MUST hold a reference to prog_ns so after %run
680 678 # exits, the python deletion mechanism doesn't zero it out
681 679 # (leaving dangling references). See interactiveshell for details
682 680 main_mod = self.shell.new_main_mod(filename, name)
683 681 prog_ns = main_mod.__dict__
684 682
685 683 # pickle fix. See interactiveshell for an explanation. But we need to
686 684 # make sure that, if we overwrite __main__, we replace it at the end
687 685 main_mod_name = prog_ns['__name__']
688 686
689 687 if main_mod_name == '__main__':
690 688 restore_main = sys.modules['__main__']
691 689 else:
692 690 restore_main = False
693 691
694 692 # This needs to be undone at the end to prevent holding references to
695 693 # every single object ever created.
696 694 sys.modules[main_mod_name] = main_mod
697 695
698 696 if 'p' in opts or 'd' in opts:
699 697 if 'm' in opts:
700 698 code = 'run_module(modulename, prog_ns)'
701 699 code_ns = {
702 700 'run_module': self.shell.safe_run_module,
703 701 'prog_ns': prog_ns,
704 702 'modulename': modulename,
705 703 }
706 704 else:
707 705 if 'd' in opts:
708 706 # allow exceptions to raise in debug mode
709 707 code = 'execfile(filename, prog_ns, raise_exceptions=True)'
710 708 else:
711 709 code = 'execfile(filename, prog_ns)'
712 710 code_ns = {
713 711 'execfile': self.shell.safe_execfile,
714 712 'prog_ns': prog_ns,
715 713 'filename': get_py_filename(filename),
716 714 }
717 715
718 716 try:
719 717 stats = None
720 718 if 'p' in opts:
721 719 stats = self._run_with_profiler(code, opts, code_ns)
722 720 else:
723 721 if 'd' in opts:
724 722 bp_file, bp_line = parse_breakpoint(
725 723 opts.get('b', ['1'])[0], filename)
726 724 self._run_with_debugger(
727 725 code, code_ns, filename, bp_line, bp_file)
728 726 else:
729 727 if 'm' in opts:
730 728 def run():
731 729 self.shell.safe_run_module(modulename, prog_ns)
732 730 else:
733 731 if runner is None:
734 732 runner = self.default_runner
735 733 if runner is None:
736 734 runner = self.shell.safe_execfile
737 735
738 736 def run():
739 737 runner(filename, prog_ns, prog_ns,
740 738 exit_ignore=exit_ignore)
741 739
742 740 if 't' in opts:
743 741 # timed execution
744 742 try:
745 743 nruns = int(opts['N'][0])
746 744 if nruns < 1:
747 745 error('Number of runs must be >=1')
748 746 return
749 747 except (KeyError):
750 748 nruns = 1
751 749 self._run_with_timing(run, nruns)
752 750 else:
753 751 # regular execution
754 752 run()
755 753
756 754 if 'i' in opts:
757 755 self.shell.user_ns['__name__'] = __name__save
758 756 else:
759 757 # update IPython interactive namespace
760 758
761 759 # Some forms of read errors on the file may mean the
762 760 # __name__ key was never set; using pop we don't have to
763 761 # worry about a possible KeyError.
764 762 prog_ns.pop('__name__', None)
765 763
766 764 with preserve_keys(self.shell.user_ns, '__file__'):
767 765 self.shell.user_ns.update(prog_ns)
768 766 finally:
769 767 # It's a bit of a mystery why, but __builtins__ can change from
770 768 # being a module to becoming a dict missing some key data after
771 769 # %run. As best I can see, this is NOT something IPython is doing
772 770 # at all, and similar problems have been reported before:
773 771 # http://coding.derkeiler.com/Archive/Python/comp.lang.python/2004-10/0188.html
774 772 # Since this seems to be done by the interpreter itself, the best
775 773 # we can do is to at least restore __builtins__ for the user on
776 774 # exit.
777 775 self.shell.user_ns['__builtins__'] = builtin_mod
778 776
779 777 # Ensure key global structures are restored
780 778 sys.argv = save_argv
781 779 if restore_main:
782 780 sys.modules['__main__'] = restore_main
783 781 else:
784 782 # Remove from sys.modules the reference to main_mod we'd
785 783 # added. Otherwise it will trap references to objects
786 784 # contained therein.
787 785 del sys.modules[main_mod_name]
788 786
789 787 return stats
790 788
791 789 def _run_with_debugger(self, code, code_ns, filename=None,
792 790 bp_line=None, bp_file=None):
793 791 """
794 792 Run `code` in debugger with a break point.
795 793
796 794 Parameters
797 795 ----------
798 796 code : str
799 797 Code to execute.
800 798 code_ns : dict
801 799 A namespace in which `code` is executed.
802 800 filename : str
803 801 `code` is run as if it were in `filename`.
804 802 bp_line : int, optional
805 803 Line number of the break point.
806 804 bp_file : str, optional
807 805 Path to the file in which break point is specified.
808 806 `filename` is used if not given.
809 807
810 808 Raises
811 809 ------
812 810 UsageError
813 811 If the break point given by `bp_line` is not valid.
814 812
815 813 """
816 814 deb = self.shell.InteractiveTB.pdb
817 815 if not deb:
818 816 self.shell.InteractiveTB.pdb = self.shell.InteractiveTB.debugger_cls()
819 817 deb = self.shell.InteractiveTB.pdb
820 818
821 819 # reset Breakpoint state, which is moronically kept
822 820 # in a class
823 821 bdb.Breakpoint.next = 1
824 822 bdb.Breakpoint.bplist = {}
825 823 bdb.Breakpoint.bpbynumber = [None]
826 824 if bp_line is not None:
827 825 # Set an initial breakpoint to stop execution
828 826 maxtries = 10
829 827 bp_file = bp_file or filename
830 828 checkline = deb.checkline(bp_file, bp_line)
831 829 if not checkline:
832 830 for bp in range(bp_line + 1, bp_line + maxtries + 1):
833 831 if deb.checkline(bp_file, bp):
834 832 break
835 833 else:
836 834 msg = ("\nI failed to find a valid line to set "
837 835 "a breakpoint\n"
838 836 "after trying up to line: %s.\n"
839 837 "Please set a valid breakpoint manually "
840 838 "with the -b option." % bp)
841 839 raise UsageError(msg)
842 840 # if we find a good line number, set the breakpoint
843 841 deb.do_break('%s:%s' % (bp_file, bp_line))
844 842
845 843 if filename:
846 844 # Mimic Pdb._runscript(...)
847 845 deb._wait_for_mainpyfile = True
848 846 deb.mainpyfile = deb.canonic(filename)
849 847
850 848 # Start file run
851 849 print("NOTE: Enter 'c' at the %s prompt to continue execution." % deb.prompt)
852 850 try:
853 851 if filename:
854 852 # save filename so it can be used by methods on the deb object
855 853 deb._exec_filename = filename
856 854 while True:
857 855 try:
858 856 deb.run(code, code_ns)
859 857 except Restart:
860 858 print("Restarting")
861 859 if filename:
862 860 deb._wait_for_mainpyfile = True
863 861 deb.mainpyfile = deb.canonic(filename)
864 862 continue
865 863 else:
866 864 break
867 865
868 866
869 867 except:
870 868 etype, value, tb = sys.exc_info()
871 869 # Skip three frames in the traceback: the %run one,
872 870 # one inside bdb.py, and the command-line typed by the
873 871 # user (run by exec in pdb itself).
874 872 self.shell.InteractiveTB(etype, value, tb, tb_offset=3)
875 873
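# A minimal standalone sketch of the breakpoint search performed above,
# using the stock ``pdb.Pdb`` rather than IPython's debugger; the helper
# name and the ``maxtries`` default are illustrative, not part of IPython.
import pdb

def find_breakable_line(filename, start_line, maxtries=10):
    """Return the first executable line at or after ``start_line``, or None."""
    deb = pdb.Pdb()
    for lineno in range(start_line, start_line + maxtries + 1):
        if deb.checkline(filename, lineno):
            return lineno
    return None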
876 874 @staticmethod
877 875 def _run_with_timing(run, nruns):
878 876 """
879 877 Run function `run` and print timing information.
880 878
881 879 Parameters
882 880 ----------
883 881 run : callable
884 882 Any callable object which takes no argument.
885 883 nruns : int
886 884 Number of times to execute `run`.
887 885
888 886 """
889 887 twall0 = time.time()
890 888 if nruns == 1:
891 889 t0 = clock2()
892 890 run()
893 891 t1 = clock2()
894 892 t_usr = t1[0] - t0[0]
895 893 t_sys = t1[1] - t0[1]
896 894 print("\nIPython CPU timings (estimated):")
897 895 print(" User : %10.2f s." % t_usr)
898 896 print(" System : %10.2f s." % t_sys)
899 897 else:
900 898 runs = range(nruns)
901 899 t0 = clock2()
902 900 for nr in runs:
903 901 run()
904 902 t1 = clock2()
905 903 t_usr = t1[0] - t0[0]
906 904 t_sys = t1[1] - t0[1]
907 905 print("\nIPython CPU timings (estimated):")
908 906 print("Total runs performed:", nruns)
909 907 print(" Times : %10s %10s" % ('Total', 'Per run'))
910 908 print(" User : %10.2f s, %10.2f s." % (t_usr, t_usr / nruns))
911 909 print(" System : %10.2f s, %10.2f s." % (t_sys, t_sys / nruns))
912 910 twall1 = time.time()
913 911 print("Wall time: %10.2f s." % (twall1 - twall0))
914 912
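# A rough sketch of the timing arithmetic above, assuming clock2() returns
# a (user, system) CPU-time pair comparable to os.times()[:2]; the helper
# below is illustrative and not part of IPython.
import os
import time

def time_callable(run, nruns=1):
    """Run ``run`` nruns times and report wall, user and system time."""
    wall0 = time.time()
    u0, s0 = os.times()[:2]
    for _ in range(nruns):
        run()
    u1, s1 = os.times()[:2]
    wall1 = time.time()
    return {"wall": wall1 - wall0, "user": u1 - u0, "sys": s1 - s0}

# Example: time_callable(lambda: sum(range(10**6)), nruns=3)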
915 913 @skip_doctest
916 914 @line_cell_magic
917 915 def timeit(self, line='', cell=None):
918 916 """Time execution of a Python statement or expression
919 917
920 918 Usage, in line mode:
921 919 %timeit [-n<N> -r<R> [-t|-c] -q -p<P> -o] statement
922 920 or in cell mode:
923 921 %%timeit [-n<N> -r<R> [-t|-c] -q -p<P> -o] setup_code
924 922 code
925 923 code...
926 924
927 925 Time execution of a Python statement or expression using the timeit
928 926 module. This function can be used both as a line and cell magic:
929 927
930 928 - In line mode you can time a single-line statement (though multiple
931 929 ones can be chained with using semicolons).
932 930
933 931 - In cell mode, the statement in the first line is used as setup code
934 932 (executed but not timed) and the body of the cell is timed. The cell
935 933 body has access to any variables created in the setup code.
936 934
937 935 Options:
938 936 -n<N>: execute the given statement <N> times in a loop. If this value
939 937 is not given, a fitting value is chosen.
940 938
941 939 -r<R>: repeat the timing loop <R> times and report the average and
942 940 standard deviation across the repeats. Default: 7
943 941
944 942 -t: use time.time to measure the time, which is the default on Unix.
945 943 This function measures wall time.
946 944
947 945 -c: use time.clock to measure the time, which is the default on
948 946 Windows and measures wall time. On Unix, resource.getrusage is used
949 947 instead and returns the CPU user time.
950 948
951 949 -p<P>: use a precision of <P> digits to display the timing result.
952 950 Default: 3
953 951
954 952 -q: Quiet, do not print result.
955 953
956 954 -o: return a TimeitResult that can be stored in a variable to inspect
957 955 the result in more details.
958 956
959 957
960 958 Examples
961 959 --------
962 960 ::
963 961
964 962 In [1]: %timeit pass
965 963 100000000 loops, average of 7: 5.48 ns +- 0.354 ns per loop (using standard deviation)
966 964
967 965 In [2]: u = None
968 966
969 967 In [3]: %timeit u is None
970 968 10000000 loops, average of 7: 22.7 ns +- 2.33 ns per loop (using standard deviation)
971 969
972 970 In [4]: %timeit -r 4 u == None
973 971 10000000 loops, average of 4: 27.5 ns +- 2.91 ns per loop (using standard deviation)
974 972
975 973 In [5]: import time
976 974
977 975 In [6]: %timeit -n1 time.sleep(2)
978 976 1 loop, average of 7: 2 s +- 4.71 µs per loop (using standard deviation)
979 977
980 978
981 979 The times reported by %timeit will be slightly higher than those
982 980 reported by the timeit.py script when variables are accessed. This is
983 981 due to the fact that %timeit executes the statement in the namespace
984 982 of the shell, compared with timeit.py, which uses a single setup
985 983 statement to import functions or create variables. Generally, the bias
986 984 does not matter as long as results from timeit.py are not mixed with
987 985 those from %timeit."""
988 986
989 987 opts, stmt = self.parse_options(line,'n:r:tcp:qo',
990 988 posix=False, strict=False)
991 989 if stmt == "" and cell is None:
992 990 return
993 991
994 992 timefunc = timeit.default_timer
995 993 number = int(getattr(opts, "n", 0))
996 994 default_repeat = 7 if timeit.default_repeat < 7 else timeit.default_repeat
997 995 repeat = int(getattr(opts, "r", default_repeat))
998 996 precision = int(getattr(opts, "p", 3))
999 997 quiet = 'q' in opts
1000 998 return_result = 'o' in opts
1001 999 if hasattr(opts, "t"):
1002 1000 timefunc = time.time
1003 1001 if hasattr(opts, "c"):
1004 1002 timefunc = clock
1005 1003
1006 1004 timer = Timer(timer=timefunc)
1007 1005 # this code has tight coupling to the inner workings of timeit.Timer,
1008 1006 # but is there a better way to ensure that the timed statement has
1009 1007 # access to the shell namespace?
1010 1008 transform = self.shell.input_splitter.transform_cell
1011 1009
1012 1010 if cell is None:
1013 1011 # called as line magic
1014 1012 ast_setup = self.shell.compile.ast_parse("pass")
1015 1013 ast_stmt = self.shell.compile.ast_parse(transform(stmt))
1016 1014 else:
1017 1015 ast_setup = self.shell.compile.ast_parse(transform(stmt))
1018 1016 ast_stmt = self.shell.compile.ast_parse(transform(cell))
1019 1017
1020 1018 ast_setup = self.shell.transform_ast(ast_setup)
1021 1019 ast_stmt = self.shell.transform_ast(ast_stmt)
1022 1020
1023 1021 # This codestring is taken from timeit.template - we fill it in as an
1024 1022 # AST, so that we can apply our AST transformations to the user code
1025 1023 # without affecting the timing code.
1026 1024 timeit_ast_template = ast.parse('def inner(_it, _timer):\n'
1027 1025 ' setup\n'
1028 1026 ' _t0 = _timer()\n'
1029 1027 ' for _i in _it:\n'
1030 1028 ' stmt\n'
1031 1029 ' _t1 = _timer()\n'
1032 1030 ' return _t1 - _t0\n')
1033 1031
1034 1032 timeit_ast = TimeitTemplateFiller(ast_setup, ast_stmt).visit(timeit_ast_template)
1035 1033 timeit_ast = ast.fix_missing_locations(timeit_ast)
1036 1034
1037 1035 # Track compilation time so it can be reported if too long
1038 1036 # Minimum time above which compilation time will be reported
1039 1037 tc_min = 0.1
1040 1038
1041 1039 t0 = clock()
1042 1040 code = self.shell.compile(timeit_ast, "<magic-timeit>", "exec")
1043 1041 tc = clock()-t0
1044 1042
1045 1043 ns = {}
1046 1044 exec(code, self.shell.user_ns, ns)
1047 1045 timer.inner = ns["inner"]
1048 1046
1049 1047 # This is used to check if there is a huge difference between the
1050 1048 # best and worst timings.
1051 1049 # Issue: https://github.com/ipython/ipython/issues/6471
1052 1050 if number == 0:
1053 1051 # determine number so that 0.2 <= total time < 2.0
1054 1052 for index in range(0, 10):
1055 1053 number = 10 ** index
1056 1054 time_number = timer.timeit(number)
1057 1055 if time_number >= 0.2:
1058 1056 break
1059 1057
1060 1058 all_runs = timer.repeat(repeat, number)
1061 1059 best = min(all_runs) / number
1062 1060 worst = max(all_runs) / number
1063 1061 timeit_result = TimeitResult(number, repeat, best, worst, all_runs, tc, precision)
1064 1062
1065 1063 if not quiet :
1066 1064 # Check best timing is greater than zero to avoid a
1067 1065 # ZeroDivisionError.
1068 1066 # In cases where the slowest timing is less than a microsecond
1069 1067 # we assume that it does not really matter if the fastest
1070 1068 # timing is 4 times faster than the slowest timing or not.
1071 1069 if worst > 4 * best and best > 0 and worst > 1e-6:
1072 1070 print("The slowest run took %0.2f times longer than the "
1073 1071 "fastest. This could mean that an intermediate result "
1074 1072 "is being cached." % (worst / best))
1075 1073
1076 1074 print( timeit_result )
1077 1075
1078 1076 if tc > tc_min:
1079 1077 print("Compiler time: %.2f s" % tc)
1080 1078 if return_result:
1081 1079 return timeit_result
1082 1080
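# Usage sketch for the -o option above, meant to be run inside an IPython
# session; the attribute names (best, all_runs) are inferred from the
# TimeitResult constructor call above and should be treated as assumptions.
from IPython import get_ipython

ip = get_ipython()
if ip is not None:
    res = ip.run_line_magic('timeit', '-o -q sum(range(1000))')
    print(res.best)       # best time per loop, in seconds
    print(res.all_runs)   # raw timings for each repeat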
1083 1081 @skip_doctest
1084 1082 @needs_local_scope
1085 1083 @line_cell_magic
1086 1084 def time(self,line='', cell=None, local_ns=None):
1087 1085 """Time execution of a Python statement or expression.
1088 1086
1089 1087 The CPU and wall clock times are printed, and the value of the
1090 1088 expression (if any) is returned. Note that under Win32, system time
1091 1089 is always reported as 0, since it cannot be measured.
1092 1090
1093 1091 This function can be used both as a line and cell magic:
1094 1092
1095 1093 - In line mode you can time a single-line statement (though multiple
1096 1094 ones can be chained using semicolons).
1097 1095
1098 1096 - In cell mode, you can time the cell body (a directly
1099 1097 following statement raises an error).
1100 1098
1101 1099 This function provides very basic timing functionality. Use the timeit
1102 1100 magic for more control over the measurement.
1103 1101
1104 1102 Examples
1105 1103 --------
1106 1104 ::
1107 1105
1108 1106 In [1]: %time 2**128
1109 1107 CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
1110 1108 Wall time: 0.00
1111 1109 Out[1]: 340282366920938463463374607431768211456L
1112 1110
1113 1111 In [2]: n = 1000000
1114 1112
1115 1113 In [3]: %time sum(range(n))
1116 1114 CPU times: user 1.20 s, sys: 0.05 s, total: 1.25 s
1117 1115 Wall time: 1.37
1118 1116 Out[3]: 499999500000L
1119 1117
1120 1118 In [4]: %time print 'hello world'
1121 1119 hello world
1122 1120 CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
1123 1121 Wall time: 0.00
1124 1122
1125 1123 Note that the time needed by Python to compile the given expression
1126 1124 will be reported if it is more than 0.1s. In this example, the
1127 1125 actual exponentiation is done by Python at compilation time, so while
1128 1126 the expression can take a noticeable amount of time to compute, that
1129 1127 time is purely due to the compilation:
1130 1128
1131 1129 In [5]: %time 3**9999;
1132 1130 CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
1133 1131 Wall time: 0.00 s
1134 1132
1135 1133 In [6]: %time 3**999999;
1136 1134 CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
1137 1135 Wall time: 0.00 s
1138 1136 Compiler : 0.78 s
1139 1137 """
1140 1138
1141 1139 # fail immediately if the given expression can't be compiled
1142 1140
1143 1141 if line and cell:
1144 1142 raise UsageError("Can't use statement directly after '%%time'!")
1145 1143
1146 1144 if cell:
1147 1145 expr = self.shell.input_transformer_manager.transform_cell(cell)
1148 1146 else:
1149 1147 expr = self.shell.input_transformer_manager.transform_cell(line)
1150 1148
1151 1149 # Minimum time above which parse time will be reported
1152 1150 tp_min = 0.1
1153 1151
1154 1152 t0 = clock()
1155 1153 expr_ast = self.shell.compile.ast_parse(expr)
1156 1154 tp = clock()-t0
1157 1155
1158 1156 # Apply AST transformations
1159 1157 expr_ast = self.shell.transform_ast(expr_ast)
1160 1158
1161 1159 # Minimum time above which compilation time will be reported
1162 1160 tc_min = 0.1
1163 1161
1164 1162 if len(expr_ast.body)==1 and isinstance(expr_ast.body[0], ast.Expr):
1165 1163 mode = 'eval'
1166 1164 source = '<timed eval>'
1167 1165 expr_ast = ast.Expression(expr_ast.body[0].value)
1168 1166 else:
1169 1167 mode = 'exec'
1170 1168 source = '<timed exec>'
1171 1169 t0 = clock()
1172 1170 code = self.shell.compile(expr_ast, source, mode)
1173 1171 tc = clock()-t0
1174 1172
1175 1173 # skew measurement as little as possible
1176 1174 glob = self.shell.user_ns
1177 1175 wtime = time.time
1178 1176 # time execution
1179 1177 wall_st = wtime()
1180 1178 if mode=='eval':
1181 1179 st = clock2()
1182 1180 out = eval(code, glob, local_ns)
1183 1181 end = clock2()
1184 1182 else:
1185 1183 st = clock2()
1186 1184 exec(code, glob, local_ns)
1187 1185 end = clock2()
1188 1186 out = None
1189 1187 wall_end = wtime()
1190 1188 # Compute actual times and report
1191 1189 wall_time = wall_end-wall_st
1192 1190 cpu_user = end[0]-st[0]
1193 1191 cpu_sys = end[1]-st[1]
1194 1192 cpu_tot = cpu_user+cpu_sys
1195 1193 # On Windows, cpu_sys is always zero, so it adds no information to the print below
1196 1194 if sys.platform != 'win32':
1197 1195 print("CPU times: user %s, sys: %s, total: %s" % \
1198 1196 (_format_time(cpu_user),_format_time(cpu_sys),_format_time(cpu_tot)))
1199 1197 print("Wall time: %s" % _format_time(wall_time))
1200 1198 if tc > tc_min:
1201 1199 print("Compiler : %s" % _format_time(tc))
1202 1200 if tp > tp_min:
1203 1201 print("Parser : %s" % _format_time(tp))
1204 1202 return out
1205 1203
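# Usage sketch for %time / %%time, meant to be run inside an IPython
# session; the timed expressions are arbitrary examples.
from IPython import get_ipython

ip = get_ipython()
if ip is not None:
    # line magic: returns the value of the timed expression
    total = ip.run_line_magic('time', 'sum(range(10**6))')
    # cell magic: first argument is the (empty) magic line, second is the cell body
    ip.run_cell_magic('time', '', 'acc = 0\nfor i in range(10**5):\n    acc += i\n')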
1206 1204 @skip_doctest
1207 1205 @line_magic
1208 1206 def macro(self, parameter_s=''):
1209 1207 """Define a macro for future re-execution. It accepts ranges of history,
1210 1208 filenames or string objects.
1211 1209
1212 1210 Usage:\\
1213 1211 %macro [options] name n1-n2 n3-n4 ... n5 .. n6 ...
1214 1212
1215 1213 Options:
1216 1214
1217 1215 -r: use 'raw' input. By default, the 'processed' history is used,
1218 1216 so that magics are loaded in their transformed version to valid
1219 1217 Python. If this option is given, the raw input as typed at the
1220 1218 command line is used instead.
1221 1219
1222 1220 -q: quiet macro definition. By default, a tag line is printed
1223 1221 to indicate the macro has been created, and then the contents of
1224 1222 the macro are printed. If this option is given, then no printout
1225 1223 is produced once the macro is created.
1226 1224
1227 1225 This will define a global variable called `name` which is a string
1228 1226 made of joining the slices and lines you specify (n1,n2,... numbers
1229 1227 above) from your input history into a single string. This variable
1230 1228 acts like an automatic function which re-executes those lines as if
1231 1229 you had typed them. You just type 'name' at the prompt and the code
1232 1230 executes.
1233 1231
1234 1232 The syntax for indicating input ranges is described in %history.
1235 1233
1236 1234 Note: as a 'hidden' feature, you can also use traditional python slice
1237 1235 notation, where N:M means numbers N through M-1.
1238 1236
1239 1237 For example, if your history contains (print using %hist -n )::
1240 1238
1241 1239 44: x=1
1242 1240 45: y=3
1243 1241 46: z=x+y
1244 1242 47: print x
1245 1243 48: a=5
1246 1244 49: print 'x',x,'y',y
1247 1245
1248 1246 you can create a macro with lines 44 through 47 (included) and line 49
1249 1247 called my_macro with::
1250 1248
1251 1249 In [55]: %macro my_macro 44-47 49
1252 1250
1253 1251 Now, typing `my_macro` (without quotes) will re-execute all this code
1254 1252 in one pass.
1255 1253
1256 1254 You don't need to give the line-numbers in order, and any given line
1257 1255 number can appear multiple times. You can assemble macros with any
1258 1256 lines from your input history in any order.
1259 1257
1260 1258 The macro is a simple object which holds its value in an attribute,
1261 1259 but IPython's display system checks for macros and executes them as
1262 1260 code instead of printing them when you type their name.
1263 1261
1264 1262 You can view a macro's contents by explicitly printing it with::
1265 1263
1266 1264 print macro_name
1267 1265
1268 1266 """
1269 1267 opts,args = self.parse_options(parameter_s,'rq',mode='list')
1270 1268 if not args: # List existing macros
1271 1269 return sorted(k for k,v in iteritems(self.shell.user_ns) if\
1272 1270 isinstance(v, Macro))
1273 1271 if len(args) == 1:
1274 1272 raise UsageError(
1275 1273 "%macro insufficient args; usage '%macro name n1-n2 n3-4...")
1276 1274 name, codefrom = args[0], " ".join(args[1:])
1277 1275
1278 1276 #print 'rng',ranges # dbg
1279 1277 try:
1280 1278 lines = self.shell.find_user_code(codefrom, 'r' in opts)
1281 1279 except (ValueError, TypeError) as e:
1282 1280 print(e.args[0])
1283 1281 return
1284 1282 macro = Macro(lines)
1285 1283 self.shell.define_macro(name, macro)
1286 1284 if 'q' not in opts:
1287 1285 print('Macro `%s` created. To execute, type its name (without quotes).' % name)
1288 1286 print('=== Macro contents: ===')
1289 1287 print(macro, end=' ')
1290 1288
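# Usage sketch for %macro, meant to be run inside an IPython session that
# already has a few history entries; the history range below is illustrative.
from IPython import get_ipython

ip = get_ipython()
if ip is not None:
    ip.run_line_magic('macro', 'my_macro 1-2')   # build a macro from lines 1-2
    print(ip.user_ns['my_macro'])                # show the macro's source
    existing = ip.run_line_magic('macro', '')    # bare call lists existing macros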
1291 1289 @magic_arguments.magic_arguments()
1292 1290 @magic_arguments.argument('output', type=str, default='', nargs='?',
1293 1291 help="""The name of the variable in which to store output.
1294 1292 This is a utils.io.CapturedIO object with stdout/err attributes
1295 1293 for the text of the captured output.
1296 1294
1297 1295 CapturedOutput also has a show() method for displaying the output,
1298 1296 and __call__ as well, so you can use that to quickly display the
1299 1297 output.
1300 1298
1301 1299 If unspecified, captured output is discarded.
1302 1300 """
1303 1301 )
1304 1302 @magic_arguments.argument('--no-stderr', action="store_true",
1305 1303 help="""Don't capture stderr."""
1306 1304 )
1307 1305 @magic_arguments.argument('--no-stdout', action="store_true",
1308 1306 help="""Don't capture stdout."""
1309 1307 )
1310 1308 @magic_arguments.argument('--no-display', action="store_true",
1311 1309 help="""Don't capture IPython's rich display."""
1312 1310 )
1313 1311 @cell_magic
1314 1312 def capture(self, line, cell):
1315 1313 """run the cell, capturing stdout, stderr, and IPython's rich display() calls."""
1316 1314 args = magic_arguments.parse_argstring(self.capture, line)
1317 1315 out = not args.no_stdout
1318 1316 err = not args.no_stderr
1319 1317 disp = not args.no_display
1320 1318 with capture_output(out, err, disp) as io:
1321 1319 self.shell.run_cell(cell)
1322 1320 if args.output:
1323 1321 self.shell.user_ns[args.output] = io
1324 1322
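# Usage sketch for %%capture, meant to be run inside an IPython session;
# the variable name 'captured' is arbitrary. The stdout attribute and the
# show() method come from the CapturedIO object described in the help text above.
from IPython import get_ipython

ip = get_ipython()
if ip is not None:
    ip.run_cell_magic('capture', 'captured', 'print("hello")\n')
    captured = ip.user_ns['captured']
    print(captured.stdout)   # -> 'hello\n'
    captured.show()          # replay the captured output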
1325 1323 def parse_breakpoint(text, current_file):
1326 1324 '''Returns (file, line) for file:line and (current_file, line) for line'''
1327 1325 colon = text.find(':')
1328 1326 if colon == -1:
1329 1327 return current_file, int(text)
1330 1328 else:
1331 1329 return text[:colon], int(text[colon+1:])
1332 1330
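# Behaviour of parse_breakpoint, derived directly from the code above
# (assumes evaluation in this module's namespace):
assert parse_breakpoint('myscript.py:12', 'current.py') == ('myscript.py', 12)
assert parse_breakpoint('12', 'current.py') == ('current.py', 12)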
1333 1331 def _format_time(timespan, precision=3):
1334 1332 """Formats the timespan in a human readable form"""
1335 1333
1336 1334 if timespan >= 60.0:
1337 1335 # we have more than a minute, format that in a human readable form
1338 1336 # Idea from http://snipplr.com/view/5713/
1339 1337 parts = [("d", 60*60*24),("h", 60*60),("min", 60), ("s", 1)]
1340 1338 time = []
1341 1339 leftover = timespan
1342 1340 for suffix, length in parts:
1343 1341 value = int(leftover / length)
1344 1342 if value > 0:
1345 1343 leftover = leftover % length
1346 1344 time.append(u'%s%s' % (str(value), suffix))
1347 1345 if leftover < 1:
1348 1346 break
1349 1347 return " ".join(time)
1350 1348
1351 1349
1352 1350 # Unfortunately the unicode 'micro' symbol can cause problems in
1353 1351 # certain terminals.
1354 1352 # See bug: https://bugs.launchpad.net/ipython/+bug/348466
1355 1353 # Try to prevent crashes by being more conservative than strictly necessary.
1356 1354 # E.g. Eclipse is able to print a µ, but has no sys.stdout.encoding set.
1357 1355 units = [u"s", u"ms",u'us',"ns"] # the safe (ASCII-only) values
1358 1356 if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
1359 1357 try:
1360 1358 u'\xb5'.encode(sys.stdout.encoding)
1361 1359 units = [u"s", u"ms",u'\xb5s',"ns"]
1362 1360 except:
1363 1361 pass
1364 1362 scaling = [1, 1e3, 1e6, 1e9]
1365 1363
1366 1364 if timespan > 0.0:
1367 1365 order = min(-int(math.floor(math.log10(timespan)) // 3), 3)
1368 1366 else:
1369 1367 order = 3
1370 1368 return u"%.*g %s" % (precision, timespan * scaling[order], units[order])
@@ -1,67 +1,66 b''
1 1 """Implementation of magic functions for the extension machinery.
2 2 """
3 from __future__ import print_function
4 3 #-----------------------------------------------------------------------------
5 4 # Copyright (c) 2012 The IPython Development Team.
6 5 #
7 6 # Distributed under the terms of the Modified BSD License.
8 7 #
9 8 # The full license is in the file COPYING.txt, distributed with this software.
10 9 #-----------------------------------------------------------------------------
11 10
12 11 #-----------------------------------------------------------------------------
13 12 # Imports
14 13 #-----------------------------------------------------------------------------
15 14
16 15 # Stdlib
17 16 import os
18 17
19 18 # Our own packages
20 19 from IPython.core.error import UsageError
21 20 from IPython.core.magic import Magics, magics_class, line_magic
22 21 from warnings import warn
23 22
24 23 #-----------------------------------------------------------------------------
25 24 # Magic implementation classes
26 25 #-----------------------------------------------------------------------------
27 26
28 27 @magics_class
29 28 class ExtensionMagics(Magics):
30 29 """Magics to manage the IPython extensions system."""
31 30
32 31 @line_magic
33 32 def load_ext(self, module_str):
34 33 """Load an IPython extension by its module name."""
35 34 if not module_str:
36 35 raise UsageError('Missing module name.')
37 36 res = self.shell.extension_manager.load_extension(module_str)
38 37
39 38 if res == 'already loaded':
40 39 print("The %s extension is already loaded. To reload it, use:" % module_str)
41 40 print(" %reload_ext", module_str)
42 41 elif res == 'no load function':
43 42 print("The %s module is not an IPython extension." % module_str)
44 43
45 44 @line_magic
46 45 def unload_ext(self, module_str):
47 46 """Unload an IPython extension by its module name.
48 47
49 48 Not all extensions can be unloaded, only those which define an
50 49 ``unload_ipython_extension`` function.
51 50 """
52 51 if not module_str:
53 52 raise UsageError('Missing module name.')
54 53
55 54 res = self.shell.extension_manager.unload_extension(module_str)
56 55
57 56 if res == 'no unload function':
58 57 print("The %s extension doesn't define how to unload it." % module_str)
59 58 elif res == "not loaded":
60 59 print("The %s extension is not loaded." % module_str)
61 60
62 61 @line_magic
63 62 def reload_ext(self, module_str):
64 63 """Reload an IPython extension by its module name."""
65 64 if not module_str:
66 65 raise UsageError('Missing module name.')
67 66 self.shell.extension_manager.reload_extension(module_str)
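# Usage sketch for the extension magics above, meant to be run inside an
# IPython session; 'autoreload' is just one commonly bundled extension, and
# %unload_ext only succeeds for extensions defining unload_ipython_extension.
from IPython import get_ipython

ip = get_ipython()
if ip is not None:
    ip.run_line_magic('load_ext', 'autoreload')
    ip.run_line_magic('reload_ext', 'autoreload')
    ip.run_line_magic('unload_ext', 'autoreload')  # prints a notice if unloading is unsupported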
@@ -1,320 +1,319 b''
1 1 """Implementation of magic functions related to History.
2 2 """
3 3 #-----------------------------------------------------------------------------
4 4 # Copyright (c) 2012, IPython Development Team.
5 5 #
6 6 # Distributed under the terms of the Modified BSD License.
7 7 #
8 8 # The full license is in the file COPYING.txt, distributed with this software.
9 9 #-----------------------------------------------------------------------------
10 10
11 11 #-----------------------------------------------------------------------------
12 12 # Imports
13 13 #-----------------------------------------------------------------------------
14 from __future__ import print_function
15 14
16 15 # Stdlib
17 16 import os
18 17 import sys
19 18 from io import open as io_open
20 19
21 20 # Our own packages
22 21 from IPython.core.error import StdinNotImplementedError
23 22 from IPython.core.magic import Magics, magics_class, line_magic
24 23 from IPython.core.magic_arguments import (argument, magic_arguments,
25 24 parse_argstring)
26 25 from IPython.testing.skipdoctest import skip_doctest
27 26 from IPython.utils import io
28 27 from IPython.utils.py3compat import cast_unicode_py2
29 28
30 29 #-----------------------------------------------------------------------------
31 30 # Magics class implementation
32 31 #-----------------------------------------------------------------------------
33 32
34 33
35 34 _unspecified = object()
36 35
37 36
38 37 @magics_class
39 38 class HistoryMagics(Magics):
40 39
41 40 @magic_arguments()
42 41 @argument(
43 42 '-n', dest='print_nums', action='store_true', default=False,
44 43 help="""
45 44 print line numbers for each input.
46 45 This feature is only available if numbered prompts are in use.
47 46 """)
48 47 @argument(
49 48 '-o', dest='get_output', action='store_true', default=False,
50 49 help="also print outputs for each input.")
51 50 @argument(
52 51 '-p', dest='pyprompts', action='store_true', default=False,
53 52 help="""
54 53 print classic '>>>' python prompts before each input.
55 54 This is useful for making documentation, and in conjunction
56 55 with -o, for producing doctest-ready output.
57 56 """)
58 57 @argument(
59 58 '-t', dest='raw', action='store_false', default=True,
60 59 help="""
61 60 print the 'translated' history, as IPython understands it.
62 61 IPython filters your input and converts it all into valid Python
63 62 source before executing it (things like magics or aliases are turned
64 63 into function calls, for example). With this option, you'll see the
65 64 translated history instead of the user-entered version: '%%cd /' will be
66 65 seen as 'get_ipython().magic("%%cd /")' instead of '%%cd /'.
67 66 """)
68 67 @argument(
69 68 '-f', dest='filename',
70 69 help="""
71 70 FILENAME: instead of printing the output to the screen, redirect
72 71 it to the given file. The file is always overwritten, though *when
73 72 it can*, IPython asks for confirmation first. In particular, running
74 73 the command 'history -f FILENAME' from the IPython Notebook
75 74 interface will replace FILENAME even if it already exists *without*
76 75 confirmation.
77 76 """)
78 77 @argument(
79 78 '-g', dest='pattern', nargs='*', default=None,
80 79 help="""
81 80 treat the arg as a glob pattern to search for in (full) history.
82 81 This includes the saved history (almost all commands ever written).
83 82 The pattern may contain '?' to match one unknown character and '*'
84 83 to match any number of unknown characters. Use '%%hist -g' to show
85 84 full saved history (may be very long).
86 85 """)
87 86 @argument(
88 87 '-l', dest='limit', type=int, nargs='?', default=_unspecified,
89 88 help="""
90 89 get the last n lines from all sessions. Specify n as a single
91 90 arg, or the default is the last 10 lines.
92 91 """)
93 92 @argument(
94 93 '-u', dest='unique', action='store_true',
95 94 help="""
96 95 when searching history using `-g`, show only unique history.
97 96 """)
98 97 @argument('range', nargs='*')
99 98 @skip_doctest
100 99 @line_magic
101 100 def history(self, parameter_s = ''):
102 101 """Print input history (_i<n> variables), with most recent last.
103 102
104 103 By default, input history is printed without line numbers so it can be
105 104 directly pasted into an editor. Use -n to show them.
106 105
107 106 By default, all input history from the current session is displayed.
108 107 Ranges of history can be indicated using the syntax:
109 108
110 109 ``4``
111 110 Line 4, current session
112 111 ``4-6``
113 112 Lines 4-6, current session
114 113 ``243/1-5``
115 114 Lines 1-5, session 243
116 115 ``~2/7``
117 116 Line 7, session 2 before current
118 117 ``~8/1-~6/5``
119 118 From the first line of 8 sessions ago, to the fifth line of 6
120 119 sessions ago.
121 120
122 121 Multiple ranges can be entered, separated by spaces
123 122
124 123 The same syntax is used by %macro, %save, %edit, %rerun
125 124
126 125 Examples
127 126 --------
128 127 ::
129 128
130 129 In [6]: %history -n 4-6
131 130 4:a = 12
132 131 5:print a**2
133 132 6:%history -n 4-6
134 133
135 134 """
136 135
137 136 args = parse_argstring(self.history, parameter_s)
138 137
139 138 # For brevity
140 139 history_manager = self.shell.history_manager
141 140
142 141 def _format_lineno(session, line):
143 142 """Helper function to format line numbers properly."""
144 143 if session in (0, history_manager.session_number):
145 144 return str(line)
146 145 return "%s/%s" % (session, line)
147 146
148 147 # Check if output to specific file was requested.
149 148 outfname = args.filename
150 149 if not outfname:
151 150 outfile = sys.stdout # default
152 151 # We don't want to close stdout at the end!
153 152 close_at_end = False
154 153 else:
155 154 if os.path.exists(outfname):
156 155 try:
157 156 ans = io.ask_yes_no("File %r exists. Overwrite?" % outfname)
158 157 except StdinNotImplementedError:
159 158 ans = True
160 159 if not ans:
161 160 print('Aborting.')
162 161 return
163 162 print("Overwriting file.")
164 163 outfile = io_open(outfname, 'w', encoding='utf-8')
165 164 close_at_end = True
166 165
167 166 print_nums = args.print_nums
168 167 get_output = args.get_output
169 168 pyprompts = args.pyprompts
170 169 raw = args.raw
171 170
172 171 pattern = None
173 172 limit = None if args.limit is _unspecified else args.limit
174 173
175 174 if args.pattern is not None:
176 175 if args.pattern:
177 176 pattern = "*" + " ".join(args.pattern) + "*"
178 177 else:
179 178 pattern = "*"
180 179 hist = history_manager.search(pattern, raw=raw, output=get_output,
181 180 n=limit, unique=args.unique)
182 181 print_nums = True
183 182 elif args.limit is not _unspecified:
184 183 n = 10 if limit is None else limit
185 184 hist = history_manager.get_tail(n, raw=raw, output=get_output)
186 185 else:
187 186 if args.range: # Get history by ranges
188 187 hist = history_manager.get_range_by_str(" ".join(args.range),
189 188 raw, get_output)
190 189 else: # Just get history for the current session
191 190 hist = history_manager.get_range(raw=raw, output=get_output)
192 191
193 192 # We could be displaying the entire history, so let's not try to pull
194 193 # it into a list in memory. Anything that needs more space will just
195 194 # misalign.
196 195 width = 4
197 196
198 197 for session, lineno, inline in hist:
199 198 # Print user history with tabs expanded to 4 spaces. The GUI
200 199 # clients use hard tabs for easier usability in auto-indented code,
201 200 # but we want to produce PEP-8 compliant history for safe pasting
202 201 # into an editor.
203 202 if get_output:
204 203 inline, output = inline
205 204 inline = inline.expandtabs(4).rstrip()
206 205
207 206 multiline = "\n" in inline
208 207 line_sep = '\n' if multiline else ' '
209 208 if print_nums:
210 209 print(u'%s:%s' % (_format_lineno(session, lineno).rjust(width),
211 210 line_sep), file=outfile, end=u'')
212 211 if pyprompts:
213 212 print(u">>> ", end=u"", file=outfile)
214 213 if multiline:
215 214 inline = "\n... ".join(inline.splitlines()) + "\n..."
216 215 print(inline, file=outfile)
217 216 if get_output and output:
218 217 print(cast_unicode_py2(output), file=outfile)
219 218
220 219 if close_at_end:
221 220 outfile.close()
222 221
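# Usage sketch for %history, meant to be run inside an IPython session;
# the ranges, search pattern and filename below are illustrative.
from IPython import get_ipython

ip = get_ipython()
if ip is not None:
    ip.run_line_magic('history', '-n 1-3')          # lines 1-3 with line numbers
    ip.run_line_magic('history', '-g import -l 5')  # search saved history, last 5 hits
    ip.run_line_magic('history', '-f session.py')   # redirect output to a file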
223 222 @line_magic
224 223 def recall(self, arg):
225 224 r"""Repeat a command, or get command to input line for editing.
226 225
227 226 %recall and %rep are equivalent.
228 227
229 228 - %recall (no arguments):
230 229
231 230 Place a string version of last computation result (stored in the
232 231 special '_' variable) to the next input prompt. Allows you to create
233 232 elaborate command lines without using copy-paste::
234 233
235 234 In[1]: l = ["hei", "vaan"]
236 235 In[2]: "".join(l)
237 236 Out[2]: heivaan
238 237 In[3]: %recall
239 238 In[4]: heivaan_ <== cursor blinking
240 239
241 240 %recall 45
242 241
243 242 Place history line 45 on the next input prompt. Use %hist to find
244 243 out the number.
245 244
246 245 %recall 1-4
247 246
248 247 Combine the specified lines into one cell, and place it on the next
249 248 input prompt. See %history for the slice syntax.
250 249
251 250 %recall foo+bar
252 251
253 252 If foo+bar can be evaluated in the user namespace, the result is
254 253 placed at the next input prompt. Otherwise, the history is searched
255 254 for lines which contain that substring, and the most recent one is
256 255 placed at the next input prompt.
257 256 """
258 257 if not arg: # Last output
259 258 self.shell.set_next_input(str(self.shell.user_ns["_"]))
260 259 return
261 260 # Get history range
262 261 histlines = self.shell.history_manager.get_range_by_str(arg)
263 262 cmd = "\n".join(x[2] for x in histlines)
264 263 if cmd:
265 264 self.shell.set_next_input(cmd.rstrip())
266 265 return
267 266
268 267 try: # Variable in user namespace
269 268 cmd = str(eval(arg, self.shell.user_ns))
270 269 except Exception: # Search for term in history
271 270 histlines = self.shell.history_manager.search("*"+arg+"*")
272 271 for h in reversed([x[2] for x in histlines]):
273 272 if 'recall' in h or 'rep' in h:
274 273 continue
275 274 self.shell.set_next_input(h.rstrip())
276 275 return
277 276 else:
278 277 self.shell.set_next_input(cmd.rstrip())
279 278 print("Couldn't evaluate or find in history:", arg)
280 279
281 280 @line_magic
282 281 def rerun(self, parameter_s=''):
283 282 """Re-run previous input
284 283
285 284 By default, you can specify ranges of input history to be repeated
286 285 (as with %history). With no arguments, it will repeat the last line.
287 286
288 287 Options:
289 288
290 289 -l <n> : Repeat the last n lines of input, not including the
291 290 current command.
292 291
293 292 -g foo : Repeat the most recent line which contains foo
294 293 """
295 294 opts, args = self.parse_options(parameter_s, 'l:g:', mode='string')
296 295 if "l" in opts: # Last n lines
297 296 n = int(opts['l'])
298 297 hist = self.shell.history_manager.get_tail(n)
299 298 elif "g" in opts: # Search
300 299 p = "*"+opts['g']+"*"
301 300 hist = list(self.shell.history_manager.search(p))
302 301 for l in reversed(hist):
303 302 if "rerun" not in l[2]:
304 303 hist = [l] # The last match which isn't a %rerun
305 304 break
306 305 else:
307 306 hist = [] # No matches except %rerun
308 307 elif args: # Specify history ranges
309 308 hist = self.shell.history_manager.get_range_by_str(args)
310 309 else: # Last line
311 310 hist = self.shell.history_manager.get_tail(1)
312 311 hist = [x[2] for x in hist]
313 312 if not hist:
314 313 print("No lines in history match specification")
315 314 return
316 315 histlines = "\n".join(hist)
317 316 print("=== Executing: ===")
318 317 print(histlines)
319 318 print("=== Output: ===")
320 319 self.shell.run_cell("\n".join(hist), store_history=False)
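# Usage sketch for %rerun, meant to be run inside an IPython session; the
# range and search term are illustrative.
from IPython import get_ipython

ip = get_ipython()
if ip is not None:
    ip.run_line_magic('rerun', '-l 2')      # re-run the last two input lines
    ip.run_line_magic('rerun', '-g plot')   # re-run the last line containing 'plot'
    ip.run_line_magic('rerun', '5-7')       # re-run history lines 5-7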
@@ -1,704 +1,703 b''
1 1 """Implementation of namespace-related magic functions.
2 2 """
3 from __future__ import print_function
4 3 #-----------------------------------------------------------------------------
5 4 # Copyright (c) 2012 The IPython Development Team.
6 5 #
7 6 # Distributed under the terms of the Modified BSD License.
8 7 #
9 8 # The full license is in the file COPYING.txt, distributed with this software.
10 9 #-----------------------------------------------------------------------------
11 10
12 11 #-----------------------------------------------------------------------------
13 12 # Imports
14 13 #-----------------------------------------------------------------------------
15 14
16 15 # Stdlib
17 16 import gc
18 17 import re
19 18 import sys
20 19
21 20 # Our own packages
22 21 from IPython.core import page
23 22 from IPython.core.error import StdinNotImplementedError, UsageError
24 23 from IPython.core.magic import Magics, magics_class, line_magic
25 24 from IPython.testing.skipdoctest import skip_doctest
26 25 from IPython.utils.encoding import DEFAULT_ENCODING
27 26 from IPython.utils.openpy import read_py_file
28 27 from IPython.utils.path import get_py_filename
29 28 from IPython.utils.py3compat import unicode_type
30 29
31 30 #-----------------------------------------------------------------------------
32 31 # Magic implementation classes
33 32 #-----------------------------------------------------------------------------
34 33
35 34 @magics_class
36 35 class NamespaceMagics(Magics):
37 36 """Magics to manage various aspects of the user's namespace.
38 37
39 38 These include listing variables, introspecting into them, etc.
40 39 """
41 40
42 41 @line_magic
43 42 def pinfo(self, parameter_s='', namespaces=None):
44 43 """Provide detailed information about an object.
45 44
46 45 '%pinfo object' is just a synonym for object? or ?object."""
47 46
48 47 #print 'pinfo par: <%s>' % parameter_s # dbg
49 48 # detail_level: 0 -> obj? , 1 -> obj??
50 49 detail_level = 0
51 50 # We need to detect if we got called as 'pinfo pinfo foo', which can
52 51 # happen if the user types 'pinfo foo?' at the cmd line.
53 52 pinfo,qmark1,oname,qmark2 = \
54 53 re.match(r'(pinfo )?(\?*)(.*?)(\??$)', parameter_s).groups()
55 54 if pinfo or qmark1 or qmark2:
56 55 detail_level = 1
57 56 if "*" in oname:
58 57 self.psearch(oname)
59 58 else:
60 59 self.shell._inspect('pinfo', oname, detail_level=detail_level,
61 60 namespaces=namespaces)
62 61
63 62 @line_magic
64 63 def pinfo2(self, parameter_s='', namespaces=None):
65 64 """Provide extra detailed information about an object.
66 65
67 66 '%pinfo2 object' is just a synonym for object?? or ??object."""
68 67 self.shell._inspect('pinfo', parameter_s, detail_level=1,
69 68 namespaces=namespaces)
70 69
71 70 @skip_doctest
72 71 @line_magic
73 72 def pdef(self, parameter_s='', namespaces=None):
74 73 """Print the call signature for any callable object.
75 74
76 75 If the object is a class, print the constructor information.
77 76
78 77 Examples
79 78 --------
80 79 ::
81 80
82 81 In [3]: %pdef urllib.urlopen
83 82 urllib.urlopen(url, data=None, proxies=None)
84 83 """
85 84 self.shell._inspect('pdef',parameter_s, namespaces)
86 85
87 86 @line_magic
88 87 def pdoc(self, parameter_s='', namespaces=None):
89 88 """Print the docstring for an object.
90 89
91 90 If the given object is a class, it will print both the class and the
92 91 constructor docstrings."""
93 92 self.shell._inspect('pdoc',parameter_s, namespaces)
94 93
95 94 @line_magic
96 95 def psource(self, parameter_s='', namespaces=None):
97 96 """Print (or run through pager) the source code for an object."""
98 97 if not parameter_s:
99 98 raise UsageError('Missing object name.')
100 99 self.shell._inspect('psource',parameter_s, namespaces)
101 100
102 101 @line_magic
103 102 def pfile(self, parameter_s='', namespaces=None):
104 103 """Print (or run through pager) the file where an object is defined.
105 104
106 105 The file opens at the line where the object definition begins. IPython
107 106 will honor the environment variable PAGER if set, and otherwise will
108 107 do its best to print the file in a convenient form.
109 108
110 109 If the given argument is not an object currently defined, IPython will
111 110 try to interpret it as a filename (automatically adding a .py extension
112 111 if needed). You can thus use %pfile as a syntax highlighting code
113 112 viewer."""
114 113
115 114 # first interpret argument as an object name
116 115 out = self.shell._inspect('pfile',parameter_s, namespaces)
117 116 # if not, try the input as a filename
118 117 if out == 'not found':
119 118 try:
120 119 filename = get_py_filename(parameter_s)
121 120 except IOError as msg:
122 121 print(msg)
123 122 return
124 123 page.page(self.shell.pycolorize(read_py_file(filename, skip_encoding_cookie=False)))
125 124
126 125 @line_magic
127 126 def psearch(self, parameter_s=''):
128 127 """Search for object in namespaces by wildcard.
129 128
130 129 %psearch [options] PATTERN [OBJECT TYPE]
131 130
132 131 Note: ? can be used as a synonym for %psearch, at the beginning or at
133 132 the end: both a*? and ?a* are equivalent to '%psearch a*'. Still, the
134 133 rest of the command line must be unchanged (options come first), so
135 134 for example the following forms are equivalent
136 135
137 136 %psearch -i a* function
138 137 -i a* function?
139 138 ?-i a* function
140 139
141 140 Arguments:
142 141
143 142 PATTERN
144 143
145 144 where PATTERN is a string containing * as a wildcard similar to its
146 145 use in a shell. The pattern is matched in all namespaces on the
147 146 search path. By default objects starting with a single _ are not
148 147 matched; many IPython-generated objects have a single
149 148 underscore. The default is case-insensitive matching. Matching is
150 149 also done on the attributes of objects and not only on the objects
151 150 in a module.
152 151
153 152 [OBJECT TYPE]
154 153
155 154 Is the name of a python type from the types module. The name is
156 155 given in lowercase without the ending type, ex. StringType is
157 156 written string. By adding a type here only objects matching the
158 157 given type are matched. Using all here makes the pattern match all
159 158 types (this is the default).
160 159
161 160 Options:
162 161
163 162 -a: makes the pattern match even objects whose names start with a
164 163 single underscore. These names are normally omitted from the
165 164 search.
166 165
167 166 -i/-c: make the pattern case insensitive/sensitive. If neither of
168 167 these options are given, the default is read from your configuration
169 168 file, with the option ``InteractiveShell.wildcards_case_sensitive``.
170 169 If this option is not specified in your configuration file, IPython's
171 170 internal default is to do a case sensitive search.
172 171
173 172 -e/-s NAMESPACE: exclude/search a given namespace. The pattern you
174 173 specify can be searched in any of the following namespaces:
175 174 'builtin', 'user', 'user_global','internal', 'alias', where
176 175 'builtin' and 'user' are the search defaults. Note that you should
177 176 not use quotes when specifying namespaces.
178 177
179 178 'Builtin' contains the python module builtin, 'user' contains all
180 179 user data, 'alias' only contains the shell aliases and no python
181 180 objects, 'internal' contains objects used by IPython. The
182 181 'user_global' namespace is only used by embedded IPython instances,
183 182 and it contains module-level globals. You can add namespaces to the
184 183 search with -s or exclude them with -e (these options can be given
185 184 more than once).
186 185
187 186 Examples
188 187 --------
189 188 ::
190 189
191 190 %psearch a* -> objects beginning with an a
192 191 %psearch -e builtin a* -> objects NOT in the builtin space starting in a
193 192 %psearch a* function -> all functions beginning with an a
194 193 %psearch re.e* -> objects beginning with an e in module re
195 194 %psearch r*.e* -> objects that start with e in modules starting in r
196 195 %psearch r*.* string -> all strings in modules beginning with r
197 196
198 197 Case sensitive search::
199 198
200 199 %psearch -c a* list all objects beginning with lower-case a
201 200
202 201 Show objects beginning with a single _::
203 202
204 203 %psearch -a _* list objects beginning with a single underscore
205 204 """
206 205 try:
207 206 parameter_s.encode('ascii')
208 207 except UnicodeEncodeError:
209 208 print('Python identifiers can only contain ascii characters.')
210 209 return
211 210
212 211 # default namespaces to be searched
213 212 def_search = ['user_local', 'user_global', 'builtin']
214 213
215 214 # Process options/args
216 215 opts,args = self.parse_options(parameter_s,'cias:e:',list_all=True)
217 216 opt = opts.get
218 217 shell = self.shell
219 218 psearch = shell.inspector.psearch
220 219
221 220 # select case options
222 221 if 'i' in opts:
223 222 ignore_case = True
224 223 elif 'c' in opts:
225 224 ignore_case = False
226 225 else:
227 226 ignore_case = not shell.wildcards_case_sensitive
228 227
229 228 # Build list of namespaces to search from user options
230 229 def_search.extend(opt('s',[]))
231 230 ns_exclude = opt('e', [])
232 231 ns_search = [nm for nm in def_search if nm not in ns_exclude]
233 232
234 233 # Call the actual search
235 234 try:
236 235 psearch(args,shell.ns_table,ns_search,
237 236 show_all=opt('a'),ignore_case=ignore_case)
238 237 except:
239 238 shell.showtraceback()
240 239
241 240 @skip_doctest
242 241 @line_magic
243 242 def who_ls(self, parameter_s=''):
244 243 """Return a sorted list of all interactive variables.
245 244
246 245 If arguments are given, only variables of types matching these
247 246 arguments are returned.
248 247
249 248 Examples
250 249 --------
251 250
252 251 Define two variables and list them with who_ls::
253 252
254 253 In [1]: alpha = 123
255 254
256 255 In [2]: beta = 'test'
257 256
258 257 In [3]: %who_ls
259 258 Out[3]: ['alpha', 'beta']
260 259
261 260 In [4]: %who_ls int
262 261 Out[4]: ['alpha']
263 262
264 263 In [5]: %who_ls str
265 264 Out[5]: ['beta']
266 265 """
267 266
268 267 user_ns = self.shell.user_ns
269 268 user_ns_hidden = self.shell.user_ns_hidden
270 269 nonmatching = object() # This can never be in user_ns
271 270 out = [ i for i in user_ns
272 271 if not i.startswith('_') \
273 272 and (user_ns[i] is not user_ns_hidden.get(i, nonmatching)) ]
274 273
275 274 typelist = parameter_s.split()
276 275 if typelist:
277 276 typeset = set(typelist)
278 277 out = [i for i in out if type(user_ns[i]).__name__ in typeset]
279 278
280 279 out.sort()
281 280 return out
282 281
283 282 @skip_doctest
284 283 @line_magic
285 284 def who(self, parameter_s=''):
286 285 """Print all interactive variables, with some minimal formatting.
287 286
288 287 If any arguments are given, only variables whose type matches one of
289 288 these are printed. For example::
290 289
291 290 %who function str
292 291
293 292 will only list functions and strings, excluding all other types of
294 293 variables. To find the proper type names, simply use type(var) at a
295 294 command line to see how python prints type names. For example:
296 295
297 296 ::
298 297
299 298 In [1]: type('hello')\\
300 299 Out[1]: <type 'str'>
301 300
302 301 indicates that the type name for strings is 'str'.
303 302
304 303 ``%who`` always excludes names loaded or executed through your configuration
305 304 file and things which are internal to IPython.
306 305
307 306 This is deliberate, as typically you may load many modules and the
308 307 purpose of %who is to show you only what you've manually defined.
309 308
310 309 Examples
311 310 --------
312 311
313 312 Define two variables and list them with who::
314 313
315 314 In [1]: alpha = 123
316 315
317 316 In [2]: beta = 'test'
318 317
319 318 In [3]: %who
320 319 alpha beta
321 320
322 321 In [4]: %who int
323 322 alpha
324 323
325 324 In [5]: %who str
326 325 beta
327 326 """
328 327
329 328 varlist = self.who_ls(parameter_s)
330 329 if not varlist:
331 330 if parameter_s:
332 331 print('No variables match your requested type.')
333 332 else:
334 333 print('Interactive namespace is empty.')
335 334 return
336 335
337 336 # if we have variables, move on...
338 337 count = 0
339 338 for i in varlist:
340 339 print(i+'\t', end=' ')
341 340 count += 1
342 341 if count > 8:
343 342 count = 0
344 343 print()
345 344 print()
346 345
347 346 @skip_doctest
348 347 @line_magic
349 348 def whos(self, parameter_s=''):
350 349 """Like %who, but gives some extra information about each variable.
351 350
352 351 The same type filtering of %who can be applied here.
353 352
354 353 For all variables, the type is printed. Additionally it prints:
355 354
356 355 - For {},[],(): their length.
357 356
358 357 - For numpy arrays, a summary with shape, number of
359 358 elements, typecode and size in memory.
360 359
361 360 - Everything else: a string representation, snipping their middle if
362 361 too long.
363 362
364 363 Examples
365 364 --------
366 365
367 366 Define two variables and list them with whos::
368 367
369 368 In [1]: alpha = 123
370 369
371 370 In [2]: beta = 'test'
372 371
373 372 In [3]: %whos
374 373 Variable Type Data/Info
375 374 --------------------------------
376 375 alpha int 123
377 376 beta str test
378 377 """
379 378
380 379 varnames = self.who_ls(parameter_s)
381 380 if not varnames:
382 381 if parameter_s:
383 382 print('No variables match your requested type.')
384 383 else:
385 384 print('Interactive namespace is empty.')
386 385 return
387 386
388 387 # if we have variables, move on...
389 388
390 389 # for these types, show len() instead of data:
391 390 seq_types = ['dict', 'list', 'tuple']
392 391
393 392 # for numpy arrays, display summary info
394 393 ndarray_type = None
395 394 if 'numpy' in sys.modules:
396 395 try:
397 396 from numpy import ndarray
398 397 except ImportError:
399 398 pass
400 399 else:
401 400 ndarray_type = ndarray.__name__
402 401
403 402 # Find all variable names and types so we can figure out column sizes
404 403
405 404 # some types are well known and can be shorter
406 405 abbrevs = {'IPython.core.macro.Macro' : 'Macro'}
407 406 def type_name(v):
408 407 tn = type(v).__name__
409 408 return abbrevs.get(tn,tn)
410 409
411 410 varlist = [self.shell.user_ns[n] for n in varnames]
412 411
413 412 typelist = []
414 413 for vv in varlist:
415 414 tt = type_name(vv)
416 415
417 416 if tt=='instance':
418 417 typelist.append( abbrevs.get(str(vv.__class__),
419 418 str(vv.__class__)))
420 419 else:
421 420 typelist.append(tt)
422 421
423 422 # column labels and # of spaces as separator
424 423 varlabel = 'Variable'
425 424 typelabel = 'Type'
426 425 datalabel = 'Data/Info'
427 426 colsep = 3
428 427 # variable format strings
429 428 vformat = "{0:<{varwidth}}{1:<{typewidth}}"
430 429 aformat = "%s: %s elems, type `%s`, %s bytes"
431 430 # find the size of the columns to format the output nicely
432 431 varwidth = max(max(map(len,varnames)), len(varlabel)) + colsep
433 432 typewidth = max(max(map(len,typelist)), len(typelabel)) + colsep
434 433 # table header
435 434 print(varlabel.ljust(varwidth) + typelabel.ljust(typewidth) + \
436 435 ' '+datalabel+'\n' + '-'*(varwidth+typewidth+len(datalabel)+1))
437 436 # and the table itself
438 437 kb = 1024
439 438 Mb = 1048576 # kb**2
440 439 for vname,var,vtype in zip(varnames,varlist,typelist):
441 440 print(vformat.format(vname, vtype, varwidth=varwidth, typewidth=typewidth), end=' ')
442 441 if vtype in seq_types:
443 442 print("n="+str(len(var)))
444 443 elif vtype == ndarray_type:
445 444 vshape = str(var.shape).replace(',','').replace(' ','x')[1:-1]
446 445 if vtype==ndarray_type:
447 446 # numpy
448 447 vsize = var.size
449 448 vbytes = vsize*var.itemsize
450 449 vdtype = var.dtype
451 450
452 451 if vbytes < 100000:
453 452 print(aformat % (vshape, vsize, vdtype, vbytes))
454 453 else:
455 454 print(aformat % (vshape, vsize, vdtype, vbytes), end=' ')
456 455 if vbytes < Mb:
457 456 print('(%s kb)' % (vbytes/kb,))
458 457 else:
459 458 print('(%s Mb)' % (vbytes/Mb,))
460 459 else:
461 460 try:
462 461 vstr = str(var)
463 462 except UnicodeEncodeError:
464 463 vstr = unicode_type(var).encode(DEFAULT_ENCODING,
465 464 'backslashreplace')
466 465 except:
467 466 vstr = "<object with id %d (str() failed)>" % id(var)
468 467 vstr = vstr.replace('\n', '\\n')
469 468 if len(vstr) < 50:
470 469 print(vstr)
471 470 else:
472 471 print(vstr[:25] + "<...>" + vstr[-25:])
473 472
474 473 @line_magic
475 474 def reset(self, parameter_s=''):
476 475 """Resets the namespace by removing all names defined by the user, if
477 476 called without arguments, or by removing some types of objects, such
478 477 as everything currently in IPython's In[] and Out[] containers (see
479 478 the parameters for details).
480 479
481 480 Parameters
482 481 ----------
483 482 -f : force reset without asking for confirmation.
484 483
485 484 -s : 'Soft' reset: Only clears your namespace, leaving history intact.
486 485 References to objects may be kept. By default (without this option),
487 486 we do a 'hard' reset, giving you a new session and removing all
488 487 references to objects from the current session.
489 488
490 489 in : reset input history
491 490
492 491 out : reset output history
493 492
494 493 dhist : reset directory history
495 494
496 495 array : reset only variables that are NumPy arrays
497 496
498 497 See Also
499 498 --------
500 499 reset_selective : invoked as ``%reset_selective``
501 500
502 501 Examples
503 502 --------
504 503 ::
505 504
506 505 In [6]: a = 1
507 506
508 507 In [7]: a
509 508 Out[7]: 1
510 509
511 510 In [8]: 'a' in _ip.user_ns
512 511 Out[8]: True
513 512
514 513 In [9]: %reset -f
515 514
516 515 In [1]: 'a' in _ip.user_ns
517 516 Out[1]: False
518 517
519 518 In [2]: %reset -f in
520 519 Flushing input history
521 520
522 521 In [3]: %reset -f dhist in
523 522 Flushing directory history
524 523 Flushing input history
525 524
526 525 Notes
527 526 -----
528 527 Calling this magic from clients that do not implement standard input,
529 528 such as the ipython notebook interface, will reset the namespace
530 529 without confirmation.
531 530 """
532 531 opts, args = self.parse_options(parameter_s,'sf', mode='list')
533 532 if 'f' in opts:
534 533 ans = True
535 534 else:
536 535 try:
537 536 ans = self.shell.ask_yes_no(
538 537 "Once deleted, variables cannot be recovered. Proceed (y/[n])?",
539 538 default='n')
540 539 except StdinNotImplementedError:
541 540 ans = True
542 541 if not ans:
543 542 print('Nothing done.')
544 543 return
545 544
546 545 if 's' in opts: # Soft reset
547 546 user_ns = self.shell.user_ns
548 547 for i in self.who_ls():
549 548 del(user_ns[i])
550 549 elif len(args) == 0: # Hard reset
551 550 self.shell.reset(new_session = False)
552 551
553 552 # reset in/out/dhist/array: previously extensions/clearcmd.py
554 553 ip = self.shell
555 554 user_ns = self.shell.user_ns # local lookup, heavily used
556 555
557 556 for target in args:
558 557 target = target.lower() # make matches case insensitive
559 558 if target == 'out':
560 559 print("Flushing output cache (%d entries)" % len(user_ns['_oh']))
561 560 self.shell.displayhook.flush()
562 561
563 562 elif target == 'in':
564 563 print("Flushing input history")
565 564 pc = self.shell.displayhook.prompt_count + 1
566 565 for n in range(1, pc):
567 566 key = '_i'+repr(n)
568 567 user_ns.pop(key,None)
569 568 user_ns.update(dict(_i=u'',_ii=u'',_iii=u''))
570 569 hm = ip.history_manager
571 570 # don't delete these, as %save and %macro depend on the
572 571 # length of these lists being preserved
573 572 hm.input_hist_parsed[:] = [''] * pc
574 573 hm.input_hist_raw[:] = [''] * pc
575 574 # hm has internal machinery for _i,_ii,_iii, clear it out
576 575 hm._i = hm._ii = hm._iii = hm._i00 = u''
577 576
578 577 elif target == 'array':
579 578 # Support cleaning up numpy arrays
580 579 try:
581 580 from numpy import ndarray
582 581 # This must be done with items and not iteritems because
583 582 # we're going to modify the dict in-place.
584 583 for x,val in list(user_ns.items()):
585 584 if isinstance(val,ndarray):
586 585 del user_ns[x]
587 586 except ImportError:
588 587 print("reset array only works if Numpy is available.")
589 588
590 589 elif target == 'dhist':
591 590 print("Flushing directory history")
592 591 del user_ns['_dh'][:]
593 592
594 593 else:
595 594 print("Don't know how to reset ", end=' ')
596 595 print(target + ", please run `%reset?` for details")
597 596
598 597 gc.collect()
599 598
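# Usage sketch for %reset, meant to be run inside an IPython session; -f
# skips the confirmation prompt as described above.
from IPython import get_ipython

ip = get_ipython()
if ip is not None:
    ip.run_line_magic('reset', '-f')          # hard reset, no confirmation
    ip.run_line_magic('reset', '-f in out')   # only flush In[]/Out[] history
    ip.run_line_magic('reset', '-f array')    # only drop NumPy arrays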
600 599 @line_magic
601 600 def reset_selective(self, parameter_s=''):
602 601 """Resets the namespace by removing names defined by the user.
603 602
604 603 Input/Output history are left around in case you need them.
605 604
606 605 %reset_selective [-f] regex
607 606
608 607 No action is taken if regex is not included
609 608
610 609 Options
611 610 -f : force reset without asking for confirmation.
612 611
613 612 See Also
614 613 --------
615 614 reset : invoked as ``%reset``
616 615
617 616 Examples
618 617 --------
619 618
620 619 We first fully reset the namespace so your output looks identical to
621 620 this example for pedagogical reasons; in practice you do not need a
622 621 full reset::
623 622
624 623 In [1]: %reset -f
625 624
626 625 Now, with a clean namespace we can make a few variables and use
627 626 ``%reset_selective`` to only delete names that match our regexp::
628 627
629 628 In [2]: a=1; b=2; c=3; b1m=4; b2m=5; b3m=6; b4m=7; b2s=8
630 629
631 630 In [3]: who_ls
632 631 Out[3]: ['a', 'b', 'b1m', 'b2m', 'b2s', 'b3m', 'b4m', 'c']
633 632
634 633 In [4]: %reset_selective -f b[2-3]m
635 634
636 635 In [5]: who_ls
637 636 Out[5]: ['a', 'b', 'b1m', 'b2s', 'b4m', 'c']
638 637
639 638 In [6]: %reset_selective -f d
640 639
641 640 In [7]: who_ls
642 641 Out[7]: ['a', 'b', 'b1m', 'b2s', 'b4m', 'c']
643 642
644 643 In [8]: %reset_selective -f c
645 644
646 645 In [9]: who_ls
647 646 Out[9]: ['a', 'b', 'b1m', 'b2s', 'b4m']
648 647
649 648 In [10]: %reset_selective -f b
650 649
651 650 In [11]: who_ls
652 651 Out[11]: ['a']
653 652
654 653 Notes
655 654 -----
656 655 Calling this magic from clients that do not implement standard input,
657 656 such as the ipython notebook interface, will reset the namespace
658 657 without confirmation.
659 658 """
660 659
661 660 opts, regex = self.parse_options(parameter_s,'f')
662 661
663 662 if 'f' in opts:
664 663 ans = True
665 664 else:
666 665 try:
667 666 ans = self.shell.ask_yes_no(
668 667 "Once deleted, variables cannot be recovered. Proceed (y/[n])? ",
669 668 default='n')
670 669 except StdinNotImplementedError:
671 670 ans = True
672 671 if not ans:
673 672 print('Nothing done.')
674 673 return
675 674 user_ns = self.shell.user_ns
676 675 if not regex:
677 676 print('No regex pattern specified. Nothing done.')
678 677 return
679 678 else:
680 679 try:
681 680 m = re.compile(regex)
682 681 except TypeError:
683 682 raise TypeError('regex must be a string or compiled pattern')
684 683 for i in self.who_ls():
685 684 if m.search(i):
686 685 del(user_ns[i])
687 686
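A minimal standalone sketch of the filtering step used by ``%reset_selective`` above, assuming a plain dict stands in for ``user_ns`` and skipping the confirmation prompt; ``selective_reset`` is an illustrative helper, not IPython API::

    import re

    def selective_reset(namespace, pattern):
        # Delete every non-underscore name matching the regex, mirroring
        # the who_ls()/re.search loop in %reset_selective.
        regex = re.compile(pattern)
        for name in [n for n in namespace if not n.startswith('_')]:
            if regex.search(name):
                del namespace[name]

    ns = {'a': 1, 'b1m': 2, 'b2m': 3, 'c': 4}
    selective_reset(ns, r'b[12]m')
    assert sorted(ns) == ['a', 'c']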
688 687 @line_magic
689 688 def xdel(self, parameter_s=''):
690 689 """Delete a variable, trying to clear it from anywhere that
691 690 IPython's machinery has references to it. By default, this uses
692 691 the identity of the named object in the user namespace to remove
693 692 references held under other names. The object is also removed
694 693 from the output history.
695 694
696 695 Options
697 696 -n : Delete the specified name from all namespaces, without
698 697 checking their identity.
699 698 """
700 699 opts, varname = self.parse_options(parameter_s,'n')
701 700 try:
702 701 self.shell.del_var(varname, ('n' in opts))
703 702 except (NameError, ValueError) as e:
704 703 print(type(e).__name__ +": "+ str(e))
@@ -1,790 +1,789 b''
1 1 """Implementation of magic functions for interaction with the OS.
2 2
3 3 Note: this module is named 'osm' instead of 'os' to avoid a collision with the
4 4 builtin.
5 5 """
6 from __future__ import print_function
7 6 #-----------------------------------------------------------------------------
8 7 # Copyright (c) 2012 The IPython Development Team.
9 8 #
10 9 # Distributed under the terms of the Modified BSD License.
11 10 #
12 11 # The full license is in the file COPYING.txt, distributed with this software.
13 12 #-----------------------------------------------------------------------------
14 13
15 14 #-----------------------------------------------------------------------------
16 15 # Imports
17 16 #-----------------------------------------------------------------------------
18 17
19 18 # Stdlib
20 19 import io
21 20 import os
22 21 import re
23 22 import sys
24 23 from pprint import pformat
25 24
26 25 # Our own packages
27 26 from IPython.core import magic_arguments
28 27 from IPython.core import oinspect
29 28 from IPython.core import page
30 29 from IPython.core.alias import AliasError, Alias
31 30 from IPython.core.error import UsageError
32 31 from IPython.core.magic import (
33 32 Magics, compress_dhist, magics_class, line_magic, cell_magic, line_cell_magic
34 33 )
35 34 from IPython.testing.skipdoctest import skip_doctest
36 35 from IPython.utils.openpy import source_to_unicode
37 36 from IPython.utils.process import abbrev_cwd
38 37 from IPython.utils import py3compat
39 38 from IPython.utils.py3compat import unicode_type
40 39 from IPython.utils.terminal import set_term_title
41 40
42 41 #-----------------------------------------------------------------------------
43 42 # Magic implementation classes
44 43 #-----------------------------------------------------------------------------
45 44 @magics_class
46 45 class OSMagics(Magics):
47 46 """Magics to interact with the underlying OS (shell-type functionality).
48 47 """
49 48
50 49 @skip_doctest
51 50 @line_magic
52 51 def alias(self, parameter_s=''):
53 52 """Define an alias for a system command.
54 53
55 54 '%alias alias_name cmd' defines 'alias_name' as an alias for 'cmd'
56 55
57 56 Then, typing 'alias_name params' will execute the system command 'cmd
58 57 params' (from your underlying operating system).
59 58
60 59 Aliases have lower precedence than magic functions and Python normal
61 60 variables, so if 'foo' is both a Python variable and an alias, the
62 61 alias can not be executed until 'del foo' removes the Python variable.
63 62
64 63 You can use the %l specifier in an alias definition to represent the
65 64 whole line when the alias is called. For example::
66 65
67 66 In [2]: alias bracket echo "Input in brackets: <%l>"
68 67 In [3]: bracket hello world
69 68 Input in brackets: <hello world>
70 69
71 70 You can also define aliases with parameters using %s specifiers (one
72 71 per parameter)::
73 72
74 73 In [1]: alias parts echo first %s second %s
75 74 In [2]: %parts A B
76 75 first A second B
77 76 In [3]: %parts A
78 77 Incorrect number of arguments: 2 expected.
79 78 parts is an alias to: 'echo first %s second %s'
80 79
81 80 Note that %l and %s are mutually exclusive. You can only use one or
82 81 the other in your aliases.
83 82
84 83 Aliases expand Python variables just like system calls using ! or !!
85 84 do: all expressions prefixed with '$' get expanded. For details of
86 85 the semantic rules, see PEP-215:
87 86 http://www.python.org/peps/pep-0215.html. This is the library used by
88 87 IPython for variable expansion. If you want to access a true shell
89 88 variable, an extra $ is necessary to prevent its expansion by
90 89 IPython::
91 90
92 91 In [6]: alias show echo
93 92 In [7]: PATH='A Python string'
94 93 In [8]: show $PATH
95 94 A Python string
96 95 In [9]: show $$PATH
97 96 /usr/local/lf9560/bin:/usr/local/intel/compiler70/ia32/bin:...
98 97
99 98 You can use the alias facility to access all of $PATH. See the %rehashx
100 99 function, which automatically creates aliases for the contents of your
101 100 $PATH.
102 101
103 102 If called with no parameters, %alias prints the current alias table."""
104 103
105 104 par = parameter_s.strip()
106 105 if not par:
107 106 aliases = sorted(self.shell.alias_manager.aliases)
108 107 # stored = self.shell.db.get('stored_aliases', {} )
109 108 # for k, v in stored:
110 109 # atab.append(k, v[0])
111 110
112 111 print("Total number of aliases:", len(aliases))
113 112 sys.stdout.flush()
114 113 return aliases
115 114
116 115 # Now try to define a new one
117 116 try:
118 117 alias,cmd = par.split(None, 1)
119 118 except ValueError:
120 119 print(oinspect.getdoc(self.alias))
121 120 return
122 121
123 122 try:
124 123 self.shell.alias_manager.define_alias(alias, cmd)
125 124 except AliasError as e:
126 125 print(e)
127 126 # end magic_alias
128 127
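A hedged sketch of defining an alias from Python code instead of the ``%alias`` line, relying only on the ``alias_manager.define_alias`` call used above; it assumes an active IPython session::

    from IPython import get_ipython

    ip = get_ipython()          # None when not running inside IPython
    if ip is not None:
        # Equivalent to typing: %alias ll ls -l
        ip.alias_manager.define_alias('ll', 'ls -l')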
129 128 @line_magic
130 129 def unalias(self, parameter_s=''):
131 130 """Remove an alias"""
132 131
133 132 aname = parameter_s.strip()
134 133 try:
135 134 self.shell.alias_manager.undefine_alias(aname)
136 135 except ValueError as e:
137 136 print(e)
138 137 return
139 138
140 139 stored = self.shell.db.get('stored_aliases', {} )
141 140 if aname in stored:
142 141 print("Removing stored alias", aname)
143 142 del stored[aname]
144 143 self.shell.db['stored_aliases'] = stored
145 144
146 145 @line_magic
147 146 def rehashx(self, parameter_s=''):
148 147 """Update the alias table with all executable files in $PATH.
149 148
150 149 rehashx explicitly checks that every entry in $PATH is a file
151 150 with execute access (os.X_OK).
152 151
153 152 Under Windows, it checks executability as a match against a
154 153 '|'-separated string of extensions, stored in the IPython config
155 154 variable win_exec_ext. This defaults to 'exe|com|bat'.
156 155
157 156 This function also resets the root module cache of module completer,
158 157 used on slow filesystems.
159 158 """
160 159 from IPython.core.alias import InvalidAliasError
161 160
162 161 # for the benefit of module completer in ipy_completers.py
163 162 del self.shell.db['rootmodules_cache']
164 163
165 164 path = [os.path.abspath(os.path.expanduser(p)) for p in
166 165 os.environ.get('PATH','').split(os.pathsep)]
167 166
168 167 syscmdlist = []
169 168 # Now define isexec in a cross platform manner.
170 169 if os.name == 'posix':
171 170 isexec = lambda fname:os.path.isfile(fname) and \
172 171 os.access(fname,os.X_OK)
173 172 else:
174 173 try:
175 174 winext = os.environ['pathext'].replace(';','|').replace('.','')
176 175 except KeyError:
177 176 winext = 'exe|com|bat|py'
178 177 if 'py' not in winext:
179 178 winext += '|py'
180 179 execre = re.compile(r'(.*)\.(%s)$' % winext,re.IGNORECASE)
181 180 isexec = lambda fname:os.path.isfile(fname) and execre.match(fname)
182 181 savedir = py3compat.getcwd()
183 182
184 183 # Now walk the paths looking for executables to alias.
185 184 try:
186 185 # write the whole loop for posix/Windows so we don't have an if in
187 186 # the innermost part
188 187 if os.name == 'posix':
189 188 for pdir in path:
190 189 try:
191 190 os.chdir(pdir)
192 191 dirlist = os.listdir(pdir)
193 192 except OSError:
194 193 continue
195 194 for ff in dirlist:
196 195 if isexec(ff):
197 196 try:
198 197 # Removes dots from the name since ipython
199 198 # will assume names with dots to be python.
200 199 if not self.shell.alias_manager.is_alias(ff):
201 200 self.shell.alias_manager.define_alias(
202 201 ff.replace('.',''), ff)
203 202 except InvalidAliasError:
204 203 pass
205 204 else:
206 205 syscmdlist.append(ff)
207 206 else:
208 207 no_alias = Alias.blacklist
209 208 for pdir in path:
210 209 try:
211 210 os.chdir(pdir)
212 211 dirlist = os.listdir(pdir)
213 212 except OSError:
214 213 continue
215 214 for ff in dirlist:
216 215 base, ext = os.path.splitext(ff)
217 216 if isexec(ff) and base.lower() not in no_alias:
218 217 if ext.lower() == '.exe':
219 218 ff = base
220 219 try:
221 220 # Removes dots from the name since ipython
222 221 # will assume names with dots to be python.
223 222 self.shell.alias_manager.define_alias(
224 223 base.lower().replace('.',''), ff)
225 224 except InvalidAliasError:
226 225 pass
227 226 syscmdlist.append(ff)
228 227 self.shell.db['syscmdlist'] = syscmdlist
229 228 finally:
230 229 os.chdir(savedir)
231 230
232 231 @skip_doctest
233 232 @line_magic
234 233 def pwd(self, parameter_s=''):
235 234 """Return the current working directory path.
236 235
237 236 Examples
238 237 --------
239 238 ::
240 239
241 240 In [9]: pwd
242 241 Out[9]: '/home/tsuser/sprint/ipython'
243 242 """
244 243 return py3compat.getcwd()
245 244
246 245 @skip_doctest
247 246 @line_magic
248 247 def cd(self, parameter_s=''):
249 248 """Change the current working directory.
250 249
251 250 This command automatically maintains an internal list of directories
252 251 you visit during your IPython session, in the variable _dh. The
253 252 command %dhist shows this history nicely formatted. You can also
254 253 do 'cd -<tab>' to see directory history conveniently.
255 254
256 255 Usage:
257 256
258 257 cd 'dir': changes to directory 'dir'.
259 258
260 259 cd -: changes to the last visited directory.
261 260
262 261 cd -<n>: changes to the n-th directory in the directory history.
263 262
264 263 cd --foo: change to directory that matches 'foo' in history
265 264
266 265 cd -b <bookmark_name>: jump to a bookmark set by %bookmark
267 266 (note: cd <bookmark_name> is enough if there is no
268 267 directory <bookmark_name>, but a bookmark with the name exists.)
269 268 'cd -b <tab>' allows you to tab-complete bookmark names.
270 269
271 270 Options:
272 271
273 272 -q: quiet. Do not print the working directory after the cd command is
274 273 executed. By default IPython's cd command does print this directory,
275 274 since the default prompts do not display path information.
276 275
277 276 Note that !cd doesn't work for this purpose because the shell where
278 277 !command runs is immediately discarded after executing 'command'.
279 278
280 279 Examples
281 280 --------
282 281 ::
283 282
284 283 In [10]: cd parent/child
285 284 /home/tsuser/parent/child
286 285 """
287 286
288 287 oldcwd = py3compat.getcwd()
289 288 numcd = re.match(r'(-)(\d+)$',parameter_s)
290 289 # jump in directory history by number
291 290 if numcd:
292 291 nn = int(numcd.group(2))
293 292 try:
294 293 ps = self.shell.user_ns['_dh'][nn]
295 294 except IndexError:
296 295 print('The requested directory does not exist in history.')
297 296 return
298 297 else:
299 298 opts = {}
300 299 elif parameter_s.startswith('--'):
301 300 ps = None
302 301 fallback = None
303 302 pat = parameter_s[2:]
304 303 dh = self.shell.user_ns['_dh']
305 304 # first search only by basename (last component)
306 305 for ent in reversed(dh):
307 306 if pat in os.path.basename(ent) and os.path.isdir(ent):
308 307 ps = ent
309 308 break
310 309
311 310 if fallback is None and pat in ent and os.path.isdir(ent):
312 311 fallback = ent
313 312
314 313 # if we have no last part match, pick the first full path match
315 314 if ps is None:
316 315 ps = fallback
317 316
318 317 if ps is None:
319 318 print("No matching entry in directory history")
320 319 return
321 320 else:
322 321 opts = {}
323 322
324 323
325 324 else:
326 325 opts, ps = self.parse_options(parameter_s, 'qb', mode='string')
327 326 # jump to previous
328 327 if ps == '-':
329 328 try:
330 329 ps = self.shell.user_ns['_dh'][-2]
331 330 except IndexError:
332 331 raise UsageError('%cd -: No previous directory to change to.')
333 332 # jump to bookmark if needed
334 333 else:
335 334 if not os.path.isdir(ps) or 'b' in opts:
336 335 bkms = self.shell.db.get('bookmarks', {})
337 336
338 337 if ps in bkms:
339 338 target = bkms[ps]
340 339 print('(bookmark:%s) -> %s' % (ps, target))
341 340 ps = target
342 341 else:
343 342 if 'b' in opts:
344 343 raise UsageError("Bookmark '%s' not found. "
345 344 "Use '%%bookmark -l' to see your bookmarks." % ps)
346 345
347 346 # at this point ps should point to the target dir
348 347 if ps:
349 348 try:
350 349 os.chdir(os.path.expanduser(ps))
351 350 if hasattr(self.shell, 'term_title') and self.shell.term_title:
352 351 set_term_title('IPython: ' + abbrev_cwd())
353 352 except OSError:
354 353 print(sys.exc_info()[1])
355 354 else:
356 355 cwd = py3compat.getcwd()
357 356 dhist = self.shell.user_ns['_dh']
358 357 if oldcwd != cwd:
359 358 dhist.append(cwd)
360 359 self.shell.db['dhist'] = compress_dhist(dhist)[-100:]
361 360
362 361 else:
363 362 os.chdir(self.shell.home_dir)
364 363 if hasattr(self.shell, 'term_title') and self.shell.term_title:
365 364 set_term_title('IPython: ' + '~')
366 365 cwd = py3compat.getcwd()
367 366 dhist = self.shell.user_ns['_dh']
368 367
369 368 if oldcwd != cwd:
370 369 dhist.append(cwd)
371 370 self.shell.db['dhist'] = compress_dhist(dhist)[-100:]
372 371 if 'q' not in opts and self.shell.user_ns['_dh']:
373 372 print(self.shell.user_ns['_dh'][-1])
374 373
375 374 @line_magic
376 375 def env(self, parameter_s=''):
377 376 """Get, set, or list environment variables.
378 377
379 378 Usage:\\
380 379
381 380 %env: lists all environment variables/values
382 381 %env var: get value for var
383 382 %env var val: set value for var
384 383 %env var=val: set value for var
385 384 %env var=$val: set value for var, using python expansion if possible
386 385 """
387 386 if parameter_s.strip():
388 387 split = '=' if '=' in parameter_s else ' '
389 388 bits = parameter_s.split(split)
390 389 if len(bits) == 1:
391 390 key = parameter_s.strip()
392 391 if key in os.environ:
393 392 return os.environ[key]
394 393 else:
395 394 err = "Environment does not have key: {0}".format(key)
396 395 raise UsageError(err)
397 396 if len(bits) > 1:
398 397 return self.set_env(parameter_s)
399 398 return dict(os.environ)
400 399
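A hedged usage sketch of ``%env``/``%set_env`` driven through the programmatic magic API; ``MY_FLAG`` is an arbitrary example variable and an active IPython session is assumed::

    from IPython import get_ipython

    ip = get_ipython()
    if ip is not None:
        ip.run_line_magic('set_env', 'MY_FLAG=1')    # prints: env: MY_FLAG=1
        value = ip.run_line_magic('env', 'MY_FLAG')  # returns '1'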
401 400 @line_magic
402 401 def set_env(self, parameter_s):
403 402 """Set environment variables. Assumptions are that either "val" is a
404 403 name in the user namespace, or val is something that evaluates to a
405 404 string.
406 405
407 406 Usage:\\
408 407 %set_env var val: set value for var
409 408 %set_env var=val: set value for var
410 409 %set_env var=$val: set value for var, using python expansion if possible
411 410 """
412 411 split = '=' if '=' in parameter_s else ' '
413 412 bits = parameter_s.split(split, 1)
414 413 if not parameter_s.strip() or len(bits)<2:
415 414 raise UsageError("usage is 'set_env var=val'")
416 415 var = bits[0].strip()
417 416 val = bits[1].strip()
418 417 if re.match(r'.*\s.*', var):
419 418 # an environment variable with whitespace is almost certainly
420 419 # not what the user intended. what's more likely is the wrong
421 420 # split was chosen, ie for "set_env cmd_args A=B", we chose
422 421 # '=' for the split and should have chosen ' '. to get around
423 422 # this, users should just assign directly to os.environ or use
424 423 # standard magic {var} expansion.
425 424 err = "refusing to set env var with whitespace: '{0}'"
426 425 err = err.format(val)
427 426 raise UsageError(err)
428 427 os.environ[py3compat.cast_bytes_py2(var)] = py3compat.cast_bytes_py2(val)
429 428 print('env: {0}={1}'.format(var,val))
430 429
431 430 @line_magic
432 431 def pushd(self, parameter_s=''):
433 432 """Place the current dir on stack and change directory.
434 433
435 434 Usage:\\
436 435 %pushd ['dirname']
437 436 """
438 437
439 438 dir_s = self.shell.dir_stack
440 439 tgt = os.path.expanduser(parameter_s)
441 440 cwd = py3compat.getcwd().replace(self.shell.home_dir,'~')
442 441 if tgt:
443 442 self.cd(parameter_s)
444 443 dir_s.insert(0,cwd)
445 444 return self.shell.magic('dirs')
446 445
447 446 @line_magic
448 447 def popd(self, parameter_s=''):
449 448 """Change to directory popped off the top of the stack.
450 449 """
451 450 if not self.shell.dir_stack:
452 451 raise UsageError("%popd on empty stack")
453 452 top = self.shell.dir_stack.pop(0)
454 453 self.cd(top)
455 454 print("popd ->",top)
456 455
457 456 @line_magic
458 457 def dirs(self, parameter_s=''):
459 458 """Return the current directory stack."""
460 459
461 460 return self.shell.dir_stack
462 461
463 462 @line_magic
464 463 def dhist(self, parameter_s=''):
465 464 """Print your history of visited directories.
466 465
467 466 %dhist -> print full history\\
468 467 %dhist n -> print last n entries only\\
469 468 %dhist n1 n2 -> print entries between n1 and n2 (n2 not included)\\
470 469
471 470 This history is automatically maintained by the %cd command, and
472 471 always available as the global list variable _dh. You can use %cd -<n>
473 472 to go to directory number <n>.
474 473
475 474 Note that most of the time, you should view directory history by entering
476 475 cd -<TAB>.
477 476
478 477 """
479 478
480 479 dh = self.shell.user_ns['_dh']
481 480 if parameter_s:
482 481 try:
483 482 args = list(map(int, parameter_s.split()))  # list, so len() below works
484 483 except:
485 484 self.arg_err(self.dhist)
486 485 return
487 486 if len(args) == 1:
488 487 ini,fin = max(len(dh)-(args[0]),0),len(dh)
489 488 elif len(args) == 2:
490 489 ini,fin = args
491 490 fin = min(fin, len(dh))
492 491 else:
493 492 self.arg_err(self.dhist)
494 493 return
495 494 else:
496 495 ini,fin = 0,len(dh)
497 496 print('Directory history (kept in _dh)')
498 497 for i in range(ini, fin):
499 498 print("%d: %s" % (i, dh[i]))
500 499
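A small sketch of the range arithmetic above; ``dhist_bounds`` is an illustrative helper, not part of IPython: with one argument it keeps the last *n* entries, with two it clips the requested range to the history length::

    def dhist_bounds(dh, args):
        # Mirror %dhist's handling of zero, one or two integer arguments.
        if not args:
            return 0, len(dh)
        if len(args) == 1:
            return max(len(dh) - args[0], 0), len(dh)
        ini, fin = args
        return ini, min(fin, len(dh))

    history = ['/a', '/b', '/c']
    assert dhist_bounds(history, []) == (0, 3)
    assert dhist_bounds(history, [2]) == (1, 3)      # last two entries
    assert dhist_bounds(history, [1, 99]) == (1, 3)  # upper bound clipped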
501 500 @skip_doctest
502 501 @line_magic
503 502 def sc(self, parameter_s=''):
504 503 """Shell capture - run shell command and capture output (DEPRECATED use !).
505 504
506 505 DEPRECATED. Suboptimal, retained for backwards compatibility.
507 506
508 507 You should use the form 'var = !command' instead. Example:
509 508
510 509 "%sc -l myfiles = ls ~" should now be written as
511 510
512 511 "myfiles = !ls ~"
513 512
514 513 myfiles.s, myfiles.l and myfiles.n still apply as documented
515 514 below.
516 515
517 516 --
518 517 %sc [options] varname=command
519 518
520 519 IPython will run the given command using its getoutput() machinery, and
521 520 will then update the user's interactive namespace with a variable
522 521 called varname, containing the value of the call. Your command can
523 522 contain shell wildcards, pipes, etc.
524 523
525 524 The '=' sign in the syntax is mandatory, and the variable name you
526 525 supply must follow Python's standard conventions for valid names.
527 526
528 527 (A special format without variable name exists for internal use)
529 528
530 529 Options:
531 530
532 531 -l: list output. Split the output on newlines into a list before
533 532 assigning it to the given variable. By default the output is stored
534 533 as a single string.
535 534
536 535 -v: verbose. Print the contents of the variable.
537 536
538 537 In most cases you should not need to split as a list, because the
539 538 returned value is a special type of string which can automatically
540 539 provide its contents either as a list (split on newlines) or as a
541 540 space-separated string. These are convenient, respectively, either
542 541 for sequential processing or to be passed to a shell command.
543 542
544 543 For example::
545 544
546 545 # Capture into variable a
547 546 In [1]: sc a=ls *py
548 547
549 548 # a is a string with embedded newlines
550 549 In [2]: a
551 550 Out[2]: 'setup.py\\nwin32_manual_post_install.py'
552 551
553 552 # which can be seen as a list:
554 553 In [3]: a.l
555 554 Out[3]: ['setup.py', 'win32_manual_post_install.py']
556 555
557 556 # or as a whitespace-separated string:
558 557 In [4]: a.s
559 558 Out[4]: 'setup.py win32_manual_post_install.py'
560 559
561 560 # a.s is useful to pass as a single command line:
562 561 In [5]: !wc -l $a.s
563 562 146 setup.py
564 563 130 win32_manual_post_install.py
565 564 276 total
566 565
567 566 # while the list form is useful to loop over:
568 567 In [6]: for f in a.l:
569 568 ...: !wc -l $f
570 569 ...:
571 570 146 setup.py
572 571 130 win32_manual_post_install.py
573 572
574 573 Similarly, the lists returned by the -l option are also special, in
575 574 the sense that you can equally invoke the .s attribute on them to
576 575 automatically get a whitespace-separated string from their contents::
577 576
578 577 In [7]: sc -l b=ls *py
579 578
580 579 In [8]: b
581 580 Out[8]: ['setup.py', 'win32_manual_post_install.py']
582 581
583 582 In [9]: b.s
584 583 Out[9]: 'setup.py win32_manual_post_install.py'
585 584
586 585 In summary, both the lists and strings used for output capture have
587 586 the following special attributes::
588 587
589 588 .l (or .list) : value as list.
590 589 .n (or .nlstr): value as newline-separated string.
591 590 .s (or .spstr): value as space-separated string.
592 591 """
593 592
594 593 opts,args = self.parse_options(parameter_s, 'lv')
595 594 # Try to get a variable name and command to run
596 595 try:
597 596 # the variable name must be obtained from the parse_options
598 597 # output, which uses shlex.split to strip options out.
599 598 var,_ = args.split('=', 1)
600 599 var = var.strip()
601 600 # But the command has to be extracted from the original input
602 601 # parameter_s, not on what parse_options returns, to avoid the
603 602 # quote stripping which shlex.split performs on it.
604 603 _,cmd = parameter_s.split('=', 1)
605 604 except ValueError:
606 605 var,cmd = '',''
607 606 # If all looks ok, proceed
608 607 split = 'l' in opts
609 608 out = self.shell.getoutput(cmd, split=split)
610 609 if 'v' in opts:
611 610 print('%s ==\n%s' % (var, pformat(out)))
612 611 if var:
613 612 self.shell.user_ns.update({var:out})
614 613 else:
615 614 return out
616 615
617 616 @line_cell_magic
618 617 def sx(self, line='', cell=None):
619 618 """Shell execute - run shell command and capture output (!! is short-hand).
620 619
621 620 %sx command
622 621
623 622 IPython will run the given command using its getoutput() machinery, and
624 623 return the result formatted as a list (split on '\\n'). Since the
625 624 output is _returned_, it will be stored in ipython's regular output
626 625 cache Out[N] and in the '_N' automatic variables.
627 626
628 627 Notes:
629 628
630 629 1) If an input line begins with '!!', then %sx is automatically
631 630 invoked. That is, while::
632 631
633 632 !ls
634 633
635 634 causes ipython to simply issue system('ls'), typing::
636 635
637 636 !!ls
638 637
639 638 is a shorthand equivalent to::
640 639
641 640 %sx ls
642 641
643 642 2) %sx differs from %sc in that %sx automatically splits into a list,
644 643 like '%sc -l'. The reason for this is to make it as easy as possible
645 644 to process line-oriented shell output via further python commands.
646 645 %sc is meant to provide much finer control, but requires more
647 646 typing.
648 647
649 648 3) Just like %sc -l, this is a list with special attributes:
650 649 ::
651 650
652 651 .l (or .list) : value as list.
653 652 .n (or .nlstr): value as newline-separated string.
654 653 .s (or .spstr): value as whitespace-separated string.
655 654
656 655 This is very useful when trying to use such lists as arguments to
657 656 system commands."""
658 657
659 658 if cell is None:
660 659 # line magic
661 660 return self.shell.getoutput(line)
662 661 else:
663 662 opts,args = self.parse_options(line, '', 'out=')
664 663 output = self.shell.getoutput(cell)
665 664 out_name = opts.get('out', opts.get('o'))
666 665 if out_name:
667 666 self.shell.user_ns[out_name] = output
668 667 else:
669 668 return output
670 669
671 670 system = line_cell_magic('system')(sx)
672 671 bang = cell_magic('!')(sx)
673 672
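A hedged usage sketch of capturing shell output with ``%sx`` (or ``!!``) and the ``.l``/``.s``/``.n`` attributes documented above; it assumes an active IPython session and that ``ls`` exists on the system::

    from IPython import get_ipython

    ip = get_ipython()
    if ip is not None:
        files = ip.run_line_magic('sx', 'ls')  # list-like result
        print(files.n)                         # newline-joined string
        print(files.s)                         # space-joined string
        for f in files:                        # iterates like a plain list
            print(f)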
674 673 @line_magic
675 674 def bookmark(self, parameter_s=''):
676 675 """Manage IPython's bookmark system.
677 676
678 677 %bookmark <name> - set bookmark to current dir
679 678 %bookmark <name> <dir> - set bookmark to <dir>
680 679 %bookmark -l - list all bookmarks
681 680 %bookmark -d <name> - remove bookmark
682 681 %bookmark -r - remove all bookmarks
683 682
684 683 You can later on access a bookmarked folder with::
685 684
686 685 %cd -b <name>
687 686
688 687 or simply '%cd <name>' if there is no directory called <name> AND
689 688 there is such a bookmark defined.
690 689
691 690 Your bookmarks persist through IPython sessions, but they are
692 691 associated with each profile."""
693 692
694 693 opts,args = self.parse_options(parameter_s,'drl',mode='list')
695 694 if len(args) > 2:
696 695 raise UsageError("%bookmark: too many arguments")
697 696
698 697 bkms = self.shell.db.get('bookmarks',{})
699 698
700 699 if 'd' in opts:
701 700 try:
702 701 todel = args[0]
703 702 except IndexError:
704 703 raise UsageError(
705 704 "%bookmark -d: must provide a bookmark to delete")
706 705 else:
707 706 try:
708 707 del bkms[todel]
709 708 except KeyError:
710 709 raise UsageError(
711 710 "%%bookmark -d: Can't delete bookmark '%s'" % todel)
712 711
713 712 elif 'r' in opts:
714 713 bkms = {}
715 714 elif 'l' in opts:
716 715 bks = sorted(bkms)
717 716 if bks:
718 717 size = max(map(len, bks))
719 718 else:
720 719 size = 0
721 720 fmt = '%-'+str(size)+'s -> %s'
722 721 print('Current bookmarks:')
723 722 for bk in bks:
724 723 print(fmt % (bk, bkms[bk]))
725 724 else:
726 725 if not args:
727 726 raise UsageError("%bookmark: You must specify the bookmark name")
728 727 elif len(args)==1:
729 728 bkms[args[0]] = py3compat.getcwd()
730 729 elif len(args)==2:
731 730 bkms[args[0]] = args[1]
732 731 self.shell.db['bookmarks'] = bkms
733 732
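A hedged usage sketch of the bookmark workflow above, run programmatically; ``proj`` and the path are arbitrary examples and an active IPython session is assumed::

    from IPython import get_ipython

    ip = get_ipython()
    if ip is not None:
        ip.run_line_magic('bookmark', 'proj ~/work/project')  # set bookmark
        ip.run_line_magic('cd', '-b proj')                    # jump to it
        ip.run_line_magic('bookmark', '-l')                   # list bookmarks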
734 733 @line_magic
735 734 def pycat(self, parameter_s=''):
736 735 """Show a syntax-highlighted file through a pager.
737 736
738 737 This magic is similar to the cat utility, but it will assume the file
739 738 to be Python source and will show it with syntax highlighting.
740 739
741 740 This magic command can either take a local filename, a URL,
742 741 a history range (see %history) or a macro as argument ::
743 742
744 743 %pycat myscript.py
745 744 %pycat 7-27
746 745 %pycat myMacro
747 746 %pycat http://www.example.com/myscript.py
748 747 """
749 748 if not parameter_s:
750 749 raise UsageError('Missing filename, URL, input history range, '
751 750 'or macro.')
752 751
753 752 try :
754 753 cont = self.shell.find_user_code(parameter_s, skip_encoding_cookie=False)
755 754 except (ValueError, IOError):
756 755 print("Error: no such file, variable, URL, history range or macro")
757 756 return
758 757
759 758 page.page(self.shell.pycolorize(source_to_unicode(cont)))
760 759
761 760 @magic_arguments.magic_arguments()
762 761 @magic_arguments.argument(
763 762 '-a', '--append', action='store_true', default=False,
764 763 help='Append contents of the cell to an existing file. '
765 764 'The file will be created if it does not exist.'
766 765 )
767 766 @magic_arguments.argument(
768 767 'filename', type=unicode_type,
769 768 help='file to write'
770 769 )
771 770 @cell_magic
772 771 def writefile(self, line, cell):
773 772 """Write the contents of the cell to a file.
774 773
775 774 The file will be overwritten unless the -a (--append) flag is specified.
776 775 """
777 776 args = magic_arguments.parse_argstring(self.writefile, line)
778 777 filename = os.path.expanduser(args.filename)
779 778
780 779 if os.path.exists(filename):
781 780 if args.append:
782 781 print("Appending to %s" % filename)
783 782 else:
784 783 print("Overwriting %s" % filename)
785 784 else:
786 785 print("Writing %s" % filename)
787 786
788 787 mode = 'a' if args.append else 'w'
789 788 with io.open(filename, mode, encoding='utf-8') as f:
790 789 f.write(cell)
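A hedged usage sketch of ``%%writefile`` invoked programmatically; ``notes.txt`` is an arbitrary example filename and an active IPython session is assumed::

    from IPython import get_ipython

    ip = get_ipython()
    if ip is not None:
        ip.run_cell_magic('writefile', 'notes.txt', 'first line\n')
        ip.run_cell_magic('writefile', '-a notes.txt', 'second line\n')  # append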
@@ -1,167 +1,166 b''
1 1 """Implementation of magic functions for matplotlib/pylab support.
2 2 """
3 from __future__ import print_function
4 3 #-----------------------------------------------------------------------------
5 4 # Copyright (c) 2012 The IPython Development Team.
6 5 #
7 6 # Distributed under the terms of the Modified BSD License.
8 7 #
9 8 # The full license is in the file COPYING.txt, distributed with this software.
10 9 #-----------------------------------------------------------------------------
11 10
12 11 #-----------------------------------------------------------------------------
13 12 # Imports
14 13 #-----------------------------------------------------------------------------
15 14
16 15 # Our own packages
17 16 from traitlets.config.application import Application
18 17 from IPython.core import magic_arguments
19 18 from IPython.core.magic import Magics, magics_class, line_magic
20 19 from IPython.testing.skipdoctest import skip_doctest
21 20 from warnings import warn
22 21 from IPython.core.pylabtools import backends
23 22
24 23 #-----------------------------------------------------------------------------
25 24 # Magic implementation classes
26 25 #-----------------------------------------------------------------------------
27 26
28 27 magic_gui_arg = magic_arguments.argument(
29 28 'gui', nargs='?',
30 29 help="""Name of the matplotlib backend to use %s.
31 30 If given, the corresponding matplotlib backend is used,
32 31 otherwise it will be matplotlib's default
33 32 (which you can set in your matplotlib config file).
34 33 """ % str(tuple(sorted(backends.keys())))
35 34 )
36 35
37 36
38 37 @magics_class
39 38 class PylabMagics(Magics):
40 39 """Magics related to matplotlib's pylab support"""
41 40
42 41 @skip_doctest
43 42 @line_magic
44 43 @magic_arguments.magic_arguments()
45 44 @magic_arguments.argument('-l', '--list', action='store_true',
46 45 help='Show available matplotlib backends')
47 46 @magic_gui_arg
48 47 def matplotlib(self, line=''):
49 48 """Set up matplotlib to work interactively.
50 49
51 50 This function lets you activate matplotlib interactive support
52 51 at any point during an IPython session. It does not import anything
53 52 into the interactive namespace.
54 53
55 54 If you are using the inline matplotlib backend in the IPython Notebook
56 55 you can set which figure formats are enabled using the following::
57 56
58 57 In [1]: from IPython.display import set_matplotlib_formats
59 58
60 59 In [2]: set_matplotlib_formats('pdf', 'svg')
61 60
62 61 The default for inline figures sets `bbox_inches` to 'tight'. This can
63 62 cause discrepancies between the displayed image and the identical
64 63 image created using `savefig`. This behavior can be disabled using the
65 64 `%config` magic::
66 65
67 66 In [3]: %config InlineBackend.print_figure_kwargs = {'bbox_inches':None}
68 67
69 68 In addition, see the docstring of
70 69 `IPython.display.set_matplotlib_formats` and
71 70 `IPython.display.set_matplotlib_close` for more information on
72 71 changing additional behaviors of the inline backend.
73 72
74 73 Examples
75 74 --------
76 75 To enable the inline backend for usage with the IPython Notebook::
77 76
78 77 In [1]: %matplotlib inline
79 78
80 79 In this case, where the matplotlib default is TkAgg::
81 80
82 81 In [2]: %matplotlib
83 82 Using matplotlib backend: TkAgg
84 83
85 84 But you can explicitly request a different GUI backend::
86 85
87 86 In [3]: %matplotlib qt
88 87
89 88 You can list the available backends using the -l/--list option::
90 89
91 90 In [4]: %matplotlib --list
92 91 Available matplotlib backends: ['osx', 'qt4', 'qt5', 'gtk3', 'notebook', 'wx', 'qt', 'nbagg',
93 92 'gtk', 'tk', 'inline']
94 93 """
95 94 args = magic_arguments.parse_argstring(self.matplotlib, line)
96 95 if args.list:
97 96 backends_list = list(backends.keys())
98 97 print("Available matplotlib backends: %s" % backends_list)
99 98 else:
100 99 gui, backend = self.shell.enable_matplotlib(args.gui)
101 100 self._show_matplotlib_backend(args.gui, backend)
102 101
103 102 @skip_doctest
104 103 @line_magic
105 104 @magic_arguments.magic_arguments()
106 105 @magic_arguments.argument(
107 106 '--no-import-all', action='store_true', default=None,
108 107 help="""Prevent IPython from performing ``import *`` into the interactive namespace.
109 108
110 109 You can govern the default behavior of this flag with the
111 110 InteractiveShellApp.pylab_import_all configurable.
112 111 """
113 112 )
114 113 @magic_gui_arg
115 114 def pylab(self, line=''):
116 115 """Load numpy and matplotlib to work interactively.
117 116
118 117 This function lets you activate pylab (matplotlib, numpy and
119 118 interactive support) at any point during an IPython session.
120 119
121 120 %pylab makes the following imports::
122 121
123 122 import numpy
124 123 import matplotlib
125 124 from matplotlib import pylab, mlab, pyplot
126 125 np = numpy
127 126 plt = pyplot
128 127
129 128 from IPython.display import display
130 129 from IPython.core.pylabtools import figsize, getfigs
131 130
132 131 from pylab import *
133 132 from numpy import *
134 133
135 134 If you pass `--no-import-all`, the last two `*` imports will be excluded.
136 135
137 136 See the %matplotlib magic for more details about activating matplotlib
138 137 without affecting the interactive namespace.
139 138 """
140 139 args = magic_arguments.parse_argstring(self.pylab, line)
141 140 if args.no_import_all is None:
142 141 # get default from Application
143 142 if Application.initialized():
144 143 app = Application.instance()
145 144 try:
146 145 import_all = app.pylab_import_all
147 146 except AttributeError:
148 147 import_all = True
149 148 else:
150 149 # nothing specified, no app - default True
151 150 import_all = True
152 151 else:
153 152 # invert no-import flag
154 153 import_all = not args.no_import_all
155 154
156 155 gui, backend, clobbered = self.shell.enable_pylab(args.gui, import_all=import_all)
157 156 self._show_matplotlib_backend(args.gui, backend)
158 157 print("Populating the interactive namespace from numpy and matplotlib")
159 158 if clobbered:
160 159 warn("pylab import has clobbered these variables: %s" % clobbered +
161 160 "\n`%matplotlib` prevents importing * from pylab and numpy"
162 161 )
163 162
164 163 def _show_matplotlib_backend(self, gui, backend):
165 164 """Show the matplotlib backend message."""
166 165 if not gui or gui == 'auto':
167 166 print("Using matplotlib backend: %s" % backend)
@@ -1,272 +1,271 b''
1 1 """Magic functions for running cells in various scripts."""
2 from __future__ import print_function
3 2
4 3 # Copyright (c) IPython Development Team.
5 4 # Distributed under the terms of the Modified BSD License.
6 5
7 6 import errno
8 7 import os
9 8 import sys
10 9 import signal
11 10 import time
12 11 from subprocess import Popen, PIPE
13 12 import atexit
14 13
15 14 from IPython.core import magic_arguments
16 15 from IPython.core.magic import (
17 16 Magics, magics_class, line_magic, cell_magic
18 17 )
19 18 from IPython.lib.backgroundjobs import BackgroundJobManager
20 19 from IPython.utils import py3compat
21 20 from IPython.utils.process import arg_split
22 21 from traitlets import List, Dict, default
23 22
24 23 #-----------------------------------------------------------------------------
25 24 # Magic implementation classes
26 25 #-----------------------------------------------------------------------------
27 26
28 27 def script_args(f):
29 28 """single decorator for adding script args"""
30 29 args = [
31 30 magic_arguments.argument(
32 31 '--out', type=str,
33 32 help="""The variable in which to store stdout from the script.
34 33 If the script is backgrounded, this will be the stdout *pipe*,
35 34 instead of the stdout text itself.
36 35 """
37 36 ),
38 37 magic_arguments.argument(
39 38 '--err', type=str,
40 39 help="""The variable in which to store stderr from the script.
41 40 If the script is backgrounded, this will be the stderr *pipe*,
42 41 instead of the stderr text itself.
43 42 """
44 43 ),
45 44 magic_arguments.argument(
46 45 '--bg', action="store_true",
47 46 help="""Whether to run the script in the background.
48 47 If given, the only way to see the output of the command is
49 48 with --out/err.
50 49 """
51 50 ),
52 51 magic_arguments.argument(
53 52 '--proc', type=str,
54 53 help="""The variable in which to store Popen instance.
55 54 This is used only when --bg option is given.
56 55 """
57 56 ),
58 57 ]
59 58 for arg in args:
60 59 f = arg(f)
61 60 return f
62 61
63 62 @magics_class
64 63 class ScriptMagics(Magics):
65 64 """Magics for talking to scripts
66 65
67 66 This defines a base `%%script` cell magic for running a cell
68 67 with a program in a subprocess, and registers a few top-level
69 68 magics that call %%script with common interpreters.
70 69 """
71 70 script_magics = List(
72 71 help="""Extra script cell magics to define
73 72
74 73 This generates simple wrappers of `%%script foo` as `%%foo`.
75 74
76 75 If you want to add script magics that aren't on your path,
77 76 specify them in script_paths
78 77 """,
79 78 ).tag(config=True)
80 79 @default('script_magics')
81 80 def _script_magics_default(self):
82 81 """default to a common list of programs"""
83 82
84 83 defaults = [
85 84 'sh',
86 85 'bash',
87 86 'perl',
88 87 'ruby',
89 88 'python',
90 89 'python2',
91 90 'python3',
92 91 'pypy',
93 92 ]
94 93 if os.name == 'nt':
95 94 defaults.extend([
96 95 'cmd',
97 96 ])
98 97
99 98 return defaults
100 99
101 100 script_paths = Dict(
102 101 help="""Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
103 102
104 103 Only necessary for items in script_magics where the default path will not
105 104 find the right interpreter.
106 105 """
107 106 ).tag(config=True)
108 107
109 108 def __init__(self, shell=None):
110 109 super(ScriptMagics, self).__init__(shell=shell)
111 110 self._generate_script_magics()
112 111 self.job_manager = BackgroundJobManager()
113 112 self.bg_processes = []
114 113 atexit.register(self.kill_bg_processes)
115 114
116 115 def __del__(self):
117 116 self.kill_bg_processes()
118 117
119 118 def _generate_script_magics(self):
120 119 cell_magics = self.magics['cell']
121 120 for name in self.script_magics:
122 121 cell_magics[name] = self._make_script_magic(name)
123 122
124 123 def _make_script_magic(self, name):
125 124 """make a named magic, that calls %%script with a particular program"""
126 125 # expand to explicit path if necessary:
127 126 script = self.script_paths.get(name, name)
128 127
129 128 @magic_arguments.magic_arguments()
130 129 @script_args
131 130 def named_script_magic(line, cell):
132 131 # if line, add it as cl-flags
133 132 if line:
134 133 line = "%s %s" % (script, line)
135 134 else:
136 135 line = script
137 136 return self.shebang(line, cell)
138 137
139 138 # write a basic docstring:
140 139 named_script_magic.__doc__ = \
141 140 """%%{name} script magic
142 141
143 142 Run cells with {script} in a subprocess.
144 143
145 144 This is a shortcut for `%%script {script}`
146 145 """.format(**locals())
147 146
148 147 return named_script_magic
149 148
150 149 @magic_arguments.magic_arguments()
151 150 @script_args
152 151 @cell_magic("script")
153 152 def shebang(self, line, cell):
154 153 """Run a cell via a shell command
155 154
156 155 The `%%script` line is like the #! line of script,
157 156 specifying a program (bash, perl, ruby, etc.) with which to run.
158 157
159 158 The rest of the cell is run by that program.
160 159
161 160 Examples
162 161 --------
163 162 ::
164 163
165 164 In [1]: %%script bash
166 165 ...: for i in 1 2 3; do
167 166 ...: echo $i
168 167 ...: done
169 168 1
170 169 2
171 170 3
172 171 """
173 172 argv = arg_split(line, posix = not sys.platform.startswith('win'))
174 173 args, cmd = self.shebang.parser.parse_known_args(argv)
175 174
176 175 try:
177 176 p = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)
178 177 except OSError as e:
179 178 if e.errno == errno.ENOENT:
180 179 print("Couldn't find program: %r" % cmd[0])
181 180 return
182 181 else:
183 182 raise
184 183
185 184 if not cell.endswith('\n'):
186 185 cell += '\n'
187 186 cell = cell.encode('utf8', 'replace')
188 187 if args.bg:
189 188 self.bg_processes.append(p)
190 189 self._gc_bg_processes()
191 190 if args.out:
192 191 self.shell.user_ns[args.out] = p.stdout
193 192 if args.err:
194 193 self.shell.user_ns[args.err] = p.stderr
195 194 self.job_manager.new(self._run_script, p, cell, daemon=True)
196 195 if args.proc:
197 196 self.shell.user_ns[args.proc] = p
198 197 return
199 198
200 199 try:
201 200 out, err = p.communicate(cell)
202 201 except KeyboardInterrupt:
203 202 try:
204 203 p.send_signal(signal.SIGINT)
205 204 time.sleep(0.1)
206 205 if p.poll() is not None:
207 206 print("Process is interrupted.")
208 207 return
209 208 p.terminate()
210 209 time.sleep(0.1)
211 210 if p.poll() is not None:
212 211 print("Process is terminated.")
213 212 return
214 213 p.kill()
215 214 print("Process is killed.")
216 215 except OSError:
217 216 pass
218 217 except Exception as e:
219 218 print("Error while terminating subprocess (pid=%i): %s" \
220 219 % (p.pid, e))
221 220 return
222 221 out = py3compat.bytes_to_str(out)
223 222 err = py3compat.bytes_to_str(err)
224 223 if args.out:
225 224 self.shell.user_ns[args.out] = out
226 225 else:
227 226 sys.stdout.write(out)
228 227 sys.stdout.flush()
229 228 if args.err:
230 229 self.shell.user_ns[args.err] = err
231 230 else:
232 231 sys.stderr.write(err)
233 232 sys.stderr.flush()
234 233
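A hedged usage sketch of ``%%script`` with the ``--bg``/``--out`` options defined above; ``captured`` is an arbitrary variable name, ``bash`` is assumed to be available, and the final read blocks until the background job finishes::

    from IPython import get_ipython

    ip = get_ipython()
    if ip is not None:
        ip.run_cell_magic('script', 'bash --bg --out captured',
                          'for i in 1 2 3; do echo $i; done\n')
        pipe = ip.user_ns['captured']   # the stdout *pipe*, not the text
        print(pipe.read().decode())     # '1\n2\n3\n' once the job ends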
235 234 def _run_script(self, p, cell):
236 235 """callback for running the script in the background"""
237 236 p.stdin.write(cell)
238 237 p.stdin.close()
239 238 p.wait()
240 239
241 240 @line_magic("killbgscripts")
242 241 def killbgscripts(self, _nouse_=''):
243 242 """Kill all BG processes started by %%script and its family."""
244 243 self.kill_bg_processes()
245 244 print("All background processes were killed.")
246 245
247 246 def kill_bg_processes(self):
248 247 """Kill all BG processes which are still running."""
249 248 for p in self.bg_processes:
250 249 if p.poll() is None:
251 250 try:
252 251 p.send_signal(signal.SIGINT)
253 252 except:
254 253 pass
255 254 time.sleep(0.1)
256 255 for p in self.bg_processes:
257 256 if p.poll() is None:
258 257 try:
259 258 p.terminate()
260 259 except:
261 260 pass
262 261 time.sleep(0.1)
263 262 for p in self.bg_processes:
264 263 if p.poll() is None:
265 264 try:
266 265 p.kill()
267 266 except:
268 267 pass
269 268 self._gc_bg_processes()
270 269
271 270 def _gc_bg_processes(self):
272 271 self.bg_processes = [p for p in self.bg_processes if p.poll() is None]
@@ -1,1011 +1,1009 b''
1 1 # -*- coding: utf-8 -*-
2 2 """Tools for inspecting Python objects.
3 3
4 4 Uses syntax highlighting for presenting the various information elements.
5 5
6 6 Similar in spirit to the inspect module, but all calls take a name argument to
7 7 reference the name under which an object is being read.
8 8 """
9 9
10 10 # Copyright (c) IPython Development Team.
11 11 # Distributed under the terms of the Modified BSD License.
12 12
13 from __future__ import print_function
14
15 13 __all__ = ['Inspector','InspectColors']
16 14
17 15 # stdlib modules
18 16 import inspect
19 17 import linecache
20 18 import warnings
21 19 import os
22 20 from textwrap import dedent
23 21 import types
24 22 import io as stdlib_io
25 23
26 24 try:
27 25 from itertools import izip_longest
28 26 except ImportError:
29 27 from itertools import zip_longest as izip_longest
30 28
31 29 # IPython's own
32 30 from IPython.core import page
33 31 from IPython.lib.pretty import pretty
34 from IPython.testing.skipdoctest import skip_doctest_py3
32 from IPython.testing.skipdoctest import skip_doctest
35 33 from IPython.utils import PyColorize
36 34 from IPython.utils import openpy
37 35 from IPython.utils import py3compat
38 36 from IPython.utils.dir2 import safe_hasattr
39 37 from IPython.utils.path import compress_user
40 38 from IPython.utils.text import indent
41 39 from IPython.utils.wildcard import list_namespace
42 40 from IPython.utils.coloransi import TermColors, ColorScheme, ColorSchemeTable
43 41 from IPython.utils.py3compat import cast_unicode, string_types, PY3
44 42 from IPython.utils.signatures import signature
45 43 from IPython.utils.colorable import Colorable
46 44
47 45 from pygments import highlight
48 46 from pygments.lexers import PythonLexer
49 47 from pygments.formatters import HtmlFormatter
50 48
51 49 def pylight(code):
52 50 return highlight(code, PythonLexer(), HtmlFormatter(noclasses=True))
53 51
54 52 # builtin docstrings to ignore
55 53 _func_call_docstring = types.FunctionType.__call__.__doc__
56 54 _object_init_docstring = object.__init__.__doc__
57 55 _builtin_type_docstrings = {
58 56 inspect.getdoc(t) for t in (types.ModuleType, types.MethodType,
59 57 types.FunctionType, property)
60 58 }
61 59
62 60 _builtin_func_type = type(all)
63 61 _builtin_meth_type = type(str.upper) # Bound methods have the same type as builtin functions
64 62 #****************************************************************************
65 63 # Builtin color schemes
66 64
67 65 Colors = TermColors # just a shorthand
68 66
69 67 InspectColors = PyColorize.ANSICodeColors
70 68
71 69 #****************************************************************************
72 70 # Auxiliary functions and objects
73 71
74 72 # See the messaging spec for the definition of all these fields. This list
75 73 # effectively defines the order of display
76 74 info_fields = ['type_name', 'base_class', 'string_form', 'namespace',
77 75 'length', 'file', 'definition', 'docstring', 'source',
78 76 'init_definition', 'class_docstring', 'init_docstring',
79 77 'call_def', 'call_docstring',
80 78 # These won't be printed but will be used to determine how to
81 79 # format the object
82 80 'ismagic', 'isalias', 'isclass', 'argspec', 'found', 'name'
83 81 ]
84 82
85 83
86 84 def object_info(**kw):
87 85 """Make an object info dict with all fields present."""
88 86 infodict = dict(izip_longest(info_fields, [None]))
89 87 infodict.update(kw)
90 88 return infodict
91 89
92 90
93 91 def get_encoding(obj):
94 92 """Get encoding for python source file defining obj
95 93
96 94 Returns None if obj is not defined in a sourcefile.
97 95 """
98 96 ofile = find_file(obj)
99 97 # run contents of file through pager starting at line where the object
100 98 # is defined, as long as the file isn't binary and is actually on the
101 99 # filesystem.
102 100 if ofile is None:
103 101 return None
104 102 elif ofile.endswith(('.so', '.dll', '.pyd')):
105 103 return None
106 104 elif not os.path.isfile(ofile):
107 105 return None
108 106 else:
109 107 # Print only text files, not extension binaries. Note that
110 108 # getsourcelines returns lineno with 1-offset and page() uses
111 109 # 0-offset, so we must adjust.
112 110 with stdlib_io.open(ofile, 'rb') as buffer: # Tweaked to use io.open for Python 2
113 111 encoding, lines = openpy.detect_encoding(buffer.readline)
114 112 return encoding
115 113
116 114 def getdoc(obj):
117 115 """Stable wrapper around inspect.getdoc.
118 116
119 117 This can't crash because of attribute problems.
120 118
121 119 It also attempts to call a getdoc() method on the given object. This
122 120 allows objects which provide their docstrings via non-standard mechanisms
123 121 (like Pyro proxies) to still be inspected by ipython's ? system.
124 122 """
125 123 # Allow objects to offer customized documentation via a getdoc method:
126 124 try:
127 125 ds = obj.getdoc()
128 126 except Exception:
129 127 pass
130 128 else:
131 129 # if we get extra info, we add it to the normal docstring.
132 130 if isinstance(ds, string_types):
133 131 return inspect.cleandoc(ds)
134 132 try:
135 133 docstr = inspect.getdoc(obj)
136 134 encoding = get_encoding(obj)
137 135 return py3compat.cast_unicode(docstr, encoding=encoding)
138 136 except Exception:
139 137 # Harden against an inspect failure, which can occur with
140 138 # extension modules.
141 139 raise
142 140 return None
143 141
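A minimal sketch of the custom-docstring hook described above; ``Proxy`` is an illustrative class and ``getdoc`` here is the wrapper defined above (importable as ``IPython.core.oinspect.getdoc``)::

    from IPython.core.oinspect import getdoc

    class Proxy(object):
        """Static docstring (ignored when getdoc() succeeds)."""
        def getdoc(self):
            # Called preferentially by the getdoc() wrapper above.
            return "Docstring fetched at runtime."

    print(getdoc(Proxy()))   # -> Docstring fetched at runtime.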
144 142
145 143 def getsource(obj, oname=''):
146 144 """Wrapper around inspect.getsource.
147 145
148 146 This can be modified by other projects to provide customized source
149 147 extraction.
150 148
151 149 Parameters
152 150 ----------
153 151 obj : object
154 152 an object whose source code we will attempt to extract
155 153 oname : str
156 154 (optional) a name under which the object is known
157 155
158 156 Returns
159 157 -------
160 158 src : unicode or None
161 159
162 160 """
163 161
164 162 if isinstance(obj, property):
165 163 sources = []
166 164 for attrname in ['fget', 'fset', 'fdel']:
167 165 fn = getattr(obj, attrname)
168 166 if fn is not None:
169 167 encoding = get_encoding(fn)
170 168 oname_prefix = ('%s.' % oname) if oname else ''
171 169 sources.append(cast_unicode(
172 170 ''.join(('# ', oname_prefix, attrname)),
173 171 encoding=encoding))
174 172 if inspect.isfunction(fn):
175 173 sources.append(dedent(getsource(fn)))
176 174 else:
177 175 # Default str/repr only prints function name,
178 176 # pretty.pretty prints module name too.
179 177 sources.append(cast_unicode(
180 178 '%s%s = %s\n' % (
181 179 oname_prefix, attrname, pretty(fn)),
182 180 encoding=encoding))
183 181 if sources:
184 182 return '\n'.join(sources)
185 183 else:
186 184 return None
187 185
188 186 else:
189 187 # Get source for non-property objects.
190 188
191 189 obj = _get_wrapped(obj)
192 190
193 191 try:
194 192 src = inspect.getsource(obj)
195 193 except TypeError:
196 194 # The object itself provided no meaningful source, try looking for
197 195 # its class definition instead.
198 196 if hasattr(obj, '__class__'):
199 197 try:
200 198 src = inspect.getsource(obj.__class__)
201 199 except TypeError:
202 200 return None
203 201
204 202 encoding = get_encoding(obj)
205 203 return cast_unicode(src, encoding=encoding)
206 204
207 205
208 206 def is_simple_callable(obj):
209 207 """True if obj is a plain function, method, or builtin function/method."""
210 208 return (inspect.isfunction(obj) or inspect.ismethod(obj) or \
211 209 isinstance(obj, _builtin_func_type) or isinstance(obj, _builtin_meth_type))
212 210
213 211
214 212 def getargspec(obj):
215 213 """Wrapper around :func:`inspect.getfullargspec` on Python 3, and
216 214 :func:`inspect.getargspec` on Python 2.
217 215
218 216 In addition to functions and methods, this can also handle objects with a
219 217 ``__call__`` attribute.
220 218 """
221 219 if safe_hasattr(obj, '__call__') and not is_simple_callable(obj):
222 220 obj = obj.__call__
223 221
224 222 return inspect.getfullargspec(obj) if PY3 else inspect.getargspec(obj)
225 223
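A small sketch of the ``__call__`` handling above: passing a callable instance inspects its ``__call__`` method; ``Scaler`` is an illustrative class::

    from IPython.core.oinspect import getargspec

    class Scaler(object):
        def __call__(self, x, factor=2):
            return x * factor

    spec = getargspec(Scaler())   # inspects Scaler.__call__
    print(spec.args)              # e.g. ['self', 'x', 'factor']
    print(spec.defaults)          # (2,)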
226 224
227 225 def format_argspec(argspec):
228 226 """Format argspec, convenience wrapper around inspect's.
229 227
230 228 This takes a dict instead of ordered arguments and calls
231 229 inspect.formatargspec with the arguments in the necessary order.
232 230 """
233 231 return inspect.formatargspec(argspec['args'], argspec['varargs'],
234 232 argspec['varkw'], argspec['defaults'])
235 233
236 234
237 235 def call_tip(oinfo, format_call=True):
238 236 """Extract call tip data from an oinfo dict.
239 237
240 238 Parameters
241 239 ----------
242 240 oinfo : dict
243 241
244 242 format_call : bool, optional
245 243 If True, the call line is formatted and returned as a string. If not, a
246 244 tuple of (name, argspec) is returned.
247 245
248 246 Returns
249 247 -------
250 248 call_info : None, str or (str, dict) tuple.
251 249 When format_call is True, the whole call information is formatted as a
252 250 single string. Otherwise, the object's name and its argspec dict are
253 251 returned. If no call information is available, None is returned.
254 252
255 253 docstring : str or None
256 254 The most relevant docstring for calling purposes is returned, if
257 255 available. The priority is: call docstring for callable instances, then
258 256 constructor docstring for classes, then main object's docstring otherwise
259 257 (regular functions).
260 258 """
261 259 # Get call definition
262 260 argspec = oinfo.get('argspec')
263 261 if argspec is None:
264 262 call_line = None
265 263 else:
266 264 # Callable objects will have 'self' as their first argument, prune
267 265 # it out if it's there for clarity (since users do *not* pass an
268 266 # extra first argument explicitly).
269 267 try:
270 268 has_self = argspec['args'][0] == 'self'
271 269 except (KeyError, IndexError):
272 270 pass
273 271 else:
274 272 if has_self:
275 273 argspec['args'] = argspec['args'][1:]
276 274
277 275 call_line = oinfo['name']+format_argspec(argspec)
278 276
279 277 # Now get docstring.
280 278 # The priority is: call docstring, constructor docstring, main one.
281 279 doc = oinfo.get('call_docstring')
282 280 if doc is None:
283 281 doc = oinfo.get('init_docstring')
284 282 if doc is None:
285 283 doc = oinfo.get('docstring','')
286 284
287 285 return call_line, doc
288 286
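A hedged sketch of ``call_tip`` fed a minimal, hand-built oinfo dict; real callers obtain this dict from the inspector machinery rather than constructing it by hand::

    from IPython.core.oinspect import call_tip

    oinfo = {
        'name': 'add',
        'argspec': {'args': ['a', 'b'], 'varargs': None,
                    'varkw': None, 'defaults': None},
        'docstring': 'Add two numbers.',
    }
    line, doc = call_tip(oinfo)
    print(line)   # add(a, b)
    print(doc)    # Add two numbers.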
289 287
290 288 def _get_wrapped(obj):
291 289 """Get the original object if wrapped in one or more @decorators
292 290
293 291 Some objects automatically construct similar objects on any unrecognised
294 292 attribute access (e.g. unittest.mock.call). To protect against infinite loops,
295 293 this will arbitrarily cut off after 100 levels of obj.__wrapped__
296 294 attribute access. --TK, Jan 2016
297 295 """
298 296 orig_obj = obj
299 297 i = 0
300 298 while safe_hasattr(obj, '__wrapped__'):
301 299 obj = obj.__wrapped__
302 300 i += 1
303 301 if i > 100:
304 302 # __wrapped__ is probably a lie, so return the thing we started with
305 303 return orig_obj
306 304 return obj
307 305
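A small sketch of the ``__wrapped__`` chain that ``_get_wrapped`` follows; ``functools.wraps`` sets that attribute automatically, and ``logged``/``greet`` are illustrative names::

    import functools
    from IPython.core.oinspect import _get_wrapped

    def logged(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            return fn(*args, **kwargs)
        return wrapper

    @logged
    def greet(name):
        "Say hello."
        return 'hello ' + name

    # _get_wrapped peels the decorator off and returns the original function.
    assert _get_wrapped(greet) is greet.__wrapped__
    assert _get_wrapped(greet).__doc__ == 'Say hello.'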
308 306 def find_file(obj):
309 307 """Find the absolute path to the file where an object was defined.
310 308
311 309 This is essentially a robust wrapper around `inspect.getabsfile`.
312 310
313 311 Returns None if no file can be found.
314 312
315 313 Parameters
316 314 ----------
317 315 obj : any Python object
318 316
319 317 Returns
320 318 -------
321 319 fname : str
322 320 The absolute path to the file where the object was defined.
323 321 """
324 322 obj = _get_wrapped(obj)
325 323
326 324 fname = None
327 325 try:
328 326 fname = inspect.getabsfile(obj)
329 327 except TypeError:
330 328 # For an instance, the file that matters is where its class was
331 329 # declared.
332 330 if hasattr(obj, '__class__'):
333 331 try:
334 332 fname = inspect.getabsfile(obj.__class__)
335 333 except TypeError:
336 334 # Can happen for builtins
337 335 pass
338 336 except:
339 337 pass
340 338 return cast_unicode(fname)
341 339
342 340
343 341 def find_source_lines(obj):
344 342 """Find the line number in a file where an object was defined.
345 343
346 344 This is essentially a robust wrapper around `inspect.getsourcelines`.
347 345
348 346 Returns None if no file can be found.
349 347
350 348 Parameters
351 349 ----------
352 350 obj : any Python object
353 351
354 352 Returns
355 353 -------
356 354 lineno : int
357 355 The line number where the object definition starts.
358 356 """
359 357 obj = _get_wrapped(obj)
360 358
361 359 try:
362 360 try:
363 361 lineno = inspect.getsourcelines(obj)[1]
364 362 except TypeError:
365 363 # For instances, try the class object like getsource() does
366 364 if hasattr(obj, '__class__'):
367 365 lineno = inspect.getsourcelines(obj.__class__)[1]
368 366 else:
369 367 lineno = None
370 368 except:
371 369 return None
372 370
373 371 return lineno
374 372
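The fallback both helpers rely on can be reproduced with plain `inspect`: instances carry no source of their own, so the class is inspected instead. Run this as a script rather than at an interactive prompt, since objects defined in a REPL have no backing file.

import inspect

class Widget:
    def ping(self):
        return 'pong'

w = Widget()

try:
    fname = inspect.getabsfile(w)            # TypeError: instances have no file
except TypeError:
    fname = inspect.getabsfile(w.__class__)  # fall back to the class

lineno = inspect.getsourcelines(w.__class__)[1]
print(fname, lineno)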
375 373 class Inspector(Colorable):
376 374
377 375 def __init__(self, color_table=InspectColors,
378 376 code_color_table=PyColorize.ANSICodeColors,
379 377 scheme='NoColor',
380 378 str_detail_level=0,
381 379 parent=None, config=None):
382 380 super(Inspector, self).__init__(parent=parent, config=config)
383 381 self.color_table = color_table
384 382 self.parser = PyColorize.Parser(out='str', parent=self, style=scheme)
385 383 self.format = self.parser.format
386 384 self.str_detail_level = str_detail_level
387 385 self.set_active_scheme(scheme)
388 386
389 387 def _getdef(self,obj,oname=''):
390 388 """Return the call signature for any callable object.
391 389
392 390 If any exception is generated, None is returned instead and the
393 391 exception is suppressed."""
394 392 try:
395 393 hdef = oname + str(signature(obj))
396 394 return cast_unicode(hdef)
397 395 except:
398 396 return None
399 397
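In essence the method just concatenates the name with `str(inspect.signature(obj))`; a standalone illustration:

from inspect import signature

def resize(img, width, height=None, *, keep_aspect=True):
    pass

print('resize' + str(signature(resize)))
# resize(img, width, height=None, *, keep_aspect=True)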
400 398 def __head(self,h):
401 399 """Return a header string with proper colors."""
402 400 return '%s%s%s' % (self.color_table.active_colors.header,h,
403 401 self.color_table.active_colors.normal)
404 402
405 403 def set_active_scheme(self, scheme):
406 404 self.color_table.set_active_scheme(scheme)
407 405 self.parser.color_table.set_active_scheme(scheme)
408 406
409 407 def noinfo(self, msg, oname):
410 408 """Generic message when no information is found."""
411 409 print('No %s found' % msg, end=' ')
412 410 if oname:
413 411 print('for %s' % oname)
414 412 else:
415 413 print()
416 414
417 415 def pdef(self, obj, oname=''):
418 416 """Print the call signature for any callable object.
419 417
420 418 If the object is a class, print the constructor information."""
421 419
422 420 if not callable(obj):
423 421 print('Object is not callable.')
424 422 return
425 423
426 424 header = ''
427 425
428 426 if inspect.isclass(obj):
429 427 header = self.__head('Class constructor information:\n')
430 428 elif (not py3compat.PY3) and type(obj) is types.InstanceType:
431 429 obj = obj.__call__
432 430
433 431 output = self._getdef(obj,oname)
434 432 if output is None:
435 433 self.noinfo('definition header',oname)
436 434 else:
437 435 print(header,self.format(output), end=' ')
438 436
439 437 # In Python 3, all classes are new-style, so they all have __init__.
440 @skip_doctest_py3
438 @skip_doctest
441 439 def pdoc(self, obj, oname='', formatter=None):
442 440 """Print the docstring for any object.
443 441
444 442 Optional:
445 443 -formatter: a function to run the docstring through for specially
446 444 formatted docstrings.
447 445
448 446 Examples
449 447 --------
450 448
451 449 In [1]: class NoInit:
452 450 ...: pass
453 451
454 452 In [2]: class NoDoc:
455 453 ...: def __init__(self):
456 454 ...: pass
457 455
458 456 In [3]: %pdoc NoDoc
459 457 No documentation found for NoDoc
460 458
461 459 In [4]: %pdoc NoInit
462 460 No documentation found for NoInit
463 461
464 462 In [5]: obj = NoInit()
465 463
466 464 In [6]: %pdoc obj
467 465 No documentation found for obj
468 466
469 467 In [5]: obj2 = NoDoc()
470 468
471 469 In [6]: %pdoc obj2
472 470 No documentation found for obj2
473 471 """
474 472
475 473 head = self.__head # For convenience
476 474 lines = []
477 475 ds = getdoc(obj)
478 476 if formatter:
479 477             ds = formatter(ds).get('text/plain', ds)
480 478 if ds:
481 479 lines.append(head("Class docstring:"))
482 480 lines.append(indent(ds))
483 481 if inspect.isclass(obj) and hasattr(obj, '__init__'):
484 482 init_ds = getdoc(obj.__init__)
485 483 if init_ds is not None:
486 484 lines.append(head("Init docstring:"))
487 485 lines.append(indent(init_ds))
488 486 elif hasattr(obj,'__call__'):
489 487 call_ds = getdoc(obj.__call__)
490 488 if call_ds:
491 489 lines.append(head("Call docstring:"))
492 490 lines.append(indent(call_ds))
493 491
494 492 if not lines:
495 493 self.noinfo('documentation',oname)
496 494 else:
497 495 page.page('\n'.join(lines))
498 496
499 497 def psource(self, obj, oname=''):
500 498 """Print the source code for an object."""
501 499
502 500 # Flush the source cache because inspect can return out-of-date source
503 501 linecache.checkcache()
504 502 try:
505 503 src = getsource(obj, oname=oname)
506 504 except Exception:
507 505 src = None
508 506
509 507 if src is None:
510 508 self.noinfo('source', oname)
511 509 else:
512 510 page.page(self.format(src))
513 511
514 512 def pfile(self, obj, oname=''):
515 513 """Show the whole file where an object was defined."""
516 514
517 515 lineno = find_source_lines(obj)
518 516 if lineno is None:
519 517 self.noinfo('file', oname)
520 518 return
521 519
522 520 ofile = find_file(obj)
523 521 # run contents of file through pager starting at line where the object
524 522 # is defined, as long as the file isn't binary and is actually on the
525 523 # filesystem.
526 524 if ofile.endswith(('.so', '.dll', '.pyd')):
527 525 print('File %r is binary, not printing.' % ofile)
528 526 elif not os.path.isfile(ofile):
529 527 print('File %r does not exist, not printing.' % ofile)
530 528 else:
531 529 # Print only text files, not extension binaries. Note that
532 530 # getsourcelines returns lineno with 1-offset and page() uses
533 531 # 0-offset, so we must adjust.
534 532 page.page(self.format(openpy.read_py_file(ofile, skip_encoding_cookie=False)), lineno - 1)
535 533
536 534 def _format_fields(self, fields, title_width=0):
537 535 """Formats a list of fields for display.
538 536
539 537 Parameters
540 538 ----------
541 539 fields : list
542 540 A list of 2-tuples: (field_title, field_content)
543 541 title_width : int
544 542             How many characters to pad titles to. Defaults to the longest title.
545 543 """
546 544 out = []
547 545 header = self.__head
548 546 if title_width == 0:
549 547 title_width = max(len(title) + 2 for title, _ in fields)
550 548 for title, content in fields:
551 549 if len(content.splitlines()) > 1:
552 550 title = header(title + ':') + '\n'
553 551 else:
554 552 title = header((title + ':').ljust(title_width))
555 553 out.append(cast_unicode(title) + cast_unicode(content))
556 554 return "\n".join(out)
557 555
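For single-line content the column alignment comes from a simple `ljust` on the title; for example:

fields = [('Type', 'function'), ('String form', '<function f at 0x...>')]
title_width = max(len(title) + 2 for title, _ in fields)
for title, content in fields:
    print((title + ':').ljust(title_width) + content)
# Type:        function
# String form: <function f at 0x...>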
558 556 def _mime_format(self, text, formatter=None):
559 557 """Return a mime bundle representation of the input text.
560 558
561 559         - if `formatter` is None, the returned mime bundle has
562 560            a `text/plain` field with the input text, and
563 561            a `text/html` field with the input text wrapped in a `<pre>` tag.
564 562
565 563 - if `formatter` is not None, it must be a callable transforming the
566 564 input text into a mime bundle. Default values for `text/plain` and
567 565 `text/html` representations are the ones described above.
568 566
569 567 Note:
570 568
571 569 Formatters returning strings are supported but this behavior is deprecated.
572 570
573 571 """
574 572 text = cast_unicode(text)
575 573 defaults = {
576 574 'text/plain': text,
577 575 'text/html': '<pre>' + text + '</pre>'
578 576 }
579 577
580 578 if formatter is None:
581 579 return defaults
582 580 else:
583 581 formatted = formatter(text)
584 582
585 583 if not isinstance(formatted, dict):
586 584 # Handle the deprecated behavior of a formatter returning
587 585 # a string instead of a mime bundle.
588 586 return {
589 587 'text/plain': formatted,
590 588 'text/html': '<pre>' + formatted + '</pre>'
591 589 }
592 590
593 591 else:
594 592 return dict(defaults, **formatted)
595 593
596 594
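The merging rule is just `dict(defaults, **formatted)`: whatever the formatter returns overrides the default representation, everything else keeps the fallback. A toy example with a hypothetical formatter:

text = 'x : int'
defaults = {'text/plain': text, 'text/html': '<pre>' + text + '</pre>'}

def my_formatter(t):
    return {'text/html': '<b>' + t + '</b>'}       # only overrides HTML

bundle = dict(defaults, **my_formatter(text))
print(bundle['text/plain'])   # x : int           (default kept)
print(bundle['text/html'])    # <b>x : int</b>    (overridden by the formatter)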
597 595 def format_mime(self, bundle):
598 596
599 597 text_plain = bundle['text/plain']
600 598
601 599 text = ''
602 600 heads, bodies = list(zip(*text_plain))
603 601 _len = max(len(h) for h in heads)
604 602
605 603 for head, body in zip(heads, bodies):
606 604 body = body.strip('\n')
607 605 delim = '\n' if '\n' in body else ' '
608 606 text += self.__head(head+':') + (_len - len(head))*' ' +delim + body +'\n'
609 607
610 608 bundle['text/plain'] = text
611 609 return bundle
612 610
613 611 def _get_info(self, obj, oname='', formatter=None, info=None, detail_level=0):
614 612 """Retrieve an info dict and format it."""
615 613
616 614 info = self._info(obj, oname=oname, info=info, detail_level=detail_level)
617 615
618 616 _mime = {
619 617 'text/plain': [],
620 618 'text/html': '',
621 619 }
622 620
623 621 def append_field(bundle, title, key, formatter=None):
624 622 field = info[key]
625 623 if field is not None:
626 624 formatted_field = self._mime_format(field, formatter)
627 625 bundle['text/plain'].append((title, formatted_field['text/plain']))
628 626 bundle['text/html'] += '<h1>' + title + '</h1>\n' + formatted_field['text/html'] + '\n'
629 627
630 628 def code_formatter(text):
631 629 return {
632 630 'text/plain': self.format(text),
633 631 'text/html': pylight(text)
634 632 }
635 633
636 634 if info['isalias']:
637 635 append_field(_mime, 'Repr', 'string_form')
638 636
639 637 elif info['ismagic']:
640 638 if detail_level > 0:
641 639 append_field(_mime, 'Source', 'source', code_formatter)
642 640 else:
643 641 append_field(_mime, 'Docstring', 'docstring', formatter)
644 642 append_field(_mime, 'File', 'file')
645 643
646 644 elif info['isclass'] or is_simple_callable(obj):
647 645 # Functions, methods, classes
648 646 append_field(_mime, 'Signature', 'definition', code_formatter)
649 647 append_field(_mime, 'Init signature', 'init_definition', code_formatter)
650 648 if detail_level > 0:
651 649 append_field(_mime, 'Source', 'source', code_formatter)
652 650 else:
653 651 append_field(_mime, 'Docstring', 'docstring', formatter)
654 652 append_field(_mime, 'Init docstring', 'init_docstring', formatter)
655 653
656 654 append_field(_mime, 'File', 'file')
657 655 append_field(_mime, 'Type', 'type_name')
658 656
659 657 else:
660 658 # General Python objects
661 659 append_field(_mime, 'Signature', 'definition', code_formatter)
662 660 append_field(_mime, 'Call signature', 'call_def', code_formatter)
663 661
664 662 append_field(_mime, 'Type', 'type_name')
665 663
666 664 # Base class for old-style instances
667 665 if (not py3compat.PY3) and isinstance(obj, types.InstanceType) and info['base_class']:
668 666 append_field(_mime, 'Base Class', 'base_class')
669 667
670 668 append_field(_mime, 'String form', 'string_form')
671 669
672 670 # Namespace
673 671 if info['namespace'] != 'Interactive':
674 672 append_field(_mime, 'Namespace', 'namespace')
675 673
676 674 append_field(_mime, 'Length', 'length')
677 675 append_field(_mime, 'File', 'file')
678 676
679 677 # Source or docstring, depending on detail level and whether
680 678 # source found.
681 679 if detail_level > 0:
682 680 append_field(_mime, 'Source', 'source', code_formatter)
683 681 else:
684 682 append_field(_mime, 'Docstring', 'docstring', formatter)
685 683
686 684 append_field(_mime, 'Class docstring', 'class_docstring', formatter)
687 685 append_field(_mime, 'Init docstring', 'init_docstring', formatter)
688 686 append_field(_mime, 'Call docstring', 'call_docstring', formatter)
689 687
690 688
691 689 return self.format_mime(_mime)
692 690
693 691 def pinfo(self, obj, oname='', formatter=None, info=None, detail_level=0, enable_html_pager=True):
694 692 """Show detailed information about an object.
695 693
696 694 Optional arguments:
697 695
698 696 - oname: name of the variable pointing to the object.
699 697
700 698 - formatter: callable (optional)
701 699 A special formatter for docstrings.
702 700
703 701 The formatter is a callable that takes a string as an input
704 702 and returns either a formatted string or a mime type bundle
705 703         in the form of a dictionary.
706 704
707 705         Note that support for a formatter returning a plain string
708 706         instead of a mime type bundle is deprecated.
709 707
710 708 - info: a structure with some information fields which may have been
711 709 precomputed already.
712 710
713 711 - detail_level: if set to 1, more information is given.
714 712 """
715 713 info = self._get_info(obj, oname, formatter, info, detail_level)
716 714 if not enable_html_pager:
717 715 del info['text/html']
718 716 page.page(info)
719 717
720 718 def info(self, obj, oname='', formatter=None, info=None, detail_level=0):
721 719 """DEPRECATED. Compute a dict with detailed information about an object.
722 720 """
723 721 if formatter is not None:
724 722 warnings.warn('The `formatter` keyword argument to `Inspector.info`'
725 723 'is deprecated as of IPython 5.0 and will have no effects.',
726 724 DeprecationWarning, stacklevel=2)
727 725 return self._info(obj, oname=oname, info=info, detail_level=detail_level)
728 726
729 727 def _info(self, obj, oname='', info=None, detail_level=0):
730 728 """Compute a dict with detailed information about an object.
731 729
732 730 Optional arguments:
733 731
734 732 - oname: name of the variable pointing to the object.
735 733
736 734 - info: a structure with some information fields which may have been
737 735 precomputed already.
738 736
739 737 - detail_level: if set to 1, more information is given.
740 738 """
741 739
742 740 obj_type = type(obj)
743 741
744 742 if info is None:
745 743 ismagic = 0
746 744 isalias = 0
747 745 ospace = ''
748 746 else:
749 747 ismagic = info.ismagic
750 748 isalias = info.isalias
751 749 ospace = info.namespace
752 750
753 751 # Get docstring, special-casing aliases:
754 752 if isalias:
755 753 if not callable(obj):
756 754 try:
757 755 ds = "Alias to the system command:\n %s" % obj[1]
758 756 except:
759 757 ds = "Alias: " + str(obj)
760 758 else:
761 759 ds = "Alias to " + str(obj)
762 760 if obj.__doc__:
763 761 ds += "\nDocstring:\n" + obj.__doc__
764 762 else:
765 763 ds = getdoc(obj)
766 764 if ds is None:
767 765 ds = '<no docstring>'
768 766
769 767 # store output in a dict, we initialize it here and fill it as we go
770 768 out = dict(name=oname, found=True, isalias=isalias, ismagic=ismagic)
771 769
772 770 string_max = 200 # max size of strings to show (snipped if longer)
773 771 shalf = int((string_max - 5) / 2)
774 772
775 773 if ismagic:
776 774 obj_type_name = 'Magic function'
777 775 elif isalias:
778 776 obj_type_name = 'System alias'
779 777 else:
780 778 obj_type_name = obj_type.__name__
781 779 out['type_name'] = obj_type_name
782 780
783 781 try:
784 782 bclass = obj.__class__
785 783 out['base_class'] = str(bclass)
786 784 except: pass
787 785
788 786 # String form, but snip if too long in ? form (full in ??)
789 787 if detail_level >= self.str_detail_level:
790 788 try:
791 789 ostr = str(obj)
792 790 str_head = 'string_form'
793 791 if not detail_level and len(ostr)>string_max:
794 792 ostr = ostr[:shalf] + ' <...> ' + ostr[-shalf:]
795 793 ostr = ("\n" + " " * len(str_head.expandtabs())).\
796 794 join(q.strip() for q in ostr.split("\n"))
797 795 out[str_head] = ostr
798 796 except:
799 797 pass
800 798
801 799 if ospace:
802 800 out['namespace'] = ospace
803 801
804 802 # Length (for strings and lists)
805 803 try:
806 804 out['length'] = str(len(obj))
807 805 except: pass
808 806
809 807 # Filename where object was defined
810 808 binary_file = False
811 809 fname = find_file(obj)
812 810 if fname is None:
813 811 # if anything goes wrong, we don't want to show source, so it's as
814 812 # if the file was binary
815 813 binary_file = True
816 814 else:
817 815 if fname.endswith(('.so', '.dll', '.pyd')):
818 816 binary_file = True
819 817 elif fname.endswith('<string>'):
820 818 fname = 'Dynamically generated function. No source code available.'
821 819 out['file'] = compress_user(fname)
822 820
823 821 # Original source code for a callable, class or property.
824 822 if detail_level:
825 823 # Flush the source cache because inspect can return out-of-date
826 824 # source
827 825 linecache.checkcache()
828 826 try:
829 827 if isinstance(obj, property) or not binary_file:
830 828 src = getsource(obj, oname)
831 829 if src is not None:
832 830 src = src.rstrip()
833 831 out['source'] = src
834 832
835 833 except Exception:
836 834 pass
837 835
838 836 # Add docstring only if no source is to be shown (avoid repetitions).
839 837 if ds and out.get('source', None) is None:
840 838 out['docstring'] = ds
841 839
842 840 # Constructor docstring for classes
843 841 if inspect.isclass(obj):
844 842 out['isclass'] = True
845 843
846 844 # get the init signature:
847 845 try:
848 846 init_def = self._getdef(obj, oname)
849 847 except AttributeError:
850 848 init_def = None
851 849
852 850 # get the __init__ docstring
853 851 try:
854 852 obj_init = obj.__init__
855 853 except AttributeError:
856 854 init_ds = None
857 855 else:
858 856 if init_def is None:
859 857 # Get signature from init if top-level sig failed.
860 858 # Can happen for built-in types (list, etc.).
861 859 try:
862 860 init_def = self._getdef(obj_init, oname)
863 861 except AttributeError:
864 862 pass
865 863 init_ds = getdoc(obj_init)
866 864 # Skip Python's auto-generated docstrings
867 865 if init_ds == _object_init_docstring:
868 866 init_ds = None
869 867
870 868 if init_def:
871 869 out['init_definition'] = init_def
872 870
873 871 if init_ds:
874 872 out['init_docstring'] = init_ds
875 873
876 874 # and class docstring for instances:
877 875 else:
878 876 # reconstruct the function definition and print it:
879 877 defln = self._getdef(obj, oname)
880 878 if defln:
881 879 out['definition'] = defln
882 880
883 881 # First, check whether the instance docstring is identical to the
884 882 # class one, and print it separately if they don't coincide. In
885 883 # most cases they will, but it's nice to print all the info for
886 884 # objects which use instance-customized docstrings.
887 885 if ds:
888 886 try:
889 887 cls = getattr(obj,'__class__')
890 888 except:
891 889 class_ds = None
892 890 else:
893 891 class_ds = getdoc(cls)
894 892 # Skip Python's auto-generated docstrings
895 893 if class_ds in _builtin_type_docstrings:
896 894 class_ds = None
897 895 if class_ds and ds != class_ds:
898 896 out['class_docstring'] = class_ds
899 897
900 898 # Next, try to show constructor docstrings
901 899 try:
902 900 init_ds = getdoc(obj.__init__)
903 901 # Skip Python's auto-generated docstrings
904 902 if init_ds == _object_init_docstring:
905 903 init_ds = None
906 904 except AttributeError:
907 905 init_ds = None
908 906 if init_ds:
909 907 out['init_docstring'] = init_ds
910 908
911 909 # Call form docstring for callable instances
912 910 if safe_hasattr(obj, '__call__') and not is_simple_callable(obj):
913 911 call_def = self._getdef(obj.__call__, oname)
914 912 if call_def and (call_def != out.get('definition')):
915 913 # it may never be the case that call def and definition differ,
916 914 # but don't include the same signature twice
917 915 out['call_def'] = call_def
918 916 call_ds = getdoc(obj.__call__)
919 917 # Skip Python's auto-generated docstrings
920 918 if call_ds == _func_call_docstring:
921 919 call_ds = None
922 920 if call_ds:
923 921 out['call_docstring'] = call_ds
924 922
925 923 # Compute the object's argspec as a callable. The key is to decide
926 924 # whether to pull it from the object itself, from its __init__ or
927 925 # from its __call__ method.
928 926
929 927 if inspect.isclass(obj):
930 928 # Old-style classes need not have an __init__
931 929 callable_obj = getattr(obj, "__init__", None)
932 930 elif callable(obj):
933 931 callable_obj = obj
934 932 else:
935 933 callable_obj = None
936 934
937 935 if callable_obj is not None:
938 936 try:
939 937 argspec = getargspec(callable_obj)
940 938 except (TypeError, AttributeError):
941 939 # For extensions/builtins we can't retrieve the argspec
942 940 pass
943 941 else:
944 942                 # named tuples' _asdict() method returns an OrderedDict,
945 943                 # but we want a plain dict here
946 944 out['argspec'] = argspec_dict = dict(argspec._asdict())
947 945 # We called this varkw before argspec became a named tuple.
948 946 # With getfullargspec it's also called varkw.
949 947 if 'varkw' not in argspec_dict:
950 948 argspec_dict['varkw'] = argspec_dict.pop('keywords')
951 949
952 950 return object_info(**out)
953 951
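On Python 3 the underlying call is effectively `inspect.getfullargspec`, whose named tuple already uses `varkw`, so the `pop('keywords')` branch only matters for the legacy `getargspec` path. A standalone check:

import inspect

def f(a, b=1, *args, **kwargs):
    pass

argspec_dict = dict(inspect.getfullargspec(f)._asdict())
print(argspec_dict['args'])    # ['a', 'b']
print(argspec_dict['varkw'])   # 'kwargs' -- already named varkw, no renaming needed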
954 952 def psearch(self,pattern,ns_table,ns_search=[],
955 953 ignore_case=False,show_all=False):
956 954 """Search namespaces with wildcards for objects.
957 955
958 956 Arguments:
959 957
960 958 - pattern: string containing shell-like wildcards to use in namespace
961 959 searches and optionally a type specification to narrow the search to
962 960 objects of that type.
963 961
964 962 - ns_table: dict of name->namespaces for search.
965 963
966 964 Optional arguments:
967 965
968 966 - ns_search: list of namespace names to include in search.
969 967
970 968 - ignore_case(False): make the search case-insensitive.
971 969
972 970 - show_all(False): show all names, including those starting with
973 971 underscores.
974 972 """
975 973 #print 'ps pattern:<%r>' % pattern # dbg
976 974
977 975 # defaults
978 976 type_pattern = 'all'
979 977 filter = ''
980 978
981 979 cmds = pattern.split()
982 980 len_cmds = len(cmds)
983 981 if len_cmds == 1:
984 982 # Only filter pattern given
985 983 filter = cmds[0]
986 984 elif len_cmds == 2:
987 985 # Both filter and type specified
988 986 filter,type_pattern = cmds
989 987 else:
990 988 raise ValueError('invalid argument string for psearch: <%s>' %
991 989 pattern)
992 990
993 991 # filter search namespaces
994 992 for name in ns_search:
995 993 if name not in ns_table:
996 994 raise ValueError('invalid namespace <%s>. Valid names: %s' %
997 995 (name,ns_table.keys()))
998 996
999 997 #print 'type_pattern:',type_pattern # dbg
1000 998 search_result, namespaces_seen = set(), set()
1001 999 for ns_name in ns_search:
1002 1000 ns = ns_table[ns_name]
1003 1001 # Normally, locals and globals are the same, so we just check one.
1004 1002 if id(ns) in namespaces_seen:
1005 1003 continue
1006 1004 namespaces_seen.add(id(ns))
1007 1005 tmp_res = list_namespace(ns, type_pattern, filter,
1008 1006 ignore_case=ignore_case, show_all=show_all)
1009 1007 search_result.update(tmp_res)
1010 1008
1011 1009 page.page('\n'.join(sorted(search_result)))
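What a wildcard search over a namespace boils down to can be sketched with the stdlib alone; the real work above is delegated to `list_namespace`, which also handles type filters and dotted names. A rough, simplified stand-in:

import fnmatch
import math

def simple_psearch(pattern, ns, show_all=False):
    hits = []
    for name in ns:
        if not show_all and name.startswith('_'):
            continue                       # hide private names unless asked
        if fnmatch.fnmatchcase(name, pattern):
            hits.append(name)
    return sorted(hits)

print(simple_psearch('is*', vars(math)))
# e.g. ['isclose', 'isfinite', 'isinf', 'isnan', ...]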
@@ -1,386 +1,385 b''
1 1 # encoding: utf-8
2 2 """
3 3 Paging capabilities for IPython.core
4 4
5 5 Notes
6 6 -----
7 7
8 8 For now this uses IPython hooks, so it can't be in IPython.utils. If we can get
9 9 rid of that dependency, we could move it there.
10 10 -----
11 11 """
12 12
13 13 # Copyright (c) IPython Development Team.
14 14 # Distributed under the terms of the Modified BSD License.
15 15
16 from __future__ import print_function
17 16
18 17 import os
19 18 import re
20 19 import sys
21 20 import tempfile
22 21
23 22 from io import UnsupportedOperation
24 23
25 24 from IPython import get_ipython
26 25 from IPython.core.display import display
27 26 from IPython.core.error import TryNext
28 27 from IPython.utils.data import chop
29 28 from IPython.utils.process import system
30 29 from IPython.utils.terminal import get_terminal_size
31 30 from IPython.utils import py3compat
32 31
33 32
34 33 def display_page(strng, start=0, screen_lines=25):
35 34 """Just display, no paging. screen_lines is ignored."""
36 35 if isinstance(strng, dict):
37 36 data = strng
38 37 else:
39 38 if start:
40 39 strng = u'\n'.join(strng.splitlines()[start:])
41 40 data = { 'text/plain': strng }
42 41 display(data, raw=True)
43 42
44 43
45 44 def as_hook(page_func):
46 45 """Wrap a pager func to strip the `self` arg
47 46
48 47 so it can be called as a hook.
49 48 """
50 49 return lambda self, *args, **kwargs: page_func(*args, **kwargs)
51 50
52 51
53 52 esc_re = re.compile(r"(\x1b[^m]+m)")
54 53
55 54 def page_dumb(strng, start=0, screen_lines=25):
56 55 """Very dumb 'pager' in Python, for when nothing else works.
57 56
58 57 Only moves forward, same interface as page(), except for pager_cmd and
59 58 mode.
60 59 """
61 60 if isinstance(strng, dict):
62 61 strng = strng.get('text/plain', '')
63 62 out_ln = strng.splitlines()[start:]
64 63 screens = chop(out_ln,screen_lines-1)
65 64 if len(screens) == 1:
66 65 print(os.linesep.join(screens[0]))
67 66 else:
68 67 last_escape = ""
69 68 for scr in screens[0:-1]:
70 69 hunk = os.linesep.join(scr)
71 70 print(last_escape + hunk)
72 71 if not page_more():
73 72 return
74 73 esc_list = esc_re.findall(hunk)
75 74 if len(esc_list) > 0:
76 75 last_escape = esc_list[-1]
77 76 print(last_escape + os.linesep.join(screens[-1]))
78 77
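`chop` (imported above from IPython.utils.data) just splits a sequence into fixed-size chunks; re-implemented here to show the screen-by-screen paging arithmetic:

def chop(seq, size):
    """Split seq into consecutive chunks of at most `size` items."""
    return [seq[i:i + size] for i in range(0, len(seq), size)]

lines = ['line %d' % i for i in range(7)]
screens = chop(lines, 3)               # screen_lines - 1 in page_dumb
print(len(screens))                    # 3
print(screens[-1])                     # ['line 6']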
79 78 def _detect_screen_size(screen_lines_def):
80 79 """Attempt to work out the number of lines on the screen.
81 80
82 81 This is called by page(). It can raise an error (e.g. when run in the
83 82 test suite), so it's separated out so it can easily be called in a try block.
84 83 """
85 84 TERM = os.environ.get('TERM',None)
86 85 if not((TERM=='xterm' or TERM=='xterm-color') and sys.platform != 'sunos5'):
87 86 # curses causes problems on many terminals other than xterm, and
88 87 # some termios calls lock up on Sun OS5.
89 88 return screen_lines_def
90 89
91 90 try:
92 91 import termios
93 92 import curses
94 93 except ImportError:
95 94 return screen_lines_def
96 95
97 96 # There is a bug in curses, where *sometimes* it fails to properly
98 97 # initialize, and then after the endwin() call is made, the
99 98 # terminal is left in an unusable state. Rather than trying to
100 99 # check everytime for this (by requesting and comparing termios
101 100 # flags each time), we just save the initial terminal state and
102 101 # unconditionally reset it every time. It's cheaper than making
103 102 # the checks.
104 103 try:
105 104 term_flags = termios.tcgetattr(sys.stdout)
106 105 except termios.error as err:
107 106 # can fail on Linux 2.6, pager_page will catch the TypeError
108 107 raise TypeError('termios error: {0}'.format(err))
109 108
110 109 # Curses modifies the stdout buffer size by default, which messes
111 110 # up Python's normal stdout buffering. This would manifest itself
112 111 # to IPython users as delayed printing on stdout after having used
113 112 # the pager.
114 113 #
115 114 # We can prevent this by manually setting the NCURSES_NO_SETBUF
116 115 # environment variable. For more details, see:
117 116 # http://bugs.python.org/issue10144
118 117 NCURSES_NO_SETBUF = os.environ.get('NCURSES_NO_SETBUF', None)
119 118 os.environ['NCURSES_NO_SETBUF'] = ''
120 119
121 120 # Proceed with curses initialization
122 121 try:
123 122 scr = curses.initscr()
124 123 except AttributeError:
125 124 # Curses on Solaris may not be complete, so we can't use it there
126 125 return screen_lines_def
127 126
128 127 screen_lines_real,screen_cols = scr.getmaxyx()
129 128 curses.endwin()
130 129
131 130 # Restore environment
132 131 if NCURSES_NO_SETBUF is None:
133 132 del os.environ['NCURSES_NO_SETBUF']
134 133 else:
135 134 os.environ['NCURSES_NO_SETBUF'] = NCURSES_NO_SETBUF
136 135
137 136 # Restore terminal state in case endwin() didn't.
138 137 termios.tcsetattr(sys.stdout,termios.TCSANOW,term_flags)
139 138 # Now we have what we needed: the screen size in rows/columns
140 139 return screen_lines_real
141 140 #print '***Screen size:',screen_lines_real,'lines x',\
142 141 #screen_cols,'columns.' # dbg
143 142
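For comparison, Python 3.3+ ships a far simpler probe that avoids curses entirely; it checks the COLUMNS/LINES environment variables and then asks the OS:

import shutil

size = shutil.get_terminal_size(fallback=(80, 24))
print(size.lines, size.columns)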
144 143 def pager_page(strng, start=0, screen_lines=0, pager_cmd=None):
145 144 """Display a string, piping through a pager after a certain length.
146 145
147 146 strng can be a mime-bundle dict, supplying multiple representations,
148 147 keyed by mime-type.
149 148
150 149 The screen_lines parameter specifies the number of *usable* lines of your
151 150 terminal screen (total lines minus lines you need to reserve to show other
152 151 information).
153 152
154 153 If you set screen_lines to a number <=0, page() will try to auto-determine
155 154 your screen size and will only use up to (screen_size+screen_lines) for
156 155 printing, paging after that. That is, if you want auto-detection but need
157 156 to reserve the bottom 3 lines of the screen, use screen_lines = -3, and for
158 157 auto-detection without any lines reserved simply use screen_lines = 0.
159 158
160 159 If a string won't fit in the allowed lines, it is sent through the
161 160 specified pager command. If none given, look for PAGER in the environment,
162 161 and ultimately default to less.
163 162
164 163 If no system pager works, the string is sent through a 'dumb pager'
165 164 written in python, very simplistic.
166 165 """
167 166
168 167 # for compatibility with mime-bundle form:
169 168 if isinstance(strng, dict):
170 169 strng = strng['text/plain']
171 170
172 171 # Ugly kludge, but calling curses.initscr() flat out crashes in emacs
173 172 TERM = os.environ.get('TERM','dumb')
174 173 if TERM in ['dumb','emacs'] and os.name != 'nt':
175 174 print(strng)
176 175 return
177 176 # chop off the topmost part of the string we don't want to see
178 177 str_lines = strng.splitlines()[start:]
179 178 str_toprint = os.linesep.join(str_lines)
180 179 num_newlines = len(str_lines)
181 180 len_str = len(str_toprint)
182 181
183 182 # Dumb heuristics to guesstimate number of on-screen lines the string
184 183 # takes. Very basic, but good enough for docstrings in reasonable
185 184 # terminals. If someone later feels like refining it, it's not hard.
186 185 numlines = max(num_newlines,int(len_str/80)+1)
187 186
188 187 screen_lines_def = get_terminal_size()[1]
189 188
190 189 # auto-determine screen size
191 190 if screen_lines <= 0:
192 191 try:
193 192 screen_lines += _detect_screen_size(screen_lines_def)
194 193 except (TypeError, UnsupportedOperation):
195 194 print(str_toprint)
196 195 return
197 196
198 197 #print 'numlines',numlines,'screenlines',screen_lines # dbg
199 198 if numlines <= screen_lines :
200 199 #print '*** normal print' # dbg
201 200 print(str_toprint)
202 201 else:
203 202 # Try to open pager and default to internal one if that fails.
204 203 # All failure modes are tagged as 'retval=1', to match the return
205 204 # value of a failed system command. If any intermediate attempt
206 205 # sets retval to 1, at the end we resort to our own page_dumb() pager.
207 206 pager_cmd = get_pager_cmd(pager_cmd)
208 207 pager_cmd += ' ' + get_pager_start(pager_cmd,start)
209 208 if os.name == 'nt':
210 209 if pager_cmd.startswith('type'):
211 210                 # The default WinXP 'type' command fails on complex strings.
212 211 retval = 1
213 212 else:
214 213 fd, tmpname = tempfile.mkstemp('.txt')
215 214 try:
216 215 os.close(fd)
217 216 with open(tmpname, 'wt') as tmpfile:
218 217 tmpfile.write(strng)
219 218 cmd = "%s < %s" % (pager_cmd, tmpname)
220 219 # tmpfile needs to be closed for windows
221 220 if os.system(cmd):
222 221 retval = 1
223 222 else:
224 223 retval = None
225 224 finally:
226 225 os.remove(tmpname)
227 226 else:
228 227 try:
229 228 retval = None
230 229 # if I use popen4, things hang. No idea why.
231 230 #pager,shell_out = os.popen4(pager_cmd)
232 231 pager = os.popen(pager_cmd, 'w')
233 232 try:
234 233 pager_encoding = pager.encoding or sys.stdout.encoding
235 234 pager.write(py3compat.cast_bytes_py2(
236 235 strng, encoding=pager_encoding))
237 236 finally:
238 237 retval = pager.close()
239 238 except IOError as msg: # broken pipe when user quits
240 239 if msg.args == (32, 'Broken pipe'):
241 240 retval = None
242 241 else:
243 242 retval = 1
244 243 except OSError:
245 244 # Other strange problems, sometimes seen in Win2k/cygwin
246 245 retval = 1
247 246 if retval is not None:
248 247 page_dumb(strng,screen_lines=screen_lines)
249 248
250 249
251 250 def page(data, start=0, screen_lines=0, pager_cmd=None):
252 251 """Display content in a pager, piping through a pager after a certain length.
253 252
254 253 data can be a mime-bundle dict, supplying multiple representations,
255 254 keyed by mime-type, or text.
256 255
257 256 Pager is dispatched via the `show_in_pager` IPython hook.
258 257 If no hook is registered, `pager_page` will be used.
259 258 """
260 259 # Some routines may auto-compute start offsets incorrectly and pass a
261 260 # negative value. Offset to 0 for robustness.
262 261 start = max(0, start)
263 262
264 263 # first, try the hook
265 264 ip = get_ipython()
266 265 if ip:
267 266 try:
268 267 ip.hooks.show_in_pager(data, start=start, screen_lines=screen_lines)
269 268 return
270 269 except TryNext:
271 270 pass
272 271
273 272 # fallback on default pager
274 273 return pager_page(data, start, screen_lines, pager_cmd)
275 274
276 275
277 276 def page_file(fname, start=0, pager_cmd=None):
278 277 """Page a file, using an optional pager command and starting line.
279 278 """
280 279
281 280 pager_cmd = get_pager_cmd(pager_cmd)
282 281 pager_cmd += ' ' + get_pager_start(pager_cmd,start)
283 282
284 283 try:
285 284 if os.environ['TERM'] in ['emacs','dumb']:
286 285 raise EnvironmentError
287 286 system(pager_cmd + ' ' + fname)
288 287 except:
289 288 try:
290 289 if start > 0:
291 290 start -= 1
292 291 page(open(fname).read(),start)
293 292 except:
294 293 print('Unable to show file',repr(fname))
295 294
296 295
297 296 def get_pager_cmd(pager_cmd=None):
298 297 """Return a pager command.
299 298
300 299 Makes some attempts at finding an OS-correct one.
301 300 """
302 301 if os.name == 'posix':
303 302 default_pager_cmd = 'less -R' # -R for color control sequences
304 303 elif os.name in ['nt','dos']:
305 304 default_pager_cmd = 'type'
306 305
307 306 if pager_cmd is None:
308 307 try:
309 308 pager_cmd = os.environ['PAGER']
310 309 except:
311 310 pager_cmd = default_pager_cmd
312 311
313 312 if pager_cmd == 'less' and '-r' not in os.environ.get('LESS', '').lower():
314 313 pager_cmd += ' -R'
315 314
316 315 return pager_cmd
317 316
318 317
319 318 def get_pager_start(pager, start):
320 319 """Return the string for paging files with an offset.
321 320
322 321 This is the '+N' argument which less and more (under Unix) accept.
323 322 """
324 323
325 324 if pager in ['less','more']:
326 325 if start:
327 326 start_string = '+' + str(start)
328 327 else:
329 328 start_string = ''
330 329 else:
331 330 start_string = ''
332 331 return start_string
333 332
334 333
335 334 # (X)emacs on win32 doesn't like to be bypassed with msvcrt.getch()
336 335 if os.name == 'nt' and os.environ.get('TERM','dumb') != 'emacs':
337 336 import msvcrt
338 337 def page_more():
339 338 """ Smart pausing between pages
340 339
341 340 @return: True if need print more lines, False if quit
342 341 """
343 342 sys.stdout.write('---Return to continue, q to quit--- ')
344 343 ans = msvcrt.getwch()
345 344 if ans in ("q", "Q"):
346 345 result = False
347 346 else:
348 347 result = True
349 348 sys.stdout.write("\b"*37 + " "*37 + "\b"*37)
350 349 return result
351 350 else:
352 351 def page_more():
353 352 ans = py3compat.input('---Return to continue, q to quit--- ')
354 353 if ans.lower().startswith('q'):
355 354 return False
356 355 else:
357 356 return True
358 357
359 358
360 359 def snip_print(str,width = 75,print_full = 0,header = ''):
361 360 """Print a string snipping the midsection to fit in width.
362 361
363 362 print_full: mode control:
364 363
365 364 - 0: only snip long strings
366 365 - 1: send to page() directly.
367 366 - 2: snip long strings and ask for full length viewing with page()
368 367
369 368 Return 1 if snipping was necessary, 0 otherwise."""
370 369
371 370 if print_full == 1:
372 371 page(header+str)
373 372 return 0
374 373
375 374 print(header, end=' ')
376 375 if len(str) < width:
377 376 print(str)
378 377 snip = 0
379 378 else:
380 379 whalf = int((width -5)/2)
381 380 print(str[:whalf] + ' <...> ' + str[-whalf:])
382 381 snip = 1
383 382 if snip and print_full == 2:
384 383 if py3compat.input(header+' Snipped. View (y/n)? [N]').lower() == 'y':
385 384 page(str)
386 385 return snip
@@ -1,314 +1,313 b''
1 1 # encoding: utf-8
2 2 """
3 3 An application for managing IPython profiles.
4 4
5 5 To be invoked as the `ipython profile` subcommand.
6 6
7 7 Authors:
8 8
9 9 * Min RK
10 10
11 11 """
12 from __future__ import print_function
13 12
14 13 #-----------------------------------------------------------------------------
15 14 # Copyright (C) 2008 The IPython Development Team
16 15 #
17 16 # Distributed under the terms of the BSD License. The full license is in
18 17 # the file COPYING, distributed as part of this software.
19 18 #-----------------------------------------------------------------------------
20 19
21 20 #-----------------------------------------------------------------------------
22 21 # Imports
23 22 #-----------------------------------------------------------------------------
24 23
25 24 import os
26 25
27 26 from traitlets.config.application import Application
28 27 from IPython.core.application import (
29 28 BaseIPythonApplication, base_flags
30 29 )
31 30 from IPython.core.profiledir import ProfileDir
32 31 from IPython.utils.importstring import import_item
33 32 from IPython.paths import get_ipython_dir, get_ipython_package_dir
34 33 from IPython.utils import py3compat
35 34 from traitlets import Unicode, Bool, Dict, observe
36 35
37 36 #-----------------------------------------------------------------------------
38 37 # Constants
39 38 #-----------------------------------------------------------------------------
40 39
41 40 create_help = """Create an IPython profile by name
42 41
43 42 Create an ipython profile directory by its name or
44 43 profile directory path. Profile directories contain
45 44 configuration, log and security related files and are named
46 45 using the convention 'profile_<name>'. By default they are
47 46 located in your ipython directory. Once created, you can
48 47 edit the configuration files in the profile
49 48 directory to configure IPython. Most users will create a
50 49 profile directory by name,
51 50 `ipython profile create myprofile`, which will put the directory
52 51 in `<ipython_dir>/profile_myprofile`.
53 52 """
54 53 list_help = """List available IPython profiles
55 54
56 55 List all available profiles, by profile location, that can
57 56 be found in the current working directory or in the ipython
58 57 directory. Profile directories are named using the convention
59 58 'profile_<profile>'.
60 59 """
61 60 profile_help = """Manage IPython profiles
62 61
63 62 Profile directories contain
64 63 configuration, log and security related files and are named
65 64 using the convention 'profile_<name>'. By default they are
66 65 located in your ipython directory. You can create profiles
67 66 with `ipython profile create <name>`, or see the profiles you
68 67 already have with `ipython profile list`
69 68
70 69 To get started configuring IPython, simply do:
71 70
72 71 $> ipython profile create
73 72
74 73 and IPython will create the default profile in <ipython_dir>/profile_default,
75 74 where you can edit ipython_config.py to start configuring IPython.
76 75
77 76 """
78 77
79 78 _list_examples = "ipython profile list # list all profiles"
80 79
81 80 _create_examples = """
82 81 ipython profile create foo # create profile foo w/ default config files
83 82 ipython profile create foo --reset # restage default config files over current
84 83 ipython profile create foo --parallel # also stage parallel config files
85 84 """
86 85
87 86 _main_examples = """
88 87 ipython profile create -h # show the help string for the create subcommand
89 88 ipython profile list -h # show the help string for the list subcommand
90 89
91 90 ipython locate profile foo # print the path to the directory for profile 'foo'
92 91 """
93 92
94 93 #-----------------------------------------------------------------------------
95 94 # Profile Application Class (for `ipython profile` subcommand)
96 95 #-----------------------------------------------------------------------------
97 96
98 97
99 98 def list_profiles_in(path):
100 99 """list profiles in a given root directory"""
101 100 files = os.listdir(path)
102 101 profiles = []
103 102 for f in files:
104 103 try:
105 104 full_path = os.path.join(path, f)
106 105 except UnicodeError:
107 106 continue
108 107 if os.path.isdir(full_path) and f.startswith('profile_'):
109 108 profiles.append(f.split('_',1)[-1])
110 109 return profiles
111 110
112 111
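A minimal, self-contained version of the same scan, assuming the conventional `profile_<name>` layout and the default IPython directory location:

import os

def scan_profiles(path):
    profiles = []
    for f in os.listdir(path):
        full_path = os.path.join(path, f)
        if os.path.isdir(full_path) and f.startswith('profile_'):
            profiles.append(f.split('_', 1)[-1])    # 'profile_foo' -> 'foo'
    return profiles

ipython_dir = os.path.expanduser('~/.ipython')      # typical default location
if os.path.isdir(ipython_dir):
    print(scan_profiles(ipython_dir))               # e.g. ['default', 'foo']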
113 112 def list_bundled_profiles():
114 113 """list profiles that are bundled with IPython."""
115 114 path = os.path.join(get_ipython_package_dir(), u'core', u'profile')
116 115 files = os.listdir(path)
117 116 profiles = []
118 117 for profile in files:
119 118 full_path = os.path.join(path, profile)
120 119 if os.path.isdir(full_path) and profile != "__pycache__":
121 120 profiles.append(profile)
122 121 return profiles
123 122
124 123
125 124 class ProfileLocate(BaseIPythonApplication):
126 125 description = """print the path to an IPython profile dir"""
127 126
128 127 def parse_command_line(self, argv=None):
129 128 super(ProfileLocate, self).parse_command_line(argv)
130 129 if self.extra_args:
131 130 self.profile = self.extra_args[0]
132 131
133 132 def start(self):
134 133 print(self.profile_dir.location)
135 134
136 135
137 136 class ProfileList(Application):
138 137 name = u'ipython-profile'
139 138 description = list_help
140 139 examples = _list_examples
141 140
142 141 aliases = Dict({
143 142 'ipython-dir' : 'ProfileList.ipython_dir',
144 143 'log-level' : 'Application.log_level',
145 144 })
146 145 flags = Dict(dict(
147 146 debug = ({'Application' : {'log_level' : 0}},
148 147 "Set Application.log_level to 0, maximizing log output."
149 148 )
150 149 ))
151 150
152 151 ipython_dir = Unicode(get_ipython_dir(),
153 152 help="""
154 153 The name of the IPython directory. This directory is used for logging
155 154 configuration (through profiles), history storage, etc. The default
156 155         is usually $HOME/.ipython. This option can also be specified through
157 156 the environment variable IPYTHONDIR.
158 157 """
159 158 ).tag(config=True)
160 159
161 160
162 161 def _print_profiles(self, profiles):
163 162 """print list of profiles, indented."""
164 163 for profile in profiles:
165 164 print(' %s' % profile)
166 165
167 166 def list_profile_dirs(self):
168 167 profiles = list_bundled_profiles()
169 168 if profiles:
170 169 print()
171 170 print("Available profiles in IPython:")
172 171 self._print_profiles(profiles)
173 172 print()
174 173 print(" The first request for a bundled profile will copy it")
175 174 print(" into your IPython directory (%s)," % self.ipython_dir)
176 175 print(" where you can customize it.")
177 176
178 177 profiles = list_profiles_in(self.ipython_dir)
179 178 if profiles:
180 179 print()
181 180 print("Available profiles in %s:" % self.ipython_dir)
182 181 self._print_profiles(profiles)
183 182
184 183 profiles = list_profiles_in(py3compat.getcwd())
185 184 if profiles:
186 185 print()
187 186 print("Available profiles in current directory (%s):" % py3compat.getcwd())
188 187 self._print_profiles(profiles)
189 188
190 189 print()
191 190 print("To use any of the above profiles, start IPython with:")
192 191 print(" ipython --profile=<name>")
193 192 print()
194 193
195 194 def start(self):
196 195 self.list_profile_dirs()
197 196
198 197
199 198 create_flags = {}
200 199 create_flags.update(base_flags)
201 200 # don't include '--init' flag, which implies running profile create in other apps
202 201 create_flags.pop('init')
203 202 create_flags['reset'] = ({'ProfileCreate': {'overwrite' : True}},
204 203 "reset config files in this profile to the defaults.")
205 204 create_flags['parallel'] = ({'ProfileCreate': {'parallel' : True}},
206 205 "Include the config files for parallel "
207 206 "computing apps (ipengine, ipcontroller, etc.)")
208 207
209 208
210 209 class ProfileCreate(BaseIPythonApplication):
211 210 name = u'ipython-profile'
212 211 description = create_help
213 212 examples = _create_examples
214 213 auto_create = Bool(True)
215 214 def _log_format_default(self):
216 215 return "[%(name)s] %(message)s"
217 216
218 217 def _copy_config_files_default(self):
219 218 return True
220 219
221 220 parallel = Bool(False,
222 221 help="whether to include parallel computing config files"
223 222 ).tag(config=True)
224 223
225 224 @observe('parallel')
226 225 def _parallel_changed(self, change):
227 226 parallel_files = [ 'ipcontroller_config.py',
228 227 'ipengine_config.py',
229 228 'ipcluster_config.py'
230 229 ]
231 230 if change['new']:
232 231 for cf in parallel_files:
233 232 self.config_files.append(cf)
234 233 else:
235 234 for cf in parallel_files:
236 235 if cf in self.config_files:
237 236 self.config_files.remove(cf)
238 237
239 238 def parse_command_line(self, argv):
240 239 super(ProfileCreate, self).parse_command_line(argv)
241 240 # accept positional arg as profile name
242 241 if self.extra_args:
243 242 self.profile = self.extra_args[0]
244 243
245 244 flags = Dict(create_flags)
246 245
247 246 classes = [ProfileDir]
248 247
249 248 def _import_app(self, app_path):
250 249 """import an app class"""
251 250 app = None
252 251 name = app_path.rsplit('.', 1)[-1]
253 252 try:
254 253 app = import_item(app_path)
255 254 except ImportError:
256 255 self.log.info("Couldn't import %s, config file will be excluded", name)
257 256 except Exception:
258 257 self.log.warning('Unexpected error importing %s', name, exc_info=True)
259 258 return app
260 259
261 260 def init_config_files(self):
262 261 super(ProfileCreate, self).init_config_files()
263 262 # use local imports, since these classes may import from here
264 263 from IPython.terminal.ipapp import TerminalIPythonApp
265 264 apps = [TerminalIPythonApp]
266 265 for app_path in (
267 266 'ipykernel.kernelapp.IPKernelApp',
268 267 ):
269 268 app = self._import_app(app_path)
270 269 if app is not None:
271 270 apps.append(app)
272 271 if self.parallel:
273 272 from ipyparallel.apps.ipcontrollerapp import IPControllerApp
274 273 from ipyparallel.apps.ipengineapp import IPEngineApp
275 274 from ipyparallel.apps.ipclusterapp import IPClusterStart
276 275 apps.extend([
277 276 IPControllerApp,
278 277 IPEngineApp,
279 278 IPClusterStart,
280 279 ])
281 280 for App in apps:
282 281 app = App()
283 282 app.config.update(self.config)
284 283 app.log = self.log
285 284 app.overwrite = self.overwrite
286 285 app.copy_config_files=True
287 286 app.ipython_dir=self.ipython_dir
288 287 app.profile_dir=self.profile_dir
289 288 app.init_config_files()
290 289
291 290 def stage_default_config_file(self):
292 291 pass
293 292
294 293
295 294 class ProfileApp(Application):
296 295 name = u'ipython profile'
297 296 description = profile_help
298 297 examples = _main_examples
299 298
300 299 subcommands = Dict(dict(
301 300 create = (ProfileCreate, ProfileCreate.description.splitlines()[0]),
302 301 list = (ProfileList, ProfileList.description.splitlines()[0]),
303 302 locate = (ProfileLocate, ProfileLocate.description.splitlines()[0]),
304 303 ))
305 304
306 305 def start(self):
307 306 if self.subapp is None:
308 307 print("No subcommand specified. Must specify one of: %s"%(self.subcommands.keys()))
309 308 print()
310 309 self.print_description()
311 310 self.print_subcommands()
312 311 self.exit(1)
313 312 else:
314 313 return self.subapp.start()
@@ -1,409 +1,408 b''
1 1 # -*- coding: utf-8 -*-
2 2 """Pylab (matplotlib) support utilities."""
3 from __future__ import print_function
4 3
5 4 # Copyright (c) IPython Development Team.
6 5 # Distributed under the terms of the Modified BSD License.
7 6
8 7 from io import BytesIO
9 8
10 9 from IPython.core.display import _pngxy
11 10 from IPython.utils.decorators import flag_calls
12 11 from IPython.utils import py3compat
13 12
14 13 # If user specifies a GUI, that dictates the backend, otherwise we read the
15 14 # user's mpl default from the mpl rc structure
16 15 backends = {'tk': 'TkAgg',
17 16 'gtk': 'GTKAgg',
18 17 'gtk3': 'GTK3Agg',
19 18 'wx': 'WXAgg',
20 19 'qt': 'Qt4Agg', # qt3 not supported
21 20 'qt4': 'Qt4Agg',
22 21 'qt5': 'Qt5Agg',
23 22 'osx': 'MacOSX',
24 23 'nbagg': 'nbAgg',
25 24 'notebook': 'nbAgg',
26 25 'agg': 'agg',
27 26 'inline' : 'module://ipykernel.pylab.backend_inline'}
28 27
29 28 # We also need a reverse backends2guis mapping that will properly choose which
30 29 # GUI support to activate based on the desired matplotlib backend. For the
31 30 # most part it's just a reverse of the above dict, but we also need to add a
32 31 # few others that map to the same GUI manually:
33 32 backend2gui = dict(zip(backends.values(), backends.keys()))
34 33 # Our tests expect backend2gui to just return 'qt'
35 34 backend2gui['Qt4Agg'] = 'qt'
36 35 # In the reverse mapping, there are a few extra valid matplotlib backends that
37 36 # map to the same GUI support
38 37 backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
39 38 backend2gui['GTK3Cairo'] = 'gtk3'
40 39 backend2gui['WX'] = 'wx'
41 40 backend2gui['CocoaAgg'] = 'osx'
42 41 # And some backends that don't need GUI integration
43 42 del backend2gui['nbAgg']
44 43 del backend2gui['agg']
45 44 del backend2gui['module://ipykernel.pylab.backend_inline']
46 45
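With the two mappings in place, translating between a GUI name and a matplotlib backend (and back) is a plain dict lookup. Evaluated in this module's namespace (or after importing `backends` and `backend2gui` from IPython.core.pylabtools, assuming that module path):

print(backends['qt5'])              # 'Qt5Agg'
print(backend2gui['Qt5Agg'])        # 'qt5'
print(backend2gui.get('nbAgg'))     # None -- no GUI event-loop integration needed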
47 46 #-----------------------------------------------------------------------------
48 47 # Matplotlib utilities
49 48 #-----------------------------------------------------------------------------
50 49
51 50
52 51 def getfigs(*fig_nums):
53 52 """Get a list of matplotlib figures by figure numbers.
54 53
55 54 If no arguments are given, all available figures are returned. If the
56 55 argument list contains references to invalid figures, a warning is printed
57 56     but the function continues processing the remaining figures.
58 57
59 58 Parameters
60 59 ----------
61 60 figs : tuple
62 61 A tuple of ints giving the figure numbers of the figures to return.
63 62 """
64 63 from matplotlib._pylab_helpers import Gcf
65 64 if not fig_nums:
66 65 fig_managers = Gcf.get_all_fig_managers()
67 66 return [fm.canvas.figure for fm in fig_managers]
68 67 else:
69 68 figs = []
70 69 for num in fig_nums:
71 70 f = Gcf.figs.get(num)
72 71 if f is None:
73 72 print('Warning: figure %s not available.' % num)
74 73 else:
75 74 figs.append(f.canvas.figure)
76 75 return figs
77 76
78 77
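A usage sketch, assuming matplotlib is installed and that this module is importable as IPython.core.pylabtools:

import matplotlib
matplotlib.use('Agg')                          # headless backend for the example
import matplotlib.pyplot as plt
from IPython.core.pylabtools import getfigs   # assumed module path

plt.figure(1)
plt.figure(2)
print(len(getfigs()))     # 2 -- all open figures
print(len(getfigs(2)))    # 1 -- just figure number 2
print(getfigs(99))        # [] plus a printed warning about figure 99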
79 78 def figsize(sizex, sizey):
80 79 """Set the default figure size to be [sizex, sizey].
81 80
82 81     This is an easy-to-remember convenience wrapper that sets::
83 82
84 83 matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
85 84 """
86 85 import matplotlib
87 86 matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
88 87
89 88
90 89 def print_figure(fig, fmt='png', bbox_inches='tight', **kwargs):
91 90 """Print a figure to an image, and return the resulting file data
92 91
93 92 Returned data will be bytes unless ``fmt='svg'``,
94 93 in which case it will be unicode.
95 94
96 95 Any keyword args are passed to fig.canvas.print_figure,
97 96 such as ``quality`` or ``bbox_inches``.
98 97 """
99 98 from matplotlib import rcParams
100 99 # When there's an empty figure, we shouldn't return anything, otherwise we
101 100 # get big blank areas in the qt console.
102 101 if not fig.axes and not fig.lines:
103 102 return
104 103
105 104 dpi = fig.dpi
106 105 if fmt == 'retina':
107 106 dpi = dpi * 2
108 107 fmt = 'png'
109 108
110 109 # build keyword args
111 110 kw = dict(
112 111 format=fmt,
113 112 facecolor=fig.get_facecolor(),
114 113 edgecolor=fig.get_edgecolor(),
115 114 dpi=dpi,
116 115 bbox_inches=bbox_inches,
117 116 )
118 117 # **kwargs get higher priority
119 118 kw.update(kwargs)
120 119
121 120 bytes_io = BytesIO()
122 121 fig.canvas.print_figure(bytes_io, **kw)
123 122 data = bytes_io.getvalue()
124 123 if fmt == 'svg':
125 124 data = data.decode('utf-8')
126 125 return data
127 126
128 127 def retina_figure(fig, **kwargs):
129 128 """format a figure as a pixel-doubled (retina) PNG"""
130 129 pngdata = print_figure(fig, fmt='retina', **kwargs)
131 130 # Make sure that retina_figure acts just like print_figure and returns
132 131 # None when the figure is empty.
133 132 if pngdata is None:
134 133 return
135 134 w, h = _pngxy(pngdata)
136 135 metadata = dict(width=w//2, height=h//2)
137 136 return pngdata, metadata
138 137
139 138 # We need a little factory function here to create the closure where
140 139 # safe_execfile can live.
141 140 def mpl_runner(safe_execfile):
142 141 """Factory to return a matplotlib-enabled runner for %run.
143 142
144 143 Parameters
145 144 ----------
146 145 safe_execfile : function
147 146 This must be a function with the same interface as the
148 147 :meth:`safe_execfile` method of IPython.
149 148
150 149 Returns
151 150 -------
152 151 A function suitable for use as the ``runner`` argument of the %run magic
153 152 function.
154 153 """
155 154
156 155 def mpl_execfile(fname,*where,**kw):
157 156 """matplotlib-aware wrapper around safe_execfile.
158 157
159 158 Its interface is identical to that of the :func:`execfile` builtin.
160 159
161 160 This is ultimately a call to execfile(), but wrapped in safeties to
162 161 properly handle interactive rendering."""
163 162
164 163 import matplotlib
165 164 import matplotlib.pyplot as plt
166 165
167 166 #print '*** Matplotlib runner ***' # dbg
168 167 # turn off rendering until end of script
169 168 is_interactive = matplotlib.rcParams['interactive']
170 169 matplotlib.interactive(False)
171 170 safe_execfile(fname,*where,**kw)
172 171 matplotlib.interactive(is_interactive)
173 172 # make rendering call now, if the user tried to do it
174 173 if plt.draw_if_interactive.called:
175 174 plt.draw()
176 175 plt.draw_if_interactive.called = False
177 176
178 177 # re-draw everything that is stale
179 178 try:
180 179 da = plt.draw_all
181 180 except AttributeError:
182 181 pass
183 182 else:
184 183 da()
185 184
186 185 return mpl_execfile
187 186
188 187
189 188 def _reshow_nbagg_figure(fig):
190 189 """reshow an nbagg figure"""
191 190 try:
192 191 reshow = fig.canvas.manager.reshow
193 192 except AttributeError:
194 193 raise NotImplementedError()
195 194 else:
196 195 reshow()
197 196
198 197
199 198 def select_figure_formats(shell, formats, **kwargs):
200 199 """Select figure formats for the inline backend.
201 200
202 201 Parameters
203 202     ----------
204 203 shell : InteractiveShell
205 204 The main IPython instance.
206 205 formats : str or set
207 206 One or a set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
208 207 **kwargs : any
209 208 Extra keyword arguments to be passed to fig.canvas.print_figure.
210 209 """
211 210 import matplotlib
212 211 from matplotlib.figure import Figure
213 212
214 213 svg_formatter = shell.display_formatter.formatters['image/svg+xml']
215 214 png_formatter = shell.display_formatter.formatters['image/png']
216 215 jpg_formatter = shell.display_formatter.formatters['image/jpeg']
217 216 pdf_formatter = shell.display_formatter.formatters['application/pdf']
218 217
219 218 if isinstance(formats, py3compat.string_types):
220 219 formats = {formats}
221 220 # cast in case of list / tuple
222 221 formats = set(formats)
223 222
224 223 [ f.pop(Figure, None) for f in shell.display_formatter.formatters.values() ]
225 224
226 225 if matplotlib.get_backend().lower() == 'nbagg':
227 226 formatter = shell.display_formatter.ipython_display_formatter
228 227 formatter.for_type(Figure, _reshow_nbagg_figure)
229 228
230 229 supported = {'png', 'png2x', 'retina', 'jpg', 'jpeg', 'svg', 'pdf'}
231 230 bad = formats.difference(supported)
232 231 if bad:
233 232 bs = "%s" % ','.join([repr(f) for f in bad])
234 233 gs = "%s" % ','.join([repr(f) for f in supported])
235 234 raise ValueError("supported formats are: %s not %s" % (gs, bs))
236 235
237 236 if 'png' in formats:
238 237 png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
239 238 if 'retina' in formats or 'png2x' in formats:
240 239 png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
241 240 if 'jpg' in formats or 'jpeg' in formats:
242 241 jpg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'jpg', **kwargs))
243 242 if 'svg' in formats:
244 243 svg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'svg', **kwargs))
245 244 if 'pdf' in formats:
246 245 pdf_formatter.for_type(Figure, lambda fig: print_figure(fig, 'pdf', **kwargs))
247 246
248 247 #-----------------------------------------------------------------------------
249 248 # Code for initializing matplotlib and importing pylab
250 249 #-----------------------------------------------------------------------------
251 250
252 251
253 252 def find_gui_and_backend(gui=None, gui_select=None):
254 253 """Given a gui string return the gui and mpl backend.
255 254
256 255 Parameters
257 256 ----------
258 257 gui : str
259 258 Can be one of ('tk','gtk','wx','qt','qt4','inline','agg').
260 259 gui_select : str
261 260 Can be one of ('tk','gtk','wx','qt','qt4','inline').
262 261 This is any gui already selected by the shell.
263 262
264 263 Returns
265 264 -------
266 265 A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
267 266 'WXAgg','Qt4Agg','module://ipykernel.pylab.backend_inline','agg').
268 267 """
269 268
270 269 import matplotlib
271 270
272 271 if gui and gui != 'auto':
273 272 # select backend based on requested gui
274 273 backend = backends[gui]
275 274 if gui == 'agg':
276 275 gui = None
277 276 else:
278 277 # We need to read the backend from the original data structure, *not*
279 278 # from mpl.rcParams, since a prior invocation of %matplotlib may have
280 279 # overwritten that.
281 280 # WARNING: this assumes matplotlib 1.1 or newer!!
282 281 backend = matplotlib.rcParamsOrig['backend']
283 282 # In this case, we need to find what the appropriate gui selection call
284 283 # should be for IPython, so we can activate inputhook accordingly
285 284 gui = backend2gui.get(backend, None)
286 285
287 286 # If a GUI was already active, only that GUI (or 'inline') is allowed,
288 287 # so fall back to the previously selected one.
289 288 if gui_select and gui != gui_select:
290 289 gui = gui_select
291 290 backend = backends[gui]
292 291
293 292 return gui, backend
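# --- Editor's illustrative sketch (not part of this changeset) ---
# The backend string comes from the module-level `backends` mapping, so the
# exact value ('Qt4Agg', 'Qt5Agg', ...) depends on the IPython version in use.
from IPython.core.pylabtools import find_gui_and_backend

gui, backend = find_gui_and_backend('qt')   # e.g. ('qt', 'Qt4Agg')
gui, backend = find_gui_and_backend('agg')  # (None, 'agg'): no event loop needed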
294 293
295 294
296 295 def activate_matplotlib(backend):
297 296 """Activate the given backend and set interactive to True."""
298 297
299 298 import matplotlib
300 299 matplotlib.interactive(True)
301 300
302 301 # Matplotlib had a bug where even switch_backend could not force
303 302 # the rcParam to update. This needs to be set *before* the module
304 303 # magic of switch_backend().
305 304 matplotlib.rcParams['backend'] = backend
306 305
307 306 import matplotlib.pyplot
308 307 matplotlib.pyplot.switch_backend(backend)
309 308
310 309 # This must be imported last in the matplotlib series, after
311 310 # backend/interactivity choices have been made
312 311 import matplotlib.pyplot as plt
313 312
314 313 plt.show._needmain = False
315 314 # We need to detect at runtime whether draw_if_interactive() is called by the
316 315 # user's code. For this, we wrap it in a decorator which adds a 'called' flag.
317 316 plt.draw_if_interactive = flag_calls(plt.draw_if_interactive)
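# --- Editor's illustrative sketch (not part of this changeset) ---
# 'agg' is a non-GUI backend, so this call is safe even in headless
# environments; GUI backends additionally need event-loop integration enabled.
from IPython.core.pylabtools import activate_matplotlib

activate_matplotlib('agg')  # matplotlib is now interactive, pyplot switched to Agg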
318 317
319 318
320 319 def import_pylab(user_ns, import_all=True):
321 320 """Populate the namespace with pylab-related values.
322 321
323 322 Imports matplotlib, pylab, numpy, and everything from pylab and numpy.
324 323
325 324 Also imports a few names from IPython (figsize, display, getfigs)
326 325
327 326 """
328 327
329 328 # Importing numpy as np and pyplot as plt are conventions we're trying to
330 329 # standardize on. Making them available to users by default
331 330 # will greatly help this.
332 331 s = ("import numpy\n"
333 332 "import matplotlib\n"
334 333 "from matplotlib import pylab, mlab, pyplot\n"
335 334 "np = numpy\n"
336 335 "plt = pyplot\n"
337 336 )
338 337 exec(s, user_ns)
339 338
340 339 if import_all:
341 340 s = ("from matplotlib.pylab import *\n"
342 341 "from numpy import *\n")
343 342 exec(s, user_ns)
344 343
345 344 # IPython symbols to add
346 345 user_ns['figsize'] = figsize
347 346 from IPython.core.display import display
348 347 # Add display and getfigs to the user's namespace
349 348 user_ns['display'] = display
350 349 user_ns['getfigs'] = getfigs
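# --- Editor's illustrative sketch (not part of this changeset) ---
# import_pylab can populate a plain dict instead of a real user namespace; it
# requires numpy and matplotlib to be importable. With import_all=False only
# the module aliases and the IPython helpers are injected.
from IPython.core.pylabtools import import_pylab

ns = {}
import_pylab(ns, import_all=False)
print(sorted(k for k in ns if not k.startswith('__')))
# roughly: ['display', 'figsize', 'getfigs', 'matplotlib', 'mlab', 'np',
#           'numpy', 'plt', 'pylab', 'pyplot']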
351 350
352 351
353 352 def configure_inline_support(shell, backend):
354 353 """Configure an IPython shell object for matplotlib use.
355 354
356 355 Parameters
357 356 ----------
358 357 shell : InteractiveShell instance
359 358
360 359 backend : matplotlib backend
361 360 """
362 361 # If using our svg payload backend, register the post-execution
363 362 # function that will pick up the results for display. This can only be
364 363 # done with access to the real shell object.
365 364
366 365 # Note: if we can't load the inline backend, then there's no point
367 366 # continuing (such as in terminal-only shells in environments without
368 367 # zeromq available).
369 368 try:
370 369 from ipykernel.pylab.backend_inline import InlineBackend
371 370 except ImportError:
372 371 return
373 372 import matplotlib
374 373
375 374 cfg = InlineBackend.instance(parent=shell)
376 375 cfg.shell = shell
377 376 if cfg not in shell.configurables:
378 377 shell.configurables.append(cfg)
379 378
380 379 if backend == backends['inline']:
381 380 from ipykernel.pylab.backend_inline import flush_figures
382 381 shell.events.register('post_execute', flush_figures)
383 382
384 383 # Save rcParams that will be overwritten
385 384 shell._saved_rcParams = dict()
386 385 for k in cfg.rc:
387 386 shell._saved_rcParams[k] = matplotlib.rcParams[k]
388 387 # load inline_rc
389 388 matplotlib.rcParams.update(cfg.rc)
390 389 new_backend_name = "inline"
391 390 else:
392 391 from ipykernel.pylab.backend_inline import flush_figures
393 392 try:
394 393 shell.events.unregister('post_execute', flush_figures)
395 394 except ValueError:
396 395 pass
397 396 if hasattr(shell, '_saved_rcParams'):
398 397 matplotlib.rcParams.update(shell._saved_rcParams)
399 398 del shell._saved_rcParams
400 399 new_backend_name = "other"
401 400
402 401 # Only enable the formats once -> don't change the enabled formats (which the user may
403 402 # have changed) when getting another "%matplotlib inline" call.
404 403 # See https://github.com/ipython/ipykernel/issues/29
405 404 cur_backend = getattr(configure_inline_support, "current_backend", "unset")
406 405 if new_backend_name != cur_backend:
407 406 # Setup the default figure format
408 407 select_figure_formats(shell, cfg.figure_formats, **cfg.print_figure_kwargs)
409 408 configure_inline_support.current_backend = new_backend_name
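# --- Editor's illustrative sketch (not part of this changeset) ---
# Roughly what "%matplotlib inline" does under the hood: activate the inline
# backend, then register the inline display hooks. configure_inline_support
# silently returns if ipykernel (and hence the inline backend) is missing.
from IPython import get_ipython
from IPython.core.pylabtools import (activate_matplotlib, backends,
                                     configure_inline_support)

shell = get_ipython()
activate_matplotlib(backends['inline'])
configure_inline_support(shell, backends['inline'])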
@@ -1,411 +1,408 b''
1 1 # encoding: utf-8
2 2 """
3 3 A mixin for :class:`~IPython.core.application.Application` classes that
4 4 launch InteractiveShell instances, load extensions, etc.
5 5 """
6 6
7 7 # Copyright (c) IPython Development Team.
8 8 # Distributed under the terms of the Modified BSD License.
9 9
10 from __future__ import absolute_import
11 from __future__ import print_function
12
13 10 import glob
14 11 import os
15 12 import sys
16 13
17 14 from traitlets.config.application import boolean_flag
18 15 from traitlets.config.configurable import Configurable
19 16 from traitlets.config.loader import Config
20 17 from IPython.core import pylabtools
21 18 from IPython.utils import py3compat
22 19 from IPython.utils.contexts import preserve_keys
23 20 from IPython.utils.path import filefind
24 21 from traitlets import (
25 22 Unicode, Instance, List, Bool, CaselessStrEnum, observe,
26 23 )
27 24 from IPython.terminal import pt_inputhooks
28 25
29 26 #-----------------------------------------------------------------------------
30 27 # Aliases and Flags
31 28 #-----------------------------------------------------------------------------
32 29
33 30 gui_keys = tuple(sorted(pt_inputhooks.backends) + sorted(pt_inputhooks.aliases))
34 31
35 32 backend_keys = sorted(pylabtools.backends.keys())
36 33 backend_keys.insert(0, 'auto')
37 34
38 35 shell_flags = {}
39 36
40 37 addflag = lambda *args: shell_flags.update(boolean_flag(*args))
41 38 addflag('autoindent', 'InteractiveShell.autoindent',
42 39 'Turn on autoindenting.', 'Turn off autoindenting.'
43 40 )
44 41 addflag('automagic', 'InteractiveShell.automagic',
45 42 """Turn on the auto calling of magic commands. Type %%magic at the
46 43 IPython prompt for more information.""",
47 44 'Turn off the auto calling of magic commands.'
48 45 )
49 46 addflag('pdb', 'InteractiveShell.pdb',
50 47 "Enable auto calling the pdb debugger after every exception.",
51 48 "Disable auto calling the pdb debugger after every exception."
52 49 )
53 50 addflag('pprint', 'PlainTextFormatter.pprint',
54 51 "Enable auto pretty printing of results.",
55 52 "Disable auto pretty printing of results."
56 53 )
57 54 addflag('color-info', 'InteractiveShell.color_info',
58 55 """IPython can display information about objects via a set of functions,
59 56 and optionally can use colors for this, syntax highlighting
60 57 source code and various other elements. This is on by default, but can cause
61 58 problems with some pagers. If you see such problems, you can disable the
62 59 colours.""",
63 60 "Disable using colors for info related things."
64 61 )
65 62 nosep_config = Config()
66 63 nosep_config.InteractiveShell.separate_in = ''
67 64 nosep_config.InteractiveShell.separate_out = ''
68 65 nosep_config.InteractiveShell.separate_out2 = ''
69 66
70 67 shell_flags['nosep']=(nosep_config, "Eliminate all spacing between prompts.")
71 68 shell_flags['pylab'] = (
72 69 {'InteractiveShellApp' : {'pylab' : 'auto'}},
73 70 """Pre-load matplotlib and numpy for interactive use with
74 71 the default matplotlib backend."""
75 72 )
76 73 shell_flags['matplotlib'] = (
77 74 {'InteractiveShellApp' : {'matplotlib' : 'auto'}},
78 75 """Configure matplotlib for interactive use with
79 76 the default matplotlib backend."""
80 77 )
81 78
82 79 # it's possible we don't want short aliases for *all* of these:
83 80 shell_aliases = dict(
84 81 autocall='InteractiveShell.autocall',
85 82 colors='InteractiveShell.colors',
86 83 logfile='InteractiveShell.logfile',
87 84 logappend='InteractiveShell.logappend',
88 85 c='InteractiveShellApp.code_to_run',
89 86 m='InteractiveShellApp.module_to_run',
90 87 ext='InteractiveShellApp.extra_extension',
91 88 gui='InteractiveShellApp.gui',
92 89 pylab='InteractiveShellApp.pylab',
93 90 matplotlib='InteractiveShellApp.matplotlib',
94 91 )
95 92 shell_aliases['cache-size'] = 'InteractiveShell.cache_size'
96 93
97 94 #-----------------------------------------------------------------------------
98 95 # Main classes and functions
99 96 #-----------------------------------------------------------------------------
100 97
101 98 class InteractiveShellApp(Configurable):
102 99 """A Mixin for applications that start InteractiveShell instances.
103 100
104 101 Provides configurables for loading extensions and executing files
105 102 as part of configuring a Shell environment.
106 103
107 104 The following methods should be called by the :meth:`initialize` method
108 105 of the subclass:
109 106
110 107 - :meth:`init_path`
111 108 - :meth:`init_shell` (to be implemented by the subclass)
112 109 - :meth:`init_gui_pylab`
113 110 - :meth:`init_extensions`
114 111 - :meth:`init_code`
115 112 """
116 113 extensions = List(Unicode(),
117 114 help="A list of dotted module names of IPython extensions to load."
118 115 ).tag(config=True)
119 116 extra_extension = Unicode('',
120 117 help="dotted module name of an IPython extension to load."
121 118 ).tag(config=True)
122 119
123 120 reraise_ipython_extension_failures = Bool(False,
124 121 help="Reraise exceptions encountered loading IPython extensions?",
125 122 ).tag(config=True)
126 123
127 124 # Extensions that are always loaded (not configurable)
128 125 default_extensions = List(Unicode(), [u'storemagic']).tag(config=False)
129 126
130 127 hide_initial_ns = Bool(True,
131 128 help="""Should variables loaded at startup (by startup files, exec_lines, etc.)
132 129 be hidden from tools like %who?"""
133 130 ).tag(config=True)
134 131
135 132 exec_files = List(Unicode(),
136 133 help="""List of files to run at IPython startup."""
137 134 ).tag(config=True)
138 135 exec_PYTHONSTARTUP = Bool(True,
139 136 help="""Run the file referenced by the PYTHONSTARTUP environment
140 137 variable at IPython startup."""
141 138 ).tag(config=True)
142 139 file_to_run = Unicode('',
143 140 help="""A file to be run""").tag(config=True)
144 141
145 142 exec_lines = List(Unicode(),
146 143 help="""lines of code to run at IPython startup."""
147 144 ).tag(config=True)
148 145 code_to_run = Unicode('',
149 146 help="Execute the given command string."
150 147 ).tag(config=True)
151 148 module_to_run = Unicode('',
152 149 help="Run the module as a script."
153 150 ).tag(config=True)
154 151 gui = CaselessStrEnum(gui_keys, allow_none=True,
155 152 help="Enable GUI event loop integration with any of {0}.".format(gui_keys)
156 153 ).tag(config=True)
157 154 matplotlib = CaselessStrEnum(backend_keys, allow_none=True,
158 155 help="""Configure matplotlib for interactive use with
159 156 the default matplotlib backend."""
160 157 ).tag(config=True)
161 158 pylab = CaselessStrEnum(backend_keys, allow_none=True,
162 159 help="""Pre-load matplotlib and numpy for interactive use,
163 160 selecting a particular matplotlib backend and loop integration.
164 161 """
165 162 ).tag(config=True)
166 163 pylab_import_all = Bool(True,
167 164 help="""If true, IPython will populate the user namespace with numpy, pylab, etc.
168 165 and an ``import *`` is done from numpy and pylab when using pylab mode.
169 166
170 167 When False, pylab mode should not import any names into the user namespace.
171 168 """
172 169 ).tag(config=True)
173 170 shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
174 171 allow_none=True)
175 172 # whether interact-loop should start
176 173 interact = Bool(True)
177 174
178 175 user_ns = Instance(dict, args=None, allow_none=True)
179 176 @observe('user_ns')
180 177 def _user_ns_changed(self, change):
181 178 if self.shell is not None:
182 179 self.shell.user_ns = change['new']
183 180 self.shell.init_user_ns()
184 181
185 182 def init_path(self):
186 183 """Add current working directory, '', to sys.path"""
187 184 if sys.path[0] != '':
188 185 sys.path.insert(0, '')
189 186
190 187 def init_shell(self):
191 188 raise NotImplementedError("Override in subclasses")
192 189
193 190 def init_gui_pylab(self):
194 191 """Enable GUI event loop integration, taking pylab into account."""
195 192 enable = False
196 193 shell = self.shell
197 194 if self.pylab:
198 195 enable = lambda key: shell.enable_pylab(key, import_all=self.pylab_import_all)
199 196 key = self.pylab
200 197 elif self.matplotlib:
201 198 enable = shell.enable_matplotlib
202 199 key = self.matplotlib
203 200 elif self.gui:
204 201 enable = shell.enable_gui
205 202 key = self.gui
206 203
207 204 if not enable:
208 205 return
209 206
210 207 try:
211 208 r = enable(key)
212 209 except ImportError:
213 210 self.log.warning("Eventloop or matplotlib integration failed. Is matplotlib installed?")
214 211 self.shell.showtraceback()
215 212 return
216 213 except Exception:
217 214 self.log.warning("GUI event loop or pylab initialization failed")
218 215 self.shell.showtraceback()
219 216 return
220 217
221 218 if isinstance(r, tuple):
222 219 gui, backend = r[:2]
223 220 self.log.info("Enabling GUI event loop integration, "
224 221 "eventloop=%s, matplotlib=%s", gui, backend)
225 222 if key == "auto":
226 223 print("Using matplotlib backend: %s" % backend)
227 224 else:
228 225 gui = r
229 226 self.log.info("Enabling GUI event loop integration, "
230 227 "eventloop=%s", gui)
231 228
232 229 def init_extensions(self):
233 230 """Load all IPython extensions in IPythonApp.extensions.
234 231
235 232 This uses the :meth:`ExtensionManager.load_extensions` to load all
236 233 the extensions listed in ``self.extensions``.
237 234 """
238 235 try:
239 236 self.log.debug("Loading IPython extensions...")
240 237 extensions = self.default_extensions + self.extensions
241 238 if self.extra_extension:
242 239 extensions.append(self.extra_extension)
243 240 for ext in extensions:
244 241 try:
245 242 self.log.info("Loading IPython extension: %s" % ext)
246 243 self.shell.extension_manager.load_extension(ext)
247 244 except:
248 245 if self.reraise_ipython_extension_failures:
249 246 raise
250 247 msg = ("Error in loading extension: {ext}\n"
251 248 "Check your config files in {location}".format(
252 249 ext=ext,
253 250 location=self.profile_dir.location
254 251 ))
255 252 self.log.warning(msg, exc_info=True)
256 253 except:
257 254 if self.reraise_ipython_extension_failures:
258 255 raise
259 256 self.log.warning("Unknown error in loading extensions:", exc_info=True)
260 257
261 258 def init_code(self):
262 259 """run the pre-flight code, specified via exec_lines"""
263 260 self._run_startup_files()
264 261 self._run_exec_lines()
265 262 self._run_exec_files()
266 263
267 264 # Hide variables defined here from %who etc.
268 265 if self.hide_initial_ns:
269 266 self.shell.user_ns_hidden.update(self.shell.user_ns)
270 267
271 268 # command-line execution (ipython -i script.py, ipython -m module)
272 269 # should *not* be excluded from %whos
273 270 self._run_cmd_line_code()
274 271 self._run_module()
275 272
276 273 # flush output, so it won't be attached to the first cell
277 274 sys.stdout.flush()
278 275 sys.stderr.flush()
279 276
280 277 def _run_exec_lines(self):
281 278 """Run lines of code in IPythonApp.exec_lines in the user's namespace."""
282 279 if not self.exec_lines:
283 280 return
284 281 try:
285 282 self.log.debug("Running code from IPythonApp.exec_lines...")
286 283 for line in self.exec_lines:
287 284 try:
288 285 self.log.info("Running code in user namespace: %s" %
289 286 line)
290 287 self.shell.run_cell(line, store_history=False)
291 288 except:
292 289 self.log.warning("Error in executing line in user "
293 290 "namespace: %s" % line)
294 291 self.shell.showtraceback()
295 292 except:
296 293 self.log.warning("Unknown error in handling IPythonApp.exec_lines:")
297 294 self.shell.showtraceback()
298 295
299 296 def _exec_file(self, fname, shell_futures=False):
300 297 try:
301 298 full_filename = filefind(fname, [u'.', self.ipython_dir])
302 299 except IOError:
303 300 self.log.warning("File not found: %r"%fname)
304 301 return
305 302 # Make sure that the running script gets a proper sys.argv as if it
306 303 # were run from a system shell.
307 304 save_argv = sys.argv
308 305 sys.argv = [full_filename] + self.extra_args[1:]
309 306 # protect sys.argv from potential unicode strings on Python 2:
310 307 if not py3compat.PY3:
311 308 sys.argv = [ py3compat.cast_bytes(a) for a in sys.argv ]
312 309 try:
313 310 if os.path.isfile(full_filename):
314 311 self.log.info("Running file in user namespace: %s" %
315 312 full_filename)
316 313 # Ensure that __file__ is always defined to match Python
317 314 # behavior.
318 315 with preserve_keys(self.shell.user_ns, '__file__'):
319 316 self.shell.user_ns['__file__'] = fname
320 317 if full_filename.endswith('.ipy'):
321 318 self.shell.safe_execfile_ipy(full_filename,
322 319 shell_futures=shell_futures)
323 320 else:
324 321 # default to python, even without extension
325 322 self.shell.safe_execfile(full_filename,
326 323 self.shell.user_ns,
327 324 shell_futures=shell_futures,
328 325 raise_exceptions=True)
329 326 finally:
330 327 sys.argv = save_argv
331 328
332 329 def _run_startup_files(self):
333 330 """Run files from profile startup directory"""
334 331 startup_dir = self.profile_dir.startup_dir
335 332 startup_files = []
336 333
337 334 if self.exec_PYTHONSTARTUP and os.environ.get('PYTHONSTARTUP', False) and \
338 335 not (self.file_to_run or self.code_to_run or self.module_to_run):
339 336 python_startup = os.environ['PYTHONSTARTUP']
340 337 self.log.debug("Running PYTHONSTARTUP file %s...", python_startup)
341 338 try:
342 339 self._exec_file(python_startup)
343 340 except:
344 341 self.log.warning("Unknown error in handling PYTHONSTARTUP file %s:", python_startup)
345 342 self.shell.showtraceback()
346 343
347 344 startup_files += glob.glob(os.path.join(startup_dir, '*.py'))
348 345 startup_files += glob.glob(os.path.join(startup_dir, '*.ipy'))
349 346 if not startup_files:
350 347 return
351 348
352 349 self.log.debug("Running startup files from %s...", startup_dir)
353 350 try:
354 351 for fname in sorted(startup_files):
355 352 self._exec_file(fname)
356 353 except:
357 354 self.log.warning("Unknown error in handling startup files:")
358 355 self.shell.showtraceback()
359 356
360 357 def _run_exec_files(self):
361 358 """Run files from IPythonApp.exec_files"""
362 359 if not self.exec_files:
363 360 return
364 361
365 362 self.log.debug("Running files in IPythonApp.exec_files...")
366 363 try:
367 364 for fname in self.exec_files:
368 365 self._exec_file(fname)
369 366 except:
370 367 self.log.warning("Unknown error in handling IPythonApp.exec_files:")
371 368 self.shell.showtraceback()
372 369
373 370 def _run_cmd_line_code(self):
374 371 """Run code or file specified at the command-line"""
375 372 if self.code_to_run:
376 373 line = self.code_to_run
377 374 try:
378 375 self.log.info("Running code given at command line (c=): %s" %
379 376 line)
380 377 self.shell.run_cell(line, store_history=False)
381 378 except:
382 379 self.log.warning("Error in executing line in user namespace: %s" %
383 380 line)
384 381 self.shell.showtraceback()
385 382 if not self.interact:
386 383 self.exit(1)
387 384
388 385 # Like Python itself, ignore the second if the first of these is present
389 386 elif self.file_to_run:
390 387 fname = self.file_to_run
391 388 if os.path.isdir(fname):
392 389 fname = os.path.join(fname, "__main__.py")
393 390 try:
394 391 self._exec_file(fname, shell_futures=True)
395 392 except:
396 393 self.shell.showtraceback(tb_offset=4)
397 394 if not self.interact:
398 395 self.exit(1)
399 396
400 397 def _run_module(self):
401 398 """Run module specified at the command-line."""
402 399 if self.module_to_run:
403 400 # Make sure that the module gets a proper sys.argv as if it were
404 401 # run using `python -m`.
405 402 save_argv = sys.argv
406 403 sys.argv = [sys.executable] + self.extra_args
407 404 try:
408 405 self.shell.safe_run_module(self.module_to_run,
409 406 self.shell.user_ns)
410 407 finally:
411 408 sys.argv = save_argv
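# --- Editor's illustrative sketch (not part of this changeset) ---
# A minimal, hypothetical consumer of the mixin above, wiring the init_*
# methods in the order the class docstring prescribes. A real application must
# also implement init_shell() and provide the attributes the mixin uses
# (profile_dir, ipython_dir, extra_args, ...).
from traitlets.config import Application

class MiniShellApp(Application, InteractiveShellApp):
    def initialize(self, argv=None):
        super(MiniShellApp, self).initialize(argv)
        self.init_path()
        self.init_shell()       # subclass responsibility
        self.init_gui_pylab()
        self.init_extensions()
        self.init_code()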
@@ -1,5 +1,4 b''
1 1 # coding: iso-8859-5
2 2 # (Unlikely to be the default encoding for most testers.)
3 3 # ������������������� <- Cyrillic characters
4 from __future__ import unicode_literals
5 4 u = '����'
@@ -1,3 +1,2 b''
1 from __future__ import print_function
2 1 import sys
3 2 print(sys.argv[1:])
@@ -1,48 +1,47 b''
1 1 """Minimal script to reproduce our nasty reference counting bug.
2 2
3 3 The problem is related to https://github.com/ipython/ipython/issues/141
4 4
5 5 The original fix for that appeared to work, but John D. Hunter found a
6 6 matplotlib example which, when run twice in a row, would break. The problem
7 7 was that open figures held references to internals of Tkinter.
8 8
9 9 This code reproduces the problem that John saw, without matplotlib.
10 10
11 11 This script is meant to be called by other parts of the test suite that call it
12 12 via %run as if it were executed interactively by the user. As of 2011-05-29,
13 13 test_run.py calls it.
14 14 """
15 from __future__ import print_function
16 15
17 16 #-----------------------------------------------------------------------------
18 17 # Module imports
19 18 #-----------------------------------------------------------------------------
20 19 import sys
21 20
22 21 from IPython import get_ipython
23 22
24 23 #-----------------------------------------------------------------------------
25 24 # Globals
26 25 #-----------------------------------------------------------------------------
27 26
28 27 # This needs to be here because nose and other test runners will import
29 28 # this module. Importing this module has potential side effects that we
30 29 # want to prevent.
31 30 if __name__ == '__main__':
32 31
33 32 ip = get_ipython()
34 33
35 34 if '_refbug_cache' not in ip.user_ns:
36 35 ip.user_ns['_refbug_cache'] = []
37 36
38 37
39 38 aglobal = 'Hello'
40 39 def f():
41 40 return aglobal
42 41
43 42 cache = ip.user_ns['_refbug_cache']
44 43 cache.append(f)
45 44
46 45 def call_f():
47 46 for func in cache:
48 47 print('lowercased:',func().lower())
@@ -1,35 +1,34 b''
1 1 """Simple script to be run *twice*, to check reference counting bugs.
2 2
3 3 See test_run for details."""
4 4
5 from __future__ import print_function
6 5
7 6 import sys
8 7
9 8 # We want to ensure that while objects remain available for immediate access,
10 9 # objects from *previous* runs of the same script get collected, to avoid
11 10 # accumulating massive amounts of old references.
12 11 class C(object):
13 12 def __init__(self,name):
14 13 self.name = name
15 14 self.p = print
16 15 self.flush_stdout = sys.stdout.flush
17 16
18 17 def __del__(self):
19 18 self.p('tclass.py: deleting object:',self.name)
20 19 self.flush_stdout()
21 20
22 21 try:
23 22 name = sys.argv[1]
24 23 except IndexError:
25 24 pass
26 25 else:
27 26 if name.startswith('C'):
28 27 c = C(name)
29 28
30 29 #print >> sys.stderr, "ARGV:", sys.argv # dbg
31 30
32 31 # This next print statement is NOT debugging; the check is made from a
33 32 # completely separate process, so we verify it by capturing stdout:
34 33 print('ARGV 1-:', sys.argv[1:])
35 34 sys.stdout.flush()
@@ -1,75 +1,74 b''
1 1 # coding: utf-8
2 2 """Tests for the compilerop module.
3 3 """
4 4 #-----------------------------------------------------------------------------
5 5 # Copyright (C) 2010-2011 The IPython Development Team.
6 6 #
7 7 # Distributed under the terms of the BSD License.
8 8 #
9 9 # The full license is in the file COPYING.txt, distributed with this software.
10 10 #-----------------------------------------------------------------------------
11 11
12 12 #-----------------------------------------------------------------------------
13 13 # Imports
14 14 #-----------------------------------------------------------------------------
15 from __future__ import print_function
16 15
17 16 # Stdlib imports
18 17 import linecache
19 18 import sys
20 19
21 20 # Third-party imports
22 21 import nose.tools as nt
23 22
24 23 # Our own imports
25 24 from IPython.core import compilerop
26 25 from IPython.utils import py3compat
27 26
28 27 #-----------------------------------------------------------------------------
29 28 # Test functions
30 29 #-----------------------------------------------------------------------------
31 30
32 31 def test_code_name():
33 32 code = 'x=1'
34 33 name = compilerop.code_name(code)
35 34 nt.assert_true(name.startswith('<ipython-input-0'))
36 35
37 36
38 37 def test_code_name2():
39 38 code = 'x=1'
40 39 name = compilerop.code_name(code, 9)
41 40 nt.assert_true(name.startswith('<ipython-input-9'))
42 41
43 42
44 43 def test_cache():
45 44 """Test the compiler correctly compiles and caches inputs
46 45 """
47 46 cp = compilerop.CachingCompiler()
48 47 ncache = len(linecache.cache)
49 48 cp.cache('x=1')
50 49 nt.assert_true(len(linecache.cache) > ncache)
51 50
52 51 def setUp():
53 52 # Check we're in a proper Python environment (some imports, such
54 53 # as GTK, can change the default encoding, which can hide bugs).
55 54 nt.assert_equal(sys.getdefaultencoding(), "utf-8" if py3compat.PY3 else "ascii")
56 55
57 56 def test_cache_unicode():
58 57 cp = compilerop.CachingCompiler()
59 58 ncache = len(linecache.cache)
60 59 cp.cache(u"t = 'žćčšđ'")
61 60 nt.assert_true(len(linecache.cache) > ncache)
62 61
63 62 def test_compiler_check_cache():
64 63 """Test the compiler properly manages the cache.
65 64 """
66 65 # Rather simple-minded tests that just exercise the API
67 66 cp = compilerop.CachingCompiler()
68 67 cp.cache('x=1', 99)
69 68 # Now ensure that our entries survive a linecache.checkcache() pass
70 69 linecache.checkcache()
71 70 for k in linecache.cache:
72 71 if k.startswith('<ipython-input-99'):
73 72 break
74 73 else:
75 74 raise AssertionError('Entry for input-99 missing from linecache')
@@ -1,806 +1,739 b''
1 1 # encoding: utf-8
2 2 """Tests for the IPython tab-completion machinery."""
3 3
4 4 # Copyright (c) IPython Development Team.
5 5 # Distributed under the terms of the Modified BSD License.
6 6
7 7 import os
8 8 import sys
9 9 import unittest
10 10
11 11 from contextlib import contextmanager
12 12
13 13 import nose.tools as nt
14 14
15 15 from traitlets.config.loader import Config
16 16 from IPython import get_ipython
17 17 from IPython.core import completer
18 18 from IPython.external.decorators import knownfailureif
19 19 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
20 20 from IPython.utils.generics import complete_object
21 21 from IPython.utils.py3compat import string_types, unicode_type
22 22 from IPython.testing import decorators as dec
23 23
24 24 #-----------------------------------------------------------------------------
25 25 # Test functions
26 26 #-----------------------------------------------------------------------------
27 27
28 28 @contextmanager
29 29 def greedy_completion():
30 30 ip = get_ipython()
31 31 greedy_original = ip.Completer.greedy
32 32 try:
33 33 ip.Completer.greedy = True
34 34 yield
35 35 finally:
36 36 ip.Completer.greedy = greedy_original
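# --- Editor's illustrative sketch (not part of this changeset) ---
# Greedy completion evaluates the expression left of the dot, so a[0] is
# actually executed to offer its attributes; the context manager restores the
# original setting even if the body raises.
def _example_greedy_use():
    ip = get_ipython()
    ip.ex('a = list(range(5))')
    with greedy_completion():
        _, matches = ip.complete('.', line='a[0].', cursor_pos=5)
    return matches  # expected to include 'a[0].real'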
37 37
38 38 def test_protect_filename():
39 39 if sys.platform == 'win32':
40 40 pairs = [('abc','abc'),
41 41 (' abc','" abc"'),
42 42 ('a bc','"a bc"'),
43 43 ('a bc','"a bc"'),
44 44 (' bc','" bc"'),
45 45 ]
46 46 else:
47 47 pairs = [('abc','abc'),
48 48 (' abc',r'\ abc'),
49 49 ('a bc',r'a\ bc'),
50 50 ('a bc',r'a\ \ bc'),
51 51 (' bc',r'\ \ bc'),
52 52 # On posix, we also protect parens and other special characters.
53 53 ('a(bc',r'a\(bc'),
54 54 ('a)bc',r'a\)bc'),
55 55 ('a( )bc',r'a\(\ \)bc'),
56 56 ('a[1]bc', r'a\[1\]bc'),
57 57 ('a{1}bc', r'a\{1\}bc'),
58 58 ('a#bc', r'a\#bc'),
59 59 ('a?bc', r'a\?bc'),
60 60 ('a=bc', r'a\=bc'),
61 61 ('a\\bc', r'a\\bc'),
62 62 ('a|bc', r'a\|bc'),
63 63 ('a;bc', r'a\;bc'),
64 64 ('a:bc', r'a\:bc'),
65 65 ("a'bc", r"a\'bc"),
66 66 ('a*bc', r'a\*bc'),
67 67 ('a"bc', r'a\"bc'),
68 68 ('a^bc', r'a\^bc'),
69 69 ('a&bc', r'a\&bc'),
70 70 ]
71 71 # run the actual tests
72 72 for s1, s2 in pairs:
73 73 s1p = completer.protect_filename(s1)
74 74 nt.assert_equal(s1p, s2)
75 75
76 76
77 77 def check_line_split(splitter, test_specs):
78 78 for part1, part2, split in test_specs:
79 79 cursor_pos = len(part1)
80 80 line = part1+part2
81 81 out = splitter.split_line(line, cursor_pos)
82 82 nt.assert_equal(out, split)
83 83
84 84
85 85 def test_line_split():
86 86 """Basic line splitter test with default specs."""
87 87 sp = completer.CompletionSplitter()
88 88 # The format of the test specs is: part1, part2, expected answer. Parts 1
89 89 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
90 90 # was at the end of part1. So an empty part2 represents someone hitting
91 91 # tab at the end of the line, the most common case.
92 92 t = [('run some/scrip', '', 'some/scrip'),
93 93 ('run scripts/er', 'ror.py foo', 'scripts/er'),
94 94 ('echo $HOM', '', 'HOM'),
95 95 ('print sys.pa', '', 'sys.pa'),
96 96 ('print(sys.pa', '', 'sys.pa'),
97 97 ("execfile('scripts/er", '', 'scripts/er'),
98 98 ('a[x.', '', 'x.'),
99 99 ('a[x.', 'y', 'x.'),
100 100 ('cd "some_file/', '', 'some_file/'),
101 101 ]
102 102 check_line_split(sp, t)
103 103 # Ensure splitting works OK with unicode by re-running the tests with
104 104 # all inputs turned into unicode
105 105 check_line_split(sp, [ map(unicode_type, p) for p in t] )
106 106
107 107
108 108 def test_custom_completion_error():
109 109 """Test that errors from custom attribute completers are silenced."""
110 110 ip = get_ipython()
111 111 class A(object): pass
112 112 ip.user_ns['a'] = A()
113 113
114 114 @complete_object.when_type(A)
115 115 def complete_A(a, existing_completions):
116 116 raise TypeError("this should be silenced")
117 117
118 118 ip.complete("a.")
119 119
120 120
121 121 def test_unicode_completions():
122 122 ip = get_ipython()
123 123 # Some strings that trigger different types of completion. Check them both
124 124 # in str and unicode forms
125 125 s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']
126 126 for t in s + list(map(unicode_type, s)):
127 127 # We don't need to check exact completion values (they may change
128 128 # depending on the state of the namespace), but at least no exceptions
129 129 # should be thrown and the return value should be a pair of (text, list)
130 130 # values.
131 131 text, matches = ip.complete(t)
132 132 nt.assert_true(isinstance(text, string_types))
133 133 nt.assert_true(isinstance(matches, list))
134 134
135 135 def test_latex_completions():
136 136 from IPython.core.latex_symbols import latex_symbols
137 137 import random
138 138 ip = get_ipython()
139 139 # Test some random unicode symbols
140 140 keys = random.sample(latex_symbols.keys(), 10)
141 141 for k in keys:
142 142 text, matches = ip.complete(k)
143 143 nt.assert_equal(len(matches),1)
144 144 nt.assert_equal(text, k)
145 145 nt.assert_equal(matches[0], latex_symbols[k])
146 146 # Test a more complex line
147 147 text, matches = ip.complete(u'print(\\alpha')
148 148 nt.assert_equal(text, u'\\alpha')
149 149 nt.assert_equal(matches[0], latex_symbols['\\alpha'])
150 150 # Test multiple matching latex symbols
151 151 text, matches = ip.complete(u'\\al')
152 152 nt.assert_in('\\alpha', matches)
153 153 nt.assert_in('\\aleph', matches)
154 154
155 155
156 156
157 157
158 @dec.onlyif(sys.version_info[0] >= 3, 'This test only apply on python3')
159 158 def test_back_latex_completion():
160 159 ip = get_ipython()
161 160
162 161 # do not return more than 1 match for \beta, only the latex one.
163 162 name, matches = ip.complete('\\β')
164 163 nt.assert_equal(len(matches), 1)
165 164 nt.assert_equal(matches[0], '\\beta')
166 165
167 @dec.onlyif(sys.version_info[0] >= 3, 'This test only apply on python3')
168 166 def test_back_unicode_completion():
169 167 ip = get_ipython()
170 168
171 169 name, matches = ip.complete('\\Ⅴ')
172 170 nt.assert_equal(len(matches), 1)
173 171 nt.assert_equal(matches[0], '\\ROMAN NUMERAL FIVE')
174 172
175 173
176 @dec.onlyif(sys.version_info[0] >= 3, 'This test only apply on python3')
177 174 def test_forward_unicode_completion():
178 175 ip = get_ipython()
179 176
180 177 name, matches = ip.complete('\\ROMAN NUMERAL FIVE')
181 178 nt.assert_equal(len(matches), 1)
182 179 nt.assert_equal(matches[0], 'Ⅴ')
183 180
184 @dec.onlyif(sys.version_info[0] >= 3, 'This test only apply on python3')
185 181 @dec.knownfailureif(sys.platform == 'win32', 'Fails if there is a C:\\j... path')
186 182 def test_no_ascii_back_completion():
187 183 ip = get_ipython()
188 184 with TemporaryWorkingDirectory(): # Avoid any filename completions
189 185 # single ascii letters that don't yet have completions
190 186 for letter in 'jJ' :
191 187 name, matches = ip.complete('\\'+letter)
192 188 nt.assert_equal(matches, [])
193 189
194 190
195 191
196 192
197 193 class CompletionSplitterTestCase(unittest.TestCase):
198 194 def setUp(self):
199 195 self.sp = completer.CompletionSplitter()
200 196
201 197 def test_delim_setting(self):
202 198 self.sp.delims = ' '
203 199 nt.assert_equal(self.sp.delims, ' ')
204 200 nt.assert_equal(self.sp._delim_expr, '[\ ]')
205 201
206 202 def test_spaces(self):
207 203 """Test with only spaces as split chars."""
208 204 self.sp.delims = ' '
209 205 t = [('foo', '', 'foo'),
210 206 ('run foo', '', 'foo'),
211 207 ('run foo', 'bar', 'foo'),
212 208 ]
213 209 check_line_split(self.sp, t)
214 210
215 211
216 212 def test_has_open_quotes1():
217 213 for s in ["'", "'''", "'hi' '"]:
218 214 nt.assert_equal(completer.has_open_quotes(s), "'")
219 215
220 216
221 217 def test_has_open_quotes2():
222 218 for s in ['"', '"""', '"hi" "']:
223 219 nt.assert_equal(completer.has_open_quotes(s), '"')
224 220
225 221
226 222 def test_has_open_quotes3():
227 223 for s in ["''", "''' '''", "'hi' 'ipython'"]:
228 224 nt.assert_false(completer.has_open_quotes(s))
229 225
230 226
231 227 def test_has_open_quotes4():
232 228 for s in ['""', '""" """', '"hi" "ipython"']:
233 229 nt.assert_false(completer.has_open_quotes(s))
234 230
235 231
236 232 @knownfailureif(sys.platform == 'win32', "abspath completions fail on Windows")
237 233 def test_abspath_file_completions():
238 234 ip = get_ipython()
239 235 with TemporaryDirectory() as tmpdir:
240 236 prefix = os.path.join(tmpdir, 'foo')
241 237 suffixes = ['1', '2']
242 238 names = [prefix+s for s in suffixes]
243 239 for n in names:
244 240 open(n, 'w').close()
245 241
246 242 # Check simple completion
247 243 c = ip.complete(prefix)[1]
248 244 nt.assert_equal(c, names)
249 245
250 246 # Now check with a function call
251 247 cmd = 'a = f("%s' % prefix
252 248 c = ip.complete(prefix, cmd)[1]
253 249 comp = [prefix+s for s in suffixes]
254 250 nt.assert_equal(c, comp)
255 251
256 252
257 253 def test_local_file_completions():
258 254 ip = get_ipython()
259 255 with TemporaryWorkingDirectory():
260 256 prefix = './foo'
261 257 suffixes = ['1', '2']
262 258 names = [prefix+s for s in suffixes]
263 259 for n in names:
264 260 open(n, 'w').close()
265 261
266 262 # Check simple completion
267 263 c = ip.complete(prefix)[1]
268 264 nt.assert_equal(c, names)
269 265
270 266 # Now check with a function call
271 267 cmd = 'a = f("%s' % prefix
272 268 c = ip.complete(prefix, cmd)[1]
273 269 comp = set(prefix+s for s in suffixes)
274 270 nt.assert_true(comp.issubset(set(c)))
275 271
276 272
277 273 def test_greedy_completions():
278 274 ip = get_ipython()
279 275 ip.ex('a=list(range(5))')
280 276 _,c = ip.complete('.',line='a[0].')
281 277 nt.assert_false('.real' in c,
282 278 "Shouldn't have completed on a[0]: %s"%c)
283 279 with greedy_completion():
284 280 def _(line, cursor_pos, expect, message):
285 281 _,c = ip.complete('.', line=line, cursor_pos=cursor_pos)
286 282 nt.assert_in(expect, c, message%c)
287 283
288 284 yield _, 'a[0].', 5, 'a[0].real', "Should have completed on a[0].: %s"
289 285 yield _, 'a[0].r', 6, 'a[0].real', "Should have completed on a[0].r: %s"
290 286
291 287 if sys.version_info > (3,4):
292 288 yield _, 'a[0].from_', 10, 'a[0].from_bytes', "Should have completed on a[0].from_: %s"
293 289
294 290
295 291
296 292 def test_omit__names():
297 293 # also happens to test IPCompleter as a configurable
298 294 ip = get_ipython()
299 295 ip._hidden_attr = 1
300 296 ip._x = {}
301 297 c = ip.Completer
302 298 ip.ex('ip=get_ipython()')
303 299 cfg = Config()
304 300 cfg.IPCompleter.omit__names = 0
305 301 c.update_config(cfg)
306 302 s,matches = c.complete('ip.')
307 303 nt.assert_in('ip.__str__', matches)
308 304 nt.assert_in('ip._hidden_attr', matches)
309 305 cfg = Config()
310 306 cfg.IPCompleter.omit__names = 1
311 307 c.update_config(cfg)
312 308 s,matches = c.complete('ip.')
313 309 nt.assert_not_in('ip.__str__', matches)
314 310 nt.assert_in('ip._hidden_attr', matches)
315 311 cfg = Config()
316 312 cfg.IPCompleter.omit__names = 2
317 313 c.update_config(cfg)
318 314 s,matches = c.complete('ip.')
319 315 nt.assert_not_in('ip.__str__', matches)
320 316 nt.assert_not_in('ip._hidden_attr', matches)
321 317 s,matches = c.complete('ip._x.')
322 318 nt.assert_in('ip._x.keys', matches)
323 319 del ip._hidden_attr
324 320
325 321
326 322 def test_limit_to__all__False_ok():
327 323 ip = get_ipython()
328 324 c = ip.Completer
329 325 ip.ex('class D: x=24')
330 326 ip.ex('d=D()')
331 327 cfg = Config()
332 328 cfg.IPCompleter.limit_to__all__ = False
333 329 c.update_config(cfg)
334 330 s, matches = c.complete('d.')
335 331 nt.assert_in('d.x', matches)
336 332
337 333
338 334 def test_get__all__entries_ok():
339 335 class A(object):
340 336 __all__ = ['x', 1]
341 337 words = completer.get__all__entries(A())
342 338 nt.assert_equal(words, ['x'])
343 339
344 340
345 341 def test_get__all__entries_no__all__ok():
346 342 class A(object):
347 343 pass
348 344 words = completer.get__all__entries(A())
349 345 nt.assert_equal(words, [])
350 346
351 347
352 348 def test_func_kw_completions():
353 349 ip = get_ipython()
354 350 c = ip.Completer
355 351 ip.ex('def myfunc(a=1,b=2): return a+b')
356 352 s, matches = c.complete(None, 'myfunc(1,b')
357 353 nt.assert_in('b=', matches)
358 354 # Simulate completing with cursor right after b (pos==10):
359 355 s, matches = c.complete(None, 'myfunc(1,b)', 10)
360 356 nt.assert_in('b=', matches)
361 357 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
362 358 nt.assert_in('b=', matches)
363 359 #builtin function
364 360 s, matches = c.complete(None, 'min(k, k')
365 361 nt.assert_in('key=', matches)
366 362
367 363
368 364 def test_default_arguments_from_docstring():
369 365 ip = get_ipython()
370 366 c = ip.Completer
371 367 kwd = c._default_arguments_from_docstring(
372 368 'min(iterable[, key=func]) -> value')
373 369 nt.assert_equal(kwd, ['key'])
374 370 #with cython type etc
375 371 kwd = c._default_arguments_from_docstring(
376 372 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
377 373 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
378 374 #white spaces
379 375 kwd = c._default_arguments_from_docstring(
380 376 '\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
381 377 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
382 378
383 379 def test_line_magics():
384 380 ip = get_ipython()
385 381 c = ip.Completer
386 382 s, matches = c.complete(None, 'lsmag')
387 383 nt.assert_in('%lsmagic', matches)
388 384 s, matches = c.complete(None, '%lsmag')
389 385 nt.assert_in('%lsmagic', matches)
390 386
391 387
392 388 def test_cell_magics():
393 389 from IPython.core.magic import register_cell_magic
394 390
395 391 @register_cell_magic
396 392 def _foo_cellm(line, cell):
397 393 pass
398 394
399 395 ip = get_ipython()
400 396 c = ip.Completer
401 397
402 398 s, matches = c.complete(None, '_foo_ce')
403 399 nt.assert_in('%%_foo_cellm', matches)
404 400 s, matches = c.complete(None, '%%_foo_ce')
405 401 nt.assert_in('%%_foo_cellm', matches)
406 402
407 403
408 404 def test_line_cell_magics():
409 405 from IPython.core.magic import register_line_cell_magic
410 406
411 407 @register_line_cell_magic
412 408 def _bar_cellm(line, cell):
413 409 pass
414 410
415 411 ip = get_ipython()
416 412 c = ip.Completer
417 413
418 414 # The policy here is trickier, see comments in completion code. The
419 415 # returned values depend on whether the user passes %% or not explicitly,
420 416 # and this will show a difference if the same name is both a line and cell
421 417 # magic.
422 418 s, matches = c.complete(None, '_bar_ce')
423 419 nt.assert_in('%_bar_cellm', matches)
424 420 nt.assert_in('%%_bar_cellm', matches)
425 421 s, matches = c.complete(None, '%_bar_ce')
426 422 nt.assert_in('%_bar_cellm', matches)
427 423 nt.assert_in('%%_bar_cellm', matches)
428 424 s, matches = c.complete(None, '%%_bar_ce')
429 425 nt.assert_not_in('%_bar_cellm', matches)
430 426 nt.assert_in('%%_bar_cellm', matches)
431 427
432 428
433 429 def test_magic_completion_order():
434 430
435 431 ip = get_ipython()
436 432 c = ip.Completer
437 433
438 434 # Test ordering of magics and non-magics with the same name
439 435 # We want the non-magic first
440 436
442 438 # Before the name 'matplotlib' exists in the user namespace, there should only be one option:
442 438
443 439 text, matches = c.complete('mat')
444 440 nt.assert_equal(matches, ["%matplotlib"])
445 441
446 442
447 443 ip.run_cell("matplotlib = 1") # introduce name into namespace
448 444
449 445 # After the name is defined, there should be two options, ordered like this:
450 446 text, matches = c.complete('mat')
451 447 nt.assert_equal(matches, ["matplotlib", "%matplotlib"])
452 448
453 449
454 450 ip.run_cell("timeit = 1") # define a user variable called 'timeit'
455 451
456 452 # Order of user variable and line and cell magics with same name:
457 453 text, matches = c.complete('timeit')
458 454 nt.assert_equal(matches, ["timeit", "%timeit","%%timeit"])
459 455
460 456
461 457 def test_dict_key_completion_string():
462 458 """Test dictionary key completion for string keys"""
463 459 ip = get_ipython()
464 460 complete = ip.Completer.complete
465 461
466 462 ip.user_ns['d'] = {'abc': None}
467 463
468 464 # check completion at different stages
469 465 _, matches = complete(line_buffer="d[")
470 466 nt.assert_in("'abc'", matches)
471 467 nt.assert_not_in("'abc']", matches)
472 468
473 469 _, matches = complete(line_buffer="d['")
474 470 nt.assert_in("abc", matches)
475 471 nt.assert_not_in("abc']", matches)
476 472
477 473 _, matches = complete(line_buffer="d['a")
478 474 nt.assert_in("abc", matches)
479 475 nt.assert_not_in("abc']", matches)
480 476
481 477 # check use of different quoting
482 478 _, matches = complete(line_buffer="d[\"")
483 479 nt.assert_in("abc", matches)
484 480 nt.assert_not_in('abc\"]', matches)
485 481
486 482 _, matches = complete(line_buffer="d[\"a")
487 483 nt.assert_in("abc", matches)
488 484 nt.assert_not_in('abc\"]', matches)
489 485
490 486 # check sensitivity to following context
491 487 _, matches = complete(line_buffer="d[]", cursor_pos=2)
492 488 nt.assert_in("'abc'", matches)
493 489
494 490 _, matches = complete(line_buffer="d['']", cursor_pos=3)
495 491 nt.assert_in("abc", matches)
496 492 nt.assert_not_in("abc'", matches)
497 493 nt.assert_not_in("abc']", matches)
498 494
499 495 # check multiple solutions are correctly returned and that noise is not
500 496 ip.user_ns['d'] = {'abc': None, 'abd': None, 'bad': None, object(): None,
501 497 5: None}
502 498
503 499 _, matches = complete(line_buffer="d['a")
504 500 nt.assert_in("abc", matches)
505 501 nt.assert_in("abd", matches)
506 502 nt.assert_not_in("bad", matches)
507 503 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
508 504
509 505 # check escaping and whitespace
510 506 ip.user_ns['d'] = {'a\nb': None, 'a\'b': None, 'a"b': None, 'a word': None}
511 507 _, matches = complete(line_buffer="d['a")
512 508 nt.assert_in("a\\nb", matches)
513 509 nt.assert_in("a\\'b", matches)
514 510 nt.assert_in("a\"b", matches)
515 511 nt.assert_in("a word", matches)
516 512 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
517 513
518 514 # - can complete on non-initial word of the string
519 515 _, matches = complete(line_buffer="d['a w")
520 516 nt.assert_in("word", matches)
521 517
522 518 # - understands quote escaping
523 519 _, matches = complete(line_buffer="d['a\\'")
524 520 nt.assert_in("b", matches)
525 521
526 522 # - default quoting should work like repr
527 523 _, matches = complete(line_buffer="d[")
528 524 nt.assert_in("\"a'b\"", matches)
529 525
530 526 # - when opening quote with ", possible to match with unescaped apostrophe
531 527 _, matches = complete(line_buffer="d[\"a'")
532 528 nt.assert_in("b", matches)
533 529
534 530 # need to not split at delims that readline won't split at
535 531 if '-' not in ip.Completer.splitter.delims:
536 532 ip.user_ns['d'] = {'before-after': None}
537 533 _, matches = complete(line_buffer="d['before-af")
538 534 nt.assert_in('before-after', matches)
539 535
540 536 def test_dict_key_completion_contexts():
541 537 """Test expression contexts in which dict key completion occurs"""
542 538 ip = get_ipython()
543 539 complete = ip.Completer.complete
544 540 d = {'abc': None}
545 541 ip.user_ns['d'] = d
546 542
547 543 class C:
548 544 data = d
549 545 ip.user_ns['C'] = C
550 546 ip.user_ns['get'] = lambda: d
551 547
552 548 def assert_no_completion(**kwargs):
553 549 _, matches = complete(**kwargs)
554 550 nt.assert_not_in('abc', matches)
555 551 nt.assert_not_in('abc\'', matches)
556 552 nt.assert_not_in('abc\']', matches)
557 553 nt.assert_not_in('\'abc\'', matches)
558 554 nt.assert_not_in('\'abc\']', matches)
559 555
560 556 def assert_completion(**kwargs):
561 557 _, matches = complete(**kwargs)
562 558 nt.assert_in("'abc'", matches)
563 559 nt.assert_not_in("'abc']", matches)
564 560
565 561 # no completion after string closed, even if reopened
566 562 assert_no_completion(line_buffer="d['a'")
567 563 assert_no_completion(line_buffer="d[\"a\"")
568 564 assert_no_completion(line_buffer="d['a' + ")
569 565 assert_no_completion(line_buffer="d['a' + '")
570 566
571 567 # completion in non-trivial expressions
572 568 assert_completion(line_buffer="+ d[")
573 569 assert_completion(line_buffer="(d[")
574 570 assert_completion(line_buffer="C.data[")
575 571
576 572 # greedy flag
577 573 def assert_completion(**kwargs):
578 574 _, matches = complete(**kwargs)
579 575 nt.assert_in("get()['abc']", matches)
580 576
581 577 assert_no_completion(line_buffer="get()[")
582 578 with greedy_completion():
583 579 assert_completion(line_buffer="get()[")
584 580 assert_completion(line_buffer="get()['")
585 581 assert_completion(line_buffer="get()['a")
586 582 assert_completion(line_buffer="get()['ab")
587 583 assert_completion(line_buffer="get()['abc")
588 584
589 585
590 586
591 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
592 587 def test_dict_key_completion_bytes():
593 588 """Test handling of bytes in dict key completion"""
594 589 ip = get_ipython()
595 590 complete = ip.Completer.complete
596 591
597 592 ip.user_ns['d'] = {'abc': None, b'abd': None}
598 593
599 594 _, matches = complete(line_buffer="d[")
600 595 nt.assert_in("'abc'", matches)
601 596 nt.assert_in("b'abd'", matches)
602 597
603 598 if False: # not currently implemented
604 599 _, matches = complete(line_buffer="d[b")
605 600 nt.assert_in("b'abd'", matches)
606 601 nt.assert_not_in("b'abc'", matches)
607 602
608 603 _, matches = complete(line_buffer="d[b'")
609 604 nt.assert_in("abd", matches)
610 605 nt.assert_not_in("abc", matches)
611 606
612 607 _, matches = complete(line_buffer="d[B'")
613 608 nt.assert_in("abd", matches)
614 609 nt.assert_not_in("abc", matches)
615 610
616 611 _, matches = complete(line_buffer="d['")
617 612 nt.assert_in("abc", matches)
618 613 nt.assert_not_in("abd", matches)
619 614
620 615
621 @dec.onlyif(sys.version_info[0] < 3, 'This test only applies in Py<3')
622 def test_dict_key_completion_unicode_py2():
623 """Test handling of unicode in dict key completion"""
624 ip = get_ipython()
625 complete = ip.Completer.complete
626
627 ip.user_ns['d'] = {u'abc': None,
628 u'a\u05d0b': None}
629
630 _, matches = complete(line_buffer="d[")
631 nt.assert_in("u'abc'", matches)
632 nt.assert_in("u'a\\u05d0b'", matches)
633
634 _, matches = complete(line_buffer="d['a")
635 nt.assert_in("abc", matches)
636 nt.assert_not_in("a\\u05d0b", matches)
637
638 _, matches = complete(line_buffer="d[u'a")
639 nt.assert_in("abc", matches)
640 nt.assert_in("a\\u05d0b", matches)
641
642 _, matches = complete(line_buffer="d[U'a")
643 nt.assert_in("abc", matches)
644 nt.assert_in("a\\u05d0b", matches)
645
646 # query using escape
647 if sys.platform != 'win32':
648 # Known failure on Windows
649 _, matches = complete(line_buffer=u"d[u'a\\u05d0")
650 nt.assert_in("u05d0b", matches) # tokenized after \\
651
652 # query using character
653 _, matches = complete(line_buffer=u"d[u'a\u05d0")
654 nt.assert_in(u"a\u05d0b", matches)
655
656 with greedy_completion():
657 _, matches = complete(line_buffer="d[")
658 nt.assert_in("d[u'abc']", matches)
659 nt.assert_in("d[u'a\\u05d0b']", matches)
660
661 _, matches = complete(line_buffer="d['a")
662 nt.assert_in("d['abc']", matches)
663 nt.assert_not_in("d[u'a\\u05d0b']", matches)
664
665 _, matches = complete(line_buffer="d[u'a")
666 nt.assert_in("d[u'abc']", matches)
667 nt.assert_in("d[u'a\\u05d0b']", matches)
668
669 _, matches = complete(line_buffer="d[U'a")
670 nt.assert_in("d[U'abc']", matches)
671 nt.assert_in("d[U'a\\u05d0b']", matches)
672
673 # query using escape
674 _, matches = complete(line_buffer=u"d[u'a\\u05d0")
675 nt.assert_in("d[u'a\\u05d0b']", matches) # tokenized after \\
676
677 # query using character
678 _, matches = complete(line_buffer=u"d[u'a\u05d0")
679 nt.assert_in(u"d[u'a\u05d0b']", matches)
680
681
682 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
683 616 def test_dict_key_completion_unicode_py3():
684 617 """Test handling of unicode in dict key completion"""
685 618 ip = get_ipython()
686 619 complete = ip.Completer.complete
687 620
688 621 ip.user_ns['d'] = {u'a\u05d0': None}
689 622
690 623 # query using escape
691 624 if sys.platform != 'win32':
692 625 # Known failure on Windows
693 626 _, matches = complete(line_buffer="d['a\\u05d0")
694 627 nt.assert_in("u05d0", matches) # tokenized after \\
695 628
696 629 # query using character
697 630 _, matches = complete(line_buffer="d['a\u05d0")
698 631 nt.assert_in(u"a\u05d0", matches)
699 632
700 633 with greedy_completion():
701 634 # query using escape
702 635 _, matches = complete(line_buffer="d['a\\u05d0")
703 636 nt.assert_in("d['a\\u05d0']", matches) # tokenized after \\
704 637
705 638 # query using character
706 639 _, matches = complete(line_buffer="d['a\u05d0")
707 640 nt.assert_in(u"d['a\u05d0']", matches)
708 641
709 642
710 643
711 644 @dec.skip_without('numpy')
712 645 def test_struct_array_key_completion():
713 646 """Test dict key completion applies to numpy struct arrays"""
714 647 import numpy
715 648 ip = get_ipython()
716 649 complete = ip.Completer.complete
717 650 ip.user_ns['d'] = numpy.array([], dtype=[('hello', 'f'), ('world', 'f')])
718 651 _, matches = complete(line_buffer="d['")
719 652 nt.assert_in("hello", matches)
720 653 nt.assert_in("world", matches)
721 654 # complete on the numpy struct itself
722 655 dt = numpy.dtype([('my_head', [('my_dt', '>u4'), ('my_df', '>u4')]),
723 656 ('my_data', '>f4', 5)])
724 657 x = numpy.zeros(2, dtype=dt)
725 658 ip.user_ns['d'] = x[1]
726 659 _, matches = complete(line_buffer="d['")
727 660 nt.assert_in("my_head", matches)
728 661 nt.assert_in("my_data", matches)
729 662 # complete on a nested level
730 663 with greedy_completion():
731 664 ip.user_ns['d'] = numpy.zeros(2, dtype=dt)
732 665 _, matches = complete(line_buffer="d[1]['my_head']['")
733 666 nt.assert_true(any(["my_dt" in m for m in matches]))
734 667 nt.assert_true(any(["my_df" in m for m in matches]))
735 668
736 669
737 670 @dec.skip_without('pandas')
738 671 def test_dataframe_key_completion():
739 672 """Test dict key completion applies to pandas DataFrames"""
740 673 import pandas
741 674 ip = get_ipython()
742 675 complete = ip.Completer.complete
743 676 ip.user_ns['d'] = pandas.DataFrame({'hello': [1], 'world': [2]})
744 677 _, matches = complete(line_buffer="d['")
745 678 nt.assert_in("hello", matches)
746 679 nt.assert_in("world", matches)
747 680
748 681
749 682 def test_dict_key_completion_invalids():
750 683 """Smoke test cases dict key completion can't handle"""
751 684 ip = get_ipython()
752 685 complete = ip.Completer.complete
753 686
754 687 ip.user_ns['no_getitem'] = None
755 688 ip.user_ns['no_keys'] = []
756 689 ip.user_ns['cant_call_keys'] = dict
757 690 ip.user_ns['empty'] = {}
758 691 ip.user_ns['d'] = {'abc': 5}
759 692
760 693 _, matches = complete(line_buffer="no_getitem['")
761 694 _, matches = complete(line_buffer="no_keys['")
762 695 _, matches = complete(line_buffer="cant_call_keys['")
763 696 _, matches = complete(line_buffer="empty['")
764 697 _, matches = complete(line_buffer="name_error['")
765 698 _, matches = complete(line_buffer="d['\\") # incomplete escape
766 699
767 700 class KeyCompletable(object):
768 701 def __init__(self, things=()):
769 702 self.things = things
770 703
771 704 def _ipython_key_completions_(self):
772 705 return list(self.things)
773 706
774 707 def test_object_key_completion():
775 708 ip = get_ipython()
776 709 ip.user_ns['key_completable'] = KeyCompletable(['qwerty', 'qwick'])
777 710
778 711 _, matches = ip.Completer.complete(line_buffer="key_completable['qw")
779 712 nt.assert_in('qwerty', matches)
780 713 nt.assert_in('qwick', matches)
781 714
782 715
783 716 def test_aimport_module_completer():
784 717 ip = get_ipython()
785 718 _, matches = ip.complete('i', '%aimport i')
786 719 nt.assert_in('io', matches)
787 720 nt.assert_not_in('int', matches)
788 721
789 722 def test_nested_import_module_completer():
790 723 ip = get_ipython()
791 724 _, matches = ip.complete(None, 'import IPython.co', 17)
792 725 nt.assert_in('IPython.core', matches)
793 726 nt.assert_not_in('import IPython.core', matches)
794 727 nt.assert_not_in('IPython.display', matches)
795 728
796 729 def test_import_module_completer():
797 730 ip = get_ipython()
798 731 _, matches = ip.complete('i', 'import i')
799 732 nt.assert_in('io', matches)
800 733 nt.assert_not_in('int', matches)
801 734
802 735 def test_from_module_completer():
803 736 ip = get_ipython()
804 737 _, matches = ip.complete('B', 'from io import B', 16)
805 738 nt.assert_in('BytesIO', matches)
806 739 nt.assert_not_in('BaseException', matches)
@@ -1,163 +1,162 b''
1 1 # -*- coding: utf-8 -*-
2 2 """Tests for completerlib.
3 3
4 4 """
5 from __future__ import absolute_import
6 5
7 6 #-----------------------------------------------------------------------------
8 7 # Imports
9 8 #-----------------------------------------------------------------------------
10 9
11 10 import os
12 11 import shutil
13 12 import sys
14 13 import tempfile
15 14 import unittest
16 15 from os.path import join
17 16
18 17 import nose.tools as nt
19 18
20 19 from IPython.core.completerlib import magic_run_completer, module_completion
21 20 from IPython.utils import py3compat
22 21 from IPython.utils.tempdir import TemporaryDirectory
23 22 from IPython.testing.decorators import onlyif_unicode_paths
24 23
25 24
26 25 class MockEvent(object):
27 26 def __init__(self, line):
28 27 self.line = line
29 28
30 29 #-----------------------------------------------------------------------------
31 30 # Test functions begin
32 31 #-----------------------------------------------------------------------------
33 32 class Test_magic_run_completer(unittest.TestCase):
34 33 files = [u"aao.py", u"a.py", u"b.py", u"aao.txt"]
35 34 dirs = [u"adir/", "bdir/"]
36 35
37 36 def setUp(self):
38 37 self.BASETESTDIR = tempfile.mkdtemp()
39 38 for fil in self.files:
40 39 with open(join(self.BASETESTDIR, fil), "w") as sfile:
41 40 sfile.write("pass\n")
42 41 for d in self.dirs:
43 42 os.mkdir(join(self.BASETESTDIR, d))
44 43
45 44 self.oldpath = py3compat.getcwd()
46 45 os.chdir(self.BASETESTDIR)
47 46
48 47 def tearDown(self):
49 48 os.chdir(self.oldpath)
50 49 shutil.rmtree(self.BASETESTDIR)
51 50
52 51 def test_1(self):
53 52 """Test magic_run_completer, should match two alterntives
54 53 """
55 54 event = MockEvent(u"%run a")
56 55 mockself = None
57 56 match = set(magic_run_completer(mockself, event))
58 57 self.assertEqual(match, {u"a.py", u"aao.py", u"adir/"})
59 58
60 59 def test_2(self):
61 60 """Test magic_run_completer, should match one alterntive
62 61 """
63 62 event = MockEvent(u"%run aa")
64 63 mockself = None
65 64 match = set(magic_run_completer(mockself, event))
66 65 self.assertEqual(match, {u"aao.py"})
67 66
68 67 def test_3(self):
69 68 """Test magic_run_completer with unterminated " """
70 69 event = MockEvent(u'%run "a')
71 70 mockself = None
72 71 match = set(magic_run_completer(mockself, event))
73 72 self.assertEqual(match, {u"a.py", u"aao.py", u"adir/"})
74 73
75 74 def test_completion_more_args(self):
76 75 event = MockEvent(u'%run a.py ')
77 76 match = set(magic_run_completer(None, event))
78 77 self.assertEqual(match, set(self.files + self.dirs))
79 78
80 79 def test_completion_in_dir(self):
81 80 # Github issue #3459
82 81 event = MockEvent(u'%run a.py {}'.format(join(self.BASETESTDIR, 'a')))
83 82 print(repr(event.line))
84 83 match = set(magic_run_completer(None, event))
85 84 # We specifically use replace here rather than normpath, because
86 85 # at one point there were duplicates 'adir' and 'adir/', and normpath
87 86 # would hide the failure for that.
88 87 self.assertEqual(match, {join(self.BASETESTDIR, f).replace('\\','/')
89 88 for f in (u'a.py', u'aao.py', u'aao.txt', u'adir/')})
90 89
91 90 class Test_magic_run_completer_nonascii(unittest.TestCase):
92 91 @onlyif_unicode_paths
93 92 def setUp(self):
94 93 self.BASETESTDIR = tempfile.mkdtemp()
95 94 for fil in [u"aaø.py", u"a.py", u"b.py"]:
96 95 with open(join(self.BASETESTDIR, fil), "w") as sfile:
97 96 sfile.write("pass\n")
98 97 self.oldpath = py3compat.getcwd()
99 98 os.chdir(self.BASETESTDIR)
100 99
101 100 def tearDown(self):
102 101 os.chdir(self.oldpath)
103 102 shutil.rmtree(self.BASETESTDIR)
104 103
105 104 @onlyif_unicode_paths
106 105 def test_1(self):
107 106 """Test magic_run_completer, should match two alterntives
108 107 """
109 108 event = MockEvent(u"%run a")
110 109 mockself = None
111 110 match = set(magic_run_completer(mockself, event))
112 111 self.assertEqual(match, {u"a.py", u"aaø.py"})
113 112
114 113 @onlyif_unicode_paths
115 114 def test_2(self):
116 115 """Test magic_run_completer, should match one alterntive
117 116 """
118 117 event = MockEvent(u"%run aa")
119 118 mockself = None
120 119 match = set(magic_run_completer(mockself, event))
121 120 self.assertEqual(match, {u"aaø.py"})
122 121
123 122 @onlyif_unicode_paths
124 123 def test_3(self):
125 124 """Test magic_run_completer with unterminated " """
126 125 event = MockEvent(u'%run "a')
127 126 mockself = None
128 127 match = set(magic_run_completer(mockself, event))
129 128 self.assertEqual(match, {u"a.py", u"aaø.py"})
130 129
131 130 # module_completer:
132 131
133 132 def test_import_invalid_module():
134 133 """Testing of issue https://github.com/ipython/ipython/issues/1107"""
135 134 invalid_module_names = {'foo-bar', 'foo:bar', '10foo'}
136 135 valid_module_names = {'foobar'}
137 136 with TemporaryDirectory() as tmpdir:
138 137 sys.path.insert( 0, tmpdir )
139 138 for name in invalid_module_names | valid_module_names:
140 139 filename = os.path.join(tmpdir, name + '.py')
141 140 open(filename, 'w').close()
142 141
143 142 s = set( module_completion('import foo') )
144 143 intersection = s.intersection(invalid_module_names)
145 144 nt.assert_equal(intersection, set())
146 145
147 146 assert valid_module_names.issubset(s), valid_module_names.intersection(s)
148 147
149 148
150 149 def test_bad_module_all():
151 150 """Test module with invalid __all__
152 151
153 152 https://github.com/ipython/ipython/issues/9678
154 153 """
155 154 testsdir = os.path.dirname(__file__)
156 155 sys.path.insert(0, testsdir)
157 156 try:
158 157 results = module_completion('from bad_all import ')
159 158 nt.assert_in('puppies', results)
160 159 for r in results:
161 160 nt.assert_is_instance(r, py3compat.string_types)
162 161 finally:
163 162 sys.path.remove(testsdir)
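# --- Illustrative sketch (not part of the diff above) ---
# Both helpers under test are plain functions, so they can be driven by hand;
# the event class and the line contents here are hypothetical.
from IPython.core.completerlib import magic_run_completer, module_completion

# module_completion() takes the text typed so far and returns candidate module
# names; names that are not valid identifiers (e.g. 'foo-bar') are filtered out.
print(module_completion('import col'))          # e.g. ['collections', ...]

# magic_run_completer() only needs an object with a .line attribute and yields
# matching scripts and directories relative to the current working directory.
class FakeEvent(object):
    line = u'%run a'

print(sorted(magic_run_completer(None, FakeEvent())))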
@@ -1,226 +1,225 b''
1 1 """Tests for debugging machinery.
2 2 """
3 from __future__ import print_function
4 3
5 4 # Copyright (c) IPython Development Team.
6 5 # Distributed under the terms of the Modified BSD License.
7 6
8 7 import sys
9 8 import warnings
10 9
11 10 import nose.tools as nt
12 11
13 12 from IPython.core import debugger
14 13
15 14 #-----------------------------------------------------------------------------
16 15 # Helper classes, from CPython's Pdb test suite
17 16 #-----------------------------------------------------------------------------
18 17
19 18 class _FakeInput(object):
20 19 """
21 20 A fake input stream for pdb's interactive debugger. Whenever a
22 21 line is read, print it (to simulate the user typing it), and then
23 22 return it. The set of lines to return is specified in the
24 23 constructor; they should not have trailing newlines.
25 24 """
26 25 def __init__(self, lines):
27 26 self.lines = iter(lines)
28 27
29 28 def readline(self):
30 29 line = next(self.lines)
31 30 print(line)
32 31 return line+'\n'
33 32
34 33 class PdbTestInput(object):
35 34 """Context manager that makes testing Pdb in doctests easier."""
36 35
37 36 def __init__(self, input):
38 37 self.input = input
39 38
40 39 def __enter__(self):
41 40 self.real_stdin = sys.stdin
42 41 sys.stdin = _FakeInput(self.input)
43 42
44 43 def __exit__(self, *exc):
45 44 sys.stdin = self.real_stdin
46 45
47 46 #-----------------------------------------------------------------------------
48 47 # Tests
49 48 #-----------------------------------------------------------------------------
50 49
51 50 def test_longer_repr():
52 51 try:
53 52 from reprlib import repr as trepr # Py 3
54 53 except ImportError:
55 54 from repr import repr as trepr # Py 2
56 55
57 56 a = '1234567890'* 7
58 57 ar = "'1234567890123456789012345678901234567890123456789012345678901234567890'"
59 58 a_trunc = "'123456789012...8901234567890'"
60 59 nt.assert_equal(trepr(a), a_trunc)
61 60 # The creation of our tracer modifies the repr module's repr function
62 61 # in-place, since that global is used directly by the stdlib's pdb module.
63 62 with warnings.catch_warnings():
64 63 warnings.simplefilter('ignore', DeprecationWarning)
65 64 debugger.Tracer()
66 65 nt.assert_equal(trepr(a), ar)
67 66
68 67 def test_ipdb_magics():
69 68 '''Test calling some IPython magics from ipdb.
70 69
71 70 First, set up some test functions and classes which we can inspect.
72 71
73 72 >>> class ExampleClass(object):
74 73 ... """Docstring for ExampleClass."""
75 74 ... def __init__(self):
76 75 ... """Docstring for ExampleClass.__init__"""
77 76 ... pass
78 77 ... def __str__(self):
79 78 ... return "ExampleClass()"
80 79
81 80 >>> def example_function(x, y, z="hello"):
82 81 ... """Docstring for example_function."""
83 82 ... pass
84 83
85 84 >>> old_trace = sys.gettrace()
86 85
87 86 Create a function which triggers ipdb.
88 87
89 88 >>> def trigger_ipdb():
90 89 ... a = ExampleClass()
91 90 ... debugger.Pdb().set_trace()
92 91
93 92 >>> with PdbTestInput([
94 93 ... 'pdef example_function',
95 94 ... 'pdoc ExampleClass',
96 95 ... 'up',
97 96 ... 'down',
98 97 ... 'list',
99 98 ... 'pinfo a',
100 99 ... 'll',
101 100 ... 'continue',
102 101 ... ]):
103 102 ... trigger_ipdb()
104 103 --Return--
105 104 None
106 105 > <doctest ...>(3)trigger_ipdb()
107 106 1 def trigger_ipdb():
108 107 2 a = ExampleClass()
109 108 ----> 3 debugger.Pdb().set_trace()
110 109 <BLANKLINE>
111 110 ipdb> pdef example_function
112 111 example_function(x, y, z='hello')
113 112 ipdb> pdoc ExampleClass
114 113 Class docstring:
115 114 Docstring for ExampleClass.
116 115 Init docstring:
117 116 Docstring for ExampleClass.__init__
118 117 ipdb> up
119 118 > <doctest ...>(11)<module>()
120 119 7 'pinfo a',
121 120 8 'll',
122 121 9 'continue',
123 122 10 ]):
124 123 ---> 11 trigger_ipdb()
125 124 <BLANKLINE>
126 125 ipdb> down
127 126 None
128 127 > <doctest ...>(3)trigger_ipdb()
129 128 1 def trigger_ipdb():
130 129 2 a = ExampleClass()
131 130 ----> 3 debugger.Pdb().set_trace()
132 131 <BLANKLINE>
133 132 ipdb> list
134 133 1 def trigger_ipdb():
135 134 2 a = ExampleClass()
136 135 ----> 3 debugger.Pdb().set_trace()
137 136 <BLANKLINE>
138 137 ipdb> pinfo a
139 138 Type: ExampleClass
140 139 String form: ExampleClass()
141 140 Namespace: Local...
142 141 Docstring: Docstring for ExampleClass.
143 142 Init docstring: Docstring for ExampleClass.__init__
144 143 ipdb> ll
145 144 1 def trigger_ipdb():
146 145 2 a = ExampleClass()
147 146 ----> 3 debugger.Pdb().set_trace()
148 147 <BLANKLINE>
149 148 ipdb> continue
150 149
151 150 Restore previous trace function, e.g. for coverage.py
152 151
153 152 >>> sys.settrace(old_trace)
154 153 '''
155 154
156 155 def test_ipdb_magics2():
157 156 '''Test ipdb with a very short function.
158 157
159 158 >>> old_trace = sys.gettrace()
160 159
161 160 >>> def bar():
162 161 ... pass
163 162
164 163 Run ipdb.
165 164
166 165 >>> with PdbTestInput([
167 166 ... 'continue',
168 167 ... ]):
169 168 ... debugger.Pdb().runcall(bar)
170 169 > <doctest ...>(2)bar()
171 170 1 def bar():
172 171 ----> 2 pass
173 172 <BLANKLINE>
174 173 ipdb> continue
175 174
176 175 Restore previous trace function, e.g. for coverage.py
177 176
178 177 >>> sys.settrace(old_trace)
179 178 '''
180 179
181 180 def can_quit():
182 181     '''Test that quit works in ipdb
183 182
184 183 >>> old_trace = sys.gettrace()
185 184
186 185 >>> def bar():
187 186 ... pass
188 187
189 188 >>> with PdbTestInput([
190 189 ... 'quit',
191 190 ... ]):
192 191 ... debugger.Pdb().runcall(bar)
193 192 > <doctest ...>(2)bar()
194 193 1 def bar():
195 194 ----> 2 pass
196 195 <BLANKLINE>
197 196 ipdb> quit
198 197
199 198 Restore previous trace function, e.g. for coverage.py
200 199
201 200 >>> sys.settrace(old_trace)
202 201 '''
203 202
204 203
205 204 def can_exit():
206 205     '''Test that exit works in ipdb
207 206
208 207 >>> old_trace = sys.gettrace()
209 208
210 209 >>> def bar():
211 210 ... pass
212 211
213 212 >>> with PdbTestInput([
214 213 ... 'exit',
215 214 ... ]):
216 215 ... debugger.Pdb().runcall(bar)
217 216 > <doctest ...>(2)bar()
218 217 1 def bar():
219 218 ----> 2 pass
220 219 <BLANKLINE>
221 220 ipdb> exit
222 221
223 222 Restore previous trace function, e.g. for coverage.py
224 223
225 224 >>> sys.settrace(old_trace)
226 225 '''
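# --- Illustrative sketch (not part of the diff above) ---
# PdbTestInput (defined above) scripts a debugger session by swapping
# sys.stdin for a canned list of commands, which is how the doctests drive
# ipdb without a live user. The target function here is hypothetical.
def target():
    x = 1
    return x + 1

with PdbTestInput(['where', 'next', 'continue']):
    debugger.Pdb().runcall(target)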
@@ -1,81 +1,80 b''
1 1 # -*- coding: utf-8 -*-
2 2 """Tests for CommandChainDispatcher."""
3 3
4 from __future__ import absolute_import
5 4
6 5 #-----------------------------------------------------------------------------
7 6 # Imports
8 7 #-----------------------------------------------------------------------------
9 8
10 9 import nose.tools as nt
11 10 from IPython.core.error import TryNext
12 11 from IPython.core.hooks import CommandChainDispatcher
13 12
14 13 #-----------------------------------------------------------------------------
15 14 # Local utilities
16 15 #-----------------------------------------------------------------------------
17 16
18 17 # Define two classes, one which succeeds and one which raises TryNext. Each
19 18 # sets the attribute `called` to True when it is called.
20 19 class Okay(object):
21 20 def __init__(self, message):
22 21 self.message = message
23 22 self.called = False
24 23 def __call__(self):
25 24 self.called = True
26 25 return self.message
27 26
28 27 class Fail(object):
29 28 def __init__(self, message):
30 29 self.message = message
31 30 self.called = False
32 31 def __call__(self):
33 32 self.called = True
34 33 raise TryNext(self.message)
35 34
36 35 #-----------------------------------------------------------------------------
37 36 # Test functions
38 37 #-----------------------------------------------------------------------------
39 38
40 39 def test_command_chain_dispatcher_ff():
41 40 """Test two failing hooks"""
42 41 fail1 = Fail(u'fail1')
43 42 fail2 = Fail(u'fail2')
44 43 dp = CommandChainDispatcher([(0, fail1),
45 44 (10, fail2)])
46 45
47 46 try:
48 47 dp()
49 48 except TryNext as e:
50 49 nt.assert_equal(str(e), u'fail2')
51 50 else:
52 51 assert False, "Expected exception was not raised."
53 52
54 53 nt.assert_true(fail1.called)
55 54 nt.assert_true(fail2.called)
56 55
57 56 def test_command_chain_dispatcher_fofo():
58 57 """Test a mixture of failing and succeeding hooks."""
59 58 fail1 = Fail(u'fail1')
60 59 fail2 = Fail(u'fail2')
61 60 okay1 = Okay(u'okay1')
62 61 okay2 = Okay(u'okay2')
63 62
64 63 dp = CommandChainDispatcher([(0, fail1),
65 64 # (5, okay1), # add this later
66 65 (10, fail2),
67 66 (15, okay2)])
68 67 dp.add(okay1, 5)
69 68
70 69 nt.assert_equal(dp(), u'okay1')
71 70
72 71 nt.assert_true(fail1.called)
73 72 nt.assert_true(okay1.called)
74 73 nt.assert_false(fail2.called)
75 74 nt.assert_false(okay2.called)
76 75
77 76 def test_command_chain_dispatcher_eq_priority():
78 77 okay1 = Okay(u'okay1')
79 78 okay2 = Okay(u'okay2')
80 79 dp = CommandChainDispatcher([(1, okay1)])
81 80 dp.add(okay2, 1)
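# --- Illustrative sketch (not part of the diff above) ---
# CommandChainDispatcher calls its hooks in priority order (lowest number
# first) and returns the first result that does not raise TryNext; hooks that
# raise TryNext simply pass control to the next one. The hooks below are
# hypothetical.
from IPython.core.error import TryNext
from IPython.core.hooks import CommandChainDispatcher

def primary():
    raise TryNext()          # decline and fall through to the next hook

def fallback():
    return 'handled by fallback'

dispatcher = CommandChainDispatcher([(10, primary), (20, fallback)])
print(dispatcher())          # -> 'handled by fallback'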
@@ -1,615 +1,614 b''
1 1 # -*- coding: utf-8 -*-
2 2 """Tests for the inputsplitter module."""
3 3
4 from __future__ import print_function
5 4
6 5 # Copyright (c) IPython Development Team.
7 6 # Distributed under the terms of the Modified BSD License.
8 7
9 8 import unittest
10 9 import sys
11 10
12 11 import nose.tools as nt
13 12
14 13 from IPython.core import inputsplitter as isp
15 14 from IPython.core.inputtransformer import InputTransformer
16 15 from IPython.core.tests.test_inputtransformer import syntax, syntax_ml
17 16 from IPython.testing import tools as tt
18 17 from IPython.utils import py3compat
19 18 from IPython.utils.py3compat import string_types, input
20 19
21 20 #-----------------------------------------------------------------------------
22 21 # Semi-complete examples (also used as tests)
23 22 #-----------------------------------------------------------------------------
24 23
25 24 # Note: at the bottom, there's a slightly more complete version of this that
26 25 # can be useful during development of code here.
27 26
28 27 def mini_interactive_loop(input_func):
29 28 """Minimal example of the logic of an interactive interpreter loop.
30 29
31 30 This serves as an example, and it is used by the test system with a fake
32 31 raw_input that simulates interactive input."""
33 32
34 33 from IPython.core.inputsplitter import InputSplitter
35 34
36 35 isp = InputSplitter()
37 36 # In practice, this input loop would be wrapped in an outside loop to read
38 37 # input indefinitely, until some exit/quit command was issued. Here we
39 38 # only illustrate the basic inner loop.
40 39 while isp.push_accepts_more():
41 40 indent = ' '*isp.indent_spaces
42 41 prompt = '>>> ' + indent
43 42 line = indent + input_func(prompt)
44 43 isp.push(line)
45 44
46 45 # Here we just return input so we can use it in a test suite, but a real
47 46 # interpreter would instead send it for execution somewhere.
48 47 src = isp.source_reset()
49 48 #print 'Input source was:\n', src # dbg
50 49 return src
51 50
52 51 #-----------------------------------------------------------------------------
53 52 # Test utilities, just for local use
54 53 #-----------------------------------------------------------------------------
55 54
56 55 def assemble(block):
57 56 """Assemble a block into multi-line sub-blocks."""
58 57 return ['\n'.join(sub_block)+'\n' for sub_block in block]
59 58
60 59
61 60 def pseudo_input(lines):
62 61 """Return a function that acts like raw_input but feeds the input list."""
63 62 ilines = iter(lines)
64 63 def raw_in(prompt):
65 64 try:
66 65 return next(ilines)
67 66 except StopIteration:
68 67 return ''
69 68 return raw_in
70 69
71 70 #-----------------------------------------------------------------------------
72 71 # Tests
73 72 #-----------------------------------------------------------------------------
74 73 def test_spaces():
75 74 tests = [('', 0),
76 75 (' ', 1),
77 76 ('\n', 0),
78 77 (' \n', 1),
79 78 ('x', 0),
80 79 (' x', 1),
81 80 (' x',2),
82 81 (' x',4),
83 82 # Note: tabs are counted as a single whitespace!
84 83 ('\tx', 1),
85 84 ('\t x', 2),
86 85 ]
87 86 tt.check_pairs(isp.num_ini_spaces, tests)
88 87
89 88
90 89 def test_remove_comments():
91 90 tests = [('text', 'text'),
92 91 ('text # comment', 'text '),
93 92 ('text # comment\n', 'text \n'),
94 93 ('text # comment \n', 'text \n'),
95 94 ('line # c \nline\n','line \nline\n'),
96 95 ('line # c \nline#c2 \nline\nline #c\n\n',
97 96 'line \nline\nline\nline \n\n'),
98 97 ]
99 98 tt.check_pairs(isp.remove_comments, tests)
100 99
101 100
102 101 def test_get_input_encoding():
103 102 encoding = isp.get_input_encoding()
104 103 nt.assert_true(isinstance(encoding, string_types))
105 104 # simple-minded check that at least encoding a simple string works with the
106 105 # encoding we got.
107 106 nt.assert_equal(u'test'.encode(encoding), b'test')
108 107
109 108
110 109 class NoInputEncodingTestCase(unittest.TestCase):
111 110 def setUp(self):
112 111 self.old_stdin = sys.stdin
113 112 class X: pass
114 113 fake_stdin = X()
115 114 sys.stdin = fake_stdin
116 115
117 116 def test(self):
118 117 # Verify that if sys.stdin has no 'encoding' attribute we do the right
119 118 # thing
120 119 enc = isp.get_input_encoding()
121 120 self.assertEqual(enc, 'ascii')
122 121
123 122 def tearDown(self):
124 123 sys.stdin = self.old_stdin
125 124
126 125
127 126 class InputSplitterTestCase(unittest.TestCase):
128 127 def setUp(self):
129 128 self.isp = isp.InputSplitter()
130 129
131 130 def test_reset(self):
132 131 isp = self.isp
133 132 isp.push('x=1')
134 133 isp.reset()
135 134 self.assertEqual(isp._buffer, [])
136 135 self.assertEqual(isp.indent_spaces, 0)
137 136 self.assertEqual(isp.source, '')
138 137 self.assertEqual(isp.code, None)
139 138 self.assertEqual(isp._is_complete, False)
140 139
141 140 def test_source(self):
142 141 self.isp._store('1')
143 142 self.isp._store('2')
144 143 self.assertEqual(self.isp.source, '1\n2\n')
145 144 self.assertEqual(len(self.isp._buffer)>0, True)
146 145 self.assertEqual(self.isp.source_reset(), '1\n2\n')
147 146 self.assertEqual(self.isp._buffer, [])
148 147 self.assertEqual(self.isp.source, '')
149 148
150 149 def test_indent(self):
151 150 isp = self.isp # shorthand
152 151 isp.push('x=1')
153 152 self.assertEqual(isp.indent_spaces, 0)
154 153 isp.push('if 1:\n x=1')
155 154 self.assertEqual(isp.indent_spaces, 4)
156 155 isp.push('y=2\n')
157 156 self.assertEqual(isp.indent_spaces, 0)
158 157
159 158 def test_indent2(self):
160 159 isp = self.isp
161 160 isp.push('if 1:')
162 161 self.assertEqual(isp.indent_spaces, 4)
163 162 isp.push(' x=1')
164 163 self.assertEqual(isp.indent_spaces, 4)
165 164 # Blank lines shouldn't change the indent level
166 165 isp.push(' '*2)
167 166 self.assertEqual(isp.indent_spaces, 4)
168 167
169 168 def test_indent3(self):
170 169 isp = self.isp
171 170 # When a multiline statement contains parens or multiline strings, we
172 171 # shouldn't get confused.
173 172 isp.push("if 1:")
174 173 isp.push(" x = (1+\n 2)")
175 174 self.assertEqual(isp.indent_spaces, 4)
176 175
177 176 def test_indent4(self):
178 177 isp = self.isp
179 178 # whitespace after ':' should not screw up indent level
180 179 isp.push('if 1: \n x=1')
181 180 self.assertEqual(isp.indent_spaces, 4)
182 181 isp.push('y=2\n')
183 182 self.assertEqual(isp.indent_spaces, 0)
184 183 isp.push('if 1:\t\n x=1')
185 184 self.assertEqual(isp.indent_spaces, 4)
186 185 isp.push('y=2\n')
187 186 self.assertEqual(isp.indent_spaces, 0)
188 187
189 188 def test_dedent_pass(self):
190 189 isp = self.isp # shorthand
191 190 # should NOT cause dedent
192 191 isp.push('if 1:\n passes = 5')
193 192 self.assertEqual(isp.indent_spaces, 4)
194 193 isp.push('if 1:\n pass')
195 194 self.assertEqual(isp.indent_spaces, 0)
196 195 isp.push('if 1:\n pass ')
197 196 self.assertEqual(isp.indent_spaces, 0)
198 197
199 198 def test_dedent_break(self):
200 199 isp = self.isp # shorthand
201 200 # should NOT cause dedent
202 201 isp.push('while 1:\n breaks = 5')
203 202 self.assertEqual(isp.indent_spaces, 4)
204 203 isp.push('while 1:\n break')
205 204 self.assertEqual(isp.indent_spaces, 0)
206 205 isp.push('while 1:\n break ')
207 206 self.assertEqual(isp.indent_spaces, 0)
208 207
209 208 def test_dedent_continue(self):
210 209 isp = self.isp # shorthand
211 210 # should NOT cause dedent
212 211 isp.push('while 1:\n continues = 5')
213 212 self.assertEqual(isp.indent_spaces, 4)
214 213 isp.push('while 1:\n continue')
215 214 self.assertEqual(isp.indent_spaces, 0)
216 215 isp.push('while 1:\n continue ')
217 216 self.assertEqual(isp.indent_spaces, 0)
218 217
219 218 def test_dedent_raise(self):
220 219 isp = self.isp # shorthand
221 220 # should NOT cause dedent
222 221 isp.push('if 1:\n raised = 4')
223 222 self.assertEqual(isp.indent_spaces, 4)
224 223 isp.push('if 1:\n raise TypeError()')
225 224 self.assertEqual(isp.indent_spaces, 0)
226 225 isp.push('if 1:\n raise')
227 226 self.assertEqual(isp.indent_spaces, 0)
228 227 isp.push('if 1:\n raise ')
229 228 self.assertEqual(isp.indent_spaces, 0)
230 229
231 230 def test_dedent_return(self):
232 231 isp = self.isp # shorthand
233 232 # should NOT cause dedent
234 233 isp.push('if 1:\n returning = 4')
235 234 self.assertEqual(isp.indent_spaces, 4)
236 235 isp.push('if 1:\n return 5 + 493')
237 236 self.assertEqual(isp.indent_spaces, 0)
238 237 isp.push('if 1:\n return')
239 238 self.assertEqual(isp.indent_spaces, 0)
240 239 isp.push('if 1:\n return ')
241 240 self.assertEqual(isp.indent_spaces, 0)
242 241 isp.push('if 1:\n return(0)')
243 242 self.assertEqual(isp.indent_spaces, 0)
244 243
245 244 def test_push(self):
246 245 isp = self.isp
247 246 self.assertEqual(isp.push('x=1'), True)
248 247
249 248 def test_push2(self):
250 249 isp = self.isp
251 250 self.assertEqual(isp.push('if 1:'), False)
252 251 for line in [' x=1', '# a comment', ' y=2']:
253 252 print(line)
254 253 self.assertEqual(isp.push(line), True)
255 254
256 255 def test_push3(self):
257 256 isp = self.isp
258 257 isp.push('if True:')
259 258 isp.push(' a = 1')
260 259 self.assertEqual(isp.push('b = [1,'), False)
261 260
262 261 def test_push_accepts_more(self):
263 262 isp = self.isp
264 263 isp.push('x=1')
265 264 self.assertEqual(isp.push_accepts_more(), False)
266 265
267 266 def test_push_accepts_more2(self):
268 267 isp = self.isp
269 268 isp.push('if 1:')
270 269 self.assertEqual(isp.push_accepts_more(), True)
271 270 isp.push(' x=1')
272 271 self.assertEqual(isp.push_accepts_more(), True)
273 272 isp.push('')
274 273 self.assertEqual(isp.push_accepts_more(), False)
275 274
276 275 def test_push_accepts_more3(self):
277 276 isp = self.isp
278 277 isp.push("x = (2+\n3)")
279 278 self.assertEqual(isp.push_accepts_more(), False)
280 279
281 280 def test_push_accepts_more4(self):
282 281 isp = self.isp
283 282 # When a multiline statement contains parens or multiline strings, we
284 283 # shouldn't get confused.
285 284 # FIXME: we should be able to better handle de-dents in statements like
286 285 # multiline strings and multiline expressions (continued with \ or
287 286 # parens). Right now we aren't handling the indentation tracking quite
288 287 # correctly with this, though in practice it may not be too much of a
289 288 # problem. We'll need to see.
290 289 isp.push("if 1:")
291 290 isp.push(" x = (2+")
292 291 isp.push(" 3)")
293 292 self.assertEqual(isp.push_accepts_more(), True)
294 293 isp.push(" y = 3")
295 294 self.assertEqual(isp.push_accepts_more(), True)
296 295 isp.push('')
297 296 self.assertEqual(isp.push_accepts_more(), False)
298 297
299 298 def test_push_accepts_more5(self):
300 299 isp = self.isp
301 300 isp.push('try:')
302 301 isp.push(' a = 5')
303 302 isp.push('except:')
304 303 isp.push(' raise')
305 304 # We want to be able to add an else: block at this point, so it should
306 305 # wait for a blank line.
307 306 self.assertEqual(isp.push_accepts_more(), True)
308 307
309 308 def test_continuation(self):
310 309 isp = self.isp
311 310 isp.push("import os, \\")
312 311 self.assertEqual(isp.push_accepts_more(), True)
313 312 isp.push("sys")
314 313 self.assertEqual(isp.push_accepts_more(), False)
315 314
316 315 def test_syntax_error(self):
317 316 isp = self.isp
318 317 # Syntax errors immediately produce a 'ready' block, so the invalid
319 318 # Python can be sent to the kernel for evaluation with possible ipython
320 319 # special-syntax conversion.
321 320 isp.push('run foo')
322 321 self.assertEqual(isp.push_accepts_more(), False)
323 322
324 323 def test_unicode(self):
325 324 self.isp.push(u"Pérez")
326 325 self.isp.push(u'\xc3\xa9')
327 326 self.isp.push(u"u'\xc3\xa9'")
328 327
329 328 def test_line_continuation(self):
330 329 """ Test issue #2108."""
331 330 isp = self.isp
332 331 # A blank line after a line continuation should not accept more
333 332 isp.push("1 \\\n\n")
334 333 self.assertEqual(isp.push_accepts_more(), False)
335 334 # Whitespace after a \ is a SyntaxError. The only way to test that
336 335 # here is to test that push doesn't accept more (as with
337 336 # test_syntax_error() above).
338 337 isp.push(r"1 \ ")
339 338 self.assertEqual(isp.push_accepts_more(), False)
340 339 # Even if the line is continuable (c.f. the regular Python
341 340 # interpreter)
342 341 isp.push(r"(1 \ ")
343 342 self.assertEqual(isp.push_accepts_more(), False)
344 343
345 344 def test_check_complete(self):
346 345 isp = self.isp
347 346 self.assertEqual(isp.check_complete("a = 1"), ('complete', None))
348 347 self.assertEqual(isp.check_complete("for a in range(5):"), ('incomplete', 4))
349 348 self.assertEqual(isp.check_complete("raise = 2"), ('invalid', None))
350 349 self.assertEqual(isp.check_complete("a = [1,\n2,"), ('incomplete', 0))
351 350 self.assertEqual(isp.check_complete("def a():\n x=1\n global x"), ('invalid', None))
352 351
353 352 class InteractiveLoopTestCase(unittest.TestCase):
354 353 """Tests for an interactive loop like a python shell.
355 354 """
356 355 def check_ns(self, lines, ns):
357 356 """Validate that the given input lines produce the resulting namespace.
358 357
359 358 Note: the input lines are given exactly as they would be typed in an
360 359 auto-indenting environment, as mini_interactive_loop above already does
361 360 auto-indenting and prepends spaces to the input.
362 361 """
363 362 src = mini_interactive_loop(pseudo_input(lines))
364 363 test_ns = {}
365 364 exec(src, test_ns)
366 365 # We can't check that the provided ns is identical to the test_ns,
367 366 # because Python fills test_ns with extra keys (copyright, etc). But
368 367 # we can check that the given dict is *contained* in test_ns
369 368 for k,v in ns.items():
370 369 self.assertEqual(test_ns[k], v)
371 370
372 371 def test_simple(self):
373 372 self.check_ns(['x=1'], dict(x=1))
374 373
375 374 def test_simple2(self):
376 375 self.check_ns(['if 1:', 'x=2'], dict(x=2))
377 376
378 377 def test_xy(self):
379 378 self.check_ns(['x=1; y=2'], dict(x=1, y=2))
380 379
381 380 def test_abc(self):
382 381 self.check_ns(['if 1:','a=1','b=2','c=3'], dict(a=1, b=2, c=3))
383 382
384 383 def test_multi(self):
385 384 self.check_ns(['x =(1+','1+','2)'], dict(x=4))
386 385
387 386
388 387 class IPythonInputTestCase(InputSplitterTestCase):
389 388 """By just creating a new class whose .isp is a different instance, we
390 389 re-run the same test battery on the new input splitter.
391 390
392 391 In addition, this runs the tests over the syntax and syntax_ml dicts that
393 392 were tested by individual functions, as part of the OO interface.
394 393
395 394 It also makes some checks on the raw buffer storage.
396 395 """
397 396
398 397 def setUp(self):
399 398 self.isp = isp.IPythonInputSplitter()
400 399
401 400 def test_syntax(self):
402 401 """Call all single-line syntax tests from the main object"""
403 402 isp = self.isp
404 403 for example in syntax.values():
405 404 for raw, out_t in example:
406 405 if raw.startswith(' '):
407 406 continue
408 407
409 408 isp.push(raw+'\n')
410 409 out_raw = isp.source_raw
411 410 out = isp.source_reset()
412 411 self.assertEqual(out.rstrip(), out_t,
413 412 tt.pair_fail_msg.format("inputsplitter",raw, out_t, out))
414 413 self.assertEqual(out_raw.rstrip(), raw.rstrip())
415 414
416 415 def test_syntax_multiline(self):
417 416 isp = self.isp
418 417 for example in syntax_ml.values():
419 418 for line_pairs in example:
420 419 out_t_parts = []
421 420 raw_parts = []
422 421 for lraw, out_t_part in line_pairs:
423 422 if out_t_part is not None:
424 423 out_t_parts.append(out_t_part)
425 424
426 425 if lraw is not None:
427 426 isp.push(lraw)
428 427 raw_parts.append(lraw)
429 428
430 429 out_raw = isp.source_raw
431 430 out = isp.source_reset()
432 431 out_t = '\n'.join(out_t_parts).rstrip()
433 432 raw = '\n'.join(raw_parts).rstrip()
434 433 self.assertEqual(out.rstrip(), out_t)
435 434 self.assertEqual(out_raw.rstrip(), raw)
436 435
437 436 def test_syntax_multiline_cell(self):
438 437 isp = self.isp
439 438 for example in syntax_ml.values():
440 439
441 440 out_t_parts = []
442 441 for line_pairs in example:
443 442 raw = '\n'.join(r for r, _ in line_pairs if r is not None)
444 443 out_t = '\n'.join(t for _,t in line_pairs if t is not None)
445 444 out = isp.transform_cell(raw)
446 445 # Match ignoring trailing whitespace
447 446 self.assertEqual(out.rstrip(), out_t.rstrip())
448 447
449 448 def test_cellmagic_preempt(self):
450 449 isp = self.isp
451 450 for raw, name, line, cell in [
452 451 ("%%cellm a\nIn[1]:", u'cellm', u'a', u'In[1]:'),
453 452 ("%%cellm \nline\n>>> hi", u'cellm', u'', u'line\n>>> hi'),
454 453 (">>> %%cellm \nline\n>>> hi", u'cellm', u'', u'line\nhi'),
455 454 ("%%cellm \n>>> hi", u'cellm', u'', u'>>> hi'),
456 455 ("%%cellm \nline1\nline2", u'cellm', u'', u'line1\nline2'),
457 456 ("%%cellm \nline1\\\\\nline2", u'cellm', u'', u'line1\\\\\nline2'),
458 457 ]:
459 458 expected = "get_ipython().run_cell_magic(%r, %r, %r)" % (
460 459 name, line, cell
461 460 )
462 461 out = isp.transform_cell(raw)
463 462 self.assertEqual(out.rstrip(), expected.rstrip())
464 463
465 464 def test_multiline_passthrough(self):
466 465 isp = self.isp
467 466 class CommentTransformer(InputTransformer):
468 467 def __init__(self):
469 468 self._lines = []
470 469
471 470 def push(self, line):
472 471 self._lines.append(line + '#')
473 472
474 473 def reset(self):
475 474 text = '\n'.join(self._lines)
476 475 self._lines = []
477 476 return text
478 477
479 478 isp.physical_line_transforms.insert(0, CommentTransformer())
480 479
481 480 for raw, expected in [
482 481 ("a=5", "a=5#"),
483 482 ("%ls foo", "get_ipython().magic(%r)" % u'ls foo#'),
484 483 ("!ls foo\n%ls bar", "get_ipython().system(%r)\nget_ipython().magic(%r)" % (
485 484 u'ls foo#', u'ls bar#'
486 485 )),
487 486 ("1\n2\n3\n%ls foo\n4\n5", "1#\n2#\n3#\nget_ipython().magic(%r)\n4#\n5#" % u'ls foo#'),
488 487 ]:
489 488 out = isp.transform_cell(raw)
490 489 self.assertEqual(out.rstrip(), expected.rstrip())
491 490
492 491 #-----------------------------------------------------------------------------
493 492 # Main - use as a script, mostly for developer experiments
494 493 #-----------------------------------------------------------------------------
495 494
496 495 if __name__ == '__main__':
497 496 # A simple demo for interactive experimentation. This code will not get
498 497 # picked up by any test suite.
499 498 from IPython.core.inputsplitter import IPythonInputSplitter
500 499
501 500 # configure here the syntax to use, prompt and whether to autoindent
502 501 #isp, start_prompt = InputSplitter(), '>>> '
503 502 isp, start_prompt = IPythonInputSplitter(), 'In> '
504 503
505 504 autoindent = True
506 505 #autoindent = False
507 506
508 507 try:
509 508 while True:
510 509 prompt = start_prompt
511 510 while isp.push_accepts_more():
512 511 indent = ' '*isp.indent_spaces
513 512 if autoindent:
514 513 line = indent + input(prompt+indent)
515 514 else:
516 515 line = input(prompt)
517 516 isp.push(line)
518 517 prompt = '... '
519 518
520 519 # Here we just return input so we can use it in a test suite, but a
521 520 # real interpreter would instead send it for execution somewhere.
522 521 #src = isp.source; raise EOFError # dbg
523 522 raw = isp.source_raw
524 523 src = isp.source_reset()
525 524 print('Input source was:\n', src)
526 525 print('Raw source was:\n', raw)
527 526 except EOFError:
528 527 print('Bye')
529 528
530 529 # Tests for cell magics support
531 530
532 531 def test_last_blank():
533 532 nt.assert_false(isp.last_blank(''))
534 533 nt.assert_false(isp.last_blank('abc'))
535 534 nt.assert_false(isp.last_blank('abc\n'))
536 535 nt.assert_false(isp.last_blank('abc\na'))
537 536
538 537 nt.assert_true(isp.last_blank('\n'))
539 538 nt.assert_true(isp.last_blank('\n '))
540 539 nt.assert_true(isp.last_blank('abc\n '))
541 540 nt.assert_true(isp.last_blank('abc\n\n'))
542 541 nt.assert_true(isp.last_blank('abc\nd\n\n'))
543 542 nt.assert_true(isp.last_blank('abc\nd\ne\n\n'))
544 543 nt.assert_true(isp.last_blank('abc \n \n \n\n'))
545 544
546 545
547 546 def test_last_two_blanks():
548 547 nt.assert_false(isp.last_two_blanks(''))
549 548 nt.assert_false(isp.last_two_blanks('abc'))
550 549 nt.assert_false(isp.last_two_blanks('abc\n'))
551 550 nt.assert_false(isp.last_two_blanks('abc\n\na'))
552 551 nt.assert_false(isp.last_two_blanks('abc\n \n'))
553 552 nt.assert_false(isp.last_two_blanks('abc\n\n'))
554 553
555 554 nt.assert_true(isp.last_two_blanks('\n\n'))
556 555 nt.assert_true(isp.last_two_blanks('\n\n '))
557 556 nt.assert_true(isp.last_two_blanks('\n \n'))
558 557 nt.assert_true(isp.last_two_blanks('abc\n\n '))
559 558 nt.assert_true(isp.last_two_blanks('abc\n\n\n'))
560 559 nt.assert_true(isp.last_two_blanks('abc\n\n \n'))
561 560 nt.assert_true(isp.last_two_blanks('abc\n\n \n '))
562 561 nt.assert_true(isp.last_two_blanks('abc\n\n \n \n'))
563 562 nt.assert_true(isp.last_two_blanks('abc\nd\n\n\n'))
564 563 nt.assert_true(isp.last_two_blanks('abc\nd\ne\nf\n\n\n'))
565 564
566 565
567 566 class CellMagicsCommon(object):
568 567
569 568 def test_whole_cell(self):
570 569 src = "%%cellm line\nbody\n"
571 570 out = self.sp.transform_cell(src)
572 571 ref = u"get_ipython().run_cell_magic({u}'cellm', {u}'line', {u}'body')\n"
573 572 nt.assert_equal(out, py3compat.u_format(ref))
574 573
575 574 def test_cellmagic_help(self):
576 575 self.sp.push('%%cellm?')
577 576 nt.assert_false(self.sp.push_accepts_more())
578 577
579 578 def tearDown(self):
580 579 self.sp.reset()
581 580
582 581
583 582 class CellModeCellMagics(CellMagicsCommon, unittest.TestCase):
584 583 sp = isp.IPythonInputSplitter(line_input_checker=False)
585 584
586 585 def test_incremental(self):
587 586 sp = self.sp
588 587 sp.push('%%cellm firstline\n')
589 588 nt.assert_true(sp.push_accepts_more()) #1
590 589 sp.push('line2\n')
591 590 nt.assert_true(sp.push_accepts_more()) #2
592 591 sp.push('\n')
593 592 # This should accept a blank line and carry on until the cell is reset
594 593 nt.assert_true(sp.push_accepts_more()) #3
595 594
596 595 def test_no_strip_coding(self):
597 596 src = '\n'.join([
598 597 '%%writefile foo.py',
599 598 '# coding: utf-8',
600 599 'print(u"üñîçø∂é")',
601 600 ])
602 601 out = self.sp.transform_cell(src)
603 602 nt.assert_in('# coding: utf-8', out)
604 603
605 604
606 605 class LineModeCellMagics(CellMagicsCommon, unittest.TestCase):
607 606 sp = isp.IPythonInputSplitter(line_input_checker=True)
608 607
609 608 def test_incremental(self):
610 609 sp = self.sp
611 610 sp.push('%%cellm line2\n')
612 611 nt.assert_true(sp.push_accepts_more()) #1
613 612 sp.push('\n')
614 613 # In this case, a blank line should end the cell magic
615 614 nt.assert_false(sp.push_accepts_more()) #2
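# --- Illustrative sketch (not part of the diff above) ---
# The push()/push_accepts_more() protocol exercised throughout this file,
# driven by hand on a fresh splitter; the inputs are hypothetical.
from IPython.core.inputsplitter import IPythonInputSplitter

splitter = IPythonInputSplitter()
splitter.push('if True:')
print(splitter.push_accepts_more())     # True: the block is still open
splitter.push('    x = 1')
splitter.push('')                       # a blank line closes the block
print(splitter.push_accepts_more())     # False: ready to execute
print(splitter.source_reset())          # hand back the accumulated source

# check_complete() classifies a whole cell in one call (see test_check_complete above).
print(splitter.check_complete('a = [1,'))   # e.g. ('incomplete', 0)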
@@ -1,950 +1,906 b''
1 1 # -*- coding: utf-8 -*-
2 2 """Tests for the key interactiveshell module.
3 3
4 4 Historically the main classes in interactiveshell have been under-tested. This
5 5 module should grow as many single-method tests as possible to trap many of the
6 6 recurring bugs we seem to encounter with high-level interaction.
7 7 """
8 8
9 9 # Copyright (c) IPython Development Team.
10 10 # Distributed under the terms of the Modified BSD License.
11 11
12 12 import ast
13 13 import os
14 14 import signal
15 15 import shutil
16 16 import sys
17 17 import tempfile
18 18 import unittest
19 19 try:
20 20 from unittest import mock
21 21 except ImportError:
22 22 import mock
23 23 from os.path import join
24 24
25 25 import nose.tools as nt
26 26
27 27 from IPython.core.error import InputRejected
28 28 from IPython.core.inputtransformer import InputTransformer
29 29 from IPython.testing.decorators import (
30 30 skipif, skip_win32, onlyif_unicode_paths, onlyif_cmds_exist,
31 31 )
32 32 from IPython.testing import tools as tt
33 33 from IPython.utils.process import find_cmd
34 34 from IPython.utils import py3compat
35 35 from IPython.utils.py3compat import unicode_type, PY3
36 36
37 37 if PY3:
38 38 from io import StringIO
39 39 else:
40 40 from StringIO import StringIO
41 41
42 42 #-----------------------------------------------------------------------------
43 43 # Globals
44 44 #-----------------------------------------------------------------------------
45 45 # This is used by every single test, no point repeating it ad nauseam
46 46 ip = get_ipython()
47 47
48 48 #-----------------------------------------------------------------------------
49 49 # Tests
50 50 #-----------------------------------------------------------------------------
51 51
52 52 class DerivedInterrupt(KeyboardInterrupt):
53 53 pass
54 54
55 55 class InteractiveShellTestCase(unittest.TestCase):
56 56 def test_naked_string_cells(self):
57 57 """Test that cells with only naked strings are fully executed"""
58 58 # First, single-line inputs
59 59 ip.run_cell('"a"\n')
60 60 self.assertEqual(ip.user_ns['_'], 'a')
61 61 # And also multi-line cells
62 62 ip.run_cell('"""a\nb"""\n')
63 63 self.assertEqual(ip.user_ns['_'], 'a\nb')
64 64
65 65 def test_run_empty_cell(self):
66 66 """Just make sure we don't get a horrible error with a blank
67 67 cell of input. Yes, I did overlook that."""
68 68 old_xc = ip.execution_count
69 69 res = ip.run_cell('')
70 70 self.assertEqual(ip.execution_count, old_xc)
71 71 self.assertEqual(res.execution_count, None)
72 72
73 73 def test_run_cell_multiline(self):
74 74 """Multi-block, multi-line cells must execute correctly.
75 75 """
76 76 src = '\n'.join(["x=1",
77 77 "y=2",
78 78 "if 1:",
79 79 " x += 1",
80 80 " y += 1",])
81 81 res = ip.run_cell(src)
82 82 self.assertEqual(ip.user_ns['x'], 2)
83 83 self.assertEqual(ip.user_ns['y'], 3)
84 84 self.assertEqual(res.success, True)
85 85 self.assertEqual(res.result, None)
86 86
87 87 def test_multiline_string_cells(self):
88 88 "Code sprinkled with multiline strings should execute (GH-306)"
89 89 ip.run_cell('tmp=0')
90 90 self.assertEqual(ip.user_ns['tmp'], 0)
91 91 res = ip.run_cell('tmp=1;"""a\nb"""\n')
92 92 self.assertEqual(ip.user_ns['tmp'], 1)
93 93 self.assertEqual(res.success, True)
94 94 self.assertEqual(res.result, "a\nb")
95 95
96 96 def test_dont_cache_with_semicolon(self):
97 97 "Ending a line with semicolon should not cache the returned object (GH-307)"
98 98 oldlen = len(ip.user_ns['Out'])
99 99 for cell in ['1;', '1;1;']:
100 100 res = ip.run_cell(cell, store_history=True)
101 101 newlen = len(ip.user_ns['Out'])
102 102 self.assertEqual(oldlen, newlen)
103 103 self.assertIsNone(res.result)
104 104 i = 0
105 105 #also test the default caching behavior
106 106 for cell in ['1', '1;1']:
107 107 ip.run_cell(cell, store_history=True)
108 108 newlen = len(ip.user_ns['Out'])
109 109 i += 1
110 110 self.assertEqual(oldlen+i, newlen)
111 111
112 112 def test_syntax_error(self):
113 113 res = ip.run_cell("raise = 3")
114 114 self.assertIsInstance(res.error_before_exec, SyntaxError)
115 115
116 116 def test_In_variable(self):
117 117 "Verify that In variable grows with user input (GH-284)"
118 118 oldlen = len(ip.user_ns['In'])
119 119 ip.run_cell('1;', store_history=True)
120 120 newlen = len(ip.user_ns['In'])
121 121 self.assertEqual(oldlen+1, newlen)
122 122 self.assertEqual(ip.user_ns['In'][-1],'1;')
123 123
124 124 def test_magic_names_in_string(self):
125 125 ip.run_cell('a = """\n%exit\n"""')
126 126 self.assertEqual(ip.user_ns['a'], '\n%exit\n')
127 127
128 128 def test_trailing_newline(self):
129 129 """test that running !(command) does not raise a SyntaxError"""
130 130 ip.run_cell('!(true)\n', False)
131 131 ip.run_cell('!(true)\n\n\n', False)
132 132
133 133 def test_gh_597(self):
134 134 """Pretty-printing lists of objects with non-ascii reprs may cause
135 135 problems."""
136 136 class Spam(object):
137 137 def __repr__(self):
138 138 return "\xe9"*50
139 139 import IPython.core.formatters
140 140 f = IPython.core.formatters.PlainTextFormatter()
141 141 f([Spam(),Spam()])
142 142
143 143
144 144 def test_future_flags(self):
145 145 """Check that future flags are used for parsing code (gh-777)"""
146 ip.run_cell('from __future__ import print_function')
146 ip.run_cell('from __future__ import barry_as_FLUFL')
147 147 try:
148 ip.run_cell('prfunc_return_val = print(1,2, sep=" ")')
148 ip.run_cell('prfunc_return_val = 1 <> 2')
149 149 assert 'prfunc_return_val' in ip.user_ns
150 150 finally:
151 151 # Reset compiler flags so we don't mess up other tests.
152 152 ip.compile.reset_compiler_flags()
153 153
154 def test_future_unicode(self):
155 """Check that unicode_literals is imported from __future__ (gh #786)"""
156 try:
157 ip.run_cell(u'byte_str = "a"')
158 assert isinstance(ip.user_ns['byte_str'], str) # string literals are byte strings by default
159 ip.run_cell('from __future__ import unicode_literals')
160 ip.run_cell(u'unicode_str = "a"')
161 assert isinstance(ip.user_ns['unicode_str'], unicode_type) # strings literals are now unicode
162 finally:
163 # Reset compiler flags so we don't mess up other tests.
164 ip.compile.reset_compiler_flags()
165
166 154 def test_can_pickle(self):
167 155 "Can we pickle objects defined interactively (GH-29)"
168 156 ip = get_ipython()
169 157 ip.reset()
170 158 ip.run_cell(("class Mylist(list):\n"
171 159 " def __init__(self,x=[]):\n"
172 160 " list.__init__(self,x)"))
173 161 ip.run_cell("w=Mylist([1,2,3])")
174 162
175 163 from pickle import dumps
176 164
177 165 # We need to swap in our main module - this is only necessary
178 166 # inside the test framework, because IPython puts the interactive module
179 167 # in place (but the test framework undoes this).
180 168 _main = sys.modules['__main__']
181 169 sys.modules['__main__'] = ip.user_module
182 170 try:
183 171 res = dumps(ip.user_ns["w"])
184 172 finally:
185 173 sys.modules['__main__'] = _main
186 174 self.assertTrue(isinstance(res, bytes))
187 175
188 176 def test_global_ns(self):
189 177 "Code in functions must be able to access variables outside them."
190 178 ip = get_ipython()
191 179 ip.run_cell("a = 10")
192 180 ip.run_cell(("def f(x):\n"
193 181 " return x + a"))
194 182 ip.run_cell("b = f(12)")
195 183 self.assertEqual(ip.user_ns["b"], 22)
196 184
197 185 def test_bad_custom_tb(self):
198 186 """Check that InteractiveShell is protected from bad custom exception handlers"""
199 187 ip.set_custom_exc((IOError,), lambda etype,value,tb: 1/0)
200 188 self.assertEqual(ip.custom_exceptions, (IOError,))
201 189 with tt.AssertPrints("Custom TB Handler failed", channel='stderr'):
202 190 ip.run_cell(u'raise IOError("foo")')
203 191 self.assertEqual(ip.custom_exceptions, ())
204 192
205 193 def test_bad_custom_tb_return(self):
206 194 """Check that InteractiveShell is protected from bad return types in custom exception handlers"""
207 195 ip.set_custom_exc((NameError,),lambda etype,value,tb, tb_offset=None: 1)
208 196 self.assertEqual(ip.custom_exceptions, (NameError,))
209 197 with tt.AssertPrints("Custom TB Handler failed", channel='stderr'):
210 198 ip.run_cell(u'a=abracadabra')
211 199 self.assertEqual(ip.custom_exceptions, ())
212 200
213 201 def test_drop_by_id(self):
214 202 myvars = {"a":object(), "b":object(), "c": object()}
215 203 ip.push(myvars, interactive=False)
216 204 for name in myvars:
217 205 assert name in ip.user_ns, name
218 206 assert name in ip.user_ns_hidden, name
219 207 ip.user_ns['b'] = 12
220 208 ip.drop_by_id(myvars)
221 209 for name in ["a", "c"]:
222 210 assert name not in ip.user_ns, name
223 211 assert name not in ip.user_ns_hidden, name
224 212 assert ip.user_ns['b'] == 12
225 213 ip.reset()
226 214
227 215 def test_var_expand(self):
228 216 ip.user_ns['f'] = u'Ca\xf1o'
229 217 self.assertEqual(ip.var_expand(u'echo $f'), u'echo Ca\xf1o')
230 218 self.assertEqual(ip.var_expand(u'echo {f}'), u'echo Ca\xf1o')
231 219 self.assertEqual(ip.var_expand(u'echo {f[:-1]}'), u'echo Ca\xf1')
232 220 self.assertEqual(ip.var_expand(u'echo {1*2}'), u'echo 2')
233 221
234 222 ip.user_ns['f'] = b'Ca\xc3\xb1o'
235 223 # This should not raise any exception:
236 224 ip.var_expand(u'echo $f')
237 225
238 226 def test_var_expand_local(self):
239 227 """Test local variable expansion in !system and %magic calls"""
240 228 # !system
241 229 ip.run_cell('def test():\n'
242 230 ' lvar = "ttt"\n'
243 231 ' ret = !echo {lvar}\n'
244 232 ' return ret[0]\n')
245 233 res = ip.user_ns['test']()
246 234 nt.assert_in('ttt', res)
247 235
248 236 # %magic
249 237 ip.run_cell('def makemacro():\n'
250 238 ' macroname = "macro_var_expand_locals"\n'
251 239 ' %macro {macroname} codestr\n')
252 240 ip.user_ns['codestr'] = "str(12)"
253 241 ip.run_cell('makemacro()')
254 242 nt.assert_in('macro_var_expand_locals', ip.user_ns)
255 243
256 244 def test_var_expand_self(self):
257 245 """Test variable expansion with the name 'self', which was failing.
258 246
259 247 See https://github.com/ipython/ipython/issues/1878#issuecomment-7698218
260 248 """
261 249 ip.run_cell('class cTest:\n'
262 250 ' classvar="see me"\n'
263 251 ' def test(self):\n'
264 252 ' res = !echo Variable: {self.classvar}\n'
265 253 ' return res[0]\n')
266 254 nt.assert_in('see me', ip.user_ns['cTest']().test())
267 255
268 256 def test_bad_var_expand(self):
269 257 """var_expand on invalid formats shouldn't raise"""
270 258 # SyntaxError
271 259 self.assertEqual(ip.var_expand(u"{'a':5}"), u"{'a':5}")
272 260 # NameError
273 261 self.assertEqual(ip.var_expand(u"{asdf}"), u"{asdf}")
274 262 # ZeroDivisionError
275 263 self.assertEqual(ip.var_expand(u"{1/0}"), u"{1/0}")
276 264
277 265 def test_silent_postexec(self):
278 266 """run_cell(silent=True) doesn't invoke pre/post_run_cell callbacks"""
279 267 pre_explicit = mock.Mock()
280 268 pre_always = mock.Mock()
281 269 post_explicit = mock.Mock()
282 270 post_always = mock.Mock()
283 271
284 272 ip.events.register('pre_run_cell', pre_explicit)
285 273 ip.events.register('pre_execute', pre_always)
286 274 ip.events.register('post_run_cell', post_explicit)
287 275 ip.events.register('post_execute', post_always)
288 276
289 277 try:
290 278 ip.run_cell("1", silent=True)
291 279 assert pre_always.called
292 280 assert not pre_explicit.called
293 281 assert post_always.called
294 282 assert not post_explicit.called
295 283             # double-check that non-silent exec did what we expected:
296 284             # the explicit callbacks should fire this time
297 285 ip.run_cell("1")
298 286 assert pre_explicit.called
299 287 assert post_explicit.called
300 288 finally:
301 289 # remove post-exec
302 290 ip.events.unregister('pre_run_cell', pre_explicit)
303 291 ip.events.unregister('pre_execute', pre_always)
304 292 ip.events.unregister('post_run_cell', post_explicit)
305 293 ip.events.unregister('post_execute', post_always)
306 294
307 295 def test_silent_noadvance(self):
308 296 """run_cell(silent=True) doesn't advance execution_count"""
309 297 ec = ip.execution_count
310 298 # silent should force store_history=False
311 299 ip.run_cell("1", store_history=True, silent=True)
312 300
313 301 self.assertEqual(ec, ip.execution_count)
314 302         # double-check that non-silent exec did what we expected:
315 303         # the execution count should advance this time
316 304 ip.run_cell("1", store_history=True)
317 305 self.assertEqual(ec+1, ip.execution_count)
318 306
319 307 def test_silent_nodisplayhook(self):
320 308 """run_cell(silent=True) doesn't trigger displayhook"""
321 309 d = dict(called=False)
322 310
323 311 trap = ip.display_trap
324 312 save_hook = trap.hook
325 313
326 314 def failing_hook(*args, **kwargs):
327 315 d['called'] = True
328 316
329 317 try:
330 318 trap.hook = failing_hook
331 319 res = ip.run_cell("1", silent=True)
332 320 self.assertFalse(d['called'])
333 321 self.assertIsNone(res.result)
334 322             # double-check that non-silent exec did what we expected:
335 323             # the displayhook should fire this time
336 324 ip.run_cell("1")
337 325 self.assertTrue(d['called'])
338 326 finally:
339 327 trap.hook = save_hook
340 328
341 @skipif(sys.version_info[0] >= 3, "softspace removed in py3")
342 def test_print_softspace(self):
343 """Verify that softspace is handled correctly when executing multiple
344 statements.
345
346 In [1]: print 1; print 2
347 1
348 2
349
350 In [2]: print 1,; print 2
351 1 2
352 """
353
354 329 def test_ofind_line_magic(self):
355 330 from IPython.core.magic import register_line_magic
356 331
357 332 @register_line_magic
358 333 def lmagic(line):
359 334 "A line magic"
360 335
361 336 # Get info on line magic
362 337 lfind = ip._ofind('lmagic')
363 338 info = dict(found=True, isalias=False, ismagic=True,
364 339 namespace = 'IPython internal', obj= lmagic.__wrapped__,
365 340 parent = None)
366 341 nt.assert_equal(lfind, info)
367 342
368 343 def test_ofind_cell_magic(self):
369 344 from IPython.core.magic import register_cell_magic
370 345
371 346 @register_cell_magic
372 347 def cmagic(line, cell):
373 348 "A cell magic"
374 349
375 350 # Get info on cell magic
376 351 find = ip._ofind('cmagic')
377 352 info = dict(found=True, isalias=False, ismagic=True,
378 353 namespace = 'IPython internal', obj= cmagic.__wrapped__,
379 354 parent = None)
380 355 nt.assert_equal(find, info)
381 356
382 357 def test_ofind_property_with_error(self):
383 358 class A(object):
384 359 @property
385 360 def foo(self):
386 361 raise NotImplementedError()
387 362 a = A()
388 363
389 364 found = ip._ofind('a.foo', [('locals', locals())])
390 365 info = dict(found=True, isalias=False, ismagic=False,
391 366 namespace='locals', obj=A.foo, parent=a)
392 367 nt.assert_equal(found, info)
393 368
394 369 def test_ofind_multiple_attribute_lookups(self):
395 370 class A(object):
396 371 @property
397 372 def foo(self):
398 373 raise NotImplementedError()
399 374
400 375 a = A()
401 376 a.a = A()
402 377 a.a.a = A()
403 378
404 379 found = ip._ofind('a.a.a.foo', [('locals', locals())])
405 380 info = dict(found=True, isalias=False, ismagic=False,
406 381 namespace='locals', obj=A.foo, parent=a.a.a)
407 382 nt.assert_equal(found, info)
408 383
409 384 def test_ofind_slotted_attributes(self):
410 385 class A(object):
411 386 __slots__ = ['foo']
412 387 def __init__(self):
413 388 self.foo = 'bar'
414 389
415 390 a = A()
416 391 found = ip._ofind('a.foo', [('locals', locals())])
417 392 info = dict(found=True, isalias=False, ismagic=False,
418 393 namespace='locals', obj=a.foo, parent=a)
419 394 nt.assert_equal(found, info)
420 395
421 396 found = ip._ofind('a.bar', [('locals', locals())])
422 397 info = dict(found=False, isalias=False, ismagic=False,
423 398 namespace=None, obj=None, parent=a)
424 399 nt.assert_equal(found, info)
425 400
426 401 def test_ofind_prefers_property_to_instance_level_attribute(self):
427 402 class A(object):
428 403 @property
429 404 def foo(self):
430 405 return 'bar'
431 406 a = A()
432 407 a.__dict__['foo'] = 'baz'
433 408 nt.assert_equal(a.foo, 'bar')
434 409 found = ip._ofind('a.foo', [('locals', locals())])
435 410 nt.assert_is(found['obj'], A.foo)
436 411
437 412 def test_custom_syntaxerror_exception(self):
438 413 called = []
439 414 def my_handler(shell, etype, value, tb, tb_offset=None):
440 415 called.append(etype)
441 416 shell.showtraceback((etype, value, tb), tb_offset=tb_offset)
442 417
443 418 ip.set_custom_exc((SyntaxError,), my_handler)
444 419 try:
445 420 ip.run_cell("1f")
446 421 # Check that this was called, and only once.
447 422 self.assertEqual(called, [SyntaxError])
448 423 finally:
449 424 # Reset the custom exception hook
450 425 ip.set_custom_exc((), None)
451 426
452 427 def test_custom_exception(self):
453 428 called = []
454 429 def my_handler(shell, etype, value, tb, tb_offset=None):
455 430 called.append(etype)
456 431 shell.showtraceback((etype, value, tb), tb_offset=tb_offset)
457 432
458 433 ip.set_custom_exc((ValueError,), my_handler)
459 434 try:
460 435 res = ip.run_cell("raise ValueError('test')")
461 436 # Check that this was called, and only once.
462 437 self.assertEqual(called, [ValueError])
463 438 # Check that the error is on the result object
464 439 self.assertIsInstance(res.error_in_exec, ValueError)
465 440 finally:
466 441 # Reset the custom exception hook
467 442 ip.set_custom_exc((), None)
468 443
469 @skipif(sys.version_info[0] >= 3, "no differences with __future__ in py3")
470 def test_future_environment(self):
471 "Can we run code with & without the shell's __future__ imports?"
472 ip.run_cell("from __future__ import division")
473 ip.run_cell("a = 1/2", shell_futures=True)
474 self.assertEqual(ip.user_ns['a'], 0.5)
475 ip.run_cell("b = 1/2", shell_futures=False)
476 self.assertEqual(ip.user_ns['b'], 0)
477
478 ip.compile.reset_compiler_flags()
479 # This shouldn't leak to the shell's compiler
480 ip.run_cell("from __future__ import division \nc=1/2", shell_futures=False)
481 self.assertEqual(ip.user_ns['c'], 0.5)
482 ip.run_cell("d = 1/2", shell_futures=True)
483 self.assertEqual(ip.user_ns['d'], 0)
484
485 444 def test_mktempfile(self):
486 445 filename = ip.mktempfile()
487 446 # Check that we can open the file again on Windows
488 447 with open(filename, 'w') as f:
489 448 f.write('abc')
490 449
491 450 filename = ip.mktempfile(data='blah')
492 451 with open(filename, 'r') as f:
493 452 self.assertEqual(f.read(), 'blah')
494 453
495 454 def test_new_main_mod(self):
496 455 # Smoketest to check that this accepts a unicode module name
497 456 name = u'jiefmw'
498 457 mod = ip.new_main_mod(u'%s.py' % name, name)
499 458 self.assertEqual(mod.__name__, name)
500 459
501 460 def test_get_exception_only(self):
502 461 try:
503 462 raise KeyboardInterrupt
504 463 except KeyboardInterrupt:
505 464 msg = ip.get_exception_only()
506 465 self.assertEqual(msg, 'KeyboardInterrupt\n')
507 466
508 467 try:
509 468 raise DerivedInterrupt("foo")
510 469 except KeyboardInterrupt:
511 470 msg = ip.get_exception_only()
512 if sys.version_info[0] <= 2:
513 self.assertEqual(msg, 'DerivedInterrupt: foo\n')
514 else:
515 self.assertEqual(msg, 'IPython.core.tests.test_interactiveshell.DerivedInterrupt: foo\n')
471 self.assertEqual(msg, 'IPython.core.tests.test_interactiveshell.DerivedInterrupt: foo\n')
516 472
517 473 def test_inspect_text(self):
518 474 ip.run_cell('a = 5')
519 475 text = ip.object_inspect_text('a')
520 476 self.assertIsInstance(text, unicode_type)
521 477
522 478
523 479 class TestSafeExecfileNonAsciiPath(unittest.TestCase):
524 480
525 481 @onlyif_unicode_paths
526 482 def setUp(self):
527 483 self.BASETESTDIR = tempfile.mkdtemp()
528 484 self.TESTDIR = join(self.BASETESTDIR, u"åäö")
529 485 os.mkdir(self.TESTDIR)
530 486 with open(join(self.TESTDIR, u"åäötestscript.py"), "w") as sfile:
531 487 sfile.write("pass\n")
532 488 self.oldpath = py3compat.getcwd()
533 489 os.chdir(self.TESTDIR)
534 490 self.fname = u"åäötestscript.py"
535 491
536 492 def tearDown(self):
537 493 os.chdir(self.oldpath)
538 494 shutil.rmtree(self.BASETESTDIR)
539 495
540 496 @onlyif_unicode_paths
541 497 def test_1(self):
542 498 """Test safe_execfile with non-ascii path
543 499 """
544 500 ip.safe_execfile(self.fname, {}, raise_exceptions=True)
545 501
546 502 class ExitCodeChecks(tt.TempFileMixin):
547 503 def test_exit_code_ok(self):
548 504 self.system('exit 0')
549 505 self.assertEqual(ip.user_ns['_exit_code'], 0)
550 506
551 507 def test_exit_code_error(self):
552 508 self.system('exit 1')
553 509 self.assertEqual(ip.user_ns['_exit_code'], 1)
554 510
555 511 @skipif(not hasattr(signal, 'SIGALRM'))
556 512 def test_exit_code_signal(self):
557 513 self.mktmp("import signal, time\n"
558 514 "signal.setitimer(signal.ITIMER_REAL, 0.1)\n"
559 515 "time.sleep(1)\n")
560 516 self.system("%s %s" % (sys.executable, self.fname))
561 517 self.assertEqual(ip.user_ns['_exit_code'], -signal.SIGALRM)
562 518
563 519 @onlyif_cmds_exist("csh")
564 520 def test_exit_code_signal_csh(self):
565 521 SHELL = os.environ.get('SHELL', None)
566 522 os.environ['SHELL'] = find_cmd("csh")
567 523 try:
568 524 self.test_exit_code_signal()
569 525 finally:
570 526 if SHELL is not None:
571 527 os.environ['SHELL'] = SHELL
572 528 else:
573 529 del os.environ['SHELL']
574 530
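
A minimal sketch of the exit-code convention these checks rely on (standard-library subprocess behaviour on POSIX, not an IPython API): a child killed by signal N reports its return code as -N, which is the negative value the tests expect to find in _exit_code.

import signal, subprocess, sys

# Spawn a child that kills itself with SIGTERM, then check the sign convention.
proc = subprocess.Popen(
    [sys.executable, "-c", "import os, signal; os.kill(os.getpid(), signal.SIGTERM)"])
proc.wait()
assert proc.returncode == -signal.SIGTERM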
575 531 class TestSystemRaw(unittest.TestCase, ExitCodeChecks):
576 532 system = ip.system_raw
577 533
578 534 @onlyif_unicode_paths
579 535 def test_1(self):
580 536 """Test system_raw with non-ascii cmd
581 537 """
582 538 cmd = u'''python -c "'åäö'" '''
583 539 ip.system_raw(cmd)
584 540
585 541 @mock.patch('subprocess.call', side_effect=KeyboardInterrupt)
586 542 @mock.patch('os.system', side_effect=KeyboardInterrupt)
587 543 def test_control_c(self, *mocks):
588 544 try:
589 545 self.system("sleep 1 # won't happen")
590 546 except KeyboardInterrupt:
591 547 self.fail("system call should intercept "
592 548 "keyboard interrupt from subprocess.call")
593 549 self.assertEqual(ip.user_ns['_exit_code'], -signal.SIGINT)
594 550
595 551 # TODO: Exit codes are currently ignored on Windows.
596 552 class TestSystemPipedExitCode(unittest.TestCase, ExitCodeChecks):
597 553 system = ip.system_piped
598 554
599 555 @skip_win32
600 556 def test_exit_code_ok(self):
601 557 ExitCodeChecks.test_exit_code_ok(self)
602 558
603 559 @skip_win32
604 560 def test_exit_code_error(self):
605 561 ExitCodeChecks.test_exit_code_error(self)
606 562
607 563 @skip_win32
608 564 def test_exit_code_signal(self):
609 565 ExitCodeChecks.test_exit_code_signal(self)
610 566
611 567 class TestModules(unittest.TestCase, tt.TempFileMixin):
612 568 def test_extraneous_loads(self):
613 569 """Test we're not loading modules on startup that we shouldn't.
614 570 """
615 571 self.mktmp("import sys\n"
616 572 "print('numpy' in sys.modules)\n"
617 573 "print('ipyparallel' in sys.modules)\n"
618 574 "print('ipykernel' in sys.modules)\n"
619 575 )
620 576 out = "False\nFalse\nFalse\n"
621 577 tt.ipexec_validate(self.fname, out)
622 578
623 579 class Negator(ast.NodeTransformer):
624 580 """Negates all number literals in an AST."""
625 581 def visit_Num(self, node):
626 582 node.n = -node.n
627 583 return node
628 584
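
A minimal sketch of the pipeline the Negator plugs into, shown with the plain ast module rather than IPython's ast_transformers hook (assumption: a Python version where number literals still parse as ast.Num, i.e. the versions this code targets; newer interpreters use ast.Constant):

import ast

# Transform the parsed tree, then compile and run it: both literals are negated,
# so the code that prints 12 + 22 actually prints -34, as test_run_cell checks below.
tree = Negator().visit(ast.parse("print(12 + 22)"))
exec(compile(tree, "<sketch>", "exec"))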
629 585 class TestAstTransform(unittest.TestCase):
630 586 def setUp(self):
631 587 self.negator = Negator()
632 588 ip.ast_transformers.append(self.negator)
633 589
634 590 def tearDown(self):
635 591 ip.ast_transformers.remove(self.negator)
636 592
637 593 def test_run_cell(self):
638 594 with tt.AssertPrints('-34'):
639 595 ip.run_cell('print (12 + 22)')
640 596
641 597 # A named reference to a number shouldn't be transformed.
642 598 ip.user_ns['n'] = 55
643 599 with tt.AssertNotPrints('-55'):
644 600 ip.run_cell('print (n)')
645 601
646 602 def test_timeit(self):
647 603 called = set()
648 604 def f(x):
649 605 called.add(x)
650 606 ip.push({'f':f})
651 607
652 608 with tt.AssertPrints("average of "):
653 609 ip.run_line_magic("timeit", "-n1 f(1)")
654 610 self.assertEqual(called, {-1})
655 611 called.clear()
656 612
657 613 with tt.AssertPrints("average of "):
658 614 ip.run_cell_magic("timeit", "-n1 f(2)", "f(3)")
659 615 self.assertEqual(called, {-2, -3})
660 616
661 617 def test_time(self):
662 618 called = []
663 619 def f(x):
664 620 called.append(x)
665 621 ip.push({'f':f})
666 622
667 623 # Test with an expression
668 624 with tt.AssertPrints("Wall time: "):
669 625 ip.run_line_magic("time", "f(5+9)")
670 626 self.assertEqual(called, [-14])
671 627 called[:] = []
672 628
673 629 # Test with a statement (different code path)
674 630 with tt.AssertPrints("Wall time: "):
675 631 ip.run_line_magic("time", "a = f(-3 + -2)")
676 632 self.assertEqual(called, [5])
677 633
678 634 def test_macro(self):
679 635 ip.push({'a':10})
680 636 # The AST transformation makes this do a+=-1
681 637 ip.define_macro("amacro", "a+=1\nprint(a)")
682 638
683 639 with tt.AssertPrints("9"):
684 640 ip.run_cell("amacro")
685 641 with tt.AssertPrints("8"):
686 642 ip.run_cell("amacro")
687 643
688 644 class IntegerWrapper(ast.NodeTransformer):
689 645 """Wraps all integers in a call to Integer()"""
690 646 def visit_Num(self, node):
691 647 if isinstance(node.n, int):
692 648 return ast.Call(func=ast.Name(id='Integer', ctx=ast.Load()),
693 649 args=[node], keywords=[])
694 650 return node
695 651
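
The same transform sketched outside IPython (same Python-version caveat as above for visit_Num); because IntegerWrapper synthesizes a new Call node, ast.fix_missing_locations() is needed before compiling. The lambda stands in for the recording Integer that setUp pushes into user_ns:

import ast

tree = ast.fix_missing_locations(IntegerWrapper().visit(ast.parse("n = 2")))
ns = {"Integer": lambda *args: args}   # stand-in recorder: just hand back the args
exec(compile(tree, "<sketch>", "exec"), ns)
assert ns["n"] == (2,)                 # the literal arrived wrapped in a call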
696 652 class TestAstTransform2(unittest.TestCase):
697 653 def setUp(self):
698 654 self.intwrapper = IntegerWrapper()
699 655 ip.ast_transformers.append(self.intwrapper)
700 656
701 657 self.calls = []
702 658 def Integer(*args):
703 659 self.calls.append(args)
704 660 return args
705 661 ip.push({"Integer": Integer})
706 662
707 663 def tearDown(self):
708 664 ip.ast_transformers.remove(self.intwrapper)
709 665 del ip.user_ns['Integer']
710 666
711 667 def test_run_cell(self):
712 668 ip.run_cell("n = 2")
713 669 self.assertEqual(self.calls, [(2,)])
714 670
715 671 # This shouldn't throw an error
716 672 ip.run_cell("o = 2.0")
717 673 self.assertEqual(ip.user_ns['o'], 2.0)
718 674
719 675 def test_timeit(self):
720 676 called = set()
721 677 def f(x):
722 678 called.add(x)
723 679 ip.push({'f':f})
724 680
725 681 with tt.AssertPrints("average of "):
726 682 ip.run_line_magic("timeit", "-n1 f(1)")
727 683 self.assertEqual(called, {(1,)})
728 684 called.clear()
729 685
730 686 with tt.AssertPrints("average of "):
731 687 ip.run_cell_magic("timeit", "-n1 f(2)", "f(3)")
732 688 self.assertEqual(called, {(2,), (3,)})
733 689
734 690 class ErrorTransformer(ast.NodeTransformer):
735 691 """Throws an error when it sees a number."""
736 692 def visit_Num(self, node):
737 693 raise ValueError("test")
738 694
739 695 class TestAstTransformError(unittest.TestCase):
740 696 def test_unregistering(self):
741 697 err_transformer = ErrorTransformer()
742 698 ip.ast_transformers.append(err_transformer)
743 699
744 700 with tt.AssertPrints("unregister", channel='stderr'):
745 701 ip.run_cell("1 + 2")
746 702
747 703 # This should have been removed.
748 704 nt.assert_not_in(err_transformer, ip.ast_transformers)
749 705
750 706
751 707 class StringRejector(ast.NodeTransformer):
752 708 """Throws an InputRejected when it sees a string literal.
753 709
754 710 Used to verify that NodeTransformers can signal that a piece of code should
755 711 not be executed by throwing an InputRejected.
756 712 """
757 713
758 714 def visit_Str(self, node):
759 715 raise InputRejected("test")
760 716
761 717
762 718 class TestAstTransformInputRejection(unittest.TestCase):
763 719
764 720 def setUp(self):
765 721 self.transformer = StringRejector()
766 722 ip.ast_transformers.append(self.transformer)
767 723
768 724 def tearDown(self):
769 725 ip.ast_transformers.remove(self.transformer)
770 726
771 727 def test_input_rejection(self):
772 728 """Check that NodeTransformers can reject input."""
773 729
774 730 expect_exception_tb = tt.AssertPrints("InputRejected: test")
775 731 expect_no_cell_output = tt.AssertNotPrints("'unsafe'", suppress=False)
776 732
777 733 # Run the same check twice to verify that the transformer is not
778 734 # disabled after raising.
779 735 with expect_exception_tb, expect_no_cell_output:
780 736 ip.run_cell("'unsafe'")
781 737
782 738 with expect_exception_tb, expect_no_cell_output:
783 739 res = ip.run_cell("'unsafe'")
784 740
785 741 self.assertIsInstance(res.error_before_exec, InputRejected)
786 742
787 743 def test__IPYTHON__():
788 744 # This shouldn't raise a NameError, that's all
789 745 __IPYTHON__
790 746
791 747
792 748 class DummyRepr(object):
793 749 def __repr__(self):
794 750 return "DummyRepr"
795 751
796 752 def _repr_html_(self):
797 753 return "<b>dummy</b>"
798 754
799 755 def _repr_javascript_(self):
800 756 return "console.log('hi');", {'key': 'value'}
801 757
802 758
803 759 def test_user_variables():
804 760 # enable all formatters
805 761 ip.display_formatter.active_types = ip.display_formatter.format_types
806 762
807 763 ip.user_ns['dummy'] = d = DummyRepr()
808 764 keys = {'dummy', 'doesnotexist'}
809 765 r = ip.user_expressions({ key:key for key in keys})
810 766
811 767 nt.assert_equal(keys, set(r.keys()))
812 768 dummy = r['dummy']
813 769 nt.assert_equal({'status', 'data', 'metadata'}, set(dummy.keys()))
814 770 nt.assert_equal(dummy['status'], 'ok')
815 771 data = dummy['data']
816 772 metadata = dummy['metadata']
817 773 nt.assert_equal(data.get('text/html'), d._repr_html_())
818 774 js, jsmd = d._repr_javascript_()
819 775 nt.assert_equal(data.get('application/javascript'), js)
820 776 nt.assert_equal(metadata.get('application/javascript'), jsmd)
821 777
822 778 dne = r['doesnotexist']
823 779 nt.assert_equal(dne['status'], 'error')
824 780 nt.assert_equal(dne['ename'], 'NameError')
825 781
826 782 # back to text only
827 783 ip.display_formatter.active_types = ['text/plain']
828 784
829 785 def test_user_expression():
830 786 # enable all formatters
831 787 ip.display_formatter.active_types = ip.display_formatter.format_types
832 788 query = {
833 789 'a' : '1 + 2',
834 790 'b' : '1/0',
835 791 }
836 792 r = ip.user_expressions(query)
837 793 import pprint
838 794 pprint.pprint(r)
839 795 nt.assert_equal(set(r.keys()), set(query.keys()))
840 796 a = r['a']
841 797 nt.assert_equal({'status', 'data', 'metadata'}, set(a.keys()))
842 798 nt.assert_equal(a['status'], 'ok')
843 799 data = a['data']
844 800 metadata = a['metadata']
845 801 nt.assert_equal(data.get('text/plain'), '3')
846 802
847 803 b = r['b']
848 804 nt.assert_equal(b['status'], 'error')
849 805 nt.assert_equal(b['ename'], 'ZeroDivisionError')
850 806
851 807 # back to text only
852 808 ip.display_formatter.active_types = ['text/plain']
853 809
854 810
855 811
856 812
857 813
858 814 class TestSyntaxErrorTransformer(unittest.TestCase):
859 815 """Check that SyntaxError raised by an input transformer is handled by run_cell()"""
860 816
861 817 class SyntaxErrorTransformer(InputTransformer):
862 818
863 819 def push(self, line):
864 820 pos = line.find('syntaxerror')
865 821 if pos >= 0:
866 822 e = SyntaxError('input contains "syntaxerror"')
867 823 e.text = line
868 824 e.offset = pos + 1
869 825 raise e
870 826 return line
871 827
872 828 def reset(self):
873 829 pass
874 830
875 831 def setUp(self):
876 832 self.transformer = TestSyntaxErrorTransformer.SyntaxErrorTransformer()
877 833 ip.input_splitter.python_line_transforms.append(self.transformer)
878 834 ip.input_transformer_manager.python_line_transforms.append(self.transformer)
879 835
880 836 def tearDown(self):
881 837 ip.input_splitter.python_line_transforms.remove(self.transformer)
882 838 ip.input_transformer_manager.python_line_transforms.remove(self.transformer)
883 839
884 840 def test_syntaxerror_input_transformer(self):
885 841 with tt.AssertPrints('1234'):
886 842 ip.run_cell('1234')
887 843 with tt.AssertPrints('SyntaxError: invalid syntax'):
888 844 ip.run_cell('1 2 3') # plain python syntax error
889 845 with tt.AssertPrints('SyntaxError: input contains "syntaxerror"'):
890 846 ip.run_cell('2345 # syntaxerror') # input transformer syntax error
891 847 with tt.AssertPrints('3456'):
892 848 ip.run_cell('3456')
893 849
894 850
895 851
896 852 def test_warning_suppression():
897 853 ip.run_cell("import warnings")
898 854 try:
899 855 with tt.AssertPrints("UserWarning: asdf", channel="stderr"):
900 856 ip.run_cell("warnings.warn('asdf')")
901 857 # Here's the real test -- if we run that again, we should get the
902 858 # warning again. Traditionally, each warning was only issued once per
903 859 # IPython session (approximately), even if the user typed in new and
904 860 # different code that should have also triggered the warning, leading
905 861 # to much confusion.
906 862 with tt.AssertPrints("UserWarning: asdf", channel="stderr"):
907 863 ip.run_cell("warnings.warn('asdf')")
908 864 finally:
909 865 ip.run_cell("del warnings")
910 866
911 867
912 868 def test_deprecation_warning():
913 869 ip.run_cell("""
914 870 import warnings
915 871 def wrn():
916 872 warnings.warn(
917 873 "I AM A WARNING",
918 874 DeprecationWarning
919 875 )
920 876 """)
921 877 try:
922 878 with tt.AssertPrints("I AM A WARNING", channel="stderr"):
923 879 ip.run_cell("wrn()")
924 880 finally:
925 881 ip.run_cell("del warnings")
926 882 ip.run_cell("del wrn")
927 883
928 884
929 885 class TestImportNoDeprecate(tt.TempFileMixin):
930 886
931 887 def setup(self):
932 888 """Make a valid python temp file."""
933 889 self.mktmp("""
934 890 import warnings
935 891 def wrn():
936 892 warnings.warn(
937 893 "I AM A WARNING",
938 894 DeprecationWarning
939 895 )
940 896 """)
941 897
942 898 def test_no_dep(self):
943 899 """
944 900 No deprecation warning should be raised from imported functions
945 901 """
946 902 ip.run_cell("from {} import wrn".format(self.fname))
947 903
948 904 with tt.AssertNotPrints("I AM A WARNING"):
949 905 ip.run_cell("wrn()")
950 906 ip.run_cell("del wrn")
@@ -1,1011 +1,987 b''
1 1 # -*- coding: utf-8 -*-
2 2 """Tests for various magic functions.
3 3
4 4 Needs to be run by nose (to make ipython session available).
5 5 """
6 from __future__ import absolute_import
7 6
8 7 import io
9 8 import os
10 9 import sys
11 10 import warnings
12 11 from unittest import TestCase
13 12
14 13 try:
15 14 from importlib import invalidate_caches # Required from Python 3.3
16 15 except ImportError:
17 16 def invalidate_caches():
18 17 pass
19 18
20 19 import nose.tools as nt
21 20
22 21 from IPython import get_ipython
23 22 from IPython.core import magic
24 23 from IPython.core.error import UsageError
25 24 from IPython.core.magic import (Magics, magics_class, line_magic,
26 25 cell_magic,
27 26 register_line_magic, register_cell_magic)
28 27 from IPython.core.magics import execution, script, code
29 28 from IPython.testing import decorators as dec
30 29 from IPython.testing import tools as tt
31 30 from IPython.utils import py3compat
32 31 from IPython.utils.io import capture_output
33 32 from IPython.utils.tempdir import TemporaryDirectory
34 33 from IPython.utils.process import find_cmd
35 34
36 35 if py3compat.PY3:
37 36 from io import StringIO
38 37 else:
39 38 from StringIO import StringIO
40 39
41 40
42 41 _ip = get_ipython()
43 42
44 43 @magic.magics_class
45 44 class DummyMagics(magic.Magics): pass
46 45
47 46 def test_extract_code_ranges():
48 47 instr = "1 3 5-6 7-9 10:15 17: :10 10- -13 :"
49 48 expected = [(0, 1),
50 49 (2, 3),
51 50 (4, 6),
52 51 (6, 9),
53 52 (9, 14),
54 53 (16, None),
55 54 (None, 9),
56 55 (9, None),
57 56 (None, 13),
58 57 (None, None)]
59 58 actual = list(code.extract_code_ranges(instr))
60 59 nt.assert_equal(actual, expected)
61 60
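
The expected tuples above are 0-based, half-open slices of the input lines, so a 1-based inclusive spec such as "5-6" maps to (4, 6). A small check of that convention (plain Python, derived from the expected values in the test):

lines = ['l%d' % n for n in range(1, 20)]
start, stop = 4, 6                     # what extract_code_ranges yields for "5-6"
assert lines[start:stop] == ['l5', 'l6']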
62 61 def test_extract_symbols():
63 62 source = """import foo\na = 10\ndef b():\n return 42\n\n\nclass A: pass\n\n\n"""
64 63 symbols_args = ["a", "b", "A", "A,b", "A,a", "z"]
65 64 expected = [([], ['a']),
66 65 (["def b():\n return 42\n"], []),
67 66 (["class A: pass\n"], []),
68 67 (["class A: pass\n", "def b():\n return 42\n"], []),
69 68 (["class A: pass\n"], ['a']),
70 69 ([], ['z'])]
71 70 for symbols, exp in zip(symbols_args, expected):
72 71 nt.assert_equal(code.extract_symbols(source, symbols), exp)
73 72
74 73
75 74 def test_extract_symbols_raises_exception_with_non_python_code():
76 75 source = ("=begin A Ruby program :)=end\n"
77 76 "def hello\n"
78 77 "puts 'Hello world'\n"
79 78 "end")
80 79 with nt.assert_raises(SyntaxError):
81 80 code.extract_symbols(source, "hello")
82 81
83 82 def test_config():
84 83 """ test that config magic does not raise
85 84 can happen if Configurable init is moved too early into
86 85 Magics.__init__ as then a Config object will be registered as a
87 86 magic.
88 87 """
89 88 ## should not raise.
90 89 _ip.magic('config')
91 90
92 91 def test_rehashx():
93 92 # clear up everything
94 93 _ip.alias_manager.clear_aliases()
95 94 del _ip.db['syscmdlist']
96 95
97 96 _ip.magic('rehashx')
98 97 # Practically ALL ipython development systems will have more than 10 aliases
99 98
100 99 nt.assert_true(len(_ip.alias_manager.aliases) > 10)
101 100 for name, cmd in _ip.alias_manager.aliases:
102 101 # we must strip dots from alias names
103 102 nt.assert_not_in('.', name)
104 103
105 104 # rehashx must fill up syscmdlist
106 105 scoms = _ip.db['syscmdlist']
107 106 nt.assert_true(len(scoms) > 10)
108 107
109 108
110 109 def test_magic_parse_options():
111 110 """Test that we don't mangle paths when parsing magic options."""
112 111 ip = get_ipython()
113 112 path = 'c:\\x'
114 113 m = DummyMagics(ip)
115 114 opts = m.parse_options('-f %s' % path,'f:')[0]
116 115 # argv splitting is os-dependent
117 116 if os.name == 'posix':
118 117 expected = 'c:x'
119 118 else:
120 119 expected = path
121 120 nt.assert_equal(opts['f'], expected)
122 121
123 122 def test_magic_parse_long_options():
124 123 """Magic.parse_options can handle --foo=bar long options"""
125 124 ip = get_ipython()
126 125 m = DummyMagics(ip)
127 126 opts, _ = m.parse_options('--foo --bar=bubble', 'a', 'foo', 'bar=')
128 127 nt.assert_in('foo', opts)
129 128 nt.assert_in('bar', opts)
130 129 nt.assert_equal(opts['bar'], "bubble")
131 130
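
A short sketch combining the two spec forms exercised above; the assertions follow from the behaviour the two tests already pin down ('bar=' captures a value, bare names are flags, leftover text comes back as a single string), and the instance construction mirrors test_magic_parse_options:

m = DummyMagics(get_ipython())
opts, rest = m.parse_options('-a --bar=7 some args', 'a', 'foo', 'bar=')
assert 'a' in opts and opts['bar'] == '7'
assert rest == 'some args'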
132 131
133 132 @dec.skip_without('sqlite3')
134 133 def doctest_hist_f():
135 134 """Test %hist -f with temporary filename.
136 135
137 136 In [9]: import tempfile
138 137
139 138 In [10]: tfile = tempfile.mktemp('.py','tmp-ipython-')
140 139
141 140 In [11]: %hist -nl -f $tfile 3
142 141
143 142 In [13]: import os; os.unlink(tfile)
144 143 """
145 144
146 145
147 146 @dec.skip_without('sqlite3')
148 147 def doctest_hist_r():
149 148 """Test %hist -r
150 149
151 150 XXX - This test is not recording the output correctly. For some reason, in
152 151 testing mode the raw history isn't getting populated. No idea why.
153 152 Disabling the output checking for now, though at least we do run it.
154 153
155 154 In [1]: 'hist' in _ip.lsmagic()
156 155 Out[1]: True
157 156
158 157 In [2]: x=1
159 158
160 159 In [3]: %hist -rl 2
161 160 x=1 # random
162 161 %hist -r 2
163 162 """
164 163
165 164
166 165 @dec.skip_without('sqlite3')
167 166 def doctest_hist_op():
168 167 """Test %hist -op
169 168
170 169 In [1]: class b(float):
171 170 ...: pass
172 171 ...:
173 172
174 173 In [2]: class s(object):
175 174 ...: def __str__(self):
176 175 ...: return 's'
177 176 ...:
178 177
179 178 In [3]:
180 179
181 180 In [4]: class r(b):
182 181 ...: def __repr__(self):
183 182 ...: return 'r'
184 183 ...:
185 184
186 185 In [5]: class sr(s,r): pass
187 186 ...:
188 187
189 188 In [6]:
190 189
191 190 In [7]: bb=b()
192 191
193 192 In [8]: ss=s()
194 193
195 194 In [9]: rr=r()
196 195
197 196 In [10]: ssrr=sr()
198 197
199 198 In [11]: 4.5
200 199 Out[11]: 4.5
201 200
202 201 In [12]: str(ss)
203 202 Out[12]: 's'
204 203
205 204 In [13]:
206 205
207 206 In [14]: %hist -op
208 207 >>> class b:
209 208 ... pass
210 209 ...
211 210 >>> class s(b):
212 211 ... def __str__(self):
213 212 ... return 's'
214 213 ...
215 214 >>>
216 215 >>> class r(b):
217 216 ... def __repr__(self):
218 217 ... return 'r'
219 218 ...
220 219 >>> class sr(s,r): pass
221 220 >>>
222 221 >>> bb=b()
223 222 >>> ss=s()
224 223 >>> rr=r()
225 224 >>> ssrr=sr()
226 225 >>> 4.5
227 226 4.5
228 227 >>> str(ss)
229 228 's'
230 229 >>>
231 230 """
232 231
233 232 def test_hist_pof():
234 233 ip = get_ipython()
235 234 ip.run_cell(u"1+2", store_history=True)
236 235 #raise Exception(ip.history_manager.session_number)
237 236 #raise Exception(list(ip.history_manager._get_range_session()))
238 237 with TemporaryDirectory() as td:
239 238 tf = os.path.join(td, 'hist.py')
240 239 ip.run_line_magic('history', '-pof %s' % tf)
241 240 assert os.path.isfile(tf)
242 241
243 242
244 243 @dec.skip_without('sqlite3')
245 244 def test_macro():
246 245 ip = get_ipython()
247 246 ip.history_manager.reset() # Clear any existing history.
248 247 cmds = ["a=1", "def b():\n return a**2", "print(a,b())"]
249 248 for i, cmd in enumerate(cmds, start=1):
250 249 ip.history_manager.store_inputs(i, cmd)
251 250 ip.magic("macro test 1-3")
252 251 nt.assert_equal(ip.user_ns["test"].value, "\n".join(cmds)+"\n")
253 252
254 253 # List macros
255 254 nt.assert_in("test", ip.magic("macro"))
256 255
257 256
258 257 @dec.skip_without('sqlite3')
259 258 def test_macro_run():
260 259 """Test that we can run a multi-line macro successfully."""
261 260 ip = get_ipython()
262 261 ip.history_manager.reset()
263 262 cmds = ["a=10", "a+=1", py3compat.doctest_refactor_print("print a"),
264 263 "%macro test 2-3"]
265 264 for cmd in cmds:
266 265 ip.run_cell(cmd, store_history=True)
267 266 nt.assert_equal(ip.user_ns["test"].value,
268 267 py3compat.doctest_refactor_print("a+=1\nprint a\n"))
269 268 with tt.AssertPrints("12"):
270 269 ip.run_cell("test")
271 270 with tt.AssertPrints("13"):
272 271 ip.run_cell("test")
273 272
274 273
275 274 def test_magic_magic():
276 275 """Test %magic"""
277 276 ip = get_ipython()
278 277 with capture_output() as captured:
279 278 ip.magic("magic")
280 279
281 280 stdout = captured.stdout
282 281 nt.assert_in('%magic', stdout)
283 282 nt.assert_in('IPython', stdout)
284 283 nt.assert_in('Available', stdout)
285 284
286 285
287 286 @dec.skipif_not_numpy
288 287 def test_numpy_reset_array_undec():
289 288 "Test '%reset array' functionality"
290 289 _ip.ex('import numpy as np')
291 290 _ip.ex('a = np.empty(2)')
292 291 nt.assert_in('a', _ip.user_ns)
293 292 _ip.magic('reset -f array')
294 293 nt.assert_not_in('a', _ip.user_ns)
295 294
296 295 def test_reset_out():
297 296 "Test '%reset out' magic"
298 297 _ip.run_cell("parrot = 'dead'", store_history=True)
299 298 # test '%reset -f out', make an Out prompt
300 299 _ip.run_cell("parrot", store_history=True)
301 300 nt.assert_true('dead' in [_ip.user_ns[x] for x in ('_','__','___')])
302 301 _ip.magic('reset -f out')
303 302 nt.assert_false('dead' in [_ip.user_ns[x] for x in ('_','__','___')])
304 303 nt.assert_equal(len(_ip.user_ns['Out']), 0)
305 304
306 305 def test_reset_in():
307 306 "Test '%reset in' magic"
308 307 # test '%reset -f in'
309 308 _ip.run_cell("parrot", store_history=True)
310 309 nt.assert_true('parrot' in [_ip.user_ns[x] for x in ('_i','_ii','_iii')])
311 310 _ip.magic('%reset -f in')
312 311 nt.assert_false('parrot' in [_ip.user_ns[x] for x in ('_i','_ii','_iii')])
313 312 nt.assert_equal(len(set(_ip.user_ns['In'])), 1)
314 313
315 314 def test_reset_dhist():
316 315 "Test '%reset dhist' magic"
317 316 _ip.run_cell("tmp = [d for d in _dh]") # copy before clearing
318 317 _ip.magic('cd ' + os.path.dirname(nt.__file__))
319 318 _ip.magic('cd -')
320 319 nt.assert_true(len(_ip.user_ns['_dh']) > 0)
321 320 _ip.magic('reset -f dhist')
322 321 nt.assert_equal(len(_ip.user_ns['_dh']), 0)
323 322 _ip.run_cell("_dh = [d for d in tmp]") #restore
324 323
325 324 def test_reset_in_length():
326 325 "Test that '%reset in' preserves In[] length"
327 326 _ip.run_cell("print 'foo'")
328 327 _ip.run_cell("reset -f in")
329 328 nt.assert_equal(len(_ip.user_ns['In']), _ip.displayhook.prompt_count+1)
330 329
331 330 def test_tb_syntaxerror():
332 331 """test %tb after a SyntaxError"""
333 332 ip = get_ipython()
334 333 ip.run_cell("for")
335 334
336 335 # trap and validate stdout
337 336 save_stdout = sys.stdout
338 337 try:
339 338 sys.stdout = StringIO()
340 339 ip.run_cell("%tb")
341 340 out = sys.stdout.getvalue()
342 341 finally:
343 342 sys.stdout = save_stdout
344 343 # trim output, and only check the last line
345 344 last_line = out.rstrip().splitlines()[-1].strip()
346 345 nt.assert_equal(last_line, "SyntaxError: invalid syntax")
347 346
348 347
349 348 def test_time():
350 349 ip = get_ipython()
351 350
352 351 with tt.AssertPrints("Wall time: "):
353 352 ip.run_cell("%time None")
354 353
355 354 ip.run_cell("def f(kmjy):\n"
356 355 " %time print (2*kmjy)")
357 356
358 357 with tt.AssertPrints("Wall time: "):
359 358 with tt.AssertPrints("hihi", suppress=False):
360 359 ip.run_cell("f('hi')")
361 360
362 361
363 362 @dec.skip_win32
364 363 def test_time2():
365 364 ip = get_ipython()
366 365
367 366 with tt.AssertPrints("CPU times: user "):
368 367 ip.run_cell("%time None")
369 368
370 369 def test_time3():
371 370 """Erroneous magic function calls, issue gh-3334"""
372 371 ip = get_ipython()
373 372 ip.user_ns.pop('run', None)
374 373
375 374 with tt.AssertNotPrints("not found", channel='stderr'):
376 375 ip.run_cell("%%time\n"
377 376 "run = 0\n"
378 377 "run += 1")
379 378
380 @dec.skipif(sys.version_info[0] >= 3, "no differences with __future__ in py3")
381 def test_time_futures():
382 "Test %time with __future__ environments"
383 ip = get_ipython()
384 ip.autocall = 0
385 ip.run_cell("from __future__ import division")
386 with tt.AssertPrints('0.25'):
387 ip.run_line_magic('time', 'print(1/4)')
388 ip.compile.reset_compiler_flags()
389 with tt.AssertNotPrints('0.25'):
390 ip.run_line_magic('time', 'print(1/4)')
391
392 379 def test_doctest_mode():
393 380 "Toggle doctest_mode twice, it should be a no-op and run without error"
394 381 _ip.magic('doctest_mode')
395 382 _ip.magic('doctest_mode')
396 383
397 384
398 385 def test_parse_options():
399 386 """Tests for basic options parsing in magics."""
400 387 # These are only the most minimal of tests, more should be added later. At
401 388 # the very least we check that basic text/unicode calls work OK.
402 389 m = DummyMagics(_ip)
403 390 nt.assert_equal(m.parse_options('foo', '')[1], 'foo')
404 391 nt.assert_equal(m.parse_options(u'foo', '')[1], u'foo')
405 392
406 393
407 394 def test_dirops():
408 395 """Test various directory handling operations."""
409 396 # curpath = lambda :os.path.splitdrive(py3compat.getcwd())[1].replace('\\','/')
410 397 curpath = py3compat.getcwd
411 398 startdir = py3compat.getcwd()
412 399 ipdir = os.path.realpath(_ip.ipython_dir)
413 400 try:
414 401 _ip.magic('cd "%s"' % ipdir)
415 402 nt.assert_equal(curpath(), ipdir)
416 403 _ip.magic('cd -')
417 404 nt.assert_equal(curpath(), startdir)
418 405 _ip.magic('pushd "%s"' % ipdir)
419 406 nt.assert_equal(curpath(), ipdir)
420 407 _ip.magic('popd')
421 408 nt.assert_equal(curpath(), startdir)
422 409 finally:
423 410 os.chdir(startdir)
424 411
425 412
426 413 def test_xmode():
427 414 # Calling xmode three times should be a no-op
428 415 xmode = _ip.InteractiveTB.mode
429 416 for i in range(3):
430 417 _ip.magic("xmode")
431 418 nt.assert_equal(_ip.InteractiveTB.mode, xmode)
432 419
433 420 def test_reset_hard():
434 421 monitor = []
435 422 class A(object):
436 423 def __del__(self):
437 424 monitor.append(1)
438 425 def __repr__(self):
439 426 return "<A instance>"
440 427
441 428 _ip.user_ns["a"] = A()
442 429 _ip.run_cell("a")
443 430
444 431 nt.assert_equal(monitor, [])
445 432 _ip.magic("reset -f")
446 433 nt.assert_equal(monitor, [1])
447 434
448 435 class TestXdel(tt.TempFileMixin):
449 436 def test_xdel(self):
450 437 """Test that references from %run are cleared by xdel."""
451 438 src = ("class A(object):\n"
452 439 " monitor = []\n"
453 440 " def __del__(self):\n"
454 441 " self.monitor.append(1)\n"
455 442 "a = A()\n")
456 443 self.mktmp(src)
457 444 # %run creates some hidden references...
458 445 _ip.magic("run %s" % self.fname)
459 446 # ... as does the displayhook.
460 447 _ip.run_cell("a")
461 448
462 449 monitor = _ip.user_ns["A"].monitor
463 450 nt.assert_equal(monitor, [])
464 451
465 452 _ip.magic("xdel a")
466 453
467 454 # Check that a's __del__ method has been called.
468 455 nt.assert_equal(monitor, [1])
469 456
470 457 def doctest_who():
471 458 """doctest for %who
472 459
473 460 In [1]: %reset -f
474 461
475 462 In [2]: alpha = 123
476 463
477 464 In [3]: beta = 'beta'
478 465
479 466 In [4]: %who int
480 467 alpha
481 468
482 469 In [5]: %who str
483 470 beta
484 471
485 472 In [6]: %whos
486 473 Variable Type Data/Info
487 474 ----------------------------
488 475 alpha int 123
489 476 beta str beta
490 477
491 478 In [7]: %who_ls
492 479 Out[7]: ['alpha', 'beta']
493 480 """
494 481
495 482 def test_whos():
496 483 """Check that whos is protected against objects where repr() fails."""
497 484 class A(object):
498 485 def __repr__(self):
499 486 raise Exception()
500 487 _ip.user_ns['a'] = A()
501 488 _ip.magic("whos")
502 489
503 490 @py3compat.u_format
504 491 def doctest_precision():
505 492 """doctest for %precision
506 493
507 494 In [1]: f = get_ipython().display_formatter.formatters['text/plain']
508 495
509 496 In [2]: %precision 5
510 497 Out[2]: {u}'%.5f'
511 498
512 499 In [3]: f.float_format
513 500 Out[3]: {u}'%.5f'
514 501
515 502 In [4]: %precision %e
516 503 Out[4]: {u}'%e'
517 504
518 505 In [5]: f(3.1415927)
519 506 Out[5]: {u}'3.141593e+00'
520 507 """
521 508
522 509 def test_psearch():
523 510 with tt.AssertPrints("dict.fromkeys"):
524 511 _ip.run_cell("dict.fr*?")
525 512
526 513 def test_timeit_shlex():
527 514 """test shlex issues with timeit (#1109)"""
528 515 _ip.ex("def f(*a,**kw): pass")
529 516 _ip.magic('timeit -n1 "this is a bug".count(" ")')
530 517 _ip.magic('timeit -r1 -n1 f(" ", 1)')
531 518 _ip.magic('timeit -r1 -n1 f(" ", 1, " ", 2, " ")')
532 519 _ip.magic('timeit -r1 -n1 ("a " + "b")')
533 520 _ip.magic('timeit -r1 -n1 f("a " + "b")')
534 521 _ip.magic('timeit -r1 -n1 f("a " + "b ")')
535 522
536 523
537 524 def test_timeit_arguments():
538 525 "Test valid timeit arguments, should not cause SyntaxError (GH #1269)"
539 526 _ip.magic("timeit ('#')")
540 527
541 528
542 529 def test_timeit_special_syntax():
543 530 "Test %%timeit with IPython special syntax"
544 531 @register_line_magic
545 532 def lmagic(line):
546 533 ip = get_ipython()
547 534 ip.user_ns['lmagic_out'] = line
548 535
549 536 # line mode test
550 537 _ip.run_line_magic('timeit', '-n1 -r1 %lmagic my line')
551 538 nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line')
552 539 # cell mode test
553 540 _ip.run_cell_magic('timeit', '-n1 -r1', '%lmagic my line2')
554 541 nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line2')
555 542
556 543 def test_timeit_return():
557 544 """
558 545 test whether timeit -o returns an object
559 546 """
560 547
561 548 res = _ip.run_line_magic('timeit','-n10 -r10 -o 1')
562 549 assert(res is not None)
563 550
564 551 def test_timeit_quiet():
565 552 """
566 553 test quiet option of timeit magic
567 554 """
568 555 with tt.AssertNotPrints("loops"):
569 556 _ip.run_cell("%timeit -n1 -r1 -q 1")
570 557
571 558 def test_timeit_return_quiet():
572 559 with tt.AssertNotPrints("loops"):
573 560 res = _ip.run_line_magic('timeit', '-n1 -r1 -q -o 1')
574 561 assert (res is not None)
575 562
576 @dec.skipif(sys.version_info[0] >= 3, "no differences with __future__ in py3")
577 def test_timeit_futures():
578 "Test %timeit with __future__ environments"
579 ip = get_ipython()
580 ip.run_cell("from __future__ import division")
581 with tt.AssertPrints('0.25'):
582 ip.run_line_magic('timeit', '-n1 -r1 print(1/4)')
583 ip.compile.reset_compiler_flags()
584 with tt.AssertNotPrints('0.25'):
585 ip.run_line_magic('timeit', '-n1 -r1 print(1/4)')
586
587 563 @dec.skipif(execution.profile is None)
588 564 def test_prun_special_syntax():
589 565 "Test %%prun with IPython special syntax"
590 566 @register_line_magic
591 567 def lmagic(line):
592 568 ip = get_ipython()
593 569 ip.user_ns['lmagic_out'] = line
594 570
595 571 # line mode test
596 572 _ip.run_line_magic('prun', '-q %lmagic my line')
597 573 nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line')
598 574 # cell mode test
599 575 _ip.run_cell_magic('prun', '-q', '%lmagic my line2')
600 576 nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line2')
601 577
602 578 @dec.skipif(execution.profile is None)
603 579 def test_prun_quotes():
604 580 "Test that prun does not clobber string escapes (GH #1302)"
605 581 _ip.magic(r"prun -q x = '\t'")
606 582 nt.assert_equal(_ip.user_ns['x'], '\t')
607 583
608 584 def test_extension():
609 585 # Debugging information for failures of this test
610 586 print('sys.path:')
611 587 for p in sys.path:
612 588 print(' ', p)
613 589 print('CWD', os.getcwd())
614 590
615 591 nt.assert_raises(ImportError, _ip.magic, "load_ext daft_extension")
616 592 daft_path = os.path.join(os.path.dirname(__file__), "daft_extension")
617 593 sys.path.insert(0, daft_path)
618 594 try:
619 595 _ip.user_ns.pop('arq', None)
620 596 invalidate_caches() # Clear import caches
621 597 _ip.magic("load_ext daft_extension")
622 598 nt.assert_equal(_ip.user_ns['arq'], 185)
623 599 _ip.magic("unload_ext daft_extension")
624 600 assert 'arq' not in _ip.user_ns
625 601 finally:
626 602 sys.path.remove(daft_path)
627 603
628 604
629 605 def test_notebook_export_json():
630 606 _ip = get_ipython()
631 607 _ip.history_manager.reset() # Clear any existing history.
632 608 cmds = [u"a=1", u"def b():\n return a**2", u"print('noël, été', b())"]
633 609 for i, cmd in enumerate(cmds, start=1):
634 610 _ip.history_manager.store_inputs(i, cmd)
635 611 with TemporaryDirectory() as td:
636 612 outfile = os.path.join(td, "nb.ipynb")
637 613 _ip.magic("notebook -e %s" % outfile)
638 614
639 615
640 616 class TestEnv(TestCase):
641 617
642 618 def test_env(self):
643 619 env = _ip.magic("env")
644 620 self.assertTrue(isinstance(env, dict))
645 621
646 622 def test_env_get_set_simple(self):
647 623 env = _ip.magic("env var val1")
648 624 self.assertEqual(env, None)
649 625 self.assertEqual(os.environ['var'], 'val1')
650 626 self.assertEqual(_ip.magic("env var"), 'val1')
651 627 env = _ip.magic("env var=val2")
652 628 self.assertEqual(env, None)
653 629 self.assertEqual(os.environ['var'], 'val2')
654 630
655 631 def test_env_get_set_complex(self):
656 632 env = _ip.magic("env var 'val1 '' 'val2")
657 633 self.assertEqual(env, None)
658 634 self.assertEqual(os.environ['var'], "'val1 '' 'val2")
659 635 self.assertEqual(_ip.magic("env var"), "'val1 '' 'val2")
660 636 env = _ip.magic('env var=val2 val3="val4')
661 637 self.assertEqual(env, None)
662 638 self.assertEqual(os.environ['var'], 'val2 val3="val4')
663 639
664 640 def test_env_set_bad_input(self):
665 641 self.assertRaises(UsageError, lambda: _ip.magic("set_env var"))
666 642
667 643 def test_env_set_whitespace(self):
668 644 self.assertRaises(UsageError, lambda: _ip.magic("env var A=B"))
669 645
670 646
671 647 class CellMagicTestCase(TestCase):
672 648
673 649 def check_ident(self, magic):
674 650 # Manually called, we get the result
675 651 out = _ip.run_cell_magic(magic, 'a', 'b')
676 652 nt.assert_equal(out, ('a','b'))
677 653 # Via run_cell, it goes into the user's namespace via displayhook
678 654 _ip.run_cell('%%' + magic +' c\nd')
679 655 nt.assert_equal(_ip.user_ns['_'], ('c','d'))
680 656
681 657 def test_cell_magic_func_deco(self):
682 658 "Cell magic using simple decorator"
683 659 @register_cell_magic
684 660 def cellm(line, cell):
685 661 return line, cell
686 662
687 663 self.check_ident('cellm')
688 664
689 665 def test_cell_magic_reg(self):
690 666 "Cell magic manually registered"
691 667 def cellm(line, cell):
692 668 return line, cell
693 669
694 670 _ip.register_magic_function(cellm, 'cell', 'cellm2')
695 671 self.check_ident('cellm2')
696 672
697 673 def test_cell_magic_class(self):
698 674 "Cell magics declared via a class"
699 675 @magics_class
700 676 class MyMagics(Magics):
701 677
702 678 @cell_magic
703 679 def cellm3(self, line, cell):
704 680 return line, cell
705 681
706 682 _ip.register_magics(MyMagics)
707 683 self.check_ident('cellm3')
708 684
709 685 def test_cell_magic_class2(self):
710 686 "Cell magics declared via a class, #2"
711 687 @magics_class
712 688 class MyMagics2(Magics):
713 689
714 690 @cell_magic('cellm4')
715 691 def cellm33(self, line, cell):
716 692 return line, cell
717 693
718 694 _ip.register_magics(MyMagics2)
719 695 self.check_ident('cellm4')
720 696 # Check that nothing is registered as 'cellm33'
721 697 c33 = _ip.find_cell_magic('cellm33')
722 698 nt.assert_equal(c33, None)
723 699
724 700 def test_file():
725 701 """Basic %%file"""
726 702 ip = get_ipython()
727 703 with TemporaryDirectory() as td:
728 704 fname = os.path.join(td, 'file1')
729 705 ip.run_cell_magic("file", fname, u'\n'.join([
730 706 'line1',
731 707 'line2',
732 708 ]))
733 709 with open(fname) as f:
734 710 s = f.read()
735 711 nt.assert_in('line1\n', s)
736 712 nt.assert_in('line2', s)
737 713
738 714 def test_file_var_expand():
739 715 """%%file $filename"""
740 716 ip = get_ipython()
741 717 with TemporaryDirectory() as td:
742 718 fname = os.path.join(td, 'file1')
743 719 ip.user_ns['filename'] = fname
744 720 ip.run_cell_magic("file", '$filename', u'\n'.join([
745 721 'line1',
746 722 'line2',
747 723 ]))
748 724 with open(fname) as f:
749 725 s = f.read()
750 726 nt.assert_in('line1\n', s)
751 727 nt.assert_in('line2', s)
752 728
753 729 def test_file_unicode():
754 730 """%%file with unicode cell"""
755 731 ip = get_ipython()
756 732 with TemporaryDirectory() as td:
757 733 fname = os.path.join(td, 'file1')
758 734 ip.run_cell_magic("file", fname, u'\n'.join([
759 735 u'liné1',
760 736 u'liné2',
761 737 ]))
762 738 with io.open(fname, encoding='utf-8') as f:
763 739 s = f.read()
764 740 nt.assert_in(u'liné1\n', s)
765 741 nt.assert_in(u'liné2', s)
766 742
767 743 def test_file_amend():
768 744 """%%file -a amends files"""
769 745 ip = get_ipython()
770 746 with TemporaryDirectory() as td:
771 747 fname = os.path.join(td, 'file2')
772 748 ip.run_cell_magic("file", fname, u'\n'.join([
773 749 'line1',
774 750 'line2',
775 751 ]))
776 752 ip.run_cell_magic("file", "-a %s" % fname, u'\n'.join([
777 753 'line3',
778 754 'line4',
779 755 ]))
780 756 with open(fname) as f:
781 757 s = f.read()
782 758 nt.assert_in('line1\n', s)
783 759 nt.assert_in('line3\n', s)
784 760
785 761
786 762 def test_script_config():
787 763 ip = get_ipython()
788 764 ip.config.ScriptMagics.script_magics = ['whoda']
789 765 sm = script.ScriptMagics(shell=ip)
790 766 nt.assert_in('whoda', sm.magics['cell'])
791 767
792 768 @dec.skip_win32
793 769 def test_script_out():
794 770 ip = get_ipython()
795 771 ip.run_cell_magic("script", "--out output sh", "echo 'hi'")
796 772 nt.assert_equal(ip.user_ns['output'], 'hi\n')
797 773
798 774 @dec.skip_win32
799 775 def test_script_err():
800 776 ip = get_ipython()
801 777 ip.run_cell_magic("script", "--err error sh", "echo 'hello' >&2")
802 778 nt.assert_equal(ip.user_ns['error'], 'hello\n')
803 779
804 780 @dec.skip_win32
805 781 def test_script_out_err():
806 782 ip = get_ipython()
807 783 ip.run_cell_magic("script", "--out output --err error sh", "echo 'hi'\necho 'hello' >&2")
808 784 nt.assert_equal(ip.user_ns['output'], 'hi\n')
809 785 nt.assert_equal(ip.user_ns['error'], 'hello\n')
810 786
811 787 @dec.skip_win32
812 788 def test_script_bg_out():
813 789 ip = get_ipython()
814 790 ip.run_cell_magic("script", "--bg --out output sh", "echo 'hi'")
815 791 nt.assert_equal(ip.user_ns['output'].read(), b'hi\n')
816 792
817 793 @dec.skip_win32
818 794 def test_script_bg_err():
819 795 ip = get_ipython()
820 796 ip.run_cell_magic("script", "--bg --err error sh", "echo 'hello' >&2")
821 797 nt.assert_equal(ip.user_ns['error'].read(), b'hello\n')
822 798
823 799 @dec.skip_win32
824 800 def test_script_bg_out_err():
825 801 ip = get_ipython()
826 802 ip.run_cell_magic("script", "--bg --out output --err error sh", "echo 'hi'\necho 'hello' >&2")
827 803 nt.assert_equal(ip.user_ns['output'].read(), b'hi\n')
828 804 nt.assert_equal(ip.user_ns['error'].read(), b'hello\n')
829 805
830 806 def test_script_defaults():
831 807 ip = get_ipython()
832 808 for cmd in ['sh', 'bash', 'perl', 'ruby']:
833 809 try:
834 810 find_cmd(cmd)
835 811 except Exception:
836 812 pass
837 813 else:
838 814 nt.assert_in(cmd, ip.magics_manager.magics['cell'])
839 815
840 816
841 817 @magics_class
842 818 class FooFoo(Magics):
843 819 """class with both %foo and %%foo magics"""
844 820 @line_magic('foo')
845 821 def line_foo(self, line):
846 822 "I am line foo"
847 823 pass
848 824
849 825 @cell_magic("foo")
850 826 def cell_foo(self, line, cell):
851 827 "I am cell foo, not line foo"
852 828 pass
853 829
854 830 def test_line_cell_info():
855 831 """%%foo and %foo magics are distinguishable to inspect"""
856 832 ip = get_ipython()
857 833 ip.magics_manager.register(FooFoo)
858 834 oinfo = ip.object_inspect('foo')
859 835 nt.assert_true(oinfo['found'])
860 836 nt.assert_true(oinfo['ismagic'])
861 837
862 838 oinfo = ip.object_inspect('%%foo')
863 839 nt.assert_true(oinfo['found'])
864 840 nt.assert_true(oinfo['ismagic'])
865 841 nt.assert_equal(oinfo['docstring'], FooFoo.cell_foo.__doc__)
866 842
867 843 oinfo = ip.object_inspect('%foo')
868 844 nt.assert_true(oinfo['found'])
869 845 nt.assert_true(oinfo['ismagic'])
870 846 nt.assert_equal(oinfo['docstring'], FooFoo.line_foo.__doc__)
871 847
872 848 def test_multiple_magics():
873 849 ip = get_ipython()
874 850 foo1 = FooFoo(ip)
875 851 foo2 = FooFoo(ip)
876 852 mm = ip.magics_manager
877 853 mm.register(foo1)
878 854 nt.assert_true(mm.magics['line']['foo'].__self__ is foo1)
879 855 mm.register(foo2)
880 856 nt.assert_true(mm.magics['line']['foo'].__self__ is foo2)
881 857
882 858 def test_alias_magic():
883 859 """Test %alias_magic."""
884 860 ip = get_ipython()
885 861 mm = ip.magics_manager
886 862
887 863 # Basic operation: both cell and line magics are created, if possible.
888 864 ip.run_line_magic('alias_magic', 'timeit_alias timeit')
889 865 nt.assert_in('timeit_alias', mm.magics['line'])
890 866 nt.assert_in('timeit_alias', mm.magics['cell'])
891 867
892 868 # --cell is specified, line magic not created.
893 869 ip.run_line_magic('alias_magic', '--cell timeit_cell_alias timeit')
894 870 nt.assert_not_in('timeit_cell_alias', mm.magics['line'])
895 871 nt.assert_in('timeit_cell_alias', mm.magics['cell'])
896 872
897 873 # Test that line alias is created successfully.
898 874 ip.run_line_magic('alias_magic', '--line env_alias env')
899 875 nt.assert_equal(ip.run_line_magic('env', ''),
900 876 ip.run_line_magic('env_alias', ''))
901 877
902 878 def test_save():
903 879 """Test %save."""
904 880 ip = get_ipython()
905 881 ip.history_manager.reset() # Clear any existing history.
906 882 cmds = [u"a=1", u"def b():\n return a**2", u"print(a, b())"]
907 883 for i, cmd in enumerate(cmds, start=1):
908 884 ip.history_manager.store_inputs(i, cmd)
909 885 with TemporaryDirectory() as tmpdir:
910 886 file = os.path.join(tmpdir, "testsave.py")
911 887 ip.run_line_magic("save", "%s 1-10" % file)
912 888 with open(file) as f:
913 889 content = f.read()
914 890 nt.assert_equal(content.count(cmds[0]), 1)
915 891 nt.assert_in('coding: utf-8', content)
916 892 ip.run_line_magic("save", "-a %s 1-10" % file)
917 893 with open(file) as f:
918 894 content = f.read()
919 895 nt.assert_equal(content.count(cmds[0]), 2)
920 896 nt.assert_in('coding: utf-8', content)
921 897
922 898
923 899 def test_store():
924 900 """Test %store."""
925 901 ip = get_ipython()
926 902 ip.run_line_magic('load_ext', 'storemagic')
927 903
928 904 # make sure the storage is empty
929 905 ip.run_line_magic('store', '-z')
930 906 ip.user_ns['var'] = 42
931 907 ip.run_line_magic('store', 'var')
932 908 ip.user_ns['var'] = 39
933 909 ip.run_line_magic('store', '-r')
934 910 nt.assert_equal(ip.user_ns['var'], 42)
935 911
936 912 ip.run_line_magic('store', '-d var')
937 913 ip.user_ns['var'] = 39
938 914 ip.run_line_magic('store' , '-r')
939 915 nt.assert_equal(ip.user_ns['var'], 39)
940 916
941 917
942 918 def _run_edit_test(arg_s, exp_filename=None,
943 919 exp_lineno=-1,
944 920 exp_contents=None,
945 921 exp_is_temp=None):
946 922 ip = get_ipython()
947 923 M = code.CodeMagics(ip)
948 924 last_call = ['','']
949 925 opts,args = M.parse_options(arg_s,'prxn:')
950 926 filename, lineno, is_temp = M._find_edit_target(ip, args, opts, last_call)
951 927
952 928 if exp_filename is not None:
953 929 nt.assert_equal(exp_filename, filename)
954 930 if exp_contents is not None:
955 931 with io.open(filename, 'r', encoding='utf-8') as f:
956 932 contents = f.read()
957 933 nt.assert_equal(exp_contents, contents)
958 934 if exp_lineno != -1:
959 935 nt.assert_equal(exp_lineno, lineno)
960 936 if exp_is_temp is not None:
961 937 nt.assert_equal(exp_is_temp, is_temp)
962 938
963 939
964 940 def test_edit_interactive():
965 941 """%edit on interactively defined objects"""
966 942 ip = get_ipython()
967 943 n = ip.execution_count
968 944 ip.run_cell(u"def foo(): return 1", store_history=True)
969 945
970 946 try:
971 947 _run_edit_test("foo")
972 948 except code.InteractivelyDefined as e:
973 949 nt.assert_equal(e.index, n)
974 950 else:
975 951 raise AssertionError("Should have raised InteractivelyDefined")
976 952
977 953
978 954 def test_edit_cell():
979 955 """%edit [cell id]"""
980 956 ip = get_ipython()
981 957
982 958 ip.run_cell(u"def foo(): return 1", store_history=True)
983 959
984 960 # test
985 961 _run_edit_test("1", exp_contents=ip.user_ns['In'][1], exp_is_temp=True)
986 962
987 963 def test_bookmark():
988 964 ip = get_ipython()
989 965 ip.run_line_magic('bookmark', 'bmname')
990 966 with tt.AssertPrints('bmname'):
991 967 ip.run_line_magic('bookmark', '-l')
992 968 ip.run_line_magic('bookmark', '-d bmname')
993 969
994 970 def test_ls_magic():
995 971 ip = get_ipython()
996 972 json_formatter = ip.display_formatter.formatters['application/json']
997 973 json_formatter.enabled = True
998 974 lsmagic = ip.magic('lsmagic')
999 975 with warnings.catch_warnings(record=True) as w:
1000 976 j = json_formatter(lsmagic)
1001 977 nt.assert_equal(sorted(j), ['cell', 'line'])
1002 978 nt.assert_equal(w, []) # no warnings
1003 979
1004 980 def test_strip_initial_indent():
1005 981 def sii(s):
1006 982 lines = s.splitlines()
1007 983 return '\n'.join(code.strip_initial_indent(lines))
1008 984
1009 985 nt.assert_equal(sii(" a = 1\nb = 2"), "a = 1\nb = 2")
1010 986 nt.assert_equal(sii(" a\n b\nc"), "a\n b\nc")
1011 987 nt.assert_equal(sii("a\n b"), "a\n b")
@@ -1,210 +1,202 b''
1 1 """Tests for various magic functions specific to the terminal frontend.
2 2
3 3 Needs to be run by nose (to make ipython session available).
4 4 """
5 from __future__ import absolute_import
6 5
7 6 #-----------------------------------------------------------------------------
8 7 # Imports
9 8 #-----------------------------------------------------------------------------
10 9
11 10 import sys
12 11 from unittest import TestCase
13 12
14 13 import nose.tools as nt
15 14
16 15 from IPython.testing import tools as tt
17 16 from IPython.utils.py3compat import PY3
18 17
19 18 if PY3:
20 19 from io import StringIO
21 20 else:
22 21 from StringIO import StringIO
23 22
24 23 #-----------------------------------------------------------------------------
25 24 # Globals
26 25 #-----------------------------------------------------------------------------
27 26 ip = get_ipython()
28 27
29 28 #-----------------------------------------------------------------------------
30 29 # Test functions begin
31 30 #-----------------------------------------------------------------------------
32 31
33 32 def check_cpaste(code, should_fail=False):
34 33 """Execute code via 'cpaste' and ensure it was executed, unless
35 34 should_fail is set.
36 35 """
37 36 ip.user_ns['code_ran'] = False
38 37
39 38 src = StringIO()
40 39 if not hasattr(src, 'encoding'):
41 40 # IPython expects stdin to have an encoding attribute
42 41 src.encoding = None
43 42 src.write(code)
44 43 src.write('\n--\n')
45 44 src.seek(0)
46 45
47 46 stdin_save = sys.stdin
48 47 sys.stdin = src
49 48
50 49 try:
51 50 context = tt.AssertPrints if should_fail else tt.AssertNotPrints
52 51 with context("Traceback (most recent call last)"):
53 52 ip.magic('cpaste')
54 53
55 54 if not should_fail:
56 55 assert ip.user_ns['code_ran'], "%r failed" % code
57 56 finally:
58 57 sys.stdin = stdin_save
59 58
60 PY31 = sys.version_info[:2] == (3,1)
61
62 59 def test_cpaste():
63 60 """Test cpaste magic"""
64 61
65 62 def runf():
66 63 """Marker function: sets a flag when executed.
67 64 """
68 65 ip.user_ns['code_ran'] = True
69 66 return 'runf' # return string so '+ runf()' doesn't result in success
70 67
71 68 tests = {'pass': ["runf()",
72 69 "In [1]: runf()",
73 70 "In [1]: if 1:\n ...: runf()",
74 71 "> > > runf()",
75 72 ">>> runf()",
76 73 " >>> runf()",
77 74 ],
78 75
79 76 'fail': ["1 + runf()",
77 "++ runf()",
80 78 ]}
81
82 # I don't know why this is failing specifically on Python 3.1. I've
83 # checked it manually interactively, but we don't care enough about 3.1
84 # to spend time fiddling with the tests, so we just skip it.
85 if not PY31:
86 tests['fail'].append("++ runf()")
87 79
88 80 ip.user_ns['runf'] = runf
89 81
90 82 for code in tests['pass']:
91 83 check_cpaste(code)
92 84
93 85 for code in tests['fail']:
94 86 check_cpaste(code, should_fail=True)
95 87
96 88
97 89 class PasteTestCase(TestCase):
98 90 """Multiple tests for clipboard pasting"""
99 91
100 92 def paste(self, txt, flags='-q'):
101 93 """Paste input text, by default in quiet mode"""
102 94 ip.hooks.clipboard_get = lambda : txt
103 95 ip.magic('paste '+flags)
104 96
105 97 def setUp(self):
106 98 # Inject fake clipboard hook but save original so we can restore it later
107 99 self.original_clip = ip.hooks.clipboard_get
108 100
109 101 def tearDown(self):
110 102 # Restore original hook
111 103 ip.hooks.clipboard_get = self.original_clip
112 104
113 105 def test_paste(self):
114 106 ip.user_ns.pop('x', None)
115 107 self.paste('x = 1')
116 108 nt.assert_equal(ip.user_ns['x'], 1)
117 109 ip.user_ns.pop('x')
118 110
119 111 def test_paste_pyprompt(self):
120 112 ip.user_ns.pop('x', None)
121 113 self.paste('>>> x=2')
122 114 nt.assert_equal(ip.user_ns['x'], 2)
123 115 ip.user_ns.pop('x')
124 116
125 117 def test_paste_py_multi(self):
126 118 self.paste("""
127 119 >>> x = [1,2,3]
128 120 >>> y = []
129 121 >>> for i in x:
130 122 ... y.append(i**2)
131 123 ...
132 124 """)
133 125 nt.assert_equal(ip.user_ns['x'], [1,2,3])
134 126 nt.assert_equal(ip.user_ns['y'], [1,4,9])
135 127
136 128 def test_paste_py_multi_r(self):
137 129 "Now, test that self.paste -r works"
138 130 self.test_paste_py_multi()
139 131 nt.assert_equal(ip.user_ns.pop('x'), [1,2,3])
140 132 nt.assert_equal(ip.user_ns.pop('y'), [1,4,9])
141 133 nt.assert_false('x' in ip.user_ns)
142 134 ip.magic('paste -r')
143 135 nt.assert_equal(ip.user_ns['x'], [1,2,3])
144 136 nt.assert_equal(ip.user_ns['y'], [1,4,9])
145 137
146 138 def test_paste_email(self):
147 139 "Test pasting of email-quoted contents"
148 140 self.paste("""\
149 141 >> def foo(x):
150 142 >> return x + 1
151 143 >> xx = foo(1.1)""")
152 144 nt.assert_equal(ip.user_ns['xx'], 2.1)
153 145
154 146 def test_paste_email2(self):
155 147 "Email again; some programs add a space also at each quoting level"
156 148 self.paste("""\
157 149 > > def foo(x):
158 150 > > return x + 1
159 151 > > yy = foo(2.1) """)
160 152 nt.assert_equal(ip.user_ns['yy'], 3.1)
161 153
162 154 def test_paste_email_py(self):
163 155 "Email quoting of interactive input"
164 156 self.paste("""\
165 157 >> >>> def f(x):
166 158 >> ... return x+1
167 159 >> ...
168 160 >> >>> zz = f(2.5) """)
169 161 nt.assert_equal(ip.user_ns['zz'], 3.5)
170 162
171 163 def test_paste_echo(self):
172 164 "Also test self.paste echoing, by temporarily faking the writer"
173 165 w = StringIO()
174 166 writer = ip.write
175 167 ip.write = w.write
176 168 code = """
177 169 a = 100
178 170 b = 200"""
179 171 try:
180 172 self.paste(code,'')
181 173 out = w.getvalue()
182 174 finally:
183 175 ip.write = writer
184 176 nt.assert_equal(ip.user_ns['a'], 100)
185 177 nt.assert_equal(ip.user_ns['b'], 200)
186 178 nt.assert_equal(out, code+"\n## -- End pasted text --\n")
187 179
188 180 def test_paste_leading_commas(self):
189 181 "Test multiline strings with leading commas"
190 182 tm = ip.magics_manager.registry['TerminalMagics']
191 183 s = '''\
192 184 a = """
193 185 ,1,2,3
194 186 """'''
195 187 ip.user_ns.pop('foo', None)
196 188 tm.store_or_execute(s, 'foo')
197 189 nt.assert_in('foo', ip.user_ns)
198 190
199 191
200 192 def test_paste_trailing_question(self):
201 193 "Test pasting sources with trailing question marks"
202 194 tm = ip.magics_manager.registry['TerminalMagics']
203 195 s = '''\
204 196 def funcfoo():
205 197 if True: #am i true?
206 198 return 'fooresult'
207 199 '''
208 200 ip.user_ns.pop('funcfoo', None)
209 201 self.paste(s)
210 202 nt.assert_equal(ip.user_ns['funcfoo'](), 'fooresult')
@@ -1,456 +1,454 b''
1 1 """Tests for the object inspection functionality.
2 2 """
3 3
4 4 # Copyright (c) IPython Development Team.
5 5 # Distributed under the terms of the Modified BSD License.
6 6
7 from __future__ import print_function
8 7
9 8 import os
10 9 import re
11 10 import sys
12 11
13 12 import nose.tools as nt
14 13
15 14 from .. import oinspect
16 15 from IPython.core.magic import (Magics, magics_class, line_magic,
17 16 cell_magic, line_cell_magic,
18 17 register_line_magic, register_cell_magic,
19 18 register_line_cell_magic)
20 19 from decorator import decorator
21 20 from IPython.testing.decorators import skipif
22 21 from IPython.testing.tools import AssertPrints
23 22 from IPython.utils.path import compress_user
24 23 from IPython.utils import py3compat
25 24 from IPython.utils.signatures import Signature, Parameter
26 25
27 26
28 27 #-----------------------------------------------------------------------------
29 28 # Globals and constants
30 29 #-----------------------------------------------------------------------------
31 30
32 31 inspector = oinspect.Inspector()
33 32 ip = get_ipython()
34 33
35 34 #-----------------------------------------------------------------------------
36 35 # Local utilities
37 36 #-----------------------------------------------------------------------------
38 37
39 38 # WARNING: since this test checks the line number where a function is
40 39 # defined, if any code is inserted above, the following line will need to be
41 40 # updated. Do NOT insert any whitespace between the next line and the function
42 41 # definition below.
43 THIS_LINE_NUMBER = 43 # Put here the actual number of this line
42 THIS_LINE_NUMBER = 42 # Put here the actual number of this line
44 43
45 44 from unittest import TestCase
46 45
47 46 class Test(TestCase):
48 47
49 48 def test_find_source_lines(self):
50 49 self.assertEqual(oinspect.find_source_lines(Test.test_find_source_lines),
51 50 THIS_LINE_NUMBER+6)
52 51
53 52
54 53 # A couple of utilities to ensure these tests work the same from a source or a
55 54 # binary install
56 55 def pyfile(fname):
57 56 return os.path.normcase(re.sub('.py[co]$', '.py', fname))
58 57
59 58
60 59 def match_pyfiles(f1, f2):
61 60 nt.assert_equal(pyfile(f1), pyfile(f2))
62 61
63 62
64 63 def test_find_file():
65 64 match_pyfiles(oinspect.find_file(test_find_file), os.path.abspath(__file__))
66 65
67 66
68 67 def test_find_file_decorated1():
69 68
70 69 @decorator
71 70 def noop1(f):
72 71 def wrapper():
73 72 return f(*a, **kw)
74 73 return wrapper
75 74
76 75 @noop1
77 76 def f(x):
78 77 "My docstring"
79 78
80 79 match_pyfiles(oinspect.find_file(f), os.path.abspath(__file__))
81 80 nt.assert_equal(f.__doc__, "My docstring")
82 81
83 82
84 83 def test_find_file_decorated2():
85 84
86 85 @decorator
87 86 def noop2(f, *a, **kw):
88 87 return f(*a, **kw)
89 88
90 89 @noop2
91 90 @noop2
92 91 @noop2
93 92 def f(x):
94 93 "My docstring 2"
95 94
96 95 match_pyfiles(oinspect.find_file(f), os.path.abspath(__file__))
97 96 nt.assert_equal(f.__doc__, "My docstring 2")
98 97
99 98
100 99 def test_find_file_magic():
101 100 run = ip.find_line_magic('run')
102 101 nt.assert_not_equal(oinspect.find_file(run), None)
103 102
104 103
105 104 # A few generic objects we can then inspect in the tests below
106 105
107 106 class Call(object):
108 107 """This is the class docstring."""
109 108
110 109 def __init__(self, x, y=1):
111 110 """This is the constructor docstring."""
112 111
113 112 def __call__(self, *a, **kw):
114 113 """This is the call docstring."""
115 114
116 115 def method(self, x, z=2):
117 116 """Some method's docstring"""
118 117
119 118 class HasSignature(object):
120 119 """This is the class docstring."""
121 120 __signature__ = Signature([Parameter('test', Parameter.POSITIONAL_OR_KEYWORD)])
122 121
123 122 def __init__(self, *args):
124 123 """This is the init docstring"""
125 124
126 125
127 126 class SimpleClass(object):
128 127 def method(self, x, z=2):
129 128 """Some method's docstring"""
130 129
131 130
132 131 class OldStyle:
133 132 """An old-style class for testing."""
134 133 pass
135 134
136 135
137 136 def f(x, y=2, *a, **kw):
138 137 """A simple function."""
139 138
140 139
141 140 def g(y, z=3, *a, **kw):
142 141 pass # no docstring
143 142
144 143
145 144 @register_line_magic
146 145 def lmagic(line):
147 146 "A line magic"
148 147
149 148
150 149 @register_cell_magic
151 150 def cmagic(line, cell):
152 151 "A cell magic"
153 152
154 153
155 154 @register_line_cell_magic
156 155 def lcmagic(line, cell=None):
157 156 "A line/cell magic"
158 157
159 158
160 159 @magics_class
161 160 class SimpleMagics(Magics):
162 161 @line_magic
163 162 def Clmagic(self, cline):
164 163 "A class-based line magic"
165 164
166 165 @cell_magic
167 166 def Ccmagic(self, cline, ccell):
168 167 "A class-based cell magic"
169 168
170 169 @line_cell_magic
171 170 def Clcmagic(self, cline, ccell=None):
172 171 "A class-based line/cell magic"
173 172
174 173
175 174 class Awkward(object):
176 175 def __getattr__(self, name):
177 176 raise Exception(name)
178 177
179 178 class NoBoolCall:
180 179 """
181 180 callable with `__bool__` raising should still be inspect-able.
182 181 """
183 182
184 183 def __call__(self):
185 184 """does nothing"""
186 185 pass
187 186
188 187 def __bool__(self):
189 188 """just raise NotImplemented"""
190 189 raise NotImplementedError('Must be implemented')
191 190
192 191
193 192 class SerialLiar(object):
194 193 """Attribute accesses always get another copy of the same class.
195 194
196 195 unittest.mock.call does something similar, but it's not ideal for testing
197 196 as the failure mode is to eat all your RAM. This gives up after 10k levels.
198 197 """
199 198 def __init__(self, max_fibbing_twig, lies_told=0):
200 199 if lies_told > 10000:
201 200 raise RuntimeError('Nose too long, honesty is the best policy')
202 201 self.max_fibbing_twig = max_fibbing_twig
203 202 self.lies_told = lies_told
204 203 max_fibbing_twig[0] = max(max_fibbing_twig[0], lies_told)
205 204
206 205 def __getattr__(self, item):
207 206 return SerialLiar(self.max_fibbing_twig, self.lies_told + 1)
208 207
209 208
210 209 def check_calltip(obj, name, call, docstring):
211 210 """Generic check pattern all calltip tests will use"""
212 211 info = inspector.info(obj, name)
213 212 call_line, ds = oinspect.call_tip(info)
214 213 nt.assert_equal(call_line, call)
215 214 nt.assert_equal(ds, docstring)
216 215
217 216 #-----------------------------------------------------------------------------
218 217 # Tests
219 218 #-----------------------------------------------------------------------------
220 219
221 220 def test_calltip_class():
222 221 check_calltip(Call, 'Call', 'Call(x, y=1)', Call.__init__.__doc__)
223 222
224 223
225 224 def test_calltip_instance():
226 225 c = Call(1)
227 226 check_calltip(c, 'c', 'c(*a, **kw)', c.__call__.__doc__)
228 227
229 228
230 229 def test_calltip_method():
231 230 c = Call(1)
232 231 check_calltip(c.method, 'c.method', 'c.method(x, z=2)', c.method.__doc__)
233 232
234 233
235 234 def test_calltip_function():
236 235 check_calltip(f, 'f', 'f(x, y=2, *a, **kw)', f.__doc__)
237 236
238 237
239 238 def test_calltip_function2():
240 239 check_calltip(g, 'g', 'g(y, z=3, *a, **kw)', '<no docstring>')
241 240
242 241
243 242 @skipif(sys.version_info >= (3, 5))
244 243 def test_calltip_builtin():
245 244 check_calltip(sum, 'sum', None, sum.__doc__)
246 245
247 246
248 247 def test_calltip_line_magic():
249 248 check_calltip(lmagic, 'lmagic', 'lmagic(line)', "A line magic")
250 249
251 250
252 251 def test_calltip_cell_magic():
253 252 check_calltip(cmagic, 'cmagic', 'cmagic(line, cell)', "A cell magic")
254 253
255 254
256 255 def test_calltip_line_cell_magic():
257 256 check_calltip(lcmagic, 'lcmagic', 'lcmagic(line, cell=None)',
258 257 "A line/cell magic")
259 258
260 259
261 260 def test_class_magics():
262 261 cm = SimpleMagics(ip)
263 262 ip.register_magics(cm)
264 263 check_calltip(cm.Clmagic, 'Clmagic', 'Clmagic(cline)',
265 264 "A class-based line magic")
266 265 check_calltip(cm.Ccmagic, 'Ccmagic', 'Ccmagic(cline, ccell)',
267 266 "A class-based cell magic")
268 267 check_calltip(cm.Clcmagic, 'Clcmagic', 'Clcmagic(cline, ccell=None)',
269 268 "A class-based line/cell magic")
270 269
271 270
272 271 def test_info():
273 272 "Check that Inspector.info fills out various fields as expected."
274 273 i = inspector.info(Call, oname='Call')
275 274 nt.assert_equal(i['type_name'], 'type')
276 275 expected_class = str(type(type)) # <class 'type'> (Python 3) or <type 'type'>
277 276 nt.assert_equal(i['base_class'], expected_class)
278 if sys.version_info > (3,):
279 nt.assert_regex(i['string_form'], "<class 'IPython.core.tests.test_oinspect.Call'( at 0x[0-9a-f]{1,9})?>")
277 nt.assert_regex(i['string_form'], "<class 'IPython.core.tests.test_oinspect.Call'( at 0x[0-9a-f]{1,9})?>")
280 278 fname = __file__
281 279 if fname.endswith(".pyc"):
282 280 fname = fname[:-1]
283 281 # case-insensitive comparison needed on some filesystems
284 282 # e.g. Windows:
285 283 nt.assert_equal(i['file'].lower(), compress_user(fname).lower())
286 284 nt.assert_equal(i['definition'], None)
287 285 nt.assert_equal(i['docstring'], Call.__doc__)
288 286 nt.assert_equal(i['source'], None)
289 287 nt.assert_true(i['isclass'])
290 288 _self_py2 = '' if py3compat.PY3 else 'self, '
291 289 nt.assert_equal(i['init_definition'], "Call(%sx, y=1)" % _self_py2)
292 290 nt.assert_equal(i['init_docstring'], Call.__init__.__doc__)
293 291
294 292 i = inspector.info(Call, detail_level=1)
295 293 nt.assert_not_equal(i['source'], None)
296 294 nt.assert_equal(i['docstring'], None)
297 295
298 296 c = Call(1)
299 297 c.__doc__ = "Modified instance docstring"
300 298 i = inspector.info(c)
301 299 nt.assert_equal(i['type_name'], 'Call')
302 300 nt.assert_equal(i['docstring'], "Modified instance docstring")
303 301 nt.assert_equal(i['class_docstring'], Call.__doc__)
304 302 nt.assert_equal(i['init_docstring'], Call.__init__.__doc__)
305 303 nt.assert_equal(i['call_docstring'], Call.__call__.__doc__)
306 304
307 305 # Test old-style classes, which for example may not have an __init__ method.
308 306 if not py3compat.PY3:
309 307 i = inspector.info(OldStyle)
310 308 nt.assert_equal(i['type_name'], 'classobj')
311 309
312 310 i = inspector.info(OldStyle())
313 311 nt.assert_equal(i['type_name'], 'instance')
314 312 nt.assert_equal(i['docstring'], OldStyle.__doc__)
315 313
316 314 def test_class_signature():
317 315 info = inspector.info(HasSignature, 'HasSignature')
318 316 nt.assert_equal(info['init_definition'], "HasSignature(test)")
319 317 nt.assert_equal(info['init_docstring'], HasSignature.__init__.__doc__)
320 318
321 319 def test_info_awkward():
322 320 # Just test that this doesn't throw an error.
323 321 inspector.info(Awkward())
324 322
325 323 def test_bool_raise():
326 324 inspector.info(NoBoolCall())
327 325
328 326 def test_info_serialliar():
329 327 fib_tracker = [0]
330 328 inspector.info(SerialLiar(fib_tracker))
331 329
332 330 # Nested attribute access should be cut off at 100 levels deep to avoid
333 331 # infinite loops: https://github.com/ipython/ipython/issues/9122
334 332 nt.assert_less(fib_tracker[0], 9000)
335 333
336 334 def test_calldef_none():
337 335 # We should ignore __call__ for all of these.
338 336 for obj in [f, SimpleClass().method, any, str.upper]:
339 337 print(obj)
340 338 i = inspector.info(obj)
341 339 nt.assert_is(i['call_def'], None)
342 340
343 341 def f_kwarg(pos, *, kwonly):
344 342 pass
345 343
346 344 def test_definition_kwonlyargs():
347 345 i = inspector.info(f_kwarg, oname='f_kwarg') # analysis:ignore
348 346 nt.assert_equal(i['definition'], "f_kwarg(pos, *, kwonly)")
349 347
350 348 def test_getdoc():
351 349 class A(object):
352 350 """standard docstring"""
353 351 pass
354 352
355 353 class B(object):
356 354 """standard docstring"""
357 355 def getdoc(self):
358 356 return "custom docstring"
359 357
360 358 class C(object):
361 359 """standard docstring"""
362 360 def getdoc(self):
363 361 return None
364 362
365 363 a = A()
366 364 b = B()
367 365 c = C()
368 366
369 367 nt.assert_equal(oinspect.getdoc(a), "standard docstring")
370 368 nt.assert_equal(oinspect.getdoc(b), "custom docstring")
371 369 nt.assert_equal(oinspect.getdoc(c), "standard docstring")
372 370
373 371
374 372 def test_empty_property_has_no_source():
375 373 i = inspector.info(property(), detail_level=1)
376 374 nt.assert_is(i['source'], None)
377 375
378 376
379 377 def test_property_sources():
380 378 import zlib
381 379
382 380 class A(object):
383 381 @property
384 382 def foo(self):
385 383 return 'bar'
386 384
387 385 foo = foo.setter(lambda self, v: setattr(self, 'bar', v))
388 386
389 387 id = property(id)
390 388 compress = property(zlib.compress)
391 389
392 390 i = inspector.info(A.foo, detail_level=1)
393 391 nt.assert_in('def foo(self):', i['source'])
394 392 nt.assert_in('lambda self, v:', i['source'])
395 393
396 394 i = inspector.info(A.id, detail_level=1)
397 395 nt.assert_in('fget = <function id>', i['source'])
398 396
399 397 i = inspector.info(A.compress, detail_level=1)
400 398 nt.assert_in('fget = <function zlib.compress>', i['source'])
401 399
402 400
403 401 def test_property_docstring_is_in_info_for_detail_level_0():
404 402 class A(object):
405 403 @property
406 404 def foobar(self):
407 405 """This is `foobar` property."""
408 406 pass
409 407
410 408 ip.user_ns['a_obj'] = A()
411 409 nt.assert_equals(
412 410 'This is `foobar` property.',
413 411 ip.object_inspect('a_obj.foobar', detail_level=0)['docstring'])
414 412
415 413 ip.user_ns['a_cls'] = A
416 414 nt.assert_equals(
417 415 'This is `foobar` property.',
418 416 ip.object_inspect('a_cls.foobar', detail_level=0)['docstring'])
419 417
420 418
421 419 def test_pdef():
422 420 # See gh-1914
423 421 def foo(): pass
424 422 inspector.pdef(foo, 'foo')
425 423
426 424
427 425 def test_pinfo_nonascii():
428 426 # See gh-1177
429 427 from . import nonascii2
430 428 ip.user_ns['nonascii2'] = nonascii2
431 429 ip._inspect('pinfo', 'nonascii2', detail_level=1)
432 430
433 431
434 432 def test_pinfo_magic():
435 433 with AssertPrints('Docstring:'):
436 434 ip._inspect('pinfo', 'lsmagic', detail_level=0)
437 435
438 436 with AssertPrints('Source:'):
439 437 ip._inspect('pinfo', 'lsmagic', detail_level=1)
440 438
441 439
442 440 def test_init_colors():
443 441 # ensure colors are not present in signature info
444 442 info = inspector.info(HasSignature)
445 443 init_def = info['init_definition']
446 444 nt.assert_not_in('[0m', init_def)
447 445
448 446
449 447 def test_builtin_init():
450 448 info = inspector.info(list)
451 449 init_def = info['init_definition']
452 450 # Python < 3.4 can't get init definition from builtins,
453 451 # but still exercise the inspection in case of error-raising bugs.
454 452 if sys.version_info >= (3,4):
455 453 nt.assert_is_not_none(init_def)
456 454
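
The calltip tests above all funnel through `check_calltip`, which pairs `Inspector.info` with `oinspect.call_tip`. Below is a minimal sketch of that flow outside the test suite, using only the `IPython.core.oinspect` calls the tests themselves exercise; treat it as illustrative rather than as the canonical API::

    from IPython.core import oinspect

    def f(x, y=2, *a, **kw):
        """A simple function."""

    # Inspector.info returns a dict of fields ('definition', 'docstring', ...)
    info = oinspect.Inspector().info(f, 'f')

    # call_tip condenses that dict into a call line plus a docstring
    call_line, docstring = oinspect.call_tip(info)
    print(call_line)   # 'f(x, y=2, *a, **kw)', as asserted in test_calltip_function
    print(docstring)   # 'A simple function.'; falls back to '<no docstring>' when none is set
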
@@ -1,165 +1,164 b''
1 1 # coding: utf-8
2 2 """Tests for profile-related functions.
3 3
4 4 Currently only the startup-dir functionality is tested, but more tests should
5 5 be added for:
6 6
7 7 * ipython profile create
8 8 * ipython profile list
9 9 * ipython profile create --parallel
10 10 * security dir permissions
11 11
12 12 Authors
13 13 -------
14 14
15 15 * MinRK
16 16
17 17 """
18 from __future__ import absolute_import
19 18
20 19 #-----------------------------------------------------------------------------
21 20 # Imports
22 21 #-----------------------------------------------------------------------------
23 22
24 23 import os
25 24 import shutil
26 25 import sys
27 26 import tempfile
28 27
29 28 from unittest import TestCase
30 29
31 30 import nose.tools as nt
32 31
33 32 from IPython.core.profileapp import list_profiles_in, list_bundled_profiles
34 33 from IPython.core.profiledir import ProfileDir
35 34
36 35 from IPython.testing import decorators as dec
37 36 from IPython.testing import tools as tt
38 37 from IPython.utils import py3compat
39 38 from IPython.utils.process import getoutput
40 39 from IPython.utils.tempdir import TemporaryDirectory
41 40
42 41 #-----------------------------------------------------------------------------
43 42 # Globals
44 43 #-----------------------------------------------------------------------------
45 44 TMP_TEST_DIR = tempfile.mkdtemp()
46 45 HOME_TEST_DIR = os.path.join(TMP_TEST_DIR, "home_test_dir")
47 46 IP_TEST_DIR = os.path.join(HOME_TEST_DIR,'.ipython')
48 47
49 48 #
50 49 # Setup/teardown functions/decorators
51 50 #
52 51
53 52 def setup():
54 53 """Setup test environment for the module:
55 54
56 55 - Adds dummy home dir tree
57 56 """
58 57 # Do not mask exceptions here. In particular, catching WindowsError is a
59 58 # problem because that exception is only defined on Windows...
60 59 os.makedirs(IP_TEST_DIR)
61 60
62 61
63 62 def teardown():
64 63 """Teardown test environment for the module:
65 64
66 65 - Remove dummy home dir tree
67 66 """
68 67 # Note: we remove the parent test dir, which is the root of all test
69 68 # subdirs we may have created. Use shutil instead of os.removedirs, so
70 69 # that non-empty directories are all recursively removed.
71 70 shutil.rmtree(TMP_TEST_DIR)
72 71
73 72
74 73 #-----------------------------------------------------------------------------
75 74 # Test functions
76 75 #-----------------------------------------------------------------------------
77 76 def win32_without_pywin32():
78 77 if sys.platform == 'win32':
79 78 try:
80 79 import win32api  # the pywin32 distribution has no top-level 'pywin32' module
81 80 except ImportError:
82 81 return True
83 82 return False
84 83
85 84
86 85 class ProfileStartupTest(TestCase):
87 86 def setUp(self):
88 87 # create profile dir
89 88 self.pd = ProfileDir.create_profile_dir_by_name(IP_TEST_DIR, 'test')
90 89 self.options = ['--ipython-dir', IP_TEST_DIR, '--profile', 'test']
91 90 self.fname = os.path.join(TMP_TEST_DIR, 'test.py')
92 91
93 92 def tearDown(self):
94 93 # We must remove this profile right away so its presence doesn't
95 94 # confuse other tests.
96 95 shutil.rmtree(self.pd.location)
97 96
98 97 def init(self, startup_file, startup, test):
99 98 # write startup python file
100 99 with open(os.path.join(self.pd.startup_dir, startup_file), 'w') as f:
101 100 f.write(startup)
102 101 # write simple test file, to check that the startup file was run
103 102 with open(self.fname, 'w') as f:
104 103 f.write(py3compat.doctest_refactor_print(test))
105 104
106 105 def validate(self, output):
107 106 tt.ipexec_validate(self.fname, output, '', options=self.options)
108 107
109 108 @dec.skipif(win32_without_pywin32(), "Test requires pywin32 on Windows")
110 109 def test_startup_py(self):
111 110 self.init('00-start.py', 'zzz=123\n',
112 111 py3compat.doctest_refactor_print('print zzz\n'))
113 112 self.validate('123')
114 113
115 114 @dec.skipif(win32_without_pywin32(), "Test requires pywin32 on Windows")
116 115 def test_startup_ipy(self):
117 116 self.init('00-start.ipy', '%xmode plain\n', '')
118 117 self.validate('Exception reporting mode: Plain')
119 118
120 119
121 120 def test_list_profiles_in():
122 121 # No need to remove these directories and files, as they will get nuked in
123 122 # the module-level teardown.
124 123 td = tempfile.mkdtemp(dir=TMP_TEST_DIR)
125 124 td = py3compat.str_to_unicode(td)
126 125 for name in ('profile_foo', 'profile_hello', 'not_a_profile'):
127 126 os.mkdir(os.path.join(td, name))
128 127 if dec.unicode_paths:
129 128 os.mkdir(os.path.join(td, u'profile_ünicode'))
130 129
131 130 with open(os.path.join(td, 'profile_file'), 'w') as f:
132 131 f.write("I am not a profile directory")
133 132 profiles = list_profiles_in(td)
134 133
135 134 # unicode normalization can turn u'ünicode' into u'u\0308nicode',
136 135 # so only check for *nicode, and that creating a ProfileDir from the
137 136 # name remains valid
138 137 found_unicode = False
139 138 for p in list(profiles):
140 139 if p.endswith('nicode'):
141 140 pd = ProfileDir.find_profile_dir_by_name(td, p)
142 141 profiles.remove(p)
143 142 found_unicode = True
144 143 break
145 144 if dec.unicode_paths:
146 145 nt.assert_true(found_unicode)
147 146 nt.assert_equal(set(profiles), {'foo', 'hello'})
148 147
149 148
150 149 def test_list_bundled_profiles():
151 150 # This variable will need to be updated when a new profile gets bundled
152 151 bundled = sorted(list_bundled_profiles())
153 152 nt.assert_equal(bundled, [])
154 153
155 154
156 155 def test_profile_create_ipython_dir():
157 156 """ipython profile create respects --ipython-dir"""
158 157 with TemporaryDirectory() as td:
159 158 getoutput([sys.executable, '-m', 'IPython', 'profile', 'create',
160 159 'foo', '--ipython-dir=%s' % td])
161 160 profile_dir = os.path.join(td, 'profile_foo')
162 161 assert os.path.exists(profile_dir)
163 162 ipython_config = os.path.join(profile_dir, 'ipython_config.py')
164 163 assert os.path.exists(ipython_config)
165 164 No newline at end of file
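
test_list_profiles_in deliberately matches only the trailing 'nicode', because the filesystem may hand the directory name back in a different Unicode normalization form than the one used to create it. The effect is easy to reproduce with just the standard library; this snippet is independent of the profile machinery::

    import unicodedata

    composed = 'profile_\u00fcnicode'                       # 'ü' as one code point (NFC)
    decomposed = unicodedata.normalize('NFD', composed)     # 'u' + combining diaeresis

    print(composed == decomposed)         # False: the code point sequences differ
    print(composed.endswith('nicode'))    # True
    print(decomposed.endswith('nicode'))  # True, which is why the test only checks the suffix
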
@@ -1,247 +1,246 b''
1 1 """Tests for pylab tools module.
2 2 """
3 3
4 4 # Copyright (c) IPython Development Team.
5 5 # Distributed under the terms of the Modified BSD License.
6 6
7 from __future__ import print_function
8 7
9 8 from io import UnsupportedOperation, BytesIO
10 9
11 10 import matplotlib
12 11 matplotlib.use('Agg')
13 12 from matplotlib.figure import Figure
14 13
15 14 from nose import SkipTest
16 15 import nose.tools as nt
17 16
18 17 from matplotlib import pyplot as plt
19 18 import numpy as np
20 19
21 20 from IPython.core.getipython import get_ipython
22 21 from IPython.core.interactiveshell import InteractiveShell
23 22 from IPython.core.display import _PNG, _JPEG
24 23 from .. import pylabtools as pt
25 24
26 25 from IPython.testing import decorators as dec
27 26
28 27
29 28 def test_figure_to_svg():
30 29 # simple empty-figure test
31 30 fig = plt.figure()
32 31 nt.assert_equal(pt.print_figure(fig, 'svg'), None)
33 32
34 33 plt.close('all')
35 34
36 35 # simple check for at least svg-looking output
37 36 fig = plt.figure()
38 37 ax = fig.add_subplot(1,1,1)
39 38 ax.plot([1,2,3])
40 39 plt.draw()
41 40 svg = pt.print_figure(fig, 'svg')[:100].lower()
42 41 nt.assert_in(u'doctype svg', svg)
43 42
44 43 def _check_pil_jpeg_bytes():
45 44 """Skip if PIL can't write JPEGs to BytesIO objects"""
46 45 # PIL's JPEG plugin can't write to BytesIO objects
47 46 # Pillow fixes this
48 47 from PIL import Image
49 48 buf = BytesIO()
50 49 img = Image.new("RGB", (4,4))
51 50 try:
52 51 img.save(buf, 'jpeg')
53 52 except Exception as e:
54 53 ename = e.__class__.__name__
55 54 raise SkipTest("PIL can't write JPEG to BytesIO: %s: %s" % (ename, e))
56 55
57 56 @dec.skip_without("PIL.Image")
58 57 def test_figure_to_jpeg():
59 58 _check_pil_jpeg_bytes()
60 59 # simple check for at least jpeg-looking output
61 60 fig = plt.figure()
62 61 ax = fig.add_subplot(1,1,1)
63 62 ax.plot([1,2,3])
64 63 plt.draw()
65 64 jpeg = pt.print_figure(fig, 'jpeg', quality=50)[:100].lower()
66 65 assert jpeg.startswith(_JPEG)
67 66
68 67 def test_retina_figure():
69 68 # simple empty-figure test
70 69 fig = plt.figure()
71 70 nt.assert_equal(pt.retina_figure(fig), None)
72 71 plt.close('all')
73 72
74 73 fig = plt.figure()
75 74 ax = fig.add_subplot(1,1,1)
76 75 ax.plot([1,2,3])
77 76 plt.draw()
78 77 png, md = pt.retina_figure(fig)
79 78 assert png.startswith(_PNG)
80 79 nt.assert_in('width', md)
81 80 nt.assert_in('height', md)
82 81
83 82 _fmt_mime_map = {
84 83 'png': 'image/png',
85 84 'jpeg': 'image/jpeg',
86 85 'pdf': 'application/pdf',
87 86 'retina': 'image/png',
88 87 'svg': 'image/svg+xml',
89 88 }
90 89
91 90 def test_select_figure_formats_str():
92 91 ip = get_ipython()
93 92 for fmt, active_mime in _fmt_mime_map.items():
94 93 pt.select_figure_formats(ip, fmt)
95 94 for mime, f in ip.display_formatter.formatters.items():
96 95 if mime == active_mime:
97 96 nt.assert_in(Figure, f)
98 97 else:
99 98 nt.assert_not_in(Figure, f)
100 99
101 100 def test_select_figure_formats_kwargs():
102 101 ip = get_ipython()
103 102 kwargs = dict(quality=10, bbox_inches='tight')
104 103 pt.select_figure_formats(ip, 'png', **kwargs)
105 104 formatter = ip.display_formatter.formatters['image/png']
106 105 f = formatter.lookup_by_type(Figure)
107 106 cell = f.__closure__[0].cell_contents
108 107 nt.assert_equal(cell, kwargs)
109 108
110 109 # check that the formatter doesn't raise
111 110 fig = plt.figure()
112 111 ax = fig.add_subplot(1,1,1)
113 112 ax.plot([1,2,3])
114 113 plt.draw()
115 114 formatter.enabled = True
116 115 png = formatter(fig)
117 116 assert png.startswith(_PNG)
118 117
119 118 def test_select_figure_formats_set():
120 119 ip = get_ipython()
121 120 for fmts in [
122 121 {'png', 'svg'},
123 122 ['png'],
124 123 ('jpeg', 'pdf', 'retina'),
125 124 {'svg'},
126 125 ]:
127 126 active_mimes = {_fmt_mime_map[fmt] for fmt in fmts}
128 127 pt.select_figure_formats(ip, fmts)
129 128 for mime, f in ip.display_formatter.formatters.items():
130 129 if mime in active_mimes:
131 130 nt.assert_in(Figure, f)
132 131 else:
133 132 nt.assert_not_in(Figure, f)
134 133
135 134 def test_select_figure_formats_bad():
136 135 ip = get_ipython()
137 136 with nt.assert_raises(ValueError):
138 137 pt.select_figure_formats(ip, 'foo')
139 138 with nt.assert_raises(ValueError):
140 139 pt.select_figure_formats(ip, {'png', 'foo'})
141 140 with nt.assert_raises(ValueError):
142 141 pt.select_figure_formats(ip, ['retina', 'pdf', 'bar', 'bad'])
143 142
144 143 def test_import_pylab():
145 144 ns = {}
146 145 pt.import_pylab(ns, import_all=False)
147 146 nt.assert_true('plt' in ns)
148 147 nt.assert_equal(ns['np'], np)
149 148
150 149 class TestPylabSwitch(object):
151 150 class Shell(InteractiveShell):
152 151 def enable_gui(self, gui):
153 152 pass
154 153
155 154 def setup(self):
156 155 import matplotlib
157 156 def act_mpl(backend):
158 157 matplotlib.rcParams['backend'] = backend
159 158
160 159 # Save rcParams since they get modified
161 160 self._saved_rcParams = matplotlib.rcParams
162 161 self._saved_rcParamsOrig = matplotlib.rcParamsOrig
163 162 matplotlib.rcParams = dict(backend='Qt4Agg')
164 163 matplotlib.rcParamsOrig = dict(backend='Qt4Agg')
165 164
166 165 # Mock out functions
167 166 self._save_am = pt.activate_matplotlib
168 167 pt.activate_matplotlib = act_mpl
169 168 self._save_ip = pt.import_pylab
170 169 pt.import_pylab = lambda *a,**kw:None
171 170 self._save_cis = pt.configure_inline_support
172 171 pt.configure_inline_support = lambda *a,**kw:None
173 172
174 173 def teardown(self):
175 174 pt.activate_matplotlib = self._save_am
176 175 pt.import_pylab = self._save_ip
177 176 pt.configure_inline_support = self._save_cis
178 177 import matplotlib
179 178 matplotlib.rcParams = self._saved_rcParams
180 179 matplotlib.rcParamsOrig = self._saved_rcParamsOrig
181 180
182 181 def test_qt(self):
183 182 s = self.Shell()
184 183 gui, backend = s.enable_matplotlib(None)
185 184 nt.assert_equal(gui, 'qt')
186 185 nt.assert_equal(s.pylab_gui_select, 'qt')
187 186
188 187 gui, backend = s.enable_matplotlib('inline')
189 188 nt.assert_equal(gui, 'inline')
190 189 nt.assert_equal(s.pylab_gui_select, 'qt')
191 190
192 191 gui, backend = s.enable_matplotlib('qt')
193 192 nt.assert_equal(gui, 'qt')
194 193 nt.assert_equal(s.pylab_gui_select, 'qt')
195 194
196 195 gui, backend = s.enable_matplotlib('inline')
197 196 nt.assert_equal(gui, 'inline')
198 197 nt.assert_equal(s.pylab_gui_select, 'qt')
199 198
200 199 gui, backend = s.enable_matplotlib()
201 200 nt.assert_equal(gui, 'qt')
202 201 nt.assert_equal(s.pylab_gui_select, 'qt')
203 202
204 203 def test_inline(self):
205 204 s = self.Shell()
206 205 gui, backend = s.enable_matplotlib('inline')
207 206 nt.assert_equal(gui, 'inline')
208 207 nt.assert_equal(s.pylab_gui_select, None)
209 208
210 209 gui, backend = s.enable_matplotlib('inline')
211 210 nt.assert_equal(gui, 'inline')
212 211 nt.assert_equal(s.pylab_gui_select, None)
213 212
214 213 gui, backend = s.enable_matplotlib('qt')
215 214 nt.assert_equal(gui, 'qt')
216 215 nt.assert_equal(s.pylab_gui_select, 'qt')
217 216
218 217 def test_inline_twice(self):
219 218 "Using '%matplotlib inline' twice should not reset formatters"
220 219
221 220 ip = self.Shell()
222 221 gui, backend = ip.enable_matplotlib('inline')
223 222 nt.assert_equal(gui, 'inline')
224 223
225 224 fmts = {'png'}
226 225 active_mimes = {_fmt_mime_map[fmt] for fmt in fmts}
227 226 pt.select_figure_formats(ip, fmts)
228 227
229 228 gui, backend = ip.enable_matplotlib('inline')
230 229 nt.assert_equal(gui, 'inline')
231 230
232 231 for mime, f in ip.display_formatter.formatters.items():
233 232 if mime in active_mimes:
234 233 nt.assert_in(Figure, f)
235 234 else:
236 235 nt.assert_not_in(Figure, f)
237 236
238 237 def test_qt_gtk(self):
239 238 s = self.Shell()
240 239 gui, backend = s.enable_matplotlib('qt')
241 240 nt.assert_equal(gui, 'qt')
242 241 nt.assert_equal(s.pylab_gui_select, 'qt')
243 242
244 243 gui, backend = s.enable_matplotlib('gtk')
245 244 nt.assert_equal(gui, 'qt')
246 245 nt.assert_equal(s.pylab_gui_select, 'qt')
247 246
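
test_select_figure_formats_kwargs verifies that the registered PNG formatter closed over the print keyword arguments by reading `f.__closure__[0].cell_contents`. A small, framework-free sketch of that closure-inspection trick follows; the helper names are made up for illustration::

    def make_formatter(**kwargs):
        def formatter(fig):
            # kwargs is a free variable, so it is stored in formatter.__closure__
            return ('formatted', kwargs)
        return formatter

    f = make_formatter(quality=10, bbox_inches='tight')

    # Map each free variable name to the value captured in its closure cell
    cells = dict(zip(f.__code__.co_freevars,
                     (c.cell_contents for c in f.__closure__)))
    print(cells['kwargs'])   # {'quality': 10, 'bbox_inches': 'tight'}
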
@@ -1,508 +1,507 b''
1 1 # encoding: utf-8
2 2 """Tests for code execution (%run and related), which is particularly tricky.
3 3
4 4 Because of how %run manages namespaces, and the fact that we are trying here to
5 5 verify subtle object deletion and reference counting issues, the %run tests
6 6 will be kept in this separate file. This makes it easier to aggregate in one
7 7 place the tricks needed to handle it; most other magics are much easier to test
8 8 and we do so in a common test_magic file.
9 9 """
10 10
11 11 # Copyright (c) IPython Development Team.
12 12 # Distributed under the terms of the Modified BSD License.
13 13
14 from __future__ import absolute_import
15 14
16 15
17 16 import functools
18 17 import os
19 18 from os.path import join as pjoin
20 19 import random
21 20 import sys
22 21 import textwrap
23 22 import unittest
24 23
25 24 try:
26 25 from unittest.mock import patch
27 26 except ImportError:
28 27 from mock import patch
29 28
30 29 import nose.tools as nt
31 30 from nose import SkipTest
32 31
33 32 from IPython.testing import decorators as dec
34 33 from IPython.testing import tools as tt
35 34 from IPython.utils import py3compat
36 35 from IPython.utils.io import capture_output
37 36 from IPython.utils.tempdir import TemporaryDirectory
38 37 from IPython.core import debugger
39 38
40 39
41 40 def doctest_refbug():
42 41 """Very nasty problem with references held by multiple runs of a script.
43 42 See: https://github.com/ipython/ipython/issues/141
44 43
45 44 In [1]: _ip.clear_main_mod_cache()
46 45 # random
47 46
48 47 In [2]: %run refbug
49 48
50 49 In [3]: call_f()
51 50 lowercased: hello
52 51
53 52 In [4]: %run refbug
54 53
55 54 In [5]: call_f()
56 55 lowercased: hello
57 56 lowercased: hello
58 57 """
59 58
60 59
61 60 def doctest_run_builtins():
62 61 r"""Check that %run doesn't damage __builtins__.
63 62
64 63 In [1]: import tempfile
65 64
66 65 In [2]: bid1 = id(__builtins__)
67 66
68 67 In [3]: fname = tempfile.mkstemp('.py')[1]
69 68
70 69 In [3]: f = open(fname,'w')
71 70
72 71 In [4]: dummy= f.write('pass\n')
73 72
74 73 In [5]: f.flush()
75 74
76 75 In [6]: t1 = type(__builtins__)
77 76
78 77 In [7]: %run $fname
79 78
80 79 In [7]: f.close()
81 80
82 81 In [8]: bid2 = id(__builtins__)
83 82
84 83 In [9]: t2 = type(__builtins__)
85 84
86 85 In [10]: t1 == t2
87 86 Out[10]: True
88 87
89 88 In [10]: bid1 == bid2
90 89 Out[10]: True
91 90
92 91 In [12]: try:
93 92 ....: os.unlink(fname)
94 93 ....: except:
95 94 ....: pass
96 95 ....:
97 96 """
98 97
99 98
100 99 def doctest_run_option_parser():
101 100 r"""Test option parser in %run.
102 101
103 102 In [1]: %run print_argv.py
104 103 []
105 104
106 105 In [2]: %run print_argv.py print*.py
107 106 ['print_argv.py']
108 107
109 108 In [3]: %run -G print_argv.py print*.py
110 109 ['print*.py']
111 110
112 111 """
113 112
114 113
115 114 @dec.skip_win32
116 115 def doctest_run_option_parser_for_posix():
117 116 r"""Test option parser in %run (Linux/OSX specific).
118 117
119 118 You need a double backslash to escape the glob on POSIX systems:
120 119
121 120 In [1]: %run print_argv.py print\\*.py
122 121 ['print*.py']
123 122
124 123 You can't use quotes to escape the glob on POSIX systems:
125 124
126 125 In [2]: %run print_argv.py 'print*.py'
127 126 ['print_argv.py']
128 127
129 128 """
130 129
131 130
132 131 @dec.skip_if_not_win32
133 132 def doctest_run_option_parser_for_windows():
134 133 r"""Test option parser in %run (Windows specific).
135 134
136 135 On Windows, you can't escape ``*`` with a backslash:
137 136
138 137 In [1]: %run print_argv.py print\\*.py
139 138 ['print\\*.py']
140 139
141 140 You can use quotes to escape the glob:
142 141
143 142 In [2]: %run print_argv.py 'print*.py'
144 143 ['print*.py']
145 144
146 145 """
147 146
148 147
149 148 @py3compat.doctest_refactor_print
150 149 def doctest_reset_del():
151 150 """Test that resetting doesn't cause errors in __del__ methods.
152 151
153 152 In [2]: class A(object):
154 153 ...: def __del__(self):
155 154 ...: print str("Hi")
156 155 ...:
157 156
158 157 In [3]: a = A()
159 158
160 159 In [4]: get_ipython().reset()
161 160 Hi
162 161
163 162 In [5]: 1+1
164 163 Out[5]: 2
165 164 """
166 165
167 166 # For some tests, it will be handy to organize them in a class with a common
168 167 # setup that makes a temp file
169 168
170 169 class TestMagicRunPass(tt.TempFileMixin):
171 170
172 171 def setup(self):
173 172 """Make a valid python temp file."""
174 173 self.mktmp('pass\n')
175 174
176 175 def run_tmpfile(self):
177 176 _ip = get_ipython()
178 177 # This fails on Windows if self.tmpfile.name has spaces or "~" in it.
179 178 # See below and ticket https://bugs.launchpad.net/bugs/366353
180 179 _ip.magic('run %s' % self.fname)
181 180
182 181 def run_tmpfile_p(self):
183 182 _ip = get_ipython()
184 183 # This fails on Windows if self.tmpfile.name has spaces or "~" in it.
185 184 # See below and ticket https://bugs.launchpad.net/bugs/366353
186 185 _ip.magic('run -p %s' % self.fname)
187 186
188 187 def test_builtins_id(self):
189 188 """Check that %run doesn't damage __builtins__ """
190 189 _ip = get_ipython()
191 190 # Test that the id of __builtins__ is not modified by %run
192 191 bid1 = id(_ip.user_ns['__builtins__'])
193 192 self.run_tmpfile()
194 193 bid2 = id(_ip.user_ns['__builtins__'])
195 194 nt.assert_equal(bid1, bid2)
196 195
197 196 def test_builtins_type(self):
198 197 """Check that the type of __builtins__ doesn't change with %run.
199 198
200 199 However, the above could pass if __builtins__ was already modified to
201 200 be a dict (it should be a module) by a previous use of %run. So we
202 201 also check explicitly that it really is a module:
203 202 """
204 203 _ip = get_ipython()
205 204 self.run_tmpfile()
206 205 nt.assert_equal(type(_ip.user_ns['__builtins__']),type(sys))
207 206
208 207 def test_run_profile( self ):
209 208 """Test that the option -p, which invokes the profiler, do not
210 209 crash by invoking execfile"""
211 210 get_ipython()
212 211 self.run_tmpfile_p()
213 212
214 213
215 214 class TestMagicRunSimple(tt.TempFileMixin):
216 215
217 216 def test_simpledef(self):
218 217 """Test that simple class definitions work."""
219 218 src = ("class foo: pass\n"
220 219 "def f(): return foo()")
221 220 self.mktmp(src)
222 221 _ip.magic('run %s' % self.fname)
223 222 _ip.run_cell('t = isinstance(f(), foo)')
224 223 nt.assert_true(_ip.user_ns['t'])
225 224
226 225 def test_obj_del(self):
227 226 """Test that object's __del__ methods are called on exit."""
228 227 if sys.platform == 'win32':
229 228 try:
230 229 import win32api
231 230 except ImportError:
232 231 raise SkipTest("Test requires pywin32")
233 232 src = ("class A(object):\n"
234 233 " def __del__(self):\n"
235 234 " print 'object A deleted'\n"
236 235 "a = A()\n")
237 236 self.mktmp(py3compat.doctest_refactor_print(src))
238 237 if dec.module_not_available('sqlite3'):
239 238 err = 'WARNING: IPython History requires SQLite, your history will not be saved\n'
240 239 else:
241 240 err = None
242 241 tt.ipexec_validate(self.fname, 'object A deleted', err)
243 242
244 243 def test_aggressive_namespace_cleanup(self):
245 244 """Test that namespace cleanup is not too aggressive GH-238
246 245
247 246 Returning from another run magic deletes the namespace"""
248 247 # see ticket https://github.com/ipython/ipython/issues/238
249 248
250 249 with tt.TempFileMixin() as empty:
251 250 empty.mktmp('')
252 251 # On Windows, the filename will have \users in it, so we need to use the
253 252 # repr so that the \u becomes \\u.
254 253 src = ("ip = get_ipython()\n"
255 254 "for i in range(5):\n"
256 255 " try:\n"
257 256 " ip.magic(%r)\n"
258 257 " except NameError as e:\n"
259 258 " print(i)\n"
260 259 " break\n" % ('run ' + empty.fname))
261 260 self.mktmp(src)
262 261 _ip.magic('run %s' % self.fname)
263 262 _ip.run_cell('ip == get_ipython()')
264 263 nt.assert_equal(_ip.user_ns['i'], 4)
265 264
266 265 def test_run_second(self):
267 266 """Test that running a second file doesn't clobber the first, gh-3547
268 267 """
269 268 self.mktmp("avar = 1\n"
270 269 "def afunc():\n"
271 270 " return avar\n")
272 271
273 272 with tt.TempFileMixin() as empty:
274 273 empty.mktmp("")
275 274
276 275 _ip.magic('run %s' % self.fname)
277 276 _ip.magic('run %s' % empty.fname)
278 277 nt.assert_equal(_ip.user_ns['afunc'](), 1)
279 278
280 279 @dec.skip_win32
281 280 def test_tclass(self):
282 281 mydir = os.path.dirname(__file__)
283 282 tc = os.path.join(mydir, 'tclass')
284 283 src = ("%%run '%s' C-first\n"
285 284 "%%run '%s' C-second\n"
286 285 "%%run '%s' C-third\n") % (tc, tc, tc)
287 286 self.mktmp(src, '.ipy')
288 287 out = """\
289 288 ARGV 1-: ['C-first']
290 289 ARGV 1-: ['C-second']
291 290 tclass.py: deleting object: C-first
292 291 ARGV 1-: ['C-third']
293 292 tclass.py: deleting object: C-second
294 293 tclass.py: deleting object: C-third
295 294 """
296 295 if dec.module_not_available('sqlite3'):
297 296 err = 'WARNING: IPython History requires SQLite, your history will not be saved\n'
298 297 else:
299 298 err = None
300 299 tt.ipexec_validate(self.fname, out, err)
301 300
302 301 def test_run_i_after_reset(self):
303 302 """Check that %run -i still works after %reset (gh-693)"""
304 303 src = "yy = zz\n"
305 304 self.mktmp(src)
306 305 _ip.run_cell("zz = 23")
307 306 _ip.magic('run -i %s' % self.fname)
308 307 nt.assert_equal(_ip.user_ns['yy'], 23)
309 308 _ip.magic('reset -f')
310 309 _ip.run_cell("zz = 23")
311 310 _ip.magic('run -i %s' % self.fname)
312 311 nt.assert_equal(_ip.user_ns['yy'], 23)
313 312
314 313 def test_unicode(self):
315 314 """Check that files in odd encodings are accepted."""
316 315 mydir = os.path.dirname(__file__)
317 316 na = os.path.join(mydir, 'nonascii.py')
318 317 _ip.magic('run "%s"' % na)
319 318 nt.assert_equal(_ip.user_ns['u'], u'Ўт№Ф')
320 319
321 320 def test_run_py_file_attribute(self):
322 321 """Test handling of `__file__` attribute in `%run <file>.py`."""
323 322 src = "t = __file__\n"
324 323 self.mktmp(src)
325 324 _missing = object()
326 325 file1 = _ip.user_ns.get('__file__', _missing)
327 326 _ip.magic('run %s' % self.fname)
328 327 file2 = _ip.user_ns.get('__file__', _missing)
329 328
330 329 # Check that __file__ was equal to the filename in the script's
331 330 # namespace.
332 331 nt.assert_equal(_ip.user_ns['t'], self.fname)
333 332
334 333 # Check that __file__ was not leaked back into user_ns.
335 334 nt.assert_equal(file1, file2)
336 335
337 336 def test_run_ipy_file_attribute(self):
338 337 """Test handling of `__file__` attribute in `%run <file.ipy>`."""
339 338 src = "t = __file__\n"
340 339 self.mktmp(src, ext='.ipy')
341 340 _missing = object()
342 341 file1 = _ip.user_ns.get('__file__', _missing)
343 342 _ip.magic('run %s' % self.fname)
344 343 file2 = _ip.user_ns.get('__file__', _missing)
345 344
346 345 # Check that __file__ was equal to the filename in the script's
347 346 # namespace.
348 347 nt.assert_equal(_ip.user_ns['t'], self.fname)
349 348
350 349 # Check that __file__ was not leaked back into user_ns.
351 350 nt.assert_equal(file1, file2)
352 351
353 352 def test_run_formatting(self):
354 353 """ Test that %run -t -N<N> does not raise a TypeError for N > 1."""
355 354 src = "pass"
356 355 self.mktmp(src)
357 356 _ip.magic('run -t -N 1 %s' % self.fname)
358 357 _ip.magic('run -t -N 10 %s' % self.fname)
359 358
360 359 def test_ignore_sys_exit(self):
361 360 """Test the -e option to ignore sys.exit()"""
362 361 src = "import sys; sys.exit(1)"
363 362 self.mktmp(src)
364 363 with tt.AssertPrints('SystemExit'):
365 364 _ip.magic('run %s' % self.fname)
366 365
367 366 with tt.AssertNotPrints('SystemExit'):
368 367 _ip.magic('run -e %s' % self.fname)
369 368
370 369 def test_run_nb(self):
371 370 """Test %run notebook.ipynb"""
372 371 from nbformat import v4, writes
373 372 nb = v4.new_notebook(
374 373 cells=[
375 374 v4.new_markdown_cell("The Ultimate Question of Everything"),
376 375 v4.new_code_cell("answer=42")
377 376 ]
378 377 )
379 378 src = writes(nb, version=4)
380 379 self.mktmp(src, ext='.ipynb')
381 380
382 381 _ip.magic("run %s" % self.fname)
383 382
384 383 nt.assert_equal(_ip.user_ns['answer'], 42)
385 384
386 385
387 386
388 387 class TestMagicRunWithPackage(unittest.TestCase):
389 388
390 389 def writefile(self, name, content):
391 390 path = os.path.join(self.tempdir.name, name)
392 391 d = os.path.dirname(path)
393 392 if not os.path.isdir(d):
394 393 os.makedirs(d)
395 394 with open(path, 'w') as f:
396 395 f.write(textwrap.dedent(content))
397 396
398 397 def setUp(self):
399 398 self.package = package = 'tmp{0}'.format(repr(random.random())[2:])
400 399 """Temporary valid python package name."""
401 400
402 401 self.value = int(random.random() * 10000)
403 402
404 403 self.tempdir = TemporaryDirectory()
405 404 self.__orig_cwd = py3compat.getcwd()
406 405 sys.path.insert(0, self.tempdir.name)
407 406
408 407 self.writefile(os.path.join(package, '__init__.py'), '')
409 408 self.writefile(os.path.join(package, 'sub.py'), """
410 409 x = {0!r}
411 410 """.format(self.value))
412 411 self.writefile(os.path.join(package, 'relative.py'), """
413 412 from .sub import x
414 413 """)
415 414 self.writefile(os.path.join(package, 'absolute.py'), """
416 415 from {0}.sub import x
417 416 """.format(package))
418 417
419 418 def tearDown(self):
420 419 os.chdir(self.__orig_cwd)
421 420 sys.path[:] = [p for p in sys.path if p != self.tempdir.name]
422 421 self.tempdir.cleanup()
423 422
424 423 def check_run_submodule(self, submodule, opts=''):
425 424 _ip.user_ns.pop('x', None)
426 425 _ip.magic('run {2} -m {0}.{1}'.format(self.package, submodule, opts))
427 426 self.assertEqual(_ip.user_ns['x'], self.value,
428 427 'Variable `x` is not loaded from module `{0}`.'
429 428 .format(submodule))
430 429
431 430 def test_run_submodule_with_absolute_import(self):
432 431 self.check_run_submodule('absolute')
433 432
434 433 def test_run_submodule_with_relative_import(self):
435 434 """Run submodule that has a relative import statement (#2727)."""
436 435 self.check_run_submodule('relative')
437 436
438 437 def test_prun_submodule_with_absolute_import(self):
439 438 self.check_run_submodule('absolute', '-p')
440 439
441 440 def test_prun_submodule_with_relative_import(self):
442 441 self.check_run_submodule('relative', '-p')
443 442
444 443 def with_fake_debugger(func):
445 444 @functools.wraps(func)
446 445 def wrapper(*args, **kwds):
447 446 with patch.object(debugger.Pdb, 'run', staticmethod(eval)):
448 447 return func(*args, **kwds)
449 448 return wrapper
450 449
451 450 @with_fake_debugger
452 451 def test_debug_run_submodule_with_absolute_import(self):
453 452 self.check_run_submodule('absolute', '-d')
454 453
455 454 @with_fake_debugger
456 455 def test_debug_run_submodule_with_relative_import(self):
457 456 self.check_run_submodule('relative', '-d')
458 457
459 458 def test_run__name__():
460 459 with TemporaryDirectory() as td:
461 460 path = pjoin(td, 'foo.py')
462 461 with open(path, 'w') as f:
463 462 f.write("q = __name__")
464 463
465 464 _ip.user_ns.pop('q', None)
466 465 _ip.magic('run {}'.format(path))
467 466 nt.assert_equal(_ip.user_ns.pop('q'), '__main__')
468 467
469 468 _ip.magic('run -n {}'.format(path))
470 469 nt.assert_equal(_ip.user_ns.pop('q'), 'foo')
471 470
472 471 def test_run_tb():
473 472 """Test traceback offset in %run"""
474 473 with TemporaryDirectory() as td:
475 474 path = pjoin(td, 'foo.py')
476 475 with open(path, 'w') as f:
477 476 f.write('\n'.join([
478 477 "def foo():",
479 478 " return bar()",
480 479 "def bar():",
481 480 " raise RuntimeError('hello!')",
482 481 "foo()",
483 482 ]))
484 483 with capture_output() as io:
485 484 _ip.magic('run {}'.format(path))
486 485 out = io.stdout
487 486 nt.assert_not_in("execfile", out)
488 487 nt.assert_in("RuntimeError", out)
489 488 nt.assert_equal(out.count("---->"), 3)
490 489
491 490 @dec.knownfailureif(sys.platform == 'win32', "writes to io.stdout aren't captured on Windows")
492 491 def test_script_tb():
493 492 """Test traceback offset in `ipython script.py`"""
494 493 with TemporaryDirectory() as td:
495 494 path = pjoin(td, 'foo.py')
496 495 with open(path, 'w') as f:
497 496 f.write('\n'.join([
498 497 "def foo():",
499 498 " return bar()",
500 499 "def bar():",
501 500 " raise RuntimeError('hello!')",
502 501 "foo()",
503 502 ]))
504 503 out, err = tt.ipexec(path)
505 504 nt.assert_not_in("execfile", out)
506 505 nt.assert_in("RuntimeError", out)
507 506 nt.assert_equal(out.count("---->"), 3)
508 507
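
TestMagicRunWithPackage builds a throwaway package on disk, prepends the temporary directory to sys.path, and then runs submodules via `%run -m`. The same scaffolding can be sketched with the standard library alone, with tempfile and runpy standing in for the IPython helpers and the magic::

    import os, sys, runpy, tempfile

    with tempfile.TemporaryDirectory() as td:
        pkg = os.path.join(td, 'tmppkg')
        os.makedirs(pkg)
        open(os.path.join(pkg, '__init__.py'), 'w').close()
        with open(os.path.join(pkg, 'sub.py'), 'w') as f:
            f.write('x = 42\n')

        sys.path.insert(0, td)
        try:
            # runpy.run_module plays the role of '%run -m tmppkg.sub' here
            ns = runpy.run_module('tmppkg.sub', run_name='__main__')
            print(ns['x'])   # 42, mirroring the user_ns['x'] assertion in the test
        finally:
            sys.path.remove(td)
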
@@ -1,356 +1,355 b''
1 1 # encoding: utf-8
2 2 """Tests for IPython.core.ultratb
3 3 """
4 4 import io
5 5 import sys
6 6 import os.path
7 7 from textwrap import dedent
8 8 import traceback
9 9 import unittest
10 10
11 11 try:
12 12 from unittest import mock
13 13 except ImportError:
14 14 import mock # Python 2
15 15
16 16 from ..ultratb import ColorTB, VerboseTB, find_recursion
17 17
18 18
19 19 from IPython.testing import tools as tt
20 20 from IPython.testing.decorators import onlyif_unicode_paths
21 21 from IPython.utils.syspathcontext import prepended_to_syspath
22 22 from IPython.utils.tempdir import TemporaryDirectory
23 23 from IPython.utils.py3compat import PY3
24 24
25 25 ip = get_ipython()
26 26
27 27 file_1 = """1
28 28 2
29 29 3
30 30 def f():
31 31 1/0
32 32 """
33 33
34 34 file_2 = """def f():
35 35 1/0
36 36 """
37 37
38 38 class ChangedPyFileTest(unittest.TestCase):
39 39 def test_changing_py_file(self):
40 40 """Traceback produced if the line where the error occurred is missing?
41 41
42 42 https://github.com/ipython/ipython/issues/1456
43 43 """
44 44 with TemporaryDirectory() as td:
45 45 fname = os.path.join(td, "foo.py")
46 46 with open(fname, "w") as f:
47 47 f.write(file_1)
48 48
49 49 with prepended_to_syspath(td):
50 50 ip.run_cell("import foo")
51 51
52 52 with tt.AssertPrints("ZeroDivisionError"):
53 53 ip.run_cell("foo.f()")
54 54
55 55 # Make the file shorter, so the line of the error is missing.
56 56 with open(fname, "w") as f:
57 57 f.write(file_2)
58 58
59 59 # For some reason, this was failing on the *second* call after
60 60 # changing the file, so we call f() twice.
61 61 with tt.AssertNotPrints("Internal Python error", channel='stderr'):
62 62 with tt.AssertPrints("ZeroDivisionError"):
63 63 ip.run_cell("foo.f()")
64 64 with tt.AssertPrints("ZeroDivisionError"):
65 65 ip.run_cell("foo.f()")
66 66
67 67 iso_8859_5_file = u'''# coding: iso-8859-5
68 68
69 69 def fail():
70 70 """дбИЖ"""
71 71 1/0 # дбИЖ
72 72 '''
73 73
74 74 class NonAsciiTest(unittest.TestCase):
75 75 @onlyif_unicode_paths
76 76 def test_nonascii_path(self):
77 77 # Non-ascii directory name as well.
78 78 with TemporaryDirectory(suffix=u'é') as td:
79 79 fname = os.path.join(td, u"fooé.py")
80 80 with open(fname, "w") as f:
81 81 f.write(file_1)
82 82
83 83 with prepended_to_syspath(td):
84 84 ip.run_cell("import foo")
85 85
86 86 with tt.AssertPrints("ZeroDivisionError"):
87 87 ip.run_cell("foo.f()")
88 88
89 89 def test_iso8859_5(self):
90 90 with TemporaryDirectory() as td:
91 91 fname = os.path.join(td, 'dfghjkl.py')
92 92
93 93 with io.open(fname, 'w', encoding='iso-8859-5') as f:
94 94 f.write(iso_8859_5_file)
95 95
96 96 with prepended_to_syspath(td):
97 97 ip.run_cell("from dfghjkl import fail")
98 98
99 99 with tt.AssertPrints("ZeroDivisionError"):
100 100 with tt.AssertPrints(u'дбИЖ', suppress=False):
101 101 ip.run_cell('fail()')
102 102
103 103 def test_nonascii_msg(self):
104 104 cell = u"raise Exception('é')"
105 105 expected = u"Exception('é')"
106 106 ip.run_cell("%xmode plain")
107 107 with tt.AssertPrints(expected):
108 108 ip.run_cell(cell)
109 109
110 110 ip.run_cell("%xmode verbose")
111 111 with tt.AssertPrints(expected):
112 112 ip.run_cell(cell)
113 113
114 114 ip.run_cell("%xmode context")
115 115 with tt.AssertPrints(expected):
116 116 ip.run_cell(cell)
117 117
118 118
119 119 class NestedGenExprTestCase(unittest.TestCase):
120 120 """
121 121 Regression test for the following issues:
122 122 https://github.com/ipython/ipython/issues/8293
123 123 https://github.com/ipython/ipython/issues/8205
124 124 """
125 125 def test_nested_genexpr(self):
126 126 code = dedent(
127 127 """\
128 128 class SpecificException(Exception):
129 129 pass
130 130
131 131 def foo(x):
132 132 raise SpecificException("Success!")
133 133
134 134 sum(sum(foo(x) for _ in [0]) for x in [0])
135 135 """
136 136 )
137 137 with tt.AssertPrints('SpecificException: Success!', suppress=False):
138 138 ip.run_cell(code)
139 139
140 140
141 141 indentationerror_file = """if True:
142 142 zoon()
143 143 """
144 144
145 145 class IndentationErrorTest(unittest.TestCase):
146 146 def test_indentationerror_shows_line(self):
147 147 # See issue gh-2398
148 148 with tt.AssertPrints("IndentationError"):
149 149 with tt.AssertPrints("zoon()", suppress=False):
150 150 ip.run_cell(indentationerror_file)
151 151
152 152 with TemporaryDirectory() as td:
153 153 fname = os.path.join(td, "foo.py")
154 154 with open(fname, "w") as f:
155 155 f.write(indentationerror_file)
156 156
157 157 with tt.AssertPrints("IndentationError"):
158 158 with tt.AssertPrints("zoon()", suppress=False):
159 159 ip.magic('run %s' % fname)
160 160
161 161 se_file_1 = """1
162 162 2
163 163 7/
164 164 """
165 165
166 166 se_file_2 = """7/
167 167 """
168 168
169 169 class SyntaxErrorTest(unittest.TestCase):
170 170 def test_syntaxerror_without_lineno(self):
171 171 with tt.AssertNotPrints("TypeError"):
172 172 with tt.AssertPrints("line unknown"):
173 173 ip.run_cell("raise SyntaxError()")
174 174
175 175 def test_changing_py_file(self):
176 176 with TemporaryDirectory() as td:
177 177 fname = os.path.join(td, "foo.py")
178 178 with open(fname, 'w') as f:
179 179 f.write(se_file_1)
180 180
181 181 with tt.AssertPrints(["7/", "SyntaxError"]):
182 182 ip.magic("run " + fname)
183 183
184 184 # Modify the file
185 185 with open(fname, 'w') as f:
186 186 f.write(se_file_2)
187 187
188 188 # The SyntaxError should point to the correct line
189 189 with tt.AssertPrints(["7/", "SyntaxError"]):
190 190 ip.magic("run " + fname)
191 191
192 192 def test_non_syntaxerror(self):
193 193 # SyntaxTB may be called with an error other than a SyntaxError
194 194 # See e.g. gh-4361
195 195 try:
196 196 raise ValueError('QWERTY')
197 197 except ValueError:
198 198 with tt.AssertPrints('QWERTY'):
199 199 ip.showsyntaxerror()
200 200
201 201
202 202 class Python3ChainedExceptionsTest(unittest.TestCase):
203 203 DIRECT_CAUSE_ERROR_CODE = """
204 204 try:
205 205 x = 1 + 2
206 206 print(not_defined_here)
207 207 except Exception as e:
208 208 x += 55
209 209 x - 1
210 210 y = {}
211 211 raise KeyError('uh') from e
212 212 """
213 213
214 214 EXCEPTION_DURING_HANDLING_CODE = """
215 215 try:
216 216 x = 1 + 2
217 217 print(not_defined_here)
218 218 except Exception as e:
219 219 x += 55
220 220 x - 1
221 221 y = {}
222 222 raise KeyError('uh')
223 223 """
224 224
225 225 SUPPRESS_CHAINING_CODE = """
226 226 try:
227 227 1/0
228 228 except Exception:
229 229 raise ValueError("Yikes") from None
230 230 """
231 231
232 232 def test_direct_cause_error(self):
233 233 if PY3:
234 234 with tt.AssertPrints(["KeyError", "NameError", "direct cause"]):
235 235 ip.run_cell(self.DIRECT_CAUSE_ERROR_CODE)
236 236
237 237 def test_exception_during_handling_error(self):
238 238 if PY3:
239 239 with tt.AssertPrints(["KeyError", "NameError", "During handling"]):
240 240 ip.run_cell(self.EXCEPTION_DURING_HANDLING_CODE)
241 241
242 242 def test_suppress_exception_chaining(self):
243 243 if PY3:
244 244 with tt.AssertNotPrints("ZeroDivisionError"), \
245 245 tt.AssertPrints("ValueError", suppress=False):
246 246 ip.run_cell(self.SUPPRESS_CHAINING_CODE)
247 247
248 248
249 249 class RecursionTest(unittest.TestCase):
250 250 DEFINITIONS = """
251 251 def non_recurs():
252 252 1/0
253 253
254 254 def r1():
255 255 r1()
256 256
257 257 def r3a():
258 258 r3b()
259 259
260 260 def r3b():
261 261 r3c()
262 262
263 263 def r3c():
264 264 r3a()
265 265
266 266 def r3o1():
267 267 r3a()
268 268
269 269 def r3o2():
270 270 r3o1()
271 271 """
272 272 def setUp(self):
273 273 ip.run_cell(self.DEFINITIONS)
274 274
275 275 def test_no_recursion(self):
276 276 with tt.AssertNotPrints("frames repeated"):
277 277 ip.run_cell("non_recurs()")
278 278
279 279 def test_recursion_one_frame(self):
280 280 with tt.AssertPrints("1 frames repeated"):
281 281 ip.run_cell("r1()")
282 282
283 283 def test_recursion_three_frames(self):
284 284 with tt.AssertPrints("3 frames repeated"):
285 285 ip.run_cell("r3o2()")
286 286
287 287 def test_find_recursion(self):
288 288 captured = []
289 289 def capture_exc(*args, **kwargs):
290 290 captured.append(sys.exc_info())
291 291 with mock.patch.object(ip, 'showtraceback', capture_exc):
292 292 ip.run_cell("r3o2()")
293 293
294 294 self.assertEqual(len(captured), 1)
295 295 etype, evalue, tb = captured[0]
296 296 self.assertIn("recursion", str(evalue))
297 297
298 298 records = ip.InteractiveTB.get_records(tb, 3, ip.InteractiveTB.tb_offset)
299 299 for r in records[:10]:
300 300 print(r[1:4])
301 301
302 302 # The outermost frames should be:
303 303 # 0: the 'cell' that was running when the exception came up
304 304 # 1: r3o2()
305 305 # 2: r3o1()
306 306 # 3: r3a()
307 307 # Then repeating r3b, r3c, r3a
308 308 last_unique, repeat_length = find_recursion(etype, evalue, records)
309 309 self.assertEqual(last_unique, 2)
310 310 self.assertEqual(repeat_length, 3)
311 311
312 312
313 313 #----------------------------------------------------------------------------
314 314
315 315 # module testing (minimal)
316 if sys.version_info > (3,):
317 def test_handlers():
318 def spam(c, d_e):
319 (d, e) = d_e
320 x = c + d
321 y = c * d
322 foo(x, y)
323
324 def foo(a, b, bar=1):
325 eggs(a, b + bar)
326
327 def eggs(f, g, z=globals()):
328 h = f + g
329 i = f - g
330 return h / i
331
332 buff = io.StringIO()
333
334 buff.write('')
335 buff.write('*** Before ***')
336 try:
337 buff.write(spam(1, (2, 3)))
338 except:
339 traceback.print_exc(file=buff)
340
341 handler = ColorTB(ostream=buff)
342 buff.write('*** ColorTB ***')
343 try:
344 buff.write(spam(1, (2, 3)))
345 except:
346 handler(*sys.exc_info())
347 buff.write('')
348
349 handler = VerboseTB(ostream=buff)
350 buff.write('*** VerboseTB ***')
351 try:
352 buff.write(spam(1, (2, 3)))
353 except:
354 handler(*sys.exc_info())
355 buff.write('')
316 def test_handlers():
317 def spam(c, d_e):
318 (d, e) = d_e
319 x = c + d
320 y = c * d
321 foo(x, y)
322
323 def foo(a, b, bar=1):
324 eggs(a, b + bar)
325
326 def eggs(f, g, z=globals()):
327 h = f + g
328 i = f - g
329 return h / i
330
331 buff = io.StringIO()
332
333 buff.write('')
334 buff.write('*** Before ***')
335 try:
336 buff.write(spam(1, (2, 3)))
337 except:
338 traceback.print_exc(file=buff)
339
340 handler = ColorTB(ostream=buff)
341 buff.write('*** ColorTB ***')
342 try:
343 buff.write(spam(1, (2, 3)))
344 except:
345 handler(*sys.exc_info())
346 buff.write('')
347
348 handler = VerboseTB(ostream=buff)
349 buff.write('*** VerboseTB ***')
350 try:
351 buff.write(spam(1, (2, 3)))
352 except:
353 handler(*sys.exc_info())
354 buff.write('')
356 355
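
test_handlers drives ColorTB and VerboseTB directly as callables, pointing their output at an in-memory buffer via the `ostream` keyword. A minimal sketch of that usage outside the test suite, assuming only the calls the test itself makes::

    import io, sys
    from IPython.core.ultratb import ColorTB

    buff = io.StringIO()
    handler = ColorTB(ostream=buff)

    try:
        1 / 0
    except ZeroDivisionError:
        # Format the active exception into the buffer instead of the terminal
        handler(*sys.exc_info())

    print(buff.getvalue())
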
@@ -1,1491 +1,1488 b''
1 1 # -*- coding: utf-8 -*-
2 2 """
3 3 Verbose and colourful traceback formatting.
4 4
5 5 **ColorTB**
6 6
7 7 I've always found it a bit hard to visually parse tracebacks in Python. The
8 8 ColorTB class is a solution to that problem. It colors the different parts of a
9 9 traceback in a manner similar to what you would expect from a syntax-highlighting
10 10 text editor.
11 11
12 12 Installation instructions for ColorTB::
13 13
14 14 import sys,ultratb
15 15 sys.excepthook = ultratb.ColorTB()
16 16
17 17 **VerboseTB**
18 18
19 19 I've also included a port of Ka-Ping Yee's "cgitb.py" that produces all kinds
20 20 of useful info when a traceback occurs. Ping originally had it spit out HTML
21 21 and intended it for CGI programmers, but why should they have all the fun? I
22 22 altered it to spit out colored text to the terminal. It's a bit overwhelming,
23 23 but kind of neat, and maybe useful for long-running programs that you believe
24 24 are bug-free. If a crash *does* occur in that type of program you want details.
25 25 Give it a shot--you'll love it or you'll hate it.
26 26
27 27 .. note::
28 28
29 29 The Verbose mode prints the variables currently visible where the exception
30 30 happened (shortening their strings if too long). This can potentially be
31 31 very slow, if you happen to have a huge data structure whose string
32 32 representation is complex to compute. Your computer may appear to freeze for
33 33 a while with cpu usage at 100%. If this occurs, you can cancel the traceback
34 34 with Ctrl-C (maybe hitting it more than once).
35 35
36 36 If you encounter this kind of situation often, you may want to use the
37 37 Verbose_novars mode instead of the regular Verbose, which avoids formatting
38 38 variables (but otherwise includes the information and context given by
39 39 Verbose).
40 40
41 41 .. note::
42 42
43 43 The verbose mode prints all variables in the stack, which means it can
44 44 potentially leak sensitive information, like access keys or unencrypted
45 45 passwords.
46 46
47 47 Installation instructions for VerboseTB::
48 48
49 49 import sys,ultratb
50 50 sys.excepthook = ultratb.VerboseTB()
51 51
52 52 Note: Much of the code in this module was lifted verbatim from the standard
53 53 library module 'traceback.py' and Ka-Ping Yee's 'cgitb.py'.
54 54
55 55 Color schemes
56 56 -------------
57 57
58 58 The colors are defined in the class TBTools through the use of the
59 59 ColorSchemeTable class. Currently the following exist:
60 60
61 61 - NoColor: allows all of this module to be used in any terminal (the color
62 62 escapes are just dummy blank strings).
63 63
64 64 - Linux: is meant to look good in a terminal like the Linux console (black
65 65 or very dark background).
66 66
67 67 - LightBG: similar to Linux but swaps dark/light colors to be more readable
68 68 in light background terminals.
69 69
70 70 - Neutral: a neutral color scheme that should be readable on both light and
71 71 dark background
72 72
73 73 You can implement other color schemes easily; the syntax is fairly
74 74 self-explanatory. Please send back new schemes you develop to the author for
75 75 possible inclusion in future releases.
76 76
77 77 Inheritance diagram:
78 78
79 79 .. inheritance-diagram:: IPython.core.ultratb
80 80 :parts: 3
81 81 """
82 82
83 83 #*****************************************************************************
84 84 # Copyright (C) 2001 Nathaniel Gray <n8gray@caltech.edu>
85 85 # Copyright (C) 2001-2004 Fernando Perez <fperez@colorado.edu>
86 86 #
87 87 # Distributed under the terms of the BSD License. The full license is in
88 88 # the file COPYING, distributed as part of this software.
89 89 #*****************************************************************************
90 90
91 from __future__ import absolute_import
92 from __future__ import unicode_literals
93 from __future__ import print_function
94 91
95 92 import dis
96 93 import inspect
97 94 import keyword
98 95 import linecache
99 96 import os
100 97 import pydoc
101 98 import re
102 99 import sys
103 100 import time
104 101 import tokenize
105 102 import traceback
106 103 import types
107 104
108 105 try: # Python 2
109 106 generate_tokens = tokenize.generate_tokens
110 107 except AttributeError: # Python 3
111 108 generate_tokens = tokenize.tokenize
112 109
113 110 # For purposes of monkeypatching inspect to fix a bug in it.
114 111 from inspect import getsourcefile, getfile, getmodule, \
115 112 ismodule, isclass, ismethod, isfunction, istraceback, isframe, iscode
116 113
117 114 # IPython's own modules
118 115 from IPython import get_ipython
119 116 from IPython.core import debugger
120 117 from IPython.core.display_trap import DisplayTrap
121 118 from IPython.core.excolors import exception_colors
122 119 from IPython.utils import PyColorize
123 120 from IPython.utils import openpy
124 121 from IPython.utils import path as util_path
125 122 from IPython.utils import py3compat
126 123 from IPython.utils import ulinecache
127 124 from IPython.utils.data import uniq_stable
128 125 from IPython.utils.terminal import get_terminal_size
129 126 from logging import info, error
130 127
131 128 import IPython.utils.colorable as colorable
132 129
133 130 # Globals
134 131 # amount of space to put line numbers before verbose tracebacks
135 132 INDENT_SIZE = 8
136 133
137 134 # Default color scheme. This is used, for example, by the traceback
138 135 # formatter. When running in an actual IPython instance, the user's rc.colors
139 136 # value is used, but having a module global makes this functionality available
140 137 # to users of ultratb who are NOT running inside ipython.
141 138 DEFAULT_SCHEME = 'NoColor'
142 139
143 140 # ---------------------------------------------------------------------------
144 141 # Code begins
145 142
146 143 # Utility functions
147 144 def inspect_error():
148 145 """Print a message about internal inspect errors.
149 146
150 147 These are unfortunately quite common."""
151 148
152 149 error('Internal Python error in the inspect module.\n'
153 150 'Below is the traceback from this internal error.\n')
154 151
155 152
156 153 # This function is a monkeypatch we apply to the Python inspect module. We have
157 154 # now found when it's needed (see discussion on issue gh-1456), and we have a
158 155 # test case (IPython.core.tests.test_ultratb.ChangedPyFileTest) that fails if
159 156 # the monkeypatch is not applied. TK, Aug 2012.
160 157 def findsource(object):
161 158 """Return the entire source file and starting line number for an object.
162 159
163 160 The argument may be a module, class, method, function, traceback, frame,
164 161 or code object. The source code is returned as a list of all the lines
165 162 in the file and the line number indexes a line in that list. An IOError
166 163 is raised if the source code cannot be retrieved.
167 164
168 165 FIXED version with which we monkeypatch the stdlib to work around a bug."""
169 166
170 167 file = getsourcefile(object) or getfile(object)
171 168 # If the object is a frame, then trying to get the globals dict from its
172 169 # module won't work. Instead, the frame object itself has the globals
173 170 # dictionary.
174 171 globals_dict = None
175 172 if inspect.isframe(object):
176 173 # XXX: can this ever be false?
177 174 globals_dict = object.f_globals
178 175 else:
179 176 module = getmodule(object, file)
180 177 if module:
181 178 globals_dict = module.__dict__
182 179 lines = linecache.getlines(file, globals_dict)
183 180 if not lines:
184 181 raise IOError('could not get source code')
185 182
186 183 if ismodule(object):
187 184 return lines, 0
188 185
189 186 if isclass(object):
190 187 name = object.__name__
191 188 pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
192 189 # make some effort to find the best matching class definition:
193 190 # use the one with the least indentation, which is the one
194 191 # that's most probably not inside a function definition.
195 192 candidates = []
196 193 for i in range(len(lines)):
197 194 match = pat.match(lines[i])
198 195 if match:
199 196 # if it's at toplevel, it's already the best one
200 197 if lines[i][0] == 'c':
201 198 return lines, i
202 199 # else add whitespace to candidate list
203 200 candidates.append((match.group(1), i))
204 201 if candidates:
205 202 # this will sort by whitespace, and by line number,
206 203 # less whitespace first
207 204 candidates.sort()
208 205 return lines, candidates[0][1]
209 206 else:
210 207 raise IOError('could not find class definition')
211 208
212 209 if ismethod(object):
213 210 object = object.__func__
214 211 if isfunction(object):
215 212 object = object.__code__
216 213 if istraceback(object):
217 214 object = object.tb_frame
218 215 if isframe(object):
219 216 object = object.f_code
220 217 if iscode(object):
221 218 if not hasattr(object, 'co_firstlineno'):
222 219 raise IOError('could not find function definition')
223 220 pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
224 221 pmatch = pat.match
225 222 # fperez - fix: sometimes, co_firstlineno can give a number larger than
226 223 # the length of lines, which causes an error. Safeguard against that.
227 224 lnum = min(object.co_firstlineno, len(lines)) - 1
228 225 while lnum > 0:
229 226 if pmatch(lines[lnum]):
230 227 break
231 228 lnum -= 1
232 229
233 230 return lines, lnum
234 231 raise IOError('could not find code object')
235 232
236 233
237 234 # This is a patched version of inspect.getargs that applies the (unmerged)
238 235 # patch for http://bugs.python.org/issue14611 by Stefano Taschini. This fixes
239 236 # https://github.com/ipython/ipython/issues/8205 and
240 237 # https://github.com/ipython/ipython/issues/8293
241 238 def getargs(co):
242 239 """Get information about the arguments accepted by a code object.
243 240
244 241 Three things are returned: (args, varargs, varkw), where 'args' is
245 242 a list of argument names (possibly containing nested lists), and
246 243 'varargs' and 'varkw' are the names of the * and ** arguments or None."""
247 244 if not iscode(co):
248 245 raise TypeError('{!r} is not a code object'.format(co))
249 246
250 247 nargs = co.co_argcount
251 248 names = co.co_varnames
252 249 args = list(names[:nargs])
253 250 step = 0
254 251
255 252 # The following acrobatics are for anonymous (tuple) arguments.
256 253 for i in range(nargs):
257 254 if args[i][:1] in ('', '.'):
258 255 stack, remain, count = [], [], []
259 256 while step < len(co.co_code):
260 257 op = ord(co.co_code[step])
261 258 step = step + 1
262 259 if op >= dis.HAVE_ARGUMENT:
263 260 opname = dis.opname[op]
264 261 value = ord(co.co_code[step]) + ord(co.co_code[step+1])*256
265 262 step = step + 2
266 263 if opname in ('UNPACK_TUPLE', 'UNPACK_SEQUENCE'):
267 264 remain.append(value)
268 265 count.append(value)
269 266 elif opname in ('STORE_FAST', 'STORE_DEREF'):
270 267 if op in dis.haslocal:
271 268 stack.append(co.co_varnames[value])
272 269 elif op in dis.hasfree:
273 270 stack.append((co.co_cellvars + co.co_freevars)[value])
274 271 # Special case for sublists of length 1: def foo((bar))
275 272 # doesn't generate the UNPACK_TUPLE bytecode, so if
276 273 # `remain` is empty here, we have such a sublist.
277 274 if not remain:
278 275 stack[0] = [stack[0]]
279 276 break
280 277 else:
281 278 remain[-1] = remain[-1] - 1
282 279 while remain[-1] == 0:
283 280 remain.pop()
284 281 size = count.pop()
285 282 stack[-size:] = [stack[-size:]]
286 283 if not remain:
287 284 break
288 285 remain[-1] = remain[-1] - 1
289 286 if not remain:
290 287 break
291 288 args[i] = stack[0]
292 289
293 290 varargs = None
294 291 if co.co_flags & inspect.CO_VARARGS:
295 292 varargs = co.co_varnames[nargs]
296 293 nargs = nargs + 1
297 294 varkw = None
298 295 if co.co_flags & inspect.CO_VARKEYWORDS:
299 296 varkw = co.co_varnames[nargs]
300 297 return inspect.Arguments(args, varargs, varkw)
301 298
302 299
303 300 # Monkeypatch inspect to apply our bugfix.
304 301 def with_patch_inspect(f):
305 302 """decorator for monkeypatching inspect.findsource"""
306 303
307 304 def wrapped(*args, **kwargs):
308 305 save_findsource = inspect.findsource
309 306 save_getargs = inspect.getargs
310 307 inspect.findsource = findsource
311 308 inspect.getargs = getargs
312 309 try:
313 310 return f(*args, **kwargs)
314 311 finally:
315 312 inspect.findsource = save_findsource
316 313 inspect.getargs = save_getargs
317 314
318 315 return wrapped
319 316
320 317
321 318 if py3compat.PY3:
322 319 fixed_getargvalues = inspect.getargvalues
323 320 else:
324 321 # Fixes for https://github.com/ipython/ipython/issues/8293
325 322 # and https://github.com/ipython/ipython/issues/8205.
326 323 # The relevant bug is caused by failure to correctly handle anonymous tuple
327 324 # unpacking, which only exists in Python 2.
328 325 fixed_getargvalues = with_patch_inspect(inspect.getargvalues)
329 326
330 327
331 328 def fix_frame_records_filenames(records):
332 329 """Try to fix the filenames in each record from inspect.getinnerframes().
333 330
334 331 Particularly, modules loaded from within zip files have useless filenames
335 332 attached to their code object, and inspect.getinnerframes() just uses it.
336 333 """
337 334 fixed_records = []
338 335 for frame, filename, line_no, func_name, lines, index in records:
339 336 # Look inside the frame's globals dictionary for __file__,
340 337 # which should be better. However, keep Cython filenames since
341 338 # we prefer the source filenames over the compiled .so file.
342 339 filename = py3compat.cast_unicode_py2(filename, "utf-8")
343 340 if not filename.endswith(('.pyx', '.pxd', '.pxi')):
344 341 better_fn = frame.f_globals.get('__file__', None)
345 342 if isinstance(better_fn, str):
346 343 # Check the type just in case someone did something weird with
347 344 # __file__. It might also be None if the error occurred during
348 345 # import.
349 346 filename = better_fn
350 347 fixed_records.append((frame, filename, line_no, func_name, lines, index))
351 348 return fixed_records
352 349
353 350
354 351 @with_patch_inspect
355 352 def _fixed_getinnerframes(etb, context=1, tb_offset=0):
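    # Positions of the line number, context lines and current-line index inside
    # the (frame, filename, lineno, func_name, lines, index) records returned by
    # inspect.getinnerframes().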
356 353 LNUM_POS, LINES_POS, INDEX_POS = 2, 4, 5
357 354
358 355 records = fix_frame_records_filenames(inspect.getinnerframes(etb, context))
359 356 # If the error is at the console, don't build any context, since it would
360 357 # otherwise produce 5 blank lines printed out (there is no file at the
361 358 # console)
362 359 rec_check = records[tb_offset:]
363 360 try:
364 361 rname = rec_check[0][1]
365 362 if rname == '<ipython console>' or rname.endswith('<string>'):
366 363 return rec_check
367 364 except IndexError:
368 365 pass
369 366
370 367 aux = traceback.extract_tb(etb)
371 368 assert len(records) == len(aux)
372 369 for i, (file, lnum, _, _) in zip(range(len(records)), aux):
373 370 maybeStart = lnum - 1 - context // 2
374 371 start = max(maybeStart, 0)
375 372 end = start + context
376 373 lines = ulinecache.getlines(file)[start:end]
377 374 buf = list(records[i])
378 375 buf[LNUM_POS] = lnum
379 376 buf[INDEX_POS] = lnum - 1 - start
380 377 buf[LINES_POS] = lines
381 378 records[i] = tuple(buf)
382 379 return records[tb_offset:]
383 380
384 381 # Helper function -- largely belongs to VerboseTB, but we need the same
385 382 # functionality to produce a pseudo verbose TB for SyntaxErrors, so that they
386 383 # can be recognized properly by ipython.el's py-traceback-line-re
387 384 # (SyntaxErrors have to be treated specially because they have no traceback)
388 385
389 386
390 387 def _format_traceback_lines(lnum, index, lines, Colors, lvals=None, _line_format=(lambda x, _: (x, None))):
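    # Render the source lines surrounding the error with aligned line numbers:
    # the line at `lnum` is highlighted and, when `lvals` is given, followed by
    # the formatted local variables. `_line_format` is expected to behave like
    # PyColorize.Parser.format2 and return a (formatted_line, error) pair.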
391 388 numbers_width = INDENT_SIZE - 1
392 389 res = []
393 390 i = lnum - index
394 391
395 392 for line in lines:
396 393 line = py3compat.cast_unicode(line)
397 394
398 395 new_line, err = _line_format(line, 'str')
399 396 if not err: line = new_line
400 397
401 398 if i == lnum:
402 399 # This is the line with the error
403 400 pad = numbers_width - len(str(i))
404 401 num = '%s%s' % (debugger.make_arrow(pad), str(lnum))
405 402 line = '%s%s%s %s%s' % (Colors.linenoEm, num,
406 403 Colors.line, line, Colors.Normal)
407 404 else:
408 405 num = '%*s' % (numbers_width, i)
409 406 line = '%s%s%s %s' % (Colors.lineno, num,
410 407 Colors.Normal, line)
411 408
412 409 res.append(line)
413 410 if lvals and i == lnum:
414 411 res.append(lvals + '\n')
415 412 i = i + 1
416 413 return res
417 414
418 415 def is_recursion_error(etype, value, records):
419 416 try:
420 417 # RecursionError is new in Python 3.5
421 418 recursion_error_type = RecursionError
422 419 except NameError:
423 420 recursion_error_type = RuntimeError
424 421
425 422 # The default recursion limit is 1000, but some of that will be taken up
426 423 # by stack frames in IPython itself. >500 frames probably indicates
427 424 # a recursion error.
428 425 return (etype is recursion_error_type) \
429 426 and "recursion" in str(value).lower() \
430 427 and len(records) > 500
431 428
432 429 def find_recursion(etype, value, records):
433 430 """Identify the repeating stack frames from a RecursionError traceback
434 431
435 432 'records' is a list as returned by VerboseTB.get_records()
436 433
437 434 Returns (last_unique, repeat_length)
438 435 """
439 436 # This involves a bit of guesswork - we want to show enough of the traceback
440 437 # to indicate where the recursion is occurring. We guess that the innermost
441 438 # quarter of the traceback (250 frames by default) is repeats, and find the
442 439 # first frame (from in to out) that looks different.
443 440 if not is_recursion_error(etype, value, records):
444 441 return len(records), 0
445 442
446 443 # Select filename, lineno, func_name to track frames with
447 444 records = [r[1:4] for r in records]
448 445 inner_frames = records[-(len(records)//4):]
449 446 frames_repeated = set(inner_frames)
450 447
451 448 last_seen_at = {}
452 449 longest_repeat = 0
453 450 i = len(records)
454 451 for frame in reversed(records):
455 452 i -= 1
456 453 if frame not in frames_repeated:
457 454 last_unique = i
458 455 break
459 456
460 457 if frame in last_seen_at:
461 458 distance = last_seen_at[frame] - i
462 459 longest_repeat = max(longest_repeat, distance)
463 460
464 461 last_seen_at[frame] = i
465 462 else:
466 463 last_unique = 0 # The whole traceback was recursion
467 464
468 465 return last_unique, longest_repeat
469 466
470 467 #---------------------------------------------------------------------------
471 468 # Module classes
472 469 class TBTools(colorable.Colorable):
473 470 """Basic tools used by all traceback printer classes."""
474 471
475 472 # Number of frames to skip when reporting tracebacks
476 473 tb_offset = 0
477 474
478 475 def __init__(self, color_scheme='NoColor', call_pdb=False, ostream=None, parent=None, config=None):
479 476 # Whether to call the interactive pdb debugger after printing
480 477 # tracebacks or not
481 478 super(TBTools, self).__init__(parent=parent, config=config)
482 479 self.call_pdb = call_pdb
483 480
484 481 # Output stream to write to. Note that we store the original value in
485 482 # a private attribute and then make the public ostream a property, so
486 483 # that we can delay accessing sys.stdout until runtime. The way
487 484 # things are written now, the sys.stdout object is dynamically managed
488 485 # so a reference to it should NEVER be stored statically. This
489 486 # property approach confines this detail to a single location, and all
490 487 # subclasses can simply access self.ostream for writing.
491 488 self._ostream = ostream
492 489
493 490 # Create color table
494 491 self.color_scheme_table = exception_colors()
495 492
496 493 self.set_colors(color_scheme)
497 494 self.old_scheme = color_scheme # save initial value for toggles
498 495
499 496 if call_pdb:
500 497 self.pdb = debugger.Pdb()
501 498 else:
502 499 self.pdb = None
503 500
504 501 def _get_ostream(self):
505 502 """Output stream that exceptions are written to.
506 503
507 504 Valid values are:
508 505
509 506 - None: the default, which means that IPython will dynamically resolve
510 507 to sys.stdout. This ensures compatibility with most tools, including
511 508 Windows (where plain stdout doesn't recognize ANSI escapes).
512 509
513 510 - Any object with 'write' and 'flush' attributes.
514 511 """
515 512 return sys.stdout if self._ostream is None else self._ostream
516 513
517 514 def _set_ostream(self, val):
518 515 assert val is None or (hasattr(val, 'write') and hasattr(val, 'flush'))
519 516 self._ostream = val
520 517
521 518 ostream = property(_get_ostream, _set_ostream)
522 519
523 520 def set_colors(self, *args, **kw):
524 521 """Shorthand access to the color table scheme selector method."""
525 522
526 523 # Set own color table
527 524 self.color_scheme_table.set_active_scheme(*args, **kw)
528 525 # for convenience, set Colors to the active scheme
529 526 self.Colors = self.color_scheme_table.active_colors
530 527 # Also set colors of debugger
531 528 if hasattr(self, 'pdb') and self.pdb is not None:
532 529 self.pdb.set_colors(*args, **kw)
533 530
534 531 def color_toggle(self):
535 532 """Toggle between the currently active color scheme and NoColor."""
536 533
537 534 if self.color_scheme_table.active_scheme_name == 'NoColor':
538 535 self.color_scheme_table.set_active_scheme(self.old_scheme)
539 536 self.Colors = self.color_scheme_table.active_colors
540 537 else:
541 538 self.old_scheme = self.color_scheme_table.active_scheme_name
542 539 self.color_scheme_table.set_active_scheme('NoColor')
543 540 self.Colors = self.color_scheme_table.active_colors
544 541
545 542 def stb2text(self, stb):
546 543 """Convert a structured traceback (a list) to a string."""
547 544 return '\n'.join(stb)
548 545
549 546 def text(self, etype, value, tb, tb_offset=None, context=5):
550 547 """Return formatted traceback.
551 548
552 549 Subclasses may override this if they add extra arguments.
553 550 """
554 551 tb_list = self.structured_traceback(etype, value, tb,
555 552 tb_offset, context)
556 553 return self.stb2text(tb_list)
557 554
558 555 def structured_traceback(self, etype, evalue, tb, tb_offset=None,
559 556 context=5, mode=None):
560 557 """Return a list of traceback frames.
561 558
562 559 Must be implemented by each class.
563 560 """
564 561 raise NotImplementedError()
565 562
566 563
567 564 #---------------------------------------------------------------------------
568 565 class ListTB(TBTools):
569 566 """Print traceback information from a traceback list, with optional color.
570 567
571 568 Calling requires 3 arguments: (etype, evalue, elist)
572 569 as would be obtained by::
573 570
574 571 etype, evalue, tb = sys.exc_info()
575 572 if tb:
576 573 elist = traceback.extract_tb(tb)
577 574 else:
578 575 elist = None
579 576
580 577 It can thus be used by programs which need to process the traceback before
581 578 printing (such as console replacements based on the code module from the
582 579 standard library).
583 580
584 581 Because they are meant to be called without a full traceback (only a
585 582 list), instances of this class can't call the interactive pdb debugger."""
586 583
587 584 def __init__(self, color_scheme='NoColor', call_pdb=False, ostream=None, parent=None, config=None):
588 585 TBTools.__init__(self, color_scheme=color_scheme, call_pdb=call_pdb,
589 586 ostream=ostream, parent=parent,config=config)
590 587
591 588 def __call__(self, etype, value, elist):
592 589 self.ostream.flush()
593 590 self.ostream.write(self.text(etype, value, elist))
594 591 self.ostream.write('\n')
595 592
596 593 def structured_traceback(self, etype, value, elist, tb_offset=None,
597 594 context=5):
598 595 """Return a color formatted string with the traceback info.
599 596
600 597 Parameters
601 598 ----------
602 599 etype : exception type
603 600 Type of the exception raised.
604 601
605 602 value : object
606 603 Data stored in the exception
607 604
608 605 elist : list
609 606 List of frames, see class docstring for details.
610 607
611 608 tb_offset : int, optional
612 609 Number of frames in the traceback to skip. If not given, the
613 610 instance value is used (set in constructor).
614 611
615 612 context : int, optional
616 613 Number of lines of context information to print.
617 614
618 615 Returns
619 616 -------
620 617 String with formatted exception.
621 618 """
622 619 tb_offset = self.tb_offset if tb_offset is None else tb_offset
623 620 Colors = self.Colors
624 621 out_list = []
625 622 if elist:
626 623
627 624 if tb_offset and len(elist) > tb_offset:
628 625 elist = elist[tb_offset:]
629 626
630 627 out_list.append('Traceback %s(most recent call last)%s:' %
631 628 (Colors.normalEm, Colors.Normal) + '\n')
632 629 out_list.extend(self._format_list(elist))
633 630 # The exception info should be a single entry in the list.
634 631 lines = ''.join(self._format_exception_only(etype, value))
635 632 out_list.append(lines)
636 633
637 634 # Note: this code originally read:
638 635
639 636 ## for line in lines[:-1]:
640 637 ## out_list.append(" "+line)
641 638 ## out_list.append(lines[-1])
642 639
643 640 # This means it was indenting everything but the last line by a little
644 641 # bit. I've disabled this for now, but if we see ugliness somewhere we
645 642 # can restore it.
646 643
647 644 return out_list
648 645
649 646 def _format_list(self, extracted_list):
650 647 """Format a list of traceback entry tuples for printing.
651 648
652 649 Given a list of tuples as returned by extract_tb() or
653 650 extract_stack(), return a list of strings ready for printing.
654 651 Each string in the resulting list corresponds to the item with the
655 652 same index in the argument list. Each string ends in a newline;
656 653 the strings may contain internal newlines as well, for those items
657 654 whose source text line is not None.
658 655
659 656 Lifted almost verbatim from traceback.py
660 657 """
661 658
662 659 Colors = self.Colors
663 660 list = []
664 661 for filename, lineno, name, line in extracted_list[:-1]:
665 662 item = ' File %s"%s"%s, line %s%d%s, in %s%s%s\n' % \
666 663 (Colors.filename, py3compat.cast_unicode_py2(filename, "utf-8"), Colors.Normal,
667 664 Colors.lineno, lineno, Colors.Normal,
668 665 Colors.name, py3compat.cast_unicode_py2(name, "utf-8"), Colors.Normal)
669 666 if line:
670 667 item += ' %s\n' % line.strip()
671 668 list.append(item)
672 669 # Emphasize the last entry
673 670 filename, lineno, name, line = extracted_list[-1]
674 671 item = '%s File %s"%s"%s, line %s%d%s, in %s%s%s%s\n' % \
675 672 (Colors.normalEm,
676 673 Colors.filenameEm, py3compat.cast_unicode_py2(filename, "utf-8"), Colors.normalEm,
677 674 Colors.linenoEm, lineno, Colors.normalEm,
678 675 Colors.nameEm, py3compat.cast_unicode_py2(name, "utf-8"), Colors.normalEm,
679 676 Colors.Normal)
680 677 if line:
681 678 item += '%s %s%s\n' % (Colors.line, line.strip(),
682 679 Colors.Normal)
683 680 list.append(item)
684 681 return list
685 682
686 683 def _format_exception_only(self, etype, value):
687 684 """Format the exception part of a traceback.
688 685
689 686 The arguments are the exception type and value such as given by
690 687 sys.exc_info()[:2]. The return value is a list of strings, each ending
691 688 in a newline. Normally, the list contains a single string; however,
692 689 for SyntaxError exceptions, it contains several lines that (when
693 690 printed) display detailed information about where the syntax error
694 691 occurred. The message indicating which exception occurred is
695 692 always the last string in the list.
696 693
697 694 Also lifted nearly verbatim from traceback.py
698 695 """
699 696 have_filedata = False
700 697 Colors = self.Colors
701 698 list = []
702 699 stype = py3compat.cast_unicode(Colors.excName + etype.__name__ + Colors.Normal)
703 700 if value is None:
704 701 # Not sure if this can still happen in Python 2.6 and above
705 702 list.append(stype + '\n')
706 703 else:
707 704 if issubclass(etype, SyntaxError):
708 705 have_filedata = True
709 706 if not value.filename: value.filename = "<string>"
710 707 if value.lineno:
711 708 lineno = value.lineno
712 709 textline = ulinecache.getline(value.filename, value.lineno)
713 710 else:
714 711 lineno = 'unknown'
715 712 textline = ''
716 713 list.append('%s File %s"%s"%s, line %s%s%s\n' % \
717 714 (Colors.normalEm,
718 715 Colors.filenameEm, py3compat.cast_unicode(value.filename), Colors.normalEm,
719 716 Colors.linenoEm, lineno, Colors.Normal ))
720 717 if textline == '':
721 718 textline = py3compat.cast_unicode(value.text, "utf-8")
722 719
723 720 if textline is not None:
724 721 i = 0
725 722 while i < len(textline) and textline[i].isspace():
726 723 i += 1
727 724 list.append('%s %s%s\n' % (Colors.line,
728 725 textline.strip(),
729 726 Colors.Normal))
730 727 if value.offset is not None:
731 728 s = ' '
732 729 for c in textline[i:value.offset - 1]:
733 730 if c.isspace():
734 731 s += c
735 732 else:
736 733 s += ' '
737 734 list.append('%s%s^%s\n' % (Colors.caret, s,
738 735 Colors.Normal))
739 736
740 737 try:
741 738 s = value.msg
742 739 except Exception:
743 740 s = self._some_str(value)
744 741 if s:
745 742 list.append('%s%s:%s %s\n' % (stype, Colors.excName,
746 743 Colors.Normal, s))
747 744 else:
748 745 list.append('%s\n' % stype)
749 746
750 747 # sync with user hooks
751 748 if have_filedata:
752 749 ipinst = get_ipython()
753 750 if ipinst is not None:
754 751 ipinst.hooks.synchronize_with_editor(value.filename, value.lineno, 0)
755 752
756 753 return list
757 754
758 755 def get_exception_only(self, etype, value):
759 756 """Only print the exception type and message, without a traceback.
760 757
761 758 Parameters
762 759 ----------
763 760 etype : exception type
764 761 value : exception value
765 762 """
766 763 return ListTB.structured_traceback(self, etype, value, [])
767 764
768 765 def show_exception_only(self, etype, evalue):
769 766 """Only print the exception type and message, without a traceback.
770 767
771 768 Parameters
772 769 ----------
773 770 etype : exception type
774 771 value : exception value
775 772 """
776 773 # This method needs to use __call__ from *this* class, not the one from
777 774 # a subclass whose signature or behavior may be different
778 775 ostream = self.ostream
779 776 ostream.flush()
780 777 ostream.write('\n'.join(self.get_exception_only(etype, evalue)))
781 778 ostream.flush()
782 779
783 780 def _some_str(self, value):
784 781 # Lifted from traceback.py
785 782 try:
786 783 return py3compat.cast_unicode(str(value))
787 784 except:
788 785 return u'<unprintable %s object>' % type(value).__name__
789 786
790 787
791 788 #----------------------------------------------------------------------------
792 789 class VerboseTB(TBTools):
793 790 """A port of Ka-Ping Yee's cgitb.py module that outputs color text instead
794 791 of HTML. Requires inspect and pydoc. Crazy, man.
795 792
796 793 Modified version which optionally strips the topmost entries from the
797 794 traceback, to be used with alternate interpreters (because their own code
798 795 would appear in the traceback)."""
799 796
800 797 def __init__(self, color_scheme='Linux', call_pdb=False, ostream=None,
801 798 tb_offset=0, long_header=False, include_vars=True,
802 799 check_cache=None, debugger_cls = None,
803 800 parent=None, config=None):
804 801 """Specify traceback offset, headers and color scheme.
805 802
806 803 Define how many frames to drop from the tracebacks. Calling it with
807 804 tb_offset=1 allows use of this handler in interpreters which will have
808 805 their own code at the top of the traceback (VerboseTB will first
809 806 remove that frame before printing the traceback info)."""
810 807 TBTools.__init__(self, color_scheme=color_scheme, call_pdb=call_pdb,
811 808 ostream=ostream, parent=parent, config=config)
812 809 self.tb_offset = tb_offset
813 810 self.long_header = long_header
814 811 self.include_vars = include_vars
815 812 # By default we use linecache.checkcache, but the user can provide a
816 813 # different check_cache implementation. This is used by the IPython
817 814 # kernel to provide tracebacks for interactive code that is cached
818 815 # by a compiler instance that flushes the linecache but preserves its
819 816 # own code cache.
820 817 if check_cache is None:
821 818 check_cache = linecache.checkcache
822 819 self.check_cache = check_cache
823 820
824 821 self.debugger_cls = debugger_cls or debugger.Pdb
825 822
826 823 def format_records(self, records, last_unique, recursion_repeat):
827 824 """Format the stack frames of the traceback"""
828 825 frames = []
829 826 for r in records[:last_unique+recursion_repeat+1]:
830 827 #print '*** record:',file,lnum,func,lines,index # dbg
831 828 frames.append(self.format_record(*r))
832 829
833 830 if recursion_repeat:
834 831 frames.append('... last %d frames repeated, from the frame below ...\n' % recursion_repeat)
835 832 frames.append(self.format_record(*records[last_unique+recursion_repeat+1]))
836 833
837 834 return frames
838 835
839 836 def format_record(self, frame, file, lnum, func, lines, index):
840 837 """Format a single stack frame"""
841 838 Colors = self.Colors # just a shorthand + quicker name lookup
842 839 ColorsNormal = Colors.Normal # used a lot
843 840 col_scheme = self.color_scheme_table.active_scheme_name
844 841 indent = ' ' * INDENT_SIZE
845 842 em_normal = '%s\n%s%s' % (Colors.valEm, indent, ColorsNormal)
846 843 undefined = '%sundefined%s' % (Colors.em, ColorsNormal)
847 844 tpl_link = '%s%%s%s' % (Colors.filenameEm, ColorsNormal)
848 845 tpl_call = 'in %s%%s%s%%s%s' % (Colors.vName, Colors.valEm,
849 846 ColorsNormal)
850 847 tpl_call_fail = 'in %s%%s%s(***failed resolving arguments***)%s' % \
851 848 (Colors.vName, Colors.valEm, ColorsNormal)
852 849 tpl_local_var = '%s%%s%s' % (Colors.vName, ColorsNormal)
853 850 tpl_global_var = '%sglobal%s %s%%s%s' % (Colors.em, ColorsNormal,
854 851 Colors.vName, ColorsNormal)
855 852 tpl_name_val = '%%s %s= %%s%s' % (Colors.valEm, ColorsNormal)
856 853
857 854 tpl_line = '%s%%s%s %%s' % (Colors.lineno, ColorsNormal)
858 855 tpl_line_em = '%s%%s%s %%s%s' % (Colors.linenoEm, Colors.line,
859 856 ColorsNormal)
860 857
861 858 abspath = os.path.abspath
862 859
863 860
864 861 if not file:
865 862 file = '?'
866 863 elif file.startswith(str("<")) and file.endswith(str(">")):
867 864 # Not a real filename, no problem...
868 865 pass
869 866 elif not os.path.isabs(file):
870 867 # Try to make the filename absolute by trying all
871 868 # sys.path entries (which is also what linecache does)
872 869 for dirname in sys.path:
873 870 try:
874 871 fullname = os.path.join(dirname, file)
875 872 if os.path.isfile(fullname):
876 873 file = os.path.abspath(fullname)
877 874 break
878 875 except Exception:
879 876 # Just in case that sys.path contains very
880 877 # strange entries...
881 878 pass
882 879
883 880 file = py3compat.cast_unicode(file, util_path.fs_encoding)
884 881 link = tpl_link % file
885 882 args, varargs, varkw, locals = fixed_getargvalues(frame)
886 883
887 884 if func == '?':
888 885 call = ''
889 886 else:
890 887 # Decide whether to include variable details or not
891 888 var_repr = self.include_vars and eqrepr or nullrepr
892 889 try:
893 890 call = tpl_call % (func, inspect.formatargvalues(args,
894 891 varargs, varkw,
895 892 locals, formatvalue=var_repr))
896 893 except KeyError:
897 894 # This happens in situations like errors inside generator
898 895 # expressions, where local variables are listed in the
899 896 # line, but can't be extracted from the frame. I'm not
900 897 # 100% sure this isn't actually a bug in inspect itself,
901 898 # but since there's no info for us to compute with, the
902 899 # best we can do is report the failure and move on. Here
903 900 # we must *not* call any traceback construction again,
904 901 # because that would mess up use of %debug later on. So we
905 902 # simply report the failure and move on. The only
906 903 # limitation will be that this frame won't have locals
907 904 # listed in the call signature. Quite subtle problem...
908 905 # I can't think of a good way to validate this in a unit
909 906 # test, but running a script consisting of:
910 907 # dict( (k,v.strip()) for (k,v) in range(10) )
911 908 # will illustrate the error, if this exception catch is
912 909 # disabled.
913 910 call = tpl_call_fail % func
914 911
915 912 # Don't attempt to tokenize binary files.
916 913 if file.endswith(('.so', '.pyd', '.dll')):
917 914 return '%s %s\n' % (link, call)
918 915
919 916 elif file.endswith(('.pyc', '.pyo')):
920 917 # Look up the corresponding source file.
921 918 try:
922 919 file = openpy.source_from_cache(file)
923 920 except ValueError:
924 921 # Failed to get the source file for some reason
925 922 # E.g. https://github.com/ipython/ipython/issues/9486
926 923 return '%s %s\n' % (link, call)
927 924
928 925 def linereader(file=file, lnum=[lnum], getline=ulinecache.getline):
929 926 line = getline(file, lnum[0])
930 927 lnum[0] += 1
931 928 return line
932 929
933 930 # Build the list of names on this line of code where the exception
934 931 # occurred.
935 932 try:
936 933 names = []
937 934 name_cont = False
938 935
939 936 for token_type, token, start, end, line in generate_tokens(linereader):
940 937 # build composite names
941 938 if token_type == tokenize.NAME and token not in keyword.kwlist:
942 939 if name_cont:
943 940 # Continuation of a dotted name
944 941 try:
945 942 names[-1].append(token)
946 943 except IndexError:
947 944 names.append([token])
948 945 name_cont = False
949 946 else:
950 947 # Regular new names. We append everything, the caller
951 948 # will be responsible for pruning the list later. It's
952 949 # very tricky to try to prune as we go, b/c composite
953 950 # names can fool us. The pruning at the end is easy
954 951 # to do (or the caller can print a list with repeated
955 952 # names if so desired).
956 953 names.append([token])
957 954 elif token == '.':
958 955 name_cont = True
959 956 elif token_type == tokenize.NEWLINE:
960 957 break
961 958
962 959 except (IndexError, UnicodeDecodeError, SyntaxError):
963 960 # signals exit of tokenizer
964 961 # SyntaxError can occur if the file is not actually Python
965 962 # - see gh-6300
966 963 pass
967 964 except tokenize.TokenError as msg:
968 965 _m = ("An unexpected error occurred while tokenizing input\n"
969 966 "The following traceback may be corrupted or invalid\n"
970 967 "The error message is: %s\n" % msg)
971 968 error(_m)
972 969
973 970 # Join composite names (e.g. "dict.fromkeys")
974 971 names = ['.'.join(n) for n in names]
975 972 # prune names list of duplicates, but keep the right order
976 973 unique_names = uniq_stable(names)
977 974
978 975 # Start loop over vars
979 976 lvals = []
980 977 if self.include_vars:
981 978 for name_full in unique_names:
982 979 name_base = name_full.split('.', 1)[0]
983 980 if name_base in frame.f_code.co_varnames:
984 981 if name_base in locals:
985 982 try:
986 983 value = repr(eval(name_full, locals))
987 984 except:
988 985 value = undefined
989 986 else:
990 987 value = undefined
991 988 name = tpl_local_var % name_full
992 989 else:
993 990 if name_base in frame.f_globals:
994 991 try:
995 992 value = repr(eval(name_full, frame.f_globals))
996 993 except:
997 994 value = undefined
998 995 else:
999 996 value = undefined
1000 997 name = tpl_global_var % name_full
1001 998 lvals.append(tpl_name_val % (name, value))
1002 999 if lvals:
1003 1000 lvals = '%s%s' % (indent, em_normal.join(lvals))
1004 1001 else:
1005 1002 lvals = ''
1006 1003
1007 1004 level = '%s %s\n' % (link, call)
1008 1005
1009 1006 if index is None:
1010 1007 return level
1011 1008 else:
1012 1009 _line_format = PyColorize.Parser(style=col_scheme, parent=self).format2
1013 1010 return '%s%s' % (level, ''.join(
1014 1011 _format_traceback_lines(lnum, index, lines, Colors, lvals,
1015 1012 _line_format)))
1016 1013
1017 1014 def prepare_chained_exception_message(self, cause):
1018 1015 direct_cause = "\nThe above exception was the direct cause of the following exception:\n"
1019 1016 exception_during_handling = "\nDuring handling of the above exception, another exception occurred:\n"
1020 1017
1021 1018 if cause:
1022 1019 message = [[direct_cause]]
1023 1020 else:
1024 1021 message = [[exception_during_handling]]
1025 1022 return message
1026 1023
1027 1024 def prepare_header(self, etype, long_version=False):
1028 1025 colors = self.Colors # just a shorthand + quicker name lookup
1029 1026 colorsnormal = colors.Normal # used a lot
1030 1027 exc = '%s%s%s' % (colors.excName, etype, colorsnormal)
1031 1028 width = min(75, get_terminal_size()[0])
1032 1029 if long_version:
1033 1030 # Header with the exception type, python version, and date
1034 1031 pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
1035 1032 date = time.ctime(time.time())
1036 1033
1037 1034 head = '%s%s%s\n%s%s%s\n%s' % (colors.topline, '-' * width, colorsnormal,
1038 1035 exc, ' ' * (width - len(str(etype)) - len(pyver)),
1039 1036 pyver, date.rjust(width) )
1040 1037 head += "\nA problem occurred executing Python code. Here is the sequence of function" \
1041 1038 "\ncalls leading up to the error, with the most recent (innermost) call last."
1042 1039 else:
1043 1040 # Simplified header
1044 1041 head = '%s%s' % (exc, 'Traceback (most recent call last)'. \
1045 1042 rjust(width - len(str(etype))) )
1046 1043
1047 1044 return head
1048 1045
1049 1046 def format_exception(self, etype, evalue):
1050 1047 colors = self.Colors # just a shorthand + quicker name lookup
1051 1048 colorsnormal = colors.Normal # used a lot
1052 1049 indent = ' ' * INDENT_SIZE
1053 1050 # Get (safely) a string form of the exception info
1054 1051 try:
1055 1052 etype_str, evalue_str = map(str, (etype, evalue))
1056 1053 except:
1057 1054 # User exception is improperly defined.
1058 1055 etype, evalue = str, sys.exc_info()[:2]
1059 1056 etype_str, evalue_str = map(str, (etype, evalue))
1060 1057 # ... and format it
1061 1058 exception = ['%s%s%s: %s' % (colors.excName, etype_str,
1062 1059 colorsnormal, py3compat.cast_unicode(evalue_str))]
1063 1060
1064 1061 if (not py3compat.PY3) and type(evalue) is types.InstanceType:
1065 1062 try:
1066 1063 names = [w for w in dir(evalue) if isinstance(w, py3compat.string_types)]
1067 1064 except:
1068 1065 # Every now and then, an object with funny internals blows up
1069 1066 # when dir() is called on it. We do the best we can to report
1070 1067 # the problem and continue
1071 1068 _m = '%sException reporting error (object with broken dir())%s:'
1072 1069 exception.append(_m % (colors.excName, colorsnormal))
1073 1070 etype_str, evalue_str = map(str, sys.exc_info()[:2])
1074 1071 exception.append('%s%s%s: %s' % (colors.excName, etype_str,
1075 1072 colorsnormal, py3compat.cast_unicode(evalue_str)))
1076 1073 names = []
1077 1074 for name in names:
1078 1075 value = text_repr(getattr(evalue, name))
1079 1076 exception.append('\n%s%s = %s' % (indent, name, value))
1080 1077
1081 1078 return exception
1082 1079
1083 1080 def format_exception_as_a_whole(self, etype, evalue, etb, number_of_lines_of_context, tb_offset):
1084 1081 """Formats the header, traceback and exception message for a single exception.
1085 1082
1086 1083 This may be called multiple times by Python 3 exception chaining
1087 1084 (PEP 3134).
1088 1085 """
1089 1086 # some locals
1090 1087 orig_etype = etype
1091 1088 try:
1092 1089 etype = etype.__name__
1093 1090 except AttributeError:
1094 1091 pass
1095 1092
1096 1093 tb_offset = self.tb_offset if tb_offset is None else tb_offset
1097 1094 head = self.prepare_header(etype, self.long_header)
1098 1095 records = self.get_records(etb, number_of_lines_of_context, tb_offset)
1099 1096
1100 1097 if records is None:
1101 1098 return ""
1102 1099
1103 1100 last_unique, recursion_repeat = find_recursion(orig_etype, evalue, records)
1104 1101
1105 1102 frames = self.format_records(records, last_unique, recursion_repeat)
1106 1103
1107 1104 formatted_exception = self.format_exception(etype, evalue)
1108 1105 if records:
1109 1106 filepath, lnum = records[-1][1:3]
1110 1107 filepath = os.path.abspath(filepath)
1111 1108 ipinst = get_ipython()
1112 1109 if ipinst is not None:
1113 1110 ipinst.hooks.synchronize_with_editor(filepath, lnum, 0)
1114 1111
1115 1112 return [[head] + frames + [''.join(formatted_exception[0])]]
1116 1113
1117 1114 def get_records(self, etb, number_of_lines_of_context, tb_offset):
1118 1115 try:
1119 1116 # Try the default getinnerframes and Alex's: Alex's fixes some
1120 1117 # problems, but it generates empty tracebacks for console errors
1121 1118 # (5 blank lines) where none should be returned.
1122 1119 return _fixed_getinnerframes(etb, number_of_lines_of_context, tb_offset)
1123 1120 except UnicodeDecodeError:
1124 1121 # This can occur if a file's encoding magic comment is wrong.
1125 1122 # I can't see a way to recover without duplicating a bunch of code
1126 1123 # from the stdlib traceback module. --TK
1127 1124 error('\nUnicodeDecodeError while processing traceback.\n')
1128 1125 return None
1129 1126 except:
1130 1127 # FIXME: I've been getting many crash reports from python 2.3
1131 1128 # users, traceable to inspect.py. If I can find a small test-case
1132 1129 # to reproduce this, I should either write a better workaround or
1133 1130 # file a bug report against inspect (if that's the real problem).
1134 1131 # So far, I haven't been able to find an isolated example to
1135 1132 # reproduce the problem.
1136 1133 inspect_error()
1137 1134 traceback.print_exc(file=self.ostream)
1138 1135 info('\nUnfortunately, your original traceback can not be constructed.\n')
1139 1136 return None
1140 1137
1141 1138 def get_parts_of_chained_exception(self, evalue):
1142 1139 def get_chained_exception(exception_value):
1143 1140 cause = getattr(exception_value, '__cause__', None)
1144 1141 if cause:
1145 1142 return cause
1146 1143 if getattr(exception_value, '__suppress_context__', False):
1147 1144 return None
1148 1145 return getattr(exception_value, '__context__', None)
1149 1146
1150 1147 chained_evalue = get_chained_exception(evalue)
1151 1148
1152 1149 if chained_evalue:
1153 1150 return chained_evalue.__class__, chained_evalue, chained_evalue.__traceback__
1154 1151
1155 1152 def structured_traceback(self, etype, evalue, etb, tb_offset=None,
1156 1153 number_of_lines_of_context=5):
1157 1154 """Return a nice text document describing the traceback."""
1158 1155
1159 1156 formatted_exception = self.format_exception_as_a_whole(etype, evalue, etb, number_of_lines_of_context,
1160 1157 tb_offset)
1161 1158
1162 1159 colors = self.Colors # just a shorthand + quicker name lookup
1163 1160 colorsnormal = colors.Normal # used a lot
1164 1161 head = '%s%s%s' % (colors.topline, '-' * min(75, get_terminal_size()[0]), colorsnormal)
1165 1162 structured_traceback_parts = [head]
1166 1163 if py3compat.PY3:
1167 1164 chained_exceptions_tb_offset = 0
1168 1165 lines_of_context = 3
1169 1166 formatted_exceptions = formatted_exception
1170 1167 exception = self.get_parts_of_chained_exception(evalue)
1171 1168 if exception:
1172 1169 formatted_exceptions += self.prepare_chained_exception_message(evalue.__cause__)
1173 1170 etype, evalue, etb = exception
1174 1171 else:
1175 1172 evalue = None
1176 1173 chained_exc_ids = set()
1177 1174 while evalue:
1178 1175 formatted_exceptions += self.format_exception_as_a_whole(etype, evalue, etb, lines_of_context,
1179 1176 chained_exceptions_tb_offset)
1180 1177 exception = self.get_parts_of_chained_exception(evalue)
1181 1178
1182 1179 if exception and not id(exception[1]) in chained_exc_ids:
1183 1180 chained_exc_ids.add(id(exception[1])) # trace exception to avoid infinite 'cause' loop
1184 1181 formatted_exceptions += self.prepare_chained_exception_message(evalue.__cause__)
1185 1182 etype, evalue, etb = exception
1186 1183 else:
1187 1184 evalue = None
1188 1185
1189 1186 # we want to see exceptions in a reversed order:
1190 1187 # the first exception should be on top
1191 1188 for formatted_exception in reversed(formatted_exceptions):
1192 1189 structured_traceback_parts += formatted_exception
1193 1190 else:
1194 1191 structured_traceback_parts += formatted_exception[0]
1195 1192
1196 1193 return structured_traceback_parts
1197 1194
1198 1195 def debugger(self, force=False):
1199 1196 """Call up the pdb debugger if desired, always clean up the tb
1200 1197 reference.
1201 1198
1202 1199 Keywords:
1203 1200
1204 1201 - force(False): by default, this routine checks the instance call_pdb
1205 1202 flag and does not actually invoke the debugger if the flag is false.
1206 1203 The 'force' option forces the debugger to activate even if the flag
1207 1204 is false.
1208 1205
1209 1206 If the call_pdb flag is set, the pdb interactive debugger is
1210 1207 invoked. In all cases, the self.tb reference to the current traceback
1211 1208 is deleted to prevent lingering references which hamper memory
1212 1209 management.
1213 1210
1214 1211 Note that each call to pdb() does an 'import readline', so if your app
1215 1212 requires a special setup for the readline completers, you'll have to
1216 1213 fix that by hand after invoking the exception handler."""
1217 1214
1218 1215 if force or self.call_pdb:
1219 1216 if self.pdb is None:
1220 1217 self.pdb = self.debugger_cls()
1221 1218 # the system displayhook may have changed, restore the original
1222 1219 # for pdb
1223 1220 display_trap = DisplayTrap(hook=sys.__displayhook__)
1224 1221 with display_trap:
1225 1222 self.pdb.reset()
1226 1223 # Find the right frame so we don't pop up inside ipython itself
1227 1224 if hasattr(self, 'tb') and self.tb is not None:
1228 1225 etb = self.tb
1229 1226 else:
1230 1227 etb = self.tb = sys.last_traceback
1231 1228 while self.tb is not None and self.tb.tb_next is not None:
1232 1229 self.tb = self.tb.tb_next
1233 1230 if etb and etb.tb_next:
1234 1231 etb = etb.tb_next
1235 1232 self.pdb.botframe = etb.tb_frame
1236 1233 self.pdb.interaction(self.tb.tb_frame, self.tb)
1237 1234
1238 1235 if hasattr(self, 'tb'):
1239 1236 del self.tb
1240 1237
1241 1238 def handler(self, info=None):
1242 1239 (etype, evalue, etb) = info or sys.exc_info()
1243 1240 self.tb = etb
1244 1241 ostream = self.ostream
1245 1242 ostream.flush()
1246 1243 ostream.write(self.text(etype, evalue, etb))
1247 1244 ostream.write('\n')
1248 1245 ostream.flush()
1249 1246
1250 1247 # Changed so an instance can just be called as VerboseTB_inst() and print
1251 1248 # out the right info on its own.
1252 1249 def __call__(self, etype=None, evalue=None, etb=None):
1253 1250 """This hook can replace sys.excepthook (for Python 2.1 or higher)."""
1254 1251 if etb is None:
1255 1252 self.handler()
1256 1253 else:
1257 1254 self.handler((etype, evalue, etb))
1258 1255 try:
1259 1256 self.debugger()
1260 1257 except KeyboardInterrupt:
1261 1258 print("\nKeyboardInterrupt")
1262 1259
1263 1260
1264 1261 #----------------------------------------------------------------------------
1265 1262 class FormattedTB(VerboseTB, ListTB):
1266 1263 """Subclass ListTB but allow calling with a traceback.
1267 1264
1268 1265 It can thus be used as a sys.excepthook for Python > 2.1.
1269 1266
1270 1267 Also adds 'Context' and 'Verbose' modes, not available in ListTB.
1271 1268
1272 1269 Allows a tb_offset to be specified. This is useful for situations where
1273 1270 one needs to remove a number of topmost frames from the traceback (such as
1274 1271 occurs with python programs that themselves execute other python code,
1275 1272 like Python shells). """
1276 1273
1277 1274 def __init__(self, mode='Plain', color_scheme='Linux', call_pdb=False,
1278 1275 ostream=None,
1279 1276 tb_offset=0, long_header=False, include_vars=False,
1280 1277 check_cache=None, debugger_cls=None,
1281 1278 parent=None, config=None):
1282 1279
1283 1280 # NEVER change the order of this list. Put new modes at the end:
1284 1281 self.valid_modes = ['Plain', 'Context', 'Verbose']
1285 1282 self.verbose_modes = self.valid_modes[1:3]
1286 1283
1287 1284 VerboseTB.__init__(self, color_scheme=color_scheme, call_pdb=call_pdb,
1288 1285 ostream=ostream, tb_offset=tb_offset,
1289 1286 long_header=long_header, include_vars=include_vars,
1290 1287 check_cache=check_cache, debugger_cls=debugger_cls,
1291 1288 parent=parent, config=config)
1292 1289
1293 1290 # Different types of tracebacks are joined with different separators to
1294 1291 # form a single string. They are taken from this dict
1295 1292 self._join_chars = dict(Plain='', Context='\n', Verbose='\n')
1296 1293 # set_mode also sets the tb_join_char attribute
1297 1294 self.set_mode(mode)
1298 1295
1299 1296 def _extract_tb(self, tb):
1300 1297 if tb:
1301 1298 return traceback.extract_tb(tb)
1302 1299 else:
1303 1300 return None
1304 1301
1305 1302 def structured_traceback(self, etype, value, tb, tb_offset=None, number_of_lines_of_context=5):
1306 1303 tb_offset = self.tb_offset if tb_offset is None else tb_offset
1307 1304 mode = self.mode
1308 1305 if mode in self.verbose_modes:
1309 1306 # Verbose modes need a full traceback
1310 1307 return VerboseTB.structured_traceback(
1311 1308 self, etype, value, tb, tb_offset, number_of_lines_of_context
1312 1309 )
1313 1310 else:
1314 1311 # We must check the source cache because otherwise we can print
1315 1312 # out-of-date source code.
1316 1313 self.check_cache()
1317 1314 # Now we can extract and format the exception
1318 1315 elist = self._extract_tb(tb)
1319 1316 return ListTB.structured_traceback(
1320 1317 self, etype, value, elist, tb_offset, number_of_lines_of_context
1321 1318 )
1322 1319
1323 1320 def stb2text(self, stb):
1324 1321 """Convert a structured traceback (a list) to a string."""
1325 1322 return self.tb_join_char.join(stb)
1326 1323
1327 1324
1328 1325 def set_mode(self, mode=None):
1329 1326 """Switch to the desired mode.
1330 1327
1331 1328 If mode is not specified, cycles through the available modes."""
1332 1329
1333 1330 if not mode:
1334 1331 new_idx = (self.valid_modes.index(self.mode) + 1 ) % \
1335 1332 len(self.valid_modes)
1336 1333 self.mode = self.valid_modes[new_idx]
1337 1334 elif mode not in self.valid_modes:
1338 1335 raise ValueError('Unrecognized mode in FormattedTB: <' + mode + '>\n'
1339 1336 'Valid modes: ' + str(self.valid_modes))
1340 1337 else:
1341 1338 self.mode = mode
1342 1339 # include variable details only in 'Verbose' mode
1343 1340 self.include_vars = (self.mode == self.valid_modes[2])
1344 1341 # Set the join character for generating text tracebacks
1345 1342 self.tb_join_char = self._join_chars[self.mode]
1346 1343
1347 1344 # some convenient shortcuts
1348 1345 def plain(self):
1349 1346 self.set_mode(self.valid_modes[0])
1350 1347
1351 1348 def context(self):
1352 1349 self.set_mode(self.valid_modes[1])
1353 1350
1354 1351 def verbose(self):
1355 1352 self.set_mode(self.valid_modes[2])
1356 1353
1357 1354
1358 1355 #----------------------------------------------------------------------------
1359 1356 class AutoFormattedTB(FormattedTB):
1360 1357 """A traceback printer which can be called on the fly.
1361 1358
1362 1359 It will find out about exceptions by itself.
1363 1360
1364 1361 A brief example::
1365 1362
1366 1363 AutoTB = AutoFormattedTB(mode='Verbose', color_scheme='Linux')
1367 1364 try:
1368 1365 ...
1369 1366 except:
1370 1367 AutoTB() # or AutoTB(out=logfile) where logfile is an open file object
1371 1368 """
1372 1369
1373 1370 def __call__(self, etype=None, evalue=None, etb=None,
1374 1371 out=None, tb_offset=None):
1375 1372 """Print out a formatted exception traceback.
1376 1373
1377 1374 Optional arguments:
1378 1375 - out: an open file-like object to direct output to.
1379 1376
1380 1377 - tb_offset: the number of frames to skip over in the stack, on a
1381 1378 per-call basis (this temporarily overrides the instance's tb_offset
1382 1379 given at initialization time)."""
1383 1380
1384 1381 if out is None:
1385 1382 out = self.ostream
1386 1383 out.flush()
1387 1384 out.write(self.text(etype, evalue, etb, tb_offset))
1388 1385 out.write('\n')
1389 1386 out.flush()
1390 1387 # FIXME: we should remove the auto pdb behavior from here and leave
1391 1388 # that to the clients.
1392 1389 try:
1393 1390 self.debugger()
1394 1391 except KeyboardInterrupt:
1395 1392 print("\nKeyboardInterrupt")
1396 1393
1397 1394 def structured_traceback(self, etype=None, value=None, tb=None,
1398 1395 tb_offset=None, number_of_lines_of_context=5):
1399 1396 if etype is None:
1400 1397 etype, value, tb = sys.exc_info()
1401 1398 self.tb = tb
1402 1399 return FormattedTB.structured_traceback(
1403 1400 self, etype, value, tb, tb_offset, number_of_lines_of_context)
1404 1401
1405 1402
1406 1403 #---------------------------------------------------------------------------
1407 1404
1408 1405 # A simple class to preserve Nathan's original functionality.
1409 1406 class ColorTB(FormattedTB):
1410 1407 """Shorthand to initialize a FormattedTB in Linux colors mode."""
1411 1408
1412 1409 def __init__(self, color_scheme='Linux', call_pdb=0, **kwargs):
1413 1410 FormattedTB.__init__(self, color_scheme=color_scheme,
1414 1411 call_pdb=call_pdb, **kwargs)
1415 1412
1416 1413
1417 1414 class SyntaxTB(ListTB):
1418 1415 """Extension which holds some state: the last exception value"""
1419 1416
1420 1417 def __init__(self, color_scheme='NoColor', parent=None, config=None):
1421 1418 ListTB.__init__(self, color_scheme, parent=parent, config=config)
1422 1419 self.last_syntax_error = None
1423 1420
1424 1421 def __call__(self, etype, value, elist):
1425 1422 self.last_syntax_error = value
1426 1423
1427 1424 ListTB.__call__(self, etype, value, elist)
1428 1425
1429 1426 def structured_traceback(self, etype, value, elist, tb_offset=None,
1430 1427 context=5):
1431 1428 # If the source file has been edited, the line in the syntax error can
1432 1429 # be wrong (retrieved from an outdated cache). This replaces it with
1433 1430 # the current value.
1434 1431 if isinstance(value, SyntaxError) \
1435 1432 and isinstance(value.filename, py3compat.string_types) \
1436 1433 and isinstance(value.lineno, int):
1437 1434 linecache.checkcache(value.filename)
1438 1435 newtext = ulinecache.getline(value.filename, value.lineno)
1439 1436 if newtext:
1440 1437 value.text = newtext
1441 1438 self.last_syntax_error = value
1442 1439 return super(SyntaxTB, self).structured_traceback(etype, value, elist,
1443 1440 tb_offset=tb_offset, context=context)
1444 1441
1445 1442 def clear_err_state(self):
1446 1443 """Return the current error state and clear it"""
1447 1444 e = self.last_syntax_error
1448 1445 self.last_syntax_error = None
1449 1446 return e
1450 1447
1451 1448 def stb2text(self, stb):
1452 1449 """Convert a structured traceback (a list) to a string."""
1453 1450 return ''.join(stb)
1454 1451
1455 1452
1456 1453 # some internal-use functions
1457 1454 def text_repr(value):
1458 1455 """Hopefully pretty robust repr equivalent."""
1459 1456 # this is pretty horrible but should always return *something*
1460 1457 try:
1461 1458 return pydoc.text.repr(value)
1462 1459 except KeyboardInterrupt:
1463 1460 raise
1464 1461 except:
1465 1462 try:
1466 1463 return repr(value)
1467 1464 except KeyboardInterrupt:
1468 1465 raise
1469 1466 except:
1470 1467 try:
1471 1468 # all still in an except block so we catch
1472 1469 # getattr raising
1473 1470 name = getattr(value, '__name__', None)
1474 1471 if name:
1475 1472 # ick, recursion
1476 1473 return text_repr(name)
1477 1474 klass = getattr(value, '__class__', None)
1478 1475 if klass:
1479 1476 return '%s instance' % text_repr(klass)
1480 1477 except KeyboardInterrupt:
1481 1478 raise
1482 1479 except:
1483 1480 return 'UNRECOVERABLE REPR FAILURE'
1484 1481
1485 1482
1486 1483 def eqrepr(value, repr=text_repr):
1487 1484 return '=%s' % repr(value)
1488 1485
1489 1486
1490 1487 def nullrepr(value, repr=text_repr):
1491 1488 return ''
@@ -1,537 +1,536 b''
1 1 """IPython extension to reload modules before executing user code.
2 2
3 3 ``autoreload`` reloads modules automatically before entering the execution of
4 4 code typed at the IPython prompt.
5 5
6 6 This makes, for example, the following workflow possible:
7 7
8 8 .. sourcecode:: ipython
9 9
10 10 In [1]: %load_ext autoreload
11 11
12 12 In [2]: %autoreload 2
13 13
14 14 In [3]: from foo import some_function
15 15
16 16 In [4]: some_function()
17 17 Out[4]: 42
18 18
19 19 In [5]: # open foo.py in an editor and change some_function to return 43
20 20
21 21 In [6]: some_function()
22 22 Out[6]: 43
23 23
24 24 The module was reloaded without reloading it explicitly, and the object
25 25 imported with ``from foo import ...`` was also updated.
26 26
27 27 Usage
28 28 =====
29 29
30 30 The following magic commands are provided:
31 31
32 32 ``%autoreload``
33 33
34 34 Reload all modules (except those excluded by ``%aimport``)
35 35 automatically now.
36 36
37 37 ``%autoreload 0``
38 38
39 39 Disable automatic reloading.
40 40
41 41 ``%autoreload 1``
42 42
43 43 Reload all modules imported with ``%aimport`` every time before
44 44 executing the Python code typed.
45 45
46 46 ``%autoreload 2``
47 47
48 48 Reload all modules (except those excluded by ``%aimport``) every
49 49 time before executing the Python code typed.
50 50
51 51 ``%aimport``
52 52
53 53 List modules which are to be automatically imported or not to be imported.
54 54
55 55 ``%aimport foo``
56 56
57 57 Import module 'foo' and mark it to be autoreloaded for ``%autoreload 1``
58 58
59 59 ``%aimport -foo``
60 60
61 61 Mark module 'foo' to not be autoreloaded.
62 62
63 63 Caveats
64 64 =======
65 65
66 66 Reloading Python modules in a reliable way is in general difficult,
67 67 and unexpected things may occur. ``%autoreload`` tries to work around
68 68 common pitfalls by replacing function code objects and parts of
69 69 classes previously in the module with new versions. This makes the
70 70 following things work:
71 71
72 72 - Functions and classes imported via 'from xxx import foo' are upgraded
73 73 to new versions when 'xxx' is reloaded.
74 74
75 75 - Methods and properties of classes are upgraded on reload, so that
76 76 calling 'c.foo()' on an object 'c' created before the reload causes
77 77 the new code for 'foo' to be executed.
78 78
79 79 Some of the known remaining caveats are:
80 80
81 81 - Replacing code objects does not always succeed: changing a @property
82 82 in a class to an ordinary method or a method to a member variable
83 83 can cause problems (but in old objects only).
84 84
85 85 - Functions that are removed (e.g. via monkey-patching) from a module
86 86 before it is reloaded are not upgraded.
87 87
88 88 - C extension modules cannot be reloaded, and so cannot be autoreloaded.
89 89 """
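The function-upgrade behaviour described above rests on the fact that a function's code object can be rebound in place. A minimal sketch, independent of this extension and using throwaway names::

    def _old(x):
        return x + 1

    def _new(x):
        return x + 2

    alias = _old                     # plays the role of 'from foo import some_function'
    _old.__code__ = _new.__code__    # the essence of what update_function() below does
    assert alias(1) == 3             # the stale reference now runs the new code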
90 from __future__ import print_function
91 90
92 91 skip_doctest = True
93 92
94 93 #-----------------------------------------------------------------------------
95 94 # Copyright (C) 2000 Thomas Heller
96 95 # Copyright (C) 2008 Pauli Virtanen <pav@iki.fi>
97 96 # Copyright (C) 2012 The IPython Development Team
98 97 #
99 98 # Distributed under the terms of the BSD License. The full license is in
100 99 # the file COPYING, distributed as part of this software.
101 100 #-----------------------------------------------------------------------------
102 101 #
103 102 # This IPython module is written by Pauli Virtanen, based on the autoreload
104 103 # code by Thomas Heller.
105 104
106 105 #-----------------------------------------------------------------------------
107 106 # Imports
108 107 #-----------------------------------------------------------------------------
109 108
110 109 import os
111 110 import sys
112 111 import traceback
113 112 import types
114 113 import weakref
115 114 from importlib import import_module
116 115
117 116 try:
118 117 # Reload is not defined by default in Python3.
119 118 reload
120 119 except NameError:
121 120 from imp import reload
122 121
123 122 from IPython.utils import openpy
124 123 from IPython.utils.py3compat import PY3
125 124
126 125 #------------------------------------------------------------------------------
127 126 # Autoreload functionality
128 127 #------------------------------------------------------------------------------
129 128
130 129 class ModuleReloader(object):
131 130 enabled = False
132 131 """Whether this reloader is enabled"""
133 132
134 133 check_all = True
135 134 """Autoreload all modules, not just those listed in 'modules'"""
136 135
137 136 def __init__(self):
138 137 # Modules that failed to reload: {module: mtime-on-failed-reload, ...}
139 138 self.failed = {}
140 139 # Modules specially marked as autoreloadable.
141 140 self.modules = {}
142 141 # Modules specially marked as not autoreloadable.
143 142 self.skip_modules = {}
144 143 # (module-name, name) -> weakref, for replacing old code objects
145 144 self.old_objects = {}
146 145 # Module modification timestamps
147 146 self.modules_mtimes = {}
148 147
149 148 # Cache module modification times
150 149 self.check(check_all=True, do_reload=False)
151 150
152 151 def mark_module_skipped(self, module_name):
153 152 """Skip reloading the named module in the future"""
154 153 try:
155 154 del self.modules[module_name]
156 155 except KeyError:
157 156 pass
158 157 self.skip_modules[module_name] = True
159 158
160 159 def mark_module_reloadable(self, module_name):
161 160 """Reload the named module in the future (if it is imported)"""
162 161 try:
163 162 del self.skip_modules[module_name]
164 163 except KeyError:
165 164 pass
166 165 self.modules[module_name] = True
167 166
168 167 def aimport_module(self, module_name):
169 168 """Import a module, and mark it reloadable
170 169
171 170 Returns
172 171 -------
173 172 top_module : module
174 173 The imported module if it is top-level, or the top-level package containing it
175 174 top_name : str
176 175 Name of top_module
177 176
178 177 """
179 178 self.mark_module_reloadable(module_name)
180 179
181 180 import_module(module_name)
182 181 top_name = module_name.split('.')[0]
183 182 top_module = sys.modules[top_name]
184 183 return top_module, top_name
185 184
186 185 def filename_and_mtime(self, module):
187 186 if not hasattr(module, '__file__') or module.__file__ is None:
188 187 return None, None
189 188
190 189 if getattr(module, '__name__', None) == '__main__':
191 190 # we cannot reload(__main__)
192 191 return None, None
193 192
194 193 filename = module.__file__
195 194 path, ext = os.path.splitext(filename)
196 195
197 196 if ext.lower() == '.py':
198 197 py_filename = filename
199 198 else:
200 199 try:
201 200 py_filename = openpy.source_from_cache(filename)
202 201 except ValueError:
203 202 return None, None
204 203
205 204 try:
206 205 pymtime = os.stat(py_filename).st_mtime
207 206 except OSError:
208 207 return None, None
209 208
210 209 return py_filename, pymtime
211 210
212 211 def check(self, check_all=False, do_reload=True):
213 212 """Check whether some modules need to be reloaded."""
214 213
215 214 if not self.enabled and not check_all:
216 215 return
217 216
218 217 if check_all or self.check_all:
219 218 modules = list(sys.modules.keys())
220 219 else:
221 220 modules = list(self.modules.keys())
222 221
223 222 for modname in modules:
224 223 m = sys.modules.get(modname, None)
225 224
226 225 if modname in self.skip_modules:
227 226 continue
228 227
229 228 py_filename, pymtime = self.filename_and_mtime(m)
230 229 if py_filename is None:
231 230 continue
232 231
233 232 try:
234 233 if pymtime <= self.modules_mtimes[modname]:
235 234 continue
236 235 except KeyError:
237 236 self.modules_mtimes[modname] = pymtime
238 237 continue
239 238 else:
240 239 if self.failed.get(py_filename, None) == pymtime:
241 240 continue
242 241
243 242 self.modules_mtimes[modname] = pymtime
244 243
245 244 # If we've reached this point, we should try to reload the module
246 245 if do_reload:
247 246 try:
248 247 superreload(m, reload, self.old_objects)
249 248 if py_filename in self.failed:
250 249 del self.failed[py_filename]
251 250 except:
252 251 print("[autoreload of %s failed: %s]" % (
253 252 modname, traceback.format_exc(1)), file=sys.stderr)
254 253 self.failed[py_filename] = pymtime
255 254
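For orientation, the reloader can also be driven directly, without the magics defined further down. A rough sketch using only the attributes defined on the class::

    reloader = ModuleReloader()
    reloader.enabled = True       # let check() actually reload things
    reloader.check_all = True     # watch every imported module, not just %aimport'ed ones
    # ... edit an imported .py file on disk ...
    reloader.check()              # modules whose mtime changed are passed to superreload()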
256 255 #------------------------------------------------------------------------------
257 256 # superreload
258 257 #------------------------------------------------------------------------------
259 258
260 259 if PY3:
261 260 func_attrs = ['__code__', '__defaults__', '__doc__',
262 261 '__closure__', '__globals__', '__dict__']
263 262 else:
264 263 func_attrs = ['func_code', 'func_defaults', 'func_doc',
265 264 'func_closure', 'func_globals', 'func_dict']
266 265
267 266
268 267 def update_function(old, new):
269 268 """Upgrade the code object of a function"""
270 269 for name in func_attrs:
271 270 try:
272 271 setattr(old, name, getattr(new, name))
273 272 except (AttributeError, TypeError):
274 273 pass
275 274
276 275
277 276 def update_class(old, new):
278 277 """Replace stuff in the __dict__ of a class, and upgrade
279 278 method code objects"""
280 279 for key in list(old.__dict__.keys()):
281 280 old_obj = getattr(old, key)
282 281
283 282 try:
284 283 new_obj = getattr(new, key)
285 284 except AttributeError:
286 285 # obsolete attribute: remove it
287 286 try:
288 287 delattr(old, key)
289 288 except (AttributeError, TypeError):
290 289 pass
291 290 continue
292 291
293 292 if update_generic(old_obj, new_obj): continue
294 293
295 294 try:
296 295 setattr(old, key, getattr(new, key))
297 296 except (AttributeError, TypeError):
298 297 pass # skip non-writable attributes
299 298
300 299
301 300 def update_property(old, new):
302 301 """Replace get/set/del functions of a property"""
303 302 update_generic(old.fdel, new.fdel)
304 303 update_generic(old.fget, new.fget)
305 304 update_generic(old.fset, new.fset)
306 305
307 306
308 307 def isinstance2(a, b, typ):
309 308 return isinstance(a, typ) and isinstance(b, typ)
310 309
311 310
312 311 UPDATE_RULES = [
313 312 (lambda a, b: isinstance2(a, b, type),
314 313 update_class),
315 314 (lambda a, b: isinstance2(a, b, types.FunctionType),
316 315 update_function),
317 316 (lambda a, b: isinstance2(a, b, property),
318 317 update_property),
319 318 ]
320 319
321 320
322 321 if PY3:
323 322 UPDATE_RULES.extend([(lambda a, b: isinstance2(a, b, types.MethodType),
324 323 lambda a, b: update_function(a.__func__, b.__func__)),
325 324 ])
326 325 else:
327 326 UPDATE_RULES.extend([(lambda a, b: isinstance2(a, b, types.ClassType),
328 327 update_class),
329 328 (lambda a, b: isinstance2(a, b, types.MethodType),
330 329 lambda a, b: update_function(a.__func__, b.__func__)),
331 330 ])
332 331
333 332
334 333 def update_generic(a, b):
335 334 for type_check, update in UPDATE_RULES:
336 335 if type_check(a, b):
337 336 update(a, b)
338 337 return True
339 338 return False
340 339
341 340
342 341 class StrongRef(object):
343 342 def __init__(self, obj):
344 343 self.obj = obj
345 344 def __call__(self):
346 345 return self.obj
347 346
348 347
349 348 def superreload(module, reload=reload, old_objects={}):
350 349 """Enhanced version of the builtin reload function.
351 350
352 351 superreload remembers objects previously in the module, and
353 352
354 353 - upgrades the class dictionary of every old class in the module
355 354 - upgrades the code object of every old function and method
356 355 - clears the module's namespace before reloading
357 356
358 357 """
359 358
360 359 # collect old objects in the module
361 360 for name, obj in list(module.__dict__.items()):
362 361 if not hasattr(obj, '__module__') or obj.__module__ != module.__name__:
363 362 continue
364 363 key = (module.__name__, name)
365 364 try:
366 365 old_objects.setdefault(key, []).append(weakref.ref(obj))
367 366 except TypeError:
368 367 # weakref doesn't work for all types;
369 368 # create strong references for 'important' cases
370 369 if not PY3 and isinstance(obj, types.ClassType):
371 370 old_objects.setdefault(key, []).append(StrongRef(obj))
372 371
373 372 # reload module
374 373 try:
375 374 # clear namespace first from old cruft
376 375 old_dict = module.__dict__.copy()
377 376 old_name = module.__name__
378 377 module.__dict__.clear()
379 378 module.__dict__['__name__'] = old_name
380 379 module.__dict__['__loader__'] = old_dict['__loader__']
381 380 except (TypeError, AttributeError, KeyError):
382 381 pass
383 382
384 383 try:
385 384 module = reload(module)
386 385 except:
387 386 # restore module dictionary on failed reload
388 387 module.__dict__.update(old_dict)
389 388 raise
390 389
391 390 # iterate over all objects and update functions & classes
392 391 for name, new_obj in list(module.__dict__.items()):
393 392 key = (module.__name__, name)
394 393 if key not in old_objects: continue
395 394
396 395 new_refs = []
397 396 for old_ref in old_objects[key]:
398 397 old_obj = old_ref()
399 398 if old_obj is None: continue
400 399 new_refs.append(old_ref)
401 400 update_generic(old_obj, new_obj)
402 401
403 402 if new_refs:
404 403 old_objects[key] = new_refs
405 404 else:
406 405 del old_objects[key]
407 406
408 407 return module
409 408
410 409 #------------------------------------------------------------------------------
411 410 # IPython connectivity
412 411 #------------------------------------------------------------------------------
413 412
414 413 from IPython.core.magic import Magics, magics_class, line_magic
415 414
416 415 @magics_class
417 416 class AutoreloadMagics(Magics):
418 417 def __init__(self, *a, **kw):
419 418 super(AutoreloadMagics, self).__init__(*a, **kw)
420 419 self._reloader = ModuleReloader()
421 420 self._reloader.check_all = False
422 421 self.loaded_modules = set(sys.modules)
423 422
424 423 @line_magic
425 424 def autoreload(self, parameter_s=''):
426 425 r"""%autoreload => Reload modules automatically
427 426
428 427 %autoreload
429 428 Reload all modules (except those excluded by %aimport) automatically
430 429 now.
431 430
432 431 %autoreload 0
433 432 Disable automatic reloading.
434 433
435 434 %autoreload 1
436 435 Reload all modules imported with %aimport every time before executing
437 436 the Python code typed.
438 437
439 438 %autoreload 2
440 439 Reload all modules (except those excluded by %aimport) every time
441 440 before executing the Python code typed.
442 441
443 442 Reloading Python modules in a reliable way is in general
444 443 difficult, and unexpected things may occur. %autoreload tries to
445 444 work around common pitfalls by replacing function code objects and
446 445 parts of classes previously in the module with new versions. This
447 446 makes the following things work:
448 447
449 448 - Functions and classes imported via 'from xxx import foo' are upgraded
450 449 to new versions when 'xxx' is reloaded.
451 450
452 451 - Methods and properties of classes are upgraded on reload, so that
453 452 calling 'c.foo()' on an object 'c' created before the reload causes
454 453 the new code for 'foo' to be executed.
455 454
456 455 Some of the known remaining caveats are:
457 456
458 457 - Replacing code objects does not always succeed: changing a @property
459 458 in a class to an ordinary method or a method to a member variable
460 459 can cause problems (but in old objects only).
461 460
462 461 - Functions that are removed (e.g. via monkey-patching) from a module
463 462 before it is reloaded are not upgraded.
464 463
465 464 - C extension modules cannot be reloaded, and so cannot be
466 465 autoreloaded.
467 466
468 467 """
469 468 if parameter_s == '':
470 469 self._reloader.check(True)
471 470 elif parameter_s == '0':
472 471 self._reloader.enabled = False
473 472 elif parameter_s == '1':
474 473 self._reloader.check_all = False
475 474 self._reloader.enabled = True
476 475 elif parameter_s == '2':
477 476 self._reloader.check_all = True
478 477 self._reloader.enabled = True
479 478
480 479 @line_magic
481 480 def aimport(self, parameter_s='', stream=None):
482 481 """%aimport => Import modules for automatic reloading.
483 482
484 483 %aimport
485 484 List modules to automatically import and not to import.
486 485
487 486 %aimport foo
488 487 Import module 'foo' and mark it to be autoreloaded for %autoreload 1
489 488
490 489 %aimport -foo
491 490 Mark module 'foo' to not be autoreloaded for %autoreload 1
492 491 """
493 492 modname = parameter_s
494 493 if not modname:
495 494 to_reload = sorted(self._reloader.modules.keys())
496 495 to_skip = sorted(self._reloader.skip_modules.keys())
497 496 if stream is None:
498 497 stream = sys.stdout
499 498 if self._reloader.check_all:
500 499 stream.write("Modules to reload:\nall-except-skipped\n")
501 500 else:
502 501 stream.write("Modules to reload:\n%s\n" % ' '.join(to_reload))
503 502 stream.write("\nModules to skip:\n%s\n" % ' '.join(to_skip))
504 503 elif modname.startswith('-'):
505 504 modname = modname[1:]
506 505 self._reloader.mark_module_skipped(modname)
507 506 else:
508 507 top_module, top_name = self._reloader.aimport_module(modname)
509 508
510 509 # Inject module to user namespace
511 510 self.shell.push({top_name: top_module})
512 511
513 512 def pre_run_cell(self):
514 513 if self._reloader.enabled:
515 514 try:
516 515 self._reloader.check()
517 516 except:
518 517 pass
519 518
520 519 def post_execute_hook(self):
521 520 """Cache the modification times of any modules imported in this execution
522 521 """
523 522 newly_loaded_modules = set(sys.modules) - self.loaded_modules
524 523 for modname in newly_loaded_modules:
525 524 _, pymtime = self._reloader.filename_and_mtime(sys.modules[modname])
526 525 if pymtime is not None:
527 526 self._reloader.modules_mtimes[modname] = pymtime
528 527
529 528 self.loaded_modules.update(newly_loaded_modules)
530 529
531 530
532 531 def load_ipython_extension(ip):
533 532 """Load the extension in IPython."""
534 533 auto_reload = AutoreloadMagics(ip)
535 534 ip.register_magics(auto_reload)
536 535 ip.events.register('pre_run_cell', auto_reload.pre_run_cell)
537 536 ip.events.register('post_execute', auto_reload.post_execute_hook)
@@ -1,228 +1,227 b''
1 1 # -*- coding: utf-8 -*-
2 2 """
3 3 %store magic for lightweight persistence.
4 4
5 5 Stores variables, aliases and macros in IPython's database.
6 6
7 7 To automatically restore stored variables at startup, add this to your
8 8 :file:`ipython_config.py` file::
9 9
10 10 c.StoreMagics.autorestore = True
11 11 """
12 from __future__ import print_function
13 12
14 13 # Copyright (c) IPython Development Team.
15 14 # Distributed under the terms of the Modified BSD License.
16 15
17 16 import inspect, os, sys, textwrap
18 17
19 18 from IPython.core.error import UsageError
20 19 from IPython.core.magic import Magics, magics_class, line_magic
21 20 from traitlets import Bool
22 21 from IPython.utils.py3compat import string_types
23 22
24 23
25 24 def restore_aliases(ip):
26 25 staliases = ip.db.get('stored_aliases', {})
27 26 for k,v in staliases.items():
28 27 #print "restore alias",k,v # dbg
29 28 #self.alias_table[k] = v
30 29 ip.alias_manager.define_alias(k,v)
31 30
32 31
33 32 def refresh_variables(ip):
34 33 db = ip.db
35 34 for key in db.keys('autorestore/*'):
36 35 # strip autorestore
37 36 justkey = os.path.basename(key)
38 37 try:
39 38 obj = db[key]
40 39 except KeyError:
41 40 print("Unable to restore variable '%s', ignoring (use %%store -d to forget!)" % justkey)
42 41 print("The error was:", sys.exc_info()[0])
43 42 else:
44 43 #print "restored",justkey,"=",obj #dbg
45 44 ip.user_ns[justkey] = obj
46 45
47 46
48 47 def restore_dhist(ip):
49 48 ip.user_ns['_dh'] = ip.db.get('dhist',[])
50 49
51 50
52 51 def restore_data(ip):
53 52 refresh_variables(ip)
54 53 restore_aliases(ip)
55 54 restore_dhist(ip)
56 55
57 56
58 57 @magics_class
59 58 class StoreMagics(Magics):
60 59 """Lightweight persistence for python variables.
61 60
62 61 Provides the %store magic."""
63 62
64 63 autorestore = Bool(False, help=
65 64 """If True, any %store-d variables will be automatically restored
66 65 when IPython starts.
67 66 """
68 67 ).tag(config=True)
69 68
70 69 def __init__(self, shell):
71 70 super(StoreMagics, self).__init__(shell=shell)
72 71 self.shell.configurables.append(self)
73 72 if self.autorestore:
74 73 restore_data(self.shell)
75 74
76 75 @line_magic
77 76 def store(self, parameter_s=''):
78 77 """Lightweight persistence for python variables.
79 78
80 79 Example::
81 80
82 81 In [1]: l = ['hello',10,'world']
83 82 In [2]: %store l
84 83 In [3]: exit
85 84
86 85 (IPython session is closed and started again...)
87 86
88 87 ville@badger:~$ ipython
89 88 In [1]: l
90 89 NameError: name 'l' is not defined
91 90 In [2]: %store -r
92 91 In [3]: l
93 92 Out[3]: ['hello', 10, 'world']
94 93
95 94 Usage:
96 95
97 96 * ``%store`` - Show list of all variables and their current
98 97 values
99 98 * ``%store spam`` - Store the *current* value of the variable spam
100 99 to disk
101 100 * ``%store -d spam`` - Remove the variable and its value from storage
102 101 * ``%store -z`` - Remove all variables from storage
103 102 * ``%store -r`` - Refresh all variables from store (overwrite
104 103 current vals)
105 104 * ``%store -r spam bar`` - Refresh specified variables from store
106 105 (delete current val)
107 106 * ``%store foo >a.txt`` - Store value of foo to new file a.txt
108 107 * ``%store foo >>a.txt`` - Append value of foo to file a.txt
109 108
110 109 It should be noted that if you change the value of a variable, you
111 110 need to %store it again if you want to persist the new value.
112 111
113 112 Note also that the variables will need to be pickleable; most basic
114 113 python types can be safely %store'd.
115 114
116 115 Aliases can also be %store'd across sessions.
117 116 """
118 117
119 118 opts,argsl = self.parse_options(parameter_s,'drz',mode='string')
120 119 args = argsl.split(None,1)
121 120 ip = self.shell
122 121 db = ip.db
123 122 # delete
124 123 if 'd' in opts:
125 124 try:
126 125 todel = args[0]
127 126 except IndexError:
128 127 raise UsageError('You must provide the variable to forget')
129 128 else:
130 129 try:
131 130 del db['autorestore/' + todel]
132 131 except:
133 132 raise UsageError("Can't delete variable '%s'" % todel)
134 133 # reset
135 134 elif 'z' in opts:
136 135 for k in db.keys('autorestore/*'):
137 136 del db[k]
138 137
139 138 elif 'r' in opts:
140 139 if args:
141 140 for arg in args:
142 141 try:
143 142 obj = db['autorestore/' + arg]
144 143 except KeyError:
145 144 print("no stored variable %s" % arg)
146 145 else:
147 146 ip.user_ns[arg] = obj
148 147 else:
149 148 restore_data(ip)
150 149
151 150 # run without arguments -> list variables & values
152 151 elif not args:
153 152 vars = db.keys('autorestore/*')
154 153 vars.sort()
155 154 if vars:
156 155 size = max(map(len, vars))
157 156 else:
158 157 size = 0
159 158
160 159 print('Stored variables and their in-db values:')
161 160 fmt = '%-'+str(size)+'s -> %s'
162 161 get = db.get
163 162 for var in vars:
164 163 justkey = os.path.basename(var)
165 164 # print the first 50 characters of every var
166 165 print(fmt % (justkey, repr(get(var, '<unavailable>'))[:50]))
167 166
168 167 # default action - store the variable
169 168 else:
170 169 # %store foo >file.txt or >>file.txt
171 170 if len(args) > 1 and args[1].startswith('>'):
172 171 fnam = os.path.expanduser(args[1].lstrip('>').lstrip())
173 172 if args[1].startswith('>>'):
174 173 fil = open(fnam, 'a')
175 174 else:
176 175 fil = open(fnam, 'w')
177 176 obj = ip.ev(args[0])
178 177 print("Writing '%s' (%s) to file '%s'." % (args[0],
179 178 obj.__class__.__name__, fnam))
180 179
181 180
182 181 if not isinstance (obj, string_types):
183 182 from pprint import pprint
184 183 pprint(obj, fil)
185 184 else:
186 185 fil.write(obj)
187 186 if not obj.endswith('\n'):
188 187 fil.write('\n')
189 188
190 189 fil.close()
191 190 return
192 191
193 192 # %store foo
194 193 try:
195 194 obj = ip.user_ns[args[0]]
196 195 except KeyError:
197 196 # it might be an alias
198 197 name = args[0]
199 198 try:
200 199 cmd = ip.alias_manager.retrieve_alias(name)
201 200 except ValueError:
202 201 raise UsageError("Unknown variable '%s'" % name)
203 202
204 203 staliases = db.get('stored_aliases',{})
205 204 staliases[name] = cmd
206 205 db['stored_aliases'] = staliases
207 206 print("Alias stored: %s (%s)" % (name, cmd))
208 207 return
209 208
210 209 else:
211 210 modname = getattr(inspect.getmodule(obj), '__name__', '')
212 211 if modname == '__main__':
213 212 print(textwrap.dedent("""\
214 213 Warning: %s is %s
215 214 Proper storage of interactively declared classes (or instances
216 215 of those classes) is not possible! Only instances
217 216 of classes in real modules on file system can be %%store'd.
218 217 """ % (args[0], obj) ))
219 218 return
220 219 #pickled = pickle.dumps(obj)
221 220 db[ 'autorestore/' + args[0] ] = obj
222 221 print("Stored '%s' (%s)" % (args[0], obj.__class__.__name__))
223 222
224 223
225 224 def load_ipython_extension(ip):
226 225 """Load the extension in IPython."""
227 226 ip.register_magics(StoreMagics)
228 227
@@ -1,491 +1,490 b''
1 1 # -*- coding: utf-8 -*-
2 2 """Manage background (threaded) jobs conveniently from an interactive shell.
3 3
4 4 This module provides a BackgroundJobManager class. This is the main class
5 5 meant for public usage, it implements an object which can create and manage
6 6 new background jobs.
7 7
8 8 It also provides the actual job classes managed by these BackgroundJobManager
9 9 objects, see their docstrings below.
10 10
11 11
12 12 This system was inspired by discussions with B. Granger and the
13 13 BackgroundCommand class described in the book Python Scripting for
14 14 Computational Science, by H. P. Langtangen:
15 15
16 16 http://folk.uio.no/hpl/scripting
17 17
18 18 (although ultimately no code from this text was used, as IPython's system is a
19 19 separate implementation).
20 20
21 21 An example notebook is provided in our documentation illustrating interactive
22 22 use of the system.
23 23 """
24 from __future__ import print_function
25 24
26 25 #*****************************************************************************
27 26 # Copyright (C) 2005-2006 Fernando Perez <fperez@colorado.edu>
28 27 #
29 28 # Distributed under the terms of the BSD License. The full license is in
30 29 # the file COPYING, distributed as part of this software.
31 30 #*****************************************************************************
32 31
33 32 # Code begins
34 33 import sys
35 34 import threading
36 35
37 36 from IPython import get_ipython
38 37 from IPython.core.ultratb import AutoFormattedTB
39 38 from logging import error
40 39 from IPython.utils.py3compat import string_types
41 40
42 41
43 42 class BackgroundJobManager(object):
44 43 """Class to manage a pool of backgrounded threaded jobs.
45 44
46 45 Below, we assume that 'jobs' is a BackgroundJobManager instance.
47 46
48 47 Usage summary (see the method docstrings for details):
49 48
50 49 jobs.new(...) -> start a new job
51 50
52 51 jobs() or jobs.status() -> print status summary of all jobs
53 52
54 53 jobs[N] -> returns job number N.
55 54
56 55 foo = jobs[N].result -> assign to variable foo the result of job N
57 56
58 57 jobs[N].traceback() -> print the traceback of dead job N
59 58
60 59 jobs.remove(N) -> remove (finished) job N
61 60
62 61 jobs.flush() -> remove all finished jobs
63 62
64 63 As a convenience feature, BackgroundJobManager instances provide the
65 64 utility result and traceback methods which retrieve the corresponding
66 65 information from the jobs list:
67 66
68 67 jobs.result(N) <--> jobs[N].result
69 68 jobs.traceback(N) <--> jobs[N].traceback()
70 69
71 70 While this appears minor, it allows you to use tab completion
72 71 interactively on the job manager instance.
73 72 """
74 73
75 74 def __init__(self):
76 75 # Lists for job management, accessed via a property to ensure they're
77 76 # up to date.
78 77 self._running = []
79 78 self._completed = []
80 79 self._dead = []
81 80 # A dict of all jobs, so users can easily access any of them
82 81 self.all = {}
83 82 # For reporting
84 83 self._comp_report = []
85 84 self._dead_report = []
86 85 # Store status codes locally for fast lookups
87 86 self._s_created = BackgroundJobBase.stat_created_c
88 87 self._s_running = BackgroundJobBase.stat_running_c
89 88 self._s_completed = BackgroundJobBase.stat_completed_c
90 89 self._s_dead = BackgroundJobBase.stat_dead_c
91 90
92 91 @property
93 92 def running(self):
94 93 self._update_status()
95 94 return self._running
96 95
97 96 @property
98 97 def dead(self):
99 98 self._update_status()
100 99 return self._dead
101 100
102 101 @property
103 102 def completed(self):
104 103 self._update_status()
105 104 return self._completed
106 105
107 106 def new(self, func_or_exp, *args, **kwargs):
108 107 """Add a new background job and start it in a separate thread.
109 108
110 109 There are two types of jobs which can be created:
111 110
112 111 1. Jobs based on expressions which can be passed to an eval() call.
113 112 The expression must be given as a string. For example:
114 113
115 114 job_manager.new('myfunc(x,y,z=1)'[,glob[,loc]])
116 115
117 116 The given expression is passed to eval(), along with the optional
118 117 global/local dicts provided. If no dicts are given, they are
119 118 extracted automatically from the caller's frame.
120 119
121 120 A Python statement is NOT a valid eval() expression. Basically, you
122 121 can only use as an eval() argument something which can go on the right
123 122 of an '=' sign and be assigned to a variable.
124 123
125 124 For example, "print 'hello'" is not valid, but '2+3' is.
126 125
127 126 2. Jobs given a function object, optionally passing additional
128 127 positional arguments:
129 128
130 129 job_manager.new(myfunc, x, y)
131 130
132 131 The function is called with the given arguments.
133 132
134 133 If you need to pass keyword arguments to your function, you must
135 134 supply them as a dict named kw:
136 135
137 136 job_manager.new(myfunc, x, y, kw=dict(z=1))
138 137
139 138 The reason for this asymmetry is that the new() method needs to
140 139 maintain access to its own keywords, and this prevents name collisions
141 140 between arguments to new() and arguments to your own functions.
142 141
143 142 In both cases, the result is stored in the job.result field of the
144 143 background job object.
145 144
146 145 You can set `daemon` attribute of the thread by giving the keyword
147 146 argument `daemon`.
148 147
149 148 Notes and caveats:
150 149
151 150 1. All threads running share the same standard output. Thus, if your
152 151 background jobs generate output, it will come out on top of whatever
153 152 you are currently writing. For this reason, background jobs are best
154 153 used with silent functions which simply return their output.
155 154
156 155 2. Threads also all work within the same global namespace, and this
157 156 system does not lock interactive variables. So if you send job to the
158 157 background which operates on a mutable object for a long time, and
159 158 start modifying that same mutable object interactively (or in another
160 159 backgrounded job), all sorts of bizarre behaviour will occur.
161 160
162 161 3. If a background job is spending a lot of time inside a C extension
163 162 module which does not release the Python Global Interpreter Lock
164 163 (GIL), this will block the IPython prompt. This is simply because the
165 164 Python interpreter can only switch between threads at Python
166 165 bytecodes. While the execution is inside C code, the interpreter must
167 166 simply wait unless the extension module releases the GIL.
168 167
169 168 4. There is no way, due to limitations in the Python threads library,
170 169 to kill a thread once it has started."""
171 170
172 171 if callable(func_or_exp):
173 172 kw = kwargs.get('kw',{})
174 173 job = BackgroundJobFunc(func_or_exp,*args,**kw)
175 174 elif isinstance(func_or_exp, string_types):
176 175 if not args:
177 176 frame = sys._getframe(1)
178 177 glob, loc = frame.f_globals, frame.f_locals
179 178 elif len(args)==1:
180 179 glob = loc = args[0]
181 180 elif len(args)==2:
182 181 glob,loc = args
183 182 else:
184 183 raise ValueError(
185 184 'Expression jobs take at most 2 args (globals,locals)')
186 185 job = BackgroundJobExpr(func_or_exp, glob, loc)
187 186 else:
188 187 raise TypeError('invalid args for new job')
189 188
190 189 if kwargs.get('daemon', False):
191 190 job.daemon = True
192 191 job.num = len(self.all)+1 if self.all else 0
193 192 self.running.append(job)
194 193 self.all[job.num] = job
195 194 print('Starting job # %s in a separate thread.' % job.num)
196 195 job.start()
197 196 return job
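To make the two call styles above concrete, a hypothetical session; the sleep and sum payloads are placeholders::

    import time

    jobs = BackgroundJobManager()
    j1 = jobs.new(time.sleep, 2)       # function job: runs time.sleep(2) in a thread
    j2 = jobs.new('sum(range(10))')    # expression job, handed to eval()
    jobs.status()                      # summary of running/completed/dead jobs
    j2.result                          # 45, once the job has finished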
198 197
199 198 def __getitem__(self, job_key):
200 199 num = job_key if isinstance(job_key, int) else job_key.num
201 200 return self.all[num]
202 201
203 202 def __call__(self):
204 203 """An alias to self.status(),
205 204
206 205 This allows you to simply call a job manager instance much like the
207 206 Unix `jobs` shell command."""
208 207
209 208 return self.status()
210 209
211 210 def _update_status(self):
212 211 """Update the status of the job lists.
213 212
214 213 This method moves finished jobs to one of two lists:
215 214 - self.completed: jobs which completed successfully
216 215 - self.dead: jobs which finished but died.
217 216
218 217 It also copies those jobs to corresponding _report lists. These lists
219 218 are used to report jobs completed/dead since the last update, and are
220 219 then cleared by the reporting function after each call."""
221 220
222 221 # Status codes
223 222 srun, scomp, sdead = self._s_running, self._s_completed, self._s_dead
224 223 # State lists, use the actual lists b/c the public names are properties
225 224 # that call this very function on access
226 225 running, completed, dead = self._running, self._completed, self._dead
227 226
228 227 # Now, update all state lists
229 228 for num, job in enumerate(running):
230 229 stat = job.stat_code
231 230 if stat == srun:
232 231 continue
233 232 elif stat == scomp:
234 233 completed.append(job)
235 234 self._comp_report.append(job)
236 235 running[num] = False
237 236 elif stat == sdead:
238 237 dead.append(job)
239 238 self._dead_report.append(job)
240 239 running[num] = False
241 240 # Remove dead/completed jobs from running list
242 241 running[:] = filter(None, running)
243 242
244 243 def _group_report(self,group,name):
245 244 """Report summary for a given job group.
246 245
247 246 Return True if the group had any elements."""
248 247
249 248 if group:
250 249 print('%s jobs:' % name)
251 250 for job in group:
252 251 print('%s : %s' % (job.num,job))
253 252 print()
254 253 return True
255 254
256 255 def _group_flush(self,group,name):
257 256 """Flush a given job group
258 257
259 258 Return True if the group had any elements."""
260 259
261 260 njobs = len(group)
262 261 if njobs:
263 262 plural = {1:''}.setdefault(njobs,'s')
264 263 print('Flushing %s %s job%s.' % (njobs,name,plural))
265 264 group[:] = []
266 265 return True
267 266
268 267 def _status_new(self):
269 268 """Print the status of newly finished jobs.
270 269
271 270 Return True if any new jobs are reported.
272 271
273 272 This call resets its own state every time, so it only reports jobs
274 273 which have finished since the last time it was called."""
275 274
276 275 self._update_status()
277 276 new_comp = self._group_report(self._comp_report, 'Completed')
278 277 new_dead = self._group_report(self._dead_report,
279 278 'Dead, call jobs.traceback() for details')
280 279 self._comp_report[:] = []
281 280 self._dead_report[:] = []
282 281 return new_comp or new_dead
283 282
284 283 def status(self,verbose=0):
285 284 """Print a status of all jobs currently being managed."""
286 285
287 286 self._update_status()
288 287 self._group_report(self.running,'Running')
289 288 self._group_report(self.completed,'Completed')
290 289 self._group_report(self.dead,'Dead')
291 290 # Also flush the report queues
292 291 self._comp_report[:] = []
293 292 self._dead_report[:] = []
294 293
295 294 def remove(self,num):
296 295 """Remove a finished (completed or dead) job."""
297 296
298 297 try:
299 298 job = self.all[num]
300 299 except KeyError:
301 300 error('Job #%s not found' % num)
302 301 else:
303 302 stat_code = job.stat_code
304 303 if stat_code == self._s_running:
305 304 error('Job #%s is still running, it can not be removed.' % num)
306 305 return
307 306 elif stat_code == self._s_completed:
308 307 self.completed.remove(job)
309 308 elif stat_code == self._s_dead:
310 309 self.dead.remove(job)
311 310
312 311 def flush(self):
313 312 """Flush all finished jobs (completed and dead) from lists.
314 313
315 314 Running jobs are never flushed.
316 315
317 316 Before flushing, the completed and dead lists are refreshed (their
318 317 properties call _update_status()), so jobs that have just finished
319 318 are flushed as well.
320 319
321 320 # Remove the finished jobs from the master dict
322 321 alljobs = self.all
323 322 for job in self.completed+self.dead:
324 323 del(alljobs[job.num])
325 324
326 325 # Now flush these lists completely
327 326 fl_comp = self._group_flush(self.completed, 'Completed')
328 327 fl_dead = self._group_flush(self.dead, 'Dead')
329 328 if not (fl_comp or fl_dead):
330 329 print('No jobs to flush.')
331 330
332 331 def result(self,num):
333 332 """result(N) -> return the result of job N."""
334 333 try:
335 334 return self.all[num].result
336 335 except KeyError:
337 336 error('Job #%s not found' % num)
338 337
339 338 def _traceback(self, job):
340 339 num = job if isinstance(job, int) else job.num
341 340 try:
342 341 self.all[num].traceback()
343 342 except KeyError:
344 343 error('Job #%s not found' % num)
345 344
346 345 def traceback(self, job=None):
347 346 if job is None:
348 347 self._update_status()
349 348 for deadjob in self.dead:
350 349 print("Traceback for: %r" % deadjob)
351 350 self._traceback(deadjob)
352 351 print()
353 352 else:
354 353 self._traceback(job)
355 354
356 355
357 356 class BackgroundJobBase(threading.Thread):
358 357 """Base class to build BackgroundJob classes.
359 358
360 359 The derived classes must implement:
361 360
362 361 - Their own __init__, since the one here raises NotImplementedError. The
363 362 derived constructor must call self._init() at the end, to provide common
364 363 initialization.
365 364
366 365 - A strform attribute used in calls to __str__.
367 366
368 367 - A call() method, which will make the actual execution call and must
369 368 return a value to be held in the 'result' field of the job object.
370 369 """
371 370
372 371 # Class constants for status, in string and as numerical codes (when
373 372 # updating jobs lists, we don't want to do string comparisons). This will
374 373 # be done at every user prompt, so it has to be as fast as possible
375 374 stat_created = 'Created'; stat_created_c = 0
376 375 stat_running = 'Running'; stat_running_c = 1
377 376 stat_completed = 'Completed'; stat_completed_c = 2
378 377 stat_dead = 'Dead (Exception), call jobs.traceback() for details'
379 378 stat_dead_c = -1
380 379
381 380 def __init__(self):
382 381 """Must be implemented in subclasses.
383 382
384 383 Subclasses must call :meth:`_init` for standard initialisation.
385 384 """
386 385 raise NotImplementedError("This class can not be instantiated directly.")
387 386
388 387 def _init(self):
389 388 """Common initialization for all BackgroundJob objects"""
390 389
391 390 for attr in ['call','strform']:
392 391 assert hasattr(self,attr), "Missing attribute <%s>" % attr
393 392
394 393 # The num tag can be set by an external job manager
395 394 self.num = None
396 395
397 396 self.status = BackgroundJobBase.stat_created
398 397 self.stat_code = BackgroundJobBase.stat_created_c
399 398 self.finished = False
400 399 self.result = '<BackgroundJob has not completed>'
401 400
402 401 # reuse the ipython traceback handler if we can get to it, otherwise
403 402 # make a new one
404 403 try:
405 404 make_tb = get_ipython().InteractiveTB.text
406 405 except:
407 406 make_tb = AutoFormattedTB(mode = 'Context',
408 407 color_scheme='NoColor',
409 408 tb_offset = 1).text
410 409 # Note that the actual API for text() requires the three args to be
411 410 # passed in, so we wrap it in a simple lambda.
412 411 self._make_tb = lambda : make_tb(None, None, None)
413 412
414 413 # Hold a formatted traceback if one is generated.
415 414 self._tb = None
416 415
417 416 threading.Thread.__init__(self)
418 417
419 418 def __str__(self):
420 419 return self.strform
421 420
422 421 def __repr__(self):
423 422 return '<BackgroundJob #%d: %s>' % (self.num, self.strform)
424 423
425 424 def traceback(self):
426 425 print(self._tb)
427 426
428 427 def run(self):
429 428 try:
430 429 self.status = BackgroundJobBase.stat_running
431 430 self.stat_code = BackgroundJobBase.stat_running_c
432 431 self.result = self.call()
433 432 except:
434 433 self.status = BackgroundJobBase.stat_dead
435 434 self.stat_code = BackgroundJobBase.stat_dead_c
436 435 self.finished = None
437 436 self.result = ('<BackgroundJob died, call jobs.traceback() for details>')
438 437 self._tb = self._make_tb()
439 438 else:
440 439 self.status = BackgroundJobBase.stat_completed
441 440 self.stat_code = BackgroundJobBase.stat_completed_c
442 441 self.finished = True
443 442
444 443
445 444 class BackgroundJobExpr(BackgroundJobBase):
446 445 """Evaluate an expression as a background job (uses a separate thread)."""
447 446
448 447 def __init__(self, expression, glob=None, loc=None):
449 448 """Create a new job from a string which can be fed to eval().
450 449
451 450 global/locals dicts can be provided, which will be passed to the eval
452 451 call."""
453 452
454 453 # fail immediately if the given expression can't be compiled
455 454 self.code = compile(expression,'<BackgroundJob compilation>','eval')
456 455
457 456 glob = {} if glob is None else glob
458 457 loc = {} if loc is None else loc
459 458 self.expression = self.strform = expression
460 459 self.glob = glob
461 460 self.loc = loc
462 461 self._init()
463 462
464 463 def call(self):
465 464 return eval(self.code,self.glob,self.loc)
466 465
467 466
468 467 class BackgroundJobFunc(BackgroundJobBase):
469 468 """Run a function call as a background job (uses a separate thread)."""
470 469
471 470 def __init__(self, func, *args, **kwargs):
472 471 """Create a new job from a callable object.
473 472
474 473 Any positional arguments and keyword args given to this constructor
475 474 after the initial callable are passed directly to it."""
476 475
477 476 if not callable(func):
478 477 raise TypeError(
479 478 'first argument to BackgroundJobFunc must be callable')
480 479
481 480 self.func = func
482 481 self.args = args
483 482 self.kwargs = kwargs
484 483 # The string form will only include the function passed, because
485 484 # generating string representations of the arguments is a potentially
486 485 # _very_ expensive operation (e.g. with large arrays).
487 486 self.strform = str(func)
488 487 self._init()
489 488
490 489 def call(self):
491 490 return self.func(*self.args, **self.kwargs)
@@ -1,344 +1,343 b''
1 1 # -*- coding: utf-8 -*-
2 2 """
3 3 Provides a reload() function that acts recursively.
4 4
5 5 Python's normal :func:`python:reload` function only reloads the module that it's
6 6 passed. The :func:`reload` function in this module also reloads everything
7 7 imported from that module, which is useful when you're changing files deep
8 8 inside a package.
9 9
10 10 To use this as your default reload function, type this for Python 2::
11 11
12 12 import __builtin__
13 13 from IPython.lib import deepreload
14 14 __builtin__.reload = deepreload.reload
15 15
16 16 Or this for Python 3::
17 17
18 18 import builtins
19 19 from IPython.lib import deepreload
20 20 builtins.reload = deepreload.reload
21 21
22 22 A reference to the original :func:`python:reload` is stored in this module as
23 23 :data:`original_reload`, so you can restore it later.
24 24
25 25 This code is almost entirely based on knee.py, which is a Python
26 26 re-implementation of hierarchical module import.
27 27 """
28 from __future__ import print_function
29 28 #*****************************************************************************
30 29 # Copyright (C) 2001 Nathaniel Gray <n8gray@caltech.edu>
31 30 #
32 31 # Distributed under the terms of the BSD License. The full license is in
33 32 # the file COPYING, distributed as part of this software.
34 33 #*****************************************************************************
35 34
36 35 from contextlib import contextmanager
37 36 import imp
38 37 import sys
39 38
40 39 from types import ModuleType
41 40 from warnings import warn
42 41
43 42 from IPython.utils.py3compat import builtin_mod, builtin_mod_name
44 43
45 44 original_import = builtin_mod.__import__
46 45
47 46 @contextmanager
48 47 def replace_import_hook(new_import):
49 48 saved_import = builtin_mod.__import__
50 49 builtin_mod.__import__ = new_import
51 50 try:
52 51 yield
53 52 finally:
54 53 builtin_mod.__import__ = saved_import
55 54
56 55 def get_parent(globals, level):
57 56 """
58 57 parent, name = get_parent(globals, level)
59 58
60 59 Return the package that an import is being performed in. If globals comes
61 60 from the module foo.bar.bat (not itself a package), this returns the
62 61 sys.modules entry for foo.bar. If globals is from a package's __init__.py,
63 62 the package's entry in sys.modules is returned.
64 63
65 64 If globals doesn't come from a package or a module in a package, or a
66 65 corresponding entry is not found in sys.modules, None is returned.
67 66 """
68 67 orig_level = level
69 68
70 69 if not level or not isinstance(globals, dict):
71 70 return None, ''
72 71
73 72 pkgname = globals.get('__package__', None)
74 73
75 74 if pkgname is not None:
76 75 # __package__ is set, so use it
77 76 if not hasattr(pkgname, 'rindex'):
78 77 raise ValueError('__package__ set to non-string')
79 78 if len(pkgname) == 0:
80 79 if level > 0:
81 80 raise ValueError('Attempted relative import in non-package')
82 81 return None, ''
83 82 name = pkgname
84 83 else:
85 84 # __package__ not set, so figure it out and set it
86 85 if '__name__' not in globals:
87 86 return None, ''
88 87 modname = globals['__name__']
89 88
90 89 if '__path__' in globals:
91 90 # __path__ is set, so modname is already the package name
92 91 globals['__package__'] = name = modname
93 92 else:
94 93 # Normal module, so work out the package name if any
95 94 lastdot = modname.rfind('.')
96 95 if lastdot < 0 < level:
97 96 raise ValueError("Attempted relative import in non-package")
98 97 if lastdot < 0:
99 98 globals['__package__'] = None
100 99 return None, ''
101 100 globals['__package__'] = name = modname[:lastdot]
102 101
103 102 dot = len(name)
104 103 for x in range(level, 1, -1):
105 104 try:
106 105 dot = name.rindex('.', 0, dot)
107 106 except ValueError:
108 107 raise ValueError("attempted relative import beyond top-level "
109 108 "package")
110 109 name = name[:dot]
111 110
112 111 try:
113 112 parent = sys.modules[name]
114 113 except:
115 114 if orig_level < 1:
116 115 warn("Parent module '%.200s' not found while handling absolute "
117 116 "import" % name)
118 117 parent = None
119 118 else:
120 119 raise SystemError("Parent module '%.200s' not loaded, cannot "
121 120 "perform relative import" % name)
122 121
123 122 # We expect, but can't guarantee, if parent != None, that:
124 123 # - parent.__name__ == name
125 124 # - parent.__dict__ is globals
126 125 # If this is violated... Who cares?
127 126 return parent, name
128 127
129 128 def load_next(mod, altmod, name, buf):
130 129 """
131 130 mod, name, buf = load_next(mod, altmod, name, buf)
132 131
133 132 altmod is either None or same as mod
134 133 """
135 134
136 135 if len(name) == 0:
137 136 # completely empty module name should only happen in
138 137 # 'from . import' (or '__import__("")')
139 138 return mod, None, buf
140 139
141 140 dot = name.find('.')
142 141 if dot == 0:
143 142 raise ValueError('Empty module name')
144 143
145 144 if dot < 0:
146 145 subname = name
147 146 next = None
148 147 else:
149 148 subname = name[:dot]
150 149 next = name[dot+1:]
151 150
152 151 if buf != '':
153 152 buf += '.'
154 153 buf += subname
155 154
156 155 result = import_submodule(mod, subname, buf)
157 156 if result is None and mod != altmod:
158 157 result = import_submodule(altmod, subname, subname)
159 158 if result is not None:
160 159 buf = subname
161 160
162 161 if result is None:
163 162 raise ImportError("No module named %.200s" % name)
164 163
165 164 return result, next, buf
166 165
167 166 # Need to keep track of what we've already reloaded to prevent cyclic evil
168 167 found_now = {}
169 168
170 169 def import_submodule(mod, subname, fullname):
171 170 """m = import_submodule(mod, subname, fullname)"""
172 171 # Require:
173 172 # if mod == None: subname == fullname
174 173 # else: mod.__name__ + "." + subname == fullname
175 174
176 175 global found_now
177 176 if fullname in found_now and fullname in sys.modules:
178 177 m = sys.modules[fullname]
179 178 else:
180 179 print('Reloading', fullname)
181 180 found_now[fullname] = 1
182 181 oldm = sys.modules.get(fullname, None)
183 182
184 183 if mod is None:
185 184 path = None
186 185 elif hasattr(mod, '__path__'):
187 186 path = mod.__path__
188 187 else:
189 188 return None
190 189
191 190 try:
192 191 # This appears to be necessary on Python 3, because imp.find_module()
193 192 # tries to import standard libraries (like io) itself, and we don't
194 193 # want them to be processed by our deep_import_hook.
195 194 with replace_import_hook(original_import):
196 195 fp, filename, stuff = imp.find_module(subname, path)
197 196 except ImportError:
198 197 return None
199 198
200 199 try:
201 200 m = imp.load_module(fullname, fp, filename, stuff)
202 201 except:
203 202 # load_module probably removed name from modules because of
204 203 # the error. Put back the original module object.
205 204 if oldm:
206 205 sys.modules[fullname] = oldm
207 206 raise
208 207 finally:
209 208 if fp: fp.close()
210 209
211 210 add_submodule(mod, m, fullname, subname)
212 211
213 212 return m
214 213
215 214 def add_submodule(mod, submod, fullname, subname):
216 215 """mod.{subname} = submod"""
217 216 if mod is None:
218 217 return #Nothing to do here.
219 218
220 219 if submod is None:
221 220 submod = sys.modules[fullname]
222 221
223 222 setattr(mod, subname, submod)
224 223
225 224 return
226 225
227 226 def ensure_fromlist(mod, fromlist, buf, recursive):
228 227 """Handle 'from module import a, b, c' imports."""
229 228 if not hasattr(mod, '__path__'):
230 229 return
231 230 for item in fromlist:
232 231 if not hasattr(item, 'rindex'):
233 232 raise TypeError("Item in ``from list'' not a string")
234 233 if item == '*':
235 234 if recursive:
236 235 continue # avoid endless recursion
237 236 try:
238 237 all = mod.__all__
239 238 except AttributeError:
240 239 pass
241 240 else:
242 241 ret = ensure_fromlist(mod, all, buf, 1)
243 242 if not ret:
244 243 return 0
245 244 elif not hasattr(mod, item):
246 245 import_submodule(mod, item, buf + '.' + item)
247 246
248 247 def deep_import_hook(name, globals=None, locals=None, fromlist=None, level=-1):
249 248 """Replacement for __import__()"""
250 249 parent, buf = get_parent(globals, level)
251 250
252 251 head, name, buf = load_next(parent, None if level < 0 else parent, name, buf)
253 252
254 253 tail = head
255 254 while name:
256 255 tail, name, buf = load_next(tail, tail, name, buf)
257 256
258 257 # If tail is None, both get_parent and load_next found
259 258 # an empty module name: someone called __import__("") or
260 259 # doctored faulty bytecode
261 260 if tail is None:
262 261 raise ValueError('Empty module name')
263 262
264 263 if not fromlist:
265 264 return head
266 265
267 266 ensure_fromlist(tail, fromlist, buf, 0)
268 267 return tail
269 268
270 269 modules_reloading = {}
271 270
272 271 def deep_reload_hook(m):
273 272 """Replacement for reload()."""
274 273 if not isinstance(m, ModuleType):
275 274 raise TypeError("reload() argument must be module")
276 275
277 276 name = m.__name__
278 277
279 278 if name not in sys.modules:
280 279 raise ImportError("reload(): module %.200s not in sys.modules" % name)
281 280
282 281 global modules_reloading
283 282 try:
284 283 return modules_reloading[name]
285 284 except:
286 285 modules_reloading[name] = m
287 286
288 287 dot = name.rfind('.')
289 288 if dot < 0:
290 289 subname = name
291 290 path = None
292 291 else:
293 292 try:
294 293 parent = sys.modules[name[:dot]]
295 294 except KeyError:
296 295 modules_reloading.clear()
297 296 raise ImportError("reload(): parent %.200s not in sys.modules" % name[:dot])
298 297 subname = name[dot+1:]
299 298 path = getattr(parent, "__path__", None)
300 299
301 300 try:
302 301 # This appears to be necessary on Python 3, because imp.find_module()
303 302 # tries to import standard libraries (like io) itself, and we don't
304 303 # want them to be processed by our deep_import_hook.
305 304 with replace_import_hook(original_import):
306 305 fp, filename, stuff = imp.find_module(subname, path)
307 306 finally:
308 307 modules_reloading.clear()
309 308
310 309 try:
311 310 newm = imp.load_module(name, fp, filename, stuff)
312 311 except:
313 312 # load_module probably removed name from modules because of
314 313 # the error. Put back the original module object.
315 314 sys.modules[name] = m
316 315 raise
317 316 finally:
318 317 if fp: fp.close()
319 318
320 319 modules_reloading.clear()
321 320 return newm
322 321
323 322 # Save the original hooks
324 323 try:
325 324 original_reload = builtin_mod.reload
326 325 except AttributeError:
327 326 original_reload = imp.reload # Python 3
328 327
329 328 # Replacement for reload()
330 329 def reload(module, exclude=('sys', 'os.path', builtin_mod_name, '__main__',
331 330 'numpy', 'numpy._globals')):
332 331 """Recursively reload all modules used in the given module. Optionally
333 332 takes a list of modules to exclude from reloading. The default exclude
334 333 list contains sys, __main__, and __builtin__, to prevent, e.g., resetting
335 334 display, exception, and io hooks.
336 335 """
337 336 global found_now
338 337 for i in exclude:
339 338 found_now[i] = 1
340 339 try:
341 340 with replace_import_hook(deep_import_hook):
342 341 return deep_reload_hook(module)
343 342 finally:
344 343 found_now = {}
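A short sketch of calling the recursive reload directly with an explicit exclude list; ``mypackage`` is a placeholder name, and the tuple shown simply mirrors the default with 'builtins' spelled for Python 3::

    from IPython.lib import deepreload
    import mypackage

    mypackage = deepreload.reload(
        mypackage,
        exclude=('sys', 'os.path', '__main__', 'builtins', 'numpy', 'numpy._globals'),
    )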
@@ -1,669 +1,667 b''
1 1 """Module for interactive demos using IPython.
2 2
3 3 This module implements a few classes for running Python scripts interactively
4 4 in IPython for demonstrations. With very simple markup (a few tags in
5 5 comments), you can control points where the script stops executing and returns
6 6 control to IPython.
7 7
8 8
9 9 Provided classes
10 10 ----------------
11 11
12 12 The classes are (see their docstrings for further details):
13 13
14 14 - Demo: pure python demos
15 15
16 16 - IPythonDemo: demos with input to be processed by IPython as if it had been
17 17 typed interactively (so magics work, as well as any other special syntax you
18 18 may have added via input prefilters).
19 19
20 20 - LineDemo: single-line version of the Demo class. These demos are executed
21 21 one line at a time, and require no markup.
22 22
23 23 - IPythonLineDemo: IPython version of the LineDemo class (the demo is
24 24 executed a line at a time, but processed via IPython).
25 25
26 26 - ClearMixin: mixin to make Demo classes with less visual clutter. It
27 27 declares an empty marquee and a pre_cmd that clears the screen before each
28 28 block (see Subclassing below).
29 29
30 30 - ClearDemo, ClearIPDemo: mixin-enabled versions of the Demo and IPythonDemo
31 31 classes.
32 32
33 33 Inheritance diagram:
34 34
35 35 .. inheritance-diagram:: IPython.lib.demo
36 36 :parts: 3
37 37
38 38 Subclassing
39 39 -----------
40 40
41 41 The classes here all include a few methods meant to make customization by
42 42 subclassing more convenient. Their docstrings below have some more details:
43 43
44 44 - highlight(): format every block and optionally highlight comments and
45 45 docstring content.
46 46
47 47 - marquee(): generates a marquee to provide visible on-screen markers at each
48 48 block start and end.
49 49
50 50 - pre_cmd(): run right before the execution of each block.
51 51
52 52 - post_cmd(): run right after the execution of each block. If the block
53 53 raises an exception, this is NOT called.
54 54
55 55
56 56 Operation
57 57 ---------
58 58
59 59 The file is run in its own empty namespace (though you can pass it a string of
60 60 arguments as if in a command line environment, and it will see those as
61 61 sys.argv). But at each stop, the global IPython namespace is updated with the
62 62 current internal demo namespace, so you can work interactively with the data
63 63 accumulated so far.
64 64
65 65 By default, each block of code is printed (with syntax highlighting) before
66 66 executing it and you have to confirm execution. This is intended to show the
67 67 code to an audience first so you can discuss it, and only proceed with
68 68 execution once you agree. There are a few tags which allow you to modify this
69 69 behavior.
70 70
71 71 The supported tags are:
72 72
73 73 # <demo> stop
74 74
75 75 Defines block boundaries, the points where IPython stops execution of the
76 76 file and returns to the interactive prompt.
77 77
78 78 You can optionally mark the stop tag with extra dashes before and after the
79 79 word 'stop', to help visually distinguish the blocks in a text editor:
80 80
81 81 # <demo> --- stop ---
82 82
83 83
84 84 # <demo> silent
85 85
86 86 Make a block execute silently (and hence automatically). Typically used in
87 87 cases where you have some boilerplate or initialization code which you need
88 88 executed but do not want to be seen in the demo.
89 89
90 90 # <demo> auto
91 91
92 92   Make a block execute automatically while still printing it. Useful for
93 93 simple code which does not warrant discussion, since it avoids the extra
94 94 manual confirmation.
95 95
96 96 # <demo> auto_all
97 97
98 98 This tag can _only_ be in the first block, and if given it overrides the
99 99 individual auto tags to make the whole demo fully automatic (no block asks
100 100 for confirmation). It can also be given at creation time (or the attribute
101 101 set later) to override what's in the file.
102 102
103 103 While _any_ python file can be run as a Demo instance, if there are no stop
104 104 tags the whole file will run in a single block (no different than calling
105 105 first %pycat and then %run). The minimal markup to make this useful is to
106 106 place a set of stop tags; the other tags are only there to let you fine-tune
107 107 the execution.
108 108
109 109 This is probably best explained with the simple example file below. You can
110 110 copy this into a file named ex_demo.py, and try running it via::
111 111
112 112     from IPython.lib.demo import Demo
113 113 d = Demo('ex_demo.py')
114 114 d()
115 115
116 116 Each time you call the demo object, it runs the next block. The demo object
117 117 has a few useful methods for navigation, like again(), edit(), jump(), seek()
118 118 and back(). It can be reset for a new run via reset() or reloaded from disk
119 119 (in case you've edited the source) via reload(). See their docstrings below.
120 120
121 121 Note: To make this simpler to explore, a file called "demo-exercizer.py" has
122 122 been added to the "docs/examples/core" directory. Just cd to this directory in
123 123 an IPython session, and type::
124 124
125 125 %run demo-exercizer.py
126 126
127 127 and then follow the directions.
128 128
129 129 Example
130 130 -------
131 131
132 132 The following is a very simple example of a valid demo file.
133 133
134 134 ::
135 135
136 136 #################### EXAMPLE DEMO <ex_demo.py> ###############################
137 137 '''A simple interactive demo to illustrate the use of IPython's Demo class.'''
138 138
139 139 print 'Hello, welcome to an interactive IPython demo.'
140 140
141 141 # The mark below defines a block boundary, which is a point where IPython will
142 142 # stop execution and return to the interactive prompt. The dashes are actually
143 143 # optional and used only as a visual aid to clearly separate blocks while
144 144 # editing the demo code.
145 145 # <demo> stop
146 146
147 147 x = 1
148 148 y = 2
149 149
150 150 # <demo> stop
151 151
152 152 # the mark below marks this block as silent
153 153 # <demo> silent
154 154
155 155 print 'This is a silent block, which gets executed but not printed.'
156 156
157 157 # <demo> stop
158 158 # <demo> auto
159 159 print 'This is an automatic block.'
160 160 print 'It is executed without asking for confirmation, but printed.'
161 161 z = x+y
162 162
163 163 print 'z=',z
164 164
165 165 # <demo> stop
166 166 # This is just another normal block.
167 167 print 'z is now:', z
168 168
169 169 print 'bye!'
170 170 ################### END EXAMPLE DEMO <ex_demo.py> ############################
171 171 """
172 172
173 from __future__ import unicode_literals
174 173
175 174 #*****************************************************************************
176 175 # Copyright (C) 2005-2006 Fernando Perez. <Fernando.Perez@colorado.edu>
177 176 #
178 177 # Distributed under the terms of the BSD License. The full license is in
179 178 # the file COPYING, distributed as part of this software.
180 179 #
181 180 #*****************************************************************************
182 from __future__ import print_function
183 181
184 182 import os
185 183 import re
186 184 import shlex
187 185 import sys
188 186 import pygments
189 187
190 188 from IPython.utils.text import marquee
191 189 from IPython.utils import openpy
192 190 from IPython.utils import py3compat
193 191 __all__ = ['Demo','IPythonDemo','LineDemo','IPythonLineDemo','DemoError']
194 192
195 193 class DemoError(Exception): pass
196 194
197 195 def re_mark(mark):
198 196 return re.compile(r'^\s*#\s+<demo>\s+%s\s*$' % mark,re.MULTILINE)
199 197
200 198 class Demo(object):
201 199
202 200 re_stop = re_mark('-*\s?stop\s?-*')
203 201 re_silent = re_mark('silent')
204 202 re_auto = re_mark('auto')
205 203 re_auto_all = re_mark('auto_all')
206 204
207 205 def __init__(self,src,title='',arg_str='',auto_all=None, format_rst=False,
208 206 formatter='terminal', style='default'):
209 207 """Make a new demo object. To run the demo, simply call the object.
210 208
211 209 See the module docstring for full details and an example (you can use
212 210 IPython.Demo? in IPython to see it).
213 211
214 212 Inputs:
215 213
216 214 - src is either a file, or file-like object, or a
217 215 string that can be resolved to a filename.
218 216
219 217 Optional inputs:
220 218
221 219 - title: a string to use as the demo name. Of most use when the demo
222 220 you are making comes from an object that has no filename, or if you
223 221 want an alternate denotation distinct from the filename.
224 222
225 223 - arg_str(''): a string of arguments, internally converted to a list
226 224 just like sys.argv, so the demo script can see a similar
227 225 environment.
228 226
229 227 - auto_all(None): global flag to run all blocks automatically without
230 228 confirmation. This attribute overrides the block-level tags and
231 229 applies to the whole demo. It is an attribute of the object, and
232 230 can be changed at runtime simply by reassigning it to a boolean
233 231 value.
234 232
235 233 - format_rst(False): a bool to enable comments and doc strings
236 234           formatting with the pygments rst lexer
237 235
238 236         - formatter('terminal'): the name of the pygments formatter to be
239 237 used. Useful values for terminals: terminal, terminal256,
240 238 terminal16m
241 239
242 240         - style('default'): the name of the pygments style to be used.
243 241 """
244 242 if hasattr(src, "read"):
245 243 # It seems to be a file or a file-like object
246 244 self.fname = "from a file-like object"
247 245 if title == '':
248 246 self.title = "from a file-like object"
249 247 else:
250 248 self.title = title
251 249 else:
252 250 # Assume it's a string or something that can be converted to one
253 251 self.fname = src
254 252 if title == '':
255 253 (filepath, filename) = os.path.split(src)
256 254 self.title = filename
257 255 else:
258 256 self.title = title
259 257 self.sys_argv = [src] + shlex.split(arg_str)
260 258 self.auto_all = auto_all
261 259 self.src = src
262 260
263 261 self.inside_ipython = "get_ipython" in globals()
264 262 if self.inside_ipython:
265 263 # get a few things from ipython. While it's a bit ugly design-wise,
266 264 # it ensures that things like color scheme and the like are always in
267 265 # sync with the ipython mode being used. This class is only meant to
268 266             # be used inside ipython anyway, so it's OK.
269 267 ip = get_ipython() # this is in builtins whenever IPython is running
270 268 self.ip_ns = ip.user_ns
271 269 self.ip_colorize = ip.pycolorize
272 270 self.ip_showtb = ip.showtraceback
273 271 self.ip_run_cell = ip.run_cell
274 272 self.shell = ip
275 273
276 274 self.formatter = pygments.formatters.get_formatter_by_name(formatter,
277 275 style=style)
278 276 self.python_lexer = pygments.lexers.get_lexer_by_name("py3")
279 277 self.format_rst = format_rst
280 278 if format_rst:
281 279 self.rst_lexer = pygments.lexers.get_lexer_by_name("rst")
282 280
283 281 # load user data and initialize data structures
284 282 self.reload()
285 283
286 284 def fload(self):
287 285 """Load file object."""
288 286 # read data and parse into blocks
289 287 if hasattr(self, 'fobj') and self.fobj is not None:
290 288 self.fobj.close()
291 289 if hasattr(self.src, "read"):
292 290 # It seems to be a file or a file-like object
293 291 self.fobj = self.src
294 292 else:
295 293 # Assume it's a string or something that can be converted to one
296 294 self.fobj = openpy.open(self.fname)
297 295
298 296 def reload(self):
299 297 """Reload source from disk and initialize state."""
300 298 self.fload()
301 299
302 300 self.src = "".join(openpy.strip_encoding_cookie(self.fobj))
303 301 src_b = [b.strip() for b in self.re_stop.split(self.src) if b]
304 302 self._silent = [bool(self.re_silent.findall(b)) for b in src_b]
305 303 self._auto = [bool(self.re_auto.findall(b)) for b in src_b]
306 304
307 305 # if auto_all is not given (def. None), we read it from the file
308 306 if self.auto_all is None:
309 307 self.auto_all = bool(self.re_auto_all.findall(src_b[0]))
310 308 else:
311 309 self.auto_all = bool(self.auto_all)
312 310
313 311 # Clean the sources from all markup so it doesn't get displayed when
314 312 # running the demo
315 313 src_blocks = []
316 314 auto_strip = lambda s: self.re_auto.sub('',s)
317 315 for i,b in enumerate(src_b):
318 316 if self._auto[i]:
319 317 src_blocks.append(auto_strip(b))
320 318 else:
321 319 src_blocks.append(b)
322 320 # remove the auto_all marker
323 321 src_blocks[0] = self.re_auto_all.sub('',src_blocks[0])
324 322
325 323 self.nblocks = len(src_blocks)
326 324 self.src_blocks = src_blocks
327 325
328 326 # also build syntax-highlighted source
329 327 self.src_blocks_colored = list(map(self.highlight,self.src_blocks))
330 328
331 329 # ensure clean namespace and seek offset
332 330 self.reset()
333 331
334 332 def reset(self):
335 333 """Reset the namespace and seek pointer to restart the demo"""
336 334 self.user_ns = {}
337 335 self.finished = False
338 336 self.block_index = 0
339 337
340 338 def _validate_index(self,index):
341 339 if index<0 or index>=self.nblocks:
342 340 raise ValueError('invalid block index %s' % index)
343 341
344 342 def _get_index(self,index):
345 343 """Get the current block index, validating and checking status.
346 344
347 345 Returns None if the demo is finished"""
348 346
349 347 if index is None:
350 348 if self.finished:
351 349 print('Demo finished. Use <demo_name>.reset() if you want to rerun it.')
352 350 return None
353 351 index = self.block_index
354 352 else:
355 353 self._validate_index(index)
356 354 return index
357 355
358 356 def seek(self,index):
359 357 """Move the current seek pointer to the given block.
360 358
361 359 You can use negative indices to seek from the end, with identical
362 360 semantics to those of Python lists."""
363 361 if index<0:
364 362 index = self.nblocks + index
365 363 self._validate_index(index)
366 364 self.block_index = index
367 365 self.finished = False
368 366
369 367 def back(self,num=1):
370 368 """Move the seek pointer back num blocks (default is 1)."""
371 369 self.seek(self.block_index-num)
372 370
373 371 def jump(self,num=1):
374 372 """Jump a given number of blocks relative to the current one.
375 373
376 374 The offset can be positive or negative, defaults to 1."""
377 375 self.seek(self.block_index+num)
378 376
379 377 def again(self):
380 378 """Move the seek pointer back one block and re-execute."""
381 379 self.back(1)
382 380 self()
383 381
384 382 def edit(self,index=None):
385 383 """Edit a block.
386 384
387 385 If no number is given, use the last block executed.
388 386
389 387 This edits the in-memory copy of the demo, it does NOT modify the
390 388 original source file. If you want to do that, simply open the file in
391 389 an editor and use reload() when you make changes to the file. This
392 390 method is meant to let you change a block during a demonstration for
393 391 explanatory purposes, without damaging your original script."""
394 392
395 393 index = self._get_index(index)
396 394 if index is None:
397 395 return
398 396 # decrease the index by one (unless we're at the very beginning), so
399 397         # that the default demo.edit() call opens up the block we've last run
400 398 if index>0:
401 399 index -= 1
402 400
403 401 filename = self.shell.mktempfile(self.src_blocks[index])
404 402 self.shell.hooks.editor(filename,1)
405 403 with open(filename, 'r') as f:
406 404 new_block = f.read()
407 405 # update the source and colored block
408 406 self.src_blocks[index] = new_block
409 407 self.src_blocks_colored[index] = self.highlight(new_block)
410 408 self.block_index = index
411 409 # call to run with the newly edited index
412 410 self()
413 411
414 412 def show(self,index=None):
415 413 """Show a single block on screen"""
416 414
417 415 index = self._get_index(index)
418 416 if index is None:
419 417 return
420 418
421 419 print(self.marquee('<%s> block # %s (%s remaining)' %
422 420 (self.title,index,self.nblocks-index-1)))
423 421 print(self.src_blocks_colored[index])
424 422 sys.stdout.flush()
425 423
426 424 def show_all(self):
427 425 """Show entire demo on screen, block by block"""
428 426
429 427 fname = self.title
430 428 title = self.title
431 429 nblocks = self.nblocks
432 430 silent = self._silent
433 431 marquee = self.marquee
434 432 for index,block in enumerate(self.src_blocks_colored):
435 433 if silent[index]:
436 434 print(marquee('<%s> SILENT block # %s (%s remaining)' %
437 435 (title,index,nblocks-index-1)))
438 436 else:
439 437 print(marquee('<%s> block # %s (%s remaining)' %
440 438 (title,index,nblocks-index-1)))
441 439 print(block, end=' ')
442 440 sys.stdout.flush()
443 441
444 442 def run_cell(self,source):
445 443 """Execute a string with one or more lines of code"""
446 444
447 445 exec(source, self.user_ns)
448 446
449 447 def __call__(self,index=None):
450 448 """run a block of the demo.
451 449
452 450 If index is given, it should be an integer >=1 and <= nblocks. This
453 451 means that the calling convention is one off from typical Python
454 452 lists. The reason for the inconsistency is that the demo always
455 453         prints 'Block n/N', and N is the total, so it would be very odd to use
456 454 zero-indexing here."""
457 455
458 456 index = self._get_index(index)
459 457 if index is None:
460 458 return
461 459 try:
462 460 marquee = self.marquee
463 461 next_block = self.src_blocks[index]
464 462 self.block_index += 1
465 463 if self._silent[index]:
466 464 print(marquee('Executing silent block # %s (%s remaining)' %
467 465 (index,self.nblocks-index-1)))
468 466 else:
469 467 self.pre_cmd()
470 468 self.show(index)
471 469 if self.auto_all or self._auto[index]:
472 470 print(marquee('output:'))
473 471 else:
474 472 print(marquee('Press <q> to quit, <Enter> to execute...'), end=' ')
475 473 ans = py3compat.input().strip()
476 474 if ans:
477 475 print(marquee('Block NOT executed'))
478 476 return
479 477 try:
480 478 save_argv = sys.argv
481 479 sys.argv = self.sys_argv
482 480 self.run_cell(next_block)
483 481 self.post_cmd()
484 482 finally:
485 483 sys.argv = save_argv
486 484
487 485 except:
488 486 if self.inside_ipython:
489 487 self.ip_showtb(filename=self.fname)
490 488 else:
491 489 if self.inside_ipython:
492 490 self.ip_ns.update(self.user_ns)
493 491
494 492 if self.block_index == self.nblocks:
495 493 mq1 = self.marquee('END OF DEMO')
496 494 if mq1:
497 495 # avoid spurious print if empty marquees are used
498 496 print()
499 497 print(mq1)
500 498 print(self.marquee('Use <demo_name>.reset() if you want to rerun it.'))
501 499 self.finished = True
502 500
503 501 # These methods are meant to be overridden by subclasses who may wish to
504 502     # customize the behavior of their demos.
505 503 def marquee(self,txt='',width=78,mark='*'):
506 504 """Return the input string centered in a 'marquee'."""
507 505 return marquee(txt,width,mark)
508 506
509 507 def pre_cmd(self):
510 508 """Method called before executing each block."""
511 509 pass
512 510
513 511 def post_cmd(self):
514 512 """Method called after executing each block."""
515 513 pass
516 514
517 515 def highlight(self, block):
518 516 """Method called on each block to highlight it content"""
519 517 tokens = pygments.lex(block, self.python_lexer)
520 518 if self.format_rst:
521 519 from pygments.token import Token
522 520 toks = []
523 521 for token in tokens:
524 522 if token[0] == Token.String.Doc and len(token[1]) > 6:
525 523 toks += pygments.lex(token[1][:3], self.python_lexer)
526 524 # parse doc string content by rst lexer
527 525 toks += pygments.lex(token[1][3:-3], self.rst_lexer)
528 526 toks += pygments.lex(token[1][-3:], self.python_lexer)
529 527 elif token[0] == Token.Comment.Single:
530 528 toks.append((Token.Comment.Single, token[1][0]))
531 529 # parse comment content by rst lexer
532 530                     # remove the extra newline added by the rst lexer
533 531 toks += list(pygments.lex(token[1][1:], self.rst_lexer))[:-1]
534 532 else:
535 533 toks.append(token)
536 534 tokens = toks
537 535 return pygments.format(tokens, self.formatter)
538 536
539 537
540 538 class IPythonDemo(Demo):
541 539 """Class for interactive demos with IPython's input processing applied.
542 540
543 541 This subclasses Demo, but instead of executing each block by the Python
544 542 interpreter (via exec), it actually calls IPython on it, so that any input
545 543 filters which may be in place are applied to the input block.
546 544
547 545 If you have an interactive environment which exposes special input
548 546 processing, you can use this class instead to write demo scripts which
549 547 operate exactly as if you had typed them interactively. The default Demo
550 548 class requires the input to be valid, pure Python code.
551 549 """
552 550
553 551 def run_cell(self,source):
554 552 """Execute a string with one or more lines of code"""
555 553
556 554 self.shell.run_cell(source)
557 555
558 556 class LineDemo(Demo):
559 557 """Demo where each line is executed as a separate block.
560 558
561 559 The input script should be valid Python code.
562 560
563 561 This class doesn't require any markup at all, and it's meant for simple
564 562 scripts (with no nesting or any kind of indentation) which consist of
565 563 multiple lines of input to be executed, one at a time, as if they had been
566 564 typed in the interactive prompt.
567 565
568 566     Note: the input cannot have *any* indentation, which means that only
569 567     single lines of input are accepted; not even function definitions are
570 568     valid."""
571 569
572 570 def reload(self):
573 571 """Reload source from disk and initialize state."""
574 572 # read data and parse into blocks
575 573 self.fload()
576 574 lines = self.fobj.readlines()
577 575 src_b = [l for l in lines if l.strip()]
578 576 nblocks = len(src_b)
579 577 self.src = ''.join(lines)
580 578 self._silent = [False]*nblocks
581 579 self._auto = [True]*nblocks
582 580 self.auto_all = True
583 581 self.nblocks = nblocks
584 582 self.src_blocks = src_b
585 583
586 584 # also build syntax-highlighted source
587 585 self.src_blocks_colored = list(map(self.highlight,self.src_blocks))
588 586
589 587 # ensure clean namespace and seek offset
590 588 self.reset()
591 589
592 590
593 591 class IPythonLineDemo(IPythonDemo,LineDemo):
594 592 """Variant of the LineDemo class whose input is processed by IPython."""
595 593 pass
596 594
597 595
598 596 class ClearMixin(object):
599 597 """Use this mixin to make Demo classes with less visual clutter.
600 598
601 599 Demos using this mixin will clear the screen before every block and use
602 600 blank marquees.
603 601
604 602 Note that in order for the methods defined here to actually override those
605 603 of the classes it's mixed with, it must go /first/ in the inheritance
606 604 tree. For example:
607 605
608 606 class ClearIPDemo(ClearMixin,IPythonDemo): pass
609 607
610 608 will provide an IPythonDemo class with the mixin's features.
611 609 """
612 610
613 611 def marquee(self,txt='',width=78,mark='*'):
614 612 """Blank marquee that returns '' no matter what the input."""
615 613 return ''
616 614
617 615 def pre_cmd(self):
618 616 """Method called before executing each block.
619 617
620 618 This one simply clears the screen."""
621 619 from IPython.utils.terminal import _term_clear
622 620 _term_clear()
623 621
624 622 class ClearDemo(ClearMixin,Demo):
625 623 pass
626 624
627 625
628 626 class ClearIPDemo(ClearMixin,IPythonDemo):
629 627 pass
630 628
631 629
632 630 def slide(file_path, noclear=False, format_rst=True, formatter="terminal",
633 631 style="native", auto_all=False, delimiter='...'):
634 632 if noclear:
635 633 demo_class = Demo
636 634 else:
637 635 demo_class = ClearDemo
638 636 demo = demo_class(file_path, format_rst=format_rst, formatter=formatter,
639 637 style=style, auto_all=auto_all)
640 638 while not demo.finished:
641 639 demo()
642 640 try:
643 641 py3compat.input('\n' + delimiter)
644 642 except KeyboardInterrupt:
645 643 exit(1)
646 644
647 645 if __name__ == '__main__':
648 646 import argparse
649 647 parser = argparse.ArgumentParser(description='Run python demos')
650 648 parser.add_argument('--noclear', '-C', action='store_true',
651 649 help='Do not clear terminal on each slide')
652 650 parser.add_argument('--rst', '-r', action='store_true',
653 651                         help='Highlight comments and docstrings as rst'
654 652 parser.add_argument('--formatter', '-f', default='terminal',
655 653                         help='pygments formatter name, e.g.: terminal, '
656 654 'terminal256, terminal16m')
657 655 parser.add_argument('--style', '-s', default='default',
658 656 help='pygments style name')
659 657 parser.add_argument('--auto', '-a', action='store_true',
660 658                         help='Run all blocks automatically without '
661 659 'confirmation')
662 660 parser.add_argument('--delimiter', '-d', default='...',
663 661 help='slides delimiter added after each slide run')
664 662 parser.add_argument('file', nargs=1,
665 663 help='python demo file')
666 664 args = parser.parse_args()
667 665 slide(args.file[0], noclear=args.noclear, format_rst=args.rst,
668 666 formatter=args.formatter, style=args.style, auto_all=args.auto,
669 667 delimiter=args.delimiter)
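The slide() wrapper and the argparse front-end above can also be driven from a shell; a hedged sketch of a typical invocation (my_talk.py is a placeholder file name, and this assumes the module is runnable as IPython.lib.demo):

    python -m IPython.lib.demo --rst --style native my_talk.py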
@@ -1,129 +1,128 b''
1 1 """ 'editor' hooks for common editors that work well with ipython
2 2
3 3 They should honor the line number argument, at least.
4 4
5 5 Contributions are *very* welcome.
6 6 """
7 from __future__ import print_function
8 7
9 8 import os
10 9 import pipes
11 10 import shlex
12 11 import subprocess
13 12 import sys
14 13
15 14 from IPython import get_ipython
16 15 from IPython.core.error import TryNext
17 16 from IPython.utils import py3compat
18 17
19 18
20 19 def install_editor(template, wait=False):
21 20 """Installs the editor that is called by IPython for the %edit magic.
22 21
23 22 This overrides the default editor, which is generally set by your EDITOR
24 23 environment variable or is notepad (windows) or vi (linux). By supplying a
25 24     template string, you can control how the editor is invoked
26 25     by IPython (e.g. the format in which it accepts command line options).
27 26
28 27 Parameters
29 28 ----------
30 29 template : basestring
31 30         This string acts as a template for how your editor is invoked by
32 31         the shell. It should contain '{filename}', which will be replaced
33 32         on invocation with the file name, and '{line}', which will be
34 33         replaced by the line number (or 0) to open the file at.
35 34 wait : bool
36 35 If `wait` is true, wait until the user presses enter before returning,
37 36 to facilitate non-blocking editors that exit immediately after
38 37 the call.
39 38 """
40 39
41 40 # not all editors support $line, so we'll leave out this check
42 41 # for substitution in ['$file', '$line']:
43 42 # if not substitution in run_template:
44 43 # raise ValueError(('run_template should contain %s'
45 44 # ' for string substitution. You supplied "%s"' % (substitution,
46 45 # run_template)))
47 46
48 47 def call_editor(self, filename, line=0):
49 48 if line is None:
50 49 line = 0
51 50 cmd = template.format(filename=pipes.quote(filename), line=line)
52 51 print(">", cmd)
53 52 # pipes.quote doesn't work right on Windows, but it does after splitting
54 53 if sys.platform.startswith('win'):
55 54 cmd = shlex.split(cmd)
56 55 proc = subprocess.Popen(cmd, shell=True)
57 56 if wait and proc.wait() != 0:
58 57 raise TryNext()
59 58 if wait:
60 59 py3compat.input("Press Enter when done editing:")
61 60
62 61 get_ipython().set_hook('editor', call_editor)
63 62 get_ipython().editor = template
64 63
65 64
66 65 # in these, exe is always the path/name of the executable. Useful
67 66 # if you don't have the editor directory in your path
68 67 def komodo(exe=u'komodo'):
69 68 """ Activestate Komodo [Edit] """
70 69 install_editor(exe + u' -l {line} {filename}', wait=True)
71 70
72 71
73 72 def scite(exe=u"scite"):
74 73 """ SciTE or Sc1 """
75 74 install_editor(exe + u' {filename} -goto:{line}')
76 75
77 76
78 77 def notepadplusplus(exe=u'notepad++'):
79 78 """ Notepad++ http://notepad-plus.sourceforge.net """
80 79 install_editor(exe + u' -n{line} {filename}')
81 80
82 81
83 82 def jed(exe=u'jed'):
84 83 """ JED, the lightweight emacsish editor """
85 84 install_editor(exe + u' +{line} {filename}')
86 85
87 86
88 87 def idle(exe=u'idle'):
89 88 """ Idle, the editor bundled with python
90 89
91 90 Parameters
92 91 ----------
93 92 exe : str, None
94 93         If None, the idle.py script bundled with idlelib is located and used.
95 94 """
96 95 if exe is None:
97 96 import idlelib
98 97         p = os.path.dirname(idlelib.__file__)
99 98         # i'm not sure if this actually works. Is this idle.py script
100 99         # guaranteed to be executable?
101 100 exe = os.path.join(p, 'idle.py')
102 101 install_editor(exe + u' {filename}')
103 102
104 103
105 104 def mate(exe=u'mate'):
106 105 """ TextMate, the missing editor"""
107 106 # wait=True is not required since we're using the -w flag to mate
108 107 install_editor(exe + u' -w -l {line} {filename}')
109 108
110 109
111 110 # ##########################################
112 111 # these are untested, report any problems
113 112 # ##########################################
114 113
115 114
116 115 def emacs(exe=u'emacs'):
117 116 install_editor(exe + u' +{line} {filename}')
118 117
119 118
120 119 def gnuclient(exe=u'gnuclient'):
121 120 install_editor(exe + u' -nw +{line} {filename}')
122 121
123 122
124 123 def crimson_editor(exe=u'cedt.exe'):
125 124 install_editor(exe + u' /L:{line} {filename}')
126 125
127 126
128 127 def kate(exe=u'kate'):
129 128 install_editor(exe + u' -u -l {line} {filename}')
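For an editor not covered above, the same template mechanism can be used directly; a minimal sketch (the editor name and its '+line' syntax are hypothetical, and this must run inside an IPython session because install_editor() calls get_ipython()):

    from IPython.lib.editorhooks import install_editor

    # assumes the editor accepts "<exe> +<line> <file>" on its command line
    install_editor(u'myeditor +{line} {filename}', wait=False)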
@@ -1,173 +1,172 b''
1 1 # coding: utf-8
2 2 """
3 3 GLUT Inputhook support functions
4 4 """
5 from __future__ import print_function
6 5
7 6 #-----------------------------------------------------------------------------
8 7 # Copyright (C) 2008-2011 The IPython Development Team
9 8 #
10 9 # Distributed under the terms of the BSD License. The full license is in
11 10 # the file COPYING, distributed as part of this software.
12 11 #-----------------------------------------------------------------------------
13 12
14 13 # GLUT is quite an old library and it is difficult to ensure proper
15 14 # integration within IPython since original GLUT does not allow handling
16 15 # events one by one. Instead, it requires the mainloop to be entered
17 16 # and never returned (there is not even a function to exit the
18 17 # mainloop). Fortunately, there are alternatives such as freeglut
19 18 # (available for linux and windows), and the OSX implementation gives
20 19 # access to a glutCheckLoop() function that blocks until a new
21 20 # event is received. This means we have to set up the idle callback to
22 21 # ensure we get at least one event that will unblock the function.
23 22 #
24 23 # Furthermore, it is not possible to install these handlers without a window
25 24 # being first created. We choose to make this window invisible. This means that
26 25 # display mode options are set at this level and the user won't be able to change
27 26 # them later without modifying the code. This should probably be made available
28 27 # via IPython options system.
29 28
30 29 #-----------------------------------------------------------------------------
31 30 # Imports
32 31 #-----------------------------------------------------------------------------
33 32 import os
34 33 import sys
35 34 import time
36 35 import signal
37 36 import OpenGL.GLUT as glut
38 37 import OpenGL.platform as platform
39 38 from timeit import default_timer as clock
40 39
41 40 #-----------------------------------------------------------------------------
42 41 # Constants
43 42 #-----------------------------------------------------------------------------
44 43
45 44 # Frames per second : 60
46 45 # Should probably be an IPython option
47 46 glut_fps = 60
48 47
49 48
50 49 # Display mode : double buffered + rgba + depth
51 50 # Should probably be an IPython option
52 51 glut_display_mode = (glut.GLUT_DOUBLE |
53 52 glut.GLUT_RGBA |
54 53 glut.GLUT_DEPTH)
55 54
56 55 glutMainLoopEvent = None
57 56 if sys.platform == 'darwin':
58 57 try:
59 58 glutCheckLoop = platform.createBaseFunction(
60 59 'glutCheckLoop', dll=platform.GLUT, resultType=None,
61 60 argTypes=[],
62 61 doc='glutCheckLoop( ) -> None',
63 62 argNames=(),
64 63 )
65 64 except AttributeError:
66 65 raise RuntimeError(
67 66 '''Your glut implementation does not allow interactive sessions'''
68 67 '''Consider installing freeglut.''')
69 68 glutMainLoopEvent = glutCheckLoop
70 69 elif glut.HAVE_FREEGLUT:
71 70 glutMainLoopEvent = glut.glutMainLoopEvent
72 71 else:
73 72 raise RuntimeError(
74 73 '''Your glut implementation does not allow interactive sessions. '''
75 74 '''Consider installing freeglut.''')
76 75
77 76
78 77 #-----------------------------------------------------------------------------
79 78 # Platform-dependent imports and functions
80 79 #-----------------------------------------------------------------------------
81 80
82 81 if os.name == 'posix':
83 82 import select
84 83
85 84 def stdin_ready():
86 85 infds, outfds, erfds = select.select([sys.stdin],[],[],0)
87 86 if infds:
88 87 return True
89 88 else:
90 89 return False
91 90
92 91 elif sys.platform == 'win32':
93 92 import msvcrt
94 93
95 94 def stdin_ready():
96 95 return msvcrt.kbhit()
97 96
98 97 #-----------------------------------------------------------------------------
99 98 # Callback functions
100 99 #-----------------------------------------------------------------------------
101 100
102 101 def glut_display():
103 102 # Dummy display function
104 103 pass
105 104
106 105 def glut_idle():
107 106 # Dummy idle function
108 107 pass
109 108
110 109 def glut_close():
111 110 # Close function only hides the current window
112 111 glut.glutHideWindow()
113 112 glutMainLoopEvent()
114 113
115 114 def glut_int_handler(signum, frame):
116 115     # Catch sigint and print the default message
117 116 signal.signal(signal.SIGINT, signal.default_int_handler)
118 117 print('\nKeyboardInterrupt')
119 118 # Need to reprint the prompt at this stage
120 119
121 120
122 121
123 122 #-----------------------------------------------------------------------------
124 123 # Code
125 124 #-----------------------------------------------------------------------------
126 125 def inputhook_glut():
127 126 """Run the pyglet event loop by processing pending events only.
128 127
129 128 This keeps processing pending events until stdin is ready. After
130 129 processing all pending events, a call to time.sleep is inserted. This is
131 130     needed; otherwise, CPU usage stays at 100%. This sleep time should be tuned
132 131 though for best performance.
133 132 """
134 133 # We need to protect against a user pressing Control-C when IPython is
135 134 # idle and this is running. We trap KeyboardInterrupt and pass.
136 135
137 136 signal.signal(signal.SIGINT, glut_int_handler)
138 137
139 138 try:
140 139 t = clock()
141 140
142 141 # Make sure the default window is set after a window has been closed
143 142 if glut.glutGetWindow() == 0:
144 143 glut.glutSetWindow( 1 )
145 144 glutMainLoopEvent()
146 145 return 0
147 146
148 147 while not stdin_ready():
149 148 glutMainLoopEvent()
150 149 # We need to sleep at this point to keep the idle CPU load
151 150             # low. However, if we sleep too long, GUI response is poor. As
152 151 # a compromise, we watch how often GUI events are being processed
153 152 # and switch between a short and long sleep time. Here are some
154 153 # stats useful in helping to tune this.
155 154 # time CPU load
156 155 # 0.001 13%
157 156 # 0.005 3%
158 157 # 0.01 1.5%
159 158 # 0.05 0.5%
160 159 used_time = clock() - t
161 160 if used_time > 10.0:
162 161 # print 'Sleep for 1 s' # dbg
163 162 time.sleep(1.0)
164 163 elif used_time > 0.1:
165 164 # Few GUI events coming in, so we can sleep longer
166 165 # print 'Sleep for 0.05 s' # dbg
167 166 time.sleep(0.05)
168 167 else:
169 168 # Many GUI events coming in, so sleep only very little
170 169 time.sleep(0.001)
171 170 except KeyboardInterrupt:
172 171 pass
173 172 return 0
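In practice inputhook_glut() is registered by IPython's input-hook machinery rather than called directly; a hedged sketch of enabling it from a session (assuming 'glut' is still a recognised %gui backend in your IPython version):

    %gui glut                      # ask IPython to pump GLUT events between prompts
    import OpenGL.GLUT as glut     # then create windows and register callbacks as usual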
@@ -1,868 +1,867 b''
1 1 # -*- coding: utf-8 -*-
2 2 """
3 3 Python advanced pretty printer. This pretty printer is intended to
4 4 replace the old `pprint` python module which does not allow developers
5 5 to provide their own pretty print callbacks.
6 6
7 7 This module is based on ruby's `prettyprint.rb` library by `Tanaka Akira`.
8 8
9 9
10 10 Example Usage
11 11 -------------
12 12
13 13 To directly print the representation of an object use `pprint`::
14 14
15 15 from pretty import pprint
16 16 pprint(complex_object)
17 17
18 18 To get a string of the output use `pretty`::
19 19
20 20 from pretty import pretty
21 21 string = pretty(complex_object)
22 22
23 23
24 24 Extending
25 25 ---------
26 26
27 27 The pretty library allows developers to add pretty printing rules for their
28 28 own objects. This process is straightforward. All you have to do is to
29 29 add a `_repr_pretty_` method to your object and call the methods on the
30 30 pretty printer passed::
31 31
32 32 class MyObject(object):
33 33
34 34 def _repr_pretty_(self, p, cycle):
35 35 ...
36 36
37 37 Here is an example implementation of a `_repr_pretty_` method for a list
38 38 subclass::
39 39
40 40 class MyList(list):
41 41
42 42 def _repr_pretty_(self, p, cycle):
43 43 if cycle:
44 44 p.text('MyList(...)')
45 45 else:
46 46 with p.group(8, 'MyList([', '])'):
47 47 for idx, item in enumerate(self):
48 48 if idx:
49 49 p.text(',')
50 50 p.breakable()
51 51 p.pretty(item)
52 52
53 53 The `cycle` parameter is `True` if pretty detected a cycle. You *have* to
54 54 react to that or the result is an infinite loop. `p.text()` just adds
55 55 non-breaking text to the output, `p.breakable()` either adds a space
56 56 or breaks here. If you pass it an argument it's used instead of the
57 57 default space. `p.pretty` prettyprints another object using the pretty print
58 58 method.
59 59
60 60 The first parameter to the `group` function specifies the extra indentation
61 61 of the next line. In this example the next item will either be on the same
62 62 line (if the items are short enough) or aligned with the right edge of the
63 63 opening bracket of `MyList`.
64 64
65 65 If you just want to indent something you can use the group function
66 66 without open / close parameters. You can also use this code::
67 67
68 68 with p.indent(2):
69 69 ...
70 70
71 71 Inheritance diagram:
72 72
73 73 .. inheritance-diagram:: IPython.lib.pretty
74 74 :parts: 3
75 75
76 76 :copyright: 2007 by Armin Ronacher.
77 77 Portions (c) 2009 by Robert Kern.
78 78 :license: BSD License.
79 79 """
80 from __future__ import print_function
81 80 from contextlib import contextmanager
82 81 import sys
83 82 import types
84 83 import re
85 84 import datetime
86 85 from collections import deque
87 86
88 87 from IPython.utils.py3compat import PY3, PYPY, cast_unicode, string_types
89 88 from IPython.utils.encoding import get_stream_enc
90 89
91 90 from io import StringIO
92 91
93 92
94 93 __all__ = ['pretty', 'pprint', 'PrettyPrinter', 'RepresentationPrinter',
95 94 'for_type', 'for_type_by_name']
96 95
97 96
98 97 MAX_SEQ_LENGTH = 1000
99 98 _re_pattern_type = type(re.compile(''))
100 99
101 100 def _safe_getattr(obj, attr, default=None):
102 101 """Safe version of getattr.
103 102
104 103 Same as getattr, but will return ``default`` on any Exception,
105 104 rather than raising.
106 105 """
107 106 try:
108 107 return getattr(obj, attr, default)
109 108 except Exception:
110 109 return default
111 110
112 111 if PY3:
113 112 CUnicodeIO = StringIO
114 113 else:
115 114 class CUnicodeIO(StringIO):
116 115 """StringIO that casts str to unicode on Python 2"""
117 116 def write(self, text):
118 117 return super(CUnicodeIO, self).write(
119 118 cast_unicode(text, encoding=get_stream_enc(sys.stdout)))
120 119
121 120
122 121 def pretty(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH):
123 122 """
124 123 Pretty print the object's representation.
125 124 """
126 125 stream = CUnicodeIO()
127 126 printer = RepresentationPrinter(stream, verbose, max_width, newline, max_seq_length=max_seq_length)
128 127 printer.pretty(obj)
129 128 printer.flush()
130 129 return stream.getvalue()
131 130
132 131
133 132 def pprint(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH):
134 133 """
135 134 Like `pretty` but print to stdout.
136 135 """
137 136 printer = RepresentationPrinter(sys.stdout, verbose, max_width, newline, max_seq_length=max_seq_length)
138 137 printer.pretty(obj)
139 138 printer.flush()
140 139 sys.stdout.write(newline)
141 140 sys.stdout.flush()
142 141
143 142 class _PrettyPrinterBase(object):
144 143
145 144 @contextmanager
146 145 def indent(self, indent):
147 146 """with statement support for indenting/dedenting."""
148 147 self.indentation += indent
149 148 try:
150 149 yield
151 150 finally:
152 151 self.indentation -= indent
153 152
154 153 @contextmanager
155 154 def group(self, indent=0, open='', close=''):
156 155 """like begin_group / end_group but for the with statement."""
157 156 self.begin_group(indent, open)
158 157 try:
159 158 yield
160 159 finally:
161 160 self.end_group(indent, close)
162 161
163 162 class PrettyPrinter(_PrettyPrinterBase):
164 163 """
165 164 Baseclass for the `RepresentationPrinter` prettyprinter that is used to
166 165     generate pretty reprs of objects. Unlike the `RepresentationPrinter`,
167 166     this printer knows nothing about the default pprinters or the `_repr_pretty_`
168 167 callback method.
169 168 """
170 169
171 170 def __init__(self, output, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH):
172 171 self.output = output
173 172 self.max_width = max_width
174 173 self.newline = newline
175 174 self.max_seq_length = max_seq_length
176 175 self.output_width = 0
177 176 self.buffer_width = 0
178 177 self.buffer = deque()
179 178
180 179 root_group = Group(0)
181 180 self.group_stack = [root_group]
182 181 self.group_queue = GroupQueue(root_group)
183 182 self.indentation = 0
184 183
185 184 def _break_outer_groups(self):
186 185 while self.max_width < self.output_width + self.buffer_width:
187 186 group = self.group_queue.deq()
188 187 if not group:
189 188 return
190 189 while group.breakables:
191 190 x = self.buffer.popleft()
192 191 self.output_width = x.output(self.output, self.output_width)
193 192 self.buffer_width -= x.width
194 193 while self.buffer and isinstance(self.buffer[0], Text):
195 194 x = self.buffer.popleft()
196 195 self.output_width = x.output(self.output, self.output_width)
197 196 self.buffer_width -= x.width
198 197
199 198 def text(self, obj):
200 199 """Add literal text to the output."""
201 200 width = len(obj)
202 201 if self.buffer:
203 202 text = self.buffer[-1]
204 203 if not isinstance(text, Text):
205 204 text = Text()
206 205 self.buffer.append(text)
207 206 text.add(obj, width)
208 207 self.buffer_width += width
209 208 self._break_outer_groups()
210 209 else:
211 210 self.output.write(obj)
212 211 self.output_width += width
213 212
214 213 def breakable(self, sep=' '):
215 214 """
216 215 Add a breakable separator to the output. This does not mean that it
217 216         will automatically break here. If no break takes place at this
218 217         position, the `sep` is inserted, which defaults to one space.
219 218 """
220 219 width = len(sep)
221 220 group = self.group_stack[-1]
222 221 if group.want_break:
223 222 self.flush()
224 223 self.output.write(self.newline)
225 224 self.output.write(' ' * self.indentation)
226 225 self.output_width = self.indentation
227 226 self.buffer_width = 0
228 227 else:
229 228 self.buffer.append(Breakable(sep, width, self))
230 229 self.buffer_width += width
231 230 self._break_outer_groups()
232 231
233 232 def break_(self):
234 233 """
235 234 Explicitly insert a newline into the output, maintaining correct indentation.
236 235 """
237 236 self.flush()
238 237 self.output.write(self.newline)
239 238 self.output.write(' ' * self.indentation)
240 239 self.output_width = self.indentation
241 240 self.buffer_width = 0
242 241
243 242
244 243 def begin_group(self, indent=0, open=''):
245 244 """
246 245         Begin a group. If you want support for python < 2.5, which doesn't
247 246         have the with statement, this is the preferred way:
248 247
249 248 p.begin_group(1, '{')
250 249 ...
251 250 p.end_group(1, '}')
252 251
253 252 The python 2.5 expression would be this:
254 253
255 254 with p.group(1, '{', '}'):
256 255 ...
257 256
258 257 The first parameter specifies the indentation for the next line (usually
259 258 the width of the opening text), the second the opening text. All
260 259 parameters are optional.
261 260 """
262 261 if open:
263 262 self.text(open)
264 263 group = Group(self.group_stack[-1].depth + 1)
265 264 self.group_stack.append(group)
266 265 self.group_queue.enq(group)
267 266 self.indentation += indent
268 267
269 268 def _enumerate(self, seq):
270 269 """like enumerate, but with an upper limit on the number of items"""
271 270 for idx, x in enumerate(seq):
272 271 if self.max_seq_length and idx >= self.max_seq_length:
273 272 self.text(',')
274 273 self.breakable()
275 274 self.text('...')
276 275 return
277 276 yield idx, x
278 277
279 278 def end_group(self, dedent=0, close=''):
280 279 """End a group. See `begin_group` for more details."""
281 280 self.indentation -= dedent
282 281 group = self.group_stack.pop()
283 282 if not group.breakables:
284 283 self.group_queue.remove(group)
285 284 if close:
286 285 self.text(close)
287 286
288 287 def flush(self):
289 288 """Flush data that is left in the buffer."""
290 289 for data in self.buffer:
291 290 self.output_width += data.output(self.output, self.output_width)
292 291 self.buffer.clear()
293 292 self.buffer_width = 0
294 293
295 294
296 295 def _get_mro(obj_class):
297 296 """ Get a reasonable method resolution order of a class and its superclasses
298 297 for both old-style and new-style classes.
299 298 """
300 299 if not hasattr(obj_class, '__mro__'):
301 300 # Old-style class. Mix in object to make a fake new-style class.
302 301 try:
303 302 obj_class = type(obj_class.__name__, (obj_class, object), {})
304 303 except TypeError:
305 304 # Old-style extension type that does not descend from object.
306 305 # FIXME: try to construct a more thorough MRO.
307 306 mro = [obj_class]
308 307 else:
309 308 mro = obj_class.__mro__[1:-1]
310 309 else:
311 310 mro = obj_class.__mro__
312 311 return mro
313 312
314 313
315 314 class RepresentationPrinter(PrettyPrinter):
316 315 """
317 316 Special pretty printer that has a `pretty` method that calls the pretty
318 317 printer for a python object.
319 318
320 319 This class stores processing data on `self` so you must *never* use
321 320     this class in a threaded environment. Always lock it or reinstantiate
322 321 it.
323 322
324 323 Instances also have a verbose flag callbacks can access to control their
325 324 output. For example the default instance repr prints all attributes and
326 325 methods that are not prefixed by an underscore if the printer is in
327 326 verbose mode.
328 327 """
329 328
330 329 def __init__(self, output, verbose=False, max_width=79, newline='\n',
331 330 singleton_pprinters=None, type_pprinters=None, deferred_pprinters=None,
332 331 max_seq_length=MAX_SEQ_LENGTH):
333 332
334 333 PrettyPrinter.__init__(self, output, max_width, newline, max_seq_length=max_seq_length)
335 334 self.verbose = verbose
336 335 self.stack = []
337 336 if singleton_pprinters is None:
338 337 singleton_pprinters = _singleton_pprinters.copy()
339 338 self.singleton_pprinters = singleton_pprinters
340 339 if type_pprinters is None:
341 340 type_pprinters = _type_pprinters.copy()
342 341 self.type_pprinters = type_pprinters
343 342 if deferred_pprinters is None:
344 343 deferred_pprinters = _deferred_type_pprinters.copy()
345 344 self.deferred_pprinters = deferred_pprinters
346 345
347 346 def pretty(self, obj):
348 347 """Pretty print the given object."""
349 348 obj_id = id(obj)
350 349 cycle = obj_id in self.stack
351 350 self.stack.append(obj_id)
352 351 self.begin_group()
353 352 try:
354 353 obj_class = _safe_getattr(obj, '__class__', None) or type(obj)
355 354 # First try to find registered singleton printers for the type.
356 355 try:
357 356 printer = self.singleton_pprinters[obj_id]
358 357 except (TypeError, KeyError):
359 358 pass
360 359 else:
361 360 return printer(obj, self, cycle)
362 361 # Next walk the mro and check for either:
363 362 # 1) a registered printer
364 363 # 2) a _repr_pretty_ method
365 364 for cls in _get_mro(obj_class):
366 365 if cls in self.type_pprinters:
367 366 # printer registered in self.type_pprinters
368 367 return self.type_pprinters[cls](obj, self, cycle)
369 368 else:
370 369 # deferred printer
371 370 printer = self._in_deferred_types(cls)
372 371 if printer is not None:
373 372 return printer(obj, self, cycle)
374 373 else:
375 374 # Finally look for special method names.
376 375 # Some objects automatically create any requested
377 376 # attribute. Try to ignore most of them by checking for
378 377 # callability.
379 378 if '_repr_pretty_' in cls.__dict__:
380 379 meth = cls._repr_pretty_
381 380 if callable(meth):
382 381 return meth(obj, self, cycle)
383 382 return _default_pprint(obj, self, cycle)
384 383 finally:
385 384 self.end_group()
386 385 self.stack.pop()
387 386
388 387 def _in_deferred_types(self, cls):
389 388 """
390 389 Check if the given class is specified in the deferred type registry.
391 390
392 391 Returns the printer from the registry if it exists, and None if the
393 392 class is not in the registry. Successful matches will be moved to the
394 393 regular type registry for future use.
395 394 """
396 395 mod = _safe_getattr(cls, '__module__', None)
397 396 name = _safe_getattr(cls, '__name__', None)
398 397 key = (mod, name)
399 398 printer = None
400 399 if key in self.deferred_pprinters:
401 400 # Move the printer over to the regular registry.
402 401 printer = self.deferred_pprinters.pop(key)
403 402 self.type_pprinters[cls] = printer
404 403 return printer
405 404
406 405
407 406 class Printable(object):
408 407
409 408 def output(self, stream, output_width):
410 409 return output_width
411 410
412 411
413 412 class Text(Printable):
414 413
415 414 def __init__(self):
416 415 self.objs = []
417 416 self.width = 0
418 417
419 418 def output(self, stream, output_width):
420 419 for obj in self.objs:
421 420 stream.write(obj)
422 421 return output_width + self.width
423 422
424 423 def add(self, obj, width):
425 424 self.objs.append(obj)
426 425 self.width += width
427 426
428 427
429 428 class Breakable(Printable):
430 429
431 430 def __init__(self, seq, width, pretty):
432 431 self.obj = seq
433 432 self.width = width
434 433 self.pretty = pretty
435 434 self.indentation = pretty.indentation
436 435 self.group = pretty.group_stack[-1]
437 436 self.group.breakables.append(self)
438 437
439 438 def output(self, stream, output_width):
440 439 self.group.breakables.popleft()
441 440 if self.group.want_break:
442 441 stream.write(self.pretty.newline)
443 442 stream.write(' ' * self.indentation)
444 443 return self.indentation
445 444 if not self.group.breakables:
446 445 self.pretty.group_queue.remove(self.group)
447 446 stream.write(self.obj)
448 447 return output_width + self.width
449 448
450 449
451 450 class Group(Printable):
452 451
453 452 def __init__(self, depth):
454 453 self.depth = depth
455 454 self.breakables = deque()
456 455 self.want_break = False
457 456
458 457
459 458 class GroupQueue(object):
460 459
461 460 def __init__(self, *groups):
462 461 self.queue = []
463 462 for group in groups:
464 463 self.enq(group)
465 464
466 465 def enq(self, group):
467 466 depth = group.depth
468 467 while depth > len(self.queue) - 1:
469 468 self.queue.append([])
470 469 self.queue[depth].append(group)
471 470
472 471 def deq(self):
473 472 for stack in self.queue:
474 473 for idx, group in enumerate(reversed(stack)):
475 474 if group.breakables:
476 475 del stack[idx]
477 476 group.want_break = True
478 477 return group
479 478 for group in stack:
480 479 group.want_break = True
481 480 del stack[:]
482 481
483 482 def remove(self, group):
484 483 try:
485 484 self.queue[group.depth].remove(group)
486 485 except ValueError:
487 486 pass
488 487
489 488 try:
490 489 _baseclass_reprs = (object.__repr__, types.InstanceType.__repr__)
491 490 except AttributeError: # Python 3
492 491 _baseclass_reprs = (object.__repr__,)
493 492
494 493
495 494 def _default_pprint(obj, p, cycle):
496 495 """
497 496 The default print function. Used if an object does not provide one and
498 497 it's none of the builtin objects.
499 498 """
500 499 klass = _safe_getattr(obj, '__class__', None) or type(obj)
501 500 if _safe_getattr(klass, '__repr__', None) not in _baseclass_reprs:
502 501 # A user-provided repr. Find newlines and replace them with p.break_()
503 502 _repr_pprint(obj, p, cycle)
504 503 return
505 504 p.begin_group(1, '<')
506 505 p.pretty(klass)
507 506 p.text(' at 0x%x' % id(obj))
508 507 if cycle:
509 508 p.text(' ...')
510 509 elif p.verbose:
511 510 first = True
512 511 for key in dir(obj):
513 512 if not key.startswith('_'):
514 513 try:
515 514 value = getattr(obj, key)
516 515 except AttributeError:
517 516 continue
518 517 if isinstance(value, types.MethodType):
519 518 continue
520 519 if not first:
521 520 p.text(',')
522 521 p.breakable()
523 522 p.text(key)
524 523 p.text('=')
525 524 step = len(key) + 1
526 525 p.indentation += step
527 526 p.pretty(value)
528 527 p.indentation -= step
529 528 first = False
530 529 p.end_group(1, '>')
531 530
532 531
533 532 def _seq_pprinter_factory(start, end, basetype):
534 533 """
535 534 Factory that returns a pprint function useful for sequences. Used by
536 535 the default pprint for tuples, dicts, and lists.
537 536 """
538 537 def inner(obj, p, cycle):
539 538 typ = type(obj)
540 539 if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__:
541 540 # If the subclass provides its own repr, use it instead.
542 541 return p.text(typ.__repr__(obj))
543 542
544 543 if cycle:
545 544 return p.text(start + '...' + end)
546 545 step = len(start)
547 546 p.begin_group(step, start)
548 547 for idx, x in p._enumerate(obj):
549 548 if idx:
550 549 p.text(',')
551 550 p.breakable()
552 551 p.pretty(x)
553 552 if len(obj) == 1 and type(obj) is tuple:
554 553 # Special case for 1-item tuples.
555 554 p.text(',')
556 555 p.end_group(step, end)
557 556 return inner
558 557
559 558
560 559 def _set_pprinter_factory(start, end, basetype):
561 560 """
562 561 Factory that returns a pprint function useful for sets and frozensets.
563 562 """
564 563 def inner(obj, p, cycle):
565 564 typ = type(obj)
566 565 if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__:
567 566 # If the subclass provides its own repr, use it instead.
568 567 return p.text(typ.__repr__(obj))
569 568
570 569 if cycle:
571 570 return p.text(start + '...' + end)
572 571 if len(obj) == 0:
573 572 # Special case.
574 573 p.text(basetype.__name__ + '()')
575 574 else:
576 575 step = len(start)
577 576 p.begin_group(step, start)
578 577 # Like dictionary keys, we will try to sort the items if there aren't too many
579 578 items = obj
580 579 if not (p.max_seq_length and len(obj) >= p.max_seq_length):
581 580 try:
582 581 items = sorted(obj)
583 582 except Exception:
584 583 # Sometimes the items don't sort.
585 584 pass
586 585 for idx, x in p._enumerate(items):
587 586 if idx:
588 587 p.text(',')
589 588 p.breakable()
590 589 p.pretty(x)
591 590 p.end_group(step, end)
592 591 return inner
593 592
594 593
595 594 def _dict_pprinter_factory(start, end, basetype=None):
596 595 """
597 596 Factory that returns a pprint function used by the default pprint of
598 597 dicts and dict proxies.
599 598 """
600 599 def inner(obj, p, cycle):
601 600 typ = type(obj)
602 601 if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__:
603 602 # If the subclass provides its own repr, use it instead.
604 603 return p.text(typ.__repr__(obj))
605 604
606 605 if cycle:
607 606 return p.text('{...}')
608 607 step = len(start)
609 608 p.begin_group(step, start)
610 609 keys = obj.keys()
611 610 # if dict isn't large enough to be truncated, sort keys before displaying
612 611 if not (p.max_seq_length and len(obj) >= p.max_seq_length):
613 612 try:
614 613 keys = sorted(keys)
615 614 except Exception:
616 615 # Sometimes the keys don't sort.
617 616 pass
618 617 for idx, key in p._enumerate(keys):
619 618 if idx:
620 619 p.text(',')
621 620 p.breakable()
622 621 p.pretty(key)
623 622 p.text(': ')
624 623 p.pretty(obj[key])
625 624 p.end_group(step, end)
626 625 return inner
627 626
628 627
629 628 def _super_pprint(obj, p, cycle):
630 629 """The pprint for the super type."""
631 630 p.begin_group(8, '<super: ')
632 631 p.pretty(obj.__thisclass__)
633 632 p.text(',')
634 633 p.breakable()
635 634 if PYPY: # In PyPy, super() objects don't have __self__ attributes
636 635 dself = obj.__repr__.__self__
637 636 p.pretty(None if dself is obj else dself)
638 637 else:
639 638 p.pretty(obj.__self__)
640 639 p.end_group(8, '>')
641 640
642 641
643 642 def _re_pattern_pprint(obj, p, cycle):
644 643 """The pprint function for regular expression patterns."""
645 644 p.text('re.compile(')
646 645 pattern = repr(obj.pattern)
647 646 if pattern[:1] in 'uU':
648 647 pattern = pattern[1:]
649 648 prefix = 'ur'
650 649 else:
651 650 prefix = 'r'
652 651 pattern = prefix + pattern.replace('\\\\', '\\')
653 652 p.text(pattern)
654 653 if obj.flags:
655 654 p.text(',')
656 655 p.breakable()
657 656 done_one = False
658 657 for flag in ('TEMPLATE', 'IGNORECASE', 'LOCALE', 'MULTILINE', 'DOTALL',
659 658 'UNICODE', 'VERBOSE', 'DEBUG'):
660 659 if obj.flags & getattr(re, flag):
661 660 if done_one:
662 661 p.text('|')
663 662 p.text('re.' + flag)
664 663 done_one = True
665 664 p.text(')')
666 665
667 666
668 667 def _type_pprint(obj, p, cycle):
669 668 """The pprint for classes and types."""
670 669 # Heap allocated types might not have the module attribute,
671 670 # and others may set it to None.
672 671
673 672 # Checks for a __repr__ override in the metaclass. Can't compare the
674 673 # type(obj).__repr__ directly because in PyPy the representation function
675 674 # inherited from type isn't the same type.__repr__
676 675 if [m for m in _get_mro(type(obj)) if "__repr__" in vars(m)][:1] != [type]:
677 676 _repr_pprint(obj, p, cycle)
678 677 return
679 678
680 679 mod = _safe_getattr(obj, '__module__', None)
681 680 try:
682 681 name = obj.__qualname__
683 682 if not isinstance(name, string_types):
684 683 # This can happen if the type implements __qualname__ as a property
685 684 # or other descriptor in Python 2.
686 685 raise Exception("Try __name__")
687 686 except Exception:
688 687 name = obj.__name__
689 688 if not isinstance(name, string_types):
690 689 name = '<unknown type>'
691 690
692 691 if mod in (None, '__builtin__', 'builtins', 'exceptions'):
693 692 p.text(name)
694 693 else:
695 694 p.text(mod + '.' + name)
696 695
697 696
698 697 def _repr_pprint(obj, p, cycle):
699 698 """A pprint that just redirects to the normal repr function."""
700 699 # Find newlines and replace them with p.break_()
701 700 output = repr(obj)
702 701 for idx,output_line in enumerate(output.splitlines()):
703 702 if idx:
704 703 p.break_()
705 704 p.text(output_line)
706 705
707 706
708 707 def _function_pprint(obj, p, cycle):
709 708 """Base pprint for all functions and builtin functions."""
710 709 name = _safe_getattr(obj, '__qualname__', obj.__name__)
711 710 mod = obj.__module__
712 711 if mod and mod not in ('__builtin__', 'builtins', 'exceptions'):
713 712 name = mod + '.' + name
714 713 p.text('<function %s>' % name)
715 714
716 715
717 716 def _exception_pprint(obj, p, cycle):
718 717 """Base pprint for all exceptions."""
719 718 name = getattr(obj.__class__, '__qualname__', obj.__class__.__name__)
720 719 if obj.__class__.__module__ not in ('exceptions', 'builtins'):
721 720 name = '%s.%s' % (obj.__class__.__module__, name)
722 721 step = len(name) + 1
723 722 p.begin_group(step, name + '(')
724 723 for idx, arg in enumerate(getattr(obj, 'args', ())):
725 724 if idx:
726 725 p.text(',')
727 726 p.breakable()
728 727 p.pretty(arg)
729 728 p.end_group(step, ')')
730 729
731 730
732 731 #: the exception base
733 732 try:
734 733 _exception_base = BaseException
735 734 except NameError:
736 735 _exception_base = Exception
737 736
738 737
739 738 #: printers for builtin types
740 739 _type_pprinters = {
741 740 int: _repr_pprint,
742 741 float: _repr_pprint,
743 742 str: _repr_pprint,
744 743 tuple: _seq_pprinter_factory('(', ')', tuple),
745 744 list: _seq_pprinter_factory('[', ']', list),
746 745 dict: _dict_pprinter_factory('{', '}', dict),
747 746
748 747 set: _set_pprinter_factory('{', '}', set),
749 748 frozenset: _set_pprinter_factory('frozenset({', '})', frozenset),
750 749 super: _super_pprint,
751 750 _re_pattern_type: _re_pattern_pprint,
752 751 type: _type_pprint,
753 752 types.FunctionType: _function_pprint,
754 753 types.BuiltinFunctionType: _function_pprint,
755 754 types.MethodType: _repr_pprint,
756 755
757 756 datetime.datetime: _repr_pprint,
758 757 datetime.timedelta: _repr_pprint,
759 758 _exception_base: _exception_pprint
760 759 }
761 760
762 761 try:
763 762 # In PyPy, types.DictProxyType is dict, setting the dictproxy printer
764 763     # using dict.setdefault avoids overwriting the dict printer
765 764 _type_pprinters.setdefault(types.DictProxyType,
766 765 _dict_pprinter_factory('dict_proxy({', '})'))
767 766 _type_pprinters[types.ClassType] = _type_pprint
768 767 _type_pprinters[types.SliceType] = _repr_pprint
769 768 except AttributeError: # Python 3
770 769 _type_pprinters[types.MappingProxyType] = \
771 770 _dict_pprinter_factory('mappingproxy({', '})')
772 771 _type_pprinters[slice] = _repr_pprint
773 772
774 773 try:
775 774 _type_pprinters[xrange] = _repr_pprint
776 775 _type_pprinters[long] = _repr_pprint
777 776 _type_pprinters[unicode] = _repr_pprint
778 777 except NameError:
779 778 _type_pprinters[range] = _repr_pprint
780 779 _type_pprinters[bytes] = _repr_pprint
781 780
782 781 #: printers for types specified by name
783 782 _deferred_type_pprinters = {
784 783 }
785 784
786 785 def for_type(typ, func):
787 786 """
788 787 Add a pretty printer for a given type.
789 788 """
790 789 oldfunc = _type_pprinters.get(typ, None)
791 790 if func is not None:
792 791 # To support easy restoration of old pprinters, we need to ignore Nones.
793 792 _type_pprinters[typ] = func
794 793 return oldfunc
795 794
796 795 def for_type_by_name(type_module, type_name, func):
797 796 """
798 797     Add a pretty printer for a type specified by its module and name rather
799 798     than by the type object itself.
800 799 """
801 800 key = (type_module, type_name)
802 801 oldfunc = _deferred_type_pprinters.get(key, None)
803 802 if func is not None:
804 803 # To support easy restoration of old pprinters, we need to ignore Nones.
805 804 _deferred_type_pprinters[key] = func
806 805 return oldfunc
807 806
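
A quick usage sketch for the two registration hooks above (illustrative only: ``Decimal`` is an arbitrary stand-in type, not one this module registers itself; the sketch assumes the file is importable as ``IPython.lib.pretty``, as the tests further down do):

    from decimal import Decimal
    from IPython.lib import pretty

    def _decimal_pprint(obj, p, cycle):
        # Show Decimal('1.5') rather than the plain repr.
        p.text('Decimal(%r)' % str(obj))

    # Register against the imported type; the previously registered printer
    # (None if there was none) is returned so it can be restored later.
    old = pretty.for_type(Decimal, _decimal_pprint)

    # Or register lazily by module and type name, without importing decimal here.
    pretty.for_type_by_name('decimal', 'Decimal', _decimal_pprint)

    print(pretty.pretty(Decimal('1.5')))   # -> Decimal('1.5')

    if old is not None:
        pretty.for_type(Decimal, old)      # put the earlier printer back
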
808 807
809 808 #: printers for the default singletons
810 809 _singleton_pprinters = dict.fromkeys(map(id, [None, True, False, Ellipsis,
811 810 NotImplemented]), _repr_pprint)
812 811
813 812
814 813 def _defaultdict_pprint(obj, p, cycle):
815 814 name = obj.__class__.__name__
816 815 with p.group(len(name) + 1, name + '(', ')'):
817 816 if cycle:
818 817 p.text('...')
819 818 else:
820 819 p.pretty(obj.default_factory)
821 820 p.text(',')
822 821 p.breakable()
823 822 p.pretty(dict(obj))
824 823
825 824 def _ordereddict_pprint(obj, p, cycle):
826 825 name = obj.__class__.__name__
827 826 with p.group(len(name) + 1, name + '(', ')'):
828 827 if cycle:
829 828 p.text('...')
830 829 elif len(obj):
831 830 p.pretty(list(obj.items()))
832 831
833 832 def _deque_pprint(obj, p, cycle):
834 833 name = obj.__class__.__name__
835 834 with p.group(len(name) + 1, name + '(', ')'):
836 835 if cycle:
837 836 p.text('...')
838 837 else:
839 838 p.pretty(list(obj))
840 839
841 840
842 841 def _counter_pprint(obj, p, cycle):
843 842 name = obj.__class__.__name__
844 843 with p.group(len(name) + 1, name + '(', ')'):
845 844 if cycle:
846 845 p.text('...')
847 846 elif len(obj):
848 847 p.pretty(dict(obj))
849 848
850 849 for_type_by_name('collections', 'defaultdict', _defaultdict_pprint)
851 850 for_type_by_name('collections', 'OrderedDict', _ordereddict_pprint)
852 851 for_type_by_name('collections', 'deque', _deque_pprint)
853 852 for_type_by_name('collections', 'Counter', _counter_pprint)
854 853
855 854 if __name__ == '__main__':
856 855 from random import randrange
857 856 class Foo(object):
858 857 def __init__(self):
859 858 self.foo = 1
860 859 self.bar = re.compile(r'\s+')
861 860 self.blub = dict.fromkeys(range(30), randrange(1, 40))
862 861 self.hehe = 23424.234234
863 862 self.list = ["blub", "blah", self]
864 863
865 864 def get_foo(self):
866 865 print("foo")
867 866
868 867 pprint(Foo(), verbose=True)
@@ -1,89 +1,88 b''
1 1 """Tests for pylab tools module.
2 2 """
3 3 #-----------------------------------------------------------------------------
4 4 # Copyright (c) 2011, the IPython Development Team.
5 5 #
6 6 # Distributed under the terms of the Modified BSD License.
7 7 #
8 8 # The full license is in the file COPYING.txt, distributed with this software.
9 9 #-----------------------------------------------------------------------------
10 10
11 11 #-----------------------------------------------------------------------------
12 12 # Imports
13 13 #-----------------------------------------------------------------------------
14 from __future__ import print_function
15 14
16 15 # Stdlib imports
17 16 import time
18 17
19 18 # Third-party imports
20 19 import nose.tools as nt
21 20
22 21 # Our own imports
23 22 from IPython.lib import backgroundjobs as bg
24 23
25 24 #-----------------------------------------------------------------------------
26 25 # Globals and constants
27 26 #-----------------------------------------------------------------------------
28 27 t_short = 0.0001 # very short interval to wait on jobs
29 28
30 29 #-----------------------------------------------------------------------------
31 30 # Local utilities
32 31 #-----------------------------------------------------------------------------
33 32 def sleeper(interval=t_short, *a, **kw):
34 33 args = dict(interval=interval,
35 34 other_args=a,
36 35 kw_args=kw)
37 36 time.sleep(interval)
38 37 return args
39 38
40 39 def crasher(interval=t_short, *a, **kw):
41 40 time.sleep(interval)
42 41 raise Exception("Dead job with interval %s" % interval)
43 42
44 43 #-----------------------------------------------------------------------------
45 44 # Classes and functions
46 45 #-----------------------------------------------------------------------------
47 46
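A short interactive sketch of the API these tests exercise (hedged and illustrative; it uses only the calls and attributes that appear in the tests below):

    jobs = bg.BackgroundJobManager()
    job = jobs.new(sleeper, 0.05)          # run sleeper(0.05) in the background
    job.join()                             # block until it finishes
    assert job.result['interval'] == 0.05
    assert len(jobs.completed) == 1 and len(jobs.dead) == 0
    jobs.flush()                           # drop finished jobs from the manager
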
48 47 def test_result():
49 48 """Test job submission and result retrieval"""
50 49 jobs = bg.BackgroundJobManager()
51 50 j = jobs.new(sleeper)
52 51 j.join()
53 52 nt.assert_equal(j.result['interval'], t_short)
54 53
55 54
56 55 def test_flush():
57 56 """Test job control"""
58 57 jobs = bg.BackgroundJobManager()
59 58 j = jobs.new(sleeper)
60 59 j.join()
61 60 nt.assert_equal(len(jobs.completed), 1)
62 61 nt.assert_equal(len(jobs.dead), 0)
63 62 jobs.flush()
64 63 nt.assert_equal(len(jobs.completed), 0)
65 64
66 65
67 66 def test_dead():
68 67 """Test control of dead jobs"""
69 68 jobs = bg.BackgroundJobManager()
70 69 j = jobs.new(crasher)
71 70 j.join()
72 71 nt.assert_equal(len(jobs.completed), 0)
73 72 nt.assert_equal(len(jobs.dead), 1)
74 73 jobs.flush()
75 74 nt.assert_equal(len(jobs.dead), 0)
76 75
77 76
78 77 def test_longer():
79 78 """Test control of longer-running jobs"""
80 79 jobs = bg.BackgroundJobManager()
81 80 # Sleep for long enough for the following two checks to still report the
82 81 # job as running, but not so long that it makes the test suite noticeably
83 82 # slower.
84 83 j = jobs.new(sleeper, 0.1)
85 84 nt.assert_equal(len(jobs.running), 1)
86 85 nt.assert_equal(len(jobs.completed), 0)
87 86 j.join()
88 87 nt.assert_equal(len(jobs.running), 0)
89 88 nt.assert_equal(len(jobs.completed), 1)
@@ -1,178 +1,177 b''
1 1 """Tests for IPython.lib.display.
2 2
3 3 """
4 4 #-----------------------------------------------------------------------------
5 5 # Copyright (c) 2012, the IPython Development Team.
6 6 #
7 7 # Distributed under the terms of the Modified BSD License.
8 8 #
9 9 # The full license is in the file COPYING.txt, distributed with this software.
10 10 #-----------------------------------------------------------------------------
11 11
12 12 #-----------------------------------------------------------------------------
13 13 # Imports
14 14 #-----------------------------------------------------------------------------
15 from __future__ import print_function
16 15 from tempfile import NamedTemporaryFile, mkdtemp
17 16 from os.path import split, join as pjoin, dirname
18 17
19 18 # Third-party imports
20 19 import nose.tools as nt
21 20
22 21 # Our own imports
23 22 from IPython.lib import display
24 23 from IPython.testing.decorators import skipif_not_numpy
25 24
26 25 #-----------------------------------------------------------------------------
27 26 # Classes and functions
28 27 #-----------------------------------------------------------------------------
29 28
30 29 #--------------------------
31 30 # FileLink tests
32 31 #--------------------------
33 32
34 33 def test_instantiation_FileLink():
35 34 """FileLink: Test class can be instantiated"""
36 35 fl = display.FileLink('example.txt')
37 36
38 37 def test_warning_on_non_existant_path_FileLink():
39 38 """FileLink: Calling _repr_html_ on non-existant files returns a warning
40 39 """
41 40 fl = display.FileLink('example.txt')
42 41 nt.assert_true(fl._repr_html_().startswith('Path (<tt>example.txt</tt>)'))
43 42
44 43 def test_existing_path_FileLink():
45 44 """FileLink: Calling _repr_html_ functions as expected on existing filepath
46 45 """
47 46 tf = NamedTemporaryFile()
48 47 fl = display.FileLink(tf.name)
49 48 actual = fl._repr_html_()
50 49 expected = "<a href='%s' target='_blank'>%s</a><br>" % (tf.name,tf.name)
51 50 nt.assert_equal(actual,expected)
52 51
53 52 def test_existing_path_FileLink_repr():
54 53 """FileLink: Calling repr() functions as expected on existing filepath
55 54 """
56 55 tf = NamedTemporaryFile()
57 56 fl = display.FileLink(tf.name)
58 57 actual = repr(fl)
59 58 expected = tf.name
60 59 nt.assert_equal(actual,expected)
61 60
62 61 def test_error_on_directory_to_FileLink():
63 62 """FileLink: Raises error when passed directory
64 63 """
65 64 td = mkdtemp()
66 65 nt.assert_raises(ValueError,display.FileLink,td)
67 66
68 67 #--------------------------
69 68 # FileLinks tests
70 69 #--------------------------
71 70
72 71 def test_instantiation_FileLinks():
73 72 """FileLinks: Test class can be instantiated
74 73 """
75 74 fls = display.FileLinks('example')
76 75
77 76 def test_warning_on_non_existant_path_FileLinks():
78 77 """FileLinks: Calling _repr_html_ on non-existant files returns a warning
79 78 """
80 79 fls = display.FileLinks('example')
81 80 nt.assert_true(fls._repr_html_().startswith('Path (<tt>example</tt>)'))
82 81
83 82 def test_existing_path_FileLinks():
84 83 """FileLinks: Calling _repr_html_ functions as expected on existing dir
85 84 """
86 85 td = mkdtemp()
87 86 tf1 = NamedTemporaryFile(dir=td)
88 87 tf2 = NamedTemporaryFile(dir=td)
89 88 fl = display.FileLinks(td)
90 89 actual = fl._repr_html_()
91 90 actual = actual.split('\n')
92 91 actual.sort()
93 92 # the links should always have forward slashes, even on windows, so replace
94 93 # backslashes with forward slashes here
95 94 expected = ["%s/<br>" % td,
96 95 "&nbsp;&nbsp;<a href='%s' target='_blank'>%s</a><br>" %\
97 96 (tf2.name.replace("\\","/"),split(tf2.name)[1]),
98 97 "&nbsp;&nbsp;<a href='%s' target='_blank'>%s</a><br>" %\
99 98 (tf1.name.replace("\\","/"),split(tf1.name)[1])]
100 99 expected.sort()
101 100 # We compare the sorted list of links here as that's more reliable
102 101 nt.assert_equal(actual,expected)
103 102
104 103 def test_existing_path_FileLinks_alt_formatter():
105 104 """FileLinks: Calling _repr_html_ functions as expected w/ an alt formatter
106 105 """
107 106 td = mkdtemp()
108 107 tf1 = NamedTemporaryFile(dir=td)
109 108 tf2 = NamedTemporaryFile(dir=td)
110 109 def fake_formatter(dirname,fnames,included_suffixes):
111 110 return ["hello","world"]
112 111 fl = display.FileLinks(td,notebook_display_formatter=fake_formatter)
113 112 actual = fl._repr_html_()
114 113 actual = actual.split('\n')
115 114 actual.sort()
116 115 expected = ["hello","world"]
117 116 expected.sort()
118 117 # We compare the sorted list of links here as that's more reliable
119 118 nt.assert_equal(actual,expected)
120 119
121 120 def test_existing_path_FileLinks_repr():
122 121 """FileLinks: Calling repr() functions as expected on existing directory """
123 122 td = mkdtemp()
124 123 tf1 = NamedTemporaryFile(dir=td)
125 124 tf2 = NamedTemporaryFile(dir=td)
126 125 fl = display.FileLinks(td)
127 126 actual = repr(fl)
128 127 actual = actual.split('\n')
129 128 actual.sort()
130 129 expected = ['%s/' % td, ' %s' % split(tf1.name)[1],' %s' % split(tf2.name)[1]]
131 130 expected.sort()
132 131 # We compare the sorted list of links here as that's more reliable
133 132 nt.assert_equal(actual,expected)
134 133
135 134 def test_existing_path_FileLinks_repr_alt_formatter():
136 135 """FileLinks: Calling repr() functions as expected w/ alt formatter
137 136 """
138 137 td = mkdtemp()
139 138 tf1 = NamedTemporaryFile(dir=td)
140 139 tf2 = NamedTemporaryFile(dir=td)
141 140 def fake_formatter(dirname,fnames,included_suffixes):
142 141 return ["hello","world"]
143 142 fl = display.FileLinks(td,terminal_display_formatter=fake_formatter)
144 143 actual = repr(fl)
145 144 actual = actual.split('\n')
146 145 actual.sort()
147 146 expected = ["hello","world"]
148 147 expected.sort()
149 148 # We compare the sorted list of links here as that's more reliable
150 149 nt.assert_equal(actual,expected)
151 150
152 151 def test_error_on_file_to_FileLinks():
153 152 """FileLinks: Raises error when passed file
154 153 """
155 154 td = mkdtemp()
156 155 tf1 = NamedTemporaryFile(dir=td)
157 156 nt.assert_raises(ValueError,display.FileLinks,tf1.name)
158 157
159 158 def test_recursive_FileLinks():
160 159 """FileLinks: Does not recurse when recursive=False
161 160 """
162 161 td = mkdtemp()
163 162 tf = NamedTemporaryFile(dir=td)
164 163 subtd = mkdtemp(dir=td)
165 164 subtf = NamedTemporaryFile(dir=subtd)
166 165 fl = display.FileLinks(td)
167 166 actual = str(fl)
168 167 actual = actual.split('\n')
169 168 nt.assert_equal(len(actual), 4, actual)
170 169 fl = display.FileLinks(td, recursive=False)
171 170 actual = str(fl)
172 171 actual = actual.split('\n')
173 172 nt.assert_equal(len(actual), 2, actual)
174 173
175 174 @skipif_not_numpy
176 175 def test_audio_from_file():
177 176 path = pjoin(dirname(__file__), 'test.wav')
178 177 display.Audio(filename=path)
@@ -1,487 +1,403 b''
1 1 # coding: utf-8
2 2 """Tests for IPython.lib.pretty."""
3 3
4 4 # Copyright (c) IPython Development Team.
5 5 # Distributed under the terms of the Modified BSD License.
6 6
7 from __future__ import print_function
8 7
9 8 from collections import Counter, defaultdict, deque, OrderedDict
10 9 import types, string
11 10
12 11 import nose.tools as nt
13 12
14 13 from IPython.lib import pretty
15 from IPython.testing.decorators import (skip_without, py2_only, py3_only)
16
17 from IPython.utils.py3compat import PY3, unicode_to_str
14 from IPython.testing.decorators import skip_without
18 15
19 if PY3:
20 from io import StringIO
21 else:
22 from StringIO import StringIO
16 from io import StringIO
23 17
24 18
25 19 class MyList(object):
26 20 def __init__(self, content):
27 21 self.content = content
28 22 def _repr_pretty_(self, p, cycle):
29 23 if cycle:
30 24 p.text("MyList(...)")
31 25 else:
32 26 with p.group(3, "MyList(", ")"):
33 27 for (i, child) in enumerate(self.content):
34 28 if i:
35 29 p.text(",")
36 30 p.breakable()
37 31 else:
38 32 p.breakable("")
39 33 p.pretty(child)
40 34
41 35
42 36 class MyDict(dict):
43 37 def _repr_pretty_(self, p, cycle):
44 38 p.text("MyDict(...)")
45 39
46 40 class MyObj(object):
47 41 def somemethod(self):
48 42 pass
49 43
50 44
51 45 class Dummy1(object):
52 46 def _repr_pretty_(self, p, cycle):
53 47 p.text("Dummy1(...)")
54 48
55 49 class Dummy2(Dummy1):
56 50 _repr_pretty_ = None
57 51
58 52 class NoModule(object):
59 53 pass
60 54
61 55 NoModule.__module__ = None
62 56
63 57 class Breaking(object):
64 58 def _repr_pretty_(self, p, cycle):
65 59 with p.group(4,"TG: ",":"):
66 60 p.text("Breaking(")
67 61 p.break_()
68 62 p.text(")")
69 63
70 64 class BreakingRepr(object):
71 65 def __repr__(self):
72 66 return "Breaking(\n)"
73 67
74 68 class BreakingReprParent(object):
75 69 def _repr_pretty_(self, p, cycle):
76 70 with p.group(4,"TG: ",":"):
77 71 p.pretty(BreakingRepr())
78 72
79 73 class BadRepr(object):
80 74
81 75 def __repr__(self):
82 76 return 1/0
83 77
84 78
85 79 def test_indentation():
86 80 """Test correct indentation in groups"""
87 81 count = 40
88 82 gotoutput = pretty.pretty(MyList(range(count)))
89 83 expectedoutput = "MyList(\n" + ",\n".join(" %d" % i for i in range(count)) + ")"
90 84
91 85 nt.assert_equal(gotoutput, expectedoutput)
92 86
93 87
94 88 def test_dispatch():
95 89 """
96 90 Test correct dispatching: The _repr_pretty_ method for MyDict
97 91 must be found before the registered printer for dict.
98 92 """
99 93 gotoutput = pretty.pretty(MyDict())
100 94 expectedoutput = "MyDict(...)"
101 95
102 96 nt.assert_equal(gotoutput, expectedoutput)
103 97
104 98
105 99 def test_callability_checking():
106 100 """
107 101     Test that the _repr_pretty_ method is checked for callability and skipped
108 102     if it is not callable.
109 103 """
110 104 gotoutput = pretty.pretty(Dummy2())
111 105 expectedoutput = "Dummy1(...)"
112 106
113 107 nt.assert_equal(gotoutput, expectedoutput)
114 108
115 109
116 110 def test_sets():
117 111 """
118 112 Test that set and frozenset use Python 3 formatting.
119 113 """
120 114 objects = [set(), frozenset(), set([1]), frozenset([1]), set([1, 2]),
121 115 frozenset([1, 2]), set([-1, -2, -3])]
122 116 expected = ['set()', 'frozenset()', '{1}', 'frozenset({1})', '{1, 2}',
123 117 'frozenset({1, 2})', '{-3, -2, -1}']
124 118 for obj, expected_output in zip(objects, expected):
125 119 got_output = pretty.pretty(obj)
126 120 yield nt.assert_equal, got_output, expected_output
127 121
128 122
129 123 @skip_without('xxlimited')
130 124 def test_pprint_heap_allocated_type():
131 125 """
132 126 Test that pprint works for heap allocated types.
133 127 """
134 128 import xxlimited
135 129 output = pretty.pretty(xxlimited.Null)
136 130 nt.assert_equal(output, 'xxlimited.Null')
137 131
138 132 def test_pprint_nomod():
139 133 """
140 134 Test that pprint works for classes with no __module__.
141 135 """
142 136 output = pretty.pretty(NoModule)
143 137 nt.assert_equal(output, 'NoModule')
144 138
145 139 def test_pprint_break():
146 140 """
147 141 Test that p.break_ produces expected output
148 142 """
149 143 output = pretty.pretty(Breaking())
150 144 expected = "TG: Breaking(\n ):"
151 145 nt.assert_equal(output, expected)
152 146
153 147 def test_pprint_break_repr():
154 148 """
155 149 Test that p.break_ is used in repr
156 150 """
157 151 output = pretty.pretty(BreakingReprParent())
158 152 expected = "TG: Breaking(\n ):"
159 153 nt.assert_equal(output, expected)
160 154
161 155 def test_bad_repr():
162 156 """Don't catch bad repr errors"""
163 157 with nt.assert_raises(ZeroDivisionError):
164 158 pretty.pretty(BadRepr())
165 159
166 160 class BadException(Exception):
167 161 def __str__(self):
168 162 return -1
169 163
170 164 class ReallyBadRepr(object):
171 165 __module__ = 1
172 166 @property
173 167 def __class__(self):
174 168 raise ValueError("I am horrible")
175 169
176 170 def __repr__(self):
177 171 raise BadException()
178 172
179 173 def test_really_bad_repr():
180 174 with nt.assert_raises(BadException):
181 175 pretty.pretty(ReallyBadRepr())
182 176
183 177
184 178 class SA(object):
185 179 pass
186 180
187 181 class SB(SA):
188 182 pass
189 183
190 184 def test_super_repr():
191 185 # "<super: module_name.SA, None>"
192 186 output = pretty.pretty(super(SA))
193 187 nt.assert_regexp_matches(output, r"<super: \S+.SA, None>")
194 188
195 189 # "<super: module_name.SA, <module_name.SB at 0x...>>"
196 190 sb = SB()
197 191 output = pretty.pretty(super(SA, sb))
198 192 nt.assert_regexp_matches(output, r"<super: \S+.SA,\s+<\S+.SB at 0x\S+>>")
199 193
200 194
201 195 def test_long_list():
202 196 lis = list(range(10000))
203 197 p = pretty.pretty(lis)
204 198 last2 = p.rsplit('\n', 2)[-2:]
205 199 nt.assert_equal(last2, [' 999,', ' ...]'])
206 200
207 201 def test_long_set():
208 202 s = set(range(10000))
209 203 p = pretty.pretty(s)
210 204 last2 = p.rsplit('\n', 2)[-2:]
211 205 nt.assert_equal(last2, [' 999,', ' ...}'])
212 206
213 207 def test_long_tuple():
214 208 tup = tuple(range(10000))
215 209 p = pretty.pretty(tup)
216 210 last2 = p.rsplit('\n', 2)[-2:]
217 211 nt.assert_equal(last2, [' 999,', ' ...)'])
218 212
219 213 def test_long_dict():
220 214 d = { n:n for n in range(10000) }
221 215 p = pretty.pretty(d)
222 216 last2 = p.rsplit('\n', 2)[-2:]
223 217 nt.assert_equal(last2, [' 999: 999,', ' ...}'])
224 218
225 219 def test_unbound_method():
226 220 output = pretty.pretty(MyObj.somemethod)
227 221 nt.assert_in('MyObj.somemethod', output)
228 222
229 223
230 224 class MetaClass(type):
231 225 def __new__(cls, name):
232 226 return type.__new__(cls, name, (object,), {'name': name})
233 227
234 228 def __repr__(self):
235 229 return "[CUSTOM REPR FOR CLASS %s]" % self.name
236 230
237 231
238 232 ClassWithMeta = MetaClass('ClassWithMeta')
239 233
240 234
241 235 def test_metaclass_repr():
242 236 output = pretty.pretty(ClassWithMeta)
243 237 nt.assert_equal(output, "[CUSTOM REPR FOR CLASS ClassWithMeta]")
244 238
245 239
246 240 def test_unicode_repr():
247 241 u = u"üniçodé"
248 ustr = unicode_to_str(u)
242 ustr = u
249 243
250 244 class C(object):
251 245 def __repr__(self):
252 246 return ustr
253 247
254 248 c = C()
255 249 p = pretty.pretty(c)
256 250 nt.assert_equal(p, u)
257 251 p = pretty.pretty([c])
258 252 nt.assert_equal(p, u'[%s]' % u)
259 253
260 254
261 255 def test_basic_class():
262 256 def type_pprint_wrapper(obj, p, cycle):
263 257 if obj is MyObj:
264 258 type_pprint_wrapper.called = True
265 259 return pretty._type_pprint(obj, p, cycle)
266 260 type_pprint_wrapper.called = False
267 261
268 262 stream = StringIO()
269 263 printer = pretty.RepresentationPrinter(stream)
270 264 printer.type_pprinters[type] = type_pprint_wrapper
271 265 printer.pretty(MyObj)
272 266 printer.flush()
273 267 output = stream.getvalue()
274 268
275 269 nt.assert_equal(output, '%s.MyObj' % __name__)
276 270 nt.assert_true(type_pprint_wrapper.called)
277 271
278 272
279 # This is only run on Python 2 because in Python 3 the language prevents you
280 # from setting a non-unicode value for __qualname__ on a metaclass, and it
281 # doesn't respect the descriptor protocol if you subclass unicode and implement
282 # __get__.
283 @py2_only
284 def test_fallback_to__name__on_type():
285 # Test that we correctly repr types that have non-string values for
286 # __qualname__ by falling back to __name__
287
288 class Type(object):
289 __qualname__ = 5
290
291 # Test repring of the type.
292 stream = StringIO()
293 printer = pretty.RepresentationPrinter(stream)
294
295 printer.pretty(Type)
296 printer.flush()
297 output = stream.getvalue()
298
299 # If __qualname__ is malformed, we should fall back to __name__.
300 expected = '.'.join([__name__, Type.__name__])
301 nt.assert_equal(output, expected)
302
303 # Clear stream buffer.
304 stream.buf = ''
305
306 # Test repring of an instance of the type.
307 instance = Type()
308 printer.pretty(instance)
309 printer.flush()
310 output = stream.getvalue()
311
312 # Should look like:
313 # <IPython.lib.tests.test_pretty.Type at 0x7f7658ae07d0>
314 prefix = '<' + '.'.join([__name__, Type.__name__]) + ' at 0x'
315 nt.assert_true(output.startswith(prefix))
316
317
318 @py2_only
319 def test_fail_gracefully_on_bogus__qualname__and__name__():
320 # Test that we correctly repr types that have non-string values for both
321 # __qualname__ and __name__
322
323 class Meta(type):
324 __name__ = 5
325
326 class Type(object):
327 __metaclass__ = Meta
328 __qualname__ = 5
329
330 stream = StringIO()
331 printer = pretty.RepresentationPrinter(stream)
332
333 printer.pretty(Type)
334 printer.flush()
335 output = stream.getvalue()
336
337 # If we can't find __name__ or __qualname__ just use a sentinel string.
338 expected = '.'.join([__name__, '<unknown type>'])
339 nt.assert_equal(output, expected)
340
341 # Clear stream buffer.
342 stream.buf = ''
343
344 # Test repring of an instance of the type.
345 instance = Type()
346 printer.pretty(instance)
347 printer.flush()
348 output = stream.getvalue()
349
350 # Should look like:
351 # <IPython.lib.tests.test_pretty.<unknown type> at 0x7f7658ae07d0>
352 prefix = '<' + '.'.join([__name__, '<unknown type>']) + ' at 0x'
353 nt.assert_true(output.startswith(prefix))
354
355
356 273 def test_collections_defaultdict():
357 274 # Create defaultdicts with cycles
358 275 a = defaultdict()
359 276 a.default_factory = a
360 277 b = defaultdict(list)
361 278 b['key'] = b
362 279
363 280 # Dictionary order cannot be relied on, test against single keys.
364 281 cases = [
365 282 (defaultdict(list), 'defaultdict(list, {})'),
366 283 (defaultdict(list, {'key': '-' * 50}),
367 284 "defaultdict(list,\n"
368 285 " {'key': '--------------------------------------------------'})"),
369 286 (a, 'defaultdict(defaultdict(...), {})'),
370 287 (b, "defaultdict(list, {'key': defaultdict(...)})"),
371 288 ]
372 289 for obj, expected in cases:
373 290 nt.assert_equal(pretty.pretty(obj), expected)
374 291
375 292
376 293 def test_collections_ordereddict():
377 294 # Create OrderedDict with cycle
378 295 a = OrderedDict()
379 296 a['key'] = a
380 297
381 298 cases = [
382 299 (OrderedDict(), 'OrderedDict()'),
383 300 (OrderedDict((i, i) for i in range(1000, 1010)),
384 301 'OrderedDict([(1000, 1000),\n'
385 302 ' (1001, 1001),\n'
386 303 ' (1002, 1002),\n'
387 304 ' (1003, 1003),\n'
388 305 ' (1004, 1004),\n'
389 306 ' (1005, 1005),\n'
390 307 ' (1006, 1006),\n'
391 308 ' (1007, 1007),\n'
392 309 ' (1008, 1008),\n'
393 310 ' (1009, 1009)])'),
394 311 (a, "OrderedDict([('key', OrderedDict(...))])"),
395 312 ]
396 313 for obj, expected in cases:
397 314 nt.assert_equal(pretty.pretty(obj), expected)
398 315
399 316
400 317 def test_collections_deque():
401 318 # Create deque with cycle
402 319 a = deque()
403 320 a.append(a)
404 321
405 322 cases = [
406 323 (deque(), 'deque([])'),
407 324 (deque(i for i in range(1000, 1020)),
408 325 'deque([1000,\n'
409 326 ' 1001,\n'
410 327 ' 1002,\n'
411 328 ' 1003,\n'
412 329 ' 1004,\n'
413 330 ' 1005,\n'
414 331 ' 1006,\n'
415 332 ' 1007,\n'
416 333 ' 1008,\n'
417 334 ' 1009,\n'
418 335 ' 1010,\n'
419 336 ' 1011,\n'
420 337 ' 1012,\n'
421 338 ' 1013,\n'
422 339 ' 1014,\n'
423 340 ' 1015,\n'
424 341 ' 1016,\n'
425 342 ' 1017,\n'
426 343 ' 1018,\n'
427 344 ' 1019])'),
428 345 (a, 'deque([deque(...)])'),
429 346 ]
430 347 for obj, expected in cases:
431 348 nt.assert_equal(pretty.pretty(obj), expected)
432 349
433 350 def test_collections_counter():
434 351 class MyCounter(Counter):
435 352 pass
436 353 cases = [
437 354 (Counter(), 'Counter()'),
438 355 (Counter(a=1), "Counter({'a': 1})"),
439 356 (MyCounter(a=1), "MyCounter({'a': 1})"),
440 357 ]
441 358 for obj, expected in cases:
442 359 nt.assert_equal(pretty.pretty(obj), expected)
443 360
444 @py3_only
445 361 def test_mappingproxy():
446 362 MP = types.MappingProxyType
447 363 underlying_dict = {}
448 364 mp_recursive = MP(underlying_dict)
449 365 underlying_dict[2] = mp_recursive
450 366 underlying_dict[3] = underlying_dict
451 367
452 368 cases = [
453 369 (MP({}), "mappingproxy({})"),
454 370 (MP({None: MP({})}), "mappingproxy({None: mappingproxy({})})"),
455 371 (MP({k: k.upper() for k in string.ascii_lowercase}),
456 372 "mappingproxy({'a': 'A',\n"
457 373 " 'b': 'B',\n"
458 374 " 'c': 'C',\n"
459 375 " 'd': 'D',\n"
460 376 " 'e': 'E',\n"
461 377 " 'f': 'F',\n"
462 378 " 'g': 'G',\n"
463 379 " 'h': 'H',\n"
464 380 " 'i': 'I',\n"
465 381 " 'j': 'J',\n"
466 382 " 'k': 'K',\n"
467 383 " 'l': 'L',\n"
468 384 " 'm': 'M',\n"
469 385 " 'n': 'N',\n"
470 386 " 'o': 'O',\n"
471 387 " 'p': 'P',\n"
472 388 " 'q': 'Q',\n"
473 389 " 'r': 'R',\n"
474 390 " 's': 'S',\n"
475 391 " 't': 'T',\n"
476 392 " 'u': 'U',\n"
477 393 " 'v': 'V',\n"
478 394 " 'w': 'W',\n"
479 395 " 'x': 'X',\n"
480 396 " 'y': 'Y',\n"
481 397 " 'z': 'Z'})"),
482 398 (mp_recursive, "mappingproxy({2: {...}, 3: {2: {...}, 3: {...}}})"),
483 399 (underlying_dict,
484 400 "{2: mappingproxy({2: {...}, 3: {...}}), 3: {...}}"),
485 401 ]
486 402 for obj, expected in cases:
487 403 nt.assert_equal(pretty.pretty(obj), expected)
@@ -1,1184 +1,1183 b''
1 1 # -*- coding: utf-8 -*-
2 2 """
3 3 Sphinx directive to support embedded IPython code.
4 4
5 5 This directive allows pasting of entire interactive IPython sessions, prompts
6 6 and all, and their code will actually get re-executed at doc build time, with
7 7 all prompts renumbered sequentially. It also allows you to supply code as pure
8 8 python input by giving the argument python to the directive. The output looks
9 9 like an interactive ipython section.
10 10
11 11 To enable this directive, simply list it in your Sphinx ``conf.py`` file
12 12 (making sure the directory where you placed it is visible to sphinx, as is
13 13 needed for all Sphinx directives). For example, to enable syntax highlighting
14 14 and the IPython directive::
15 15
16 16 extensions = ['IPython.sphinxext.ipython_console_highlighting',
17 17 'IPython.sphinxext.ipython_directive']
18 18
19 19 The IPython directive outputs code-blocks with the language 'ipython'. So
20 20 if you do not have the syntax highlighting extension enabled as well, then
21 21 all rendered code-blocks will be uncolored. By default this directive assumes
22 22 that your prompts are unchanged IPython ones, but this can be customized.
23 23 The configurable options that can be placed in conf.py are:
24 24
25 25 ipython_savefig_dir:
26 26 The directory in which to save the figures. This is relative to the
27 27 Sphinx source directory. The default is `html_static_path`.
28 28 ipython_rgxin:
29 29 The compiled regular expression to denote the start of IPython input
30 30 lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
31 31 shouldn't need to change this.
32 32 ipython_rgxout:
33 33 The compiled regular expression to denote the start of IPython output
34 34 lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
35 35 shouldn't need to change this.
36 36 ipython_promptin:
37 37 The string to represent the IPython input prompt in the generated ReST.
38 38 The default is 'In [%d]:'. This expects that the line numbers are used
39 39 in the prompt.
40 40 ipython_promptout:
41 41     The string to represent the IPython output prompt in the generated ReST. The
42 42 default is 'Out [%d]:'. This expects that the line numbers are used
43 43 in the prompt.
44 44 ipython_mplbackend:
45 45 The string which specifies if the embedded Sphinx shell should import
46 46 Matplotlib and set the backend. The value specifies a backend that is
47 47 passed to `matplotlib.use()` before any lines in `ipython_execlines` are
48 48 executed. If not specified in conf.py, then the default value of 'agg' is
49 49 used. To use the IPython directive without matplotlib as a dependency, set
50 50     the value to `None`. Matplotlib may still end up being imported if the
51 51     user requests it in `ipython_execlines` or makes use of the
52 52 @savefig pseudo decorator.
53 53 ipython_execlines:
54 54 A list of strings to be exec'd in the embedded Sphinx shell. Typical
55 55 usage is to make certain packages always available. Set this to an empty
56 56 list if you wish to have no imports always available. If specified in
57 57 conf.py as `None`, then it has the effect of making no imports available.
58 58 If omitted from conf.py altogether, then the default value of
59 59 ['import numpy as np', 'import matplotlib.pyplot as plt'] is used.
60 60 ipython_holdcount:
61 61 When the @suppress pseudo-decorator is used, the execution count can be
62 62 incremented or not. The default behavior is to hold the execution count,
63 63 corresponding to a value of `True`. Set this to `False` to increment
64 64 the execution count after each suppressed command.
65 65
66 66 As an example, to use the IPython directive when `matplotlib` is not available,
67 67 one sets the backend to `None`::
68 68
69 69 ipython_mplbackend = None
70 70
71 71 An example usage of the directive is:
72 72
73 73 .. code-block:: rst
74 74
75 75 .. ipython::
76 76
77 77 In [1]: x = 1
78 78
79 79 In [2]: y = x**2
80 80
81 81 In [3]: print(y)
82 82
83 83 See http://matplotlib.org/sampledoc/ipython_directive.html for additional
84 84 documentation.
85 85
86 86 Pseudo-Decorators
87 87 =================
88 88
89 89 Note: Only one decorator is supported per input. If more than one decorator
90 90 is specified, then only the last one is used.
91 91
92 92 In addition to the Pseudo-Decorators/options described at the above link,
93 93 several enhancements have been made. The directive will emit a message to the
94 94 console at build-time if code-execution resulted in an exception or warning.
95 95 You can suppress these on a per-block basis by specifying the :okexcept:
96 96 or :okwarning: options:
97 97
98 98 .. code-block:: rst
99 99
100 100 .. ipython::
101 101 :okexcept:
102 102 :okwarning:
103 103
104 104 In [1]: 1/0
105 105 In [2]: # raise warning.
106 106
107 107 ToDo
108 108 ----
109 109
110 110 - Turn the ad-hoc test() function into a real test suite.
111 111 - Break up ipython-specific functionality from matplotlib stuff into better
112 112 separated code.
113 113
114 114 Authors
115 115 -------
116 116
117 117 - John D Hunter: original author.
118 118 - Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
119 119 - Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
120 120 - Skipper Seabold: refactoring, cleanups, pure python addition
121 121 """
122 from __future__ import print_function
123 122
124 123 #-----------------------------------------------------------------------------
125 124 # Imports
126 125 #-----------------------------------------------------------------------------
127 126
128 127 # Stdlib
129 128 import atexit
130 129 import os
131 130 import re
132 131 import sys
133 132 import tempfile
134 133 import ast
135 134 import warnings
136 135 import shutil
137 136
138 137
139 138 # Third-party
140 139 from docutils.parsers.rst import directives
141 140 from sphinx.util.compat import Directive
142 141
143 142 # Our own
144 143 from traitlets.config import Config
145 144 from IPython import InteractiveShell
146 145 from IPython.core.profiledir import ProfileDir
147 146 from IPython.utils import io
148 147 from IPython.utils.py3compat import PY3
149 148
150 149 if PY3:
151 150 from io import StringIO
152 151 else:
153 152 from StringIO import StringIO
154 153
155 154 #-----------------------------------------------------------------------------
156 155 # Globals
157 156 #-----------------------------------------------------------------------------
158 157 # for tokenizing blocks
159 158 COMMENT, INPUT, OUTPUT = range(3)
160 159
161 160 #-----------------------------------------------------------------------------
162 161 # Functions and class declarations
163 162 #-----------------------------------------------------------------------------
164 163
165 164 def block_parser(part, rgxin, rgxout, fmtin, fmtout):
166 165 """
167 166 part is a string of ipython text, comprised of at most one
168 167 input, one output, comments, and blank lines. The block parser
169 168 parses the text into a list of::
170 169
171 170 blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
172 171
173 172 where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
174 173 data is, depending on the type of token::
175 174
176 175 COMMENT : the comment string
177 176
178 177 INPUT: the (DECORATOR, INPUT_LINE, REST) where
179 178 DECORATOR: the input decorator (or None)
180 179 INPUT_LINE: the input as string (possibly multi-line)
181 180 REST : any stdout generated by the input line (not OUTPUT)
182 181
183 182 OUTPUT: the output string, possibly multi-line
184 183
185 184 """
186 185 block = []
187 186 lines = part.split('\n')
188 187 N = len(lines)
189 188 i = 0
190 189 decorator = None
191 190 while 1:
192 191
193 192 if i==N:
194 193 # nothing left to parse -- the last line
195 194 break
196 195
197 196 line = lines[i]
198 197 i += 1
199 198 line_stripped = line.strip()
200 199 if line_stripped.startswith('#'):
201 200 block.append((COMMENT, line))
202 201 continue
203 202
204 203 if line_stripped.startswith('@'):
205 204 # Here is where we assume there is, at most, one decorator.
206 205 # Might need to rethink this.
207 206 decorator = line_stripped
208 207 continue
209 208
210 209 # does this look like an input line?
211 210 matchin = rgxin.match(line)
212 211 if matchin:
213 212 lineno, inputline = int(matchin.group(1)), matchin.group(2)
214 213
215 214 # the ....: continuation string
216 215 continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
217 216 Nc = len(continuation)
218 217 # input lines can continue on for more than one line, if
219 218             # we have a '\' line continuation char or a function call
220 219             # that echoes output (e.g. 'print'). The input line can only be
221 220 # terminated by the end of the block or an output line, so
222 221 # we parse out the rest of the input line if it is
223 222 # multiline as well as any echo text
224 223
225 224 rest = []
226 225 while i<N:
227 226
228 227 # look ahead; if the next line is blank, or a comment, or
229 228 # an output line, we're done
230 229
231 230 nextline = lines[i]
232 231 matchout = rgxout.match(nextline)
233 232 #print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
234 233 if matchout or nextline.startswith('#'):
235 234 break
236 235 elif nextline.startswith(continuation):
237 236 # The default ipython_rgx* treat the space following the colon as optional.
238 237 # However, If the space is there we must consume it or code
239 238 # employing the cython_magic extension will fail to execute.
240 239 #
241 240 # This works with the default ipython_rgx* patterns,
242 241 # If you modify them, YMMV.
243 242 nextline = nextline[Nc:]
244 243 if nextline and nextline[0] == ' ':
245 244 nextline = nextline[1:]
246 245
247 246 inputline += '\n' + nextline
248 247 else:
249 248 rest.append(nextline)
250 249 i+= 1
251 250
252 251 block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
253 252 continue
254 253
255 254 # if it looks like an output line grab all the text to the end
256 255 # of the block
257 256 matchout = rgxout.match(line)
258 257 if matchout:
259 258 lineno, output = int(matchout.group(1)), matchout.group(2)
260 259 if i<N-1:
261 260 output = '\n'.join([output] + lines[i:])
262 261
263 262 block.append((OUTPUT, output))
264 263 break
265 264
266 265 return block
267 266
268 267
269 268 class EmbeddedSphinxShell(object):
270 269 """An embedded IPython instance to run inside Sphinx"""
271 270
272 271 def __init__(self, exec_lines=None):
273 272
274 273 self.cout = StringIO()
275 274
276 275 if exec_lines is None:
277 276 exec_lines = []
278 277
279 278 # Create config object for IPython
280 279 config = Config()
281 280 config.HistoryManager.hist_file = ':memory:'
282 281 config.InteractiveShell.autocall = False
283 282 config.InteractiveShell.autoindent = False
284 283 config.InteractiveShell.colors = 'NoColor'
285 284
286 285 # create a profile so instance history isn't saved
287 286 tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
288 287 profname = 'auto_profile_sphinx_build'
289 288 pdir = os.path.join(tmp_profile_dir,profname)
290 289 profile = ProfileDir.create_profile_dir(pdir)
291 290
292 291 # Create and initialize global ipython, but don't start its mainloop.
293 292         # This will persist across different EmbeddedSphinxShell instances.
294 293 IP = InteractiveShell.instance(config=config, profile_dir=profile)
295 294 atexit.register(self.cleanup)
296 295
297 296 sys.stdout = self.cout
298 297 sys.stderr = self.cout
299 298
300 299 # For debugging, so we can see normal output, use this:
301 300 #from IPython.utils.io import Tee
302 301 #sys.stdout = Tee(self.cout, channel='stdout') # dbg
303 302 #sys.stderr = Tee(self.cout, channel='stderr') # dbg
304 303
305 304 # Store a few parts of IPython we'll need.
306 305 self.IP = IP
307 306 self.user_ns = self.IP.user_ns
308 307 self.user_global_ns = self.IP.user_global_ns
309 308
310 309 self.input = ''
311 310 self.output = ''
312 311 self.tmp_profile_dir = tmp_profile_dir
313 312
314 313 self.is_verbatim = False
315 314 self.is_doctest = False
316 315 self.is_suppress = False
317 316
318 317 # Optionally, provide more detailed information to shell.
319 318 # this is assigned by the SetUp method of IPythonDirective
320 319 # to point at itself.
321 320 #
322 321 # So, you can access handy things at self.directive.state
323 322 self.directive = None
324 323
325 324 # on the first call to the savefig decorator, we'll import
326 325 # pyplot as plt so we can make a call to the plt.gcf().savefig
327 326 self._pyplot_imported = False
328 327
329 328 # Prepopulate the namespace.
330 329 for line in exec_lines:
331 330 self.process_input_line(line, store_history=False)
332 331
333 332 def cleanup(self):
334 333 shutil.rmtree(self.tmp_profile_dir, ignore_errors=True)
335 334
336 335 def clear_cout(self):
337 336 self.cout.seek(0)
338 337 self.cout.truncate(0)
339 338
340 339 def process_input_line(self, line, store_history=True):
341 340 """process the input, capturing stdout"""
342 341
343 342 stdout = sys.stdout
344 343 splitter = self.IP.input_splitter
345 344 try:
346 345 sys.stdout = self.cout
347 346 splitter.push(line)
348 347 more = splitter.push_accepts_more()
349 348 if not more:
350 349 source_raw = splitter.raw_reset()
351 350 self.IP.run_cell(source_raw, store_history=store_history)
352 351 finally:
353 352 sys.stdout = stdout
354 353
355 354 def process_image(self, decorator):
356 355 """
357 356 # build out an image directive like
358 357 # .. image:: somefile.png
359 358 # :width 4in
360 359 #
361 360 # from an input like
362 361 # savefig somefile.png width=4in
363 362 """
364 363 savefig_dir = self.savefig_dir
365 364 source_dir = self.source_dir
366 365 saveargs = decorator.split(' ')
367 366 filename = saveargs[1]
368 367 # insert relative path to image file in source
369 368 outfile = os.path.relpath(os.path.join(savefig_dir,filename),
370 369 source_dir)
371 370
372 371 imagerows = ['.. image:: %s'%outfile]
373 372
374 373 for kwarg in saveargs[2:]:
375 374 arg, val = kwarg.split('=')
376 375 arg = arg.strip()
377 376 val = val.strip()
378 377 imagerows.append(' :%s: %s'%(arg, val))
379 378
380 379 image_file = os.path.basename(outfile) # only return file name
381 380 image_directive = '\n'.join(imagerows)
382 381 return image_file, image_directive
383 382
384 383 # Callbacks for each type of token
385 384 def process_input(self, data, input_prompt, lineno):
386 385 """
387 386 Process data block for INPUT token.
388 387
389 388 """
390 389 decorator, input, rest = data
391 390 image_file = None
392 391 image_directive = None
393 392
394 393 is_verbatim = decorator=='@verbatim' or self.is_verbatim
395 394 is_doctest = (decorator is not None and \
396 395 decorator.startswith('@doctest')) or self.is_doctest
397 396 is_suppress = decorator=='@suppress' or self.is_suppress
398 397 is_okexcept = decorator=='@okexcept' or self.is_okexcept
399 398 is_okwarning = decorator=='@okwarning' or self.is_okwarning
400 399 is_savefig = decorator is not None and \
401 400 decorator.startswith('@savefig')
402 401
403 402 input_lines = input.split('\n')
404 403 if len(input_lines) > 1:
405 404 if input_lines[-1] != "":
406 405 input_lines.append('') # make sure there's a blank line
407 406 # so splitter buffer gets reset
408 407
409 408 continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
410 409
411 410 if is_savefig:
412 411 image_file, image_directive = self.process_image(decorator)
413 412
414 413 ret = []
415 414 is_semicolon = False
416 415
417 416 # Hold the execution count, if requested to do so.
418 417 if is_suppress and self.hold_count:
419 418 store_history = False
420 419 else:
421 420 store_history = True
422 421
423 422 # Note: catch_warnings is not thread safe
424 423 with warnings.catch_warnings(record=True) as ws:
425 424 for i, line in enumerate(input_lines):
426 425 if line.endswith(';'):
427 426 is_semicolon = True
428 427
429 428 if i == 0:
430 429 # process the first input line
431 430 if is_verbatim:
432 431 self.process_input_line('')
433 432 self.IP.execution_count += 1 # increment it anyway
434 433 else:
435 434 # only submit the line in non-verbatim mode
436 435 self.process_input_line(line, store_history=store_history)
437 436 formatted_line = '%s %s'%(input_prompt, line)
438 437 else:
439 438 # process a continuation line
440 439 if not is_verbatim:
441 440 self.process_input_line(line, store_history=store_history)
442 441
443 442 formatted_line = '%s %s'%(continuation, line)
444 443
445 444 if not is_suppress:
446 445 ret.append(formatted_line)
447 446
448 447 if not is_suppress and len(rest.strip()) and is_verbatim:
449 448 # The "rest" is the standard output of the input. This needs to be
450 449 # added when in verbatim mode. If there is no "rest", then we don't
451 450 # add it, as the new line will be added by the processed output.
452 451 ret.append(rest)
453 452
454 453 # Fetch the processed output. (This is not the submitted output.)
455 454 self.cout.seek(0)
456 455 processed_output = self.cout.read()
457 456 if not is_suppress and not is_semicolon:
458 457 #
459 458 # In IPythonDirective.run, the elements of `ret` are eventually
460 459 # combined such that '' entries correspond to newlines. So if
461 460             # `processed_output` is equal to '', then adding it to `ret`
462 461 # ensures that there is a blank line between consecutive inputs
463 462 # that have no outputs, as in:
464 463 #
465 464 # In [1]: x = 4
466 465 #
467 466 # In [2]: x = 5
468 467 #
469 468 # When there is processed output, it has a '\n' at the tail end. So
470 469 # adding the output to `ret` will provide the necessary spacing
471 470 # between consecutive input/output blocks, as in:
472 471 #
473 472 # In [1]: x
474 473 # Out[1]: 5
475 474 #
476 475 # In [2]: x
477 476 # Out[2]: 5
478 477 #
479 478 # When there is stdout from the input, it also has a '\n' at the
480 479 # tail end, and so this ensures proper spacing as well. E.g.:
481 480 #
482 481 # In [1]: print x
483 482 # 5
484 483 #
485 484 # In [2]: x = 5
486 485 #
487 486 # When in verbatim mode, `processed_output` is empty (because
488 487             # nothing was passed to IP). Sometimes the submitted code block has
489 488 # an Out[] portion and sometimes it does not. When it does not, we
490 489 # need to ensure proper spacing, so we have to add '' to `ret`.
491 490 # However, if there is an Out[] in the submitted code, then we do
492 491 # not want to add a newline as `process_output` has stuff to add.
493 492 # The difficulty is that `process_input` doesn't know if
494 493 # `process_output` will be called---so it doesn't know if there is
495 494             # Out[] in the code block. This requires that we include a hack in
496 495 # `process_block`. See the comments there.
497 496 #
498 497 ret.append(processed_output)
499 498 elif is_semicolon:
500 499 # Make sure there is a newline after the semicolon.
501 500 ret.append('')
502 501
503 502 # context information
504 503 filename = "Unknown"
505 504 lineno = 0
506 505 if self.directive.state:
507 506 filename = self.directive.state.document.current_source
508 507 lineno = self.directive.state.document.current_line
509 508
510 509 # output any exceptions raised during execution to stdout
511 510 # unless :okexcept: has been specified.
512 511 if not is_okexcept and "Traceback" in processed_output:
513 512 s = "\nException in %s at block ending on line %s\n" % (filename, lineno)
514 513 s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n"
515 514 sys.stdout.write('\n\n>>>' + ('-' * 73))
516 515 sys.stdout.write(s)
517 516 sys.stdout.write(processed_output)
518 517 sys.stdout.write('<<<' + ('-' * 73) + '\n\n')
519 518
520 519 # output any warning raised during execution to stdout
521 520 # unless :okwarning: has been specified.
522 521 if not is_okwarning:
523 522 for w in ws:
524 523 s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno)
525 524 s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n"
526 525 sys.stdout.write('\n\n>>>' + ('-' * 73))
527 526 sys.stdout.write(s)
528 527 sys.stdout.write(('-' * 76) + '\n')
529 528 s=warnings.formatwarning(w.message, w.category,
530 529 w.filename, w.lineno, w.line)
531 530 sys.stdout.write(s)
532 531 sys.stdout.write('<<<' + ('-' * 73) + '\n')
533 532
534 533 self.cout.truncate(0)
535 534
536 535 return (ret, input_lines, processed_output,
537 536 is_doctest, decorator, image_file, image_directive)
538 537
539 538
540 539 def process_output(self, data, output_prompt, input_lines, output,
541 540 is_doctest, decorator, image_file):
542 541 """
543 542 Process data block for OUTPUT token.
544 543
545 544 """
546 545 # Recall: `data` is the submitted output, and `output` is the processed
547 546 # output from `input_lines`.
548 547
549 548 TAB = ' ' * 4
550 549
551 550 if is_doctest and output is not None:
552 551
553 552 found = output # This is the processed output
554 553 found = found.strip()
555 554 submitted = data.strip()
556 555
557 556 if self.directive is None:
558 557 source = 'Unavailable'
559 558 content = 'Unavailable'
560 559 else:
561 560 source = self.directive.state.document.current_source
562 561 content = self.directive.content
563 562 # Add tabs and join into a single string.
564 563 content = '\n'.join([TAB + line for line in content])
565 564
566 565 # Make sure the output contains the output prompt.
567 566 ind = found.find(output_prompt)
568 567 if ind < 0:
569 568 e = ('output does not contain output prompt\n\n'
570 569 'Document source: {0}\n\n'
571 570 'Raw content: \n{1}\n\n'
572 571 'Input line(s):\n{TAB}{2}\n\n'
573 572 'Output line(s):\n{TAB}{3}\n\n')
574 573 e = e.format(source, content, '\n'.join(input_lines),
575 574 repr(found), TAB=TAB)
576 575 raise RuntimeError(e)
577 576 found = found[len(output_prompt):].strip()
578 577
579 578 # Handle the actual doctest comparison.
580 579 if decorator.strip() == '@doctest':
581 580 # Standard doctest
582 581 if found != submitted:
583 582 e = ('doctest failure\n\n'
584 583 'Document source: {0}\n\n'
585 584 'Raw content: \n{1}\n\n'
586 585 'On input line(s):\n{TAB}{2}\n\n'
587 586 'we found output:\n{TAB}{3}\n\n'
588 587 'instead of the expected:\n{TAB}{4}\n\n')
589 588 e = e.format(source, content, '\n'.join(input_lines),
590 589 repr(found), repr(submitted), TAB=TAB)
591 590 raise RuntimeError(e)
592 591 else:
593 592 self.custom_doctest(decorator, input_lines, found, submitted)
594 593
595 594 # When in verbatim mode, this holds additional submitted output
596 595 # to be written in the final Sphinx output.
597 596 # https://github.com/ipython/ipython/issues/5776
598 597 out_data = []
599 598
600 599 is_verbatim = decorator=='@verbatim' or self.is_verbatim
601 600 if is_verbatim and data.strip():
602 601 # Note that `ret` in `process_block` has '' as its last element if
603 602 # the code block was in verbatim mode. So if there is no submitted
604 603 # output, then we will have proper spacing only if we do not add
605 604 # an additional '' to `out_data`. This is why we condition on
606 605 # `and data.strip()`.
607 606
608 607 # The submitted output has no output prompt. If we want the
609 608 # prompt and the code to appear, we need to join them now
610 609 # instead of adding them separately---as this would create an
611 610 # undesired newline. How we do this ultimately depends on the
612 611 # format of the output regex. I'll do what works for the default
613 612 # prompt for now, and we might have to adjust if it doesn't work
614 613 # in other cases. Finally, the submitted output does not have
615 614 # a trailing newline, so we must add it manually.
616 615 out_data.append("{0} {1}\n".format(output_prompt, data))
617 616
618 617 return out_data
619 618
620 619 def process_comment(self, data):
621 620         """Process data block for COMMENT token."""
622 621 if not self.is_suppress:
623 622 return [data]
624 623
625 624 def save_image(self, image_file):
626 625 """
627 626 Saves the image file to disk.
628 627 """
629 628 self.ensure_pyplot()
630 629 command = 'plt.gcf().savefig("%s")'%image_file
631 630 #print 'SAVEFIG', command # dbg
632 631 self.process_input_line('bookmark ipy_thisdir', store_history=False)
633 632 self.process_input_line('cd -b ipy_savedir', store_history=False)
634 633 self.process_input_line(command, store_history=False)
635 634 self.process_input_line('cd -b ipy_thisdir', store_history=False)
636 635 self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
637 636 self.clear_cout()
638 637
639 638 def process_block(self, block):
640 639 """
641 640 process block from the block_parser and return a list of processed lines
642 641 """
643 642 ret = []
644 643 output = None
645 644 input_lines = None
646 645 lineno = self.IP.execution_count
647 646
648 647 input_prompt = self.promptin % lineno
649 648 output_prompt = self.promptout % lineno
650 649 image_file = None
651 650 image_directive = None
652 651
653 652 found_input = False
654 653 for token, data in block:
655 654 if token == COMMENT:
656 655 out_data = self.process_comment(data)
657 656 elif token == INPUT:
658 657 found_input = True
659 658 (out_data, input_lines, output, is_doctest,
660 659 decorator, image_file, image_directive) = \
661 660 self.process_input(data, input_prompt, lineno)
662 661 elif token == OUTPUT:
663 662 if not found_input:
664 663
665 664 TAB = ' ' * 4
666 665 linenumber = 0
667 666 source = 'Unavailable'
668 667 content = 'Unavailable'
669 668 if self.directive:
670 669 linenumber = self.directive.state.document.current_line
671 670 source = self.directive.state.document.current_source
672 671 content = self.directive.content
673 672 # Add tabs and join into a single string.
674 673 content = '\n'.join([TAB + line for line in content])
675 674
676 675 e = ('\n\nInvalid block: Block contains an output prompt '
677 676 'without an input prompt.\n\n'
678 677 'Document source: {0}\n\n'
679 678 'Content begins at line {1}: \n\n{2}\n\n'
680 679 'Problematic block within content: \n\n{TAB}{3}\n\n')
681 680 e = e.format(source, linenumber, content, block, TAB=TAB)
682 681
683 682 # Write, rather than include in exception, since Sphinx
684 683 # will truncate tracebacks.
685 684 sys.stdout.write(e)
686 685 raise RuntimeError('An invalid block was detected.')
687 686
688 687 out_data = \
689 688 self.process_output(data, output_prompt, input_lines,
690 689 output, is_doctest, decorator,
691 690 image_file)
692 691 if out_data:
693 692 # Then there was user submitted output in verbatim mode.
694 693 # We need to remove the last element of `ret` that was
695 694 # added in `process_input`, as it is '' and would introduce
696 695 # an undesirable newline.
697 696 assert(ret[-1] == '')
698 697 del ret[-1]
699 698
700 699 if out_data:
701 700 ret.extend(out_data)
702 701
703 702 # save the image files
704 703 if image_file is not None:
705 704 self.save_image(image_file)
706 705
707 706 return ret, image_directive
708 707
709 708 def ensure_pyplot(self):
710 709 """
711 710 Ensures that pyplot has been imported into the embedded IPython shell.
712 711
713 712 Also, makes sure to set the backend appropriately if not set already.
714 713
715 714 """
716 715 # We are here if the @figure pseudo decorator was used. Thus, it's
717 716 # possible that we could be here even if python_mplbackend were set to
718 717 # `None`. That's also strange and perhaps worthy of raising an
719 718 # exception, but for now, we just set the backend to 'agg'.
720 719
721 720 if not self._pyplot_imported:
722 721 if 'matplotlib.backends' not in sys.modules:
723 722 # Then ipython_matplotlib was set to None but there was a
724 723 # call to the @figure decorator (and ipython_execlines did
725 724 # not set a backend).
726 725 #raise Exception("No backend was set, but @figure was used!")
727 726 import matplotlib
728 727 matplotlib.use('agg')
729 728
730 729 # Always import pyplot into embedded shell.
731 730 self.process_input_line('import matplotlib.pyplot as plt',
732 731 store_history=False)
733 732 self._pyplot_imported = True
734 733
735 734 def process_pure_python(self, content):
736 735 """
737 736 content is a list of strings; it is the unedited directive content.
738 737
739 738 This runs it line by line in the InteractiveShell, prepending
740 739 prompts as needed and capturing stderr and stdout, then returns
741 740 the content as a list as if it were IPython code.
742 741 """
743 742 output = []
744 743 savefig = False # keep up with this to clear figure
745 744 multiline = False # to handle line continuation
746 745 multiline_start = None
747 746 fmtin = self.promptin
748 747
749 748 ct = 0
750 749
751 750 for lineno, line in enumerate(content):
752 751
753 752 line_stripped = line.strip()
754 753 if not len(line):
755 754 output.append(line)
756 755 continue
757 756
758 757 # handle decorators
759 758 if line_stripped.startswith('@'):
760 759 output.extend([line])
761 760 if 'savefig' in line:
762 761 savefig = True # and need to clear figure
763 762 continue
764 763
765 764 # handle comments
766 765 if line_stripped.startswith('#'):
767 766 output.extend([line])
768 767 continue
769 768
770 769 # deal with lines checking for multiline
771 770 continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
772 771 if not multiline:
773 772 modified = u"%s %s" % (fmtin % ct, line_stripped)
774 773 output.append(modified)
775 774 ct += 1
776 775 try:
777 776 ast.parse(line_stripped)
778 777 output.append(u'')
779 778 except Exception: # on a multiline
780 779 multiline = True
781 780 multiline_start = lineno
782 781 else: # still on a multiline
783 782 modified = u'%s %s' % (continuation, line)
784 783 output.append(modified)
785 784
786 785 # if the next line is indented, it should be part of multiline
787 786 if len(content) > lineno + 1:
788 787 nextline = content[lineno + 1]
789 788 if len(nextline) - len(nextline.lstrip()) > 3:
790 789 continue
791 790 try:
792 791 mod = ast.parse(
793 792 '\n'.join(content[multiline_start:lineno+1]))
794 793 if isinstance(mod.body[0], ast.FunctionDef):
795 794 # check to see if we have the whole function
796 795 for element in mod.body[0].body:
797 796 if isinstance(element, ast.Return):
798 797 multiline = False
799 798 else:
800 799 output.append(u'')
801 800 multiline = False
802 801 except Exception:
803 802 pass
804 803
805 804 if savefig: # clear figure if plotted
806 805 self.ensure_pyplot()
807 806 self.process_input_line('plt.clf()', store_history=False)
808 807 self.clear_cout()
809 808 savefig = False
810 809
811 810 return output
812 811
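The pure-Python path above is essentially a prompt rewriter: complete statements get an `In [N]:` prompt, continuation lines get a `....:` marker, and pseudo-decorators and comments pass through untouched. The standalone helper below is a simplified, hypothetical sketch of that rewriting step only (no decorator, comment, or savefig handling), assuming statement completeness can be tested with `ast.parse`, as the method above does:

import ast

def prefix_prompts(lines, promptin='In [%d]:'):
    # Minimal illustration of process_pure_python's prompt prefixing.
    out, ct, buf = [], 1, []
    for line in lines:
        buf.append(line)
        try:
            # A complete statement parses cleanly; emit it with a prompt.
            ast.parse('\n'.join(buf))
        except SyntaxError:
            # Still inside a multiline construct; keep accumulating.
            continue
        prompt = promptin % ct
        continuation = ' ' * (len(prompt) - 5) + '...:'
        out.append('%s %s' % (prompt, buf[0]))
        out.extend('%s %s' % (continuation, extra) for extra in buf[1:])
        out.append('')   # blank line separates input/output blocks
        buf, ct = [], ct + 1
    return out

print('\n'.join(prefix_prompts(
    ['x = 1', 'def f(a):', '    return a + x', 'f(2)'])))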
813 812 def custom_doctest(self, decorator, input_lines, found, submitted):
814 813 """
815 814 Perform a specialized doctest.
816 815
817 816 """
818 817 from .custom_doctests import doctests
819 818
820 819 args = decorator.split()
821 820 doctest_type = args[1]
822 821 if doctest_type in doctests:
823 822 doctests[doctest_type](self, args, input_lines, found, submitted)
824 823 else:
825 824 e = "Invalid option to @doctest: {0}".format(doctest_type)
826 825 raise Exception(e)
827 826
828 827
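custom_doctest above looks up the word following `@doctest` in the `doctests` registry imported from `.custom_doctests` and calls the handler as `handler(shell, args, input_lines, found, submitted)`; the `float` handler exercised in the smoke test below is one such entry. A hedged sketch of a handler with that signature (the name `exact_stripped` and its registration are hypothetical, shown only to illustrate the protocol; the relative import is assumed to resolve to `IPython.sphinxext.custom_doctests`):

def exact_stripped_doctest(sphinx_shell, args, input_lines, found, submitted):
    # Hypothetical handler: pass when the found and submitted output match
    # after stripping surrounding whitespace; fail loudly otherwise.
    if found.strip() != submitted.strip():
        raise RuntimeError(
            'doctest failure for input:\n%s\nfound: %r\nsubmitted: %r'
            % ('\n'.join(input_lines), found, submitted))

# Registering it would make "@doctest exact_stripped" usable in documents:
# from IPython.sphinxext.custom_doctests import doctests
# doctests['exact_stripped'] = exact_stripped_doctest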
829 828 class IPythonDirective(Directive):
830 829
831 830 has_content = True
832 831 required_arguments = 0
833 832 optional_arguments = 4 # python, suppress, verbatim, doctest
834 833 final_argument_whitespace = True
835 834 option_spec = { 'python': directives.unchanged,
836 835 'suppress' : directives.flag,
837 836 'verbatim' : directives.flag,
838 837 'doctest' : directives.flag,
839 838 'okexcept': directives.flag,
840 839 'okwarning': directives.flag
841 840 }
842 841
843 842 shell = None
844 843
845 844 seen_docs = set()
846 845
847 846 def get_config_options(self):
848 847 # contains sphinx configuration variables
849 848 config = self.state.document.settings.env.config
850 849
851 850 # get config variables to set figure output directory
852 851 outdir = self.state.document.settings.env.app.outdir
853 852 savefig_dir = config.ipython_savefig_dir
854 853 source_dir = os.path.dirname(self.state.document.current_source)
855 854 if savefig_dir is None:
856 855 savefig_dir = config.html_static_path or '_static'
857 856 if isinstance(savefig_dir, list):
858 857 savefig_dir = os.path.join(*savefig_dir)
859 858 savefig_dir = os.path.join(outdir, savefig_dir)
860 859
861 860 # get regex and prompt stuff
862 861 rgxin = config.ipython_rgxin
863 862 rgxout = config.ipython_rgxout
864 863 promptin = config.ipython_promptin
865 864 promptout = config.ipython_promptout
866 865 mplbackend = config.ipython_mplbackend
867 866 exec_lines = config.ipython_execlines
868 867 hold_count = config.ipython_holdcount
869 868
870 869 return (savefig_dir, source_dir, rgxin, rgxout,
871 870 promptin, promptout, mplbackend, exec_lines, hold_count)
872 871
873 872 def setup(self):
874 873 # Get configuration values.
875 874 (savefig_dir, source_dir, rgxin, rgxout, promptin, promptout,
876 875 mplbackend, exec_lines, hold_count) = self.get_config_options()
877 876
878 877 if self.shell is None:
879 878 # We will be here many times. However, when the
880 879 # EmbeddedSphinxShell is created, its interactive shell member
881 880 # is the same for each instance.
882 881
883 882 if mplbackend and 'matplotlib.backends' not in sys.modules:
884 883 import matplotlib
885 884 matplotlib.use(mplbackend)
886 885
887 886 # Must be called after (potentially) importing matplotlib and
888 887 # setting its backend since exec_lines might import pylab.
889 888 self.shell = EmbeddedSphinxShell(exec_lines)
890 889
891 890 # Store IPython directive to enable better error messages
892 891 self.shell.directive = self
893 892
894 893 # reset the execution count if we haven't processed this doc
895 894 #NOTE: this may be borked if there are multiple seen_doc tmp files
896 895 #check time stamp?
897 896 if self.state.document.current_source not in self.seen_docs:
898 897 self.shell.IP.history_manager.reset()
899 898 self.shell.IP.execution_count = 1
900 899 self.seen_docs.add(self.state.document.current_source)
901 900
902 901 # and attach to shell so we don't have to pass them around
903 902 self.shell.rgxin = rgxin
904 903 self.shell.rgxout = rgxout
905 904 self.shell.promptin = promptin
906 905 self.shell.promptout = promptout
907 906 self.shell.savefig_dir = savefig_dir
908 907 self.shell.source_dir = source_dir
909 908 self.shell.hold_count = hold_count
910 909
911 910 # setup bookmark for saving figures directory
912 911 self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
913 912 store_history=False)
914 913 self.shell.clear_cout()
915 914
916 915 return rgxin, rgxout, promptin, promptout
917 916
918 917 def teardown(self):
919 918 # delete last bookmark
920 919 self.shell.process_input_line('bookmark -d ipy_savedir',
921 920 store_history=False)
922 921 self.shell.clear_cout()
923 922
924 923 def run(self):
925 924 debug = False
926 925
927 926 # TODO: any reason block_parser can't be a method of the embeddable shell?
928 927 # Then we wouldn't have to carry these around.
929 928 rgxin, rgxout, promptin, promptout = self.setup()
930 929
931 930 options = self.options
932 931 self.shell.is_suppress = 'suppress' in options
933 932 self.shell.is_doctest = 'doctest' in options
934 933 self.shell.is_verbatim = 'verbatim' in options
935 934 self.shell.is_okexcept = 'okexcept' in options
936 935 self.shell.is_okwarning = 'okwarning' in options
937 936
938 937 # handle pure python code
939 938 if 'python' in self.arguments:
940 939 content = self.content
941 940 self.content = self.shell.process_pure_python(content)
942 941
943 942 # parts consists of all text within the ipython-block.
944 943 # Each part is an input/output block.
945 944 parts = '\n'.join(self.content).split('\n\n')
946 945
947 946 lines = ['.. code-block:: ipython', '']
948 947 figures = []
949 948
950 949 for part in parts:
951 950 block = block_parser(part, rgxin, rgxout, promptin, promptout)
952 951 if len(block):
953 952 rows, figure = self.shell.process_block(block)
954 953 for row in rows:
955 954 lines.extend([' {0}'.format(line)
956 955 for line in row.split('\n')])
957 956
958 957 if figure is not None:
959 958 figures.append(figure)
960 959
961 960 for figure in figures:
962 961 lines.append('')
963 962 lines.extend(figure.split('\n'))
964 963 lines.append('')
965 964
966 965 if len(lines) > 2:
967 966 if debug:
968 967 print('\n'.join(lines))
969 968 else:
970 969 # This has to do with input, not output. But if we comment
971 970 # these lines out, then no IPython code will appear in the
972 971 # final output.
973 972 self.state_machine.insert_input(
974 973 lines, self.state_machine.input_lines.source(0))
975 974
976 975 # cleanup
977 976 self.teardown()
978 977
979 978 return []
980 979
981 980 # Enable as a proper Sphinx directive
982 981 def setup(app):
983 982 setup.app = app
984 983
985 984 app.add_directive('ipython', IPythonDirective)
986 985 app.add_config_value('ipython_savefig_dir', None, 'env')
987 986 app.add_config_value('ipython_rgxin',
988 987 re.compile('In \[(\d+)\]:\s?(.*)\s*'), 'env')
989 988 app.add_config_value('ipython_rgxout',
990 989 re.compile('Out\[(\d+)\]:\s?(.*)\s*'), 'env')
991 990 app.add_config_value('ipython_promptin', 'In [%d]:', 'env')
992 991 app.add_config_value('ipython_promptout', 'Out[%d]:', 'env')
993 992
994 993 # We could just let matplotlib pick whatever is specified as the default
995 994 # backend in the matplotlibrc file, but this would cause issues if the
996 995 # backend didn't work in headless environments. For this reason, 'agg'
997 996 # is a good default backend choice.
998 997 app.add_config_value('ipython_mplbackend', 'agg', 'env')
999 998
1000 999 # If the user sets this config value to `None`, then EmbeddedSphinxShell's
1001 1000 # __init__ method will treat it as [].
1002 1001 execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
1003 1002 app.add_config_value('ipython_execlines', execlines, 'env')
1004 1003
1005 1004 app.add_config_value('ipython_holdcount', True, 'env')
1006 1005
1007 1006 metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
1008 1007 return metadata
1009 1008
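setup() above registers the directive and its `ipython_*` config values with Sphinx, so they can be overridden from a project's conf.py. A hedged sketch of such a configuration (module paths assumed from IPython's sphinxext package; the values are illustrative, not recommendations):

# conf.py (sketch)
extensions = [
    'IPython.sphinxext.ipython_directive',
    'IPython.sphinxext.ipython_console_highlighting',  # lexer for '.. code-block:: ipython'
]

ipython_mplbackend = 'agg'                    # headless-safe backend, as above
ipython_execlines = ['import numpy as np',    # run before the blocks in each document
                     'import matplotlib.pyplot as plt']
ipython_savefig_dir = 'savefig'               # joined with the build outdir by get_config_options()
ipython_holdcount = True                      # passed through to the shell as hold_count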
1010 1009 # Simple smoke test, needs to be converted to a proper automatic test.
1011 1010 def test():
1012 1011
1013 1012 examples = [
1014 1013 r"""
1015 1014 In [9]: pwd
1016 1015 Out[9]: '/home/jdhunter/py4science/book'
1017 1016
1018 1017 In [10]: cd bookdata/
1019 1018 /home/jdhunter/py4science/book/bookdata
1020 1019
1021 1020 In [2]: from pylab import *
1022 1021
1023 1022 In [2]: ion()
1024 1023
1025 1024 In [3]: im = imread('stinkbug.png')
1026 1025
1027 1026 @savefig mystinkbug.png width=4in
1028 1027 In [4]: imshow(im)
1029 1028 Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
1030 1029
1031 1030 """,
1032 1031 r"""
1033 1032
1034 1033 In [1]: x = 'hello world'
1035 1034
1036 1035 # string methods can be
1037 1036 # used to alter the string
1038 1037 @doctest
1039 1038 In [2]: x.upper()
1040 1039 Out[2]: 'HELLO WORLD'
1041 1040
1042 1041 @verbatim
1043 1042 In [3]: x.st<TAB>
1044 1043 x.startswith x.strip
1045 1044 """,
1046 1045 r"""
1047 1046
1048 1047 In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
1049 1048 .....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
1050 1049
1051 1050 In [131]: print url.split('&')
1052 1051 ['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
1053 1052
1054 1053 In [60]: import urllib
1055 1054
1056 1055 """,
1057 1056 r"""\
1058 1057
1059 1058 In [133]: import numpy.random
1060 1059
1061 1060 @suppress
1062 1061 In [134]: numpy.random.seed(2358)
1063 1062
1064 1063 @doctest
1065 1064 In [135]: numpy.random.rand(10,2)
1066 1065 Out[135]:
1067 1066 array([[ 0.64524308, 0.59943846],
1068 1067 [ 0.47102322, 0.8715456 ],
1069 1068 [ 0.29370834, 0.74776844],
1070 1069 [ 0.99539577, 0.1313423 ],
1071 1070 [ 0.16250302, 0.21103583],
1072 1071 [ 0.81626524, 0.1312433 ],
1073 1072 [ 0.67338089, 0.72302393],
1074 1073 [ 0.7566368 , 0.07033696],
1075 1074 [ 0.22591016, 0.77731835],
1076 1075 [ 0.0072729 , 0.34273127]])
1077 1076
1078 1077 """,
1079 1078
1080 1079 r"""
1081 1080 In [106]: print x
1082 1081 jdh
1083 1082
1084 1083 In [109]: for i in range(10):
1085 1084 .....: print i
1086 1085 .....:
1087 1086 .....:
1088 1087 0
1089 1088 1
1090 1089 2
1091 1090 3
1092 1091 4
1093 1092 5
1094 1093 6
1095 1094 7
1096 1095 8
1097 1096 9
1098 1097 """,
1099 1098
1100 1099 r"""
1101 1100
1102 1101 In [144]: from pylab import *
1103 1102
1104 1103 In [145]: ion()
1105 1104
1106 1105 # use a semicolon to suppress the output
1107 1106 @savefig test_hist.png width=4in
1108 1107 In [151]: hist(np.random.randn(10000), 100);
1109 1108
1110 1109
1111 1110 @savefig test_plot.png width=4in
1112 1111 In [151]: plot(np.random.randn(10000), 'o');
1113 1112 """,
1114 1113
1115 1114 r"""
1116 1115 # use a semicolon to suppress the output
1117 1116 In [151]: plt.clf()
1118 1117
1119 1118 @savefig plot_simple.png width=4in
1120 1119 In [151]: plot([1,2,3])
1121 1120
1122 1121 @savefig hist_simple.png width=4in
1123 1122 In [151]: hist(np.random.randn(10000), 100);
1124 1123
1125 1124 """,
1126 1125 r"""
1127 1126 # update the current fig
1128 1127 In [151]: ylabel('number')
1129 1128
1130 1129 In [152]: title('normal distribution')
1131 1130
1132 1131
1133 1132 @savefig hist_with_text.png
1134 1133 In [153]: grid(True)
1135 1134
1136 1135 @doctest float
1137 1136 In [154]: 0.1 + 0.2
1138 1137 Out[154]: 0.3
1139 1138
1140 1139 @doctest float
1141 1140 In [155]: np.arange(16).reshape(4,4)
1142 1141 Out[155]:
1143 1142 array([[ 0, 1, 2, 3],
1144 1143 [ 4, 5, 6, 7],
1145 1144 [ 8, 9, 10, 11],
1146 1145 [12, 13, 14, 15]])
1147 1146
1148 1147 In [1]: x = np.arange(16, dtype=float).reshape(4,4)
1149 1148
1150 1149 In [2]: x[0,0] = np.inf
1151 1150
1152 1151 In [3]: x[0,1] = np.nan
1153 1152
1154 1153 @doctest float
1155 1154 In [4]: x
1156 1155 Out[4]:
1157 1156 array([[ inf, nan, 2., 3.],
1158 1157 [ 4., 5., 6., 7.],
1159 1158 [ 8., 9., 10., 11.],
1160 1159 [ 12., 13., 14., 15.]])
1161 1160
1162 1161
1163 1162 """,
1164 1163 ]
1165 1164 # skip local-file depending first example:
1166 1165 examples = examples[1:]
1167 1166
1168 1167 #ipython_directive.DEBUG = True # dbg
1169 1168 #options = dict(suppress=True) # dbg
1170 1169 options = dict()
1171 1170 for example in examples:
1172 1171 content = example.split('\n')
1173 1172 IPythonDirective('debug', arguments=None, options=options,
1174 1173 content=content, lineno=0,
1175 1174 content_offset=None, block_text=None,
1176 1175 state=None, state_machine=None,
1177 1176 )
1178 1177
1179 1178 # Run test suite as a script
1180 1179 if __name__=='__main__':
1181 1180 if not os.path.isdir('_static'):
1182 1181 os.mkdir('_static')
1183 1182 test()
1184 1183 print('All OK? Check figures in _static/')
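For reference, this is roughly what the directive consumes in a document: the options come from option_spec above, the pseudo-decorators are the ones exercised by the smoke test, and the snippet assumes the default ipython_execlines, so np and plt are already imported. A hedged usage sketch:

.. ipython::
   :okwarning:

   @suppress
   In [1]: np.random.seed(0)

   @savefig example_hist.png width=4in
   In [2]: plt.hist(np.random.randn(1000), 30);

   @doctest float
   In [3]: 0.1 + 0.2
   Out[3]: 0.3

   @verbatim
   In [4]: plt.sa<TAB>
   plt.savefig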
@@ -1,326 +1,324 b''
1 1 # encoding: utf-8
2 2 """
3 3 An embedded IPython shell.
4 4 """
5 5 # Copyright (c) IPython Development Team.
6 6 # Distributed under the terms of the Modified BSD License.
7 7
8 from __future__ import with_statement
9 from __future__ import print_function
10 8
11 9 import sys
12 10 import warnings
13 11
14 12 from IPython.core import ultratb, compilerop
15 13 from IPython.core.magic import Magics, magics_class, line_magic
16 14 from IPython.core.interactiveshell import DummyMod, InteractiveShell
17 15 from IPython.terminal.interactiveshell import TerminalInteractiveShell
18 16 from IPython.terminal.ipapp import load_default_config
19 17
20 18 from traitlets import Bool, CBool, Unicode
21 19 from IPython.utils.io import ask_yes_no
22 20
23 21 class KillEmbeded(Exception):pass
24 22
25 23 # This is an additional magic that is exposed in embedded shells.
26 24 @magics_class
27 25 class EmbeddedMagics(Magics):
28 26
29 27 @line_magic
30 28 def kill_embedded(self, parameter_s=''):
31 29 """%kill_embedded : deactivate for good the current embedded IPython.
32 30
33 31 This function (after asking for confirmation) sets an internal flag so
34 32 that an embedded IPython will never activate again. This is useful to
35 33 permanently disable a shell that is being called inside a loop: once
36 34 you've figured out what you needed from it, you may then kill it and
38 36 the program will continue to run without the interactive shell
38 36 interfering again.
39 37 """
40 38
41 39 kill = ask_yes_no("Are you sure you want to kill this embedded instance? [y/N] ",'n')
42 40 if kill:
43 41 self.shell.embedded_active = False
44 42 print ("This embedded IPython will not reactivate anymore "
45 43 "once you exit.")
46 44
47 45
48 46 @line_magic
49 47 def exit_raise(self, parameter_s=''):
50 48 """%exit_raise Make the current embedded kernel exit and raise and exception.
51 49
52 50 This function sets an internal flag so that an embedded IPython will
53 51 raise an `IPython.terminal.embed.KillEmbeded` exception on exit, and then exit the current IPython. This is
54 52 useful to permanently exit a loop that creates IPython embed instances.
55 53 """
56 54
57 55 self.shell.should_raise = True
58 56 self.shell.ask_exit()
59 57
60 58
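These two magics are how you escape a repeatedly embedded shell: %kill_embedded deactivates the current call site for the rest of the run, while %exit_raise makes the shell raise KillEmbeded so the surrounding code can abort entirely. A small hedged sketch of that workflow (meant to be run interactively):

from IPython import embed
from IPython.terminal.embed import KillEmbeded

try:
    for step in range(5):
        value = step * step
        # At the prompt: %kill_embedded (answer 'y') stops re-entering on
        # later iterations; %exit_raise aborts the whole loop via KillEmbeded.
        embed(header='inspecting step %d' % step)
except KillEmbeded:
    print('loop aborted from the embedded shell')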
61 59
62 60 class InteractiveShellEmbed(TerminalInteractiveShell):
63 61
64 62 dummy_mode = Bool(False)
65 63 exit_msg = Unicode('')
66 64 embedded = CBool(True)
67 65 should_raise = CBool(False)
68 66 # Like the base class display_banner is not configurable, but here it
69 67 # is True by default.
70 68 display_banner = CBool(True)
71 69 exit_msg = Unicode()
72 70
73 71 # When embedding, by default we don't change the terminal title
74 72 term_title = Bool(False,
75 73 help="Automatically set the terminal title"
76 74 ).tag(config=True)
77 75
78 76 _inactive_locations = set()
79 77
80 78 @property
81 79 def embedded_active(self):
82 80 return self._call_location_id not in InteractiveShellEmbed._inactive_locations
83 81
84 82 @embedded_active.setter
85 83 def embedded_active(self, value):
86 84 if value :
87 85 if self._call_location_id in InteractiveShellEmbed._inactive_locations:
88 86 InteractiveShellEmbed._inactive_locations.remove(self._call_location_id)
89 87 else:
90 88 InteractiveShellEmbed._inactive_locations.add(self._call_location_id)
91 89
92 90 def __init__(self, **kw):
93 91
94 92
95 93 if kw.get('user_global_ns', None) is not None:
96 94 raise DeprecationWarning("Key word argument `user_global_ns` has been replaced by `user_module` since IPython 4.0.")
97 95
98 96 self._call_location_id = kw.pop('_call_location_id', None)
99 97
100 98 super(InteractiveShellEmbed,self).__init__(**kw)
101 99
102 100 if not self._call_location_id:
103 101 frame = sys._getframe(1)
104 102 self._call_location_id = '%s:%s' % (frame.f_code.co_filename, frame.f_lineno)
105 103 # don't use the ipython crash handler so that user exceptions aren't
106 104 # trapped
107 105 sys.excepthook = ultratb.FormattedTB(color_scheme=self.colors,
108 106 mode=self.xmode,
109 107 call_pdb=self.pdb)
110 108
111 109 def init_sys_modules(self):
112 110 pass
113 111
114 112 def init_magics(self):
115 113 super(InteractiveShellEmbed, self).init_magics()
116 114 self.register_magics(EmbeddedMagics)
117 115
118 116 def __call__(self, header='', local_ns=None, module=None, dummy=None,
119 117 stack_depth=1, global_ns=None, compile_flags=None):
120 118 """Activate the interactive interpreter.
121 119
122 120 __call__(self,header='',local_ns=None,module=None,dummy=None) -> Start
123 121 the interpreter shell with the given local and global namespaces, and
124 122 optionally print a header string at startup.
125 123
126 124 The shell can be globally activated/deactivated using the
127 125 dummy_mode attribute. This allows you to turn off a shell used
128 126 for debugging globally.
129 127
130 128 However, *each* time you call the shell you can override the current
131 129 state of dummy_mode with the optional keyword parameter 'dummy'. For
132 130 example, if you set dummy mode on with IPShell.dummy_mode = True, you
133 131 can still have a specific call work by making it as IPShell(dummy=False).
134 132 """
135 133
136 134 # If the user has turned it off, go away
137 135 if not self.embedded_active:
138 136 return
139 137
140 138 # Normal exits from interactive mode set this flag, so the shell can't
141 139 # re-enter (it checks this variable at the start of interactive mode).
142 140 self.exit_now = False
143 141
144 142 # Allow the dummy parameter to override the global __dummy_mode
145 143 if dummy or (dummy != 0 and self.dummy_mode):
146 144 return
147 145
148 146 # self.banner is auto computed
149 147 if header:
150 148 self.old_banner2 = self.banner2
151 149 self.banner2 = self.banner2 + '\n' + header + '\n'
152 150 else:
153 151 self.old_banner2 = ''
154 152
155 153 if self.display_banner:
156 154 self.show_banner()
157 155
158 156 # Call the embedding code with a stack depth of 1 so it can skip over
159 157 # our call and get the original caller's namespaces.
160 158 self.mainloop(local_ns, module, stack_depth=stack_depth,
161 159 global_ns=global_ns, compile_flags=compile_flags)
162 160
163 161 self.banner2 = self.old_banner2
164 162
165 163 if self.exit_msg is not None:
166 164 print(self.exit_msg)
167 165
168 166 if self.should_raise:
169 167 raise KillEmbeded('Embedded IPython raising error, as user requested.')
170 168
171 169
172 170 def mainloop(self, local_ns=None, module=None, stack_depth=0,
173 171 display_banner=None, global_ns=None, compile_flags=None):
174 172 """Embeds IPython into a running python program.
175 173
176 174 Parameters
177 175 ----------
178 176
179 177 local_ns, module
180 178 Working local namespace (a dict) and module (a module or similar
181 179 object). If given as None, they are automatically taken from the scope
182 180 where the shell was called, so that program variables become visible.
183 181
184 182 stack_depth : int
185 183 How many levels up the stack to go looking for namespaces (when
186 184 local_ns or module is None). This allows an intermediate caller to
187 185 make sure that this function gets the namespace from the intended
188 186 level in the stack. By default (0) it will get its locals and globals
189 187 from the immediate caller.
190 188
191 189 compile_flags
192 190 A bit field identifying the __future__ features
193 191 that are enabled, as passed to the builtin :func:`compile` function.
194 192 If given as None, they are automatically taken from the scope where
195 193 the shell was called.
196 194
197 195 """
198 196
199 197 if (global_ns is not None) and (module is None):
200 198 raise DeprecationWarning("'global_ns' keyword argument is deprecated, and has been removed in IPython 5.0 use `module` keyword argument instead.")
201 199
202 200 if (display_banner is not None):
203 201 warnings.warn("The display_banner parameter is deprecated since IPython 4.0", DeprecationWarning)
204 202
205 203 # Get locals and globals from caller
206 204 if ((local_ns is None or module is None or compile_flags is None)
207 205 and self.default_user_namespaces):
208 206 call_frame = sys._getframe(stack_depth).f_back
209 207
210 208 if local_ns is None:
211 209 local_ns = call_frame.f_locals
212 210 if module is None:
213 211 global_ns = call_frame.f_globals
214 212 try:
215 213 module = sys.modules[global_ns['__name__']]
216 214 except KeyError:
217 215 warnings.warn("Failed to get module %s" % \
218 216 global_ns.get('__name__', 'unknown module')
219 217 )
220 218 module = DummyMod()
221 219 module.__dict__ = global_ns
222 220 if compile_flags is None:
223 221 compile_flags = (call_frame.f_code.co_flags &
224 222 compilerop.PyCF_MASK)
225 223
226 224 # Save original namespace and module so we can restore them after
227 225 # embedding; otherwise the shell doesn't shut down correctly.
228 226 orig_user_module = self.user_module
229 227 orig_user_ns = self.user_ns
230 228 orig_compile_flags = self.compile.flags
231 229
232 230 # Update namespaces and fire up interpreter
233 231
234 232 # The global one is easy, we can just throw it in
235 233 if module is not None:
236 234 self.user_module = module
237 235
238 236 # But the user/local one is tricky: ipython needs it to store internal
239 237 # data, but we also need the locals. We'll throw our hidden variables
240 238 # like _ih and get_ipython() into the local namespace, but delete them
241 239 # later.
242 240 if local_ns is not None:
243 241 reentrant_local_ns = {k: v for (k, v) in local_ns.items() if k not in self.user_ns_hidden.keys()}
244 242 self.user_ns = reentrant_local_ns
245 243 self.init_user_ns()
246 244
247 245 # Compiler flags
248 246 if compile_flags is not None:
249 247 self.compile.flags = compile_flags
250 248
251 249 # make sure the tab-completer has the correct frame information, so it
252 250 # actually completes using the frame's locals/globals
253 251 self.set_completer_frame()
254 252
255 253 with self.builtin_trap, self.display_trap:
256 254 self.interact()
257 255
258 256 # now, purge out the local namespace of IPython's hidden variables.
259 257 if local_ns is not None:
260 258 local_ns.update({k: v for (k, v) in self.user_ns.items() if k not in self.user_ns_hidden.keys()})
261 259
262 260
263 261 # Restore original namespace so shell can shut down when we exit.
264 262 self.user_module = orig_user_module
265 263 self.user_ns = orig_user_ns
266 264 self.compile.flags = orig_compile_flags
267 265
268 266
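The stack_depth parameter exists so that an intermediate wrapper can hand the shell its caller's namespace rather than its own; the module-level embed() below uses stack_depth=2 for exactly this reason. A hedged sketch of a custom wrapper doing the same (the helper name debug_here is hypothetical, not part of IPython):

from IPython.terminal.embed import InteractiveShellEmbed

_shell = InteractiveShellEmbed.instance()

def debug_here(banner=''):
    # stack_depth=2 makes mainloop() look past debug_here's own frame, so the
    # shell sees the *caller's* locals and globals -- the same trick embed()
    # uses below.
    _shell(header=banner, stack_depth=2)

def compute(x):
    y = x + 1
    debug_here('x and y should be visible here')
    return y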
269 267 def embed(**kwargs):
270 268 """Call this to embed IPython at the current point in your program.
271 269
272 270 The first invocation of this will create an :class:`InteractiveShellEmbed`
273 271 instance and then call it. Consecutive calls just call the already
274 272 created instance.
275 273
276 274 If you don't want the kernel to initialize the namespace
277 275 from the scope of the surrounding function,
278 276 and/or you want to load full IPython configuration,
279 277 you probably want `IPython.start_ipython()` instead.
280 278
281 279 Here is a simple example::
282 280
283 281 from IPython import embed
284 282 a = 10
285 283 b = 20
286 284 embed(header='First time')
287 285 c = 30
288 286 d = 40
289 287 embed()
290 288
291 289 Full customization can be done by passing a :class:`Config` in as the
292 290 config argument.
293 291 """
294 292 config = kwargs.get('config')
295 293 header = kwargs.pop('header', u'')
296 294 compile_flags = kwargs.pop('compile_flags', None)
297 295 if config is None:
298 296 config = load_default_config()
299 297 config.InteractiveShellEmbed = config.TerminalInteractiveShell
300 298 kwargs['config'] = config
301 299 #save ps1/ps2 if defined
302 300 ps1 = None
303 301 ps2 = None
304 302 try:
305 303 ps1 = sys.ps1
306 304 ps2 = sys.ps2
307 305 except AttributeError:
308 306 pass
309 307 #save previous instance
310 308 saved_shell_instance = InteractiveShell._instance
311 309 if saved_shell_instance is not None:
312 310 cls = type(saved_shell_instance)
313 311 cls.clear_instance()
314 312 frame = sys._getframe(1)
315 313 shell = InteractiveShellEmbed.instance(_call_location_id='%s:%s' % (frame.f_code.co_filename, frame.f_lineno), **kwargs)
316 314 shell(header=header, stack_depth=2, compile_flags=compile_flags)
317 315 InteractiveShellEmbed.clear_instance()
318 316 #restore previous instance
319 317 if saved_shell_instance is not None:
320 318 cls = type(saved_shell_instance)
321 319 cls.clear_instance()
322 320 for subclass in cls._walk_mro():
323 321 subclass._instance = saved_shell_instance
324 322 if ps1 is not None:
325 323 sys.ps1 = ps1
326 324 sys.ps2 = ps2
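As the docstring notes, full customization goes through the config keyword; when it is omitted, load_default_config() supplies one and its TerminalInteractiveShell section is copied onto InteractiveShellEmbed, as shown above. A hedged sketch of passing an explicit Config (the trait names are standard TerminalInteractiveShell options that appear elsewhere in this changeset):

from traitlets.config import Config
from IPython import embed

cfg = Config()
cfg.InteractiveShellEmbed.colors = 'Linux'        # terminal color scheme
cfg.InteractiveShellEmbed.confirm_exit = False    # skip the Ctrl-D confirmation
cfg.InteractiveShellEmbed.term_title = False      # leave the terminal title alone

a, b = 10, 20
embed(config=cfg, header='configured embed')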
@@ -1,501 +1,500 b''
1 1 """IPython terminal interface using prompt_toolkit"""
2 from __future__ import print_function
3 2
4 3 import os
5 4 import sys
6 5 import warnings
7 6 from warnings import warn
8 7
9 8 from IPython.core.interactiveshell import InteractiveShell, InteractiveShellABC
10 9 from IPython.utils import io
11 10 from IPython.utils.py3compat import PY3, cast_unicode_py2, input, string_types
12 11 from IPython.utils.terminal import toggle_set_term_title, set_term_title
13 12 from IPython.utils.process import abbrev_cwd
14 13 from traitlets import Bool, Unicode, Dict, Integer, observe, Instance, Type, default, Enum, Union
15 14
16 15 from prompt_toolkit.enums import DEFAULT_BUFFER, EditingMode
17 16 from prompt_toolkit.filters import (HasFocus, Condition, IsDone)
18 17 from prompt_toolkit.history import InMemoryHistory
19 18 from prompt_toolkit.shortcuts import create_prompt_application, create_eventloop, create_prompt_layout, create_output
20 19 from prompt_toolkit.interface import CommandLineInterface
21 20 from prompt_toolkit.key_binding.manager import KeyBindingManager
22 21 from prompt_toolkit.layout.processors import ConditionalProcessor, HighlightMatchingBracketProcessor
23 22 from prompt_toolkit.styles import PygmentsStyle, DynamicStyle
24 23
25 24 from pygments.styles import get_style_by_name, get_all_styles
26 25 from pygments.style import Style
27 26 from pygments.token import Token
28 27
29 28 from .debugger import TerminalPdb, Pdb
30 29 from .magics import TerminalMagics
31 30 from .pt_inputhooks import get_inputhook_name_and_func
32 31 from .prompts import Prompts, ClassicPrompts, RichPromptDisplayHook
33 32 from .ptutils import IPythonPTCompleter, IPythonPTLexer
34 33 from .shortcuts import register_ipython_shortcuts
35 34
36 35 DISPLAY_BANNER_DEPRECATED = object()
37 36
38 37
39 38 from pygments.style import Style
40 39
41 40 class _NoStyle(Style): pass
42 41
43 42
44 43
45 44 _style_overrides_light_bg = {
46 45 Token.Prompt: '#0000ff',
47 46 Token.PromptNum: '#0000ee bold',
48 47 Token.OutPrompt: '#cc0000',
49 48 Token.OutPromptNum: '#bb0000 bold',
50 49 }
51 50
52 51 _style_overrides_linux = {
53 52 Token.Prompt: '#00cc00',
54 53 Token.PromptNum: '#00bb00 bold',
55 54 Token.OutPrompt: '#cc0000',
56 55 Token.OutPromptNum: '#bb0000 bold',
57 56 }
58 57
59 58
60 59
61 60 def get_default_editor():
62 61 try:
63 62 ed = os.environ['EDITOR']
64 63 if not PY3:
65 64 ed = ed.decode()
66 65 return ed
67 66 except KeyError:
68 67 pass
69 68 except UnicodeError:
70 69 warn("$EDITOR environment variable is not pure ASCII. Using platform "
71 70 "default editor.")
72 71
73 72 if os.name == 'posix':
74 73 return 'vi' # the only one guaranteed to be there!
75 74 else:
76 75 return 'notepad' # same in Windows!
77 76
78 77 # conservatively check for tty
79 78 # overridden streams can result in things like:
80 79 # - sys.stdin = None
81 80 # - no isatty method
82 81 for _name in ('stdin', 'stdout', 'stderr'):
83 82 _stream = getattr(sys, _name)
84 83 if not _stream or not hasattr(_stream, 'isatty') or not _stream.isatty():
85 84 _is_tty = False
86 85 break
87 86 else:
88 87 _is_tty = True
89 88
90 89
91 90 _use_simple_prompt = ('IPY_TEST_SIMPLE_PROMPT' in os.environ) or (not _is_tty)
92 91
93 92 class TerminalInteractiveShell(InteractiveShell):
94 93 space_for_menu = Integer(6, help='Number of lines at the bottom of the screen '
95 94 'to reserve for the completion menu'
96 95 ).tag(config=True)
97 96
98 97 def _space_for_menu_changed(self, old, new):
99 98 self._update_layout()
100 99
101 100 pt_cli = None
102 101 debugger_history = None
103 102 _pt_app = None
104 103
105 104 simple_prompt = Bool(_use_simple_prompt,
106 105 help="""Use `raw_input` for the REPL, without completion, multiline input, and prompt colors.
107 106
108 107 Useful when controlling IPython as a subprocess, and piping STDIN/OUT/ERR. Known uses include
109 108 IPython's own testing machinery, and emacs inferior-shell integration through elpy.
110 109
111 110 This mode defaults to `True` if the `IPY_TEST_SIMPLE_PROMPT`
112 111 environment variable is set, or the current terminal is not a tty.
113 112
114 113 """
115 114 ).tag(config=True)
116 115
117 116 @property
118 117 def debugger_cls(self):
119 118 return Pdb if self.simple_prompt else TerminalPdb
120 119
121 120 confirm_exit = Bool(True,
122 121 help="""
123 122 Set to confirm when you try to exit IPython with an EOF (Control-D
124 123 in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit',
125 124 you can force a direct exit without any confirmation.""",
126 125 ).tag(config=True)
127 126
128 127 editing_mode = Unicode('emacs',
129 128 help="Shortcut style to use at the prompt. 'vi' or 'emacs'.",
130 129 ).tag(config=True)
131 130
132 131 mouse_support = Bool(False,
133 132 help="Enable mouse support in the prompt"
134 133 ).tag(config=True)
135 134
136 135 highlighting_style = Union([Unicode('legacy'), Type(klass=Style)],
137 136 help="""The name or class of a Pygments style to use for syntax
138 137 highlighting: \n %s""" % ', '.join(get_all_styles())
139 138 ).tag(config=True)
140 139
141 140
142 141 @observe('highlighting_style')
143 142 @observe('colors')
144 143 def _highlighting_style_changed(self, change):
145 144 self.refresh_style()
146 145
147 146 def refresh_style(self):
148 147 self._style = self._make_style_from_name_or_cls(self.highlighting_style)
149 148
150 149
151 150 highlighting_style_overrides = Dict(
152 151 help="Override highlighting format for specific tokens"
153 152 ).tag(config=True)
154 153
155 154 true_color = Bool(False,
156 155 help=("Use 24bit colors instead of 256 colors in prompt highlighting. "
157 156 "If your terminal supports true color, the following command "
158 157 "should print 'TRUECOLOR' in orange: "
159 158 "printf \"\\x1b[38;2;255;100;0mTRUECOLOR\\x1b[0m\\n\"")
160 159 ).tag(config=True)
161 160
162 161 editor = Unicode(get_default_editor(),
163 162 help="Set the editor used by IPython (default to $EDITOR/vi/notepad)."
164 163 ).tag(config=True)
165 164
166 165 prompts_class = Type(Prompts, help='Class used to generate Prompt token for prompt_toolkit').tag(config=True)
167 166
168 167 prompts = Instance(Prompts)
169 168
170 169 @default('prompts')
171 170 def _prompts_default(self):
172 171 return self.prompts_class(self)
173 172
174 173 @observe('prompts')
175 174 def _(self, change):
176 175 self._update_layout()
177 176
178 177 @default('displayhook_class')
179 178 def _displayhook_class_default(self):
180 179 return RichPromptDisplayHook
181 180
182 181 term_title = Bool(True,
183 182 help="Automatically set the terminal title"
184 183 ).tag(config=True)
185 184
186 185 display_completions = Enum(('column', 'multicolumn','readlinelike'),
187 186 help= ( "Options for displaying tab completions, 'column', 'multicolumn', and "
188 187 "'readlinelike'. These options are for `prompt_toolkit`, see "
189 188 "`prompt_toolkit` documentation for more information."
190 189 ),
191 190 default_value='multicolumn').tag(config=True)
192 191
193 192 highlight_matching_brackets = Bool(True,
194 193 help="Highlight matching brackets .",
195 194 ).tag(config=True)
196 195
197 196 @observe('term_title')
198 197 def init_term_title(self, change=None):
199 198 # Enable or disable the terminal title.
200 199 if self.term_title:
201 200 toggle_set_term_title(True)
202 201 set_term_title('IPython: ' + abbrev_cwd())
203 202 else:
204 203 toggle_set_term_title(False)
205 204
206 205 def init_display_formatter(self):
207 206 super(TerminalInteractiveShell, self).init_display_formatter()
208 207 # terminal only supports plain text
209 208 self.display_formatter.active_types = ['text/plain']
210 209
211 210 def init_prompt_toolkit_cli(self):
212 211 if self.simple_prompt:
213 212 # Fall back to plain non-interactive output for tests.
214 213 # This is very limited, and only accepts a single line.
215 214 def prompt():
216 215 return cast_unicode_py2(input('In [%d]: ' % self.execution_count))
217 216 self.prompt_for_code = prompt
218 217 return
219 218
220 219 # Set up keyboard shortcuts
221 220 kbmanager = KeyBindingManager.for_prompt()
222 221 register_ipython_shortcuts(kbmanager.registry, self)
223 222
224 223 # Pre-populate history from IPython's history database
225 224 history = InMemoryHistory()
226 225 last_cell = u""
227 226 for __, ___, cell in self.history_manager.get_tail(self.history_load_length,
228 227 include_latest=True):
229 228 # Ignore blank lines and consecutive duplicates
230 229 cell = cell.rstrip()
231 230 if cell and (cell != last_cell):
232 231 history.append(cell)
233 232 last_cell = cell
234 233
235 234 self._style = self._make_style_from_name_or_cls(self.highlighting_style)
236 235 style = DynamicStyle(lambda: self._style)
237 236
238 237 editing_mode = getattr(EditingMode, self.editing_mode.upper())
239 238
240 239 self._pt_app = create_prompt_application(
241 240 editing_mode=editing_mode,
242 241 key_bindings_registry=kbmanager.registry,
243 242 history=history,
244 243 completer=IPythonPTCompleter(shell=self),
245 244 enable_history_search=True,
246 245 style=style,
247 246 mouse_support=self.mouse_support,
248 247 **self._layout_options()
249 248 )
250 249 self._eventloop = create_eventloop(self.inputhook)
251 250 self.pt_cli = CommandLineInterface(
252 251 self._pt_app, eventloop=self._eventloop,
253 252 output=create_output(true_color=self.true_color))
254 253
255 254 def _make_style_from_name_or_cls(self, name_or_cls):
256 255 """
257 256 Small wrapper that makes an IPython-compatible style from a style name.
258 257
259 258 We need this to add styling for the prompt, etc.
260 259 """
261 260 style_overrides = {}
262 261 if name_or_cls == 'legacy':
263 262 legacy = self.colors.lower()
264 263 if legacy == 'linux':
265 264 style_cls = get_style_by_name('monokai')
266 265 style_overrides = _style_overrides_linux
267 266 elif legacy == 'lightbg':
268 267 style_overrides = _style_overrides_light_bg
269 268 style_cls = get_style_by_name('pastie')
270 269 elif legacy == 'neutral':
271 270 # The default theme needs to be visible on both a dark background
272 271 # and a light background, because we can't tell what the terminal
273 272 # looks like. These tweaks to the default theme help with that.
274 273 style_cls = get_style_by_name('default')
275 274 style_overrides.update({
276 275 Token.Number: '#007700',
277 276 Token.Operator: 'noinherit',
278 277 Token.String: '#BB6622',
279 278 Token.Name.Function: '#2080D0',
280 279 Token.Name.Class: 'bold #2080D0',
281 280 Token.Name.Namespace: 'bold #2080D0',
282 281 Token.Prompt: '#009900',
283 282 Token.PromptNum: '#00ff00 bold',
284 283 Token.OutPrompt: '#990000',
285 284 Token.OutPromptNum: '#ff0000 bold',
286 285 })
287 286 elif legacy =='nocolor':
288 287 style_cls=_NoStyle
289 288 style_overrides = {}
290 289 else :
291 290 raise ValueError('Got unknown colors: ', legacy)
292 291 else :
293 292 if isinstance(name_or_cls, string_types):
294 293 style_cls = get_style_by_name(name_or_cls)
295 294 else:
296 295 style_cls = name_or_cls
297 296 style_overrides = {
298 297 Token.Prompt: '#009900',
299 298 Token.PromptNum: '#00ff00 bold',
300 299 Token.OutPrompt: '#990000',
301 300 Token.OutPromptNum: '#ff0000 bold',
302 301 }
303 302 style_overrides.update(self.highlighting_style_overrides)
304 303 style = PygmentsStyle.from_defaults(pygments_style_cls=style_cls,
305 304 style_dict=style_overrides)
306 305
307 306 return style
308 307
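_make_style_from_name_or_cls() resolves highlighting_style (a Pygments style name or class), then layers the prompt token colors and any highlighting_style_overrides on top. A hedged sketch of driving this from a profile's ipython_config.py (get_config() is assumed to be injected by IPython when it loads the file; the Token names mirror the overrides above):

# ipython_config.py (sketch)
from pygments.token import Token

c = get_config()  # provided by IPython when the config file is loaded
c.TerminalInteractiveShell.highlighting_style = 'monokai'
c.TerminalInteractiveShell.highlighting_style_overrides = {
    Token.Prompt: '#00aa00',
    Token.PromptNum: '#00ff00 bold',
    Token.OutPrompt: '#aa0000',
}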
309 308 def _layout_options(self):
310 309 """
311 310 Return the current layout option for the current Terminal InteractiveShell
312 311 """
313 312 return {
314 313 'lexer':IPythonPTLexer(),
315 314 'reserve_space_for_menu':self.space_for_menu,
316 315 'get_prompt_tokens':self.prompts.in_prompt_tokens,
317 316 'get_continuation_tokens':self.prompts.continuation_prompt_tokens,
318 317 'multiline':True,
319 318 'display_completions_in_columns': (self.display_completions == 'multicolumn'),
320 319
321 320 # Highlight matching brackets, but only when this setting is
322 321 # enabled, and only when the DEFAULT_BUFFER has the focus.
323 322 'extra_input_processors': [ConditionalProcessor(
324 323 processor=HighlightMatchingBracketProcessor(chars='[](){}'),
325 324 filter=HasFocus(DEFAULT_BUFFER) & ~IsDone() &
326 325 Condition(lambda cli: self.highlight_matching_brackets))],
327 326 }
328 327
329 328 def _update_layout(self):
330 329 """
331 330 Ask for a recomputation of the application layout if, for example,
332 331 some configuration options have changed.
333 332 """
334 333 if self._pt_app:
335 334 self._pt_app.layout = create_prompt_layout(**self._layout_options())
336 335
337 336 def prompt_for_code(self):
338 337 document = self.pt_cli.run(
339 338 pre_run=self.pre_prompt, reset_current_buffer=True)
340 339 return document.text
341 340
342 341 def enable_win_unicode_console(self):
343 342 if sys.version_info >= (3, 6):
344 343 # Since PEP 528, Python uses the unicode APIs for the Windows
345 344 # console by default, so WUC shouldn't be needed.
346 345 return
347 346
348 347 import win_unicode_console
349 348
350 349 if PY3:
351 350 win_unicode_console.enable()
352 351 else:
353 352 # https://github.com/ipython/ipython/issues/9768
354 353 from win_unicode_console.streams import (TextStreamWrapper,
355 354 stdout_text_transcoded, stderr_text_transcoded)
356 355
357 356 class LenientStrStreamWrapper(TextStreamWrapper):
358 357 def write(self, s):
359 358 if isinstance(s, bytes):
360 359 s = s.decode(self.encoding, 'replace')
361 360
362 361 self.base.write(s)
363 362
364 363 stdout_text_str = LenientStrStreamWrapper(stdout_text_transcoded)
365 364 stderr_text_str = LenientStrStreamWrapper(stderr_text_transcoded)
366 365
367 366 win_unicode_console.enable(stdout=stdout_text_str,
368 367 stderr=stderr_text_str)
369 368
370 369 def init_io(self):
371 370 if sys.platform not in {'win32', 'cli'}:
372 371 return
373 372
374 373 self.enable_win_unicode_console()
375 374
376 375 import colorama
377 376 colorama.init()
378 377
379 378 # For some reason we make these wrappers around stdout/stderr.
380 379 # For now, we need to reset them so all output gets coloured.
381 380 # https://github.com/ipython/ipython/issues/8669
382 381 # io.std* are deprecated, but don't show our own deprecation warnings
383 382 # during initialization of the deprecated API.
384 383 with warnings.catch_warnings():
385 384 warnings.simplefilter('ignore', DeprecationWarning)
386 385 io.stdout = io.IOStream(sys.stdout)
387 386 io.stderr = io.IOStream(sys.stderr)
388 387
389 388 def init_magics(self):
390 389 super(TerminalInteractiveShell, self).init_magics()
391 390 self.register_magics(TerminalMagics)
392 391
393 392 def init_alias(self):
394 393 # The parent class defines aliases that can be safely used with any
395 394 # frontend.
396 395 super(TerminalInteractiveShell, self).init_alias()
397 396
398 397 # Now define aliases that only make sense on the terminal, because they
399 398 # need direct access to the console in a way that we can't emulate in
400 399 # GUI or web frontend
401 400 if os.name == 'posix':
402 401 for cmd in ['clear', 'more', 'less', 'man']:
403 402 self.alias_manager.soft_define_alias(cmd, cmd)
404 403
405 404
406 405 def __init__(self, *args, **kwargs):
407 406 super(TerminalInteractiveShell, self).__init__(*args, **kwargs)
408 407 self.init_prompt_toolkit_cli()
409 408 self.init_term_title()
410 409 self.keep_running = True
411 410
412 411 self.debugger_history = InMemoryHistory()
413 412
414 413 def ask_exit(self):
415 414 self.keep_running = False
416 415
417 416 rl_next_input = None
418 417
419 418 def pre_prompt(self):
420 419 if self.rl_next_input:
421 420 self.pt_cli.application.buffer.text = cast_unicode_py2(self.rl_next_input)
422 421 self.rl_next_input = None
423 422
424 423 def interact(self, display_banner=DISPLAY_BANNER_DEPRECATED):
425 424
426 425 if display_banner is not DISPLAY_BANNER_DEPRECATED:
427 426 warn('interact `display_banner` argument is deprecated since IPython 5.0. Call `show_banner()` if needed.', DeprecationWarning, stacklevel=2)
428 427
429 428 self.keep_running = True
430 429 while self.keep_running:
431 430 print(self.separate_in, end='')
432 431
433 432 try:
434 433 code = self.prompt_for_code()
435 434 except EOFError:
436 435 if (not self.confirm_exit) \
437 436 or self.ask_yes_no('Do you really want to exit ([y]/n)?','y','n'):
438 437 self.ask_exit()
439 438
440 439 else:
441 440 if code:
442 441 self.run_cell(code, store_history=True)
443 442
444 443 def mainloop(self, display_banner=DISPLAY_BANNER_DEPRECATED):
445 444 # An extra layer of protection in case someone mashing Ctrl-C breaks
446 445 # out of our internal code.
447 446 if display_banner is not DISPLAY_BANNER_DEPRECATED:
448 447 warn('mainloop `display_banner` argument is deprecated since IPython 5.0. Call `show_banner()` if needed.', DeprecationWarning, stacklevel=2)
449 448 while True:
450 449 try:
451 450 self.interact()
452 451 break
453 452 except KeyboardInterrupt:
454 453 print("\nKeyboardInterrupt escaped interact()\n")
455 454
456 455 _inputhook = None
457 456 def inputhook(self, context):
458 457 if self._inputhook is not None:
459 458 self._inputhook(context)
460 459
461 460 active_eventloop = None
462 461 def enable_gui(self, gui=None):
463 462 if gui:
464 463 self.active_eventloop, self._inputhook =\
465 464 get_inputhook_name_and_func(gui)
466 465 else:
467 466 self.active_eventloop = self._inputhook = None
468 467
469 468 # Run !system commands directly, not through pipes, so terminal programs
470 469 # work correctly.
471 470 system = InteractiveShell.system_raw
472 471
473 472 def auto_rewrite_input(self, cmd):
474 473 """Overridden from the parent class to use fancy rewriting prompt"""
475 474 if not self.show_rewritten_input:
476 475 return
477 476
478 477 tokens = self.prompts.rewrite_prompt_tokens()
479 478 if self.pt_cli:
480 479 self.pt_cli.print_tokens(tokens)
481 480 print(cmd)
482 481 else:
483 482 prompt = ''.join(s for t, s in tokens)
484 483 print(prompt, cmd, sep='')
485 484
486 485 _prompts_before = None
487 486 def switch_doctest_mode(self, mode):
488 487 """Switch prompts to classic for %doctest_mode"""
489 488 if mode:
490 489 self._prompts_before = self.prompts
491 490 self.prompts = ClassicPrompts(self)
492 491 elif self._prompts_before:
493 492 self.prompts = self._prompts_before
494 493 self._prompts_before = None
495 494 self._update_layout()
496 495
497 496
498 497 InteractiveShellABC.register(TerminalInteractiveShell)
499 498
500 499 if __name__ == '__main__':
501 500 TerminalInteractiveShell.instance().interact()
@@ -1,372 +1,370 b''
1 1 #!/usr/bin/env python
2 2 # encoding: utf-8
3 3 """
4 4 The :class:`~IPython.core.application.Application` object for the command
5 5 line :command:`ipython` program.
6 6 """
7 7
8 8 # Copyright (c) IPython Development Team.
9 9 # Distributed under the terms of the Modified BSD License.
10 10
11 from __future__ import absolute_import
12 from __future__ import print_function
13 11
14 12 import logging
15 13 import os
16 14 import sys
17 15 import warnings
18 16
19 17 from traitlets.config.loader import Config
20 18 from traitlets.config.application import boolean_flag, catch_config_error, Application
21 19 from IPython.core import release
22 20 from IPython.core import usage
23 21 from IPython.core.completer import IPCompleter
24 22 from IPython.core.crashhandler import CrashHandler
25 23 from IPython.core.formatters import PlainTextFormatter
26 24 from IPython.core.history import HistoryManager
27 25 from IPython.core.application import (
28 26 ProfileDir, BaseIPythonApplication, base_flags, base_aliases
29 27 )
30 28 from IPython.core.magics import ScriptMagics
31 29 from IPython.core.shellapp import (
32 30 InteractiveShellApp, shell_flags, shell_aliases
33 31 )
34 32 from IPython.extensions.storemagic import StoreMagics
35 33 from .interactiveshell import TerminalInteractiveShell
36 34 from IPython.paths import get_ipython_dir
37 35 from traitlets import (
38 36 Bool, List, Dict, default, observe,
39 37 )
40 38
41 39 #-----------------------------------------------------------------------------
42 40 # Globals, utilities and helpers
43 41 #-----------------------------------------------------------------------------
44 42
45 43 _examples = """
46 44 ipython --matplotlib # enable matplotlib integration
47 45 ipython --matplotlib=qt # enable matplotlib integration with qt4 backend
48 46
49 47 ipython --log-level=DEBUG # set logging to DEBUG
50 48 ipython --profile=foo # start with profile foo
51 49
52 50 ipython profile create foo # create profile foo w/ default config files
53 51 ipython help profile # show the help for the profile subcmd
54 52
55 53 ipython locate # print the path to the IPython directory
56 54 ipython locate profile foo # print the path to the directory for profile `foo`
57 55 """
58 56
59 57 #-----------------------------------------------------------------------------
60 58 # Crash handler for this application
61 59 #-----------------------------------------------------------------------------
62 60
63 61 class IPAppCrashHandler(CrashHandler):
64 62 """sys.excepthook for IPython itself, leaves a detailed report on disk."""
65 63
66 64 def __init__(self, app):
67 65 contact_name = release.author
68 66 contact_email = release.author_email
69 67 bug_tracker = 'https://github.com/ipython/ipython/issues'
70 68 super(IPAppCrashHandler,self).__init__(
71 69 app, contact_name, contact_email, bug_tracker
72 70 )
73 71
74 72 def make_report(self,traceback):
75 73 """Return a string containing a crash report."""
76 74
77 75 sec_sep = self.section_sep
78 76 # Start with parent report
79 77 report = [super(IPAppCrashHandler, self).make_report(traceback)]
80 78 # Add interactive-specific info we may have
81 79 rpt_add = report.append
82 80 try:
83 81 rpt_add(sec_sep+"History of session input:")
84 82 for line in self.app.shell.user_ns['_ih']:
85 83 rpt_add(line)
86 84 rpt_add('\n*** Last line of input (may not be in above history):\n')
87 85 rpt_add(self.app.shell._last_input_line+'\n')
88 86 except:
89 87 pass
90 88
91 89 return ''.join(report)
92 90
93 91 #-----------------------------------------------------------------------------
94 92 # Aliases and Flags
95 93 #-----------------------------------------------------------------------------
96 94 flags = dict(base_flags)
97 95 flags.update(shell_flags)
98 96 frontend_flags = {}
99 97 addflag = lambda *args: frontend_flags.update(boolean_flag(*args))
100 98 addflag('autoedit-syntax', 'TerminalInteractiveShell.autoedit_syntax',
101 99 'Turn on auto editing of files with syntax errors.',
102 100 'Turn off auto editing of files with syntax errors.'
103 101 )
104 102 addflag('simple-prompt', 'TerminalInteractiveShell.simple_prompt',
105 103 "Force simple minimal prompt using `raw_input`",
106 104 "Use a rich interactive prompt with prompt_toolkit",
107 105 )
108 106
109 107 addflag('banner', 'TerminalIPythonApp.display_banner',
110 108 "Display a banner upon starting IPython.",
111 109 "Don't display a banner upon starting IPython."
112 110 )
113 111 addflag('confirm-exit', 'TerminalInteractiveShell.confirm_exit',
114 112 """Set to confirm when you try to exit IPython with an EOF (Control-D
115 113 in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit',
116 114 you can force a direct exit without any confirmation.""",
117 115 "Don't prompt the user when exiting."
118 116 )
119 117 addflag('term-title', 'TerminalInteractiveShell.term_title',
120 118 "Enable auto setting the terminal title.",
121 119 "Disable auto setting the terminal title."
122 120 )
123 121 classic_config = Config()
124 122 classic_config.InteractiveShell.cache_size = 0
125 123 classic_config.PlainTextFormatter.pprint = False
126 124 classic_config.TerminalInteractiveShell.prompts_class='IPython.terminal.prompts.ClassicPrompts'
127 125 classic_config.InteractiveShell.separate_in = ''
128 126 classic_config.InteractiveShell.separate_out = ''
129 127 classic_config.InteractiveShell.separate_out2 = ''
130 128 classic_config.InteractiveShell.colors = 'NoColor'
131 129 classic_config.InteractiveShell.xmode = 'Plain'
132 130
133 131 frontend_flags['classic']=(
134 132 classic_config,
135 133 "Gives IPython a similar feel to the classic Python prompt."
136 134 )
137 135 # # log doesn't make so much sense this way anymore
138 136 # paa('--log','-l',
139 137 # action='store_true', dest='InteractiveShell.logstart',
140 138 # help="Start logging to the default log file (./ipython_log.py).")
141 139 #
142 140 # # quick is harder to implement
143 141 frontend_flags['quick']=(
144 142 {'TerminalIPythonApp' : {'quick' : True}},
145 143 "Enable quick startup with no config files."
146 144 )
147 145
148 146 frontend_flags['i'] = (
149 147 {'TerminalIPythonApp' : {'force_interact' : True}},
150 148 """If running code from the command line, become interactive afterwards.
151 149 It is often useful to follow this with `--` to treat remaining flags as
152 150 script arguments.
153 151 """
154 152 )
155 153 flags.update(frontend_flags)
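Each entry above maps a command-line switch onto configuration, e.g. --classic applies classic_config and -i sets TerminalIPythonApp.force_interact. A few hedged invocations (--verbose here stands for a flag belonging to the user's script, not to IPython):

ipython --classic                   # classic Python-prompt look via classic_config
ipython --simple-prompt             # plain raw_input prompt instead of prompt_toolkit
ipython -i script.py -- --verbose   # run script.py, stay interactive; flags after -- go to the script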
156 154
157 155 aliases = dict(base_aliases)
158 156 aliases.update(shell_aliases)
159 157
160 158 #-----------------------------------------------------------------------------
161 159 # Main classes and functions
162 160 #-----------------------------------------------------------------------------
163 161
164 162
165 163 class LocateIPythonApp(BaseIPythonApplication):
166 164 description = """print the path to the IPython dir"""
167 165 subcommands = Dict(dict(
168 166 profile=('IPython.core.profileapp.ProfileLocate',
169 167 "print the path to an IPython profile directory",
170 168 ),
171 169 ))
172 170 def start(self):
173 171 if self.subapp is not None:
174 172 return self.subapp.start()
175 173 else:
176 174 print(self.ipython_dir)
177 175
178 176
179 177 class TerminalIPythonApp(BaseIPythonApplication, InteractiveShellApp):
180 178 name = u'ipython'
181 179 description = usage.cl_usage
182 180 crash_handler_class = IPAppCrashHandler
183 181 examples = _examples
184 182
185 183 flags = Dict(flags)
186 184 aliases = Dict(aliases)
187 185 classes = List()
188 186 @default('classes')
189 187 def _classes_default(self):
190 188 """This has to be in a method, for TerminalIPythonApp to be available."""
191 189 return [
192 190 InteractiveShellApp, # ShellApp comes before TerminalApp, because
193 191 self.__class__, # it will also affect subclasses (e.g. QtConsole)
194 192 TerminalInteractiveShell,
195 193 HistoryManager,
196 194 ProfileDir,
197 195 PlainTextFormatter,
198 196 IPCompleter,
199 197 ScriptMagics,
200 198 StoreMagics,
201 199 ]
202 200
203 201 deprecated_subcommands = dict(
204 202 qtconsole=('qtconsole.qtconsoleapp.JupyterQtConsoleApp',
205 203 """DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter Qt Console."""
206 204 ),
207 205 notebook=('notebook.notebookapp.NotebookApp',
208 206 """DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter HTML Notebook Server."""
209 207 ),
210 208 console=('jupyter_console.app.ZMQTerminalIPythonApp',
211 209 """DEPRECATED, Will be removed in IPython 6.0 : Launch the Jupyter terminal-based Console."""
212 210 ),
213 211 nbconvert=('nbconvert.nbconvertapp.NbConvertApp',
214 212 "DEPRECATED, Will be removed in IPython 6.0 : Convert notebooks to/from other formats."
215 213 ),
216 214 trust=('nbformat.sign.TrustNotebookApp',
217 215 "DEPRECATED, Will be removed in IPython 6.0 : Sign notebooks to trust their potentially unsafe contents at load."
218 216 ),
219 217 kernelspec=('jupyter_client.kernelspecapp.KernelSpecApp',
220 218 "DEPRECATED, Will be removed in IPython 6.0 : Manage Jupyter kernel specifications."
221 219 ),
222 220 )
223 221 subcommands = dict(
224 222 profile = ("IPython.core.profileapp.ProfileApp",
225 223 "Create and manage IPython profiles."
226 224 ),
227 225 kernel = ("ipykernel.kernelapp.IPKernelApp",
228 226 "Start a kernel without an attached frontend."
229 227 ),
230 228 locate=('IPython.terminal.ipapp.LocateIPythonApp',
231 229 LocateIPythonApp.description
232 230 ),
233 231 history=('IPython.core.historyapp.HistoryApp',
234 232 "Manage the IPython history database."
235 233 ),
236 234 )
237 235 deprecated_subcommands['install-nbextension'] = (
238 236 "notebook.nbextensions.InstallNBExtensionApp",
239 237 "DEPRECATED, Will be removed in IPython 6.0 : Install Jupyter notebook extension files"
240 238 )
241 239 subcommands.update(deprecated_subcommands)
242 240
243 241 # *do* autocreate requested profile, but don't create the config file.
244 242 auto_create=Bool(True)
245 243 # configurables
246 244 quick = Bool(False,
247 245 help="""Start IPython quickly by skipping the loading of config files."""
248 246 ).tag(config=True)
249 247 @observe('quick')
250 248 def _quick_changed(self, change):
251 249 if change['new']:
252 250 self.load_config_file = lambda *a, **kw: None
253 251
254 252 display_banner = Bool(True,
255 253 help="Whether to display a banner upon starting IPython."
256 254 ).tag(config=True)
257 255
258 256 # if there is code or files to run from the cmd line, don't interact
259 257 # unless the --i flag (App.force_interact) is true.
260 258 force_interact = Bool(False,
261 259 help="""If a command or file is given via the command-line,
262 260 e.g. 'ipython foo.py', start an interactive shell after executing the
263 261 file or command."""
264 262 ).tag(config=True)
265 263 @observe('force_interact')
266 264 def _force_interact_changed(self, change):
267 265 if change['new']:
268 266 self.interact = True
269 267
270 268 @observe('file_to_run', 'code_to_run', 'module_to_run')
271 269 def _file_to_run_changed(self, change):
272 270 new = change['new']
273 271 if new:
274 272 self.something_to_run = True
275 273 if new and not self.force_interact:
276 274 self.interact = False
277 275
278 276 # internal, not-configurable
279 277 something_to_run=Bool(False)
280 278
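The quick / force_interact / file_to_run handlers above all follow the traitlets @observe pattern: the decorated method receives a change dict whose 'new' key holds the updated value. A minimal, self-contained sketch of that mechanism (the MiniApp class and its traits are illustrative, not part of IPython):

    from traitlets import HasTraits, Bool, observe

    class MiniApp(HasTraits):
        force_interact = Bool(False)
        interact = Bool(False)

        @observe('force_interact')
        def _force_interact_changed(self, change):
            # Same shape as the handlers above: react to the new trait value.
            if change['new']:
                self.interact = True

    app = MiniApp()
    app.force_interact = True
    assert app.interact is True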
281 279 def parse_command_line(self, argv=None):
282 280 """override to allow old '-pylab' flag with deprecation warning"""
283 281
284 282 argv = sys.argv[1:] if argv is None else argv
285 283
286 284 if '-pylab' in argv:
287 285 # deprecated `-pylab` given,
288 286 # warn and transform into current syntax
289 287 argv = argv[:] # copy, don't clobber
290 288 idx = argv.index('-pylab')
291 289 warnings.warn("`-pylab` flag has been deprecated.\n"
292 290 " Use `--matplotlib <backend>` and import pylab manually.")
293 291 argv[idx] = '--pylab'
294 292
295 293 return super(TerminalIPythonApp, self).parse_command_line(argv)
296 294
297 295 @catch_config_error
298 296 def initialize(self, argv=None):
299 297 """Do actions after construct, but before starting the app."""
300 298 super(TerminalIPythonApp, self).initialize(argv)
301 299 if self.subapp is not None:
302 300 # don't bother initializing further, starting subapp
303 301 return
304 302 # print self.extra_args
305 303 if self.extra_args and not self.something_to_run:
306 304 self.file_to_run = self.extra_args[0]
307 305 self.init_path()
308 306 # create the shell
309 307 self.init_shell()
310 308 # and draw the banner
311 309 self.init_banner()
312 310 # Now a variety of things that happen after the banner is printed.
313 311 self.init_gui_pylab()
314 312 self.init_extensions()
315 313 self.init_code()
316 314
317 315 def init_shell(self):
318 316 """initialize the InteractiveShell instance"""
319 317 # Create an InteractiveShell instance.
320 318 # shell.display_banner should always be False for the terminal
321 319 # based app, because we call shell.show_banner() by hand below
322 320 # so the banner shows *before* all extension loading stuff.
323 321 self.shell = TerminalInteractiveShell.instance(parent=self,
324 322 profile_dir=self.profile_dir,
325 323 ipython_dir=self.ipython_dir, user_ns=self.user_ns)
326 324 self.shell.configurables.append(self)
327 325
328 326 def init_banner(self):
329 327 """optionally display the banner"""
330 328 if self.display_banner and self.interact:
331 329 self.shell.show_banner()
332 330 # Make sure there is a space below the banner.
333 331 if self.log_level <= logging.INFO: print()
334 332
335 333 def _pylab_changed(self, name, old, new):
336 334 """Replace --pylab='inline' with --pylab='auto'"""
337 335 if new == 'inline':
338 336 warnings.warn("'inline' not available as pylab backend, "
339 337 "using 'auto' instead.")
340 338 self.pylab = 'auto'
341 339
342 340 def start(self):
343 341 if self.subapp is not None:
344 342 return self.subapp.start()
345 343 # perform any pre-exec steps:
346 344 if self.interact:
347 345 self.log.debug("Starting IPython's mainloop...")
348 346 self.shell.mainloop()
349 347 else:
350 348 self.log.debug("IPython not interactive...")
351 349
352 350 def load_default_config(ipython_dir=None):
353 351 """Load the default config file from the default ipython_dir.
354 352
355 353 This is useful for embedded shells.
356 354 """
357 355 if ipython_dir is None:
358 356 ipython_dir = get_ipython_dir()
359 357
360 358 profile_dir = os.path.join(ipython_dir, 'profile_default')
361 359
362 360 config = Config()
363 361 for cf in Application._load_config_files("ipython_config", path=profile_dir):
364 362 config.update(cf)
365 363
366 364 return config
367 365
368 366 launch_new_instance = TerminalIPythonApp.launch_instance
369 367
370 368
371 369 if __name__ == '__main__':
372 370 launch_new_instance()
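load_default_config() above is meant for embedded shells; a minimal usage sketch, assuming (as for other Configurables) that InteractiveShellEmbed accepts a config argument:

    from IPython.terminal.ipapp import load_default_config
    from IPython.terminal.embed import InteractiveShellEmbed

    config = load_default_config()   # reads ipython_config* from profile_default
    shell = InteractiveShellEmbed(config=config, banner1='embedded shell')
    # Calling shell() here would open the embedded shell with the user's defaults.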
@@ -1,207 +1,206 b''
1 1 """Extra magics for terminal use."""
2 2
3 3 # Copyright (c) IPython Development Team.
4 4 # Distributed under the terms of the Modified BSD License.
5 5
6 from __future__ import print_function
7 6
8 7 from logging import error
9 8 import os
10 9 import sys
11 10
12 11 from IPython.core.error import TryNext, UsageError
13 12 from IPython.core.inputsplitter import IPythonInputSplitter
14 13 from IPython.core.magic import Magics, magics_class, line_magic
15 14 from IPython.lib.clipboard import ClipboardEmpty
16 15 from IPython.utils.text import SList, strip_email_quotes
17 16 from IPython.utils import py3compat
18 17
19 18 def get_pasted_lines(sentinel, l_input=py3compat.input, quiet=False):
20 19 """ Yield pasted lines until the user enters the given sentinel value.
21 20 """
22 21 if not quiet:
23 22 print("Pasting code; enter '%s' alone on the line to stop or use Ctrl-D." \
24 23 % sentinel)
25 24 prompt = ":"
26 25 else:
27 26 prompt = ""
28 27 while True:
29 28 try:
30 29 l = py3compat.str_to_unicode(l_input(prompt))
31 30 if l == sentinel:
32 31 return
33 32 else:
34 33 yield l
35 34 except EOFError:
36 35 print('<EOF>')
37 36 return
38 37
39 38
40 39 @magics_class
41 40 class TerminalMagics(Magics):
42 41 def __init__(self, shell):
43 42 super(TerminalMagics, self).__init__(shell)
44 43 self.input_splitter = IPythonInputSplitter()
45 44
46 45 def store_or_execute(self, block, name):
47 46 """ Execute a block, or store it in a variable, per the user's request.
48 47 """
49 48 if name:
50 49 # If storing it for further editing
51 50 self.shell.user_ns[name] = SList(block.splitlines())
52 51 print("Block assigned to '%s'" % name)
53 52 else:
54 53 b = self.preclean_input(block)
55 54 self.shell.user_ns['pasted_block'] = b
56 55 self.shell.using_paste_magics = True
57 56 try:
58 57 self.shell.run_cell(b)
59 58 finally:
60 59 self.shell.using_paste_magics = False
61 60
62 61 def preclean_input(self, block):
63 62 lines = block.splitlines()
64 63 while lines and not lines[0].strip():
65 64 lines = lines[1:]
66 65 return strip_email_quotes('\n'.join(lines))
67 66
68 67 def rerun_pasted(self, name='pasted_block'):
69 68 """ Rerun a previously pasted command.
70 69 """
71 70 b = self.shell.user_ns.get(name)
72 71
73 72 # Sanity checks
74 73 if b is None:
75 74 raise UsageError('No previous pasted block available')
76 75 if not isinstance(b, py3compat.string_types):
77 76 raise UsageError(
78 77 "Variable 'pasted_block' is not a string, can't execute")
79 78
80 79 print("Re-executing '%s...' (%d chars)"% (b.split('\n',1)[0], len(b)))
81 80 self.shell.run_cell(b)
82 81
83 82 @line_magic
84 83 def autoindent(self, parameter_s = ''):
85 84 """Toggle autoindent on/off (if available)."""
86 85
87 86 self.shell.set_autoindent()
88 87 print("Automatic indentation is:",['OFF','ON'][self.shell.autoindent])
89 88
90 89 @line_magic
91 90 def cpaste(self, parameter_s=''):
92 91 """Paste & execute a pre-formatted code block from clipboard.
93 92
94 93 You must terminate the block with '--' (two minus-signs) or Ctrl-D
95 94 alone on the line. You can also provide your own sentinel with '%cpaste
96 95 -s %%' ('%%' is the new sentinel for this operation).
97 96
98 97 The block is dedented prior to execution to enable execution of method
99 98 definitions. '>' and '+' characters at the beginning of a line are
100 99 ignored, to allow pasting directly from e-mails, diff files and
101 100 doctests (the '...' continuation prompt is also stripped). The
102 101 executed block is also assigned to variable named 'pasted_block' for
103 102 later editing with '%edit pasted_block'.
104 103
105 104 You can also pass a variable name as an argument, e.g. '%cpaste foo'.
106 105 This assigns the pasted block to variable 'foo' as string, without
107 106 dedenting or executing it (preceding >>> and + are still stripped).
108 107
109 108 '%cpaste -r' re-executes the block previously entered by cpaste.
110 109 '%cpaste -q' suppresses any additional output messages.
111 110
112 111 Do not be alarmed by garbled output on Windows (it's a readline bug).
113 112 Just press enter and type -- (and press enter again) and the block
114 113 will be what was just pasted.
115 114
116 115 IPython statements (magics, shell escapes) are not supported (yet).
117 116
118 117 See also
119 118 --------
120 119 paste: automatically pull code from clipboard.
121 120
122 121 Examples
123 122 --------
124 123 ::
125 124
126 125 In [8]: %cpaste
127 126 Pasting code; enter '--' alone on the line to stop.
128 127 :>>> a = ["world!", "Hello"]
129 128 :>>> print " ".join(sorted(a))
130 129 :--
131 130 Hello world!
132 131 """
133 132 opts, name = self.parse_options(parameter_s, 'rqs:', mode='string')
134 133 if 'r' in opts:
135 134 self.rerun_pasted()
136 135 return
137 136
138 137 quiet = ('q' in opts)
139 138
140 139 sentinel = opts.get('s', u'--')
141 140 block = '\n'.join(get_pasted_lines(sentinel, quiet=quiet))
142 141 self.store_or_execute(block, name)
143 142
144 143 @line_magic
145 144 def paste(self, parameter_s=''):
146 145 """Paste & execute a pre-formatted code block from clipboard.
147 146
148 147 The text is pulled directly from the clipboard without user
149 148 intervention and printed back on the screen before execution (unless
150 149 the -q flag is given to force quiet mode).
151 150
152 151 The block is dedented prior to execution to enable execution of method
153 152 definitions. '>' and '+' characters at the beginning of a line are
154 153 ignored, to allow pasting directly from e-mails, diff files and
155 154 doctests (the '...' continuation prompt is also stripped). The
156 155 executed block is also assigned to variable named 'pasted_block' for
157 156 later editing with '%edit pasted_block'.
158 157
159 158 You can also pass a variable name as an argument, e.g. '%paste foo'.
160 159 This assigns the pasted block to variable 'foo' as string, without
161 160 executing it (preceding >>> and + are still stripped).
162 161
163 162 Options:
164 163
165 164 -r: re-executes the block previously entered by cpaste.
166 165
167 166 -q: quiet mode: do not echo the pasted text back to the terminal.
168 167
169 168 IPython statements (magics, shell escapes) are not supported (yet).
170 169
171 170 See also
172 171 --------
173 172 cpaste: manually paste code into terminal until you mark its end.
174 173 """
175 174 opts, name = self.parse_options(parameter_s, 'rq', mode='string')
176 175 if 'r' in opts:
177 176 self.rerun_pasted()
178 177 return
179 178 try:
180 179 block = self.shell.hooks.clipboard_get()
181 180 except TryNext as clipboard_exc:
182 181 message = getattr(clipboard_exc, 'args')
183 182 if message:
184 183 error(message[0])
185 184 else:
186 185 error('Could not get text from the clipboard.')
187 186 return
188 187 except ClipboardEmpty:
189 188 raise UsageError("The clipboard appears to be empty")
190 189
191 190 # By default, echo back to terminal unless quiet mode is requested
192 191 if 'q' not in opts:
193 192 write = self.shell.write
194 193 write(self.shell.pycolorize(block))
195 194 if not block.endswith('\n'):
196 195 write('\n')
197 196 write("## -- End pasted text --\n")
198 197
199 198 self.store_or_execute(block, name)
200 199
201 200 # Class-level: add a '%cls' magic only on Windows
202 201 if sys.platform == 'win32':
203 202 @line_magic
204 203 def cls(self, s):
205 204 """Clear screen.
206 205 """
207 206 os.system("cls")
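TerminalMagics above uses the standard Magics registration machinery; a short sketch of a custom line magic built the same way (HelloMagics and %hello are illustrative names, and get_ipython() returns None outside a running IPython):

    from IPython import get_ipython
    from IPython.core.magic import Magics, magics_class, line_magic

    @magics_class
    class HelloMagics(Magics):
        @line_magic
        def hello(self, line):
            """Echo the argument back, as a tiny example magic."""
            print("hello", line)

    ip = get_ipython()
    if ip is not None:
        ip.register_magics(HelloMagics)   # afterwards: %hello world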
@@ -1,74 +1,73 b''
1 1 """Terminal input and output prompts."""
2 from __future__ import print_function
3 2
4 3 from pygments.token import Token
5 4 import sys
6 5
7 6 from IPython.core.displayhook import DisplayHook
8 7
9 8 from prompt_toolkit.layout.utils import token_list_width
10 9
11 10 class Prompts(object):
12 11 def __init__(self, shell):
13 12 self.shell = shell
14 13
15 14 def in_prompt_tokens(self, cli=None):
16 15 return [
17 16 (Token.Prompt, 'In ['),
18 17 (Token.PromptNum, str(self.shell.execution_count)),
19 18 (Token.Prompt, ']: '),
20 19 ]
21 20
22 21 def _width(self):
23 22 return token_list_width(self.in_prompt_tokens())
24 23
25 24 def continuation_prompt_tokens(self, cli=None, width=None):
26 25 if width is None:
27 26 width = self._width()
28 27 return [
29 28 (Token.Prompt, (' ' * (width - 5)) + '...: '),
30 29 ]
31 30
32 31 def rewrite_prompt_tokens(self):
33 32 width = self._width()
34 33 return [
35 34 (Token.Prompt, ('-' * (width - 2)) + '> '),
36 35 ]
37 36
38 37 def out_prompt_tokens(self):
39 38 return [
40 39 (Token.OutPrompt, 'Out['),
41 40 (Token.OutPromptNum, str(self.shell.execution_count)),
42 41 (Token.OutPrompt, ']: '),
43 42 ]
44 43
45 44 class ClassicPrompts(Prompts):
46 45 def in_prompt_tokens(self, cli=None):
47 46 return [
48 47 (Token.Prompt, '>>> '),
49 48 ]
50 49
51 50 def continuation_prompt_tokens(self, cli=None, width=None):
52 51 return [
53 52 (Token.Prompt, '... ')
54 53 ]
55 54
56 55 def rewrite_prompt_tokens(self):
57 56 return []
58 57
59 58 def out_prompt_tokens(self):
60 59 return []
61 60
62 61 class RichPromptDisplayHook(DisplayHook):
63 62 """Subclass of base display hook using coloured prompt"""
64 63 def write_output_prompt(self):
65 64 sys.stdout.write(self.shell.separate_out)
66 65 self.prompt_end_newline = False
67 66 if self.do_full_cache:
68 67 tokens = self.shell.prompts.out_prompt_tokens()
69 68 if tokens and tokens[-1][1].endswith('\n'):
70 69 self.prompt_end_newline = True
71 70 if self.shell.pt_cli:
72 71 self.shell.pt_cli.print_tokens(tokens)
73 72 else:
74 73 sys.stdout.write(''.join(s for t, s in tokens))
@@ -1,141 +1,140 b''
1 1 """GLUT Input hook for interactive use with prompt_toolkit
2 2 """
3 from __future__ import print_function
4 3
5 4
6 5 # GLUT is quite an old library and it is difficult to ensure proper
7 6 # integration within IPython since the original GLUT does not allow handling
8 7 # events one by one. Instead, it requires the mainloop to be entered
9 8 # and never returned (there is not even a function to exit the
10 9 # mainloop). Fortunately, there are alternatives such as freeglut
11 10 # (available for Linux and Windows), and the OSX implementation gives
12 11 # access to a glutCheckLoop() function that blocks until a new
13 12 # event is received. This means we have to set up the idle callback to
14 13 # ensure we get at least one event that will unblock the function.
15 14 #
16 15 # Furthermore, it is not possible to install these handlers without first
17 16 # creating a window. We choose to make this window invisible. This means that
18 17 # display mode options are set at this level and the user won't be able to change
19 18 # them later without modifying the code. This should probably be made available
20 19 # via the IPython options system.
21 20
22 21 import sys
23 22 import time
24 23 import signal
25 24 import OpenGL.GLUT as glut
26 25 import OpenGL.platform as platform
27 26 from timeit import default_timer as clock
28 27
29 28 # Frames per second: 60
30 29 # Should probably be an IPython option
31 30 glut_fps = 60
32 31
33 32 # Display mode: double buffered + RGBA + depth
34 33 # Should probably be an IPython option
35 34 glut_display_mode = (glut.GLUT_DOUBLE |
36 35 glut.GLUT_RGBA |
37 36 glut.GLUT_DEPTH)
38 37
39 38 glutMainLoopEvent = None
40 39 if sys.platform == 'darwin':
41 40 try:
42 41 glutCheckLoop = platform.createBaseFunction(
43 42 'glutCheckLoop', dll=platform.GLUT, resultType=None,
44 43 argTypes=[],
45 44 doc='glutCheckLoop( ) -> None',
46 45 argNames=(),
47 46 )
48 47 except AttributeError:
49 48 raise RuntimeError(
50 49 '''Your glut implementation does not allow interactive sessions. '''
51 50 '''Consider installing freeglut.''')
52 51 glutMainLoopEvent = glutCheckLoop
53 52 elif glut.HAVE_FREEGLUT:
54 53 glutMainLoopEvent = glut.glutMainLoopEvent
55 54 else:
56 55 raise RuntimeError(
57 56 '''Your glut implementation does not allow interactive sessions. '''
58 57 '''Consider installing freeglut.''')
59 58
60 59
61 60 def glut_display():
62 61 # Dummy display function
63 62 pass
64 63
65 64 def glut_idle():
66 65 # Dummy idle function
67 66 pass
68 67
69 68 def glut_close():
70 69 # Close function only hides the current window
71 70 glut.glutHideWindow()
72 71 glutMainLoopEvent()
73 72
74 73 def glut_int_handler(signum, frame):
75 74 # Catch SIGINT and print the default KeyboardInterrupt message
76 75 signal.signal(signal.SIGINT, signal.default_int_handler)
77 76 print('\nKeyboardInterrupt')
78 77 # Need to reprint the prompt at this stage
79 78
80 79 # Initialisation code
81 80 glut.glutInit( sys.argv )
82 81 glut.glutInitDisplayMode( glut_display_mode )
83 82 # This is specific to freeglut
84 83 if bool(glut.glutSetOption):
85 84 glut.glutSetOption( glut.GLUT_ACTION_ON_WINDOW_CLOSE,
86 85 glut.GLUT_ACTION_GLUTMAINLOOP_RETURNS )
87 86 glut.glutCreateWindow( b'ipython' )
88 87 glut.glutReshapeWindow( 1, 1 )
89 88 glut.glutHideWindow( )
90 89 glut.glutWMCloseFunc( glut_close )
91 90 glut.glutDisplayFunc( glut_display )
92 91 glut.glutIdleFunc( glut_idle )
93 92
94 93
95 94 def inputhook(context):
96 95 """Run the GLUT event loop by processing pending events only.
97 96
98 97 This keeps processing pending events until stdin is ready. After
99 98 processing all pending events, a call to time.sleep is inserted. This is
100 99 needed, otherwise, CPU usage is at 100%. This sleep time should be tuned
101 100 though for best performance.
102 101 """
103 102 # We need to protect against a user pressing Control-C when IPython is
104 103 # idle and this is running. We trap KeyboardInterrupt and pass.
105 104
106 105 signal.signal(signal.SIGINT, glut_int_handler)
107 106
108 107 try:
109 108 t = clock()
110 109
111 110 # Make sure the default window is set after a window has been closed
112 111 if glut.glutGetWindow() == 0:
113 112 glut.glutSetWindow( 1 )
114 113 glutMainLoopEvent()
115 114 return 0
116 115
117 116 while not context.input_is_ready():
118 117 glutMainLoopEvent()
119 118 # We need to sleep at this point to keep the idle CPU load
120 119 # low. However, if we sleep too long, GUI response is poor. As
121 120 # a compromise, we watch how often GUI events are being processed
122 121 # and switch between a short and long sleep time. Here are some
123 122 # stats useful in helping to tune this.
124 123 # time CPU load
125 124 # 0.001 13%
126 125 # 0.005 3%
127 126 # 0.01 1.5%
128 127 # 0.05 0.5%
129 128 used_time = clock() - t
130 129 if used_time > 10.0:
131 130 # print 'Sleep for 1 s' # dbg
132 131 time.sleep(1.0)
133 132 elif used_time > 0.1:
134 133 # Few GUI events coming in, so we can sleep longer
135 134 # print 'Sleep for 0.05 s' # dbg
136 135 time.sleep(0.05)
137 136 else:
138 137 # Many GUI events coming in, so sleep only very little
139 138 time.sleep(0.001)
140 139 except KeyboardInterrupt:
141 140 pass
@@ -1,59 +1,58 b''
1 1 # Code borrowed from python-prompt-toolkit examples
2 2 # https://github.com/jonathanslenders/python-prompt-toolkit/blob/77cdcfbc7f4b4c34a9d2f9a34d422d7152f16209/examples/inputhook.py
3 3
4 4 # Copyright (c) 2014, Jonathan Slenders
5 5 # All rights reserved.
6 6 #
7 7 # Redistribution and use in source and binary forms, with or without modification,
8 8 # are permitted provided that the following conditions are met:
9 9 #
10 10 # * Redistributions of source code must retain the above copyright notice, this
11 11 # list of conditions and the following disclaimer.
12 12 #
13 13 # * Redistributions in binary form must reproduce the above copyright notice, this
14 14 # list of conditions and the following disclaimer in the documentation and/or
15 15 # other materials provided with the distribution.
16 16 #
17 17 # * Neither the name of the {organization} nor the names of its
18 18 # contributors may be used to endorse or promote products derived from
19 19 # this software without specific prior written permission.
20 20 #
21 21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
22 22 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
23 23 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 24 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
25 25 # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 26 # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 27 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
28 28 # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 29 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
30 30 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 31
32 32 """
33 33 PyGTK input hook for prompt_toolkit.
34 34
35 35 Listens on the pipe prompt_toolkit sets up for a notification that it should
36 36 return control to the terminal event loop.
37 37 """
38 from __future__ import absolute_import
39 38
40 39 import gtk, gobject
41 40
42 41 # Enable threading in GTK. (Otherwise, GTK will keep the GIL.)
43 42 gtk.gdk.threads_init()
44 43
45 44 def inputhook(context):
46 45 """
47 46 When the eventloop of prompt-toolkit is idle, call this inputhook.
48 47
49 48 This will run the GTK main loop until the file descriptor
50 49 `context.fileno()` becomes ready.
51 50
52 51 :param context: An `InputHookContext` instance.
53 52 """
54 53 def _main_quit(*a, **kw):
55 54 gtk.main_quit()
56 55 return False
57 56
58 57 gobject.io_add_watch(context.fileno(), gobject.IO_IN, _main_quit)
59 58 gtk.main()
@@ -1,68 +1,67 b''
1 1 """Enable pyglet to be used interactively with prompt_toolkit
2 2 """
3 from __future__ import absolute_import
4 3
5 4 import os
6 5 import sys
7 6 import time
8 7 from timeit import default_timer as clock
9 8 import pyglet
10 9
11 10 # On linux only, window.flip() has a bug that causes an AttributeError on
12 11 # window close. For details, see:
13 12 # http://groups.google.com/group/pyglet-users/browse_thread/thread/47c1aab9aa4a3d23/c22f9e819826799e?#c22f9e819826799e
14 13
15 14 if sys.platform.startswith('linux'):
16 15 def flip(window):
17 16 try:
18 17 window.flip()
19 18 except AttributeError:
20 19 pass
21 20 else:
22 21 def flip(window):
23 22 window.flip()
24 23
25 24
26 25 def inputhook(context):
27 26 """Run the pyglet event loop by processing pending events only.
28 27
29 28 This keeps processing pending events until stdin is ready. After
30 29 processing all pending events, a call to time.sleep is inserted. This is
31 30 needed, otherwise, CPU usage is at 100%. This sleep time should be tuned
32 31 though for best performance.
33 32 """
34 33 # We need to protect against a user pressing Control-C when IPython is
35 34 # idle and this is running. We trap KeyboardInterrupt and pass.
36 35 try:
37 36 t = clock()
38 37 while not context.input_is_ready():
39 38 pyglet.clock.tick()
40 39 for window in pyglet.app.windows:
41 40 window.switch_to()
42 41 window.dispatch_events()
43 42 window.dispatch_event('on_draw')
44 43 flip(window)
45 44
46 45 # We need to sleep at this point to keep the idle CPU load
47 46 # low. However, if we sleep too long, GUI response is poor. As
48 47 # a compromise, we watch how often GUI events are being processed
49 48 # and switch between a short and long sleep time. Here are some
50 49 # stats useful in helping to tune this.
51 50 # time CPU load
52 51 # 0.001 13%
53 52 # 0.005 3%
54 53 # 0.01 1.5%
55 54 # 0.05 0.5%
56 55 used_time = clock() - t
57 56 if used_time > 10.0:
58 57 # print 'Sleep for 1 s' # dbg
59 58 time.sleep(1.0)
60 59 elif used_time > 0.1:
61 60 # Few GUI events coming in, so we can sleep longer
62 61 # print 'Sleep for 0.05 s' # dbg
63 62 time.sleep(0.05)
64 63 else:
65 64 # Many GUI events coming in, so sleep only very little
66 65 time.sleep(0.001)
67 66 except KeyboardInterrupt:
68 67 pass
@@ -1,148 +1,147 b''
1 1 """Enable wxPython to be used interactively in prompt_toolkit
2 2 """
3 from __future__ import absolute_import
4 3
5 4 import sys
6 5 import signal
7 6 import time
8 7 from timeit import default_timer as clock
9 8 import wx
10 9
11 10
12 11 def inputhook_wx1(context):
13 12 """Run the wx event loop by processing pending events only.
14 13
15 14 This approach seems to work, but its performance is not great as it
16 15 relies on having PyOS_InputHook called regularly.
17 16 """
18 17 try:
19 18 app = wx.GetApp()
20 19 if app is not None:
21 20 assert wx.Thread_IsMain()
22 21
23 22 # Make a temporary event loop and process system events until
24 23 # there are no more waiting, then allow idle events (which
25 24 # will also deal with pending or posted wx events.)
26 25 evtloop = wx.EventLoop()
27 26 ea = wx.EventLoopActivator(evtloop)
28 27 while evtloop.Pending():
29 28 evtloop.Dispatch()
30 29 app.ProcessIdle()
31 30 del ea
32 31 except KeyboardInterrupt:
33 32 pass
34 33 return 0
35 34
36 35 class EventLoopTimer(wx.Timer):
37 36
38 37 def __init__(self, func):
39 38 self.func = func
40 39 wx.Timer.__init__(self)
41 40
42 41 def Notify(self):
43 42 self.func()
44 43
45 44 class EventLoopRunner(object):
46 45
47 46 def Run(self, time, input_is_ready):
48 47 self.input_is_ready = input_is_ready
49 48 self.evtloop = wx.EventLoop()
50 49 self.timer = EventLoopTimer(self.check_stdin)
51 50 self.timer.Start(time)
52 51 self.evtloop.Run()
53 52
54 53 def check_stdin(self):
55 54 if self.input_is_ready():
56 55 self.timer.Stop()
57 56 self.evtloop.Exit()
58 57
59 58 def inputhook_wx2(context):
60 59 """Run the wx event loop, polling for stdin.
61 60
62 61 This version runs the wx eventloop for an undetermined amount of time,
63 62 during which it periodically checks to see if anything is ready on
64 63 stdin. If anything is ready on stdin, the event loop exits.
65 64
66 65 The argument to elr.Run controls how often the event loop looks at stdin.
67 66 This determines the responsiveness at the keyboard. A setting of 1000
68 67 enables a user to type at most 1 char per second. I have found that a
69 68 setting of 10 gives good keyboard response. We can shorten it further,
70 69 but eventually performance would suffer from calling select/kbhit too
71 70 often.
72 71 """
73 72 try:
74 73 app = wx.GetApp()
75 74 if app is not None:
76 75 assert wx.Thread_IsMain()
77 76 elr = EventLoopRunner()
78 77 # As this time is made shorter, keyboard response improves, but idle
79 78 # CPU load goes up. 10 ms seems like a good compromise.
80 79 elr.Run(time=10, # CHANGE time here to control polling interval
81 80 input_is_ready=context.input_is_ready)
82 81 except KeyboardInterrupt:
83 82 pass
84 83 return 0
85 84
86 85 def inputhook_wx3(context):
87 86 """Run the wx event loop by processing pending events only.
88 87
89 88 This is like inputhook_wx1, but it keeps processing pending events
90 89 until stdin is ready. After processing all pending events, a call to
91 90 time.sleep is inserted. This is needed, otherwise, CPU usage is at 100%.
92 91 This sleep time should be tuned though for best performance.
93 92 """
94 93 # We need to protect against a user pressing Control-C when IPython is
95 94 # idle and this is running. We trap KeyboardInterrupt and pass.
96 95 try:
97 96 app = wx.GetApp()
98 97 if app is not None:
99 98 assert wx.Thread_IsMain()
100 99
101 100 # The import of wx on Linux sets the handler for signal.SIGINT
102 101 # to 0. This is a bug in wx or gtk. We fix by just setting it
103 102 # back to the Python default.
104 103 if not callable(signal.getsignal(signal.SIGINT)):
105 104 signal.signal(signal.SIGINT, signal.default_int_handler)
106 105
107 106 evtloop = wx.EventLoop()
108 107 ea = wx.EventLoopActivator(evtloop)
109 108 t = clock()
110 109 while not context.input_is_ready():
111 110 while evtloop.Pending():
112 111 t = clock()
113 112 evtloop.Dispatch()
114 113 app.ProcessIdle()
115 114 # We need to sleep at this point to keep the idle CPU load
116 115 # low. However, if we sleep too long, GUI response is poor. As
117 116 # a compromise, we watch how often GUI events are being processed
118 117 # and switch between a short and long sleep time. Here are some
119 118 # stats useful in helping to tune this.
120 119 # time CPU load
121 120 # 0.001 13%
122 121 # 0.005 3%
123 122 # 0.01 1.5%
124 123 # 0.05 0.5%
125 124 used_time = clock() - t
126 125 if used_time > 10.0:
127 126 # print 'Sleep for 1 s' # dbg
128 127 time.sleep(1.0)
129 128 elif used_time > 0.1:
130 129 # Few GUI events coming in, so we can sleep longer
131 130 # print 'Sleep for 0.05 s' # dbg
132 131 time.sleep(0.05)
133 132 else:
134 133 # Many GUI events coming in, so sleep only very little
135 134 time.sleep(0.001)
136 135 del ea
137 136 except KeyboardInterrupt:
138 137 pass
139 138 return 0
140 139
141 140 if sys.platform == 'darwin':
142 141 # On OSX, evtloop.Pending() always returns True, regardless of there being
143 142 # any events pending. As such we can't use implementations 1 or 3 of the
144 143 # inputhook as those depend on a pending/dispatch loop.
145 144 inputhook = inputhook_wx2
146 145 else:
147 146 # This is our default implementation
148 147 inputhook = inputhook_wx3
@@ -1,135 +1,132 b''
1 1 """Test embedding of IPython"""
2 2
3 3 #-----------------------------------------------------------------------------
4 4 # Copyright (C) 2013 The IPython Development Team
5 5 #
6 6 # Distributed under the terms of the BSD License. The full license is in
7 7 # the file COPYING, distributed as part of this software.
8 8 #-----------------------------------------------------------------------------
9 9
10 10 #-----------------------------------------------------------------------------
11 11 # Imports
12 12 #-----------------------------------------------------------------------------
13 13
14 14 import os
15 15 import subprocess
16 16 import sys
17 17 import nose.tools as nt
18 18 from IPython.utils.tempdir import NamedFileInTemporaryDirectory
19 19 from IPython.testing.decorators import skip_win32
20 20
21 21 #-----------------------------------------------------------------------------
22 22 # Tests
23 23 #-----------------------------------------------------------------------------
24 24
25 25
26 26 _sample_embed = b"""
27 from __future__ import print_function
28 27 import IPython
29 28
30 29 a = 3
31 30 b = 14
32 31 print(a, '.', b)
33 32
34 33 IPython.embed()
35 34
36 35 print('bye!')
37 36 """
38 37
39 38 _exit = b"exit\r"
40 39
41 40 def test_ipython_embed():
42 41 """test that `IPython.embed()` works"""
43 42 with NamedFileInTemporaryDirectory('file_with_embed.py') as f:
44 43 f.write(_sample_embed)
45 44 f.flush()
46 45 f.close() # otherwise msft won't be able to read the file
47 46
48 47 # run `python file_with_embed.py`
49 48 cmd = [sys.executable, f.name]
50 49 env = os.environ.copy()
51 50 env['IPY_TEST_SIMPLE_PROMPT'] = '1'
52 51
53 52 p = subprocess.Popen(cmd, env=env, stdin=subprocess.PIPE,
54 53 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
55 54 out, err = p.communicate(_exit)
56 55 std = out.decode('UTF-8')
57 56
58 57 nt.assert_equal(p.returncode, 0)
59 58 nt.assert_in('3 . 14', std)
60 59 if os.name != 'nt':
61 60 # TODO: Fix up our different stdout references, see issue gh-14
62 61 nt.assert_in('IPython', std)
63 62 nt.assert_in('bye!', std)
64 63
65 64 @skip_win32
66 65 def test_nest_embed():
67 66 """test that `IPython.embed()` is nestable"""
68 67 import pexpect
69 68 ipy_prompt = r']:' #ansi color codes give problems matching beyond this
70 69 env = os.environ.copy()
71 70 env['IPY_TEST_SIMPLE_PROMPT'] = '1'
72 71
73 72
74 73 child = pexpect.spawn(sys.executable, ['-m', 'IPython', '--colors=nocolor'],
75 74 env=env)
76 75 child.expect(ipy_prompt)
77 child.sendline("from __future__ import print_function")
78 child.expect(ipy_prompt)
79 76 child.sendline("import IPython")
80 77 child.expect(ipy_prompt)
81 78 child.sendline("ip0 = get_ipython()")
82 79 #enter first nested embed
83 80 child.sendline("IPython.embed()")
84 81 #skip the banner until we get to a prompt
85 82 try:
86 83 prompted = -1
87 84 while prompted != 0:
88 85 prompted = child.expect([ipy_prompt, '\r\n'])
89 86 except pexpect.TIMEOUT as e:
90 87 print(e)
91 88 #child.interact()
92 89 child.sendline("embed1 = get_ipython()"); child.expect(ipy_prompt)
93 90 child.sendline("print('true' if embed1 is not ip0 else 'false')")
94 91 assert(child.expect(['true\r\n', 'false\r\n']) == 0)
95 92 child.expect(ipy_prompt)
96 93 child.sendline("print('true' if IPython.get_ipython() is embed1 else 'false')")
97 94 assert(child.expect(['true\r\n', 'false\r\n']) == 0)
98 95 child.expect(ipy_prompt)
99 96 #enter second nested embed
100 97 child.sendline("IPython.embed()")
101 98 #skip the banner until we get to a prompt
102 99 try:
103 100 prompted = -1
104 101 while prompted != 0:
105 102 prompted = child.expect([ipy_prompt, '\r\n'])
106 103 except pexpect.TIMEOUT as e:
107 104 print(e)
108 105 #child.interact()
109 106 child.sendline("embed2 = get_ipython()"); child.expect(ipy_prompt)
110 107 child.sendline("print('true' if embed2 is not embed1 else 'false')")
111 108 assert(child.expect(['true\r\n', 'false\r\n']) == 0)
112 109 child.expect(ipy_prompt)
113 110 child.sendline("print('true' if embed2 is IPython.get_ipython() else 'false')")
114 111 assert(child.expect(['true\r\n', 'false\r\n']) == 0)
115 112 child.expect(ipy_prompt)
116 113 child.sendline('exit')
117 114 #back at first embed
118 115 child.expect(ipy_prompt)
119 116 child.sendline("print('true' if get_ipython() is embed1 else 'false')")
120 117 assert(child.expect(['true\r\n', 'false\r\n']) == 0)
121 118 child.expect(ipy_prompt)
122 119 child.sendline("print('true' if IPython.get_ipython() is embed1 else 'false')")
123 120 assert(child.expect(['true\r\n', 'false\r\n']) == 0)
124 121 child.expect(ipy_prompt)
125 122 child.sendline('exit')
126 123 #back at launching scope
127 124 child.expect(ipy_prompt)
128 125 child.sendline("print('true' if get_ipython() is ip0 else 'false')")
129 126 assert(child.expect(['true\r\n', 'false\r\n']) == 0)
130 127 child.expect(ipy_prompt)
131 128 child.sendline("print('true' if IPython.get_ipython() is ip0 else 'false')")
132 129 assert(child.expect(['true\r\n', 'false\r\n']) == 0)
133 130 child.expect(ipy_prompt)
134 131 child.sendline('exit')
135 132 child.close()
@@ -1,380 +1,374 b''
1 1 # -*- coding: utf-8 -*-
2 2 """Decorators for labeling test objects.
3 3
4 4 Decorators that merely return a modified version of the original function
5 5 object are straightforward. Decorators that return a new function object need
6 6 to use nose.tools.make_decorator(original_function)(decorator) in returning the
7 7 decorator, in order to preserve metadata such as function name, setup and
8 8 teardown functions and so on - see nose.tools for more information.
9 9
10 10 This module provides a set of useful decorators meant to be ready to use in
11 11 your own tests. See the bottom of the file for the ready-made ones, and if you
12 12 find yourself writing a new one that may be of generic use, add it here.
13 13
14 14 Included decorators:
15 15
16 16
17 17 Lightweight testing that remains unittest-compatible.
18 18
19 19 - An @as_unittest decorator can be used to tag any normal parameter-less
20 20 function as a unittest TestCase. Then, both nose and normal unittest will
21 21 recognize it as such. This will make it easier to migrate away from Nose if
22 22 we ever need/want to while maintaining very lightweight tests.
23 23
24 24 NOTE: This file contains IPython-specific decorators. Using the machinery in
25 25 IPython.external.decorators, we import either numpy.testing.decorators if numpy is
26 26 available, OR use equivalent code in IPython.external._decorators, which
27 27 we've copied verbatim from numpy.
28 28
29 29 """
30 30
31 31 # Copyright (c) IPython Development Team.
32 32 # Distributed under the terms of the Modified BSD License.
33 33
34 34 import sys
35 35 import os
36 36 import tempfile
37 37 import unittest
38 38 import warnings
39 39 from importlib import import_module
40 40
41 41 from decorator import decorator
42 42
43 43 # Expose the unittest-driven decorators
44 44 from .ipunittest import ipdoctest, ipdocstring
45 45
46 46 # Grab the numpy-specific decorators which we keep in a file that we
47 47 # occasionally update from upstream: decorators.py is a copy of
48 48 # numpy.testing.decorators, we expose all of it here.
49 49 from IPython.external.decorators import *
50 50
51 51 # For onlyif_cmd_exists decorator
52 52 from IPython.utils.py3compat import string_types, which, PY2, PY3, PYPY
53 53
54 54 #-----------------------------------------------------------------------------
55 55 # Classes and functions
56 56 #-----------------------------------------------------------------------------
57 57
58 58 # Simple example of the basic idea
59 59 def as_unittest(func):
60 60 """Decorator to make a simple function into a normal test via unittest."""
61 61 class Tester(unittest.TestCase):
62 62 def test(self):
63 63 func()
64 64
65 65 Tester.__name__ = func.__name__
66 66
67 67 return Tester
68 68
69 69 # Utility functions
70 70
71 71 def apply_wrapper(wrapper,func):
72 72 """Apply a wrapper to a function for decoration.
73 73
74 74 This mixes Michele Simionato's decorator tool with nose's make_decorator,
75 75 to apply a wrapper in a decorator so that all nose attributes, as well as
76 76 function signature and other properties, survive the decoration cleanly.
77 77 This will ensure that wrapped functions can still be well introspected via
78 78 IPython, for example.
79 79 """
80 80 warnings.warn("The function `apply_wrapper` is deprecated and might be removed in IPython 5.0", DeprecationWarning)
81 81
82 82 import nose.tools
83 83
84 84 return decorator(wrapper,nose.tools.make_decorator(func)(wrapper))
85 85
86 86
87 87 def make_label_dec(label,ds=None):
88 88 """Factory function to create a decorator that applies one or more labels.
89 89
90 90 Parameters
91 91 ----------
92 92 label : string or sequence
93 93 One or more labels that will be applied by the decorator to the functions
94 94 it decorates. Labels are attributes of the decorated function with their
95 95 value set to True.
96 96
97 97 ds : string
98 98 An optional docstring for the resulting decorator. If not given, a
99 99 default docstring is auto-generated.
100 100
101 101 Returns
102 102 -------
103 103 A decorator.
104 104
105 105 Examples
106 106 --------
107 107
108 108 A simple labeling decorator:
109 109
110 110 >>> slow = make_label_dec('slow')
111 111 >>> slow.__doc__
112 112 "Labels a test as 'slow'."
113 113
114 114 And one that uses multiple labels and a custom docstring:
115 115
116 116 >>> rare = make_label_dec(['slow','hard'],
117 117 ... "Mix labels 'slow' and 'hard' for rare tests.")
118 118 >>> rare.__doc__
119 119 "Mix labels 'slow' and 'hard' for rare tests."
120 120
121 121 Now, let's test using this one:
122 122 >>> @rare
123 123 ... def f(): pass
124 124 ...
125 125 >>>
126 126 >>> f.slow
127 127 True
128 128 >>> f.hard
129 129 True
130 130 """
131 131
132 132 warnings.warn("The function `make_label_dec` is deprecated and might be removed in IPython 5.0", DeprecationWarning)
133 133 if isinstance(label, string_types):
134 134 labels = [label]
135 135 else:
136 136 labels = label
137 137
138 138 # Validate that the given label(s) are OK for use in setattr() by doing a
139 139 # dry run on a dummy function.
140 140 tmp = lambda : None
141 141 for label in labels:
142 142 setattr(tmp,label,True)
143 143
144 144 # This is the actual decorator we'll return
145 145 def decor(f):
146 146 for label in labels:
147 147 setattr(f,label,True)
148 148 return f
149 149
150 150 # Apply the user's docstring, or autogenerate a basic one
151 151 if ds is None:
152 152 ds = "Labels a test as %r." % label
153 153 decor.__doc__ = ds
154 154
155 155 return decor
156 156
157 157
158 158 # Inspired by numpy's skipif, but uses the full apply_wrapper utility to
159 159 # preserve function metadata better and allows the skip condition to be a
160 160 # callable.
161 161 def skipif(skip_condition, msg=None):
162 162 ''' Make function raise SkipTest exception if skip_condition is true
163 163
164 164 Parameters
165 165 ----------
166 166
167 167 skip_condition : bool or callable
168 168 Flag to determine whether to skip test. If the condition is a
169 169 callable, it is used at runtime to dynamically make the decision. This
170 170 is useful for tests that may require costly imports, to delay the cost
171 171 until the test suite is actually executed.
172 172 msg : string
173 173 Message to give on raising a SkipTest exception.
174 174
175 175 Returns
176 176 -------
177 177 decorator : function
178 178 Decorator, which, when applied to a function, causes SkipTest
179 179 to be raised when the skip_condition was True, and the function
180 180 to be called normally otherwise.
181 181
182 182 Notes
183 183 -----
184 184 You will see from the code that we had to further decorate the
185 185 decorator with the nose.tools.make_decorator function in order to
186 186 transmit function name, and various other metadata.
187 187 '''
188 188
189 189 def skip_decorator(f):
190 190 # Local import to avoid a hard nose dependency and only incur the
191 191 # import time overhead at actual test-time.
192 192 import nose
193 193
194 194 # Allow for both boolean or callable skip conditions.
195 195 if callable(skip_condition):
196 196 skip_val = skip_condition
197 197 else:
198 198 skip_val = lambda : skip_condition
199 199
200 200 def get_msg(func,msg=None):
201 201 """Skip message with information about function being skipped."""
202 202 if msg is None: out = 'Test skipped due to test condition.'
203 203 else: out = msg
204 204 return "Skipping test: %s. %s" % (func.__name__,out)
205 205
206 206 # We need to define *two* skippers because Python doesn't allow both
207 207 # return with value and yield inside the same function.
208 208 def skipper_func(*args, **kwargs):
209 209 """Skipper for normal test functions."""
210 210 if skip_val():
211 211 raise nose.SkipTest(get_msg(f,msg))
212 212 else:
213 213 return f(*args, **kwargs)
214 214
215 215 def skipper_gen(*args, **kwargs):
216 216 """Skipper for test generators."""
217 217 if skip_val():
218 218 raise nose.SkipTest(get_msg(f,msg))
219 219 else:
220 220 for x in f(*args, **kwargs):
221 221 yield x
222 222
223 223 # Choose the right skipper to use when building the actual generator.
224 224 if nose.util.isgenerator(f):
225 225 skipper = skipper_gen
226 226 else:
227 227 skipper = skipper_func
228 228
229 229 return nose.tools.make_decorator(f)(skipper)
230 230
231 231 return skip_decorator
232 232
233 233 # A version with the condition set to true, common case just to attach a message
234 234 # to a skip decorator
235 235 def skip(msg=None):
236 236 """Decorator factory - mark a test function for skipping from test suite.
237 237
238 238 Parameters
239 239 ----------
240 240 msg : string
241 241 Optional message to be added.
242 242
243 243 Returns
244 244 -------
245 245 decorator : function
246 246 Decorator, which, when applied to a function, causes SkipTest
247 247 to be raised, with the optional message added.
248 248 """
249 249
250 250 return skipif(True,msg)
251 251
252 252
253 253 def onlyif(condition, msg):
254 254 """The reverse from skipif, see skipif for details."""
255 255
256 256 if callable(condition):
257 257 skip_condition = lambda : not condition()
258 258 else:
259 259 skip_condition = lambda : not condition
260 260
261 261 return skipif(skip_condition, msg)
262 262
263 263 #-----------------------------------------------------------------------------
264 264 # Utility functions for decorators
265 265 def module_not_available(module):
266 266 """Can module be imported? Returns true if module does NOT import.
267 267
268 268 This is used to make a decorator to skip tests that require module to be
269 269 available, but delay the 'import numpy' to test execution time.
270 270 """
271 271 try:
272 272 mod = import_module(module)
273 273 mod_not_avail = False
274 274 except ImportError:
275 275 mod_not_avail = True
276 276
277 277 return mod_not_avail
278 278
279 279
280 280 def decorated_dummy(dec, name):
281 281 """Return a dummy function decorated with dec, with the given name.
282 282
283 283 Examples
284 284 --------
285 285 import IPython.testing.decorators as dec
286 286 setup = dec.decorated_dummy(dec.skip_if_no_x11, __name__)
287 287 """
288 288 warnings.warn("The function `decorated_dummy` is deprecated and might be removed in IPython 5.0", DeprecationWarning)
289 289 dummy = lambda: None
290 290 dummy.__name__ = name
291 291 return dec(dummy)
292 292
293 293 #-----------------------------------------------------------------------------
294 294 # Decorators for public use
295 295
296 296 # Decorators to skip certain tests on specific platforms.
297 297 skip_win32 = skipif(sys.platform == 'win32',
298 298 "This test does not run under Windows")
299 299 skip_linux = skipif(sys.platform.startswith('linux'),
300 300 "This test does not run under Linux")
301 301 skip_osx = skipif(sys.platform == 'darwin',"This test does not run under OS X")
302 302
303 303
304 304 # Decorators to skip tests if not on specific platforms.
305 305 skip_if_not_win32 = skipif(sys.platform != 'win32',
306 306 "This test only runs under Windows")
307 307 skip_if_not_linux = skipif(not sys.platform.startswith('linux'),
308 308 "This test only runs under Linux")
309 309 skip_if_not_osx = skipif(sys.platform != 'darwin',
310 310 "This test only runs under OSX")
311 311
312 312
313 313 _x11_skip_cond = (sys.platform not in ('darwin', 'win32') and
314 314 os.environ.get('DISPLAY', '') == '')
315 315 _x11_skip_msg = "Skipped under *nix when X11/XOrg not available"
316 316
317 317 skip_if_no_x11 = skipif(_x11_skip_cond, _x11_skip_msg)
318 318
319 319 # not a decorator itself, returns a dummy function to be used as setup
320 320 def skip_file_no_x11(name):
321 321 warnings.warn("The function `skip_file_no_x11` is deprecated and might be removed in IPython 5.0", DeprecationWarning)
322 322 return decorated_dummy(skip_if_no_x11, name) if _x11_skip_cond else None
323 323
324 324 # Other skip decorators
325 325
326 326 # generic skip without module
327 327 skip_without = lambda mod: skipif(module_not_available(mod), "This test requires %s" % mod)
328 328
329 329 skipif_not_numpy = skip_without('numpy')
330 330
331 331 skipif_not_matplotlib = skip_without('matplotlib')
332 332
333 333 skipif_not_sympy = skip_without('sympy')
334 334
335 335 skip_known_failure = knownfailureif(True,'This test is known to fail')
336 336
337 known_failure_py3 = knownfailureif(sys.version_info[0] >= 3,
338 'This test is known to fail on Python 3.')
339
340 py2_only = skipif(PY3, "This test only runs on Python 2.")
341 py3_only = skipif(PY2, "This test only runs on Python 3.")
342
343 337 # A null 'decorator', useful to make more readable code that needs to pick
344 338 # between different decorators based on OS or other conditions
345 339 null_deco = lambda f: f
346 340
347 341 # Some tests only run where we can use unicode paths. Note that we can't just
348 342 # check os.path.supports_unicode_filenames, which is always False on Linux.
349 343 try:
350 344 f = tempfile.NamedTemporaryFile(prefix=u"tmp€")
351 345 except UnicodeEncodeError:
352 346 unicode_paths = False
353 347 else:
354 348 unicode_paths = True
355 349 f.close()
356 350
357 351 onlyif_unicode_paths = onlyif(unicode_paths, ("This test is only applicable "
358 352 "where we can use unicode in filenames."))
359 353
360 354
361 355 def onlyif_cmds_exist(*commands):
362 356 """
363 357 Decorator to skip test when at least one of `commands` is not found.
364 358 """
365 359 for cmd in commands:
366 360 if not which(cmd):
367 361 return skip("This test runs only if command '{0}' "
368 362 "is installed".format(cmd))
369 363 return null_deco
370 364
371 365 def onlyif_any_cmd_exists(*commands):
372 366 """
373 367 Decorator to skip test unless at least one of `commands` is found.
374 368 """
375 369 warnings.warn("The function `onlyif_any_cmd_exists` is deprecated and might be removed in IPython 5.0", DeprecationWarning)
376 370 for cmd in commands:
377 371 if which(cmd):
378 372 return null_deco
379 373 return skip("This test runs only if one of the commands {0} "
380 374 "is installed".format(commands))
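A short usage sketch for the decorators defined above, as they would appear in a test module (the test names and the 'git' command are illustrative only):

    import sys
    from IPython.testing.decorators import skipif, skip_win32, onlyif_cmds_exist

    @skip_win32
    def test_posix_only_behaviour():
        assert not sys.platform.startswith('win')

    @skipif(sys.version_info < (3, 4), "requires Python 3.4+")
    def test_enum_available():
        import enum
        assert hasattr(enum, 'Enum')

    @onlyif_cmds_exist('git')
    def test_git_command_line():
        pass   # only collected when a 'git' executable is on PATH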
@@ -1,138 +1,136 b''
1 1 """Global IPython app to support test running.
2 2
3 3 We must start our own ipython object and heavily muck with it so that all the
4 4 modifications IPython makes to system behavior don't send the doctest machinery
5 5 into a fit. This code should be considered a gross hack, but it gets the job
6 6 done.
7 7 """
8 from __future__ import absolute_import
9 from __future__ import print_function
10 8
11 9 # Copyright (c) IPython Development Team.
12 10 # Distributed under the terms of the Modified BSD License.
13 11
14 12 import sys
15 13 import warnings
16 14
17 15 from . import tools
18 16
19 17 from IPython.core import page
20 18 from IPython.utils import io
21 19 from IPython.utils import py3compat
22 20 from IPython.utils.py3compat import builtin_mod
23 21 from IPython.terminal.interactiveshell import TerminalInteractiveShell
24 22
25 23
26 24 class StreamProxy(io.IOStream):
27 25 """Proxy for sys.stdout/err. This will request the stream *at call time*
28 26 allowing for nose's Capture plugin's redirection of sys.stdout/err.
29 27
30 28 Parameters
31 29 ----------
32 30 name : str
33 31 The name of the stream. This will be requested anew at every call
34 32 """
35 33
36 34 def __init__(self, name):
37 35 warnings.warn("StreamProxy is deprecated and unused as of IPython 5", DeprecationWarning,
38 36 stacklevel=2,
39 37 )
40 38 self.name=name
41 39
42 40 @property
43 41 def stream(self):
44 42 return getattr(sys, self.name)
45 43
46 44 def flush(self):
47 45 self.stream.flush()
48 46
49 47
50 48 def get_ipython():
51 49 # This will get replaced by the real thing once we start IPython below
52 50 return start_ipython()
53 51
54 52
55 53 # A couple of methods to override those in the running IPython to interact
56 54 # better with doctest (doctest captures on raw stdout, so we need to direct
57 55 # various types of output there otherwise it will miss them).
58 56
59 57 def xsys(self, cmd):
60 58 """Replace the default system call with a capturing one for doctest.
61 59 """
62 60 # We use getoutput, but we need to strip it because pexpect captures
63 61 # the trailing newline differently from commands.getoutput
64 62 print(self.getoutput(cmd, split=False, depth=1).rstrip(), end='', file=sys.stdout)
65 63 sys.stdout.flush()
66 64
67 65
68 66 def _showtraceback(self, etype, evalue, stb):
69 67 """Print the traceback purely on stdout for doctest to capture it.
70 68 """
71 69 print(self.InteractiveTB.stb2text(stb), file=sys.stdout)
72 70
73 71
74 72 def start_ipython():
75 73 """Start a global IPython shell, which we need for IPython-specific syntax.
76 74 """
77 75 global get_ipython
78 76
79 77 # This function should only ever run once!
80 78 if hasattr(start_ipython, 'already_called'):
81 79 return
82 80 start_ipython.already_called = True
83 81
84 82 # Store certain global objects that IPython modifies
85 83 _displayhook = sys.displayhook
86 84 _excepthook = sys.excepthook
87 85 _main = sys.modules.get('__main__')
88 86
89 87 # Create custom argv and namespaces for our IPython to be test-friendly
90 88 config = tools.default_config()
91 89 config.TerminalInteractiveShell.simple_prompt = True
92 90
93 91 # Create and initialize our test-friendly IPython instance.
94 92 shell = TerminalInteractiveShell.instance(config=config,
95 93 )
96 94
97 95 # A few more tweaks needed for playing nicely with doctests...
98 96
99 97 # remove history file
100 98 shell.tempfiles.append(config.HistoryManager.hist_file)
101 99
102 100 # These traps are normally only active for interactive use, set them
103 101 # permanently since we'll be mocking interactive sessions.
104 102 shell.builtin_trap.activate()
105 103
106 104 # Modify the IPython system call with one that uses getoutput, so that we
107 105 # can capture subcommands and print them to Python's stdout, otherwise the
108 106 # doctest machinery would miss them.
109 107 shell.system = py3compat.MethodType(xsys, shell)
110 108
111 109 shell._showtraceback = py3compat.MethodType(_showtraceback, shell)
112 110
113 111 # IPython is ready, now clean up some global state...
114 112
115 113 # Deactivate the various python system hooks added by ipython for
116 114 # interactive convenience so we don't confuse the doctest system
117 115 sys.modules['__main__'] = _main
118 116 sys.displayhook = _displayhook
119 117 sys.excepthook = _excepthook
120 118
121 119 # So that ipython magics and aliases can be doctested (they work by making
122 120 # a call into a global _ip object). Also make the top-level get_ipython
123 121 # now return this without recursively calling here again.
124 122 _ip = shell
125 123 get_ipython = _ip.get_ipython
126 124 builtin_mod._ip = _ip
127 125 builtin_mod.get_ipython = get_ipython
128 126
129 127 # Override paging, so we don't require user interaction during the tests.
130 128 def nopage(strng, start=0, screen_lines=0, pager_cmd=None):
131 129 if isinstance(strng, dict):
132 130 strng = strng.get('text/plain', '')
133 131 print(strng)
134 132
135 133 page.orig_page = page.pager_page
136 134 page.pager_page = nopage
137 135
138 136 return _ip
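The xsys and _showtraceback overrides above are bound to the live shell with MethodType; the same instance-level monkey-patching pattern in miniature, using only the standard library (Greeter and shout are illustrative names):

    import types

    class Greeter(object):
        def hello(self):
            return "hello"

    def shout(self):
        # Replacement behaviour, bound to one specific instance only.
        return Greeter.hello(self).upper() + "!"

    g = Greeter()
    g.hello = types.MethodType(shout, g)   # other Greeter instances are untouched
    print(g.hello())                       # -> HELLO!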
@@ -1,433 +1,431 b''
1 1 # -*- coding: utf-8 -*-
2 2 """IPython Test Suite Runner.
3 3
4 4 This module provides a main entry point to a user script to test IPython
5 5 itself from the command line. There are two ways of running this script:
6 6
7 7 1. With the syntax `iptest all`. This runs our entire test suite by
8 8 calling this script (with different arguments) recursively. This
9 9 causes modules and package to be tested in different processes, using nose
10 10 or trial where appropriate.
11 11 2. With the regular nose syntax, like `iptest -vvs IPython`. In this form
12 12 the script simply calls nose, but with special command line flags and
13 13 plugins loaded.
14 14
15 15 """
16 16
17 17 # Copyright (c) IPython Development Team.
18 18 # Distributed under the terms of the Modified BSD License.
19 19
20 from __future__ import print_function
21 20
22 21 import glob
23 22 from io import BytesIO
24 23 import os
25 24 import os.path as path
26 25 import sys
27 26 from threading import Thread, Lock, Event
28 27 import warnings
29 28
30 29 import nose.plugins.builtin
31 30 from nose.plugins.xunit import Xunit
32 31 from nose import SkipTest
33 32 from nose.core import TestProgram
34 33 from nose.plugins import Plugin
35 34 from nose.util import safe_str
36 35
37 36 from IPython import version_info
38 37 from IPython.utils.py3compat import bytes_to_str
39 38 from IPython.utils.importstring import import_item
40 39 from IPython.testing.plugin.ipdoctest import IPythonDoctest
41 40 from IPython.external.decorators import KnownFailure, knownfailureif
42 41
43 42 pjoin = path.join
44 43
45 44
46 45 # Enable printing all warnings raised by IPython's modules
47 46 warnings.filterwarnings('ignore', message='.*Matplotlib is building the font cache.*', category=UserWarning, module='.*')
48 if sys.version_info > (3,0):
49 warnings.filterwarnings('error', message='.*', category=ResourceWarning, module='.*')
47 warnings.filterwarnings('error', message='.*', category=ResourceWarning, module='.*')
50 48 warnings.filterwarnings('error', message=".*{'config': True}.*", category=DeprecationWarning, module='IPy.*')
51 49 warnings.filterwarnings('default', message='.*', category=Warning, module='IPy.*')
52 50
53 51 if version_info < (6,):
54 52 # nose.tools renames everything from `camelCase` to `snake_case`, which raises a
55 53 # warning with the runner it also imports from the standard library. (as of Dec 2015)
56 54 # Ignore, let's revisit that in a couple of years for IPython 6.
57 55 warnings.filterwarnings('ignore', message='.*Please use assertEqual instead', category=Warning, module='IPython.*')
58 56
59 57
60 58 # ------------------------------------------------------------------------------
61 59 # Monkeypatch Xunit to count known failures as skipped.
62 60 # ------------------------------------------------------------------------------
63 61 def monkeypatch_xunit():
64 62 try:
65 63 knownfailureif(True)(lambda: None)()
66 64 except Exception as e:
67 65 KnownFailureTest = type(e)
68 66
69 67 def addError(self, test, err, capt=None):
70 68 if issubclass(err[0], KnownFailureTest):
71 69 err = (SkipTest,) + err[1:]
72 70 return self.orig_addError(test, err, capt)
73 71
74 72 Xunit.orig_addError = Xunit.addError
75 73 Xunit.addError = addError
76 74
77 75 #-----------------------------------------------------------------------------
78 76 # Check which dependencies are installed and greater than minimum version.
79 77 #-----------------------------------------------------------------------------
80 78 def extract_version(mod):
81 79 return mod.__version__
82 80
83 81 def test_for(item, min_version=None, callback=extract_version):
84 82 """Test to see if item is importable, and optionally check against a minimum
85 83 version.
86 84
87 85 If min_version is given, the default behavior is to check against the
88 86 `__version__` attribute of the item, but specifying `callback` allows you to
89 87 extract the value you are interested in. e.g::
90 88
91 89 In [1]: import sys
92 90
93 91 In [2]: from IPython.testing.iptest import test_for
94 92
95 93 In [3]: test_for('sys', (2,6), callback=lambda sys: sys.version_info)
96 94 Out[3]: True
97 95
98 96 """
99 97 try:
100 98 check = import_item(item)
101 99 except (ImportError, RuntimeError):
102 100 # GTK reports Runtime error if it can't be initialized even if it's
103 101 # importable.
104 102 return False
105 103 else:
106 104 if min_version:
107 105 if callback:
108 106 # extra processing step to get version to compare
109 107 check = callback(check)
110 108
111 109 return check >= min_version
112 110 else:
113 111 return True
114 112
115 113 # Global dict where we can store information on what we have and what we don't
116 114 # have available at test run time
117 115 have = {'matplotlib': test_for('matplotlib'),
118 116 'pygments': test_for('pygments'),
119 117 'sqlite3': test_for('sqlite3')}
120 118
121 119 #-----------------------------------------------------------------------------
122 120 # Test suite definitions
123 121 #-----------------------------------------------------------------------------
124 122
125 123 test_group_names = ['core',
126 124 'extensions', 'lib', 'terminal', 'testing', 'utils',
127 125 ]
128 126
129 127 class TestSection(object):
130 128 def __init__(self, name, includes):
131 129 self.name = name
132 130 self.includes = includes
133 131 self.excludes = []
134 132 self.dependencies = []
135 133 self.enabled = True
136 134
137 135 def exclude(self, module):
138 136 if not module.startswith('IPython'):
139 137 module = self.includes[0] + "." + module
140 138 self.excludes.append(module.replace('.', os.sep))
141 139
142 140 def requires(self, *packages):
143 141 self.dependencies.extend(packages)
144 142
145 143 @property
146 144 def will_run(self):
147 145 return self.enabled and all(have[p] for p in self.dependencies)
148 146
149 147 # Name -> (include, exclude, dependencies_met)
150 148 test_sections = {n:TestSection(n, ['IPython.%s' % n]) for n in test_group_names}
151 149
152 150
153 151 # Exclusions and dependencies
154 152 # ---------------------------
155 153
156 154 # core:
157 155 sec = test_sections['core']
158 156 if not have['sqlite3']:
159 157 sec.exclude('tests.test_history')
160 158 sec.exclude('history')
161 159 if not have['matplotlib']:
162 160 sec.exclude('pylabtools')
163 161 sec.exclude('tests.test_pylabtools')
164 162
165 163 # lib:
166 164 sec = test_sections['lib']
167 165 sec.exclude('kernel')
168 166 if not have['pygments']:
169 167 sec.exclude('tests.test_lexers')
170 168 # We do this unconditionally, so that the test suite doesn't import
171 169 # gtk, changing the default encoding and masking some unicode bugs.
172 170 sec.exclude('inputhookgtk')
173 171 # We also do this unconditionally, because wx can interfere with Unix signals.
174 172 # There are currently no tests for it anyway.
175 173 sec.exclude('inputhookwx')
176 174 # Testing inputhook will need a lot of thought, to figure out
177 175 # how to have tests that don't lock up with the gui event
178 176 # loops in the picture
179 177 sec.exclude('inputhook')
180 178
181 179 # testing:
182 180 sec = test_sections['testing']
183 181 # These have to be skipped on win32 because they use echo, rm, cd, etc.
184 182 # See ticket https://github.com/ipython/ipython/issues/87
185 183 if sys.platform == 'win32':
186 184 sec.exclude('plugin.test_exampleip')
187 185 sec.exclude('plugin.dtexample')
188 186
189 187 # don't run jupyter_console tests found via shim
190 188 test_sections['terminal'].exclude('console')
191 189
192 190 # extensions:
193 191 sec = test_sections['extensions']
194 192 # This is deprecated in favour of rpy2
195 193 sec.exclude('rmagic')
196 194 # autoreload does some strange stuff, so move it to its own test section
197 195 sec.exclude('autoreload')
198 196 sec.exclude('tests.test_autoreload')
199 197 test_sections['autoreload'] = TestSection('autoreload',
200 198 ['IPython.extensions.autoreload', 'IPython.extensions.tests.test_autoreload'])
201 199 test_group_names.append('autoreload')
202 200
203 201
204 202 #-----------------------------------------------------------------------------
205 203 # Functions and classes
206 204 #-----------------------------------------------------------------------------
207 205
208 206 def check_exclusions_exist():
209 207 from IPython.paths import get_ipython_package_dir
210 208 from warnings import warn
211 209 parent = os.path.dirname(get_ipython_package_dir())
212 210 for sec in test_sections.values():
213 211 for pattern in sec.excludes:
214 212 fullpath = pjoin(parent, pattern)
215 213 if not os.path.exists(fullpath) and not glob.glob(fullpath + '.*'):
216 214 warn("Excluding nonexistent file: %r" % pattern)
217 215
218 216
219 217 class ExclusionPlugin(Plugin):
220 218 """A nose plugin to effect our exclusions of files and directories.
221 219 """
222 220 name = 'exclusions'
223 221 score = 3000 # Should come before any other plugins
224 222
225 223 def __init__(self, exclude_patterns=None):
226 224 """
227 225 Parameters
228 226 ----------
229 227
230 228 exclude_patterns : sequence of strings, optional
231 229 Filenames containing these patterns (as raw strings, not as regular
232 230 expressions) are excluded from the tests.
233 231 """
234 232 self.exclude_patterns = exclude_patterns or []
235 233 super(ExclusionPlugin, self).__init__()
236 234
237 235 def options(self, parser, env=os.environ):
238 236 Plugin.options(self, parser, env)
239 237
240 238 def configure(self, options, config):
241 239 Plugin.configure(self, options, config)
242 240 # Override nose trying to disable plugin.
243 241 self.enabled = True
244 242
245 243 def wantFile(self, filename):
246 244 """Return whether the given filename should be scanned for tests.
247 245 """
248 246 if any(pat in filename for pat in self.exclude_patterns):
249 247 return False
250 248 return None
251 249
252 250 def wantDirectory(self, directory):
253 251 """Return whether the given directory should be scanned for tests.
254 252 """
255 253 if any(pat in directory for pat in self.exclude_patterns):
256 254 return False
257 255 return None
258 256
259 257
260 258 class StreamCapturer(Thread):
261 259 daemon = True # Don't hang if main thread crashes
262 260 started = False
263 261 def __init__(self, echo=False):
264 262 super(StreamCapturer, self).__init__()
265 263 self.echo = echo
266 264 self.streams = []
267 265 self.buffer = BytesIO()
268 266 self.readfd, self.writefd = os.pipe()
269 267 self.buffer_lock = Lock()
270 268 self.stop = Event()
271 269
272 270 def run(self):
273 271 self.started = True
274 272
275 273 while not self.stop.is_set():
276 274 chunk = os.read(self.readfd, 1024)
277 275
278 276 with self.buffer_lock:
279 277 self.buffer.write(chunk)
280 278 if self.echo:
281 279 sys.stdout.write(bytes_to_str(chunk))
282 280
283 281 os.close(self.readfd)
284 282 os.close(self.writefd)
285 283
286 284 def reset_buffer(self):
287 285 with self.buffer_lock:
288 286 self.buffer.truncate(0)
289 287 self.buffer.seek(0)
290 288
291 289 def get_buffer(self):
292 290 with self.buffer_lock:
293 291 return self.buffer.getvalue()
294 292
295 293 def ensure_started(self):
296 294 if not self.started:
297 295 self.start()
298 296
299 297 def halt(self):
300 298 """Safely stop the thread."""
301 299 if not self.started:
302 300 return
303 301
304 302 self.stop.set()
305 303 os.write(self.writefd, b'\0') # Ensure we're not locked in a read()
306 304 self.join()
307 305
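
A hedged usage sketch of the StreamCapturer defined above, mirroring how TestController.launch() and wait() drive it later in this changeset; the child command is an arbitrary example:

    # Sketch: route a child process's output into the capture pipe, then read it back.
    import subprocess
    import sys
    from IPython.testing.iptest import StreamCapturer

    cap = StreamCapturer(echo=False)
    cap.ensure_started()
    proc = subprocess.Popen([sys.executable, '-c', "print('hello')"],
                            stdout=cap.writefd, stderr=subprocess.STDOUT)
    proc.wait()
    cap.halt()                   # stop the reader thread
    captured = cap.get_buffer()  # bytes read from the pipe: the child's output,
                                 # possibly plus the b'\0' that halt() writes to unblock the read
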
308 306 class SubprocessStreamCapturePlugin(Plugin):
309 307 name='subprocstreams'
310 308 def __init__(self):
311 309 Plugin.__init__(self)
312 310 self.stream_capturer = StreamCapturer()
313 311 self.destination = os.environ.get('IPTEST_SUBPROC_STREAMS', 'capture')
314 312 # This is ugly, but distant parts of the test machinery need to be able
315 313 # to redirect streams, so we make the object globally accessible.
316 314 nose.iptest_stdstreams_fileno = self.get_write_fileno
317 315
318 316 def get_write_fileno(self):
319 317 if self.destination == 'capture':
320 318 self.stream_capturer.ensure_started()
321 319 return self.stream_capturer.writefd
322 320 elif self.destination == 'discard':
323 321 return os.open(os.devnull, os.O_WRONLY)
324 322 else:
325 323 return sys.__stdout__.fileno()
326 324
327 325 def configure(self, options, config):
328 326 Plugin.configure(self, options, config)
329 327 # Override nose trying to disable plugin.
330 328 if self.destination == 'capture':
331 329 self.enabled = True
332 330
333 331 def startTest(self, test):
334 332 # Reset log capture
335 333 self.stream_capturer.reset_buffer()
336 334
337 335 def formatFailure(self, test, err):
338 336 # Show output
339 337 ec, ev, tb = err
340 338 captured = self.stream_capturer.get_buffer().decode('utf-8', 'replace')
341 339 if captured.strip():
342 340 ev = safe_str(ev)
343 341 out = [ev, '>> begin captured subprocess output <<',
344 342 captured,
345 343 '>> end captured subprocess output <<']
346 344 return ec, '\n'.join(out), tb
347 345
348 346 return err
349 347
350 348 formatError = formatFailure
351 349
352 350 def finalize(self, result):
353 351 self.stream_capturer.halt()
354 352
355 353
356 354 def run_iptest():
357 355 """Run the IPython test suite using nose.
358 356
359 357 This function is called when this script is **not** called with the form
360 358 `iptest all`. It simply calls nose with appropriate command line flags
361 359 and accepts all of the standard nose arguments.
362 360 """
363 361 # Apply our monkeypatch to Xunit
364 362 if '--with-xunit' in sys.argv and not hasattr(Xunit, 'orig_addError'):
365 363 monkeypatch_xunit()
366 364
367 365 arg1 = sys.argv[1]
368 366 if arg1 in test_sections:
369 367 section = test_sections[arg1]
370 368 sys.argv[1:2] = section.includes
371 369 elif arg1.startswith('IPython.') and arg1[8:] in test_sections:
372 370 section = test_sections[arg1[8:]]
373 371 sys.argv[1:2] = section.includes
374 372 else:
375 373 section = TestSection(arg1, includes=[arg1])
376 374
377 375
378 376 argv = sys.argv + [ '--detailed-errors', # extra info in tracebacks
379 377 # We add --exe because of setuptools' imbecility (it
380 378 # blindly does chmod +x on ALL files). Nose does the
381 379 # right thing and it tries to avoid executables,
382 380 # setuptools unfortunately forces our hand here. This
383 381 # has been discussed on the distutils list and the
384 382 # setuptools devs refuse to fix this problem!
385 383 '--exe',
386 384 ]
387 385 if '-a' not in argv and '-A' not in argv:
388 386 argv = argv + ['-a', '!crash']
389 387
390 388 if nose.__version__ >= '0.11':
391 389 # I don't fully understand why we need this one, but depending on what
392 390 # directory the test suite is run from, if we don't give it, 0 tests
393 391 # get run. Specifically, if the test suite is run from the source dir
394 392 # with an argument (like 'iptest.py IPython.core'), 0 tests are run,
395 393 # even though the same call from this directory works fine. It appears
396 394 # that if the requested package is in the current dir, nose bails early
397 395 # by default. Since it's otherwise harmless, leave it in by default
398 396 # for nose >= 0.11, though unfortunately nose 0.10 doesn't support it.
399 397 argv.append('--traverse-namespace')
400 398
401 399 plugins = [ ExclusionPlugin(section.excludes), KnownFailure(),
402 400 SubprocessStreamCapturePlugin() ]
403 401
404 402 # we still have some vestigial doctests in core
405 403 if section.name.startswith(('core', 'IPython.core')):
406 404 plugins.append(IPythonDoctest())
407 405 argv.extend([
408 406 '--with-ipdoctest',
409 407 '--ipdoctest-tests',
410 408 '--ipdoctest-extension=txt',
411 409 ])
412 410
413 411
414 412 # Use working directory set by parent process (see iptestcontroller)
415 413 if 'IPTEST_WORKING_DIR' in os.environ:
416 414 os.chdir(os.environ['IPTEST_WORKING_DIR'])
417 415
418 416 # We need a global ipython running in this process, but the special
419 417 # in-process group spawns its own IPython kernels, so for *that* group we
420 418 # must avoid also opening the global one (otherwise there's a conflict of
421 419 # singletons). Ultimately the solution to this problem is to refactor our
422 420 # assumptions about what needs to be a singleton and what doesn't (app
423 421 # objects should, individual shells shouldn't). But for now, this
424 422 # workaround allows the test suite for the inprocess module to complete.
425 423 if 'kernel.inprocess' not in section.name:
426 424 from IPython.testing import globalipapp
427 425 globalipapp.start_ipython()
428 426
429 427 # Now nose can run
430 428 TestProgram(argv=argv, addplugins=plugins)
431 429
432 430 if __name__ == '__main__':
433 431 run_iptest()
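
For reference, a small illustrative sketch of the test_for() helper defined in this file; the probed names below are arbitrary examples, not additions to the `have` dict:

    # Sketch: probe optional dependencies the same way the `have` dict above is built.
    from IPython.testing.iptest import test_for

    have_matplotlib = test_for('matplotlib')                                 # importability only
    py33_plus = test_for('sys', (3, 3), callback=lambda m: m.version_info)   # version via callback
    print(have_matplotlib, py33_plus)
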
@@ -1,532 +1,531 b''
1 1 # -*- coding: utf-8 -*-
2 2 """IPython Test Process Controller
3 3
4 4 This module runs one or more subprocesses which will actually run the IPython
5 5 test suite.
6 6
7 7 """
8 8
9 9 # Copyright (c) IPython Development Team.
10 10 # Distributed under the terms of the Modified BSD License.
11 11
12 from __future__ import print_function
13 12
14 13 import argparse
15 14 import json
16 15 import multiprocessing.pool
17 16 import os
18 17 import stat
19 18 import re
20 19 import requests
21 20 import shutil
22 21 import signal
23 22 import sys
24 23 import subprocess
25 24 import time
26 25
27 26 from .iptest import (
28 27 have, test_group_names as py_test_group_names, test_sections, StreamCapturer,
29 28 test_for,
30 29 )
31 30 from IPython.utils.path import compress_user
32 31 from IPython.utils.py3compat import bytes_to_str
33 32 from IPython.utils.sysinfo import get_sys_info
34 33 from IPython.utils.tempdir import TemporaryDirectory
35 34 from IPython.utils.text import strip_ansi
36 35
37 36 try:
38 37 # Python >= 3.3
39 38 from subprocess import TimeoutExpired
40 39 def popen_wait(p, timeout):
41 40 return p.wait(timeout)
42 41 except ImportError:
43 42 class TimeoutExpired(Exception):
44 43 pass
45 44 def popen_wait(p, timeout):
46 45 """backport of Popen.wait from Python 3"""
47 46 for i in range(int(10 * timeout)):
48 47 if p.poll() is not None:
49 48 return
50 49 time.sleep(0.1)
51 50 if p.poll() is None:
52 51 raise TimeoutExpired
53 52
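
A brief sketch of how the popen_wait() shim above is intended to be used; the child command and the five-second timeout are arbitrary examples:

    # Sketch: wait up to five seconds for a child process, then give up and kill it.
    import subprocess
    import sys
    from IPython.testing.iptestcontroller import TimeoutExpired, popen_wait

    proc = subprocess.Popen([sys.executable, '-c', 'import time; time.sleep(1)'])
    try:
        popen_wait(proc, 5)
    except TimeoutExpired:
        proc.kill()
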
54 53 NOTEBOOK_SHUTDOWN_TIMEOUT = 10
55 54
56 55 class TestController(object):
57 56 """Run tests in a subprocess
58 57 """
59 58 #: str, IPython test suite to be executed.
60 59 section = None
61 60 #: list, command line arguments to be executed
62 61 cmd = None
63 62 #: dict, extra environment variables to set for the subprocess
64 63 env = None
65 64 #: list, TemporaryDirectory instances to clear up when the process finishes
66 65 dirs = None
67 66 #: subprocess.Popen instance
68 67 process = None
69 68 #: str, process stdout+stderr
70 69 stdout = None
71 70
72 71 def __init__(self):
73 72 self.cmd = []
74 73 self.env = {}
75 74 self.dirs = []
76 75
77 76 def setup(self):
78 77 """Create temporary directories etc.
79 78
80 79 This is only called when we know the test group will be run. Things
81 80 created here may be cleaned up by self.cleanup().
82 81 """
83 82 pass
84 83
85 84 def launch(self, buffer_output=False, capture_output=False):
86 85 # print('*** ENV:', self.env) # dbg
87 86 # print('*** CMD:', self.cmd) # dbg
88 87 env = os.environ.copy()
89 88 env.update(self.env)
90 89 if buffer_output:
91 90 capture_output = True
92 91 self.stdout_capturer = c = StreamCapturer(echo=not buffer_output)
93 92 c.start()
94 93 stdout = c.writefd if capture_output else None
95 94 stderr = subprocess.STDOUT if capture_output else None
96 95 self.process = subprocess.Popen(self.cmd, stdout=stdout,
97 96 stderr=stderr, env=env)
98 97
99 98 def wait(self):
100 99 self.process.wait()
101 100 self.stdout_capturer.halt()
102 101 self.stdout = self.stdout_capturer.get_buffer()
103 102 return self.process.returncode
104 103
105 104 def print_extra_info(self):
106 105 """Print extra information about this test run.
107 106
108 107 If we're running in parallel and showing the concise view, this is only
109 108 called if the test group fails. Otherwise, it's called before the test
110 109 group is started.
111 110
112 111 The base implementation does nothing, but it can be overridden by
113 112 subclasses.
114 113 """
115 114 return
116 115
117 116 def cleanup_process(self):
118 117 """Cleanup on exit by killing any leftover processes."""
119 118 subp = self.process
120 119 if subp is None or (subp.poll() is not None):
121 120 return # Process doesn't exist, or is already dead.
122 121
123 122 try:
124 123 print('Cleaning up stale PID: %d' % subp.pid)
125 124 subp.kill()
126 125 except: # (OSError, WindowsError) ?
127 126 # This is just a best effort, if we fail or the process was
128 127 # really gone, ignore it.
129 128 pass
130 129 else:
131 130 for i in range(10):
132 131 if subp.poll() is None:
133 132 time.sleep(0.1)
134 133 else:
135 134 break
136 135
137 136 if subp.poll() is None:
138 137 # The process did not die...
139 138 print('... failed. Manual cleanup may be required.')
140 139
141 140 def cleanup(self):
142 141 "Kill process if it's still alive, and clean up temporary directories"
143 142 self.cleanup_process()
144 143 for td in self.dirs:
145 144 td.cleanup()
146 145
147 146 __del__ = cleanup
148 147
149 148
150 149 class PyTestController(TestController):
151 150 """Run Python tests using IPython.testing.iptest"""
152 151 #: str, Python command to execute in subprocess
153 152 pycmd = None
154 153
155 154 def __init__(self, section, options):
156 155 """Create new test runner."""
157 156 TestController.__init__(self)
158 157 self.section = section
159 158 # pycmd is put into cmd[2] in PyTestController.launch()
160 159 self.cmd = [sys.executable, '-c', None, section]
161 160 self.pycmd = "from IPython.testing.iptest import run_iptest; run_iptest()"
162 161 self.options = options
163 162
164 163 def setup(self):
165 164 ipydir = TemporaryDirectory()
166 165 self.dirs.append(ipydir)
167 166 self.env['IPYTHONDIR'] = ipydir.name
168 167 self.workingdir = workingdir = TemporaryDirectory()
169 168 self.dirs.append(workingdir)
170 169 self.env['IPTEST_WORKING_DIR'] = workingdir.name
171 170 # This means we won't get odd effects from our own matplotlib config
172 171 self.env['MPLCONFIGDIR'] = workingdir.name
173 172 # For security reasons (http://bugs.python.org/issue16202), use
174 173 # a temporary directory to which other users have no access.
175 174 self.env['TMPDIR'] = workingdir.name
176 175
177 176 # Add a non-accessible directory to PATH (see gh-7053)
178 177 noaccess = os.path.join(self.workingdir.name, "_no_access_")
179 178 self.noaccess = noaccess
180 179 os.mkdir(noaccess, 0)
181 180
182 181 PATH = os.environ.get('PATH', '')
183 182 if PATH:
184 183 PATH = noaccess + os.pathsep + PATH
185 184 else:
186 185 PATH = noaccess
187 186 self.env['PATH'] = PATH
188 187
189 188 # From options:
190 189 if self.options.xunit:
191 190 self.add_xunit()
192 191 if self.options.coverage:
193 192 self.add_coverage()
194 193 self.env['IPTEST_SUBPROC_STREAMS'] = self.options.subproc_streams
195 194 self.cmd.extend(self.options.extra_args)
196 195
197 196 def cleanup(self):
198 197 """
199 198 Make the non-accessible directory created in setup() accessible
200 199 again, otherwise deleting the workingdir will fail.
201 200 """
202 201 os.chmod(self.noaccess, stat.S_IRWXU)
203 202 TestController.cleanup(self)
204 203
205 204 @property
206 205 def will_run(self):
207 206 try:
208 207 return test_sections[self.section].will_run
209 208 except KeyError:
210 209 return True
211 210
212 211 def add_xunit(self):
213 212 xunit_file = os.path.abspath(self.section + '.xunit.xml')
214 213 self.cmd.extend(['--with-xunit', '--xunit-file', xunit_file])
215 214
216 215 def add_coverage(self):
217 216 try:
218 217 sources = test_sections[self.section].includes
219 218 except KeyError:
220 219 sources = ['IPython']
221 220
222 221 coverage_rc = ("[run]\n"
223 222 "data_file = {data_file}\n"
224 223 "source =\n"
225 224 " {source}\n"
226 225 ).format(data_file=os.path.abspath('.coverage.'+self.section),
227 226 source="\n ".join(sources))
228 227 config_file = os.path.join(self.workingdir.name, '.coveragerc')
229 228 with open(config_file, 'w') as f:
230 229 f.write(coverage_rc)
231 230
232 231 self.env['COVERAGE_PROCESS_START'] = config_file
233 232 self.pycmd = "import coverage; coverage.process_startup(); " + self.pycmd
234 233
235 234 def launch(self, buffer_output=False):
236 235 self.cmd[2] = self.pycmd
237 236 super(PyTestController, self).launch(buffer_output=buffer_output)
238 237
239 238
240 239 def prepare_controllers(options):
241 240 """Returns two lists of TestController instances, those to run, and those
242 241 not to run."""
243 242 testgroups = options.testgroups
244 243 if not testgroups:
245 244 testgroups = py_test_group_names
246 245
247 246 controllers = [PyTestController(name, options) for name in testgroups]
248 247
249 248 to_run = [c for c in controllers if c.will_run]
250 249 not_run = [c for c in controllers if not c.will_run]
251 250 return to_run, not_run
252 251
253 252 def do_run(controller, buffer_output=True):
254 253 """Setup and run a test controller.
255 254
256 255 If buffer_output is True, no output is displayed, to avoid it appearing
257 256 interleaved. In this case, the caller is responsible for displaying test
258 257 output on failure.
259 258
260 259 Returns
261 260 -------
262 261 controller : TestController
263 262 The same controller as passed in, as a convenience for using map() type
264 263 APIs.
265 264 exitcode : int
266 265 The exit code of the test subprocess. Non-zero indicates failure.
267 266 """
268 267 try:
269 268 try:
270 269 controller.setup()
271 270 if not buffer_output:
272 271 controller.print_extra_info()
273 272 controller.launch(buffer_output=buffer_output)
274 273 except Exception:
275 274 import traceback
276 275 traceback.print_exc()
277 276 return controller, 1 # signal failure
278 277
279 278 exitcode = controller.wait()
280 279 return controller, exitcode
281 280
282 281 except KeyboardInterrupt:
283 282 return controller, -signal.SIGINT
284 283 finally:
285 284 controller.cleanup()
286 285
287 286 def report():
288 287 """Return a string with a summary report of test-related variables."""
289 288 inf = get_sys_info()
290 289 out = []
291 290 def _add(name, value):
292 291 out.append((name, value))
293 292
294 293 _add('IPython version', inf['ipython_version'])
295 294 _add('IPython commit', "{} ({})".format(inf['commit_hash'], inf['commit_source']))
296 295 _add('IPython package', compress_user(inf['ipython_path']))
297 296 _add('Python version', inf['sys_version'].replace('\n',''))
298 297 _add('sys.executable', compress_user(inf['sys_executable']))
299 298 _add('Platform', inf['platform'])
300 299
301 300 width = max(len(n) for (n,v) in out)
302 301 out = ["{:<{width}}: {}\n".format(n, v, width=width) for (n,v) in out]
303 302
304 303 avail = []
305 304 not_avail = []
306 305
307 306 for k, is_avail in have.items():
308 307 if is_avail:
309 308 avail.append(k)
310 309 else:
311 310 not_avail.append(k)
312 311
313 312 if avail:
314 313 out.append('\nTools and libraries available at test time:\n')
315 314 avail.sort()
316 315 out.append(' ' + ' '.join(avail)+'\n')
317 316
318 317 if not_avail:
319 318 out.append('\nTools and libraries NOT available at test time:\n')
320 319 not_avail.sort()
321 320 out.append(' ' + ' '.join(not_avail)+'\n')
322 321
323 322 return ''.join(out)
324 323
325 324 def run_iptestall(options):
326 325 """Run the entire IPython test suite by calling nose and trial.
327 326
328 327 This function constructs :class:`IPTester` instances for all IPython
329 328 modules and packages and then runs each of them. This causes the modules
330 329 and packages of IPython to be tested each in their own subprocess using
331 330 nose.
332 331
333 332 Parameters
334 333 ----------
335 334
336 335 All parameters are passed as attributes of the options object.
337 336
338 337 testgroups : list of str
339 338 Run only these sections of the test suite. If empty, run all the available
340 339 sections.
341 340
342 341 fast : int or None
343 342 Run the test suite in parallel, using n simultaneous processes. If None
344 343 is passed, one process is used per CPU core. Default 1 (i.e. sequential)
345 344
346 345 inc_slow : bool
347 346 Include slow tests. By default, these tests aren't run.
348 347
349 348 url : unicode
350 349 Address:port to use when running the JS tests.
351 350
352 351 xunit : bool
353 352 Produce Xunit XML output. This is written to multiple foo.xunit.xml files.
354 353
355 354 coverage : bool or str
356 355 Measure code coverage from tests. True will store the raw coverage data,
357 356 or pass 'html' or 'xml' to get reports.
358 357
359 358 extra_args : list
360 359 Extra arguments to pass to the test subprocesses, e.g. '-v'
361 360 """
362 361 to_run, not_run = prepare_controllers(options)
363 362
364 363 def justify(ltext, rtext, width=70, fill='-'):
365 364 ltext += ' '
366 365 rtext = (' ' + rtext).rjust(width - len(ltext), fill)
367 366 return ltext + rtext
368 367
369 368 # Run all test runners, tracking execution time
370 369 failed = []
371 370 t_start = time.time()
372 371
373 372 print()
374 373 if options.fast == 1:
375 374 # This actually means sequential, i.e. with 1 job
376 375 for controller in to_run:
377 376 print('Test group:', controller.section)
378 377 sys.stdout.flush() # Show in correct order when output is piped
379 378 controller, res = do_run(controller, buffer_output=False)
380 379 if res:
381 380 failed.append(controller)
382 381 if res == -signal.SIGINT:
383 382 print("Interrupted")
384 383 break
385 384 print()
386 385
387 386 else:
388 387 # Run tests concurrently
389 388 try:
390 389 pool = multiprocessing.pool.ThreadPool(options.fast)
391 390 for (controller, res) in pool.imap_unordered(do_run, to_run):
392 391 res_string = 'OK' if res == 0 else 'FAILED'
393 392 print(justify('Test group: ' + controller.section, res_string))
394 393 if res:
395 394 controller.print_extra_info()
396 395 print(bytes_to_str(controller.stdout))
397 396 failed.append(controller)
398 397 if res == -signal.SIGINT:
399 398 print("Interrupted")
400 399 break
401 400 except KeyboardInterrupt:
402 401 return
403 402
404 403 for controller in not_run:
405 404 print(justify('Test group: ' + controller.section, 'NOT RUN'))
406 405
407 406 t_end = time.time()
408 407 t_tests = t_end - t_start
409 408 nrunners = len(to_run)
410 409 nfail = len(failed)
411 410 # summarize results
412 411 print('_'*70)
413 412 print('Test suite completed for system with the following information:')
414 413 print(report())
415 414 took = "Took %.3fs." % t_tests
416 415 print('Status: ', end='')
417 416 if not failed:
418 417 print('OK (%d test groups).' % nrunners, took)
419 418 else:
420 419 # If anything went wrong, point out what command to rerun manually to
421 420 # see the actual errors and individual summary
422 421 failed_sections = [c.section for c in failed]
423 422 print('ERROR - {} out of {} test groups failed ({}).'.format(nfail,
424 423 nrunners, ', '.join(failed_sections)), took)
425 424 print()
426 425 print('You may wish to rerun these, with:')
427 426 print(' iptest', *failed_sections)
428 427 print()
429 428
430 429 if options.coverage:
431 430 from coverage import coverage, CoverageException
432 431 cov = coverage(data_file='.coverage')
433 432 cov.combine()
434 433 cov.save()
435 434
436 435 # Coverage HTML report
437 436 if options.coverage == 'html':
438 437 html_dir = 'ipy_htmlcov'
439 438 shutil.rmtree(html_dir, ignore_errors=True)
440 439 print("Writing HTML coverage report to %s/ ... " % html_dir, end="")
441 440 sys.stdout.flush()
442 441
443 442 # Custom HTML reporter to clean up module names.
444 443 from coverage.html import HtmlReporter
445 444 class CustomHtmlReporter(HtmlReporter):
446 445 def find_code_units(self, morfs):
447 446 super(CustomHtmlReporter, self).find_code_units(morfs)
448 447 for cu in self.code_units:
449 448 nameparts = cu.name.split(os.sep)
450 449 if 'IPython' not in nameparts:
451 450 continue
452 451 ix = nameparts.index('IPython')
453 452 cu.name = '.'.join(nameparts[ix:])
454 453
455 454 # Reimplement the html_report method with our custom reporter
456 455 cov.get_data()
457 456 cov.config.from_args(omit='*{0}tests{0}*'.format(os.sep), html_dir=html_dir,
458 457 html_title='IPython test coverage',
459 458 )
460 459 reporter = CustomHtmlReporter(cov, cov.config)
461 460 reporter.report(None)
462 461 print('done.')
463 462
464 463 # Coverage XML report
465 464 elif options.coverage == 'xml':
466 465 try:
467 466 cov.xml_report(outfile='ipy_coverage.xml')
468 467 except CoverageException as e:
469 468 print('Generating coverage report failed. Are you running javascript tests only?')
470 469 import traceback
471 470 traceback.print_exc()
472 471
473 472 if failed:
474 473 # Ensure that our exit code indicates failure
475 474 sys.exit(1)
476 475
477 476 argparser = argparse.ArgumentParser(description='Run IPython test suite')
478 477 argparser.add_argument('testgroups', nargs='*',
479 478 help='Run specified groups of tests. If omitted, run '
480 479 'all tests.')
481 480 argparser.add_argument('--all', action='store_true',
482 481 help='Include slow tests not run by default.')
483 482 argparser.add_argument('--url', help="URL to use for the JS tests.")
484 483 argparser.add_argument('-j', '--fast', nargs='?', const=None, default=1, type=int,
485 484 help='Run test sections in parallel. This starts as many '
486 485 'processes as you have cores, or you can specify a number.')
487 486 argparser.add_argument('--xunit', action='store_true',
488 487 help='Produce Xunit XML results')
489 488 argparser.add_argument('--coverage', nargs='?', const=True, default=False,
490 489 help="Measure test coverage. Specify 'html' or "
491 490 "'xml' to get reports.")
492 491 argparser.add_argument('--subproc-streams', default='capture',
493 492 help="What to do with stdout/stderr from subprocesses. "
494 493 "'capture' (default), 'show' and 'discard' are the options.")
495 494
496 495 def default_options():
497 496 """Get an argparse Namespace object with the default arguments, to pass to
498 497 :func:`run_iptestall`.
499 498 """
500 499 options = argparser.parse_args([])
501 500 options.extra_args = []
502 501 return options
503 502
504 503 def main():
505 504 # iptest doesn't work correctly if the working directory is the
506 505 # root of the IPython source tree. Tell the user to avoid
507 506 # frustration.
508 507 if os.path.exists(os.path.join(os.getcwd(),
509 508 'IPython', 'testing', '__main__.py')):
510 509 print("Don't run iptest from the IPython source directory",
511 510 file=sys.stderr)
512 511 sys.exit(1)
513 512 # Arguments after -- should be passed through to nose. Argparse treats
514 513 # everything after -- as regular positional arguments, so we separate them
515 514 # first.
516 515 try:
517 516 ix = sys.argv.index('--')
518 517 except ValueError:
519 518 to_parse = sys.argv[1:]
520 519 extra_args = []
521 520 else:
522 521 to_parse = sys.argv[1:ix]
523 522 extra_args = sys.argv[ix+1:]
524 523
525 524 options = argparser.parse_args(to_parse)
526 525 options.extra_args = extra_args
527 526
528 527 run_iptestall(options)
529 528
530 529
531 530 if __name__ == '__main__':
532 531 main()
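
An illustrative sketch of driving the controller programmatically instead of via the iptest command line; the chosen test group and job count are examples only:

    # Sketch: run just the 'core' section, sequentially, through the API defined above.
    from IPython.testing.iptestcontroller import default_options, run_iptestall

    options = default_options()
    options.testgroups = ['core']   # an empty list means "all groups"
    options.fast = 1                # 1 == sequential; None == one process per core
    run_iptestall(options)
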
@@ -1,177 +1,176 b''
1 1 """Experimental code for cleaner support of IPython syntax with unittest.
2 2
3 3 In IPython up until 0.10, we've used very hacked up nose machinery for running
4 4 tests with IPython special syntax, and this has proved to be extremely slow.
5 5 This module provides decorators to try a different approach, stemming from a
6 6 conversation Brian and I (FP) had about this problem Sept/09.
7 7
8 8 The goal is to be able to easily write simple functions that can be seen by
9 9 unittest as tests, and ultimately for these to support doctests with full
10 10 IPython syntax. Nose already offers this based on naming conventions and our
11 11 hackish plugins, but we are seeking to move away from nose dependencies if
12 12 possible.
13 13
14 14 This module follows a different approach, based on decorators.
15 15
16 16 - A decorator called @ipdoctest can mark any function as having a docstring
17 17 that should be viewed as a doctest, but after syntax conversion.
18 18
19 19 Authors
20 20 -------
21 21
22 22 - Fernando Perez <Fernando.Perez@berkeley.edu>
23 23 """
24 24
25 from __future__ import absolute_import
26 25
27 26 #-----------------------------------------------------------------------------
28 27 # Copyright (C) 2009-2011 The IPython Development Team
29 28 #
30 29 # Distributed under the terms of the BSD License. The full license is in
31 30 # the file COPYING, distributed as part of this software.
32 31 #-----------------------------------------------------------------------------
33 32
34 33 #-----------------------------------------------------------------------------
35 34 # Imports
36 35 #-----------------------------------------------------------------------------
37 36
38 37 # Stdlib
39 38 import re
40 39 import unittest
41 40 from doctest import DocTestFinder, DocTestRunner, TestResults
42 41
43 42 #-----------------------------------------------------------------------------
44 43 # Classes and functions
45 44 #-----------------------------------------------------------------------------
46 45
47 46 def count_failures(runner):
48 47 """Count number of failures in a doctest runner.
49 48
50 49 Code modeled after the summarize() method in doctest.
51 50 """
52 51 return [TestResults(f, t) for f, t in runner._name2ft.values() if f > 0 ]
53 52
54 53
55 54 class IPython2PythonConverter(object):
56 55 """Convert IPython 'syntax' to valid Python.
57 56
58 57 Eventually this code may grow to be the full IPython syntax conversion
59 58 implementation, but for now it only does prompt conversion."""
60 59
61 60 def __init__(self):
62 61 self.rps1 = re.compile(r'In\ \[\d+\]: ')
63 62 self.rps2 = re.compile(r'\ \ \ \.\.\.+: ')
64 63 self.rout = re.compile(r'Out\[\d+\]: \s*?\n?')
65 64 self.pyps1 = '>>> '
66 65 self.pyps2 = '... '
67 66 self.rpyps1 = re.compile(r'(\s*%s)(.*)$' % self.pyps1)
68 67 self.rpyps2 = re.compile(r'(\s*%s)(.*)$' % self.pyps2)
69 68
70 69 def __call__(self, ds):
71 70 """Convert IPython prompts to python ones in a string."""
72 71 from . import globalipapp
73 72
74 73 pyps1 = '>>> '
75 74 pyps2 = '... '
76 75 pyout = ''
77 76
78 77 dnew = ds
79 78 dnew = self.rps1.sub(pyps1, dnew)
80 79 dnew = self.rps2.sub(pyps2, dnew)
81 80 dnew = self.rout.sub(pyout, dnew)
82 81 ip = globalipapp.get_ipython()
83 82
84 83 # Convert input IPython source into valid Python.
85 84 out = []
86 85 newline = out.append
87 86 for line in dnew.splitlines():
88 87
89 88 mps1 = self.rpyps1.match(line)
90 89 if mps1 is not None:
91 90 prompt, text = mps1.groups()
92 91 newline(prompt+ip.prefilter(text, False))
93 92 continue
94 93
95 94 mps2 = self.rpyps2.match(line)
96 95 if mps2 is not None:
97 96 prompt, text = mps2.groups()
98 97 newline(prompt+ip.prefilter(text, True))
99 98 continue
100 99
101 100 newline(line)
102 101 newline('') # ensure a closing newline, needed by doctest
103 102 #print "PYSRC:", '\n'.join(out) # dbg
104 103 return '\n'.join(out)
105 104
106 105 #return dnew
107 106
108 107
109 108 class Doc2UnitTester(object):
110 109 """Class whose instances act as a decorator for docstring testing.
111 110
112 111 In practice we're only likely to need one instance ever, made below (though
113 112 no attempt is made at turning it into a singleton, there is no need for
114 113 that).
115 114 """
116 115 def __init__(self, verbose=False):
117 116 """New decorator.
118 117
119 118 Parameters
120 119 ----------
121 120
122 121 verbose : boolean, optional (False)
123 122 Passed to the doctest finder and runner to control verbosity.
124 123 """
125 124 self.verbose = verbose
126 125 # We can reuse the same finder for all instances
127 126 self.finder = DocTestFinder(verbose=verbose, recurse=False)
128 127
129 128 def __call__(self, func):
130 129 """Use as a decorator: doctest a function's docstring as a unittest.
131 130
132 131 This version runs normal doctests, but the idea is to make it later run
133 132 ipython syntax instead."""
134 133
135 134 # Capture the enclosing instance with a different name, so the new
136 135 # class below can see it without confusion regarding its own 'self'
137 136 # that will point to the test instance at runtime
138 137 d2u = self
139 138
140 139 # Rewrite the function's docstring to have python syntax
141 140 if func.__doc__ is not None:
142 141 func.__doc__ = ip2py(func.__doc__)
143 142
144 143 # Now, create a tester object that is a real unittest instance, so
145 144 # normal unittest machinery (or Nose, or Trial) can find it.
146 145 class Tester(unittest.TestCase):
147 146 def test(self):
148 147 # Make a new runner per function to be tested
149 148 runner = DocTestRunner(verbose=d2u.verbose)
150 149 list(map(runner.run, d2u.finder.find(func, func.__name__)))  # list() forces the lazy map to execute
151 150 failed = count_failures(runner)
152 151 if failed:
153 152 # Since we only looked at a single function's docstring,
154 153 # failed should contain at most one item. More than that
155 154 # is a case we can't handle and should error out on
156 155 if len(failed) > 1:
157 156 err = "Invalid number of test results: %s" % failed
158 157 raise ValueError(err)
159 158 # Report a normal failure.
160 159 self.fail('failed doctests: %s' % str(failed[0]))
161 160
162 161 # Rename it so test reports have the original signature.
163 162 Tester.__name__ = func.__name__
164 163 return Tester
165 164
166 165
167 166 def ipdocstring(func):
168 167 """Change the function docstring via ip2py.
169 168 """
170 169 if func.__doc__ is not None:
171 170 func.__doc__ = ip2py(func.__doc__)
172 171 return func
173 172
174 173
175 174 # Make an instance of the classes for public use
176 175 ipdoctest = Doc2UnitTester()
177 176 ip2py = IPython2PythonConverter()
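
For illustration, a hedged sketch of the ipdoctest decorator in use; it assumes the global test IPython has already been started via globalipapp.start_ipython(), since ip2py() needs a running shell to prefilter the prompts:

    # Sketch: turn an IPython-style docstring into a unittest-discoverable doctest.
    from IPython.testing import globalipapp
    from IPython.testing.ipunittest import ipdoctest

    globalipapp.start_ipython()   # ip2py() calls globalipapp.get_ipython() during decoration

    @ipdoctest
    def test_addition():
        """
        In [1]: 1 + 1
        Out[1]: 2
        """
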
@@ -1,158 +1,157 b''
1 1 """Simple example using doctests.
2 2
3 3 This file just contains doctests both using plain python and IPython prompts.
4 4 All tests should be loaded by nose.
5 5 """
6 from __future__ import print_function
7 6
8 7 def pyfunc():
9 8 """Some pure python tests...
10 9
11 10 >>> pyfunc()
12 11 'pyfunc'
13 12
14 13 >>> import os
15 14
16 15 >>> 2+3
17 16 5
18 17
19 18 >>> for i in range(3):
20 19 ... print(i, end=' ')
21 20 ... print(i+1, end=' ')
22 21 ...
23 22 0 1 1 2 2 3
24 23 """
25 24 return 'pyfunc'
26 25
27 26 def ipfunc():
28 27 """Some ipython tests...
29 28
30 29 In [1]: import os
31 30
32 31 In [3]: 2+3
33 32 Out[3]: 5
34 33
35 34 In [26]: for i in range(3):
36 35 ....: print(i, end=' ')
37 36 ....: print(i+1, end=' ')
38 37 ....:
39 38 0 1 1 2 2 3
40 39
41 40
42 41 Examples that access the operating system work:
43 42
44 43 In [1]: !echo hello
45 44 hello
46 45
47 46 In [2]: !echo hello > /tmp/foo_iptest
48 47
49 48 In [3]: !cat /tmp/foo_iptest
50 49 hello
51 50
52 51 In [4]: rm -f /tmp/foo_iptest
53 52
54 53 It's OK to use '_' for the last result, but do NOT try to use IPython's
55 54 numbered history of _NN outputs, since those won't exist under the
56 55 doctest environment:
57 56
58 57 In [7]: 'hi'
59 58 Out[7]: 'hi'
60 59
61 60 In [8]: print(repr(_))
62 61 'hi'
63 62
64 63 In [7]: 3+4
65 64 Out[7]: 7
66 65
67 66 In [8]: _+3
68 67 Out[8]: 10
69 68
70 69 In [9]: ipfunc()
71 70 Out[9]: 'ipfunc'
72 71 """
73 72 return 'ipfunc'
74 73
75 74
76 75 def ranfunc():
77 76 """A function with some random output.
78 77
79 78 Normal examples are verified as usual:
80 79 >>> 1+3
81 80 4
82 81
83 82 But if you put '# random' in the output, it is ignored:
84 83 >>> 1+3
85 84 junk goes here... # random
86 85
87 86 >>> 1+2
88 87 again, anything goes #random
89 88 if multiline, the random mark is only needed once.
90 89
91 90 >>> 1+2
92 91 You can also put the random marker at the end:
93 92 # random
94 93
95 94 >>> 1+2
96 95 # random
97 96 .. or at the beginning.
98 97
99 98 More correct input is properly verified:
100 99 >>> ranfunc()
101 100 'ranfunc'
102 101 """
103 102 return 'ranfunc'
104 103
105 104
106 105 def random_all():
107 106 """A function where we ignore the output of ALL examples.
108 107
109 108 Examples:
110 109
111 110 # all-random
112 111
113 112 This mark tells the testing machinery that all subsequent examples should
114 113 be treated as random (ignoring their output). They are still executed,
115 114 so if they raise an error, it will be detected as such, but their
116 115 output is completely ignored.
117 116
118 117 >>> 1+3
119 118 junk goes here...
120 119
121 120 >>> 1+3
122 121 klasdfj;
123 122
124 123 >>> 1+2
125 124 again, anything goes
126 125 blah...
127 126 """
128 127 pass
129 128
130 129 def iprand():
131 130 """Some ipython tests with random output.
132 131
133 132 In [7]: 3+4
134 133 Out[7]: 7
135 134
136 135 In [8]: print('hello')
137 136 world # random
138 137
139 138 In [9]: iprand()
140 139 Out[9]: 'iprand'
141 140 """
142 141 return 'iprand'
143 142
144 143 def iprand_all():
145 144 """Some ipython tests with fully random output.
146 145
147 146 # all-random
148 147
149 148 In [7]: 1
150 149 Out[7]: 99
151 150
152 151 In [8]: print('hello')
153 152 world
154 153
155 154 In [9]: iprand_all()
156 155 Out[9]: 'junk'
157 156 """
158 157 return 'iprand_all'
@@ -1,19 +1,18 b''
1 1 #!/usr/bin/env python
2 2 """Nose-based test runner.
3 3 """
4 from __future__ import print_function
5 4
6 5 from nose.core import main
7 6 from nose.plugins.builtin import plugins
8 7 from nose.plugins.doctests import Doctest
9 8
10 9 from . import ipdoctest
11 10 from .ipdoctest import IPDocTestRunner
12 11
13 12 if __name__ == '__main__':
14 13 print('WARNING: this code is incomplete!')
15 14 print()
16 15
17 16 pp = [x() for x in plugins] # activate all builtin plugins first
18 17 main(testRunner=IPDocTestRunner(),
19 18 plugins=pp+[ipdoctest.IPythonDoctest(),Doctest()])
@@ -1,20 +1,19 b''
1 1 """Simple script to show reference holding behavior.
2 2
3 3 This is used by a companion test case.
4 4 """
5 from __future__ import print_function
6 5
7 6 import gc
8 7
9 8 class C(object):
10 9 def __del__(self):
11 10 pass
12 11 #print 'deleting object...' # dbg
13 12
14 13 if __name__ == '__main__':
15 14 c = C()
16 15
17 16 c_refs = gc.get_referrers(c)
18 17 ref_ids = list(map(id,c_refs))
19 18
20 19 print('c referrers:',list(map(type,c_refs)))
@@ -1,34 +1,33 b''
1 1 """Simple example using doctests.
2 2
3 3 This file just contains doctests both using plain python and IPython prompts.
4 4 All tests should be loaded by nose.
5 5 """
6 from __future__ import print_function
7 6
8 7 def pyfunc():
9 8 """Some pure python tests...
10 9
11 10 >>> pyfunc()
12 11 'pyfunc'
13 12
14 13 >>> import os
15 14
16 15 >>> 2+3
17 16 5
18 17
19 18 >>> for i in range(3):
20 19 ... print(i, end=' ')
21 20 ... print(i+1, end=' ')
22 21 ...
23 22 0 1 1 2 2 3
24 23 """
25 24 return 'pyfunc'
26 25
27 26
28 27 def ipyfunc2():
29 28 """Some pure python tests...
30 29
31 30 >>> 1+1
32 31 2
33 32 """
34 33 return 'pyfunc2'
@@ -1,3 +1,2 b''
1 from __future__ import print_function
2 1 x = 1
3 2 print('x is:',x)
@@ -1,43 +1,19 b''
1 """Decorators marks that a doctest should be skipped, for both python 2 and 3.
1 """Decorators marks that a doctest should be skipped.
2 2
3 3 The IPython.testing.decorators module triggers various extra imports, including
4 4 numpy and sympy if they're present. Since this decorator is used in core parts
5 5 of IPython, it's in a separate module so that running IPython doesn't trigger
6 6 those imports."""
7 7
8 #-----------------------------------------------------------------------------
9 # Copyright (C) 2009-2011 The IPython Development Team
10 #
11 # Distributed under the terms of the BSD License. The full license is in
12 # the file COPYING, distributed as part of this software.
13 #-----------------------------------------------------------------------------
8 # Copyright (C) IPython Development Team
9 # Distributed under the terms of the Modified BSD License.
14 10
15 #-----------------------------------------------------------------------------
16 # Imports
17 #-----------------------------------------------------------------------------
18
19 import sys
20
21 #-----------------------------------------------------------------------------
22 # Decorators
23 #-----------------------------------------------------------------------------
24 11
25 12 def skip_doctest(f):
26 13 """Decorator - mark a function or method for skipping its doctest.
27 14
28 15 This decorator allows you to mark a function whose docstring you wish to
29 16 omit from testing, while preserving the docstring for introspection, help,
30 17 etc."""
31 18 f.skip_doctest = True
32 19 return f
33
34
35 def skip_doctest_py3(f):
36 """Decorator - skip the doctest under Python 3."""
37 f.skip_doctest = (sys.version_info[0] >= 3)
38 return f
39
40 def skip_doctest_py2(f):
41 """Decorator - skip the doctest under Python 3."""
42 f.skip_doctest = (sys.version_info[0] < 3)
43 return f
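
A small, made-up example of the surviving decorator in use: the docstring stays available to help() and introspection, but the test machinery skips it.

    # Sketch: the docstring remains for help()/introspection, but its doctest is skipped.
    from IPython.testing.skipdoctest import skip_doctest

    @skip_doctest
    def hostname():
        """
        >>> hostname()   # output differs per machine, so we don't want it doctested
        """
        import platform
        return platform.node()
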
@@ -1,165 +1,164 b''
1 1 """Tests for the decorators we've created for IPython.
2 2 """
3 from __future__ import print_function
4 3
5 4 # Module imports
6 5 # Std lib
7 6 import inspect
8 7 import sys
9 8
10 9 # Third party
11 10 import nose.tools as nt
12 11
13 12 # Our own
14 13 from IPython.testing import decorators as dec
15 14
16 15 #-----------------------------------------------------------------------------
17 16 # Utilities
18 17
19 18 # Note: copied from OInspect, kept here so the testing stuff doesn't create
20 19 # circular dependencies and is easier to reuse.
21 20 def getargspec(obj):
22 21 """Get the names and default values of a function's arguments.
23 22
24 23 A tuple of four things is returned: (args, varargs, varkw, defaults).
25 24 'args' is a list of the argument names (it may contain nested lists).
26 25 'varargs' and 'varkw' are the names of the * and ** arguments or None.
27 26 'defaults' is an n-tuple of the default values of the last n arguments.
28 27
29 28 Modified version of inspect.getargspec from the Python Standard
30 29 Library."""
31 30
32 31 if inspect.isfunction(obj):
33 32 func_obj = obj
34 33 elif inspect.ismethod(obj):
35 34 func_obj = obj.__func__
36 35 else:
37 36 raise TypeError('arg is not a Python function')
38 37 args, varargs, varkw = inspect.getargs(func_obj.__code__)
39 38 return args, varargs, varkw, func_obj.__defaults__
40 39
41 40 #-----------------------------------------------------------------------------
42 41 # Testing functions
43 42
44 43 @dec.as_unittest
45 44 def trivial():
46 45 """A trivial test"""
47 46 pass
48 47
49 48
50 49 @dec.skip
51 50 def test_deliberately_broken():
52 51 """A deliberately broken test - we want to skip this one."""
53 52 1/0
54 53
55 54 @dec.skip('Testing the skip decorator')
56 55 def test_deliberately_broken2():
57 56 """Another deliberately broken test - we want to skip this one."""
58 57 1/0
59 58
60 59
61 60 # Verify that we can correctly skip the doctest for a function at will, but
62 61 # that the docstring itself is NOT destroyed by the decorator.
63 62 def doctest_bad(x,y=1,**k):
64 63 """A function whose doctest we need to skip.
65 64
66 65 >>> 1+1
67 66 3
68 67 """
69 68 print('x:',x)
70 69 print('y:',y)
71 70 print('k:',k)
72 71
73 72
74 73 def call_doctest_bad():
75 74 """Check that we can still call the decorated functions.
76 75
77 76 >>> doctest_bad(3,y=4)
78 77 x: 3
79 78 y: 4
80 79 k: {}
81 80 """
82 81 pass
83 82
84 83
85 84 def test_skip_dt_decorator():
86 85 """Doctest-skipping decorator should preserve the docstring.
87 86 """
88 87 # Careful: 'check' must be a *verbatim* copy of the doctest_bad docstring!
89 88 check = """A function whose doctest we need to skip.
90 89
91 90 >>> 1+1
92 91 3
93 92 """
94 93 # Fetch the docstring from doctest_bad after decoration.
95 94 val = doctest_bad.__doc__
96 95
97 96 nt.assert_equal(check,val,"doctest_bad docstrings don't match")
98 97
99 98
100 99 # Doctest skipping should work for class methods too
101 100 class FooClass(object):
102 101 """FooClass
103 102
104 103 Example:
105 104
106 105 >>> 1+1
107 106 2
108 107 """
109 108
110 109 def __init__(self,x):
111 110 """Make a FooClass.
112 111
113 112 Example:
114 113
115 114 >>> f = FooClass(3)
116 115 junk
117 116 """
118 117 print('Making a FooClass.')
119 118 self.x = x
120 119
121 120 def bar(self,y):
122 121 """Example:
123 122
124 123 >>> ff = FooClass(3)
125 124 >>> ff.bar(0)
126 125 boom!
127 126 >>> 1/0
128 127 bam!
129 128 """
130 129 return 1/y
131 130
132 131 def baz(self,y):
133 132 """Example:
134 133
135 134 >>> ff2 = FooClass(3)
136 135 Making a FooClass.
137 136 >>> ff2.baz(3)
138 137 True
139 138 """
140 139 return self.x==y
141 140
142 141
143 142 def test_skip_dt_decorator2():
144 143 """Doctest-skipping decorator should preserve function signature.
145 144 """
146 145 # Hardcoded correct answer
147 146 dtargs = (['x', 'y'], None, 'k', (1,))
148 147 # Introspect out the value
149 148 dtargsr = getargspec(doctest_bad)
150 149 assert dtargsr==dtargs, \
151 150 "Incorrectly reconstructed args for doctest_bad: %s" % (dtargsr,)
152 151
153 152
154 153 @dec.skip_linux
155 154 def test_linux():
156 155 nt.assert_false(sys.platform.startswith('linux'),"This test can't run under linux")
157 156
158 157 @dec.skip_win32
159 158 def test_win32():
160 159 nt.assert_not_equal(sys.platform,'win32',"This test can't run under windows")
161 160
162 161 @dec.skip_osx
163 162 def test_osx():
164 163 nt.assert_not_equal(sys.platform,'darwin',"This test can't run under osx")
165 164
@@ -1,140 +1,136 b''
1 1 # encoding: utf-8
2 2 """
3 3 Tests for testing.tools
4 4 """
5 5
6 6 #-----------------------------------------------------------------------------
7 7 # Copyright (C) 2008-2011 The IPython Development Team
8 8 #
9 9 # Distributed under the terms of the BSD License. The full license is in
10 10 # the file COPYING, distributed as part of this software.
11 11 #-----------------------------------------------------------------------------
12 12
13 13 #-----------------------------------------------------------------------------
14 14 # Imports
15 15 #-----------------------------------------------------------------------------
16 from __future__ import with_statement
17 from __future__ import print_function
18 16
19 17 import os
20 18 import unittest
21 19
22 20 import nose.tools as nt
23 21
24 22 from IPython.testing import decorators as dec
25 23 from IPython.testing import tools as tt
26 24
27 25 #-----------------------------------------------------------------------------
28 26 # Tests
29 27 #-----------------------------------------------------------------------------
30 28
31 29 @dec.skip_win32
32 30 def test_full_path_posix():
33 31 spath = '/foo/bar.py'
34 32 result = tt.full_path(spath,['a.txt','b.txt'])
35 33 nt.assert_equal(result, ['/foo/a.txt', '/foo/b.txt'])
36 34 spath = '/foo'
37 35 result = tt.full_path(spath,['a.txt','b.txt'])
38 36 nt.assert_equal(result, ['/a.txt', '/b.txt'])
39 37 result = tt.full_path(spath,'a.txt')
40 38 nt.assert_equal(result, ['/a.txt'])
41 39
42 40
43 41 @dec.skip_if_not_win32
44 42 def test_full_path_win32():
45 43 spath = 'c:\\foo\\bar.py'
46 44 result = tt.full_path(spath,['a.txt','b.txt'])
47 45 nt.assert_equal(result, ['c:\\foo\\a.txt', 'c:\\foo\\b.txt'])
48 46 spath = 'c:\\foo'
49 47 result = tt.full_path(spath,['a.txt','b.txt'])
50 48 nt.assert_equal(result, ['c:\\a.txt', 'c:\\b.txt'])
51 49 result = tt.full_path(spath,'a.txt')
52 50 nt.assert_equal(result, ['c:\\a.txt'])
53 51
54 52
55 53 def test_parser():
56 54 err = ("FAILED (errors=1)", 1, 0)
57 55 fail = ("FAILED (failures=1)", 0, 1)
58 56 both = ("FAILED (errors=1, failures=1)", 1, 1)
59 57 for txt, nerr, nfail in [err, fail, both]:
60 58 nerr1, nfail1 = tt.parse_test_output(txt)
61 59 nt.assert_equal(nerr, nerr1)
62 60 nt.assert_equal(nfail, nfail1)
63 61
64 62
65 63 def test_temp_pyfile():
66 64 src = 'pass\n'
67 65 fname, fh = tt.temp_pyfile(src)
68 66 assert os.path.isfile(fname)
69 67 fh.close()
70 68 with open(fname) as fh2:
71 69 src2 = fh2.read()
72 70 nt.assert_equal(src2, src)
73 71
74 72 class TestAssertPrints(unittest.TestCase):
75 73 def test_passing(self):
76 74 with tt.AssertPrints("abc"):
77 75 print("abcd")
78 76 print("def")
79 77 print(b"ghi")
80 78
81 79 def test_failing(self):
82 80 def func():
83 81 with tt.AssertPrints("abc"):
84 82 print("acd")
85 83 print("def")
86 84 print(b"ghi")
87 85
88 86 self.assertRaises(AssertionError, func)
89 87
90 88
91 89 class Test_ipexec_validate(unittest.TestCase, tt.TempFileMixin):
92 90 def test_main_path(self):
93 91 """Test with only stdout results.
94 92 """
95 93 self.mktmp("print('A')\n"
96 94 "print('B')\n"
97 95 )
98 96 out = "A\nB"
99 97 tt.ipexec_validate(self.fname, out)
100 98
101 99 def test_main_path2(self):
102 100 """Test with only stdout results, expecting windows line endings.
103 101 """
104 102 self.mktmp("print('A')\n"
105 103 "print('B')\n"
106 104 )
107 105 out = "A\r\nB"
108 106 tt.ipexec_validate(self.fname, out)
109 107
110 108 def test_exception_path(self):
111 109 """Test exception path in exception_validate.
112 110 """
113 self.mktmp("from __future__ import print_function\n"
114 "import sys\n"
111 self.mktmp("import sys\n"
115 112 "print('A')\n"
116 113 "print('B')\n"
117 114 "print('C', file=sys.stderr)\n"
118 115 "print('D', file=sys.stderr)\n"
119 116 )
120 117 out = "A\nB"
121 118 tt.ipexec_validate(self.fname, expected_out=out, expected_err="C\nD")
122 119
123 120 def test_exception_path2(self):
124 121 """Test exception path in exception_validate, expecting windows line endings.
125 122 """
126 self.mktmp("from __future__ import print_function\n"
127 "import sys\n"
123 self.mktmp("import sys\n"
128 124 "print('A')\n"
129 125 "print('B')\n"
130 126 "print('C', file=sys.stderr)\n"
131 127 "print('D', file=sys.stderr)\n"
132 128 )
133 129 out = "A\r\nB"
134 130 tt.ipexec_validate(self.fname, expected_out=out, expected_err="C\r\nD")
135 131
136 132
137 133 def tearDown(self):
138 134 # tear down correctly the mixin,
139 135 # unittest.TestCase.tearDown does nothing
140 136 tt.TempFileMixin.tearDown(self)
@@ -1,468 +1,467 b''
1 1 """Generic testing tools.
2 2
3 3 Authors
4 4 -------
5 5 - Fernando Perez <Fernando.Perez@berkeley.edu>
6 6 """
7 7
8 from __future__ import absolute_import
9 8
10 9 #-----------------------------------------------------------------------------
11 10 # Copyright (C) 2009 The IPython Development Team
12 11 #
13 12 # Distributed under the terms of the BSD License. The full license is in
14 13 # the file COPYING, distributed as part of this software.
15 14 #-----------------------------------------------------------------------------
16 15
17 16 #-----------------------------------------------------------------------------
18 17 # Imports
19 18 #-----------------------------------------------------------------------------
20 19
21 20 import os
22 21 import re
23 22 import sys
24 23 import tempfile
25 24
26 25 from contextlib import contextmanager
27 26 from io import StringIO
28 27 from subprocess import Popen, PIPE
29 28
30 29 try:
31 30 # These tools are used by parts of the runtime, so we make the nose
32 31 # dependency optional at this point. Nose is a hard dependency to run the
33 32 # test suite, but NOT to use ipython itself.
34 33 import nose.tools as nt
35 34 has_nose = True
36 35 except ImportError:
37 36 has_nose = False
38 37
39 38 from traitlets.config.loader import Config
40 39 from IPython.utils.process import get_output_error_code
41 40 from IPython.utils.text import list_strings
42 41 from IPython.utils.io import temp_pyfile, Tee
43 42 from IPython.utils import py3compat
44 43 from IPython.utils.encoding import DEFAULT_ENCODING
45 44
46 45 from . import decorators as dec
47 46 from . import skipdoctest
48 47
49 48 #-----------------------------------------------------------------------------
50 49 # Functions and classes
51 50 #-----------------------------------------------------------------------------
52 51
53 52 # The docstring for full_path doctests differently on win32 (different path
54 53 # separator), so just skip the doctest there. The example remains informative.
55 54 doctest_deco = skipdoctest.skip_doctest if sys.platform == 'win32' else dec.null_deco
56 55
57 56 @doctest_deco
58 57 def full_path(startPath,files):
59 58 """Make full paths for all the listed files, based on startPath.
60 59
61 60 Only the base part of startPath is kept, since this routine is typically
62 61 used with a script's ``__file__`` variable as startPath. The base of startPath
63 62 is then prepended to all the listed files, forming the output list.
64 63
65 64 Parameters
66 65 ----------
67 66 startPath : string
68 67 Initial path to use as the base for the results. This path is split
69 68 using os.path.split() and only its first component is kept.
70 69
71 70 files : string or list
72 71 One or more files.
73 72
74 73 Examples
75 74 --------
76 75
77 76 >>> full_path('/foo/bar.py',['a.txt','b.txt'])
78 77 ['/foo/a.txt', '/foo/b.txt']
79 78
80 79 >>> full_path('/foo',['a.txt','b.txt'])
81 80 ['/a.txt', '/b.txt']
82 81
83 82 If a single file is given, the output is still a list::
84 83
85 84 >>> full_path('/foo','a.txt')
86 85 ['/a.txt']
87 86 """
88 87
89 88 files = list_strings(files)
90 89 base = os.path.split(startPath)[0]
91 90 return [ os.path.join(base,f) for f in files ]
92 91
93 92
94 93 def parse_test_output(txt):
95 94 """Parse the output of a test run and return errors, failures.
96 95
97 96 Parameters
98 97 ----------
99 98 txt : str
100 99 Text output of a test run, assumed to contain a line of one of the
101 100 following forms::
102 101
103 102 'FAILED (errors=1)'
104 103 'FAILED (failures=1)'
105 104 'FAILED (errors=1, failures=1)'
106 105
107 106 Returns
108 107 -------
109 108 nerr, nfail
110 109 number of errors and failures.
111 110 """
112 111
113 112 err_m = re.search(r'^FAILED \(errors=(\d+)\)', txt, re.MULTILINE)
114 113 if err_m:
115 114 nerr = int(err_m.group(1))
116 115 nfail = 0
117 116 return nerr, nfail
118 117
119 118 fail_m = re.search(r'^FAILED \(failures=(\d+)\)', txt, re.MULTILINE)
120 119 if fail_m:
121 120 nerr = 0
122 121 nfail = int(fail_m.group(1))
123 122 return nerr, nfail
124 123
125 124 both_m = re.search(r'^FAILED \(errors=(\d+), failures=(\d+)\)', txt,
126 125 re.MULTILINE)
127 126 if both_m:
128 127 nerr = int(both_m.group(1))
129 128 nfail = int(both_m.group(2))
130 129 return nerr, nfail
131 130
132 131 # If the input didn't match any of these forms, assume no error/failures
133 132 return 0, 0
134 133
135 134
136 135 # So nose doesn't think this is a test
137 136 parse_test_output.__test__ = False
138 137
139 138
140 139 def default_argv():
141 140 """Return a valid default argv for creating testing instances of ipython"""
142 141
143 142 return ['--quick', # so no config file is loaded
144 143 # Other defaults to minimize side effects on stdout
145 144 '--colors=NoColor', '--no-term-title','--no-banner',
146 145 '--autocall=0']
147 146
148 147
149 148 def default_config():
150 149 """Return a config object with good defaults for testing."""
151 150 config = Config()
152 151 config.TerminalInteractiveShell.colors = 'NoColor'
153 152 config.TerminalInteractiveShell.term_title = False
154 153 config.TerminalInteractiveShell.autocall = 0
155 154 f = tempfile.NamedTemporaryFile(suffix=u'test_hist.sqlite', delete=False)
156 155 config.HistoryManager.hist_file = f.name
157 156 f.close()
158 157 config.HistoryManager.db_cache_size = 10000
159 158 return config
160 159
161 160
162 161 def get_ipython_cmd(as_string=False):
163 162 """
164 163 Return appropriate IPython command line name. By default, this will return
165 164 a list that can be used with subprocess.Popen, for example, but passing
166 165 `as_string=True` allows for returning the IPython command as a string.
167 166
168 167 Parameters
169 168 ----------
170 169 as_string: bool
171 170 If True, return the command as a single string instead of a list.
172 171 """
173 172 ipython_cmd = [sys.executable, "-m", "IPython"]
174 173
175 174 if as_string:
176 175 ipython_cmd = " ".join(ipython_cmd)
177 176
178 177 return ipython_cmd
179 178
180 179 def ipexec(fname, options=None, commands=()):
181 180 """Utility to call 'ipython filename'.
182 181
183 182 Starts IPython with a minimal and safe configuration to make startup as fast
184 183 as possible.
185 184
186 185 Note that this starts IPython in a subprocess!
187 186
188 187 Parameters
189 188 ----------
190 189 fname : str
191 190 Name of file to be executed (should have .py or .ipy extension).
192 191
193 192 options : optional, list
194 193 Extra command-line flags to be passed to IPython.
195 194
196 195 commands : optional, list
197 196 Commands to send in on stdin
198 197
199 198 Returns
200 199 -------
201 200 (stdout, stderr) of ipython subprocess.
202 201 """
203 202 if options is None: options = []
204 203
205 204 cmdargs = default_argv() + options
206 205
207 206 test_dir = os.path.dirname(__file__)
208 207
209 208 ipython_cmd = get_ipython_cmd()
210 209 # Absolute path for filename
211 210 full_fname = os.path.join(test_dir, fname)
212 211 full_cmd = ipython_cmd + cmdargs + [full_fname]
213 212 env = os.environ.copy()
214 213 # FIXME: ignore all warnings in ipexec while we have shims
215 214 # should we keep suppressing warnings here, even after removing shims?
216 215 env['PYTHONWARNINGS'] = 'ignore'
217 216 # env.pop('PYTHONWARNINGS', None) # Avoid extraneous warnings appearing on stderr
218 217 for k, v in env.items():
219 218 # Debug a bizarre failure we've seen on Windows:
220 219 # TypeError: environment can only contain strings
221 220 if not isinstance(v, str):
222 221 print(k, v)
223 222 p = Popen(full_cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE, env=env)
224 223 out, err = p.communicate(input=py3compat.str_to_bytes('\n'.join(commands)) or None)
225 224 out, err = py3compat.bytes_to_str(out), py3compat.bytes_to_str(err)
226 225 # `import readline` causes 'ESC[?1034h' to be output sometimes,
227 226 # so strip that out before doing comparisons
228 227 if out:
229 228 out = re.sub(r'\x1b\[[^h]+h', '', out)
230 229 return out, err
231 230
232 231
233 232 def ipexec_validate(fname, expected_out, expected_err='',
234 233 options=None, commands=()):
235 234 """Utility to call 'ipython filename' and validate output/error.
236 235
237 236 This function raises an AssertionError if the validation fails.
238 237
239 238 Note that this starts IPython in a subprocess!
240 239
241 240 Parameters
242 241 ----------
243 242 fname : str
244 243 Name of the file to be executed (should have .py or .ipy extension).
245 244
246 245 expected_out : str
247 246 Expected stdout of the process.
248 247
249 248 expected_err : optional, str
250 249 Expected stderr of the process.
251 250
252 251 options : optional, list
253 252 Extra command-line flags to be passed to IPython.
254 253
255 254 Returns
256 255 -------
257 256 None
258 257 """
259 258
260 259 import nose.tools as nt
261 260
262 261 out, err = ipexec(fname, options, commands)
263 262 #print 'OUT', out # dbg
264 263 #print 'ERR', err # dbg
265 264 # If there are any errors, we must check those before stdout, as they may be
266 265 # more informative than simply having an empty stdout.
267 266 if err:
268 267 if expected_err:
269 268 nt.assert_equal("\n".join(err.strip().splitlines()), "\n".join(expected_err.strip().splitlines()))
270 269 else:
271 270 raise ValueError('Running file %r produced error: %r' %
272 271 (fname, err))
273 272 # If no errors or output on stderr was expected, match stdout
274 273 nt.assert_equal("\n".join(out.strip().splitlines()), "\n".join(expected_out.strip().splitlines()))
275 274
276 275
277 276 class TempFileMixin(object):
278 277 """Utility class to create temporary Python/IPython files.
279 278
280 279 Meant as a mixin class for test cases."""
281 280
282 281 def mktmp(self, src, ext='.py'):
283 282 """Make a valid python temp file."""
284 283 fname, f = temp_pyfile(src, ext)
285 284 self.tmpfile = f
286 285 self.fname = fname
287 286
288 287 def tearDown(self):
289 288 if hasattr(self, 'tmpfile'):
290 289 # If the tmpfile wasn't made because of skipped tests, like in
292 291 # win32, there's nothing to clean up.
292 291 self.tmpfile.close()
293 292 try:
294 293 os.unlink(self.fname)
295 294 except:
296 295 # On Windows, even though we close the file, we still can't
297 296 # delete it; it is not clear why.
298 297 pass
299 298
300 299 def __enter__(self):
301 300 return self
302 301
303 302 def __exit__(self, exc_type, exc_value, traceback):
304 303 self.tearDown()
305 304
306 305
307 306 pair_fail_msg = ("Testing {0}\n\n"
308 307 "In:\n"
309 308 " {1!r}\n"
310 309 "Expected:\n"
311 310 " {2!r}\n"
312 311 "Got:\n"
313 312 " {3!r}\n")
314 313 def check_pairs(func, pairs):
315 314 """Utility function for the common case of checking a function with a
316 315 sequence of input/output pairs.
317 316
318 317 Parameters
319 318 ----------
320 319 func : callable
321 320 The function to be tested. Should accept a single argument.
322 321 pairs : iterable
323 322 A list of (input, expected_output) tuples.
324 323
325 324 Returns
326 325 -------
327 326 None. Raises an AssertionError if any output does not match the expected
328 327 value.
329 328 """
330 329 name = getattr(func, "func_name", getattr(func, "__name__", "<unknown>"))
331 330 for inp, expected in pairs:
332 331 out = func(inp)
333 332 assert out == expected, pair_fail_msg.format(name, inp, expected, out)
334 333
335 334
336 335 if py3compat.PY3:
337 336 MyStringIO = StringIO
338 337 else:
339 338 # In Python 2, stdout/stderr can have either bytes or unicode written to them,
340 339 # so we need a class that can handle both.
341 340 class MyStringIO(StringIO):
342 341 def write(self, s):
343 342 s = py3compat.cast_unicode(s, encoding=DEFAULT_ENCODING)
344 343 super(MyStringIO, self).write(s)
345 344
346 345 _re_type = type(re.compile(r''))
347 346
348 347 notprinted_msg = """Did not find {0!r} in printed output (on {1}):
349 348 -------
350 349 {2!s}
351 350 -------
352 351 """
353 352
354 353 class AssertPrints(object):
355 354 """Context manager for testing that code prints certain text.
356 355
357 356 Examples
358 357 --------
359 358 >>> with AssertPrints("abc", suppress=False):
360 359 ... print("abcd")
361 360 ... print("def")
362 361 ...
363 362 abcd
364 363 def
365 364 """
366 365 def __init__(self, s, channel='stdout', suppress=True):
367 366 self.s = s
368 367 if isinstance(self.s, (py3compat.string_types, _re_type)):
369 368 self.s = [self.s]
370 369 self.channel = channel
371 370 self.suppress = suppress
372 371
373 372 def __enter__(self):
374 373 self.orig_stream = getattr(sys, self.channel)
375 374 self.buffer = MyStringIO()
376 375 self.tee = Tee(self.buffer, channel=self.channel)
377 376 setattr(sys, self.channel, self.buffer if self.suppress else self.tee)
378 377
379 378 def __exit__(self, etype, value, traceback):
380 379 try:
381 380 if value is not None:
382 381 # If an error was raised, don't check anything else
383 382 return False
384 383 self.tee.flush()
385 384 setattr(sys, self.channel, self.orig_stream)
386 385 printed = self.buffer.getvalue()
387 386 for s in self.s:
388 387 if isinstance(s, _re_type):
389 388 assert s.search(printed), notprinted_msg.format(s.pattern, self.channel, printed)
390 389 else:
391 390 assert s in printed, notprinted_msg.format(s, self.channel, printed)
392 391 return False
393 392 finally:
394 393 self.tee.close()
395 394
396 395 printed_msg = """Found {0!r} in printed output (on {1}):
397 396 -------
398 397 {2!s}
399 398 -------
400 399 """
401 400
402 401 class AssertNotPrints(AssertPrints):
403 402 """Context manager for checking that certain output *isn't* produced.
404 403
405 404 Counterpart of AssertPrints"""
406 405 def __exit__(self, etype, value, traceback):
407 406 try:
408 407 if value is not None:
409 408 # If an error was raised, don't check anything else
410 409 self.tee.close()
411 410 return False
412 411 self.tee.flush()
413 412 setattr(sys, self.channel, self.orig_stream)
414 413 printed = self.buffer.getvalue()
415 414 for s in self.s:
416 415 if isinstance(s, _re_type):
417 416 assert not s.search(printed), printed_msg.format(
418 417 s.pattern, self.channel, printed)
419 418 else:
420 419 assert s not in printed, printed_msg.format(
421 420 s, self.channel, printed)
422 421 return False
423 422 finally:
424 423 self.tee.close()
425 424
426 425 @contextmanager
427 426 def mute_warn():
428 427 from IPython.utils import warn
429 428 save_warn = warn.warn
430 429 warn.warn = lambda *a, **kw: None
431 430 try:
432 431 yield
433 432 finally:
434 433 warn.warn = save_warn
435 434
436 435 @contextmanager
437 436 def make_tempfile(name):
438 437 """ Create an empty, named, temporary file for the duration of the context.
439 438 """
440 439 f = open(name, 'w')
441 440 f.close()
442 441 try:
443 442 yield
444 443 finally:
445 444 os.unlink(name)
446 445
447 446
448 447 def help_output_test(subcommand=''):
449 448 """test that `ipython [subcommand] -h` works"""
450 449 cmd = get_ipython_cmd() + [subcommand, '-h']
451 450 out, err, rc = get_output_error_code(cmd)
452 451 nt.assert_equal(rc, 0, err)
453 452 nt.assert_not_in("Traceback", err)
454 453 nt.assert_in("Options", out)
455 454 nt.assert_in("--help-all", out)
456 455 return out, err
457 456
458 457
459 458 def help_all_output_test(subcommand=''):
460 459 """test that `ipython [subcommand] --help-all` works"""
461 460 cmd = get_ipython_cmd() + [subcommand, '--help-all']
462 461 out, err, rc = get_output_error_code(cmd)
463 462 nt.assert_equal(rc, 0, err)
464 463 nt.assert_not_in("Traceback", err)
465 464 nt.assert_in("Options", out)
466 465 nt.assert_in("Class parameters", out)
467 466 return out, err
468 467
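As a quick illustration of the plainer helpers defined above, a sketch of parse_test_output and check_pairs in use (the summary string is an invented test-runner footer)::

    from IPython.testing import tools as tt

    # parse_test_output() scans a test-runner summary for the FAILED line and
    # returns the (errors, failures) counts.
    summary = "Ran 10 tests in 0.5s\n\nFAILED (errors=2, failures=1)\n"
    assert tt.parse_test_output(summary) == (2, 1)

    # check_pairs() feeds each input to the function and raises AssertionError
    # with a readable message on the first mismatch.
    tt.check_pairs(str.upper, [("abc", "ABC"), ("ipython", "IPYTHON")])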
@@ -1,327 +1,324 b''
1 1 # -*- coding: utf-8 -*-
2 2 """
3 3 Class and program to colorize python source code for ANSI terminals.
4 4
5 5 Based on an HTML code highlighter by Jurgen Hermann found at:
6 6 http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52298
7 7
8 8 Modifications by Fernando Perez (fperez@colorado.edu).
9 9
10 10 Information on the original HTML highlighter follows:
11 11
12 12 MoinMoin - Python Source Parser
13 13
14 14 Title: Colorize Python source using the built-in tokenizer
15 15
16 16 Submitter: Jurgen Hermann
17 17 Last Updated: 2001/04/06
18 18
19 19 Version no: 1.2
20 20
21 21 Description:
22 22
23 23 This code is part of MoinMoin (http://moin.sourceforge.net/) and converts
24 24 Python source code to HTML markup, rendering comments, keywords,
25 25 operators, numeric and string literals in different colors.
26 26
27 27 It shows how to use the built-in keyword, token and tokenize modules to
28 28 scan Python source code and re-emit it with no changes to its original
29 29 formatting (which is the hard part).
30 30 """
31 from __future__ import print_function
32 from __future__ import absolute_import
33 from __future__ import unicode_literals
34 31
35 32 __all__ = ['ANSICodeColors','Parser']
36 33
37 34 _scheme_default = 'Linux'
38 35
39 36
40 37 # Imports
41 38 import keyword
42 39 import os
43 40 import sys
44 41 import token
45 42 import tokenize
46 43
47 44 generate_tokens = tokenize.generate_tokens
48 45
49 46 from IPython.utils.coloransi import TermColors, InputTermColors, ColorScheme, ColorSchemeTable
50 47 from IPython.utils.py3compat import PY3
51 48
52 49 from .colorable import Colorable
53 50
54 51 if PY3:
55 52 from io import StringIO
56 53 else:
57 54 from StringIO import StringIO
58 55
59 56 #############################################################################
60 57 ### Python Source Parser (does Highlighting)
61 58 #############################################################################
62 59
63 60 _KEYWORD = token.NT_OFFSET + 1
64 61 _TEXT = token.NT_OFFSET + 2
65 62
66 63 #****************************************************************************
67 64 # Builtin color schemes
68 65
69 66 Colors = TermColors # just a shorthand
70 67
71 68 # Build a few color schemes
72 69 NoColor = ColorScheme(
73 70 'NoColor',{
74 71 'header' : Colors.NoColor,
75 72 token.NUMBER : Colors.NoColor,
76 73 token.OP : Colors.NoColor,
77 74 token.STRING : Colors.NoColor,
78 75 tokenize.COMMENT : Colors.NoColor,
79 76 token.NAME : Colors.NoColor,
80 77 token.ERRORTOKEN : Colors.NoColor,
81 78
82 79 _KEYWORD : Colors.NoColor,
83 80 _TEXT : Colors.NoColor,
84 81
85 82 'in_prompt' : InputTermColors.NoColor, # Input prompt
86 83 'in_number' : InputTermColors.NoColor, # Input prompt number
87 84 'in_prompt2' : InputTermColors.NoColor, # Continuation prompt
88 85 'in_normal' : InputTermColors.NoColor, # color off (usu. Colors.Normal)
89 86
90 87 'out_prompt' : Colors.NoColor, # Output prompt
91 88 'out_number' : Colors.NoColor, # Output prompt number
92 89
93 90 'normal' : Colors.NoColor # color off (usu. Colors.Normal)
94 91 } )
95 92
96 93 LinuxColors = ColorScheme(
97 94 'Linux',{
98 95 'header' : Colors.LightRed,
99 96 token.NUMBER : Colors.LightCyan,
100 97 token.OP : Colors.Yellow,
101 98 token.STRING : Colors.LightBlue,
102 99 tokenize.COMMENT : Colors.LightRed,
103 100 token.NAME : Colors.Normal,
104 101 token.ERRORTOKEN : Colors.Red,
105 102
106 103 _KEYWORD : Colors.LightGreen,
107 104 _TEXT : Colors.Yellow,
108 105
109 106 'in_prompt' : InputTermColors.Green,
110 107 'in_number' : InputTermColors.LightGreen,
111 108 'in_prompt2' : InputTermColors.Green,
112 109 'in_normal' : InputTermColors.Normal, # color off (usu. Colors.Normal)
113 110
114 111 'out_prompt' : Colors.Red,
115 112 'out_number' : Colors.LightRed,
116 113
117 114 'normal' : Colors.Normal # color off (usu. Colors.Normal)
118 115 } )
119 116
120 117 NeutralColors = ColorScheme(
121 118 'Neutral',{
122 119 'header' : Colors.Red,
123 120 token.NUMBER : Colors.Cyan,
124 121 token.OP : Colors.Blue,
125 122 token.STRING : Colors.Blue,
126 123 tokenize.COMMENT : Colors.Red,
127 124 token.NAME : Colors.Normal,
128 125 token.ERRORTOKEN : Colors.Red,
129 126
130 127 _KEYWORD : Colors.Green,
131 128 _TEXT : Colors.Blue,
132 129
133 130 'in_prompt' : InputTermColors.Blue,
134 131 'in_number' : InputTermColors.LightBlue,
135 132 'in_prompt2' : InputTermColors.Blue,
136 133 'in_normal' : InputTermColors.Normal, # color off (usu. Colors.Normal)
137 134
138 135 'out_prompt' : Colors.Red,
139 136 'out_number' : Colors.LightRed,
140 137
141 138 'normal' : Colors.Normal # color off (usu. Colors.Normal)
142 139 } )
143 140
144 141 # Hack: the 'neutral' colours are not very visible on a dark background on
145 142 # Windows. Since Windows command prompts have a dark background by default, and
146 143 # relatively few users are likely to alter that, we will use the 'Linux' colours,
147 144 # designed for a dark background, as the default on Windows. Changing it here
148 145 # avoids affecting the prompt colours rendered by prompt_toolkit, where the
149 146 # neutral defaults do work OK.
150 147
151 148 if os.name == 'nt':
152 149 NeutralColors = LinuxColors.copy(name='Neutral')
153 150
154 151 LightBGColors = ColorScheme(
155 152 'LightBG',{
156 153 'header' : Colors.Red,
157 154 token.NUMBER : Colors.Cyan,
158 155 token.OP : Colors.Blue,
159 156 token.STRING : Colors.Blue,
160 157 tokenize.COMMENT : Colors.Red,
161 158 token.NAME : Colors.Normal,
162 159 token.ERRORTOKEN : Colors.Red,
163 160
164 161
165 162 _KEYWORD : Colors.Green,
166 163 _TEXT : Colors.Blue,
167 164
168 165 'in_prompt' : InputTermColors.Blue,
169 166 'in_number' : InputTermColors.LightBlue,
170 167 'in_prompt2' : InputTermColors.Blue,
171 168 'in_normal' : InputTermColors.Normal, # color off (usu. Colors.Normal)
172 169
173 170 'out_prompt' : Colors.Red,
174 171 'out_number' : Colors.LightRed,
175 172
176 173 'normal' : Colors.Normal # color off (usu. Colors.Normal)
177 174 } )
178 175
179 176 # Build table of color schemes (needed by the parser)
180 177 ANSICodeColors = ColorSchemeTable([NoColor,LinuxColors,LightBGColors, NeutralColors],
181 178 _scheme_default)
182 179
183 180 Undefined = object()
184 181
185 182 class Parser(Colorable):
186 183 """ Format colored Python source.
187 184 """
188 185
189 186 def __init__(self, color_table=None, out = sys.stdout, parent=None, style=None):
190 187 """ Create a parser with a specified color table and output channel.
191 188
192 189 Call format() to process code.
193 190 """
194 191
195 192 super(Parser, self).__init__(parent=parent)
196 193
197 194 self.color_table = color_table or ANSICodeColors
198 195 self.out = out
199 196 if not style:
200 197 self.style = self.default_style
201 198 else:
202 199 self.style = style
203 200
204 201
205 202 def format(self, raw, out=None, scheme=Undefined):
206 203 import warnings
207 204 if scheme is not Undefined:
208 205 warnings.warn('The `scheme` argument of IPython.utils.PyColorize:Parser.format is deprecated since IPython 6.0.'
209 206 ' It will have no effect. Set the parser `style` directly.',
210 207 stacklevel=2)
211 208 return self.format2(raw, out)[0]
212 209
213 210 def format2(self, raw, out = None):
214 211 """ Parse and send the colored source.
215 212
216 213 If out and scheme are not specified, the defaults (given to
217 214 constructor) are used.
218 215
219 216 out should be a file-type object. Optionally, out can be given as the
220 217 string 'str' and the parser will automatically return the output in a
221 218 string."""
222 219
223 220 string_output = 0
224 221 if out == 'str' or self.out == 'str' or \
225 222 isinstance(self.out, StringIO):
226 223 # XXX - I don't really like this state handling logic, but at this
227 224 # point I don't want to make major changes, so adding the
228 225 # isinstance() check is the simplest I can do to ensure correct
229 226 # behavior.
230 227 out_old = self.out
231 228 self.out = StringIO()
232 229 string_output = 1
233 230 elif out is not None:
234 231 self.out = out
235 232
236 233 # Fast return of the unmodified input for NoColor scheme
237 234 if self.style == 'NoColor':
238 235 error = False
239 236 self.out.write(raw)
240 237 if string_output:
241 238 return raw,error
242 239 else:
243 240 return None,error
244 241
245 242 # local shorthands
246 243 colors = self.color_table[self.style].colors
247 244 self.colors = colors # put in object so __call__ sees it
248 245
249 246 # Remove trailing whitespace and normalize tabs
250 247 self.raw = raw.expandtabs().rstrip()
251 248
252 249 # store line offsets in self.lines
253 250 self.lines = [0, 0]
254 251 pos = 0
255 252 raw_find = self.raw.find
256 253 lines_append = self.lines.append
257 254 while 1:
258 255 pos = raw_find('\n', pos) + 1
259 256 if not pos: break
260 257 lines_append(pos)
261 258 lines_append(len(self.raw))
262 259
263 260 # parse the source and write it
264 261 self.pos = 0
265 262 text = StringIO(self.raw)
266 263
267 264 error = False
268 265 try:
269 266 for atoken in generate_tokens(text.readline):
270 267 self(*atoken)
271 268 except tokenize.TokenError as ex:
272 269 msg = ex.args[0]
273 270 line = ex.args[1][0]
274 271 self.out.write("%s\n\n*** ERROR: %s%s%s\n" %
275 272 (colors[token.ERRORTOKEN],
276 273 msg, self.raw[self.lines[line]:],
277 274 colors.normal)
278 275 )
279 276 error = True
280 277 self.out.write(colors.normal+'\n')
281 278 if string_output:
282 279 output = self.out.getvalue()
283 280 self.out = out_old
284 281 return (output, error)
285 282 return (None, error)
286 283
287 284 def __call__(self, toktype, toktext, start_pos, end_pos, line):
288 285 """ Token handler, with syntax highlighting."""
289 286 (srow,scol) = start_pos
290 287 (erow,ecol) = end_pos
291 288 colors = self.colors
292 289 owrite = self.out.write
293 290
294 291 # line separator, so this works across platforms
295 292 linesep = os.linesep
296 293
297 294 # calculate new positions
298 295 oldpos = self.pos
299 296 newpos = self.lines[srow] + scol
300 297 self.pos = newpos + len(toktext)
301 298
302 299 # send the original whitespace, if needed
303 300 if newpos > oldpos:
304 301 owrite(self.raw[oldpos:newpos])
305 302
306 303 # skip indenting tokens
307 304 if toktype in [token.INDENT, token.DEDENT]:
308 305 self.pos = newpos
309 306 return
310 307
311 308 # map token type to a color group
312 309 if token.LPAR <= toktype <= token.OP:
313 310 toktype = token.OP
314 311 elif toktype == token.NAME and keyword.iskeyword(toktext):
315 312 toktype = _KEYWORD
316 313 color = colors.get(toktype, colors[_TEXT])
317 314
318 315 #print '<%s>' % toktext, # dbg
319 316
320 317 # Triple quoted strings must be handled carefully so that backtracking
321 318 # in pagers works correctly. We need color terminators on _each_ line.
322 319 if linesep in toktext:
323 320 toktext = toktext.replace(linesep, '%s%s%s' %
324 321 (colors.normal,linesep,color))
325 322
326 323 # send text
327 324 owrite('%s%s%s' % (color,toktext,colors.normal))
@@ -1,225 +1,224 b''
1 1 """Posix-specific implementation of process utilities.
2 2
3 3 This file is only meant to be imported by process.py, not by end-users.
4 4 """
5 5
6 6 #-----------------------------------------------------------------------------
7 7 # Copyright (C) 2010-2011 The IPython Development Team
8 8 #
9 9 # Distributed under the terms of the BSD License. The full license is in
10 10 # the file COPYING, distributed as part of this software.
11 11 #-----------------------------------------------------------------------------
12 12
13 13 #-----------------------------------------------------------------------------
14 14 # Imports
15 15 #-----------------------------------------------------------------------------
16 from __future__ import print_function
17 16
18 17 # Stdlib
19 18 import errno
20 19 import os
21 20 import subprocess as sp
22 21 import sys
23 22
24 23 import pexpect
25 24
26 25 # Our own
27 26 from ._process_common import getoutput, arg_split
28 27 from IPython.utils import py3compat
29 28 from IPython.utils.encoding import DEFAULT_ENCODING
30 29
31 30 #-----------------------------------------------------------------------------
32 31 # Function definitions
33 32 #-----------------------------------------------------------------------------
34 33
35 34 def _find_cmd(cmd):
36 35 """Find the full path to a command using which."""
37 36
38 37 path = sp.Popen(['/usr/bin/env', 'which', cmd],
39 38 stdout=sp.PIPE, stderr=sp.PIPE).communicate()[0]
40 39 return py3compat.bytes_to_str(path)
41 40
42 41
43 42 class ProcessHandler(object):
44 43 """Execute subprocesses under the control of pexpect.
45 44 """
46 45 # Timeout in seconds to wait on each reading of the subprocess' output.
47 46 # This should not be set too low to avoid cpu overusage from our side,
48 47 # since we read in a loop whose period is controlled by this timeout.
49 48 read_timeout = 0.05
50 49
51 50 # Timeout to give a process if we receive SIGINT, between sending the
52 51 # SIGINT to the process and forcefully terminating it.
53 52 terminate_timeout = 0.2
54 53
55 54 # File object where stdout and stderr of the subprocess will be written
56 55 logfile = None
57 56
58 57 # Shell to call for subprocesses to execute
59 58 _sh = None
60 59
61 60 @property
62 61 def sh(self):
63 62 if self._sh is None:
64 63 self._sh = pexpect.which('sh')
65 64 if self._sh is None:
66 65 raise OSError('"sh" shell not found')
67 66
68 67 return self._sh
69 68
70 69 def __init__(self, logfile=None, read_timeout=None, terminate_timeout=None):
71 70 """Arguments are used for pexpect calls."""
72 71 self.read_timeout = (ProcessHandler.read_timeout if read_timeout is
73 72 None else read_timeout)
74 73 self.terminate_timeout = (ProcessHandler.terminate_timeout if
75 74 terminate_timeout is None else
76 75 terminate_timeout)
77 76 self.logfile = sys.stdout if logfile is None else logfile
78 77
79 78 def getoutput(self, cmd):
80 79 """Run a command and return its stdout/stderr as a string.
81 80
82 81 Parameters
83 82 ----------
84 83 cmd : str
85 84 A command to be executed in the system shell.
86 85
87 86 Returns
88 87 -------
89 88 output : str
90 89 A string containing the combination of stdout and stderr from the
91 90 subprocess, in whatever order the subprocess originally wrote to its
92 91 file descriptors (so the order of the information in this string is the
93 92 correct order as would be seen if running the command in a terminal).
94 93 """
95 94 try:
96 95 return pexpect.run(self.sh, args=['-c', cmd]).replace('\r\n', '\n')
97 96 except KeyboardInterrupt:
98 97 print('^C', file=sys.stderr, end='')
99 98
100 99 def getoutput_pexpect(self, cmd):
101 100 """Run a command and return its stdout/stderr as a string.
102 101
103 102 Parameters
104 103 ----------
105 104 cmd : str
106 105 A command to be executed in the system shell.
107 106
108 107 Returns
109 108 -------
110 109 output : str
111 110 A string containing the combination of stdout and stderr from the
112 111 subprocess, in whatever order the subprocess originally wrote to its
113 112 file descriptors (so the order of the information in this string is the
114 113 correct order as would be seen if running the command in a terminal).
115 114 """
116 115 try:
117 116 return pexpect.run(self.sh, args=['-c', cmd]).replace('\r\n', '\n')
118 117 except KeyboardInterrupt:
119 118 print('^C', file=sys.stderr, end='')
120 119
121 120 def system(self, cmd):
122 121 """Execute a command in a subshell.
123 122
124 123 Parameters
125 124 ----------
126 125 cmd : str
127 126 A command to be executed in the system shell.
128 127
129 128 Returns
130 129 -------
131 130 int : child's exitstatus
132 131 """
133 132 # Get likely encoding for the output.
134 133 enc = DEFAULT_ENCODING
135 134
136 135 # Patterns to match on the output, for pexpect. We read input and
137 136 # allow either a short timeout or EOF
138 137 patterns = [pexpect.TIMEOUT, pexpect.EOF]
139 138 # the index of the EOF pattern in the list.
140 139 # even though we know it's 1, this call means we don't have to worry if
141 140 # we change the above list, and forget to change this value:
142 141 EOF_index = patterns.index(pexpect.EOF)
143 142 # The size of the output stored so far in the process output buffer.
144 143 # Since pexpect only appends to this buffer, each time we print we
145 144 # record how far we've printed, so that next time we only print *new*
146 145 # content from the buffer.
147 146 out_size = 0
148 147 try:
149 148 # Since we're not really searching the buffer for text patterns, we
150 149 # can set pexpect's search window to be tiny and it won't matter.
151 150 # We only search for the 'patterns' timeout or EOF, which aren't in
152 151 # the text itself.
153 152 #child = pexpect.spawn(pcmd, searchwindowsize=1)
154 153 if hasattr(pexpect, 'spawnb'):
155 154 child = pexpect.spawnb(self.sh, args=['-c', cmd]) # Pexpect-U
156 155 else:
157 156 child = pexpect.spawn(self.sh, args=['-c', cmd]) # Vanilla Pexpect
158 157 flush = sys.stdout.flush
159 158 while True:
160 159 # res is the index of the pattern that caused the match, so we
161 160 # know whether we've finished (if we matched EOF) or not
162 161 res_idx = child.expect_list(patterns, self.read_timeout)
163 162 print(child.before[out_size:].decode(enc, 'replace'), end='')
164 163 flush()
165 164 if res_idx==EOF_index:
166 165 break
167 166 # Update the pointer to what we've already printed
168 167 out_size = len(child.before)
169 168 except KeyboardInterrupt:
170 169 # We need to send ^C to the process. The ascii code for '^C' is 3
171 170 # (the character is known as ETX for 'End of Text', see
172 171 # curses.ascii.ETX).
173 172 child.sendline(chr(3))
174 173 # Read and print any more output the program might produce on its
175 174 # way out.
176 175 try:
177 176 out_size = len(child.before)
178 177 child.expect_list(patterns, self.terminate_timeout)
179 178 print(child.before[out_size:].decode(enc, 'replace'), end='')
180 179 sys.stdout.flush()
181 180 except KeyboardInterrupt:
182 181 # Impatient users tend to type it multiple times
183 182 pass
184 183 finally:
185 184 # Ensure the subprocess really is terminated
186 185 child.terminate(force=True)
187 186 # add isalive check, to ensure exitstatus is set:
188 187 child.isalive()
189 188
190 189 # We follow the subprocess pattern, returning either the exit status
191 190 # as a positive number, or the terminating signal as a negative
192 191 # number.
193 192 # On Linux, sh returns 128+n for signals terminating child processes;
194 193 # on BSD (OS X), the signal code is set instead
195 194 if child.exitstatus is None:
196 195 # on WIFSIGNALED, pexpect sets signalstatus, leaving exitstatus=None
197 196 if child.signalstatus is None:
198 197 # this condition may never occur,
199 198 # but let's be certain we always return an integer.
200 199 return 0
201 200 return -child.signalstatus
202 201 if child.exitstatus > 128:
203 202 return -(child.exitstatus - 128)
204 203 return child.exitstatus
205 204
206 205
207 206 # Expose system() through a functional interface for outside use. Note that we
208 207 # use getoutput() from the _common utils, which is built on top of popen(). Using
209 208 # pexpect to get subprocess output produces difficult-to-parse output, since
210 209 # programs think they are talking to a tty and produce highly formatted output
211 210 # (ls is a good example) that makes them hard to parse.
212 211 system = ProcessHandler().system
213 212
214 213 def check_pid(pid):
215 214 try:
216 215 os.kill(pid, 0)
217 216 except OSError as err:
218 217 if err.errno == errno.ESRCH:
219 218 return False
220 219 elif err.errno == errno.EPERM:
221 220 # Don't have permission to signal the process - probably means it exists
222 221 return True
223 222 raise
224 223 else:
225 224 return True
@@ -1,192 +1,191 b''
1 1 """Windows-specific implementation of process utilities.
2 2
3 3 This file is only meant to be imported by process.py, not by end-users.
4 4 """
5 5
6 6 #-----------------------------------------------------------------------------
7 7 # Copyright (C) 2010-2011 The IPython Development Team
8 8 #
9 9 # Distributed under the terms of the BSD License. The full license is in
10 10 # the file COPYING, distributed as part of this software.
11 11 #-----------------------------------------------------------------------------
12 12
13 13 #-----------------------------------------------------------------------------
14 14 # Imports
15 15 #-----------------------------------------------------------------------------
16 from __future__ import print_function
17 16
18 17 # stdlib
19 18 import os
20 19 import sys
21 20 import ctypes
22 21
23 22 from ctypes import c_int, POINTER
24 23 from ctypes.wintypes import LPCWSTR, HLOCAL
25 24 from subprocess import STDOUT
26 25
27 26 # our own imports
28 27 from ._process_common import read_no_interrupt, process_handler, arg_split as py_arg_split
29 28 from . import py3compat
30 29 from .encoding import DEFAULT_ENCODING
31 30
32 31 #-----------------------------------------------------------------------------
33 32 # Function definitions
34 33 #-----------------------------------------------------------------------------
35 34
36 35 class AvoidUNCPath(object):
37 36 """A context manager to protect command execution from UNC paths.
38 37
39 38 In the Win32 API, commands can't be invoked with the cwd being a UNC path.
40 39 This context manager temporarily changes directory to the 'C:' drive on
41 40 entering, and restores the original working directory on exit.
42 41
43 42 The context manager returns the starting working directory *if* it made a
44 43 change and None otherwise, so that users can apply the necessary adjustment
45 44 to their system calls in the event of a change.
46 45
47 46 Examples
48 47 --------
49 48 ::
50 49 cmd = 'dir'
51 50 with AvoidUNCPath() as path:
52 51 if path is not None:
53 52 cmd = '"pushd %s &&"%s' % (path, cmd)
54 53 os.system(cmd)
55 54 """
56 55 def __enter__(self):
57 56 self.path = py3compat.getcwd()
58 57 self.is_unc_path = self.path.startswith(r"\\")
59 58 if self.is_unc_path:
60 59 # change to c drive (as cmd.exe cannot handle UNC addresses)
61 60 os.chdir("C:")
62 61 return self.path
63 62 else:
64 63 # We return None to signal that there was no change in the working
65 64 # directory
66 65 return None
67 66
68 67 def __exit__(self, exc_type, exc_value, traceback):
69 68 if self.is_unc_path:
70 69 os.chdir(self.path)
71 70
72 71
73 72 def _find_cmd(cmd):
74 73 """Find the full path to a .bat or .exe using the win32api module."""
75 74 try:
76 75 from win32api import SearchPath
77 76 except ImportError:
78 77 raise ImportError('you need to have pywin32 installed for this to work')
79 78 else:
80 79 PATH = os.environ['PATH']
81 80 extensions = ['.exe', '.com', '.bat', '.py']
82 81 path = None
83 82 for ext in extensions:
84 83 try:
85 84 path = SearchPath(PATH, cmd, ext)[0]
86 85 except:
87 86 pass
88 87 if path is None:
89 88 raise OSError("command %r not found" % cmd)
90 89 else:
91 90 return path
92 91
93 92
94 93 def _system_body(p):
95 94 """Callback for _system."""
96 95 enc = DEFAULT_ENCODING
97 96 for line in read_no_interrupt(p.stdout).splitlines():
98 97 line = line.decode(enc, 'replace')
99 98 print(line, file=sys.stdout)
100 99 for line in read_no_interrupt(p.stderr).splitlines():
101 100 line = line.decode(enc, 'replace')
102 101 print(line, file=sys.stderr)
103 102
104 103 # Wait to finish for returncode
105 104 return p.wait()
106 105
107 106
108 107 def system(cmd):
109 108 """Win32 version of os.system() that works with network shares.
110 109
111 110 Note that this implementation returns None, as meant for use in IPython.
112 111
113 112 Parameters
114 113 ----------
115 114 cmd : str or list
116 115 A command to be executed in the system shell.
117 116
118 117 Returns
119 118 -------
120 119 None : we explicitly do NOT return the subprocess status code, as this
121 120 utility is meant to be used extensively in IPython, where any return value
122 121 would trigger :func:`sys.displayhook` calls.
123 122 """
124 123 # The controller provides interactivity with both
125 124 # stdin and stdout
126 125 #import _process_win32_controller
127 126 #_process_win32_controller.system(cmd)
128 127
129 128 with AvoidUNCPath() as path:
130 129 if path is not None:
131 130 cmd = '"pushd %s &&"%s' % (path, cmd)
132 131 return process_handler(cmd, _system_body)
133 132
134 133 def getoutput(cmd):
135 134 """Return standard output of executing cmd in a shell.
136 135
137 136 Accepts the same arguments as os.system().
138 137
139 138 Parameters
140 139 ----------
141 140 cmd : str or list
142 141 A command to be executed in the system shell.
143 142
144 143 Returns
145 144 -------
146 145 stdout : str
147 146 """
148 147
149 148 with AvoidUNCPath() as path:
150 149 if path is not None:
151 150 cmd = '"pushd %s &&"%s' % (path, cmd)
152 151 out = process_handler(cmd, lambda p: p.communicate()[0], STDOUT)
153 152
154 153 if out is None:
155 154 out = b''
156 155 return py3compat.bytes_to_str(out)
157 156
158 157 try:
159 158 CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW
160 159 CommandLineToArgvW.arg_types = [LPCWSTR, POINTER(c_int)]
161 160 CommandLineToArgvW.restype = POINTER(LPCWSTR)
162 161 LocalFree = ctypes.windll.kernel32.LocalFree
163 162 LocalFree.res_type = HLOCAL
164 163 LocalFree.arg_types = [HLOCAL]
165 164
166 165 def arg_split(commandline, posix=False, strict=True):
167 166 """Split a command line's arguments in a shell-like manner.
168 167
169 168 This is a special version for Windows that uses a ctypes call to CommandLineToArgvW
170 169 to do the argv splitting. The posix parameter is ignored.
171 170
172 171 If strict=False, process_common.arg_split(...strict=False) is used instead.
173 172 """
174 173 #CommandLineToArgvW returns path to executable if called with empty string.
175 174 if commandline.strip() == "":
176 175 return []
177 176 if not strict:
178 177 # not really a cl-arg, fallback on _process_common
179 178 return py_arg_split(commandline, posix=posix, strict=strict)
180 179 argvn = c_int()
181 180 result_pointer = CommandLineToArgvW(py3compat.cast_unicode(commandline.lstrip()), ctypes.byref(argvn))
182 181 result_array_type = LPCWSTR * argvn.value
183 182 result = [arg for arg in result_array_type.from_address(ctypes.addressof(result_pointer.contents))]
184 183 retval = LocalFree(result_pointer)
185 184 return result
186 185 except AttributeError:
187 186 arg_split = py_arg_split
188 187
189 188 def check_pid(pid):
190 189 # OpenProcess returns 0 if no such process (of ours) exists
191 190 # positive int otherwise
192 191 return bool(ctypes.windll.kernel32.OpenProcess(1,0,pid))
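On Windows, the arg_split defined above delegates the splitting to CommandLineToArgvW. A quick sketch, with the module path IPython.utils._process_win32 assumed (the hunk header does not show the filename)::

    from IPython.utils._process_win32 import arg_split

    # Quoted arguments survive as single tokens, and an empty command line
    # yields an empty argv rather than the path of the current executable.
    assert arg_split('ipython "my script.py" --no-banner') == \
        ['ipython', 'my script.py', '--no-banner']
    assert arg_split('   ') == []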
@@ -1,577 +1,576 b''
1 1 """Windows-specific implementation of process utilities with direct WinAPI.
2 2
3 3 This file is meant to be used by process.py
4 4 """
5 5
6 6 #-----------------------------------------------------------------------------
7 7 # Copyright (C) 2010-2011 The IPython Development Team
8 8 #
9 9 # Distributed under the terms of the BSD License. The full license is in
10 10 # the file COPYING, distributed as part of this software.
11 11 #-----------------------------------------------------------------------------
12 12
13 from __future__ import print_function
14 13
15 14 # stdlib
16 15 import os, sys, threading
17 16 import ctypes, msvcrt
18 17
19 18 # local imports
20 19 from . import py3compat
21 20
22 21 # Win32 API types needed for the API calls
23 22 from ctypes import POINTER
24 23 from ctypes.wintypes import HANDLE, HLOCAL, LPVOID, WORD, DWORD, BOOL, \
25 24 ULONG, LPCWSTR
26 25 LPDWORD = POINTER(DWORD)
27 26 LPHANDLE = POINTER(HANDLE)
28 27 ULONG_PTR = POINTER(ULONG)
29 28 class SECURITY_ATTRIBUTES(ctypes.Structure):
30 29 _fields_ = [("nLength", DWORD),
31 30 ("lpSecurityDescriptor", LPVOID),
32 31 ("bInheritHandle", BOOL)]
33 32 LPSECURITY_ATTRIBUTES = POINTER(SECURITY_ATTRIBUTES)
34 33 class STARTUPINFO(ctypes.Structure):
35 34 _fields_ = [("cb", DWORD),
36 35 ("lpReserved", LPCWSTR),
37 36 ("lpDesktop", LPCWSTR),
38 37 ("lpTitle", LPCWSTR),
39 38 ("dwX", DWORD),
40 39 ("dwY", DWORD),
41 40 ("dwXSize", DWORD),
42 41 ("dwYSize", DWORD),
43 42 ("dwXCountChars", DWORD),
44 43 ("dwYCountChars", DWORD),
45 44 ("dwFillAttribute", DWORD),
46 45 ("dwFlags", DWORD),
47 46 ("wShowWindow", WORD),
48 47 ("cbReserved2", WORD),
49 48 ("lpReserved2", LPVOID),
50 49 ("hStdInput", HANDLE),
51 50 ("hStdOutput", HANDLE),
52 51 ("hStdError", HANDLE)]
53 52 LPSTARTUPINFO = POINTER(STARTUPINFO)
54 53 class PROCESS_INFORMATION(ctypes.Structure):
55 54 _fields_ = [("hProcess", HANDLE),
56 55 ("hThread", HANDLE),
57 56 ("dwProcessId", DWORD),
58 57 ("dwThreadId", DWORD)]
59 58 LPPROCESS_INFORMATION = POINTER(PROCESS_INFORMATION)
60 59
61 60 # Win32 API constants needed
62 61 ERROR_HANDLE_EOF = 38
63 62 ERROR_BROKEN_PIPE = 109
64 63 ERROR_NO_DATA = 232
65 64 HANDLE_FLAG_INHERIT = 0x0001
66 65 STARTF_USESTDHANDLES = 0x0100
67 66 CREATE_SUSPENDED = 0x0004
68 67 CREATE_NEW_CONSOLE = 0x0010
69 68 CREATE_NO_WINDOW = 0x08000000
70 69 STILL_ACTIVE = 259
71 70 WAIT_TIMEOUT = 0x0102
72 71 WAIT_FAILED = 0xFFFFFFFF
73 72 INFINITE = 0xFFFFFFFF
74 73 DUPLICATE_SAME_ACCESS = 0x00000002
75 74 ENABLE_ECHO_INPUT = 0x0004
76 75 ENABLE_LINE_INPUT = 0x0002
77 76 ENABLE_PROCESSED_INPUT = 0x0001
78 77
79 78 # Win32 API functions needed
80 79 GetLastError = ctypes.windll.kernel32.GetLastError
81 80 GetLastError.argtypes = []
82 81 GetLastError.restype = DWORD
83 82
84 83 CreateFile = ctypes.windll.kernel32.CreateFileW
85 84 CreateFile.argtypes = [LPCWSTR, DWORD, DWORD, LPVOID, DWORD, DWORD, HANDLE]
86 85 CreateFile.restype = HANDLE
87 86
88 87 CreatePipe = ctypes.windll.kernel32.CreatePipe
89 88 CreatePipe.argtypes = [POINTER(HANDLE), POINTER(HANDLE),
90 89 LPSECURITY_ATTRIBUTES, DWORD]
91 90 CreatePipe.restype = BOOL
92 91
93 92 CreateProcess = ctypes.windll.kernel32.CreateProcessW
94 93 CreateProcess.argtypes = [LPCWSTR, LPCWSTR, LPSECURITY_ATTRIBUTES,
95 94 LPSECURITY_ATTRIBUTES, BOOL, DWORD, LPVOID, LPCWSTR, LPSTARTUPINFO,
96 95 LPPROCESS_INFORMATION]
97 96 CreateProcess.restype = BOOL
98 97
99 98 GetExitCodeProcess = ctypes.windll.kernel32.GetExitCodeProcess
100 99 GetExitCodeProcess.argtypes = [HANDLE, LPDWORD]
101 100 GetExitCodeProcess.restype = BOOL
102 101
103 102 GetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
104 103 GetCurrentProcess.argtypes = []
105 104 GetCurrentProcess.restype = HANDLE
106 105
107 106 ResumeThread = ctypes.windll.kernel32.ResumeThread
108 107 ResumeThread.argtypes = [HANDLE]
109 108 ResumeThread.restype = DWORD
110 109
111 110 ReadFile = ctypes.windll.kernel32.ReadFile
112 111 ReadFile.argtypes = [HANDLE, LPVOID, DWORD, LPDWORD, LPVOID]
113 112 ReadFile.restype = BOOL
114 113
115 114 WriteFile = ctypes.windll.kernel32.WriteFile
116 115 WriteFile.argtypes = [HANDLE, LPVOID, DWORD, LPDWORD, LPVOID]
117 116 WriteFile.restype = BOOL
118 117
119 118 GetConsoleMode = ctypes.windll.kernel32.GetConsoleMode
120 119 GetConsoleMode.argtypes = [HANDLE, LPDWORD]
121 120 GetConsoleMode.restype = BOOL
122 121
123 122 SetConsoleMode = ctypes.windll.kernel32.SetConsoleMode
124 123 SetConsoleMode.argtypes = [HANDLE, DWORD]
125 124 SetConsoleMode.restype = BOOL
126 125
127 126 FlushConsoleInputBuffer = ctypes.windll.kernel32.FlushConsoleInputBuffer
128 127 FlushConsoleInputBuffer.argtypes = [HANDLE]
129 128 FlushConsoleInputBuffer.restype = BOOL
130 129
131 130 WaitForSingleObject = ctypes.windll.kernel32.WaitForSingleObject
132 131 WaitForSingleObject.argtypes = [HANDLE, DWORD]
133 132 WaitForSingleObject.restype = DWORD
134 133
135 134 DuplicateHandle = ctypes.windll.kernel32.DuplicateHandle
136 135 DuplicateHandle.argtypes = [HANDLE, HANDLE, HANDLE, LPHANDLE,
137 136 DWORD, BOOL, DWORD]
138 137 DuplicateHandle.restype = BOOL
139 138
140 139 SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation
141 140 SetHandleInformation.argtypes = [HANDLE, DWORD, DWORD]
142 141 SetHandleInformation.restype = BOOL
143 142
144 143 CloseHandle = ctypes.windll.kernel32.CloseHandle
145 144 CloseHandle.argtypes = [HANDLE]
146 145 CloseHandle.restype = BOOL
147 146
148 147 CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW
149 148 CommandLineToArgvW.argtypes = [LPCWSTR, POINTER(ctypes.c_int)]
150 149 CommandLineToArgvW.restype = POINTER(LPCWSTR)
151 150
152 151 LocalFree = ctypes.windll.kernel32.LocalFree
153 152 LocalFree.argtypes = [HLOCAL]
154 153 LocalFree.restype = HLOCAL
155 154
156 155 class AvoidUNCPath(object):
157 156 """A context manager to protect command execution from UNC paths.
158 157
159 158 In the Win32 API, commands can't be invoked with the cwd being a UNC path.
160 159 This context manager temporarily changes directory to the 'C:' drive on
161 160 entering, and restores the original working directory on exit.
162 161
163 162 The context manager returns the starting working directory *if* it made a
164 163 change and None otherwise, so that users can apply the necessary adjustment
165 164 to their system calls in the event of a change.
166 165
167 166 Examples
168 167 --------
169 168 ::
170 169 cmd = 'dir'
171 170 with AvoidUNCPath() as path:
172 171 if path is not None:
173 172 cmd = '"pushd %s &&"%s' % (path, cmd)
174 173 os.system(cmd)
175 174 """
176 175 def __enter__(self):
177 176 self.path = py3compat.getcwd()
178 177 self.is_unc_path = self.path.startswith(r"\\")
179 178 if self.is_unc_path:
180 179 # change to c drive (as cmd.exe cannot handle UNC addresses)
181 180 os.chdir("C:")
182 181 return self.path
183 182 else:
184 183 # We return None to signal that there was no change in the working
185 184 # directory
186 185 return None
187 186
188 187 def __exit__(self, exc_type, exc_value, traceback):
189 188 if self.is_unc_path:
190 189 os.chdir(self.path)
191 190
192 191
193 192 class Win32ShellCommandController(object):
194 193 """Runs a shell command in a 'with' context.
195 194
196 195 This implementation is Win32-specific.
197 196
198 197 Example:
199 198 # Runs the command interactively with default console stdin/stdout
200 199 with ShellCommandController('python -i') as scc:
201 200 scc.run()
202 201
203 202 # Runs the command using the provided functions for stdin/stdout
204 203 def my_stdout_func(s):
205 204 # print or save the string 's'
206 205 write_to_stdout(s)
207 206 def my_stdin_func():
208 207 # If input is available, return it as a string.
209 208 if input_available():
210 209 return get_input()
211 210 # If no input available, return None after a short delay to
212 211 # keep from blocking.
213 212 else:
214 213 time.sleep(0.01)
215 214 return None
216 215
217 216 with ShellCommandController('python -i') as scc:
218 217 scc.run(my_stdout_func, my_stdin_func)
219 218 """
220 219
221 220 def __init__(self, cmd, mergeout = True):
222 221 """Initializes the shell command controller.
223 222
224 223 The cmd is the program to execute, and mergeout is
225 224 whether to blend stdout and stderr into one output
226 225 in stdout. Merging them together in this fashion more
227 226 reliably keeps stdout and stderr in the correct order
228 227 especially for interactive shell usage.
229 228 """
230 229 self.cmd = cmd
231 230 self.mergeout = mergeout
232 231
233 232 def __enter__(self):
234 233 cmd = self.cmd
235 234 mergeout = self.mergeout
236 235
237 236 self.hstdout, self.hstdin, self.hstderr = None, None, None
238 237 self.piProcInfo = None
239 238 try:
240 239 p_hstdout, c_hstdout, p_hstderr, \
241 240 c_hstderr, p_hstdin, c_hstdin = [None]*6
242 241
243 242 # SECURITY_ATTRIBUTES with inherit handle set to True
244 243 saAttr = SECURITY_ATTRIBUTES()
245 244 saAttr.nLength = ctypes.sizeof(saAttr)
246 245 saAttr.bInheritHandle = True
247 246 saAttr.lpSecurityDescriptor = None
248 247
249 248 def create_pipe(uninherit):
250 249 """Creates a Windows pipe, which consists of two handles.
251 250
252 251 The 'uninherit' parameter controls which handle is not
253 252 inherited by the child process.
254 253 """
255 254 handles = HANDLE(), HANDLE()
256 255 if not CreatePipe(ctypes.byref(handles[0]),
257 256 ctypes.byref(handles[1]), ctypes.byref(saAttr), 0):
258 257 raise ctypes.WinError()
259 258 if not SetHandleInformation(handles[uninherit],
260 259 HANDLE_FLAG_INHERIT, 0):
261 260 raise ctypes.WinError()
262 261 return handles[0].value, handles[1].value
263 262
264 263 p_hstdout, c_hstdout = create_pipe(uninherit=0)
265 264 # 'mergeout' signals that stdout and stderr should be merged.
266 265 # We do that by using one pipe for both of them.
267 266 if mergeout:
268 267 c_hstderr = HANDLE()
269 268 if not DuplicateHandle(GetCurrentProcess(), c_hstdout,
270 269 GetCurrentProcess(), ctypes.byref(c_hstderr),
271 270 0, True, DUPLICATE_SAME_ACCESS):
272 271 raise ctypes.WinError()
273 272 else:
274 273 p_hstderr, c_hstderr = create_pipe(uninherit=0)
275 274 c_hstdin, p_hstdin = create_pipe(uninherit=1)
276 275
277 276 # Create the process object
278 277 piProcInfo = PROCESS_INFORMATION()
279 278 siStartInfo = STARTUPINFO()
280 279 siStartInfo.cb = ctypes.sizeof(siStartInfo)
281 280 siStartInfo.hStdInput = c_hstdin
282 281 siStartInfo.hStdOutput = c_hstdout
283 282 siStartInfo.hStdError = c_hstderr
284 283 siStartInfo.dwFlags = STARTF_USESTDHANDLES
285 284 dwCreationFlags = CREATE_SUSPENDED | CREATE_NO_WINDOW # | CREATE_NEW_CONSOLE
286 285
287 286 if not CreateProcess(None,
288 287 u"cmd.exe /c " + cmd,
289 288 None, None, True, dwCreationFlags,
290 289 None, None, ctypes.byref(siStartInfo),
291 290 ctypes.byref(piProcInfo)):
292 291 raise ctypes.WinError()
293 292
294 293 # Close this process's versions of the child handles
295 294 CloseHandle(c_hstdin)
296 295 c_hstdin = None
297 296 CloseHandle(c_hstdout)
298 297 c_hstdout = None
299 298 if c_hstderr is not None:
300 299 CloseHandle(c_hstderr)
301 300 c_hstderr = None
302 301
303 302 # Transfer ownership of the parent handles to the object
304 303 self.hstdin = p_hstdin
305 304 p_hstdin = None
306 305 self.hstdout = p_hstdout
307 306 p_hstdout = None
308 307 if not mergeout:
309 308 self.hstderr = p_hstderr
310 309 p_hstderr = None
311 310 self.piProcInfo = piProcInfo
312 311
313 312 finally:
314 313 if p_hstdin:
315 314 CloseHandle(p_hstdin)
316 315 if c_hstdin:
317 316 CloseHandle(c_hstdin)
318 317 if p_hstdout:
319 318 CloseHandle(p_hstdout)
320 319 if c_hstdout:
321 320 CloseHandle(c_hstdout)
322 321 if p_hstderr:
323 322 CloseHandle(p_hstderr)
324 323 if c_hstderr:
325 324 CloseHandle(c_hstderr)
326 325
327 326 return self
328 327
329 328 def _stdin_thread(self, handle, hprocess, func, stdout_func):
330 329 exitCode = DWORD()
331 330 bytesWritten = DWORD(0)
332 331 while True:
333 332 #print("stdin thread loop start")
334 333 # Get the input string (may be bytes or unicode)
335 334 data = func()
336 335
337 336 # None signals to poll whether the process has exited
338 337 if data is None:
339 338 #print("checking for process completion")
340 339 if not GetExitCodeProcess(hprocess, ctypes.byref(exitCode)):
341 340 raise ctypes.WinError()
342 341 if exitCode.value != STILL_ACTIVE:
343 342 return
344 343 # TESTING: Does zero-sized writefile help?
345 344 if not WriteFile(handle, "", 0,
346 345 ctypes.byref(bytesWritten), None):
347 346 raise ctypes.WinError()
348 347 continue
349 348 #print("\nGot str %s\n" % repr(data), file=sys.stderr)
350 349
351 350 # Encode the string to the console encoding
352 351 if isinstance(data, unicode): #FIXME: Python3
353 352 data = data.encode('utf_8')
354 353
355 354 # What we have now must be a string of bytes
356 355 if not isinstance(data, str): #FIXME: Python3
357 356 raise RuntimeError("internal stdin function string error")
358 357
359 358 # An empty string signals EOF
360 359 if len(data) == 0:
361 360 return
362 361
363 362 # In a windows console, sometimes the input is echoed,
364 363 # but sometimes not. How do we determine when to do this?
365 364 stdout_func(data)
366 365 # WriteFile may not accept all the data at once.
367 366 # Loop until everything is processed
368 367 while len(data) != 0:
369 368 #print("Calling writefile")
370 369 if not WriteFile(handle, data, len(data),
371 370 ctypes.byref(bytesWritten), None):
372 371 # This occurs at exit
373 372 if GetLastError() == ERROR_NO_DATA:
374 373 return
375 374 raise ctypes.WinError()
376 375 #print("Called writefile")
377 376 data = data[bytesWritten.value:]
378 377
379 378 def _stdout_thread(self, handle, func):
380 379 # Allocate the output buffer
381 380 data = ctypes.create_string_buffer(4096)
382 381 while True:
383 382 bytesRead = DWORD(0)
384 383 if not ReadFile(handle, data, 4096,
385 384 ctypes.byref(bytesRead), None):
386 385 le = GetLastError()
387 386 if le == ERROR_BROKEN_PIPE:
388 387 return
389 388 else:
390 389 raise ctypes.WinError()
391 390 # FIXME: Python3
392 391 s = data.value[0:bytesRead.value]
393 392 #print("\nv: %s" % repr(s), file=sys.stderr)
394 393 func(s.decode('utf_8', 'replace'))
395 394
396 395 def run(self, stdout_func = None, stdin_func = None, stderr_func = None):
397 396 """Runs the process, using the provided functions for I/O.
398 397
399 398 The function stdin_func should return strings whenever a
400 399 character or characters become available.
401 400 The functions stdout_func and stderr_func are called whenever
402 401 something is printed to stdout or stderr, respectively.
403 402 These functions are called from different threads (but not
404 403 concurrently, because of the GIL).
405 404 """
406 405 if stdout_func is None and stdin_func is None and stderr_func is None:
407 406 return self._run_stdio()
408 407
409 408 if stderr_func is not None and self.mergeout:
410 409 raise RuntimeError("Shell command was initiated with "
411 410 "merged stdin/stdout, but a separate stderr_func "
412 411 "was provided to the run() method")
413 412
414 413 # Create a thread for each input/output handle
415 414 stdin_thread = None
416 415 threads = []
417 416 if stdin_func:
418 417 stdin_thread = threading.Thread(target=self._stdin_thread,
419 418 args=(self.hstdin, self.piProcInfo.hProcess,
420 419 stdin_func, stdout_func))
421 420 threads.append(threading.Thread(target=self._stdout_thread,
422 421 args=(self.hstdout, stdout_func)))
423 422 if not self.mergeout:
424 423 if stderr_func is None:
425 424 stderr_func = stdout_func
426 425 threads.append(threading.Thread(target=self._stdout_thread,
427 426 args=(self.hstderr, stderr_func)))
428 427 # Start the I/O threads and the process
429 428 if ResumeThread(self.piProcInfo.hThread) == 0xFFFFFFFF:
430 429 raise ctypes.WinError()
431 430 if stdin_thread is not None:
432 431 stdin_thread.start()
433 432 for thread in threads:
434 433 thread.start()
435 434 # Wait for the process to complete
436 435 if WaitForSingleObject(self.piProcInfo.hProcess, INFINITE) == \
437 436 WAIT_FAILED:
438 437 raise ctypes.WinError()
439 438 # Wait for the I/O threads to complete
440 439 for thread in threads:
441 440 thread.join()
442 441
443 442 # Wait for the stdin thread to complete
444 443 if stdin_thread is not None:
445 444 stdin_thread.join()
446 445
447 446 def _stdin_raw_nonblock(self):
448 447 """Use the raw Win32 handle of sys.stdin to do non-blocking reads"""
449 448 # WARNING: This is experimental, and produces inconsistent results.
450 449 # It's possible for the handle not to be appropriate for use
451 450 # with WaitForSingleObject, among other things.
452 451 handle = msvcrt.get_osfhandle(sys.stdin.fileno())
453 452 result = WaitForSingleObject(handle, 100)
454 453 if result == WAIT_FAILED:
455 454 raise ctypes.WinError()
456 455 elif result == WAIT_TIMEOUT:
457 456 print(".", end='')
458 457 return None
459 458 else:
460 459 data = ctypes.create_string_buffer(256)
461 460 bytesRead = DWORD(0)
462 461 print('?', end='')
463 462
464 463 if not ReadFile(handle, data, 256,
465 464 ctypes.byref(bytesRead), None):
466 465 raise ctypes.WinError()
467 466 # This ensures the non-blocking works with an actual console
468 467 # Not checking the error, so the processing will still work with
469 468 # other handle types
470 469 FlushConsoleInputBuffer(handle)
471 470
472 471 data = data.value
473 472 data = data.replace('\r\n', '\n')
474 473 data = data.replace('\r', '\n')
475 474 print(repr(data) + " ", end='')
476 475 return data
477 476
478 477 def _stdin_raw_block(self):
479 478 """Use a blocking stdin read"""
480 479 # The big problem with the blocking read is that it doesn't
481 480 # exit when it's supposed to in all contexts. An extra
482 481 # key-press may be required to trigger the exit.
483 482 try:
484 483 data = sys.stdin.read(1)
485 484 data = data.replace('\r', '\n')
486 485 return data
487 486 except WindowsError as we:
488 487 if we.winerror == ERROR_NO_DATA:
489 488 # This error occurs when the pipe is closed
490 489 return None
491 490 else:
492 491 # Otherwise let the error propagate
493 492 raise we
494 493
495 494 def _stdout_raw(self, s):
496 495 """Writes the string to stdout"""
497 496 print(s, end='', file=sys.stdout)
498 497 sys.stdout.flush()
499 498
500 499 def _stderr_raw(self, s):
501 500 """Writes the string to stdout"""
502 501 print(s, end='', file=sys.stderr)
503 502 sys.stderr.flush()
504 503
505 504 def _run_stdio(self):
506 505 """Runs the process using the system standard I/O.
507 506
508 507 IMPORTANT: stdin needs to be asynchronous, so the Python
509 508 sys.stdin object is not used. Instead,
510 509 msvcrt.kbhit/getwch are used asynchronously.
511 510 """
512 511 # Disable Line and Echo mode
513 512 #lpMode = DWORD()
514 513 #handle = msvcrt.get_osfhandle(sys.stdin.fileno())
515 514 #if GetConsoleMode(handle, ctypes.byref(lpMode)):
516 515 # set_console_mode = True
517 516 # if not SetConsoleMode(handle, lpMode.value &
518 517 # ~(ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT | ENABLE_PROCESSED_INPUT)):
519 518 # raise ctypes.WinError()
520 519
521 520 if self.mergeout:
522 521 return self.run(stdout_func = self._stdout_raw,
523 522 stdin_func = self._stdin_raw_block)
524 523 else:
525 524 return self.run(stdout_func = self._stdout_raw,
526 525 stdin_func = self._stdin_raw_block,
527 526 stderr_func = self._stderr_raw)
528 527
529 528 # Restore the previous console mode
530 529 #if set_console_mode:
531 530 # if not SetConsoleMode(handle, lpMode.value):
532 531 # raise ctypes.WinError()
533 532
534 533 def __exit__(self, exc_type, exc_value, traceback):
535 534 if self.hstdin:
536 535 CloseHandle(self.hstdin)
537 536 self.hstdin = None
538 537 if self.hstdout:
539 538 CloseHandle(self.hstdout)
540 539 self.hstdout = None
541 540 if self.hstderr:
542 541 CloseHandle(self.hstderr)
543 542 self.hstderr = None
544 543 if self.piProcInfo is not None:
545 544 CloseHandle(self.piProcInfo.hProcess)
546 545 CloseHandle(self.piProcInfo.hThread)
547 546 self.piProcInfo = None
548 547
549 548
550 549 def system(cmd):
551 550 """Win32 version of os.system() that works with network shares.
552 551
553 552 Note that this implementation returns None, as meant for use in IPython.
554 553
555 554 Parameters
556 555 ----------
557 556 cmd : str
558 557 A command to be executed in the system shell.
559 558
560 559 Returns
561 560 -------
562 561 None : we explicitly do NOT return the subprocess status code, as this
563 562 utility is meant to be used extensively in IPython, where any return value
564 563 would trigger :func:`sys.displayhook` calls.
565 564 """
566 565 with AvoidUNCPath() as path:
567 566 if path is not None:
568 567 cmd = '"pushd %s &&"%s' % (path, cmd)
569 568 with Win32ShellCommandController(cmd) as scc:
570 569 scc.run()
571 570
572 571
573 572 if __name__ == "__main__":
574 573 print("Test starting!")
575 574 #system("cmd")
576 575 system("python -i")
577 576 print("Test finished!")
@@ -1,817 +1,816 b''
1 1 """Function signature objects for callables.
2 2
3 3 Back port of Python 3.3's function signature tools from the inspect module,
4 4 modified to be compatible with Python 2.7 and 3.2+.
5 5 """
6 6
7 7 #-----------------------------------------------------------------------------
8 8 # Python 3.3 stdlib inspect.py is public domain
9 9 #
10 10 # Backports Copyright (C) 2013 Aaron Iles
11 11 # Used under Apache License Version 2.0
12 12 #
13 13 # Further Changes are Copyright (C) 2013 The IPython Development Team
14 14 #
15 15 # Distributed under the terms of the BSD License. The full license is in
16 16 # the file COPYING, distributed as part of this software.
17 17 #-----------------------------------------------------------------------------
18 18
19 from __future__ import absolute_import, division, print_function
20 19 import itertools
21 20 import functools
22 21 import re
23 22 import types
24 23
25 24
26 25 # patch for single-file
27 26 # we don't support 2.6, so we can just import OrderedDict
28 27 from collections import OrderedDict
29 28
30 29 __version__ = '0.3'
31 30 # end patch
32 31
33 32 __all__ = ['BoundArguments', 'Parameter', 'Signature', 'signature']
34 33
35 34
36 35 _WrapperDescriptor = type(type.__call__)
37 36 _MethodWrapper = type(all.__call__)
38 37
39 38 _NonUserDefinedCallables = (_WrapperDescriptor,
40 39 _MethodWrapper,
41 40 types.BuiltinFunctionType)
42 41
43 42
44 43 def formatannotation(annotation, base_module=None):
45 44 if isinstance(annotation, type):
46 45 if annotation.__module__ in ('builtins', '__builtin__', base_module):
47 46 return annotation.__name__
48 47 return annotation.__module__+'.'+annotation.__name__
49 48 return repr(annotation)
50 49
51 50
52 51 def _get_user_defined_method(cls, method_name, *nested):
53 52 try:
54 53 if cls is type:
55 54 return
56 55 meth = getattr(cls, method_name)
57 56 for name in nested:
58 57 meth = getattr(meth, name, meth)
59 58 except AttributeError:
60 59 return
61 60 else:
62 61 if not isinstance(meth, _NonUserDefinedCallables):
63 62 # Once '__signature__' will be added to 'C'-level
64 63 # callables, this check won't be necessary
65 64 return meth
66 65
67 66
68 67 def signature(obj):
69 68 '''Get a signature object for the passed callable.'''
70 69
71 70 if not callable(obj):
72 71 raise TypeError('{0!r} is not a callable object'.format(obj))
73 72
74 73 if isinstance(obj, types.MethodType):
75 74 if obj.__self__ is None:
76 75 # Unbound method - treat it as a function (no distinction in Py 3)
77 76 obj = obj.__func__
78 77 else:
79 78 # Bound method: trim off the first parameter (typically self or cls)
80 79 sig = signature(obj.__func__)
81 80 return sig.replace(parameters=tuple(sig.parameters.values())[1:])
82 81
83 82 try:
84 83 sig = obj.__signature__
85 84 except AttributeError:
86 85 pass
87 86 else:
88 87 if sig is not None:
89 88 return sig
90 89
91 90 try:
92 91 # Was this function wrapped by a decorator?
93 92 wrapped = obj.__wrapped__
94 93 except AttributeError:
95 94 pass
96 95 else:
97 96 return signature(wrapped)
98 97
99 98 if isinstance(obj, types.FunctionType):
100 99 return Signature.from_function(obj)
101 100
102 101 if isinstance(obj, functools.partial):
103 102 sig = signature(obj.func)
104 103
105 104 new_params = OrderedDict(sig.parameters.items())
106 105
107 106 partial_args = obj.args or ()
108 107 partial_keywords = obj.keywords or {}
109 108 try:
110 109 ba = sig.bind_partial(*partial_args, **partial_keywords)
111 110 except TypeError as ex:
112 111 msg = 'partial object {0!r} has incorrect arguments'.format(obj)
113 112 raise ValueError(msg)
114 113
115 114 for arg_name, arg_value in ba.arguments.items():
116 115 param = new_params[arg_name]
117 116 if arg_name in partial_keywords:
118 117 # We set a new default value, because the following code
119 118 # is correct:
120 119 #
121 120 # >>> def foo(a): print(a)
122 121 # >>> print(partial(partial(foo, a=10), a=20)())
123 122 # 20
124 123 # >>> print(partial(partial(foo, a=10), a=20)(a=30))
125 124 # 30
126 125 #
127 126 # So, with 'partial' objects, passing a keyword argument is
128 127 # like setting a new default value for the corresponding
129 128 # parameter
130 129 #
131 130 # We also mark this parameter with '_partial_kwarg'
132 131 # flag. Later, in '_bind', the 'default' value of this
133 132 # parameter will be added to 'kwargs', to simulate
134 133 # the 'functools.partial' real call.
135 134 new_params[arg_name] = param.replace(default=arg_value,
136 135 _partial_kwarg=True)
137 136
138 137 elif (param.kind not in (_VAR_KEYWORD, _VAR_POSITIONAL) and
139 138 not param._partial_kwarg):
140 139 new_params.pop(arg_name)
141 140
142 141 return sig.replace(parameters=new_params.values())
143 142
144 143 sig = None
145 144 if isinstance(obj, type):
146 145 # obj is a class or a metaclass
147 146
148 147 # First, let's see if it has an overloaded __call__ defined
149 148 # in its metaclass
150 149 call = _get_user_defined_method(type(obj), '__call__')
151 150 if call is not None:
152 151 sig = signature(call)
153 152 else:
154 153 # Now we check if the 'obj' class has a '__new__' method
155 154 new = _get_user_defined_method(obj, '__new__')
156 155 if new is not None:
157 156 sig = signature(new)
158 157 else:
159 158 # Finally, we should have at least __init__ implemented
160 159 init = _get_user_defined_method(obj, '__init__')
161 160 if init is not None:
162 161 sig = signature(init)
163 162 elif not isinstance(obj, _NonUserDefinedCallables):
164 163 # An object with __call__
165 164 # We also check that the 'obj' is not an instance of
166 165 # _WrapperDescriptor or _MethodWrapper to avoid
167 166 # infinite recursion (and even potential segfault)
168 167 call = _get_user_defined_method(type(obj), '__call__', 'im_func')
169 168 if call is not None:
170 169 sig = signature(call)
171 170
172 171 if sig is not None:
173 172 return sig
174 173
175 174 if isinstance(obj, types.BuiltinFunctionType):
176 175 # Raise a nicer error message for builtins
177 176 msg = 'no signature found for builtin function {0!r}'.format(obj)
178 177 raise ValueError(msg)
179 178
180 179 raise ValueError('callable {0!r} is not supported by signature'.format(obj))
181 180
182 181
183 182 class _void(object):
184 183 '''A private marker - used in Parameter & Signature'''
185 184
186 185
187 186 class _empty(object):
188 187 pass
189 188
190 189
191 190 class _ParameterKind(int):
192 191 def __new__(self, *args, **kwargs):
193 192 obj = int.__new__(self, *args)
194 193 obj._name = kwargs['name']
195 194 return obj
196 195
197 196 def __str__(self):
198 197 return self._name
199 198
200 199 def __repr__(self):
201 200 return '<_ParameterKind: {0!r}>'.format(self._name)
202 201
203 202
204 203 _POSITIONAL_ONLY = _ParameterKind(0, name='POSITIONAL_ONLY')
205 204 _POSITIONAL_OR_KEYWORD = _ParameterKind(1, name='POSITIONAL_OR_KEYWORD')
206 205 _VAR_POSITIONAL = _ParameterKind(2, name='VAR_POSITIONAL')
207 206 _KEYWORD_ONLY = _ParameterKind(3, name='KEYWORD_ONLY')
208 207 _VAR_KEYWORD = _ParameterKind(4, name='VAR_KEYWORD')
209 208
210 209
211 210 class Parameter(object):
212 211 '''Represents a parameter in a function signature.
213 212
214 213 Has the following public attributes:
215 214
216 215 * name : str
217 216 The name of the parameter as a string.
218 217 * default : object
219 218 The default value for the parameter if specified. If the
220 219 parameter has no default value, this attribute is not set.
221 220 * annotation
222 221 The annotation for the parameter if specified. If the
223 222 parameter has no annotation, this attribute is not set.
224 223 * kind : str
225 224 Describes how argument values are bound to the parameter.
226 225 Possible values: `Parameter.POSITIONAL_ONLY`,
227 226 `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
228 227 `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.
229 228 '''
230 229
231 230 __slots__ = ('_name', '_kind', '_default', '_annotation', '_partial_kwarg')
232 231
233 232 POSITIONAL_ONLY = _POSITIONAL_ONLY
234 233 POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD
235 234 VAR_POSITIONAL = _VAR_POSITIONAL
236 235 KEYWORD_ONLY = _KEYWORD_ONLY
237 236 VAR_KEYWORD = _VAR_KEYWORD
238 237
239 238 empty = _empty
240 239
241 240 def __init__(self, name, kind, default=_empty, annotation=_empty,
242 241 _partial_kwarg=False):
243 242
244 243 if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD,
245 244 _VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD):
246 245 raise ValueError("invalid value for 'Parameter.kind' attribute")
247 246 self._kind = kind
248 247
249 248 if default is not _empty:
250 249 if kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
251 250 msg = '{0} parameters cannot have default values'.format(kind)
252 251 raise ValueError(msg)
253 252 self._default = default
254 253 self._annotation = annotation
255 254
256 255 if name is None:
257 256 if kind != _POSITIONAL_ONLY:
258 257 raise ValueError("None is not a valid name for a "
259 258 "non-positional-only parameter")
260 259 self._name = name
261 260 else:
262 261 name = str(name)
263 262 if kind != _POSITIONAL_ONLY and not re.match(r'[a-z_]\w*$', name, re.I):
264 263 msg = '{0!r} is not a valid parameter name'.format(name)
265 264 raise ValueError(msg)
266 265 self._name = name
267 266
268 267 self._partial_kwarg = _partial_kwarg
269 268
270 269 @property
271 270 def name(self):
272 271 return self._name
273 272
274 273 @property
275 274 def default(self):
276 275 return self._default
277 276
278 277 @property
279 278 def annotation(self):
280 279 return self._annotation
281 280
282 281 @property
283 282 def kind(self):
284 283 return self._kind
285 284
286 285 def replace(self, name=_void, kind=_void, annotation=_void,
287 286 default=_void, _partial_kwarg=_void):
288 287 '''Creates a customized copy of the Parameter.'''
289 288
290 289 if name is _void:
291 290 name = self._name
292 291
293 292 if kind is _void:
294 293 kind = self._kind
295 294
296 295 if annotation is _void:
297 296 annotation = self._annotation
298 297
299 298 if default is _void:
300 299 default = self._default
301 300
302 301 if _partial_kwarg is _void:
303 302 _partial_kwarg = self._partial_kwarg
304 303
305 304 return type(self)(name, kind, default=default, annotation=annotation,
306 305 _partial_kwarg=_partial_kwarg)
307 306
308 307 def __str__(self):
309 308 kind = self.kind
310 309
311 310 formatted = self._name
312 311 if kind == _POSITIONAL_ONLY:
313 312 if formatted is None:
314 313 formatted = ''
315 314 formatted = '<{0}>'.format(formatted)
316 315
317 316 # Add annotation and default value
318 317 if self._annotation is not _empty:
319 318 formatted = '{0}:{1}'.format(formatted,
320 319 formatannotation(self._annotation))
321 320
322 321 if self._default is not _empty:
323 322 formatted = '{0}={1}'.format(formatted, repr(self._default))
324 323
325 324 if kind == _VAR_POSITIONAL:
326 325 formatted = '*' + formatted
327 326 elif kind == _VAR_KEYWORD:
328 327 formatted = '**' + formatted
329 328
330 329 return formatted
331 330
332 331 def __repr__(self):
333 332 return '<{0} at {1:#x} {2!r}>'.format(self.__class__.__name__,
334 333 id(self), self.name)
335 334
336 335 def __hash__(self):
337 336 msg = "unhashable type: '{0}'".format(self.__class__.__name__)
338 337 raise TypeError(msg)
339 338
340 339 def __eq__(self, other):
341 340 return (issubclass(other.__class__, Parameter) and
342 341 self._name == other._name and
343 342 self._kind == other._kind and
344 343 self._default == other._default and
345 344 self._annotation == other._annotation)
346 345
347 346 def __ne__(self, other):
348 347 return not self.__eq__(other)
349 348
350 349
351 350 class BoundArguments(object):
352 351 '''Result of :meth:`Signature.bind` call. Holds the mapping of arguments
353 352 to the function's parameters.
354 353
355 354 Has the following public attributes:
356 355
357 356 arguments : :class:`collections.OrderedDict`
358 357 An ordered mutable mapping of parameters' names to arguments' values.
359 358 Does not contain arguments' default values.
360 359 signature : :class:`Signature`
361 360 The Signature object that created this instance.
362 361 args : tuple
363 362 Tuple of positional arguments values.
364 363 kwargs : dict
365 364 Dict of keyword arguments values.
366 365 '''
367 366
368 367 def __init__(self, signature, arguments):
369 368 self.arguments = arguments
370 369 self._signature = signature
371 370
372 371 @property
373 372 def signature(self):
374 373 return self._signature
375 374
376 375 @property
377 376 def args(self):
378 377 args = []
379 378 for param_name, param in self._signature.parameters.items():
380 379 if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
381 380 param._partial_kwarg):
382 381 # Keyword arguments mapped by 'functools.partial'
383 382 # (Parameter._partial_kwarg is True) are mapped
384 383 # in 'BoundArguments.kwargs', along with VAR_KEYWORD &
385 384 # KEYWORD_ONLY
386 385 break
387 386
388 387 try:
389 388 arg = self.arguments[param_name]
390 389 except KeyError:
391 390 # We're done here. Other arguments
392 391 # will be mapped in 'BoundArguments.kwargs'
393 392 break
394 393 else:
395 394 if param.kind == _VAR_POSITIONAL:
396 395 # *args
397 396 args.extend(arg)
398 397 else:
399 398 # plain argument
400 399 args.append(arg)
401 400
402 401 return tuple(args)
403 402
404 403 @property
405 404 def kwargs(self):
406 405 kwargs = {}
407 406 kwargs_started = False
408 407 for param_name, param in self._signature.parameters.items():
409 408 if not kwargs_started:
410 409 if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
411 410 param._partial_kwarg):
412 411 kwargs_started = True
413 412 else:
414 413 if param_name not in self.arguments:
415 414 kwargs_started = True
416 415 continue
417 416
418 417 if not kwargs_started:
419 418 continue
420 419
421 420 try:
422 421 arg = self.arguments[param_name]
423 422 except KeyError:
424 423 pass
425 424 else:
426 425 if param.kind == _VAR_KEYWORD:
427 426 # **kwargs
428 427 kwargs.update(arg)
429 428 else:
430 429 # plain keyword argument
431 430 kwargs[param_name] = arg
432 431
433 432 return kwargs
434 433
435 434 def __hash__(self):
436 435 msg = "unhashable type: '{0}'".format(self.__class__.__name__)
437 436 raise TypeError(msg)
438 437
439 438 def __eq__(self, other):
440 439 return (issubclass(other.__class__, BoundArguments) and
441 440 self.signature == other.signature and
442 441 self.arguments == other.arguments)
443 442
444 443 def __ne__(self, other):
445 444 return not self.__eq__(other)
446 445
447 446
448 447 class Signature(object):
449 448 '''A Signature object represents the overall signature of a function.
450 449 It stores a Parameter object for each parameter accepted by the
451 450 function, as well as information specific to the function itself.
452 451
453 452 A Signature object has the following public attributes:
454 453
455 454 parameters : :class:`collections.OrderedDict`
456 455 An ordered mapping of parameters' names to the corresponding
457 456 Parameter objects (keyword-only arguments are in the same order
458 457 as listed in `code.co_varnames`).
459 458 return_annotation
460 459 The annotation for the return type of the function if specified.
461 460 If the function has no annotation for its return type, this
462 461 attribute is not set.
463 462 '''
464 463
465 464 __slots__ = ('_return_annotation', '_parameters')
466 465
467 466 _parameter_cls = Parameter
468 467 _bound_arguments_cls = BoundArguments
469 468
470 469 empty = _empty
471 470
472 471 def __init__(self, parameters=None, return_annotation=_empty,
473 472 __validate_parameters__=True):
474 473 '''Constructs Signature from the given list of Parameter
475 474 objects and 'return_annotation'. All arguments are optional.
476 475 '''
477 476
478 477 if parameters is None:
479 478 params = OrderedDict()
480 479 else:
481 480 if __validate_parameters__:
482 481 params = OrderedDict()
483 482 top_kind = _POSITIONAL_ONLY
484 483
485 484 for idx, param in enumerate(parameters):
486 485 kind = param.kind
487 486 if kind < top_kind:
488 487 msg = 'wrong parameter order: {0} before {1}'
489 488 msg = msg.format(top_kind, param.kind)
490 489 raise ValueError(msg)
491 490 else:
492 491 top_kind = kind
493 492
494 493 name = param.name
495 494 if name is None:
496 495 name = str(idx)
497 496 param = param.replace(name=name)
498 497
499 498 if name in params:
500 499 msg = 'duplicate parameter name: {0!r}'.format(name)
501 500 raise ValueError(msg)
502 501 params[name] = param
503 502 else:
504 503 params = OrderedDict(((param.name, param)
505 504 for param in parameters))
506 505
507 506 self._parameters = params
508 507 self._return_annotation = return_annotation
509 508
510 509 @classmethod
511 510 def from_function(cls, func):
512 511 '''Constructs Signature for the given python function'''
513 512
514 513 if not isinstance(func, types.FunctionType):
515 514 raise TypeError('{0!r} is not a Python function'.format(func))
516 515
517 516 Parameter = cls._parameter_cls
518 517
519 518 # Parameter information.
520 519 func_code = func.__code__
521 520 pos_count = func_code.co_argcount
522 521 arg_names = func_code.co_varnames
523 522 positional = tuple(arg_names[:pos_count])
524 523 keyword_only_count = getattr(func_code, 'co_kwonlyargcount', 0)
525 524 keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)]
526 525 annotations = getattr(func, '__annotations__', {})
527 526 defaults = func.__defaults__
528 527 kwdefaults = getattr(func, '__kwdefaults__', None)
529 528
530 529 if defaults:
531 530 pos_default_count = len(defaults)
532 531 else:
533 532 pos_default_count = 0
534 533
535 534 parameters = []
536 535
537 536 # Non-keyword-only parameters w/o defaults.
538 537 non_default_count = pos_count - pos_default_count
539 538 for name in positional[:non_default_count]:
540 539 annotation = annotations.get(name, _empty)
541 540 parameters.append(Parameter(name, annotation=annotation,
542 541 kind=_POSITIONAL_OR_KEYWORD))
543 542
544 543 # ... w/ defaults.
545 544 for offset, name in enumerate(positional[non_default_count:]):
546 545 annotation = annotations.get(name, _empty)
547 546 parameters.append(Parameter(name, annotation=annotation,
548 547 kind=_POSITIONAL_OR_KEYWORD,
549 548 default=defaults[offset]))
550 549
551 550 # *args
552 551 if func_code.co_flags & 0x04:
553 552 name = arg_names[pos_count + keyword_only_count]
554 553 annotation = annotations.get(name, _empty)
555 554 parameters.append(Parameter(name, annotation=annotation,
556 555 kind=_VAR_POSITIONAL))
557 556
558 557 # Keyword-only parameters.
559 558 for name in keyword_only:
560 559 default = _empty
561 560 if kwdefaults is not None:
562 561 default = kwdefaults.get(name, _empty)
563 562
564 563 annotation = annotations.get(name, _empty)
565 564 parameters.append(Parameter(name, annotation=annotation,
566 565 kind=_KEYWORD_ONLY,
567 566 default=default))
568 567 # **kwargs
569 568 if func_code.co_flags & 0x08:
570 569 index = pos_count + keyword_only_count
571 570 if func_code.co_flags & 0x04:
572 571 index += 1
573 572
574 573 name = arg_names[index]
575 574 annotation = annotations.get(name, _empty)
576 575 parameters.append(Parameter(name, annotation=annotation,
577 576 kind=_VAR_KEYWORD))
578 577
579 578 return cls(parameters,
580 579 return_annotation=annotations.get('return', _empty),
581 580 __validate_parameters__=False)
582 581
583 582 @property
584 583 def parameters(self):
585 584 try:
586 585 return types.MappingProxyType(self._parameters)
587 586 except AttributeError:
588 587 return OrderedDict(self._parameters.items())
589 588
590 589 @property
591 590 def return_annotation(self):
592 591 return self._return_annotation
593 592
594 593 def replace(self, parameters=_void, return_annotation=_void):
595 594 '''Creates a customized copy of the Signature.
596 595 Pass 'parameters' and/or 'return_annotation' arguments
597 596 to override them in the new copy.
598 597 '''
599 598
600 599 if parameters is _void:
601 600 parameters = self.parameters.values()
602 601
603 602 if return_annotation is _void:
604 603 return_annotation = self._return_annotation
605 604
606 605 return type(self)(parameters,
607 606 return_annotation=return_annotation)
608 607
609 608 def __hash__(self):
610 609 msg = "unhashable type: '{0}'".format(self.__class__.__name__)
611 610 raise TypeError(msg)
612 611
613 612 def __eq__(self, other):
614 613 if (not issubclass(type(other), Signature) or
615 614 self.return_annotation != other.return_annotation or
616 615 len(self.parameters) != len(other.parameters)):
617 616 return False
618 617
619 618 other_positions = dict((param, idx)
620 619 for idx, param in enumerate(other.parameters.keys()))
621 620
622 621 for idx, (param_name, param) in enumerate(self.parameters.items()):
623 622 if param.kind == _KEYWORD_ONLY:
624 623 try:
625 624 other_param = other.parameters[param_name]
626 625 except KeyError:
627 626 return False
628 627 else:
629 628 if param != other_param:
630 629 return False
631 630 else:
632 631 try:
633 632 other_idx = other_positions[param_name]
634 633 except KeyError:
635 634 return False
636 635 else:
637 636 if (idx != other_idx or
638 637 param != other.parameters[param_name]):
639 638 return False
640 639
641 640 return True
642 641
643 642 def __ne__(self, other):
644 643 return not self.__eq__(other)
645 644
646 645 def _bind(self, args, kwargs, partial=False):
647 646 '''Private method. Don't use directly.'''
648 647
649 648 arguments = OrderedDict()
650 649
651 650 parameters = iter(self.parameters.values())
652 651 parameters_ex = ()
653 652 arg_vals = iter(args)
654 653
655 654 if partial:
656 655 # Support for binding arguments to 'functools.partial' objects.
657 656 # See 'functools.partial' case in 'signature()' implementation
658 657 # for details.
659 658 for param_name, param in self.parameters.items():
660 659 if (param._partial_kwarg and param_name not in kwargs):
661 660 # Simulating 'functools.partial' behavior
662 661 kwargs[param_name] = param.default
663 662
664 663 while True:
665 664 # Let's iterate through the positional arguments and corresponding
666 665 # parameters
667 666 try:
668 667 arg_val = next(arg_vals)
669 668 except StopIteration:
670 669 # No more positional arguments
671 670 try:
672 671 param = next(parameters)
673 672 except StopIteration:
674 673 # No more parameters. That's it. Just need to check that
675 674 # we have no `kwargs` after this while loop
676 675 break
677 676 else:
678 677 if param.kind == _VAR_POSITIONAL:
679 678 # That's OK, just empty *args. Let's start parsing
680 679 # kwargs
681 680 break
682 681 elif param.name in kwargs:
683 682 if param.kind == _POSITIONAL_ONLY:
684 683 msg = '{arg!r} parameter is positional only, ' \
685 684 'but was passed as a keyword'
686 685 msg = msg.format(arg=param.name)
687 686 raise TypeError(msg)
688 687 parameters_ex = (param,)
689 688 break
690 689 elif (param.kind == _VAR_KEYWORD or
691 690 param.default is not _empty):
692 691 # That's fine too - we have a default value for this
693 692 # parameter. So, lets start parsing `kwargs`, starting
694 693 # with the current parameter
695 694 parameters_ex = (param,)
696 695 break
697 696 else:
698 697 if partial:
699 698 parameters_ex = (param,)
700 699 break
701 700 else:
702 701 msg = '{arg!r} parameter lacking default value'
703 702 msg = msg.format(arg=param.name)
704 703 raise TypeError(msg)
705 704 else:
706 705 # We have a positional argument to process
707 706 try:
708 707 param = next(parameters)
709 708 except StopIteration:
710 709 raise TypeError('too many positional arguments')
711 710 else:
712 711 if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
713 712 # Looks like we have no parameter for this positional
714 713 # argument
715 714 raise TypeError('too many positional arguments')
716 715
717 716 if param.kind == _VAR_POSITIONAL:
718 717 # We have an '*args'-like argument, let's fill it with
719 718 # all positional arguments we have left and move on to
720 719 # the next phase
721 720 values = [arg_val]
722 721 values.extend(arg_vals)
723 722 arguments[param.name] = tuple(values)
724 723 break
725 724
726 725 if param.name in kwargs:
727 726 raise TypeError('multiple values for argument '
728 727 '{arg!r}'.format(arg=param.name))
729 728
730 729 arguments[param.name] = arg_val
731 730
732 731 # Now, we iterate through the remaining parameters to process
733 732 # keyword arguments
734 733 kwargs_param = None
735 734 for param in itertools.chain(parameters_ex, parameters):
736 735 if param.kind == _POSITIONAL_ONLY:
737 736 # This should never happen in case of a properly built
738 737 # Signature object (but let's have this check here
739 738 # to ensure correct behaviour just in case)
740 739 raise TypeError('{arg!r} parameter is positional only, '
741 740 'but was passed as a keyword'. \
742 741 format(arg=param.name))
743 742
744 743 if param.kind == _VAR_KEYWORD:
745 744 # Memorize that we have a '**kwargs'-like parameter
746 745 kwargs_param = param
747 746 continue
748 747
749 748 param_name = param.name
750 749 try:
751 750 arg_val = kwargs.pop(param_name)
752 751 except KeyError:
753 752 # We have no value for this parameter. It's fine though,
754 753 # if it has a default value, or it is an '*args'-like
755 754 # parameter, left alone by the processing of positional
756 755 # arguments.
757 756 if (not partial and param.kind != _VAR_POSITIONAL and
758 757 param.default is _empty):
759 758 raise TypeError('{arg!r} parameter lacking default value'. \
760 759 format(arg=param_name))
761 760
762 761 else:
763 762 arguments[param_name] = arg_val
764 763
765 764 if kwargs:
766 765 if kwargs_param is not None:
767 766 # Process our '**kwargs'-like parameter
768 767 arguments[kwargs_param.name] = kwargs
769 768 else:
770 769 raise TypeError('too many keyword arguments')
771 770
772 771 return self._bound_arguments_cls(self, arguments)
773 772
774 773 def bind(self, *args, **kwargs):
 775 774 '''Get a :class:`BoundArguments` object that maps the passed `args`
 776 775 and `kwargs` to the function's signature. Raises :exc:`TypeError`
 777 776 if the passed arguments cannot be bound.
778 777 '''
779 778 return self._bind(args, kwargs)
780 779
781 780 def bind_partial(self, *args, **kwargs):
 782 781 '''Get a :class:`BoundArguments` object that partially maps the
 783 782 passed `args` and `kwargs` to the function's signature.
 784 783 Raises :exc:`TypeError` if the passed arguments cannot be bound.
785 784 '''
786 785 return self._bind(args, kwargs, partial=True)
787 786
788 787 def __str__(self):
789 788 result = []
790 789 render_kw_only_separator = True
791 790 for idx, param in enumerate(self.parameters.values()):
792 791 formatted = str(param)
793 792
794 793 kind = param.kind
795 794 if kind == _VAR_POSITIONAL:
796 795 # OK, we have an '*args'-like parameter, so we won't need
797 796 # a '*' to separate keyword-only arguments
798 797 render_kw_only_separator = False
799 798 elif kind == _KEYWORD_ONLY and render_kw_only_separator:
800 799 # We have a keyword-only parameter to render and we haven't
801 800 # rendered an '*args'-like parameter before, so add a '*'
802 801 # separator to the parameters list ("foo(arg1, *, arg2)" case)
803 802 result.append('*')
804 803 # This condition should be only triggered once, so
805 804 # reset the flag
806 805 render_kw_only_separator = False
807 806
808 807 result.append(formatted)
809 808
810 809 rendered = '({0})'.format(', '.join(result))
811 810
812 811 if self.return_annotation is not _empty:
813 812 anno = formatannotation(self.return_annotation)
814 813 rendered += ' -> {0}'.format(anno)
815 814
816 815 return rendered
817 816
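
The backported signature() above produces a Signature whose ordered Parameter objects can be inspected and bound to call arguments. A short sketch, assuming this hunk is IPython.utils._signatures (the example function and its names are invented):

    from IPython.utils._signatures import signature, Parameter

    def greet(name, punctuation='!', *rest, **extra):
        return name + punctuation

    sig = signature(greet)
    print(str(sig))              # (name, punctuation='!', *rest, **extra)
    print(list(sig.parameters))  # ['name', 'punctuation', 'rest', 'extra']
    print(sig.parameters['rest'].kind is Parameter.VAR_POSITIONAL)  # True

    # bind() maps call arguments onto parameters, raising TypeError on mismatch.
    bound = sig.bind('Ada', '?')
    print(bound.args, bound.kwargs)  # ('Ada', '?') {}
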
@@ -1,173 +1,172 b''
1 1 # encoding: utf-8
2 2 """IO capturing utilities."""
3 3
4 4 # Copyright (c) IPython Development Team.
5 5 # Distributed under the terms of the Modified BSD License.
6 6
7 from __future__ import print_function, absolute_import
8 7
9 8 import sys
10 9
11 10 from IPython.utils.py3compat import PY3
12 11
13 12 if PY3:
14 13 from io import StringIO
15 14 else:
16 15 from StringIO import StringIO
17 16
18 17 #-----------------------------------------------------------------------------
19 18 # Classes and functions
20 19 #-----------------------------------------------------------------------------
21 20
22 21
23 22 class RichOutput(object):
24 23 def __init__(self, data=None, metadata=None):
25 24 self.data = data or {}
26 25 self.metadata = metadata or {}
27 26
28 27 def display(self):
29 28 from IPython.display import publish_display_data
30 29 publish_display_data(data=self.data, metadata=self.metadata)
31 30
32 31 def _repr_mime_(self, mime):
33 32 if mime not in self.data:
34 33 return
35 34 data = self.data[mime]
36 35 if mime in self.metadata:
37 36 return data, self.metadata[mime]
38 37 else:
39 38 return data
40 39
41 40 def _repr_html_(self):
42 41 return self._repr_mime_("text/html")
43 42
44 43 def _repr_latex_(self):
45 44 return self._repr_mime_("text/latex")
46 45
47 46 def _repr_json_(self):
48 47 return self._repr_mime_("application/json")
49 48
50 49 def _repr_javascript_(self):
51 50 return self._repr_mime_("application/javascript")
52 51
53 52 def _repr_png_(self):
54 53 return self._repr_mime_("image/png")
55 54
56 55 def _repr_jpeg_(self):
57 56 return self._repr_mime_("image/jpeg")
58 57
59 58 def _repr_svg_(self):
60 59 return self._repr_mime_("image/svg+xml")
61 60
62 61
63 62 class CapturedIO(object):
64 63 """Simple object for containing captured stdout/err and rich display StringIO objects
65 64
66 65 Each instance `c` has three attributes:
67 66
68 67 - ``c.stdout`` : standard output as a string
69 68 - ``c.stderr`` : standard error as a string
70 69 - ``c.outputs``: a list of rich display outputs
71 70
72 71 Additionally, there's a ``c.show()`` method which will print all of the
73 72 above in the same order, and can be invoked simply via ``c()``.
74 73 """
75 74
76 75 def __init__(self, stdout, stderr, outputs=None):
77 76 self._stdout = stdout
78 77 self._stderr = stderr
79 78 if outputs is None:
80 79 outputs = []
81 80 self._outputs = outputs
82 81
83 82 def __str__(self):
84 83 return self.stdout
85 84
86 85 @property
87 86 def stdout(self):
88 87 "Captured standard output"
89 88 if not self._stdout:
90 89 return ''
91 90 return self._stdout.getvalue()
92 91
93 92 @property
94 93 def stderr(self):
95 94 "Captured standard error"
96 95 if not self._stderr:
97 96 return ''
98 97 return self._stderr.getvalue()
99 98
100 99 @property
101 100 def outputs(self):
102 101 """A list of the captured rich display outputs, if any.
103 102
104 103 If you have a CapturedIO object ``c``, these can be displayed in IPython
105 104 using::
106 105
107 106 from IPython.display import display
108 107 for o in c.outputs:
109 108 display(o)
110 109 """
111 110 return [ RichOutput(d, md) for d, md in self._outputs ]
112 111
113 112 def show(self):
114 113 """write my output to sys.stdout/err as appropriate"""
115 114 sys.stdout.write(self.stdout)
116 115 sys.stderr.write(self.stderr)
117 116 sys.stdout.flush()
118 117 sys.stderr.flush()
119 118 for data, metadata in self._outputs:
120 119 RichOutput(data, metadata).display()
121 120
122 121 __call__ = show
123 122
124 123
125 124 class capture_output(object):
126 125 """context manager for capturing stdout/err"""
127 126 stdout = True
128 127 stderr = True
129 128 display = True
130 129
131 130 def __init__(self, stdout=True, stderr=True, display=True):
132 131 self.stdout = stdout
133 132 self.stderr = stderr
134 133 self.display = display
135 134 self.shell = None
136 135
137 136 def __enter__(self):
138 137 from IPython.core.getipython import get_ipython
139 138 from IPython.core.displaypub import CapturingDisplayPublisher
140 139 from IPython.core.displayhook import CapturingDisplayHook
141 140
142 141 self.sys_stdout = sys.stdout
143 142 self.sys_stderr = sys.stderr
144 143
145 144 if self.display:
146 145 self.shell = get_ipython()
147 146 if self.shell is None:
148 147 self.save_display_pub = None
149 148 self.display = False
150 149
151 150 stdout = stderr = outputs = None
152 151 if self.stdout:
153 152 stdout = sys.stdout = StringIO()
154 153 if self.stderr:
155 154 stderr = sys.stderr = StringIO()
156 155 if self.display:
157 156 self.save_display_pub = self.shell.display_pub
158 157 self.shell.display_pub = CapturingDisplayPublisher()
159 158 outputs = self.shell.display_pub.outputs
160 159 self.save_display_hook = sys.displayhook
161 160 sys.displayhook = CapturingDisplayHook(shell=self.shell,
162 161 outputs=outputs)
163 162
164 163 return CapturedIO(stdout, stderr, outputs)
165 164
166 165 def __exit__(self, exc_type, exc_value, traceback):
167 166 sys.stdout = self.sys_stdout
168 167 sys.stderr = self.sys_stderr
169 168 if self.display and self.shell:
170 169 self.shell.display_pub = self.save_display_pub
171 170 sys.displayhook = self.save_display_hook
172 171
173 172
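
A short sketch of the capture_output context manager above, assuming this hunk is IPython.utils.capture; when no IPython shell is running, the rich-display capture simply disables itself:

    from IPython.utils.capture import capture_output

    with capture_output() as captured:
        print("hello from inside the block")

    # Nothing reached the real stdout while the block ran; the text is held
    # on the CapturedIO object instead.
    print(repr(captured.stdout))   # 'hello from inside the block\n'
    captured.show()                # replays stdout/stderr and any rich outputs
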
@@ -1,26 +1,25 b''
1 1 #*****************************************************************************
2 2 # Copyright (C) 2016 The IPython Team <ipython-dev@scipy.org>
3 3 #
4 4 # Distributed under the terms of the BSD License. The full license is in
5 5 # the file COPYING, distributed as part of this software.
6 6 #*****************************************************************************
7 from __future__ import absolute_import
8 7
9 8 """
 11 10 Color-management related utilities
11 10 """
12 11
13 12 import pygments
14 13
15 14 from traitlets.config import Configurable
16 15 from traitlets import Unicode
17 16
18 17
19 18 available_themes = lambda : [s for s in pygments.styles.get_all_styles()]+['NoColor','LightBG','Linux', 'Neutral']
20 19
21 20 class Colorable(Configurable):
22 21 """
23 22 A subclass of configurable for all the classes that have a `default_scheme`
24 23 """
25 24 default_style=Unicode('LightBG').tag(config=True)
26 25
@@ -1,7 +1,6 b''
1 from __future__ import absolute_import
2 1
3 2 from warnings import warn
4 3
5 4 warn("IPython.utils.eventful has moved to traitlets.eventful")
6 5
7 6 from traitlets.eventful import *
@@ -1,98 +1,97 b''
1 1 # encoding: utf-8
2 2 """
3 3 Utilities for working with stack frames.
4 4 """
5 from __future__ import print_function
6 5
7 6 #-----------------------------------------------------------------------------
8 7 # Copyright (C) 2008-2011 The IPython Development Team
9 8 #
10 9 # Distributed under the terms of the BSD License. The full license is in
11 10 # the file COPYING, distributed as part of this software.
12 11 #-----------------------------------------------------------------------------
13 12
14 13 #-----------------------------------------------------------------------------
15 14 # Imports
16 15 #-----------------------------------------------------------------------------
17 16
18 17 import sys
19 18 from IPython.utils import py3compat
20 19
21 20 #-----------------------------------------------------------------------------
22 21 # Code
23 22 #-----------------------------------------------------------------------------
24 23
25 24 @py3compat.doctest_refactor_print
26 25 def extract_vars(*names,**kw):
27 26 """Extract a set of variables by name from another frame.
28 27
29 28 Parameters
30 29 ----------
31 30 *names : str
32 31 One or more variable names which will be extracted from the caller's
33 32 frame.
34 33
35 34 depth : integer, optional
36 35 How many frames in the stack to walk when looking for your variables.
37 36 The default is 0, which will use the frame where the call was made.
38 37
39 38
40 39 Examples
41 40 --------
42 41 ::
43 42
44 43 In [2]: def func(x):
45 44 ...: y = 1
46 45 ...: print(sorted(extract_vars('x','y').items()))
47 46 ...:
48 47
49 48 In [3]: func('hello')
50 49 [('x', 'hello'), ('y', 1)]
51 50 """
52 51
53 52 depth = kw.get('depth',0)
54 53
55 54 callerNS = sys._getframe(depth+1).f_locals
56 55 return dict((k,callerNS[k]) for k in names)
57 56
58 57
59 58 def extract_vars_above(*names):
60 59 """Extract a set of variables by name from another frame.
61 60
 62 61 Similar to extract_vars(), but with a fixed depth of 1, so that names
 63 62 are extracted exactly from the frame above the caller.
64 63
65 64 This is simply a convenience function so that the very common case (for us)
66 65 of skipping exactly 1 frame doesn't have to construct a special dict for
67 66 keyword passing."""
68 67
69 68 callerNS = sys._getframe(2).f_locals
70 69 return dict((k,callerNS[k]) for k in names)
71 70
72 71
73 72 def debugx(expr,pre_msg=''):
74 73 """Print the value of an expression from the caller's frame.
75 74
76 75 Takes an expression, evaluates it in the caller's frame and prints both
77 76 the given expression and the resulting value (as well as a debug mark
 78 77 indicating the name of the calling function). The input must be of a form
79 78 suitable for eval().
80 79
81 80 An optional message can be passed, which will be prepended to the printed
82 81 expr->value pair."""
83 82
84 83 cf = sys._getframe(1)
85 84 print('[DBG:%s] %s%s -> %r' % (cf.f_code.co_name,pre_msg,expr,
86 85 eval(expr,cf.f_globals,cf.f_locals)))
87 86
88 87
89 88 # deactivate it by uncommenting the following line, which makes it a no-op
90 89 #def debugx(expr,pre_msg=''): pass
91 90
92 91 def extract_module_locals(depth=0):
93 92 """Returns (module, locals) of the function `depth` frames away from the caller"""
94 93 f = sys._getframe(depth + 1)
95 94 global_ns = f.f_globals
96 95 module = sys.modules[global_ns['__name__']]
97 96 return (module, f.f_locals)
98 97
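
A sketch of debugx and extract_module_locals, the two helpers above without their own examples, assuming this hunk is IPython.utils.frame; the variable names are illustrative:

    from IPython.utils.frame import debugx, extract_module_locals

    x = 41
    # Evaluates the expression in this frame and prints, roughly:
    # [DBG:<module>] x + 1 -> 42
    debugx('x + 1')

    # Returns the module object and the local namespace of the calling frame.
    module, local_ns = extract_module_locals()
    print(module.__name__, 'x' in local_ns)   # e.g. __main__ True
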
@@ -1,241 +1,239 b''
1 1 # encoding: utf-8
2 2 """
3 3 IO related utilities.
4 4 """
5 5
6 6 # Copyright (c) IPython Development Team.
7 7 # Distributed under the terms of the Modified BSD License.
8 8
9 from __future__ import print_function
10 from __future__ import absolute_import
11 9
12 10
13 11 import atexit
14 12 import os
15 13 import sys
16 14 import tempfile
17 15 import warnings
18 16 from warnings import warn
19 17
20 18 from IPython.utils.decorators import undoc
21 19 from .capture import CapturedIO, capture_output
22 20 from .py3compat import string_types, input, PY3
23 21
24 22 @undoc
25 23 class IOStream:
26 24
27 25 def __init__(self, stream, fallback=None):
28 26 warn('IOStream is deprecated since IPython 5.0, use sys.{stdin,stdout,stderr} instead',
29 27 DeprecationWarning, stacklevel=2)
30 28 if not hasattr(stream,'write') or not hasattr(stream,'flush'):
31 29 if fallback is not None:
32 30 stream = fallback
33 31 else:
34 32 raise ValueError("fallback required, but not specified")
35 33 self.stream = stream
36 34 self._swrite = stream.write
37 35
38 36 # clone all methods not overridden:
39 37 def clone(meth):
40 38 return not hasattr(self, meth) and not meth.startswith('_')
41 39 for meth in filter(clone, dir(stream)):
42 40 setattr(self, meth, getattr(stream, meth))
43 41
44 42 def __repr__(self):
45 43 cls = self.__class__
46 44 tpl = '{mod}.{cls}({args})'
47 45 return tpl.format(mod=cls.__module__, cls=cls.__name__, args=self.stream)
48 46
49 47 def write(self,data):
50 48 warn('IOStream is deprecated since IPython 5.0, use sys.{stdin,stdout,stderr} instead',
51 49 DeprecationWarning, stacklevel=2)
52 50 try:
53 51 self._swrite(data)
54 52 except:
55 53 try:
56 54 # print handles some unicode issues which may trip a plain
57 55 # write() call. Emulate write() by using an empty end
58 56 # argument.
59 57 print(data, end='', file=self.stream)
60 58 except:
61 59 # if we get here, something is seriously broken.
62 60 print('ERROR - failed to write data to stream:', self.stream,
63 61 file=sys.stderr)
64 62
65 63 def writelines(self, lines):
66 64 warn('IOStream is deprecated since IPython 5.0, use sys.{stdin,stdout,stderr} instead',
67 65 DeprecationWarning, stacklevel=2)
68 66 if isinstance(lines, string_types):
69 67 lines = [lines]
70 68 for line in lines:
71 69 self.write(line)
72 70
73 71 # This class used to have a writeln method, but regular files and streams
74 72 # in Python don't have this method. We need to keep this completely
75 73 # compatible so we removed it.
76 74
77 75 @property
78 76 def closed(self):
79 77 return self.stream.closed
80 78
81 79 def close(self):
82 80 pass
83 81
84 82 # setup stdin/stdout/stderr to sys.stdin/sys.stdout/sys.stderr
85 83 devnull = open(os.devnull, 'w')
86 84 atexit.register(devnull.close)
87 85
88 86 # io.std* are deprecated, but don't show our own deprecation warnings
89 87 # during initialization of the deprecated API.
90 88 with warnings.catch_warnings():
91 89 warnings.simplefilter('ignore', DeprecationWarning)
92 90 stdin = IOStream(sys.stdin, fallback=devnull)
93 91 stdout = IOStream(sys.stdout, fallback=devnull)
94 92 stderr = IOStream(sys.stderr, fallback=devnull)
95 93
96 94 class Tee(object):
97 95 """A class to duplicate an output stream to stdout/err.
98 96
99 97 This works in a manner very similar to the Unix 'tee' command.
100 98
101 99 When the object is closed or deleted, it closes the original file given to
102 100 it for duplication.
103 101 """
104 102 # Inspired by:
105 103 # http://mail.python.org/pipermail/python-list/2007-May/442737.html
106 104
107 105 def __init__(self, file_or_name, mode="w", channel='stdout'):
108 106 """Construct a new Tee object.
109 107
110 108 Parameters
111 109 ----------
112 110 file_or_name : filename or open filehandle (writable)
113 111 File that will be duplicated
114 112
115 113 mode : optional, valid mode for open().
 116 114 If a filename was given, open with this mode.
117 115
118 116 channel : str, one of ['stdout', 'stderr']
119 117 """
120 118 if channel not in ['stdout', 'stderr']:
121 119 raise ValueError('Invalid channel spec %s' % channel)
122 120
123 121 if hasattr(file_or_name, 'write') and hasattr(file_or_name, 'seek'):
124 122 self.file = file_or_name
125 123 else:
126 124 self.file = open(file_or_name, mode)
127 125 self.channel = channel
128 126 self.ostream = getattr(sys, channel)
129 127 setattr(sys, channel, self)
130 128 self._closed = False
131 129
132 130 def close(self):
133 131 """Close the file and restore the channel."""
134 132 self.flush()
135 133 setattr(sys, self.channel, self.ostream)
136 134 self.file.close()
137 135 self._closed = True
138 136
139 137 def write(self, data):
140 138 """Write data to both channels."""
141 139 self.file.write(data)
142 140 self.ostream.write(data)
143 141 self.ostream.flush()
144 142
145 143 def flush(self):
146 144 """Flush both channels."""
147 145 self.file.flush()
148 146 self.ostream.flush()
149 147
150 148 def __del__(self):
151 149 if not self._closed:
152 150 self.close()
153 151
154 152
155 153 def ask_yes_no(prompt, default=None, interrupt=None):
156 154 """Asks a question and returns a boolean (y/n) answer.
157 155
158 156 If default is given (one of 'y','n'), it is used if the user input is
159 157 empty. If interrupt is given (one of 'y','n'), it is used if the user
160 158 presses Ctrl-C. Otherwise the question is repeated until an answer is
161 159 given.
162 160
163 161 An EOF is treated as the default answer. If there is no default, an
164 162 exception is raised to prevent infinite loops.
165 163
166 164 Valid answers are: y/yes/n/no (match is not case sensitive)."""
167 165
168 166 answers = {'y':True,'n':False,'yes':True,'no':False}
169 167 ans = None
170 168 while ans not in answers.keys():
171 169 try:
172 170 ans = input(prompt+' ').lower()
173 171 if not ans: # response was an empty string
174 172 ans = default
175 173 except KeyboardInterrupt:
176 174 if interrupt:
177 175 ans = interrupt
178 176 except EOFError:
179 177 if default in answers.keys():
180 178 ans = default
181 179 print()
182 180 else:
183 181 raise
184 182
185 183 return answers[ans]
186 184
187 185
188 186 def temp_pyfile(src, ext='.py'):
189 187 """Make a temporary python file, return filename and filehandle.
190 188
191 189 Parameters
192 190 ----------
193 191 src : string or list of strings (no need for ending newlines if list)
194 192 Source code to be written to the file.
195 193
196 194 ext : optional, string
197 195 Extension for the generated file.
198 196
199 197 Returns
200 198 -------
201 199 (filename, open filehandle)
202 200 It is the caller's responsibility to close the open file and unlink it.
203 201 """
204 202 fname = tempfile.mkstemp(ext)[1]
205 203 f = open(fname,'w')
206 204 f.write(src)
207 205 f.flush()
208 206 return fname, f
209 207
210 208 def atomic_writing(*args, **kwargs):
211 209 """DEPRECATED: moved to notebook.services.contents.fileio"""
212 210 warn("IPython.utils.io.atomic_writing has moved to notebook.services.contents.fileio")
213 211 from notebook.services.contents.fileio import atomic_writing
214 212 return atomic_writing(*args, **kwargs)
215 213
216 214 def raw_print(*args, **kw):
217 215 """Raw print to sys.__stdout__, otherwise identical interface to print()."""
218 216
219 217 print(*args, sep=kw.get('sep', ' '), end=kw.get('end', '\n'),
220 218 file=sys.__stdout__)
221 219 sys.__stdout__.flush()
222 220
223 221
224 222 def raw_print_err(*args, **kw):
225 223 """Raw print to sys.__stderr__, otherwise identical interface to print()."""
226 224
227 225 print(*args, sep=kw.get('sep', ' '), end=kw.get('end', '\n'),
228 226 file=sys.__stderr__)
229 227 sys.__stderr__.flush()
230 228
231 229
232 230 # Short aliases for quick debugging, do NOT use these in production code.
233 231 rprint = raw_print
234 232 rprinte = raw_print_err
235 233
236 234
237 235 def unicode_std_stream(stream='stdout'):
238 236 """DEPRECATED, moved to nbconvert.utils.io"""
239 237 warn("IPython.utils.io.unicode_std_stream has moved to nbconvert.utils.io")
240 238 from nbconvert.utils.io import unicode_std_stream
241 239 return unicode_std_stream(stream)
@@ -1,7 +1,6 b''
1 from __future__ import absolute_import
2 1
3 2 from warnings import warn
4 3
5 4 warn("IPython.utils.log has moved to traitlets.log")
6 5
7 6 from traitlets.log import *
@@ -1,125 +1,124 b''
1 1 """Utility functions for finding modules
2 2
3 3 Utility functions for finding modules on sys.path.
4 4
5 5 `find_mod` finds named module on sys.path.
6 6
7 7 `get_init` helper function that finds __init__ file in a directory.
8 8
 9 9 `find_module` variant of imp.find_module from the standard library that returns
 10 10 only the path to the module, not an open file object.
11 11
12 12
13 13
14 14 """
15 15 #-----------------------------------------------------------------------------
16 16 # Copyright (c) 2011, the IPython Development Team.
17 17 #
18 18 # Distributed under the terms of the Modified BSD License.
19 19 #
20 20 # The full license is in the file COPYING.txt, distributed with this software.
21 21 #-----------------------------------------------------------------------------
22 22
23 23 #-----------------------------------------------------------------------------
24 24 # Imports
25 25 #-----------------------------------------------------------------------------
26 from __future__ import print_function
27 26
28 27 # Stdlib imports
29 28 import imp
30 29 import os
31 30
32 31 # Third-party imports
33 32
34 33 # Our own imports
35 34
36 35
37 36 #-----------------------------------------------------------------------------
38 37 # Globals and constants
39 38 #-----------------------------------------------------------------------------
40 39
41 40 #-----------------------------------------------------------------------------
42 41 # Local utilities
43 42 #-----------------------------------------------------------------------------
44 43
45 44 #-----------------------------------------------------------------------------
46 45 # Classes and functions
47 46 #-----------------------------------------------------------------------------
48 47 def find_module(name, path=None):
49 48 """imp.find_module variant that only return path of module.
50 49
51 50 The `imp.find_module` returns a filehandle that we are not interested in.
52 51 Also we ignore any bytecode files that `imp.find_module` finds.
53 52
54 53 Parameters
55 54 ----------
56 55 name : str
57 56 name of module to locate
58 57 path : list of str
59 58 list of paths to search for `name`. If path=None then search sys.path
60 59
61 60 Returns
62 61 -------
63 62 filename : str
64 63 Return full path of module or None if module is missing or does not have
65 64 .py or .pyw extension
66 65 """
67 66 if name is None:
68 67 return None
69 68 try:
70 69 file, filename, _ = imp.find_module(name, path)
71 70 except ImportError:
72 71 return None
73 72 if file is None:
74 73 return filename
75 74 else:
76 75 file.close()
77 76 if os.path.splitext(filename)[1] in [".py", ".pyc"]:
78 77 return filename
79 78 else:
80 79 return None
81 80
82 81 def get_init(dirname):
83 82 """Get __init__ file path for module directory
84 83
85 84 Parameters
86 85 ----------
87 86 dirname : str
88 87 Find the __init__ file in directory `dirname`
89 88
90 89 Returns
91 90 -------
92 91 init_path : str
93 92 Path to __init__ file
94 93 """
95 94 fbase = os.path.join(dirname, "__init__")
96 95 for ext in [".py", ".pyw"]:
97 96 fname = fbase + ext
98 97 if os.path.isfile(fname):
99 98 return fname
100 99
101 100
102 101 def find_mod(module_name):
103 102 """Find module `module_name` on sys.path
104 103
105 104 Return the path to module `module_name`. If `module_name` refers to
106 105 a module directory then return path to __init__ file. Return full
107 106 path of module or None if module is missing or does not have .py or .pyw
108 107 extension. We are not interested in running bytecode.
109 108
110 109 Parameters
111 110 ----------
112 111 module_name : str
113 112
114 113 Returns
115 114 -------
116 115 modulepath : str
117 116 Path to module `module_name`.
118 117 """
119 118 parts = module_name.split(".")
120 119 basepath = find_module(parts[0])
121 120 for submodname in parts[1:]:
122 121 basepath = find_module(submodname, [basepath])
123 122 if basepath and os.path.isdir(basepath):
124 123 basepath = get_init(basepath)
125 124 return basepath
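
A sketch of find_mod and find_module in use, assuming this hunk is IPython.utils.module_paths; the imp-based lookup only works on Python versions that still ship the imp module:

    from IPython.utils.module_paths import find_mod, find_module

    print(find_module('os'))           # e.g. .../lib/python3.x/os.py
    print(find_mod('email.message'))   # .../email/message.py
    print(find_mod('no_such_module'))  # None
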
@@ -1,249 +1,248 b''
1 1 """
2 2 Tools to open .py files as Unicode, using the encoding specified within the file,
3 3 as per PEP 263.
4 4
5 5 Much of the code is taken from the tokenize module in Python 3.2.
6 6 """
7 from __future__ import absolute_import
8 7
9 8 import io
10 9 from io import TextIOWrapper, BytesIO
11 10 import os.path
12 11 import re
13 12
14 13 from .py3compat import unicode_type
15 14
16 15 cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)", re.UNICODE)
17 16 cookie_comment_re = re.compile(r"^\s*#.*coding[:=]\s*([-\w.]+)", re.UNICODE)
18 17
19 18 try:
20 19 # Available in Python 3
21 20 from tokenize import detect_encoding
22 21 except ImportError:
23 22 from codecs import lookup, BOM_UTF8
24 23
25 24 # Copied from Python 3.2 tokenize
26 25 def _get_normal_name(orig_enc):
27 26 """Imitates get_normal_name in tokenizer.c."""
28 27 # Only care about the first 12 characters.
29 28 enc = orig_enc[:12].lower().replace("_", "-")
30 29 if enc == "utf-8" or enc.startswith("utf-8-"):
31 30 return "utf-8"
32 31 if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
33 32 enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
34 33 return "iso-8859-1"
35 34 return orig_enc
36 35
37 36 # Copied from Python 3.2 tokenize
38 37 def detect_encoding(readline):
39 38 """
40 39 The detect_encoding() function is used to detect the encoding that should
41 40 be used to decode a Python source file. It requires one argument, readline,
42 41 in the same way as the tokenize() generator.
43 42
44 43 It will call readline a maximum of twice, and return the encoding used
45 44 (as a string) and a list of any lines (left as bytes) it has read in.
46 45
47 46 It detects the encoding from the presence of a utf-8 bom or an encoding
48 47 cookie as specified in pep-0263. If both a bom and a cookie are present,
49 48 but disagree, a SyntaxError will be raised. If the encoding cookie is an
50 49 invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
51 50 'utf-8-sig' is returned.
52 51
53 52 If no encoding is specified, then the default of 'utf-8' will be returned.
54 53 """
55 54 bom_found = False
56 55 encoding = None
57 56 default = 'utf-8'
58 57 def read_or_stop():
59 58 try:
60 59 return readline()
61 60 except StopIteration:
62 61 return b''
63 62
64 63 def find_cookie(line):
65 64 try:
66 65 line_string = line.decode('ascii')
67 66 except UnicodeDecodeError:
68 67 return None
69 68
70 69 matches = cookie_re.findall(line_string)
71 70 if not matches:
72 71 return None
73 72 encoding = _get_normal_name(matches[0])
74 73 try:
75 74 codec = lookup(encoding)
76 75 except LookupError:
77 76 # This behaviour mimics the Python interpreter
78 77 raise SyntaxError("unknown encoding: " + encoding)
79 78
80 79 if bom_found:
81 80 if codec.name != 'utf-8':
82 81 # This behaviour mimics the Python interpreter
83 82 raise SyntaxError('encoding problem: utf-8')
84 83 encoding += '-sig'
85 84 return encoding
86 85
87 86 first = read_or_stop()
88 87 if first.startswith(BOM_UTF8):
89 88 bom_found = True
90 89 first = first[3:]
91 90 default = 'utf-8-sig'
92 91 if not first:
93 92 return default, []
94 93
95 94 encoding = find_cookie(first)
96 95 if encoding:
97 96 return encoding, [first]
98 97
99 98 second = read_or_stop()
100 99 if not second:
101 100 return default, [first]
102 101
103 102 encoding = find_cookie(second)
104 103 if encoding:
105 104 return encoding, [first, second]
106 105
107 106 return default, [first, second]
108 107
109 108 try:
110 109 # Available in Python 3.2 and above.
111 110 from tokenize import open
112 111 except ImportError:
113 112 # Copied from Python 3.2 tokenize
114 113 def open(filename):
115 114 """Open a file in read only mode using the encoding detected by
116 115 detect_encoding().
117 116 """
118 117 buffer = io.open(filename, 'rb') # Tweaked to use io.open for Python 2
119 118 encoding, lines = detect_encoding(buffer.readline)
120 119 buffer.seek(0)
121 120 text = TextIOWrapper(buffer, encoding, line_buffering=True)
122 121 text.mode = 'r'
123 122 return text
124 123
125 124 def source_to_unicode(txt, errors='replace', skip_encoding_cookie=True):
126 125 """Converts a bytes string with python source code to unicode.
127 126
128 127 Unicode strings are passed through unchanged. Byte strings are checked
129 128 for the python source file encoding cookie to determine encoding.
130 129 txt can be either a bytes buffer or a string containing the source
131 130 code.
132 131 """
133 132 if isinstance(txt, unicode_type):
134 133 return txt
135 134 if isinstance(txt, bytes):
136 135 buffer = BytesIO(txt)
137 136 else:
138 137 buffer = txt
139 138 try:
140 139 encoding, _ = detect_encoding(buffer.readline)
141 140 except SyntaxError:
142 141 encoding = "ascii"
143 142 buffer.seek(0)
144 143 text = TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True)
145 144 text.mode = 'r'
146 145 if skip_encoding_cookie:
147 146 return u"".join(strip_encoding_cookie(text))
148 147 else:
149 148 return text.read()
150 149
151 150 def strip_encoding_cookie(filelike):
152 151 """Generator to pull lines from a text-mode file, skipping the encoding
153 152 cookie if it is found in the first two lines.
154 153 """
155 154 it = iter(filelike)
156 155 try:
157 156 first = next(it)
158 157 if not cookie_comment_re.match(first):
159 158 yield first
160 159 second = next(it)
161 160 if not cookie_comment_re.match(second):
162 161 yield second
163 162 except StopIteration:
164 163 return
165 164
166 165 for line in it:
167 166 yield line
168 167
169 168 def read_py_file(filename, skip_encoding_cookie=True):
170 169 """Read a Python file, using the encoding declared inside the file.
171 170
172 171 Parameters
173 172 ----------
174 173 filename : str
175 174 The path to the file to read.
176 175 skip_encoding_cookie : bool
177 176 If True (the default), and the encoding declaration is found in the first
178 177 two lines, that line will be excluded from the output - compiling a
179 178 unicode string with an encoding declaration is a SyntaxError in Python 2.
180 179
181 180 Returns
182 181 -------
183 182 A unicode string containing the contents of the file.
184 183 """
185 184 with open(filename) as f: # the open function defined in this module.
186 185 if skip_encoding_cookie:
187 186 return "".join(strip_encoding_cookie(f))
188 187 else:
189 188 return f.read()
190 189
191 190 def read_py_url(url, errors='replace', skip_encoding_cookie=True):
192 191 """Read a Python file from a URL, using the encoding declared inside the file.
193 192
194 193 Parameters
195 194 ----------
196 195 url : str
197 196 The URL from which to fetch the file.
198 197 errors : str
199 198 How to handle decoding errors in the file. Options are the same as for
200 199 bytes.decode(), but here 'replace' is the default.
201 200 skip_encoding_cookie : bool
202 201 If True (the default), and the encoding declaration is found in the first
203 202 two lines, that line will be excluded from the output - compiling a
204 203 unicode string with an encoding declaration is a SyntaxError in Python 2.
205 204
206 205 Returns
207 206 -------
208 207 A unicode string containing the contents of the file.
209 208 """
210 209 # Deferred import for faster start
211 210 try:
212 211 from urllib.request import urlopen # Py 3
213 212 except ImportError:
214 213 from urllib import urlopen
215 214 response = urlopen(url)
216 215 buffer = io.BytesIO(response.read())
217 216 return source_to_unicode(buffer, errors, skip_encoding_cookie)
218 217
219 218 def _list_readline(x):
220 219 """Given a list, returns a readline() function that returns the next element
221 220 with each call.
222 221 """
223 222 x = iter(x)
224 223 def readline():
225 224 return next(x)
226 225 return readline
227 226
228 227 # Code for going between .py files and cached .pyc files ----------------------
229 228
230 229 try: # Python 3.2, see PEP 3147
231 230 try:
232 231 from importlib.util import source_from_cache, cache_from_source
233 232 except ImportError:
234 233 # deprecated since Python 3.4
235 234 from imp import source_from_cache, cache_from_source
236 235 except ImportError:
237 236 # Python <= 3.1: .pyc files go next to .py
238 237 def source_from_cache(path):
239 238 basename, ext = os.path.splitext(path)
240 239 if ext not in ('.pyc', '.pyo'):
241 240 raise ValueError('Not a cached Python file extension', ext)
242 241 # Should we look for .pyw files?
243 242 return basename + '.py'
244 243
245 244 def cache_from_source(path, debug_override=None):
246 245 if debug_override is None:
247 246 debug_override = __debug__
248 247 basename, ext = os.path.splitext(path)
249 248 return basename + ('.pyc' if debug_override else '.pyo')
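
A short usage sketch for the readers defined above; the file name is hypothetical::

    from IPython.utils.openpy import read_py_file, source_to_unicode

    # Read a source file using the encoding declared inside it (PEP 263);
    # by default the encoding cookie line itself is dropped from the result.
    src = read_py_file("some_script.py", skip_encoding_cookie=True)

    # Decode raw bytes the same way; unicode input passes through unchanged.
    text = source_to_unicode(b"# -*- coding: utf-8 -*-\nprint('hi')\n")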
@@ -1,70 +1,69 b''
1 1 # encoding: utf-8
2 2 """
3 3 Utilities for working with external processes.
4 4 """
5 5
6 6 # Copyright (c) IPython Development Team.
7 7 # Distributed under the terms of the Modified BSD License.
8 8
9 from __future__ import print_function
10 9
11 10 import os
12 11 import sys
13 12
14 13 if sys.platform == 'win32':
15 14 from ._process_win32 import system, getoutput, arg_split, check_pid
16 15 elif sys.platform == 'cli':
17 16 from ._process_cli import system, getoutput, arg_split, check_pid
18 17 else:
19 18 from ._process_posix import system, getoutput, arg_split, check_pid
20 19
21 20 from ._process_common import getoutputerror, get_output_error_code, process_handler
22 21 from . import py3compat
23 22
24 23
25 24 class FindCmdError(Exception):
26 25 pass
27 26
28 27
29 28 def find_cmd(cmd):
30 29 """Find absolute path to executable cmd in a cross platform manner.
31 30
32 31 This function tries to determine the full path to a command line program
33 32 using `which` on Unix/Linux/OS X and `win32api` on Windows. Most of the
34 33 time it will use the version that is first on the users `PATH`.
35 34
36 35 Warning: don't use this to find IPython command-line programs, as there
37 36 is a risk you will find the wrong one. Instead, find those using the
38 37 following code and looking for the application itself::
39 38
40 39 import sys
41 40 argv = [sys.executable, '-m', 'IPython']
42 41
43 42 Parameters
44 43 ----------
45 44 cmd : str
46 45 The command line program to look for.
47 46 """
48 47 path = py3compat.which(cmd)
49 48 if path is None:
50 49 raise FindCmdError('command could not be found: %s' % cmd)
51 50 return path
52 51
53 52
54 53 def abbrev_cwd():
55 54 """ Return abbreviated version of cwd, e.g. d:mydir """
56 55 cwd = py3compat.getcwd().replace('\\','/')
57 56 drivepart = ''
58 57 tail = cwd
59 58 if sys.platform == 'win32':
60 59 if len(cwd) < 4:
61 60 return cwd
62 61 drivepart,tail = os.path.splitdrive(cwd)
63 62
64 63
65 64 parts = tail.split('/')
66 65 if len(parts) > 2:
67 66 tail = '/'.join(parts[-2:])
68 67
69 68 return (drivepart + (
70 69 cwd == '/' and '/' or tail))
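
As a rough illustration of the helpers above (output varies by platform and PATH)::

    from IPython.utils.process import find_cmd, FindCmdError, getoutput, abbrev_cwd

    try:
        print(find_cmd("python"))   # absolute path of the first 'python' on PATH
    except FindCmdError:
        print("python not found on PATH")

    print(getoutput("echo hello"))  # captured stdout of a shell command
    print(abbrev_cwd())             # shortened form of the current directory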
@@ -1,145 +1,144 b''
1 1 """TemporaryDirectory class, copied from Python 3.2.
2 2
3 3 This is copied from the stdlib, where it is standard from Python 3.2 onwards.
4 4 """
5 from __future__ import print_function
6 5
7 6 import os as _os
8 7 import warnings as _warnings
9 8 import sys as _sys
10 9
11 10 # This code should only be used in Python versions < 3.2, since after that we
12 11 # can rely on the stdlib itself.
13 12 try:
14 13 from tempfile import TemporaryDirectory
15 14
16 15 except ImportError:
17 16 from tempfile import mkdtemp, template
18 17
19 18 class TemporaryDirectory(object):
20 19 """Create and return a temporary directory. This has the same
21 20 behavior as mkdtemp but can be used as a context manager. For
22 21 example:
23 22
24 23 with TemporaryDirectory() as tmpdir:
25 24 ...
26 25
27 26 Upon exiting the context, the directory and everything contained
28 27 in it are removed.
29 28 """
30 29
31 30 def __init__(self, suffix="", prefix=template, dir=None):
32 31 self.name = mkdtemp(suffix, prefix, dir)
33 32 self._closed = False
34 33
35 34 def __enter__(self):
36 35 return self.name
37 36
38 37 def cleanup(self, _warn=False):
39 38 if self.name and not self._closed:
40 39 try:
41 40 self._rmtree(self.name)
42 41 except (TypeError, AttributeError) as ex:
43 42 # Issue #10188: Emit a warning on stderr
44 43 # if the directory could not be cleaned
45 44 # up due to missing globals
46 45 if "None" not in str(ex):
47 46 raise
48 47 print("ERROR: {!r} while cleaning up {!r}".format(ex, self,),
49 48 file=_sys.stderr)
50 49 return
51 50 self._closed = True
52 51 if _warn:
53 52 self._warn("Implicitly cleaning up {!r}".format(self),
54 53 Warning)
55 54
56 55 def __exit__(self, exc, value, tb):
57 56 self.cleanup()
58 57
59 58 def __del__(self):
60 59 # Issue a ResourceWarning if implicit cleanup needed
61 60 self.cleanup(_warn=True)
62 61
63 62
64 63 # XXX (ncoghlan): The following code attempts to make
65 64 # this class tolerant of the module nulling out process
66 65 # that happens during CPython interpreter shutdown
67 66 # Alas, it doesn't actually manage it. See issue #10188
68 67 _listdir = staticmethod(_os.listdir)
69 68 _path_join = staticmethod(_os.path.join)
70 69 _isdir = staticmethod(_os.path.isdir)
71 70 _remove = staticmethod(_os.remove)
72 71 _rmdir = staticmethod(_os.rmdir)
73 72 _os_error = _os.error
74 73 _warn = _warnings.warn
75 74
76 75 def _rmtree(self, path):
77 76 # Essentially a stripped down version of shutil.rmtree. We can't
78 77 # use globals because they may be None'ed out at shutdown.
79 78 for name in self._listdir(path):
80 79 fullname = self._path_join(path, name)
81 80 try:
82 81 isdir = self._isdir(fullname)
83 82 except self._os_error:
84 83 isdir = False
85 84 if isdir:
86 85 self._rmtree(fullname)
87 86 else:
88 87 try:
89 88 self._remove(fullname)
90 89 except self._os_error:
91 90 pass
92 91 try:
93 92 self._rmdir(path)
94 93 except self._os_error:
95 94 pass
96 95
97 96
98 97 class NamedFileInTemporaryDirectory(object):
99 98
100 99 def __init__(self, filename, mode='w+b', bufsize=-1, **kwds):
101 100 """
102 101 Open a file named `filename` in a temporary directory.
103 102
104 103 This context manager is preferred over `NamedTemporaryFile` in
105 104 stdlib `tempfile` when one needs to reopen the file.
106 105
107 106 Arguments `mode` and `bufsize` are passed to `open`.
108 107 Rest of the arguments are passed to `TemporaryDirectory`.
109 108
110 109 """
111 110 self._tmpdir = TemporaryDirectory(**kwds)
112 111 path = _os.path.join(self._tmpdir.name, filename)
113 112 self.file = open(path, mode, bufsize)
114 113
115 114 def cleanup(self):
116 115 self.file.close()
117 116 self._tmpdir.cleanup()
118 117
119 118 __del__ = cleanup
120 119
121 120 def __enter__(self):
122 121 return self.file
123 122
124 123 def __exit__(self, type, value, traceback):
125 124 self.cleanup()
126 125
127 126
128 127 class TemporaryWorkingDirectory(TemporaryDirectory):
129 128 """
130 129 Creates a temporary directory and sets the cwd to that directory.
131 130 Automatically reverts to previous cwd upon cleanup.
132 131 Usage example:
133 132
134 133 with TemporaryWorkingDirectory() as tmpdir:
135 134 ...
136 135 """
137 136 def __enter__(self):
138 137 self.old_wd = _os.getcwd()
139 138 _os.chdir(self.name)
140 139 return super(TemporaryWorkingDirectory, self).__enter__()
141 140
142 141 def __exit__(self, exc, value, tb):
143 142 _os.chdir(self.old_wd)
144 143 return super(TemporaryWorkingDirectory, self).__exit__(exc, value, tb)
145 144
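
A brief sketch of the two context managers above; the file names are arbitrary::

    import os
    from IPython.utils.tempdir import (NamedFileInTemporaryDirectory,
                                       TemporaryWorkingDirectory)

    # Work inside a throwaway directory; the previous cwd is restored on exit.
    with TemporaryWorkingDirectory() as tmpdir:
        with open("scratch.txt", "w") as f:
            f.write("hello")
        print(os.listdir(tmpdir))   # ['scratch.txt']

    # A named file that lives only as long as its temporary directory.
    with NamedFileInTemporaryDirectory("notes.txt", mode="w") as f:
        f.write("temporary notes")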
@@ -1,160 +1,159 b''
1 1 # encoding: utf-8
2 2 """Tests for IPython.utils.capture"""
3 3
4 4 #-----------------------------------------------------------------------------
5 5 # Copyright (C) 2013 The IPython Development Team
6 6 #
7 7 # Distributed under the terms of the BSD License. The full license is in
8 8 # the file COPYING, distributed as part of this software.
9 9 #-----------------------------------------------------------------------------
10 10
11 11 #-----------------------------------------------------------------------------
12 12 # Imports
13 13 #-----------------------------------------------------------------------------
14 14
15 from __future__ import print_function
16 15
17 16 import sys
18 17
19 18 import nose.tools as nt
20 19
21 20 from IPython.utils import capture
22 21
23 22 #-----------------------------------------------------------------------------
24 23 # Globals
25 24 #-----------------------------------------------------------------------------
26 25
27 26 _mime_map = dict(
28 27 _repr_png_="image/png",
29 28 _repr_jpeg_="image/jpeg",
30 29 _repr_svg_="image/svg+xml",
31 30 _repr_html_="text/html",
32 31 _repr_json_="application/json",
33 32 _repr_javascript_="application/javascript",
34 33 )
35 34
36 35 basic_data = {
37 36 'image/png' : b'binarydata',
38 37 'text/html' : "<b>bold</b>",
39 38 }
40 39 basic_metadata = {
41 40 'image/png' : {
42 41 'width' : 10,
43 42 'height' : 20,
44 43 },
45 44 }
46 45
47 46 full_data = {
48 47 'image/png' : b'binarydata',
49 48 'image/jpeg' : b'binarydata',
50 49 'image/svg+xml' : "<svg>",
51 50 'text/html' : "<b>bold</b>",
52 51 'application/javascript' : "alert();",
53 52 'application/json' : "{}",
54 53 }
55 54 full_metadata = {
56 55 'image/png' : {"png" : "exists"},
57 56 'image/jpeg' : {"jpeg" : "exists"},
58 57 'image/svg+xml' : {"svg" : "exists"},
59 58 'text/html' : {"html" : "exists"},
60 59 'application/javascript' : {"js" : "exists"},
61 60 'application/json' : {"json" : "exists"},
62 61 }
63 62
64 63 hello_stdout = "hello, stdout"
65 64 hello_stderr = "hello, stderr"
66 65
67 66 #-----------------------------------------------------------------------------
68 67 # Test Functions
69 68 #-----------------------------------------------------------------------------
70 69
71 70 def test_rich_output_empty():
72 71 """RichOutput with no args"""
73 72 rich = capture.RichOutput()
74 73 for method, mime in _mime_map.items():
75 74 yield nt.assert_equal, getattr(rich, method)(), None
76 75
77 76 def test_rich_output():
78 77 """test RichOutput basics"""
79 78 data = basic_data
80 79 metadata = basic_metadata
81 80 rich = capture.RichOutput(data=data, metadata=metadata)
82 81 yield nt.assert_equal, rich._repr_html_(), data['text/html']
83 82 yield nt.assert_equal, rich._repr_png_(), (data['image/png'], metadata['image/png'])
84 83 yield nt.assert_equal, rich._repr_latex_(), None
85 84 yield nt.assert_equal, rich._repr_javascript_(), None
86 85 yield nt.assert_equal, rich._repr_svg_(), None
87 86
88 87 def test_rich_output_no_metadata():
89 88 """test RichOutput with no metadata"""
90 89 data = full_data
91 90 rich = capture.RichOutput(data=data)
92 91 for method, mime in _mime_map.items():
93 92 yield nt.assert_equal, getattr(rich, method)(), data[mime]
94 93
95 94 def test_rich_output_metadata():
96 95 """test RichOutput with metadata"""
97 96 data = full_data
98 97 metadata = full_metadata
99 98 rich = capture.RichOutput(data=data, metadata=metadata)
100 99 for method, mime in _mime_map.items():
101 100 yield nt.assert_equal, getattr(rich, method)(), (data[mime], metadata[mime])
102 101
103 102 def test_rich_output_display():
104 103 """test RichOutput.display
105 104
106 105 This is a bit circular, because we are actually using the capture code we are testing
107 106 to test itself.
108 107 """
109 108 data = full_data
110 109 rich = capture.RichOutput(data=data)
111 110 with capture.capture_output() as cap:
112 111 rich.display()
113 112 yield nt.assert_equal, len(cap.outputs), 1
114 113 rich2 = cap.outputs[0]
115 114 yield nt.assert_equal, rich2.data, rich.data
116 115 yield nt.assert_equal, rich2.metadata, rich.metadata
117 116
118 117 def test_capture_output():
119 118 """capture_output works"""
120 119 rich = capture.RichOutput(data=full_data)
121 120 with capture.capture_output() as cap:
122 121 print(hello_stdout, end="")
123 122 print(hello_stderr, end="", file=sys.stderr)
124 123 rich.display()
125 124 yield nt.assert_equal, hello_stdout, cap.stdout
126 125 yield nt.assert_equal, hello_stderr, cap.stderr
127 126
128 127 def test_capture_output_no_stdout():
129 128 """test capture_output(stdout=False)"""
130 129 rich = capture.RichOutput(data=full_data)
131 130 with capture.capture_output(stdout=False) as cap:
132 131 print(hello_stdout, end="")
133 132 print(hello_stderr, end="", file=sys.stderr)
134 133 rich.display()
135 134 yield nt.assert_equal, "", cap.stdout
136 135 yield nt.assert_equal, hello_stderr, cap.stderr
137 136 yield nt.assert_equal, len(cap.outputs), 1
138 137
139 138 def test_capture_output_no_stderr():
140 139 """test capture_output(stderr=False)"""
141 140 rich = capture.RichOutput(data=full_data)
142 141 # add nested capture_output so stderr doesn't make it to nose output
143 142 with capture.capture_output(), capture.capture_output(stderr=False) as cap:
144 143 print(hello_stdout, end="")
145 144 print(hello_stderr, end="", file=sys.stderr)
146 145 rich.display()
147 146 yield nt.assert_equal, hello_stdout, cap.stdout
148 147 yield nt.assert_equal, "", cap.stderr
149 148 yield nt.assert_equal, len(cap.outputs), 1
150 149
151 150 def test_capture_output_no_display():
152 151 """test capture_output(display=False)"""
153 152 rich = capture.RichOutput(data=full_data)
154 153 with capture.capture_output(display=False) as cap:
155 154 print(hello_stdout, end="")
156 155 print(hello_stderr, end="", file=sys.stderr)
157 156 rich.display()
158 157 yield nt.assert_equal, hello_stdout, cap.stdout
159 158 yield nt.assert_equal, hello_stderr, cap.stderr
160 159 yield nt.assert_equal, cap.outputs, [] No newline at end of file
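
Outside the test suite, the API exercised above reads roughly like this (a sketch, not part of the changeset)::

    import sys
    from IPython.utils import capture

    with capture.capture_output() as cap:
        print("to stdout")
        print("to stderr", file=sys.stderr)

    print(repr(cap.stdout))   # 'to stdout\n'
    print(repr(cap.stderr))   # 'to stderr\n'
    print(cap.outputs)        # rich display outputs published while capturing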
@@ -1,87 +1,85 b''
1 1 # encoding: utf-8
2 2 """Tests for io.py"""
3 3
4 4 # Copyright (c) IPython Development Team.
5 5 # Distributed under the terms of the Modified BSD License.
6 6
7 from __future__ import print_function
8 from __future__ import absolute_import
9 7
10 8 import io as stdlib_io
11 9 import os.path
12 10 import stat
13 11 import sys
14 12
15 13 from subprocess import Popen, PIPE
16 14 import unittest
17 15
18 16 import nose.tools as nt
19 17
20 18 from IPython.testing.decorators import skipif, skip_win32
21 19 from IPython.utils.io import Tee, capture_output
22 20 from IPython.utils.py3compat import doctest_refactor_print, PY3
23 21 from IPython.utils.tempdir import TemporaryDirectory
24 22
25 23 if PY3:
26 24 from io import StringIO
27 25 else:
28 26 from StringIO import StringIO
29 27
30 28
31 29 def test_tee_simple():
32 30 "Very simple check with stdout only"
33 31 chan = StringIO()
34 32 text = 'Hello'
35 33 tee = Tee(chan, channel='stdout')
36 34 print(text, file=chan)
37 35 nt.assert_equal(chan.getvalue(), text+"\n")
38 36
39 37
40 38 class TeeTestCase(unittest.TestCase):
41 39
42 40 def tchan(self, channel, check='close'):
43 41 trap = StringIO()
44 42 chan = StringIO()
45 43 text = 'Hello'
46 44
47 45 std_ori = getattr(sys, channel)
48 46 setattr(sys, channel, trap)
49 47
50 48 tee = Tee(chan, channel=channel)
51 49 print(text, end='', file=chan)
52 50 setattr(sys, channel, std_ori)
53 51 trap_val = trap.getvalue()
54 52 nt.assert_equal(chan.getvalue(), text)
55 53 if check=='close':
56 54 tee.close()
57 55 else:
58 56 del tee
59 57
60 58 def test(self):
61 59 for chan in ['stdout', 'stderr']:
62 60 for check in ['close', 'del']:
63 61 self.tchan(chan, check)
64 62
65 63 def test_io_init():
66 64 """Test that io.stdin/out/err exist at startup"""
67 65 for name in ('stdin', 'stdout', 'stderr'):
68 66 cmd = doctest_refactor_print("from IPython.utils import io;print io.%s.__class__"%name)
69 67 p = Popen([sys.executable, '-c', cmd],
70 68 stdout=PIPE)
71 69 p.wait()
72 70 classname = p.stdout.read().strip().decode('ascii')
73 71 # __class__ is a reference to the class object in Python 3, so we can't
74 72 # just test for string equality.
75 73 assert 'IPython.utils.io.IOStream' in classname, classname
76 74
77 75 def test_capture_output():
78 76 """capture_output() context works"""
79 77
80 78 with capture_output() as io:
81 79 print('hi, stdout')
82 80 print('hi, stderr', file=sys.stderr)
83 81
84 82 nt.assert_equal(io.stdout, 'hi, stdout\n')
85 83 nt.assert_equal(io.stderr, 'hi, stderr\n')
86 84
87 85
@@ -1,128 +1,127 b''
1 1 # encoding: utf-8
2 2 """Tests for IPython.utils.module_paths.py"""
3 3
4 4 #-----------------------------------------------------------------------------
5 5 # Copyright (C) 2008-2011 The IPython Development Team
6 6 #
7 7 # Distributed under the terms of the BSD License. The full license is in
8 8 # the file COPYING, distributed as part of this software.
9 9 #-----------------------------------------------------------------------------
10 10
11 11 #-----------------------------------------------------------------------------
12 12 # Imports
13 13 #-----------------------------------------------------------------------------
14 14
15 from __future__ import with_statement
16 15
17 16 import os
18 17 import shutil
19 18 import sys
20 19 import tempfile
21 20
22 21 from os.path import join, abspath, split
23 22
24 23 from IPython.testing.tools import make_tempfile
25 24
26 25 import IPython.utils.module_paths as mp
27 26
28 27 import nose.tools as nt
29 28
30 29 env = os.environ
31 30 TEST_FILE_PATH = split(abspath(__file__))[0]
32 31 TMP_TEST_DIR = tempfile.mkdtemp()
33 32 #
34 33 # Setup/teardown functions/decorators
35 34 #
36 35
37 36 old_syspath = sys.path
38 37
39 38 def make_empty_file(fname):
40 39 f = open(fname, 'w')
41 40 f.close()
42 41
43 42
44 43 def setup():
45 44 """Setup testenvironment for the module:
46 45
47 46 """
48 47 # Do not mask exceptions here. In particular, catching WindowsError is a
49 48 # problem because that exception is only defined on Windows...
50 49 os.makedirs(join(TMP_TEST_DIR, "xmod"))
51 50 os.makedirs(join(TMP_TEST_DIR, "nomod"))
52 51 make_empty_file(join(TMP_TEST_DIR, "xmod/__init__.py"))
53 52 make_empty_file(join(TMP_TEST_DIR, "xmod/sub.py"))
54 53 make_empty_file(join(TMP_TEST_DIR, "pack.py"))
55 54 make_empty_file(join(TMP_TEST_DIR, "packpyc.pyc"))
56 55 sys.path = [TMP_TEST_DIR]
57 56
58 57 def teardown():
59 58 """Teardown testenvironment for the module:
60 59
61 60 - Remove tempdir
62 61 - restore sys.path
63 62 """
64 63 # Note: we remove the parent test dir, which is the root of all test
65 64 # subdirs we may have created. Use shutil instead of os.removedirs, so
66 65 # that non-empty directories are all recursively removed.
67 66 shutil.rmtree(TMP_TEST_DIR)
68 67 sys.path = old_syspath
69 68
70 69
71 70 def test_get_init_1():
72 71 """See if get_init can find __init__.py in this testdir"""
73 72 with make_tempfile(join(TMP_TEST_DIR, "__init__.py")):
74 73 assert mp.get_init(TMP_TEST_DIR)
75 74
76 75 def test_get_init_2():
77 76 """See if get_init can find __init__.pyw in this testdir"""
78 77 with make_tempfile(join(TMP_TEST_DIR, "__init__.pyw")):
79 78 assert mp.get_init(TMP_TEST_DIR)
80 79
81 80 def test_get_init_3():
82 81 """get_init can't find __init__.pyc in this testdir"""
83 82 with make_tempfile(join(TMP_TEST_DIR, "__init__.pyc")):
84 83 nt.assert_is_none(mp.get_init(TMP_TEST_DIR))
85 84
86 85 def test_get_init_4():
87 86 """get_init can't find __init__ in empty testdir"""
88 87 nt.assert_is_none(mp.get_init(TMP_TEST_DIR))
89 88
90 89
91 90 def test_find_mod_1():
92 91 modpath = join(TMP_TEST_DIR, "xmod", "__init__.py")
93 92 nt.assert_equal(mp.find_mod("xmod"), modpath)
94 93
95 94 def test_find_mod_2():
96 95 modpath = join(TMP_TEST_DIR, "xmod", "__init__.py")
97 96 nt.assert_equal(mp.find_mod("xmod"), modpath)
98 97
99 98 def test_find_mod_3():
100 99 modpath = join(TMP_TEST_DIR, "xmod", "sub.py")
101 100 nt.assert_equal(mp.find_mod("xmod.sub"), modpath)
102 101
103 102 def test_find_mod_4():
104 103 modpath = join(TMP_TEST_DIR, "pack.py")
105 104 nt.assert_equal(mp.find_mod("pack"), modpath)
106 105
107 106 def test_find_mod_5():
108 107 modpath = join(TMP_TEST_DIR, "packpyc.pyc")
109 108 nt.assert_equal(mp.find_mod("packpyc"), modpath)
110 109
111 110 def test_find_module_1():
112 111 modpath = join(TMP_TEST_DIR, "xmod")
113 112 nt.assert_equal(mp.find_module("xmod"), modpath)
114 113
115 114 def test_find_module_2():
116 115 """Testing sys.path that is empty"""
117 116 nt.assert_is_none(mp.find_module("xmod", []))
118 117
119 118 def test_find_module_3():
120 119 """Testing sys.path that is empty"""
121 120 nt.assert_is_none(mp.find_module(None, None))
122 121
123 122 def test_find_module_4():
124 123 """Testing sys.path that is empty"""
125 124 nt.assert_is_none(mp.find_module(None))
126 125
127 126 def test_find_module_5():
128 127 nt.assert_is_none(mp.find_module("xmod.nopack"))
@@ -1,146 +1,145 b''
1 1 # encoding: utf-8
2 2 """
3 3 Tests for platutils.py
4 4 """
5 5
6 6 #-----------------------------------------------------------------------------
7 7 # Copyright (C) 2008-2011 The IPython Development Team
8 8 #
9 9 # Distributed under the terms of the BSD License. The full license is in
10 10 # the file COPYING, distributed as part of this software.
11 11 #-----------------------------------------------------------------------------
12 12
13 13 #-----------------------------------------------------------------------------
14 14 # Imports
15 15 #-----------------------------------------------------------------------------
16 16
17 17 import sys
18 18 import os
19 19 from unittest import TestCase
20 20
21 21 import nose.tools as nt
22 22
23 23 from IPython.utils.process import (find_cmd, FindCmdError, arg_split,
24 24 system, getoutput, getoutputerror,
25 25 get_output_error_code)
26 26 from IPython.testing import decorators as dec
27 27 from IPython.testing import tools as tt
28 28
29 29 python = os.path.basename(sys.executable)
30 30
31 31 #-----------------------------------------------------------------------------
32 32 # Tests
33 33 #-----------------------------------------------------------------------------
34 34
35 35
36 36 @dec.skip_win32
37 37 def test_find_cmd_ls():
38 38 """Make sure we can find the full path to ls."""
39 39 path = find_cmd('ls')
40 40 nt.assert_true(path.endswith('ls'))
41 41
42 42
43 43 def has_pywin32():
44 44 try:
45 45 import win32api
46 46 except ImportError:
47 47 return False
48 48 return True
49 49
50 50
51 51 @dec.onlyif(has_pywin32, "This test requires win32api to run")
52 52 def test_find_cmd_pythonw():
53 53 """Try to find pythonw on Windows."""
54 54 path = find_cmd('pythonw')
55 55 assert path.lower().endswith('pythonw.exe'), path
56 56
57 57
58 58 @dec.onlyif(lambda : sys.platform != 'win32' or has_pywin32(),
59 59 "This test runs on posix or in win32 with win32api installed")
60 60 def test_find_cmd_fail():
61 61 """Make sure that FindCmdError is raised if we can't find the cmd."""
62 62 nt.assert_raises(FindCmdError,find_cmd,'asdfasdf')
63 63
64 64
65 65 @dec.skip_win32
66 66 def test_arg_split():
67 67 """Ensure that argument lines are correctly split like in a shell."""
68 68 tests = [['hi', ['hi']],
69 69 [u'hi', [u'hi']],
70 70 ['hello there', ['hello', 'there']],
71 71 # \u01ce == \N{LATIN SMALL LETTER A WITH CARON}
72 72 # Do not use \N because the tests crash with syntax error in
73 73 # some cases, for example windows python2.6.
74 74 [u'h\u01cello', [u'h\u01cello']],
75 75 ['something "with quotes"', ['something', '"with quotes"']],
76 76 ]
77 77 for argstr, argv in tests:
78 78 nt.assert_equal(arg_split(argstr), argv)
79 79
80 80 @dec.skip_if_not_win32
81 81 def test_arg_split_win32():
82 82 """Ensure that argument lines are correctly split like in a shell."""
83 83 tests = [['hi', ['hi']],
84 84 [u'hi', [u'hi']],
85 85 ['hello there', ['hello', 'there']],
86 86 [u'h\u01cello', [u'h\u01cello']],
87 87 ['something "with quotes"', ['something', 'with quotes']],
88 88 ]
89 89 for argstr, argv in tests:
90 90 nt.assert_equal(arg_split(argstr), argv)
91 91
92 92
93 93 class SubProcessTestCase(TestCase, tt.TempFileMixin):
94 94 def setUp(self):
95 95 """Make a valid python temp file."""
96 lines = ["from __future__ import print_function",
97 "import sys",
96 lines = [ "import sys",
98 97 "print('on stdout', end='', file=sys.stdout)",
99 98 "print('on stderr', end='', file=sys.stderr)",
100 99 "sys.stdout.flush()",
101 100 "sys.stderr.flush()"]
102 101 self.mktmp('\n'.join(lines))
103 102
104 103 def test_system(self):
105 104 status = system('%s "%s"' % (python, self.fname))
106 105 self.assertEqual(status, 0)
107 106
108 107 def test_system_quotes(self):
109 108 status = system('%s -c "import sys"' % python)
110 109 self.assertEqual(status, 0)
111 110
112 111 def test_getoutput(self):
113 112 out = getoutput('%s "%s"' % (python, self.fname))
114 113 # we can't rely on the order the line buffered streams are flushed
115 114 try:
116 115 self.assertEqual(out, 'on stderron stdout')
117 116 except AssertionError:
118 117 self.assertEqual(out, 'on stdouton stderr')
119 118
120 119 def test_getoutput_quoted(self):
121 120 out = getoutput('%s -c "print (1)"' % python)
122 121 self.assertEqual(out.strip(), '1')
123 122
124 123 #Invalid quoting on windows
125 124 @dec.skip_win32
126 125 def test_getoutput_quoted2(self):
127 126 out = getoutput("%s -c 'print (1)'" % python)
128 127 self.assertEqual(out.strip(), '1')
129 128 out = getoutput("%s -c 'print (\"1\")'" % python)
130 129 self.assertEqual(out.strip(), '1')
131 130
132 131 def test_getoutput_error(self):
133 132 out, err = getoutputerror('%s "%s"' % (python, self.fname))
134 133 self.assertEqual(out, 'on stdout')
135 134 self.assertEqual(err, 'on stderr')
136 135
137 136 def test_get_output_error_code(self):
138 137 quiet_exit = '%s -c "import sys; sys.exit(1)"' % python
139 138 out, err, code = get_output_error_code(quiet_exit)
140 139 self.assertEqual(out, '')
141 140 self.assertEqual(err, '')
142 141 self.assertEqual(code, 1)
143 142 out, err, code = get_output_error_code('%s "%s"' % (python, self.fname))
144 143 self.assertEqual(out, 'on stdout')
145 144 self.assertEqual(err, 'on stderr')
146 145 self.assertEqual(code, 0)
@@ -1,222 +1,221 b''
1 1 # encoding: utf-8
2 2 """Tests for IPython.utils.text"""
3 from __future__ import print_function
4 3
5 4 #-----------------------------------------------------------------------------
6 5 # Copyright (C) 2011 The IPython Development Team
7 6 #
8 7 # Distributed under the terms of the BSD License. The full license is in
9 8 # the file COPYING, distributed as part of this software.
10 9 #-----------------------------------------------------------------------------
11 10
12 11 #-----------------------------------------------------------------------------
13 12 # Imports
14 13 #-----------------------------------------------------------------------------
15 14
16 15 import os
17 16 import math
18 17 import random
19 18 import sys
20 19
21 20 import nose.tools as nt
22 21 try:
23 22 from pathlib import Path
24 23 except ImportError:
25 24 # Python 2 backport
26 25 from pathlib2 import Path
27 26
28 27 from IPython.utils import text
29 28
30 29 #-----------------------------------------------------------------------------
31 30 # Globals
32 31 #-----------------------------------------------------------------------------
33 32
34 33 def test_columnize():
35 34 """Basic columnize tests."""
36 35 size = 5
37 36 items = [l*size for l in 'abcd']
38 37
39 38 out = text.columnize(items, displaywidth=80)
40 39 nt.assert_equal(out, 'aaaaa bbbbb ccccc ddddd\n')
41 40 out = text.columnize(items, displaywidth=25)
42 41 nt.assert_equal(out, 'aaaaa ccccc\nbbbbb ddddd\n')
43 42 out = text.columnize(items, displaywidth=12)
44 43 nt.assert_equal(out, 'aaaaa ccccc\nbbbbb ddddd\n')
45 44 out = text.columnize(items, displaywidth=10)
46 45 nt.assert_equal(out, 'aaaaa\nbbbbb\nccccc\nddddd\n')
47 46
48 47 out = text.columnize(items, row_first=True, displaywidth=80)
49 48 nt.assert_equal(out, 'aaaaa bbbbb ccccc ddddd\n')
50 49 out = text.columnize(items, row_first=True, displaywidth=25)
51 50 nt.assert_equal(out, 'aaaaa bbbbb\nccccc ddddd\n')
52 51 out = text.columnize(items, row_first=True, displaywidth=12)
53 52 nt.assert_equal(out, 'aaaaa bbbbb\nccccc ddddd\n')
54 53 out = text.columnize(items, row_first=True, displaywidth=10)
55 54 nt.assert_equal(out, 'aaaaa\nbbbbb\nccccc\nddddd\n')
56 55
57 56 out = text.columnize(items, displaywidth=40, spread=True)
58 57 nt.assert_equal(out, 'aaaaa bbbbb ccccc ddddd\n')
59 58 out = text.columnize(items, displaywidth=20, spread=True)
60 59 nt.assert_equal(out, 'aaaaa ccccc\nbbbbb ddddd\n')
61 60 out = text.columnize(items, displaywidth=12, spread=True)
62 61 nt.assert_equal(out, 'aaaaa ccccc\nbbbbb ddddd\n')
63 62 out = text.columnize(items, displaywidth=10, spread=True)
64 63 nt.assert_equal(out, 'aaaaa\nbbbbb\nccccc\nddddd\n')
65 64
66 65
67 66 def test_columnize_random():
68 67 """Test with random input to hopfully catch edge case """
69 68 for row_first in [True, False]:
70 69 for nitems in [random.randint(2,70) for i in range(2,20)]:
71 70 displaywidth = random.randint(20,200)
72 71 rand_len = [random.randint(2,displaywidth) for i in range(nitems)]
73 72 items = ['x'*l for l in rand_len]
74 73 out = text.columnize(items, row_first=row_first, displaywidth=displaywidth)
75 74 longer_line = max([len(x) for x in out.split('\n')])
76 75 longer_element = max(rand_len)
77 76 if longer_line > displaywidth:
78 77 print("Columnize displayed something lager than displaywidth : %s " % longer_line)
79 78 print("longer element : %s " % longer_element)
80 79 print("displaywidth : %s " % displaywidth)
81 80 print("number of element : %s " % nitems)
82 81 print("size of each element :\n %s" % rand_len)
83 82 assert False, "row_first={0}".format(row_first)
84 83
85 84 def test_columnize_medium():
86 85 """Test with inputs than shouldn't be wider than 80"""
87 86 size = 40
88 87 items = [l*size for l in 'abc']
89 88 for row_first in [True, False]:
90 89 out = text.columnize(items, row_first=row_first, displaywidth=80)
91 90 nt.assert_equal(out, '\n'.join(items+['']), "row_first={0}".format(row_first))
92 91
93 92 def test_columnize_long():
94 93 """Test columnize with inputs longer than the display window"""
95 94 size = 11
96 95 items = [l*size for l in 'abc']
97 96 for row_first in [True, False]:
98 97 out = text.columnize(items, row_first=row_first, displaywidth=size-1)
99 98 nt.assert_equal(out, '\n'.join(items+['']), "row_first={0}".format(row_first))
100 99
101 100 def eval_formatter_check(f):
102 101 ns = dict(n=12, pi=math.pi, stuff='hello there', os=os, u=u"café", b="café")
103 102 s = f.format("{n} {n//4} {stuff.split()[0]}", **ns)
104 103 nt.assert_equal(s, "12 3 hello")
105 104 s = f.format(' '.join(['{n//%i}'%i for i in range(1,8)]), **ns)
106 105 nt.assert_equal(s, "12 6 4 3 2 2 1")
107 106 s = f.format('{[n//i for i in range(1,8)]}', **ns)
108 107 nt.assert_equal(s, "[12, 6, 4, 3, 2, 2, 1]")
109 108 s = f.format("{stuff!s}", **ns)
110 109 nt.assert_equal(s, ns['stuff'])
111 110 s = f.format("{stuff!r}", **ns)
112 111 nt.assert_equal(s, repr(ns['stuff']))
113 112
114 113 # Check with unicode:
115 114 s = f.format("{u}", **ns)
116 115 nt.assert_equal(s, ns['u'])
117 116 # This decodes in a platform dependent manner, but it shouldn't error out
118 117 s = f.format("{b}", **ns)
119 118
120 119 nt.assert_raises(NameError, f.format, '{dne}', **ns)
121 120
122 121 def eval_formatter_slicing_check(f):
123 122 ns = dict(n=12, pi=math.pi, stuff='hello there', os=os)
124 123 s = f.format(" {stuff.split()[:]} ", **ns)
125 124 nt.assert_equal(s, " ['hello', 'there'] ")
126 125 s = f.format(" {stuff.split()[::-1]} ", **ns)
127 126 nt.assert_equal(s, " ['there', 'hello'] ")
128 127 s = f.format("{stuff[::2]}", **ns)
129 128 nt.assert_equal(s, ns['stuff'][::2])
130 129
131 130 nt.assert_raises(SyntaxError, f.format, "{n:x}", **ns)
132 131
133 132 def eval_formatter_no_slicing_check(f):
134 133 ns = dict(n=12, pi=math.pi, stuff='hello there', os=os)
135 134
136 135 s = f.format('{n:x} {pi**2:+f}', **ns)
137 136 nt.assert_equal(s, "c +9.869604")
138 137
139 138 s = f.format('{stuff[slice(1,4)]}', **ns)
140 139 nt.assert_equal(s, 'ell')
141 140
142 141 if sys.version_info >= (3, 4):
143 142 # String formatting has changed in Python 3.4, so this now works.
144 143 s = f.format("{a[:]}", a=[1, 2])
145 144 nt.assert_equal(s, "[1, 2]")
146 145 else:
147 146 nt.assert_raises(SyntaxError, f.format, "{a[:]}")
148 147
149 148 def test_eval_formatter():
150 149 f = text.EvalFormatter()
151 150 eval_formatter_check(f)
152 151 eval_formatter_no_slicing_check(f)
153 152
154 153 def test_full_eval_formatter():
155 154 f = text.FullEvalFormatter()
156 155 eval_formatter_check(f)
157 156 eval_formatter_slicing_check(f)
158 157
159 158 def test_dollar_formatter():
160 159 f = text.DollarFormatter()
161 160 eval_formatter_check(f)
162 161 eval_formatter_slicing_check(f)
163 162
164 163 ns = dict(n=12, pi=math.pi, stuff='hello there', os=os)
165 164 s = f.format("$n", **ns)
166 165 nt.assert_equal(s, "12")
167 166 s = f.format("$n.real", **ns)
168 167 nt.assert_equal(s, "12")
169 168 s = f.format("$n/{stuff[:5]}", **ns)
170 169 nt.assert_equal(s, "12/hello")
171 170 s = f.format("$n $$HOME", **ns)
172 171 nt.assert_equal(s, "12 $HOME")
173 172 s = f.format("${foo}", foo="HOME")
174 173 nt.assert_equal(s, "$HOME")
175 174
176 175
177 176 def test_long_substr():
178 177 data = ['hi']
179 178 nt.assert_equal(text.long_substr(data), 'hi')
180 179
181 180
182 181 def test_long_substr2():
183 182 data = ['abc', 'abd', 'abf', 'ab']
184 183 nt.assert_equal(text.long_substr(data), 'ab')
185 184
186 185 def test_long_substr_empty():
187 186 data = []
188 187 nt.assert_equal(text.long_substr(data), '')
189 188
190 189 def test_strip_email():
191 190 src = """\
192 191 >> >>> def f(x):
193 192 >> ... return x+1
194 193 >> ...
195 194 >> >>> zz = f(2.5)"""
196 195 cln = """\
197 196 >>> def f(x):
198 197 ... return x+1
199 198 ...
200 199 >>> zz = f(2.5)"""
201 200 nt.assert_equal(text.strip_email_quotes(src), cln)
202 201
203 202
204 203 def test_strip_email2():
205 204 src = '> > > list()'
206 205 cln = 'list()'
207 206 nt.assert_equal(text.strip_email_quotes(src), cln)
208 207
209 208 def test_LSString():
210 209 lss = text.LSString("abc\ndef")
211 210 nt.assert_equal(lss.l, ['abc', 'def'])
212 211 nt.assert_equal(lss.s, 'abc def')
213 212 lss = text.LSString(os.getcwd())
214 213 nt.assert_is_instance(lss.p[0], Path)
215 214
216 215 def test_SList():
217 216 sl = text.SList(['a 11', 'b 1', 'a 2'])
218 217 nt.assert_equal(sl.n, 'a 11\nb 1\na 2')
219 218 nt.assert_equal(sl.s, 'a 11 b 1 a 2')
220 219 nt.assert_equal(sl.grep(lambda x: x.startswith('a')), text.SList(['a 11', 'a 2']))
221 220 nt.assert_equal(sl.fields(0), text.SList(['a', 'b', 'a']))
222 221 nt.assert_equal(sl.sort(field=1, nums=True), text.SList(['b 1', 'a 2', 'a 11']))
@@ -1,783 +1,779 b''
1 1 # encoding: utf-8
2 2 """
3 3 Utilities for working with strings and text.
4 4
5 5 Inheritance diagram:
6 6
7 7 .. inheritance-diagram:: IPython.utils.text
8 8 :parts: 3
9 9 """
10 from __future__ import absolute_import
11 10
12 11 import os
13 12 import re
14 13 import sys
15 14 import textwrap
16 15 from string import Formatter
17 16 try:
18 17 from pathlib import Path
19 18 except ImportError:
20 19 # Python 2 backport
21 20 from pathlib2 import Path
22 21
23 from IPython.testing.skipdoctest import skip_doctest_py3, skip_doctest
24 22 from IPython.utils import py3compat
25 23
26 24 # datetime.strftime date format for ipython
27 25 if sys.platform == 'win32':
28 26 date_format = "%B %d, %Y"
29 27 else:
30 28 date_format = "%B %-d, %Y"
31 29
32 30 class LSString(str):
33 31 """String derivative with a special access attributes.
34 32
35 33 These are normal strings, but with the special attributes:
36 34
37 35 .l (or .list) : value as list (split on newlines).
38 36 .n (or .nlstr): original value (the string itself).
39 37 .s (or .spstr): value as whitespace-separated string.
40 38 .p (or .paths): list of path objects (requires path.py package)
41 39
42 40 Any values which require transformations are computed only once and
43 41 cached.
44 42
45 43 Such strings are very useful to efficiently interact with the shell, which
46 44 typically only understands whitespace-separated options for commands."""
47 45
48 46 def get_list(self):
49 47 try:
50 48 return self.__list
51 49 except AttributeError:
52 50 self.__list = self.split('\n')
53 51 return self.__list
54 52
55 53 l = list = property(get_list)
56 54
57 55 def get_spstr(self):
58 56 try:
59 57 return self.__spstr
60 58 except AttributeError:
61 59 self.__spstr = self.replace('\n',' ')
62 60 return self.__spstr
63 61
64 62 s = spstr = property(get_spstr)
65 63
66 64 def get_nlstr(self):
67 65 return self
68 66
69 67 n = nlstr = property(get_nlstr)
70 68
71 69 def get_paths(self):
72 70 try:
73 71 return self.__paths
74 72 except AttributeError:
75 73 self.__paths = [Path(p) for p in self.split('\n') if os.path.exists(p)]
76 74 return self.__paths
77 75
78 76 p = paths = property(get_paths)
79 77
80 78 # FIXME: We need to reimplement type specific displayhook and then add this
81 79 # back as a custom printer. This should also be moved outside utils into the
82 80 # core.
83 81
84 82 # def print_lsstring(arg):
85 83 # """ Prettier (non-repr-like) and more informative printer for LSString """
86 84 # print "LSString (.p, .n, .l, .s available). Value:"
87 85 # print arg
88 86 #
89 87 #
90 88 # print_lsstring = result_display.when_type(LSString)(print_lsstring)
91 89
92 90
93 91 class SList(list):
94 92 """List derivative with a special access attributes.
95 93
96 94 These are normal lists, but with the special attributes:
97 95
98 96 * .l (or .list) : value as list (the list itself).
99 97 * .n (or .nlstr): value as a string, joined on newlines.
100 98 * .s (or .spstr): value as a string, joined on spaces.
101 99 * .p (or .paths): list of path objects (requires path.py package)
102 100
103 101 Any values which require transformations are computed only once and
104 102 cached."""
105 103
106 104 def get_list(self):
107 105 return self
108 106
109 107 l = list = property(get_list)
110 108
111 109 def get_spstr(self):
112 110 try:
113 111 return self.__spstr
114 112 except AttributeError:
115 113 self.__spstr = ' '.join(self)
116 114 return self.__spstr
117 115
118 116 s = spstr = property(get_spstr)
119 117
120 118 def get_nlstr(self):
121 119 try:
122 120 return self.__nlstr
123 121 except AttributeError:
124 122 self.__nlstr = '\n'.join(self)
125 123 return self.__nlstr
126 124
127 125 n = nlstr = property(get_nlstr)
128 126
129 127 def get_paths(self):
130 128 try:
131 129 return self.__paths
132 130 except AttributeError:
133 131 self.__paths = [Path(p) for p in self if os.path.exists(p)]
134 132 return self.__paths
135 133
136 134 p = paths = property(get_paths)
137 135
138 136 def grep(self, pattern, prune = False, field = None):
139 137 """ Return all strings matching 'pattern' (a regex or callable)
140 138
141 139 This is case-insensitive. If prune is true, return all items
142 140 NOT matching the pattern.
143 141
144 142 If field is specified, the match must occur in the specified
145 143 whitespace-separated field.
146 144
147 145 Examples::
148 146
149 147 a.grep( lambda x: x.startswith('C') )
150 148 a.grep('Cha.*log', prune=1)
151 149 a.grep('chm', field=-1)
152 150 """
153 151
154 152 def match_target(s):
155 153 if field is None:
156 154 return s
157 155 parts = s.split()
158 156 try:
159 157 tgt = parts[field]
160 158 return tgt
161 159 except IndexError:
162 160 return ""
163 161
164 162 if isinstance(pattern, py3compat.string_types):
165 163 pred = lambda x : re.search(pattern, x, re.IGNORECASE)
166 164 else:
167 165 pred = pattern
168 166 if not prune:
169 167 return SList([el for el in self if pred(match_target(el))])
170 168 else:
171 169 return SList([el for el in self if not pred(match_target(el))])
172 170
173 171 def fields(self, *fields):
174 172 """ Collect whitespace-separated fields from string list
175 173
176 174 Allows quick awk-like usage of string lists.
177 175
178 176 Example data (in var a, created by 'a = !ls -l')::
179 177
180 178 -rwxrwxrwx 1 ville None 18 Dec 14 2006 ChangeLog
181 179 drwxrwxrwx+ 6 ville None 0 Oct 24 18:05 IPython
182 180
183 181 * ``a.fields(0)`` is ``['-rwxrwxrwx', 'drwxrwxrwx+']``
184 182 * ``a.fields(1,0)`` is ``['1 -rwxrwxrwx', '6 drwxrwxrwx+']``
185 183 (note the joining by space).
186 184 * ``a.fields(-1)`` is ``['ChangeLog', 'IPython']``
187 185
188 186 IndexErrors are ignored.
189 187
190 188 Without args, fields() just split()'s the strings.
191 189 """
192 190 if len(fields) == 0:
193 191 return [el.split() for el in self]
194 192
195 193 res = SList()
196 194 for el in [f.split() for f in self]:
197 195 lineparts = []
198 196
199 197 for fd in fields:
200 198 try:
201 199 lineparts.append(el[fd])
202 200 except IndexError:
203 201 pass
204 202 if lineparts:
205 203 res.append(" ".join(lineparts))
206 204
207 205 return res
208 206
209 207 def sort(self,field= None, nums = False):
210 208 """ sort by specified fields (see fields())
211 209
212 210 Example::
213 211
214 212 a.sort(1, nums = True)
215 213
216 214 Sorts a by second field, in numerical order (so that 21 > 3)
217 215
218 216 """
219 217
220 218 #decorate, sort, undecorate
221 219 if field is not None:
222 220 dsu = [[SList([line]).fields(field), line] for line in self]
223 221 else:
224 222 dsu = [[line, line] for line in self]
225 223 if nums:
226 224 for i in range(len(dsu)):
227 225 numstr = "".join([ch for ch in dsu[i][0] if ch.isdigit()])
228 226 try:
229 227 n = int(numstr)
230 228 except ValueError:
231 229 n = 0
232 230 dsu[i][0] = n
233 231
234 232
235 233 dsu.sort()
236 234 return SList([t[1] for t in dsu])
237 235
238 236
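
To make the attribute shorthand concrete, a small made-up session::

    from IPython.utils.text import SList

    lines = SList(['-rw-r--r-- 1 user staff 10 notes.txt',
                   '-rw-r--r-- 1 user staff 42 data.csv'])
    print(lines.n)            # joined on newlines
    print(lines.s)            # joined on spaces
    print(lines.fields(-1))   # ['notes.txt', 'data.csv']
    print(lines.grep('csv'))  # only the lines matching the pattern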
239 237 # FIXME: We need to reimplement type specific displayhook and then add this
240 238 # back as a custom printer. This should also be moved outside utils into the
241 239 # core.
242 240
243 241 # def print_slist(arg):
244 242 # """ Prettier (non-repr-like) and more informative printer for SList """
245 243 # print "SList (.p, .n, .l, .s, .grep(), .fields(), sort() available):"
246 244 # if hasattr(arg, 'hideonce') and arg.hideonce:
247 245 # arg.hideonce = False
248 246 # return
249 247 #
250 248 # nlprint(arg) # This was a nested list printer, now removed.
251 249 #
252 250 # print_slist = result_display.when_type(SList)(print_slist)
253 251
254 252
255 253 def indent(instr,nspaces=4, ntabs=0, flatten=False):
256 254 """Indent a string a given number of spaces or tabstops.
257 255
258 256 indent(str,nspaces=4,ntabs=0) -> indent str by ntabs+nspaces.
259 257
260 258 Parameters
261 259 ----------
262 260
263 261 instr : basestring
264 262 The string to be indented.
265 263 nspaces : int (default: 4)
266 264 The number of spaces to be indented.
267 265 ntabs : int (default: 0)
268 266 The number of tabs to be indented.
269 267 flatten : bool (default: False)
270 268 Whether to scrub existing indentation. If True, all lines will be
271 269 aligned to the same indentation. If False, existing indentation will
272 270 be strictly increased.
273 271
274 272 Returns
275 273 -------
276 274
277 275 str|unicode : string indented by ntabs and nspaces.
278 276
279 277 """
280 278 if instr is None:
281 279 return
282 280 ind = '\t'*ntabs+' '*nspaces
283 281 if flatten:
284 282 pat = re.compile(r'^\s*', re.MULTILINE)
285 283 else:
286 284 pat = re.compile(r'^', re.MULTILINE)
287 285 outstr = re.sub(pat, ind, instr)
288 286 if outstr.endswith(os.linesep+ind):
289 287 return outstr[:-len(ind)]
290 288 else:
291 289 return outstr
292 290
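
A quick check of the flatten flag described above::

    from IPython.utils.text import indent

    block = "line one\n    line two"
    print(indent(block, nspaces=2))                # existing indentation is preserved
    print(indent(block, nspaces=2, flatten=True))  # every line realigned to 2 spaces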
293 291
294 292 def list_strings(arg):
295 293 """Always return a list of strings, given a string or list of strings
296 294 as input.
297 295
298 296 Examples
299 297 --------
300 298 ::
301 299
302 300 In [7]: list_strings('A single string')
303 301 Out[7]: ['A single string']
304 302
305 303 In [8]: list_strings(['A single string in a list'])
306 304 Out[8]: ['A single string in a list']
307 305
308 306 In [9]: list_strings(['A','list','of','strings'])
309 307 Out[9]: ['A', 'list', 'of', 'strings']
310 308 """
311 309
312 310 if isinstance(arg, py3compat.string_types): return [arg]
313 311 else: return arg
314 312
315 313
316 314 def marquee(txt='',width=78,mark='*'):
317 315 """Return the input string centered in a 'marquee'.
318 316
319 317 Examples
320 318 --------
321 319 ::
322 320
323 321 In [16]: marquee('A test',40)
324 322 Out[16]: '**************** A test ****************'
325 323
326 324 In [17]: marquee('A test',40,'-')
327 325 Out[17]: '---------------- A test ----------------'
328 326
329 327 In [18]: marquee('A test',40,' ')
330 328 Out[18]: ' A test '
331 329
332 330 """
333 331 if not txt:
334 332 return (mark*width)[:width]
335 333 nmark = (width-len(txt)-2)//len(mark)//2
336 334 if nmark < 0: nmark =0
337 335 marks = mark*nmark
338 336 return '%s %s %s' % (marks,txt,marks)
339 337
340 338
341 339 ini_spaces_re = re.compile(r'^(\s+)')
342 340
343 341 def num_ini_spaces(strng):
344 342 """Return the number of initial spaces in a string"""
345 343
346 344 ini_spaces = ini_spaces_re.match(strng)
347 345 if ini_spaces:
348 346 return ini_spaces.end()
349 347 else:
350 348 return 0
351 349
352 350
353 351 def format_screen(strng):
354 352 """Format a string for screen printing.
355 353
356 354 This removes some latex-type format codes."""
357 355 # Paragraph continue
358 356 par_re = re.compile(r'\\$',re.MULTILINE)
359 357 strng = par_re.sub('',strng)
360 358 return strng
361 359
362 360
363 361 def dedent(text):
364 362 """Equivalent of textwrap.dedent that ignores unindented first line.
365 363
366 364 This means it will still dedent strings like:
367 365 '''foo
368 366 is a bar
369 367 '''
370 368
371 369 For use in wrap_paragraphs.
372 370 """
373 371
374 372 if text.startswith('\n'):
375 373 # text starts with blank line, don't ignore the first line
376 374 return textwrap.dedent(text)
377 375
378 376 # split first line
379 377 splits = text.split('\n',1)
380 378 if len(splits) == 1:
381 379 # only one line
382 380 return textwrap.dedent(text)
383 381
384 382 first, rest = splits
385 383 # dedent everything but the first line
386 384 rest = textwrap.dedent(rest)
387 385 return '\n'.join([first, rest])
388 386
389 387
390 388 def wrap_paragraphs(text, ncols=80):
391 389 """Wrap multiple paragraphs to fit a specified width.
392 390
393 391 This is equivalent to textwrap.wrap, but with support for multiple
394 392 paragraphs, as separated by empty lines.
395 393
396 394 Returns
397 395 -------
398 396
399 397 list of complete paragraphs, wrapped to fill `ncols` columns.
400 398 """
401 399 paragraph_re = re.compile(r'\n(\s*\n)+', re.MULTILINE)
402 400 text = dedent(text).strip()
403 401 paragraphs = paragraph_re.split(text)[::2] # every other entry is space
404 402 out_ps = []
405 403 indent_re = re.compile(r'\n\s+', re.MULTILINE)
406 404 for p in paragraphs:
407 405 # presume indentation that survives dedent is meaningful formatting,
408 406 # so don't fill unless text is flush.
409 407 if indent_re.search(p) is None:
410 408 # wrap paragraph
411 409 p = textwrap.fill(p, ncols)
412 410 out_ps.append(p)
413 411 return out_ps
414 412
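
For instance, a short illustration of the paragraph-aware wrapping above::

    from IPython.utils.text import wrap_paragraphs

    doc = """First paragraph, long enough that it will be re-filled to the
        requested column width.

        Second paragraph."""
    for p in wrap_paragraphs(doc, ncols=40):
        print(p)
        print()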
415 413
416 414 def long_substr(data):
417 415 """Return the longest common substring in a list of strings.
418 416
419 417 Credit: http://stackoverflow.com/questions/2892931/longest-common-substring-from-more-than-two-strings-python
420 418 """
421 419 substr = ''
422 420 if len(data) > 1 and len(data[0]) > 0:
423 421 for i in range(len(data[0])):
424 422 for j in range(len(data[0])-i+1):
425 423 if j > len(substr) and all(data[0][i:i+j] in x for x in data):
426 424 substr = data[0][i:i+j]
427 425 elif len(data) == 1:
428 426 substr = data[0]
429 427 return substr
430 428
431 429
432 430 def strip_email_quotes(text):
433 431 """Strip leading email quotation characters ('>').
434 432
435 433 Removes any combination of leading '>' interspersed with whitespace that
436 434 appears *identically* in all lines of the input text.
437 435
438 436 Parameters
439 437 ----------
440 438 text : str
441 439
442 440 Examples
443 441 --------
444 442
445 443 Simple uses::
446 444
447 445 In [2]: strip_email_quotes('> > text')
448 446 Out[2]: 'text'
449 447
450 448 In [3]: strip_email_quotes('> > text\\n> > more')
451 449 Out[3]: 'text\\nmore'
452 450
453 451 Note how only the common prefix that appears in all lines is stripped::
454 452
455 453 In [4]: strip_email_quotes('> > text\\n> > more\\n> more...')
456 454 Out[4]: '> text\\n> more\\nmore...'
457 455
458 456 So if any line has no quote marks ('>'), then none are stripped from any
459 457 of them::
460 458
461 459 In [5]: strip_email_quotes('> > text\\n> > more\\nlast different')
462 460 Out[5]: '> > text\\n> > more\\nlast different'
463 461 """
464 462 lines = text.splitlines()
465 463 matches = set()
466 464 for line in lines:
467 465 prefix = re.match(r'^(\s*>[ >]*)', line)
468 466 if prefix:
469 467 matches.add(prefix.group(1))
470 468 else:
471 469 break
472 470 else:
473 471 prefix = long_substr(list(matches))
474 472 if prefix:
475 473 strip = len(prefix)
476 474 text = '\n'.join([ ln[strip:] for ln in lines])
477 475 return text
478 476
479 477 def strip_ansi(source):
480 478 """
481 479 Remove ansi escape codes from text.
482 480
483 481 Parameters
484 482 ----------
485 483 source : str
486 484 Source to remove the ansi from
487 485 """
488 486 return re.sub(r'\033\[(\d|;)+?m', '', source)
489 487
490 488
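For example, a small sketch with ordinary SGR colour codes (import path assumed):

from IPython.utils.text import strip_ansi   # assumed import path

colored = '\x1b[32mPASSED\x1b[0m 12 tests'
print(strip_ansi(colored))   # PASSED 12 tests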
491 489 class EvalFormatter(Formatter):
492 490 """A String Formatter that allows evaluation of simple expressions.
493 491
494 492 Note that this version interprets a : as specifying a format string (as per
495 493 standard string formatting), so if slicing is required, you must explicitly
496 494 create a slice.
497 495
498 496 This is to be used in templating cases, such as the parallel batch
499 497 script templates, where simple arithmetic on arguments is useful.
500 498
501 499 Examples
502 500 --------
503 501 ::
504 502
505 503 In [1]: f = EvalFormatter()
506 504 In [2]: f.format('{n//4}', n=8)
507 505 Out[2]: '2'
508 506
509 507 In [3]: f.format("{greeting[slice(2,4)]}", greeting="Hello")
510 508 Out[3]: 'll'
511 509 """
512 510 def get_field(self, name, args, kwargs):
513 511 v = eval(name, kwargs)
514 512 return v, name
515 513
516 514 #XXX: As of Python 3.4, the format string parsing no longer splits on a colon
517 515 # inside [], so EvalFormatter can handle slicing. Once we only support 3.4 and
518 516 # above, it should be possible to remove FullEvalFormatter.
519 517
520 @skip_doctest_py3
521 518 class FullEvalFormatter(Formatter):
522 519 """A String Formatter that allows evaluation of simple expressions.
523 520
524 521 Any time a format key is not found in the kwargs,
525 522 it will be tried as an expression in the kwargs namespace.
526 523
527 524 Note that this version allows slicing using [1:2], so you cannot specify
528 525 a format string. Use :class:`EvalFormatter` to permit format strings.
529 526
530 527 Examples
531 528 --------
532 529 ::
533 530
534 531 In [1]: f = FullEvalFormatter()
535 532 In [2]: f.format('{n//4}', n=8)
536 Out[2]: u'2'
533 Out[2]: '2'
537 534
538 535 In [3]: f.format('{list(range(5))[2:4]}')
539 Out[3]: u'[2, 3]'
536 Out[3]: '[2, 3]'
540 537
541 538 In [4]: f.format('{3*2}')
542 Out[4]: u'6'
539 Out[4]: '6'
543 540 """
544 541 # copied from Formatter._vformat with minor changes to allow eval
545 542 # and replace the format_spec code with slicing
546 543 def vformat(self, format_string, args, kwargs):
547 544 result = []
548 545 for literal_text, field_name, format_spec, conversion in \
549 546 self.parse(format_string):
550 547
551 548 # output the literal text
552 549 if literal_text:
553 550 result.append(literal_text)
554 551
555 552 # if there's a field, output it
556 553 if field_name is not None:
557 554 # this is some markup, find the object and do
558 555 # the formatting
559 556
560 557 if format_spec:
561 558 # override format spec, to allow slicing:
562 559 field_name = ':'.join([field_name, format_spec])
563 560
564 561 # eval the contents of the field for the object
565 562 # to be formatted
566 563 obj = eval(field_name, kwargs)
567 564
568 565 # do any conversion on the resulting object
569 566 obj = self.convert_field(obj, conversion)
570 567
571 568 # format the object and append to the result
572 569 result.append(self.format_field(obj, ''))
573 570
574 571 return u''.join(py3compat.cast_unicode(s) for s in result)
575 572
576 573
577 @skip_doctest_py3
578 574 class DollarFormatter(FullEvalFormatter):
579 575 """Formatter allowing Itpl style $foo replacement, for names and attribute
580 576 access only. Standard {foo} replacement also works, and allows full
581 577 evaluation of its arguments.
582 578
583 579 Examples
584 580 --------
585 581 ::
586 582
587 583 In [1]: f = DollarFormatter()
588 584 In [2]: f.format('{n//4}', n=8)
589 Out[2]: u'2'
585 Out[2]: '2'
590 586
591 587 In [3]: f.format('23 * 76 is $result', result=23*76)
592 Out[3]: u'23 * 76 is 1748'
588 Out[3]: '23 * 76 is 1748'
593 589
594 590 In [4]: f.format('$a or {b}', a=1, b=2)
595 Out[4]: u'1 or 2'
591 Out[4]: '1 or 2'
596 592 """
597 593 _dollar_pattern = re.compile("(.*?)\$(\$?[\w\.]+)")
598 594 def parse(self, fmt_string):
599 595 for literal_txt, field_name, format_spec, conversion \
600 596 in Formatter.parse(self, fmt_string):
601 597
602 598 # Find $foo patterns in the literal text.
603 599 continue_from = 0
604 600 txt = ""
605 601 for m in self._dollar_pattern.finditer(literal_txt):
606 602 new_txt, new_field = m.group(1,2)
607 603 # $$foo --> $foo
608 604 if new_field.startswith("$"):
609 605 txt += new_txt + new_field
610 606 else:
611 607 yield (txt + new_txt, new_field, "", None)
612 608 txt = ""
613 609 continue_from = m.end()
614 610
615 611 # Re-yield the {foo} style pattern
616 612 yield (txt + literal_txt[continue_from:], field_name, format_spec, conversion)
617 613
618 614 #-----------------------------------------------------------------------------
619 615 # Utils to columnize a list of string
620 616 #-----------------------------------------------------------------------------
621 617
622 618 def _col_chunks(l, max_rows, row_first=False):
623 619 """Yield successive max_rows-sized column chunks from l."""
624 620 if row_first:
625 621 ncols = (len(l) // max_rows) + (len(l) % max_rows > 0)
626 622 for i in py3compat.xrange(ncols):
627 623 yield [l[j] for j in py3compat.xrange(i, len(l), ncols)]
628 624 else:
629 625 for i in py3compat.xrange(0, len(l), max_rows):
630 626 yield l[i:(i + max_rows)]
631 627
632 628
633 629 def _find_optimal(rlist, row_first=False, separator_size=2, displaywidth=80):
634 630 """Calculate optimal info to columnize a list of strings"""
635 631 for max_rows in range(1, len(rlist) + 1):
636 632 col_widths = list(map(max, _col_chunks(rlist, max_rows, row_first)))
637 633 sumlength = sum(col_widths)
638 634 ncols = len(col_widths)
639 635 if sumlength + separator_size * (ncols - 1) <= displaywidth:
640 636 break
641 637 return {'num_columns': ncols,
642 638 'optimal_separator_width': (displaywidth - sumlength) / (ncols - 1) if (ncols - 1) else 0,
643 639 'max_rows': max_rows,
644 640 'column_widths': col_widths
645 641 }
646 642
647 643
648 644 def _get_or_default(mylist, i, default=None):
649 645 """Return list item i, or the default if it doesn't exist."""
650 646 if i >= len(mylist):
651 647 return default
652 648 else :
653 649 return mylist[i]
654 650
655 651
656 652 def compute_item_matrix(items, row_first=False, empty=None, *args, **kwargs) :
657 653 """Returns a nested list, and info to columnize items
658 654
659 655 Parameters
660 656 ----------
661 657
662 658 items
663 659 list of strings to columnize
664 660 row_first : (default False)
665 661 Whether to compute columns for a row-first matrix instead of
666 662 column-first (default).
667 663 empty : (default None)
668 664 default value used to fill the list if needed
669 665 separator_size : int (default=2)
670 666 How many characters will be used as a separation between columns.
671 667 displaywidth : int (default=80)
672 668 The width of the area into which the columns should fit.
673 669
674 670 Returns
675 671 -------
676 672
677 673 strings_matrix
678 674
679 675 nested list of strings; the outermost list contains as many lists as
680 676 rows, and each innermost list has as many elements as columns. If the
681 677 total number of elements in `items` does not equal the product of
682 678 rows*columns, the last elements of some lists are filled with `None`.
683 679
684 680 dict_info
685 681 some info to make columnize easier:
686 682
687 683 num_columns
688 684 number of columns
689 685 max_rows
690 686 maximum number of rows (final number may be less)
691 687 column_widths
692 688 list of the width of each column
693 689 optimal_separator_width
694 690 best separator width between columns
695 691
696 692 Examples
697 693 --------
698 694 ::
699 695
700 696 In [1]: l = ['aaa','b','cc','d','eeeee','f','g','h','i','j','k','l']
701 697 ...: compute_item_matrix(l, displaywidth=12)
702 698 Out[1]:
703 699 ([['aaa', 'f', 'k'],
704 700 ['b', 'g', 'l'],
705 701 ['cc', 'h', None],
706 702 ['d', 'i', None],
707 703 ['eeeee', 'j', None]],
708 704 {'num_columns': 3,
709 705 'column_widths': [5, 1, 1],
710 706 'optimal_separator_width': 2,
711 707 'max_rows': 5})
712 708 """
713 709 info = _find_optimal(list(map(len, items)), row_first, *args, **kwargs)
714 710 nrow, ncol = info['max_rows'], info['num_columns']
715 711 if row_first:
716 712 return ([[_get_or_default(items, r * ncol + c, default=empty) for c in range(ncol)] for r in range(nrow)], info)
717 713 else:
718 714 return ([[_get_or_default(items, c * nrow + r, default=empty) for c in range(ncol)] for r in range(nrow)], info)
719 715
720 716
721 717 def columnize(items, row_first=False, separator=' ', displaywidth=80, spread=False):
722 718 """ Transform a list of strings into a single string with columns.
723 719
724 720 Parameters
725 721 ----------
726 722 items : sequence of strings
727 723 The strings to process.
728 724
729 725 row_first : (default False)
730 726 Whether to compute columns for a row-first matrix instead of
731 727 column-first (default).
732 728
733 729 separator : str, optional [default is two spaces]
734 730 The string that separates columns.
735 731
736 732 displaywidth : int, optional [default is 80]
737 733 Width of the display in number of characters.
738 734
739 735 Returns
740 736 -------
741 737 The formatted string.
742 738 """
743 739 if not items:
744 740 return '\n'
745 741 matrix, info = compute_item_matrix(items, row_first=row_first, separator_size=len(separator), displaywidth=displaywidth)
746 742 if spread:
747 743 separator = separator.ljust(int(info['optimal_separator_width']))
748 744 fmatrix = [filter(None, x) for x in matrix]
749 745 sjoin = lambda x : separator.join([ y.ljust(w, ' ') for y, w in zip(x, info['column_widths'])])
750 746 return '\n'.join(map(sjoin, fmatrix))+'\n'
751 747
752 748
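A usage sketch for columnize, which drives compute_item_matrix above (import path assumed; the output shown is approximate, laid out column-first by default):

from IPython.utils.text import columnize   # assumed import path

items = ['alpha', 'beta', 'gamma', 'delta', 'epsilon', 'zeta']
print(columnize(items, displaywidth=30), end='')
# alpha  gamma  epsilon
# beta   delta  zeta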
753 749 def get_text_list(list_, last_sep=' and ', sep=", ", wrap_item_with=""):
754 750 """
755 751 Return a string with a natural enumeration of items
756 752
757 753 >>> get_text_list(['a', 'b', 'c', 'd'])
758 754 'a, b, c and d'
759 755 >>> get_text_list(['a', 'b', 'c'], ' or ')
760 756 'a, b or c'
761 757 >>> get_text_list(['a', 'b', 'c'], ', ')
762 758 'a, b, c'
763 759 >>> get_text_list(['a', 'b'], ' or ')
764 760 'a or b'
765 761 >>> get_text_list(['a'])
766 762 'a'
767 763 >>> get_text_list([])
768 764 ''
769 765 >>> get_text_list(['a', 'b'], wrap_item_with="`")
770 766 '`a` and `b`'
771 767 >>> get_text_list(['a', 'b', 'c', 'd'], " = ", sep=" + ")
772 768 'a + b + c = d'
773 769 """
774 770 if len(list_) == 0:
775 771 return ''
776 772 if wrap_item_with:
777 773 list_ = ['%s%s%s' % (wrap_item_with, item, wrap_item_with) for
778 774 item in list_]
779 775 if len(list_) == 1:
780 776 return list_[0]
781 777 return '%s%s%s' % (
782 778 sep.join(i for i in list_[:-1]),
783 779 last_sep, list_[-1])
@@ -1,9 +1,594 b''
1 """Load our patched versions of tokenize.
1 """Patched version of standard library tokenize, to deal with various bugs.
2
3 Based on Python 3.2 code.
4
5 Patches:
6
7 - Gareth Rees' patch for Python issue #12691 (untokenizing)
8 - Except we don't encode the output of untokenize
9 - Python 2 compatible syntax, so that it can be byte-compiled at installation
10 - Newlines in comments and blank lines should be either NL or NEWLINE, depending
11 on whether they are in a multi-line statement. Filed as Python issue #17061.
12 - Export generate_tokens & TokenError
13 - u and rb literals are allowed under Python 3.3 and above.
14
15 ------------------------------------------------------------------------------
16 Tokenization help for Python programs.
17
18 tokenize(readline) is a generator that breaks a stream of bytes into
19 Python tokens. It decodes the bytes according to PEP-0263 for
20 determining source file encoding.
21
22 It accepts a readline-like method which is called repeatedly to get the
23 next line of input (or b"" for EOF). It generates 5-tuples with these
24 members:
25
26 the token type (see token.py)
27 the token (a string)
28 the starting (row, column) indices of the token (a 2-tuple of ints)
29 the ending (row, column) indices of the token (a 2-tuple of ints)
30 the original line (string)
31
32 It is designed to match the working of the Python tokenizer exactly, except
33 that it produces COMMENT tokens for comments and gives type OP for all
34 operators. Additionally, all token lists start with an ENCODING token
35 which tells you which encoding was used to decode the bytes stream.
2 36 """
3 37
38 __author__ = 'Ka-Ping Yee <ping@lfw.org>'
39 __credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
40 'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
41 'Michael Foord')
42 import builtins
43 import re
4 44 import sys
45 from token import *
46 from codecs import lookup, BOM_UTF8
47 import collections
48 from io import TextIOWrapper
49 cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
50
51 import token
52 __all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
53 "NL", "untokenize", "ENCODING", "TokenInfo"]
54 del token
55
56 __all__ += ["generate_tokens", "TokenError"]
5 57
6 if sys.version_info[0] >= 3:
7 from ._tokenize_py3 import *
58 COMMENT = N_TOKENS
59 tok_name[COMMENT] = 'COMMENT'
60 NL = N_TOKENS + 1
61 tok_name[NL] = 'NL'
62 ENCODING = N_TOKENS + 2
63 tok_name[ENCODING] = 'ENCODING'
64 N_TOKENS += 3
65
66 class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
67 def __repr__(self):
68 annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
69 return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
70 self._replace(type=annotated_type))
71
72 def group(*choices): return '(' + '|'.join(choices) + ')'
73 def any(*choices): return group(*choices) + '*'
74 def maybe(*choices): return group(*choices) + '?'
75
76 # Note: we use unicode matching for names ("\w") but ascii matching for
77 # number literals.
78 Whitespace = r'[ \f\t]*'
79 Comment = r'#[^\r\n]*'
80 Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
81 Name = r'\w+'
82
83 Hexnumber = r'0[xX][0-9a-fA-F]+'
84 Binnumber = r'0[bB][01]+'
85 Octnumber = r'0[oO][0-7]+'
86 Decnumber = r'(?:0+|[1-9][0-9]*)'
87 Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
88 Exponent = r'[eE][-+]?[0-9]+'
89 Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
90 Expfloat = r'[0-9]+' + Exponent
91 Floatnumber = group(Pointfloat, Expfloat)
92 Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]')
93 Number = group(Imagnumber, Floatnumber, Intnumber)
94
95 if sys.version_info.minor >= 3:
96 StringPrefix = r'(?:[bB][rR]?|[rR][bB]?|[uU])?'
8 97 else:
9 from ._tokenize_py2 import *
98 StringPrefix = r'(?:[bB]?[rR]?)?'
99
100 # Tail end of ' string.
101 Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
102 # Tail end of " string.
103 Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
104 # Tail end of ''' string.
105 Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
106 # Tail end of """ string.
107 Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
108 Triple = group(StringPrefix + "'''", StringPrefix + '"""')
109 # Single-line ' or " string.
110 String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
111 StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
112
113 # Because of leftmost-then-longest match semantics, be sure to put the
114 # longest operators first (e.g., if = came before ==, == would get
115 # recognized as two instances of =).
116 Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
117 r"//=?", r"->",
118 r"[+\-*/%&|^=<>]=?",
119 r"~")
120
121 Bracket = '[][(){}]'
122 Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
123 Funny = group(Operator, Bracket, Special)
124
125 PlainToken = group(Number, Funny, String, Name)
126 Token = Ignore + PlainToken
127
128 # First (or only) line of ' or " string.
129 ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
130 group("'", r'\\\r?\n'),
131 StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
132 group('"', r'\\\r?\n'))
133 PseudoExtras = group(r'\\\r?\n', Comment, Triple)
134 PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
135
136 def _compile(expr):
137 return re.compile(expr, re.UNICODE)
138
139 tokenprog, pseudoprog, single3prog, double3prog = map(
140 _compile, (Token, PseudoToken, Single3, Double3))
141 endprogs = {"'": _compile(Single), '"': _compile(Double),
142 "'''": single3prog, '"""': double3prog,
143 "r'''": single3prog, 'r"""': double3prog,
144 "b'''": single3prog, 'b"""': double3prog,
145 "R'''": single3prog, 'R"""': double3prog,
146 "B'''": single3prog, 'B"""': double3prog,
147 "br'''": single3prog, 'br"""': double3prog,
148 "bR'''": single3prog, 'bR"""': double3prog,
149 "Br'''": single3prog, 'Br"""': double3prog,
150 "BR'''": single3prog, 'BR"""': double3prog,
151 'r': None, 'R': None, 'b': None, 'B': None}
152
153 triple_quoted = {}
154 for t in ("'''", '"""',
155 "r'''", 'r"""', "R'''", 'R"""',
156 "b'''", 'b"""', "B'''", 'B"""',
157 "br'''", 'br"""', "Br'''", 'Br"""',
158 "bR'''", 'bR"""', "BR'''", 'BR"""'):
159 triple_quoted[t] = t
160 single_quoted = {}
161 for t in ("'", '"',
162 "r'", 'r"', "R'", 'R"',
163 "b'", 'b"', "B'", 'B"',
164 "br'", 'br"', "Br'", 'Br"',
165 "bR'", 'bR"', "BR'", 'BR"' ):
166 single_quoted[t] = t
167
168 if sys.version_info.minor >= 3:
169 # Python 3.3
170 for _prefix in ['rb', 'rB', 'Rb', 'RB', 'u', 'U']:
171 _t2 = _prefix+'"""'
172 endprogs[_t2] = double3prog
173 triple_quoted[_t2] = _t2
174 _t1 = _prefix + "'''"
175 endprogs[_t1] = single3prog
176 triple_quoted[_t1] = _t1
177 single_quoted[_prefix+'"'] = _prefix+'"'
178 single_quoted[_prefix+"'"] = _prefix+"'"
179 del _prefix, _t2, _t1
180 endprogs['u'] = None
181 endprogs['U'] = None
182
183 del _compile
184
185 tabsize = 8
186
187 class TokenError(Exception): pass
188
189 class StopTokenizing(Exception): pass
190
191
192 class Untokenizer:
193
194 def __init__(self):
195 self.tokens = []
196 self.prev_row = 1
197 self.prev_col = 0
198 self.encoding = 'utf-8'
199
200 def add_whitespace(self, tok_type, start):
201 row, col = start
202 assert row >= self.prev_row
203 col_offset = col - self.prev_col
204 if col_offset > 0:
205 self.tokens.append(" " * col_offset)
206 elif row > self.prev_row and tok_type not in (NEWLINE, NL, ENDMARKER):
207 # Line was backslash-continued.
208 self.tokens.append(" ")
209
210 def untokenize(self, tokens):
211 iterable = iter(tokens)
212 for t in iterable:
213 if len(t) == 2:
214 self.compat(t, iterable)
215 break
216 tok_type, token, start, end = t[:4]
217 if tok_type == ENCODING:
218 self.encoding = token
219 continue
220 self.add_whitespace(tok_type, start)
221 self.tokens.append(token)
222 self.prev_row, self.prev_col = end
223 if tok_type in (NEWLINE, NL):
224 self.prev_row += 1
225 self.prev_col = 0
226 return "".join(self.tokens)
227
228 def compat(self, token, iterable):
229 # This import is here to avoid problems when the itertools
230 # module is not built yet and tokenize is imported.
231 from itertools import chain
232 startline = False
233 prevstring = False
234 indents = []
235 toks_append = self.tokens.append
236
237 for tok in chain([token], iterable):
238 toknum, tokval = tok[:2]
239 if toknum == ENCODING:
240 self.encoding = tokval
241 continue
242
243 if toknum in (NAME, NUMBER):
244 tokval += ' '
245
246 # Insert a space between two consecutive strings
247 if toknum == STRING:
248 if prevstring:
249 tokval = ' ' + tokval
250 prevstring = True
251 else:
252 prevstring = False
253
254 if toknum == INDENT:
255 indents.append(tokval)
256 continue
257 elif toknum == DEDENT:
258 indents.pop()
259 continue
260 elif toknum in (NEWLINE, NL):
261 startline = True
262 elif startline and indents:
263 toks_append(indents[-1])
264 startline = False
265 toks_append(tokval)
266
267
268 def untokenize(tokens):
269 """
270 Convert ``tokens`` (an iterable) back into Python source code. Return
271 a bytes object, encoded using the encoding specified by the last
272 ENCODING token in ``tokens``, or UTF-8 if no ENCODING token is found.
273
274 The result is guaranteed to tokenize back to match the input so that
275 the conversion is lossless and round-trips are assured. The
276 guarantee applies only to the token type and token string as the
277 spacing between tokens (column positions) may change.
278
279 :func:`untokenize` has two modes. If the input tokens are sequences
280 of length 2 (``type``, ``string``) then spaces are added as necessary to
281 preserve the round-trip property.
282
283 If the input tokens are sequences of length 4 or more (``type``,
284 ``string``, ``start``, ``end``), as returned by :func:`tokenize`, then
285 spaces are added so that each token appears in the result at the
286 position indicated by ``start`` and ``end``, if possible.
287 """
288 return Untokenizer().untokenize(tokens)
289
290
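A round-trip sketch of the guarantee stated in the docstring. It assumes this file is importable as IPython.utils.tokenize2 (the tokenutil module below imports it under that name) and uses generate_tokens, defined further down, so that plain str input can be fed in:

import io
from IPython.utils import tokenize2   # assumed module location

src = "x = 1\nif x:\n    y = 2\n"
toks = list(tokenize2.generate_tokens(io.StringIO(src).readline))

round_trip = tokenize2.untokenize(toks)
toks2 = list(tokenize2.generate_tokens(io.StringIO(round_trip).readline))

# Token types and strings survive the round trip; exact spacing need not.
assert [(t.type, t.string) for t in toks] == [(t.type, t.string) for t in toks2]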
291 def _get_normal_name(orig_enc):
292 """Imitates get_normal_name in tokenizer.c."""
293 # Only care about the first 12 characters.
294 enc = orig_enc[:12].lower().replace("_", "-")
295 if enc == "utf-8" or enc.startswith("utf-8-"):
296 return "utf-8"
297 if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
298 enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
299 return "iso-8859-1"
300 return orig_enc
301
302 def detect_encoding(readline):
303 """
304 The detect_encoding() function is used to detect the encoding that should
305 be used to decode a Python source file. It requires one argument, readline,
306 in the same way as the tokenize() generator.
307
308 It will call readline a maximum of twice, and return the encoding used
309 (as a string) and a list of any lines (left as bytes) it has read in.
310
311 It detects the encoding from the presence of a utf-8 bom or an encoding
312 cookie as specified in pep-0263. If both a bom and a cookie are present,
313 but disagree, a SyntaxError will be raised. If the encoding cookie is an
314 invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
315 'utf-8-sig' is returned.
316
317 If no encoding is specified, then the default of 'utf-8' will be returned.
318 """
319 bom_found = False
320 encoding = None
321 default = 'utf-8'
322 def read_or_stop():
323 try:
324 return readline()
325 except StopIteration:
326 return b''
327
328 def find_cookie(line):
329 try:
330 # Decode as UTF-8. Either the line is an encoding declaration,
331 # in which case it should be pure ASCII, or it must be UTF-8
332 # per default encoding.
333 line_string = line.decode('utf-8')
334 except UnicodeDecodeError:
335 raise SyntaxError("invalid or missing encoding declaration")
336
337 matches = cookie_re.findall(line_string)
338 if not matches:
339 return None
340 encoding = _get_normal_name(matches[0])
341 try:
342 codec = lookup(encoding)
343 except LookupError:
344 # This behaviour mimics the Python interpreter
345 raise SyntaxError("unknown encoding: " + encoding)
346
347 if bom_found:
348 if encoding != 'utf-8':
349 # This behaviour mimics the Python interpreter
350 raise SyntaxError('encoding problem: utf-8')
351 encoding += '-sig'
352 return encoding
353
354 first = read_or_stop()
355 if first.startswith(BOM_UTF8):
356 bom_found = True
357 first = first[3:]
358 default = 'utf-8-sig'
359 if not first:
360 return default, []
361
362 encoding = find_cookie(first)
363 if encoding:
364 return encoding, [first]
365
366 second = read_or_stop()
367 if not second:
368 return default, [first]
369
370 encoding = find_cookie(second)
371 if encoding:
372 return encoding, [first, second]
373
374 return default, [first, second]
375
376
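For instance, a sketch of cookie detection (module location assumed as above); the function reads at most two lines and hands them back so nothing is lost:

import io
from IPython.utils.tokenize2 import detect_encoding   # assumed module location

source = b"# -*- coding: latin-1 -*-\nprint('caf\xe9')\n"
encoding, consumed = detect_encoding(io.BytesIO(source).readline)

print(encoding)   # iso-8859-1  (latin-1 is normalised)
print(consumed)   # [b"# -*- coding: latin-1 -*-\n"]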
377 def open(filename):
378 """Open a file in read only mode using the encoding detected by
379 detect_encoding().
380 """
381 buffer = builtins.open(filename, 'rb')
382 encoding, lines = detect_encoding(buffer.readline)
383 buffer.seek(0)
384 text = TextIOWrapper(buffer, encoding, line_buffering=True)
385 text.mode = 'r'
386 return text
387
388
389 def tokenize(readline):
390 """
391 The tokenize() generator requires one argument, readline, which
392 must be a callable object which provides the same interface as the
393 readline() method of built-in file objects. Each call to the function
394 should return one line of input as bytes. Alternately, readline
395 can be a callable function terminating with StopIteration:
396 readline = open(myfile, 'rb').__next__ # Example of alternate readline
397
398 The generator produces 5-tuples with these members: the token type; the
399 token string; a 2-tuple (srow, scol) of ints specifying the row and
400 column where the token begins in the source; a 2-tuple (erow, ecol) of
401 ints specifying the row and column where the token ends in the source;
402 and the line on which the token was found. The line passed is the
403 logical line; continuation lines are included.
404
405 The first token sequence will always be an ENCODING token
406 which tells you which encoding was used to decode the bytes stream.
407 """
408 # This import is here to avoid problems when the itertools module is not
409 # built yet and tokenize is imported.
410 from itertools import chain, repeat
411 encoding, consumed = detect_encoding(readline)
412 rl_gen = iter(readline, b"")
413 empty = repeat(b"")
414 return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)
415
416
417 def _tokenize(readline, encoding):
418 lnum = parenlev = continued = 0
419 numchars = '0123456789'
420 contstr, needcont = '', 0
421 contline = None
422 indents = [0]
423
424 if encoding is not None:
425 if encoding == "utf-8-sig":
426 # BOM will already have been stripped.
427 encoding = "utf-8"
428 yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
429 while True: # loop over lines in stream
430 try:
431 line = readline()
432 except StopIteration:
433 line = b''
434
435 if encoding is not None:
436 line = line.decode(encoding)
437 lnum += 1
438 pos, max = 0, len(line)
439
440 if contstr: # continued string
441 if not line:
442 raise TokenError("EOF in multi-line string", strstart)
443 endmatch = endprog.match(line)
444 if endmatch:
445 pos = end = endmatch.end(0)
446 yield TokenInfo(STRING, contstr + line[:end],
447 strstart, (lnum, end), contline + line)
448 contstr, needcont = '', 0
449 contline = None
450 elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
451 yield TokenInfo(ERRORTOKEN, contstr + line,
452 strstart, (lnum, len(line)), contline)
453 contstr = ''
454 contline = None
455 continue
456 else:
457 contstr = contstr + line
458 contline = contline + line
459 continue
460
461 elif parenlev == 0 and not continued: # new statement
462 if not line: break
463 column = 0
464 while pos < max: # measure leading whitespace
465 if line[pos] == ' ':
466 column += 1
467 elif line[pos] == '\t':
468 column = (column//tabsize + 1)*tabsize
469 elif line[pos] == '\f':
470 column = 0
471 else:
472 break
473 pos += 1
474 if pos == max:
475 break
476
477 if line[pos] in '#\r\n': # skip comments or blank lines
478 if line[pos] == '#':
479 comment_token = line[pos:].rstrip('\r\n')
480 nl_pos = pos + len(comment_token)
481 yield TokenInfo(COMMENT, comment_token,
482 (lnum, pos), (lnum, pos + len(comment_token)), line)
483 yield TokenInfo(NEWLINE, line[nl_pos:],
484 (lnum, nl_pos), (lnum, len(line)), line)
485 else:
486 yield TokenInfo(NEWLINE, line[pos:],
487 (lnum, pos), (lnum, len(line)), line)
488 continue
489
490 if column > indents[-1]: # count indents or dedents
491 indents.append(column)
492 yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
493 while column < indents[-1]:
494 if column not in indents:
495 raise IndentationError(
496 "unindent does not match any outer indentation level",
497 ("<tokenize>", lnum, pos, line))
498 indents = indents[:-1]
499 yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)
500
501 else: # continued statement
502 if not line:
503 raise TokenError("EOF in multi-line statement", (lnum, 0))
504 continued = 0
505
506 while pos < max:
507 pseudomatch = pseudoprog.match(line, pos)
508 if pseudomatch: # scan for tokens
509 start, end = pseudomatch.span(1)
510 spos, epos, pos = (lnum, start), (lnum, end), end
511 token, initial = line[start:end], line[start]
512
513 if (initial in numchars or # ordinary number
514 (initial == '.' and token != '.' and token != '...')):
515 yield TokenInfo(NUMBER, token, spos, epos, line)
516 elif initial in '\r\n':
517 yield TokenInfo(NL if parenlev > 0 else NEWLINE,
518 token, spos, epos, line)
519 elif initial == '#':
520 assert not token.endswith("\n")
521 yield TokenInfo(COMMENT, token, spos, epos, line)
522 elif token in triple_quoted:
523 endprog = endprogs[token]
524 endmatch = endprog.match(line, pos)
525 if endmatch: # all on one line
526 pos = endmatch.end(0)
527 token = line[start:pos]
528 yield TokenInfo(STRING, token, spos, (lnum, pos), line)
529 else:
530 strstart = (lnum, start) # multiple lines
531 contstr = line[start:]
532 contline = line
533 break
534 elif initial in single_quoted or \
535 token[:2] in single_quoted or \
536 token[:3] in single_quoted:
537 if token[-1] == '\n': # continued string
538 strstart = (lnum, start)
539 endprog = (endprogs[initial] or endprogs[token[1]] or
540 endprogs[token[2]])
541 contstr, needcont = line[start:], 1
542 contline = line
543 break
544 else: # ordinary string
545 yield TokenInfo(STRING, token, spos, epos, line)
546 elif initial.isidentifier(): # ordinary name
547 yield TokenInfo(NAME, token, spos, epos, line)
548 elif initial == '\\': # continued stmt
549 continued = 1
550 else:
551 if initial in '([{':
552 parenlev += 1
553 elif initial in ')]}':
554 parenlev -= 1
555 yield TokenInfo(OP, token, spos, epos, line)
556 else:
557 yield TokenInfo(ERRORTOKEN, line[pos],
558 (lnum, pos), (lnum, pos+1), line)
559 pos += 1
560
561 for indent in indents[1:]: # pop remaining indent levels
562 yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
563 yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
564
565
566 # An undocumented, backwards compatible, API for all the places in the standard
567 # library that expect to be able to use tokenize with strings
568 def generate_tokens(readline):
569 return _tokenize(readline, None)
570
571 if __name__ == "__main__":
572 # Quick sanity check
573 s = b'''def parseline(self, line):
574 """Parse the line into a command name and a string containing
575 the arguments. Returns a tuple containing (command, args, line).
576 'command' and 'args' may be None if the line couldn't be parsed.
577 """
578 line = line.strip()
579 if not line:
580 return None, None, line
581 elif line[0] == '?':
582 line = 'help ' + line[1:]
583 elif line[0] == '!':
584 if hasattr(self, 'do_shell'):
585 line = 'shell ' + line[1:]
586 else:
587 return None, None, line
588 i, n = 0, len(line)
589 while i < n and line[i] in self.identchars: i = i+1
590 cmd, arg = line[:i], line[i:].strip()
591 return cmd, arg, line
592 '''
593 for tok in tokenize(iter(s.splitlines()).__next__):
594 print(tok)
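Besides the byte-oriented sanity check above, the exported generate_tokens entry point accepts str lines directly. A minimal sketch, with the module location assumed as before:

import io
from IPython.utils import tokenize2   # assumed module location

# No leading ENCODING token here, because the input is already str.
for tok in tokenize2.generate_tokens(io.StringIO("a.b(c)\n").readline):
    print(tok)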
@@ -1,128 +1,127 b''
1 1 """Token-related utilities"""
2 2
3 3 # Copyright (c) IPython Development Team.
4 4 # Distributed under the terms of the Modified BSD License.
5 5
6 from __future__ import absolute_import, print_function
7 6
8 7 from collections import namedtuple
9 8 from io import StringIO
10 9 from keyword import iskeyword
11 10
12 11 from . import tokenize2
13 12 from .py3compat import cast_unicode_py2
14 13
15 14 Token = namedtuple('Token', ['token', 'text', 'start', 'end', 'line'])
16 15
17 16 def generate_tokens(readline):
18 17 """wrap generate_tokens to catch EOF errors"""
19 18 try:
20 19 for token in tokenize2.generate_tokens(readline):
21 20 yield token
22 21 except tokenize2.TokenError:
23 22 # catch EOF error
24 23 return
25 24
26 25 def line_at_cursor(cell, cursor_pos=0):
27 26 """Return the line in a cell at a given cursor position
28 27
29 28 Used for calling line-based APIs that don't support multi-line input, yet.
30 29
31 30 Parameters
32 31 ----------
33 32
34 33 cell: str
35 34 multiline block of text
36 35 cursor_pos: integer
37 36 the cursor position
38 37
39 38 Returns
40 39 -------
41 40
42 41 (line, offset): (text, integer)
43 42 The line with the current cursor, and the character offset of the start of the line.
44 43 """
45 44 offset = 0
46 45 lines = cell.splitlines(True)
47 46 for line in lines:
48 47 next_offset = offset + len(line)
49 48 if next_offset >= cursor_pos:
50 49 break
51 50 offset = next_offset
52 51 else:
53 52 line = ""
54 53 return (line, offset)
55 54
56 55 def token_at_cursor(cell, cursor_pos=0):
57 56 """Get the token at a given cursor
58 57
59 58 Used for introspection.
60 59
61 60 Function calls are prioritized, so the token for the callable will be returned
62 61 if the cursor is anywhere inside the call.
63 62
64 63 Parameters
65 64 ----------
66 65
67 66 cell : unicode
68 67 A block of Python code
69 68 cursor_pos : int
70 69 The location of the cursor in the block where the token should be found
71 70 """
72 71 cell = cast_unicode_py2(cell)
73 72 names = []
74 73 tokens = []
75 74 call_names = []
76 75
77 76 offsets = {1: 0} # lines start at 1
78 77 for tup in generate_tokens(StringIO(cell).readline):
79 78
80 79 tok = Token(*tup)
81 80
82 81 # token, text, start, end, line = tup
83 82 start_line, start_col = tok.start
84 83 end_line, end_col = tok.end
85 84 if end_line + 1 not in offsets:
86 85 # keep track of offsets for each line
87 86 lines = tok.line.splitlines(True)
88 87 for lineno, line in zip(range(start_line + 1, end_line + 2), lines):
89 88 if lineno not in offsets:
90 89 offsets[lineno] = offsets[lineno-1] + len(line)
91 90
92 91 offset = offsets[start_line]
93 92 # allow '|foo' to find 'foo' at the beginning of a line
94 93 boundary = cursor_pos + 1 if start_col == 0 else cursor_pos
95 94 if offset + start_col >= boundary:
96 95 # current token starts after the cursor,
97 96 # don't consume it
98 97 break
99 98
100 99 if tok.token == tokenize2.NAME and not iskeyword(tok.text):
101 100 if names and tokens and tokens[-1].token == tokenize2.OP and tokens[-1].text == '.':
102 101 names[-1] = "%s.%s" % (names[-1], tok.text)
103 102 else:
104 103 names.append(tok.text)
105 104 elif tok.token == tokenize2.OP:
106 105 if tok.text == '=' and names:
107 106 # don't inspect the lhs of an assignment
108 107 names.pop(-1)
109 108 if tok.text == '(' and names:
110 109 # if we are inside a function call, inspect the function
111 110 call_names.append(names[-1])
112 111 elif tok.text == ')' and call_names:
113 112 call_names.pop(-1)
114 113
115 114 tokens.append(tok)
116 115
117 116 if offsets[end_line] + end_col > cursor_pos:
118 117 # we found the cursor, stop reading
119 118 break
120 119
121 120 if call_names:
122 121 return call_names[-1]
123 122 elif names:
124 123 return names[-1]
125 124 else:
126 125 return ''
127 126
128 127
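A small sketch of how these cursor helpers behave, assuming the module is importable as IPython.utils.tokenutil; cursor_pos is a 0-based offset into the cell:

from IPython.utils.tokenutil import line_at_cursor, token_at_cursor   # assumed path

cell = "np.sum(arr)"
# With the cursor inside the argument list, the enclosing call is returned.
print(token_at_cursor(cell, cursor_pos=8))             # np.sum

print(line_at_cursor("first\nsecond", cursor_pos=7))   # ('second', 6)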
@@ -1,7 +1,6 b''
1 from __future__ import absolute_import
2 1
3 2 from warnings import warn
4 3
5 4 warn("IPython.utils.traitlets has moved to a top-level traitlets package.")
6 5
7 6 from traitlets import *
@@ -1,456 +1,455 b''
1 1 """Attempt to generate templates for module reference with Sphinx
2 2
3 3 XXX - we exclude extension modules
4 4
5 5 To include extension modules, first identify them as valid in the
6 6 ``_uri2path`` method, then handle them in the ``_parse_module`` script.
7 7
8 8 We get functions and classes by parsing the text of .py files.
9 9 Alternatively we could import the modules for discovery, and we'd have
10 10 to do that for extension modules. This would involve changing the
11 11 ``_parse_module`` method to work via import and introspection, and
12 12 might involve changing ``discover_modules`` (which determines which
13 13 files are modules, and therefore which module URIs will be passed to
14 14 ``_parse_module``).
15 15
16 16 NOTE: this is a modified version of a script originally shipped with the
17 17 PyMVPA project, which we've adapted for NIPY use. PyMVPA is an MIT-licensed
18 18 project."""
19 19
20 from __future__ import print_function
21 20
22 21 # Stdlib imports
23 22 import ast
24 23 import inspect
25 24 import os
26 25 import re
27 26 from importlib import import_module
28 27
29 28
30 29 class Obj(object):
31 30 '''Namespace to hold arbitrary information.'''
32 31 def __init__(self, **kwargs):
33 32 for k, v in kwargs.items():
34 33 setattr(self, k, v)
35 34
36 35 class FuncClsScanner(ast.NodeVisitor):
37 36 """Scan a module for top-level functions and classes.
38 37
39 38 Skips objects with an @undoc decorator, or a name starting with '_'.
40 39 """
41 40 def __init__(self):
42 41 ast.NodeVisitor.__init__(self)
43 42 self.classes = []
44 43 self.classes_seen = set()
45 44 self.functions = []
46 45
47 46 @staticmethod
48 47 def has_undoc_decorator(node):
49 48 return any(isinstance(d, ast.Name) and d.id == 'undoc' \
50 49 for d in node.decorator_list)
51 50
52 51 def visit_If(self, node):
53 52 if isinstance(node.test, ast.Compare) \
54 53 and isinstance(node.test.left, ast.Name) \
55 54 and node.test.left.id == '__name__':
56 55 return # Ignore classes defined in "if __name__ == '__main__':"
57 56
58 57 self.generic_visit(node)
59 58
60 59 def visit_FunctionDef(self, node):
61 60 if not (node.name.startswith('_') or self.has_undoc_decorator(node)) \
62 61 and node.name not in self.functions:
63 62 self.functions.append(node.name)
64 63
65 64 def visit_ClassDef(self, node):
66 65 if not (node.name.startswith('_') or self.has_undoc_decorator(node)) \
67 66 and node.name not in self.classes_seen:
68 67 cls = Obj(name=node.name)
69 68 cls.has_init = any(isinstance(n, ast.FunctionDef) and \
70 69 n.name=='__init__' for n in node.body)
71 70 self.classes.append(cls)
72 71 self.classes_seen.add(node.name)
73 72
74 73 def scan(self, mod):
75 74 self.visit(mod)
76 75 return self.functions, self.classes
77 76
78 77 # Functions and classes
79 78 class ApiDocWriter(object):
80 79 ''' Class for automatic detection and parsing of API docs
81 80 to Sphinx-parsable reST format'''
82 81
83 82 # only separating first two levels
84 83 rst_section_levels = ['*', '=', '-', '~', '^']
85 84
86 85 def __init__(self,
87 86 package_name,
88 87 rst_extension='.rst',
89 88 package_skip_patterns=None,
90 89 module_skip_patterns=None,
91 90 names_from__all__=None,
92 91 ):
93 92 ''' Initialize package for parsing
94 93
95 94 Parameters
96 95 ----------
97 96 package_name : string
98 97 Name of the top-level package. *package_name* must be the
99 98 name of an importable package
100 99 rst_extension : string, optional
101 100 Extension for reST files, default '.rst'
102 101 package_skip_patterns : None or sequence of {strings, regexps}
103 102 Sequence of strings giving URIs of packages to be excluded
104 103 Operates on the package path, starting at (including) the
105 104 first dot in the package path, after *package_name* - so,
106 105 if *package_name* is ``sphinx``, then ``sphinx.util`` will
107 106 result in ``.util`` being passed for searching by these
108 107 regexps. If None, the default is used. Default is:
109 108 ['\.tests$']
110 109 module_skip_patterns : None or sequence
111 110 Sequence of strings giving URIs of modules to be excluded
112 111 Operates on the module name including preceding URI path,
113 112 back to the first dot after *package_name*. For example
114 113 ``sphinx.util.console`` results in the string to search of
115 114 ``.util.console``
116 115 If None, the default is used. Default is:
117 116 ['\.setup$', '\._']
118 117 names_from__all__ : set, optional
119 118 Modules listed in here will be scanned by doing ``from mod import *``,
120 119 rather than finding function and class definitions by scanning the
121 120 AST. This is intended for API modules which expose things defined in
122 121 other files. Modules listed here must define ``__all__`` to avoid
123 122 exposing everything they import.
124 123 '''
125 124 if package_skip_patterns is None:
126 125 package_skip_patterns = ['\\.tests$']
127 126 if module_skip_patterns is None:
128 127 module_skip_patterns = ['\\.setup$', '\\._']
129 128 self.package_name = package_name
130 129 self.rst_extension = rst_extension
131 130 self.package_skip_patterns = package_skip_patterns
132 131 self.module_skip_patterns = module_skip_patterns
133 132 self.names_from__all__ = names_from__all__ or set()
134 133
135 134 def get_package_name(self):
136 135 return self._package_name
137 136
138 137 def set_package_name(self, package_name):
139 138 ''' Set package_name
140 139
141 140 >>> docwriter = ApiDocWriter('sphinx')
142 141 >>> import sphinx
143 142 >>> docwriter.root_path == sphinx.__path__[0]
144 143 True
145 144 >>> docwriter.package_name = 'docutils'
146 145 >>> import docutils
147 146 >>> docwriter.root_path == docutils.__path__[0]
148 147 True
149 148 '''
150 149 # It's also possible to imagine caching the module parsing here
151 150 self._package_name = package_name
152 151 self.root_module = import_module(package_name)
153 152 self.root_path = self.root_module.__path__[0]
154 153 self.written_modules = None
155 154
156 155 package_name = property(get_package_name, set_package_name, None,
157 156 'get/set package_name')
158 157
159 158 def _uri2path(self, uri):
160 159 ''' Convert uri to absolute filepath
161 160
162 161 Parameters
163 162 ----------
164 163 uri : string
165 164 URI of python module to return path for
166 165
167 166 Returns
168 167 -------
169 168 path : None or string
170 169 Returns None if there is no valid path for this URI
171 170 Otherwise returns absolute file system path for URI
172 171
173 172 Examples
174 173 --------
175 174 >>> docwriter = ApiDocWriter('sphinx')
176 175 >>> import sphinx
177 176 >>> modpath = sphinx.__path__[0]
178 177 >>> res = docwriter._uri2path('sphinx.builder')
179 178 >>> res == os.path.join(modpath, 'builder.py')
180 179 True
181 180 >>> res = docwriter._uri2path('sphinx')
182 181 >>> res == os.path.join(modpath, '__init__.py')
183 182 True
184 183 >>> docwriter._uri2path('sphinx.does_not_exist')
185 184
186 185 '''
187 186 if uri == self.package_name:
188 187 return os.path.join(self.root_path, '__init__.py')
189 188 path = uri.replace('.', os.path.sep)
190 189 path = path.replace(self.package_name + os.path.sep, '')
191 190 path = os.path.join(self.root_path, path)
192 191 # XXX maybe check for extensions as well?
193 192 if os.path.exists(path + '.py'): # file
194 193 path += '.py'
195 194 elif os.path.exists(os.path.join(path, '__init__.py')):
196 195 path = os.path.join(path, '__init__.py')
197 196 else:
198 197 return None
199 198 return path
200 199
201 200 def _path2uri(self, dirpath):
202 201 ''' Convert directory path to uri '''
203 202 relpath = dirpath.replace(self.root_path, self.package_name)
204 203 if relpath.startswith(os.path.sep):
205 204 relpath = relpath[1:]
206 205 return relpath.replace(os.path.sep, '.')
207 206
208 207 def _parse_module(self, uri):
209 208 ''' Parse module defined in *uri* '''
210 209 filename = self._uri2path(uri)
211 210 if filename is None:
212 211 # nothing that we could handle here.
213 212 return ([],[])
214 213 with open(filename, 'rb') as f:
215 214 mod = ast.parse(f.read())
216 215 return FuncClsScanner().scan(mod)
217 216
218 217 def _import_funcs_classes(self, uri):
219 218 """Import * from uri, and separate out functions and classes."""
220 219 ns = {}
221 220 exec('from %s import *' % uri, ns)
222 221 funcs, classes = [], []
223 222 for name, obj in ns.items():
224 223 if inspect.isclass(obj):
225 224 cls = Obj(name=name, has_init='__init__' in obj.__dict__)
226 225 classes.append(cls)
227 226 elif inspect.isfunction(obj):
228 227 funcs.append(name)
229 228
230 229 return sorted(funcs), sorted(classes, key=lambda x: x.name)
231 230
232 231 def find_funcs_classes(self, uri):
233 232 """Find the functions and classes defined in the module ``uri``"""
234 233 if uri in self.names_from__all__:
235 234 # For API modules which expose things defined elsewhere, import them
236 235 return self._import_funcs_classes(uri)
237 236 else:
238 237 # For other modules, scan their AST to see what they define
239 238 return self._parse_module(uri)
240 239
241 240 def generate_api_doc(self, uri):
242 241 '''Make autodoc documentation template string for a module
243 242
244 243 Parameters
245 244 ----------
246 245 uri : string
247 246 python location of module - e.g 'sphinx.builder'
248 247
249 248 Returns
250 249 -------
251 250 S : string
252 251 Contents of API doc
253 252 '''
254 253 # get the names of all classes and functions
255 254 functions, classes = self.find_funcs_classes(uri)
256 255 if not len(functions) and not len(classes):
257 256 #print ('WARNING: Empty -', uri) # dbg
258 257 return ''
259 258
260 259 # Make a shorter version of the uri that omits the package name for
261 260 # titles
262 261 uri_short = re.sub(r'^%s\.' % self.package_name,'',uri)
263 262
264 263 ad = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n'
265 264
266 265 # Set the chapter title to read 'Module:' for all modules except for the
267 266 # main packages
268 267 if '.' in uri:
269 268 chap_title = 'Module: :mod:`' + uri_short + '`'
270 269 else:
271 270 chap_title = ':mod:`' + uri_short + '`'
272 271 ad += chap_title + '\n' + self.rst_section_levels[1] * len(chap_title)
273 272
274 273 ad += '\n.. automodule:: ' + uri + '\n'
275 274 ad += '\n.. currentmodule:: ' + uri + '\n'
276 275
277 276 if classes:
278 277 subhead = str(len(classes)) + (' Classes' if len(classes) > 1 else ' Class')
279 278 ad += '\n'+ subhead + '\n' + \
280 279 self.rst_section_levels[2] * len(subhead) + '\n'
281 280
282 281 for c in classes:
283 282 ad += '\n.. autoclass:: ' + c.name + '\n'
284 283 # must NOT exclude from index to keep cross-refs working
285 284 ad += ' :members:\n' \
286 285 ' :show-inheritance:\n'
287 286 if c.has_init:
288 287 ad += '\n .. automethod:: __init__\n'
289 288
290 289 if functions:
291 290 subhead = str(len(functions)) + (' Functions' if len(functions) > 1 else ' Function')
292 291 ad += '\n'+ subhead + '\n' + \
293 292 self.rst_section_levels[2] * len(subhead) + '\n'
294 293 for f in functions:
295 294 # must NOT exclude from index to keep cross-refs working
296 295 ad += '\n.. autofunction:: ' + uri + '.' + f + '\n\n'
297 296 return ad
298 297
299 298 def _survives_exclude(self, matchstr, match_type):
300 299 ''' Returns True if *matchstr* does not match patterns
301 300
302 301 ``self.package_name`` removed from front of string if present
303 302
304 303 Examples
305 304 --------
306 305 >>> dw = ApiDocWriter('sphinx')
307 306 >>> dw._survives_exclude('sphinx.okpkg', 'package')
308 307 True
309 308 >>> dw.package_skip_patterns.append('^\\.badpkg$')
310 309 >>> dw._survives_exclude('sphinx.badpkg', 'package')
311 310 False
312 311 >>> dw._survives_exclude('sphinx.badpkg', 'module')
313 312 True
314 313 >>> dw._survives_exclude('sphinx.badmod', 'module')
315 314 True
316 315 >>> dw.module_skip_patterns.append('^\\.badmod$')
317 316 >>> dw._survives_exclude('sphinx.badmod', 'module')
318 317 False
319 318 '''
320 319 if match_type == 'module':
321 320 patterns = self.module_skip_patterns
322 321 elif match_type == 'package':
323 322 patterns = self.package_skip_patterns
324 323 else:
325 324 raise ValueError('Cannot interpret match type "%s"'
326 325 % match_type)
327 326 # Match to URI without package name
328 327 L = len(self.package_name)
329 328 if matchstr[:L] == self.package_name:
330 329 matchstr = matchstr[L:]
331 330 for pat in patterns:
332 331 try:
333 332 pat.search
334 333 except AttributeError:
335 334 pat = re.compile(pat)
336 335 if pat.search(matchstr):
337 336 return False
338 337 return True
339 338
340 339 def discover_modules(self):
341 340 ''' Return module sequence discovered from ``self.package_name``
342 341
343 342
344 343 Parameters
345 344 ----------
346 345 None
347 346
348 347 Returns
349 348 -------
350 349 mods : sequence
351 350 Sequence of module names within ``self.package_name``
352 351
353 352 Examples
354 353 --------
355 354 >>> dw = ApiDocWriter('sphinx')
356 355 >>> mods = dw.discover_modules()
357 356 >>> 'sphinx.util' in mods
358 357 True
359 358 >>> dw.package_skip_patterns.append('\.util$')
360 359 >>> 'sphinx.util' in dw.discover_modules()
361 360 False
362 361 >>>
363 362 '''
364 363 modules = [self.package_name]
365 364 # raw directory parsing
366 365 for dirpath, dirnames, filenames in os.walk(self.root_path):
367 366 # Check directory names for packages
368 367 root_uri = self._path2uri(os.path.join(self.root_path,
369 368 dirpath))
370 369 for dirname in dirnames[:]: # copy list - we modify inplace
371 370 package_uri = '.'.join((root_uri, dirname))
372 371 if (self._uri2path(package_uri) and
373 372 self._survives_exclude(package_uri, 'package')):
374 373 modules.append(package_uri)
375 374 else:
376 375 dirnames.remove(dirname)
377 376 # Check filenames for modules
378 377 for filename in filenames:
379 378 module_name = filename[:-3]
380 379 module_uri = '.'.join((root_uri, module_name))
381 380 if (self._uri2path(module_uri) and
382 381 self._survives_exclude(module_uri, 'module')):
383 382 modules.append(module_uri)
384 383 return sorted(modules)
385 384
386 385 def write_modules_api(self, modules,outdir):
387 386 # write the list
388 387 written_modules = []
389 388 for m in modules:
390 389 api_str = self.generate_api_doc(m)
391 390 if not api_str:
392 391 continue
393 392 # write out to file
394 393 outfile = os.path.join(outdir,
395 394 m + self.rst_extension)
396 395 fileobj = open(outfile, 'wt')
397 396 fileobj.write(api_str)
398 397 fileobj.close()
399 398 written_modules.append(m)
400 399 self.written_modules = written_modules
401 400
402 401 def write_api_docs(self, outdir):
403 402 """Generate API reST files.
404 403
405 404 Parameters
406 405 ----------
407 406 outdir : string
408 407 Directory name in which to store files
409 408 We create automatic filenames for each module
410 409
411 410 Returns
412 411 -------
413 412 None
414 413
415 414 Notes
416 415 -----
417 416 Sets self.written_modules to list of written modules
418 417 """
419 418 if not os.path.exists(outdir):
420 419 os.mkdir(outdir)
421 420 # compose list of modules
422 421 modules = self.discover_modules()
423 422 self.write_modules_api(modules,outdir)
424 423
425 424 def write_index(self, outdir, path='gen.rst', relative_to=None):
426 425 """Make a reST API index file from written files
427 426
428 427 Parameters
429 428 ----------
430 429 outdir : string
431 430 Directory to which to write generated index file
432 431 path : string
433 432 Filename to write index to
434 433 relative_to : string
435 434 path to which written filenames are relative. This
436 435 component of the written file path will be removed from
437 436 outdir, in the generated index. Default is None, meaning,
438 437 leave path as it is.
439 438 """
440 439 if self.written_modules is None:
441 440 raise ValueError('No modules written')
442 441 # Get full filename path
443 442 path = os.path.join(outdir, path)
444 443 # Path written into index is relative to rootpath
445 444 if relative_to is not None:
446 445 relpath = outdir.replace(relative_to + os.path.sep, '')
447 446 else:
448 447 relpath = outdir
449 448 idx = open(path,'wt')
450 449 w = idx.write
451 450 w('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n')
452 451 w('.. autosummary::\n'
453 452 ' :toctree: %s\n\n' % relpath)
454 453 for mod in self.written_modules:
455 454 w(' %s\n' % mod)
456 455 idx.close()
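Typical driver usage, sketched from the docstrings above; the package name and output paths are illustrative, and the import assumes the script is run alongside this module:

from apigen import ApiDocWriter   # assumed: run alongside this module

docwriter = ApiDocWriter('sphinx', package_skip_patterns=[r'\.tests$'])
docwriter.write_api_docs('source/api/generated')          # one .rst per module
docwriter.write_index('source/api/generated', 'gen.rst',
                      relative_to='source/api')           # autosummary index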
@@ -1,146 +1,145 b''
1 1 #!/usr/bin/env python
2 2 """An example of how to embed an IPython shell into a running program.
3 3
4 4 Please see the documentation in the IPython.Shell module for more details.
5 5
6 6 The accompanying file embed_class_short.py has quick code fragments for
7 7 embedding which you can cut and paste in your code once you understand how
8 8 things work.
9 9
10 10 The code in this file is deliberately extra-verbose, meant for learning."""
11 from __future__ import print_function
12 11
13 12 # The basics to get you going:
14 13
15 14 # IPython injects get_ipython into builtins, so you can know if you have nested
16 15 # copies running.
17 16
18 17 # Try running this code both at the command line and from inside IPython (with
19 18 # %run example-embed.py)
20 19
21 20 from IPython.terminal.prompts import Prompts, Token
22 21
23 22 class CustomPrompt(Prompts):
24 23
25 24 def in_prompt_tokens(self, cli=None):
26 25
27 26 return [
28 27 (Token.Prompt, 'In <'),
29 28 (Token.PromptNum, str(self.shell.execution_count)),
30 29 (Token.Prompt, '>: '),
31 30 ]
32 31
33 32 def out_prompt_tokens(self):
34 33 return [
35 34 (Token.OutPrompt, 'Out<'),
36 35 (Token.OutPromptNum, str(self.shell.execution_count)),
37 36 (Token.OutPrompt, '>: '),
38 37 ]
39 38
40 39
41 40 from traitlets.config.loader import Config
42 41 try:
43 42 get_ipython
44 43 except NameError:
45 44 nested = 0
46 45 cfg = Config()
47 46 cfg.TerminalInteractiveShell.prompts_class=CustomPrompt
48 47 else:
49 48 print("Running nested copies of IPython.")
50 49 print("The prompts for the nested copy have been modified")
51 50 cfg = Config()
52 51 nested = 1
53 52
54 53 # First import the embeddable shell class
55 54 from IPython.terminal.embed import InteractiveShellEmbed
56 55
57 56 # Now create an instance of the embeddable shell. The first argument is a
58 57 # string with options exactly as you would type them if you were starting
59 58 # IPython at the system command line. Any parameters you want to define for
60 59 # configuration can thus be specified here.
61 60 ipshell = InteractiveShellEmbed(config=cfg,
62 61 banner1 = 'Dropping into IPython',
63 62 exit_msg = 'Leaving Interpreter, back to program.')
64 63
65 64 # Make a second instance, you can have as many as you want.
66 65 ipshell2 = InteractiveShellEmbed(config=cfg,
67 66 banner1 = 'Second IPython instance.')
68 67
69 68 print('\nHello. This is printed from the main controller program.\n')
70 69
71 70 # You can then call ipshell() anywhere you need it (with an optional
72 71 # message):
73 72 ipshell('***Called from top level. '
74 73 'Hit Ctrl-D to exit interpreter and continue program.\n'
75 74 'Note that if you use %kill_embedded, you can fully deactivate\n'
76 75 'This embedded instance so it will never turn on again')
77 76
78 77 print('\nBack in caller program, moving along...\n')
79 78
80 79 #---------------------------------------------------------------------------
81 80 # More details:
82 81
83 82 # InteractiveShellEmbed instances don't print the standard system banner and
84 83 # messages. The IPython banner (which actually may contain initialization
85 84 # messages) is available as get_ipython().banner in case you want it.
86 85
87 86 # InteractiveShellEmbed instances print the following information every time they
88 87 # start:
89 88
90 89 # - A global startup banner.
91 90
92 91 # - A call-specific header string, which you can use to indicate where in the
93 92 # execution flow the shell is starting.
94 93
95 94 # They also print an exit message every time they exit.
96 95
97 96 # Both the startup banner and the exit message default to None, and can be set
98 97 # either at the instance constructor or at any other time
99 98 # by setting the banner and exit_msg attributes.
100 99
101 100 # The shell instance can be also put in 'dummy' mode globally or on a per-call
102 101 # basis. This gives you fine control for debugging without having to change
103 102 # code all over the place.
104 103
105 104 # The code below illustrates all this.
106 105
107 106
108 107 # This is how the global banner and exit_msg can be reset at any point
109 108 ipshell.banner2 = 'Entering interpreter - New Banner'
110 109 ipshell.exit_msg = 'Leaving interpreter - New exit_msg'
111 110
112 111 def foo(m):
113 112 s = 'spam'
114 113 ipshell('***In foo(). Try %whos, or print s or m:')
115 114 print('foo says m = ',m)
116 115
117 116 def bar(n):
118 117 s = 'eggs'
119 118 ipshell('***In bar(). Try %whos, or print s or n:')
120 119 print('bar says n = ',n)
121 120
122 121 # Some calls to the above functions which will trigger IPython:
123 122 print('Main program calling foo("eggs")\n')
124 123 foo('eggs')
125 124
126 125 # The shell can be put in 'dummy' mode where calls to it silently return. This
127 126 # allows you, for example, to globally turn off debugging for a program with a
128 127 # single call.
129 128 ipshell.dummy_mode = True
130 129 print('\nTrying to call IPython which is now "dummy":')
131 130 ipshell()
132 131 print('Nothing happened...')
133 132 # The global 'dummy' mode can still be overridden for a single call
134 133 print('\nOverriding dummy mode manually:')
135 134 ipshell(dummy=False)
136 135
137 136 # Reactivate the IPython shell
138 137 ipshell.dummy_mode = False
139 138
140 139 print('You can even have multiple embedded instances:')
141 140 ipshell2()
142 141
143 142 print('\nMain program calling bar("spam")\n')
144 143 bar('spam')
145 144
146 145 print('Main program finished. Bye!')
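For contrast, the same kind of interactive stop can be had without building an InteractiveShellEmbed instance, by calling the module-level helper from inside any function. A minimal sketch (the function, banner and header strings below are illustrative, not part of the example above):

from IPython import embed

def inspect_here(data):
    # Open an interactive shell with access to this frame's locals;
    # press Ctrl-D to return to the calling program.
    embed(banner1='Quick inspection shell',
          header='local variable `data` is available here')

inspect_here({'answer': 42})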
@@ -1,43 +1,42 b''
1 1 # -*- coding: utf-8 -*-
2 2 """A simple interactive demo to illustrate the use of IPython's Demo class.
3 3
4 4 Any python script can be run as a demo, but that does little more than showing
5 5 it on-screen, syntax-highlighted in one shot. If you add a little simple
6 6 markup, you can stop at specified intervals and return to the ipython prompt,
7 7 resuming execution later.
8 8
9 9 This is a unicode test, åäö
10 10 """
11 from __future__ import print_function
12 11
13 12 print('Hello, welcome to an interactive IPython demo.')
14 13 print('Executing this block should require confirmation before proceeding,')
15 14 print('unless auto_all has been set to true in the demo object')
16 15
17 16 # The mark below defines a block boundary, which is a point where IPython will
18 17 # stop execution and return to the interactive prompt.
19 18 # <demo> --- stop ---
20 19
21 20 x = 1
22 21 y = 2
23 22
24 23 # <demo> --- stop ---
25 24
26 25 # the mark below marks this block as silent
27 26 # <demo> silent
28 27
29 28 print('This is a silent block, which gets executed but not printed.')
30 29
31 30 # <demo> --- stop ---
32 31 # <demo> auto
33 32 print('This is an automatic block.')
34 33 print('It is executed without asking for confirmation, but printed.')
35 34 z = x+y
36 35
37 36 print('z=',z)
38 37
39 38 # <demo> --- stop ---
40 39 # This is just another normal block.
41 40 print('z is now:', z)
42 41
43 42 print('bye!')
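To step through a marked-up file like this one interactively, it can be handed to the Demo class from an IPython session. A minimal sketch (the filename is an assumed path to this script):

from IPython.lib.demo import Demo

demo = Demo('example-demo.py')  # assumed path to the marked-up script
demo()        # run the first block, then return to the prompt
demo()        # each further call executes the next block
demo.back(1)  # move back one block; call demo() again to re-run it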
@@ -1,138 +1,137 b''
1 1 #!python
2 2 """Distutils post installation script for Windows.
3 3
4 4 http://docs.python.org/2/distutils/builtdist.html#the-postinstallation-script
5 5
6 6 """
7 7
8 from __future__ import print_function
9 8
10 9 import os
11 10 import sys
12 11 import shutil
13 12
14 13 try:
15 14 import setuptools
16 15 have_setuptools = True
17 16 except ImportError:
18 17 have_setuptools = False
19 18
20 19
21 20 pjoin = os.path.join
22 21
23 22 # suffix for start menu folder names
24 23 pyver = "(Py%i.%i %i bit)" % (sys.version_info[0], sys.version_info[1],
25 24 (32, 64)[sys.maxsize > 2**32])
26 25
27 26
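# Note: create_shortcut, file_created, directory_created and
# get_special_folder_path are not imported here; the bdist_wininst
# post-install runtime injects them into this script's namespace.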
28 27 def mkshortcut(target, description, linkdir, arguments="", iconpath='',
29 28 workdir="%HOMEDRIVE%%HOMEPATH%", iconindex=0):
30 29 """Make a shortcut if it doesn't exist and register its creation."""
31 30 filename = pjoin(linkdir, description + '.lnk')
32 31 description = "%s %s" % (description, pyver)
33 32 create_shortcut(target, description, filename, arguments, workdir,
34 33 iconpath, iconindex)
35 34 file_created(filename)
36 35
37 36
38 37 def arguments(scriptsdir, script, scriptargs=''):
39 38 """Return command line arguments to be passed to the python executable."""
40 39 cmdbase = suffix(pjoin(scriptsdir, script))
41 40 if have_setuptools:
42 41 cmdbase += '-script.py'
43 42 return '"%s" %s' % (cmdbase, scriptargs)
44 43
45 44
46 45 def suffix(s):
47 46 """Add '3' suffix to programs for Python 3."""
48 47 if sys.version_info[0] == 3:
49 48 s = s + '3'
50 49 return s
51 50
52 51
53 52 def install():
54 53 """Routine to be run by the win32 installer with the -install switch."""
55 54 # Get some system constants
56 55 python = pjoin(sys.prefix, 'python.exe')
57 56 pythonw = pjoin(sys.prefix, 'pythonw.exe')
58 57
59 58 if not have_setuptools:
60 59 # This currently doesn't work without setuptools,
61 60 # so don't bother making broken links
62 61 print("Setuptools is required to"
63 62 " create Start Menu items.", file=sys.stderr)
64 63 print("Re-run this installer after installing"
65 64 " Setuptools to get Start Menu items.", file=sys.stderr)
66 65 return
67 66
68 67 # Lookup path to common startmenu ...
69 68 ip_start_menu = pjoin(get_special_folder_path('CSIDL_COMMON_PROGRAMS'),
70 69 'IPython %s' % pyver)
71 70
72 71 # Create IPython entry ...
73 72 if not os.path.isdir(ip_start_menu):
74 73 os.mkdir(ip_start_menu)
75 74 directory_created(ip_start_menu)
76 75
77 76 # Create .py and .bat files to make things available from
78 77 # the Windows command line. Thanks to the Twisted project
79 78 # for this logic!
80 79 programs = [
81 80 'ipython',
82 81 'iptest',
83 82 ]
84 83 programs = [suffix(p) for p in programs]
85 84 scripts = pjoin(sys.prefix, 'scripts')
86 85 if not have_setuptools:
87 86 # only create .bat files if we don't have setuptools
88 87 for program in programs:
89 88 raw = pjoin(scripts, program)
90 89 bat = raw + '.bat'
91 90 py = raw + '.py'
92 91 # Create .py versions of the scripts
93 92 shutil.copy(raw, py)
94 93 # Create .bat files for each of the scripts
95 94 bat_file = open(bat, 'w')
96 95 bat_file.write("@%s %s %%*" % (python, py))
97 96 bat_file.close()
98 97
99 98 # Create Start Menu shortcuts
100 99 iconpath = pjoin(scripts, 'ipython.ico')
101 100 mkshortcut(python, 'IPython', ip_start_menu,
102 101 arguments(scripts, 'ipython'), iconpath)
103 102 mkshortcut(python, 'IPython (pylab mode)', ip_start_menu,
104 103 arguments(scripts, 'ipython', '--pylab'), iconpath)
105 104
106 105 iconpath = pjoin(scripts, 'ipython_nb.ico')
107 106 mkshortcut(python, 'IPython Notebook', ip_start_menu,
108 107 arguments(scripts, 'ipython', 'notebook'), iconpath)
109 108
110 109 mkshortcut(pythonw, 'IPython Documentation', ip_start_menu,
111 110 '-m webbrowser -t "http://ipython.org/documentation.html"',
112 111 iconpath='url.dll')
113 112
114 113 # Disable pysh Start item until the profile restores functionality
115 114 # Most of this code is in IPython/deathrow, and needs to be updated
116 115 # to 0.11 APIs
117 116 #mkshortcut(python, 'IPython%s (command prompt mode)', ip_start_menu,
118 117 # arguments(scripts, 'ipython', 'profile=pysh --init'))
119 118
120 119
121 120 def remove():
122 121 """Routine to be run by the win32 installer with the -remove switch."""
123 122 pass
124 123
125 124
126 125 # main()
127 126 if len(sys.argv) > 1:
128 127 if sys.argv[1] == '-install':
129 128 try:
130 129 install()
131 130 except OSError:
132 131 print("Failed to create Start Menu items, try running the"
133 132 " installer as administrator.", file=sys.stderr)
134 133 elif sys.argv[1] == '-remove':
135 134 remove()
136 135 else:
137 136 print("Script was called with option %s" % sys.argv[1],
138 137 file=sys.stderr)
@@ -1,299 +1,297 b''
1 1 #!/usr/bin/env python
2 2 # -*- coding: utf-8 -*-
3 3 """Setup script for IPython.
4 4
5 5 Under Posix environments it works like a typical setup.py script.
6 6 Under Windows, the command sdist is not supported, since IPython
7 7 requires utilities which are not available under Windows."""
8 8
9 9 #-----------------------------------------------------------------------------
10 10 # Copyright (c) 2008-2011, IPython Development Team.
11 11 # Copyright (c) 2001-2007, Fernando Perez <fernando.perez@colorado.edu>
12 12 # Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
13 13 # Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
14 14 #
15 15 # Distributed under the terms of the Modified BSD License.
16 16 #
17 17 # The full license is in the file COPYING.rst, distributed with this software.
18 18 #-----------------------------------------------------------------------------
19 19
20 20 #-----------------------------------------------------------------------------
21 21 # Minimal Python version sanity check
22 22 #-----------------------------------------------------------------------------
23 23 from __future__ import print_function
24 24
25 25 import sys
26 26
27 27 # This check is also made in IPython/__init__, don't forget to update both when
28 28 # changing Python version requirements.
29 29 if sys.version_info < (3,3):
30 30 error = """
31 31 IPython 6.0+ does not support Python 2.6, 2.7, 3.0, 3.1, or 3.2.
32 32 When using Python 2.7, please install IPython 5.x LTS Long Term Support version.
33 33 Beginning with IPython 6.0, Python 3.3 and above is required.
34 34
35 35 See IPython `README.rst` file for more information:
36 36
37 37 https://github.com/ipython/ipython/blob/master/README.rst
38 38
39 39 """
40 40
41 41 print(error, file=sys.stderr)
42 42 sys.exit(1)
43 43
44 PY3 = (sys.version_info[0] >= 3)
45
46 44 # At least we're on the python version we need, move on.
47 45
48 46 #-------------------------------------------------------------------------------
49 47 # Imports
50 48 #-------------------------------------------------------------------------------
51 49
52 50 # Stdlib imports
53 51 import os
54 52
55 53 from glob import glob
56 54
57 55 # BEFORE importing distutils, remove MANIFEST. distutils doesn't properly
58 56 # update it when the contents of directories change.
59 57 if os.path.exists('MANIFEST'): os.remove('MANIFEST')
60 58
61 59 from distutils.core import setup
62 60
63 61 # Our own imports
64 62 from setupbase import target_update
65 63
66 64 from setupbase import (
67 65 setup_args,
68 66 find_packages,
69 67 find_package_data,
70 68 check_package_data_first,
71 69 find_entry_points,
72 70 build_scripts_entrypt,
73 71 find_data_files,
74 72 git_prebuild,
75 73 install_symlinked,
76 74 install_lib_symlink,
77 75 install_scripts_for_symlink,
78 76 unsymlink,
79 77 )
80 78
81 79 isfile = os.path.isfile
82 80 pjoin = os.path.join
83 81
84 82 #-------------------------------------------------------------------------------
85 83 # Handle OS specific things
86 84 #-------------------------------------------------------------------------------
87 85
88 86 if os.name in ('nt','dos'):
89 87 os_name = 'windows'
90 88 else:
91 89 os_name = os.name
92 90
93 91 # Under Windows, 'sdist' has not been supported. Now that the docs build with
94 92 # Sphinx it might work, but let's not turn it on until someone confirms that it
95 93 # actually works.
96 94 if os_name == 'windows' and 'sdist' in sys.argv:
97 95 print('The sdist command is not available under Windows. Exiting.')
98 96 sys.exit(1)
99 97
100 98
101 99 #-------------------------------------------------------------------------------
102 100 # Things related to the IPython documentation
103 101 #-------------------------------------------------------------------------------
104 102
105 103 # update the manuals when building a source dist
106 104 if len(sys.argv) >= 2 and sys.argv[1] in ('sdist','bdist_rpm'):
107 105
108 106 # List of things to be updated. Each entry is a triplet of args for
109 107 # target_update()
110 108 to_update = [
111 109 ('docs/man/ipython.1.gz',
112 110 ['docs/man/ipython.1'],
113 111 'cd docs/man && gzip -9c ipython.1 > ipython.1.gz'),
114 112 ]
115 113
116 114
117 115 [ target_update(*t) for t in to_update ]
118 116
119 117 #---------------------------------------------------------------------------
120 118 # Find all the packages, package data, and data_files
121 119 #---------------------------------------------------------------------------
122 120
123 121 packages = find_packages()
124 122 package_data = find_package_data()
125 123
126 124 data_files = find_data_files()
127 125
128 126 setup_args['packages'] = packages
129 127 setup_args['package_data'] = package_data
130 128 setup_args['data_files'] = data_files
131 129
132 130 #---------------------------------------------------------------------------
133 131 # custom distutils commands
134 132 #---------------------------------------------------------------------------
135 133 # imports here, so they are after setuptools import if there was one
136 134 from distutils.command.sdist import sdist
137 135 from distutils.command.upload import upload
138 136
139 137 class UploadWindowsInstallers(upload):
140 138
141 139 description = "Upload Windows installers to PyPI (only used from tools/release_windows.py)"
142 140 user_options = upload.user_options + [
143 141 ('files=', 'f', 'exe file (or glob) to upload')
144 142 ]
145 143 def initialize_options(self):
146 144 upload.initialize_options(self)
147 145 meta = self.distribution.metadata
148 146 base = '{name}-{version}'.format(
149 147 name=meta.get_name(),
150 148 version=meta.get_version()
151 149 )
152 150 self.files = os.path.join('dist', '%s.*.exe' % base)
153 151
154 152 def run(self):
155 153 for dist_file in glob(self.files):
156 154 self.upload_file('bdist_wininst', 'any', dist_file)
157 155
158 156 setup_args['cmdclass'] = {
159 157 'build_py': \
160 158 check_package_data_first(git_prebuild('IPython')),
161 159 'sdist' : git_prebuild('IPython', sdist),
162 160 'upload_wininst' : UploadWindowsInstallers,
163 161 'symlink': install_symlinked,
164 162 'install_lib_symlink': install_lib_symlink,
165 163 'install_scripts_sym': install_scripts_for_symlink,
166 164 'unsymlink': unsymlink,
167 165 }
168 166
169 167
170 168 #---------------------------------------------------------------------------
171 169 # Handle scripts, dependencies, and setuptools specific things
172 170 #---------------------------------------------------------------------------
173 171
174 172 # For some commands, use setuptools. Note that we do NOT list install here!
175 173 # If you want a setuptools-enhanced install, just run 'setupegg.py install'
176 174 needs_setuptools = set(('develop', 'release', 'bdist_egg', 'bdist_rpm',
177 175 'bdist', 'bdist_dumb', 'bdist_wininst', 'bdist_wheel',
178 176 'egg_info', 'easy_install', 'upload', 'install_egg_info',
179 177 ))
180 178
181 179 if len(needs_setuptools.intersection(sys.argv)) > 0:
182 180 import setuptools
183 181
184 182 # This dict is used for passing extra arguments that are setuptools
185 183 # specific to setup
186 184 setuptools_extra_args = {}
187 185
188 186 # setuptools requirements
189 187
190 188 extras_require = dict(
191 189 parallel = ['ipyparallel'],
192 190 qtconsole = ['qtconsole'],
193 191 doc = ['Sphinx>=1.3'],
194 192 test = ['nose>=0.10.1', 'requests', 'testpath', 'pygments', 'nbformat', 'ipykernel', 'numpy'],
195 193 terminal = [],
196 194 kernel = ['ipykernel'],
197 195 nbformat = ['nbformat'],
198 196 notebook = ['notebook', 'ipywidgets'],
199 197 nbconvert = ['nbconvert'],
200 198 )
201 199
202 200 install_requires = [
203 201 'setuptools>=18.5',
204 202 'decorator',
205 203 'pickleshare',
206 204 'simplegeneric>0.8',
207 205 'traitlets>=4.2',
208 206 'prompt_toolkit>=1.0.3,<2.0.0',
209 207 'pygments',
210 208 ]
211 209
212 210 # Platform-specific dependencies:
213 211 # This is the correct way to specify these,
214 212 # but requires pip >= 6. pip < 6 ignores these.
215 213
216 214 extras_require.update({
217 215 ':python_version == "2.7"': ['backports.shutil_get_terminal_size'],
218 216 ':python_version == "2.7" or python_version == "3.3"': ['pathlib2'],
219 217 ':sys_platform != "win32"': ['pexpect'],
220 218 ':sys_platform == "darwin"': ['appnope'],
221 219 ':sys_platform == "win32"': ['colorama'],
222 220 ':sys_platform == "win32" and python_version < "3.6"': ['win_unicode_console>=0.5'],
223 221 'test:python_version == "2.7"': ['mock'],
224 222 })
225 223 # FIXME: re-specify above platform dependencies for pip < 6
226 224 # These would result in non-portable bdists.
227 225 if not any(arg.startswith('bdist') for arg in sys.argv):
228 226 if sys.version_info < (3, 3):
229 227 extras_require['test'].append('mock')
230 228
231 229 if sys.platform == 'darwin':
232 230 install_requires.extend(['appnope'])
233 231
234 232 if not sys.platform.startswith('win'):
235 233 install_requires.append('pexpect')
236 234
237 235 # workaround pypa/setuptools#147, where setuptools misspells
238 236 # platform_python_implementation as python_implementation
239 237 if 'setuptools' in sys.modules:
240 238 for key in list(extras_require):
241 239 if 'platform_python_implementation' in key:
242 240 new_key = key.replace('platform_python_implementation', 'python_implementation')
243 241 extras_require[new_key] = extras_require.pop(key)
244 242
245 243 everything = set()
246 244 for key, deps in extras_require.items():
247 245 if ':' not in key:
248 246 everything.update(deps)
249 247 extras_require['all'] = everything
250 248
251 249 if 'setuptools' in sys.modules:
252 250 setuptools_extra_args['python_requires'] = '>=3.3'
253 251 setuptools_extra_args['zip_safe'] = False
254 252 setuptools_extra_args['entry_points'] = {
255 253 'console_scripts': find_entry_points(),
256 254 'pygments.lexers': [
257 255 'ipythonconsole = IPython.lib.lexers:IPythonConsoleLexer',
258 256 'ipython = IPython.lib.lexers:IPythonLexer',
259 257 'ipython3 = IPython.lib.lexers:IPython3Lexer',
260 258 ],
261 259 }
262 260 setup_args['extras_require'] = extras_require
263 261 requires = setup_args['install_requires'] = install_requires
264 262
265 263 # Script to be run by the windows binary installer after the default setup
266 264 # routine, to add shortcuts and similar windows-only things. Windows
267 265 # post-install scripts MUST reside in the scripts/ dir, otherwise distutils
268 266 # doesn't find them.
269 267 if 'bdist_wininst' in sys.argv:
270 268 if len(sys.argv) > 2 and \
271 269 ('sdist' in sys.argv or 'bdist_rpm' in sys.argv):
272 270 print("ERROR: bdist_wininst must be run alone. Exiting.", file=sys.stderr)
273 271 sys.exit(1)
274 272 setup_args['data_files'].append(
275 273 ['Scripts', ('scripts/ipython.ico', 'scripts/ipython_nb.ico')])
276 274 setup_args['scripts'] = [pjoin('scripts','ipython_win_post_install.py')]
277 275 setup_args['options'] = {"bdist_wininst":
278 276 {"install_script":
279 277 "ipython_win_post_install.py"}}
280 278
281 279 else:
282 280 # scripts has to be a non-empty list, or install_scripts isn't called
283 281 setup_args['scripts'] = [e.split('=')[0].strip() for e in find_entry_points()]
284 282
285 283 setup_args['cmdclass']['build_scripts'] = build_scripts_entrypt
286 284
287 285 #---------------------------------------------------------------------------
288 286 # Do the actual setup now
289 287 #---------------------------------------------------------------------------
290 288
291 289 setup_args.update(setuptools_extra_args)
292 290
293 291
294 292
295 293 def main():
296 294 setup(**setup_args)
297 295
298 296 if __name__ == '__main__':
299 297 main()
@@ -1,469 +1,468 b''
1 1 # encoding: utf-8
2 2 """
3 3 This module defines the things that are used in setup.py for building IPython
4 4
5 5 This includes:
6 6
7 7 * The basic arguments to setup
8 8 * Functions for finding things like packages, package data, etc.
9 9 * A function for checking dependencies.
10 10 """
11 11
12 12 # Copyright (c) IPython Development Team.
13 13 # Distributed under the terms of the Modified BSD License.
14 14
15 from __future__ import print_function
16 15
17 16 import re
18 17 import os
19 18 import sys
20 19
21 20 from distutils import log
22 21 from distutils.command.build_py import build_py
23 22 from distutils.command.build_scripts import build_scripts
24 23 from distutils.command.install import install
25 24 from distutils.command.install_scripts import install_scripts
26 25 from distutils.cmd import Command
27 26 from glob import glob
28 27
29 28 from setupext import install_data_ext
30 29
31 30 #-------------------------------------------------------------------------------
32 31 # Useful globals and utility functions
33 32 #-------------------------------------------------------------------------------
34 33
35 34 # A few handy globals
36 35 isfile = os.path.isfile
37 36 pjoin = os.path.join
38 37 repo_root = os.path.dirname(os.path.abspath(__file__))
39 38
40 39 def oscmd(s):
41 40 print(">", s)
42 41 os.system(s)
43 42
44 43 # Py3 compatibility hacks, without assuming IPython itself is installed with
45 44 # the full py3compat machinery.
46 45
47 46 try:
48 47 execfile
49 48 except NameError:
50 49 def execfile(fname, globs, locs=None):
51 50 locs = locs or globs
52 51 exec(compile(open(fname).read(), fname, "exec"), globs, locs)
53 52
54 53 # A little utility we'll need below, since glob() does NOT allow you to do
55 54 # exclusion on multiple endings!
56 55 def file_doesnt_endwith(test,endings):
57 56 """Return true if test is a file and its name does NOT end with any
58 57 of the strings listed in endings."""
59 58 if not isfile(test):
60 59 return False
61 60 for e in endings:
62 61 if test.endswith(e):
63 62 return False
64 63 return True
65 64
66 65 #---------------------------------------------------------------------------
67 66 # Basic project information
68 67 #---------------------------------------------------------------------------
69 68
70 69 # release.py contains version, authors, license, url, keywords, etc.
71 70 execfile(pjoin(repo_root, 'IPython','core','release.py'), globals())
72 71
73 72 # Create a dict with the basic information
74 73 # This dict is eventually passed to setup after additional keys are added.
75 74 setup_args = dict(
76 75 name = name,
77 76 version = version,
78 77 description = description,
79 78 long_description = long_description,
80 79 author = author,
81 80 author_email = author_email,
82 81 url = url,
83 82 license = license,
84 83 platforms = platforms,
85 84 keywords = keywords,
86 85 classifiers = classifiers,
87 86 cmdclass = {'install_data': install_data_ext},
88 87 )
89 88
90 89
91 90 #---------------------------------------------------------------------------
92 91 # Find packages
93 92 #---------------------------------------------------------------------------
94 93
95 94 def find_packages():
96 95 """
97 96 Find all of IPython's packages.
98 97 """
99 98 excludes = ['deathrow', 'quarantine']
100 99 packages = []
101 100 for dir,subdirs,files in os.walk('IPython'):
102 101 package = dir.replace(os.path.sep, '.')
103 102 if any(package.startswith('IPython.'+exc) for exc in excludes):
104 103 # package is to be excluded (e.g. deathrow)
105 104 continue
106 105 if '__init__.py' not in files:
107 106 # not a package
108 107 continue
109 108 packages.append(package)
110 109 return packages
111 110
112 111 #---------------------------------------------------------------------------
113 112 # Find package data
114 113 #---------------------------------------------------------------------------
115 114
116 115 def find_package_data():
117 116 """
118 117 Find IPython's package_data.
119 118 """
120 119 # This is not enough for these things to appear in an sdist.
121 120 # We need to muck with the MANIFEST to get this to work
122 121
123 122 package_data = {
124 123 'IPython.core' : ['profile/README*'],
125 124 'IPython.core.tests' : ['*.png', '*.jpg', 'daft_extension/*.py'],
126 125 'IPython.lib.tests' : ['*.wav'],
127 126 'IPython.testing.plugin' : ['*.txt'],
128 127 }
129 128
130 129 return package_data
131 130
132 131
133 132 def check_package_data(package_data):
134 133 """verify that package_data globs make sense"""
135 134 print("checking package data")
136 135 for pkg, data in package_data.items():
137 136 pkg_root = pjoin(*pkg.split('.'))
138 137 for d in data:
139 138 path = pjoin(pkg_root, d)
140 139 if '*' in path:
141 140 assert len(glob(path)) > 0, "No files match pattern %s" % path
142 141 else:
143 142 assert os.path.exists(path), "Missing package data: %s" % path
144 143
145 144
146 145 def check_package_data_first(command):
147 146 """decorator for checking package_data before running a given command
148 147
149 148 Probably only needs to wrap build_py
150 149 """
151 150 class DecoratedCommand(command):
152 151 def run(self):
153 152 check_package_data(self.package_data)
154 153 command.run(self)
155 154 return DecoratedCommand
156 155
157 156
158 157 #---------------------------------------------------------------------------
159 158 # Find data files
160 159 #---------------------------------------------------------------------------
161 160
162 161 def make_dir_struct(tag,base,out_base):
163 162 """Make the directory structure of all files below a starting dir.
164 163
165 164 This is just a convenience routine to help build a nested directory
166 165 hierarchy because distutils is too stupid to do this by itself.
167 166
168 167 XXX - this needs a proper docstring!
169 168 """
170 169
171 170 # we'll use these a lot below
172 171 lbase = len(base)
173 172 pathsep = os.path.sep
174 173 lpathsep = len(pathsep)
175 174
176 175 out = []
177 176 for (dirpath,dirnames,filenames) in os.walk(base):
178 177 # we need to strip out the dirpath from the base to map it to the
179 178 # output (installation) path. This requires possibly stripping the
180 179 # path separator, because otherwise pjoin will not work correctly
181 180 # (pjoin('foo/','/bar') returns '/bar').
182 181
183 182 dp_eff = dirpath[lbase:]
184 183 if dp_eff.startswith(pathsep):
185 184 dp_eff = dp_eff[lpathsep:]
186 185 # The output path must be anchored at the out_base marker
187 186 out_path = pjoin(out_base,dp_eff)
188 187 # Now we can generate the final filenames. Since os.walk only produces
189 188 # filenames, we must join back with the dirpath to get full valid file
190 189 # paths:
191 190 pfiles = [pjoin(dirpath,f) for f in filenames]
192 191 # Finally, generate the entry we need, which is a pair of (output
193 192 # path, files) for use as a data_files parameter in install_data.
194 193 out.append((out_path, pfiles))
195 194
196 195 return out
197 196
198 197
199 198 def find_data_files():
200 199 """
201 200 Find IPython's data_files.
202 201
203 202 Just man pages at this point.
204 203 """
205 204
206 205 manpagebase = pjoin('share', 'man', 'man1')
207 206
208 207 # Simple file lists can be made by hand
209 208 manpages = [f for f in glob(pjoin('docs','man','*.1.gz')) if isfile(f)]
210 209 if not manpages:
211 210 # When running from a source tree, the manpages aren't gzipped
212 211 manpages = [f for f in glob(pjoin('docs','man','*.1')) if isfile(f)]
213 212
214 213 # And assemble the entire output list
215 214 data_files = [ (manpagebase, manpages) ]
216 215
217 216 return data_files
218 217
219 218
220 219 def make_man_update_target(manpage):
221 220 """Return a target_update-compliant tuple for the given manpage.
222 221
223 222 Parameters
224 223 ----------
225 224 manpage : string
226 225 Name of the manpage, must include the section number (trailing number).
227 226
228 227 Example
229 228 -------
230 229
231 230 >>> make_man_update_target('ipython.1') #doctest: +NORMALIZE_WHITESPACE
232 231 ('docs/man/ipython.1.gz',
233 232 ['docs/man/ipython.1'],
234 233 'cd docs/man && gzip -9c ipython.1 > ipython.1.gz')
235 234 """
236 235 man_dir = pjoin('docs', 'man')
237 236 manpage_gz = manpage + '.gz'
238 237 manpath = pjoin(man_dir, manpage)
239 238 manpath_gz = pjoin(man_dir, manpage_gz)
240 239 gz_cmd = ( "cd %(man_dir)s && gzip -9c %(manpage)s > %(manpage_gz)s" %
241 240 locals() )
242 241 return (manpath_gz, [manpath], gz_cmd)
243 242
244 243 # The two functions below are copied from IPython.utils.path, so we don't need
245 244 # to import IPython during setup, which fails on Python 3.
246 245
247 246 def target_outdated(target,deps):
248 247 """Determine whether a target is out of date.
249 248
250 249 target_outdated(target,deps) -> 1/0
251 250
252 251 deps: list of filenames which MUST exist.
253 252 target: single filename which may or may not exist.
254 253
255 254 If target doesn't exist or is older than any file listed in deps, return
256 255 true, otherwise return false.
257 256 """
258 257 try:
259 258 target_time = os.path.getmtime(target)
260 259 except os.error:
261 260 return 1
262 261 for dep in deps:
263 262 dep_time = os.path.getmtime(dep)
264 263 if dep_time > target_time:
265 264 #print "For target",target,"Dep failed:",dep # dbg
266 265 #print "times (dep,tar):",dep_time,target_time # dbg
267 266 return 1
268 267 return 0
269 268
270 269
271 270 def target_update(target,deps,cmd):
272 271 """Update a target with a given command given a list of dependencies.
273 272
274 273 target_update(target,deps,cmd) -> runs cmd if target is outdated.
275 274
276 275 This is just a wrapper around target_outdated() which calls the given
277 276 command if target is outdated."""
278 277
279 278 if target_outdated(target,deps):
280 279 os.system(cmd)
281 280
282 281 #---------------------------------------------------------------------------
283 282 # Find scripts
284 283 #---------------------------------------------------------------------------
285 284
286 285 def find_entry_points():
287 286 """Defines the command line entry points for IPython
288 287
289 288 This always uses setuptools-style entry points. When setuptools is not in
290 289 use, our own build_scripts_entrypt class below parses these and builds
291 290 command line scripts.
292 291
293 292 Each of our entry points gets both a plain name, e.g. ipython, and one
294 293 suffixed with the Python major version number, e.g. ipython3.
295 294 """
296 295 ep = [
297 296 'ipython%s = IPython:start_ipython',
298 297 'iptest%s = IPython.testing.iptestcontroller:main',
299 298 ]
300 299 suffix = str(sys.version_info[0])
301 300 return [e % '' for e in ep] + [e % suffix for e in ep]
302 301
303 302 script_src = """#!{executable}
304 303 # This script was automatically generated by setup.py
305 304 if __name__ == '__main__':
306 305 from {mod} import {func}
307 306 {func}()
308 307 """
309 308
310 309 class build_scripts_entrypt(build_scripts):
311 310 """Build the command line scripts
312 311
313 312 Parse setuptools style entry points and write simple scripts to run the
314 313 target functions.
315 314
316 315 On Windows, this also creates .cmd wrappers for the scripts so that you can
317 316 easily launch them from a command line.
318 317 """
319 318 def run(self):
320 319 self.mkpath(self.build_dir)
321 320 outfiles = []
322 321 for script in find_entry_points():
323 322 name, entrypt = script.split('=')
324 323 name = name.strip()
325 324 entrypt = entrypt.strip()
326 325 outfile = os.path.join(self.build_dir, name)
327 326 outfiles.append(outfile)
328 327 print('Writing script to', outfile)
329 328
330 329 mod, func = entrypt.split(':')
331 330 with open(outfile, 'w') as f:
332 331 f.write(script_src.format(executable=sys.executable,
333 332 mod=mod, func=func))
334 333
335 334 if sys.platform == 'win32':
336 335 # Write .cmd wrappers for Windows so 'ipython' etc. work at the
337 336 # command line
338 337 cmd_file = os.path.join(self.build_dir, name + '.cmd')
339 338 cmd = '@"{python}" "%~dp0\{script}" %*\r\n'.format(
340 339 python=sys.executable, script=name)
341 340 log.info("Writing %s wrapper script" % cmd_file)
342 341 with open(cmd_file, 'w') as f:
343 342 f.write(cmd)
344 343
345 344 return outfiles, outfiles
346 345
347 346 class install_lib_symlink(Command):
348 347 user_options = [
349 348 ('install-dir=', 'd', "directory to install to"),
350 349 ]
351 350
352 351 def initialize_options(self):
353 352 self.install_dir = None
354 353
355 354 def finalize_options(self):
356 355 self.set_undefined_options('symlink',
357 356 ('install_lib', 'install_dir'),
358 357 )
359 358
360 359 def run(self):
361 360 if sys.platform == 'win32':
362 361 raise Exception("This doesn't work on Windows.")
363 362 pkg = os.path.join(os.getcwd(), 'IPython')
364 363 dest = os.path.join(self.install_dir, 'IPython')
365 364 if os.path.islink(dest):
366 365 print('removing existing symlink at %s' % dest)
367 366 os.unlink(dest)
368 367 print('symlinking %s -> %s' % (pkg, dest))
369 368 os.symlink(pkg, dest)
370 369
371 370 class unsymlink(install):
372 371 def run(self):
373 372 dest = os.path.join(self.install_lib, 'IPython')
374 373 if os.path.islink(dest):
375 374 print('removing symlink at %s' % dest)
376 375 os.unlink(dest)
377 376 else:
378 377 print('No symlink exists at %s' % dest)
379 378
380 379 class install_symlinked(install):
381 380 def run(self):
382 381 if sys.platform == 'win32':
383 382 raise Exception("This doesn't work on Windows.")
384 383
385 384 # Run all sub-commands (at least those that need to be run)
386 385 for cmd_name in self.get_sub_commands():
387 386 self.run_command(cmd_name)
388 387
389 388 # 'sub_commands': a list of commands this command might have to run to
390 389 # get its work done. See cmd.py for more info.
391 390 sub_commands = [('install_lib_symlink', lambda self:True),
392 391 ('install_scripts_sym', lambda self:True),
393 392 ]
394 393
395 394 class install_scripts_for_symlink(install_scripts):
396 395 """Redefined to get options from 'symlink' instead of 'install'.
397 396
398 397 I love distutils almost as much as I love setuptools.
399 398 """
400 399 def finalize_options(self):
401 400 self.set_undefined_options('build', ('build_scripts', 'build_dir'))
402 401 self.set_undefined_options('symlink',
403 402 ('install_scripts', 'install_dir'),
404 403 ('force', 'force'),
405 404 ('skip_build', 'skip_build'),
406 405 )
407 406
408 407
409 408 #---------------------------------------------------------------------------
410 409 # VCS related
411 410 #---------------------------------------------------------------------------
412 411
413 412
414 413 def git_prebuild(pkg_dir, build_cmd=build_py):
415 414 """Return extended build or sdist command class for recording commit
416 415
417 416 records git commit in IPython.utils._sysinfo.commit
418 417
419 418 for use in IPython.utils.sysinfo.sys_info() calls after installation.
420 419 """
421 420
422 421 class MyBuildPy(build_cmd):
423 422 ''' Subclass to write commit data into installation tree '''
424 423 def run(self):
425 424 # loose check: a bare `.dev` (no number) is supposed to be invalid, but is accepted here
426 425 print("check version number")
427 426 loose_pep440re = re.compile(r'^(\d+)\.(\d+)\.(\d+((a|b|rc)\d+)?)(\.post\d+)?(\.dev\d*)?$')
428 427 if not loose_pep440re.match(version):
429 428 raise ValueError("Version number '%s' is not valid (should match [N!]N(.N)*[{a|b|rc}N][.postN][.devN])" % version)
430 429
431 430
432 431 build_cmd.run(self)
433 432 # this one will only fire for build commands
434 433 if hasattr(self, 'build_lib'):
435 434 self._record_commit(self.build_lib)
436 435
437 436 def make_release_tree(self, base_dir, files):
438 437 # this one will fire for sdist
439 438 build_cmd.make_release_tree(self, base_dir, files)
440 439 self._record_commit(base_dir)
441 440
442 441 def _record_commit(self, base_dir):
443 442 import subprocess
444 443 proc = subprocess.Popen('git rev-parse --short HEAD',
445 444 stdout=subprocess.PIPE,
446 445 stderr=subprocess.PIPE,
447 446 shell=True)
448 447 repo_commit, _ = proc.communicate()
449 448 repo_commit = repo_commit.strip().decode("ascii")
450 449
451 450 out_pth = pjoin(base_dir, pkg_dir, 'utils', '_sysinfo.py')
452 451 if os.path.isfile(out_pth) and not repo_commit:
453 452 # nothing to write, don't clobber
454 453 return
455 454
456 455 print("writing git commit '%s' to %s" % (repo_commit, out_pth))
457 456
458 457 # remove to avoid overwriting original via hard link
459 458 try:
460 459 os.remove(out_pth)
461 460 except (IOError, OSError):
462 461 pass
463 462 with open(out_pth, 'w') as out_file:
464 463 out_file.writelines([
465 464 '# GENERATED BY setup.py\n',
466 465 'commit = u"%s"\n' % repo_commit,
467 466 ])
468 467 return MyBuildPy
469 468
@@ -1,193 +1,192 b''
1 1 #!/usr/bin/env python
2 2 """
3 3 Backport pull requests to a particular branch.
4 4
5 5 Usage: backport_pr.py [org/repository] branch [PR] [PR2]
6 6
7 7 e.g.:
8 8
9 9 python tools/backport_pr.py 0.13.1 123 155
10 10
11 11 to backport PRs #123 and #155 onto branch 0.13.1
12 12
13 13 or
14 14
15 15 python tools/backport_pr.py 2.1
16 16
17 17 to see what PRs are marked for backport with milestone=2.1 that have yet to be applied
18 18 to branch 2.x
19 19
20 20 or
21 21
22 22 python tools/backport_pr.py jupyter/notebook 0.13.1 123 155
23 23
24 24 to backport PRs #123 and #155 of the `jupyter/notebook` repo onto branch 0.13.1
25 25 of that repo.
26 26
27 27 """
28 28
29 from __future__ import print_function
30 29
31 30 import os
32 31 import re
33 32 import sys
34 33
35 34 from subprocess import Popen, PIPE, check_call, check_output
36 35 try:
37 36 from urllib.request import urlopen
38 37 except:
39 38 from urllib import urlopen
40 39
41 40 from gh_api import (
42 41 get_issues_list,
43 42 get_pull_request,
44 43 get_pull_request_files,
45 44 is_pull_request,
46 45 get_milestone_id,
47 46 )
48 47
49 48 def find_rejects(root='.'):
50 49 for dirname, dirs, files in os.walk(root):
51 50 for fname in files:
52 51 if fname.endswith('.rej'):
53 52 yield os.path.join(dirname, fname)
54 53
55 54 def get_current_branch():
56 55 branches = check_output(['git', 'branch'])
57 56 for branch in branches.splitlines():
58 57 if branch.startswith(b'*'):
59 58 return branch[1:].strip().decode('utf-8')
60 59
61 60 def backport_pr(branch, num, project='ipython/ipython'):
62 61 current_branch = get_current_branch()
63 62 if branch != current_branch:
64 63 check_call(['git', 'checkout', branch])
65 64 check_call(['git', 'pull'])
66 65 pr = get_pull_request(project, num, auth=True)
67 66 files = get_pull_request_files(project, num, auth=True)
68 67 patch_url = pr['patch_url']
69 68 title = pr['title']
70 69 description = pr['body']
71 70 fname = "PR%i.patch" % num
72 71 if os.path.exists(fname):
73 72 print("using patch from {fname}".format(**locals()))
74 73 with open(fname, 'rb') as f:
75 74 patch = f.read()
76 75 else:
77 76 req = urlopen(patch_url)
78 77 patch = req.read()
79 78
80 79 lines = description.splitlines()
81 80 if len(lines) > 5:
82 81 lines = lines[:5] + ['...']
83 82 description = '\n'.join(lines)
84 83
85 84 msg = "Backport PR #%i: %s" % (num, title) + '\n\n' + description
86 85 check = Popen(['git', 'apply', '--check', '--verbose'], stdin=PIPE)
87 86 a,b = check.communicate(patch)
88 87
89 88 if check.returncode:
90 89 print("patch did not apply, saving to {fname}".format(**locals()))
91 90 print("edit {fname} until `cat {fname} | git apply --check` succeeds".format(**locals()))
92 91 print("then run tools/backport_pr.py {num} again".format(**locals()))
93 92 if not os.path.exists(fname):
94 93 with open(fname, 'wb') as f:
95 94 f.write(patch)
96 95 return 1
97 96
98 97 p = Popen(['git', 'apply'], stdin=PIPE)
99 98 a,b = p.communicate(patch)
100 99
101 100 filenames = [ f['filename'] for f in files ]
102 101
103 102 check_call(['git', 'add'] + filenames)
104 103
105 104 check_call(['git', 'commit', '-m', msg])
106 105
107 106 print("PR #%i applied, with msg:" % num)
108 107 print()
109 108 print(msg)
110 109 print()
111 110
112 111 if branch != current_branch:
113 112 check_call(['git', 'checkout', current_branch])
114 113
115 114 return 0
116 115
117 116 backport_re = re.compile(r"(?:[Bb]ackport|[Mm]erge).*#(\d+)")
118 117
119 118 def already_backported(branch, since_tag=None):
120 119 """return set of PRs that have been backported already"""
121 120 if since_tag is None:
122 121 since_tag = check_output(['git','describe', branch, '--abbrev=0']).decode('utf8').strip()
123 122 cmd = ['git', 'log', '%s..%s' % (since_tag, branch), '--oneline']
124 123 lines = check_output(cmd).decode('utf8')
125 124 return set(int(num) for num in backport_re.findall(lines))
126 125
127 126 def should_backport(labels=None, milestone=None, project='ipython/ipython'):
128 127 """return set of PRs marked for backport"""
129 128 if labels is None and milestone is None:
130 129 raise ValueError("Specify one of labels or milestone.")
131 130 elif labels is not None and milestone is not None:
132 131 raise ValueError("Specify only one of labels or milestone.")
133 132 if labels is not None:
134 133 issues = get_issues_list(project,
135 134 labels=labels,
136 135 state='closed',
137 136 auth=True,
138 137 )
139 138 else:
140 139 milestone_id = get_milestone_id(project, milestone,
141 140 auth=True)
142 141 issues = get_issues_list(project,
143 142 milestone=milestone_id,
144 143 state='closed',
145 144 auth=True,
146 145 )
147 146
148 147 should_backport = set()
149 148 for issue in issues:
150 149 if not is_pull_request(issue):
151 150 continue
152 151 pr = get_pull_request(project, issue['number'],
153 152 auth=True)
154 153 if not pr['merged']:
155 154 print ("Marked PR closed without merge: %i" % pr['number'])
156 155 continue
157 156 if pr['base']['ref'] != 'master':
158 157 continue
159 158 should_backport.add(pr['number'])
160 159 return should_backport
161 160
162 161 if __name__ == '__main__':
163 162 project = 'ipython/ipython'
164 163
165 164 print("DEPRECATE: backport_pr.py is deprecated and is is now recommended"
166 165 "to install `ghpro` from PyPI.", file=sys.stderr)
167 166
168 167 args = list(sys.argv)
169 168 if len(args) >= 2:
170 169 if '/' in args[1]:
171 170 project = args[1]
172 171 del args[1]
173 172
174 173 if len(args) < 2:
175 174 print(__doc__)
176 175 sys.exit(1)
177 176
178 177 if len(args) < 3:
179 178 milestone = args[1]
180 179 branch = milestone.split('.')[0] + '.x'
181 180 already = already_backported(branch)
182 181 should = should_backport(milestone=milestone, project=project)
183 182 print ("The following PRs should be backported:")
184 183 for pr in sorted(should.difference(already)):
185 184 print (pr)
186 185 sys.exit(0)
187 186
188 187 for prno in map(int, args[2:]):
189 188 print("Backporting PR #%i" % prno)
190 189 rc = backport_pr(args[1], prno, project=project)
191 190 if rc:
192 191 print("Backporting PR #%i failed" % prno)
193 192 sys.exit(rc)
@@ -1,55 +1,54 b''
1 1 #!/usr/bin/env python
2 2 """Utility to look for hard tabs and \r characters in all sources.
3 3
4 4 Usage:
5 5
6 6 ./check_sources.py
7 7
8 8 It prints summaries and if chosen, line-by-line info of where \\t or \\r
9 9 characters can be found in our source tree.
10 10 """
11 from __future__ import print_function
12 11
13 12 # Config
14 13 # If true, all lines that have tabs are printed, with line number
15 14 full_report_tabs = True
16 15 # If true, all lines that have carriage returns are printed, with line number
17 16 full_report_rets = False
18 17
19 18 # Code begins
20 19 from IPython.external.path import path
21 20
22 21 rets = []
23 22 tabs = []
24 23
25 24 for f in path('..').walkfiles('*.py'):
26 25 errs = ''
27 26 cont = f.bytes()
28 27 if b'\t' in cont:
29 28 errs+='t'
30 29 tabs.append(f)
31 30
32 31 if b'\r' in cont:
33 32 errs+='r'
34 33 rets.append(f)
35 34
36 35 if errs:
37 36 print("%3s" % errs, f)
38 37
39 38 if 't' in errs and full_report_tabs:
40 39 for ln,line in enumerate(f.lines()):
41 40 if '\t' in line:
42 41 print('TAB:',ln,':',line, end=' ')
43 42
44 43 if 'r' in errs and full_report_rets:
45 44 for ln,line in enumerate(open(f.abspath(),'rb')):
46 45 if b'\r' in line:
47 46 print('RET:',ln,':',line, end=' ')
48 47
49 48 # Summary at the end, to call cleanup tools if necessary
50 49 if tabs:
51 50 print('Hard tabs found. These can be cleaned with untabify:')
52 51 for f in tabs: print(f, end=' ')
53 52 if rets:
54 53 print('Carriage returns (\\r) found in:')
55 54 for f in rets: print(f, end=' ')
@@ -1,89 +1,88 b''
1 1 # coding: utf-8
2 2
3 3 # This script autogenerates `IPython.core.latex_symbols.py`, which contains a
4 4 # single dict, named `latex_symbols`. The keys in this dict are latex symbols,
5 5 # such as `\\alpha` and the values in the dict are the unicode equivalents for
6 6 # those. Most importantly, only unicode symbols that are valid identifiers in
7 7 # Python 3 are included.
8 8
9 9 #
10 10 # The original mapping of latex symbols to unicode comes from the `latex_symbols.jl` file from Julia.
11 11
12 from __future__ import print_function
13 12 import os, sys
14 13
15 14 if not sys.version_info[0] == 3:
16 15 print("This script must be run with Python 3, exiting...")
17 16 sys.exit(1)
18 17
19 18 # Import the Julia LaTeX symbols
20 19 print('Importing latex_symbols.jl from Julia...')
21 20 import requests
22 21 url = 'https://raw.githubusercontent.com/JuliaLang/julia/master/base/latex_symbols.jl'
23 22 r = requests.get(url)
24 23
25 24
26 25 # Build a list of key, value pairs
27 26 print('Building a list of (latex, unicode) key-value pairs...')
28 27 lines = r.text.splitlines()[60:]
29 28 lines = [line for line in lines if '=>' in line]
30 29 lines = [line.replace('=>',':') for line in lines]
31 30
32 31 def line_to_tuple(line):
33 32 """Convert a single line of the .jl file to a 2-tuple of strings like ("\\alpha", "α")"""
34 33 kv = line.split(',')[0].split(':')
35 34 # kv = tuple(line.strip(', ').split(':'))
36 35 k, v = kv[0].strip(' "'), kv[1].strip(' "')
37 36 # if not test_ident(v):
38 37 # print(line)
39 38 return k, v
40 39
41 40 assert line_to_tuple(' "\\sqrt" : "\u221A",') == ('\\sqrt', '\u221A')
42 41 lines = [line_to_tuple(line) for line in lines]
43 42
44 43
45 44 # Filter out non-valid identifiers
46 45 print('Filtering out characters that are not valid Python 3 identifiers')
47 46
48 47 def test_ident(i):
49 48 """Is the unicode string valid in a Python 3 identifer."""
50 49 # Some characters are not valid at the start of a name, but we still want to
51 50 # include them. So prefix with 'a', which is valid at the start.
52 51 return ('a' + i).isidentifier()
53 52
54 53 assert test_ident("α")
55 54 assert not test_ident('‴')
56 55
57 56 valid_idents = [line for line in lines if test_ident(line[1])]
58 57
59 58
60 59 # Write the `latex_symbols.py` module in the cwd
61 60
62 61 s = """# encoding: utf-8
63 62
64 63 # DO NOT EDIT THIS FILE BY HAND.
65 64
66 65 # To update this file, run the script /tools/gen_latex_symbols.py using Python 3
67 66
68 67 # This file is autogenerated from the file:
69 68 # https://raw.githubusercontent.com/JuliaLang/julia/master/base/latex_symbols.jl
70 69 # This original list is filtered to remove any unicode characters that are not valid
71 70 # Python identifiers.
72 71
73 72 latex_symbols = {\n
74 73 """
75 74 for line in valid_idents:
76 75 s += ' "%s" : "%s",\n' % (line[0], line[1])
77 76 s += "}\n"
78 77
79 78 s += """
80 79
81 80 reverse_latex_symbol = { v:k for k,v in latex_symbols.items()}
82 81 """
83 82
84 83 fn = os.path.join('..','IPython','core','latex_symbols.py')
85 84 print("Writing the file: %s" % fn)
86 85 with open(fn, 'w', encoding='utf-8') as f:
87 86 f.write(s)
88 87
89 88
@@ -1,303 +1,302 b''
1 1 """Functions for Github API requests."""
2 from __future__ import print_function
3 2
4 3 try:
5 4 input = raw_input
6 5 except NameError:
7 6 pass
8 7
9 8 import os
10 9 import re
11 10 import sys
12 11
13 12 import requests
14 13 import getpass
15 14 import json
16 15
17 16 try:
18 17 import requests_cache
19 18 except ImportError:
20 19 print("cache not available, install `requests_cache` for caching.", file=sys.stderr)
21 20 else:
22 21 requests_cache.install_cache("gh_api", expire_after=3600)
23 22
24 23 # Keyring stores passwords by a 'username', but we're not storing a username and
25 24 # password
26 25 fake_username = 'ipython_tools'
27 26
28 27 class Obj(dict):
29 28 """Dictionary with attribute access to names."""
30 29 def __getattr__(self, name):
31 30 try:
32 31 return self[name]
33 32 except KeyError:
34 33 raise AttributeError(name)
35 34
36 35 def __setattr__(self, name, val):
37 36 self[name] = val
38 37
39 38 token = None
40 39 def get_auth_token():
41 40 global token
42 41
43 42 if token is not None:
44 43 return token
45 44
46 45 import keyring
47 46 token = keyring.get_password('github', fake_username)
48 47 if token is not None:
49 48 return token
50 49
51 50 print("Please enter your github username and password. These are not "
52 51 "stored, only used to get an oAuth token. You can revoke this at "
53 52 "any time on Github.\n"
54 53 "Username: ", file=sys.stderr, end='')
55 54 user = input('')
56 55 pw = getpass.getpass("Password: ", stream=sys.stderr)
57 56
58 57 auth_request = {
59 58 "scopes": [
60 59 "public_repo",
61 60 "gist"
62 61 ],
63 62 "note": "IPython tools",
64 63 "note_url": "https://github.com/ipython/ipython/tree/master/tools",
65 64 }
66 65 response = requests.post('https://api.github.com/authorizations',
67 66 auth=(user, pw), data=json.dumps(auth_request))
68 67 if response.status_code == 401 and \
69 68 'required;' in response.headers.get('X-GitHub-OTP', ''):
70 69 print("Your login API requested a one time password", file=sys.stderr)
71 70 otp = getpass.getpass("One Time Password: ", stream=sys.stderr)
72 71 response = requests.post('https://api.github.com/authorizations',
73 72 auth=(user, pw),
74 73 data=json.dumps(auth_request),
75 74 headers={'X-GitHub-OTP':otp})
76 75 response.raise_for_status()
77 76 token = json.loads(response.text)['token']
78 77 keyring.set_password('github', fake_username, token)
79 78 return token
80 79
81 80 def make_auth_header():
82 81 return {'Authorization': 'token ' + get_auth_token()}
83 82
84 83 def post_issue_comment(project, num, body):
85 84 url = 'https://api.github.com/repos/{project}/issues/{num}/comments'.format(project=project, num=num)
86 85 payload = json.dumps({'body': body})
87 86 requests.post(url, data=payload, headers=make_auth_header())
88 87
89 88 def post_gist(content, description='', filename='file', auth=False):
90 89 """Post some text to a Gist, and return the URL."""
91 90 post_data = json.dumps({
92 91 "description": description,
93 92 "public": True,
94 93 "files": {
95 94 filename: {
96 95 "content": content
97 96 }
98 97 }
99 98 }).encode('utf-8')
100 99
101 100 headers = make_auth_header() if auth else {}
102 101 response = requests.post("https://api.github.com/gists", data=post_data, headers=headers)
103 102 response.raise_for_status()
104 103 response_data = json.loads(response.text)
105 104 return response_data['html_url']
106 105
107 106 def get_pull_request(project, num, auth=False):
108 107 """get pull request info by number
109 108 """
110 109 url = "https://api.github.com/repos/{project}/pulls/{num}".format(project=project, num=num)
111 110 if auth:
112 111 header = make_auth_header()
113 112 else:
114 113 header = None
115 114 print("fetching %s" % url, file=sys.stderr)
116 115 response = requests.get(url, headers=header)
117 116 response.raise_for_status()
118 117 return json.loads(response.text, object_hook=Obj)
119 118
120 119 def get_pull_request_files(project, num, auth=False):
121 120 """get list of files in a pull request"""
122 121 url = "https://api.github.com/repos/{project}/pulls/{num}/files".format(project=project, num=num)
123 122 if auth:
124 123 header = make_auth_header()
125 124 else:
126 125 header = None
127 126 return get_paged_request(url, headers=header)
128 127
129 128 element_pat = re.compile(r'<(.+?)>')
130 129 rel_pat = re.compile(r'rel=[\'"](\w+)[\'"]')
131 130
132 131 def get_paged_request(url, headers=None, **params):
133 132 """get a full list, handling APIv3's paging"""
134 133 results = []
135 134 params.setdefault("per_page", 100)
136 135 while True:
137 136 if '?' in url:
138 137 params = None
139 138 print("fetching %s" % url, file=sys.stderr)
140 139 else:
141 140 print("fetching %s with %s" % (url, params), file=sys.stderr)
142 141 response = requests.get(url, headers=headers, params=params)
143 142 response.raise_for_status()
144 143 results.extend(response.json())
145 144 if 'next' in response.links:
146 145 url = response.links['next']['url']
147 146 else:
148 147 break
149 148 return results
150 149
151 150 def get_pulls_list(project, auth=False, **params):
152 151 """get pull request list"""
153 152 params.setdefault("state", "closed")
154 153 url = "https://api.github.com/repos/{project}/pulls".format(project=project)
155 154 if auth:
156 155 headers = make_auth_header()
157 156 else:
158 157 headers = None
159 158 pages = get_paged_request(url, headers=headers, **params)
160 159 return pages
161 160
162 161 def get_issues_list(project, auth=False, **params):
163 162 """get issues list"""
164 163 params.setdefault("state", "closed")
165 164 url = "https://api.github.com/repos/{project}/issues".format(project=project)
166 165 if auth:
167 166 headers = make_auth_header()
168 167 else:
169 168 headers = None
170 169 pages = get_paged_request(url, headers=headers, **params)
171 170 return pages
172 171
173 172 def get_milestones(project, auth=False, **params):
174 173 params.setdefault('state', 'all')
175 174 url = "https://api.github.com/repos/{project}/milestones".format(project=project)
176 175 if auth:
177 176 headers = make_auth_header()
178 177 else:
179 178 headers = None
180 179 milestones = get_paged_request(url, headers=headers, **params)
181 180 return milestones
182 181
183 182 def get_milestone_id(project, milestone, auth=False, **params):
184 183 milestones = get_milestones(project, auth=auth, **params)
185 184 for mstone in milestones:
186 185 if mstone['title'] == milestone:
187 186 return mstone['number']
188 187 else:
189 188 raise ValueError("milestone %s not found" % milestone)
190 189
191 190 def is_pull_request(issue):
192 191 """Return True if the given issue is a pull request."""
193 192 return bool(issue.get('pull_request', {}).get('html_url', None))
194 193
195 194 def get_authors(pr):
196 195 print("getting authors for #%i" % pr['number'], file=sys.stderr)
197 196 h = make_auth_header()
198 197 r = requests.get(pr['commits_url'], headers=h)
199 198 r.raise_for_status()
200 199 commits = r.json()
201 200 authors = []
202 201 for commit in commits:
203 202 author = commit['commit']['author']
204 203 authors.append("%s <%s>" % (author['name'], author['email']))
205 204 return authors
206 205
207 206 # encode_multipart_formdata is from urllib3.filepost
208 207 # The only change is to iter_fields, to enforce S3's required key ordering
209 208
210 209 def iter_fields(fields):
211 210 fields = fields.copy()
212 211 for key in ('key', 'acl', 'Filename', 'success_action_status', 'AWSAccessKeyId',
213 212 'Policy', 'Signature', 'Content-Type', 'file'):
214 213 yield (key, fields.pop(key))
215 214 for (k,v) in fields.items():
216 215 yield k,v
217 216
218 217 def encode_multipart_formdata(fields, boundary=None):
219 218 """
220 219 Encode a dictionary of ``fields`` using the multipart/form-data mime format.
221 220
222 221 :param fields:
223 222 Dictionary of fields or list of (key, value) field tuples. The key is
224 223 treated as the field name, and the value as the body of the form-data
225 224 bytes. If the value is a tuple of two elements, then the first element
226 225 is treated as the filename of the form-data section.
227 226
228 227 Field names and filenames must be unicode.
229 228
230 229 :param boundary:
231 230 If not specified, then a random boundary will be generated using
232 231 :func:`mimetools.choose_boundary`.
233 232 """
234 233 # copy requests imports in here:
235 234 from io import BytesIO
236 235 from requests.packages.urllib3.filepost import (
237 236 choose_boundary, six, writer, b, get_content_type
238 237 )
239 238 body = BytesIO()
240 239 if boundary is None:
241 240 boundary = choose_boundary()
242 241
243 242 for fieldname, value in iter_fields(fields):
244 243 body.write(b('--%s\r\n' % (boundary)))
245 244
246 245 if isinstance(value, tuple):
247 246 filename, data = value
248 247 writer(body).write('Content-Disposition: form-data; name="%s"; '
249 248 'filename="%s"\r\n' % (fieldname, filename))
250 249 body.write(b('Content-Type: %s\r\n\r\n' %
251 250 (get_content_type(filename))))
252 251 else:
253 252 data = value
254 253 writer(body).write('Content-Disposition: form-data; name="%s"\r\n'
255 254 % (fieldname))
256 255 body.write(b'Content-Type: text/plain\r\n\r\n')
257 256
258 257 if isinstance(data, int):
259 258 data = str(data) # Backwards compatibility
260 259 if isinstance(data, six.text_type):
261 260 writer(body).write(data)
262 261 else:
263 262 body.write(data)
264 263
265 264 body.write(b'\r\n')
266 265
267 266 body.write(b('--%s--\r\n' % (boundary)))
268 267
269 268 content_type = b('multipart/form-data; boundary=%s' % boundary)
270 269
271 270 return body.getvalue(), content_type
272 271
273 272
274 273 def post_download(project, filename, name=None, description=""):
275 274 """Upload a file to the GitHub downloads area"""
276 275 if name is None:
277 276 name = os.path.basename(filename)
278 277 with open(filename, 'rb') as f:
279 278 filedata = f.read()
280 279
281 280 url = "https://api.github.com/repos/{project}/downloads".format(project=project)
282 281
283 282 payload = json.dumps(dict(name=name, size=len(filedata),
284 283 description=description))
285 284 response = requests.post(url, data=payload, headers=make_auth_header())
286 285 response.raise_for_status()
287 286 reply = json.loads(response.content)
288 287 s3_url = reply['s3_url']
289 288
290 289 fields = dict(
291 290 key=reply['path'],
292 291 acl=reply['acl'],
293 292 success_action_status=201,
294 293 Filename=reply['name'],
295 294 AWSAccessKeyId=reply['accesskeyid'],
296 295 Policy=reply['policy'],
297 296 Signature=reply['signature'],
298 297 file=(reply['name'], filedata),
299 298 )
300 299 fields['Content-Type'] = reply['mime_type']
301 300 data, content_type = encode_multipart_formdata(fields)
302 301 s3r = requests.post(s3_url, data=data, headers={'Content-Type': content_type})
303 302 return s3r
@@ -1,128 +1,127 b''
1 1 #!/usr/bin/env python
2 2 # -*- coding: utf-8 -*-
3 3 """
4 4 Usage:
5 5 git-mpr [-h] [-l | -a] [pr-number [pr-number ...]]
6 6
7 7 Type `git mpr -h` for details.
8 8 """
9 9
10 from __future__ import print_function
11 10
12 11 import io, os
13 12 import argparse
14 13 from subprocess import check_call, CalledProcessError
15 14
16 15 import gh_api
17 16
18 17 ipy_repository = 'git://github.com/ipython/ipython.git'
19 18 gh_project = "ipython/ipython"
20 19 not_merged = {}
21 20
22 21 def merge_branch(repo, branch):
23 22 """try to merge the given branch into the current one
24 23
25 24 If something does not go smoothly, the merge is aborted
26 25
27 26 Returns True if the merge is successful, False otherwise
28 27 """
29 28 # Delete the branch first
30 29 try :
31 30 check_call(['git', 'pull', repo, branch], stdin=io.open(os.devnull))
32 31 except CalledProcessError :
33 32 check_call(['git', 'merge', '--abort'])
34 33 return False
35 34 return True
36 35
37 36
38 37 def git_new_branch(name):
39 38 """Create a new branch with the given name and check it out.
40 39 """
41 40 check_call(['git', 'checkout', '-b', name])
42 41
43 42
44 43 def merge_pr(num):
45 44 """ try to merge the branch of PR `num` into current branch
46 45 """
47 46 # Get Github authorisation first, so that the user is prompted straight away
48 47 # if their login is needed.
49 48
50 49 pr = gh_api.get_pull_request(gh_project, num)
51 50 repo = pr['head']['repo']['clone_url']
52 51
53 52
54 53 branch = pr['head']['ref']
55 54 mergeable = merge_branch(repo=repo,
56 55 branch=branch,
57 56 )
58 57 if not mergeable :
59 58 cmd = "git pull "+repo+" "+branch
60 59 not_merged[str(num)] = cmd
61 60 print("==============================================================================")
62 61 print("Something went wrong merging this branch, you can try it manually by runngin :")
63 62 print(cmd)
64 63 print("==============================================================================")
65 64
66 65
67 66 def main(*args):
68 67 parser = argparse.ArgumentParser(
69 68 description="""
70 69 Merge one or more github pull requests by their number. If any
71 70 one pull request can't be merged as is, its merge is ignored
72 71 and the process continues with the next ones (if any).
73 72 """
74 73 )
75 74
76 75 grp = parser.add_mutually_exclusive_group()
77 76 grp.add_argument(
78 77 '-l',
79 78 '--list',
80 79 action='store_const',
81 80 const=True,
82 81 help='list PR, their number and their mergeability')
83 82 grp.add_argument('-a',
84 83 '--merge-all',
85 84 action='store_const',
86 85 const=True ,
87 86 help='try to merge as many PR as possible, one by one')
88 87 parser.add_argument('merge',
89 88 type=int,
90 89 help="The pull request numbers",
91 90 nargs='*',
92 91 metavar='pr-number')
93 92 args = parser.parse_args()
94 93
95 94 if(args.list):
96 95 pr_list = gh_api.get_pulls_list(gh_project)
97 96 for pr in pr_list :
98 97 mergeable = gh_api.get_pull_request(gh_project, pr['number'])['mergeable']
99 98
100 99 ismgb = u"√" if mergeable else " "
101 100 print(u"* #{number} [{ismgb}]: {title}".format(
102 101 number=pr['number'],
103 102 title=pr['title'],
104 103 ismgb=ismgb))
105 104
106 105 if(args.merge_all):
107 106 pr_list = gh_api.get_pulls_list(gh_project)
108 107 branch_name = 'merge-' + '-'.join(str(pr['number']) for pr in pr_list)
109 108 git_new_branch(branch_name)
110 109 for pr in pr_list :
111 110 merge_pr(pr['number'])
112 111
113 112
114 113 elif args.merge:
115 114 branch_name = 'merge-' + '-'.join(map(str, args.merge))
116 115 git_new_branch(branch_name)
117 116 for num in args.merge :
118 117 merge_pr(num)
119 118
120 119 if not_merged :
121 120 print('*************************************************************************************')
122 121 print('the following branches have not been merged automatically, consider doing them by hand:')
123 122 for num, cmd in not_merged.items() :
124 123 print( "PR {num}: {cmd}".format(num=num, cmd=cmd))
125 124 print('*************************************************************************************')
126 125
127 126 if __name__ == '__main__':
128 127 main()
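For reference, a minimal sketch (PR numbers are placeholders) of driving the same helpers from Python rather than through the git mpr entry point, assuming git_new_branch, merge_pr and not_merged from this script are in scope:

    git_new_branch('merge-1234-5678')
    for num in (1234, 5678):
        merge_pr(num)                    # failed merges are recorded in not_merged
    for num, cmd in not_merged.items():
        print("retry by hand:", cmd)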
@@ -1,235 +1,231 b''
1 1 #!/usr/bin/env python
2 2 """Simple tools to query github.com and gather stats about issues.
3 3
4 4 To generate a report for IPython 2.0, run:
5 5
6 6 python github_stats.py --milestone 2.0 --since-tag rel-1.0.0
7 7 """
8 8 #-----------------------------------------------------------------------------
9 9 # Imports
10 10 #-----------------------------------------------------------------------------
11 11
12 from __future__ import print_function
13 12
14 13 import codecs
15 14 import sys
16 15
17 16 from argparse import ArgumentParser
18 17 from datetime import datetime, timedelta
19 18 from subprocess import check_output
20 19
21 20 from gh_api import (
22 21 get_paged_request, make_auth_header, get_pull_request, is_pull_request,
23 22 get_milestone_id, get_issues_list, get_authors,
24 23 )
25 24 #-----------------------------------------------------------------------------
26 25 # Globals
27 26 #-----------------------------------------------------------------------------
28 27
29 28 ISO8601 = "%Y-%m-%dT%H:%M:%SZ"
30 29 PER_PAGE = 100
31 30
32 31 #-----------------------------------------------------------------------------
33 32 # Functions
34 33 #-----------------------------------------------------------------------------
35 34
36 35 def round_hour(dt):
37 36 return dt.replace(minute=0,second=0,microsecond=0)
38 37
39 38 def _parse_datetime(s):
40 39 """Parse dates in the format returned by the Github API."""
41 40 if s:
42 41 return datetime.strptime(s, ISO8601)
43 42 else:
44 43 return datetime.fromtimestamp(0)
45 44
46 45 def issues2dict(issues):
47 46 """Convert a list of issues to a dict, keyed by issue number."""
48 47 idict = {}
49 48 for i in issues:
50 49 idict[i['number']] = i
51 50 return idict
52 51
53 52 def split_pulls(all_issues, project="ipython/ipython"):
54 53 """split a list of closed issues into non-PR Issues and Pull Requests"""
55 54 pulls = []
56 55 issues = []
57 56 for i in all_issues:
58 57 if is_pull_request(i):
59 58 pull = get_pull_request(project, i['number'], auth=True)
60 59 pulls.append(pull)
61 60 else:
62 61 issues.append(i)
63 62 return issues, pulls
64 63
65 64
66 65 def issues_closed_since(period=timedelta(days=365), project="ipython/ipython", pulls=False):
67 66 """Get all issues closed since a particular point in time. period
68 67 can either be a datetime object, or a timedelta object. In the
69 68 latter case, it is used as a time before the present.
70 69 """
71 70
72 71 which = 'pulls' if pulls else 'issues'
73 72
74 73 if isinstance(period, timedelta):
75 74 since = round_hour(datetime.utcnow() - period)
76 75 else:
77 76 since = period
78 77 url = "https://api.github.com/repos/%s/%s?state=closed&sort=updated&since=%s&per_page=%i" % (project, which, since.strftime(ISO8601), PER_PAGE)
79 78 allclosed = get_paged_request(url, headers=make_auth_header())
80 79
81 80 filtered = [ i for i in allclosed if _parse_datetime(i['closed_at']) > since ]
82 81 if pulls:
83 82 filtered = [ i for i in filtered if _parse_datetime(i['merged_at']) > since ]
84 83 # filter out PRs not against master (backports)
85 84 filtered = [ i for i in filtered if i['base']['ref'] == 'master' ]
86 85 else:
87 86 filtered = [ i for i in filtered if not is_pull_request(i) ]
88 87
89 88 return filtered
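A minimal usage sketch, assuming GitHub credentials are configured for gh_api's auth helpers:

    from datetime import timedelta

    # Pull requests merged into master over the last 30 days.
    recent_pulls = issues_closed_since(timedelta(days=30),
                                       project="ipython/ipython", pulls=True)
    print("%d pull requests merged in the last 30 days" % len(recent_pulls))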
90 89
91 90
92 91 def sorted_by_field(issues, field='closed_at', reverse=False):
93 92 """Return a list of issues sorted by closing date date."""
94 93 return sorted(issues, key = lambda i:i[field], reverse=reverse)
95 94
96 95
97 96 def report(issues, show_urls=False):
98 97 """Summary report about a list of issues, printing number and title."""
99 98 if show_urls:
100 99 for i in issues:
101 100 role = 'ghpull' if 'merged_at' in i else 'ghissue'
102 101 print(u'* :%s:`%d`: %s' % (role, i['number'],
103 102 i['title'].replace(u'`', u'``')))
104 103 else:
105 104 for i in issues:
106 105 print(u'* %d: %s' % (i['number'], i['title'].replace(u'`', u'``')))
107 106
108 107 #-----------------------------------------------------------------------------
109 108 # Main script
110 109 #-----------------------------------------------------------------------------
111 110
112 111 if __name__ == "__main__":
113 112
114 113 print("DEPRECATE: backport_pr.py is deprecated and is is now recommended"
115 114 "to install `ghpro` from PyPI.", file=sys.stderr)
116 115
117 # deal with unicode
118 if sys.version_info < (3,):
119 sys.stdout = codecs.getwriter('utf8')(sys.stdout)
120 116
121 117 # Whether to add reST urls for all issues in printout.
122 118 show_urls = True
123 119
124 120 parser = ArgumentParser()
125 121 parser.add_argument('--since-tag', type=str,
126 122 help="The git tag to use for the starting point (typically the last major release)."
127 123 )
128 124 parser.add_argument('--milestone', type=str,
129 125 help="The GitHub milestone to use for filtering issues [optional]."
130 126 )
131 127 parser.add_argument('--days', type=int,
132 128 help="The number of days of data to summarize (use this or --since-tag)."
133 129 )
134 130 parser.add_argument('--project', type=str, default="ipython/ipython",
135 131 help="The project to summarize."
136 132 )
137 133 parser.add_argument('--links', action='store_true', default=False,
138 134 help="Include links to all closed Issues and PRs in the output."
139 135 )
140 136
141 137 opts = parser.parse_args()
142 138 tag = opts.since_tag
143 139
144 140 # set `since` from days or git tag
145 141 if opts.days:
146 142 since = datetime.utcnow() - timedelta(days=opts.days)
147 143 else:
148 144 if not tag:
149 145 tag = check_output(['git', 'describe', '--abbrev=0']).strip().decode('utf8')
150 146 cmd = ['git', 'log', '-1', '--format=%ai', tag]
151 147 tagday, tz = check_output(cmd).strip().decode('utf8').rsplit(' ', 1)
152 148 since = datetime.strptime(tagday, "%Y-%m-%d %H:%M:%S")
153 149 h = int(tz[1:3])
154 150 m = int(tz[3:])
155 151 td = timedelta(hours=h, minutes=m)
156 152 if tz[0] == '-':
157 153 since += td
158 154 else:
159 155 since -= td
160 156
161 157 since = round_hour(since)
162 158
163 159 milestone = opts.milestone
164 160 project = opts.project
165 161
166 162 print("fetching GitHub stats since %s (tag: %s, milestone: %s)" % (since, tag, milestone), file=sys.stderr)
167 163 if milestone:
168 164 milestone_id = get_milestone_id(project=project, milestone=milestone,
169 165 auth=True)
170 166 issues_and_pulls = get_issues_list(project=project,
171 167 milestone=milestone_id,
172 168 state='closed',
173 169 auth=True,
174 170 )
175 171 issues, pulls = split_pulls(issues_and_pulls, project=project)
176 172 else:
177 173 issues = issues_closed_since(since, project=project, pulls=False)
178 174 pulls = issues_closed_since(since, project=project, pulls=True)
179 175
180 176 # For regular reports, it's nice to show them in reverse chronological order
181 177 issues = sorted_by_field(issues, reverse=True)
182 178 pulls = sorted_by_field(pulls, reverse=True)
183 179
184 180 n_issues, n_pulls = map(len, (issues, pulls))
185 181 n_total = n_issues + n_pulls
186 182
187 183 # Print summary report we can directly include into release notes.
188 184
189 185 print()
190 186 since_day = since.strftime("%Y/%m/%d")
191 187 today = datetime.today().strftime("%Y/%m/%d")
192 188 print("GitHub stats for %s - %s (tag: %s)" % (since_day, today, tag))
193 189 print()
194 190 print("These lists are automatically generated, and may be incomplete or contain duplicates.")
195 191 print()
196 192
197 193 ncommits = 0
198 194 all_authors = []
199 195 if tag:
200 196 # print git info, in addition to GitHub info:
201 197 since_tag = tag+'..'
202 198 cmd = ['git', 'log', '--oneline', since_tag]
203 199 ncommits += len(check_output(cmd).splitlines())
204 200
205 201 author_cmd = ['git', 'log', '--use-mailmap', "--format=* %aN", since_tag]
206 202 all_authors.extend(check_output(author_cmd).decode('utf-8', 'replace').splitlines())
207 203
208 204 pr_authors = []
209 205 for pr in pulls:
210 206 pr_authors.extend(get_authors(pr))
211 207 ncommits = len(pr_authors) + ncommits - len(pulls)
212 208 author_cmd = ['git', 'check-mailmap'] + pr_authors
213 209 with_email = check_output(author_cmd).decode('utf-8', 'replace').splitlines()
214 210 all_authors.extend([ u'* ' + a.split(' <')[0] for a in with_email ])
215 211 unique_authors = sorted(set(all_authors), key=lambda s: s.lower())
216 212
217 213 print("We closed %d issues and merged %d pull requests." % (n_issues, n_pulls))
218 214 if milestone:
219 215 print("The full list can be seen `on GitHub <https://github.com/{project}/issues?q=milestone%3A{milestone}+>`__".format(project=project,milestone=milestone)
220 216 )
221 217
222 218 print()
223 219 print("The following %i authors contributed %i commits." % (len(unique_authors), ncommits))
224 220 print()
225 221 print('\n'.join(unique_authors))
226 222
227 223 if opts.links:
228 224 print()
229 225 print("GitHub issues and pull requests:")
230 226 print()
231 227 print('Pull Requests (%d):\n' % n_pulls)
232 228 report(pulls, show_urls)
233 229 print()
234 230 print('Issues (%d):\n' % n_issues)
235 231 report(issues, show_urls)
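A minimal sketch of report() on a fabricated issue entry (the number and title are placeholders; real entries come from the GitHub API helpers above):

    fake = [{'number': 1234, 'title': 'Fix `tab` completion',
             'merged_at': '2016-01-01T00:00:00Z'}]
    report(fake, show_urls=True)
    # prints: * :ghpull:`1234`: Fix ``tab`` completion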
@@ -1,289 +1,288 b''
1 1 #!/usr/bin/env python
2 2 """
3 3 This is a script for testing pull requests for IPython. It merges the pull
4 4 request with current master, installs and tests on all available versions of
5 5 Python, and posts the results to Gist if any tests fail.
6 6
7 7 Usage:
8 8 python test_pr.py 1657
9 9 """
10 from __future__ import print_function
11 10
12 11 import errno
13 12 from glob import glob
14 13 import io
15 14 import os
16 15 import pickle
17 16 import re
18 17 import shutil
19 18 import time
20 19 from subprocess import call, check_call, check_output, PIPE, STDOUT, CalledProcessError
21 20 import sys
22 21
23 22 import gh_api
24 23 from gh_api import Obj
25 24
26 25 basedir = os.path.join(os.path.expanduser("~"), ".ipy_pr_tests")
27 26 repodir = os.path.join(basedir, "ipython")
28 27 ipy_repository = 'git://github.com/ipython/ipython.git'
29 28 ipy_http_repository = 'http://github.com/ipython/ipython.git'
30 29 gh_project="ipython/ipython"
31 30
32 31 supported_pythons = ['python2.7', 'python3.3']
33 32
34 33 missing_libs_re = re.compile(r"Tools and libraries NOT available at test time:\n"
35 34 r"\s*(.*?)\n")
36 35 def get_missing_libraries(log):
37 36 m = missing_libs_re.search(log)
38 37 if m:
39 38 return m.group(1)
40 39
41 40 class TestRun(object):
42 41 def __init__(self, pr_num, extra_args):
43 42 self.unavailable_pythons = []
44 43 self.venvs = []
45 44 self.pr_num = pr_num
46 45 self.extra_args = extra_args
47 46
48 47 self.pr = gh_api.get_pull_request(gh_project, pr_num)
49 48
50 49 self.setup()
51 50
52 51 self.results = []
53 52
54 53 def available_python_versions(self):
55 54 """Get the executable names of available versions of Python on the system.
56 55 """
57 56 for py in supported_pythons:
58 57 try:
59 58 check_call([py, '-c', 'import nose'], stdout=PIPE)
60 59 yield py
61 60 except (OSError, CalledProcessError):
62 61 self.unavailable_pythons.append(py)
63 62
64 63 def setup(self):
65 64 """Prepare the repository and virtualenvs."""
66 65 try:
67 66 os.mkdir(basedir)
68 67 except OSError as e:
69 68 if e.errno != errno.EEXIST:
70 69 raise
71 70 os.chdir(basedir)
72 71
73 72 # Delete virtualenvs and recreate
74 73 for venv in glob('venv-*'):
75 74 shutil.rmtree(venv)
76 75 for py in self.available_python_versions():
77 76 check_call(['virtualenv', '-p', py, '--system-site-packages', 'venv-%s' % py])
78 77 self.venvs.append((py, 'venv-%s' % py))
79 78
80 79 # Check out and update the repository
81 80 if not os.path.exists('ipython'):
82 81 try :
83 82 check_call(['git', 'clone', ipy_repository])
84 83 except CalledProcessError :
85 84 check_call(['git', 'clone', ipy_http_repository])
86 85 os.chdir(repodir)
87 86 check_call(['git', 'checkout', 'master'])
88 87 try :
89 88 check_call(['git', 'pull', 'origin', 'master'])
90 89 except CalledProcessError :
91 90 check_call(['git', 'pull', ipy_http_repository, 'master'])
92 91 self.master_sha = check_output(['git', 'log', '-1', '--format=%h']).decode('ascii').strip()
93 92 os.chdir(basedir)
94 93
95 94 def get_branch(self):
96 95 repo = self.pr['head']['repo']['clone_url']
97 96 branch = self.pr['head']['ref']
98 97 owner = self.pr['head']['repo']['owner']['login']
99 98 mergeable = self.pr['mergeable']
100 99
101 100 os.chdir(repodir)
102 101 if mergeable:
103 102 merged_branch = "%s-%s" % (owner, branch)
104 103 # Delete the branch first
105 104 call(['git', 'branch', '-D', merged_branch])
106 105 check_call(['git', 'checkout', '-b', merged_branch])
107 106 check_call(['git', 'pull', '--no-ff', '--no-commit', repo, branch])
108 107 check_call(['git', 'commit', '-m', "merge %s/%s" % (repo, branch)])
109 108 else:
110 109 # Fetch the branch without merging it.
111 110 check_call(['git', 'fetch', repo, branch])
112 111 check_call(['git', 'checkout', 'FETCH_HEAD'])
113 112 os.chdir(basedir)
114 113
115 114 def markdown_format(self):
116 115 def format_result(result):
117 116 s = "* %s: " % result.py
118 117 if result.passed:
119 118 s += "OK"
120 119 else:
121 120 s += "Failed, log at %s" % result.log_url
122 121 if result.missing_libraries:
123 122 s += " (libraries not available: " + result.missing_libraries + ")"
124 123 return s
125 124
126 125 if self.pr['mergeable']:
127 126 com = self.pr['head']['sha'][:7] + " merged into master (%s)" % self.master_sha
128 127 else:
129 128 com = self.pr['head']['sha'][:7] + " (can't merge cleanly)"
130 129 lines = ["**Test results for commit %s**" % com,
131 130 "Platform: " + sys.platform,
132 131 ""] + \
133 132 [format_result(r) for r in self.results] + \
134 133 [""]
135 134 if self.extra_args:
136 135 lines.append("Extra args: %r" % self.extra_args)
137 136 lines.append("Not available for testing: " + ", ".join(self.unavailable_pythons))
138 137 return "\n".join(lines)
139 138
140 139 def post_results_comment(self):
141 140 body = self.markdown_format()
142 141 gh_api.post_issue_comment(gh_project, self.pr_num, body)
143 142
144 143 def print_results(self):
145 144 pr = self.pr
146 145
147 146 print("\n")
148 147 msg = "**Test results for commit %s" % pr['head']['sha'][:7]
149 148 if pr['mergeable']:
150 149 msg += " merged into master (%s)**" % self.master_sha
151 150 else:
152 151 msg += " (can't merge cleanly)**"
153 152 print(msg)
154 153 print("Platform:", sys.platform)
155 154 for result in self.results:
156 155 if result.passed:
157 156 print(result.py, ":", "OK")
158 157 else:
159 158 print(result.py, ":", "Failed")
160 159 print(" Test log:", result.get('log_url') or result.log_file)
161 160 if result.missing_libraries:
162 161 print(" Libraries not available:", result.missing_libraries)
163 162
164 163 if self.extra_args:
165 164 print("Extra args:", self.extra_args)
166 165 print("Not available for testing:", ", ".join(self.unavailable_pythons))
167 166
168 167 def dump_results(self):
169 168 with open(os.path.join(basedir, 'lastresults.pkl'), 'wb') as f:
170 169 pickle.dump(self, f)
171 170
172 171 @staticmethod
173 172 def load_results():
174 173 with open(os.path.join(basedir, 'lastresults.pkl'), 'rb') as f:
175 174 return pickle.load(f)
176 175
177 176 def save_logs(self):
178 177 for result in self.results:
179 178 if not result.passed:
180 179 result_locn = os.path.abspath(os.path.join('venv-%s' % result.py,
181 180 self.pr['head']['sha'][:7]+".log"))
182 181 with io.open(result_locn, 'w', encoding='utf-8') as f:
183 182 f.write(result.log)
184 183
185 184 result.log_file = result_locn
186 185
187 186 def post_logs(self):
188 187 for result in self.results:
189 188 if not result.passed:
190 189 result.log_url = gh_api.post_gist(result.log,
191 190 description='IPython test log',
192 191 filename="results.log", auth=True)
193 192
194 193 def run(self):
195 194 for py, venv in self.venvs:
196 195 tic = time.time()
197 196 passed, log = run_tests(venv, self.extra_args)
198 197 elapsed = int(time.time() - tic)
199 198 print("Ran tests with %s in %is" % (py, elapsed))
200 199 missing_libraries = get_missing_libraries(log)
201 200
202 201 self.results.append(Obj(py=py,
203 202 passed=passed,
204 203 log=log,
205 204 missing_libraries=missing_libraries
206 205 )
207 206 )
208 207
209 208
210 209 def run_tests(venv, extra_args):
211 210 py = os.path.join(basedir, venv, 'bin', 'python')
212 211 print(py)
213 212 os.chdir(repodir)
214 213 # cleanup build-dir
215 214 if os.path.exists('build'):
216 215 shutil.rmtree('build')
217 216 tic = time.time()
218 217 print ("\nInstalling IPython with %s" % py)
219 218 logfile = os.path.join(basedir, venv, 'install.log')
220 219 print ("Install log at %s" % logfile)
221 220 with open(logfile, 'wb') as f:
222 221 check_call([py, 'setup.py', 'install'], stdout=f)
223 222 toc = time.time()
224 223 print ("Installed IPython in %.1fs" % (toc-tic))
225 224 os.chdir(basedir)
226 225
227 226 # Environment variables:
228 227 orig_path = os.environ["PATH"]
229 228 os.environ["PATH"] = os.path.join(basedir, venv, 'bin') + ':' + os.environ["PATH"]
230 229 os.environ.pop("PYTHONPATH", None)
231 230
232 231 # check that the right IPython is imported
233 232 ipython_file = check_output([py, '-c', 'import IPython; print (IPython.__file__)'])
234 233 ipython_file = ipython_file.strip().decode('utf-8')
235 234 if not ipython_file.startswith(os.path.join(basedir, venv)):
236 235 msg = "IPython does not appear to be in the venv: %s" % ipython_file
237 236 msg += "\nDo you use setupegg.py develop?"
238 237 print(msg, file=sys.stderr)
239 238 return False, msg
240 239
241 240 iptest = os.path.join(basedir, venv, 'bin', 'iptest')
242 241 if not os.path.exists(iptest):
243 242 iptest = os.path.join(basedir, venv, 'bin', 'iptest3')
244 243
245 244 print("\nRunning tests, this typically takes a few minutes...")
246 245 try:
247 246 return True, check_output([iptest] + extra_args, stderr=STDOUT).decode('utf-8')
248 247 except CalledProcessError as e:
249 248 return False, e.output.decode('utf-8')
250 249 finally:
251 250 # Restore $PATH
252 251 os.environ["PATH"] = orig_path
253 252
254 253
255 254 def test_pr(num, post_results=True, extra_args=None):
256 255 # Get Github authorisation first, so that the user is prompted straight away
257 256 # if their login is needed.
258 257 if post_results:
259 258 gh_api.get_auth_token()
260 259
261 260 testrun = TestRun(num, extra_args or [])
262 261
263 262 testrun.get_branch()
264 263
265 264 testrun.run()
266 265
267 266 testrun.dump_results()
268 267
269 268 testrun.save_logs()
270 269 testrun.print_results()
271 270
272 271 if post_results:
273 272 testrun.post_logs()
274 273 testrun.post_results_comment()
275 274 print("(Posted to Github)")
276 275 else:
277 276 post_script = os.path.join(os.path.dirname(sys.argv[0]), "post_pr_test.py")
278 277 print("To post the results to Github, run", post_script)
279 278
280 279
281 280 if __name__ == '__main__':
282 281 import argparse
283 282 parser = argparse.ArgumentParser(description="Test an IPython pull request")
284 283 parser.add_argument('-p', '--publish', action='store_true',
285 284 help="Publish the results to Github")
286 285 parser.add_argument('number', type=int, help="The pull request number")
287 286
288 287 args, extra_args = parser.parse_known_args()
289 288 test_pr(args.number, post_results=args.publish, extra_args=extra_args)
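The same entry point can also be called from Python; a minimal sketch reusing the PR number from the usage string above, without posting results to GitHub:

    test_pr(1657, post_results=False)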
@@ -1,6 +1,6 b''
1 1 """This tests that future compiler flags are passed to the embedded IPython."""
2 2 from IPython import embed
3 3 import __future__
4 embed(banner1='', header='check 1/2 == 0 in Python 2')
5 embed(banner1='', header='check 1/2 == 0.5 in Python 2',
6 compile_flags=__future__.division.compiler_flag)
4 embed(banner1='', header='check 1 <> 2 cause SyntaxError')
5 embed(banner1='', header='check 1 <> 2 == True',
6 compile_flags=__future__.barry_as_FLUFL.compiler_flag)
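As a side note (a sketch, not part of the test): __future__ compiler flags are plain integers, so several can be combined with bitwise OR before being passed as compile_flags:

    import __future__
    flags = (__future__.barry_as_FLUFL.compiler_flag |
             __future__.unicode_literals.compiler_flag)
    # embed(banner1='', compile_flags=flags)   # would start an interactive shell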
@@ -1,56 +1,55 b''
1 1 """Various utilities common to IPython release and maintenance tools.
2 2 """
3 from __future__ import print_function
4 3
5 4 # Library imports
6 5 import os
7 6
8 7 # Useful shorthands
9 8 pjoin = os.path.join
10 9 cd = os.chdir
11 10
12 11 # Constants
13 12
14 13 # SSH root address of the archive site
15 14 archive_user = 'ipython@archive.ipython.org'
16 15 archive_dir = 'archive.ipython.org'
17 16 archive = '%s:%s' % (archive_user, archive_dir)
18 17
19 18 # Build commands
20 19 # Source dists
21 20 sdists = './setup.py sdist --formats=gztar'
22 21 # Binary dists
23 22 def buildwheels():
24 23 sh('python3 setupegg.py bdist_wheel')
25 24
26 25 # Utility functions
27 26 def sh(cmd):
28 27 """Run system command in shell, raise SystemExit if it returns an error."""
29 28 print("$", cmd)
30 29 stat = os.system(cmd)
31 30 #stat = 0 # Uncomment this and comment previous to run in debug mode
32 31 if stat:
33 32 raise SystemExit("Command %s failed with code: %s" % (cmd, stat))
34 33
35 34 # Backwards compatibility
36 35 c = sh
37 36
38 37 def get_ipdir():
39 38 """Get IPython directory from command line, or assume it's the one above."""
40 39
41 40 # Initialize arguments and check location
42 41 ipdir = pjoin(os.path.dirname(__file__), os.pardir)
43 42
44 43 ipdir = os.path.abspath(ipdir)
45 44
46 45 cd(ipdir)
47 46 if not (os.path.isdir('IPython') and os.path.isfile('setup.py')):
48 47 raise SystemExit('Invalid ipython directory: %s' % ipdir)
49 48 return ipdir
50 49
51 50 try:
52 51 execfile = execfile
53 52 except NameError:
54 53 def execfile(fname, globs, locs=None):
55 54 locs = locs or globs
56 55 exec(compile(open(fname).read(), fname, "exec"), globs, locs)
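A minimal sketch of how a release script strings these helpers together, assuming it runs from inside an IPython checkout; sh() raises SystemExit on the first failing command, so the run stops there:

    ipdir = get_ipdir()       # also chdirs into the checkout
    sh(sdists)                # ./setup.py sdist --formats=gztar
    buildwheels()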
@@ -1,439 +0,0 b''
1 """Patched version of standard library tokenize, to deal with various bugs.
2
3 Patches
4
5 - Relevant parts of Gareth Rees' patch for Python issue #12691 (untokenizing),
6 manually applied.
7 - Newlines in comments and blank lines should be either NL or NEWLINE, depending
8 on whether they are in a multi-line statement. Filed as Python issue #17061.
9
10 -------------------------------------------------------------------------------
11 Tokenization help for Python programs.
12
13 generate_tokens(readline) is a generator that breaks a stream of
14 text into Python tokens. It accepts a readline-like method which is called
15 repeatedly to get the next line of input (or "" for EOF). It generates
16 5-tuples with these members:
17
18 the token type (see token.py)
19 the token (a string)
20 the starting (row, column) indices of the token (a 2-tuple of ints)
21 the ending (row, column) indices of the token (a 2-tuple of ints)
22 the original line (string)
23
24 It is designed to match the working of the Python tokenizer exactly, except
25 that it produces COMMENT tokens for comments and gives type OP for all
26 operators
27
28 Older entry points
29 tokenize_loop(readline, tokeneater)
30 tokenize(readline, tokeneater=printtoken)
31 are the same, except instead of generating tokens, tokeneater is a callback
32 function to which the 5 fields described above are passed as 5 arguments,
33 each time a new token is found."""
34 from __future__ import print_function
35
36 __author__ = 'Ka-Ping Yee <ping@lfw.org>'
37 __credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
38 'Skip Montanaro, Raymond Hettinger')
39
40 import string, re
41 from token import *
42
43 import token
44 __all__ = [x for x in dir(token) if not x.startswith("_")]
45 __all__ += ["COMMENT", "tokenize", "generate_tokens", "NL", "untokenize"]
46 del x
47 del token
48
49 __all__ += ["TokenError"]
50
51 COMMENT = N_TOKENS
52 tok_name[COMMENT] = 'COMMENT'
53 NL = N_TOKENS + 1
54 tok_name[NL] = 'NL'
55 N_TOKENS += 2
56
57 def group(*choices): return '(' + '|'.join(choices) + ')'
58 def any(*choices): return group(*choices) + '*'
59 def maybe(*choices): return group(*choices) + '?'
60
61 Whitespace = r'[ \f\t]*'
62 Comment = r'#[^\r\n]*'
63 Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
64 Name = r'[a-zA-Z_]\w*'
65
66 Hexnumber = r'0[xX][\da-fA-F]+[lL]?'
67 Octnumber = r'(0[oO][0-7]+)|(0[0-7]*)[lL]?'
68 Binnumber = r'0[bB][01]+[lL]?'
69 Decnumber = r'[1-9]\d*[lL]?'
70 Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
71 Exponent = r'[eE][-+]?\d+'
72 Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
73 Expfloat = r'\d+' + Exponent
74 Floatnumber = group(Pointfloat, Expfloat)
75 Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
76 Number = group(Imagnumber, Floatnumber, Intnumber)
77
78 # Tail end of ' string.
79 Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
80 # Tail end of " string.
81 Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
82 # Tail end of ''' string.
83 Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
84 # Tail end of """ string.
85 Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
86 Triple = group("[uUbB]?[rR]?'''", '[uUbB]?[rR]?"""')
87 # Single-line ' or " string.
88 String = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
89 r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
90
91 # Because of leftmost-then-longest match semantics, be sure to put the
92 # longest operators first (e.g., if = came before ==, == would get
93 # recognized as two instances of =).
94 Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
95 r"//=?",
96 r"[+\-*/%&|^=<>]=?",
97 r"~")
98
99 Bracket = '[][(){}]'
100 Special = group(r'\r?\n', r'[:;.,`@]')
101 Funny = group(Operator, Bracket, Special)
102
103 PlainToken = group(Number, Funny, String, Name)
104 Token = Ignore + PlainToken
105
106 # First (or only) line of ' or " string.
107 ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
108 group("'", r'\\\r?\n'),
109 r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
110 group('"', r'\\\r?\n'))
111 PseudoExtras = group(r'\\\r?\n', Comment, Triple)
112 PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
113
114 tokenprog, pseudoprog, single3prog, double3prog = map(
115 re.compile, (Token, PseudoToken, Single3, Double3))
116 endprogs = {"'": re.compile(Single), '"': re.compile(Double),
117 "'''": single3prog, '"""': double3prog,
118 "r'''": single3prog, 'r"""': double3prog,
119 "u'''": single3prog, 'u"""': double3prog,
120 "ur'''": single3prog, 'ur"""': double3prog,
121 "R'''": single3prog, 'R"""': double3prog,
122 "U'''": single3prog, 'U"""': double3prog,
123 "uR'''": single3prog, 'uR"""': double3prog,
124 "Ur'''": single3prog, 'Ur"""': double3prog,
125 "UR'''": single3prog, 'UR"""': double3prog,
126 "b'''": single3prog, 'b"""': double3prog,
127 "br'''": single3prog, 'br"""': double3prog,
128 "B'''": single3prog, 'B"""': double3prog,
129 "bR'''": single3prog, 'bR"""': double3prog,
130 "Br'''": single3prog, 'Br"""': double3prog,
131 "BR'''": single3prog, 'BR"""': double3prog,
132 'r': None, 'R': None, 'u': None, 'U': None,
133 'b': None, 'B': None}
134
135 triple_quoted = {}
136 for t in ("'''", '"""',
137 "r'''", 'r"""', "R'''", 'R"""',
138 "u'''", 'u"""', "U'''", 'U"""',
139 "ur'''", 'ur"""', "Ur'''", 'Ur"""',
140 "uR'''", 'uR"""', "UR'''", 'UR"""',
141 "b'''", 'b"""', "B'''", 'B"""',
142 "br'''", 'br"""', "Br'''", 'Br"""',
143 "bR'''", 'bR"""', "BR'''", 'BR"""'):
144 triple_quoted[t] = t
145 single_quoted = {}
146 for t in ("'", '"',
147 "r'", 'r"', "R'", 'R"',
148 "u'", 'u"', "U'", 'U"',
149 "ur'", 'ur"', "Ur'", 'Ur"',
150 "uR'", 'uR"', "UR'", 'UR"',
151 "b'", 'b"', "B'", 'B"',
152 "br'", 'br"', "Br'", 'Br"',
153 "bR'", 'bR"', "BR'", 'BR"' ):
154 single_quoted[t] = t
155
156 tabsize = 8
157
158 class TokenError(Exception): pass
159
160 class StopTokenizing(Exception): pass
161
162 def printtoken(type, token, srow_scol, erow_ecol, line): # for testing
163 srow, scol = srow_scol
164 erow, ecol = erow_ecol
165 print("%d,%d-%d,%d:\t%s\t%s" % \
166 (srow, scol, erow, ecol, tok_name[type], repr(token)))
167
168 def tokenize(readline, tokeneater=printtoken):
169 """
170 The tokenize() function accepts two parameters: one representing the
171 input stream, and one providing an output mechanism for tokenize().
172
173 The first parameter, readline, must be a callable object which provides
174 the same interface as the readline() method of built-in file objects.
175 Each call to the function should return one line of input as a string.
176
177 The second parameter, tokeneater, must also be a callable object. It is
178 called once for each token, with five arguments, corresponding to the
179 tuples generated by generate_tokens().
180 """
181 try:
182 tokenize_loop(readline, tokeneater)
183 except StopTokenizing:
184 pass
185
186 # backwards compatible interface
187 def tokenize_loop(readline, tokeneater):
188 for token_info in generate_tokens(readline):
189 tokeneater(*token_info)
190
191 class Untokenizer:
192
193 def __init__(self):
194 self.tokens = []
195 self.prev_row = 1
196 self.prev_col = 0
197
198 def add_whitespace(self, tok_type, start):
199 row, col = start
200 assert row >= self.prev_row
201 col_offset = col - self.prev_col
202 if col_offset > 0:
203 self.tokens.append(" " * col_offset)
204 elif row > self.prev_row and tok_type not in (NEWLINE, NL, ENDMARKER):
205 # Line was backslash-continued
206 self.tokens.append(" ")
207
208 def untokenize(self, tokens):
209 iterable = iter(tokens)
210 for t in iterable:
211 if len(t) == 2:
212 self.compat(t, iterable)
213 break
214 tok_type, token, start, end = t[:4]
215 self.add_whitespace(tok_type, start)
216 self.tokens.append(token)
217 self.prev_row, self.prev_col = end
218 if tok_type in (NEWLINE, NL):
219 self.prev_row += 1
220 self.prev_col = 0
221 return "".join(self.tokens)
222
223 def compat(self, token, iterable):
224 # This import is here to avoid problems when the itertools
225 # module is not built yet and tokenize is imported.
226 from itertools import chain
227 startline = False
228 prevstring = False
229 indents = []
230 toks_append = self.tokens.append
231 for tok in chain([token], iterable):
232 toknum, tokval = tok[:2]
233
234 if toknum in (NAME, NUMBER):
235 tokval += ' '
236
237 # Insert a space between two consecutive strings
238 if toknum == STRING:
239 if prevstring:
240 tokval = ' ' + tokval
241 prevstring = True
242 else:
243 prevstring = False
244
245 if toknum == INDENT:
246 indents.append(tokval)
247 continue
248 elif toknum == DEDENT:
249 indents.pop()
250 continue
251 elif toknum in (NEWLINE, NL):
252 startline = True
253 elif startline and indents:
254 toks_append(indents[-1])
255 startline = False
256 toks_append(tokval)
257
258 def untokenize(iterable):
259 """Transform tokens back into Python source code.
260
261 Each element returned by the iterable must be a token sequence
262 with at least two elements, a token number and token value. If
263 only two tokens are passed, the resulting output is poor.
264
265 Round-trip invariant for full input:
266 Untokenized source will match input source exactly
267
268 Round-trip invariant for limited input:
269 # Output text will tokenize back to the input
270 t1 = [tok[:2] for tok in generate_tokens(f.readline)]
271 newcode = untokenize(t1)
272 readline = iter(newcode.splitlines(1)).next
273 t2 = [tok[:2] for tok in generate_tokens(readline)]
274 assert t1 == t2
275 """
276 ut = Untokenizer()
277 return ut.untokenize(iterable)
278
279 def generate_tokens(readline):
280 """
281 The generate_tokens() generator requires one argument, readline, which
282 must be a callable object which provides the same interface as the
283 readline() method of built-in file objects. Each call to the function
284 should return one line of input as a string. Alternately, readline
285 can be a callable function terminating with StopIteration:
286 readline = open(myfile).next # Example of alternate readline
287
288 The generator produces 5-tuples with these members: the token type; the
289 token string; a 2-tuple (srow, scol) of ints specifying the row and
290 column where the token begins in the source; a 2-tuple (erow, ecol) of
291 ints specifying the row and column where the token ends in the source;
292 and the line on which the token was found. The line passed is the
293 logical line; continuation lines are included.
294 """
295 lnum = parenlev = continued = 0
296 namechars, numchars = string.ascii_letters + '_', '0123456789'
297 contstr, needcont = '', 0
298 contline = None
299 indents = [0]
300
301 while 1: # loop over lines in stream
302 try:
303 line = readline()
304 except StopIteration:
305 line = ''
306 lnum += 1
307 pos, max = 0, len(line)
308
309 if contstr: # continued string
310 if not line:
311 raise TokenError("EOF in multi-line string", strstart)
312 endmatch = endprog.match(line)
313 if endmatch:
314 pos = end = endmatch.end(0)
315 yield (STRING, contstr + line[:end],
316 strstart, (lnum, end), contline + line)
317 contstr, needcont = '', 0
318 contline = None
319 elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
320 yield (ERRORTOKEN, contstr + line,
321 strstart, (lnum, len(line)), contline)
322 contstr = ''
323 contline = None
324 continue
325 else:
326 contstr = contstr + line
327 contline = contline + line
328 continue
329
330 elif parenlev == 0 and not continued: # new statement
331 if not line: break
332 column = 0
333 while pos < max: # measure leading whitespace
334 if line[pos] == ' ':
335 column += 1
336 elif line[pos] == '\t':
337 column = (column//tabsize + 1)*tabsize
338 elif line[pos] == '\f':
339 column = 0
340 else:
341 break
342 pos += 1
343 if pos == max:
344 break
345
346 if line[pos] in '#\r\n': # skip comments or blank lines
347 if line[pos] == '#':
348 comment_token = line[pos:].rstrip('\r\n')
349 nl_pos = pos + len(comment_token)
350 yield (COMMENT, comment_token,
351 (lnum, pos), (lnum, pos + len(comment_token)), line)
352 yield (NEWLINE, line[nl_pos:],
353 (lnum, nl_pos), (lnum, len(line)), line)
354 else:
355 yield (NEWLINE, line[pos:],
356 (lnum, pos), (lnum, len(line)), line)
357 continue
358
359 if column > indents[-1]: # count indents or dedents
360 indents.append(column)
361 yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
362 while column < indents[-1]:
363 if column not in indents:
364 raise IndentationError(
365 "unindent does not match any outer indentation level",
366 ("<tokenize>", lnum, pos, line))
367 indents = indents[:-1]
368 yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
369
370 else: # continued statement
371 if not line:
372 raise TokenError("EOF in multi-line statement", (lnum, 0))
373 continued = 0
374
375 while pos < max:
376 pseudomatch = pseudoprog.match(line, pos)
377 if pseudomatch: # scan for tokens
378 start, end = pseudomatch.span(1)
379 spos, epos, pos = (lnum, start), (lnum, end), end
380 token, initial = line[start:end], line[start]
381
382 if initial in numchars or \
383 (initial == '.' and token != '.'): # ordinary number
384 yield (NUMBER, token, spos, epos, line)
385 elif initial in '\r\n':
386 yield (NL if parenlev > 0 else NEWLINE,
387 token, spos, epos, line)
388 elif initial == '#':
389 assert not token.endswith("\n")
390 yield (COMMENT, token, spos, epos, line)
391 elif token in triple_quoted:
392 endprog = endprogs[token]
393 endmatch = endprog.match(line, pos)
394 if endmatch: # all on one line
395 pos = endmatch.end(0)
396 token = line[start:pos]
397 yield (STRING, token, spos, (lnum, pos), line)
398 else:
399 strstart = (lnum, start) # multiple lines
400 contstr = line[start:]
401 contline = line
402 break
403 elif initial in single_quoted or \
404 token[:2] in single_quoted or \
405 token[:3] in single_quoted:
406 if token[-1] == '\n': # continued string
407 strstart = (lnum, start)
408 endprog = (endprogs[initial] or endprogs[token[1]] or
409 endprogs[token[2]])
410 contstr, needcont = line[start:], 1
411 contline = line
412 break
413 else: # ordinary string
414 yield (STRING, token, spos, epos, line)
415 elif initial in namechars: # ordinary name
416 yield (NAME, token, spos, epos, line)
417 elif initial == '\\': # continued stmt
418 continued = 1
419 else:
420 if initial in '([{':
421 parenlev += 1
422 elif initial in ')]}':
423 parenlev -= 1
424 yield (OP, token, spos, epos, line)
425 else:
426 yield (ERRORTOKEN, line[pos],
427 (lnum, pos), (lnum, pos+1), line)
428 pos += 1
429
430 for indent in indents[1:]: # pop remaining indent levels
431 yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
432 yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
433
434 if __name__ == '__main__': # testing
435 import sys
436 if len(sys.argv) > 1:
437 tokenize(open(sys.argv[1]).readline)
438 else:
439 tokenize(sys.stdin.readline)
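With this patched copy removed, the standard-library tokenize module covers the same ground on Python 3; a minimal sketch:

    import io
    import tokenize

    src = "x = 1  # comment\n"
    for tok in tokenize.generate_tokens(io.StringIO(src).readline):
        print(tokenize.tok_name[tok.type], repr(tok.string))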
This diff has been collapsed as it changes many lines (595 lines changed).
@@ -1,595 +0,0 b''
1 """Patched version of standard library tokenize, to deal with various bugs.
2
3 Based on Python 3.2 code.
4
5 Patches:
6
7 - Gareth Rees' patch for Python issue #12691 (untokenizing)
8 - Except we don't encode the output of untokenize
9 - Python 2 compatible syntax, so that it can be byte-compiled at installation
10 - Newlines in comments and blank lines should be either NL or NEWLINE, depending
11 on whether they are in a multi-line statement. Filed as Python issue #17061.
12 - Export generate_tokens & TokenError
13 - u and rb literals are allowed under Python 3.3 and above.
14
15 ------------------------------------------------------------------------------
16 Tokenization help for Python programs.
17
18 tokenize(readline) is a generator that breaks a stream of bytes into
19 Python tokens. It decodes the bytes according to PEP-0263 for
20 determining source file encoding.
21
22 It accepts a readline-like method which is called repeatedly to get the
23 next line of input (or b"" for EOF). It generates 5-tuples with these
24 members:
25
26 the token type (see token.py)
27 the token (a string)
28 the starting (row, column) indices of the token (a 2-tuple of ints)
29 the ending (row, column) indices of the token (a 2-tuple of ints)
30 the original line (string)
31
32 It is designed to match the working of the Python tokenizer exactly, except
33 that it produces COMMENT tokens for comments and gives type OP for all
34 operators. Additionally, all token lists start with an ENCODING token
35 which tells you which encoding was used to decode the bytes stream.
36 """
37 from __future__ import absolute_import
38
39 __author__ = 'Ka-Ping Yee <ping@lfw.org>'
40 __credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
41 'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
42 'Michael Foord')
43 import builtins
44 import re
45 import sys
46 from token import *
47 from codecs import lookup, BOM_UTF8
48 import collections
49 from io import TextIOWrapper
50 cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")
51
52 import token
53 __all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
54 "NL", "untokenize", "ENCODING", "TokenInfo"]
55 del token
56
57 __all__ += ["generate_tokens", "TokenError"]
58
59 COMMENT = N_TOKENS
60 tok_name[COMMENT] = 'COMMENT'
61 NL = N_TOKENS + 1
62 tok_name[NL] = 'NL'
63 ENCODING = N_TOKENS + 2
64 tok_name[ENCODING] = 'ENCODING'
65 N_TOKENS += 3
66
67 class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
68 def __repr__(self):
69 annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
70 return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
71 self._replace(type=annotated_type))
72
73 def group(*choices): return '(' + '|'.join(choices) + ')'
74 def any(*choices): return group(*choices) + '*'
75 def maybe(*choices): return group(*choices) + '?'
76
77 # Note: we use unicode matching for names ("\w") but ascii matching for
78 # number literals.
79 Whitespace = r'[ \f\t]*'
80 Comment = r'#[^\r\n]*'
81 Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
82 Name = r'\w+'
83
84 Hexnumber = r'0[xX][0-9a-fA-F]+'
85 Binnumber = r'0[bB][01]+'
86 Octnumber = r'0[oO][0-7]+'
87 Decnumber = r'(?:0+|[1-9][0-9]*)'
88 Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
89 Exponent = r'[eE][-+]?[0-9]+'
90 Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
91 Expfloat = r'[0-9]+' + Exponent
92 Floatnumber = group(Pointfloat, Expfloat)
93 Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]')
94 Number = group(Imagnumber, Floatnumber, Intnumber)
95
96 if sys.version_info >= (3, 3):
97 StringPrefix = r'(?:[bB][rR]?|[rR][bB]?|[uU])?'
98 else:
99 StringPrefix = r'(?:[bB]?[rR]?)?'
100
101 # Tail end of ' string.
102 Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
103 # Tail end of " string.
104 Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
105 # Tail end of ''' string.
106 Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
107 # Tail end of """ string.
108 Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
109 Triple = group(StringPrefix + "'''", StringPrefix + '"""')
110 # Single-line ' or " string.
111 String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
112 StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
113
114 # Because of leftmost-then-longest match semantics, be sure to put the
115 # longest operators first (e.g., if = came before ==, == would get
116 # recognized as two instances of =).
117 Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
118 r"//=?", r"->",
119 r"[+\-*/%&|^=<>]=?",
120 r"~")
121
122 Bracket = '[][(){}]'
123 Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
124 Funny = group(Operator, Bracket, Special)
125
126 PlainToken = group(Number, Funny, String, Name)
127 Token = Ignore + PlainToken
128
129 # First (or only) line of ' or " string.
130 ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
131 group("'", r'\\\r?\n'),
132 StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
133 group('"', r'\\\r?\n'))
134 PseudoExtras = group(r'\\\r?\n', Comment, Triple)
135 PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
136
137 def _compile(expr):
138 return re.compile(expr, re.UNICODE)
139
140 tokenprog, pseudoprog, single3prog, double3prog = map(
141 _compile, (Token, PseudoToken, Single3, Double3))
142 endprogs = {"'": _compile(Single), '"': _compile(Double),
143 "'''": single3prog, '"""': double3prog,
144 "r'''": single3prog, 'r"""': double3prog,
145 "b'''": single3prog, 'b"""': double3prog,
146 "R'''": single3prog, 'R"""': double3prog,
147 "B'''": single3prog, 'B"""': double3prog,
148 "br'''": single3prog, 'br"""': double3prog,
149 "bR'''": single3prog, 'bR"""': double3prog,
150 "Br'''": single3prog, 'Br"""': double3prog,
151 "BR'''": single3prog, 'BR"""': double3prog,
152 'r': None, 'R': None, 'b': None, 'B': None}
153
154 triple_quoted = {}
155 for t in ("'''", '"""',
156 "r'''", 'r"""', "R'''", 'R"""',
157 "b'''", 'b"""', "B'''", 'B"""',
158 "br'''", 'br"""', "Br'''", 'Br"""',
159 "bR'''", 'bR"""', "BR'''", 'BR"""'):
160 triple_quoted[t] = t
161 single_quoted = {}
162 for t in ("'", '"',
163 "r'", 'r"', "R'", 'R"',
164 "b'", 'b"', "B'", 'B"',
165 "br'", 'br"', "Br'", 'Br"',
166 "bR'", 'bR"', "BR'", 'BR"' ):
167 single_quoted[t] = t
168
169 if sys.version_info >= (3, 3):
170 # Python 3.3
171 for _prefix in ['rb', 'rB', 'Rb', 'RB', 'u', 'U']:
172 _t2 = _prefix+'"""'
173 endprogs[_t2] = double3prog
174 triple_quoted[_t2] = _t2
175 _t1 = _prefix + "'''"
176 endprogs[_t1] = single3prog
177 triple_quoted[_t1] = _t1
178 single_quoted[_prefix+'"'] = _prefix+'"'
179 single_quoted[_prefix+"'"] = _prefix+"'"
180 del _prefix, _t2, _t1
181 endprogs['u'] = None
182 endprogs['U'] = None
183
184 del _compile
185
186 tabsize = 8
187
188 class TokenError(Exception): pass
189
190 class StopTokenizing(Exception): pass
191
192
193 class Untokenizer:
194
195 def __init__(self):
196 self.tokens = []
197 self.prev_row = 1
198 self.prev_col = 0
199 self.encoding = 'utf-8'
200
201 def add_whitespace(self, tok_type, start):
202 row, col = start
203 assert row >= self.prev_row
204 col_offset = col - self.prev_col
205 if col_offset > 0:
206 self.tokens.append(" " * col_offset)
207 elif row > self.prev_row and tok_type not in (NEWLINE, NL, ENDMARKER):
208 # Line was backslash-continued.
209 self.tokens.append(" ")
210
211 def untokenize(self, tokens):
212 iterable = iter(tokens)
213 for t in iterable:
214 if len(t) == 2:
215 self.compat(t, iterable)
216 break
217 tok_type, token, start, end = t[:4]
218 if tok_type == ENCODING:
219 self.encoding = token
220 continue
221 self.add_whitespace(tok_type, start)
222 self.tokens.append(token)
223 self.prev_row, self.prev_col = end
224 if tok_type in (NEWLINE, NL):
225 self.prev_row += 1
226 self.prev_col = 0
227 return "".join(self.tokens)
228
229 def compat(self, token, iterable):
230 # This import is here to avoid problems when the itertools
231 # module is not built yet and tokenize is imported.
232 from itertools import chain
233 startline = False
234 prevstring = False
235 indents = []
236 toks_append = self.tokens.append
237
238 for tok in chain([token], iterable):
239 toknum, tokval = tok[:2]
240 if toknum == ENCODING:
241 self.encoding = tokval
242 continue
243
244 if toknum in (NAME, NUMBER):
245 tokval += ' '
246
247 # Insert a space between two consecutive strings
248 if toknum == STRING:
249 if prevstring:
250 tokval = ' ' + tokval
251 prevstring = True
252 else:
253 prevstring = False
254
255 if toknum == INDENT:
256 indents.append(tokval)
257 continue
258 elif toknum == DEDENT:
259 indents.pop()
260 continue
261 elif toknum in (NEWLINE, NL):
262 startline = True
263 elif startline and indents:
264 toks_append(indents[-1])
265 startline = False
266 toks_append(tokval)
267
268
269 def untokenize(tokens):
270 """
271 Convert ``tokens`` (an iterable) back into Python source code. Return
272 a bytes object, encoded using the encoding specified by the last
273 ENCODING token in ``tokens``, or UTF-8 if no ENCODING token is found.
274
275 The result is guaranteed to tokenize back to match the input so that
276 the conversion is lossless and round-trips are assured. The
277 guarantee applies only to the token type and token string as the
278 spacing between tokens (column positions) may change.
279
280 :func:`untokenize` has two modes. If the input tokens are sequences
281 of length 2 (``type``, ``string``) then spaces are added as necessary to
282 preserve the round-trip property.
283
284 If the input tokens are sequences of length 4 or more (``type``,
285 ``string``, ``start``, ``end``), as returned by :func:`tokenize`, then
286 spaces are added so that each token appears in the result at the
287 position indicated by ``start`` and ``end``, if possible.
288 """
289 return Untokenizer().untokenize(tokens)
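A minimal round-trip sketch using the standard-library tokenize/untokenize pair, which behave as described above when given full token tuples:

    import io
    import tokenize

    src = "a = (1 +\n     2)\n"
    toks = list(tokenize.generate_tokens(io.StringIO(src).readline))
    print(tokenize.untokenize(toks) == src)    # True for this simple source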
290
291
292 def _get_normal_name(orig_enc):
293 """Imitates get_normal_name in tokenizer.c."""
294 # Only care about the first 12 characters.
295 enc = orig_enc[:12].lower().replace("_", "-")
296 if enc == "utf-8" or enc.startswith("utf-8-"):
297 return "utf-8"
298 if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
299 enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
300 return "iso-8859-1"
301 return orig_enc
302
303 def detect_encoding(readline):
304 """
305 The detect_encoding() function is used to detect the encoding that should
306 be used to decode a Python source file. It requires one argument, readline,
307 in the same way as the tokenize() generator.
308
309 It will call readline a maximum of twice, and return the encoding used
310 (as a string) and a list of any lines (left as bytes) it has read in.
311
312 It detects the encoding from the presence of a utf-8 bom or an encoding
313 cookie as specified in pep-0263. If both a bom and a cookie are present,
314 but disagree, a SyntaxError will be raised. If the encoding cookie is an
315 invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
316 'utf-8-sig' is returned.
317
318 If no encoding is specified, then the default of 'utf-8' will be returned.
319 """
320 bom_found = False
321 encoding = None
322 default = 'utf-8'
323 def read_or_stop():
324 try:
325 return readline()
326 except StopIteration:
327 return b''
328
329 def find_cookie(line):
330 try:
331 # Decode as UTF-8. Either the line is an encoding declaration,
332 # in which case it should be pure ASCII, or it must be UTF-8
333 # per default encoding.
334 line_string = line.decode('utf-8')
335 except UnicodeDecodeError:
336 raise SyntaxError("invalid or missing encoding declaration")
337
338 matches = cookie_re.findall(line_string)
339 if not matches:
340 return None
341 encoding = _get_normal_name(matches[0])
342 try:
343 codec = lookup(encoding)
344 except LookupError:
345 # This behaviour mimics the Python interpreter
346 raise SyntaxError("unknown encoding: " + encoding)
347
348 if bom_found:
349 if encoding != 'utf-8':
350 # This behaviour mimics the Python interpreter
351 raise SyntaxError('encoding problem: utf-8')
352 encoding += '-sig'
353 return encoding
354
355 first = read_or_stop()
356 if first.startswith(BOM_UTF8):
357 bom_found = True
358 first = first[3:]
359 default = 'utf-8-sig'
360 if not first:
361 return default, []
362
363 encoding = find_cookie(first)
364 if encoding:
365 return encoding, [first]
366
367 second = read_or_stop()
368 if not second:
369 return default, [first]
370
371 encoding = find_cookie(second)
372 if encoding:
373 return encoding, [first, second]
374
375 return default, [first, second]
376
377
378 def open(filename):
379 """Open a file in read only mode using the encoding detected by
380 detect_encoding().
381 """
382 buffer = builtins.open(filename, 'rb')
383 encoding, lines = detect_encoding(buffer.readline)
384 buffer.seek(0)
385 text = TextIOWrapper(buffer, encoding, line_buffering=True)
386 text.mode = 'r'
387 return text
388
389
390 def tokenize(readline):
391 """
392 The tokenize() generator requires one argument, readline, which
393 must be a callable object which provides the same interface as the
394 readline() method of built-in file objects. Each call to the function
395 should return one line of input as bytes. Alternately, readline
396 can be a callable function terminating with StopIteration:
397 readline = open(myfile, 'rb').__next__ # Example of alternate readline
398
399 The generator produces 5-tuples with these members: the token type; the
400 token string; a 2-tuple (srow, scol) of ints specifying the row and
401 column where the token begins in the source; a 2-tuple (erow, ecol) of
402 ints specifying the row and column where the token ends in the source;
403 and the line on which the token was found. The line passed is the
404 logical line; continuation lines are included.
405
406 The first token sequence will always be an ENCODING token
407 which tells you which encoding was used to decode the bytes stream.
408 """
409 # This import is here to avoid problems when the itertools module is not
410 # built yet and tokenize is imported.
411 from itertools import chain, repeat
412 encoding, consumed = detect_encoding(readline)
413 rl_gen = iter(readline, b"")
414 empty = repeat(b"")
415 return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)
416
417
418 def _tokenize(readline, encoding):
419 lnum = parenlev = continued = 0
420 numchars = '0123456789'
421 contstr, needcont = '', 0
422 contline = None
423 indents = [0]
424
425 if encoding is not None:
426 if encoding == "utf-8-sig":
427 # BOM will already have been stripped.
428 encoding = "utf-8"
429 yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
430 while True: # loop over lines in stream
431 try:
432 line = readline()
433 except StopIteration:
434 line = b''
435
436 if encoding is not None:
437 line = line.decode(encoding)
438 lnum += 1
439 pos, max = 0, len(line)
440
441 if contstr: # continued string
442 if not line:
443 raise TokenError("EOF in multi-line string", strstart)
444 endmatch = endprog.match(line)
445 if endmatch:
446 pos = end = endmatch.end(0)
447 yield TokenInfo(STRING, contstr + line[:end],
448 strstart, (lnum, end), contline + line)
449 contstr, needcont = '', 0
450 contline = None
451 elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
452 yield TokenInfo(ERRORTOKEN, contstr + line,
453 strstart, (lnum, len(line)), contline)
454 contstr = ''
455 contline = None
456 continue
457 else:
458 contstr = contstr + line
459 contline = contline + line
460 continue
461
462 elif parenlev == 0 and not continued: # new statement
463 if not line: break
464 column = 0
465 while pos < max: # measure leading whitespace
466 if line[pos] == ' ':
467 column += 1
468 elif line[pos] == '\t':
469 column = (column//tabsize + 1)*tabsize
470 elif line[pos] == '\f':
471 column = 0
472 else:
473 break
474 pos += 1
475 if pos == max:
476 break
477
478 if line[pos] in '#\r\n': # skip comments or blank lines
479 if line[pos] == '#':
480 comment_token = line[pos:].rstrip('\r\n')
481 nl_pos = pos + len(comment_token)
482 yield TokenInfo(COMMENT, comment_token,
483 (lnum, pos), (lnum, pos + len(comment_token)), line)
484 yield TokenInfo(NEWLINE, line[nl_pos:],
485 (lnum, nl_pos), (lnum, len(line)), line)
486 else:
487 yield TokenInfo(NEWLINE, line[pos:],
488 (lnum, pos), (lnum, len(line)), line)
489 continue
490
491 if column > indents[-1]: # count indents or dedents
492 indents.append(column)
493 yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
494 while column < indents[-1]:
495 if column not in indents:
496 raise IndentationError(
497 "unindent does not match any outer indentation level",
498 ("<tokenize>", lnum, pos, line))
499 indents = indents[:-1]
500 yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)
501
502 else: # continued statement
503 if not line:
504 raise TokenError("EOF in multi-line statement", (lnum, 0))
505 continued = 0
506
507 while pos < max:
508 pseudomatch = pseudoprog.match(line, pos)
509 if pseudomatch: # scan for tokens
510 start, end = pseudomatch.span(1)
511 spos, epos, pos = (lnum, start), (lnum, end), end
512 token, initial = line[start:end], line[start]
513
514 if (initial in numchars or # ordinary number
515 (initial == '.' and token != '.' and token != '...')):
516 yield TokenInfo(NUMBER, token, spos, epos, line)
517 elif initial in '\r\n':
518 yield TokenInfo(NL if parenlev > 0 else NEWLINE,
519 token, spos, epos, line)
520 elif initial == '#':
521 assert not token.endswith("\n")
522 yield TokenInfo(COMMENT, token, spos, epos, line)
523 elif token in triple_quoted:
524 endprog = endprogs[token]
525 endmatch = endprog.match(line, pos)
526 if endmatch: # all on one line
527 pos = endmatch.end(0)
528 token = line[start:pos]
529 yield TokenInfo(STRING, token, spos, (lnum, pos), line)
530 else:
531 strstart = (lnum, start) # multiple lines
532 contstr = line[start:]
533 contline = line
534 break
535 elif initial in single_quoted or \
536 token[:2] in single_quoted or \
537 token[:3] in single_quoted:
538 if token[-1] == '\n': # continued string
539 strstart = (lnum, start)
540 endprog = (endprogs[initial] or endprogs[token[1]] or
541 endprogs[token[2]])
542 contstr, needcont = line[start:], 1
543 contline = line
544 break
545 else: # ordinary string
546 yield TokenInfo(STRING, token, spos, epos, line)
547 elif initial.isidentifier(): # ordinary name
548 yield TokenInfo(NAME, token, spos, epos, line)
549 elif initial == '\\': # continued stmt
550 continued = 1
551 else:
552 if initial in '([{':
553 parenlev += 1
554 elif initial in ')]}':
555 parenlev -= 1
556 yield TokenInfo(OP, token, spos, epos, line)
557 else:
558 yield TokenInfo(ERRORTOKEN, line[pos],
559 (lnum, pos), (lnum, pos+1), line)
560 pos += 1
561
562 for indent in indents[1:]: # pop remaining indent levels
563 yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
564 yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
565
566
567 # An undocumented, backwards compatible, API for all the places in the standard
568 # library that expect to be able to use tokenize with strings
569 def generate_tokens(readline):
570 return _tokenize(readline, None)
571
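A minimal sketch of generate_tokens(), whose readline must return str rather than bytes and which therefore emits no ENCODING token:

    import io

    for tok in generate_tokens(io.StringIO("x = 1\n").readline):
        print(tok)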
572 if __name__ == "__main__":
573 # Quick sanity check
574 s = b'''def parseline(self, line):
575 """Parse the line into a command name and a string containing
576 the arguments. Returns a tuple containing (command, args, line).
577 'command' and 'args' may be None if the line couldn't be parsed.
578 """
579 line = line.strip()
580 if not line:
581 return None, None, line
582 elif line[0] == '?':
583 line = 'help ' + line[1:]
584 elif line[0] == '!':
585 if hasattr(self, 'do_shell'):
586 line = 'shell ' + line[1:]
587 else:
588 return None, None, line
589 i, n = 0, len(line)
590 while i < n and line[i] in self.identchars: i = i+1
591 cmd, arg = line[:i], line[i:].strip()
592 return cmd, arg, line
593 '''
594 for tok in tokenize(iter(s.splitlines()).__next__):
595 print(tok)
@@ -1,5 +0,0 b''
1 """This tests that future compiler flags are passed to the embedded IPython."""
2 from __future__ import division
3 from IPython import embed
4 embed(banner1='', header='check 1/2 == 0.5 in Python 2')
5 embed(banner1='', header='check 1/2 = 0 in Python 2', compile_flags=0)