Show More
@@ -1,1011 +1,1015 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | """Tools for inspecting Python objects. |
|
3 | 3 | |
|
4 | 4 | Uses syntax highlighting for presenting the various information elements. |
|
5 | 5 | |
|
6 | 6 | Similar in spirit to the inspect module, but all calls take a name argument to |
|
7 | 7 | reference the name under which an object is being read. |
|
8 | 8 | """ |
|
9 | 9 | |
|
10 | 10 | # Copyright (c) IPython Development Team. |
|
11 | 11 | # Distributed under the terms of the Modified BSD License. |
|
12 | 12 | |
|
13 | 13 | from __future__ import print_function |
|
14 | 14 | |
|
15 | 15 | __all__ = ['Inspector','InspectColors'] |
|
16 | 16 | |
|
17 | 17 | # stdlib modules |
|
18 | 18 | import inspect |
|
19 | 19 | import linecache |
|
20 | 20 | import warnings |
|
21 | 21 | import os |
|
22 | 22 | from textwrap import dedent |
|
23 | 23 | import types |
|
24 | 24 | import io as stdlib_io |
|
25 | 25 | |
|
26 | 26 | try: |
|
27 | 27 | from itertools import izip_longest |
|
28 | 28 | except ImportError: |
|
29 | 29 | from itertools import zip_longest as izip_longest |
|
30 | 30 | |
|
31 | 31 | # IPython's own |
|
32 | 32 | from IPython.core import page |
|
33 | 33 | from IPython.lib.pretty import pretty |
|
34 | 34 | from IPython.testing.skipdoctest import skip_doctest_py3 |
|
35 | 35 | from IPython.utils import PyColorize |
|
36 | 36 | from IPython.utils import openpy |
|
37 | 37 | from IPython.utils import py3compat |
|
38 | 38 | from IPython.utils.dir2 import safe_hasattr |
|
39 | 39 | from IPython.utils.path import compress_user |
|
40 | 40 | from IPython.utils.text import indent |
|
41 | 41 | from IPython.utils.wildcard import list_namespace |
|
42 | 42 | from IPython.utils.coloransi import TermColors, ColorScheme, ColorSchemeTable |
|
43 | 43 | from IPython.utils.py3compat import cast_unicode, string_types, PY3 |
|
44 | 44 | from IPython.utils.signatures import signature |
|
45 | 45 | from IPython.utils.colorable import Colorable |
|
46 | 46 | |
|
47 | 47 | from pygments import highlight |
|
48 | from pygments.lexers import PythonLexer | |
|
48 | try: | |
|
49 | # PythonLexer was renamed to Python2Lexer in pygments 2.5 | |
|
50 | from pygments.lexers import Python2Lexer | |
|
51 | except ImportError: | |
|
52 | from pygments.lexers import PythonLexer as Python2Lexer | |
|
49 | 53 | from pygments.formatters import HtmlFormatter |
|
50 | 54 | |
|
def pylight(code):
    """Return *code* syntax-highlighted as HTML with inline styles."""
    formatter = HtmlFormatter(noclasses=True)
    return highlight(code, Python2Lexer(), formatter)
|
53 | 57 | |
|
# builtin docstrings to ignore: these are the generic docstrings inherited
# from builtin types, which carry no object-specific information.
_func_call_docstring = types.FunctionType.__call__.__doc__
_object_init_docstring = object.__init__.__doc__
_builtin_type_docstrings = {
    inspect.getdoc(t) for t in (types.ModuleType, types.MethodType,
                                types.FunctionType, property)
}

_builtin_func_type = type(all)
_builtin_meth_type = type(str.upper)  # Bound methods have the same type as builtin functions
#****************************************************************************
# Builtin color schemes

Colors = TermColors  # just a shorthand

InspectColors = PyColorize.ANSICodeColors

#****************************************************************************
# Auxiliary functions and objects

# See the messaging spec for the definition of all these fields.  This list
# effectively defines the order of display
info_fields = ['type_name', 'base_class', 'string_form', 'namespace',
               'length', 'file', 'definition', 'docstring', 'source',
               'init_definition', 'class_docstring', 'init_docstring',
               'call_def', 'call_docstring',
               # These won't be printed but will be used to determine how to
               # format the object
               'ismagic', 'isalias', 'isclass', 'argspec', 'found', 'name'
               ]
|
84 | 88 | |
|
85 | 89 | |
|
def object_info(**kw):
    """Make an object info dict with all fields present."""
    # Start with every known field set to None, then overlay the supplied
    # values.
    infodict = {field: None for field in info_fields}
    infodict.update(kw)
    return infodict
|
91 | 95 | |
|
92 | 96 | |
|
def get_encoding(obj):
    """Get encoding for python source file defining obj

    Returns None if obj is not defined in a sourcefile.
    """
    ofile = find_file(obj)
    # Only real text files on disk can carry an encoding cookie: bail out
    # for unknown locations and for binary extension modules.
    if ofile is None:
        return None
    if ofile.endswith(('.so', '.dll', '.pyd')):
        return None
    if not os.path.isfile(ofile):
        return None
    # io.open is used (instead of plain open) so this also works on Python 2.
    with stdlib_io.open(ofile, 'rb') as buffer:
        encoding, lines = openpy.detect_encoding(buffer.readline)
    return encoding
|
115 | 119 | |
|
def getdoc(obj):
    """Stable wrapper around inspect.getdoc.

    This can't crash because of attribute problems.

    It also attempts to call a getdoc() method on the given object.  This
    allows objects which provide their docstrings via non-standard mechanisms
    (like Pyro proxies) to still be inspected by ipython's ? system.
    """
    # Allow objects to offer customized documentation via a getdoc method:
    try:
        ds = obj.getdoc()
    except Exception:
        pass
    else:
        # if we get extra info, we add it to the normal docstring.
        if isinstance(ds, string_types):
            return inspect.cleandoc(ds)
    try:
        docstr = inspect.getdoc(obj)
        encoding = get_encoding(obj)
        return py3compat.cast_unicode(docstr, encoding=encoding)
    except Exception:
        # Harden against an inspect failure, which can occur with
        # extensions modules.
        # NOTE(review): this handler re-raises, so the `return None` below is
        # unreachable dead code and the "hardening" described above never
        # actually happens.  Confirm whether `raise` or `return None` is the
        # intended behavior before changing either.
        raise
        return None
|
143 | 147 | |
|
144 | 148 | |
|
def getsource(obj, oname=''):
    """Wrapper around inspect.getsource.

    This can be modified by other projects to provide customized source
    extraction.

    Parameters
    ----------
    obj : object
        an object whose source code we will attempt to extract
    oname : str
        (optional) a name under which the object is known

    Returns
    -------
    src : unicode or None

    """

    if isinstance(obj, property):
        # A property object has no source of its own; instead, show the
        # source of each accessor that is defined (fget/fset/fdel), each
        # preceded by a '# name.accessor' comment header.
        sources = []
        for attrname in ['fget', 'fset', 'fdel']:
            fn = getattr(obj, attrname)
            if fn is not None:
                encoding = get_encoding(fn)
                oname_prefix = ('%s.' % oname) if oname else ''
                sources.append(cast_unicode(
                    ''.join(('# ', oname_prefix, attrname)),
                    encoding=encoding))
                if inspect.isfunction(fn):
                    sources.append(dedent(getsource(fn)))
                else:
                    # Default str/repr only prints function name,
                    # pretty.pretty prints module name too.
                    sources.append(cast_unicode(
                        '%s%s = %s\n' % (
                            oname_prefix, attrname, pretty(fn)),
                        encoding=encoding))
        if sources:
            return '\n'.join(sources)
        else:
            return None

    else:
        # Get source for non-property objects.

        # Unwrap @functools.wraps-style decorators first so we show the
        # source of the underlying function.
        obj = _get_wrapped(obj)

        try:
            src = inspect.getsource(obj)
        except TypeError:
            # The object itself provided no meaningful source, try looking for
            # its class definition instead.
            # NOTE(review): if obj lacks __class__, `src` stays unbound and
            # the cast below raises NameError; in practice every object has
            # __class__, but confirm the guard is still wanted.
            if hasattr(obj, '__class__'):
                try:
                    src = inspect.getsource(obj.__class__)
                except TypeError:
                    return None

        encoding = get_encoding(obj)
        return cast_unicode(src, encoding=encoding)
|
206 | 210 | |
|
207 | 211 | |
|
def is_simple_callable(obj):
    """True if obj is a function ()"""
    # Plain functions and bound/unbound methods ...
    if inspect.isfunction(obj) or inspect.ismethod(obj):
        return True
    # ... plus builtin functions and builtin methods.
    return isinstance(obj, (_builtin_func_type, _builtin_meth_type))
|
212 | 216 | |
|
213 | 217 | |
|
def getargspec(obj):
    """Wrapper around :func:`inspect.getfullargspec` on Python 3, and
    :func:inspect.getargspec` on Python 2.

    In addition to functions and methods, this can also handle objects with a
    ``__call__`` attribute.
    """
    # Callable instances are introspected through their __call__ method;
    # plain functions/methods/builtins are passed through untouched.
    if safe_hasattr(obj, '__call__') and not is_simple_callable(obj):
        obj = obj.__call__

    return inspect.getfullargspec(obj) if PY3 else inspect.getargspec(obj)
|
225 | 229 | |
|
226 | 230 | |
|
def format_argspec(argspec):
    """Format argspect, convenience wrapper around inspect's.

    This takes a dict instead of ordered arguments and calls
    inspect.format_argspec with the arguments in the necessary order.
    """
    # NOTE(review): inspect.formatargspec is deprecated since Python 3.5 and
    # removed in 3.11; this code assumes an older interpreter.
    return inspect.formatargspec(argspec['args'], argspec['varargs'],
                                 argspec['varkw'], argspec['defaults'])
|
235 | 239 | |
|
236 | 240 | |
|
def call_tip(oinfo, format_call=True):
    """Extract call tip data from an oinfo dict.

    Parameters
    ----------
    oinfo : dict

    format_call : bool, optional
      If True, the call line is formatted and returned as a string. If not, a
      tuple of (name, argspec) is returned.

    Returns
    -------
    call_info : None, str or (str, dict) tuple.
      When format_call is True, the whole call information is formattted as a
      single string. Otherwise, the object's name and its argspec dict are
      returned. If no call information is available, None is returned.

    docstring : str or None
      The most relevant docstring for calling purposes is returned, if
      available. The priority is: call docstring for callable instances, then
      constructor docstring for classes, then main object's docstring otherwise
      (regular functions).
    """
    # Get call definition
    argspec = oinfo.get('argspec')
    if argspec is None:
        call_line = None
    else:
        # Callable objects will have 'self' as their first argument, prune
        # it out if it's there for clarity (since users do *not* pass an
        # extra first argument explicitly).
        try:
            has_self = argspec['args'][0] == 'self'
        except (KeyError, IndexError):
            pass
        else:
            if has_self:
                # NOTE: this rebinds 'args' inside the argspec dict held by
                # the caller's oinfo, i.e. oinfo is mutated in place.
                argspec['args'] = argspec['args'][1:]

        call_line = oinfo['name']+format_argspec(argspec)

    # Now get docstring.
    # The priority is: call docstring, constructor docstring, main one.
    doc = oinfo.get('call_docstring')
    if doc is None:
        doc = oinfo.get('init_docstring')
    if doc is None:
        doc = oinfo.get('docstring','')

    return call_line, doc
|
288 | 292 | |
|
289 | 293 | |
|
def _get_wrapped(obj):
    """Get the original object if wrapped in one or more @decorators

    Some objects automatically construct similar objects on any unrecognised
    attribute access (e.g. unittest.mock.call). To protect against infinite loops,
    this will arbitrarily cut off after 100 levels of obj.__wrapped__
    attribute access. --TK, Jan 2016
    """
    unwrapped = obj
    # Follow the __wrapped__ chain with a hard bound instead of an
    # unbounded while-loop (same 101-step cutoff as before).
    for _ in range(101):
        if not safe_hasattr(unwrapped, '__wrapped__'):
            return unwrapped
        unwrapped = unwrapped.__wrapped__
    # __wrapped__ is probably a lie, so return the thing we started with
    return obj
|
307 | 311 | |
|
def find_file(obj):
    """Find the absolute path to the file where an object was defined.

    This is essentially a robust wrapper around `inspect.getabsfile`.

    Returns None if no file can be found.

    Parameters
    ----------
    obj : any Python object

    Returns
    -------
    fname : str
        The absolute path to the file where the object was defined.
    """
    # Unwrap decorated objects so we find the underlying definition.
    obj = _get_wrapped(obj)

    fname = None
    try:
        fname = inspect.getabsfile(obj)
    except TypeError:
        # For an instance, the file that matters is where its class was
        # declared.
        if hasattr(obj, '__class__'):
            try:
                fname = inspect.getabsfile(obj.__class__)
            except TypeError:
                # Can happen for builtins
                pass
    except Exception:
        # Bug fix: this was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.  Any other inspect failure still
        # means "no file found" (fname stays None).
        pass
    return cast_unicode(fname)
|
341 | 345 | |
|
342 | 346 | |
|
def find_source_lines(obj):
    """Find the line number in a file where an object was defined.

    This is essentially a robust wrapper around `inspect.getsourcelines`.

    Returns None if no file can be found.

    Parameters
    ----------
    obj : any Python object

    Returns
    -------
    lineno : int
        The line number where the object definition starts.
    """
    # Unwrap decorated objects so we find the underlying definition.
    obj = _get_wrapped(obj)

    try:
        try:
            lineno = inspect.getsourcelines(obj)[1]
        except TypeError:
            # For instances, try the class object like getsource() does
            if hasattr(obj, '__class__'):
                lineno = inspect.getsourcelines(obj.__class__)[1]
            else:
                lineno = None
    except Exception:
        # Bug fix: this was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.  Any other failure means the line
        # number simply cannot be determined.
        return None

    return lineno
|
374 | 378 | |
|
375 | 379 | class Inspector(Colorable): |
|
376 | 380 | |
|
    def __init__(self, color_table=InspectColors,
                 code_color_table=PyColorize.ANSICodeColors,
                 scheme='NoColor',
                 str_detail_level=0,
                 parent=None, config=None):
        """Build an Inspector configured with the given color scheme.

        NOTE(review): `code_color_table` is accepted but never stored or read
        in this body — the source parser is built from `scheme` directly.
        Confirm whether it is consumed elsewhere or is vestigial.
        """
        super(Inspector, self).__init__(parent=parent, config=config)
        self.color_table = color_table
        # Parser renders highlighted source to a string (out='str').
        self.parser = PyColorize.Parser(out='str', parent=self, style=scheme)
        self.format = self.parser.format
        self.str_detail_level = str_detail_level
        self.set_active_scheme(scheme)
|
388 | 392 | |
|
389 | 393 | def _getdef(self,obj,oname=''): |
|
390 | 394 | """Return the call signature for any callable object. |
|
391 | 395 | |
|
392 | 396 | If any exception is generated, None is returned instead and the |
|
393 | 397 | exception is suppressed.""" |
|
394 | 398 | try: |
|
395 | 399 | hdef = oname + str(signature(obj)) |
|
396 | 400 | return cast_unicode(hdef) |
|
397 | 401 | except: |
|
398 | 402 | return None |
|
399 | 403 | |
|
400 | 404 | def __head(self,h): |
|
401 | 405 | """Return a header string with proper colors.""" |
|
402 | 406 | return '%s%s%s' % (self.color_table.active_colors.header,h, |
|
403 | 407 | self.color_table.active_colors.normal) |
|
404 | 408 | |
|
    def set_active_scheme(self, scheme):
        # Keep the header color table and the source-highlighting parser's
        # color table synchronized on the same scheme.
        self.color_table.set_active_scheme(scheme)
        self.parser.color_table.set_active_scheme(scheme)
|
408 | 412 | |
|
409 | 413 | def noinfo(self, msg, oname): |
|
410 | 414 | """Generic message when no information is found.""" |
|
411 | 415 | print('No %s found' % msg, end=' ') |
|
412 | 416 | if oname: |
|
413 | 417 | print('for %s' % oname) |
|
414 | 418 | else: |
|
415 | 419 | print() |
|
416 | 420 | |
|
    def pdef(self, obj, oname=''):
        """Print the call signature for any callable object.

        If the object is a class, print the constructor information."""

        if not callable(obj):
            print('Object is not callable.')
            return

        header = ''

        if inspect.isclass(obj):
            header = self.__head('Class constructor information:\n')
        elif (not py3compat.PY3) and type(obj) is types.InstanceType:
            # Python 2 old-style instances: the signature lives on __call__.
            obj = obj.__call__

        output = self._getdef(obj,oname)
        if output is None:
            self.noinfo('definition header',oname)
        else:
            print(header,self.format(output), end=' ')
|
438 | 442 | |
|
439 | 443 | # In Python 3, all classes are new-style, so they all have __init__. |
|
440 | 444 | @skip_doctest_py3 |
|
441 | 445 | def pdoc(self, obj, oname='', formatter=None): |
|
442 | 446 | """Print the docstring for any object. |
|
443 | 447 | |
|
444 | 448 | Optional: |
|
445 | 449 | -formatter: a function to run the docstring through for specially |
|
446 | 450 | formatted docstrings. |
|
447 | 451 | |
|
448 | 452 | Examples |
|
449 | 453 | -------- |
|
450 | 454 | |
|
451 | 455 | In [1]: class NoInit: |
|
452 | 456 | ...: pass |
|
453 | 457 | |
|
454 | 458 | In [2]: class NoDoc: |
|
455 | 459 | ...: def __init__(self): |
|
456 | 460 | ...: pass |
|
457 | 461 | |
|
458 | 462 | In [3]: %pdoc NoDoc |
|
459 | 463 | No documentation found for NoDoc |
|
460 | 464 | |
|
461 | 465 | In [4]: %pdoc NoInit |
|
462 | 466 | No documentation found for NoInit |
|
463 | 467 | |
|
464 | 468 | In [5]: obj = NoInit() |
|
465 | 469 | |
|
466 | 470 | In [6]: %pdoc obj |
|
467 | 471 | No documentation found for obj |
|
468 | 472 | |
|
469 | 473 | In [5]: obj2 = NoDoc() |
|
470 | 474 | |
|
471 | 475 | In [6]: %pdoc obj2 |
|
472 | 476 | No documentation found for obj2 |
|
473 | 477 | """ |
|
474 | 478 | |
|
475 | 479 | head = self.__head # For convenience |
|
476 | 480 | lines = [] |
|
477 | 481 | ds = getdoc(obj) |
|
478 | 482 | if formatter: |
|
479 | 483 | ds = formatter(ds).get('plain/text', ds) |
|
480 | 484 | if ds: |
|
481 | 485 | lines.append(head("Class docstring:")) |
|
482 | 486 | lines.append(indent(ds)) |
|
483 | 487 | if inspect.isclass(obj) and hasattr(obj, '__init__'): |
|
484 | 488 | init_ds = getdoc(obj.__init__) |
|
485 | 489 | if init_ds is not None: |
|
486 | 490 | lines.append(head("Init docstring:")) |
|
487 | 491 | lines.append(indent(init_ds)) |
|
488 | 492 | elif hasattr(obj,'__call__'): |
|
489 | 493 | call_ds = getdoc(obj.__call__) |
|
490 | 494 | if call_ds: |
|
491 | 495 | lines.append(head("Call docstring:")) |
|
492 | 496 | lines.append(indent(call_ds)) |
|
493 | 497 | |
|
494 | 498 | if not lines: |
|
495 | 499 | self.noinfo('documentation',oname) |
|
496 | 500 | else: |
|
497 | 501 | page.page('\n'.join(lines)) |
|
498 | 502 | |
|
499 | 503 | def psource(self, obj, oname=''): |
|
500 | 504 | """Print the source code for an object.""" |
|
501 | 505 | |
|
502 | 506 | # Flush the source cache because inspect can return out-of-date source |
|
503 | 507 | linecache.checkcache() |
|
504 | 508 | try: |
|
505 | 509 | src = getsource(obj, oname=oname) |
|
506 | 510 | except Exception: |
|
507 | 511 | src = None |
|
508 | 512 | |
|
509 | 513 | if src is None: |
|
510 | 514 | self.noinfo('source', oname) |
|
511 | 515 | else: |
|
512 | 516 | page.page(self.format(src)) |
|
513 | 517 | |
|
    def pfile(self, obj, oname=''):
        """Show the whole file where an object was defined."""

        lineno = find_source_lines(obj)
        if lineno is None:
            self.noinfo('file', oname)
            return

        ofile = find_file(obj)
        # run contents of file through pager starting at line where the object
        # is defined, as long as the file isn't binary and is actually on the
        # filesystem.
        if ofile.endswith(('.so', '.dll', '.pyd')):
            print('File %r is binary, not printing.' % ofile)
        elif not os.path.isfile(ofile):
            print('File %r does not exist, not printing.' % ofile)
        else:
            # Print only text files, not extension binaries.  Note that
            # getsourcelines returns lineno with 1-offset and page() uses
            # 0-offset, so we must adjust.
            page.page(self.format(openpy.read_py_file(ofile, skip_encoding_cookie=False)), lineno - 1)
|
535 | 539 | |
|
536 | 540 | def _format_fields(self, fields, title_width=0): |
|
537 | 541 | """Formats a list of fields for display. |
|
538 | 542 | |
|
539 | 543 | Parameters |
|
540 | 544 | ---------- |
|
541 | 545 | fields : list |
|
542 | 546 | A list of 2-tuples: (field_title, field_content) |
|
543 | 547 | title_width : int |
|
544 | 548 | How many characters to pad titles to. Default to longest title. |
|
545 | 549 | """ |
|
546 | 550 | out = [] |
|
547 | 551 | header = self.__head |
|
548 | 552 | if title_width == 0: |
|
549 | 553 | title_width = max(len(title) + 2 for title, _ in fields) |
|
550 | 554 | for title, content in fields: |
|
551 | 555 | if len(content.splitlines()) > 1: |
|
552 | 556 | title = header(title + ':') + '\n' |
|
553 | 557 | else: |
|
554 | 558 | title = header((title + ':').ljust(title_width)) |
|
555 | 559 | out.append(cast_unicode(title) + cast_unicode(content)) |
|
556 | 560 | return "\n".join(out) |
|
557 | 561 | |
|
558 | 562 | def _mime_format(self, text, formatter=None): |
|
559 | 563 | """Return a mime bundle representation of the input text. |
|
560 | 564 | |
|
561 | 565 | - if `formatter` is None, the returned mime bundle has |
|
562 | 566 | a `text/plain` field, with the input text. |
|
563 | 567 | a `text/html` field with a `<pre>` tag containing the input text. |
|
564 | 568 | |
|
565 | 569 | - if `formatter` is not None, it must be a callable transforming the |
|
566 | 570 | input text into a mime bundle. Default values for `text/plain` and |
|
567 | 571 | `text/html` representations are the ones described above. |
|
568 | 572 | |
|
569 | 573 | Note: |
|
570 | 574 | |
|
571 | 575 | Formatters returning strings are supported but this behavior is deprecated. |
|
572 | 576 | |
|
573 | 577 | """ |
|
574 | 578 | text = cast_unicode(text) |
|
575 | 579 | defaults = { |
|
576 | 580 | 'text/plain': text, |
|
577 | 581 | 'text/html': '<pre>' + text + '</pre>' |
|
578 | 582 | } |
|
579 | 583 | |
|
580 | 584 | if formatter is None: |
|
581 | 585 | return defaults |
|
582 | 586 | else: |
|
583 | 587 | formatted = formatter(text) |
|
584 | 588 | |
|
585 | 589 | if not isinstance(formatted, dict): |
|
586 | 590 | # Handle the deprecated behavior of a formatter returning |
|
587 | 591 | # a string instead of a mime bundle. |
|
588 | 592 | return { |
|
589 | 593 | 'text/plain': formatted, |
|
590 | 594 | 'text/html': '<pre>' + formatted + '</pre>' |
|
591 | 595 | } |
|
592 | 596 | |
|
593 | 597 | else: |
|
594 | 598 | return dict(defaults, **formatted) |
|
595 | 599 | |
|
596 | 600 | |
|
597 | 601 | def format_mime(self, bundle): |
|
598 | 602 | |
|
599 | 603 | text_plain = bundle['text/plain'] |
|
600 | 604 | |
|
601 | 605 | text = '' |
|
602 | 606 | heads, bodies = list(zip(*text_plain)) |
|
603 | 607 | _len = max(len(h) for h in heads) |
|
604 | 608 | |
|
605 | 609 | for head, body in zip(heads, bodies): |
|
606 | 610 | body = body.strip('\n') |
|
607 | 611 | delim = '\n' if '\n' in body else ' ' |
|
608 | 612 | text += self.__head(head+':') + (_len - len(head))*' ' +delim + body +'\n' |
|
609 | 613 | |
|
610 | 614 | bundle['text/plain'] = text |
|
611 | 615 | return bundle |
|
612 | 616 | |
|
    def _get_info(self, obj, oname='', formatter=None, info=None, detail_level=0):
        """Retrieve an info dict and format it."""

        info = self._info(obj, oname=oname, info=info, detail_level=detail_level)

        # Accumulator: 'text/plain' collects (title, body) pairs for
        # format_mime to align; 'text/html' is built up directly.
        _mime = {
            'text/plain': [],
            'text/html': '',
        }

        def append_field(bundle, title, key, formatter=None):
            # Append info[key] to both representations, skipping absent fields.
            field = info[key]
            if field is not None:
                formatted_field = self._mime_format(field, formatter)
                bundle['text/plain'].append((title, formatted_field['text/plain']))
                bundle['text/html'] += '<h1>' + title + '</h1>\n' + formatted_field['text/html'] + '\n'

        def code_formatter(text):
            # Source-code fields: ANSI-colored plain text, pygments HTML.
            return {
                'text/plain': self.format(text),
                'text/html': pylight(text)
            }

        if info['isalias']:
            append_field(_mime, 'Repr', 'string_form')

        elif info['ismagic']:
            if detail_level > 0:
                append_field(_mime, 'Source', 'source', code_formatter)
            else:
                append_field(_mime, 'Docstring', 'docstring', formatter)
            append_field(_mime, 'File', 'file')

        elif info['isclass'] or is_simple_callable(obj):
            # Functions, methods, classes
            append_field(_mime, 'Signature', 'definition', code_formatter)
            append_field(_mime, 'Init signature', 'init_definition', code_formatter)
            if detail_level > 0 and info['source']:
                append_field(_mime, 'Source', 'source', code_formatter)
            else:
                append_field(_mime, 'Docstring', 'docstring', formatter)
                append_field(_mime, 'Init docstring', 'init_docstring', formatter)

            append_field(_mime, 'File', 'file')
            append_field(_mime, 'Type', 'type_name')

        else:
            # General Python objects
            append_field(_mime, 'Signature', 'definition', code_formatter)
            append_field(_mime, 'Call signature', 'call_def', code_formatter)

            append_field(_mime, 'Type', 'type_name')

            # Base class for old-style instances
            if (not py3compat.PY3) and isinstance(obj, types.InstanceType) and info['base_class']:
                append_field(_mime, 'Base Class', 'base_class')

            append_field(_mime, 'String form', 'string_form')

            # Namespace
            if info['namespace'] != 'Interactive':
                append_field(_mime, 'Namespace', 'namespace')

            append_field(_mime, 'Length', 'length')
            append_field(_mime, 'File', 'file')

            # Source or docstring, depending on detail level and whether
            # source found.
            if detail_level > 0:
                append_field(_mime, 'Source', 'source', code_formatter)
            else:
                append_field(_mime, 'Docstring', 'docstring', formatter)

            append_field(_mime, 'Class docstring', 'class_docstring', formatter)
            append_field(_mime, 'Init docstring', 'init_docstring', formatter)
            append_field(_mime, 'Call docstring', 'call_docstring', formatter)


        return self.format_mime(_mime)
|
692 | 696 | |
|
    def pinfo(self, obj, oname='', formatter=None, info=None, detail_level=0, enable_html_pager=True):
        """Show detailed information about an object.

        Optional arguments:

        - oname: name of the variable pointing to the object.

        - formatter: callable (optional)
              A special formatter for docstrings.

              The formatter is a callable that takes a string as an input
              and returns either a formatted string or a mime type bundle
              in the form of a dictionary.

              Although the support of custom formatter returning a string
              instead of a mime type bundle is deprecated.

        - info: a structure with some information fields which may have been
          precomputed already.

        - detail_level: if set to 1, more information is given.
        """
        info = self._get_info(obj, oname, formatter, info, detail_level)
        # Strip the HTML representation when the frontend can't display it.
        if not enable_html_pager:
            del info['text/html']
        page.page(info)
|
719 | 723 | |
|
720 | 724 | def info(self, obj, oname='', formatter=None, info=None, detail_level=0): |
|
721 | 725 | """DEPRECATED. Compute a dict with detailed information about an object. |
|
722 | 726 | """ |
|
723 | 727 | if formatter is not None: |
|
724 | 728 | warnings.warn('The `formatter` keyword argument to `Inspector.info`' |
|
725 | 729 | 'is deprecated as of IPython 5.0 and will have no effects.', |
|
726 | 730 | DeprecationWarning, stacklevel=2) |
|
727 | 731 | return self._info(obj, oname=oname, info=info, detail_level=detail_level) |
|
728 | 732 | |
|
729 | 733 | def _info(self, obj, oname='', info=None, detail_level=0): |
|
730 | 734 | """Compute a dict with detailed information about an object. |
|
731 | 735 | |
|
732 | 736 | Optional arguments: |
|
733 | 737 | |
|
734 | 738 | - oname: name of the variable pointing to the object. |
|
735 | 739 | |
|
736 | 740 | - info: a structure with some information fields which may have been |
|
737 | 741 | precomputed already. |
|
738 | 742 | |
|
739 | 743 | - detail_level: if set to 1, more information is given. |
|
740 | 744 | """ |
|
741 | 745 | |
|
742 | 746 | obj_type = type(obj) |
|
743 | 747 | |
|
744 | 748 | if info is None: |
|
745 | 749 | ismagic = 0 |
|
746 | 750 | isalias = 0 |
|
747 | 751 | ospace = '' |
|
748 | 752 | else: |
|
749 | 753 | ismagic = info.ismagic |
|
750 | 754 | isalias = info.isalias |
|
751 | 755 | ospace = info.namespace |
|
752 | 756 | |
|
753 | 757 | # Get docstring, special-casing aliases: |
|
754 | 758 | if isalias: |
|
755 | 759 | if not callable(obj): |
|
756 | 760 | try: |
|
757 | 761 | ds = "Alias to the system command:\n %s" % obj[1] |
|
758 | 762 | except: |
|
759 | 763 | ds = "Alias: " + str(obj) |
|
760 | 764 | else: |
|
761 | 765 | ds = "Alias to " + str(obj) |
|
762 | 766 | if obj.__doc__: |
|
763 | 767 | ds += "\nDocstring:\n" + obj.__doc__ |
|
764 | 768 | else: |
|
765 | 769 | ds = getdoc(obj) |
|
766 | 770 | if ds is None: |
|
767 | 771 | ds = '<no docstring>' |
|
768 | 772 | |
|
769 | 773 | # store output in a dict, we initialize it here and fill it as we go |
|
770 | 774 | out = dict(name=oname, found=True, isalias=isalias, ismagic=ismagic) |
|
771 | 775 | |
|
772 | 776 | string_max = 200 # max size of strings to show (snipped if longer) |
|
773 | 777 | shalf = int((string_max - 5) / 2) |
|
774 | 778 | |
|
775 | 779 | if ismagic: |
|
776 | 780 | obj_type_name = 'Magic function' |
|
777 | 781 | elif isalias: |
|
778 | 782 | obj_type_name = 'System alias' |
|
779 | 783 | else: |
|
780 | 784 | obj_type_name = obj_type.__name__ |
|
781 | 785 | out['type_name'] = obj_type_name |
|
782 | 786 | |
|
783 | 787 | try: |
|
784 | 788 | bclass = obj.__class__ |
|
785 | 789 | out['base_class'] = str(bclass) |
|
786 | 790 | except: pass |
|
787 | 791 | |
|
788 | 792 | # String form, but snip if too long in ? form (full in ??) |
|
789 | 793 | if detail_level >= self.str_detail_level: |
|
790 | 794 | try: |
|
791 | 795 | ostr = str(obj) |
|
792 | 796 | str_head = 'string_form' |
|
793 | 797 | if not detail_level and len(ostr)>string_max: |
|
794 | 798 | ostr = ostr[:shalf] + ' <...> ' + ostr[-shalf:] |
|
795 | 799 | ostr = ("\n" + " " * len(str_head.expandtabs())).\ |
|
796 | 800 | join(q.strip() for q in ostr.split("\n")) |
|
797 | 801 | out[str_head] = ostr |
|
798 | 802 | except: |
|
799 | 803 | pass |
|
800 | 804 | |
|
801 | 805 | if ospace: |
|
802 | 806 | out['namespace'] = ospace |
|
803 | 807 | |
|
804 | 808 | # Length (for strings and lists) |
|
805 | 809 | try: |
|
806 | 810 | out['length'] = str(len(obj)) |
|
807 | 811 | except: pass |
|
808 | 812 | |
|
809 | 813 | # Filename where object was defined |
|
810 | 814 | binary_file = False |
|
811 | 815 | fname = find_file(obj) |
|
812 | 816 | if fname is None: |
|
813 | 817 | # if anything goes wrong, we don't want to show source, so it's as |
|
814 | 818 | # if the file was binary |
|
815 | 819 | binary_file = True |
|
816 | 820 | else: |
|
817 | 821 | if fname.endswith(('.so', '.dll', '.pyd')): |
|
818 | 822 | binary_file = True |
|
819 | 823 | elif fname.endswith('<string>'): |
|
820 | 824 | fname = 'Dynamically generated function. No source code available.' |
|
821 | 825 | out['file'] = compress_user(fname) |
|
822 | 826 | |
|
823 | 827 | # Original source code for a callable, class or property. |
|
824 | 828 | if detail_level: |
|
825 | 829 | # Flush the source cache because inspect can return out-of-date |
|
826 | 830 | # source |
|
827 | 831 | linecache.checkcache() |
|
828 | 832 | try: |
|
829 | 833 | if isinstance(obj, property) or not binary_file: |
|
830 | 834 | src = getsource(obj, oname) |
|
831 | 835 | if src is not None: |
|
832 | 836 | src = src.rstrip() |
|
833 | 837 | out['source'] = src |
|
834 | 838 | |
|
835 | 839 | except Exception: |
|
836 | 840 | pass |
|
837 | 841 | |
|
838 | 842 | # Add docstring only if no source is to be shown (avoid repetitions). |
|
839 | 843 | if ds and out.get('source', None) is None: |
|
840 | 844 | out['docstring'] = ds |
|
841 | 845 | |
|
842 | 846 | # Constructor docstring for classes |
|
843 | 847 | if inspect.isclass(obj): |
|
844 | 848 | out['isclass'] = True |
|
845 | 849 | |
|
846 | 850 | # get the init signature: |
|
847 | 851 | try: |
|
848 | 852 | init_def = self._getdef(obj, oname) |
|
849 | 853 | except AttributeError: |
|
850 | 854 | init_def = None |
|
851 | 855 | |
|
852 | 856 | # get the __init__ docstring |
|
853 | 857 | try: |
|
854 | 858 | obj_init = obj.__init__ |
|
855 | 859 | except AttributeError: |
|
856 | 860 | init_ds = None |
|
857 | 861 | else: |
|
858 | 862 | if init_def is None: |
|
859 | 863 | # Get signature from init if top-level sig failed. |
|
860 | 864 | # Can happen for built-in types (list, etc.). |
|
861 | 865 | try: |
|
862 | 866 | init_def = self._getdef(obj_init, oname) |
|
863 | 867 | except AttributeError: |
|
864 | 868 | pass |
|
865 | 869 | init_ds = getdoc(obj_init) |
|
866 | 870 | # Skip Python's auto-generated docstrings |
|
867 | 871 | if init_ds == _object_init_docstring: |
|
868 | 872 | init_ds = None |
|
869 | 873 | |
|
870 | 874 | if init_def: |
|
871 | 875 | out['init_definition'] = init_def |
|
872 | 876 | |
|
873 | 877 | if init_ds: |
|
874 | 878 | out['init_docstring'] = init_ds |
|
875 | 879 | |
|
876 | 880 | # and class docstring for instances: |
|
877 | 881 | else: |
|
878 | 882 | # reconstruct the function definition and print it: |
|
879 | 883 | defln = self._getdef(obj, oname) |
|
880 | 884 | if defln: |
|
881 | 885 | out['definition'] = defln |
|
882 | 886 | |
|
883 | 887 | # First, check whether the instance docstring is identical to the |
|
884 | 888 | # class one, and print it separately if they don't coincide. In |
|
885 | 889 | # most cases they will, but it's nice to print all the info for |
|
886 | 890 | # objects which use instance-customized docstrings. |
|
887 | 891 | if ds: |
|
888 | 892 | try: |
|
889 | 893 | cls = getattr(obj,'__class__') |
|
890 | 894 | except: |
|
891 | 895 | class_ds = None |
|
892 | 896 | else: |
|
893 | 897 | class_ds = getdoc(cls) |
|
894 | 898 | # Skip Python's auto-generated docstrings |
|
895 | 899 | if class_ds in _builtin_type_docstrings: |
|
896 | 900 | class_ds = None |
|
897 | 901 | if class_ds and ds != class_ds: |
|
898 | 902 | out['class_docstring'] = class_ds |
|
899 | 903 | |
|
900 | 904 | # Next, try to show constructor docstrings |
|
901 | 905 | try: |
|
902 | 906 | init_ds = getdoc(obj.__init__) |
|
903 | 907 | # Skip Python's auto-generated docstrings |
|
904 | 908 | if init_ds == _object_init_docstring: |
|
905 | 909 | init_ds = None |
|
906 | 910 | except AttributeError: |
|
907 | 911 | init_ds = None |
|
908 | 912 | if init_ds: |
|
909 | 913 | out['init_docstring'] = init_ds |
|
910 | 914 | |
|
911 | 915 | # Call form docstring for callable instances |
|
912 | 916 | if safe_hasattr(obj, '__call__') and not is_simple_callable(obj): |
|
913 | 917 | call_def = self._getdef(obj.__call__, oname) |
|
914 | 918 | if call_def and (call_def != out.get('definition')): |
|
915 | 919 | # it may never be the case that call def and definition differ, |
|
916 | 920 | # but don't include the same signature twice |
|
917 | 921 | out['call_def'] = call_def |
|
918 | 922 | call_ds = getdoc(obj.__call__) |
|
919 | 923 | # Skip Python's auto-generated docstrings |
|
920 | 924 | if call_ds == _func_call_docstring: |
|
921 | 925 | call_ds = None |
|
922 | 926 | if call_ds: |
|
923 | 927 | out['call_docstring'] = call_ds |
|
924 | 928 | |
|
925 | 929 | # Compute the object's argspec as a callable. The key is to decide |
|
926 | 930 | # whether to pull it from the object itself, from its __init__ or |
|
927 | 931 | # from its __call__ method. |
|
928 | 932 | |
|
929 | 933 | if inspect.isclass(obj): |
|
930 | 934 | # Old-style classes need not have an __init__ |
|
931 | 935 | callable_obj = getattr(obj, "__init__", None) |
|
932 | 936 | elif callable(obj): |
|
933 | 937 | callable_obj = obj |
|
934 | 938 | else: |
|
935 | 939 | callable_obj = None |
|
936 | 940 | |
|
937 | 941 | if callable_obj is not None: |
|
938 | 942 | try: |
|
939 | 943 | argspec = getargspec(callable_obj) |
|
940 | 944 | except (TypeError, AttributeError): |
|
941 | 945 | # For extensions/builtins we can't retrieve the argspec |
|
942 | 946 | pass |
|
943 | 947 | else: |
|
944 | 948 | # named tuples' _asdict() method returns an OrderedDict, but we |
|
945 | 949 | # we want a normal |
|
946 | 950 | out['argspec'] = argspec_dict = dict(argspec._asdict()) |
|
947 | 951 | # We called this varkw before argspec became a named tuple. |
|
948 | 952 | # With getfullargspec it's also called varkw. |
|
949 | 953 | if 'varkw' not in argspec_dict: |
|
950 | 954 | argspec_dict['varkw'] = argspec_dict.pop('keywords') |
|
951 | 955 | |
|
952 | 956 | return object_info(**out) |
|
953 | 957 | |
|
954 | 958 | def psearch(self,pattern,ns_table,ns_search=[], |
|
955 | 959 | ignore_case=False,show_all=False): |
|
956 | 960 | """Search namespaces with wildcards for objects. |
|
957 | 961 | |
|
958 | 962 | Arguments: |
|
959 | 963 | |
|
960 | 964 | - pattern: string containing shell-like wildcards to use in namespace |
|
961 | 965 | searches and optionally a type specification to narrow the search to |
|
962 | 966 | objects of that type. |
|
963 | 967 | |
|
964 | 968 | - ns_table: dict of name->namespaces for search. |
|
965 | 969 | |
|
966 | 970 | Optional arguments: |
|
967 | 971 | |
|
968 | 972 | - ns_search: list of namespace names to include in search. |
|
969 | 973 | |
|
970 | 974 | - ignore_case(False): make the search case-insensitive. |
|
971 | 975 | |
|
972 | 976 | - show_all(False): show all names, including those starting with |
|
973 | 977 | underscores. |
|
974 | 978 | """ |
|
975 | 979 | #print 'ps pattern:<%r>' % pattern # dbg |
|
976 | 980 | |
|
977 | 981 | # defaults |
|
978 | 982 | type_pattern = 'all' |
|
979 | 983 | filter = '' |
|
980 | 984 | |
|
981 | 985 | cmds = pattern.split() |
|
982 | 986 | len_cmds = len(cmds) |
|
983 | 987 | if len_cmds == 1: |
|
984 | 988 | # Only filter pattern given |
|
985 | 989 | filter = cmds[0] |
|
986 | 990 | elif len_cmds == 2: |
|
987 | 991 | # Both filter and type specified |
|
988 | 992 | filter,type_pattern = cmds |
|
989 | 993 | else: |
|
990 | 994 | raise ValueError('invalid argument string for psearch: <%s>' % |
|
991 | 995 | pattern) |
|
992 | 996 | |
|
993 | 997 | # filter search namespaces |
|
994 | 998 | for name in ns_search: |
|
995 | 999 | if name not in ns_table: |
|
996 | 1000 | raise ValueError('invalid namespace <%s>. Valid names: %s' % |
|
997 | 1001 | (name,ns_table.keys())) |
|
998 | 1002 | |
|
999 | 1003 | #print 'type_pattern:',type_pattern # dbg |
|
1000 | 1004 | search_result, namespaces_seen = set(), set() |
|
1001 | 1005 | for ns_name in ns_search: |
|
1002 | 1006 | ns = ns_table[ns_name] |
|
1003 | 1007 | # Normally, locals and globals are the same, so we just check one. |
|
1004 | 1008 | if id(ns) in namespaces_seen: |
|
1005 | 1009 | continue |
|
1006 | 1010 | namespaces_seen.add(id(ns)) |
|
1007 | 1011 | tmp_res = list_namespace(ns, type_pattern, filter, |
|
1008 | 1012 | ignore_case=ignore_case, show_all=show_all) |
|
1009 | 1013 | search_result.update(tmp_res) |
|
1010 | 1014 | |
|
1011 | 1015 | page.page('\n'.join(sorted(search_result))) |
@@ -1,512 +1,517 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | """ |
|
3 | 3 | Defines a variety of Pygments lexers for highlighting IPython code. |
|
4 | 4 | |
|
5 | 5 | This includes: |
|
6 | 6 | |
|
7 | 7 | IPythonLexer, IPython3Lexer |
|
8 | 8 | Lexers for pure IPython (python + magic/shell commands) |
|
9 | 9 | |
|
10 | 10 | IPythonPartialTracebackLexer, IPythonTracebackLexer |
|
11 | 11 | Supports 2.x and 3.x via keyword `python3`. The partial traceback |
|
12 | 12 | lexer reads everything but the Python code appearing in a traceback. |
|
13 | 13 | The full lexer combines the partial lexer with an IPython lexer. |
|
14 | 14 | |
|
15 | 15 | IPythonConsoleLexer |
|
16 | 16 | A lexer for IPython console sessions, with support for tracebacks. |
|
17 | 17 | |
|
18 | 18 | IPyLexer |
|
19 | 19 | A friendly lexer which examines the first line of text and from it, |
|
20 | 20 | decides whether to use an IPython lexer or an IPython console lexer. |
|
21 | 21 | This is probably the only lexer that needs to be explicitly added |
|
22 | 22 | to Pygments. |
|
23 | 23 | |
|
24 | 24 | """ |
|
25 | 25 | #----------------------------------------------------------------------------- |
|
26 | 26 | # Copyright (c) 2013, the IPython Development Team. |
|
27 | 27 | # |
|
28 | 28 | # Distributed under the terms of the Modified BSD License. |
|
29 | 29 | # |
|
30 | 30 | # The full license is in the file COPYING.txt, distributed with this software. |
|
31 | 31 | #----------------------------------------------------------------------------- |
|
32 | 32 | |
|
33 | 33 | # Standard library |
|
34 | 34 | import re |
|
35 | 35 | |
|
36 | 36 | # Third party |
|
37 |
from pygments.lexers import BashLexer, |
|
|
37 | from pygments.lexers import BashLexer, Python3Lexer | |
|
38 | try: | |
|
39 | # PythonLexer was renamed to Python2Lexer in pygments 2.5 | |
|
40 | from pygments.lexers import Python2Lexer | |
|
41 | except ImportError: | |
|
42 | from pygments.lexers import PythonLexer as Python2Lexer | |
|
38 | 43 | from pygments.lexer import ( |
|
39 | 44 | Lexer, DelegatingLexer, RegexLexer, do_insertions, bygroups, using, |
|
40 | 45 | ) |
|
41 | 46 | from pygments.token import ( |
|
42 | 47 | Generic, Keyword, Literal, Name, Operator, Other, Text, Error, |
|
43 | 48 | ) |
|
44 | 49 | from pygments.util import get_bool_opt |
|
45 | 50 | |
|
46 | 51 | # Local |
|
47 | 52 | |
|
48 | 53 | line_re = re.compile('.*?\n') |
|
49 | 54 | |
|
50 | 55 | __all__ = ['build_ipy_lexer', 'IPython3Lexer', 'IPythonLexer', |
|
51 | 56 | 'IPythonPartialTracebackLexer', 'IPythonTracebackLexer', |
|
52 | 57 | 'IPythonConsoleLexer', 'IPyLexer'] |
|
53 | 58 | |
|
54 | 59 | ipython_tokens = [ |
|
55 | 60 | (r"(?s)(\s*)(%%)(\w+)(.*)", bygroups(Text, Operator, Keyword, Text)), |
|
56 | 61 | (r'(?s)(^\s*)(%%!)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(BashLexer))), |
|
57 | 62 | (r"(%%?)(\w+)(\?\??)$", bygroups(Operator, Keyword, Operator)), |
|
58 | 63 | (r"\b(\?\??)(\s*)$", bygroups(Operator, Text)), |
|
59 | 64 | (r'(%)(sx|sc|system)(.*)(\n)', bygroups(Operator, Keyword, |
|
60 | 65 | using(BashLexer), Text)), |
|
61 | 66 | (r'(%)(\w+)(.*\n)', bygroups(Operator, Keyword, Text)), |
|
62 | 67 | (r'^(!!)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)), |
|
63 | 68 | (r'(!)(?!=)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)), |
|
64 | 69 | (r'^(\s*)(\?\??)(\s*%{0,2}[\w\.\*]*)', bygroups(Text, Operator, Text)), |
|
65 | 70 | (r'(\s*%{0,2}[\w\.\*]*)(\?\??)(\s*)$', bygroups(Text, Operator, Text)), |
|
66 | 71 | ] |
|
67 | 72 | |
|
68 | 73 | def build_ipy_lexer(python3): |
|
69 | 74 | """Builds IPython lexers depending on the value of `python3`. |
|
70 | 75 | |
|
71 | 76 | The lexer inherits from an appropriate Python lexer and then adds |
|
72 | 77 | information about IPython specific keywords (i.e. magic commands, |
|
73 | 78 | shell commands, etc.) |
|
74 | 79 | |
|
75 | 80 | Parameters |
|
76 | 81 | ---------- |
|
77 | 82 | python3 : bool |
|
78 | 83 | If `True`, then build an IPython lexer from a Python 3 lexer. |
|
79 | 84 | |
|
80 | 85 | """ |
|
81 | 86 | # It would be nice to have a single IPython lexer class which takes |
|
82 | 87 | # a boolean `python3`. But since there are two Python lexer classes, |
|
83 | 88 | # we will also have two IPython lexer classes. |
|
84 | 89 | if python3: |
|
85 | 90 | PyLexer = Python3Lexer |
|
86 | 91 | name = 'IPython3' |
|
87 | 92 | aliases = ['ipython3'] |
|
88 | 93 | doc = """IPython3 Lexer""" |
|
89 | 94 | else: |
|
90 | PyLexer = PythonLexer | |
|
95 | PyLexer = Python2Lexer | |
|
91 | 96 | name = 'IPython' |
|
92 | 97 | aliases = ['ipython2', 'ipython'] |
|
93 | 98 | doc = """IPython Lexer""" |
|
94 | 99 | |
|
95 | 100 | tokens = PyLexer.tokens.copy() |
|
96 | 101 | tokens['root'] = ipython_tokens + tokens['root'] |
|
97 | 102 | |
|
98 | 103 | attrs = {'name': name, 'aliases': aliases, 'filenames': [], |
|
99 | 104 | '__doc__': doc, 'tokens': tokens} |
|
100 | 105 | |
|
101 | 106 | return type(name, (PyLexer,), attrs) |
|
102 | 107 | |
|
103 | 108 | |
|
104 | 109 | IPython3Lexer = build_ipy_lexer(python3=True) |
|
105 | 110 | IPythonLexer = build_ipy_lexer(python3=False) |
|
106 | 111 | |
|
107 | 112 | |
|
108 | 113 | class IPythonPartialTracebackLexer(RegexLexer): |
|
109 | 114 | """ |
|
110 | 115 | Partial lexer for IPython tracebacks. |
|
111 | 116 | |
|
112 | 117 | Handles all the non-python output. This works for both Python 2.x and 3.x. |
|
113 | 118 | |
|
114 | 119 | """ |
|
115 | 120 | name = 'IPython Partial Traceback' |
|
116 | 121 | |
|
117 | 122 | tokens = { |
|
118 | 123 | 'root': [ |
|
119 | 124 | # Tracebacks for syntax errors have a different style. |
|
120 | 125 | # For both types of tracebacks, we mark the first line with |
|
121 | 126 | # Generic.Traceback. For syntax errors, we mark the filename |
|
122 | 127 | # as we mark the filenames for non-syntax tracebacks. |
|
123 | 128 | # |
|
124 | 129 | # These two regexps define how IPythonConsoleLexer finds a |
|
125 | 130 | # traceback. |
|
126 | 131 | # |
|
127 | 132 | ## Non-syntax traceback |
|
128 | 133 | (r'^(\^C)?(-+\n)', bygroups(Error, Generic.Traceback)), |
|
129 | 134 | ## Syntax traceback |
|
130 | 135 | (r'^( File)(.*)(, line )(\d+\n)', |
|
131 | 136 | bygroups(Generic.Traceback, Name.Namespace, |
|
132 | 137 | Generic.Traceback, Literal.Number.Integer)), |
|
133 | 138 | |
|
134 | 139 | # (Exception Identifier)(Whitespace)(Traceback Message) |
|
135 | 140 | (r'(?u)(^[^\d\W]\w*)(\s*)(Traceback.*?\n)', |
|
136 | 141 | bygroups(Name.Exception, Generic.Whitespace, Text)), |
|
137 | 142 | # (Module/Filename)(Text)(Callee)(Function Signature) |
|
138 | 143 | # Better options for callee and function signature? |
|
139 | 144 | (r'(.*)( in )(.*)(\(.*\)\n)', |
|
140 | 145 | bygroups(Name.Namespace, Text, Name.Entity, Name.Tag)), |
|
141 | 146 | # Regular line: (Whitespace)(Line Number)(Python Code) |
|
142 | 147 | (r'(\s*?)(\d+)(.*?\n)', |
|
143 | 148 | bygroups(Generic.Whitespace, Literal.Number.Integer, Other)), |
|
144 | 149 | # Emphasized line: (Arrow)(Line Number)(Python Code) |
|
145 | 150 | # Using Exception token so arrow color matches the Exception. |
|
146 | 151 | (r'(-*>?\s?)(\d+)(.*?\n)', |
|
147 | 152 | bygroups(Name.Exception, Literal.Number.Integer, Other)), |
|
148 | 153 | # (Exception Identifier)(Message) |
|
149 | 154 | (r'(?u)(^[^\d\W]\w*)(:.*?\n)', |
|
150 | 155 | bygroups(Name.Exception, Text)), |
|
151 | 156 | # Tag everything else as Other, will be handled later. |
|
152 | 157 | (r'.*\n', Other), |
|
153 | 158 | ], |
|
154 | 159 | } |
|
155 | 160 | |
|
156 | 161 | |
|
157 | 162 | class IPythonTracebackLexer(DelegatingLexer): |
|
158 | 163 | """ |
|
159 | 164 | IPython traceback lexer. |
|
160 | 165 | |
|
161 | 166 | For doctests, the tracebacks can be snipped as much as desired with the |
|
162 | 167 | exception to the lines that designate a traceback. For non-syntax error |
|
163 | 168 | tracebacks, this is the line of hyphens. For syntax error tracebacks, |
|
164 | 169 | this is the line which lists the File and line number. |
|
165 | 170 | |
|
166 | 171 | """ |
|
167 | 172 | # The lexer inherits from DelegatingLexer. The "root" lexer is an |
|
168 | 173 | # appropriate IPython lexer, which depends on the value of the boolean |
|
169 | 174 | # `python3`. First, we parse with the partial IPython traceback lexer. |
|
170 | 175 | # Then, any code marked with the "Other" token is delegated to the root |
|
171 | 176 | # lexer. |
|
172 | 177 | # |
|
173 | 178 | name = 'IPython Traceback' |
|
174 | 179 | aliases = ['ipythontb'] |
|
175 | 180 | |
|
176 | 181 | def __init__(self, **options): |
|
177 | 182 | self.python3 = get_bool_opt(options, 'python3', False) |
|
178 | 183 | if self.python3: |
|
179 | 184 | self.aliases = ['ipython3tb'] |
|
180 | 185 | else: |
|
181 | 186 | self.aliases = ['ipython2tb', 'ipythontb'] |
|
182 | 187 | |
|
183 | 188 | if self.python3: |
|
184 | 189 | IPyLexer = IPython3Lexer |
|
185 | 190 | else: |
|
186 | 191 | IPyLexer = IPythonLexer |
|
187 | 192 | |
|
188 | 193 | DelegatingLexer.__init__(self, IPyLexer, |
|
189 | 194 | IPythonPartialTracebackLexer, **options) |
|
190 | 195 | |
|
191 | 196 | class IPythonConsoleLexer(Lexer): |
|
192 | 197 | """ |
|
193 | 198 | An IPython console lexer for IPython code-blocks and doctests, such as: |
|
194 | 199 | |
|
195 | 200 | .. code-block:: rst |
|
196 | 201 | |
|
197 | 202 | .. code-block:: ipythonconsole |
|
198 | 203 | |
|
199 | 204 | In [1]: a = 'foo' |
|
200 | 205 | |
|
201 | 206 | In [2]: a |
|
202 | 207 | Out[2]: 'foo' |
|
203 | 208 | |
|
204 | 209 | In [3]: print a |
|
205 | 210 | foo |
|
206 | 211 | |
|
207 | 212 | In [4]: 1 / 0 |
|
208 | 213 | |
|
209 | 214 | |
|
210 | 215 | Support is also provided for IPython exceptions: |
|
211 | 216 | |
|
212 | 217 | .. code-block:: rst |
|
213 | 218 | |
|
214 | 219 | .. code-block:: ipythonconsole |
|
215 | 220 | |
|
216 | 221 | In [1]: raise Exception |
|
217 | 222 | |
|
218 | 223 | --------------------------------------------------------------------------- |
|
219 | 224 | Exception Traceback (most recent call last) |
|
220 | 225 | <ipython-input-1-fca2ab0ca76b> in <module>() |
|
221 | 226 | ----> 1 raise Exception |
|
222 | 227 | |
|
223 | 228 | Exception: |
|
224 | 229 | |
|
225 | 230 | """ |
|
226 | 231 | name = 'IPython console session' |
|
227 | 232 | aliases = ['ipythonconsole'] |
|
228 | 233 | mimetypes = ['text/x-ipython-console'] |
|
229 | 234 | |
|
230 | 235 | # The regexps used to determine what is input and what is output. |
|
231 | 236 | # The default prompts for IPython are: |
|
232 | 237 | # |
|
233 | 238 | # in = 'In [#]: ' |
|
234 | 239 | # continuation = ' .D.: ' |
|
235 | 240 | # template = 'Out[#]: ' |
|
236 | 241 | # |
|
237 | 242 | # Where '#' is the 'prompt number' or 'execution count' and 'D' |
|
238 | 243 | # D is a number of dots matching the width of the execution count |
|
239 | 244 | # |
|
240 | 245 | in1_regex = r'In \[[0-9]+\]: ' |
|
241 | 246 | in2_regex = r' \.\.+\.: ' |
|
242 | 247 | out_regex = r'Out\[[0-9]+\]: ' |
|
243 | 248 | |
|
244 | 249 | #: The regex to determine when a traceback starts. |
|
245 | 250 | ipytb_start = re.compile(r'^(\^C)?(-+\n)|^( File)(.*)(, line )(\d+\n)') |
|
246 | 251 | |
|
247 | 252 | def __init__(self, **options): |
|
248 | 253 | """Initialize the IPython console lexer. |
|
249 | 254 | |
|
250 | 255 | Parameters |
|
251 | 256 | ---------- |
|
252 | 257 | python3 : bool |
|
253 | 258 | If `True`, then the console inputs are parsed using a Python 3 |
|
254 | 259 | lexer. Otherwise, they are parsed using a Python 2 lexer. |
|
255 | 260 | in1_regex : RegexObject |
|
256 | 261 | The compiled regular expression used to detect the start |
|
257 | 262 | of inputs. Although the IPython configuration setting may have a |
|
258 | 263 | trailing whitespace, do not include it in the regex. If `None`, |
|
259 | 264 | then the default input prompt is assumed. |
|
260 | 265 | in2_regex : RegexObject |
|
261 | 266 | The compiled regular expression used to detect the continuation |
|
262 | 267 | of inputs. Although the IPython configuration setting may have a |
|
263 | 268 | trailing whitespace, do not include it in the regex. If `None`, |
|
264 | 269 | then the default input prompt is assumed. |
|
265 | 270 | out_regex : RegexObject |
|
266 | 271 | The compiled regular expression used to detect outputs. If `None`, |
|
267 | 272 | then the default output prompt is assumed. |
|
268 | 273 | |
|
269 | 274 | """ |
|
270 | 275 | self.python3 = get_bool_opt(options, 'python3', False) |
|
271 | 276 | if self.python3: |
|
272 | 277 | self.aliases = ['ipython3console'] |
|
273 | 278 | else: |
|
274 | 279 | self.aliases = ['ipython2console', 'ipythonconsole'] |
|
275 | 280 | |
|
276 | 281 | in1_regex = options.get('in1_regex', self.in1_regex) |
|
277 | 282 | in2_regex = options.get('in2_regex', self.in2_regex) |
|
278 | 283 | out_regex = options.get('out_regex', self.out_regex) |
|
279 | 284 | |
|
280 | 285 | # So that we can work with input and output prompts which have been |
|
281 | 286 | # rstrip'd (possibly by editors) we also need rstrip'd variants. If |
|
282 | 287 | # we do not do this, then such prompts will be tagged as 'output'. |
|
283 | 288 | # The reason can't just use the rstrip'd variants instead is because |
|
284 | 289 | # we want any whitespace associated with the prompt to be inserted |
|
285 | 290 | # with the token. This allows formatted code to be modified so as hide |
|
286 | 291 | # the appearance of prompts, with the whitespace included. One example |
|
287 | 292 | # use of this is in copybutton.js from the standard lib Python docs. |
|
288 | 293 | in1_regex_rstrip = in1_regex.rstrip() + '\n' |
|
289 | 294 | in2_regex_rstrip = in2_regex.rstrip() + '\n' |
|
290 | 295 | out_regex_rstrip = out_regex.rstrip() + '\n' |
|
291 | 296 | |
|
292 | 297 | # Compile and save them all. |
|
293 | 298 | attrs = ['in1_regex', 'in2_regex', 'out_regex', |
|
294 | 299 | 'in1_regex_rstrip', 'in2_regex_rstrip', 'out_regex_rstrip'] |
|
295 | 300 | for attr in attrs: |
|
296 | 301 | self.__setattr__(attr, re.compile(locals()[attr])) |
|
297 | 302 | |
|
298 | 303 | Lexer.__init__(self, **options) |
|
299 | 304 | |
|
300 | 305 | if self.python3: |
|
301 | 306 | pylexer = IPython3Lexer |
|
302 | 307 | tblexer = IPythonTracebackLexer |
|
303 | 308 | else: |
|
304 | 309 | pylexer = IPythonLexer |
|
305 | 310 | tblexer = IPythonTracebackLexer |
|
306 | 311 | |
|
307 | 312 | self.pylexer = pylexer(**options) |
|
308 | 313 | self.tblexer = tblexer(**options) |
|
309 | 314 | |
|
310 | 315 | self.reset() |
|
311 | 316 | |
|
312 | 317 | def reset(self): |
|
313 | 318 | self.mode = 'output' |
|
314 | 319 | self.index = 0 |
|
315 | 320 | self.buffer = u'' |
|
316 | 321 | self.insertions = [] |
|
317 | 322 | |
|
318 | 323 | def buffered_tokens(self): |
|
319 | 324 | """ |
|
320 | 325 | Generator of unprocessed tokens after doing insertions and before |
|
321 | 326 | changing to a new state. |
|
322 | 327 | |
|
323 | 328 | """ |
|
324 | 329 | if self.mode == 'output': |
|
325 | 330 | tokens = [(0, Generic.Output, self.buffer)] |
|
326 | 331 | elif self.mode == 'input': |
|
327 | 332 | tokens = self.pylexer.get_tokens_unprocessed(self.buffer) |
|
328 | 333 | else: # traceback |
|
329 | 334 | tokens = self.tblexer.get_tokens_unprocessed(self.buffer) |
|
330 | 335 | |
|
331 | 336 | for i, t, v in do_insertions(self.insertions, tokens): |
|
332 | 337 | # All token indexes are relative to the buffer. |
|
333 | 338 | yield self.index + i, t, v |
|
334 | 339 | |
|
335 | 340 | # Clear it all |
|
336 | 341 | self.index += len(self.buffer) |
|
337 | 342 | self.buffer = u'' |
|
338 | 343 | self.insertions = [] |
|
339 | 344 | |
|
340 | 345 | def get_mci(self, line): |
|
341 | 346 | """ |
|
342 | 347 | Parses the line and returns a 3-tuple: (mode, code, insertion). |
|
343 | 348 | |
|
344 | 349 | `mode` is the next mode (or state) of the lexer, and is always equal |
|
345 | 350 | to 'input', 'output', or 'tb'. |
|
346 | 351 | |
|
347 | 352 | `code` is a portion of the line that should be added to the buffer |
|
348 | 353 | corresponding to the next mode and eventually lexed by another lexer. |
|
349 | 354 | For example, `code` could be Python code if `mode` were 'input'. |
|
350 | 355 | |
|
351 | 356 | `insertion` is a 3-tuple (index, token, text) representing an |
|
352 | 357 | unprocessed "token" that will be inserted into the stream of tokens |
|
353 | 358 | that are created from the buffer once we change modes. This is usually |
|
354 | 359 | the input or output prompt. |
|
355 | 360 | |
|
356 | 361 | In general, the next mode depends on current mode and on the contents |
|
357 | 362 | of `line`. |
|
358 | 363 | |
|
359 | 364 | """ |
|
360 | 365 | # To reduce the number of regex match checks, we have multiple |
|
361 | 366 | # 'if' blocks instead of 'if-elif' blocks. |
|
362 | 367 | |
|
363 | 368 | # Check for possible end of input |
|
364 | 369 | in2_match = self.in2_regex.match(line) |
|
365 | 370 | in2_match_rstrip = self.in2_regex_rstrip.match(line) |
|
366 | 371 | if (in2_match and in2_match.group().rstrip() == line.rstrip()) or \ |
|
367 | 372 | in2_match_rstrip: |
|
368 | 373 | end_input = True |
|
369 | 374 | else: |
|
370 | 375 | end_input = False |
|
371 | 376 | if end_input and self.mode != 'tb': |
|
372 | 377 | # Only look for an end of input when not in tb mode. |
|
373 | 378 | # An ellipsis could appear within the traceback. |
|
374 | 379 | mode = 'output' |
|
375 | 380 | code = u'' |
|
376 | 381 | insertion = (0, Generic.Prompt, line) |
|
377 | 382 | return mode, code, insertion |
|
378 | 383 | |
|
379 | 384 | # Check for output prompt |
|
380 | 385 | out_match = self.out_regex.match(line) |
|
381 | 386 | out_match_rstrip = self.out_regex_rstrip.match(line) |
|
382 | 387 | if out_match or out_match_rstrip: |
|
383 | 388 | mode = 'output' |
|
384 | 389 | if out_match: |
|
385 | 390 | idx = out_match.end() |
|
386 | 391 | else: |
|
387 | 392 | idx = out_match_rstrip.end() |
|
388 | 393 | code = line[idx:] |
|
389 | 394 | # Use the 'heading' token for output. We cannot use Generic.Error |
|
390 | 395 | # since it would conflict with exceptions. |
|
391 | 396 | insertion = (0, Generic.Heading, line[:idx]) |
|
392 | 397 | return mode, code, insertion |
|
393 | 398 | |
|
394 | 399 | |
|
395 | 400 | # Check for input or continuation prompt (non stripped version) |
|
396 | 401 | in1_match = self.in1_regex.match(line) |
|
397 | 402 | if in1_match or (in2_match and self.mode != 'tb'): |
|
398 | 403 | # New input or when not in tb, continued input. |
|
399 | 404 | # We do not check for continued input when in tb since it is |
|
400 | 405 | # allowable to replace a long stack with an ellipsis. |
|
401 | 406 | mode = 'input' |
|
402 | 407 | if in1_match: |
|
403 | 408 | idx = in1_match.end() |
|
404 | 409 | else: # in2_match |
|
405 | 410 | idx = in2_match.end() |
|
406 | 411 | code = line[idx:] |
|
407 | 412 | insertion = (0, Generic.Prompt, line[:idx]) |
|
408 | 413 | return mode, code, insertion |
|
409 | 414 | |
|
410 | 415 | # Check for input or continuation prompt (stripped version) |
|
411 | 416 | in1_match_rstrip = self.in1_regex_rstrip.match(line) |
|
412 | 417 | if in1_match_rstrip or (in2_match_rstrip and self.mode != 'tb'): |
|
413 | 418 | # New input or when not in tb, continued input. |
|
414 | 419 | # We do not check for continued input when in tb since it is |
|
415 | 420 | # allowable to replace a long stack with an ellipsis. |
|
416 | 421 | mode = 'input' |
|
417 | 422 | if in1_match_rstrip: |
|
418 | 423 | idx = in1_match_rstrip.end() |
|
419 | 424 | else: # in2_match |
|
420 | 425 | idx = in2_match_rstrip.end() |
|
421 | 426 | code = line[idx:] |
|
422 | 427 | insertion = (0, Generic.Prompt, line[:idx]) |
|
423 | 428 | return mode, code, insertion |
|
424 | 429 | |
|
425 | 430 | # Check for traceback |
|
426 | 431 | if self.ipytb_start.match(line): |
|
427 | 432 | mode = 'tb' |
|
428 | 433 | code = line |
|
429 | 434 | insertion = None |
|
430 | 435 | return mode, code, insertion |
|
431 | 436 | |
|
432 | 437 | # All other stuff... |
|
433 | 438 | if self.mode in ('input', 'output'): |
|
434 | 439 | # We assume all other text is output. Multiline input that |
|
435 | 440 | # does not use the continuation marker cannot be detected. |
|
436 | 441 | # For example, the 3 in the following is clearly output: |
|
437 | 442 | # |
|
438 | 443 | # In [1]: print 3 |
|
439 | 444 | # 3 |
|
440 | 445 | # |
|
441 | 446 | # But the following second line is part of the input: |
|
442 | 447 | # |
|
443 | 448 | # In [2]: while True: |
|
444 | 449 | # print True |
|
445 | 450 | # |
|
446 | 451 | # In both cases, the 2nd line will be 'output'. |
|
447 | 452 | # |
|
448 | 453 | mode = 'output' |
|
449 | 454 | else: |
|
450 | 455 | mode = 'tb' |
|
451 | 456 | |
|
452 | 457 | code = line |
|
453 | 458 | insertion = None |
|
454 | 459 | |
|
455 | 460 | return mode, code, insertion |
|
456 | 461 | |
|
457 | 462 | def get_tokens_unprocessed(self, text): |
|
458 | 463 | self.reset() |
|
459 | 464 | for match in line_re.finditer(text): |
|
460 | 465 | line = match.group() |
|
461 | 466 | mode, code, insertion = self.get_mci(line) |
|
462 | 467 | |
|
463 | 468 | if mode != self.mode: |
|
464 | 469 | # Yield buffered tokens before transitioning to new mode. |
|
465 | 470 | for token in self.buffered_tokens(): |
|
466 | 471 | yield token |
|
467 | 472 | self.mode = mode |
|
468 | 473 | |
|
469 | 474 | if insertion: |
|
470 | 475 | self.insertions.append((len(self.buffer), [insertion])) |
|
471 | 476 | self.buffer += code |
|
472 | 477 | |
|
473 | 478 | for token in self.buffered_tokens(): |
|
474 | 479 | yield token |
|
475 | 480 | |
|
class IPyLexer(Lexer):
    # NOTE: the docstring must be a raw string -- it contains regex escapes
    # like ``\[`` which are invalid escape sequences in a normal string
    # literal (DeprecationWarning since Python 3.6, SyntaxWarning in 3.12+).
    r"""
    Primary lexer for all IPython-like code.

    This is a simple helper lexer. If the first line of the text begins with
    "In \[[0-9]+\]:", then the entire text is parsed with an IPython console
    lexer. If not, then the entire text is parsed with an IPython lexer.

    The goal is to reduce the number of lexers that are registered
    with Pygments.

    """
    name = 'IPy session'
    aliases = ['ipy']

    def __init__(self, **options):
        """Create the lexer, selecting Python 2 or 3 sub-lexers.

        Parameters
        ----------
        **options
            Pygments lexer options. The boolean ``python3`` option selects
            the Python 3 variant (alias ``ipy3``); it defaults to False
            (aliases ``ipy2``/``ipy``).
        """
        self.python3 = get_bool_opt(options, 'python3', False)
        if self.python3:
            self.aliases = ['ipy3']
        else:
            self.aliases = ['ipy2', 'ipy']

        Lexer.__init__(self, **options)

        # Delegate lexers; one of them handles the whole text (see
        # get_tokens_unprocessed).
        self.IPythonLexer = IPythonLexer(**options)
        self.IPythonConsoleLexer = IPythonConsoleLexer(**options)

    def get_tokens_unprocessed(self, text):
        """Tokenize *text*, dispatching to the console or plain lexer.

        Searches for an input prompt ("In [N]:") anywhere in the text
        (``.*`` with re.DOTALL), so code blocks may begin with comments
        and still be recognized as console sessions.
        """
        if re.match(r'.*(In \[[0-9]+\]:)', text.strip(), re.DOTALL):
            lex = self.IPythonConsoleLexer
        else:
            lex = self.IPythonLexer
        for token in lex.get_tokens_unprocessed(text):
            yield token
|
512 | 517 |
General Comments 0
You need to be logged in to leave comments.
Login now