Fixes for UltraTB and PyColorize with Python 3
Thomas Kluyver
IPython/core/ultratb.py:

@@ -83,6 +83,11 @@ import tokenize
 import traceback
 import types
 
+try: # Python 2
+    generate_tokens = tokenize.generate_tokens
+except AttributeError: # Python 3
+    generate_tokens = tokenize.tokenize
+
 # For purposes of monkeypatching inspect to fix a bug in it.
 from inspect import getsourcefile, getfile, getmodule,\
      ismodule, isclass, ismethod, isfunction, istraceback, isframe, iscode
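The shim above picks a tokenizer entry point once at import time: tokenize.generate_tokens where it exists (Python 2), otherwise tokenize.tokenize (Python 3), whose readline callable must return bytes. A minimal sketch of the two entry points the shim chooses between; it uses an explicit version check instead of the AttributeError probe, and the source text is purely illustrative:

import io
import sys
import tokenize

source = "total = price * qty\n"

if sys.version_info[0] >= 3:
    # Python 3: the public entry point wants a bytes-returning readline and
    # detects the source encoding itself.
    tokens = list(tokenize.tokenize(io.BytesIO(source.encode('utf-8')).readline))
else:
    # Python 2: generate_tokens works directly on (byte)string lines.
    tokens = list(tokenize.generate_tokens(io.BytesIO(source.encode('utf-8')).readline))

for tok in tokens:
    print(tok[0], tok[1])   # token type and token text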
@@ -94,6 +99,7 @@ from IPython.core.display_trap import DisplayTrap
 from IPython.core.excolors import exception_colors
 from IPython.utils import PyColorize
 from IPython.utils import io
+from IPython.utils import py3compat
 from IPython.utils.data import uniq_stable
 from IPython.utils.warn import info, error
 
@@ -278,8 +284,7 @@ def _format_traceback_lines(lnum, index, lines, Colors, lvals=None,scheme=None):
         # serious refactoring, so that all of the ultratb and PyColorize code
         # is unicode-safe. So for now this is rather an ugly hack, but
         # necessary to at least have readable tracebacks. Improvements welcome!
-        if type(line)==unicode:
-            line = line.encode('utf-8', 'replace')
+        line = py3compat.cast_bytes_py2(line, 'utf-8')
 
         new_line, err = _line_format(line, 'str', scheme)
         if not err: line = new_line
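Here the explicit unicode check and UTF-8 encode are folded into one call; the helper (added to py3compat at the bottom of this diff) encodes only on Python 2 and passes text through untouched on Python 3. A rough stand-in to show the intended behaviour; the real helper lives in IPython.utils.py3compat and may differ in detail:

import sys

def cast_bytes_py2(s, encoding='utf-8'):
    # Stand-in for py3compat.cast_bytes_py2: encode text to bytes on
    # Python 2, return the input unchanged on Python 3.
    if sys.version_info[0] >= 3:
        return s
    if isinstance(s, unicode):          # branch only reachable on Python 2
        return s.encode(encoding, 'replace')
    return s

line = u"r\xe9sultat = calc()"          # a traceback line containing non-ASCII
line = cast_bytes_py2(line, 'utf-8')    # bytes on Python 2, str on Python 3
print(type(line))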
@@ -872,7 +877,8 @@ class VerboseTB(TBTools):
             try:
                 # This builds the names list in-place by capturing it from the
                 # enclosing scope.
-                tokenize.tokenize(linereader, tokeneater)
+                for token in generate_tokens(linereader):
+                    tokeneater(*token)
             except IndexError:
                 # signals exit of tokenizer
                 pass
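Python 2's tokenize.tokenize(readline, tokeneater) pushed tokens into a callback; that two-argument form is gone in Python 3, so the change pulls tokens from the generator and unpacks each 5-tuple into the existing tokeneater. A sketch of the same pattern with a simplified eater that gathers names, roughly what VerboseTB does for the variables on the failing line; the linereader and tokeneater here are stand-ins, not the real ones:

import io
import tokenize

names = []

def tokeneater(token_type, token_string, start, end, line):
    # Simplified eater: collect identifier tokens. In ultratb, termination is
    # handled by the caller, whose linereader raises IndexError when done.
    if token_type == tokenize.NAME:
        names.append(token_string)

linereader = io.StringIO(u"result = compute(value)\n").readline

# The generator replaces the Python 2 callback API: each yielded token is a
# 5-tuple that unpacks straight into the old-style tokeneater signature.
for token in tokenize.generate_tokens(linereader):
    tokeneater(*token)

print(names)   # ['result', 'compute', 'value']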
@@ -933,7 +939,7 @@ class VerboseTB(TBTools):
         # ... and format it
         exception = ['%s%s%s: %s' % (Colors.excName, etype_str,
                                      ColorsNormal, evalue_str)]
-        if type(evalue) is types.InstanceType:
+        if (not py3compat.PY3) and type(evalue) is types.InstanceType:
             try:
                 names = [w for w in dir(evalue) if isinstance(w, basestring)]
             except:
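types.InstanceType, the type of old-style class instances, only exists on Python 2, so the added guard relies on `and` short-circuiting: on Python 3 the attribute is never looked up. A small sketch of the same idea, using a local PY3 flag in place of py3compat.PY3:

import sys
import types

PY3 = sys.version_info[0] >= 3      # plays the role of py3compat.PY3

class Legacy:
    # Old-style class on Python 2 (no explicit base); ordinary class on Python 3.
    pass

evalue = Legacy()

# Without the left-hand guard, touching types.InstanceType raises
# AttributeError on Python 3, where old-style classes no longer exist.
if (not PY3) and type(evalue) is types.InstanceType:
    print("old-style instance: list its attributes the Python 2 way")
else:
    print("not an old-style instance")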
IPython/utils/PyColorize.py:

@@ -42,6 +42,13 @@ import sys
 import token
 import tokenize
 
+try:
+    generate_tokens = tokenize.generate_tokens
+except AttributeError:
+    # Python 3. Note that we use the undocumented _tokenize because it expects
+    # strings, not bytes. See also Python issue #9969.
+    generate_tokens = tokenize._tokenize
+
 from IPython.utils.coloransi import *
 
 #############################################################################
@@ -177,7 +184,8 @@ class Parser:
 
         error = False
         try:
-            tokenize.tokenize(text.readline, self)
+            for token in generate_tokens(text.readline):
+                self(*token)
         except tokenize.TokenError, ex:
             msg = ex[0]
             line = ex[1][0]
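PyColorize hands the tokenizer a file-like object of already-decoded text, which is why the comment above reaches for the undocumented tokenize._tokenize on Python 3: the public tokenize.tokenize insists on a bytes-returning readline (Python issue #9969 is about exactly this gap). The loop change mirrors the ultratb one, with the Parser instance itself acting as the token eater via self(*token). A rough Python 3 illustration of the str-versus-bytes constraint; generate_tokens is used here only as a documented stand-in for the private _tokenize the commit settles on:

import io
import tokenize

src = u"x = 'caf\xe9'  # already-decoded text, as PyColorize sees it\n"

# Public Python 3 entry point: readline must return bytes, so decoded text
# has to be re-encoded just to be tokenized.
for tok in tokenize.tokenize(io.BytesIO(src.encode('utf-8')).readline):
    pass

# A tokenizer that accepts str directly avoids that round-trip; the commit
# relies on the undocumented tokenize._tokenize for this.
for tok in tokenize.generate_tokens(io.StringIO(src).readline):
    pass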
IPython/utils/py3compat.py:

@@ -35,6 +35,7 @@ if sys.version_info[0] >= 3:
     unicode_to_str = no_code
     str_to_bytes = encode
     bytes_to_str = decode
+    cast_bytes_py2 = no_code
 
     def isidentifier(s, dotted=False):
         if dotted:
@@ -53,6 +54,7 @@ else:
     unicode_to_str = encode
     str_to_bytes = no_code
     bytes_to_str = no_code
+    cast_bytes_py2 = cast_bytes
 
     import re
     _name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
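The py3compat hunks follow the module's existing pattern: each name is bound to a pass-through on one branch and a converting helper on the other, so callers never write their own version checks. A condensed sketch of that pattern, assuming definitions of no_code, encode and cast_bytes along the lines the module already uses (their exact bodies are not part of this diff):

import sys

DEFAULT_ENCODING = 'utf-8'    # the real module derives this from the environment

def no_code(s, encoding=None):
    # Pass-through used on the branch where no conversion is needed.
    return s

def encode(s, encoding=None):
    return s.encode(encoding or DEFAULT_ENCODING, 'replace')

def cast_bytes(s, encoding=None):
    # Encode only if handed text; bytes pass straight through.
    if not isinstance(s, bytes):
        return encode(s, encoding)
    return s

if sys.version_info[0] >= 3:
    cast_bytes_py2 = no_code        # Python 3: leave str alone
else:
    cast_bytes_py2 = cast_bytes     # Python 2: force bytes for byte-oriented code

print(cast_bytes_py2(u"caf\xe9", 'utf-8'))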