Drop bundled, outdated copy of the tokenize module
Thomas Kluyver
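The bundled IPython.utils.tokenize2 module was a copy of the standard library's tokenize, so the change below is essentially a search-and-replace of the import sites. For orientation, a minimal sketch of the stdlib API the code now relies on (the sample source string is illustrative, not from the commit):

    import tokenize
    from io import StringIO

    src = "a = b.c  # trailing comment\n"
    for tok in tokenize.generate_tokens(StringIO(src).readline):
        # Each item is a 5-tuple (a TokenInfo namedtuple on Python 3):
        # (type, string, start, end, line)
        print(tokenize.tok_name[tok[0]], repr(tok[1]))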
--- a/IPython/core/inputtransformer.py
+++ b/IPython/core/inputtransformer.py
@@ -8,11 +8,11 @@ This includes the machinery to recognise and transform ``%magic`` commands,
 import abc
 import functools
 import re
+import tokenize
+from tokenize import generate_tokens, untokenize, TokenError
 from io import StringIO
 
 from IPython.core.splitinput import LineInfo
-from IPython.utils import tokenize2
-from IPython.utils.tokenize2 import generate_tokens, untokenize, TokenError
 
 #-----------------------------------------------------------------------------
 # Globals
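Of the three names imported from tokenize, untokenize is what TokenInputTransformer later uses to rebuild source from a transformed token stream. A quick round-trip check, assuming nothing beyond the stdlib:

    import tokenize
    from io import StringIO

    src = "y = f(1, 2)\n"
    toks = list(tokenize.generate_tokens(StringIO(src).readline))
    # With full 5-tuples (positions included), untokenize reconstructs
    # the original layout for straightforward input like this.
    print(repr(tokenize.untokenize(toks)))  # 'y = f(1, 2)\n'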
@@ -140,10 +140,10 @@ class TokenInputTransformer(InputTransformer):
             for intok in self.tokenizer:
                 tokens.append(intok)
                 t = intok[0]
-                if t == tokenize2.NEWLINE or (stop_at_NL and t == tokenize2.NL):
+                if t == tokenize.NEWLINE or (stop_at_NL and t == tokenize.NL):
                     # Stop before we try to pull a line we don't have yet
                     break
-                elif t == tokenize2.ERRORTOKEN:
+                elif t == tokenize.ERRORTOKEN:
                     stop_at_NL = True
         except TokenError:
             # Multi-line statement - stop and try again with the next line
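The loop above stops on tokenize.NEWLINE, which ends a logical line, whereas tokenize.NL marks a line break that does not end a statement (inside brackets, after a comment, or on a blank line); an ERRORTOKEN relaxes the stop condition to either kind of break. A small illustration of the distinction, with an input string of my own:

    import tokenize
    from io import StringIO

    src = "x = (1,\n     2)\n# comment\n"
    for tok in tokenize.generate_tokens(StringIO(src).readline):
        if tok[0] in (tokenize.NEWLINE, tokenize.NL):
            print(tokenize.tok_name[tok[0]], "at row", tok[2][0])
    # NL appears inside the parenthesised continuation and after the
    # comment line; NEWLINE only where the statement actually ends.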
@@ -319,7 +319,7 @@ def has_comment(src):
     comment : bool
         True if source has a comment.
     """
-    return (tokenize2.COMMENT in _line_tokens(src))
+    return (tokenize.COMMENT in _line_tokens(src))
 
 def ends_in_comment_or_string(src):
     """Indicates whether or not an input line ends in a comment or within
@@ -336,7 +336,7 @@ def ends_in_comment_or_string(src):
         True if source ends in a comment or multiline string.
     """
     toktypes = _line_tokens(src)
-    return (tokenize2.COMMENT in toktypes) or (_MULTILINE_STRING in toktypes)
+    return (tokenize.COMMENT in toktypes) or (_MULTILINE_STRING in toktypes)
 
 
 @StatelessInputTransformer.wrap
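Both helpers reduce to a membership test on the set of token types seen in one line. A simplified stand-in for IPython's _line_tokens (line_token_types is a hypothetical name for this sketch; the real helper also records the _MULTILINE_STRING sentinel when tokenization fails inside an unterminated string):

    import tokenize
    from io import StringIO

    def line_token_types(src):
        # Collect the token types for a single line of source.
        types = set()
        try:
            for tok in tokenize.generate_tokens(StringIO(src).readline):
                types.add(tok[0])
        except tokenize.TokenError:
            pass  # unterminated multi-line construct; sentinel handling elided
        return types

    print(tokenize.COMMENT in line_token_types("a = 1  # note\n"))  # True
    print(tokenize.COMMENT in line_token_types("a = 1\n"))          # False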
--- a/IPython/utils/tokenutil.py
+++ b/IPython/utils/tokenutil.py
@@ -7,7 +7,7 @@ from collections import namedtuple
 from io import StringIO
 from keyword import iskeyword
 
-from . import tokenize2
+import tokenize
 
 
 Token = namedtuple('Token', ['token', 'text', 'start', 'end', 'line'])
@@ -15,9 +15,9 @@ Token = namedtuple('Token', ['token', 'text', 'start', 'end', 'line'])
 def generate_tokens(readline):
     """wrap generate_tokens to catch EOF errors"""
     try:
-        for token in tokenize2.generate_tokens(readline):
+        for token in tokenize.generate_tokens(readline):
             yield token
-    except tokenize2.TokenError:
+    except tokenize.TokenError:
         # catch EOF error
         return
 
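The wrapper exists because tokenize.generate_tokens raises TokenError when the input ends mid-statement, which is routine for the partial cells this utility inspects. A self-contained sketch of the behaviour (renamed safe_tokens here to avoid shadowing the stdlib function in the example):

    import tokenize
    from io import StringIO

    def safe_tokens(readline):
        # Same shape as the wrapper above.
        try:
            for token in tokenize.generate_tokens(readline):
                yield token
        except tokenize.TokenError:
            return  # EOF inside a multi-line statement: stop quietly

    # Unclosed call: the raw generator would raise TokenError at EOF,
    # but this loop simply runs out of tokens.
    for tok in safe_tokens(StringIO("func(a,\n").readline):
        print(tokenize.tok_name[tok[0]], repr(tok[1]))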
@@ -99,12 +99,12 @@ def token_at_cursor(cell, cursor_pos=0):
             # don't consume it
             break
 
-        if tok.token == tokenize2.NAME and not iskeyword(tok.text):
-            if names and tokens and tokens[-1].token == tokenize2.OP and tokens[-1].text == '.':
+        if tok.token == tokenize.NAME and not iskeyword(tok.text):
+            if names and tokens and tokens[-1].token == tokenize.OP and tokens[-1].text == '.':
                 names[-1] = "%s.%s" % (names[-1], tok.text)
             else:
                 names.append(tok.text)
-        elif tok.token == tokenize2.OP:
+        elif tok.token == tokenize.OP:
             if tok.text == '=' and names:
                 # don't inspect the lhs of an assignment
                 names.pop(-1)
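The NAME branch glues attribute access back together: when the previous token was a '.', the new name is appended to the last entry, so the cursor anywhere in obj.attr resolves to the dotted name, while '=' discards a left-hand side that is about to be assigned. A standalone rendering of just that accumulation (not the full token_at_cursor, which also tracks cursor offsets):

    import tokenize
    from io import StringIO
    from keyword import iskeyword

    names, tokens = [], []
    for tok in tokenize.generate_tokens(StringIO("x = obj.attr.method\n").readline):
        if tok[0] == tokenize.NAME and not iskeyword(tok[1]):
            if names and tokens and tokens[-1][0] == tokenize.OP and tokens[-1][1] == '.':
                names[-1] = "%s.%s" % (names[-1], tok[1])
            else:
                names.append(tok[1])
        elif tok[0] == tokenize.OP:
            if tok[1] == '=' and names:
                names.pop(-1)  # don't inspect the lhs of an assignment
        tokens.append(tok)

    print(names)  # ['obj.attr.method']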
IPython/utils/tokenize2.py: file removed (590 lines changed; diff collapsed)