Drop bundled, outdated copy of the tokenize module
Thomas Kluyver
@@ -8,11 +8,11 @@ This includes the machinery to recognise and transform ``%magic`` commands,
 import abc
 import functools
 import re
+import tokenize
+from tokenize import generate_tokens, untokenize, TokenError
 from io import StringIO
 
 from IPython.core.splitinput import LineInfo
-from IPython.utils import tokenize2
-from IPython.utils.tokenize2 import generate_tokens, untokenize, TokenError
 
 #-----------------------------------------------------------------------------
 # Globals
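
The first file (the input transformer machinery, per the module docstring the hunk header quotes) now imports the standard library tokenize directly instead of the bundled IPython.utils.tokenize2 copy. A minimal sketch, not part of the diff, showing that the three imported names all exist in the stdlib module on Python 3 and round-trip a line of source:

    import tokenize
    from io import StringIO
    from tokenize import generate_tokens, untokenize, TokenError

    # Tokenize a line and reassemble it with the stdlib tokenizer.
    src = "a = 1\n"
    toks = list(generate_tokens(StringIO(src).readline))
    assert untokenize(toks) == src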
@@ -140,10 +140,10 @@ class TokenInputTransformer(InputTransformer):
             for intok in self.tokenizer:
                 tokens.append(intok)
                 t = intok[0]
-                if t == tokenize2.NEWLINE or (stop_at_NL and t == tokenize2.NL):
+                if t == tokenize.NEWLINE or (stop_at_NL and t == tokenize.NL):
                     # Stop before we try to pull a line we don't have yet
                     break
-                elif t == tokenize2.ERRORTOKEN:
+                elif t == tokenize.ERRORTOKEN:
                     stop_at_NL = True
         except TokenError:
             # Multi-line statement - stop and try again with the next line
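
The loop distinguishes NEWLINE (end of a logical line) from NL (a cosmetic line break, e.g. inside brackets): it stops at NEWLINE, or at NL only once an ERRORTOKEN has been seen. Both constants carry over unchanged from the bundled copy to the stdlib module. A small sketch of the distinction, assuming nothing beyond the stdlib:

    import tokenize
    from io import StringIO

    src = "x = (1,\n     2)\n"
    for tok in tokenize.generate_tokens(StringIO(src).readline):
        if tok.type in (tokenize.NEWLINE, tokenize.NL):
            print(tokenize.tok_name[tok.type], repr(tok.line))
    # NL 'x = (1,\n'        <- break inside the parentheses
    # NEWLINE '     2)\n'   <- the logical line ends here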
@@ -319,7 +319,7 @@ def has_comment(src):
     comment : bool
         True if source has a comment.
     """
-    return (tokenize2.COMMENT in _line_tokens(src))
+    return (tokenize.COMMENT in _line_tokens(src))
 
 def ends_in_comment_or_string(src):
     """Indicates whether or not an input line ends in a comment or within
@@ -336,7 +336,7 @@ def ends_in_comment_or_string(src):
         True if source ends in a comment or multiline string.
     """
     toktypes = _line_tokens(src)
-    return (tokenize2.COMMENT in toktypes) or (_MULTILINE_STRING in toktypes)
+    return (tokenize.COMMENT in toktypes) or (_MULTILINE_STRING in toktypes)
 
 
 @StatelessInputTransformer.wrap
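
Both helpers just test whether tokenize.COMMENT appears in the set of token types that _line_tokens() collects for the line. A rough stand-in for that helper (hypothetical name _line_token_types; the real implementation lives earlier in the same file) to illustrate the check:

    import tokenize
    from io import StringIO

    def _line_token_types(src):  # hypothetical stand-in for _line_tokens()
        types = set()
        try:
            for tok in tokenize.generate_tokens(StringIO(src).readline):
                types.add(tok.type)
        except tokenize.TokenError:
            pass  # incomplete input; keep the token types seen so far
        return types

    print(tokenize.COMMENT in _line_token_types("a = 1  # note\n"))  # True
    print(tokenize.COMMENT in _line_token_types("a = 1\n"))          # False

The remaining hunks come from a second file (the token utilities used for object inspection, apparently IPython/utils/tokenutil.py), where the relative import of the bundled copy becomes a plain stdlib import.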
@@ -7,7 +7,7 @@ from collections import namedtuple
 from io import StringIO
 from keyword import iskeyword
 
-from . import tokenize2
+import tokenize
 
 
 Token = namedtuple('Token', ['token', 'text', 'start', 'end', 'line'])
@@ -15,9 +15,9 b" Token = namedtuple('Token', ['token', 'text', 'start', 'end', 'line'])"
15 def generate_tokens(readline):
15 def generate_tokens(readline):
16 """wrap generate_tokens to catch EOF errors"""
16 """wrap generate_tokens to catch EOF errors"""
17 try:
17 try:
18 for token in tokenize2.generate_tokens(readline):
18 for token in tokenize.generate_tokens(readline):
19 yield token
19 yield token
20 except tokenize2.TokenError:
20 except tokenize.TokenError:
21 # catch EOF error
21 # catch EOF error
22 return
22 return
23
23
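
This wrapper exists because the stdlib tokenizer raises TokenError when input ends mid-statement; swallowing it lets callers tokenize partial cell contents and simply stop at the last complete token. A sketch of the behaviour it papers over:

    import tokenize
    from io import StringIO

    def safe_tokens(src):
        """Yield tokens, stopping quietly when input is incomplete."""
        try:
            for token in tokenize.generate_tokens(StringIO(src).readline):
                yield token
        except tokenize.TokenError:
            return  # e.g. "EOF in multi-line statement" for an open bracket

    print([t.string for t in safe_tokens("f(1,")])  # tokens up to the error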
@@ -99,12 +99,12 @@ def token_at_cursor(cell, cursor_pos=0):
             # don't consume it
             break
 
-        if tok.token == tokenize2.NAME and not iskeyword(tok.text):
-            if names and tokens and tokens[-1].token == tokenize2.OP and tokens[-1].text == '.':
+        if tok.token == tokenize.NAME and not iskeyword(tok.text):
+            if names and tokens and tokens[-1].token == tokenize.OP and tokens[-1].text == '.':
                 names[-1] = "%s.%s" % (names[-1], tok.text)
             else:
                 names.append(tok.text)
-        elif tok.token == tokenize2.OP:
+        elif tok.token == tokenize.OP:
             if tok.text == '=' and names:
                 # don't inspect the lhs of an assignment
                 names.pop(-1)
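
The hunk above shows dotted-name accumulation: a NAME token that follows a "." OP extends the previous name instead of starting a new one, so "os.path.join" is collected as a single candidate for inspection. A self-contained sketch of the same idea against the stdlib tokenizer (the real function tracks cursor offsets as well):

    import tokenize
    from io import StringIO
    from keyword import iskeyword

    names, prev = [], None
    for tok in tokenize.generate_tokens(StringIO("os.path.join\n").readline):
        if tok.type == tokenize.NAME and not iskeyword(tok.string):
            if names and prev and prev.type == tokenize.OP and prev.string == '.':
                names[-1] = "%s.%s" % (names[-1], tok.string)
            else:
                names.append(tok.string)
        prev = tok

    print(names)  # ['os.path.join']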
(file removed, presumably the bundled tokenize2 module named in the commit title; collapsed diff: 590 lines changed)