Drop bundled, outdated copy of the tokenize module
Thomas Kluyver
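This commit replaces IPython's bundled `tokenize2` module with the standard library `tokenize`: the two hunks below switch the imports and token-type constants over to the stdlib names, and the bundled copy itself is removed. For orientation only (not part of the commit), a minimal sketch of the stdlib calls the new imports rely on:

```python
# Minimal sketch of the stdlib tokenize API that replaces tokenize2.
import tokenize
from io import StringIO

src = "files = [1, 2, 3]\n"

# generate_tokens() takes a readline callable and yields TokenInfo
# 5-tuples: (type, string, (srow, scol), (erow, ecol), line).
tokens = list(tokenize.generate_tokens(StringIO(src).readline))
for tok in tokens:
    print(tokenize.tok_name[tok.type], repr(tok.string))

# untokenize() rebuilds source from a token stream; with full 5-tuples
# it uses the recorded positions to restore the original spacing.
print(tokenize.untokenize(tokens), end="")
```

`TokenError` is raised when the tokenizer hits end of input inside an unfinished construct, which both hunks below rely on to detect incomplete multi-line input.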
@@ -1,536 +1,536 @@
1 1 """DEPRECATED: Input transformer classes to support IPython special syntax.
2 2
3 3 This module was deprecated in IPython 7.0, in favour of inputtransformer2.
4 4
5 5 This includes the machinery to recognise and transform ``%magic`` commands,
6 6 ``!system`` commands, ``help?`` querying, prompt stripping, and so forth.
7 7 """
8 8 import abc
9 9 import functools
10 10 import re
11 import tokenize
12 from tokenize import generate_tokens, untokenize, TokenError
11 13 from io import StringIO
12 14
13 15 from IPython.core.splitinput import LineInfo
14 from IPython.utils import tokenize2
15 from IPython.utils.tokenize2 import generate_tokens, untokenize, TokenError
16 16
17 17 #-----------------------------------------------------------------------------
18 18 # Globals
19 19 #-----------------------------------------------------------------------------
20 20
21 21 # The escape sequences that define the syntax transformations IPython will
22 22 # apply to user input. These can NOT be just changed here: many regular
23 23 # expressions and other parts of the code may use their hardcoded values, and
24 24 # for all intents and purposes they constitute the 'IPython syntax', so they
25 25 # should be considered fixed.
26 26
27 27 ESC_SHELL = '!' # Send line to underlying system shell
28 28 ESC_SH_CAP = '!!' # Send line to system shell and capture output
29 29 ESC_HELP = '?' # Find information about object
30 30 ESC_HELP2 = '??' # Find extra-detailed information about object
31 31 ESC_MAGIC = '%' # Call magic function
32 32 ESC_MAGIC2 = '%%' # Call cell-magic function
33 33 ESC_QUOTE = ',' # Split args on whitespace, quote each as string and call
34 34 ESC_QUOTE2 = ';' # Quote all args as a single string, call
35 35 ESC_PAREN = '/' # Call first argument with rest of line as arguments
36 36
37 37 ESC_SEQUENCES = [ESC_SHELL, ESC_SH_CAP, ESC_HELP ,\
38 38 ESC_HELP2, ESC_MAGIC, ESC_MAGIC2,\
39 39 ESC_QUOTE, ESC_QUOTE2, ESC_PAREN ]
40 40
41 41
42 42 class InputTransformer(metaclass=abc.ABCMeta):
43 43 """Abstract base class for line-based input transformers."""
44 44
45 45 @abc.abstractmethod
46 46 def push(self, line):
47 47 """Send a line of input to the transformer, returning the transformed
48 48 input or None if the transformer is waiting for more input.
49 49
50 50 Must be overridden by subclasses.
51 51
52 52 Implementations may raise ``SyntaxError`` if the input is invalid. No
53 53 other exceptions may be raised.
54 54 """
55 55 pass
56 56
57 57 @abc.abstractmethod
58 58 def reset(self):
59 59 """Return, transformed, any lines that the transformer has accumulated,
60 60 and reset its internal state.
61 61
62 62 Must be overridden by subclasses.
63 63 """
64 64 pass
65 65
66 66 @classmethod
67 67 def wrap(cls, func):
68 68 """Can be used by subclasses as a decorator, to return a factory that
69 69 will allow instantiation with the decorated object.
70 70 """
71 71 @functools.wraps(func)
72 72 def transformer_factory(**kwargs):
73 73 return cls(func, **kwargs)
74 74
75 75 return transformer_factory
76 76
77 77 class StatelessInputTransformer(InputTransformer):
78 78 """Wrapper for a stateless input transformer implemented as a function."""
79 79 def __init__(self, func):
80 80 self.func = func
81 81
82 82 def __repr__(self):
83 83 return "StatelessInputTransformer(func={0!r})".format(self.func)
84 84
85 85 def push(self, line):
86 86 """Send a line of input to the transformer, returning the
87 87 transformed input."""
88 88 return self.func(line)
89 89
90 90 def reset(self):
91 91 """No-op - exists for compatibility."""
92 92 pass
93 93
94 94 class CoroutineInputTransformer(InputTransformer):
95 95 """Wrapper for an input transformer implemented as a coroutine."""
96 96 def __init__(self, coro, **kwargs):
97 97 # Prime it
98 98 self.coro = coro(**kwargs)
99 99 next(self.coro)
100 100
101 101 def __repr__(self):
102 102 return "CoroutineInputTransformer(coro={0!r})".format(self.coro)
103 103
104 104 def push(self, line):
105 105 """Send a line of input to the transformer, returning the
106 106 transformed input or None if the transformer is waiting for more
107 107 input.
108 108 """
109 109 return self.coro.send(line)
110 110
111 111 def reset(self):
112 112 """Return, transformed, any lines that the transformer has
113 113 accumulated, and reset its internal state.
114 114 """
115 115 return self.coro.send(None)
116 116
117 117 class TokenInputTransformer(InputTransformer):
118 118 """Wrapper for a token-based input transformer.
119 119
120 120 func should accept a list of tokens (5-tuples, see tokenize docs), and
121 121 return an iterable which can be passed to tokenize.untokenize().
122 122 """
123 123 def __init__(self, func):
124 124 self.func = func
125 125 self.buf = []
126 126 self.reset_tokenizer()
127 127
128 128 def reset_tokenizer(self):
129 129 it = iter(self.buf)
130 130 self.tokenizer = generate_tokens(it.__next__)
131 131
132 132 def push(self, line):
133 133 self.buf.append(line + '\n')
134 134 if all(l.isspace() for l in self.buf):
135 135 return self.reset()
136 136
137 137 tokens = []
138 138 stop_at_NL = False
139 139 try:
140 140 for intok in self.tokenizer:
141 141 tokens.append(intok)
142 142 t = intok[0]
143 if t == tokenize2.NEWLINE or (stop_at_NL and t == tokenize2.NL):
143 if t == tokenize.NEWLINE or (stop_at_NL and t == tokenize.NL):
144 144 # Stop before we try to pull a line we don't have yet
145 145 break
146 elif t == tokenize2.ERRORTOKEN:
146 elif t == tokenize.ERRORTOKEN:
147 147 stop_at_NL = True
148 148 except TokenError:
149 149 # Multi-line statement - stop and try again with the next line
150 150 self.reset_tokenizer()
151 151 return None
152 152
153 153 return self.output(tokens)
154 154
155 155 def output(self, tokens):
156 156 self.buf.clear()
157 157 self.reset_tokenizer()
158 158 return untokenize(self.func(tokens)).rstrip('\n')
159 159
160 160 def reset(self):
161 161 l = ''.join(self.buf)
162 162 self.buf.clear()
163 163 self.reset_tokenizer()
164 164 if l:
165 165 return l.rstrip('\n')
166 166
167 167 class assemble_python_lines(TokenInputTransformer):
168 168 def __init__(self):
169 169 super(assemble_python_lines, self).__init__(None)
170 170
171 171 def output(self, tokens):
172 172 return self.reset()
173 173
174 174 @CoroutineInputTransformer.wrap
175 175 def assemble_logical_lines():
176 176 """Join lines following explicit line continuations (\)"""
177 177 line = ''
178 178 while True:
179 179 line = (yield line)
180 180 if not line or line.isspace():
181 181 continue
182 182
183 183 parts = []
184 184 while line is not None:
185 185 if line.endswith('\\') and (not has_comment(line)):
186 186 parts.append(line[:-1])
187 187 line = (yield None) # Get another line
188 188 else:
189 189 parts.append(line)
190 190 break
191 191
192 192 # Output
193 193 line = ''.join(parts)
194 194
195 195 # Utilities
196 196 def _make_help_call(target, esc, lspace, next_input=None):
197 197 """Prepares a pinfo(2)/psearch call from a target name and the escape
198 198 (i.e. ? or ??)"""
199 199 method = 'pinfo2' if esc == '??' \
200 200 else 'psearch' if '*' in target \
201 201 else 'pinfo'
202 202 arg = " ".join([method, target])
203 203 #Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
204 204 t_magic_name, _, t_magic_arg_s = arg.partition(' ')
205 205 t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
206 206 if next_input is None:
207 207 return '%sget_ipython().run_line_magic(%r, %r)' % (lspace, t_magic_name, t_magic_arg_s)
208 208 else:
209 209 return '%sget_ipython().set_next_input(%r);get_ipython().run_line_magic(%r, %r)' % \
210 210 (lspace, next_input, t_magic_name, t_magic_arg_s)
211 211
212 212 # These define the transformations for the different escape characters.
213 213 def _tr_system(line_info):
214 214 "Translate lines escaped with: !"
215 215 cmd = line_info.line.lstrip().lstrip(ESC_SHELL)
216 216 return '%sget_ipython().system(%r)' % (line_info.pre, cmd)
217 217
218 218 def _tr_system2(line_info):
219 219 "Translate lines escaped with: !!"
220 220 cmd = line_info.line.lstrip()[2:]
221 221 return '%sget_ipython().getoutput(%r)' % (line_info.pre, cmd)
222 222
223 223 def _tr_help(line_info):
224 224 "Translate lines escaped with: ?/??"
225 225 # A naked help line should just fire the intro help screen
226 226 if not line_info.line[1:]:
227 227 return 'get_ipython().show_usage()'
228 228
229 229 return _make_help_call(line_info.ifun, line_info.esc, line_info.pre)
230 230
231 231 def _tr_magic(line_info):
232 232 "Translate lines escaped with: %"
233 233 tpl = '%sget_ipython().run_line_magic(%r, %r)'
234 234 if line_info.line.startswith(ESC_MAGIC2):
235 235 return line_info.line
236 236 cmd = ' '.join([line_info.ifun, line_info.the_rest]).strip()
237 237 #Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
238 238 t_magic_name, _, t_magic_arg_s = cmd.partition(' ')
239 239 t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
240 240 return tpl % (line_info.pre, t_magic_name, t_magic_arg_s)
241 241
242 242 def _tr_quote(line_info):
243 243 "Translate lines escaped with: ,"
244 244 return '%s%s("%s")' % (line_info.pre, line_info.ifun,
245 245 '", "'.join(line_info.the_rest.split()) )
246 246
247 247 def _tr_quote2(line_info):
248 248 "Translate lines escaped with: ;"
249 249 return '%s%s("%s")' % (line_info.pre, line_info.ifun,
250 250 line_info.the_rest)
251 251
252 252 def _tr_paren(line_info):
253 253 "Translate lines escaped with: /"
254 254 return '%s%s(%s)' % (line_info.pre, line_info.ifun,
255 255 ", ".join(line_info.the_rest.split()))
256 256
257 257 tr = { ESC_SHELL : _tr_system,
258 258 ESC_SH_CAP : _tr_system2,
259 259 ESC_HELP : _tr_help,
260 260 ESC_HELP2 : _tr_help,
261 261 ESC_MAGIC : _tr_magic,
262 262 ESC_QUOTE : _tr_quote,
263 263 ESC_QUOTE2 : _tr_quote2,
264 264 ESC_PAREN : _tr_paren }
265 265
266 266 @StatelessInputTransformer.wrap
267 267 def escaped_commands(line):
268 268 """Transform escaped commands - %magic, !system, ?help + various autocalls.
269 269 """
270 270 if not line or line.isspace():
271 271 return line
272 272 lineinf = LineInfo(line)
273 273 if lineinf.esc not in tr:
274 274 return line
275 275
276 276 return tr[lineinf.esc](lineinf)
277 277
278 278 _initial_space_re = re.compile(r'\s*')
279 279
280 280 _help_end_re = re.compile(r"""(%{0,2}
281 281 [a-zA-Z_*][\w*]* # Variable name
282 282 (\.[a-zA-Z_*][\w*]*)* # .etc.etc
283 283 )
284 284 (\?\??)$ # ? or ??
285 285 """,
286 286 re.VERBOSE)
287 287
288 288 # Extra pseudotokens for multiline strings and data structures
289 289 _MULTILINE_STRING = object()
290 290 _MULTILINE_STRUCTURE = object()
291 291
292 292 def _line_tokens(line):
293 293 """Helper for has_comment and ends_in_comment_or_string."""
294 294 readline = StringIO(line).readline
295 295 toktypes = set()
296 296 try:
297 297 for t in generate_tokens(readline):
298 298 toktypes.add(t[0])
299 299 except TokenError as e:
300 300 # There are only two cases where a TokenError is raised.
301 301 if 'multi-line string' in e.args[0]:
302 302 toktypes.add(_MULTILINE_STRING)
303 303 else:
304 304 toktypes.add(_MULTILINE_STRUCTURE)
305 305 return toktypes
306 306
307 307 def has_comment(src):
308 308 """Indicate whether an input line has (i.e. ends in, or is) a comment.
309 309
310 310 This uses tokenize, so it can distinguish comments from # inside strings.
311 311
312 312 Parameters
313 313 ----------
314 314 src : string
315 315 A single line input string.
316 316
317 317 Returns
318 318 -------
319 319 comment : bool
320 320 True if source has a comment.
321 321 """
322 return (tokenize2.COMMENT in _line_tokens(src))
322 return (tokenize.COMMENT in _line_tokens(src))
323 323
324 324 def ends_in_comment_or_string(src):
325 325 """Indicates whether or not an input line ends in a comment or within
326 326 a multiline string.
327 327
328 328 Parameters
329 329 ----------
330 330 src : string
331 331 A single line input string.
332 332
333 333 Returns
334 334 -------
335 335 comment : bool
336 336 True if source ends in a comment or multiline string.
337 337 """
338 338 toktypes = _line_tokens(src)
339 return (tokenize2.COMMENT in toktypes) or (_MULTILINE_STRING in toktypes)
339 return (tokenize.COMMENT in toktypes) or (_MULTILINE_STRING in toktypes)
340 340
341 341
342 342 @StatelessInputTransformer.wrap
343 343 def help_end(line):
344 344 """Translate lines with ?/?? at the end"""
345 345 m = _help_end_re.search(line)
346 346 if m is None or ends_in_comment_or_string(line):
347 347 return line
348 348 target = m.group(1)
349 349 esc = m.group(3)
350 350 lspace = _initial_space_re.match(line).group(0)
351 351
352 352 # If we're mid-command, put it back on the next prompt for the user.
353 353 next_input = line.rstrip('?') if line.strip() != m.group(0) else None
354 354
355 355 return _make_help_call(target, esc, lspace, next_input)
356 356
357 357
358 358 @CoroutineInputTransformer.wrap
359 359 def cellmagic(end_on_blank_line=False):
360 360 """Captures & transforms cell magics.
361 361
362 362 After a cell magic is started, this stores up any lines it gets until it is
363 363 reset (sent None).
364 364 """
365 365 tpl = 'get_ipython().run_cell_magic(%r, %r, %r)'
366 366 cellmagic_help_re = re.compile('%%\w+\?')
367 367 line = ''
368 368 while True:
369 369 line = (yield line)
370 370 # consume leading empty lines
371 371 while not line:
372 372 line = (yield line)
373 373
374 374 if not line.startswith(ESC_MAGIC2):
375 375 # This isn't a cell magic, idle waiting for reset then start over
376 376 while line is not None:
377 377 line = (yield line)
378 378 continue
379 379
380 380 if cellmagic_help_re.match(line):
381 381 # This case will be handled by help_end
382 382 continue
383 383
384 384 first = line
385 385 body = []
386 386 line = (yield None)
387 387 while (line is not None) and \
388 388 ((line.strip() != '') or not end_on_blank_line):
389 389 body.append(line)
390 390 line = (yield None)
391 391
392 392 # Output
393 393 magic_name, _, first = first.partition(' ')
394 394 magic_name = magic_name.lstrip(ESC_MAGIC2)
395 395 line = tpl % (magic_name, first, u'\n'.join(body))
396 396
397 397
398 398 def _strip_prompts(prompt_re, initial_re=None, turnoff_re=None):
399 399 """Remove matching input prompts from a block of input.
400 400
401 401 Parameters
402 402 ----------
403 403 prompt_re : regular expression
404 404 A regular expression matching any input prompt (including continuation)
405 405 initial_re : regular expression, optional
406 406 A regular expression matching only the initial prompt, but not continuation.
407 407 If no initial expression is given, prompt_re will be used everywhere.
408 408 Used mainly for plain Python prompts, where the continuation prompt
409 409 ``...`` is a valid Python expression in Python 3, so shouldn't be stripped.
410 410
411 411 If initial_re and prompt_re differ,
412 412 only initial_re will be tested against the first line.
413 413 If any prompt is found on the first two lines,
414 414 prompts will be stripped from the rest of the block.
415 415 """
416 416 if initial_re is None:
417 417 initial_re = prompt_re
418 418 line = ''
419 419 while True:
420 420 line = (yield line)
421 421
422 422 # First line of cell
423 423 if line is None:
424 424 continue
425 425 out, n1 = initial_re.subn('', line, count=1)
426 426 if turnoff_re and not n1:
427 427 if turnoff_re.match(line):
428 428 # We're in e.g. a cell magic; disable this transformer for
429 429 # the rest of the cell.
430 430 while line is not None:
431 431 line = (yield line)
432 432 continue
433 433
434 434 line = (yield out)
435 435
436 436 if line is None:
437 437 continue
438 438 # check for any prompt on the second line of the cell,
439 439 # because people often copy from just after the first prompt,
440 440 # so we might not see it in the first line.
441 441 out, n2 = prompt_re.subn('', line, count=1)
442 442 line = (yield out)
443 443
444 444 if n1 or n2:
445 445 # Found a prompt in the first two lines - check for it in
446 446 # the rest of the cell as well.
447 447 while line is not None:
448 448 line = (yield prompt_re.sub('', line, count=1))
449 449
450 450 else:
451 451 # Prompts not in input - wait for reset
452 452 while line is not None:
453 453 line = (yield line)
454 454
455 455 @CoroutineInputTransformer.wrap
456 456 def classic_prompt():
457 457 """Strip the >>>/... prompts of the Python interactive shell."""
458 458 # FIXME: non-capturing version (?:...) usable?
459 459 prompt_re = re.compile(r'^(>>>|\.\.\.)( |$)')
460 460 initial_re = re.compile(r'^>>>( |$)')
461 461 # Any %magic/!system is IPython syntax, so we needn't look for >>> prompts
462 462 turnoff_re = re.compile(r'^[%!]')
463 463 return _strip_prompts(prompt_re, initial_re, turnoff_re)
464 464
465 465 @CoroutineInputTransformer.wrap
466 466 def ipy_prompt():
467 467 """Strip IPython's In [1]:/...: prompts."""
468 468 # FIXME: non-capturing version (?:...) usable?
469 469 prompt_re = re.compile(r'^(In \[\d+\]: |\s*\.{3,}: ?)')
470 470 # Disable prompt stripping inside cell magics
471 471 turnoff_re = re.compile(r'^%%')
472 472 return _strip_prompts(prompt_re, turnoff_re=turnoff_re)
473 473
474 474
475 475 @CoroutineInputTransformer.wrap
476 476 def leading_indent():
477 477 """Remove leading indentation.
478 478
479 479 If the first line starts with spaces or tabs, the same whitespace will be
480 480 removed from each following line until it is reset.
481 481 """
482 482 space_re = re.compile(r'^[ \t]+')
483 483 line = ''
484 484 while True:
485 485 line = (yield line)
486 486
487 487 if line is None:
488 488 continue
489 489
490 490 m = space_re.match(line)
491 491 if m:
492 492 space = m.group(0)
493 493 while line is not None:
494 494 if line.startswith(space):
495 495 line = line[len(space):]
496 496 line = (yield line)
497 497 else:
498 498 # No leading spaces - wait for reset
499 499 while line is not None:
500 500 line = (yield line)
501 501
502 502
503 503 _assign_pat = \
504 504 r'''(?P<lhs>(\s*)
505 505 ([\w\.]+) # Initial identifier
506 506 (\s*,\s*
507 507 \*?[\w\.]+)* # Further identifiers for unpacking
508 508 \s*?,? # Trailing comma
509 509 )
510 510 \s*=\s*
511 511 '''
512 512
513 513 assign_system_re = re.compile(r'{}!\s*(?P<cmd>.*)'.format(_assign_pat), re.VERBOSE)
514 514 assign_system_template = '%s = get_ipython().getoutput(%r)'
515 515 @StatelessInputTransformer.wrap
516 516 def assign_from_system(line):
517 517 """Transform assignment from system commands (e.g. files = !ls)"""
518 518 m = assign_system_re.match(line)
519 519 if m is None:
520 520 return line
521 521
522 522 return assign_system_template % m.group('lhs', 'cmd')
523 523
524 524 assign_magic_re = re.compile(r'{}%\s*(?P<cmd>.*)'.format(_assign_pat), re.VERBOSE)
525 525 assign_magic_template = '%s = get_ipython().run_line_magic(%r, %r)'
526 526 @StatelessInputTransformer.wrap
527 527 def assign_from_magic(line):
528 528 """Transform assignment from magic commands (e.g. a = %who_ls)"""
529 529 m = assign_magic_re.match(line)
530 530 if m is None:
531 531 return line
532 532 #Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
533 533 m_lhs, m_cmd = m.group('lhs', 'cmd')
534 534 t_magic_name, _, t_magic_arg_s = m_cmd.partition(' ')
535 535 t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
536 536 return assign_magic_template % (m_lhs, t_magic_name, t_magic_arg_s)
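For context on the hunk above: `TokenInputTransformer` buffers incoming lines, re-tokenizes the whole buffer with `generate_tokens`, and treats `TokenError` as "statement not finished yet" before rebuilding source with `untokenize`. A hedged, standalone sketch of that pattern against the stdlib module; `swap_plus_minus` and `demo` are illustrative names, not IPython API:

```python
# Buffer-and-retokenize pattern, sketched directly on stdlib tokenize.
import tokenize

def swap_plus_minus(tokens):
    """Toy token-level transform: swap '+' and '-' operators."""
    for typ, text, start, end, line in tokens:
        if typ == tokenize.OP and text in "+-":
            text = "-" if text == "+" else "+"
        yield typ, text, start, end, line

def demo(lines):
    buf = []
    for line in lines:
        buf.append(line + "\n")
        it = iter(buf)
        try:
            # Re-tokenize everything buffered so far.
            toks = list(tokenize.generate_tokens(it.__next__))
        except tokenize.TokenError:
            # Multi-line statement not finished yet; wait for more input,
            # as TokenInputTransformer.push() does.
            continue
        return tokenize.untokenize(swap_plus_minus(toks)).rstrip("\n")

print(demo(["x = (1 +", "     2)"]))   # '+' becomes '-' in the rebuilt source
```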
@@ -1,130 +1,130 @@
1 1 """Token-related utilities"""
2 2
3 3 # Copyright (c) IPython Development Team.
4 4 # Distributed under the terms of the Modified BSD License.
5 5
6 6 from collections import namedtuple
7 7 from io import StringIO
8 8 from keyword import iskeyword
9 9
10 from . import tokenize2
10 import tokenize
11 11
12 12
13 13 Token = namedtuple('Token', ['token', 'text', 'start', 'end', 'line'])
14 14
15 15 def generate_tokens(readline):
16 16 """wrap generate_tokens to catch EOF errors"""
17 17 try:
18 for token in tokenize2.generate_tokens(readline):
18 for token in tokenize.generate_tokens(readline):
19 19 yield token
20 except tokenize2.TokenError:
20 except tokenize.TokenError:
21 21 # catch EOF error
22 22 return
23 23
24 24 def line_at_cursor(cell, cursor_pos=0):
25 25 """Return the line in a cell at a given cursor position
26 26
27 27 Used for calling line-based APIs that don't support multi-line input, yet.
28 28
29 29 Parameters
30 30 ----------
31 31
32 32 cell: str
33 33 multiline block of text
34 34 cursor_pos: integer
35 35 the cursor position
36 36
37 37 Returns
38 38 -------
39 39
40 40 (line, offset): (string, integer)
41 41 The line with the current cursor, and the character offset of the start of the line.
42 42 """
43 43 offset = 0
44 44 lines = cell.splitlines(True)
45 45 for line in lines:
46 46 next_offset = offset + len(line)
47 47 if not line.endswith('\n'):
48 48 # If the last line doesn't have a trailing newline, treat it as if
49 49 # it does so that the cursor at the end of the line still counts
50 50 # as being on that line.
51 51 next_offset += 1
52 52 if next_offset > cursor_pos:
53 53 break
54 54 offset = next_offset
55 55 else:
56 56 line = ""
57 57 return (line, offset)
58 58
59 59 def token_at_cursor(cell, cursor_pos=0):
60 60 """Get the token at a given cursor
61 61
62 62 Used for introspection.
63 63
64 64 Function calls are prioritized, so the token for the callable will be returned
65 65 if the cursor is anywhere inside the call.
66 66
67 67 Parameters
68 68 ----------
69 69
70 70 cell : unicode
71 71 A block of Python code
72 72 cursor_pos : int
73 73 The location of the cursor in the block where the token should be found
74 74 """
75 75 names = []
76 76 tokens = []
77 77 call_names = []
78 78
79 79 offsets = {1: 0} # lines start at 1
80 80 for tup in generate_tokens(StringIO(cell).readline):
81 81
82 82 tok = Token(*tup)
83 83
84 84 # token, text, start, end, line = tup
85 85 start_line, start_col = tok.start
86 86 end_line, end_col = tok.end
87 87 if end_line + 1 not in offsets:
88 88 # keep track of offsets for each line
89 89 lines = tok.line.splitlines(True)
90 90 for lineno, line in enumerate(lines, start_line + 1):
91 91 if lineno not in offsets:
92 92 offsets[lineno] = offsets[lineno-1] + len(line)
93 93
94 94 offset = offsets[start_line]
95 95 # allow '|foo' to find 'foo' at the beginning of a line
96 96 boundary = cursor_pos + 1 if start_col == 0 else cursor_pos
97 97 if offset + start_col >= boundary:
98 98 # current token starts after the cursor,
99 99 # don't consume it
100 100 break
101 101
102 if tok.token == tokenize2.NAME and not iskeyword(tok.text):
103 if names and tokens and tokens[-1].token == tokenize2.OP and tokens[-1].text == '.':
102 if tok.token == tokenize.NAME and not iskeyword(tok.text):
103 if names and tokens and tokens[-1].token == tokenize.OP and tokens[-1].text == '.':
104 104 names[-1] = "%s.%s" % (names[-1], tok.text)
105 105 else:
106 106 names.append(tok.text)
107 elif tok.token == tokenize2.OP:
107 elif tok.token == tokenize.OP:
108 108 if tok.text == '=' and names:
109 109 # don't inspect the lhs of an assignment
110 110 names.pop(-1)
111 111 if tok.text == '(' and names:
112 112 # if we are inside a function call, inspect the function
113 113 call_names.append(names[-1])
114 114 elif tok.text == ')' and call_names:
115 115 call_names.pop(-1)
116 116
117 117 tokens.append(tok)
118 118
119 119 if offsets[end_line] + end_col > cursor_pos:
120 120 # we found the cursor, stop reading
121 121 break
122 122
123 123 if call_names:
124 124 return call_names[-1]
125 125 elif names:
126 126 return names[-1]
127 127 else:
128 128 return ''
129 129
130 130
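The `generate_tokens` wrapper in the hunk above swallows `TokenError` because introspection routinely runs on incomplete cells (open brackets, unterminated strings), and the tokens produced before the tokenizer gives up are still useful. A small sketch of that behaviour, assuming only stdlib `tokenize`; `tolerant_tokens` is an illustrative name, not IPython's API:

```python
# EOF-tolerant tokenization, as used for cursor/introspection lookups.
import tokenize
from io import StringIO

def tolerant_tokens(source):
    """Yield tokens from source, stopping quietly at an unexpected EOF."""
    try:
        yield from tokenize.generate_tokens(StringIO(source).readline)
    except tokenize.TokenError:
        return  # incomplete input: keep whatever was tokenized so far

# An unclosed call still yields the NAME/OP tokens needed to locate the
# token under the cursor.
for tok in tolerant_tokens("numpy.linspace(0, 1"):
    print(tokenize.tok_name[tok.type], tok.string)
```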
Removed file (no content shown): the bundled tokenize copy itself. Its diff (590 lines changed) is collapsed in this view.