validate that ESC_PAREN ('/') is followed by a callable name and not empty
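For context, a minimal sketch of the behaviour this change targets, assuming the module is IPython.core.inputtransformer2 (helper-level view only; the hunk itself is below):

    from IPython.core.inputtransformer2 import _tr_paren

    _tr_paren("range 1 5")  # unchanged: returns "range(1, 5)"
    _tr_paren("")           # previously returned "()"; with this change it raises SyntaxError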
Guy Bortnikov -
@@ -1,827 +1,830 @@
1 """Input transformer machinery to support IPython special syntax.
1 """Input transformer machinery to support IPython special syntax.
2
2
3 This includes the machinery to recognise and transform ``%magic`` commands,
3 This includes the machinery to recognise and transform ``%magic`` commands,
4 ``!system`` commands, ``help?`` querying, prompt stripping, and so forth.
4 ``!system`` commands, ``help?`` querying, prompt stripping, and so forth.
5
5
6 Added: IPython 7.0. Replaces inputsplitter and inputtransformer which were
6 Added: IPython 7.0. Replaces inputsplitter and inputtransformer which were
7 deprecated in 7.0.
7 deprecated in 7.0.
8 """
8 """
9
9
10 # Copyright (c) IPython Development Team.
10 # Copyright (c) IPython Development Team.
11 # Distributed under the terms of the Modified BSD License.
11 # Distributed under the terms of the Modified BSD License.
12
12
13 import ast
13 import ast
14 from codeop import CommandCompiler, Compile
14 from codeop import CommandCompiler, Compile
15 import re
15 import re
16 import sys
16 import sys
17 import tokenize
17 import tokenize
18 from typing import List, Tuple, Optional, Any
18 from typing import List, Tuple, Optional, Any
19 import warnings
19 import warnings
20
20
21 from IPython.utils import tokenutil
21 from IPython.utils import tokenutil
22
22
23 _indent_re = re.compile(r'^[ \t]+')
23 _indent_re = re.compile(r'^[ \t]+')
24
24
25 def leading_empty_lines(lines):
25 def leading_empty_lines(lines):
26 """Remove leading empty lines
26 """Remove leading empty lines
27
27
28 If the leading lines are empty or contain only whitespace, they will be
28 If the leading lines are empty or contain only whitespace, they will be
29 removed.
29 removed.
30 """
30 """
31 if not lines:
31 if not lines:
32 return lines
32 return lines
33 for i, line in enumerate(lines):
33 for i, line in enumerate(lines):
34 if line and not line.isspace():
34 if line and not line.isspace():
35 return lines[i:]
35 return lines[i:]
36 return lines
36 return lines
37
37
38 def leading_indent(lines):
38 def leading_indent(lines):
39 """Remove leading indentation.
39 """Remove leading indentation.
40
40
41 If the first line starts with spaces or tabs, the same whitespace will be
41 If the first line starts with spaces or tabs, the same whitespace will be
42 removed from each following line in the cell.
42 removed from each following line in the cell.
43 """
43 """
44 if not lines:
44 if not lines:
45 return lines
45 return lines
46 m = _indent_re.match(lines[0])
46 m = _indent_re.match(lines[0])
47 if not m:
47 if not m:
48 return lines
48 return lines
49 space = m.group(0)
49 space = m.group(0)
50 n = len(space)
50 n = len(space)
51 return [l[n:] if l.startswith(space) else l
51 return [l[n:] if l.startswith(space) else l
52 for l in lines]
52 for l in lines]
53
53
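A quick illustration of the two cleanup helpers above (a sketch; the input lists mimic str.splitlines(keepends=True) output):

    from IPython.core.inputtransformer2 import leading_empty_lines, leading_indent

    leading_empty_lines(["\n", "   \n", "x = 1\n"])
    # -> ["x = 1\n"]

    leading_indent(["    if x:\n", "        y = 1\n", "z = 2\n"])
    # -> ["if x:\n", "    y = 1\n", "z = 2\n"]  (only lines sharing the first line's indent are trimmed)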
54 class PromptStripper:
54 class PromptStripper:
55 """Remove matching input prompts from a block of input.
55 """Remove matching input prompts from a block of input.
56
56
57 Parameters
57 Parameters
58 ----------
58 ----------
59 prompt_re : regular expression
59 prompt_re : regular expression
60 A regular expression matching any input prompt (including continuation,
60 A regular expression matching any input prompt (including continuation,
61 e.g. ``...``)
61 e.g. ``...``)
62 initial_re : regular expression, optional
62 initial_re : regular expression, optional
63 A regular expression matching only the initial prompt, but not continuation.
63 A regular expression matching only the initial prompt, but not continuation.
64 If no initial expression is given, prompt_re will be used everywhere.
64 If no initial expression is given, prompt_re will be used everywhere.
65 Used mainly for plain Python prompts (``>>>``), where the continuation prompt
65 Used mainly for plain Python prompts (``>>>``), where the continuation prompt
66 ``...`` is a valid Python expression in Python 3, so shouldn't be stripped.
66 ``...`` is a valid Python expression in Python 3, so shouldn't be stripped.
67
67
68 Notes
68 Notes
69 -----
69 -----
70
70
71 If initial_re and prompt_re differ,
71 If initial_re and prompt_re differ,
72 only initial_re will be tested against the first line.
72 only initial_re will be tested against the first line.
73 If any prompt is found on the first two lines,
73 If any prompt is found on the first two lines,
74 prompts will be stripped from the rest of the block.
74 prompts will be stripped from the rest of the block.
75 """
75 """
76 def __init__(self, prompt_re, initial_re=None):
76 def __init__(self, prompt_re, initial_re=None):
77 self.prompt_re = prompt_re
77 self.prompt_re = prompt_re
78 self.initial_re = initial_re or prompt_re
78 self.initial_re = initial_re or prompt_re
79
79
80 def _strip(self, lines):
80 def _strip(self, lines):
81 return [self.prompt_re.sub('', l, count=1) for l in lines]
81 return [self.prompt_re.sub('', l, count=1) for l in lines]
82
82
83 def __call__(self, lines):
83 def __call__(self, lines):
84 if not lines:
84 if not lines:
85 return lines
85 return lines
86 if self.initial_re.match(lines[0]) or \
86 if self.initial_re.match(lines[0]) or \
87 (len(lines) > 1 and self.prompt_re.match(lines[1])):
87 (len(lines) > 1 and self.prompt_re.match(lines[1])):
88 return self._strip(lines)
88 return self._strip(lines)
89 return lines
89 return lines
90
90
91 classic_prompt = PromptStripper(
91 classic_prompt = PromptStripper(
92 prompt_re=re.compile(r'^(>>>|\.\.\.)( |$)'),
92 prompt_re=re.compile(r'^(>>>|\.\.\.)( |$)'),
93 initial_re=re.compile(r'^>>>( |$)')
93 initial_re=re.compile(r'^>>>( |$)')
94 )
94 )
95
95
96 ipython_prompt = PromptStripper(
96 ipython_prompt = PromptStripper(
97 re.compile(
97 re.compile(
98 r"""
98 r"""
99 ^( # Match from the beginning of a line, either:
99 ^( # Match from the beginning of a line, either:
100
100
101 # 1. First-line prompt:
101 # 1. First-line prompt:
102 ((\[nav\]|\[ins\])?\ )? # Vi editing mode prompt, if it's there
102 ((\[nav\]|\[ins\])?\ )? # Vi editing mode prompt, if it's there
103 In\ # The 'In' of the prompt, with a space
103 In\ # The 'In' of the prompt, with a space
104 \[\d+\]: # Command index, as displayed in the prompt
104 \[\d+\]: # Command index, as displayed in the prompt
105 \ # With a mandatory trailing space
105 \ # With a mandatory trailing space
106
106
107 | # ... or ...
107 | # ... or ...
108
108
109 # 2. The three dots of the multiline prompt
109 # 2. The three dots of the multiline prompt
110 \s* # All leading whitespace characters
110 \s* # All leading whitespace characters
111 \.{3,}: # The three (or more) dots
111 \.{3,}: # The three (or more) dots
112 \ ? # With an optional trailing space
112 \ ? # With an optional trailing space
113
113
114 )
114 )
115 """,
115 """,
116 re.VERBOSE,
116 re.VERBOSE,
117 )
117 )
118 )
118 )
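For example, the two preconfigured strippers behave roughly as follows (a sketch):

    from IPython.core.inputtransformer2 import classic_prompt, ipython_prompt

    classic_prompt([">>> a = 1\n", "... b = 2\n"])
    # -> ["a = 1\n", "b = 2\n"]

    ipython_prompt(["In [1]: x = 1\n", "   ...: y = 2\n"])
    # -> ["x = 1\n", "y = 2\n"]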
119
119
120
120
121 def cell_magic(lines):
121 def cell_magic(lines):
122 if not lines or not lines[0].startswith('%%'):
122 if not lines or not lines[0].startswith('%%'):
123 return lines
123 return lines
124 if re.match(r'%%\w+\?', lines[0]):
124 if re.match(r'%%\w+\?', lines[0]):
125 # This case will be handled by help_end
125 # This case will be handled by help_end
126 return lines
126 return lines
127 magic_name, _, first_line = lines[0][2:].rstrip().partition(' ')
127 magic_name, _, first_line = lines[0][2:].rstrip().partition(' ')
128 body = ''.join(lines[1:])
128 body = ''.join(lines[1:])
129 return ['get_ipython().run_cell_magic(%r, %r, %r)\n'
129 return ['get_ipython().run_cell_magic(%r, %r, %r)\n'
130 % (magic_name, first_line, body)]
130 % (magic_name, first_line, body)]
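For instance, a %%timeit cell is collapsed into a single run_cell_magic call (a sketch of the expected output):

    from IPython.core.inputtransformer2 import cell_magic

    cell_magic(["%%timeit -n1\n", "x = 1\n", "y = 2\n"])
    # -> ["get_ipython().run_cell_magic('timeit', '-n1', 'x = 1\\ny = 2\\n')\n"]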
131
131
132
132
133 def _find_assign_op(token_line) -> Optional[int]:
133 def _find_assign_op(token_line) -> Optional[int]:
134 """Get the index of the first assignment in the line ('=' not inside brackets)
134 """Get the index of the first assignment in the line ('=' not inside brackets)
135
135
136 Note: We don't try to support multiple special assignments (a = b = %foo)
136 Note: We don't try to support multiple special assignments (a = b = %foo)
137 """
137 """
138 paren_level = 0
138 paren_level = 0
139 for i, ti in enumerate(token_line):
139 for i, ti in enumerate(token_line):
140 s = ti.string
140 s = ti.string
141 if s == '=' and paren_level == 0:
141 if s == '=' and paren_level == 0:
142 return i
142 return i
143 if s in {'(','[','{'}:
143 if s in {'(','[','{'}:
144 paren_level += 1
144 paren_level += 1
145 elif s in {')', ']', '}'}:
145 elif s in {')', ']', '}'}:
146 if paren_level > 0:
146 if paren_level > 0:
147 paren_level -= 1
147 paren_level -= 1
148 return None
148 return None
149
149
150 def find_end_of_continued_line(lines, start_line: int):
150 def find_end_of_continued_line(lines, start_line: int):
151 """Find the last line of a line explicitly extended using backslashes.
151 """Find the last line of a line explicitly extended using backslashes.
152
152
153 Uses 0-indexed line numbers.
153 Uses 0-indexed line numbers.
154 """
154 """
155 end_line = start_line
155 end_line = start_line
156 while lines[end_line].endswith('\\\n'):
156 while lines[end_line].endswith('\\\n'):
157 end_line += 1
157 end_line += 1
158 if end_line >= len(lines):
158 if end_line >= len(lines):
159 break
159 break
160 return end_line
160 return end_line
161
161
162 def assemble_continued_line(lines, start: Tuple[int, int], end_line: int):
162 def assemble_continued_line(lines, start: Tuple[int, int], end_line: int):
163 r"""Assemble a single line from multiple continued line pieces
163 r"""Assemble a single line from multiple continued line pieces
164
164
165 Continued lines are lines ending in ``\``, and the line following the last
165 Continued lines are lines ending in ``\``, and the line following the last
166 ``\`` in the block.
166 ``\`` in the block.
167
167
168 For example, this code continues over multiple lines::
168 For example, this code continues over multiple lines::
169
169
170 if (assign_ix is not None) \
170 if (assign_ix is not None) \
171 and (len(line) >= assign_ix + 2) \
171 and (len(line) >= assign_ix + 2) \
172 and (line[assign_ix+1].string == '%') \
172 and (line[assign_ix+1].string == '%') \
173 and (line[assign_ix+2].type == tokenize.NAME):
173 and (line[assign_ix+2].type == tokenize.NAME):
174
174
175 This statement contains four continued line pieces.
175 This statement contains four continued line pieces.
176 Assembling these pieces into a single line would give::
176 Assembling these pieces into a single line would give::
177
177
178 if (assign_ix is not None) and (len(line) >= assign_ix + 2) and (line[...
178 if (assign_ix is not None) and (len(line) >= assign_ix + 2) and (line[...
179
179
180 This uses 0-indexed line numbers. *start* is (lineno, colno).
180 This uses 0-indexed line numbers. *start* is (lineno, colno).
181
181
182 Used to allow ``%magic`` and ``!system`` commands to be continued over
182 Used to allow ``%magic`` and ``!system`` commands to be continued over
183 multiple lines.
183 multiple lines.
184 """
184 """
185 parts = [lines[start[0]][start[1]:]] + lines[start[0]+1:end_line+1]
185 parts = [lines[start[0]][start[1]:]] + lines[start[0]+1:end_line+1]
186 return ' '.join([p.rstrip()[:-1] for p in parts[:-1]] # Strip backslash+newline
186 return ' '.join([p.rstrip()[:-1] for p in parts[:-1]] # Strip backslash+newline
187 + [parts[-1].rstrip()]) # Strip newline from last line
187 + [parts[-1].rstrip()]) # Strip newline from last line
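A small sketch of how these two helpers cooperate on a backslash-continued command:

    from IPython.core.inputtransformer2 import (
        find_end_of_continued_line, assemble_continued_line)

    lines = ["!ls\\\n", "-l\n"]
    find_end_of_continued_line(lines, 0)       # -> 1 (continuation ends on the second line)
    assemble_continued_line(lines, (0, 0), 1)  # -> "!ls -l"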
188
188
189 class TokenTransformBase:
189 class TokenTransformBase:
190 """Base class for transformations which examine tokens.
190 """Base class for transformations which examine tokens.
191
191
192 Special syntax should not be transformed when it occurs inside strings or
192 Special syntax should not be transformed when it occurs inside strings or
193 comments. This is hard to reliably avoid with regexes. The solution is to
193 comments. This is hard to reliably avoid with regexes. The solution is to
194 tokenise the code as Python, and recognise the special syntax in the tokens.
194 tokenise the code as Python, and recognise the special syntax in the tokens.
195
195
196 IPython's special syntax is not valid Python syntax, so tokenising may go
196 IPython's special syntax is not valid Python syntax, so tokenising may go
197 wrong after the special syntax starts. These classes therefore find and
197 wrong after the special syntax starts. These classes therefore find and
198 transform *one* instance of special syntax at a time into regular Python
198 transform *one* instance of special syntax at a time into regular Python
199 syntax. After each transformation, tokens are regenerated to find the next
199 syntax. After each transformation, tokens are regenerated to find the next
200 piece of special syntax.
200 piece of special syntax.
201
201
202 Subclasses need to implement one class method (find)
202 Subclasses need to implement one class method (find)
203 and one regular method (transform).
203 and one regular method (transform).
204
204
205 The priority attribute can select which transformation to apply if multiple
205 The priority attribute can select which transformation to apply if multiple
206 transformers match in the same place. Lower numbers have higher priority.
206 transformers match in the same place. Lower numbers have higher priority.
207 This allows "%magic?" to be turned into a help call rather than a magic call.
207 This allows "%magic?" to be turned into a help call rather than a magic call.
208 """
208 """
209 # Lower numbers -> higher priority (for matches in the same location)
209 # Lower numbers -> higher priority (for matches in the same location)
210 priority = 10
210 priority = 10
211
211
212 def sortby(self):
212 def sortby(self):
213 return self.start_line, self.start_col, self.priority
213 return self.start_line, self.start_col, self.priority
214
214
215 def __init__(self, start):
215 def __init__(self, start):
216 self.start_line = start[0] - 1 # Shift from 1-index to 0-index
216 self.start_line = start[0] - 1 # Shift from 1-index to 0-index
217 self.start_col = start[1]
217 self.start_col = start[1]
218
218
219 @classmethod
219 @classmethod
220 def find(cls, tokens_by_line):
220 def find(cls, tokens_by_line):
221 """Find one instance of special syntax in the provided tokens.
221 """Find one instance of special syntax in the provided tokens.
222
222
223 Tokens are grouped into logical lines for convenience,
223 Tokens are grouped into logical lines for convenience,
224 so it is easy to e.g. look at the first token of each line.
224 so it is easy to e.g. look at the first token of each line.
225 *tokens_by_line* is a list of lists of tokenize.TokenInfo objects.
225 *tokens_by_line* is a list of lists of tokenize.TokenInfo objects.
226
226
227 This should return an instance of its class, pointing to the start
227 This should return an instance of its class, pointing to the start
228 position it has found, or None if it found no match.
228 position it has found, or None if it found no match.
229 """
229 """
230 raise NotImplementedError
230 raise NotImplementedError
231
231
232 def transform(self, lines: List[str]):
232 def transform(self, lines: List[str]):
233 """Transform one instance of special syntax found by ``find()``
233 """Transform one instance of special syntax found by ``find()``
234
234
235 Takes a list of strings representing physical lines,
235 Takes a list of strings representing physical lines,
236 returns a similar list of transformed lines.
236 returns a similar list of transformed lines.
237 """
237 """
238 raise NotImplementedError
238 raise NotImplementedError
239
239
240 class MagicAssign(TokenTransformBase):
240 class MagicAssign(TokenTransformBase):
241 """Transformer for assignments from magics (a = %foo)"""
241 """Transformer for assignments from magics (a = %foo)"""
242 @classmethod
242 @classmethod
243 def find(cls, tokens_by_line):
243 def find(cls, tokens_by_line):
244 """Find the first magic assignment (a = %foo) in the cell.
244 """Find the first magic assignment (a = %foo) in the cell.
245 """
245 """
246 for line in tokens_by_line:
246 for line in tokens_by_line:
247 assign_ix = _find_assign_op(line)
247 assign_ix = _find_assign_op(line)
248 if (assign_ix is not None) \
248 if (assign_ix is not None) \
249 and (len(line) >= assign_ix + 2) \
249 and (len(line) >= assign_ix + 2) \
250 and (line[assign_ix+1].string == '%') \
250 and (line[assign_ix+1].string == '%') \
251 and (line[assign_ix+2].type == tokenize.NAME):
251 and (line[assign_ix+2].type == tokenize.NAME):
252 return cls(line[assign_ix+1].start)
252 return cls(line[assign_ix+1].start)
253
253
254 def transform(self, lines: List[str]):
254 def transform(self, lines: List[str]):
255 """Transform a magic assignment found by the ``find()`` classmethod.
255 """Transform a magic assignment found by the ``find()`` classmethod.
256 """
256 """
257 start_line, start_col = self.start_line, self.start_col
257 start_line, start_col = self.start_line, self.start_col
258 lhs = lines[start_line][:start_col]
258 lhs = lines[start_line][:start_col]
259 end_line = find_end_of_continued_line(lines, start_line)
259 end_line = find_end_of_continued_line(lines, start_line)
260 rhs = assemble_continued_line(lines, (start_line, start_col), end_line)
260 rhs = assemble_continued_line(lines, (start_line, start_col), end_line)
261 assert rhs.startswith('%'), rhs
261 assert rhs.startswith('%'), rhs
262 magic_name, _, args = rhs[1:].partition(' ')
262 magic_name, _, args = rhs[1:].partition(' ')
263
263
264 lines_before = lines[:start_line]
264 lines_before = lines[:start_line]
265 call = "get_ipython().run_line_magic({!r}, {!r})".format(magic_name, args)
265 call = "get_ipython().run_line_magic({!r}, {!r})".format(magic_name, args)
266 new_line = lhs + call + '\n'
266 new_line = lhs + call + '\n'
267 lines_after = lines[end_line+1:]
267 lines_after = lines[end_line+1:]
268
268
269 return lines_before + [new_line] + lines_after
269 return lines_before + [new_line] + lines_after
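End to end, a magic assignment is rewritten roughly like this (a sketch using the TransformerManager defined later in this module):

    from IPython.core.inputtransformer2 import TransformerManager

    TransformerManager().transform_cell("a = %timeit f(x)\n")
    # -> "a = get_ipython().run_line_magic('timeit', 'f(x)')\n"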
270
270
271
271
272 class SystemAssign(TokenTransformBase):
272 class SystemAssign(TokenTransformBase):
273 """Transformer for assignments from system commands (a = !foo)"""
273 """Transformer for assignments from system commands (a = !foo)"""
274 @classmethod
274 @classmethod
275 def find_pre_312(cls, tokens_by_line):
275 def find_pre_312(cls, tokens_by_line):
276 for line in tokens_by_line:
276 for line in tokens_by_line:
277 assign_ix = _find_assign_op(line)
277 assign_ix = _find_assign_op(line)
278 if (assign_ix is not None) \
278 if (assign_ix is not None) \
279 and not line[assign_ix].line.strip().startswith('=') \
279 and not line[assign_ix].line.strip().startswith('=') \
280 and (len(line) >= assign_ix + 2) \
280 and (len(line) >= assign_ix + 2) \
281 and (line[assign_ix + 1].type == tokenize.ERRORTOKEN):
281 and (line[assign_ix + 1].type == tokenize.ERRORTOKEN):
282 ix = assign_ix + 1
282 ix = assign_ix + 1
283
283
284 while ix < len(line) and line[ix].type == tokenize.ERRORTOKEN:
284 while ix < len(line) and line[ix].type == tokenize.ERRORTOKEN:
285 if line[ix].string == '!':
285 if line[ix].string == '!':
286 return cls(line[ix].start)
286 return cls(line[ix].start)
287 elif not line[ix].string.isspace():
287 elif not line[ix].string.isspace():
288 break
288 break
289 ix += 1
289 ix += 1
290
290
291 @classmethod
291 @classmethod
292 def find_post_312(cls, tokens_by_line):
292 def find_post_312(cls, tokens_by_line):
293 for line in tokens_by_line:
293 for line in tokens_by_line:
294 assign_ix = _find_assign_op(line)
294 assign_ix = _find_assign_op(line)
295 if (
295 if (
296 (assign_ix is not None)
296 (assign_ix is not None)
297 and not line[assign_ix].line.strip().startswith("=")
297 and not line[assign_ix].line.strip().startswith("=")
298 and (len(line) >= assign_ix + 2)
298 and (len(line) >= assign_ix + 2)
299 and (line[assign_ix + 1].type == tokenize.OP)
299 and (line[assign_ix + 1].type == tokenize.OP)
300 and (line[assign_ix + 1].string == "!")
300 and (line[assign_ix + 1].string == "!")
301 ):
301 ):
302 return cls(line[assign_ix + 1].start)
302 return cls(line[assign_ix + 1].start)
303
303
304 @classmethod
304 @classmethod
305 def find(cls, tokens_by_line):
305 def find(cls, tokens_by_line):
306 """Find the first system assignment (a = !foo) in the cell."""
306 """Find the first system assignment (a = !foo) in the cell."""
307 if sys.version_info < (3, 12):
307 if sys.version_info < (3, 12):
308 return cls.find_pre_312(tokens_by_line)
308 return cls.find_pre_312(tokens_by_line)
309 return cls.find_post_312(tokens_by_line)
309 return cls.find_post_312(tokens_by_line)
310
310
311 def transform(self, lines: List[str]):
311 def transform(self, lines: List[str]):
312 """Transform a system assignment found by the ``find()`` classmethod.
312 """Transform a system assignment found by the ``find()`` classmethod.
313 """
313 """
314 start_line, start_col = self.start_line, self.start_col
314 start_line, start_col = self.start_line, self.start_col
315
315
316 lhs = lines[start_line][:start_col]
316 lhs = lines[start_line][:start_col]
317 end_line = find_end_of_continued_line(lines, start_line)
317 end_line = find_end_of_continued_line(lines, start_line)
318 rhs = assemble_continued_line(lines, (start_line, start_col), end_line)
318 rhs = assemble_continued_line(lines, (start_line, start_col), end_line)
319 assert rhs.startswith('!'), rhs
319 assert rhs.startswith('!'), rhs
320 cmd = rhs[1:]
320 cmd = rhs[1:]
321
321
322 lines_before = lines[:start_line]
322 lines_before = lines[:start_line]
323 call = "get_ipython().getoutput({!r})".format(cmd)
323 call = "get_ipython().getoutput({!r})".format(cmd)
324 new_line = lhs + call + '\n'
324 new_line = lhs + call + '\n'
325 lines_after = lines[end_line + 1:]
325 lines_after = lines[end_line + 1:]
326
326
327 return lines_before + [new_line] + lines_after
327 return lines_before + [new_line] + lines_after
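Similarly, a system-command assignment becomes a getoutput call (sketch):

    from IPython.core.inputtransformer2 import TransformerManager

    TransformerManager().transform_cell("files = !ls -a\n")
    # -> "files = get_ipython().getoutput('ls -a')\n"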
328
328
329 # The escape sequences that define the syntax transformations IPython will
329 # The escape sequences that define the syntax transformations IPython will
330 # apply to user input. These can NOT be just changed here: many regular
330 # apply to user input. These can NOT be just changed here: many regular
331 # expressions and other parts of the code may use their hardcoded values, and
331 # expressions and other parts of the code may use their hardcoded values, and
332 # for all intents and purposes they constitute the 'IPython syntax', so they
332 # for all intents and purposes they constitute the 'IPython syntax', so they
333 # should be considered fixed.
333 # should be considered fixed.
334
334
335 ESC_SHELL = '!' # Send line to underlying system shell
335 ESC_SHELL = '!' # Send line to underlying system shell
336 ESC_SH_CAP = '!!' # Send line to system shell and capture output
336 ESC_SH_CAP = '!!' # Send line to system shell and capture output
337 ESC_HELP = '?' # Find information about object
337 ESC_HELP = '?' # Find information about object
338 ESC_HELP2 = '??' # Find extra-detailed information about object
338 ESC_HELP2 = '??' # Find extra-detailed information about object
339 ESC_MAGIC = '%' # Call magic function
339 ESC_MAGIC = '%' # Call magic function
340 ESC_MAGIC2 = '%%' # Call cell-magic function
340 ESC_MAGIC2 = '%%' # Call cell-magic function
341 ESC_QUOTE = ',' # Split args on whitespace, quote each as string and call
341 ESC_QUOTE = ',' # Split args on whitespace, quote each as string and call
342 ESC_QUOTE2 = ';' # Quote all args as a single string, call
342 ESC_QUOTE2 = ';' # Quote all args as a single string, call
343 ESC_PAREN = '/' # Call first argument with rest of line as arguments
343 ESC_PAREN = '/' # Call first argument with rest of line as arguments
344
344
345 ESCAPE_SINGLES = {'!', '?', '%', ',', ';', '/'}
345 ESCAPE_SINGLES = {'!', '?', '%', ',', ';', '/'}
346 ESCAPE_DOUBLES = {'!!', '??'} # %% (cell magic) is handled separately
346 ESCAPE_DOUBLES = {'!!', '??'} # %% (cell magic) is handled separately
347
347
348 def _make_help_call(target, esc):
348 def _make_help_call(target, esc):
349 """Prepares a pinfo(2)/psearch call from a target name and the escape
349 """Prepares a pinfo(2)/psearch call from a target name and the escape
350 (i.e. ? or ??)"""
350 (i.e. ? or ??)"""
351 method = 'pinfo2' if esc == '??' \
351 method = 'pinfo2' if esc == '??' \
352 else 'psearch' if '*' in target \
352 else 'psearch' if '*' in target \
353 else 'pinfo'
353 else 'pinfo'
354 arg = " ".join([method, target])
354 arg = " ".join([method, target])
355 #Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
355 #Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
356 t_magic_name, _, t_magic_arg_s = arg.partition(' ')
356 t_magic_name, _, t_magic_arg_s = arg.partition(' ')
357 t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
357 t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
358 return "get_ipython().run_line_magic(%r, %r)" % (t_magic_name, t_magic_arg_s)
358 return "get_ipython().run_line_magic(%r, %r)" % (t_magic_name, t_magic_arg_s)
359
359
360
360
361 def _tr_help(content):
361 def _tr_help(content):
362 """Translate lines escaped with: ?
362 """Translate lines escaped with: ?
363
363
364 A naked help line should fire the intro help screen (shell.show_usage())
364 A naked help line should fire the intro help screen (shell.show_usage())
365 """
365 """
366 if not content:
366 if not content:
367 return 'get_ipython().show_usage()'
367 return 'get_ipython().show_usage()'
368
368
369 return _make_help_call(content, '?')
369 return _make_help_call(content, '?')
370
370
371 def _tr_help2(content):
371 def _tr_help2(content):
372 """Translate lines escaped with: ??
372 """Translate lines escaped with: ??
373
373
374 A naked help line should fire the intro help screen (shell.show_usage())
374 A naked help line should fire the intro help screen (shell.show_usage())
375 """
375 """
376 if not content:
376 if not content:
377 return 'get_ipython().show_usage()'
377 return 'get_ipython().show_usage()'
378
378
379 return _make_help_call(content, '??')
379 return _make_help_call(content, '??')
380
380
381 def _tr_magic(content):
381 def _tr_magic(content):
382 "Translate lines escaped with a percent sign: %"
382 "Translate lines escaped with a percent sign: %"
383 name, _, args = content.partition(' ')
383 name, _, args = content.partition(' ')
384 return 'get_ipython().run_line_magic(%r, %r)' % (name, args)
384 return 'get_ipython().run_line_magic(%r, %r)' % (name, args)
385
385
386 def _tr_quote(content):
386 def _tr_quote(content):
387 "Translate lines escaped with a comma: ,"
387 "Translate lines escaped with a comma: ,"
388 name, _, args = content.partition(' ')
388 name, _, args = content.partition(' ')
389 return '%s("%s")' % (name, '", "'.join(args.split()) )
389 return '%s("%s")' % (name, '", "'.join(args.split()) )
390
390
391 def _tr_quote2(content):
391 def _tr_quote2(content):
392 "Translate lines escaped with a semicolon: ;"
392 "Translate lines escaped with a semicolon: ;"
393 name, _, args = content.partition(' ')
393 name, _, args = content.partition(' ')
394 return '%s("%s")' % (name, args)
394 return '%s("%s")' % (name, args)
395
395
396 def _tr_paren(content):
396 def _tr_paren(content):
397 "Translate lines escaped with a slash: /"
397 "Translate lines escaped with a slash: /"
398 name, _, args = content.partition(' ')
398 name, _, args = content.partition(" ")
399 if name == "":
400 raise SyntaxError(f'"{ESC_PAREN}" must be followed by a callable name')
401
399 return '%s(%s)' % (name, ", ".join(args.split()))
402 return '%s(%s)' % (name, ", ".join(args.split()))
400
403
401 tr = { ESC_SHELL : 'get_ipython().system({!r})'.format,
404 tr = { ESC_SHELL : 'get_ipython().system({!r})'.format,
402 ESC_SH_CAP : 'get_ipython().getoutput({!r})'.format,
405 ESC_SH_CAP : 'get_ipython().getoutput({!r})'.format,
403 ESC_HELP : _tr_help,
406 ESC_HELP : _tr_help,
404 ESC_HELP2 : _tr_help2,
407 ESC_HELP2 : _tr_help2,
405 ESC_MAGIC : _tr_magic,
408 ESC_MAGIC : _tr_magic,
406 ESC_QUOTE : _tr_quote,
409 ESC_QUOTE : _tr_quote,
407 ESC_QUOTE2 : _tr_quote2,
410 ESC_QUOTE2 : _tr_quote2,
408 ESC_PAREN : _tr_paren }
411 ESC_PAREN : _tr_paren }
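To make the table above concrete, the quote/paren escapes rewrite a line into a plain call (sketch of the helper outputs):

    from IPython.core.inputtransformer2 import _tr_quote, _tr_quote2, _tr_paren

    _tr_quote("f a b")    # -> 'f("a", "b")'
    _tr_quote2("f a b")   # -> 'f("a b")'
    _tr_paren("f a b")    # -> 'f(a, b)'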
409
412
410 class EscapedCommand(TokenTransformBase):
413 class EscapedCommand(TokenTransformBase):
411 """Transformer for escaped commands like %foo, !foo, or /foo"""
414 """Transformer for escaped commands like %foo, !foo, or /foo"""
412 @classmethod
415 @classmethod
413 def find(cls, tokens_by_line):
416 def find(cls, tokens_by_line):
414 """Find the first escaped command (%foo, !foo, etc.) in the cell.
417 """Find the first escaped command (%foo, !foo, etc.) in the cell.
415 """
418 """
416 for line in tokens_by_line:
419 for line in tokens_by_line:
417 if not line:
420 if not line:
418 continue
421 continue
419 ix = 0
422 ix = 0
420 ll = len(line)
423 ll = len(line)
421 while ll > ix and line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
424 while ll > ix and line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
422 ix += 1
425 ix += 1
423 if ix >= ll:
426 if ix >= ll:
424 continue
427 continue
425 if line[ix].string in ESCAPE_SINGLES:
428 if line[ix].string in ESCAPE_SINGLES:
426 return cls(line[ix].start)
429 return cls(line[ix].start)
427
430
428 def transform(self, lines):
431 def transform(self, lines):
429 """Transform an escaped line found by the ``find()`` classmethod.
432 """Transform an escaped line found by the ``find()`` classmethod.
430 """
433 """
431 start_line, start_col = self.start_line, self.start_col
434 start_line, start_col = self.start_line, self.start_col
432
435
433 indent = lines[start_line][:start_col]
436 indent = lines[start_line][:start_col]
434 end_line = find_end_of_continued_line(lines, start_line)
437 end_line = find_end_of_continued_line(lines, start_line)
435 line = assemble_continued_line(lines, (start_line, start_col), end_line)
438 line = assemble_continued_line(lines, (start_line, start_col), end_line)
436
439
437 if len(line) > 1 and line[:2] in ESCAPE_DOUBLES:
440 if len(line) > 1 and line[:2] in ESCAPE_DOUBLES:
438 escape, content = line[:2], line[2:]
441 escape, content = line[:2], line[2:]
439 else:
442 else:
440 escape, content = line[:1], line[1:]
443 escape, content = line[:1], line[1:]
441
444
442 if escape in tr:
445 if escape in tr:
443 call = tr[escape](content)
446 call = tr[escape](content)
444 else:
447 else:
445 call = ''
448 call = ''
446
449
447 lines_before = lines[:start_line]
450 lines_before = lines[:start_line]
448 new_line = indent + call + '\n'
451 new_line = indent + call + '\n'
449 lines_after = lines[end_line + 1:]
452 lines_after = lines[end_line + 1:]
450
453
451 return lines_before + [new_line] + lines_after
454 return lines_before + [new_line] + lines_after
452
455
453
456
454 _help_end_re = re.compile(
457 _help_end_re = re.compile(
455 r"""(%{0,2}
458 r"""(%{0,2}
456 (?!\d)[\w*]+ # Variable name
459 (?!\d)[\w*]+ # Variable name
457 (\.(?!\d)[\w*]+|\[-?[0-9]+\])* # .etc.etc or [0], we only support literal integers.
460 (\.(?!\d)[\w*]+|\[-?[0-9]+\])* # .etc.etc or [0], we only support literal integers.
458 )
461 )
459 (\?\??)$ # ? or ??
462 (\?\??)$ # ? or ??
460 """,
463 """,
461 re.VERBOSE,
464 re.VERBOSE,
462 )
465 )
463
466
464
467
465 class HelpEnd(TokenTransformBase):
468 class HelpEnd(TokenTransformBase):
466 """Transformer for help syntax: obj? and obj??"""
469 """Transformer for help syntax: obj? and obj??"""
467 # This needs to be higher priority (lower number) than EscapedCommand so
470 # This needs to be higher priority (lower number) than EscapedCommand so
468 # that inspecting magics (%foo?) works.
471 # that inspecting magics (%foo?) works.
469 priority = 5
472 priority = 5
470
473
471 def __init__(self, start, q_locn):
474 def __init__(self, start, q_locn):
472 super().__init__(start)
475 super().__init__(start)
473 self.q_line = q_locn[0] - 1 # Shift from 1-indexed to 0-indexed
476 self.q_line = q_locn[0] - 1 # Shift from 1-indexed to 0-indexed
474 self.q_col = q_locn[1]
477 self.q_col = q_locn[1]
475
478
476 @classmethod
479 @classmethod
477 def find(cls, tokens_by_line):
480 def find(cls, tokens_by_line):
478 """Find the first help command (foo?) in the cell.
481 """Find the first help command (foo?) in the cell.
479 """
482 """
480 for line in tokens_by_line:
483 for line in tokens_by_line:
481 # Last token is NEWLINE; look at last but one
484 # Last token is NEWLINE; look at last but one
482 if len(line) > 2 and line[-2].string == '?':
485 if len(line) > 2 and line[-2].string == '?':
483 # Find the first token that's not INDENT/DEDENT
486 # Find the first token that's not INDENT/DEDENT
484 ix = 0
487 ix = 0
485 while line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
488 while line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
486 ix += 1
489 ix += 1
487 return cls(line[ix].start, line[-2].start)
490 return cls(line[ix].start, line[-2].start)
488
491
489 def transform(self, lines):
492 def transform(self, lines):
490 """Transform a help command found by the ``find()`` classmethod.
493 """Transform a help command found by the ``find()`` classmethod.
491 """
494 """
492
495
493 piece = "".join(lines[self.start_line : self.q_line + 1])
496 piece = "".join(lines[self.start_line : self.q_line + 1])
494 indent, content = piece[: self.start_col], piece[self.start_col :]
497 indent, content = piece[: self.start_col], piece[self.start_col :]
495 lines_before = lines[: self.start_line]
498 lines_before = lines[: self.start_line]
496 lines_after = lines[self.q_line + 1 :]
499 lines_after = lines[self.q_line + 1 :]
497
500
498 m = _help_end_re.search(content)
501 m = _help_end_re.search(content)
499 if not m:
502 if not m:
500 raise SyntaxError(content)
503 raise SyntaxError(content)
501 assert m is not None, content
504 assert m is not None, content
502 target = m.group(1)
505 target = m.group(1)
503 esc = m.group(3)
506 esc = m.group(3)
504
507
505
508
506 call = _make_help_call(target, esc)
509 call = _make_help_call(target, esc)
507 new_line = indent + call + '\n'
510 new_line = indent + call + '\n'
508
511
509 return lines_before + [new_line] + lines_after
512 return lines_before + [new_line] + lines_after
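As an illustration, trailing-? help requests end up as pinfo/pinfo2 magic calls (sketch):

    from IPython.core.inputtransformer2 import TransformerManager

    TransformerManager().transform_cell("arr[0]?\n")
    # -> "get_ipython().run_line_magic('pinfo', 'arr[0]')\n"
    TransformerManager().transform_cell("%timeit??\n")
    # -> "get_ipython().run_line_magic('pinfo2', '%timeit')\n"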
510
513
511 def make_tokens_by_line(lines:List[str]):
514 def make_tokens_by_line(lines:List[str]):
512 """Tokenize a series of lines and group tokens by line.
515 """Tokenize a series of lines and group tokens by line.
513
516
514 The tokens for a multiline Python string or expression are grouped as one
517 The tokens for a multiline Python string or expression are grouped as one
514 line. All lines except the last should keep their line ending ('\\n',
517 line. All lines except the last should keep their line ending ('\\n',
515 '\\r\\n') for this to properly work. Use `.splitlines(keepends=True)`
518 '\\r\\n') for this to properly work. Use `.splitlines(keepends=True)`
516 for example when passing a block of text to this function.
519 for example when passing a block of text to this function.
518
521
519 """
522 """
520 # NL tokens are used inside multiline expressions, but also after blank
523 # NL tokens are used inside multiline expressions, but also after blank
521 # lines or comments. This is intentional - see https://bugs.python.org/issue17061
524 # lines or comments. This is intentional - see https://bugs.python.org/issue17061
522 # We want to group the former case together but split the latter, so we
525 # We want to group the former case together but split the latter, so we
523 # track parentheses level, similar to the internals of tokenize.
526 # track parentheses level, similar to the internals of tokenize.
524
527
525 # reexported from token on 3.7+
528 # reexported from token on 3.7+
526 NEWLINE, NL = tokenize.NEWLINE, tokenize.NL # type: ignore
529 NEWLINE, NL = tokenize.NEWLINE, tokenize.NL # type: ignore
527 tokens_by_line: List[List[Any]] = [[]]
530 tokens_by_line: List[List[Any]] = [[]]
528 if len(lines) > 1 and not lines[0].endswith(("\n", "\r", "\r\n", "\x0b", "\x0c")):
531 if len(lines) > 1 and not lines[0].endswith(("\n", "\r", "\r\n", "\x0b", "\x0c")):
529 warnings.warn(
532 warnings.warn(
530 "`make_tokens_by_line` received a list of lines which do not have lineending markers ('\\n', '\\r', '\\r\\n', '\\x0b', '\\x0c'), behavior will be unspecified",
533 "`make_tokens_by_line` received a list of lines which do not have lineending markers ('\\n', '\\r', '\\r\\n', '\\x0b', '\\x0c'), behavior will be unspecified",
531 stacklevel=2,
534 stacklevel=2,
532 )
535 )
533 parenlev = 0
536 parenlev = 0
534 try:
537 try:
535 for token in tokenutil.generate_tokens_catch_errors(
538 for token in tokenutil.generate_tokens_catch_errors(
536 iter(lines).__next__, extra_errors_to_catch=["expected EOF"]
539 iter(lines).__next__, extra_errors_to_catch=["expected EOF"]
537 ):
540 ):
538 tokens_by_line[-1].append(token)
541 tokens_by_line[-1].append(token)
539 if (token.type == NEWLINE) \
542 if (token.type == NEWLINE) \
540 or ((token.type == NL) and (parenlev <= 0)):
543 or ((token.type == NL) and (parenlev <= 0)):
541 tokens_by_line.append([])
544 tokens_by_line.append([])
542 elif token.string in {'(', '[', '{'}:
545 elif token.string in {'(', '[', '{'}:
543 parenlev += 1
546 parenlev += 1
544 elif token.string in {')', ']', '}'}:
547 elif token.string in {')', ']', '}'}:
545 if parenlev > 0:
548 if parenlev > 0:
546 parenlev -= 1
549 parenlev -= 1
547 except tokenize.TokenError:
550 except tokenize.TokenError:
548 # Input ended in a multiline string or expression. That's OK for us.
551 # Input ended in a multiline string or expression. That's OK for us.
549 pass
552 pass
550
553
551
554
552 if not tokens_by_line[-1]:
555 if not tokens_by_line[-1]:
553 tokens_by_line.pop()
556 tokens_by_line.pop()
554
557
555
558
556 return tokens_by_line
559 return tokens_by_line
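A short sketch of the grouping: a parenthesised expression spanning two physical lines lands in one token group, while the next statement starts a new one (exact token lists depend on the Python version):

    from IPython.core.inputtransformer2 import make_tokens_by_line

    lines = "x = (1 +\n     2)\nprint(x)\n".splitlines(keepends=True)
    for group in make_tokens_by_line(lines):
        print([tok.string for tok in group])
    # first group: tokens for both physical lines of "x = (1 + 2)"
    # second group: tokens for "print(x)"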
557
560
558
561
559 def has_sunken_brackets(tokens: List[tokenize.TokenInfo]):
562 def has_sunken_brackets(tokens: List[tokenize.TokenInfo]):
560 """Check if the depth of brackets in the list of tokens drops below 0"""
563 """Check if the depth of brackets in the list of tokens drops below 0"""
561 parenlev = 0
564 parenlev = 0
562 for token in tokens:
565 for token in tokens:
563 if token.string in {"(", "[", "{"}:
566 if token.string in {"(", "[", "{"}:
564 parenlev += 1
567 parenlev += 1
565 elif token.string in {")", "]", "}"}:
568 elif token.string in {")", "]", "}"}:
566 parenlev -= 1
569 parenlev -= 1
567 if parenlev < 0:
570 if parenlev < 0:
568 return True
571 return True
569 return False
572 return False
570
573
571
574
572 def show_linewise_tokens(s: str):
575 def show_linewise_tokens(s: str):
573 """For investigation and debugging"""
576 """For investigation and debugging"""
574 warnings.warn(
577 warnings.warn(
575 "show_linewise_tokens is deprecated since IPython 8.6",
578 "show_linewise_tokens is deprecated since IPython 8.6",
576 DeprecationWarning,
579 DeprecationWarning,
577 stacklevel=2,
580 stacklevel=2,
578 )
581 )
579 if not s.endswith("\n"):
582 if not s.endswith("\n"):
580 s += "\n"
583 s += "\n"
581 lines = s.splitlines(keepends=True)
584 lines = s.splitlines(keepends=True)
582 for line in make_tokens_by_line(lines):
585 for line in make_tokens_by_line(lines):
583 print("Line -------")
586 print("Line -------")
584 for tokinfo in line:
587 for tokinfo in line:
585 print(" ", tokinfo)
588 print(" ", tokinfo)
586
589
587 # Arbitrary limit to prevent getting stuck in infinite loops
590 # Arbitrary limit to prevent getting stuck in infinite loops
588 TRANSFORM_LOOP_LIMIT = 500
591 TRANSFORM_LOOP_LIMIT = 500
589
592
590 class TransformerManager:
593 class TransformerManager:
591 """Applies various transformations to a cell or code block.
594 """Applies various transformations to a cell or code block.
592
595
593 The key methods for external use are ``transform_cell()``
596 The key methods for external use are ``transform_cell()``
594 and ``check_complete()``.
597 and ``check_complete()``.
595 """
598 """
596 def __init__(self):
599 def __init__(self):
597 self.cleanup_transforms = [
600 self.cleanup_transforms = [
598 leading_empty_lines,
601 leading_empty_lines,
599 leading_indent,
602 leading_indent,
600 classic_prompt,
603 classic_prompt,
601 ipython_prompt,
604 ipython_prompt,
602 ]
605 ]
603 self.line_transforms = [
606 self.line_transforms = [
604 cell_magic,
607 cell_magic,
605 ]
608 ]
606 self.token_transformers = [
609 self.token_transformers = [
607 MagicAssign,
610 MagicAssign,
608 SystemAssign,
611 SystemAssign,
609 EscapedCommand,
612 EscapedCommand,
610 HelpEnd,
613 HelpEnd,
611 ]
614 ]
612
615
613 def do_one_token_transform(self, lines):
616 def do_one_token_transform(self, lines):
614 """Find and run the transform earliest in the code.
617 """Find and run the transform earliest in the code.
615
618
616 Returns (changed, lines).
619 Returns (changed, lines).
617
620
618 This method is called repeatedly until changed is False, indicating
621 This method is called repeatedly until changed is False, indicating
619 that all available transformations are complete.
622 that all available transformations are complete.
620
623
621 The tokens following IPython special syntax might not be valid, so
624 The tokens following IPython special syntax might not be valid, so
622 the transformed code is retokenised every time to identify the next
625 the transformed code is retokenised every time to identify the next
623 piece of special syntax. Hopefully long code cells are mostly valid
626 piece of special syntax. Hopefully long code cells are mostly valid
624 Python, not using lots of IPython special syntax, so this shouldn't be
627 Python, not using lots of IPython special syntax, so this shouldn't be
625 a performance issue.
628 a performance issue.
626 """
629 """
627 tokens_by_line = make_tokens_by_line(lines)
630 tokens_by_line = make_tokens_by_line(lines)
628 candidates = []
631 candidates = []
629 for transformer_cls in self.token_transformers:
632 for transformer_cls in self.token_transformers:
630 transformer = transformer_cls.find(tokens_by_line)
633 transformer = transformer_cls.find(tokens_by_line)
631 if transformer:
634 if transformer:
632 candidates.append(transformer)
635 candidates.append(transformer)
633
636
634 if not candidates:
637 if not candidates:
635 # Nothing to transform
638 # Nothing to transform
636 return False, lines
639 return False, lines
637 ordered_transformers = sorted(candidates, key=TokenTransformBase.sortby)
640 ordered_transformers = sorted(candidates, key=TokenTransformBase.sortby)
638 for transformer in ordered_transformers:
641 for transformer in ordered_transformers:
639 try:
642 try:
640 return True, transformer.transform(lines)
643 return True, transformer.transform(lines)
641 except SyntaxError:
644 except SyntaxError:
642 pass
645 pass
643 return False, lines
646 return False, lines
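The priority ordering matters when two transformers match at the same position: "%timeit?" is claimed by HelpEnd (priority 5) ahead of EscapedCommand (priority 10), so it becomes an inspection call rather than a magic call (sketch):

    from IPython.core.inputtransformer2 import TransformerManager

    TransformerManager().transform_cell("%timeit?\n")
    # -> "get_ipython().run_line_magic('pinfo', '%timeit')\n"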
644
647
645 def do_token_transforms(self, lines):
648 def do_token_transforms(self, lines):
646 for _ in range(TRANSFORM_LOOP_LIMIT):
649 for _ in range(TRANSFORM_LOOP_LIMIT):
647 changed, lines = self.do_one_token_transform(lines)
650 changed, lines = self.do_one_token_transform(lines)
648 if not changed:
651 if not changed:
649 return lines
652 return lines
650
653
651 raise RuntimeError("Input transformation still changing after "
654 raise RuntimeError("Input transformation still changing after "
652 "%d iterations. Aborting." % TRANSFORM_LOOP_LIMIT)
655 "%d iterations. Aborting." % TRANSFORM_LOOP_LIMIT)
653
656
654 def transform_cell(self, cell: str) -> str:
657 def transform_cell(self, cell: str) -> str:
655 """Transforms a cell of input code"""
658 """Transforms a cell of input code"""
656 if not cell.endswith('\n'):
659 if not cell.endswith('\n'):
657 cell += '\n' # Ensure the cell has a trailing newline
660 cell += '\n' # Ensure the cell has a trailing newline
658 lines = cell.splitlines(keepends=True)
661 lines = cell.splitlines(keepends=True)
659 for transform in self.cleanup_transforms + self.line_transforms:
662 for transform in self.cleanup_transforms + self.line_transforms:
660 lines = transform(lines)
663 lines = transform(lines)
661
664
662 lines = self.do_token_transforms(lines)
665 lines = self.do_token_transforms(lines)
663 return ''.join(lines)
666 return ''.join(lines)
664
667
665 def check_complete(self, cell: str):
668 def check_complete(self, cell: str):
666 """Return whether a block of code is ready to execute, or should be continued
669 """Return whether a block of code is ready to execute, or should be continued
667
670
668 Parameters
671 Parameters
669 ----------
672 ----------
670 cell : string
673 cell : string
671 Python input code, which can be multiline.
674 Python input code, which can be multiline.
672
675
673 Returns
676 Returns
674 -------
677 -------
675 status : str
678 status : str
676 One of 'complete', 'incomplete', or 'invalid' if source is not a
679 One of 'complete', 'incomplete', or 'invalid' if source is not a
677 prefix of valid code.
680 prefix of valid code.
678 indent_spaces : int or None
681 indent_spaces : int or None
679 The number of spaces by which to indent the next line of code. If
682 The number of spaces by which to indent the next line of code. If
680 status is not 'incomplete', this is None.
683 status is not 'incomplete', this is None.
681 """
684 """
682 # Remember if the line ends in a newline.
685 # Remember if the line ends in a newline.
683 ends_with_newline = False
686 ends_with_newline = False
684 for character in reversed(cell):
687 for character in reversed(cell):
685 if character == '\n':
688 if character == '\n':
686 ends_with_newline = True
689 ends_with_newline = True
687 break
690 break
688 elif character.strip():
691 elif character.strip():
689 break
692 break
690 else:
693 else:
691 continue
694 continue
692
695
693 if not ends_with_newline:
696 if not ends_with_newline:
694 # Append a newline for consistent tokenization
697 # Append a newline for consistent tokenization
695 # See https://bugs.python.org/issue33899
698 # See https://bugs.python.org/issue33899
696 cell += '\n'
699 cell += '\n'
697
700
698 lines = cell.splitlines(keepends=True)
701 lines = cell.splitlines(keepends=True)
699
702
700 if not lines:
703 if not lines:
701 return 'complete', None
704 return 'complete', None
702
705
703 for line in reversed(lines):
706 for line in reversed(lines):
704 if not line.strip():
707 if not line.strip():
705 continue
708 continue
706 elif line.strip("\n").endswith("\\"):
709 elif line.strip("\n").endswith("\\"):
707 return "incomplete", find_last_indent(lines)
710 return "incomplete", find_last_indent(lines)
708 else:
711 else:
709 break
712 break
710
713
711 try:
714 try:
712 for transform in self.cleanup_transforms:
715 for transform in self.cleanup_transforms:
713 if not getattr(transform, 'has_side_effects', False):
716 if not getattr(transform, 'has_side_effects', False):
714 lines = transform(lines)
717 lines = transform(lines)
715 except SyntaxError:
718 except SyntaxError:
716 return 'invalid', None
719 return 'invalid', None
717
720
718 if lines[0].startswith('%%'):
721 if lines[0].startswith('%%'):
719 # Special case for cell magics - completion marked by blank line
722 # Special case for cell magics - completion marked by blank line
720 if lines[-1].strip():
723 if lines[-1].strip():
721 return 'incomplete', find_last_indent(lines)
724 return 'incomplete', find_last_indent(lines)
722 else:
725 else:
723 return 'complete', None
726 return 'complete', None
724
727
725 try:
728 try:
726 for transform in self.line_transforms:
729 for transform in self.line_transforms:
727 if not getattr(transform, 'has_side_effects', False):
730 if not getattr(transform, 'has_side_effects', False):
728 lines = transform(lines)
731 lines = transform(lines)
729 lines = self.do_token_transforms(lines)
732 lines = self.do_token_transforms(lines)
730 except SyntaxError:
733 except SyntaxError:
731 return 'invalid', None
734 return 'invalid', None
732
735
733 tokens_by_line = make_tokens_by_line(lines)
736 tokens_by_line = make_tokens_by_line(lines)
734
737
735 # Bail if we got one line and there are more closing parentheses than
738 # Bail if we got one line and there are more closing parentheses than
736 # the opening ones
739 # the opening ones
737 if (
740 if (
738 len(lines) == 1
741 len(lines) == 1
739 and tokens_by_line
742 and tokens_by_line
740 and has_sunken_brackets(tokens_by_line[0])
743 and has_sunken_brackets(tokens_by_line[0])
741 ):
744 ):
742 return "invalid", None
745 return "invalid", None
743
746
744 if not tokens_by_line:
747 if not tokens_by_line:
745 return 'incomplete', find_last_indent(lines)
748 return 'incomplete', find_last_indent(lines)
746
749
747 if (
750 if (
748 tokens_by_line[-1][-1].type != tokenize.ENDMARKER
751 tokens_by_line[-1][-1].type != tokenize.ENDMARKER
749 and tokens_by_line[-1][-1].type != tokenize.ERRORTOKEN
752 and tokens_by_line[-1][-1].type != tokenize.ERRORTOKEN
750 ):
753 ):
751 # We're in a multiline string or expression
754 # We're in a multiline string or expression
752 return 'incomplete', find_last_indent(lines)
755 return 'incomplete', find_last_indent(lines)
753
756
754 newline_types = {tokenize.NEWLINE, tokenize.COMMENT, tokenize.ENDMARKER} # type: ignore
757 newline_types = {tokenize.NEWLINE, tokenize.COMMENT, tokenize.ENDMARKER} # type: ignore
755
758
756 # Pop the last line which only contains DEDENTs and ENDMARKER
759 # Pop the last line which only contains DEDENTs and ENDMARKER
757 last_token_line = None
760 last_token_line = None
758 if {t.type for t in tokens_by_line[-1]} in [
761 if {t.type for t in tokens_by_line[-1]} in [
759 {tokenize.DEDENT, tokenize.ENDMARKER},
762 {tokenize.DEDENT, tokenize.ENDMARKER},
760 {tokenize.ENDMARKER}
763 {tokenize.ENDMARKER}
761 ] and len(tokens_by_line) > 1:
764 ] and len(tokens_by_line) > 1:
762 last_token_line = tokens_by_line.pop()
765 last_token_line = tokens_by_line.pop()
763
766
764 while tokens_by_line[-1] and tokens_by_line[-1][-1].type in newline_types:
767 while tokens_by_line[-1] and tokens_by_line[-1][-1].type in newline_types:
765 tokens_by_line[-1].pop()
768 tokens_by_line[-1].pop()
766
769
767 if not tokens_by_line[-1]:
770 if not tokens_by_line[-1]:
768 return 'incomplete', find_last_indent(lines)
771 return 'incomplete', find_last_indent(lines)
769
772
770 if tokens_by_line[-1][-1].string == ':':
773 if tokens_by_line[-1][-1].string == ':':
771 # The last line starts a block (e.g. 'if foo:')
774 # The last line starts a block (e.g. 'if foo:')
772 ix = 0
775 ix = 0
773 while tokens_by_line[-1][ix].type in {tokenize.INDENT, tokenize.DEDENT}:
776 while tokens_by_line[-1][ix].type in {tokenize.INDENT, tokenize.DEDENT}:
774 ix += 1
777 ix += 1
775
778
776 indent = tokens_by_line[-1][ix].start[1]
779 indent = tokens_by_line[-1][ix].start[1]
777 return 'incomplete', indent + 4
780 return 'incomplete', indent + 4
778
781
779 if tokens_by_line[-1][0].line.endswith('\\'):
782 if tokens_by_line[-1][0].line.endswith('\\'):
780 return 'incomplete', None
783 return 'incomplete', None
781
784
782 # At this point, our checks think the code is complete (or invalid).
785 # At this point, our checks think the code is complete (or invalid).
783 # We'll use codeop.compile_command to check this with the real parser
786 # We'll use codeop.compile_command to check this with the real parser
784 try:
787 try:
785 with warnings.catch_warnings():
788 with warnings.catch_warnings():
786 warnings.simplefilter('error', SyntaxWarning)
789 warnings.simplefilter('error', SyntaxWarning)
787 res = compile_command(''.join(lines), symbol='exec')
790 res = compile_command(''.join(lines), symbol='exec')
788 except (SyntaxError, OverflowError, ValueError, TypeError,
791 except (SyntaxError, OverflowError, ValueError, TypeError,
789 MemoryError, SyntaxWarning):
792 MemoryError, SyntaxWarning):
790 return 'invalid', None
793 return 'invalid', None
791 else:
794 else:
792 if res is None:
795 if res is None:
793 return 'incomplete', find_last_indent(lines)
796 return 'incomplete', find_last_indent(lines)
794
797
795 if last_token_line and last_token_line[0].type == tokenize.DEDENT:
798 if last_token_line and last_token_line[0].type == tokenize.DEDENT:
796 if ends_with_newline:
799 if ends_with_newline:
797 return 'complete', None
800 return 'complete', None
798 return 'incomplete', find_last_indent(lines)
801 return 'incomplete', find_last_indent(lines)
799
802
800 # If there's a blank line at the end, assume we're ready to execute
803 # If there's a blank line at the end, assume we're ready to execute
801 if not lines[-1].strip():
804 if not lines[-1].strip():
802 return 'complete', None
805 return 'complete', None
803
806
804 return 'complete', None
807 return 'complete', None
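Typical check_complete results, for reference (a sketch):

    from IPython.core.inputtransformer2 import TransformerManager

    tm = TransformerManager()
    tm.check_complete("x = 1\n")              # -> ('complete', None)
    tm.check_complete("for i in range(3):")   # -> ('incomplete', 4)
    tm.check_complete("x = )")                # -> ('invalid', None)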
805
808
806
809
807 def find_last_indent(lines):
810 def find_last_indent(lines):
808 m = _indent_re.match(lines[-1])
811 m = _indent_re.match(lines[-1])
809 if not m:
812 if not m:
810 return 0
813 return 0
811 return len(m.group(0).replace('\t', ' '*4))
814 return len(m.group(0).replace('\t', ' '*4))
812
815
813
816
814 class MaybeAsyncCompile(Compile):
817 class MaybeAsyncCompile(Compile):
815 def __init__(self, extra_flags=0):
818 def __init__(self, extra_flags=0):
816 super().__init__()
819 super().__init__()
817 self.flags |= extra_flags
820 self.flags |= extra_flags
818
821
819
822
820 class MaybeAsyncCommandCompiler(CommandCompiler):
823 class MaybeAsyncCommandCompiler(CommandCompiler):
821 def __init__(self, extra_flags=0):
824 def __init__(self, extra_flags=0):
822 self.compiler = MaybeAsyncCompile(extra_flags=extra_flags)
825 self.compiler = MaybeAsyncCompile(extra_flags=extra_flags)
823
826
824
827
825 _extra_flags = ast.PyCF_ALLOW_TOP_LEVEL_AWAIT
828 _extra_flags = ast.PyCF_ALLOW_TOP_LEVEL_AWAIT
826
829
827 compile_command = MaybeAsyncCommandCompiler(extra_flags=_extra_flags)
830 compile_command = MaybeAsyncCommandCompiler(extra_flags=_extra_flags)