Some more optional typing to make mypy happy
Matthias Bussonnier
@@ -1,721 +1,724 @@
1 """Input transformer machinery to support IPython special syntax.
1 """Input transformer machinery to support IPython special syntax.
2
2
3 This includes the machinery to recognise and transform ``%magic`` commands,
3 This includes the machinery to recognise and transform ``%magic`` commands,
4 ``!system`` commands, ``help?`` querying, prompt stripping, and so forth.
4 ``!system`` commands, ``help?`` querying, prompt stripping, and so forth.
5
5
6 Added: IPython 7.0. Replaces inputsplitter and inputtransformer which were
6 Added: IPython 7.0. Replaces inputsplitter and inputtransformer which were
7 deprecated in 7.0.
7 deprecated in 7.0.
8 """
8 """
9
9
10 # Copyright (c) IPython Development Team.
10 # Copyright (c) IPython Development Team.
11 # Distributed under the terms of the Modified BSD License.
11 # Distributed under the terms of the Modified BSD License.
12
12
13 from codeop import compile_command
13 from codeop import compile_command
14 import re
14 import re
15 import tokenize
15 import tokenize
16 from typing import List, Tuple, Union
16 from typing import List, Tuple, Optional, Any
17 import warnings
17 import warnings
18
18
19 _indent_re = re.compile(r'^[ \t]+')
19 _indent_re = re.compile(r'^[ \t]+')
20
20
21 def leading_empty_lines(lines):
21 def leading_empty_lines(lines):
22 """Remove leading empty lines
22 """Remove leading empty lines
23
23
24 If the leading lines are empty or contain only whitespace, they will be
24 If the leading lines are empty or contain only whitespace, they will be
25 removed.
25 removed.
26 """
26 """
27 if not lines:
27 if not lines:
28 return lines
28 return lines
29 for i, line in enumerate(lines):
29 for i, line in enumerate(lines):
30 if line and not line.isspace():
30 if line and not line.isspace():
31 return lines[i:]
31 return lines[i:]
32 return lines
32 return lines
33
33
34 def leading_indent(lines):
34 def leading_indent(lines):
35 """Remove leading indentation.
35 """Remove leading indentation.
36
36
37 If the first line starts with a spaces or tabs, the same whitespace will be
37 If the first line starts with a spaces or tabs, the same whitespace will be
38 removed from each following line in the cell.
38 removed from each following line in the cell.
39 """
39 """
40 if not lines:
40 if not lines:
41 return lines
41 return lines
42 m = _indent_re.match(lines[0])
42 m = _indent_re.match(lines[0])
43 if not m:
43 if not m:
44 return lines
44 return lines
45 space = m.group(0)
45 space = m.group(0)
46 n = len(space)
46 n = len(space)
47 return [l[n:] if l.startswith(space) else l
47 return [l[n:] if l.startswith(space) else l
48 for l in lines]
48 for l in lines]
49
49
50 class PromptStripper:
50 class PromptStripper:
51 """Remove matching input prompts from a block of input.
51 """Remove matching input prompts from a block of input.
52
52
53 Parameters
53 Parameters
54 ----------
54 ----------
55 prompt_re : regular expression
55 prompt_re : regular expression
56 A regular expression matching any input prompt (including continuation,
56 A regular expression matching any input prompt (including continuation,
57 e.g. ``...``)
57 e.g. ``...``)
58 initial_re : regular expression, optional
58 initial_re : regular expression, optional
59 A regular expression matching only the initial prompt, but not continuation.
59 A regular expression matching only the initial prompt, but not continuation.
60 If no initial expression is given, prompt_re will be used everywhere.
60 If no initial expression is given, prompt_re will be used everywhere.
61 Used mainly for plain Python prompts (``>>>``), where the continuation prompt
61 Used mainly for plain Python prompts (``>>>``), where the continuation prompt
62 ``...`` is a valid Python expression in Python 3, so shouldn't be stripped.
62 ``...`` is a valid Python expression in Python 3, so shouldn't be stripped.
63
63
64 If initial_re and prompt_re differ,
64 If initial_re and prompt_re differ,
65 only initial_re will be tested against the first line.
65 only initial_re will be tested against the first line.
66 If any prompt is found on the first two lines,
66 If any prompt is found on the first two lines,
67 prompts will be stripped from the rest of the block.
67 prompts will be stripped from the rest of the block.
68 """
68 """
69 def __init__(self, prompt_re, initial_re=None):
69 def __init__(self, prompt_re, initial_re=None):
70 self.prompt_re = prompt_re
70 self.prompt_re = prompt_re
71 self.initial_re = initial_re or prompt_re
71 self.initial_re = initial_re or prompt_re
72
72
73 def _strip(self, lines):
73 def _strip(self, lines):
74 return [self.prompt_re.sub('', l, count=1) for l in lines]
74 return [self.prompt_re.sub('', l, count=1) for l in lines]
75
75
76 def __call__(self, lines):
76 def __call__(self, lines):
77 if not lines:
77 if not lines:
78 return lines
78 return lines
79 if self.initial_re.match(lines[0]) or \
79 if self.initial_re.match(lines[0]) or \
80 (len(lines) > 1 and self.prompt_re.match(lines[1])):
80 (len(lines) > 1 and self.prompt_re.match(lines[1])):
81 return self._strip(lines)
81 return self._strip(lines)
82 return lines
82 return lines
83
83
84 classic_prompt = PromptStripper(
84 classic_prompt = PromptStripper(
85 prompt_re=re.compile(r'^(>>>|\.\.\.)( |$)'),
85 prompt_re=re.compile(r'^(>>>|\.\.\.)( |$)'),
86 initial_re=re.compile(r'^>>>( |$)')
86 initial_re=re.compile(r'^>>>( |$)')
87 )
87 )
88
88
89 ipython_prompt = PromptStripper(re.compile(r'^(In \[\d+\]: |\s*\.{3,}: ?)'))
89 ipython_prompt = PromptStripper(re.compile(r'^(In \[\d+\]: |\s*\.{3,}: ?)'))
90
90
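For illustration only, not part of the commit: the two strippers defined above are plain callables operating on lists of lines, so their behaviour is easy to check in isolation. The import below assumes the module's usual location in IPython 7+, IPython.core.inputtransformer2.

# Illustration, not part of the diff (the import path is an assumption).
from IPython.core.inputtransformer2 import classic_prompt, ipython_prompt

pasted = [">>> x = 1\n", ">>> y = 2\n"]
print(classic_prompt(pasted))        # ['x = 1\n', 'y = 2\n']

session = ["In [1]: x = 1\n"]
print(ipython_prompt(session))       # ['x = 1\n']
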
def cell_magic(lines):
    if not lines or not lines[0].startswith('%%'):
        return lines
    if re.match(r'%%\w+\?', lines[0]):
        # This case will be handled by help_end
        return lines
    magic_name, _, first_line = lines[0][2:-1].partition(' ')
    body = ''.join(lines[1:])
    return ['get_ipython().run_cell_magic(%r, %r, %r)\n'
            % (magic_name, first_line, body)]

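A quick sketch, not part of the commit, of what cell_magic produces for a %%-cell; the remaining lines of the cell are passed through verbatim as the third argument:

# Illustration, not part of the diff.
from IPython.core.inputtransformer2 import cell_magic

print(cell_magic(["%%writefile out.txt\n", "hello\n"]))
# ["get_ipython().run_cell_magic('writefile', 'out.txt', 'hello\\n')\n"]
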

-def _find_assign_op(token_line) -> Union[int, None]:
+def _find_assign_op(token_line) -> Optional[int]:
    """Get the index of the first assignment in the line ('=' not inside brackets)

    Note: We don't try to support multiple special assignment (a = b = %foo)
    """
    paren_level = 0
    for i, ti in enumerate(token_line):
        s = ti.string
        if s == '=' and paren_level == 0:
            return i
        if s in {'(','[','{'}:
            paren_level += 1
        elif s in {')', ']', '}'}:
            if paren_level > 0:
                paren_level -= 1
+    return None

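The two changes above are equivalent at runtime: Optional[int] is shorthand for Union[int, None], and the explicit trailing return None spells out the fall-through path that was previously implicit. A minimal standalone sketch of the same pattern, not taken from the IPython code base, that a type checker such as mypy accepts:

# Illustration, not part of the diff.
from typing import Optional

def first_positive_index(values) -> Optional[int]:
    """Return the index of the first positive value, or None if there is none."""
    for i, v in enumerate(values):
        if v > 0:
            return i
    return None

assert first_positive_index([-3, 5]) == 1
assert first_positive_index([-3, -5]) is None
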
def find_end_of_continued_line(lines, start_line: int):
    """Find the last line of a line explicitly extended using backslashes.

    Uses 0-indexed line numbers.
    """
    end_line = start_line
    while lines[end_line].endswith('\\\n'):
        end_line += 1
        if end_line >= len(lines):
            break
    return end_line

def assemble_continued_line(lines, start: Tuple[int, int], end_line: int):
    r"""Assemble a single line from multiple continued line pieces

    Continued lines are lines ending in ``\``, and the line following the last
    ``\`` in the block.

    For example, this code continues over multiple lines::

        if (assign_ix is not None) \
             and (len(line) >= assign_ix + 2) \
             and (line[assign_ix+1].string == '%') \
             and (line[assign_ix+2].type == tokenize.NAME):

    This statement contains four continued line pieces.
    Assembling these pieces into a single line would give::

        if (assign_ix is not None) and (len(line) >= assign_ix + 2) and (line[...

    This uses 0-indexed line numbers. *start* is (lineno, colno).

    Used to allow ``%magic`` and ``!system`` commands to be continued over
    multiple lines.
    """
    parts = [lines[start[0]][start[1]:]] + lines[start[0]+1:end_line+1]
    return ' '.join([p[:-2] for p in parts[:-1]]  # Strip backslash+newline
                    + [parts[-1][:-1]])           # Strip newline from last line

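For illustration, not part of the commit, here is how the two helpers above combine on a backslash-continued system command (both imported from this module, assuming its usual location):

# Illustration, not part of the diff.
from IPython.core.inputtransformer2 import (
    assemble_continued_line,
    find_end_of_continued_line,
)

lines = ["!pip install\\\n", "numpy\n"]
end = find_end_of_continued_line(lines, 0)           # -> 1
print(assemble_continued_line(lines, (0, 0), end))   # !pip install numpy
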
class TokenTransformBase:
    """Base class for transformations which examine tokens.

    Special syntax should not be transformed when it occurs inside strings or
    comments. This is hard to reliably avoid with regexes. The solution is to
    tokenise the code as Python, and recognise the special syntax in the tokens.

    IPython's special syntax is not valid Python syntax, so tokenising may go
    wrong after the special syntax starts. These classes therefore find and
    transform *one* instance of special syntax at a time into regular Python
    syntax. After each transformation, tokens are regenerated to find the next
    piece of special syntax.

    Subclasses need to implement one class method (find)
    and one regular method (transform).

    The priority attribute can select which transformation to apply if multiple
    transformers match in the same place. Lower numbers have higher priority.
    This allows "%magic?" to be turned into a help call rather than a magic call.
    """
    # Lower numbers -> higher priority (for matches in the same location)
    priority = 10

    def sortby(self):
        return self.start_line, self.start_col, self.priority

    def __init__(self, start):
        self.start_line = start[0] - 1   # Shift from 1-index to 0-index
        self.start_col = start[1]

    @classmethod
    def find(cls, tokens_by_line):
        """Find one instance of special syntax in the provided tokens.

        Tokens are grouped into logical lines for convenience,
        so it is easy to e.g. look at the first token of each line.
        *tokens_by_line* is a list of lists of tokenize.TokenInfo objects.

        This should return an instance of its class, pointing to the start
        position it has found, or None if it found no match.
        """
        raise NotImplementedError

    def transform(self, lines: List[str]):
        """Transform one instance of special syntax found by ``find()``

        Takes a list of strings representing physical lines,
        returns a similar list of transformed lines.
        """
        raise NotImplementedError

class MagicAssign(TokenTransformBase):
    """Transformer for assignments from magics (a = %foo)"""
    @classmethod
    def find(cls, tokens_by_line):
        """Find the first magic assignment (a = %foo) in the cell.
        """
        for line in tokens_by_line:
            assign_ix = _find_assign_op(line)
            if (assign_ix is not None) \
                    and (len(line) >= assign_ix + 2) \
                    and (line[assign_ix+1].string == '%') \
                    and (line[assign_ix+2].type == tokenize.NAME):
                return cls(line[assign_ix+1].start)

    def transform(self, lines: List[str]):
        """Transform a magic assignment found by the ``find()`` classmethod.
        """
        start_line, start_col = self.start_line, self.start_col
        lhs = lines[start_line][:start_col]
        end_line = find_end_of_continued_line(lines, start_line)
        rhs = assemble_continued_line(lines, (start_line, start_col), end_line)
        assert rhs.startswith('%'), rhs
        magic_name, _, args = rhs[1:].partition(' ')

        lines_before = lines[:start_line]
        call = "get_ipython().run_line_magic({!r}, {!r})".format(magic_name, args)
        new_line = lhs + call + '\n'
        lines_after = lines[end_line+1:]

        return lines_before + [new_line] + lines_after

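A sketch, not part of the commit, of MagicAssign driven by hand with make_tokens_by_line from the same module; %who_ls is a standard IPython line magic:

# Illustration, not part of the diff.
from IPython.core.inputtransformer2 import MagicAssign, make_tokens_by_line

lines = ["variables = %who_ls\n"]
match = MagicAssign.find(make_tokens_by_line(lines))
print(match.transform(lines))
# ["variables = get_ipython().run_line_magic('who_ls', '')\n"]
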

class SystemAssign(TokenTransformBase):
    """Transformer for assignments from system commands (a = !foo)"""
    @classmethod
    def find(cls, tokens_by_line):
        """Find the first system assignment (a = !foo) in the cell.
        """
        for line in tokens_by_line:
            assign_ix = _find_assign_op(line)
            if (assign_ix is not None) \
                    and not line[assign_ix].line.strip().startswith('=') \
                    and (len(line) >= assign_ix + 2) \
                    and (line[assign_ix + 1].type == tokenize.ERRORTOKEN):
                ix = assign_ix + 1

                while ix < len(line) and line[ix].type == tokenize.ERRORTOKEN:
                    if line[ix].string == '!':
                        return cls(line[ix].start)
                    elif not line[ix].string.isspace():
                        break
                    ix += 1

    def transform(self, lines: List[str]):
        """Transform a system assignment found by the ``find()`` classmethod.
        """
        start_line, start_col = self.start_line, self.start_col

        lhs = lines[start_line][:start_col]
        end_line = find_end_of_continued_line(lines, start_line)
        rhs = assemble_continued_line(lines, (start_line, start_col), end_line)
        assert rhs.startswith('!'), rhs
        cmd = rhs[1:]

        lines_before = lines[:start_line]
        call = "get_ipython().getoutput({!r})".format(cmd)
        new_line = lhs + call + '\n'
        lines_after = lines[end_line + 1:]

        return lines_before + [new_line] + lines_after

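The same exercise for system-command assignments, not part of the commit. The '!' is not valid Python, so tokenize reports it as an ERRORTOKEN, which is exactly what find() looks for after the '=':

# Illustration, not part of the diff.
from IPython.core.inputtransformer2 import SystemAssign, make_tokens_by_line

lines = ["files = !ls\n"]
match = SystemAssign.find(make_tokens_by_line(lines))
print(match.transform(lines))
# ["files = get_ipython().getoutput('ls')\n"]
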
# The escape sequences that define the syntax transformations IPython will
# apply to user input. These can NOT be just changed here: many regular
# expressions and other parts of the code may use their hardcoded values, and
# for all intents and purposes they constitute the 'IPython syntax', so they
# should be considered fixed.

ESC_SHELL  = '!'     # Send line to underlying system shell
ESC_SH_CAP = '!!'    # Send line to system shell and capture output
ESC_HELP   = '?'     # Find information about object
ESC_HELP2  = '??'    # Find extra-detailed information about object
ESC_MAGIC  = '%'     # Call magic function
ESC_MAGIC2 = '%%'    # Call cell-magic function
ESC_QUOTE  = ','     # Split args on whitespace, quote each as string and call
ESC_QUOTE2 = ';'     # Quote all args as a single string, call
ESC_PAREN  = '/'     # Call first argument with rest of line as arguments

ESCAPE_SINGLES = {'!', '?', '%', ',', ';', '/'}
ESCAPE_DOUBLES = {'!!', '??'}  # %% (cell magic) is handled separately

def _make_help_call(target, esc, next_input=None):
    """Prepares a pinfo(2)/psearch call from a target name and the escape
    (i.e. ? or ??)"""
    method  = 'pinfo2' if esc == '??' \
                else 'psearch' if '*' in target \
                else 'pinfo'
    arg = " ".join([method, target])
    #Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
    t_magic_name, _, t_magic_arg_s = arg.partition(' ')
    t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
    if next_input is None:
        return 'get_ipython().run_line_magic(%r, %r)' % (t_magic_name, t_magic_arg_s)
    else:
        return 'get_ipython().set_next_input(%r);get_ipython().run_line_magic(%r, %r)' % \
               (next_input, t_magic_name, t_magic_arg_s)

def _tr_help(content):
    """Translate lines escaped with: ?

    A naked help line should fire the intro help screen (shell.show_usage())
    """
    if not content:
        return 'get_ipython().show_usage()'

    return _make_help_call(content, '?')

def _tr_help2(content):
    """Translate lines escaped with: ??

    A naked help line should fire the intro help screen (shell.show_usage())
    """
    if not content:
        return 'get_ipython().show_usage()'

    return _make_help_call(content, '??')

def _tr_magic(content):
    "Translate lines escaped with a percent sign: %"
    name, _, args = content.partition(' ')
    return 'get_ipython().run_line_magic(%r, %r)' % (name, args)

def _tr_quote(content):
    "Translate lines escaped with a comma: ,"
    name, _, args = content.partition(' ')
    return '%s("%s")' % (name, '", "'.join(args.split()) )

def _tr_quote2(content):
    "Translate lines escaped with a semicolon: ;"
    name, _, args = content.partition(' ')
    return '%s("%s")' % (name, args)

def _tr_paren(content):
    "Translate lines escaped with a slash: /"
    name, _, args = content.partition(' ')
    return '%s(%s)' % (name, ", ".join(args.split()))

tr = { ESC_SHELL  : 'get_ipython().system({!r})'.format,
       ESC_SH_CAP : 'get_ipython().getoutput({!r})'.format,
       ESC_HELP   : _tr_help,
       ESC_HELP2  : _tr_help2,
       ESC_MAGIC  : _tr_magic,
       ESC_QUOTE  : _tr_quote,
       ESC_QUOTE2 : _tr_quote2,
       ESC_PAREN  : _tr_paren }

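For illustration, not part of the commit: each entry of the tr table receives the line with its escape character already removed, so the less familiar quote and paren escapes expand like this:

# Illustration, not part of the diff.
from IPython.core.inputtransformer2 import tr

print(tr[","]("my_func a b"))   # my_func("a", "b")
print(tr["/"]("my_func a b"))   # my_func(a, b)
print(tr["!"]("ls -l"))         # get_ipython().system('ls -l')
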
class EscapedCommand(TokenTransformBase):
    """Transformer for escaped commands like %foo, !foo, or /foo"""
    @classmethod
    def find(cls, tokens_by_line):
        """Find the first escaped command (%foo, !foo, etc.) in the cell.
        """
        for line in tokens_by_line:
            if not line:
                continue
            ix = 0
            ll = len(line)
            while ll > ix and line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
                ix += 1
            if ix >= ll:
                continue
            if line[ix].string in ESCAPE_SINGLES:
                return cls(line[ix].start)

    def transform(self, lines):
        """Transform an escaped line found by the ``find()`` classmethod.
        """
        start_line, start_col = self.start_line, self.start_col

        indent = lines[start_line][:start_col]
        end_line = find_end_of_continued_line(lines, start_line)
        line = assemble_continued_line(lines, (start_line, start_col), end_line)

        if len(line) > 1 and line[:2] in ESCAPE_DOUBLES:
            escape, content = line[:2], line[2:]
        else:
            escape, content = line[:1], line[1:]

        if escape in tr:
            call = tr[escape](content)
        else:
            call = ''

        lines_before = lines[:start_line]
        new_line = indent + call + '\n'
        lines_after = lines[end_line + 1:]

        return lines_before + [new_line] + lines_after

_help_end_re = re.compile(r"""(%{0,2}
                              (?!\d)[\w*]+            # Variable name
                              (\.(?!\d)[\w*]+)*       # .etc.etc
                              )
                              (\?\??)$                # ? or ??
                              """,
                              re.VERBOSE)

class HelpEnd(TokenTransformBase):
    """Transformer for help syntax: obj? and obj??"""
    # This needs to be higher priority (lower number) than EscapedCommand so
    # that inspecting magics (%foo?) works.
    priority = 5

    def __init__(self, start, q_locn):
        super().__init__(start)
        self.q_line = q_locn[0] - 1  # Shift from 1-indexed to 0-indexed
        self.q_col = q_locn[1]

    @classmethod
    def find(cls, tokens_by_line):
        """Find the first help command (foo?) in the cell.
        """
        for line in tokens_by_line:
            # Last token is NEWLINE; look at last but one
            if len(line) > 2 and line[-2].string == '?':
                # Find the first token that's not INDENT/DEDENT
                ix = 0
                while line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
                    ix += 1
                return cls(line[ix].start, line[-2].start)

    def transform(self, lines):
        """Transform a help command found by the ``find()`` classmethod.
        """
        piece = ''.join(lines[self.start_line:self.q_line+1])
        indent, content = piece[:self.start_col], piece[self.start_col:]
        lines_before = lines[:self.start_line]
        lines_after = lines[self.q_line + 1:]

        m = _help_end_re.search(content)
        if not m:
            raise SyntaxError(content)
        assert m is not None, content
        target = m.group(1)
        esc = m.group(3)

        # If we're mid-command, put it back on the next prompt for the user.
        next_input = None
        if (not lines_before) and (not lines_after) \
                and content.strip() != m.group(0):
            next_input = content.rstrip('?\n')

        call = _make_help_call(target, esc, next_input=next_input)
        new_line = indent + call + '\n'

        return lines_before + [new_line] + lines_after

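A sketch, not part of the commit, of the help-call helper in isolation; whether it emits pinfo, pinfo2 or psearch depends only on the escape used and on a '*' wildcard in the target:

# Illustration, not part of the diff (_make_help_call is a private helper).
from IPython.core.inputtransformer2 import _make_help_call

print(_make_help_call('os.path', '?'))
# get_ipython().run_line_magic('pinfo', 'os.path')
print(_make_help_call('os.path', '??'))
# get_ipython().run_line_magic('pinfo2', 'os.path')
print(_make_help_call('os.*dir*', '?'))
# get_ipython().run_line_magic('psearch', 'os.*dir*')
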
def make_tokens_by_line(lines:List[str]):
    """Tokenize a series of lines and group tokens by line.

    The tokens for a multiline Python string or expression are grouped as one
    line. All lines except the last line should keep their line ending ('\\n',
    '\\r\\n') for this to properly work. Use `.splitlines(keepends=True)`
    for example when passing a block of text to this function.

    """
    # NL tokens are used inside multiline expressions, but also after blank
    # lines or comments. This is intentional - see https://bugs.python.org/issue17061
    # We want to group the former case together but split the latter, so we
    # track parentheses level, similar to the internals of tokenize.
-    NEWLINE, NL = tokenize.NEWLINE, tokenize.NL
-    tokens_by_line = [[]]
+
+    # reexported from token on 3.7+
+    NEWLINE, NL = tokenize.NEWLINE, tokenize.NL # type: ignore
+    tokens_by_line:List[List[Any]] = [[]]
    if len(lines) > 1 and not lines[0].endswith(('\n', '\r', '\r\n', '\x0b', '\x0c')):
        warnings.warn("`make_tokens_by_line` received a list of lines which do not have lineending markers ('\\n', '\\r', '\\r\\n', '\\x0b', '\\x0c'), behavior will be unspecified")
    parenlev = 0
    try:
        for token in tokenize.generate_tokens(iter(lines).__next__):
            tokens_by_line[-1].append(token)
            if (token.type == NEWLINE) \
                    or ((token.type == NL) and (parenlev <= 0)):
                tokens_by_line.append([])
            elif token.string in {'(', '[', '{'}:
                parenlev += 1
            elif token.string in {')', ']', '}'}:
                if parenlev > 0:
                    parenlev -= 1
    except tokenize.TokenError:
        # Input ended in a multiline string or expression. That's OK for us.
        pass


    if not tokens_by_line[-1]:
        tokens_by_line.pop()


    return tokens_by_line

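For illustration, not part of the commit: the NL/parenlev bookkeeping above means a bracketed expression spanning several physical lines lands in a single token group, while separate statements get their own groups:

# Illustration, not part of the diff.
from IPython.core.inputtransformer2 import make_tokens_by_line

src = "d = {\n    'a': 1,\n}\nprint(d)\n"
for group in make_tokens_by_line(src.splitlines(keepends=True)):
    print([tok.string for tok in group])
# The three physical lines of the dict literal come out as one group;
# print(d) and the trailing ENDMARKER form their own groups.
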
def show_linewise_tokens(s: str):
    """For investigation and debugging"""
    if not s.endswith('\n'):
        s += '\n'
    lines = s.splitlines(keepends=True)
    for line in make_tokens_by_line(lines):
        print("Line -------")
        for tokinfo in line:
            print(" ", tokinfo)

# Arbitrary limit to prevent getting stuck in infinite loops
TRANSFORM_LOOP_LIMIT = 500

class TransformerManager:
    """Applies various transformations to a cell or code block.

    The key methods for external use are ``transform_cell()``
    and ``check_complete()``.
    """
    def __init__(self):
        self.cleanup_transforms = [
            leading_empty_lines,
            leading_indent,
            classic_prompt,
            ipython_prompt,
        ]
        self.line_transforms = [
            cell_magic,
        ]
        self.token_transformers = [
            MagicAssign,
            SystemAssign,
            EscapedCommand,
            HelpEnd,
        ]

    def do_one_token_transform(self, lines):
        """Find and run the transform earliest in the code.

        Returns (changed, lines).

        This method is called repeatedly until changed is False, indicating
        that all available transformations are complete.

        The tokens following IPython special syntax might not be valid, so
        the transformed code is retokenised every time to identify the next
        piece of special syntax. Hopefully long code cells are mostly valid
        Python, not using lots of IPython special syntax, so this shouldn't be
        a performance issue.
        """
        tokens_by_line = make_tokens_by_line(lines)
        candidates = []
        for transformer_cls in self.token_transformers:
            transformer = transformer_cls.find(tokens_by_line)
            if transformer:
                candidates.append(transformer)

        if not candidates:
            # Nothing to transform
            return False, lines
        ordered_transformers = sorted(candidates, key=TokenTransformBase.sortby)
        for transformer in ordered_transformers:
            try:
                return True, transformer.transform(lines)
            except SyntaxError:
                pass
        return False, lines

    def do_token_transforms(self, lines):
        for _ in range(TRANSFORM_LOOP_LIMIT):
            changed, lines = self.do_one_token_transform(lines)
            if not changed:
                return lines

        raise RuntimeError("Input transformation still changing after "
                           "%d iterations. Aborting." % TRANSFORM_LOOP_LIMIT)

    def transform_cell(self, cell: str) -> str:
        """Transforms a cell of input code"""
        if not cell.endswith('\n'):
            cell += '\n'  # Ensure the cell has a trailing newline
        lines = cell.splitlines(keepends=True)
        for transform in self.cleanup_transforms + self.line_transforms:
            lines = transform(lines)

        lines = self.do_token_transforms(lines)
        return ''.join(lines)

594 """Return whether a block of code is ready to execute, or should be continued
597 """Return whether a block of code is ready to execute, or should be continued
595
598
596 Parameters
599 Parameters
597 ----------
600 ----------
598 source : string
601 source : string
599 Python input code, which can be multiline.
602 Python input code, which can be multiline.
600
603
601 Returns
604 Returns
602 -------
605 -------
603 status : str
606 status : str
604 One of 'complete', 'incomplete', or 'invalid' if source is not a
607 One of 'complete', 'incomplete', or 'invalid' if source is not a
605 prefix of valid code.
608 prefix of valid code.
606 indent_spaces : int or None
609 indent_spaces : int or None
607 The number of spaces by which to indent the next line of code. If
610 The number of spaces by which to indent the next line of code. If
608 status is not 'incomplete', this is None.
611 status is not 'incomplete', this is None.
609 """
612 """
610 # Remember if the lines ends in a new line.
613 # Remember if the lines ends in a new line.
611 ends_with_newline = False
614 ends_with_newline = False
612 for character in reversed(cell):
615 for character in reversed(cell):
613 if character == '\n':
616 if character == '\n':
614 ends_with_newline = True
617 ends_with_newline = True
615 break
618 break
616 elif character.strip():
619 elif character.strip():
617 break
620 break
618 else:
621 else:
619 continue
622 continue
620
623
621 if not ends_with_newline:
624 if not ends_with_newline:
622 # Append an newline for consistent tokenization
625 # Append an newline for consistent tokenization
623 # See https://bugs.python.org/issue33899
626 # See https://bugs.python.org/issue33899
624 cell += '\n'
627 cell += '\n'
625
628
626 lines = cell.splitlines(keepends=True)
629 lines = cell.splitlines(keepends=True)
627
630
628 if not lines:
631 if not lines:
629 return 'complete', None
632 return 'complete', None
630
633
631 if lines[-1].endswith('\\'):
634 if lines[-1].endswith('\\'):
632 # Explicit backslash continuation
635 # Explicit backslash continuation
633 return 'incomplete', find_last_indent(lines)
636 return 'incomplete', find_last_indent(lines)
634
637
635 try:
638 try:
636 for transform in self.cleanup_transforms:
639 for transform in self.cleanup_transforms:
637 lines = transform(lines)
640 lines = transform(lines)
638 except SyntaxError:
641 except SyntaxError:
639 return 'invalid', None
642 return 'invalid', None
640
643
641 if lines[0].startswith('%%'):
644 if lines[0].startswith('%%'):
642 # Special case for cell magics - completion marked by blank line
645 # Special case for cell magics - completion marked by blank line
643 if lines[-1].strip():
646 if lines[-1].strip():
644 return 'incomplete', find_last_indent(lines)
647 return 'incomplete', find_last_indent(lines)
645 else:
648 else:
646 return 'complete', None
649 return 'complete', None
647
650
648 try:
651 try:
649 for transform in self.line_transforms:
652 for transform in self.line_transforms:
650 lines = transform(lines)
653 lines = transform(lines)
651 lines = self.do_token_transforms(lines)
654 lines = self.do_token_transforms(lines)
652 except SyntaxError:
655 except SyntaxError:
653 return 'invalid', None
656 return 'invalid', None
654
657
655 tokens_by_line = make_tokens_by_line(lines)
658 tokens_by_line = make_tokens_by_line(lines)
656
659
657 if not tokens_by_line:
660 if not tokens_by_line:
658 return 'incomplete', find_last_indent(lines)
661 return 'incomplete', find_last_indent(lines)
659
662
660 if tokens_by_line[-1][-1].type != tokenize.ENDMARKER:
663 if tokens_by_line[-1][-1].type != tokenize.ENDMARKER:
661 # We're in a multiline string or expression
664 # We're in a multiline string or expression
662 return 'incomplete', find_last_indent(lines)
665 return 'incomplete', find_last_indent(lines)
663
666
664 newline_types = {tokenize.NEWLINE, tokenize.COMMENT, tokenize.ENDMARKER}
667 newline_types = {tokenize.NEWLINE, tokenize.COMMENT, tokenize.ENDMARKER} # type: ignore
665
668
666 # Pop the last line which only contains DEDENTs and ENDMARKER
669 # Pop the last line which only contains DEDENTs and ENDMARKER
667 last_token_line = None
670 last_token_line = None
668 if {t.type for t in tokens_by_line[-1]} in [
671 if {t.type for t in tokens_by_line[-1]} in [
669 {tokenize.DEDENT, tokenize.ENDMARKER},
672 {tokenize.DEDENT, tokenize.ENDMARKER},
670 {tokenize.ENDMARKER}
673 {tokenize.ENDMARKER}
671 ] and len(tokens_by_line) > 1:
674 ] and len(tokens_by_line) > 1:
672 last_token_line = tokens_by_line.pop()
675 last_token_line = tokens_by_line.pop()
673
676
674 while tokens_by_line[-1] and tokens_by_line[-1][-1].type in newline_types:
677 while tokens_by_line[-1] and tokens_by_line[-1][-1].type in newline_types:
675 tokens_by_line[-1].pop()
678 tokens_by_line[-1].pop()
676
679
677 if not tokens_by_line[-1]:
680 if not tokens_by_line[-1]:
678 return 'incomplete', find_last_indent(lines)
681 return 'incomplete', find_last_indent(lines)
679
682
680 if tokens_by_line[-1][-1].string == ':':
683 if tokens_by_line[-1][-1].string == ':':
681 # The last line starts a block (e.g. 'if foo:')
684 # The last line starts a block (e.g. 'if foo:')
682 ix = 0
685 ix = 0
683 while tokens_by_line[-1][ix].type in {tokenize.INDENT, tokenize.DEDENT}:
686 while tokens_by_line[-1][ix].type in {tokenize.INDENT, tokenize.DEDENT}:
684 ix += 1
687 ix += 1
685
688
686 indent = tokens_by_line[-1][ix].start[1]
689 indent = tokens_by_line[-1][ix].start[1]
687 return 'incomplete', indent + 4
690 return 'incomplete', indent + 4
688
691
689 if tokens_by_line[-1][0].line.endswith('\\'):
692 if tokens_by_line[-1][0].line.endswith('\\'):
690 return 'incomplete', None
693 return 'incomplete', None
691
694
692 # At this point, our checks think the code is complete (or invalid).
695 # At this point, our checks think the code is complete (or invalid).
693 # We'll use codeop.compile_command to check this with the real parser
696 # We'll use codeop.compile_command to check this with the real parser
694 try:
697 try:
695 with warnings.catch_warnings():
698 with warnings.catch_warnings():
696 warnings.simplefilter('error', SyntaxWarning)
699 warnings.simplefilter('error', SyntaxWarning)
697 res = compile_command(''.join(lines), symbol='exec')
700 res = compile_command(''.join(lines), symbol='exec')
698 except (SyntaxError, OverflowError, ValueError, TypeError,
701 except (SyntaxError, OverflowError, ValueError, TypeError,
699 MemoryError, SyntaxWarning):
702 MemoryError, SyntaxWarning):
700 return 'invalid', None
703 return 'invalid', None
701 else:
704 else:
702 if res is None:
705 if res is None:
703 return 'incomplete', find_last_indent(lines)
706 return 'incomplete', find_last_indent(lines)
704
707
705 if last_token_line and last_token_line[0].type == tokenize.DEDENT:
708 if last_token_line and last_token_line[0].type == tokenize.DEDENT:
706 if ends_with_newline:
709 if ends_with_newline:
707 return 'complete', None
710 return 'complete', None
708 return 'incomplete', find_last_indent(lines)
711 return 'incomplete', find_last_indent(lines)
709
712
710 # If there's a blank line at the end, assume we're ready to execute
713 # If there's a blank line at the end, assume we're ready to execute
711 if not lines[-1].strip():
714 if not lines[-1].strip():
712 return 'complete', None
715 return 'complete', None
713
716
714 return 'complete', None
717 return 'complete', None
715
718
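And the other public entry point: check_complete() returns a (status, indent_spaces) pair. A quick sketch, not part of the commit:

# Illustration, not part of the diff.
from IPython.core.inputtransformer2 import TransformerManager

mgr = TransformerManager()
print(mgr.check_complete("x = 1\n"))              # ('complete', None)
print(mgr.check_complete("for i in range(3):"))   # ('incomplete', 4)
print(mgr.check_complete("x = (1,"))              # ('incomplete', 0)
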

def find_last_indent(lines):
    m = _indent_re.match(lines[-1])
    if not m:
        return 0
    return len(m.group(0).replace('\t', ' '*4))
@@ -1,5 +1,5 @@
1 """
1 """
2 This package contains all third-party modules bundled with IPython.
2 This package contains all third-party modules bundled with IPython.
3 """
3 """
4
4
5 __all__ = []
5 __all__: List[str] = []
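A hedged note on this second hunk: at runtime a module-level annotation like this needs the name List to be in scope, and no import is visible in the five lines shown. The sketch below, which is not taken from the IPython source, adds the import so the annotated assignment stands on its own; on Python 3.9+ the built-in list[str] would avoid the import entirely.

# Illustration, not part of the diff. The typing import is an assumption added
# here for self-containment; it does not appear in the hunk above.
from typing import List

__all__: List[str] = []
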