Change logic to add a newline implicitly instead of removing the automatically added one
Nguyen Duy Hai
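
The change is in TransformerManager.check_complete(): a cell that does not already end with a newline now has one appended implicitly before tokenization (see https://bugs.python.org/issue33899, referenced in the diff), rather than the previous approach of special-casing the automatically added newline further down. A minimal sketch of the resulting behaviour, assuming an IPython 7.x TransformerManager; the values in the comments are illustrative, not output captured from this commit:

from IPython.core.inputtransformer2 import TransformerManager

tm = TransformerManager()

# A block opener should be reported as incomplete with a suggested indent,
# whether or not the source already ends with a newline, because
# check_complete() now normalises the trailing newline itself.
print(tm.check_complete("if True:"))      # ('incomplete', 4) expected
print(tm.check_complete("if True:\n"))    # ('incomplete', 4) expected

# A finished statement is complete and carries no indent suggestion.
print(tm.check_complete("a = 1\n"))       # ('complete', None) expected
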
@@ -1,719 +1,702 @@
1 """Input transformer machinery to support IPython special syntax.
1 """Input transformer machinery to support IPython special syntax.
2
2
3 This includes the machinery to recognise and transform ``%magic`` commands,
3 This includes the machinery to recognise and transform ``%magic`` commands,
4 ``!system`` commands, ``help?`` querying, prompt stripping, and so forth.
4 ``!system`` commands, ``help?`` querying, prompt stripping, and so forth.
5
5
6 Added: IPython 7.0. Replaces inputsplitter and inputtransformer which were
6 Added: IPython 7.0. Replaces inputsplitter and inputtransformer which were
7 deprecated in 7.0.
7 deprecated in 7.0.
8 """
8 """
9
9
10 # Copyright (c) IPython Development Team.
10 # Copyright (c) IPython Development Team.
11 # Distributed under the terms of the Modified BSD License.
11 # Distributed under the terms of the Modified BSD License.
12
12
13 from codeop import compile_command
13 from codeop import compile_command
14 import re
14 import re
15 import tokenize
15 import tokenize
16 from typing import List, Tuple
16 from typing import List, Tuple
17 import warnings
17 import warnings
18
18
19 _indent_re = re.compile(r'^[ \t]+')
19 _indent_re = re.compile(r'^[ \t]+')
20
20
21 def leading_indent(lines):
21 def leading_indent(lines):
22 """Remove leading indentation.
22 """Remove leading indentation.
23
23
24 If the first line starts with a spaces or tabs, the same whitespace will be
24 If the first line starts with a spaces or tabs, the same whitespace will be
25 removed from each following line in the cell.
25 removed from each following line in the cell.
26 """
26 """
27 if not lines:
27 if not lines:
28 return lines
28 return lines
29 m = _indent_re.match(lines[0])
29 m = _indent_re.match(lines[0])
30 if not m:
30 if not m:
31 return lines
31 return lines
32 space = m.group(0)
32 space = m.group(0)
33 n = len(space)
33 n = len(space)
34 return [l[n:] if l.startswith(space) else l
34 return [l[n:] if l.startswith(space) else l
35 for l in lines]
35 for l in lines]
36
36
37 class PromptStripper:
37 class PromptStripper:
38 """Remove matching input prompts from a block of input.
38 """Remove matching input prompts from a block of input.
39
39
40 Parameters
40 Parameters
41 ----------
41 ----------
42 prompt_re : regular expression
42 prompt_re : regular expression
43 A regular expression matching any input prompt (including continuation,
43 A regular expression matching any input prompt (including continuation,
44 e.g. ``...``)
44 e.g. ``...``)
45 initial_re : regular expression, optional
45 initial_re : regular expression, optional
46 A regular expression matching only the initial prompt, but not continuation.
46 A regular expression matching only the initial prompt, but not continuation.
47 If no initial expression is given, prompt_re will be used everywhere.
47 If no initial expression is given, prompt_re will be used everywhere.
48 Used mainly for plain Python prompts (``>>>``), where the continuation prompt
48 Used mainly for plain Python prompts (``>>>``), where the continuation prompt
49 ``...`` is a valid Python expression in Python 3, so shouldn't be stripped.
49 ``...`` is a valid Python expression in Python 3, so shouldn't be stripped.
50
50
51 If initial_re and prompt_re differ,
51 If initial_re and prompt_re differ,
52 only initial_re will be tested against the first line.
52 only initial_re will be tested against the first line.
53 If any prompt is found on the first two lines,
53 If any prompt is found on the first two lines,
54 prompts will be stripped from the rest of the block.
54 prompts will be stripped from the rest of the block.
55 """
55 """
56 def __init__(self, prompt_re, initial_re=None):
56 def __init__(self, prompt_re, initial_re=None):
57 self.prompt_re = prompt_re
57 self.prompt_re = prompt_re
58 self.initial_re = initial_re or prompt_re
58 self.initial_re = initial_re or prompt_re
59
59
60 def _strip(self, lines):
60 def _strip(self, lines):
61 return [self.prompt_re.sub('', l, count=1) for l in lines]
61 return [self.prompt_re.sub('', l, count=1) for l in lines]
62
62
63 def __call__(self, lines):
63 def __call__(self, lines):
64 if not lines:
64 if not lines:
65 return lines
65 return lines
66 if self.initial_re.match(lines[0]) or \
66 if self.initial_re.match(lines[0]) or \
67 (len(lines) > 1 and self.prompt_re.match(lines[1])):
67 (len(lines) > 1 and self.prompt_re.match(lines[1])):
68 return self._strip(lines)
68 return self._strip(lines)
69 return lines
69 return lines
70
70
71 classic_prompt = PromptStripper(
71 classic_prompt = PromptStripper(
72 prompt_re=re.compile(r'^(>>>|\.\.\.)( |$)'),
72 prompt_re=re.compile(r'^(>>>|\.\.\.)( |$)'),
73 initial_re=re.compile(r'^>>>( |$)')
73 initial_re=re.compile(r'^>>>( |$)')
74 )
74 )
75
75
76 ipython_prompt = PromptStripper(re.compile(r'^(In \[\d+\]: |\s*\.{3,}: ?)'))
76 ipython_prompt = PromptStripper(re.compile(r'^(In \[\d+\]: |\s*\.{3,}: ?)'))
77
77
78 def cell_magic(lines):
78 def cell_magic(lines):
79 if not lines or not lines[0].startswith('%%'):
79 if not lines or not lines[0].startswith('%%'):
80 return lines
80 return lines
81 if re.match('%%\w+\?', lines[0]):
81 if re.match('%%\w+\?', lines[0]):
82 # This case will be handled by help_end
82 # This case will be handled by help_end
83 return lines
83 return lines
84 magic_name, _, first_line = lines[0][2:-1].partition(' ')
84 magic_name, _, first_line = lines[0][2:-1].partition(' ')
85 body = ''.join(lines[1:])
85 body = ''.join(lines[1:])
86 return ['get_ipython().run_cell_magic(%r, %r, %r)\n'
86 return ['get_ipython().run_cell_magic(%r, %r, %r)\n'
87 % (magic_name, first_line, body)]
87 % (magic_name, first_line, body)]
88
88
89
89
90 def _find_assign_op(token_line):
90 def _find_assign_op(token_line):
91 """Get the index of the first assignment in the line ('=' not inside brackets)
91 """Get the index of the first assignment in the line ('=' not inside brackets)
92
92
93 Note: We don't try to support multiple special assignment (a = b = %foo)
93 Note: We don't try to support multiple special assignment (a = b = %foo)
94 """
94 """
95 paren_level = 0
95 paren_level = 0
96 for i, ti in enumerate(token_line):
96 for i, ti in enumerate(token_line):
97 s = ti.string
97 s = ti.string
98 if s == '=' and paren_level == 0:
98 if s == '=' and paren_level == 0:
99 return i
99 return i
100 if s in '([{':
100 if s in '([{':
101 paren_level += 1
101 paren_level += 1
102 elif s in ')]}':
102 elif s in ')]}':
103 if paren_level > 0:
103 if paren_level > 0:
104 paren_level -= 1
104 paren_level -= 1
105
105
106 def find_end_of_continued_line(lines, start_line: int):
106 def find_end_of_continued_line(lines, start_line: int):
107 """Find the last line of a line explicitly extended using backslashes.
107 """Find the last line of a line explicitly extended using backslashes.
108
108
109 Uses 0-indexed line numbers.
109 Uses 0-indexed line numbers.
110 """
110 """
111 end_line = start_line
111 end_line = start_line
112 while lines[end_line].endswith('\\\n'):
112 while lines[end_line].endswith('\\\n'):
113 end_line += 1
113 end_line += 1
114 if end_line >= len(lines):
114 if end_line >= len(lines):
115 break
115 break
116 return end_line
116 return end_line
117
117
118 def assemble_continued_line(lines, start: Tuple[int, int], end_line: int):
118 def assemble_continued_line(lines, start: Tuple[int, int], end_line: int):
119 """Assemble a single line from multiple continued line pieces
119 """Assemble a single line from multiple continued line pieces
120
120
121 Continued lines are lines ending in ``\``, and the line following the last
121 Continued lines are lines ending in ``\``, and the line following the last
122 ``\`` in the block.
122 ``\`` in the block.
123
123
124 For example, this code continues over multiple lines::
124 For example, this code continues over multiple lines::
125
125
126 if (assign_ix is not None) \
126 if (assign_ix is not None) \
127 and (len(line) >= assign_ix + 2) \
127 and (len(line) >= assign_ix + 2) \
128 and (line[assign_ix+1].string == '%') \
128 and (line[assign_ix+1].string == '%') \
129 and (line[assign_ix+2].type == tokenize.NAME):
129 and (line[assign_ix+2].type == tokenize.NAME):
130
130
131 This statement contains four continued line pieces.
131 This statement contains four continued line pieces.
132 Assembling these pieces into a single line would give::
132 Assembling these pieces into a single line would give::
133
133
134 if (assign_ix is not None) and (len(line) >= assign_ix + 2) and (line[...
134 if (assign_ix is not None) and (len(line) >= assign_ix + 2) and (line[...
135
135
136 This uses 0-indexed line numbers. *start* is (lineno, colno).
136 This uses 0-indexed line numbers. *start* is (lineno, colno).
137
137
138 Used to allow ``%magic`` and ``!system`` commands to be continued over
138 Used to allow ``%magic`` and ``!system`` commands to be continued over
139 multiple lines.
139 multiple lines.
140 """
140 """
141 parts = [lines[start[0]][start[1]:]] + lines[start[0]+1:end_line+1]
141 parts = [lines[start[0]][start[1]:]] + lines[start[0]+1:end_line+1]
142 return ' '.join([p[:-2] for p in parts[:-1]] # Strip backslash+newline
142 return ' '.join([p[:-2] for p in parts[:-1]] # Strip backslash+newline
143 + [parts[-1][:-1]]) # Strip newline from last line
143 + [parts[-1][:-1]]) # Strip newline from last line
144
144
145 class TokenTransformBase:
145 class TokenTransformBase:
146 """Base class for transformations which examine tokens.
146 """Base class for transformations which examine tokens.
147
147
148 Special syntax should not be transformed when it occurs inside strings or
148 Special syntax should not be transformed when it occurs inside strings or
149 comments. This is hard to reliably avoid with regexes. The solution is to
149 comments. This is hard to reliably avoid with regexes. The solution is to
150 tokenise the code as Python, and recognise the special syntax in the tokens.
150 tokenise the code as Python, and recognise the special syntax in the tokens.
151
151
152 IPython's special syntax is not valid Python syntax, so tokenising may go
152 IPython's special syntax is not valid Python syntax, so tokenising may go
153 wrong after the special syntax starts. These classes therefore find and
153 wrong after the special syntax starts. These classes therefore find and
154 transform *one* instance of special syntax at a time into regular Python
154 transform *one* instance of special syntax at a time into regular Python
155 syntax. After each transformation, tokens are regenerated to find the next
155 syntax. After each transformation, tokens are regenerated to find the next
156 piece of special syntax.
156 piece of special syntax.
157
157
158 Subclasses need to implement one class method (find)
158 Subclasses need to implement one class method (find)
159 and one regular method (transform).
159 and one regular method (transform).
160
160
161 The priority attribute can select which transformation to apply if multiple
161 The priority attribute can select which transformation to apply if multiple
162 transformers match in the same place. Lower numbers have higher priority.
162 transformers match in the same place. Lower numbers have higher priority.
163 This allows "%magic?" to be turned into a help call rather than a magic call.
163 This allows "%magic?" to be turned into a help call rather than a magic call.
164 """
164 """
165 # Lower numbers -> higher priority (for matches in the same location)
165 # Lower numbers -> higher priority (for matches in the same location)
166 priority = 10
166 priority = 10
167
167
168 def sortby(self):
168 def sortby(self):
169 return self.start_line, self.start_col, self.priority
169 return self.start_line, self.start_col, self.priority
170
170
171 def __init__(self, start):
171 def __init__(self, start):
172 self.start_line = start[0] - 1 # Shift from 1-index to 0-index
172 self.start_line = start[0] - 1 # Shift from 1-index to 0-index
173 self.start_col = start[1]
173 self.start_col = start[1]
174
174
175 @classmethod
175 @classmethod
176 def find(cls, tokens_by_line):
176 def find(cls, tokens_by_line):
177 """Find one instance of special syntax in the provided tokens.
177 """Find one instance of special syntax in the provided tokens.
178
178
179 Tokens are grouped into logical lines for convenience,
179 Tokens are grouped into logical lines for convenience,
180 so it is easy to e.g. look at the first token of each line.
180 so it is easy to e.g. look at the first token of each line.
181 *tokens_by_line* is a list of lists of tokenize.TokenInfo objects.
181 *tokens_by_line* is a list of lists of tokenize.TokenInfo objects.
182
182
183 This should return an instance of its class, pointing to the start
183 This should return an instance of its class, pointing to the start
184 position it has found, or None if it found no match.
184 position it has found, or None if it found no match.
185 """
185 """
186 raise NotImplementedError
186 raise NotImplementedError
187
187
188 def transform(self, lines: List[str]):
188 def transform(self, lines: List[str]):
189 """Transform one instance of special syntax found by ``find()``
189 """Transform one instance of special syntax found by ``find()``
190
190
191 Takes a list of strings representing physical lines,
191 Takes a list of strings representing physical lines,
192 returns a similar list of transformed lines.
192 returns a similar list of transformed lines.
193 """
193 """
194 raise NotImplementedError
194 raise NotImplementedError
195
195
196 class MagicAssign(TokenTransformBase):
196 class MagicAssign(TokenTransformBase):
197 """Transformer for assignments from magics (a = %foo)"""
197 """Transformer for assignments from magics (a = %foo)"""
198 @classmethod
198 @classmethod
199 def find(cls, tokens_by_line):
199 def find(cls, tokens_by_line):
200 """Find the first magic assignment (a = %foo) in the cell.
200 """Find the first magic assignment (a = %foo) in the cell.
201 """
201 """
202 for line in tokens_by_line:
202 for line in tokens_by_line:
203 assign_ix = _find_assign_op(line)
203 assign_ix = _find_assign_op(line)
204 if (assign_ix is not None) \
204 if (assign_ix is not None) \
205 and (len(line) >= assign_ix + 2) \
205 and (len(line) >= assign_ix + 2) \
206 and (line[assign_ix+1].string == '%') \
206 and (line[assign_ix+1].string == '%') \
207 and (line[assign_ix+2].type == tokenize.NAME):
207 and (line[assign_ix+2].type == tokenize.NAME):
208 return cls(line[assign_ix+1].start)
208 return cls(line[assign_ix+1].start)
209
209
210 def transform(self, lines: List[str]):
210 def transform(self, lines: List[str]):
211 """Transform a magic assignment found by the ``find()`` classmethod.
211 """Transform a magic assignment found by the ``find()`` classmethod.
212 """
212 """
213 start_line, start_col = self.start_line, self.start_col
213 start_line, start_col = self.start_line, self.start_col
214 lhs = lines[start_line][:start_col]
214 lhs = lines[start_line][:start_col]
215 end_line = find_end_of_continued_line(lines, start_line)
215 end_line = find_end_of_continued_line(lines, start_line)
216 rhs = assemble_continued_line(lines, (start_line, start_col), end_line)
216 rhs = assemble_continued_line(lines, (start_line, start_col), end_line)
217 assert rhs.startswith('%'), rhs
217 assert rhs.startswith('%'), rhs
218 magic_name, _, args = rhs[1:].partition(' ')
218 magic_name, _, args = rhs[1:].partition(' ')
219
219
220 lines_before = lines[:start_line]
220 lines_before = lines[:start_line]
221 call = "get_ipython().run_line_magic({!r}, {!r})".format(magic_name, args)
221 call = "get_ipython().run_line_magic({!r}, {!r})".format(magic_name, args)
222 new_line = lhs + call + '\n'
222 new_line = lhs + call + '\n'
223 lines_after = lines[end_line+1:]
223 lines_after = lines[end_line+1:]
224
224
225 return lines_before + [new_line] + lines_after
225 return lines_before + [new_line] + lines_after
226
226
227
227
228 class SystemAssign(TokenTransformBase):
228 class SystemAssign(TokenTransformBase):
229 """Transformer for assignments from system commands (a = !foo)"""
229 """Transformer for assignments from system commands (a = !foo)"""
230 @classmethod
230 @classmethod
231 def find(cls, tokens_by_line):
231 def find(cls, tokens_by_line):
232 """Find the first system assignment (a = !foo) in the cell.
232 """Find the first system assignment (a = !foo) in the cell.
233 """
233 """
234 for line in tokens_by_line:
234 for line in tokens_by_line:
235 assign_ix = _find_assign_op(line)
235 assign_ix = _find_assign_op(line)
236 if (assign_ix is not None) \
236 if (assign_ix is not None) \
237 and not line[assign_ix].line.strip().startswith('=') \
237 and not line[assign_ix].line.strip().startswith('=') \
238 and (len(line) >= assign_ix + 2) \
238 and (len(line) >= assign_ix + 2) \
239 and (line[assign_ix + 1].type == tokenize.ERRORTOKEN):
239 and (line[assign_ix + 1].type == tokenize.ERRORTOKEN):
240 ix = assign_ix + 1
240 ix = assign_ix + 1
241
241
242 while ix < len(line) and line[ix].type == tokenize.ERRORTOKEN:
242 while ix < len(line) and line[ix].type == tokenize.ERRORTOKEN:
243 if line[ix].string == '!':
243 if line[ix].string == '!':
244 return cls(line[ix].start)
244 return cls(line[ix].start)
245 elif not line[ix].string.isspace():
245 elif not line[ix].string.isspace():
246 break
246 break
247 ix += 1
247 ix += 1
248
248
249 def transform(self, lines: List[str]):
249 def transform(self, lines: List[str]):
250 """Transform a system assignment found by the ``find()`` classmethod.
250 """Transform a system assignment found by the ``find()`` classmethod.
251 """
251 """
252 start_line, start_col = self.start_line, self.start_col
252 start_line, start_col = self.start_line, self.start_col
253
253
254 lhs = lines[start_line][:start_col]
254 lhs = lines[start_line][:start_col]
255 end_line = find_end_of_continued_line(lines, start_line)
255 end_line = find_end_of_continued_line(lines, start_line)
256 rhs = assemble_continued_line(lines, (start_line, start_col), end_line)
256 rhs = assemble_continued_line(lines, (start_line, start_col), end_line)
257 assert rhs.startswith('!'), rhs
257 assert rhs.startswith('!'), rhs
258 cmd = rhs[1:]
258 cmd = rhs[1:]
259
259
260 lines_before = lines[:start_line]
260 lines_before = lines[:start_line]
261 call = "get_ipython().getoutput({!r})".format(cmd)
261 call = "get_ipython().getoutput({!r})".format(cmd)
262 new_line = lhs + call + '\n'
262 new_line = lhs + call + '\n'
263 lines_after = lines[end_line + 1:]
263 lines_after = lines[end_line + 1:]
264
264
265 return lines_before + [new_line] + lines_after
265 return lines_before + [new_line] + lines_after
266
266
267 # The escape sequences that define the syntax transformations IPython will
267 # The escape sequences that define the syntax transformations IPython will
268 # apply to user input. These can NOT be just changed here: many regular
268 # apply to user input. These can NOT be just changed here: many regular
269 # expressions and other parts of the code may use their hardcoded values, and
269 # expressions and other parts of the code may use their hardcoded values, and
270 # for all intents and purposes they constitute the 'IPython syntax', so they
270 # for all intents and purposes they constitute the 'IPython syntax', so they
271 # should be considered fixed.
271 # should be considered fixed.
272
272
273 ESC_SHELL = '!' # Send line to underlying system shell
273 ESC_SHELL = '!' # Send line to underlying system shell
274 ESC_SH_CAP = '!!' # Send line to system shell and capture output
274 ESC_SH_CAP = '!!' # Send line to system shell and capture output
275 ESC_HELP = '?' # Find information about object
275 ESC_HELP = '?' # Find information about object
276 ESC_HELP2 = '??' # Find extra-detailed information about object
276 ESC_HELP2 = '??' # Find extra-detailed information about object
277 ESC_MAGIC = '%' # Call magic function
277 ESC_MAGIC = '%' # Call magic function
278 ESC_MAGIC2 = '%%' # Call cell-magic function
278 ESC_MAGIC2 = '%%' # Call cell-magic function
279 ESC_QUOTE = ',' # Split args on whitespace, quote each as string and call
279 ESC_QUOTE = ',' # Split args on whitespace, quote each as string and call
280 ESC_QUOTE2 = ';' # Quote all args as a single string, call
280 ESC_QUOTE2 = ';' # Quote all args as a single string, call
281 ESC_PAREN = '/' # Call first argument with rest of line as arguments
281 ESC_PAREN = '/' # Call first argument with rest of line as arguments
282
282
283 ESCAPE_SINGLES = {'!', '?', '%', ',', ';', '/'}
283 ESCAPE_SINGLES = {'!', '?', '%', ',', ';', '/'}
284 ESCAPE_DOUBLES = {'!!', '??'} # %% (cell magic) is handled separately
284 ESCAPE_DOUBLES = {'!!', '??'} # %% (cell magic) is handled separately
285
285
286 def _make_help_call(target, esc, next_input=None):
286 def _make_help_call(target, esc, next_input=None):
287 """Prepares a pinfo(2)/psearch call from a target name and the escape
287 """Prepares a pinfo(2)/psearch call from a target name and the escape
288 (i.e. ? or ??)"""
288 (i.e. ? or ??)"""
289 method = 'pinfo2' if esc == '??' \
289 method = 'pinfo2' if esc == '??' \
290 else 'psearch' if '*' in target \
290 else 'psearch' if '*' in target \
291 else 'pinfo'
291 else 'pinfo'
292 arg = " ".join([method, target])
292 arg = " ".join([method, target])
293 #Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
293 #Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
294 t_magic_name, _, t_magic_arg_s = arg.partition(' ')
294 t_magic_name, _, t_magic_arg_s = arg.partition(' ')
295 t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
295 t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
296 if next_input is None:
296 if next_input is None:
297 return 'get_ipython().run_line_magic(%r, %r)' % (t_magic_name, t_magic_arg_s)
297 return 'get_ipython().run_line_magic(%r, %r)' % (t_magic_name, t_magic_arg_s)
298 else:
298 else:
299 return 'get_ipython().set_next_input(%r);get_ipython().run_line_magic(%r, %r)' % \
299 return 'get_ipython().set_next_input(%r);get_ipython().run_line_magic(%r, %r)' % \
300 (next_input, t_magic_name, t_magic_arg_s)
300 (next_input, t_magic_name, t_magic_arg_s)
301
301
302 def _tr_help(content):
302 def _tr_help(content):
303 """Translate lines escaped with: ?
303 """Translate lines escaped with: ?
304
304
305 A naked help line should fire the intro help screen (shell.show_usage())
305 A naked help line should fire the intro help screen (shell.show_usage())
306 """
306 """
307 if not content:
307 if not content:
308 return 'get_ipython().show_usage()'
308 return 'get_ipython().show_usage()'
309
309
310 return _make_help_call(content, '?')
310 return _make_help_call(content, '?')
311
311
312 def _tr_help2(content):
312 def _tr_help2(content):
313 """Translate lines escaped with: ??
313 """Translate lines escaped with: ??
314
314
315 A naked help line should fire the intro help screen (shell.show_usage())
315 A naked help line should fire the intro help screen (shell.show_usage())
316 """
316 """
317 if not content:
317 if not content:
318 return 'get_ipython().show_usage()'
318 return 'get_ipython().show_usage()'
319
319
320 return _make_help_call(content, '??')
320 return _make_help_call(content, '??')
321
321
322 def _tr_magic(content):
322 def _tr_magic(content):
323 "Translate lines escaped with a percent sign: %"
323 "Translate lines escaped with a percent sign: %"
324 name, _, args = content.partition(' ')
324 name, _, args = content.partition(' ')
325 return 'get_ipython().run_line_magic(%r, %r)' % (name, args)
325 return 'get_ipython().run_line_magic(%r, %r)' % (name, args)
326
326
327 def _tr_quote(content):
327 def _tr_quote(content):
328 "Translate lines escaped with a comma: ,"
328 "Translate lines escaped with a comma: ,"
329 name, _, args = content.partition(' ')
329 name, _, args = content.partition(' ')
330 return '%s("%s")' % (name, '", "'.join(args.split()) )
330 return '%s("%s")' % (name, '", "'.join(args.split()) )
331
331
332 def _tr_quote2(content):
332 def _tr_quote2(content):
333 "Translate lines escaped with a semicolon: ;"
333 "Translate lines escaped with a semicolon: ;"
334 name, _, args = content.partition(' ')
334 name, _, args = content.partition(' ')
335 return '%s("%s")' % (name, args)
335 return '%s("%s")' % (name, args)
336
336
337 def _tr_paren(content):
337 def _tr_paren(content):
338 "Translate lines escaped with a slash: /"
338 "Translate lines escaped with a slash: /"
339 name, _, args = content.partition(' ')
339 name, _, args = content.partition(' ')
340 return '%s(%s)' % (name, ", ".join(args.split()))
340 return '%s(%s)' % (name, ", ".join(args.split()))
341
341
342 tr = { ESC_SHELL : 'get_ipython().system({!r})'.format,
342 tr = { ESC_SHELL : 'get_ipython().system({!r})'.format,
343 ESC_SH_CAP : 'get_ipython().getoutput({!r})'.format,
343 ESC_SH_CAP : 'get_ipython().getoutput({!r})'.format,
344 ESC_HELP : _tr_help,
344 ESC_HELP : _tr_help,
345 ESC_HELP2 : _tr_help2,
345 ESC_HELP2 : _tr_help2,
346 ESC_MAGIC : _tr_magic,
346 ESC_MAGIC : _tr_magic,
347 ESC_QUOTE : _tr_quote,
347 ESC_QUOTE : _tr_quote,
348 ESC_QUOTE2 : _tr_quote2,
348 ESC_QUOTE2 : _tr_quote2,
349 ESC_PAREN : _tr_paren }
349 ESC_PAREN : _tr_paren }
350
350
351 class EscapedCommand(TokenTransformBase):
351 class EscapedCommand(TokenTransformBase):
352 """Transformer for escaped commands like %foo, !foo, or /foo"""
352 """Transformer for escaped commands like %foo, !foo, or /foo"""
353 @classmethod
353 @classmethod
354 def find(cls, tokens_by_line):
354 def find(cls, tokens_by_line):
355 """Find the first escaped command (%foo, !foo, etc.) in the cell.
355 """Find the first escaped command (%foo, !foo, etc.) in the cell.
356 """
356 """
357 for line in tokens_by_line:
357 for line in tokens_by_line:
358 if not line:
358 if not line:
359 continue
359 continue
360 ix = 0
360 ix = 0
361 ll = len(line)
361 ll = len(line)
362 while ll > ix and line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
362 while ll > ix and line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
363 ix += 1
363 ix += 1
364 if ix >= ll:
364 if ix >= ll:
365 continue
365 continue
366 if line[ix].string in ESCAPE_SINGLES:
366 if line[ix].string in ESCAPE_SINGLES:
367 return cls(line[ix].start)
367 return cls(line[ix].start)
368
368
369 def transform(self, lines):
369 def transform(self, lines):
370 """Transform an escaped line found by the ``find()`` classmethod.
370 """Transform an escaped line found by the ``find()`` classmethod.
371 """
371 """
372 start_line, start_col = self.start_line, self.start_col
372 start_line, start_col = self.start_line, self.start_col
373
373
374 indent = lines[start_line][:start_col]
374 indent = lines[start_line][:start_col]
375 end_line = find_end_of_continued_line(lines, start_line)
375 end_line = find_end_of_continued_line(lines, start_line)
376 line = assemble_continued_line(lines, (start_line, start_col), end_line)
376 line = assemble_continued_line(lines, (start_line, start_col), end_line)
377
377
378 if len(line) > 1 and line[:2] in ESCAPE_DOUBLES:
378 if len(line) > 1 and line[:2] in ESCAPE_DOUBLES:
379 escape, content = line[:2], line[2:]
379 escape, content = line[:2], line[2:]
380 else:
380 else:
381 escape, content = line[:1], line[1:]
381 escape, content = line[:1], line[1:]
382
382
383 if escape in tr:
383 if escape in tr:
384 call = tr[escape](content)
384 call = tr[escape](content)
385 else:
385 else:
386 call = ''
386 call = ''
387
387
388 lines_before = lines[:start_line]
388 lines_before = lines[:start_line]
389 new_line = indent + call + '\n'
389 new_line = indent + call + '\n'
390 lines_after = lines[end_line + 1:]
390 lines_after = lines[end_line + 1:]
391
391
392 return lines_before + [new_line] + lines_after
392 return lines_before + [new_line] + lines_after
393
393
394 _help_end_re = re.compile(r"""(%{0,2}
394 _help_end_re = re.compile(r"""(%{0,2}
395 [a-zA-Z_*][\w*]* # Variable name
395 [a-zA-Z_*][\w*]* # Variable name
396 (\.[a-zA-Z_*][\w*]*)* # .etc.etc
396 (\.[a-zA-Z_*][\w*]*)* # .etc.etc
397 )
397 )
398 (\?\??)$ # ? or ??
398 (\?\??)$ # ? or ??
399 """,
399 """,
400 re.VERBOSE)
400 re.VERBOSE)
401
401
402 class HelpEnd(TokenTransformBase):
402 class HelpEnd(TokenTransformBase):
403 """Transformer for help syntax: obj? and obj??"""
403 """Transformer for help syntax: obj? and obj??"""
404 # This needs to be higher priority (lower number) than EscapedCommand so
404 # This needs to be higher priority (lower number) than EscapedCommand so
405 # that inspecting magics (%foo?) works.
405 # that inspecting magics (%foo?) works.
406 priority = 5
406 priority = 5
407
407
408 def __init__(self, start, q_locn):
408 def __init__(self, start, q_locn):
409 super().__init__(start)
409 super().__init__(start)
410 self.q_line = q_locn[0] - 1 # Shift from 1-indexed to 0-indexed
410 self.q_line = q_locn[0] - 1 # Shift from 1-indexed to 0-indexed
411 self.q_col = q_locn[1]
411 self.q_col = q_locn[1]
412
412
413 @classmethod
413 @classmethod
414 def find(cls, tokens_by_line):
414 def find(cls, tokens_by_line):
415 """Find the first help command (foo?) in the cell.
415 """Find the first help command (foo?) in the cell.
416 """
416 """
417 for line in tokens_by_line:
417 for line in tokens_by_line:
418 # Last token is NEWLINE; look at last but one
418 # Last token is NEWLINE; look at last but one
419 if len(line) > 2 and line[-2].string == '?':
419 if len(line) > 2 and line[-2].string == '?':
420 # Find the first token that's not INDENT/DEDENT
420 # Find the first token that's not INDENT/DEDENT
421 ix = 0
421 ix = 0
422 while line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
422 while line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
423 ix += 1
423 ix += 1
424 return cls(line[ix].start, line[-2].start)
424 return cls(line[ix].start, line[-2].start)
425
425
426 def transform(self, lines):
426 def transform(self, lines):
427 """Transform a help command found by the ``find()`` classmethod.
427 """Transform a help command found by the ``find()`` classmethod.
428 """
428 """
429 piece = ''.join(lines[self.start_line:self.q_line+1])
429 piece = ''.join(lines[self.start_line:self.q_line+1])
430 indent, content = piece[:self.start_col], piece[self.start_col:]
430 indent, content = piece[:self.start_col], piece[self.start_col:]
431 lines_before = lines[:self.start_line]
431 lines_before = lines[:self.start_line]
432 lines_after = lines[self.q_line + 1:]
432 lines_after = lines[self.q_line + 1:]
433
433
434 m = _help_end_re.search(content)
434 m = _help_end_re.search(content)
435 if not m:
435 if not m:
436 raise SyntaxError(content)
436 raise SyntaxError(content)
437 assert m is not None, content
437 assert m is not None, content
438 target = m.group(1)
438 target = m.group(1)
439 esc = m.group(3)
439 esc = m.group(3)
440
440
441 # If we're mid-command, put it back on the next prompt for the user.
441 # If we're mid-command, put it back on the next prompt for the user.
442 next_input = None
442 next_input = None
443 if (not lines_before) and (not lines_after) \
443 if (not lines_before) and (not lines_after) \
444 and content.strip() != m.group(0):
444 and content.strip() != m.group(0):
445 next_input = content.rstrip('?\n')
445 next_input = content.rstrip('?\n')
446
446
447 call = _make_help_call(target, esc, next_input=next_input)
447 call = _make_help_call(target, esc, next_input=next_input)
448 new_line = indent + call + '\n'
448 new_line = indent + call + '\n'
449
449
450 return lines_before + [new_line] + lines_after
450 return lines_before + [new_line] + lines_after
451
451
452 def make_tokens_by_line(lines):
452 def make_tokens_by_line(lines):
453 """Tokenize a series of lines and group tokens by line.
453 """Tokenize a series of lines and group tokens by line.
454
454
455 The tokens for a multiline Python string or expression are
455 The tokens for a multiline Python string or expression are
456 grouped as one line.
456 grouped as one line.
457 """
457 """
458 # NL tokens are used inside multiline expressions, but also after blank
458 # NL tokens are used inside multiline expressions, but also after blank
459 # lines or comments. This is intentional - see https://bugs.python.org/issue17061
459 # lines or comments. This is intentional - see https://bugs.python.org/issue17061
460 # We want to group the former case together but split the latter, so we
460 # We want to group the former case together but split the latter, so we
461 # track parentheses level, similar to the internals of tokenize.
461 # track parentheses level, similar to the internals of tokenize.
462 NEWLINE, NL = tokenize.NEWLINE, tokenize.NL
462 NEWLINE, NL = tokenize.NEWLINE, tokenize.NL
463 tokens_by_line = [[]]
463 tokens_by_line = [[]]
464 parenlev = 0
464 parenlev = 0
465 try:
465 try:
466 for token in tokenize.generate_tokens(iter(lines).__next__):
466 for token in tokenize.generate_tokens(iter(lines).__next__):
467 tokens_by_line[-1].append(token)
467 tokens_by_line[-1].append(token)
468 if (token.type == NEWLINE) \
468 if (token.type == NEWLINE) \
469 or ((token.type == NL) and (parenlev <= 0)):
469 or ((token.type == NL) and (parenlev <= 0)):
470 tokens_by_line.append([])
470 tokens_by_line.append([])
471 elif token.string in {'(', '[', '{'}:
471 elif token.string in {'(', '[', '{'}:
472 parenlev += 1
472 parenlev += 1
473 elif token.string in {')', ']', '}'}:
473 elif token.string in {')', ']', '}'}:
474 if parenlev > 0:
474 if parenlev > 0:
475 parenlev -= 1
475 parenlev -= 1
476 except tokenize.TokenError:
476 except tokenize.TokenError:
477 # Input ended in a multiline string or expression. That's OK for us.
477 # Input ended in a multiline string or expression. That's OK for us.
478 pass
478 pass
479
479
480
480
481 if not tokens_by_line[-1]:
481 if not tokens_by_line[-1]:
482 tokens_by_line.pop()
482 tokens_by_line.pop()
483
483
484 # Convert if using cpython tokenize
485 # upstream bug was fixed in Python 3.7.1, so once we drop 3.7 this can likely be removed.
486 if (list(map(lambda x: x.type, tokens_by_line[-1])) ==
487 [tokenize.DEDENT] * (len(tokens_by_line[-1]) - 1) + [tokenize.ENDMARKER]):
488 if (
489 len(tokens_by_line) > 1 and
490 len(tokens_by_line[-2]) > 0 and
491 tokens_by_line[-2][-1].type == tokenize.NEWLINE
492 ):
493 tokens_by_line[-2].pop()
494 tokens_by_line[-2] += tokens_by_line[-1]
495 tokens_by_line.pop()
496
484
497 return tokens_by_line
485 return tokens_by_line
498
486
499 def show_linewise_tokens(s: str):
487 def show_linewise_tokens(s: str):
500 """For investigation and debugging"""
488 """For investigation and debugging"""
501 if not s.endswith('\n'):
489 if not s.endswith('\n'):
502 s += '\n'
490 s += '\n'
503 lines = s.splitlines(keepends=True)
491 lines = s.splitlines(keepends=True)
504 for line in make_tokens_by_line(lines):
492 for line in make_tokens_by_line(lines):
505 print("Line -------")
493 print("Line -------")
506 for tokinfo in line:
494 for tokinfo in line:
507 print(" ", tokinfo)
495 print(" ", tokinfo)
508
496
509 # Arbitrary limit to prevent getting stuck in infinite loops
497 # Arbitrary limit to prevent getting stuck in infinite loops
510 TRANSFORM_LOOP_LIMIT = 500
498 TRANSFORM_LOOP_LIMIT = 500
511
499
512 class TransformerManager:
500 class TransformerManager:
513 """Applies various transformations to a cell or code block.
501 """Applies various transformations to a cell or code block.
514
502
515 The key methods for external use are ``transform_cell()``
503 The key methods for external use are ``transform_cell()``
516 and ``check_complete()``.
504 and ``check_complete()``.
517 """
505 """
518 def __init__(self):
506 def __init__(self):
519 self.cleanup_transforms = [
507 self.cleanup_transforms = [
520 leading_indent,
508 leading_indent,
521 classic_prompt,
509 classic_prompt,
522 ipython_prompt,
510 ipython_prompt,
523 ]
511 ]
524 self.line_transforms = [
512 self.line_transforms = [
525 cell_magic,
513 cell_magic,
526 ]
514 ]
527 self.token_transformers = [
515 self.token_transformers = [
528 MagicAssign,
516 MagicAssign,
529 SystemAssign,
517 SystemAssign,
530 EscapedCommand,
518 EscapedCommand,
531 HelpEnd,
519 HelpEnd,
532 ]
520 ]
533
521
534 def do_one_token_transform(self, lines):
522 def do_one_token_transform(self, lines):
535 """Find and run the transform earliest in the code.
523 """Find and run the transform earliest in the code.
536
524
537 Returns (changed, lines).
525 Returns (changed, lines).
538
526
539 This method is called repeatedly until changed is False, indicating
527 This method is called repeatedly until changed is False, indicating
540 that all available transformations are complete.
528 that all available transformations are complete.
541
529
542 The tokens following IPython special syntax might not be valid, so
530 The tokens following IPython special syntax might not be valid, so
543 the transformed code is retokenised every time to identify the next
531 the transformed code is retokenised every time to identify the next
544 piece of special syntax. Hopefully long code cells are mostly valid
532 piece of special syntax. Hopefully long code cells are mostly valid
545 Python, not using lots of IPython special syntax, so this shouldn't be
533 Python, not using lots of IPython special syntax, so this shouldn't be
546 a performance issue.
534 a performance issue.
547 """
535 """
548 tokens_by_line = make_tokens_by_line(lines)
536 tokens_by_line = make_tokens_by_line(lines)
549 candidates = []
537 candidates = []
550 for transformer_cls in self.token_transformers:
538 for transformer_cls in self.token_transformers:
551 transformer = transformer_cls.find(tokens_by_line)
539 transformer = transformer_cls.find(tokens_by_line)
552 if transformer:
540 if transformer:
553 candidates.append(transformer)
541 candidates.append(transformer)
554
542
555 if not candidates:
543 if not candidates:
556 # Nothing to transform
544 # Nothing to transform
557 return False, lines
545 return False, lines
558 ordered_transformers = sorted(candidates, key=TokenTransformBase.sortby)
546 ordered_transformers = sorted(candidates, key=TokenTransformBase.sortby)
559 for transformer in ordered_transformers:
547 for transformer in ordered_transformers:
560 try:
548 try:
561 return True, transformer.transform(lines)
549 return True, transformer.transform(lines)
562 except SyntaxError:
550 except SyntaxError:
563 pass
551 pass
564 return False, lines
552 return False, lines
565
553
566 def do_token_transforms(self, lines):
554 def do_token_transforms(self, lines):
567 for _ in range(TRANSFORM_LOOP_LIMIT):
555 for _ in range(TRANSFORM_LOOP_LIMIT):
568 changed, lines = self.do_one_token_transform(lines)
556 changed, lines = self.do_one_token_transform(lines)
569 if not changed:
557 if not changed:
570 return lines
558 return lines
571
559
572 raise RuntimeError("Input transformation still changing after "
560 raise RuntimeError("Input transformation still changing after "
573 "%d iterations. Aborting." % TRANSFORM_LOOP_LIMIT)
561 "%d iterations. Aborting." % TRANSFORM_LOOP_LIMIT)
574
562
575 def transform_cell(self, cell: str) -> str:
563 def transform_cell(self, cell: str) -> str:
576 """Transforms a cell of input code"""
564 """Transforms a cell of input code"""
577 if not cell.endswith('\n'):
565 if not cell.endswith('\n'):
578 cell += '\n' # Ensure the cell has a trailing newline
566 cell += '\n' # Ensure the cell has a trailing newline
579 lines = cell.splitlines(keepends=True)
567 lines = cell.splitlines(keepends=True)
580 for transform in self.cleanup_transforms + self.line_transforms:
568 for transform in self.cleanup_transforms + self.line_transforms:
581 lines = transform(lines)
569 lines = transform(lines)
582
570
583 lines = self.do_token_transforms(lines)
571 lines = self.do_token_transforms(lines)
584 return ''.join(lines)
572 return ''.join(lines)
585
573
586 def check_complete(self, cell: str):
574 def check_complete(self, cell: str):
587 """Return whether a block of code is ready to execute, or should be continued
575 """Return whether a block of code is ready to execute, or should be continued
588
576
589 Parameters
577 Parameters
590 ----------
578 ----------
591 source : string
579 source : string
592 Python input code, which can be multiline.
580 Python input code, which can be multiline.
593
581
594 Returns
582 Returns
595 -------
583 -------
596 status : str
584 status : str
597 One of 'complete', 'incomplete', or 'invalid' if source is not a
585 One of 'complete', 'incomplete', or 'invalid' if source is not a
598 prefix of valid code.
586 prefix of valid code.
599 indent_spaces : int or None
587 indent_spaces : int or None
600 The number of spaces by which to indent the next line of code. If
588 The number of spaces by which to indent the next line of code. If
601 status is not 'incomplete', this is None.
589 status is not 'incomplete', this is None.
602 """
590 """
603 # Remember if the lines ends in a new line.
591 # Remember if the lines ends in a new line.
604 ends_with_newline = False
592 ends_with_newline = False
605 for character in reversed(cell):
593 for character in reversed(cell):
606 if character == '\n':
594 if character == '\n':
607 ends_with_newline = True
595 ends_with_newline = True
608 break
596 break
609 elif character.strip():
597 elif character.strip():
610 break
598 break
611 else:
599 else:
612 continue
600 continue
613
601
614 if ends_with_newline:
602 if not ends_with_newline:
615 # Append an newline for consistent tokenization
603 # Append an newline for consistent tokenization
616 # See https://bugs.python.org/issue33899
604 # See https://bugs.python.org/issue33899
617 cell += '\n'
605 cell += '\n'
618
606
619 lines = cell.splitlines(keepends=True)
607 lines = cell.splitlines(keepends=True)
620
608
621 if not lines:
609 if not lines:
622 return 'complete', None
610 return 'complete', None
623
611
624 if lines[-1].endswith('\\'):
612 if lines[-1].endswith('\\'):
625 # Explicit backslash continuation
613 # Explicit backslash continuation
626 return 'incomplete', find_last_indent(lines)
614 return 'incomplete', find_last_indent(lines)
627
615
628 try:
616 try:
629 for transform in self.cleanup_transforms:
617 for transform in self.cleanup_transforms:
630 lines = transform(lines)
618 lines = transform(lines)
631 except SyntaxError:
619 except SyntaxError:
632 return 'invalid', None
620 return 'invalid', None
633
621
634 if lines[0].startswith('%%'):
622 if lines[0].startswith('%%'):
635 # Special case for cell magics - completion marked by blank line
623 # Special case for cell magics - completion marked by blank line
636 if lines[-1].strip():
624 if lines[-1].strip():
637 return 'incomplete', find_last_indent(lines)
625 return 'incomplete', find_last_indent(lines)
638 else:
626 else:
639 return 'complete', None
627 return 'complete', None
640
628
641 try:
629 try:
642 for transform in self.line_transforms:
630 for transform in self.line_transforms:
643 lines = transform(lines)
631 lines = transform(lines)
644 lines = self.do_token_transforms(lines)
632 lines = self.do_token_transforms(lines)
645 except SyntaxError:
633 except SyntaxError:
646 return 'invalid', None
634 return 'invalid', None
647
635
648 tokens_by_line = make_tokens_by_line(lines)
636 tokens_by_line = make_tokens_by_line(lines)
649
637
650 if not tokens_by_line:
638 if not tokens_by_line:
651 return 'incomplete', find_last_indent(lines)
639 return 'incomplete', find_last_indent(lines)
652
640
653 if tokens_by_line[-1][-1].type != tokenize.ENDMARKER:
641 if tokens_by_line[-1][-1].type != tokenize.ENDMARKER:
654 # We're in a multiline string or expression
642 # We're in a multiline string or expression
655 return 'incomplete', find_last_indent(lines)
643 return 'incomplete', find_last_indent(lines)
656
644
657 newline_types = {tokenize.NEWLINE, tokenize.COMMENT, tokenize.ENDMARKER}
645 newline_types = {tokenize.NEWLINE, tokenize.COMMENT, tokenize.ENDMARKER}
658
646
659 # Remove newline_types for the list of tokens
647 # Pop the last line which only contains DEDENTs and ENDMARKER
660 while len(tokens_by_line) > 1 and len(tokens_by_line[-1]) == 1 \
648 last_token_line = None
661 and tokens_by_line[-1][-1].type in newline_types:
649 if {t.type for t in tokens_by_line[-1]} in [
662 tokens_by_line.pop()
650 {tokenize.DEDENT, tokenize.ENDMARKER},
651 {tokenize.ENDMARKER}
652 ] and len(tokens_by_line) > 1:
653 last_token_line = tokens_by_line.pop()
663
654
664 while tokens_by_line[-1] and tokens_by_line[-1][-1].type in newline_types:
655 while tokens_by_line[-1] and tokens_by_line[-1][-1].type in newline_types:
665 tokens_by_line[-1].pop()
656 tokens_by_line[-1].pop()
666
657
667 if len(tokens_by_line) == 1 and not tokens_by_line[-1]:
658 if len(tokens_by_line) == 1 and not tokens_by_line[-1]:
668 return 'incomplete', 0
659 return 'incomplete', 0
669
660
670 new_block = False
661 if tokens_by_line[-1][-1].string == ':':
671 for token in reversed(tokens_by_line[-1]):
672 if token.type == tokenize.DEDENT:
673 continue
674 elif token.string == ':':
675 new_block = True
676 break
677
678 if new_block:
679 # The last line starts a block (e.g. 'if foo:')
662 # The last line starts a block (e.g. 'if foo:')
680 ix = 0
663 ix = 0
681 while tokens_by_line[-1][ix].type in {tokenize.INDENT, tokenize.DEDENT}:
664 while tokens_by_line[-1][ix].type in {tokenize.INDENT, tokenize.DEDENT}:
682 ix += 1
665 ix += 1
683
666
684 indent = tokens_by_line[-1][ix].start[1]
667 indent = tokens_by_line[-1][ix].start[1]
685 return 'incomplete', indent + 4
668 return 'incomplete', indent + 4
686
669
687 if tokens_by_line[-1][0].line.endswith('\\'):
670 if tokens_by_line[-1][0].line.endswith('\\'):
688 return 'incomplete', None
671 return 'incomplete', None
689
672
690 # At this point, our checks think the code is complete (or invalid).
673 # At this point, our checks think the code is complete (or invalid).
691 # We'll use codeop.compile_command to check this with the real parser
674 # We'll use codeop.compile_command to check this with the real parser
692 try:
675 try:
693 with warnings.catch_warnings():
676 with warnings.catch_warnings():
694 warnings.simplefilter('error', SyntaxWarning)
677 warnings.simplefilter('error', SyntaxWarning)
695 res = compile_command(''.join(lines), symbol='exec')
678 res = compile_command(''.join(lines), symbol='exec')
696 except (SyntaxError, OverflowError, ValueError, TypeError,
679 except (SyntaxError, OverflowError, ValueError, TypeError,
697 MemoryError, SyntaxWarning):
680 MemoryError, SyntaxWarning):
698 return 'invalid', None
681 return 'invalid', None
699 else:
682 else:
700 if res is None:
683 if res is None:
701 return 'incomplete', find_last_indent(lines)
684 return 'incomplete', find_last_indent(lines)
702
685
703 if tokens_by_line[-1][-1].type == tokenize.DEDENT:
686 if last_token_line and last_token_line[0].type == tokenize.DEDENT:
704 if ends_with_newline:
687 if ends_with_newline:
705 return 'complete', None
688 return 'complete', None
706 return 'incomplete', find_last_indent(lines)
689 return 'incomplete', find_last_indent(lines)
707
690
708 # If there's a blank line at the end, assume we're ready to execute
691 # If there's a blank line at the end, assume we're ready to execute
709 if not lines[-1].strip():
692 if not lines[-1].strip():
710 return 'complete', None
693 return 'complete', None
711
694
712 return 'complete', None
695 return 'complete', None
713
696
714
697
715 def find_last_indent(lines):
698 def find_last_indent(lines):
716 m = _indent_re.match(lines[-1])
699 m = _indent_re.match(lines[-1])
717 if not m:
700 if not m:
718 return 0
701 return 0
719 return len(m.group(0).replace('\t', ' '*4))
702 return len(m.group(0).replace('\t', ' '*4))
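
As context for the transformers defined in this file, TransformerManager.transform_cell() rewrites IPython's special syntax into plain Python calls. A short sketch of how the classes shown in the diff above are exercised; the expected outputs in the comments are illustrative of what MagicAssign, SystemAssign and HelpEnd produce, not captured output:

from IPython.core.inputtransformer2 import TransformerManager

tm = TransformerManager()

# MagicAssign: assignment from a line magic.
print(tm.transform_cell("a = %ls -la"))
# expected: a = get_ipython().run_line_magic('ls', '-la')

# SystemAssign: assignment from a system command.
print(tm.transform_cell("files = !ls"))
# expected: files = get_ipython().getoutput('ls')

# HelpEnd: trailing-? help syntax.
print(tm.transform_cell("range?"))
# expected: get_ipython().run_line_magic('pinfo', 'range')
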