Bail on single-line cells with sunken brackets...
Blazej Michalik -
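As a quick illustration of the intent (an editor's sketch, not part of the changeset, assuming the TransformerManager API shown in the diff below): a single-line cell whose bracket depth sinks below zero is now reported as invalid up front rather than being handed on to the later completeness checks.

    >>> from IPython.core.inputtransformer2 import TransformerManager
    >>> TransformerManager().check_complete("())\n")  # one closing bracket too many
    ('invalid', None)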
@@ -1,729 +1,748 b''
1 """Input transformer machinery to support IPython special syntax.
1 """Input transformer machinery to support IPython special syntax.
2
2
3 This includes the machinery to recognise and transform ``%magic`` commands,
3 This includes the machinery to recognise and transform ``%magic`` commands,
4 ``!system`` commands, ``help?`` querying, prompt stripping, and so forth.
4 ``!system`` commands, ``help?`` querying, prompt stripping, and so forth.
5
5
6 Added: IPython 7.0. Replaces inputsplitter and inputtransformer, which were
6 Added: IPython 7.0. Replaces inputsplitter and inputtransformer, which were
7 deprecated in 7.0.
7 deprecated in 7.0.
8 """
8 """
9
9
10 # Copyright (c) IPython Development Team.
10 # Copyright (c) IPython Development Team.
11 # Distributed under the terms of the Modified BSD License.
11 # Distributed under the terms of the Modified BSD License.
12
12
13 from codeop import compile_command
13 from codeop import compile_command
14 import re
14 import re
15 import tokenize
15 import tokenize
16 from typing import List, Tuple, Optional, Any
16 from typing import List, Tuple, Optional, Any
17 import warnings
17 import warnings
18
18
19 _indent_re = re.compile(r'^[ \t]+')
19 _indent_re = re.compile(r'^[ \t]+')
20
20
21 def leading_empty_lines(lines):
21 def leading_empty_lines(lines):
22 """Remove leading empty lines
22 """Remove leading empty lines
23
23
24 If the leading lines are empty or contain only whitespace, they will be
24 If the leading lines are empty or contain only whitespace, they will be
25 removed.
25 removed.
26 """
26 """
27 if not lines:
27 if not lines:
28 return lines
28 return lines
29 for i, line in enumerate(lines):
29 for i, line in enumerate(lines):
30 if line and not line.isspace():
30 if line and not line.isspace():
31 return lines[i:]
31 return lines[i:]
32 return lines
32 return lines
33
33
34 def leading_indent(lines):
34 def leading_indent(lines):
35 """Remove leading indentation.
35 """Remove leading indentation.
36
36
37 If the first line starts with spaces or tabs, the same whitespace will be
37 If the first line starts with spaces or tabs, the same whitespace will be
38 removed from each following line in the cell.
38 removed from each following line in the cell.
39 """
39 """
40 if not lines:
40 if not lines:
41 return lines
41 return lines
42 m = _indent_re.match(lines[0])
42 m = _indent_re.match(lines[0])
43 if not m:
43 if not m:
44 return lines
44 return lines
45 space = m.group(0)
45 space = m.group(0)
46 n = len(space)
46 n = len(space)
47 return [l[n:] if l.startswith(space) else l
47 return [l[n:] if l.startswith(space) else l
48 for l in lines]
48 for l in lines]
49
49
50 class PromptStripper:
50 class PromptStripper:
51 """Remove matching input prompts from a block of input.
51 """Remove matching input prompts from a block of input.
52
52
53 Parameters
53 Parameters
54 ----------
54 ----------
55 prompt_re : regular expression
55 prompt_re : regular expression
56 A regular expression matching any input prompt (including continuation,
56 A regular expression matching any input prompt (including continuation,
57 e.g. ``...``)
57 e.g. ``...``)
58 initial_re : regular expression, optional
58 initial_re : regular expression, optional
59 A regular expression matching only the initial prompt, but not continuation.
59 A regular expression matching only the initial prompt, but not continuation.
60 If no initial expression is given, prompt_re will be used everywhere.
60 If no initial expression is given, prompt_re will be used everywhere.
61 Used mainly for plain Python prompts (``>>>``), where the continuation prompt
61 Used mainly for plain Python prompts (``>>>``), where the continuation prompt
62 ``...`` is a valid Python expression in Python 3, so shouldn't be stripped.
62 ``...`` is a valid Python expression in Python 3, so shouldn't be stripped.
63
63
64 Notes
64 Notes
65 -----
65 -----
66
66
67 If initial_re and prompt_re differ,
67 If initial_re and prompt_re differ,
68 only initial_re will be tested against the first line.
68 only initial_re will be tested against the first line.
69 If any prompt is found on the first two lines,
69 If any prompt is found on the first two lines,
70 prompts will be stripped from the rest of the block.
70 prompts will be stripped from the rest of the block.
71 """
71 """
72 def __init__(self, prompt_re, initial_re=None):
72 def __init__(self, prompt_re, initial_re=None):
73 self.prompt_re = prompt_re
73 self.prompt_re = prompt_re
74 self.initial_re = initial_re or prompt_re
74 self.initial_re = initial_re or prompt_re
75
75
76 def _strip(self, lines):
76 def _strip(self, lines):
77 return [self.prompt_re.sub('', l, count=1) for l in lines]
77 return [self.prompt_re.sub('', l, count=1) for l in lines]
78
78
79 def __call__(self, lines):
79 def __call__(self, lines):
80 if not lines:
80 if not lines:
81 return lines
81 return lines
82 if self.initial_re.match(lines[0]) or \
82 if self.initial_re.match(lines[0]) or \
83 (len(lines) > 1 and self.prompt_re.match(lines[1])):
83 (len(lines) > 1 and self.prompt_re.match(lines[1])):
84 return self._strip(lines)
84 return self._strip(lines)
85 return lines
85 return lines
86
86
87 classic_prompt = PromptStripper(
87 classic_prompt = PromptStripper(
88 prompt_re=re.compile(r'^(>>>|\.\.\.)( |$)'),
88 prompt_re=re.compile(r'^(>>>|\.\.\.)( |$)'),
89 initial_re=re.compile(r'^>>>( |$)')
89 initial_re=re.compile(r'^>>>( |$)')
90 )
90 )
91
91
92 ipython_prompt = PromptStripper(re.compile(r'^(In \[\d+\]: |\s*\.{3,}: ?)'))
92 ipython_prompt = PromptStripper(re.compile(r'^(In \[\d+\]: |\s*\.{3,}: ?)'))
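# Editor's illustration (not part of the change set): applied to the cell
#     ["In [1]: x = 1\n", "   ...: y = 2\n"]
# ipython_prompt strips the prompts and returns
#     ["x = 1\n", "y = 2\n"]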
93
93
94 def cell_magic(lines):
94 def cell_magic(lines):
95 if not lines or not lines[0].startswith('%%'):
95 if not lines or not lines[0].startswith('%%'):
96 return lines
96 return lines
97 if re.match(r'%%\w+\?', lines[0]):
97 if re.match(r'%%\w+\?', lines[0]):
98 # This case will be handled by help_end
98 # This case will be handled by help_end
99 return lines
99 return lines
100 magic_name, _, first_line = lines[0][2:].rstrip().partition(' ')
100 magic_name, _, first_line = lines[0][2:].rstrip().partition(' ')
101 body = ''.join(lines[1:])
101 body = ''.join(lines[1:])
102 return ['get_ipython().run_cell_magic(%r, %r, %r)\n'
102 return ['get_ipython().run_cell_magic(%r, %r, %r)\n'
103 % (magic_name, first_line, body)]
103 % (magic_name, first_line, body)]
104
104
105
105
106 def _find_assign_op(token_line) -> Optional[int]:
106 def _find_assign_op(token_line) -> Optional[int]:
107 """Get the index of the first assignment in the line ('=' not inside brackets)
107 """Get the index of the first assignment in the line ('=' not inside brackets)
108
108
109 Note: We don't try to support multiple special assignments (a = b = %foo)
109 Note: We don't try to support multiple special assignments (a = b = %foo)
110 """
110 """
111 paren_level = 0
111 paren_level = 0
112 for i, ti in enumerate(token_line):
112 for i, ti in enumerate(token_line):
113 s = ti.string
113 s = ti.string
114 if s == '=' and paren_level == 0:
114 if s == '=' and paren_level == 0:
115 return i
115 return i
116 if s in {'(','[','{'}:
116 if s in {'(','[','{'}:
117 paren_level += 1
117 paren_level += 1
118 elif s in {')', ']', '}'}:
118 elif s in {')', ']', '}'}:
119 if paren_level > 0:
119 if paren_level > 0:
120 paren_level -= 1
120 paren_level -= 1
121 return None
121 return None
122
122
123 def find_end_of_continued_line(lines, start_line: int):
123 def find_end_of_continued_line(lines, start_line: int):
124 """Find the last line of a line explicitly extended using backslashes.
124 """Find the last line of a line explicitly extended using backslashes.
125
125
126 Uses 0-indexed line numbers.
126 Uses 0-indexed line numbers.
127 """
127 """
128 end_line = start_line
128 end_line = start_line
129 while lines[end_line].endswith('\\\n'):
129 while lines[end_line].endswith('\\\n'):
130 end_line += 1
130 end_line += 1
131 if end_line >= len(lines):
131 if end_line >= len(lines):
132 break
132 break
133 return end_line
133 return end_line
134
134
135 def assemble_continued_line(lines, start: Tuple[int, int], end_line: int):
135 def assemble_continued_line(lines, start: Tuple[int, int], end_line: int):
136 r"""Assemble a single line from multiple continued line pieces
136 r"""Assemble a single line from multiple continued line pieces
137
137
138 Continued lines are lines ending in ``\``, and the line following the last
138 Continued lines are lines ending in ``\``, and the line following the last
139 ``\`` in the block.
139 ``\`` in the block.
140
140
141 For example, this code continues over multiple lines::
141 For example, this code continues over multiple lines::
142
142
143 if (assign_ix is not None) \
143 if (assign_ix is not None) \
144 and (len(line) >= assign_ix + 2) \
144 and (len(line) >= assign_ix + 2) \
145 and (line[assign_ix+1].string == '%') \
145 and (line[assign_ix+1].string == '%') \
146 and (line[assign_ix+2].type == tokenize.NAME):
146 and (line[assign_ix+2].type == tokenize.NAME):
147
147
148 This statement contains four continued line pieces.
148 This statement contains four continued line pieces.
149 Assembling these pieces into a single line would give::
149 Assembling these pieces into a single line would give::
150
150
151 if (assign_ix is not None) and (len(line) >= assign_ix + 2) and (line[...
151 if (assign_ix is not None) and (len(line) >= assign_ix + 2) and (line[...
152
152
153 This uses 0-indexed line numbers. *start* is (lineno, colno).
153 This uses 0-indexed line numbers. *start* is (lineno, colno).
154
154
155 Used to allow ``%magic`` and ``!system`` commands to be continued over
155 Used to allow ``%magic`` and ``!system`` commands to be continued over
156 multiple lines.
156 multiple lines.
157 """
157 """
158 parts = [lines[start[0]][start[1]:]] + lines[start[0]+1:end_line+1]
158 parts = [lines[start[0]][start[1]:]] + lines[start[0]+1:end_line+1]
159 return ' '.join([p.rstrip()[:-1] for p in parts[:-1]] # Strip backslash+newline
159 return ' '.join([p.rstrip()[:-1] for p in parts[:-1]] # Strip backslash+newline
160 + [parts[-1].rstrip()]) # Strip newline from last line
160 + [parts[-1].rstrip()]) # Strip newline from last line
161
161
162 class TokenTransformBase:
162 class TokenTransformBase:
163 """Base class for transformations which examine tokens.
163 """Base class for transformations which examine tokens.
164
164
165 Special syntax should not be transformed when it occurs inside strings or
165 Special syntax should not be transformed when it occurs inside strings or
166 comments. This is hard to reliably avoid with regexes. The solution is to
166 comments. This is hard to reliably avoid with regexes. The solution is to
167 tokenise the code as Python, and recognise the special syntax in the tokens.
167 tokenise the code as Python, and recognise the special syntax in the tokens.
168
168
169 IPython's special syntax is not valid Python syntax, so tokenising may go
169 IPython's special syntax is not valid Python syntax, so tokenising may go
170 wrong after the special syntax starts. These classes therefore find and
170 wrong after the special syntax starts. These classes therefore find and
171 transform *one* instance of special syntax at a time into regular Python
171 transform *one* instance of special syntax at a time into regular Python
172 syntax. After each transformation, tokens are regenerated to find the next
172 syntax. After each transformation, tokens are regenerated to find the next
173 piece of special syntax.
173 piece of special syntax.
174
174
175 Subclasses need to implement one class method (find)
175 Subclasses need to implement one class method (find)
176 and one regular method (transform).
176 and one regular method (transform).
177
177
178 The priority attribute can select which transformation to apply if multiple
178 The priority attribute can select which transformation to apply if multiple
179 transformers match in the same place. Lower numbers have higher priority.
179 transformers match in the same place. Lower numbers have higher priority.
180 This allows "%magic?" to be turned into a help call rather than a magic call.
180 This allows "%magic?" to be turned into a help call rather than a magic call.
181 """
181 """
182 # Lower numbers -> higher priority (for matches in the same location)
182 # Lower numbers -> higher priority (for matches in the same location)
183 priority = 10
183 priority = 10
184
184
185 def sortby(self):
185 def sortby(self):
186 return self.start_line, self.start_col, self.priority
186 return self.start_line, self.start_col, self.priority
187
187
188 def __init__(self, start):
188 def __init__(self, start):
189 self.start_line = start[0] - 1 # Shift from 1-index to 0-index
189 self.start_line = start[0] - 1 # Shift from 1-index to 0-index
190 self.start_col = start[1]
190 self.start_col = start[1]
191
191
192 @classmethod
192 @classmethod
193 def find(cls, tokens_by_line):
193 def find(cls, tokens_by_line):
194 """Find one instance of special syntax in the provided tokens.
194 """Find one instance of special syntax in the provided tokens.
195
195
196 Tokens are grouped into logical lines for convenience,
196 Tokens are grouped into logical lines for convenience,
197 so it is easy to e.g. look at the first token of each line.
197 so it is easy to e.g. look at the first token of each line.
198 *tokens_by_line* is a list of lists of tokenize.TokenInfo objects.
198 *tokens_by_line* is a list of lists of tokenize.TokenInfo objects.
199
199
200 This should return an instance of its class, pointing to the start
200 This should return an instance of its class, pointing to the start
201 position it has found, or None if it found no match.
201 position it has found, or None if it found no match.
202 """
202 """
203 raise NotImplementedError
203 raise NotImplementedError
204
204
205 def transform(self, lines: List[str]):
205 def transform(self, lines: List[str]):
206 """Transform one instance of special syntax found by ``find()``
206 """Transform one instance of special syntax found by ``find()``
207
207
208 Takes a list of strings representing physical lines,
208 Takes a list of strings representing physical lines,
209 returns a similar list of transformed lines.
209 returns a similar list of transformed lines.
210 """
210 """
211 raise NotImplementedError
211 raise NotImplementedError
212
212
213 class MagicAssign(TokenTransformBase):
213 class MagicAssign(TokenTransformBase):
214 """Transformer for assignments from magics (a = %foo)"""
214 """Transformer for assignments from magics (a = %foo)"""
215 @classmethod
215 @classmethod
216 def find(cls, tokens_by_line):
216 def find(cls, tokens_by_line):
217 """Find the first magic assignment (a = %foo) in the cell.
217 """Find the first magic assignment (a = %foo) in the cell.
218 """
218 """
219 for line in tokens_by_line:
219 for line in tokens_by_line:
220 assign_ix = _find_assign_op(line)
220 assign_ix = _find_assign_op(line)
221 if (assign_ix is not None) \
221 if (assign_ix is not None) \
222 and (len(line) >= assign_ix + 2) \
222 and (len(line) >= assign_ix + 2) \
223 and (line[assign_ix+1].string == '%') \
223 and (line[assign_ix+1].string == '%') \
224 and (line[assign_ix+2].type == tokenize.NAME):
224 and (line[assign_ix+2].type == tokenize.NAME):
225 return cls(line[assign_ix+1].start)
225 return cls(line[assign_ix+1].start)
226
226
227 def transform(self, lines: List[str]):
227 def transform(self, lines: List[str]):
228 """Transform a magic assignment found by the ``find()`` classmethod.
228 """Transform a magic assignment found by the ``find()`` classmethod.
229 """
229 """
230 start_line, start_col = self.start_line, self.start_col
230 start_line, start_col = self.start_line, self.start_col
231 lhs = lines[start_line][:start_col]
231 lhs = lines[start_line][:start_col]
232 end_line = find_end_of_continued_line(lines, start_line)
232 end_line = find_end_of_continued_line(lines, start_line)
233 rhs = assemble_continued_line(lines, (start_line, start_col), end_line)
233 rhs = assemble_continued_line(lines, (start_line, start_col), end_line)
234 assert rhs.startswith('%'), rhs
234 assert rhs.startswith('%'), rhs
235 magic_name, _, args = rhs[1:].partition(' ')
235 magic_name, _, args = rhs[1:].partition(' ')
236
236
237 lines_before = lines[:start_line]
237 lines_before = lines[:start_line]
238 call = "get_ipython().run_line_magic({!r}, {!r})".format(magic_name, args)
238 call = "get_ipython().run_line_magic({!r}, {!r})".format(magic_name, args)
239 new_line = lhs + call + '\n'
239 new_line = lhs + call + '\n'
240 lines_after = lines[end_line+1:]
240 lines_after = lines[end_line+1:]
241
241
242 return lines_before + [new_line] + lines_after
242 return lines_before + [new_line] + lines_after
243
243
244
244
245 class SystemAssign(TokenTransformBase):
245 class SystemAssign(TokenTransformBase):
246 """Transformer for assignments from system commands (a = !foo)"""
246 """Transformer for assignments from system commands (a = !foo)"""
247 @classmethod
247 @classmethod
248 def find(cls, tokens_by_line):
248 def find(cls, tokens_by_line):
249 """Find the first system assignment (a = !foo) in the cell.
249 """Find the first system assignment (a = !foo) in the cell.
250 """
250 """
251 for line in tokens_by_line:
251 for line in tokens_by_line:
252 assign_ix = _find_assign_op(line)
252 assign_ix = _find_assign_op(line)
253 if (assign_ix is not None) \
253 if (assign_ix is not None) \
254 and not line[assign_ix].line.strip().startswith('=') \
254 and not line[assign_ix].line.strip().startswith('=') \
255 and (len(line) >= assign_ix + 2) \
255 and (len(line) >= assign_ix + 2) \
256 and (line[assign_ix + 1].type == tokenize.ERRORTOKEN):
256 and (line[assign_ix + 1].type == tokenize.ERRORTOKEN):
257 ix = assign_ix + 1
257 ix = assign_ix + 1
258
258
259 while ix < len(line) and line[ix].type == tokenize.ERRORTOKEN:
259 while ix < len(line) and line[ix].type == tokenize.ERRORTOKEN:
260 if line[ix].string == '!':
260 if line[ix].string == '!':
261 return cls(line[ix].start)
261 return cls(line[ix].start)
262 elif not line[ix].string.isspace():
262 elif not line[ix].string.isspace():
263 break
263 break
264 ix += 1
264 ix += 1
265
265
266 def transform(self, lines: List[str]):
266 def transform(self, lines: List[str]):
267 """Transform a system assignment found by the ``find()`` classmethod.
267 """Transform a system assignment found by the ``find()`` classmethod.
268 """
268 """
269 start_line, start_col = self.start_line, self.start_col
269 start_line, start_col = self.start_line, self.start_col
270
270
271 lhs = lines[start_line][:start_col]
271 lhs = lines[start_line][:start_col]
272 end_line = find_end_of_continued_line(lines, start_line)
272 end_line = find_end_of_continued_line(lines, start_line)
273 rhs = assemble_continued_line(lines, (start_line, start_col), end_line)
273 rhs = assemble_continued_line(lines, (start_line, start_col), end_line)
274 assert rhs.startswith('!'), rhs
274 assert rhs.startswith('!'), rhs
275 cmd = rhs[1:]
275 cmd = rhs[1:]
276
276
277 lines_before = lines[:start_line]
277 lines_before = lines[:start_line]
278 call = "get_ipython().getoutput({!r})".format(cmd)
278 call = "get_ipython().getoutput({!r})".format(cmd)
279 new_line = lhs + call + '\n'
279 new_line = lhs + call + '\n'
280 lines_after = lines[end_line + 1:]
280 lines_after = lines[end_line + 1:]
281
281
282 return lines_before + [new_line] + lines_after
282 return lines_before + [new_line] + lines_after
283
283
284 # The escape sequences that define the syntax transformations IPython will
284 # The escape sequences that define the syntax transformations IPython will
285 # apply to user input. These can NOT simply be changed here: many regular
285 # apply to user input. These can NOT simply be changed here: many regular
286 # expressions and other parts of the code may use their hardcoded values, and
286 # expressions and other parts of the code may use their hardcoded values, and
287 # for all intents and purposes they constitute the 'IPython syntax', so they
287 # for all intents and purposes they constitute the 'IPython syntax', so they
288 # should be considered fixed.
288 # should be considered fixed.
289
289
290 ESC_SHELL = '!' # Send line to underlying system shell
290 ESC_SHELL = '!' # Send line to underlying system shell
291 ESC_SH_CAP = '!!' # Send line to system shell and capture output
291 ESC_SH_CAP = '!!' # Send line to system shell and capture output
292 ESC_HELP = '?' # Find information about object
292 ESC_HELP = '?' # Find information about object
293 ESC_HELP2 = '??' # Find extra-detailed information about object
293 ESC_HELP2 = '??' # Find extra-detailed information about object
294 ESC_MAGIC = '%' # Call magic function
294 ESC_MAGIC = '%' # Call magic function
295 ESC_MAGIC2 = '%%' # Call cell-magic function
295 ESC_MAGIC2 = '%%' # Call cell-magic function
296 ESC_QUOTE = ',' # Split args on whitespace, quote each as string and call
296 ESC_QUOTE = ',' # Split args on whitespace, quote each as string and call
297 ESC_QUOTE2 = ';' # Quote all args as a single string, call
297 ESC_QUOTE2 = ';' # Quote all args as a single string, call
298 ESC_PAREN = '/' # Call first argument with rest of line as arguments
298 ESC_PAREN = '/' # Call first argument with rest of line as arguments
299
299
300 ESCAPE_SINGLES = {'!', '?', '%', ',', ';', '/'}
300 ESCAPE_SINGLES = {'!', '?', '%', ',', ';', '/'}
301 ESCAPE_DOUBLES = {'!!', '??'} # %% (cell magic) is handled separately
301 ESCAPE_DOUBLES = {'!!', '??'} # %% (cell magic) is handled separately
302
302
303 def _make_help_call(target, esc, next_input=None):
303 def _make_help_call(target, esc, next_input=None):
304 """Prepares a pinfo(2)/psearch call from a target name and the escape
304 """Prepares a pinfo(2)/psearch call from a target name and the escape
305 (i.e. ? or ??)"""
305 (i.e. ? or ??)"""
306 method = 'pinfo2' if esc == '??' \
306 method = 'pinfo2' if esc == '??' \
307 else 'psearch' if '*' in target \
307 else 'psearch' if '*' in target \
308 else 'pinfo'
308 else 'pinfo'
309 arg = " ".join([method, target])
309 arg = " ".join([method, target])
310 # Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
310 # Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
311 t_magic_name, _, t_magic_arg_s = arg.partition(' ')
311 t_magic_name, _, t_magic_arg_s = arg.partition(' ')
312 t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
312 t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
313 if next_input is None:
313 if next_input is None:
314 return 'get_ipython().run_line_magic(%r, %r)' % (t_magic_name, t_magic_arg_s)
314 return 'get_ipython().run_line_magic(%r, %r)' % (t_magic_name, t_magic_arg_s)
315 else:
315 else:
316 return 'get_ipython().set_next_input(%r);get_ipython().run_line_magic(%r, %r)' % \
316 return 'get_ipython().set_next_input(%r);get_ipython().run_line_magic(%r, %r)' % \
317 (next_input, t_magic_name, t_magic_arg_s)
317 (next_input, t_magic_name, t_magic_arg_s)
318
318
319 def _tr_help(content):
319 def _tr_help(content):
320 """Translate lines escaped with: ?
320 """Translate lines escaped with: ?
321
321
322 A naked help line should fire the intro help screen (shell.show_usage())
322 A naked help line should fire the intro help screen (shell.show_usage())
323 """
323 """
324 if not content:
324 if not content:
325 return 'get_ipython().show_usage()'
325 return 'get_ipython().show_usage()'
326
326
327 return _make_help_call(content, '?')
327 return _make_help_call(content, '?')
328
328
329 def _tr_help2(content):
329 def _tr_help2(content):
330 """Translate lines escaped with: ??
330 """Translate lines escaped with: ??
331
331
332 A naked help line should fire the intro help screen (shell.show_usage())
332 A naked help line should fire the intro help screen (shell.show_usage())
333 """
333 """
334 if not content:
334 if not content:
335 return 'get_ipython().show_usage()'
335 return 'get_ipython().show_usage()'
336
336
337 return _make_help_call(content, '??')
337 return _make_help_call(content, '??')
338
338
339 def _tr_magic(content):
339 def _tr_magic(content):
340 "Translate lines escaped with a percent sign: %"
340 "Translate lines escaped with a percent sign: %"
341 name, _, args = content.partition(' ')
341 name, _, args = content.partition(' ')
342 return 'get_ipython().run_line_magic(%r, %r)' % (name, args)
342 return 'get_ipython().run_line_magic(%r, %r)' % (name, args)
343
343
344 def _tr_quote(content):
344 def _tr_quote(content):
345 "Translate lines escaped with a comma: ,"
345 "Translate lines escaped with a comma: ,"
346 name, _, args = content.partition(' ')
346 name, _, args = content.partition(' ')
347 return '%s("%s")' % (name, '", "'.join(args.split()) )
347 return '%s("%s")' % (name, '", "'.join(args.split()) )
348
348
349 def _tr_quote2(content):
349 def _tr_quote2(content):
350 "Translate lines escaped with a semicolon: ;"
350 "Translate lines escaped with a semicolon: ;"
351 name, _, args = content.partition(' ')
351 name, _, args = content.partition(' ')
352 return '%s("%s")' % (name, args)
352 return '%s("%s")' % (name, args)
353
353
354 def _tr_paren(content):
354 def _tr_paren(content):
355 "Translate lines escaped with a slash: /"
355 "Translate lines escaped with a slash: /"
356 name, _, args = content.partition(' ')
356 name, _, args = content.partition(' ')
357 return '%s(%s)' % (name, ", ".join(args.split()))
357 return '%s(%s)' % (name, ", ".join(args.split()))
358
358
359 tr = { ESC_SHELL : 'get_ipython().system({!r})'.format,
359 tr = { ESC_SHELL : 'get_ipython().system({!r})'.format,
360 ESC_SH_CAP : 'get_ipython().getoutput({!r})'.format,
360 ESC_SH_CAP : 'get_ipython().getoutput({!r})'.format,
361 ESC_HELP : _tr_help,
361 ESC_HELP : _tr_help,
362 ESC_HELP2 : _tr_help2,
362 ESC_HELP2 : _tr_help2,
363 ESC_MAGIC : _tr_magic,
363 ESC_MAGIC : _tr_magic,
364 ESC_QUOTE : _tr_quote,
364 ESC_QUOTE : _tr_quote,
365 ESC_QUOTE2 : _tr_quote2,
365 ESC_QUOTE2 : _tr_quote2,
366 ESC_PAREN : _tr_paren }
366 ESC_PAREN : _tr_paren }
367
367
368 class EscapedCommand(TokenTransformBase):
368 class EscapedCommand(TokenTransformBase):
369 """Transformer for escaped commands like %foo, !foo, or /foo"""
369 """Transformer for escaped commands like %foo, !foo, or /foo"""
370 @classmethod
370 @classmethod
371 def find(cls, tokens_by_line):
371 def find(cls, tokens_by_line):
372 """Find the first escaped command (%foo, !foo, etc.) in the cell.
372 """Find the first escaped command (%foo, !foo, etc.) in the cell.
373 """
373 """
374 for line in tokens_by_line:
374 for line in tokens_by_line:
375 if not line:
375 if not line:
376 continue
376 continue
377 ix = 0
377 ix = 0
378 ll = len(line)
378 ll = len(line)
379 while ll > ix and line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
379 while ll > ix and line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
380 ix += 1
380 ix += 1
381 if ix >= ll:
381 if ix >= ll:
382 continue
382 continue
383 if line[ix].string in ESCAPE_SINGLES:
383 if line[ix].string in ESCAPE_SINGLES:
384 return cls(line[ix].start)
384 return cls(line[ix].start)
385
385
386 def transform(self, lines):
386 def transform(self, lines):
387 """Transform an escaped line found by the ``find()`` classmethod.
387 """Transform an escaped line found by the ``find()`` classmethod.
388 """
388 """
389 start_line, start_col = self.start_line, self.start_col
389 start_line, start_col = self.start_line, self.start_col
390
390
391 indent = lines[start_line][:start_col]
391 indent = lines[start_line][:start_col]
392 end_line = find_end_of_continued_line(lines, start_line)
392 end_line = find_end_of_continued_line(lines, start_line)
393 line = assemble_continued_line(lines, (start_line, start_col), end_line)
393 line = assemble_continued_line(lines, (start_line, start_col), end_line)
394
394
395 if len(line) > 1 and line[:2] in ESCAPE_DOUBLES:
395 if len(line) > 1 and line[:2] in ESCAPE_DOUBLES:
396 escape, content = line[:2], line[2:]
396 escape, content = line[:2], line[2:]
397 else:
397 else:
398 escape, content = line[:1], line[1:]
398 escape, content = line[:1], line[1:]
399
399
400 if escape in tr:
400 if escape in tr:
401 call = tr[escape](content)
401 call = tr[escape](content)
402 else:
402 else:
403 call = ''
403 call = ''
404
404
405 lines_before = lines[:start_line]
405 lines_before = lines[:start_line]
406 new_line = indent + call + '\n'
406 new_line = indent + call + '\n'
407 lines_after = lines[end_line + 1:]
407 lines_after = lines[end_line + 1:]
408
408
409 return lines_before + [new_line] + lines_after
409 return lines_before + [new_line] + lines_after
410
410
411 _help_end_re = re.compile(r"""(%{0,2}
411 _help_end_re = re.compile(r"""(%{0,2}
412 (?!\d)[\w*]+ # Variable name
412 (?!\d)[\w*]+ # Variable name
413 (\.(?!\d)[\w*]+)* # .etc.etc
413 (\.(?!\d)[\w*]+)* # .etc.etc
414 )
414 )
415 (\?\??)$ # ? or ??
415 (\?\??)$ # ? or ??
416 """,
416 """,
417 re.VERBOSE)
417 re.VERBOSE)
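# Editor's illustration (not part of the change set): this pattern matches
# e.g. "foo?", "foo.bar??", "np.*?" and "%%timeit?", capturing the target
# (including any leading %/%%) in group 1 and the trailing "?" or "??" in
# group 3.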
418
418
419 class HelpEnd(TokenTransformBase):
419 class HelpEnd(TokenTransformBase):
420 """Transformer for help syntax: obj? and obj??"""
420 """Transformer for help syntax: obj? and obj??"""
421 # This needs to be higher priority (lower number) than EscapedCommand so
421 # This needs to be higher priority (lower number) than EscapedCommand so
422 # that inspecting magics (%foo?) works.
422 # that inspecting magics (%foo?) works.
423 priority = 5
423 priority = 5
424
424
425 def __init__(self, start, q_locn):
425 def __init__(self, start, q_locn):
426 super().__init__(start)
426 super().__init__(start)
427 self.q_line = q_locn[0] - 1 # Shift from 1-indexed to 0-indexed
427 self.q_line = q_locn[0] - 1 # Shift from 1-indexed to 0-indexed
428 self.q_col = q_locn[1]
428 self.q_col = q_locn[1]
429
429
430 @classmethod
430 @classmethod
431 def find(cls, tokens_by_line):
431 def find(cls, tokens_by_line):
432 """Find the first help command (foo?) in the cell.
432 """Find the first help command (foo?) in the cell.
433 """
433 """
434 for line in tokens_by_line:
434 for line in tokens_by_line:
435 # Last token is NEWLINE; look at last but one
435 # Last token is NEWLINE; look at last but one
436 if len(line) > 2 and line[-2].string == '?':
436 if len(line) > 2 and line[-2].string == '?':
437 # Find the first token that's not INDENT/DEDENT
437 # Find the first token that's not INDENT/DEDENT
438 ix = 0
438 ix = 0
439 while line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
439 while line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
440 ix += 1
440 ix += 1
441 return cls(line[ix].start, line[-2].start)
441 return cls(line[ix].start, line[-2].start)
442
442
443 def transform(self, lines):
443 def transform(self, lines):
444 """Transform a help command found by the ``find()`` classmethod.
444 """Transform a help command found by the ``find()`` classmethod.
445 """
445 """
446 piece = ''.join(lines[self.start_line:self.q_line+1])
446 piece = ''.join(lines[self.start_line:self.q_line+1])
447 indent, content = piece[:self.start_col], piece[self.start_col:]
447 indent, content = piece[:self.start_col], piece[self.start_col:]
448 lines_before = lines[:self.start_line]
448 lines_before = lines[:self.start_line]
449 lines_after = lines[self.q_line + 1:]
449 lines_after = lines[self.q_line + 1:]
450
450
451 m = _help_end_re.search(content)
451 m = _help_end_re.search(content)
452 if not m:
452 if not m:
453 raise SyntaxError(content)
453 raise SyntaxError(content)
454 assert m is not None, content
454 assert m is not None, content
455 target = m.group(1)
455 target = m.group(1)
456 esc = m.group(3)
456 esc = m.group(3)
457
457
458 # If we're mid-command, put it back on the next prompt for the user.
458 # If we're mid-command, put it back on the next prompt for the user.
459 next_input = None
459 next_input = None
460 if (not lines_before) and (not lines_after) \
460 if (not lines_before) and (not lines_after) \
461 and content.strip() != m.group(0):
461 and content.strip() != m.group(0):
462 next_input = content.rstrip('?\n')
462 next_input = content.rstrip('?\n')
463
463
464 call = _make_help_call(target, esc, next_input=next_input)
464 call = _make_help_call(target, esc, next_input=next_input)
465 new_line = indent + call + '\n'
465 new_line = indent + call + '\n'
466
466
467 return lines_before + [new_line] + lines_after
467 return lines_before + [new_line] + lines_after
468
468
469 def make_tokens_by_line(lines:List[str]):
469 def make_tokens_by_line(lines:List[str]):
470 """Tokenize a series of lines and group tokens by line.
470 """Tokenize a series of lines and group tokens by line.
471
471
472 The tokens for a multiline Python string or expression are grouped as one
472 The tokens for a multiline Python string or expression are grouped as one
473 line. All lines except the last line should keep their line ending ('\\n',
473 line. All lines except the last line should keep their line ending ('\\n',
474 '\\r\\n') for this to work properly. Use `.splitlines(keepends=True)`,
474 '\\r\\n') for this to work properly. Use `.splitlines(keepends=True)`,
475 for example, when passing a block of text to this function.
475 for example, when passing a block of text to this function.
476
476
477 """
477 """
478 # NL tokens are used inside multiline expressions, but also after blank
478 # NL tokens are used inside multiline expressions, but also after blank
479 # lines or comments. This is intentional - see https://bugs.python.org/issue17061
479 # lines or comments. This is intentional - see https://bugs.python.org/issue17061
480 # We want to group the former case together but split the latter, so we
480 # We want to group the former case together but split the latter, so we
481 # track parentheses level, similar to the internals of tokenize.
481 # track parentheses level, similar to the internals of tokenize.
482
482
483 # reexported from token on 3.7+
483 # reexported from token on 3.7+
484 NEWLINE, NL = tokenize.NEWLINE, tokenize.NL # type: ignore
484 NEWLINE, NL = tokenize.NEWLINE, tokenize.NL # type: ignore
485 tokens_by_line:List[List[Any]] = [[]]
485 tokens_by_line:List[List[Any]] = [[]]
486 if len(lines) > 1 and not lines[0].endswith(('\n', '\r', '\r\n', '\x0b', '\x0c')):
486 if len(lines) > 1 and not lines[0].endswith(('\n', '\r', '\r\n', '\x0b', '\x0c')):
487 warnings.warn("`make_tokens_by_line` received a list of lines which do not have lineending markers ('\\n', '\\r', '\\r\\n', '\\x0b', '\\x0c'), behavior will be unspecified")
487 warnings.warn("`make_tokens_by_line` received a list of lines which do not have lineending markers ('\\n', '\\r', '\\r\\n', '\\x0b', '\\x0c'), behavior will be unspecified")
488 parenlev = 0
488 parenlev = 0
489 try:
489 try:
490 for token in tokenize.generate_tokens(iter(lines).__next__):
490 for token in tokenize.generate_tokens(iter(lines).__next__):
491 tokens_by_line[-1].append(token)
491 tokens_by_line[-1].append(token)
492 if (token.type == NEWLINE) \
492 if (token.type == NEWLINE) \
493 or ((token.type == NL) and (parenlev <= 0)):
493 or ((token.type == NL) and (parenlev <= 0)):
494 tokens_by_line.append([])
494 tokens_by_line.append([])
495 elif token.string in {'(', '[', '{'}:
495 elif token.string in {'(', '[', '{'}:
496 parenlev += 1
496 parenlev += 1
497 elif token.string in {')', ']', '}'}:
497 elif token.string in {')', ']', '}'}:
498 if parenlev > 0:
498 if parenlev > 0:
499 parenlev -= 1
499 parenlev -= 1
500 except tokenize.TokenError:
500 except tokenize.TokenError:
501 # Input ended in a multiline string or expression. That's OK for us.
501 # Input ended in a multiline string or expression. That's OK for us.
502 pass
502 pass
503
503
504
504
505 if not tokens_by_line[-1]:
505 if not tokens_by_line[-1]:
506 tokens_by_line.pop()
506 tokens_by_line.pop()
507
507
508
508
509 return tokens_by_line
509 return tokens_by_line
510
510
511
512 def has_sunken_brackets(tokens: List[tokenize.TokenInfo]):
513 """Check if the depth of brackets in the list of tokens drops below 0"""
514 parenlev = 0
515 for token in tokens:
516 if token.string in {"(", "[", "{"}:
517 parenlev += 1
518 elif token.string in {")", "]", "}"}:
519 parenlev -= 1
520 if parenlev < 0:
521 return True
522 return False
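# Editor's illustration (not part of the change set): the check is about net
# depth, not about matching bracket *types*::
#
#     >>> has_sunken_brackets(make_tokens_by_line(["())\n"])[0])
#     True
#     >>> has_sunken_brackets(make_tokens_by_line(["(]\n"])[0])
#     False
#
# "(]" never sinks below depth zero, so it is left for the real parser
# (codeop.compile_command in check_complete below) to reject.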
523
524
511 def show_linewise_tokens(s: str):
525 def show_linewise_tokens(s: str):
512 """For investigation and debugging"""
526 """For investigation and debugging"""
513 if not s.endswith('\n'):
527 if not s.endswith('\n'):
514 s += '\n'
528 s += '\n'
515 lines = s.splitlines(keepends=True)
529 lines = s.splitlines(keepends=True)
516 for line in make_tokens_by_line(lines):
530 for line in make_tokens_by_line(lines):
517 print("Line -------")
531 print("Line -------")
518 for tokinfo in line:
532 for tokinfo in line:
519 print(" ", tokinfo)
533 print(" ", tokinfo)
520
534
521 # Arbitrary limit to prevent getting stuck in infinite loops
535 # Arbitrary limit to prevent getting stuck in infinite loops
522 TRANSFORM_LOOP_LIMIT = 500
536 TRANSFORM_LOOP_LIMIT = 500
523
537
524 class TransformerManager:
538 class TransformerManager:
525 """Applies various transformations to a cell or code block.
539 """Applies various transformations to a cell or code block.
526
540
527 The key methods for external use are ``transform_cell()``
541 The key methods for external use are ``transform_cell()``
528 and ``check_complete()``.
542 and ``check_complete()``.
529 """
543 """
530 def __init__(self):
544 def __init__(self):
531 self.cleanup_transforms = [
545 self.cleanup_transforms = [
532 leading_empty_lines,
546 leading_empty_lines,
533 leading_indent,
547 leading_indent,
534 classic_prompt,
548 classic_prompt,
535 ipython_prompt,
549 ipython_prompt,
536 ]
550 ]
537 self.line_transforms = [
551 self.line_transforms = [
538 cell_magic,
552 cell_magic,
539 ]
553 ]
540 self.token_transformers = [
554 self.token_transformers = [
541 MagicAssign,
555 MagicAssign,
542 SystemAssign,
556 SystemAssign,
543 EscapedCommand,
557 EscapedCommand,
544 HelpEnd,
558 HelpEnd,
545 ]
559 ]
546
560
547 def do_one_token_transform(self, lines):
561 def do_one_token_transform(self, lines):
548 """Find and run the transform earliest in the code.
562 """Find and run the transform earliest in the code.
549
563
550 Returns (changed, lines).
564 Returns (changed, lines).
551
565
552 This method is called repeatedly until changed is False, indicating
566 This method is called repeatedly until changed is False, indicating
553 that all available transformations are complete.
567 that all available transformations are complete.
554
568
555 The tokens following IPython special syntax might not be valid, so
569 The tokens following IPython special syntax might not be valid, so
556 the transformed code is retokenised every time to identify the next
570 the transformed code is retokenised every time to identify the next
557 piece of special syntax. Hopefully long code cells are mostly valid
571 piece of special syntax. Hopefully long code cells are mostly valid
558 Python, not using lots of IPython special syntax, so this shouldn't be
572 Python, not using lots of IPython special syntax, so this shouldn't be
559 a performance issue.
573 a performance issue.
560 """
574 """
561 tokens_by_line = make_tokens_by_line(lines)
575 tokens_by_line = make_tokens_by_line(lines)
562 candidates = []
576 candidates = []
563 for transformer_cls in self.token_transformers:
577 for transformer_cls in self.token_transformers:
564 transformer = transformer_cls.find(tokens_by_line)
578 transformer = transformer_cls.find(tokens_by_line)
565 if transformer:
579 if transformer:
566 candidates.append(transformer)
580 candidates.append(transformer)
567
581
568 if not candidates:
582 if not candidates:
569 # Nothing to transform
583 # Nothing to transform
570 return False, lines
584 return False, lines
571 ordered_transformers = sorted(candidates, key=TokenTransformBase.sortby)
585 ordered_transformers = sorted(candidates, key=TokenTransformBase.sortby)
572 for transformer in ordered_transformers:
586 for transformer in ordered_transformers:
573 try:
587 try:
574 return True, transformer.transform(lines)
588 return True, transformer.transform(lines)
575 except SyntaxError:
589 except SyntaxError:
576 pass
590 pass
577 return False, lines
591 return False, lines
578
592
579 def do_token_transforms(self, lines):
593 def do_token_transforms(self, lines):
580 for _ in range(TRANSFORM_LOOP_LIMIT):
594 for _ in range(TRANSFORM_LOOP_LIMIT):
581 changed, lines = self.do_one_token_transform(lines)
595 changed, lines = self.do_one_token_transform(lines)
582 if not changed:
596 if not changed:
583 return lines
597 return lines
584
598
585 raise RuntimeError("Input transformation still changing after "
599 raise RuntimeError("Input transformation still changing after "
586 "%d iterations. Aborting." % TRANSFORM_LOOP_LIMIT)
600 "%d iterations. Aborting." % TRANSFORM_LOOP_LIMIT)
587
601
588 def transform_cell(self, cell: str) -> str:
602 def transform_cell(self, cell: str) -> str:
589 """Transforms a cell of input code"""
603 """Transforms a cell of input code"""
590 if not cell.endswith('\n'):
604 if not cell.endswith('\n'):
591 cell += '\n' # Ensure the cell has a trailing newline
605 cell += '\n' # Ensure the cell has a trailing newline
592 lines = cell.splitlines(keepends=True)
606 lines = cell.splitlines(keepends=True)
593 for transform in self.cleanup_transforms + self.line_transforms:
607 for transform in self.cleanup_transforms + self.line_transforms:
594 lines = transform(lines)
608 lines = transform(lines)
595
609
596 lines = self.do_token_transforms(lines)
610 lines = self.do_token_transforms(lines)
597 return ''.join(lines)
611 return ''.join(lines)
598
612
599 def check_complete(self, cell: str):
613 def check_complete(self, cell: str):
600 """Return whether a block of code is ready to execute, or should be continued
614 """Return whether a block of code is ready to execute, or should be continued
601
615
602 Parameters
616 Parameters
603 ----------
617 ----------
604 cell : string
618 cell : string
605 Python input code, which can be multiline.
619 Python input code, which can be multiline.
606
620
607 Returns
621 Returns
608 -------
622 -------
609 status : str
623 status : str
610 One of 'complete', 'incomplete', or 'invalid' if source is not a
624 One of 'complete', 'incomplete', or 'invalid' if source is not a
611 prefix of valid code.
625 prefix of valid code.
612 indent_spaces : int or None
626 indent_spaces : int or None
613 The number of spaces by which to indent the next line of code. If
627 The number of spaces by which to indent the next line of code. If
614 status is not 'incomplete', this is None.
628 status is not 'incomplete', this is None.
615 """
629 """
616 # Remember if the cell ends in a newline.
630 # Remember if the cell ends in a newline.
617 ends_with_newline = False
631 ends_with_newline = False
618 for character in reversed(cell):
632 for character in reversed(cell):
619 if character == '\n':
633 if character == '\n':
620 ends_with_newline = True
634 ends_with_newline = True
621 break
635 break
622 elif character.strip():
636 elif character.strip():
623 break
637 break
624 else:
638 else:
625 continue
639 continue
626
640
627 if not ends_with_newline:
641 if not ends_with_newline:
628 # Append a newline for consistent tokenization
642 # Append a newline for consistent tokenization
629 # See https://bugs.python.org/issue33899
643 # See https://bugs.python.org/issue33899
630 cell += '\n'
644 cell += '\n'
631
645
632 lines = cell.splitlines(keepends=True)
646 lines = cell.splitlines(keepends=True)
633
647
634 if not lines:
648 if not lines:
635 return 'complete', None
649 return 'complete', None
636
650
637 if lines[-1].endswith('\\'):
651 if lines[-1].endswith('\\'):
638 # Explicit backslash continuation
652 # Explicit backslash continuation
639 return 'incomplete', find_last_indent(lines)
653 return 'incomplete', find_last_indent(lines)
640
654
641 try:
655 try:
642 for transform in self.cleanup_transforms:
656 for transform in self.cleanup_transforms:
643 if not getattr(transform, 'has_side_effects', False):
657 if not getattr(transform, 'has_side_effects', False):
644 lines = transform(lines)
658 lines = transform(lines)
645 except SyntaxError:
659 except SyntaxError:
646 return 'invalid', None
660 return 'invalid', None
647
661
648 if lines[0].startswith('%%'):
662 if lines[0].startswith('%%'):
649 # Special case for cell magics - completion marked by blank line
663 # Special case for cell magics - completion marked by blank line
650 if lines[-1].strip():
664 if lines[-1].strip():
651 return 'incomplete', find_last_indent(lines)
665 return 'incomplete', find_last_indent(lines)
652 else:
666 else:
653 return 'complete', None
667 return 'complete', None
654
668
655 try:
669 try:
656 for transform in self.line_transforms:
670 for transform in self.line_transforms:
657 if not getattr(transform, 'has_side_effects', False):
671 if not getattr(transform, 'has_side_effects', False):
658 lines = transform(lines)
672 lines = transform(lines)
659 lines = self.do_token_transforms(lines)
673 lines = self.do_token_transforms(lines)
660 except SyntaxError:
674 except SyntaxError:
661 return 'invalid', None
675 return 'invalid', None
662
676
663 tokens_by_line = make_tokens_by_line(lines)
677 tokens_by_line = make_tokens_by_line(lines)
664
678
679 # Bail if we got a single line and it has more closing brackets than
680 # opening ones
681 if len(lines) == 1 and has_sunken_brackets(tokens_by_line[0]):
682 return "invalid", None
683
665 if not tokens_by_line:
684 if not tokens_by_line:
666 return 'incomplete', find_last_indent(lines)
685 return 'incomplete', find_last_indent(lines)
667
686
668 if tokens_by_line[-1][-1].type != tokenize.ENDMARKER:
687 if tokens_by_line[-1][-1].type != tokenize.ENDMARKER:
669 # We're in a multiline string or expression
688 # We're in a multiline string or expression
670 return 'incomplete', find_last_indent(lines)
689 return 'incomplete', find_last_indent(lines)
671
690
672 newline_types = {tokenize.NEWLINE, tokenize.COMMENT, tokenize.ENDMARKER} # type: ignore
691 newline_types = {tokenize.NEWLINE, tokenize.COMMENT, tokenize.ENDMARKER} # type: ignore
673
692
674 # Pop the last line which only contains DEDENTs and ENDMARKER
693 # Pop the last line which only contains DEDENTs and ENDMARKER
675 last_token_line = None
694 last_token_line = None
676 if {t.type for t in tokens_by_line[-1]} in [
695 if {t.type for t in tokens_by_line[-1]} in [
677 {tokenize.DEDENT, tokenize.ENDMARKER},
696 {tokenize.DEDENT, tokenize.ENDMARKER},
678 {tokenize.ENDMARKER}
697 {tokenize.ENDMARKER}
679 ] and len(tokens_by_line) > 1:
698 ] and len(tokens_by_line) > 1:
680 last_token_line = tokens_by_line.pop()
699 last_token_line = tokens_by_line.pop()
681
700
682 while tokens_by_line[-1] and tokens_by_line[-1][-1].type in newline_types:
701 while tokens_by_line[-1] and tokens_by_line[-1][-1].type in newline_types:
683 tokens_by_line[-1].pop()
702 tokens_by_line[-1].pop()
684
703
685 if not tokens_by_line[-1]:
704 if not tokens_by_line[-1]:
686 return 'incomplete', find_last_indent(lines)
705 return 'incomplete', find_last_indent(lines)
687
706
688 if tokens_by_line[-1][-1].string == ':':
707 if tokens_by_line[-1][-1].string == ':':
689 # The last line starts a block (e.g. 'if foo:')
708 # The last line starts a block (e.g. 'if foo:')
690 ix = 0
709 ix = 0
691 while tokens_by_line[-1][ix].type in {tokenize.INDENT, tokenize.DEDENT}:
710 while tokens_by_line[-1][ix].type in {tokenize.INDENT, tokenize.DEDENT}:
692 ix += 1
711 ix += 1
693
712
694 indent = tokens_by_line[-1][ix].start[1]
713 indent = tokens_by_line[-1][ix].start[1]
695 return 'incomplete', indent + 4
714 return 'incomplete', indent + 4
696
715
697 if tokens_by_line[-1][0].line.endswith('\\'):
716 if tokens_by_line[-1][0].line.endswith('\\'):
698 return 'incomplete', None
717 return 'incomplete', None
699
718
700 # At this point, our checks think the code is complete (or invalid).
719 # At this point, our checks think the code is complete (or invalid).
701 # We'll use codeop.compile_command to check this with the real parser
720 # We'll use codeop.compile_command to check this with the real parser
702 try:
721 try:
703 with warnings.catch_warnings():
722 with warnings.catch_warnings():
704 warnings.simplefilter('error', SyntaxWarning)
723 warnings.simplefilter('error', SyntaxWarning)
705 res = compile_command(''.join(lines), symbol='exec')
724 res = compile_command(''.join(lines), symbol='exec')
706 except (SyntaxError, OverflowError, ValueError, TypeError,
725 except (SyntaxError, OverflowError, ValueError, TypeError,
707 MemoryError, SyntaxWarning):
726 MemoryError, SyntaxWarning):
708 return 'invalid', None
727 return 'invalid', None
709 else:
728 else:
710 if res is None:
729 if res is None:
711 return 'incomplete', find_last_indent(lines)
730 return 'incomplete', find_last_indent(lines)
712
731
713 if last_token_line and last_token_line[0].type == tokenize.DEDENT:
732 if last_token_line and last_token_line[0].type == tokenize.DEDENT:
714 if ends_with_newline:
733 if ends_with_newline:
715 return 'complete', None
734 return 'complete', None
716 return 'incomplete', find_last_indent(lines)
735 return 'incomplete', find_last_indent(lines)
717
736
718 # If there's a blank line at the end, assume we're ready to execute
737 # If there's a blank line at the end, assume we're ready to execute
719 if not lines[-1].strip():
738 if not lines[-1].strip():
720 return 'complete', None
739 return 'complete', None
721
740
722 return 'complete', None
741 return 'complete', None
723
742
724
743
725 def find_last_indent(lines):
744 def find_last_indent(lines):
726 m = _indent_re.match(lines[-1])
745 m = _indent_re.match(lines[-1])
727 if not m:
746 if not m:
728 return 0
747 return 0
729 return len(m.group(0).replace('\t', ' '*4))
748 return len(m.group(0).replace('\t', ' '*4))
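Between the two files, a minimal usage sketch of the public entry points exercised by the tests below (an editor's illustration, not part of the changeset; the expected values follow from the behaviour defined above):

    from IPython.core.inputtransformer2 import TransformerManager

    tm = TransformerManager()
    tm.transform_cell("%ls\n")                 # "get_ipython().run_line_magic('ls', '')\n"
    tm.check_complete("for i in range(3):\n")  # ('incomplete', 4)
    tm.check_complete("x = 1\n")               # ('complete', None)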
@@ -1,337 +1,355 b''
1 """Tests for the token-based transformers in IPython.core.inputtransformer2
1 """Tests for the token-based transformers in IPython.core.inputtransformer2
2
2
3 Line-based transformers are the simpler ones; token-based transformers are
3 Line-based transformers are the simpler ones; token-based transformers are
4 more complex. See test_inputtransformer2_line for tests for line-based
4 more complex. See test_inputtransformer2_line for tests for line-based
5 transformations.
5 transformations.
6 """
6 """
7 import nose.tools as nt
7 import nose.tools as nt
8 import string
8 import string
9
9
10 from IPython.core import inputtransformer2 as ipt2
10 from IPython.core import inputtransformer2 as ipt2
11 from IPython.core.inputtransformer2 import make_tokens_by_line, _find_assign_op
11 from IPython.core.inputtransformer2 import make_tokens_by_line, _find_assign_op
12
12
13 from textwrap import dedent
13 from textwrap import dedent
14
14
15 MULTILINE_MAGIC = ("""\
15 MULTILINE_MAGIC = ("""\
16 a = f()
16 a = f()
17 %foo \\
17 %foo \\
18 bar
18 bar
19 g()
19 g()
20 """.splitlines(keepends=True), (2, 0), """\
20 """.splitlines(keepends=True), (2, 0), """\
21 a = f()
21 a = f()
22 get_ipython().run_line_magic('foo', ' bar')
22 get_ipython().run_line_magic('foo', ' bar')
23 g()
23 g()
24 """.splitlines(keepends=True))
24 """.splitlines(keepends=True))
25
25
26 INDENTED_MAGIC = ("""\
26 INDENTED_MAGIC = ("""\
27 for a in range(5):
27 for a in range(5):
28 %ls
28 %ls
29 """.splitlines(keepends=True), (2, 4), """\
29 """.splitlines(keepends=True), (2, 4), """\
30 for a in range(5):
30 for a in range(5):
31 get_ipython().run_line_magic('ls', '')
31 get_ipython().run_line_magic('ls', '')
32 """.splitlines(keepends=True))
32 """.splitlines(keepends=True))
33
33
34 CRLF_MAGIC = ([
34 CRLF_MAGIC = ([
35 "a = f()\n",
35 "a = f()\n",
36 "%ls\r\n",
36 "%ls\r\n",
37 "g()\n"
37 "g()\n"
38 ], (2, 0), [
38 ], (2, 0), [
39 "a = f()\n",
39 "a = f()\n",
40 "get_ipython().run_line_magic('ls', '')\n",
40 "get_ipython().run_line_magic('ls', '')\n",
41 "g()\n"
41 "g()\n"
42 ])
42 ])
43
43
44 MULTILINE_MAGIC_ASSIGN = ("""\
44 MULTILINE_MAGIC_ASSIGN = ("""\
45 a = f()
45 a = f()
46 b = %foo \\
46 b = %foo \\
47 bar
47 bar
48 g()
48 g()
49 """.splitlines(keepends=True), (2, 4), """\
49 """.splitlines(keepends=True), (2, 4), """\
50 a = f()
50 a = f()
51 b = get_ipython().run_line_magic('foo', ' bar')
51 b = get_ipython().run_line_magic('foo', ' bar')
52 g()
52 g()
53 """.splitlines(keepends=True))
53 """.splitlines(keepends=True))
54
54
55 MULTILINE_SYSTEM_ASSIGN = ("""\
55 MULTILINE_SYSTEM_ASSIGN = ("""\
56 a = f()
56 a = f()
57 b = !foo \\
57 b = !foo \\
58 bar
58 bar
59 g()
59 g()
60 """.splitlines(keepends=True), (2, 4), """\
60 """.splitlines(keepends=True), (2, 4), """\
61 a = f()
61 a = f()
62 b = get_ipython().getoutput('foo bar')
62 b = get_ipython().getoutput('foo bar')
63 g()
63 g()
64 """.splitlines(keepends=True))
64 """.splitlines(keepends=True))
65
65
66 #####
66 #####
67
67
68 MULTILINE_SYSTEM_ASSIGN_AFTER_DEDENT = ("""\
68 MULTILINE_SYSTEM_ASSIGN_AFTER_DEDENT = ("""\
69 def test():
69 def test():
70 for i in range(1):
70 for i in range(1):
71 print(i)
71 print(i)
72 res =! ls
72 res =! ls
73 """.splitlines(keepends=True), (4, 7), '''\
73 """.splitlines(keepends=True), (4, 7), '''\
74 def test():
74 def test():
75 for i in range(1):
75 for i in range(1):
76 print(i)
76 print(i)
77 res =get_ipython().getoutput(\' ls\')
77 res =get_ipython().getoutput(\' ls\')
78 '''.splitlines(keepends=True))
78 '''.splitlines(keepends=True))
79
79
80 ######
80 ######
81
81
82 AUTOCALL_QUOTE = (
82 AUTOCALL_QUOTE = (
83 [",f 1 2 3\n"], (1, 0),
83 [",f 1 2 3\n"], (1, 0),
84 ['f("1", "2", "3")\n']
84 ['f("1", "2", "3")\n']
85 )
85 )
86
86
87 AUTOCALL_QUOTE2 = (
87 AUTOCALL_QUOTE2 = (
88 [";f 1 2 3\n"], (1, 0),
88 [";f 1 2 3\n"], (1, 0),
89 ['f("1 2 3")\n']
89 ['f("1 2 3")\n']
90 )
90 )
91
91
92 AUTOCALL_PAREN = (
92 AUTOCALL_PAREN = (
93 ["/f 1 2 3\n"], (1, 0),
93 ["/f 1 2 3\n"], (1, 0),
94 ['f(1, 2, 3)\n']
94 ['f(1, 2, 3)\n']
95 )
95 )
96
96
97 SIMPLE_HELP = (
97 SIMPLE_HELP = (
98 ["foo?\n"], (1, 0),
98 ["foo?\n"], (1, 0),
99 ["get_ipython().run_line_magic('pinfo', 'foo')\n"]
99 ["get_ipython().run_line_magic('pinfo', 'foo')\n"]
100 )
100 )
101
101
102 DETAILED_HELP = (
102 DETAILED_HELP = (
103 ["foo??\n"], (1, 0),
103 ["foo??\n"], (1, 0),
104 ["get_ipython().run_line_magic('pinfo2', 'foo')\n"]
104 ["get_ipython().run_line_magic('pinfo2', 'foo')\n"]
105 )
105 )
106
106
107 MAGIC_HELP = (
107 MAGIC_HELP = (
108 ["%foo?\n"], (1, 0),
108 ["%foo?\n"], (1, 0),
109 ["get_ipython().run_line_magic('pinfo', '%foo')\n"]
109 ["get_ipython().run_line_magic('pinfo', '%foo')\n"]
110 )
110 )
111
111
112 HELP_IN_EXPR = (
112 HELP_IN_EXPR = (
113 ["a = b + c?\n"], (1, 0),
113 ["a = b + c?\n"], (1, 0),
114 ["get_ipython().set_next_input('a = b + c');"
114 ["get_ipython().set_next_input('a = b + c');"
115 "get_ipython().run_line_magic('pinfo', 'c')\n"]
115 "get_ipython().run_line_magic('pinfo', 'c')\n"]
116 )
116 )
117
117
118 HELP_CONTINUED_LINE = ("""\
118 HELP_CONTINUED_LINE = ("""\
119 a = \\
119 a = \\
120 zip?
120 zip?
121 """.splitlines(keepends=True), (1, 0),
121 """.splitlines(keepends=True), (1, 0),
122 [r"get_ipython().set_next_input('a = \\\nzip');get_ipython().run_line_magic('pinfo', 'zip')" + "\n"]
122 [r"get_ipython().set_next_input('a = \\\nzip');get_ipython().run_line_magic('pinfo', 'zip')" + "\n"]
123 )
123 )
124
124
125 HELP_MULTILINE = ("""\
125 HELP_MULTILINE = ("""\
126 (a,
126 (a,
127 b) = zip?
127 b) = zip?
128 """.splitlines(keepends=True), (1, 0),
128 """.splitlines(keepends=True), (1, 0),
129 [r"get_ipython().set_next_input('(a,\nb) = zip');get_ipython().run_line_magic('pinfo', 'zip')" + "\n"]
129 [r"get_ipython().set_next_input('(a,\nb) = zip');get_ipython().run_line_magic('pinfo', 'zip')" + "\n"]
130 )
130 )
131
131
132 HELP_UNICODE = (
132 HELP_UNICODE = (
133 ["π.foo?\n"], (1, 0),
133 ["π.foo?\n"], (1, 0),
134 ["get_ipython().run_line_magic('pinfo', 'π.foo')\n"]
134 ["get_ipython().run_line_magic('pinfo', 'π.foo')\n"]
135 )
135 )
136
136
137
137
138 def null_cleanup_transformer(lines):
138 def null_cleanup_transformer(lines):
139 """
139 """
140 A cleanup transform that returns an empty list.
140 A cleanup transform that returns an empty list.
141 """
141 """
142 return []
142 return []
143
143
144 def check_make_token_by_line_never_ends_empty():
144 def check_make_token_by_line_never_ends_empty():
145 """
145 """
146 Check that no sequence of one or two printable characters ends up producing an empty list of tokens
146 Check that no sequence of one or two printable characters ends up producing an empty list of tokens
147 """
147 """
148 from string import printable
148 from string import printable
149 for c in printable:
149 for c in printable:
150 nt.assert_not_equal(make_tokens_by_line(c)[-1], [])
150 nt.assert_not_equal(make_tokens_by_line(c)[-1], [])
151 for k in printable:
151 for k in printable:
152 nt.assert_not_equal(make_tokens_by_line(c+k)[-1], [])
152 nt.assert_not_equal(make_tokens_by_line(c+k)[-1], [])

def check_find(transformer, case, match=True):
    sample, expected_start, _ = case
    tbl = make_tokens_by_line(sample)
    res = transformer.find(tbl)
    if match:
        # start_line is stored 0-indexed, expected values are 1-indexed
        nt.assert_equal((res.start_line+1, res.start_col), expected_start)
        return res
    else:
        nt.assert_is(res, None)

def check_transform(transformer_cls, case):
    lines, start, expected = case
    transformer = transformer_cls(start)
    nt.assert_equal(transformer.transform(lines), expected)
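# As a usage sketch (a hypothetical case, not one of the fixtures above), a
# one-line magic could be exercised through the same helpers:
#   case = (["%time 1\n"], (1, 0),
#           ["get_ipython().run_line_magic('time', '1')\n"])
#   check_find(ipt2.EscapedCommand, case)
#   check_transform(ipt2.EscapedCommand, case)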

def test_continued_line():
    lines = MULTILINE_MAGIC_ASSIGN[0]
    nt.assert_equal(ipt2.find_end_of_continued_line(lines, 1), 2)

    nt.assert_equal(ipt2.assemble_continued_line(lines, (1, 5), 2), "foo bar")

def test_find_assign_magic():
    check_find(ipt2.MagicAssign, MULTILINE_MAGIC_ASSIGN)
    check_find(ipt2.MagicAssign, MULTILINE_SYSTEM_ASSIGN, match=False)
    check_find(ipt2.MagicAssign, MULTILINE_SYSTEM_ASSIGN_AFTER_DEDENT, match=False)

def test_transform_assign_magic():
    check_transform(ipt2.MagicAssign, MULTILINE_MAGIC_ASSIGN)

def test_find_assign_system():
    check_find(ipt2.SystemAssign, MULTILINE_SYSTEM_ASSIGN)
    check_find(ipt2.SystemAssign, MULTILINE_SYSTEM_ASSIGN_AFTER_DEDENT)
    check_find(ipt2.SystemAssign, (["a = !ls\n"], (1, 5), None))
    check_find(ipt2.SystemAssign, (["a=!ls\n"], (1, 2), None))
    check_find(ipt2.SystemAssign, MULTILINE_MAGIC_ASSIGN, match=False)

def test_transform_assign_system():
    check_transform(ipt2.SystemAssign, MULTILINE_SYSTEM_ASSIGN)
    check_transform(ipt2.SystemAssign, MULTILINE_SYSTEM_ASSIGN_AFTER_DEDENT)

def test_find_magic_escape():
    check_find(ipt2.EscapedCommand, MULTILINE_MAGIC)
    check_find(ipt2.EscapedCommand, INDENTED_MAGIC)
    check_find(ipt2.EscapedCommand, MULTILINE_MAGIC_ASSIGN, match=False)

def test_transform_magic_escape():
    check_transform(ipt2.EscapedCommand, MULTILINE_MAGIC)
    check_transform(ipt2.EscapedCommand, INDENTED_MAGIC)
    check_transform(ipt2.EscapedCommand, CRLF_MAGIC)

def test_find_autocalls():
    for case in [AUTOCALL_QUOTE, AUTOCALL_QUOTE2, AUTOCALL_PAREN]:
        print("Testing %r" % case[0])
        check_find(ipt2.EscapedCommand, case)

def test_transform_autocall():
    for case in [AUTOCALL_QUOTE, AUTOCALL_QUOTE2, AUTOCALL_PAREN]:
        print("Testing %r" % case[0])
        check_transform(ipt2.EscapedCommand, case)

def test_find_help():
    for case in [SIMPLE_HELP, DETAILED_HELP, MAGIC_HELP, HELP_IN_EXPR]:
        check_find(ipt2.HelpEnd, case)

    tf = check_find(ipt2.HelpEnd, HELP_CONTINUED_LINE)
    nt.assert_equal(tf.q_line, 1)
    nt.assert_equal(tf.q_col, 3)

    tf = check_find(ipt2.HelpEnd, HELP_MULTILINE)
    nt.assert_equal(tf.q_line, 1)
    nt.assert_equal(tf.q_col, 8)

    # ? in a comment does not trigger help
    check_find(ipt2.HelpEnd, (["foo # bar?\n"], None, None), match=False)
    # Nor in a string
    check_find(ipt2.HelpEnd, (["foo = '''bar?\n"], None, None), match=False)

def test_transform_help():
    tf = ipt2.HelpEnd((1, 0), (1, 9))
    nt.assert_equal(tf.transform(HELP_IN_EXPR[0]), HELP_IN_EXPR[2])

    tf = ipt2.HelpEnd((1, 0), (2, 3))
    nt.assert_equal(tf.transform(HELP_CONTINUED_LINE[0]), HELP_CONTINUED_LINE[2])

    tf = ipt2.HelpEnd((1, 0), (2, 8))
    nt.assert_equal(tf.transform(HELP_MULTILINE[0]), HELP_MULTILINE[2])

    tf = ipt2.HelpEnd((1, 0), (1, 0))
    nt.assert_equal(tf.transform(HELP_UNICODE[0]), HELP_UNICODE[2])

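# HelpEnd is constructed from two 1-indexed (line, column) positions: where
# the inspected construct starts and where its trailing '?' sits. In normal
# operation both come from the find() step exercised in test_find_help above.
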
def test_find_assign_op_dedent():
    """
    Be careful that empty tokens like DEDENT are not counted as parentheses.
    """
    class Tk:
        def __init__(self, s):
            self.string = s

    nt.assert_equal(_find_assign_op([Tk(s) for s in ('', 'a', '=', 'b')]), 2)
    nt.assert_equal(_find_assign_op([Tk(s) for s in ('', '(', 'a', '=', 'b', ')', '=', '5')]), 6)
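# As the two cases above show, _find_assign_op returns the index of the first
# '=' token that sits at bracket depth zero: index 2 in the flat case, and
# index 6 once the '=' nested inside the parentheses has been skipped.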

def test_check_complete():
    cc = ipt2.TransformerManager().check_complete
    nt.assert_equal(cc("a = 1"), ('complete', None))
    nt.assert_equal(cc("for a in range(5):"), ('incomplete', 4))
    nt.assert_equal(cc("for a in range(5):\n    if a > 0:"), ('incomplete', 8))
    nt.assert_equal(cc("raise = 2"), ('invalid', None))
    nt.assert_equal(cc("a = [1,\n2,"), ('incomplete', 0))
    nt.assert_equal(cc(")"), ('invalid', None))
    nt.assert_equal(cc("\\\r\n"), ('incomplete', 0))
    nt.assert_equal(cc("a = '''\n   hi"), ('incomplete', 3))
    nt.assert_equal(cc("def a():\n x=1\n global x"), ('invalid', None))
    nt.assert_equal(cc("a \\ "), ('invalid', None)) # Nothing allowed after backslash
    nt.assert_equal(cc("1\\\n+2"), ('complete', None))
    nt.assert_equal(cc("exit"), ('complete', None))

    example = dedent("""
        if True:
            a=1""" )

    nt.assert_equal(cc(example), ('incomplete', 4))
    nt.assert_equal(cc(example+'\n'), ('complete', None))
    nt.assert_equal(cc(example+'\n '), ('complete', None))

    # no need to loop on all the letters/numbers.
    short = '12abAB'+string.printable[62:]
    for c in short:
        # test does not raise:
        cc(c)
        for k in short:
            cc(c+k)

    nt.assert_equal(cc("def f():\n  x=0\n  \\\n  "), ('incomplete', 2))
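# check_complete returns a (status, indent) pair: status is one of 'complete',
# 'incomplete' or 'invalid', and indent is the number of spaces suggested for
# the next line when the input is incomplete (None otherwise).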

def test_check_complete_II():
    """
    Test that multiple line strings are properly handled.

    Separate test function for convenience

    """
    cc = ipt2.TransformerManager().check_complete
    nt.assert_equal(cc('''def foo():\n """'''), ('incomplete', 4))

def test_check_complete_invalidates_sunken_brackets():
    """
    Test that a single line with more closing brackets than opening ones is
    interpreted as invalid.
    """
    cc = ipt2.TransformerManager().check_complete
    nt.assert_equal(cc(")"), ("invalid", None))
    nt.assert_equal(cc("]"), ("invalid", None))
    nt.assert_equal(cc("}"), ("invalid", None))
    nt.assert_equal(cc(")("), ("invalid", None))
    nt.assert_equal(cc("]["), ("invalid", None))
    nt.assert_equal(cc("}{"), ("invalid", None))
    nt.assert_equal(cc("[()("), ("invalid", None))
    nt.assert_equal(cc("())("), ("invalid", None))
    nt.assert_equal(cc(")[]("), ("invalid", None))
    nt.assert_equal(cc("()]("), ("invalid", None))

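# A minimal sketch of a "sunken bracket" check in the spirit of these cases
# (an illustration only, not IPython's implementation): reject a single-line
# cell as soon as a closing bracket sinks the nesting depth below zero.
#
#     def _bracket_depth_goes_negative(line):   # hypothetical helper
#         depth = 0
#         for ch in line:
#             if ch in "([{":
#                 depth += 1
#             elif ch in ")]}":
#                 depth -= 1
#                 if depth < 0:
#                     return True
#         return False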

def test_null_cleanup_transformer():
    manager = ipt2.TransformerManager()
    manager.cleanup_transforms.insert(0, null_cleanup_transformer)
    assert manager.transform_cell("") == ""


def test_side_effects_I():
    count = 0
    def counter(lines):
        nonlocal count
        count += 1
        return lines

    counter.has_side_effects = True

    manager = ipt2.TransformerManager()
    manager.cleanup_transforms.insert(0, counter)
    assert manager.check_complete("a=1\n") == ('complete', None)
    assert count == 0


def test_side_effects_II():
    count = 0
    def counter(lines):
        nonlocal count
        count += 1
        return lines

    counter.has_side_effects = True

    manager = ipt2.TransformerManager()
    manager.line_transforms.insert(0, counter)
    assert manager.check_complete("b=1\n") == ('complete', None)
    assert count == 0
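# Both side-effect tests rely on the same contract: a transform that sets
# ``has_side_effects = True`` is skipped while check_complete() merely
# classifies the input, so the counter must remain at zero.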