Merge pull request #12482 from meeseeksmachine/auto-backport-of-pr-12475-on-7.x
Matthias Bussonnier
r25950:16a0884d merge
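
Judging from the two hunks below, the backported change makes the cell-magic and line-continuation handling tolerant of CRLF ('\r\n') line endings: the first line of a ``%%magic`` cell and the pieces of a backslash-continued line are now rstrip()-ed instead of having only their final character sliced off. A minimal illustration of the difference on a CRLF-terminated first line (not taken from the patch itself; ``first`` is just an illustrative string):

    first = "%%bash\r\n"
    first[2:-1].partition(' ')[0]         # old slicing: 'bash\r' (stray carriage return)
    first[2:].rstrip().partition(' ')[0]  # new handling: 'bash'
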
@@ -1,723 +1,723 @@
1 """Input transformer machinery to support IPython special syntax.
1 """Input transformer machinery to support IPython special syntax.
2
2
3 This includes the machinery to recognise and transform ``%magic`` commands,
3 This includes the machinery to recognise and transform ``%magic`` commands,
4 ``!system`` commands, ``help?`` querying, prompt stripping, and so forth.
4 ``!system`` commands, ``help?`` querying, prompt stripping, and so forth.
5
5
6 Added: IPython 7.0. Replaces inputsplitter and inputtransformer which were
6 Added: IPython 7.0. Replaces inputsplitter and inputtransformer which were
7 deprecated in 7.0.
7 deprecated in 7.0.
8 """
8 """
9
9
10 # Copyright (c) IPython Development Team.
10 # Copyright (c) IPython Development Team.
11 # Distributed under the terms of the Modified BSD License.
11 # Distributed under the terms of the Modified BSD License.
12
12
13 from codeop import compile_command
13 from codeop import compile_command
14 import re
14 import re
15 import tokenize
15 import tokenize
16 from typing import List, Tuple, Union
16 from typing import List, Tuple, Union
17 import warnings
17 import warnings
18
18
19 _indent_re = re.compile(r'^[ \t]+')
19 _indent_re = re.compile(r'^[ \t]+')
20
20
21 def leading_empty_lines(lines):
21 def leading_empty_lines(lines):
22 """Remove leading empty lines
22 """Remove leading empty lines
23
23
24 If the leading lines are empty or contain only whitespace, they will be
24 If the leading lines are empty or contain only whitespace, they will be
25 removed.
25 removed.
26 """
26 """
27 if not lines:
27 if not lines:
28 return lines
28 return lines
29 for i, line in enumerate(lines):
29 for i, line in enumerate(lines):
30 if line and not line.isspace():
30 if line and not line.isspace():
31 return lines[i:]
31 return lines[i:]
32 return lines
32 return lines
33
33
34 def leading_indent(lines):
34 def leading_indent(lines):
35 """Remove leading indentation.
35 """Remove leading indentation.
36
36
37 If the first line starts with a spaces or tabs, the same whitespace will be
37 If the first line starts with a spaces or tabs, the same whitespace will be
38 removed from each following line in the cell.
38 removed from each following line in the cell.
39 """
39 """
40 if not lines:
40 if not lines:
41 return lines
41 return lines
42 m = _indent_re.match(lines[0])
42 m = _indent_re.match(lines[0])
43 if not m:
43 if not m:
44 return lines
44 return lines
45 space = m.group(0)
45 space = m.group(0)
46 n = len(space)
46 n = len(space)
47 return [l[n:] if l.startswith(space) else l
47 return [l[n:] if l.startswith(space) else l
48 for l in lines]
48 for l in lines]
49
49
50 class PromptStripper:
50 class PromptStripper:
51 """Remove matching input prompts from a block of input.
51 """Remove matching input prompts from a block of input.
52
52
53 Parameters
53 Parameters
54 ----------
54 ----------
55 prompt_re : regular expression
55 prompt_re : regular expression
56 A regular expression matching any input prompt (including continuation,
56 A regular expression matching any input prompt (including continuation,
57 e.g. ``...``)
57 e.g. ``...``)
58 initial_re : regular expression, optional
58 initial_re : regular expression, optional
59 A regular expression matching only the initial prompt, but not continuation.
59 A regular expression matching only the initial prompt, but not continuation.
60 If no initial expression is given, prompt_re will be used everywhere.
60 If no initial expression is given, prompt_re will be used everywhere.
61 Used mainly for plain Python prompts (``>>>``), where the continuation prompt
61 Used mainly for plain Python prompts (``>>>``), where the continuation prompt
62 ``...`` is a valid Python expression in Python 3, so shouldn't be stripped.
62 ``...`` is a valid Python expression in Python 3, so shouldn't be stripped.
63
63
64 If initial_re and prompt_re differ,
64 If initial_re and prompt_re differ,
65 only initial_re will be tested against the first line.
65 only initial_re will be tested against the first line.
66 If any prompt is found on the first two lines,
66 If any prompt is found on the first two lines,
67 prompts will be stripped from the rest of the block.
67 prompts will be stripped from the rest of the block.
68 """
68 """
69 def __init__(self, prompt_re, initial_re=None):
69 def __init__(self, prompt_re, initial_re=None):
70 self.prompt_re = prompt_re
70 self.prompt_re = prompt_re
71 self.initial_re = initial_re or prompt_re
71 self.initial_re = initial_re or prompt_re
72
72
73 def _strip(self, lines):
73 def _strip(self, lines):
74 return [self.prompt_re.sub('', l, count=1) for l in lines]
74 return [self.prompt_re.sub('', l, count=1) for l in lines]
75
75
76 def __call__(self, lines):
76 def __call__(self, lines):
77 if not lines:
77 if not lines:
78 return lines
78 return lines
79 if self.initial_re.match(lines[0]) or \
79 if self.initial_re.match(lines[0]) or \
80 (len(lines) > 1 and self.prompt_re.match(lines[1])):
80 (len(lines) > 1 and self.prompt_re.match(lines[1])):
81 return self._strip(lines)
81 return self._strip(lines)
82 return lines
82 return lines
83
83
84 classic_prompt = PromptStripper(
84 classic_prompt = PromptStripper(
85 prompt_re=re.compile(r'^(>>>|\.\.\.)( |$)'),
85 prompt_re=re.compile(r'^(>>>|\.\.\.)( |$)'),
86 initial_re=re.compile(r'^>>>( |$)')
86 initial_re=re.compile(r'^>>>( |$)')
87 )
87 )
88
88
89 ipython_prompt = PromptStripper(re.compile(r'^(In \[\d+\]: |\s*\.{3,}: ?)'))
89 ipython_prompt = PromptStripper(re.compile(r'^(In \[\d+\]: |\s*\.{3,}: ?)'))
90
90
91 def cell_magic(lines):
91 def cell_magic(lines):
92 if not lines or not lines[0].startswith('%%'):
92 if not lines or not lines[0].startswith('%%'):
93 return lines
93 return lines
94 if re.match(r'%%\w+\?', lines[0]):
94 if re.match(r'%%\w+\?', lines[0]):
95 # This case will be handled by help_end
95 # This case will be handled by help_end
96 return lines
96 return lines
-    magic_name, _, first_line = lines[0][2:-1].partition(' ')
+    magic_name, _, first_line = lines[0][2:].rstrip().partition(' ')
    body = ''.join(lines[1:])
    return ['get_ipython().run_cell_magic(%r, %r, %r)\n'
            % (magic_name, first_line, body)]


def _find_assign_op(token_line) -> Union[int, None]:
    """Get the index of the first assignment in the line ('=' not inside brackets)

    Note: We don't try to support multiple special assignments (a = b = %foo)
    """
    paren_level = 0
    for i, ti in enumerate(token_line):
        s = ti.string
        if s == '=' and paren_level == 0:
            return i
        if s in {'(','[','{'}:
            paren_level += 1
        elif s in {')', ']', '}'}:
            if paren_level > 0:
                paren_level -= 1

def find_end_of_continued_line(lines, start_line: int):
    """Find the last line of a line explicitly extended using backslashes.

    Uses 0-indexed line numbers.
    """
    end_line = start_line
    while lines[end_line].endswith('\\\n'):
        end_line += 1
        if end_line >= len(lines):
            break
    return end_line

def assemble_continued_line(lines, start: Tuple[int, int], end_line: int):
    r"""Assemble a single line from multiple continued line pieces

    Continued lines are lines ending in ``\``, and the line following the last
    ``\`` in the block.

    For example, this code continues over multiple lines::

        if (assign_ix is not None) \
             and (len(line) >= assign_ix + 2) \
             and (line[assign_ix+1].string == '%') \
             and (line[assign_ix+2].type == tokenize.NAME):

    This statement contains four continued line pieces.
    Assembling these pieces into a single line would give::

        if (assign_ix is not None) and (len(line) >= assign_ix + 2) and (line[...

    This uses 0-indexed line numbers. *start* is (lineno, colno).

    Used to allow ``%magic`` and ``!system`` commands to be continued over
    multiple lines.
    """
    parts = [lines[start[0]][start[1]:]] + lines[start[0]+1:end_line+1]
-    return ' '.join([p[:-2] for p in parts[:-1]]          # Strip backslash+newline
-                    + [parts[-1][:-1]])                    # Strip newline from last line
+    return ' '.join([p.rstrip()[:-1] for p in parts[:-1]]  # Strip backslash+newline
+                    + [parts[-1].rstrip()])                # Strip newline from last line

class TokenTransformBase:
    """Base class for transformations which examine tokens.

    Special syntax should not be transformed when it occurs inside strings or
    comments. This is hard to reliably avoid with regexes. The solution is to
    tokenise the code as Python, and recognise the special syntax in the tokens.

    IPython's special syntax is not valid Python syntax, so tokenising may go
    wrong after the special syntax starts. These classes therefore find and
    transform *one* instance of special syntax at a time into regular Python
    syntax. After each transformation, tokens are regenerated to find the next
    piece of special syntax.

    Subclasses need to implement one class method (find)
    and one regular method (transform).

    The priority attribute can select which transformation to apply if multiple
    transformers match in the same place. Lower numbers have higher priority.
    This allows "%magic?" to be turned into a help call rather than a magic call.
    """
    # Lower numbers -> higher priority (for matches in the same location)
    priority = 10

    def sortby(self):
        return self.start_line, self.start_col, self.priority

    def __init__(self, start):
        self.start_line = start[0] - 1   # Shift from 1-index to 0-index
        self.start_col = start[1]

    @classmethod
    def find(cls, tokens_by_line):
        """Find one instance of special syntax in the provided tokens.

        Tokens are grouped into logical lines for convenience,
        so it is easy to e.g. look at the first token of each line.
        *tokens_by_line* is a list of lists of tokenize.TokenInfo objects.

        This should return an instance of its class, pointing to the start
        position it has found, or None if it found no match.
        """
        raise NotImplementedError

    def transform(self, lines: List[str]):
        """Transform one instance of special syntax found by ``find()``

        Takes a list of strings representing physical lines,
        returns a similar list of transformed lines.
        """
        raise NotImplementedError

class MagicAssign(TokenTransformBase):
    """Transformer for assignments from magics (a = %foo)"""
    @classmethod
    def find(cls, tokens_by_line):
        """Find the first magic assignment (a = %foo) in the cell.
        """
        for line in tokens_by_line:
            assign_ix = _find_assign_op(line)
            if (assign_ix is not None) \
                    and (len(line) >= assign_ix + 2) \
                    and (line[assign_ix+1].string == '%') \
                    and (line[assign_ix+2].type == tokenize.NAME):
                return cls(line[assign_ix+1].start)

    def transform(self, lines: List[str]):
        """Transform a magic assignment found by the ``find()`` classmethod.
        """
        start_line, start_col = self.start_line, self.start_col
        lhs = lines[start_line][:start_col]
        end_line = find_end_of_continued_line(lines, start_line)
        rhs = assemble_continued_line(lines, (start_line, start_col), end_line)
        assert rhs.startswith('%'), rhs
        magic_name, _, args = rhs[1:].partition(' ')

        lines_before = lines[:start_line]
        call = "get_ipython().run_line_magic({!r}, {!r})".format(magic_name, args)
        new_line = lhs + call + '\n'
        lines_after = lines[end_line+1:]

        return lines_before + [new_line] + lines_after


class SystemAssign(TokenTransformBase):
    """Transformer for assignments from system commands (a = !foo)"""
    @classmethod
    def find(cls, tokens_by_line):
        """Find the first system assignment (a = !foo) in the cell.
        """
        for line in tokens_by_line:
            assign_ix = _find_assign_op(line)
            if (assign_ix is not None) \
                    and not line[assign_ix].line.strip().startswith('=') \
                    and (len(line) >= assign_ix + 2) \
                    and (line[assign_ix + 1].type == tokenize.ERRORTOKEN):
                ix = assign_ix + 1

                while ix < len(line) and line[ix].type == tokenize.ERRORTOKEN:
                    if line[ix].string == '!':
                        return cls(line[ix].start)
                    elif not line[ix].string.isspace():
                        break
                    ix += 1

    def transform(self, lines: List[str]):
        """Transform a system assignment found by the ``find()`` classmethod.
        """
        start_line, start_col = self.start_line, self.start_col

        lhs = lines[start_line][:start_col]
        end_line = find_end_of_continued_line(lines, start_line)
        rhs = assemble_continued_line(lines, (start_line, start_col), end_line)
        assert rhs.startswith('!'), rhs
        cmd = rhs[1:]

        lines_before = lines[:start_line]
        call = "get_ipython().getoutput({!r})".format(cmd)
        new_line = lhs + call + '\n'
        lines_after = lines[end_line + 1:]

        return lines_before + [new_line] + lines_after

# The escape sequences that define the syntax transformations IPython will
# apply to user input. These can NOT be just changed here: many regular
# expressions and other parts of the code may use their hardcoded values, and
# for all intents and purposes they constitute the 'IPython syntax', so they
# should be considered fixed.

ESC_SHELL  = '!'   # Send line to underlying system shell
ESC_SH_CAP = '!!'  # Send line to system shell and capture output
ESC_HELP   = '?'   # Find information about object
ESC_HELP2  = '??'  # Find extra-detailed information about object
ESC_MAGIC  = '%'   # Call magic function
ESC_MAGIC2 = '%%'  # Call cell-magic function
ESC_QUOTE  = ','   # Split args on whitespace, quote each as string and call
ESC_QUOTE2 = ';'   # Quote all args as a single string, call
ESC_PAREN  = '/'   # Call first argument with rest of line as arguments

ESCAPE_SINGLES = {'!', '?', '%', ',', ';', '/'}
ESCAPE_DOUBLES = {'!!', '??'}  # %% (cell magic) is handled separately

def _make_help_call(target, esc, next_input=None):
    """Prepares a pinfo(2)/psearch call from a target name and the escape
    (i.e. ? or ??)"""
    method = 'pinfo2' if esc == '??' \
                else 'psearch' if '*' in target \
                else 'pinfo'
    arg = " ".join([method, target])
    #Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
    t_magic_name, _, t_magic_arg_s = arg.partition(' ')
    t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
    if next_input is None:
        return 'get_ipython().run_line_magic(%r, %r)' % (t_magic_name, t_magic_arg_s)
    else:
        return 'get_ipython().set_next_input(%r);get_ipython().run_line_magic(%r, %r)' % \
            (next_input, t_magic_name, t_magic_arg_s)

def _tr_help(content):
    """Translate lines escaped with: ?

    A naked help line should fire the intro help screen (shell.show_usage())
    """
    if not content:
        return 'get_ipython().show_usage()'

    return _make_help_call(content, '?')

def _tr_help2(content):
    """Translate lines escaped with: ??

    A naked help line should fire the intro help screen (shell.show_usage())
    """
    if not content:
        return 'get_ipython().show_usage()'

    return _make_help_call(content, '??')

def _tr_magic(content):
    "Translate lines escaped with a percent sign: %"
    name, _, args = content.partition(' ')
    return 'get_ipython().run_line_magic(%r, %r)' % (name, args)

def _tr_quote(content):
    "Translate lines escaped with a comma: ,"
    name, _, args = content.partition(' ')
    return '%s("%s")' % (name, '", "'.join(args.split()) )

def _tr_quote2(content):
    "Translate lines escaped with a semicolon: ;"
    name, _, args = content.partition(' ')
    return '%s("%s")' % (name, args)

def _tr_paren(content):
    "Translate lines escaped with a slash: /"
    name, _, args = content.partition(' ')
    return '%s(%s)' % (name, ", ".join(args.split()))

tr = { ESC_SHELL  : 'get_ipython().system({!r})'.format,
       ESC_SH_CAP : 'get_ipython().getoutput({!r})'.format,
       ESC_HELP   : _tr_help,
       ESC_HELP2  : _tr_help2,
       ESC_MAGIC  : _tr_magic,
       ESC_QUOTE  : _tr_quote,
       ESC_QUOTE2 : _tr_quote2,
       ESC_PAREN  : _tr_paren }

class EscapedCommand(TokenTransformBase):
    """Transformer for escaped commands like %foo, !foo, or /foo"""
    @classmethod
    def find(cls, tokens_by_line):
        """Find the first escaped command (%foo, !foo, etc.) in the cell.
        """
        for line in tokens_by_line:
            if not line:
                continue
            ix = 0
            ll = len(line)
            while ll > ix and line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
                ix += 1
            if ix >= ll:
                continue
            if line[ix].string in ESCAPE_SINGLES:
                return cls(line[ix].start)

    def transform(self, lines):
        """Transform an escaped line found by the ``find()`` classmethod.
        """
        start_line, start_col = self.start_line, self.start_col

        indent = lines[start_line][:start_col]
        end_line = find_end_of_continued_line(lines, start_line)
        line = assemble_continued_line(lines, (start_line, start_col), end_line)

        if len(line) > 1 and line[:2] in ESCAPE_DOUBLES:
            escape, content = line[:2], line[2:]
        else:
            escape, content = line[:1], line[1:]

        if escape in tr:
            call = tr[escape](content)
        else:
            call = ''

        lines_before = lines[:start_line]
        new_line = indent + call + '\n'
        lines_after = lines[end_line + 1:]

        return lines_before + [new_line] + lines_after

_help_end_re = re.compile(r"""(%{0,2}
                              (?!\d)[\w*]+            # Variable name
                              (\.(?!\d)[\w*]+)*       # .etc.etc
                              )
                              (\?\??)$                # ? or ??
                              """,
                              re.VERBOSE)

class HelpEnd(TokenTransformBase):
    """Transformer for help syntax: obj? and obj??"""
    # This needs to be higher priority (lower number) than EscapedCommand so
    # that inspecting magics (%foo?) works.
    priority = 5

    def __init__(self, start, q_locn):
        super().__init__(start)
        self.q_line = q_locn[0] - 1  # Shift from 1-indexed to 0-indexed
        self.q_col = q_locn[1]

    @classmethod
    def find(cls, tokens_by_line):
        """Find the first help command (foo?) in the cell.
        """
        for line in tokens_by_line:
            # Last token is NEWLINE; look at last but one
            if len(line) > 2 and line[-2].string == '?':
                # Find the first token that's not INDENT/DEDENT
                ix = 0
                while line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
                    ix += 1
                return cls(line[ix].start, line[-2].start)

    def transform(self, lines):
        """Transform a help command found by the ``find()`` classmethod.
        """
        piece = ''.join(lines[self.start_line:self.q_line+1])
        indent, content = piece[:self.start_col], piece[self.start_col:]
        lines_before = lines[:self.start_line]
        lines_after = lines[self.q_line + 1:]

        m = _help_end_re.search(content)
        if not m:
            raise SyntaxError(content)
        assert m is not None, content
        target = m.group(1)
        esc = m.group(3)

        # If we're mid-command, put it back on the next prompt for the user.
        next_input = None
        if (not lines_before) and (not lines_after) \
                and content.strip() != m.group(0):
            next_input = content.rstrip('?\n')

        call = _make_help_call(target, esc, next_input=next_input)
        new_line = indent + call + '\n'

        return lines_before + [new_line] + lines_after

def make_tokens_by_line(lines:List[str]):
    """Tokenize a series of lines and group tokens by line.

    The tokens for a multiline Python string or expression are grouped as one
    line. All lines except the last should keep their line ending ('\\n',
    '\\r\\n') for this to work properly. Use `.splitlines(keepends=True)`
    for example when passing a block of text to this function.

    """
    # NL tokens are used inside multiline expressions, but also after blank
    # lines or comments. This is intentional - see https://bugs.python.org/issue17061
    # We want to group the former case together but split the latter, so we
    # track parentheses level, similar to the internals of tokenize.
    NEWLINE, NL = tokenize.NEWLINE, tokenize.NL
    tokens_by_line = [[]]
    if len(lines) > 1 and not lines[0].endswith(('\n', '\r', '\r\n', '\x0b', '\x0c')):
        warnings.warn("`make_tokens_by_line` received a list of lines which do not have lineending markers ('\\n', '\\r', '\\r\\n', '\\x0b', '\\x0c'), behavior will be unspecified")
    parenlev = 0
    try:
        for token in tokenize.generate_tokens(iter(lines).__next__):
            tokens_by_line[-1].append(token)
            if (token.type == NEWLINE) \
                    or ((token.type == NL) and (parenlev <= 0)):
                tokens_by_line.append([])
            elif token.string in {'(', '[', '{'}:
                parenlev += 1
            elif token.string in {')', ']', '}'}:
                if parenlev > 0:
                    parenlev -= 1
    except tokenize.TokenError:
        # Input ended in a multiline string or expression. That's OK for us.
        pass


    if not tokens_by_line[-1]:
        tokens_by_line.pop()


    return tokens_by_line

def show_linewise_tokens(s: str):
    """For investigation and debugging"""
    if not s.endswith('\n'):
        s += '\n'
    lines = s.splitlines(keepends=True)
    for line in make_tokens_by_line(lines):
        print("Line -------")
        for tokinfo in line:
            print(" ", tokinfo)

# Arbitrary limit to prevent getting stuck in infinite loops
TRANSFORM_LOOP_LIMIT = 500

class TransformerManager:
    """Applies various transformations to a cell or code block.

    The key methods for external use are ``transform_cell()``
    and ``check_complete()``.
    """
    def __init__(self):
        self.cleanup_transforms = [
            leading_empty_lines,
            leading_indent,
            classic_prompt,
            ipython_prompt,
        ]
        self.line_transforms = [
            cell_magic,
        ]
        self.token_transformers = [
            MagicAssign,
            SystemAssign,
            EscapedCommand,
            HelpEnd,
        ]

    def do_one_token_transform(self, lines):
        """Find and run the transform earliest in the code.

        Returns (changed, lines).

        This method is called repeatedly until changed is False, indicating
        that all available transformations are complete.

        The tokens following IPython special syntax might not be valid, so
        the transformed code is retokenised every time to identify the next
        piece of special syntax. Hopefully long code cells are mostly valid
        Python, not using lots of IPython special syntax, so this shouldn't be
        a performance issue.
        """
        tokens_by_line = make_tokens_by_line(lines)
        candidates = []
        for transformer_cls in self.token_transformers:
            transformer = transformer_cls.find(tokens_by_line)
            if transformer:
                candidates.append(transformer)

        if not candidates:
            # Nothing to transform
            return False, lines
        ordered_transformers = sorted(candidates, key=TokenTransformBase.sortby)
        for transformer in ordered_transformers:
            try:
                return True, transformer.transform(lines)
            except SyntaxError:
                pass
        return False, lines

    def do_token_transforms(self, lines):
        for _ in range(TRANSFORM_LOOP_LIMIT):
            changed, lines = self.do_one_token_transform(lines)
            if not changed:
                return lines

        raise RuntimeError("Input transformation still changing after "
                           "%d iterations. Aborting." % TRANSFORM_LOOP_LIMIT)

    def transform_cell(self, cell: str) -> str:
        """Transforms a cell of input code"""
        if not cell.endswith('\n'):
            cell += '\n'  # Ensure the cell has a trailing newline
        lines = cell.splitlines(keepends=True)
        for transform in self.cleanup_transforms + self.line_transforms:
            lines = transform(lines)

        lines = self.do_token_transforms(lines)
        return ''.join(lines)

    def check_complete(self, cell: str):
        """Return whether a block of code is ready to execute, or should be continued

        Parameters
        ----------
        source : string
            Python input code, which can be multiline.

        Returns
        -------
        status : str
            One of 'complete', 'incomplete', or 'invalid' if source is not a
            prefix of valid code.
        indent_spaces : int or None
            The number of spaces by which to indent the next line of code. If
            status is not 'incomplete', this is None.
        """
        # Remember whether the cell ends in a newline.
        ends_with_newline = False
        for character in reversed(cell):
            if character == '\n':
                ends_with_newline = True
                break
            elif character.strip():
                break
            else:
                continue

        if not ends_with_newline:
            # Append a newline for consistent tokenization
            # See https://bugs.python.org/issue33899
            cell += '\n'

        lines = cell.splitlines(keepends=True)

        if not lines:
            return 'complete', None

        if lines[-1].endswith('\\'):
            # Explicit backslash continuation
            return 'incomplete', find_last_indent(lines)

        try:
            for transform in self.cleanup_transforms:
                if not getattr(transform, 'has_side_effects', False):
                    lines = transform(lines)
        except SyntaxError:
            return 'invalid', None

        if lines[0].startswith('%%'):
            # Special case for cell magics - completion marked by blank line
            if lines[-1].strip():
                return 'incomplete', find_last_indent(lines)
            else:
                return 'complete', None

        try:
            for transform in self.line_transforms:
                if not getattr(transform, 'has_side_effects', False):
                    lines = transform(lines)
            lines = self.do_token_transforms(lines)
        except SyntaxError:
            return 'invalid', None

        tokens_by_line = make_tokens_by_line(lines)

        if not tokens_by_line:
            return 'incomplete', find_last_indent(lines)

        if tokens_by_line[-1][-1].type != tokenize.ENDMARKER:
            # We're in a multiline string or expression
            return 'incomplete', find_last_indent(lines)

        newline_types = {tokenize.NEWLINE, tokenize.COMMENT, tokenize.ENDMARKER}

        # Pop the last line which only contains DEDENTs and ENDMARKER
        last_token_line = None
        if {t.type for t in tokens_by_line[-1]} in [
                {tokenize.DEDENT, tokenize.ENDMARKER},
                {tokenize.ENDMARKER}
        ] and len(tokens_by_line) > 1:
            last_token_line = tokens_by_line.pop()

        while tokens_by_line[-1] and tokens_by_line[-1][-1].type in newline_types:
            tokens_by_line[-1].pop()

        if not tokens_by_line[-1]:
            return 'incomplete', find_last_indent(lines)

        if tokens_by_line[-1][-1].string == ':':
            # The last line starts a block (e.g. 'if foo:')
            ix = 0
            while tokens_by_line[-1][ix].type in {tokenize.INDENT, tokenize.DEDENT}:
                ix += 1

            indent = tokens_by_line[-1][ix].start[1]
            return 'incomplete', indent + 4

        if tokens_by_line[-1][0].line.endswith('\\'):
            return 'incomplete', None

        # At this point, our checks think the code is complete (or invalid).
        # We'll use codeop.compile_command to check this with the real parser
        try:
            with warnings.catch_warnings():
                warnings.simplefilter('error', SyntaxWarning)
                res = compile_command(''.join(lines), symbol='exec')
        except (SyntaxError, OverflowError, ValueError, TypeError,
                MemoryError, SyntaxWarning):
            return 'invalid', None
        else:
            if res is None:
                return 'incomplete', find_last_indent(lines)

        if last_token_line and last_token_line[0].type == tokenize.DEDENT:
            if ends_with_newline:
                return 'complete', None
            return 'incomplete', find_last_indent(lines)

        # If there's a blank line at the end, assume we're ready to execute
        if not lines[-1].strip():
            return 'complete', None

        return 'complete', None


def find_last_indent(lines):
    m = _indent_re.match(lines[-1])
    if not m:
        return 0
    return len(m.group(0).replace('\t', ' '*4))
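
To see the effect of the change end to end, one can run the transformer manager over a CRLF-terminated cell magic. A minimal sketch, assuming an IPython 7.x environment with this backport applied:

    from IPython.core.inputtransformer2 import TransformerManager

    mgr = TransformerManager()
    out = mgr.transform_cell("%%bash\r\necho hi\r\n")
    # With the patch, the magic name comes out clean:
    #   out == "get_ipython().run_cell_magic('bash', '', 'echo hi\\r\\n')\n"
    # Before it, lines[0][2:-1] only dropped the final '\n', so the name was 'bash\r'.
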
@@ -1,326 +1,337 @@
1 """Tests for the token-based transformers in IPython.core.inputtransformer2
1 """Tests for the token-based transformers in IPython.core.inputtransformer2
2
2
3 Line-based transformers are the simpler ones; token-based transformers are
3 Line-based transformers are the simpler ones; token-based transformers are
4 more complex. See test_inputtransformer2_line for tests for line-based
4 more complex. See test_inputtransformer2_line for tests for line-based
5 transformations.
5 transformations.
6 """
6 """
7 import nose.tools as nt
7 import nose.tools as nt
8 import string
8 import string
9
9
10 from IPython.core import inputtransformer2 as ipt2
10 from IPython.core import inputtransformer2 as ipt2
11 from IPython.core.inputtransformer2 import make_tokens_by_line, _find_assign_op
11 from IPython.core.inputtransformer2 import make_tokens_by_line, _find_assign_op
12
12
13 from textwrap import dedent
13 from textwrap import dedent
14
14
15 MULTILINE_MAGIC = ("""\
15 MULTILINE_MAGIC = ("""\
16 a = f()
16 a = f()
17 %foo \\
17 %foo \\
18 bar
18 bar
19 g()
19 g()
20 """.splitlines(keepends=True), (2, 0), """\
20 """.splitlines(keepends=True), (2, 0), """\
21 a = f()
21 a = f()
22 get_ipython().run_line_magic('foo', ' bar')
22 get_ipython().run_line_magic('foo', ' bar')
23 g()
23 g()
24 """.splitlines(keepends=True))
24 """.splitlines(keepends=True))
25
25
26 INDENTED_MAGIC = ("""\
26 INDENTED_MAGIC = ("""\
27 for a in range(5):
27 for a in range(5):
28 %ls
28 %ls
29 """.splitlines(keepends=True), (2, 4), """\
29 """.splitlines(keepends=True), (2, 4), """\
30 for a in range(5):
30 for a in range(5):
31 get_ipython().run_line_magic('ls', '')
31 get_ipython().run_line_magic('ls', '')
32 """.splitlines(keepends=True))
32 """.splitlines(keepends=True))
33
33
+CRLF_MAGIC = ([
+    "a = f()\n",
+    "%ls\r\n",
+    "g()\n"
+], (2, 0), [
+    "a = f()\n",
+    "get_ipython().run_line_magic('ls', '')\n",
+    "g()\n"
+])
+
34 MULTILINE_MAGIC_ASSIGN = ("""\
44 MULTILINE_MAGIC_ASSIGN = ("""\
35 a = f()
45 a = f()
36 b = %foo \\
46 b = %foo \\
37 bar
47 bar
38 g()
48 g()
39 """.splitlines(keepends=True), (2, 4), """\
49 """.splitlines(keepends=True), (2, 4), """\
40 a = f()
50 a = f()
41 b = get_ipython().run_line_magic('foo', ' bar')
51 b = get_ipython().run_line_magic('foo', ' bar')
42 g()
52 g()
43 """.splitlines(keepends=True))
53 """.splitlines(keepends=True))
44
54
45 MULTILINE_SYSTEM_ASSIGN = ("""\
55 MULTILINE_SYSTEM_ASSIGN = ("""\
46 a = f()
56 a = f()
47 b = !foo \\
57 b = !foo \\
48 bar
58 bar
49 g()
59 g()
50 """.splitlines(keepends=True), (2, 4), """\
60 """.splitlines(keepends=True), (2, 4), """\
51 a = f()
61 a = f()
52 b = get_ipython().getoutput('foo bar')
62 b = get_ipython().getoutput('foo bar')
53 g()
63 g()
54 """.splitlines(keepends=True))
64 """.splitlines(keepends=True))
55
65
56 #####
66 #####
57
67
58 MULTILINE_SYSTEM_ASSIGN_AFTER_DEDENT = ("""\
68 MULTILINE_SYSTEM_ASSIGN_AFTER_DEDENT = ("""\
59 def test():
69 def test():
60 for i in range(1):
70 for i in range(1):
61 print(i)
71 print(i)
62 res =! ls
72 res =! ls
63 """.splitlines(keepends=True), (4, 7), '''\
73 """.splitlines(keepends=True), (4, 7), '''\
64 def test():
74 def test():
65 for i in range(1):
75 for i in range(1):
66 print(i)
76 print(i)
67 res =get_ipython().getoutput(\' ls\')
77 res =get_ipython().getoutput(\' ls\')
68 '''.splitlines(keepends=True))
78 '''.splitlines(keepends=True))
69
79
70 ######
80 ######
71
81
72 AUTOCALL_QUOTE = (
82 AUTOCALL_QUOTE = (
73 [",f 1 2 3\n"], (1, 0),
83 [",f 1 2 3\n"], (1, 0),
74 ['f("1", "2", "3")\n']
84 ['f("1", "2", "3")\n']
75 )
85 )
76
86
77 AUTOCALL_QUOTE2 = (
87 AUTOCALL_QUOTE2 = (
78 [";f 1 2 3\n"], (1, 0),
88 [";f 1 2 3\n"], (1, 0),
79 ['f("1 2 3")\n']
89 ['f("1 2 3")\n']
80 )
90 )
81
91
82 AUTOCALL_PAREN = (
92 AUTOCALL_PAREN = (
83 ["/f 1 2 3\n"], (1, 0),
93 ["/f 1 2 3\n"], (1, 0),
84 ['f(1, 2, 3)\n']
94 ['f(1, 2, 3)\n']
85 )
95 )
86
96
87 SIMPLE_HELP = (
97 SIMPLE_HELP = (
88 ["foo?\n"], (1, 0),
98 ["foo?\n"], (1, 0),
89 ["get_ipython().run_line_magic('pinfo', 'foo')\n"]
99 ["get_ipython().run_line_magic('pinfo', 'foo')\n"]
90 )
100 )
91
101
92 DETAILED_HELP = (
102 DETAILED_HELP = (
93 ["foo??\n"], (1, 0),
103 ["foo??\n"], (1, 0),
94 ["get_ipython().run_line_magic('pinfo2', 'foo')\n"]
104 ["get_ipython().run_line_magic('pinfo2', 'foo')\n"]
95 )
105 )
96
106
97 MAGIC_HELP = (
107 MAGIC_HELP = (
98 ["%foo?\n"], (1, 0),
108 ["%foo?\n"], (1, 0),
99 ["get_ipython().run_line_magic('pinfo', '%foo')\n"]
109 ["get_ipython().run_line_magic('pinfo', '%foo')\n"]
100 )
110 )
101
111
102 HELP_IN_EXPR = (
112 HELP_IN_EXPR = (
103 ["a = b + c?\n"], (1, 0),
113 ["a = b + c?\n"], (1, 0),
104 ["get_ipython().set_next_input('a = b + c');"
114 ["get_ipython().set_next_input('a = b + c');"
105 "get_ipython().run_line_magic('pinfo', 'c')\n"]
115 "get_ipython().run_line_magic('pinfo', 'c')\n"]
106 )
116 )
107
117
108 HELP_CONTINUED_LINE = ("""\
118 HELP_CONTINUED_LINE = ("""\
109 a = \\
119 a = \\
110 zip?
120 zip?
111 """.splitlines(keepends=True), (1, 0),
121 """.splitlines(keepends=True), (1, 0),
112 [r"get_ipython().set_next_input('a = \\\nzip');get_ipython().run_line_magic('pinfo', 'zip')" + "\n"]
122 [r"get_ipython().set_next_input('a = \\\nzip');get_ipython().run_line_magic('pinfo', 'zip')" + "\n"]
113 )
123 )
114
124
115 HELP_MULTILINE = ("""\
125 HELP_MULTILINE = ("""\
116 (a,
126 (a,
117 b) = zip?
127 b) = zip?
118 """.splitlines(keepends=True), (1, 0),
128 """.splitlines(keepends=True), (1, 0),
119 [r"get_ipython().set_next_input('(a,\nb) = zip');get_ipython().run_line_magic('pinfo', 'zip')" + "\n"]
129 [r"get_ipython().set_next_input('(a,\nb) = zip');get_ipython().run_line_magic('pinfo', 'zip')" + "\n"]
120 )
130 )
121
131
122 HELP_UNICODE = (
132 HELP_UNICODE = (
123 ["Ο€.foo?\n"], (1, 0),
133 ["Ο€.foo?\n"], (1, 0),
124 ["get_ipython().run_line_magic('pinfo', 'Ο€.foo')\n"]
134 ["get_ipython().run_line_magic('pinfo', 'Ο€.foo')\n"]
125 )
135 )
126
136
127
137
128 def null_cleanup_transformer(lines):
138 def null_cleanup_transformer(lines):
129 """
139 """
130 A cleanup transform that returns an empty list.
140 A cleanup transform that returns an empty list.
131 """
141 """
132 return []
142 return []
133
143
134 def check_make_token_by_line_never_ends_empty():
144 def check_make_token_by_line_never_ends_empty():
135 """
145 """
136 Check that not sequence of single or double characters ends up leading to en empty list of tokens
146 Check that not sequence of single or double characters ends up leading to en empty list of tokens
137 """
147 """
138 from string import printable
148 from string import printable
139 for c in printable:
149 for c in printable:
140 nt.assert_not_equal(make_tokens_by_line(c)[-1], [])
150 nt.assert_not_equal(make_tokens_by_line(c)[-1], [])
141 for k in printable:
151 for k in printable:
142 nt.assert_not_equal(make_tokens_by_line(c+k)[-1], [])
152 nt.assert_not_equal(make_tokens_by_line(c+k)[-1], [])
143
153
def check_find(transformer, case, match=True):
    sample, expected_start, _ = case
    tbl = make_tokens_by_line(sample)
    res = transformer.find(tbl)
    if match:
        # start_line is stored 0-indexed, expected values are 1-indexed
        nt.assert_equal((res.start_line+1, res.start_col), expected_start)
        return res
    else:
        nt.assert_is(res, None)

def check_transform(transformer_cls, case):
    lines, start, expected = case
    transformer = transformer_cls(start)
    nt.assert_equal(transformer.transform(lines), expected)

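# Illustrative sketch, not part of the original test suite: the two helpers
# above encode the token-transformer protocol exercised by the tests below.
# find() scans the tokenised lines and (assuming it behaves as check_find
# relies on) returns a positioned transformer instance when the special syntax
# is present, or None otherwise; transform() then rewrites the raw lines.
def _example_find_then_transform():
    lines = ["a = %who\n"]
    magic_assign = ipt2.MagicAssign.find(ipt2.make_tokens_by_line(lines))
    assert magic_assign is not None
    print(magic_assign.transform(lines))
    # expected, by analogy with the MULTILINE_MAGIC_ASSIGN case used below:
    # ["a = get_ipython().run_line_magic('who', '')\n"]
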
def test_continued_line():
    lines = MULTILINE_MAGIC_ASSIGN[0]
    nt.assert_equal(ipt2.find_end_of_continued_line(lines, 1), 2)

    nt.assert_equal(ipt2.assemble_continued_line(lines, (1, 5), 2), "foo bar")

def test_find_assign_magic():
    check_find(ipt2.MagicAssign, MULTILINE_MAGIC_ASSIGN)
    check_find(ipt2.MagicAssign, MULTILINE_SYSTEM_ASSIGN, match=False)
    check_find(ipt2.MagicAssign, MULTILINE_SYSTEM_ASSIGN_AFTER_DEDENT, match=False)

def test_transform_assign_magic():
    check_transform(ipt2.MagicAssign, MULTILINE_MAGIC_ASSIGN)

def test_find_assign_system():
    check_find(ipt2.SystemAssign, MULTILINE_SYSTEM_ASSIGN)
    check_find(ipt2.SystemAssign, MULTILINE_SYSTEM_ASSIGN_AFTER_DEDENT)
    check_find(ipt2.SystemAssign, (["a = !ls\n"], (1, 5), None))
    check_find(ipt2.SystemAssign, (["a=!ls\n"], (1, 2), None))
    check_find(ipt2.SystemAssign, MULTILINE_MAGIC_ASSIGN, match=False)

def test_transform_assign_system():
    check_transform(ipt2.SystemAssign, MULTILINE_SYSTEM_ASSIGN)
    check_transform(ipt2.SystemAssign, MULTILINE_SYSTEM_ASSIGN_AFTER_DEDENT)

def test_find_magic_escape():
    check_find(ipt2.EscapedCommand, MULTILINE_MAGIC)
    check_find(ipt2.EscapedCommand, INDENTED_MAGIC)
    check_find(ipt2.EscapedCommand, MULTILINE_MAGIC_ASSIGN, match=False)

def test_transform_magic_escape():
    check_transform(ipt2.EscapedCommand, MULTILINE_MAGIC)
    check_transform(ipt2.EscapedCommand, INDENTED_MAGIC)
    check_transform(ipt2.EscapedCommand, CRLF_MAGIC)

def test_find_autocalls():
    for case in [AUTOCALL_QUOTE, AUTOCALL_QUOTE2, AUTOCALL_PAREN]:
        print("Testing %r" % case[0])
        check_find(ipt2.EscapedCommand, case)

def test_transform_autocall():
    for case in [AUTOCALL_QUOTE, AUTOCALL_QUOTE2, AUTOCALL_PAREN]:
        print("Testing %r" % case[0])
        check_transform(ipt2.EscapedCommand, case)

def test_find_help():
    for case in [SIMPLE_HELP, DETAILED_HELP, MAGIC_HELP, HELP_IN_EXPR]:
        check_find(ipt2.HelpEnd, case)

    tf = check_find(ipt2.HelpEnd, HELP_CONTINUED_LINE)
    nt.assert_equal(tf.q_line, 1)
    nt.assert_equal(tf.q_col, 3)

    tf = check_find(ipt2.HelpEnd, HELP_MULTILINE)
    nt.assert_equal(tf.q_line, 1)
    nt.assert_equal(tf.q_col, 8)

    # ? in a comment does not trigger help
    check_find(ipt2.HelpEnd, (["foo # bar?\n"], None, None), match=False)
    # Nor in a string
    check_find(ipt2.HelpEnd, (["foo = '''bar?\n"], None, None), match=False)

def test_transform_help():
    tf = ipt2.HelpEnd((1, 0), (1, 9))
    nt.assert_equal(tf.transform(HELP_IN_EXPR[0]), HELP_IN_EXPR[2])

    tf = ipt2.HelpEnd((1, 0), (2, 3))
    nt.assert_equal(tf.transform(HELP_CONTINUED_LINE[0]), HELP_CONTINUED_LINE[2])

    tf = ipt2.HelpEnd((1, 0), (2, 8))
    nt.assert_equal(tf.transform(HELP_MULTILINE[0]), HELP_MULTILINE[2])

    tf = ipt2.HelpEnd((1, 0), (1, 0))
    nt.assert_equal(tf.transform(HELP_UNICODE[0]), HELP_UNICODE[2])

def test_find_assign_op_dedent():
    """
    Be careful that empty tokens like DEDENT are not counted as parens.
    """
    class Tk:
        def __init__(self, s):
            self.string = s

    nt.assert_equal(_find_assign_op([Tk(s) for s in ('', 'a', '=', 'b')]), 2)
    nt.assert_equal(_find_assign_op([Tk(s) for s in ('', '(', 'a', '=', 'b', ')', '=', '5')]), 6)

def test_check_complete():
    cc = ipt2.TransformerManager().check_complete
    nt.assert_equal(cc("a = 1"), ('complete', None))
    nt.assert_equal(cc("for a in range(5):"), ('incomplete', 4))
    nt.assert_equal(cc("for a in range(5):\n    if a > 0:"), ('incomplete', 8))
    nt.assert_equal(cc("raise = 2"), ('invalid', None))
    nt.assert_equal(cc("a = [1,\n2,"), ('incomplete', 0))
    nt.assert_equal(cc(")"), ('incomplete', 0))
    nt.assert_equal(cc("\\\r\n"), ('incomplete', 0))
    nt.assert_equal(cc("a = '''\n   hi"), ('incomplete', 3))
    nt.assert_equal(cc("def a():\n x=1\n global x"), ('invalid', None))
    nt.assert_equal(cc("a \\ "), ('invalid', None))  # Nothing allowed after backslash
    nt.assert_equal(cc("1\\\n+2"), ('complete', None))
    nt.assert_equal(cc("exit"), ('complete', None))

    example = dedent("""
    if True:
        a=1""" )

    nt.assert_equal(cc(example), ('incomplete', 4))
    nt.assert_equal(cc(example+'\n'), ('complete', None))
    nt.assert_equal(cc(example+'\n '), ('complete', None))

    # no need to loop on all the letters/numbers.
    short = '12abAB'+string.printable[62:]
    for c in short:
        # test does not raise:
        cc(c)
        for k in short:
            cc(c+k)

    nt.assert_equal(cc("def f():\n x=0\n \\\n "), ('incomplete', 2))

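# Illustrative sketch, not part of the original test suite: check_complete() is
# what a front end would call on the current buffer to decide between running
# it and asking for another line. The status is 'complete', 'incomplete' or
# 'invalid'; for 'incomplete' the second value suggests how far to indent the
# next line. A hypothetical read loop built on it:
def _example_read_loop(read_line=input):
    cc = ipt2.TransformerManager().check_complete
    buffer = ""
    while True:
        buffer += read_line() + "\n"
        status, indent = cc(buffer)
        if status == 'complete':
            return buffer                    # hand the cell off for execution
        if status == 'invalid':
            raise SyntaxError(repr(buffer))
        # 'incomplete': keep reading; a real UI would pre-fill `indent` spaces.
        print(" " * (indent or 0), end="")
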
def test_check_complete_II():
    """
    Test that multiple line strings are properly handled.

    Separate test function for convenience.

    """
    cc = ipt2.TransformerManager().check_complete
    nt.assert_equal(cc('''def foo():\n    """'''), ('incomplete', 4))


def test_null_cleanup_transformer():
    manager = ipt2.TransformerManager()
    manager.cleanup_transforms.insert(0, null_cleanup_transformer)
    assert manager.transform_cell("") == ""


def test_side_effects_I():
    count = 0
    def counter(lines):
        nonlocal count
        count += 1
        return lines

    counter.has_side_effects = True

    manager = ipt2.TransformerManager()
    manager.cleanup_transforms.insert(0, counter)
    assert manager.check_complete("a=1\n") == ('complete', None)
    assert count == 0


def test_side_effects_II():
    count = 0
    def counter(lines):
        nonlocal count
        count += 1
        return lines

    counter.has_side_effects = True

    manager = ipt2.TransformerManager()
    manager.line_transforms.insert(0, counter)
    assert manager.check_complete("b=1\n") == ('complete', None)
    assert count == 0
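# Illustrative sketch, not part of the original test suite: the two tests above
# rely on transforms marked with `has_side_effects = True` being skipped when
# check_complete() re-runs the pipeline, so merely asking "is this complete?"
# cannot trigger them. A hypothetical logging transform, registered the same
# way the tests register `counter`:
def _example_logging_transform():
    seen = []

    def log_lines(lines):
        seen.append(list(lines))        # record the raw cell, line by line
        return lines                    # a line transform must return the lines

    log_lines.has_side_effects = True   # keep check_complete() from calling it

    manager = ipt2.TransformerManager()
    manager.line_transforms.append(log_lines)
    manager.transform_cell("a = 1\n")   # runs the transform once
    manager.check_complete("a = 1\n")   # does not, as asserted above
    assert len(seen) == 1
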
@@ -1,116 +1,126 @@
"""Tests for the line-based transformers in IPython.core.inputtransformer2

Line-based transformers are the simpler ones; token-based transformers are
more complex. See test_inputtransformer2 for tests for token-based transformers.
"""
import nose.tools as nt

from IPython.core import inputtransformer2 as ipt2

CELL_MAGIC = ("""\
%%foo arg
body 1
body 2
""", """\
get_ipython().run_cell_magic('foo', 'arg', 'body 1\\nbody 2\\n')
""")

def test_cell_magic():
    for sample, expected in [CELL_MAGIC]:
        nt.assert_equal(ipt2.cell_magic(sample.splitlines(keepends=True)),
                        expected.splitlines(keepends=True))

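# Illustrative sketch, not part of the original test suite: cell_magic() is the
# transform behind the CELL_MAGIC case above. When the first line starts with
# "%%", the whole cell is folded into one run_cell_magic() call, with the rest
# of the cell passed along as the body string:
def _example_cell_magic():
    print(ipt2.cell_magic(["%%bash\n", "echo hi\n"]))
    # expected, by analogy with CELL_MAGIC above:
    # ["get_ipython().run_cell_magic('bash', '', 'echo hi\\n')\n"]
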
CLASSIC_PROMPT = ("""\
>>> for a in range(5):
...     print(a)
""", """\
for a in range(5):
    print(a)
""")

CLASSIC_PROMPT_L2 = ("""\
for a in range(5):
...     print(a)
...     print(a ** 2)
""", """\
for a in range(5):
    print(a)
    print(a ** 2)
""")

def test_classic_prompt():
    for sample, expected in [CLASSIC_PROMPT, CLASSIC_PROMPT_L2]:
        nt.assert_equal(ipt2.classic_prompt(sample.splitlines(keepends=True)),
                        expected.splitlines(keepends=True))

IPYTHON_PROMPT = ("""\
In [1]: for a in range(5):
   ...:     print(a)
""", """\
for a in range(5):
    print(a)
""")

IPYTHON_PROMPT_L2 = ("""\
for a in range(5):
   ...:     print(a)
   ...:     print(a ** 2)
""", """\
for a in range(5):
    print(a)
    print(a ** 2)
""")

def test_ipython_prompt():
    for sample, expected in [IPYTHON_PROMPT, IPYTHON_PROMPT_L2]:
        nt.assert_equal(ipt2.ipython_prompt(sample.splitlines(keepends=True)),
                        expected.splitlines(keepends=True))

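# Illustrative sketch, not part of the original test suite: the two prompt
# strippers above are what make pasted sessions runnable. classic_prompt()
# handles ">>> " / "... " doctest-style prompts, ipython_prompt() handles
# "In [n]: " / "...:" prompts, and both also strip bare continuation prompts on
# later lines, as the *_L2 cases show. For example:
def _example_strip_pasted_session():
    pasted = ">>> x = 1\n>>> x + 1\n".splitlines(keepends=True)
    print(ipt2.classic_prompt(pasted))
    # expected: ["x = 1\n", "x + 1\n"]
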
INDENT_SPACES = ("""\
    if True:
        a = 3
""", """\
if True:
    a = 3
""")

INDENT_TABS = ("""\
\tif True:
\t\tb = 4
""", """\
if True:
\tb = 4
""")

def test_leading_indent():
    for sample, expected in [INDENT_SPACES, INDENT_TABS]:
        nt.assert_equal(ipt2.leading_indent(sample.splitlines(keepends=True)),
                        expected.splitlines(keepends=True))

LEADING_EMPTY_LINES = ("""\
\t

if True:
    a = 3

b = 4
""", """\
if True:
    a = 3

b = 4
""")

ONLY_EMPTY_LINES = ("""\
\t

""", """\
\t

""")

def test_leading_empty_lines():
    for sample, expected in [LEADING_EMPTY_LINES, ONLY_EMPTY_LINES]:
        nt.assert_equal(
            ipt2.leading_empty_lines(sample.splitlines(keepends=True)),
            expected.splitlines(keepends=True))
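# Illustrative sketch, not part of the original test suite: these two cleanup
# transforms are what let an indented or blank-prefixed paste run unchanged, as
# the INDENT_* and *_EMPTY_LINES cases above show. Chained by hand:
def _example_cleanup_paste():
    cell = ["\n", "    if True:\n", "        a = 3\n"]
    cell = ipt2.leading_empty_lines(cell)   # drop the leading blank line
    cell = ipt2.leading_indent(cell)        # strip the first line's indent everywhere
    print(cell)
    # expected: ["if True:\n", "    a = 3\n"]
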

CRLF_MAGIC = ([
    "%%ls\r\n"
], [
    "get_ipython().run_cell_magic('ls', '', '')\n"
])

def test_crlf_magic():
    for sample, expected in [CRLF_MAGIC]:
        nt.assert_equal(ipt2.cell_magic(sample), expected)