Show More
@@ -418,6 +418,8 b' class HelpEnd(TokenTransformBase):' | |||||
418 | lines_after = lines[self.q_line + 1:] |
|
418 | lines_after = lines[self.q_line + 1:] | |
419 |
|
419 | |||
420 | m = _help_end_re.search(content) |
|
420 | m = _help_end_re.search(content) | |
|
421 | if not m: | |||
|
422 | raise SyntaxError(content) | |||
421 | assert m is not None, content |
|
423 | assert m is not None, content | |
422 | target = m.group(1) |
|
424 | target = m.group(1) | |
423 | esc = m.group(3) |
|
425 | esc = m.group(3) | |
@@ -460,6 +462,8 b' def make_tokens_by_line(lines):' | |||||
460 | except tokenize.TokenError: |
|
462 | except tokenize.TokenError: | |
461 | # Input ended in a multiline string or expression. That's OK for us. |
|
463 | # Input ended in a multiline string or expression. That's OK for us. | |
462 | pass |
|
464 | pass | |
|
465 | if not tokens_by_line[-1]: | |||
|
466 | tokens_by_line.pop() | |||
463 |
|
467 | |||
464 | return tokens_by_line |
|
468 | return tokens_by_line | |
465 |
|
469 | |||
@@ -522,9 +526,13 b' class TransformerManager:' | |||||
522 | if not candidates: |
|
526 | if not candidates: | |
523 | # Nothing to transform |
|
527 | # Nothing to transform | |
524 | return False, lines |
|
528 | return False, lines | |
525 |
|
529 | ordered_transformers = sorted(candidates, key=TokenTransformBase.sortby) | ||
526 | transformer = min(candidates, key=TokenTransformBase.sortby) |
|
530 | for transformer in ordered_transformers: | |
527 | return True, transformer.transform(lines) |
|
531 | try: | |
|
532 | return True, transformer.transform(lines) | |||
|
533 | except SyntaxError: | |||
|
534 | pass | |||
|
535 | return False, lines | |||
528 |
|
536 | |||
529 | def do_token_transforms(self, lines): |
|
537 | def do_token_transforms(self, lines): | |
530 | for _ in range(TRANSFORM_LOOP_LIMIT): |
|
538 | for _ in range(TRANSFORM_LOOP_LIMIT): | |
@@ -591,10 +599,13 b' class TransformerManager:' | |||||
591 | return 'invalid', None |
|
599 | return 'invalid', None | |
592 |
|
600 | |||
593 | tokens_by_line = make_tokens_by_line(lines) |
|
601 | tokens_by_line = make_tokens_by_line(lines) | |
|
602 | if not tokens_by_line: | |||
|
603 | return 'incomplete', find_last_indent(lines) | |||
594 | if tokens_by_line[-1][-1].type != tokenize.ENDMARKER: |
|
604 | if tokens_by_line[-1][-1].type != tokenize.ENDMARKER: | |
595 | # We're in a multiline string or expression |
|
605 | # We're in a multiline string or expression | |
596 | return 'incomplete', find_last_indent(lines) |
|
606 | return 'incomplete', find_last_indent(lines) | |
597 |
|
607 | if len(tokens_by_line) == 1: | ||
|
608 | return 'incomplete', find_last_indent(lines) | |||
598 | # Find the last token on the previous line that's not NEWLINE or COMMENT |
|
609 | # Find the last token on the previous line that's not NEWLINE or COMMENT | |
599 | toks_last_line = tokens_by_line[-2] |
|
610 | toks_last_line = tokens_by_line[-2] | |
600 | ix = len(toks_last_line) - 1 |
|
611 | ix = len(toks_last_line) - 1 |
@@ -5,6 +5,7 b' more complex. See test_inputtransformer2_line for tests for line-based' | |||||
5 | transformations. |
|
5 | transformations. | |
6 | """ |
|
6 | """ | |
7 | import nose.tools as nt |
|
7 | import nose.tools as nt | |
|
8 | import string | |||
8 |
|
9 | |||
9 | from IPython.core import inputtransformer2 as ipt2 |
|
10 | from IPython.core import inputtransformer2 as ipt2 | |
10 | from IPython.core.inputtransformer2 import make_tokens_by_line |
|
11 | from IPython.core.inputtransformer2 import make_tokens_by_line | |
@@ -100,6 +101,16 b' b) = zip?' | |||||
100 | [r"get_ipython().set_next_input('(a,\nb) = zip');get_ipython().run_line_magic('pinfo', 'zip')" + "\n"] |
|
101 | [r"get_ipython().set_next_input('(a,\nb) = zip');get_ipython().run_line_magic('pinfo', 'zip')" + "\n"] | |
101 | ) |
|
102 | ) | |
102 |
|
103 | |||
|
104 | def check_make_token_by_line_never_ends_empty(): | |||
|
105 | """ | |||
|
|       106 |     Check that no sequence of single or double characters ends up leading to an empty list of tokens | |||
|
107 | """ | |||
|
108 | from string import printable | |||
|
109 | for c in printable: | |||
|
110 | nt.assert_not_equal(make_tokens_by_line(c)[-1], []) | |||
|
111 | for k in printable: | |||
|
112 | nt.assert_not_equal(make_tokens_by_line(c+k)[-1], []) | |||
|
113 | ||||
103 | def check_find(transformer, case, match=True): |
|
114 | def check_find(transformer, case, match=True): | |
104 | sample, expected_start, _ = case |
|
115 | sample, expected_start, _ = case | |
105 | tbl = make_tokens_by_line(sample) |
|
116 | tbl = make_tokens_by_line(sample) | |
@@ -190,6 +201,17 b' def test_check_complete():' | |||||
190 | nt.assert_equal(cc("for a in range(5):"), ('incomplete', 4)) |
|
201 | nt.assert_equal(cc("for a in range(5):"), ('incomplete', 4)) | |
191 | nt.assert_equal(cc("raise = 2"), ('invalid', None)) |
|
202 | nt.assert_equal(cc("raise = 2"), ('invalid', None)) | |
192 | nt.assert_equal(cc("a = [1,\n2,"), ('incomplete', 0)) |
|
203 | nt.assert_equal(cc("a = [1,\n2,"), ('incomplete', 0)) | |
|
204 | nt.assert_equal(cc(")"), ('incomplete', 0)) | |||
|
205 | nt.assert_equal(cc("\\\r\n"), ('incomplete', 0)) | |||
193 | nt.assert_equal(cc("a = '''\n hi"), ('incomplete', 3)) |
|
206 | nt.assert_equal(cc("a = '''\n hi"), ('incomplete', 3)) | |
194 | nt.assert_equal(cc("def a():\n x=1\n global x"), ('invalid', None)) |
|
207 | nt.assert_equal(cc("def a():\n x=1\n global x"), ('invalid', None)) | |
195 | nt.assert_equal(cc("a \\ "), ('invalid', None)) # Nothing allowed after backslash |
|
208 | nt.assert_equal(cc("a \\ "), ('invalid', None)) # Nothing allowed after backslash | |
|
209 | ||||
|
210 | # no need to loop on all the letters/numbers. | |||
|
211 | short = '12abAB'+string.printable[62:] | |||
|
212 | for c in short: | |||
|
213 | # test does not raise: | |||
|
214 | cc(c) | |||
|
215 | for k in short: | |||
|
216 | cc(c+k) | |||
|
217 |
General Comments 0
You need to be logged in to leave comments.
Login now