diff --git a/IPython/core/inputsplitter.py b/IPython/core/inputsplitter.py
index d33bd57..a440118 100644
--- a/IPython/core/inputsplitter.py
+++ b/IPython/core/inputsplitter.py
@@ -152,7 +152,12 @@ def find_next_indent(code):
     if not tokens:
         return 0

-    while (tokens[-1].type in {tokenize.DEDENT, tokenize.NEWLINE, tokenize.COMMENT, tokenize.ERRORTOKEN}):
+    while tokens[-1].type in {
+        tokenize.DEDENT,
+        tokenize.NEWLINE,
+        tokenize.COMMENT,
+        tokenize.ERRORTOKEN,
+    }:
         tokens.pop()

     # Starting in Python 3.12, the tokenize module adds implicit newlines at the end
diff --git a/IPython/core/inputtransformer2.py b/IPython/core/inputtransformer2.py
index 7e22e26..949cf38 100644
--- a/IPython/core/inputtransformer2.py
+++ b/IPython/core/inputtransformer2.py
@@ -292,17 +292,18 @@ class SystemAssign(TokenTransformBase):
     def find_post_312(cls, tokens_by_line):
         for line in tokens_by_line:
             assign_ix = _find_assign_op(line)
-            if (assign_ix is not None) \
-                    and not line[assign_ix].line.strip().startswith('=') \
-                    and (len(line) >= assign_ix + 2) \
-                    and (line[assign_ix + 1].type == tokenize.OP) \
-                    and (line[assign_ix + 1].string == '!'):
+            if (
+                (assign_ix is not None)
+                and not line[assign_ix].line.strip().startswith("=")
+                and (len(line) >= assign_ix + 2)
+                and (line[assign_ix + 1].type == tokenize.OP)
+                and (line[assign_ix + 1].string == "!")
+            ):
                 return cls(line[assign_ix + 1].start)

     @classmethod
     def find(cls, tokens_by_line):
-        """Find the first system assignment (a = !foo) in the cell.
-        """
+        """Find the first system assignment (a = !foo) in the cell."""
         if sys.version_info < (3, 12):
             return cls.find_pre_312(tokens_by_line)
         return cls.find_post_312(tokens_by_line)
@@ -531,8 +532,9 @@ def make_tokens_by_line(lines:List[str]):
         )
     parenlev = 0
     try:
-        for token in tokenutil.generate_tokens_catch_errors(iter(lines).__next__,
-                                                            extra_errors_to_catch=['expected EOF']):
+        for token in tokenutil.generate_tokens_catch_errors(
+            iter(lines).__next__, extra_errors_to_catch=["expected EOF"]
+        ):
             tokens_by_line[-1].append(token)
             if (token.type == NEWLINE) \
                     or ((token.type == NL) and (parenlev <= 0)):
@@ -701,8 +703,8 @@ class TransformerManager:
         for line in reversed(lines):
             if not line.strip():
                 continue
-            elif line.strip('\n').endswith('\\'):
-                return 'incomplete', find_last_indent(lines)
+            elif line.strip("\n").endswith("\\"):
+                return "incomplete", find_last_indent(lines)
             else:
                 break

@@ -742,8 +744,10 @@ class TransformerManager:
         if not tokens_by_line:
             return 'incomplete', find_last_indent(lines)

-        if (tokens_by_line[-1][-1].type != tokenize.ENDMARKER
-                and tokens_by_line[-1][-1].type != tokenize.ERRORTOKEN):
+        if (
+            tokens_by_line[-1][-1].type != tokenize.ENDMARKER
+            and tokens_by_line[-1][-1].type != tokenize.ERRORTOKEN
+        ):
             # We're in a multiline string or expression
             return 'incomplete', find_last_indent(lines)

diff --git a/IPython/core/tests/test_inputtransformer2.py b/IPython/core/tests/test_inputtransformer2.py
index 0792f7c..ec7cb91 100644
--- a/IPython/core/tests/test_inputtransformer2.py
+++ b/IPython/core/tests/test_inputtransformer2.py
@@ -297,6 +297,7 @@ def test_find_assign_op_dedent():
         _find_assign_op([Tk(s) for s in ("", "(", "a", "=", "b", ")", "=", "5")]) == 6
     )

+
 extra_closing_paren_param = (
     pytest.param("(\n))", "invalid", None)
     if sys.version_info >= (3, 12)
diff --git a/IPython/utils/tokenutil.py b/IPython/utils/tokenutil.py
index c9228dc..5fd8a1f 100644
--- a/IPython/utils/tokenutil.py
+++ b/IPython/utils/tokenutil.py
@@ -21,9 +21,13 @@ def generate_tokens(readline):
         # catch EOF error
         return

+
 def generate_tokens_catch_errors(readline, extra_errors_to_catch=None):
-    default_errors_to_catch = ['unterminated string literal', 'invalid non-printable character',
-                               'after line continuation character']
+    default_errors_to_catch = [
+        "unterminated string literal",
+        "invalid non-printable character",
+        "after line continuation character",
+    ]
     assert extra_errors_to_catch is None or isinstance(extra_errors_to_catch, list)
     errors_to_catch = default_errors_to_catch + (extra_errors_to_catch or [])

@@ -40,12 +44,13 @@ def generate_tokens_catch_errors(readline, extra_errors_to_catch=None):
             line = tokens[-1].line
         else:
             start = end = (1, 0)
-            line = ''
-        yield tokenize.TokenInfo(tokenize.ERRORTOKEN, '', start, end, line)
+            line = ""
+        yield tokenize.TokenInfo(tokenize.ERRORTOKEN, "", start, end, line)
     else:
         # Catch EOF
         raise
+
+
 def line_at_cursor(cell, cursor_pos=0):
     """Return the line in a cell at a given cursor position
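For reviewers, a minimal sketch (not part of the patch) of how the reflowed `generate_tokens_catch_errors` helper is exercised. The sample source string is an illustrative assumption; the exact tokenizer error message it triggers varies across Python versions, and the ERRORTOKEN fallback shown in the hunks above fires on Python 3.12+, where `tokenize` raises for this input rather than emitting its own error token.

# Illustrative sketch only -- not part of the patch above.
import tokenize
from IPython.utils.tokenutil import generate_tokens_catch_errors

# An unterminated string makes tokenization fail with one of the messages in
# default_errors_to_catch; the helper then yields a synthetic ERRORTOKEN with
# an empty string instead of raising, so callers such as make_tokens_by_line
# can keep scanning an incomplete cell.
source = 'x = "abc\n'  # hypothetical input, assumed to fail tokenization
readline = iter(source.splitlines(keepends=True)).__next__

for tok in generate_tokens_catch_errors(readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))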