From 6a5220d032f5a60267b051783721aa0d5a0e3373 2016-10-25 10:40:30
From: Thomas Kluyver
Date: 2016-10-25 10:40:30
Subject: [PATCH] Merge pull request #10021 from ivanov/remove-sys-version-checks

Remove sys version checks
---

diff --git a/IPython/__init__.py b/IPython/__init__.py
index c551eb2..d3a5d63 100644
--- a/IPython/__init__.py
+++ b/IPython/__init__.py
@@ -18,7 +18,6 @@ http://ipython.org
 #-----------------------------------------------------------------------------
 # Imports
 #-----------------------------------------------------------------------------
-from __future__ import absolute_import

 import os
 import sys
diff --git a/IPython/core/compilerop.py b/IPython/core/compilerop.py
index e39ded6..723a6ab 100644
--- a/IPython/core/compilerop.py
+++ b/IPython/core/compilerop.py
@@ -25,7 +25,6 @@ Authors
 #-----------------------------------------------------------------------------
 # Imports
 #-----------------------------------------------------------------------------
-from __future__ import print_function

 # Stdlib imports
 import __future__
diff --git a/IPython/core/completer.py b/IPython/core/completer.py
index fb98cb7..18688e3 100644
--- a/IPython/core/completer.py
+++ b/IPython/core/completer.py
@@ -13,7 +13,6 @@ upstream and were accepted as of Python 2.3,
 # Some of this code originated from rlcompleter in the Python standard library
 # Copyright (C) 2001 Python Software Foundation, www.python.org

-from __future__ import print_function

 import __main__
 import glob
diff --git a/IPython/core/completerlib.py b/IPython/core/completerlib.py
index 00bbefd..cd2ac20 100644
--- a/IPython/core/completerlib.py
+++ b/IPython/core/completerlib.py
@@ -14,7 +14,6 @@ These are all loaded by default by IPython.
 #-----------------------------------------------------------------------------
 # Imports
 #-----------------------------------------------------------------------------
-from __future__ import print_function

 # Stdlib imports
 import glob
diff --git a/IPython/core/crashhandler.py b/IPython/core/crashhandler.py
index 2cbe133..c85c766 100644
--- a/IPython/core/crashhandler.py
+++ b/IPython/core/crashhandler.py
@@ -18,7 +18,6 @@ Authors:
 #-----------------------------------------------------------------------------
 # Imports
 #-----------------------------------------------------------------------------
-from __future__ import print_function

 import os
 import sys
diff --git a/IPython/core/debugger.py b/IPython/core/debugger.py
index 671fb88..a7fefba 100644
--- a/IPython/core/debugger.py
+++ b/IPython/core/debugger.py
@@ -24,7 +24,6 @@ http://www.python.org/2.2.3/license.html"""
 #
 #
 #*****************************************************************************
-from __future__ import print_function

 import bdb
 import functools
@@ -604,22 +603,21 @@ class Pdb(OldPdb, object):
                       ('Globals', self.curframe.f_globals)]
         self.shell.find_line_magic('psource')(arg, namespaces=namespaces)

-    if sys.version_info > (3, ):
-        def do_where(self, arg):
-            """w(here)
-            Print a stack trace, with the most recent frame at the bottom.
-            An arrow indicates the "current frame", which determines the
-            context of most commands. 'bt' is an alias for this command.
-
-            Take a number as argument as an (optional) number of context line to
-            print"""
-            if arg:
-                context = int(arg)
-                self.print_stack_trace(context)
-            else:
-                self.print_stack_trace()
+    def do_where(self, arg):
+        """w(here)
+        Print a stack trace, with the most recent frame at the bottom.
+        An arrow indicates the "current frame", which determines the
+        context of most commands. 'bt' is an alias for this command.
+
+        Take a number as argument as an (optional) number of context line to
+        print"""
+        if arg:
+            context = int(arg)
+            self.print_stack_trace(context)
+        else:
+            self.print_stack_trace()

-        do_w = do_where
+    do_w = do_where


 def set_trace(frame=None):
diff --git a/IPython/core/display.py b/IPython/core/display.py
index 4fe28d9..7c6ce94 100644
--- a/IPython/core/display.py
+++ b/IPython/core/display.py
@@ -4,7 +4,6 @@
 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.

-from __future__ import print_function

 try:
     from base64 import encodebytes as base64_encode
diff --git a/IPython/core/displayhook.py b/IPython/core/displayhook.py
index 9258c61..4877d02 100644
--- a/IPython/core/displayhook.py
+++ b/IPython/core/displayhook.py
@@ -7,7 +7,6 @@ This defines a callable class that IPython uses for `sys.displayhook`.
 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.

-from __future__ import print_function

 import sys
 import io as _io
diff --git a/IPython/core/displaypub.py b/IPython/core/displaypub.py
index 26996b0..aac1d93 100644
--- a/IPython/core/displaypub.py
+++ b/IPython/core/displaypub.py
@@ -15,7 +15,6 @@ spec.
 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.

-from __future__ import print_function

 import sys
diff --git a/IPython/core/events.py b/IPython/core/events.py
index bfd09fe..a8591d9 100644
--- a/IPython/core/events.py
+++ b/IPython/core/events.py
@@ -12,7 +12,6 @@ events and the arguments which will be passed to them.

 This API is experimental in IPython 2.0, and may be revised in future versions.
 """
-from __future__ import print_function

 class EventManager(object):
     """Manage a collection of events and a sequence of callbacks for each.
diff --git a/IPython/core/history.py b/IPython/core/history.py
index 682032b..58d20ec 100644
--- a/IPython/core/history.py
+++ b/IPython/core/history.py
@@ -3,7 +3,6 @@
 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.

-from __future__ import print_function

 import atexit
 import datetime
diff --git a/IPython/core/historyapp.py b/IPython/core/historyapp.py
index d51426d..3bcc697 100644
--- a/IPython/core/historyapp.py
+++ b/IPython/core/historyapp.py
@@ -4,7 +4,6 @@ An application for managing IPython history.

 To be invoked as the `ipython history` subcommand.
 """
-from __future__ import print_function

 import os
 import sqlite3
diff --git a/IPython/core/interactiveshell.py b/IPython/core/interactiveshell.py
index d5bcd44..4fdbbdc 100644
--- a/IPython/core/interactiveshell.py
+++ b/IPython/core/interactiveshell.py
@@ -10,7 +10,6 @@
 # the file COPYING, distributed as part of this software.
 #-----------------------------------------------------------------------------

-from __future__ import absolute_import, print_function

 import __future__
 import abc
@@ -57,7 +56,7 @@ from IPython.core.payload import PayloadManager
 from IPython.core.prefilter import PrefilterManager
 from IPython.core.profiledir import ProfileDir
 from IPython.core.usage import default_banner
-from IPython.testing.skipdoctest import skip_doctest_py2, skip_doctest
+from IPython.testing.skipdoctest import skip_doctest
 from IPython.utils import PyColorize
 from IPython.utils import io
 from IPython.utils import py3compat
@@ -187,10 +186,7 @@ class ExecutionResult(object):
             raise self.error_in_exec

     def __repr__(self):
-        if sys.version_info > (3,):
-            name = self.__class__.__qualname__
-        else:
-            name = self.__class__.__name__
+        name = self.__class__.__qualname__
         return '<%s object at %x, execution_count=%s error_before_exec=%s error_in_exec=%s result=%s>' %\
                 (name, id(self), self.execution_count, self.error_before_exec, self.error_in_exec, repr(self.result))

@@ -1922,7 +1918,6 @@ class InteractiveShell(SingletonConfigurable):

         self.set_hook('complete_command', reset_completer, str_key = '%reset')

-    @skip_doctest_py2
     def complete(self, text, line=None, cursor_pos=None):
         """Return the completed text and a list of completions.

diff --git a/IPython/core/logger.py b/IPython/core/logger.py
index 0e41db5..091da50 100644
--- a/IPython/core/logger.py
+++ b/IPython/core/logger.py
@@ -1,6 +1,5 @@
 """Logger class for IPython's logging facilities.
 """
-from __future__ import print_function

 #*****************************************************************************
 # Copyright (C) 2001 Janko Hauser and
diff --git a/IPython/core/magic.py b/IPython/core/magic.py
index 61a929f..e139bed 100644
--- a/IPython/core/magic.py
+++ b/IPython/core/magic.py
@@ -1,7 +1,6 @@
 # encoding: utf-8
 """Magic functions for InteractiveShell.
 """
-from __future__ import print_function

 #-----------------------------------------------------------------------------
 # Copyright (C) 2001 Janko Hauser and
diff --git a/IPython/core/magics/auto.py b/IPython/core/magics/auto.py
index f87bafd..79de8d8 100644
--- a/IPython/core/magics/auto.py
+++ b/IPython/core/magics/auto.py
@@ -1,7 +1,5 @@
 """Implementation of magic functions that control various automatic behaviors.
 """
-from __future__ import print_function
-from __future__ import absolute_import
 #-----------------------------------------------------------------------------
 # Copyright (c) 2012 The IPython Development Team.
 #
diff --git a/IPython/core/magics/basic.py b/IPython/core/magics/basic.py
index 781fa72..c2427c9 100644
--- a/IPython/core/magics/basic.py
+++ b/IPython/core/magics/basic.py
@@ -1,7 +1,5 @@
 """Implementation of basic magic functions."""

-from __future__ import print_function
-from __future__ import absolute_import

 import argparse
 import io
diff --git a/IPython/core/magics/code.py b/IPython/core/magics/code.py
index 4c1a40f..6439df7 100644
--- a/IPython/core/magics/code.py
+++ b/IPython/core/magics/code.py
@@ -1,7 +1,5 @@
 """Implementation of code management magic functions.
 """
-from __future__ import print_function
-from __future__ import absolute_import
 #-----------------------------------------------------------------------------
 # Copyright (c) 2012 The IPython Development Team.
 #
diff --git a/IPython/core/magics/config.py b/IPython/core/magics/config.py
index 9505697..230a2c6 100644
--- a/IPython/core/magics/config.py
+++ b/IPython/core/magics/config.py
@@ -1,7 +1,5 @@
 """Implementation of configuration-related magic functions.
 """
-from __future__ import print_function
-from __future__ import absolute_import
 #-----------------------------------------------------------------------------
 # Copyright (c) 2012 The IPython Development Team.
 #
diff --git a/IPython/core/magics/execution.py b/IPython/core/magics/execution.py
index a5eebb4..36a3645 100644
--- a/IPython/core/magics/execution.py
+++ b/IPython/core/magics/execution.py
@@ -4,8 +4,6 @@
 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.

-from __future__ import print_function
-from __future__ import absolute_import

 import ast
 import bdb
diff --git a/IPython/core/magics/extension.py b/IPython/core/magics/extension.py
index 2991d55..7c751fa 100644
--- a/IPython/core/magics/extension.py
+++ b/IPython/core/magics/extension.py
@@ -1,6 +1,5 @@
 """Implementation of magic functions for the extension machinery.
 """
-from __future__ import print_function
 #-----------------------------------------------------------------------------
 # Copyright (c) 2012 The IPython Development Team.
 #
diff --git a/IPython/core/magics/history.py b/IPython/core/magics/history.py
index 5967591..af3d0b1 100644
--- a/IPython/core/magics/history.py
+++ b/IPython/core/magics/history.py
@@ -11,7 +11,6 @@
 #-----------------------------------------------------------------------------
 # Imports
 #-----------------------------------------------------------------------------
-from __future__ import print_function

 # Stdlib
 import os
diff --git a/IPython/core/magics/namespace.py b/IPython/core/magics/namespace.py
index c02b387..68442f9 100644
--- a/IPython/core/magics/namespace.py
+++ b/IPython/core/magics/namespace.py
@@ -1,6 +1,5 @@
 """Implementation of namespace-related magic functions.
 """
-from __future__ import print_function
 #-----------------------------------------------------------------------------
 # Copyright (c) 2012 The IPython Development Team.
 #
diff --git a/IPython/core/magics/osm.py b/IPython/core/magics/osm.py
index 352cf2d..0d85a01 100644
--- a/IPython/core/magics/osm.py
+++ b/IPython/core/magics/osm.py
@@ -3,7 +3,6 @@
 Note: this module is named 'osm' instead of 'os' to avoid a collision with
 the builtin.
 """
-from __future__ import print_function
 #-----------------------------------------------------------------------------
 # Copyright (c) 2012 The IPython Development Team.
 #
diff --git a/IPython/core/magics/pylab.py b/IPython/core/magics/pylab.py
index 6c5cd68..f3c70a3 100644
--- a/IPython/core/magics/pylab.py
+++ b/IPython/core/magics/pylab.py
@@ -1,6 +1,5 @@
 """Implementation of magic functions for matplotlib/pylab support.
 """
-from __future__ import print_function
 #-----------------------------------------------------------------------------
 # Copyright (c) 2012 The IPython Development Team.
 #
diff --git a/IPython/core/magics/script.py b/IPython/core/magics/script.py
index be8fa94..b0532c5 100644
--- a/IPython/core/magics/script.py
+++ b/IPython/core/magics/script.py
@@ -1,5 +1,4 @@
 """Magic functions for running cells in various scripts."""
-from __future__ import print_function

 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.
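
Every `from __future__` import removed in the hunks above is a no-op on Python 3: each of these features became mandatory in the 3.0 release, so deleting the import changes nothing at runtime. A quick way to verify that claim from a Python 3 prompt (an illustrative sketch, not part of the patch):

    import __future__

    # Each _Feature object records the release in which it became mandatory;
    # for all features deleted by this patch that release is 3.0, so on
    # Python 3 the import has no effect.
    for name in ("absolute_import", "division", "print_function", "unicode_literals"):
        feature = getattr(__future__, name)
        print(name, "mandatory since", feature.getMandatoryRelease())
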
diff --git a/IPython/core/oinspect.py b/IPython/core/oinspect.py
index c5b9f77..892b6f0 100644
--- a/IPython/core/oinspect.py
+++ b/IPython/core/oinspect.py
@@ -10,8 +10,6 @@ reference the name under which an object is being read.
 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.

-from __future__ import print_function
-
 __all__ = ['Inspector','InspectColors']

 # stdlib modules
@@ -31,7 +29,7 @@ except ImportError:
 # IPython's own
 from IPython.core import page
 from IPython.lib.pretty import pretty
-from IPython.testing.skipdoctest import skip_doctest_py3
+from IPython.testing.skipdoctest import skip_doctest
 from IPython.utils import PyColorize
 from IPython.utils import openpy
 from IPython.utils import py3compat
@@ -437,7 +435,7 @@ class Inspector(Colorable):
             print(header,self.format(output), end=' ')

     # In Python 3, all classes are new-style, so they all have __init__.
-    @skip_doctest_py3
+    @skip_doctest
     def pdoc(self, obj, oname='', formatter=None):
         """Print the docstring for any object.

diff --git a/IPython/core/page.py b/IPython/core/page.py
index 6d213c9..0a9e4f0 100644
--- a/IPython/core/page.py
+++ b/IPython/core/page.py
@@ -13,7 +13,6 @@ rid of that dependency, we could move it there.
 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.

-from __future__ import print_function

 import os
 import re
diff --git a/IPython/core/profileapp.py b/IPython/core/profileapp.py
index b8e5fd2..ee78ab6 100644
--- a/IPython/core/profileapp.py
+++ b/IPython/core/profileapp.py
@@ -9,7 +9,6 @@ Authors:

 * Min RK
 """
-from __future__ import print_function

 #-----------------------------------------------------------------------------
 # Copyright (C) 2008 The IPython Development Team
diff --git a/IPython/core/pylabtools.py b/IPython/core/pylabtools.py
index 04b5e39..6134d4b 100644
--- a/IPython/core/pylabtools.py
+++ b/IPython/core/pylabtools.py
@@ -1,6 +1,5 @@
 # -*- coding: utf-8 -*-
 """Pylab (matplotlib) support utilities."""
-from __future__ import print_function

 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.
diff --git a/IPython/core/shellapp.py b/IPython/core/shellapp.py
index ab8fbe4..22e7543 100644
--- a/IPython/core/shellapp.py
+++ b/IPython/core/shellapp.py
@@ -7,9 +7,6 @@ launch InteractiveShell instances, load extensions, etc.
 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.

-from __future__ import absolute_import
-from __future__ import print_function
-
 import glob
 import os
 import sys
diff --git a/IPython/core/tests/nonascii.py b/IPython/core/tests/nonascii.py
index 17ff7d3..78801df 100644
--- a/IPython/core/tests/nonascii.py
+++ b/IPython/core/tests/nonascii.py
@@ -1,5 +1,4 @@
 # coding: iso-8859-5
 # (Unlikely to be the default encoding for most testers.)
 # ������������������� <- Cyrillic characters
-from __future__ import unicode_literals
 u = '����'
diff --git a/IPython/core/tests/print_argv.py b/IPython/core/tests/print_argv.py
index a9acbaa..0e92bdd 100644
--- a/IPython/core/tests/print_argv.py
+++ b/IPython/core/tests/print_argv.py
@@ -1,3 +1,2 @@
-from __future__ import print_function
 import sys
 print(sys.argv[1:])
diff --git a/IPython/core/tests/refbug.py b/IPython/core/tests/refbug.py
index 69767e2..b8de4c8 100644
--- a/IPython/core/tests/refbug.py
+++ b/IPython/core/tests/refbug.py
@@ -12,7 +12,6 @@ This script is meant to be called by other parts of the test suite that call
 it via %run as if it were executed interactively by the user. As of
 2011-05-29, test_run.py calls it.
 """
-from __future__ import print_function

 #-----------------------------------------------------------------------------
 # Module imports
diff --git a/IPython/core/tests/tclass.py b/IPython/core/tests/tclass.py
index aa8fa2d..6bd9ffc 100644
--- a/IPython/core/tests/tclass.py
+++ b/IPython/core/tests/tclass.py
@@ -2,7 +2,6 @@

 See test_run for details."""

-from __future__ import print_function

 import sys
diff --git a/IPython/core/tests/test_compilerop.py b/IPython/core/tests/test_compilerop.py
index 125a567..ac9cb9e 100644
--- a/IPython/core/tests/test_compilerop.py
+++ b/IPython/core/tests/test_compilerop.py
@@ -12,7 +12,6 @@
 #-----------------------------------------------------------------------------
 # Imports
 #-----------------------------------------------------------------------------
-from __future__ import print_function

 # Stdlib imports
 import linecache
diff --git a/IPython/core/tests/test_completer.py b/IPython/core/tests/test_completer.py
index 7678351..03c0a4f 100644
--- a/IPython/core/tests/test_completer.py
+++ b/IPython/core/tests/test_completer.py
@@ -155,7 +155,6 @@ def test_latex_completions():



-@dec.onlyif(sys.version_info[0] >= 3, 'This test only apply on python3')
 def test_back_latex_completion():
     ip = get_ipython()

@@ -164,7 +163,6 @@ def test_back_latex_completion():
     nt.assert_equal(len(matches), 1)
     nt.assert_equal(matches[0], '\\beta')

-@dec.onlyif(sys.version_info[0] >= 3, 'This test only apply on python3')
 def test_back_unicode_completion():
     ip = get_ipython()

@@ -173,7 +171,6 @@ def test_back_unicode_completion():
     nt.assert_equal(matches[0], '\\ROMAN NUMERAL FIVE')


-@dec.onlyif(sys.version_info[0] >= 3, 'This test only apply on python3')
 def test_forward_unicode_completion():
     ip = get_ipython()

@@ -181,7 +178,6 @@ def test_forward_unicode_completion():
     nt.assert_equal(len(matches), 1)
     nt.assert_equal(matches[0], 'Ⅴ')

-@dec.onlyif(sys.version_info[0] >= 3, 'This test only apply on python3')
 @dec.knownfailureif(sys.platform == 'win32', 'Fails if there is a C:\\j... path')
 def test_no_ascii_back_completion():
     ip = get_ipython()
@@ -588,7 +584,6 @@ def test_dict_key_completion_contexts():



-@dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
 def test_dict_key_completion_bytes():
     """Test handling of bytes in dict key completion"""
     ip = get_ipython()
@@ -618,68 +613,6 @@ def test_dict_key_completion_bytes():
     nt.assert_not_in("abd", matches)


-@dec.onlyif(sys.version_info[0] < 3, 'This test only applies in Py<3')
-def test_dict_key_completion_unicode_py2():
-    """Test handling of unicode in dict key completion"""
-    ip = get_ipython()
-    complete = ip.Completer.complete
-
-    ip.user_ns['d'] = {u'abc': None,
-                       u'a\u05d0b': None}
-
-    _, matches = complete(line_buffer="d[")
-    nt.assert_in("u'abc'", matches)
-    nt.assert_in("u'a\\u05d0b'", matches)
-
-    _, matches = complete(line_buffer="d['a")
-    nt.assert_in("abc", matches)
-    nt.assert_not_in("a\\u05d0b", matches)
-
-    _, matches = complete(line_buffer="d[u'a")
-    nt.assert_in("abc", matches)
-    nt.assert_in("a\\u05d0b", matches)
-
-    _, matches = complete(line_buffer="d[U'a")
-    nt.assert_in("abc", matches)
-    nt.assert_in("a\\u05d0b", matches)
-
-    # query using escape
-    if sys.platform != 'win32':
-        # Known failure on Windows
-        _, matches = complete(line_buffer=u"d[u'a\\u05d0")
-        nt.assert_in("u05d0b", matches)  # tokenized after \\
-
-    # query using character
-    _, matches = complete(line_buffer=u"d[u'a\u05d0")
-    nt.assert_in(u"a\u05d0b", matches)
-
-    with greedy_completion():
-        _, matches = complete(line_buffer="d[")
-        nt.assert_in("d[u'abc']", matches)
-        nt.assert_in("d[u'a\\u05d0b']", matches)
-
-        _, matches = complete(line_buffer="d['a")
-        nt.assert_in("d['abc']", matches)
-        nt.assert_not_in("d[u'a\\u05d0b']", matches)
-
-        _, matches = complete(line_buffer="d[u'a")
-        nt.assert_in("d[u'abc']", matches)
-        nt.assert_in("d[u'a\\u05d0b']", matches)
-
-        _, matches = complete(line_buffer="d[U'a")
-        nt.assert_in("d[U'abc']", matches)
-        nt.assert_in("d[U'a\\u05d0b']", matches)
-
-        # query using escape
-        _, matches = complete(line_buffer=u"d[u'a\\u05d0")
-        nt.assert_in("d[u'a\\u05d0b']", matches)  # tokenized after \\
-
-        # query using character
-        _, matches = complete(line_buffer=u"d[u'a\u05d0")
-        nt.assert_in(u"d[u'a\u05d0b']", matches)
-
-
-@dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
 def test_dict_key_completion_unicode_py3():
     """Test handling of unicode in dict key completion"""
     ip = get_ipython()
diff --git a/IPython/core/tests/test_completerlib.py b/IPython/core/tests/test_completerlib.py
index 71a6cd2..82d246b 100644
--- a/IPython/core/tests/test_completerlib.py
+++ b/IPython/core/tests/test_completerlib.py
@@ -2,7 +2,6 @@
 """Tests for completerlib.

 """
-from __future__ import absolute_import

 #-----------------------------------------------------------------------------
 # Imports
diff --git a/IPython/core/tests/test_debugger.py b/IPython/core/tests/test_debugger.py
index d118638..dcfd9a4 100644
--- a/IPython/core/tests/test_debugger.py
+++ b/IPython/core/tests/test_debugger.py
@@ -1,6 +1,5 @@
 """Tests for debugging machinery.
 """
-from __future__ import print_function

 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.
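
The `@dec.onlyif(sys.version_info[0] >= 3, ...)` guards stripped from test_completer.py above made each test a skip on Python 2; once only Python 3 is supported the condition is always true and the decorator is dead weight. The pattern behind such a guard works roughly like this (a simplified sketch built on unittest.SkipTest, not the actual IPython.testing.decorators implementation):

    import functools
    from unittest import SkipTest

    def onlyif(condition, msg):
        # Run the decorated test only when `condition` holds; otherwise
        # raise SkipTest so the runner reports a skip instead of a failure.
        def decorator(func):
            if condition:
                return func
            @functools.wraps(func)
            def skipper(*args, **kwargs):
                raise SkipTest(msg)
            return skipper
        return decorator
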
diff --git a/IPython/core/tests/test_hooks.py b/IPython/core/tests/test_hooks.py
index f44674c..35d3f31 100644
--- a/IPython/core/tests/test_hooks.py
+++ b/IPython/core/tests/test_hooks.py
@@ -1,7 +1,6 @@
 # -*- coding: utf-8 -*-
 """Tests for CommandChainDispatcher."""

-from __future__ import absolute_import

 #-----------------------------------------------------------------------------
 # Imports
diff --git a/IPython/core/tests/test_inputsplitter.py b/IPython/core/tests/test_inputsplitter.py
index b5ebad9..17a7e37 100644
--- a/IPython/core/tests/test_inputsplitter.py
+++ b/IPython/core/tests/test_inputsplitter.py
@@ -1,7 +1,6 @@
 # -*- coding: utf-8 -*-
 """Tests for the inputsplitter module."""

-from __future__ import print_function

 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.
diff --git a/IPython/core/tests/test_interactiveshell.py b/IPython/core/tests/test_interactiveshell.py
index c23ce40..a350316 100644
--- a/IPython/core/tests/test_interactiveshell.py
+++ b/IPython/core/tests/test_interactiveshell.py
@@ -143,26 +143,14 @@ class InteractiveShellTestCase(unittest.TestCase):

     def test_future_flags(self):
         """Check that future flags are used for parsing code (gh-777)"""
-        ip.run_cell('from __future__ import print_function')
+        ip.run_cell('from __future__ import barry_as_FLUFL')
         try:
-            ip.run_cell('prfunc_return_val = print(1,2, sep=" ")')
+            ip.run_cell('prfunc_return_val = 1 <> 2')
             assert 'prfunc_return_val' in ip.user_ns
         finally:
             # Reset compiler flags so we don't mess up other tests.
             ip.compile.reset_compiler_flags()

-    def test_future_unicode(self):
-        """Check that unicode_literals is imported from __future__ (gh #786)"""
-        try:
-            ip.run_cell(u'byte_str = "a"')
-            assert isinstance(ip.user_ns['byte_str'], str)  # string literals are byte strings by default
-            ip.run_cell('from __future__ import unicode_literals')
-            ip.run_cell(u'unicode_str = "a"')
-            assert isinstance(ip.user_ns['unicode_str'], unicode_type)  # strings literals are now unicode
-        finally:
-            # Reset compiler flags so we don't mess up other tests.
-            ip.compile.reset_compiler_flags()
-
     def test_can_pickle(self):
         "Can we pickle objects defined interactively (GH-29)"
         ip = get_ipython()
@@ -338,19 +326,6 @@ class InteractiveShellTestCase(unittest.TestCase):
         finally:
             trap.hook = save_hook

-    @skipif(sys.version_info[0] >= 3, "softspace removed in py3")
-    def test_print_softspace(self):
-        """Verify that softspace is handled correctly when executing multiple
-        statements.
-
-        In [1]: print 1; print 2
-        1
-        2
-
-        In [2]: print 1,; print 2
-        1 2
-        """
-
     def test_ofind_line_magic(self):
         from IPython.core.magic import register_line_magic

@@ -466,22 +441,6 @@ class InteractiveShellTestCase(unittest.TestCase):
         # Reset the custom exception hook
         ip.set_custom_exc((), None)

-    @skipif(sys.version_info[0] >= 3, "no differences with __future__ in py3")
-    def test_future_environment(self):
-        "Can we run code with & without the shell's __future__ imports?"
- ip.run_cell("from __future__ import division") - ip.run_cell("a = 1/2", shell_futures=True) - self.assertEqual(ip.user_ns['a'], 0.5) - ip.run_cell("b = 1/2", shell_futures=False) - self.assertEqual(ip.user_ns['b'], 0) - - ip.compile.reset_compiler_flags() - # This shouldn't leak to the shell's compiler - ip.run_cell("from __future__ import division \nc=1/2", shell_futures=False) - self.assertEqual(ip.user_ns['c'], 0.5) - ip.run_cell("d = 1/2", shell_futures=True) - self.assertEqual(ip.user_ns['d'], 0) - def test_mktempfile(self): filename = ip.mktempfile() # Check that we can open the file again on Windows @@ -509,10 +468,7 @@ class InteractiveShellTestCase(unittest.TestCase): raise DerivedInterrupt("foo") except KeyboardInterrupt: msg = ip.get_exception_only() - if sys.version_info[0] <= 2: - self.assertEqual(msg, 'DerivedInterrupt: foo\n') - else: - self.assertEqual(msg, 'IPython.core.tests.test_interactiveshell.DerivedInterrupt: foo\n') + self.assertEqual(msg, 'IPython.core.tests.test_interactiveshell.DerivedInterrupt: foo\n') def test_inspect_text(self): ip.run_cell('a = 5') diff --git a/IPython/core/tests/test_magic.py b/IPython/core/tests/test_magic.py index eb40331..6fea789 100644 --- a/IPython/core/tests/test_magic.py +++ b/IPython/core/tests/test_magic.py @@ -3,7 +3,6 @@ Needs to be run by nose (to make ipython session available). """ -from __future__ import absolute_import import io import os @@ -377,18 +376,6 @@ def test_time3(): "run = 0\n" "run += 1") -@dec.skipif(sys.version_info[0] >= 3, "no differences with __future__ in py3") -def test_time_futures(): - "Test %time with __future__ environments" - ip = get_ipython() - ip.autocall = 0 - ip.run_cell("from __future__ import division") - with tt.AssertPrints('0.25'): - ip.run_line_magic('time', 'print(1/4)') - ip.compile.reset_compiler_flags() - with tt.AssertNotPrints('0.25'): - ip.run_line_magic('time', 'print(1/4)') - def test_doctest_mode(): "Toggle doctest_mode twice, it should be a no-op and run without error" _ip.magic('doctest_mode') @@ -573,17 +560,6 @@ def test_timeit_return_quiet(): res = _ip.run_line_magic('timeit', '-n1 -r1 -q -o 1') assert (res is not None) -@dec.skipif(sys.version_info[0] >= 3, "no differences with __future__ in py3") -def test_timeit_futures(): - "Test %timeit with __future__ environments" - ip = get_ipython() - ip.run_cell("from __future__ import division") - with tt.AssertPrints('0.25'): - ip.run_line_magic('timeit', '-n1 -r1 print(1/4)') - ip.compile.reset_compiler_flags() - with tt.AssertNotPrints('0.25'): - ip.run_line_magic('timeit', '-n1 -r1 print(1/4)') - @dec.skipif(execution.profile is None) def test_prun_special_syntax(): "Test %%prun with IPython special syntax" diff --git a/IPython/core/tests/test_magic_terminal.py b/IPython/core/tests/test_magic_terminal.py index 9643ecf..d204856 100644 --- a/IPython/core/tests/test_magic_terminal.py +++ b/IPython/core/tests/test_magic_terminal.py @@ -2,7 +2,6 @@ Needs to be run by nose (to make ipython session available). """ -from __future__ import absolute_import #----------------------------------------------------------------------------- # Imports @@ -57,8 +56,6 @@ def check_cpaste(code, should_fail=False): finally: sys.stdin = stdin_save -PY31 = sys.version_info[:2] == (3,1) - def test_cpaste(): """Test cpaste magic""" @@ -77,13 +74,8 @@ def test_cpaste(): ], 'fail': ["1 + runf()", + "++ runf()", ]} - - # I don't know why this is failing specifically on Python 3.1. 
-    # checked it manually interactively, but we don't care enough about 3.1
-    # to spend time fiddling with the tests, so we just skip it.
-    if not PY31:
-        tests['fail'].append("++ runf()")

     ip.user_ns['runf'] = runf

diff --git a/IPython/core/tests/test_oinspect.py b/IPython/core/tests/test_oinspect.py
index f6a8cc2..3db81fb 100644
--- a/IPython/core/tests/test_oinspect.py
+++ b/IPython/core/tests/test_oinspect.py
@@ -4,7 +4,6 @@
 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.

-from __future__ import print_function

 import os
 import re
@@ -40,7 +39,7 @@ ip = get_ipython()
 # defined, if any code is inserted above, the following line will need to be
 # updated. Do NOT insert any whitespace between the next line and the function
 # definition below.
-THIS_LINE_NUMBER = 43 # Put here the actual number of this line
+THIS_LINE_NUMBER = 42 # Put here the actual number of this line
 from unittest import TestCase

@@ -275,8 +274,7 @@ def test_info():
     nt.assert_equal(i['type_name'], 'type')
     expted_class = str(type(type))  # (Python 3) or
     nt.assert_equal(i['base_class'], expted_class)
-    if sys.version_info > (3,):
-        nt.assert_regex(i['string_form'], "")
+    nt.assert_regex(i['string_form'], "")
     fname = __file__
     if fname.endswith(".pyc"):
         fname = fname[:-1]
diff --git a/IPython/core/tests/test_profile.py b/IPython/core/tests/test_profile.py
index 4c938ed..79e3d4b 100644
--- a/IPython/core/tests/test_profile.py
+++ b/IPython/core/tests/test_profile.py
@@ -15,7 +15,6 @@ Authors

 * MinRK
 """
-from __future__ import absolute_import

 #-----------------------------------------------------------------------------
 # Imports
diff --git a/IPython/core/tests/test_pylabtools.py b/IPython/core/tests/test_pylabtools.py
index aa9d16b..71dd737 100644
--- a/IPython/core/tests/test_pylabtools.py
+++ b/IPython/core/tests/test_pylabtools.py
@@ -4,7 +4,6 @@
 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.

-from __future__ import print_function

 from io import UnsupportedOperation, BytesIO
diff --git a/IPython/core/tests/test_run.py b/IPython/core/tests/test_run.py
index e6c20ec..212a22f 100644
--- a/IPython/core/tests/test_run.py
+++ b/IPython/core/tests/test_run.py
@@ -11,7 +11,6 @@ and we do so in a common test_magic file.

 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.
-from __future__ import absolute_import

 import functools
diff --git a/IPython/core/tests/test_ultratb.py b/IPython/core/tests/test_ultratb.py
index 27b6fc4..c9c86c0 100644
--- a/IPython/core/tests/test_ultratb.py
+++ b/IPython/core/tests/test_ultratb.py
@@ -313,44 +313,43 @@ def r3o2():

 #----------------------------------------------------------------------------
 # module testing (minimal)
-if sys.version_info > (3,):
-    def test_handlers():
-        def spam(c, d_e):
-            (d, e) = d_e
-            x = c + d
-            y = c * d
-            foo(x, y)
-
-        def foo(a, b, bar=1):
-            eggs(a, b + bar)
-
-        def eggs(f, g, z=globals()):
-            h = f + g
-            i = f - g
-            return h / i
-
-        buff = io.StringIO()
-
-        buff.write('')
-        buff.write('*** Before ***')
-        try:
-            buff.write(spam(1, (2, 3)))
-        except:
-            traceback.print_exc(file=buff)
-
-        handler = ColorTB(ostream=buff)
-        buff.write('*** ColorTB ***')
-        try:
-            buff.write(spam(1, (2, 3)))
-        except:
-            handler(*sys.exc_info())
-        buff.write('')
-
-        handler = VerboseTB(ostream=buff)
-        buff.write('*** VerboseTB ***')
-        try:
-            buff.write(spam(1, (2, 3)))
-        except:
-            handler(*sys.exc_info())
-        buff.write('')
+def test_handlers():
+    def spam(c, d_e):
+        (d, e) = d_e
+        x = c + d
+        y = c * d
+        foo(x, y)
+
+    def foo(a, b, bar=1):
+        eggs(a, b + bar)
+
+    def eggs(f, g, z=globals()):
+        h = f + g
+        i = f - g
+        return h / i
+
+    buff = io.StringIO()
+
+    buff.write('')
+    buff.write('*** Before ***')
+    try:
+        buff.write(spam(1, (2, 3)))
+    except:
+        traceback.print_exc(file=buff)
+
+    handler = ColorTB(ostream=buff)
+    buff.write('*** ColorTB ***')
+    try:
+        buff.write(spam(1, (2, 3)))
+    except:
+        handler(*sys.exc_info())
+    buff.write('')
+
+    handler = VerboseTB(ostream=buff)
+    buff.write('*** VerboseTB ***')
+    try:
+        buff.write(spam(1, (2, 3)))
+    except:
+        handler(*sys.exc_info())
+    buff.write('')
diff --git a/IPython/core/ultratb.py b/IPython/core/ultratb.py
index e03fda4..df451e0 100644
--- a/IPython/core/ultratb.py
+++ b/IPython/core/ultratb.py
@@ -88,9 +88,6 @@ Inheritance diagram:
 # the file COPYING, distributed as part of this software.
 #*****************************************************************************

-from __future__ import absolute_import
-from __future__ import unicode_literals
-from __future__ import print_function

 import dis
 import inspect
diff --git a/IPython/extensions/autoreload.py b/IPython/extensions/autoreload.py
index d5d8381..a4dfc90 100644
--- a/IPython/extensions/autoreload.py
+++ b/IPython/extensions/autoreload.py
@@ -87,7 +87,6 @@ Some of the known remaining caveats are:

 - C extension modules cannot be reloaded, and so cannot be autoreloaded.
 """
-from __future__ import print_function

 skip_doctest = True
diff --git a/IPython/extensions/storemagic.py b/IPython/extensions/storemagic.py
index 2fd1abf..32a3505 100644
--- a/IPython/extensions/storemagic.py
+++ b/IPython/extensions/storemagic.py
@@ -9,7 +9,6 @@ To automatically restore stored variables at startup, add this to your

     c.StoreMagics.autorestore = True
 """
-from __future__ import print_function

 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.
diff --git a/IPython/lib/backgroundjobs.py b/IPython/lib/backgroundjobs.py
index b724126..a3a02ab 100644
--- a/IPython/lib/backgroundjobs.py
+++ b/IPython/lib/backgroundjobs.py
@@ -21,7 +21,6 @@ separate implementation).

 An example notebook is provided in our documentation illustrating interactive
 use of the system.
""" -from __future__ import print_function #***************************************************************************** # Copyright (C) 2005-2006 Fernando Perez diff --git a/IPython/lib/deepreload.py b/IPython/lib/deepreload.py index f8a786a..c5c4adb 100644 --- a/IPython/lib/deepreload.py +++ b/IPython/lib/deepreload.py @@ -25,7 +25,6 @@ A reference to the original :func:`python:reload` is stored in this module as This code is almost entirely based on knee.py, which is a Python re-implementation of hierarchical module import. """ -from __future__ import print_function #***************************************************************************** # Copyright (C) 2001 Nathaniel Gray # diff --git a/IPython/lib/demo.py b/IPython/lib/demo.py index 51c44b5..8ca418c 100644 --- a/IPython/lib/demo.py +++ b/IPython/lib/demo.py @@ -170,7 +170,6 @@ The following is a very simple example of a valid demo file. ################### END EXAMPLE DEMO ############################ """ -from __future__ import unicode_literals #***************************************************************************** # Copyright (C) 2005-2006 Fernando Perez. @@ -179,7 +178,6 @@ from __future__ import unicode_literals # the file COPYING, distributed as part of this software. # #***************************************************************************** -from __future__ import print_function import os import re diff --git a/IPython/lib/editorhooks.py b/IPython/lib/editorhooks.py index b76a89b..2d263a0 100644 --- a/IPython/lib/editorhooks.py +++ b/IPython/lib/editorhooks.py @@ -4,7 +4,6 @@ They should honor the line number argument, at least. Contributions are *very* welcome. """ -from __future__ import print_function import os import pipes diff --git a/IPython/lib/inputhookglut.py b/IPython/lib/inputhookglut.py index 14bafe1..b4a7f3c 100644 --- a/IPython/lib/inputhookglut.py +++ b/IPython/lib/inputhookglut.py @@ -2,7 +2,6 @@ """ GLUT Inputhook support functions """ -from __future__ import print_function #----------------------------------------------------------------------------- # Copyright (C) 2008-2011 The IPython Development Team diff --git a/IPython/lib/pretty.py b/IPython/lib/pretty.py index 97529f1..3eaa0b3 100644 --- a/IPython/lib/pretty.py +++ b/IPython/lib/pretty.py @@ -77,7 +77,6 @@ Inheritance diagram: Portions (c) 2009 by Robert Kern. :license: BSD License. 
""" -from __future__ import print_function from contextlib import contextmanager import sys import types diff --git a/IPython/lib/tests/test_backgroundjobs.py b/IPython/lib/tests/test_backgroundjobs.py index 0441eab..d7793f5 100644 --- a/IPython/lib/tests/test_backgroundjobs.py +++ b/IPython/lib/tests/test_backgroundjobs.py @@ -11,7 +11,6 @@ #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- -from __future__ import print_function # Stdlib imports import time diff --git a/IPython/lib/tests/test_display.py b/IPython/lib/tests/test_display.py index 43fb66e..e78870a 100644 --- a/IPython/lib/tests/test_display.py +++ b/IPython/lib/tests/test_display.py @@ -12,7 +12,6 @@ #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- -from __future__ import print_function from tempfile import NamedTemporaryFile, mkdtemp from os.path import split, join as pjoin, dirname diff --git a/IPython/lib/tests/test_pretty.py b/IPython/lib/tests/test_pretty.py index 4cf8041..268de06 100644 --- a/IPython/lib/tests/test_pretty.py +++ b/IPython/lib/tests/test_pretty.py @@ -4,7 +4,6 @@ # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. -from __future__ import print_function from collections import Counter, defaultdict, deque, OrderedDict import types, string @@ -12,14 +11,9 @@ import types, string import nose.tools as nt from IPython.lib import pretty -from IPython.testing.decorators import (skip_without, py2_only, py3_only) - -from IPython.utils.py3compat import PY3, unicode_to_str +from IPython.testing.decorators import skip_without -if PY3: - from io import StringIO -else: - from StringIO import StringIO +from io import StringIO class MyList(object): @@ -245,7 +239,7 @@ def test_metaclass_repr(): def test_unicode_repr(): u = u"üniçodé" - ustr = unicode_to_str(u) + ustr = u class C(object): def __repr__(self): @@ -276,83 +270,6 @@ def test_basic_class(): nt.assert_true(type_pprint_wrapper.called) -# This is only run on Python 2 because in Python 3 the language prevents you -# from setting a non-unicode value for __qualname__ on a metaclass, and it -# doesn't respect the descriptor protocol if you subclass unicode and implement -# __get__. -@py2_only -def test_fallback_to__name__on_type(): - # Test that we correctly repr types that have non-string values for - # __qualname__ by falling back to __name__ - - class Type(object): - __qualname__ = 5 - - # Test repring of the type. - stream = StringIO() - printer = pretty.RepresentationPrinter(stream) - - printer.pretty(Type) - printer.flush() - output = stream.getvalue() - - # If __qualname__ is malformed, we should fall back to __name__. - expected = '.'.join([__name__, Type.__name__]) - nt.assert_equal(output, expected) - - # Clear stream buffer. - stream.buf = '' - - # Test repring of an instance of the type. 
-    instance = Type()
-    printer.pretty(instance)
-    printer.flush()
-    output = stream.getvalue()
-
-    # Should look like:
-    #
-    prefix = '<' + '.'.join([__name__, Type.__name__]) + ' at 0x'
-    nt.assert_true(output.startswith(prefix))
-
-
-@py2_only
-def test_fail_gracefully_on_bogus__qualname__and__name__():
-    # Test that we correctly repr types that have non-string values for both
-    # __qualname__ and __name__
-
-    class Meta(type):
-        __name__ = 5
-
-    class Type(object):
-        __metaclass__ = Meta
-        __qualname__ = 5
-
-    stream = StringIO()
-    printer = pretty.RepresentationPrinter(stream)
-
-    printer.pretty(Type)
-    printer.flush()
-    output = stream.getvalue()
-
-    # If we can't find __name__ or __qualname__ just use a sentinel string.
-    expected = '.'.join([__name__, ''])
-    nt.assert_equal(output, expected)
-
-    # Clear stream buffer.
-    stream.buf = ''
-
-    # Test repring of an instance of the type.
-    instance = Type()
-    printer.pretty(instance)
-    printer.flush()
-    output = stream.getvalue()
-
-    # Should look like:
-    #  at 0x7f7658ae07d0>
-    prefix = '<' + '.'.join([__name__, '']) + ' at 0x'
-    nt.assert_true(output.startswith(prefix))
-
-
 def test_collections_defaultdict():
     # Create defaultdicts with cycles
     a = defaultdict()
@@ -441,7 +358,6 @@ def test_collections_counter():
     for obj, expected in cases:
         nt.assert_equal(pretty.pretty(obj), expected)

-@py3_only
 def test_mappingproxy():
     MP = types.MappingProxyType
     underlying_dict = {}
diff --git a/IPython/sphinxext/ipython_directive.py b/IPython/sphinxext/ipython_directive.py
index 75c9e70..29edc00 100644
--- a/IPython/sphinxext/ipython_directive.py
+++ b/IPython/sphinxext/ipython_directive.py
@@ -119,7 +119,6 @@ Authors
 - VáclavŠmilauer : Prompt generalizations.
 - Skipper Seabold, refactoring, cleanups, pure python addition
 """
-from __future__ import print_function

 #-----------------------------------------------------------------------------
 # Imports
diff --git a/IPython/terminal/embed.py b/IPython/terminal/embed.py
index 892a6af..a194f0e 100644
--- a/IPython/terminal/embed.py
+++ b/IPython/terminal/embed.py
@@ -5,8 +5,6 @@ An embedded IPython shell.
 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.

-from __future__ import with_statement
-from __future__ import print_function

 import sys
 import warnings
diff --git a/IPython/terminal/interactiveshell.py b/IPython/terminal/interactiveshell.py
index 0de3aaa..763761b 100644
--- a/IPython/terminal/interactiveshell.py
+++ b/IPython/terminal/interactiveshell.py
@@ -1,5 +1,4 @@
 """IPython terminal interface using prompt_toolkit"""
-from __future__ import print_function

 import os
 import sys
diff --git a/IPython/terminal/ipapp.py b/IPython/terminal/ipapp.py
index 8add461..32f97e0 100755
--- a/IPython/terminal/ipapp.py
+++ b/IPython/terminal/ipapp.py
@@ -8,8 +8,6 @@ line :command:`ipython` program.
 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.

-from __future__ import absolute_import
-from __future__ import print_function

 import logging
 import os
diff --git a/IPython/terminal/magics.py b/IPython/terminal/magics.py
index d5ea51f..474d9e6 100644
--- a/IPython/terminal/magics.py
+++ b/IPython/terminal/magics.py
@@ -3,7 +3,6 @@
 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.
-from __future__ import print_function

 from logging import error
 import os
diff --git a/IPython/terminal/prompts.py b/IPython/terminal/prompts.py
index f52862b..55e08be 100644
--- a/IPython/terminal/prompts.py
+++ b/IPython/terminal/prompts.py
@@ -1,5 +1,4 @@
 """Terminal input and output prompts."""
-from __future__ import print_function

 from pygments.token import Token
 import sys
diff --git a/IPython/terminal/pt_inputhooks/glut.py b/IPython/terminal/pt_inputhooks/glut.py
index f336e68..f6d54a5 100644
--- a/IPython/terminal/pt_inputhooks/glut.py
+++ b/IPython/terminal/pt_inputhooks/glut.py
@@ -1,6 +1,5 @@
 """GLUT Input hook for interactive use with prompt_toolkit
 """
-from __future__ import print_function


 # GLUT is quite an old library and it is difficult to ensure proper
diff --git a/IPython/terminal/pt_inputhooks/gtk.py b/IPython/terminal/pt_inputhooks/gtk.py
index 8f27e12..6e246ba 100644
--- a/IPython/terminal/pt_inputhooks/gtk.py
+++ b/IPython/terminal/pt_inputhooks/gtk.py
@@ -35,7 +35,6 @@ PyGTK input hook for prompt_toolkit.
 Listens on the pipe prompt_toolkit sets up for a notification that it should
 return control to the terminal event loop.
 """
-from __future__ import absolute_import

 import gtk, gobject
diff --git a/IPython/terminal/pt_inputhooks/pyglet.py b/IPython/terminal/pt_inputhooks/pyglet.py
index 1c5ec44..a063873 100644
--- a/IPython/terminal/pt_inputhooks/pyglet.py
+++ b/IPython/terminal/pt_inputhooks/pyglet.py
@@ -1,6 +1,5 @@
 """Enable pyglet to be used interacively with prompt_toolkit
 """
-from __future__ import absolute_import

 import os
 import sys
diff --git a/IPython/terminal/pt_inputhooks/wx.py b/IPython/terminal/pt_inputhooks/wx.py
index 4371b21..2f416d1 100644
--- a/IPython/terminal/pt_inputhooks/wx.py
+++ b/IPython/terminal/pt_inputhooks/wx.py
@@ -1,6 +1,5 @@
 """Enable wxPython to be used interacively in prompt_toolkit
 """
-from __future__ import absolute_import

 import sys
 import signal
diff --git a/IPython/terminal/tests/test_embed.py b/IPython/terminal/tests/test_embed.py
index 9f164ea..5d75ad0 100644
--- a/IPython/terminal/tests/test_embed.py
+++ b/IPython/terminal/tests/test_embed.py
@@ -24,7 +24,6 @@ from IPython.testing.decorators import skip_win32

 _sample_embed = b"""
-from __future__ import print_function
 import IPython

 a = 3
@@ -74,8 +73,6 @@ def test_nest_embed():
     child = pexpect.spawn(sys.executable, ['-m', 'IPython', '--colors=nocolor'],
                           env=env)
     child.expect(ipy_prompt)
-    child.sendline("from __future__ import print_function")
-    child.expect(ipy_prompt)
     child.sendline("import IPython")
     child.expect(ipy_prompt)
     child.sendline("ip0 = get_ipython()")
diff --git a/IPython/testing/decorators.py b/IPython/testing/decorators.py
index 6a62fd6..9fcf492 100644
--- a/IPython/testing/decorators.py
+++ b/IPython/testing/decorators.py
@@ -334,12 +334,6 @@ skipif_not_sympy = skip_without('sympy')

 skip_known_failure = knownfailureif(True,'This test is known to fail')

-known_failure_py3 = knownfailureif(sys.version_info[0] >= 3,
-                                   'This test is known to fail on Python 3.')
-
-py2_only = skipif(PY3, "This test only runs on Python 2.")
-py3_only = skipif(PY2, "This test only runs on Python 3.")
-
 # A null 'decorator', useful to make more readable code that needs to pick
 # between different decorators based on OS or other conditions
 null_deco = lambda f: f
diff --git a/IPython/testing/globalipapp.py b/IPython/testing/globalipapp.py
index 3983393..2f065da 100644
--- a/IPython/testing/globalipapp.py
+++ b/IPython/testing/globalipapp.py
@@ -5,8 +5,6 @@ modifications IPython makes to system behavior don't send the doctest machinery
 into a fit. This code should be considered a gross hack, but it gets the job
 done.
 """
-from __future__ import absolute_import
-from __future__ import print_function

 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.
diff --git a/IPython/testing/iptest.py b/IPython/testing/iptest.py
index b2f19a4..fc462b3 100644
--- a/IPython/testing/iptest.py
+++ b/IPython/testing/iptest.py
@@ -17,7 +17,6 @@ itself from the command line. There are two ways of running this script:
 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.

-from __future__ import print_function

 import glob
 from io import BytesIO
@@ -45,8 +44,7 @@ pjoin = path.join
 # Enable printing all warnings raise by IPython's modules
 warnings.filterwarnings('ignore', message='.*Matplotlib is building the font cache.*', category=UserWarning, module='.*')
-if sys.version_info > (3,0):
-    warnings.filterwarnings('error', message='.*', category=ResourceWarning, module='.*')
+warnings.filterwarnings('error', message='.*', category=ResourceWarning, module='.*')
 warnings.filterwarnings('error', message=".*{'config': True}.*", category=DeprecationWarning, module='IPy.*')
 warnings.filterwarnings('default', message='.*', category=Warning, module='IPy.*')

diff --git a/IPython/testing/iptestcontroller.py b/IPython/testing/iptestcontroller.py
index 95aa06e..05183a5 100644
--- a/IPython/testing/iptestcontroller.py
+++ b/IPython/testing/iptestcontroller.py
@@ -9,7 +9,6 @@ test suite.
 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.

-from __future__ import print_function

 import argparse
 import json
diff --git a/IPython/testing/ipunittest.py b/IPython/testing/ipunittest.py
index ae134f2..2aeaab0 100644
--- a/IPython/testing/ipunittest.py
+++ b/IPython/testing/ipunittest.py
@@ -22,7 +22,6 @@ Authors

 - Fernando Perez
 """
-from __future__ import absolute_import

 #-----------------------------------------------------------------------------
 # Copyright (C) 2009-2011 The IPython Development Team
diff --git a/IPython/testing/plugin/dtexample.py b/IPython/testing/plugin/dtexample.py
index 5e02629..d73cd24 100644
--- a/IPython/testing/plugin/dtexample.py
+++ b/IPython/testing/plugin/dtexample.py
@@ -3,7 +3,6 @@ This file just contains doctests both using plain python and IPython prompts.

 All tests should be loaded by nose.
 """
-from __future__ import print_function

 def pyfunc():
     """Some pure python tests...
diff --git a/IPython/testing/plugin/iptest.py b/IPython/testing/plugin/iptest.py
index a75cab9..e24e22a 100755
--- a/IPython/testing/plugin/iptest.py
+++ b/IPython/testing/plugin/iptest.py
@@ -1,7 +1,6 @@
 #!/usr/bin/env python
 """Nose-based test runner.
 """
-from __future__ import print_function

 from nose.core import main
 from nose.plugins.builtin import plugins
diff --git a/IPython/testing/plugin/show_refs.py b/IPython/testing/plugin/show_refs.py
index ef7dd15..b2c70ad 100644
--- a/IPython/testing/plugin/show_refs.py
+++ b/IPython/testing/plugin/show_refs.py
@@ -2,7 +2,6 @@

 This is used by a companion test case.
 """
-from __future__ import print_function

 import gc
diff --git a/IPython/testing/plugin/simple.py b/IPython/testing/plugin/simple.py
index a7d33d9..3861977 100644
--- a/IPython/testing/plugin/simple.py
+++ b/IPython/testing/plugin/simple.py
@@ -3,7 +3,6 @@ This file just contains doctests both using plain python and IPython prompts.
 All tests should be loaded by nose.
 """
-from __future__ import print_function

 def pyfunc():
     """Some pure python tests...
diff --git a/IPython/testing/plugin/simplevars.py b/IPython/testing/plugin/simplevars.py
index 5134c6e..cac0b75 100644
--- a/IPython/testing/plugin/simplevars.py
+++ b/IPython/testing/plugin/simplevars.py
@@ -1,3 +1,2 @@
-from __future__ import print_function
 x = 1
 print('x is:',x)
diff --git a/IPython/testing/skipdoctest.py b/IPython/testing/skipdoctest.py
index 564ca54..b0cf83c 100644
--- a/IPython/testing/skipdoctest.py
+++ b/IPython/testing/skipdoctest.py
@@ -1,26 +1,13 @@
-"""Decorators marks that a doctest should be skipped, for both python 2 and 3.
+"""Decorators marks that a doctest should be skipped.

 The IPython.testing.decorators module triggers various extra imports, including
 numpy and sympy if they're present. Since this decorator is used in core parts
 of IPython, it's in a separate module so that running IPython doesn't trigger
 those imports."""

-#-----------------------------------------------------------------------------
-# Copyright (C) 2009-2011 The IPython Development Team
-#
-# Distributed under the terms of the BSD License. The full license is in
-# the file COPYING, distributed as part of this software.
-#-----------------------------------------------------------------------------
+# Copyright (C) IPython Development Team
+# Distributed under the terms of the Modified BSD License.

-#-----------------------------------------------------------------------------
-# Imports
-#-----------------------------------------------------------------------------
-
-import sys
-
-#-----------------------------------------------------------------------------
-# Decorators
-#-----------------------------------------------------------------------------

 def skip_doctest(f):
     """Decorator - mark a function or method for skipping its doctest.
@@ -30,14 +17,3 @@ def skip_doctest(f):
     etc."""
     f.skip_doctest = True
     return f
-
-
-def skip_doctest_py3(f):
-    """Decorator - skip the doctest under Python 3."""
-    f.skip_doctest = (sys.version_info[0] >= 3)
-    return f
-
-def skip_doctest_py2(f):
-    """Decorator - skip the doctest under Python 3."""
-    f.skip_doctest = (sys.version_info[0] < 3)
-    return f
diff --git a/IPython/testing/tests/test_decorators.py b/IPython/testing/tests/test_decorators.py
index 2046211..ef34625 100644
--- a/IPython/testing/tests/test_decorators.py
+++ b/IPython/testing/tests/test_decorators.py
@@ -1,6 +1,5 @@
 """Tests for the decorators we've created for IPython.
 """
-from __future__ import print_function

 # Module imports
 # Std lib
diff --git a/IPython/testing/tests/test_tools.py b/IPython/testing/tests/test_tools.py
index 9c6db65..4fa5c96 100644
--- a/IPython/testing/tests/test_tools.py
+++ b/IPython/testing/tests/test_tools.py
@@ -13,8 +13,6 @@ Tests for testing.tools
 #-----------------------------------------------------------------------------
 # Imports
 #-----------------------------------------------------------------------------
-from __future__ import with_statement
-from __future__ import print_function

 import os
 import unittest
@@ -110,8 +108,7 @@ class Test_ipexec_validate(unittest.TestCase, tt.TempFileMixin):

     def test_exception_path(self):
         """Test exception path in exception_validate.
""" - self.mktmp("from __future__ import print_function\n" - "import sys\n" + self.mktmp("import sys\n" "print('A')\n" "print('B')\n" "print('C', file=sys.stderr)\n" @@ -123,8 +120,7 @@ class Test_ipexec_validate(unittest.TestCase, tt.TempFileMixin): def test_exception_path2(self): """Test exception path in exception_validate, expecting windows line endings. """ - self.mktmp("from __future__ import print_function\n" - "import sys\n" + self.mktmp("import sys\n" "print('A')\n" "print('B')\n" "print('C', file=sys.stderr)\n" diff --git a/IPython/testing/tools.py b/IPython/testing/tools.py index bf62f30..ec10735 100644 --- a/IPython/testing/tools.py +++ b/IPython/testing/tools.py @@ -5,7 +5,6 @@ Authors - Fernando Perez """ -from __future__ import absolute_import #----------------------------------------------------------------------------- # Copyright (C) 2009 The IPython Development Team diff --git a/IPython/utils/PyColorize.py b/IPython/utils/PyColorize.py index 15e77e3..50ce8e5 100644 --- a/IPython/utils/PyColorize.py +++ b/IPython/utils/PyColorize.py @@ -28,9 +28,6 @@ It shows how to use the built-in keyword, token and tokenize modules to scan Python source code and re-emit it with no changes to its original formatting (which is the hard part). """ -from __future__ import print_function -from __future__ import absolute_import -from __future__ import unicode_literals __all__ = ['ANSICodeColors','Parser'] diff --git a/IPython/utils/_process_posix.py b/IPython/utils/_process_posix.py index ac3a9a0..5907e7d 100644 --- a/IPython/utils/_process_posix.py +++ b/IPython/utils/_process_posix.py @@ -13,7 +13,6 @@ This file is only meant to be imported by process.py, not by end-users. #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- -from __future__ import print_function # Stdlib import errno diff --git a/IPython/utils/_process_win32.py b/IPython/utils/_process_win32.py index 3ac59b2..554cf9f 100644 --- a/IPython/utils/_process_win32.py +++ b/IPython/utils/_process_win32.py @@ -13,7 +13,6 @@ This file is only meant to be imported by process.py, not by end-users. #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- -from __future__ import print_function # stdlib import os diff --git a/IPython/utils/_process_win32_controller.py b/IPython/utils/_process_win32_controller.py index 555eec2..85a342e 100644 --- a/IPython/utils/_process_win32_controller.py +++ b/IPython/utils/_process_win32_controller.py @@ -10,7 +10,6 @@ This file is meant to be used by process.py # the file COPYING, distributed as part of this software. #----------------------------------------------------------------------------- -from __future__ import print_function # stdlib import os, sys, threading diff --git a/IPython/utils/_signatures.py b/IPython/utils/_signatures.py index 3b53e89..27c6385 100644 --- a/IPython/utils/_signatures.py +++ b/IPython/utils/_signatures.py @@ -16,7 +16,6 @@ modified to be compatible with Python 2.7 and 3.2+. # the file COPYING, distributed as part of this software. 
 #-----------------------------------------------------------------------------
-from __future__ import absolute_import, division, print_function

 import itertools
 import functools
 import re
diff --git a/IPython/utils/_tokenize_py2.py b/IPython/utils/_tokenize_py2.py
deleted file mode 100644
index 195df96..0000000
--- a/IPython/utils/_tokenize_py2.py
+++ /dev/null
@@ -1,439 +0,0 @@
-"""Patched version of standard library tokenize, to deal with various bugs.
-
-Patches
-
-- Relevant parts of Gareth Rees' patch for Python issue #12691 (untokenizing),
-  manually applied.
-- Newlines in comments and blank lines should be either NL or NEWLINE, depending
-  on whether they are in a multi-line statement. Filed as Python issue #17061.
-
--------------------------------------------------------------------------------
-Tokenization help for Python programs.
-
-generate_tokens(readline) is a generator that breaks a stream of
-text into Python tokens. It accepts a readline-like method which is called
-repeatedly to get the next line of input (or "" for EOF). It generates
-5-tuples with these members:
-
-    the token type (see token.py)
-    the token (a string)
-    the starting (row, column) indices of the token (a 2-tuple of ints)
-    the ending (row, column) indices of the token (a 2-tuple of ints)
-    the original line (string)
-
-It is designed to match the working of the Python tokenizer exactly, except
-that it produces COMMENT tokens for comments and gives type OP for all
-operators
-
-Older entry points
-    tokenize_loop(readline, tokeneater)
-    tokenize(readline, tokeneater=printtoken)
-are the same, except instead of generating tokens, tokeneater is a callback
-function to which the 5 fields described above are passed as 5 arguments,
-each time a new token is found."""
-from __future__ import print_function
-
-__author__ = 'Ka-Ping Yee '
-__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
-               'Skip Montanaro, Raymond Hettinger')
-
-import string, re
-from token import *
-
-import token
-__all__ = [x for x in dir(token) if not x.startswith("_")]
-__all__ += ["COMMENT", "tokenize", "generate_tokens", "NL", "untokenize"]
-del x
-del token
-
-__all__ += ["TokenError"]
-
-COMMENT = N_TOKENS
-tok_name[COMMENT] = 'COMMENT'
-NL = N_TOKENS + 1
-tok_name[NL] = 'NL'
-N_TOKENS += 2
-
-def group(*choices): return '(' + '|'.join(choices) + ')'
-def any(*choices): return group(*choices) + '*'
-def maybe(*choices): return group(*choices) + '?'
-
-Whitespace = r'[ \f\t]*'
-Comment = r'#[^\r\n]*'
-Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
-Name = r'[a-zA-Z_]\w*'
-
-Hexnumber = r'0[xX][\da-fA-F]+[lL]?'
-Octnumber = r'(0[oO][0-7]+)|(0[0-7]*)[lL]?'
-Binnumber = r'0[bB][01]+[lL]?'
-Decnumber = r'[1-9]\d*[lL]?'
-Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
-Exponent = r'[eE][-+]?\d+'
-Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
-Expfloat = r'\d+' + Exponent
-Floatnumber = group(Pointfloat, Expfloat)
-Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
-Number = group(Imagnumber, Floatnumber, Intnumber)
-
-# Tail end of ' string.
-Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
-# Tail end of " string.
-Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
-# Tail end of ''' string.
-Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
-# Tail end of """ string.
-Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
-Triple = group("[uUbB]?[rR]?'''", '[uUbB]?[rR]?"""')
-# Single-line ' or " string.
-String = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'", - r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"') - -# Because of leftmost-then-longest match semantics, be sure to put the -# longest operators first (e.g., if = came before ==, == would get -# recognized as two instances of =). -Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=", - r"//=?", - r"[+\-*/%&|^=<>]=?", - r"~") - -Bracket = '[][(){}]' -Special = group(r'\r?\n', r'[:;.,`@]') -Funny = group(Operator, Bracket, Special) - -PlainToken = group(Number, Funny, String, Name) -Token = Ignore + PlainToken - -# First (or only) line of ' or " string. -ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" + - group("'", r'\\\r?\n'), - r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' + - group('"', r'\\\r?\n')) -PseudoExtras = group(r'\\\r?\n', Comment, Triple) -PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) - -tokenprog, pseudoprog, single3prog, double3prog = map( - re.compile, (Token, PseudoToken, Single3, Double3)) -endprogs = {"'": re.compile(Single), '"': re.compile(Double), - "'''": single3prog, '"""': double3prog, - "r'''": single3prog, 'r"""': double3prog, - "u'''": single3prog, 'u"""': double3prog, - "ur'''": single3prog, 'ur"""': double3prog, - "R'''": single3prog, 'R"""': double3prog, - "U'''": single3prog, 'U"""': double3prog, - "uR'''": single3prog, 'uR"""': double3prog, - "Ur'''": single3prog, 'Ur"""': double3prog, - "UR'''": single3prog, 'UR"""': double3prog, - "b'''": single3prog, 'b"""': double3prog, - "br'''": single3prog, 'br"""': double3prog, - "B'''": single3prog, 'B"""': double3prog, - "bR'''": single3prog, 'bR"""': double3prog, - "Br'''": single3prog, 'Br"""': double3prog, - "BR'''": single3prog, 'BR"""': double3prog, - 'r': None, 'R': None, 'u': None, 'U': None, - 'b': None, 'B': None} - -triple_quoted = {} -for t in ("'''", '"""', - "r'''", 'r"""', "R'''", 'R"""', - "u'''", 'u"""', "U'''", 'U"""', - "ur'''", 'ur"""', "Ur'''", 'Ur"""', - "uR'''", 'uR"""', "UR'''", 'UR"""', - "b'''", 'b"""', "B'''", 'B"""', - "br'''", 'br"""', "Br'''", 'Br"""', - "bR'''", 'bR"""', "BR'''", 'BR"""'): - triple_quoted[t] = t -single_quoted = {} -for t in ("'", '"', - "r'", 'r"', "R'", 'R"', - "u'", 'u"', "U'", 'U"', - "ur'", 'ur"', "Ur'", 'Ur"', - "uR'", 'uR"', "UR'", 'UR"', - "b'", 'b"', "B'", 'B"', - "br'", 'br"', "Br'", 'Br"', - "bR'", 'bR"', "BR'", 'BR"' ): - single_quoted[t] = t - -tabsize = 8 - -class TokenError(Exception): pass - -class StopTokenizing(Exception): pass - -def printtoken(type, token, srow_scol, erow_ecol, line): # for testing - srow, scol = srow_scol - erow, ecol = erow_ecol - print("%d,%d-%d,%d:\t%s\t%s" % \ - (srow, scol, erow, ecol, tok_name[type], repr(token))) - -def tokenize(readline, tokeneater=printtoken): - """ - The tokenize() function accepts two parameters: one representing the - input stream, and one providing an output mechanism for tokenize(). - - The first parameter, readline, must be a callable object which provides - the same interface as the readline() method of built-in file objects. - Each call to the function should return one line of input as a string. - - The second parameter, tokeneater, must also be a callable object. It is - called once for each token, with five arguments, corresponding to the - tuples generated by generate_tokens(). 
- """ - try: - tokenize_loop(readline, tokeneater) - except StopTokenizing: - pass - -# backwards compatible interface -def tokenize_loop(readline, tokeneater): - for token_info in generate_tokens(readline): - tokeneater(*token_info) - -class Untokenizer: - - def __init__(self): - self.tokens = [] - self.prev_row = 1 - self.prev_col = 0 - - def add_whitespace(self, start): - row, col = start - assert row >= self.prev_row - col_offset = col - self.prev_col - if col_offset > 0: - self.tokens.append(" " * col_offset) - elif row > self.prev_row and tok_type not in (NEWLINE, NL, ENDMARKER): - # Line was backslash-continued - self.tokens.append(" ") - - def untokenize(self, tokens): - iterable = iter(tokens) - for t in iterable: - if len(t) == 2: - self.compat(t, iterable) - break - tok_type, token, start, end = t[:4] - self.add_whitespace(start) - self.tokens.append(token) - self.prev_row, self.prev_col = end - if tok_type in (NEWLINE, NL): - self.prev_row += 1 - self.prev_col = 0 - return "".join(self.tokens) - - def compat(self, token, iterable): - # This import is here to avoid problems when the itertools - # module is not built yet and tokenize is imported. - from itertools import chain - startline = False - prevstring = False - indents = [] - toks_append = self.tokens.append - for tok in chain([token], iterable): - toknum, tokval = tok[:2] - - if toknum in (NAME, NUMBER): - tokval += ' ' - - # Insert a space between two consecutive strings - if toknum == STRING: - if prevstring: - tokval = ' ' + tokval - prevstring = True - else: - prevstring = False - - if toknum == INDENT: - indents.append(tokval) - continue - elif toknum == DEDENT: - indents.pop() - continue - elif toknum in (NEWLINE, NL): - startline = True - elif startline and indents: - toks_append(indents[-1]) - startline = False - toks_append(tokval) - -def untokenize(iterable): - """Transform tokens back into Python source code. - - Each element returned by the iterable must be a token sequence - with at least two elements, a token number and token value. If - only two tokens are passed, the resulting output is poor. - - Round-trip invariant for full input: - Untokenized source will match input source exactly - - Round-trip invariant for limited intput: - # Output text will tokenize the back to the input - t1 = [tok[:2] for tok in generate_tokens(f.readline)] - newcode = untokenize(t1) - readline = iter(newcode.splitlines(1)).next - t2 = [tok[:2] for tok in generate_tokens(readline)] - assert t1 == t2 - """ - ut = Untokenizer() - return ut.untokenize(iterable) - -def generate_tokens(readline): - """ - The generate_tokens() generator requires one argment, readline, which - must be a callable object which provides the same interface as the - readline() method of built-in file objects. Each call to the function - should return one line of input as a string. Alternately, readline - can be a callable function terminating with StopIteration: - readline = open(myfile).next # Example of alternate readline - - The generator produces 5-tuples with these members: the token type; the - token string; a 2-tuple (srow, scol) of ints specifying the row and - column where the token begins in the source; a 2-tuple (erow, ecol) of - ints specifying the row and column where the token ends in the source; - and the line on which the token was found. The line passed is the - logical line; continuation lines are included. 
- """ - lnum = parenlev = continued = 0 - namechars, numchars = string.ascii_letters + '_', '0123456789' - contstr, needcont = '', 0 - contline = None - indents = [0] - - while 1: # loop over lines in stream - try: - line = readline() - except StopIteration: - line = '' - lnum += 1 - pos, max = 0, len(line) - - if contstr: # continued string - if not line: - raise TokenError("EOF in multi-line string", strstart) - endmatch = endprog.match(line) - if endmatch: - pos = end = endmatch.end(0) - yield (STRING, contstr + line[:end], - strstart, (lnum, end), contline + line) - contstr, needcont = '', 0 - contline = None - elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': - yield (ERRORTOKEN, contstr + line, - strstart, (lnum, len(line)), contline) - contstr = '' - contline = None - continue - else: - contstr = contstr + line - contline = contline + line - continue - - elif parenlev == 0 and not continued: # new statement - if not line: break - column = 0 - while pos < max: # measure leading whitespace - if line[pos] == ' ': - column += 1 - elif line[pos] == '\t': - column = (column//tabsize + 1)*tabsize - elif line[pos] == '\f': - column = 0 - else: - break - pos += 1 - if pos == max: - break - - if line[pos] in '#\r\n': # skip comments or blank lines - if line[pos] == '#': - comment_token = line[pos:].rstrip('\r\n') - nl_pos = pos + len(comment_token) - yield (COMMENT, comment_token, - (lnum, pos), (lnum, pos + len(comment_token)), line) - yield (NEWLINE, line[nl_pos:], - (lnum, nl_pos), (lnum, len(line)), line) - else: - yield (NEWLINE, line[pos:], - (lnum, pos), (lnum, len(line)), line) - continue - - if column > indents[-1]: # count indents or dedents - indents.append(column) - yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line) - while column < indents[-1]: - if column not in indents: - raise IndentationError( - "unindent does not match any outer indentation level", - ("", lnum, pos, line)) - indents = indents[:-1] - yield (DEDENT, '', (lnum, pos), (lnum, pos), line) - - else: # continued statement - if not line: - raise TokenError("EOF in multi-line statement", (lnum, 0)) - continued = 0 - - while pos < max: - pseudomatch = pseudoprog.match(line, pos) - if pseudomatch: # scan for tokens - start, end = pseudomatch.span(1) - spos, epos, pos = (lnum, start), (lnum, end), end - token, initial = line[start:end], line[start] - - if initial in numchars or \ - (initial == '.' 
and token != '.'): # ordinary number - yield (NUMBER, token, spos, epos, line) - elif initial in '\r\n': - yield (NL if parenlev > 0 else NEWLINE, - token, spos, epos, line) - elif initial == '#': - assert not token.endswith("\n") - yield (COMMENT, token, spos, epos, line) - elif token in triple_quoted: - endprog = endprogs[token] - endmatch = endprog.match(line, pos) - if endmatch: # all on one line - pos = endmatch.end(0) - token = line[start:pos] - yield (STRING, token, spos, (lnum, pos), line) - else: - strstart = (lnum, start) # multiple lines - contstr = line[start:] - contline = line - break - elif initial in single_quoted or \ - token[:2] in single_quoted or \ - token[:3] in single_quoted: - if token[-1] == '\n': # continued string - strstart = (lnum, start) - endprog = (endprogs[initial] or endprogs[token[1]] or - endprogs[token[2]]) - contstr, needcont = line[start:], 1 - contline = line - break - else: # ordinary string - yield (STRING, token, spos, epos, line) - elif initial in namechars: # ordinary name - yield (NAME, token, spos, epos, line) - elif initial == '\\': # continued stmt - continued = 1 - else: - if initial in '([{': - parenlev += 1 - elif initial in ')]}': - parenlev -= 1 - yield (OP, token, spos, epos, line) - else: - yield (ERRORTOKEN, line[pos], - (lnum, pos), (lnum, pos+1), line) - pos += 1 - - for indent in indents[1:]: # pop remaining indent levels - yield (DEDENT, '', (lnum, 0), (lnum, 0), '') - yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '') - -if __name__ == '__main__': # testing - import sys - if len(sys.argv) > 1: - tokenize(open(sys.argv[1]).readline) - else: - tokenize(sys.stdin.readline) diff --git a/IPython/utils/_tokenize_py3.py b/IPython/utils/_tokenize_py3.py deleted file mode 100644 index ee1fd9e..0000000 --- a/IPython/utils/_tokenize_py3.py +++ /dev/null @@ -1,595 +0,0 @@ -"""Patched version of standard library tokenize, to deal with various bugs. - -Based on Python 3.2 code. - -Patches: - -- Gareth Rees' patch for Python issue #12691 (untokenizing) - - Except we don't encode the output of untokenize - - Python 2 compatible syntax, so that it can be byte-compiled at installation -- Newlines in comments and blank lines should be either NL or NEWLINE, depending - on whether they are in a multi-line statement. Filed as Python issue #17061. -- Export generate_tokens & TokenError -- u and rb literals are allowed under Python 3.3 and above. - ------------------------------------------------------------------------------- -Tokenization help for Python programs. - -tokenize(readline) is a generator that breaks a stream of bytes into -Python tokens. It decodes the bytes according to PEP-0263 for -determining source file encoding. - -It accepts a readline-like method which is called repeatedly to get the -next line of input (or b"" for EOF). It generates 5-tuples with these -members: - - the token type (see token.py) - the token (a string) - the starting (row, column) indices of the token (a 2-tuple of ints) - the ending (row, column) indices of the token (a 2-tuple of ints) - the original line (string) - -It is designed to match the working of the Python tokenizer exactly, except -that it produces COMMENT tokens for comments and gives type OP for all -operators. Additionally, all token lists start with an ENCODING token -which tells you which encoding was used to decode the bytes stream. 
-""" -from __future__ import absolute_import - -__author__ = 'Ka-Ping Yee ' -__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, ' - 'Skip Montanaro, Raymond Hettinger, Trent Nelson, ' - 'Michael Foord') -import builtins -import re -import sys -from token import * -from codecs import lookup, BOM_UTF8 -import collections -from io import TextIOWrapper -cookie_re = re.compile("coding[:=]\s*([-\w.]+)") - -import token -__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding", - "NL", "untokenize", "ENCODING", "TokenInfo"] -del token - -__all__ += ["generate_tokens", "TokenError"] - -COMMENT = N_TOKENS -tok_name[COMMENT] = 'COMMENT' -NL = N_TOKENS + 1 -tok_name[NL] = 'NL' -ENCODING = N_TOKENS + 2 -tok_name[ENCODING] = 'ENCODING' -N_TOKENS += 3 - -class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')): - def __repr__(self): - annotated_type = '%d (%s)' % (self.type, tok_name[self.type]) - return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' % - self._replace(type=annotated_type)) - -def group(*choices): return '(' + '|'.join(choices) + ')' -def any(*choices): return group(*choices) + '*' -def maybe(*choices): return group(*choices) + '?' - -# Note: we use unicode matching for names ("\w") but ascii matching for -# number literals. -Whitespace = r'[ \f\t]*' -Comment = r'#[^\r\n]*' -Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment) -Name = r'\w+' - -Hexnumber = r'0[xX][0-9a-fA-F]+' -Binnumber = r'0[bB][01]+' -Octnumber = r'0[oO][0-7]+' -Decnumber = r'(?:0+|[1-9][0-9]*)' -Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber) -Exponent = r'[eE][-+]?[0-9]+' -Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent) -Expfloat = r'[0-9]+' + Exponent -Floatnumber = group(Pointfloat, Expfloat) -Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]') -Number = group(Imagnumber, Floatnumber, Intnumber) - -if sys.version_info.minor >= 3: - StringPrefix = r'(?:[bB][rR]?|[rR][bB]?|[uU])?' -else: - StringPrefix = r'(?:[bB]?[rR]?)?' - -# Tail end of ' string. -Single = r"[^'\\]*(?:\\.[^'\\]*)*'" -# Tail end of " string. -Double = r'[^"\\]*(?:\\.[^"\\]*)*"' -# Tail end of ''' string. -Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" -# Tail end of """ string. -Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' -Triple = group(StringPrefix + "'''", StringPrefix + '"""') -# Single-line ' or " string. -String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'", - StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"') - -# Because of leftmost-then-longest match semantics, be sure to put the -# longest operators first (e.g., if = came before ==, == would get -# recognized as two instances of =). -Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=", - r"//=?", r"->", - r"[+\-*/%&|^=<>]=?", - r"~") - -Bracket = '[][(){}]' -Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]') -Funny = group(Operator, Bracket, Special) - -PlainToken = group(Number, Funny, String, Name) -Token = Ignore + PlainToken - -# First (or only) line of ' or " string. 
-ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" + - group("'", r'\\\r?\n'), - StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' + - group('"', r'\\\r?\n')) -PseudoExtras = group(r'\\\r?\n', Comment, Triple) -PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) - -def _compile(expr): - return re.compile(expr, re.UNICODE) - -tokenprog, pseudoprog, single3prog, double3prog = map( - _compile, (Token, PseudoToken, Single3, Double3)) -endprogs = {"'": _compile(Single), '"': _compile(Double), - "'''": single3prog, '"""': double3prog, - "r'''": single3prog, 'r"""': double3prog, - "b'''": single3prog, 'b"""': double3prog, - "R'''": single3prog, 'R"""': double3prog, - "B'''": single3prog, 'B"""': double3prog, - "br'''": single3prog, 'br"""': double3prog, - "bR'''": single3prog, 'bR"""': double3prog, - "Br'''": single3prog, 'Br"""': double3prog, - "BR'''": single3prog, 'BR"""': double3prog, - 'r': None, 'R': None, 'b': None, 'B': None} - -triple_quoted = {} -for t in ("'''", '"""', - "r'''", 'r"""', "R'''", 'R"""', - "b'''", 'b"""', "B'''", 'B"""', - "br'''", 'br"""', "Br'''", 'Br"""', - "bR'''", 'bR"""', "BR'''", 'BR"""'): - triple_quoted[t] = t -single_quoted = {} -for t in ("'", '"', - "r'", 'r"', "R'", 'R"', - "b'", 'b"', "B'", 'B"', - "br'", 'br"', "Br'", 'Br"', - "bR'", 'bR"', "BR'", 'BR"' ): - single_quoted[t] = t - -if sys.version_info.minor >= 3: - # Python 3.3 - for _prefix in ['rb', 'rB', 'Rb', 'RB', 'u', 'U']: - _t2 = _prefix+'"""' - endprogs[_t2] = double3prog - triple_quoted[_t2] = _t2 - _t1 = _prefix + "'''" - endprogs[_t1] = single3prog - triple_quoted[_t1] = _t1 - single_quoted[_prefix+'"'] = _prefix+'"' - single_quoted[_prefix+"'"] = _prefix+"'" - del _prefix, _t2, _t1 - endprogs['u'] = None - endprogs['U'] = None - -del _compile - -tabsize = 8 - -class TokenError(Exception): pass - -class StopTokenizing(Exception): pass - - -class Untokenizer: - - def __init__(self): - self.tokens = [] - self.prev_row = 1 - self.prev_col = 0 - self.encoding = 'utf-8' - - def add_whitespace(self, tok_type, start): - row, col = start - assert row >= self.prev_row - col_offset = col - self.prev_col - if col_offset > 0: - self.tokens.append(" " * col_offset) - elif row > self.prev_row and tok_type not in (NEWLINE, NL, ENDMARKER): - # Line was backslash-continued. - self.tokens.append(" ") - - def untokenize(self, tokens): - iterable = iter(tokens) - for t in iterable: - if len(t) == 2: - self.compat(t, iterable) - break - tok_type, token, start, end = t[:4] - if tok_type == ENCODING: - self.encoding = token - continue - self.add_whitespace(tok_type, start) - self.tokens.append(token) - self.prev_row, self.prev_col = end - if tok_type in (NEWLINE, NL): - self.prev_row += 1 - self.prev_col = 0 - return "".join(self.tokens) - - def compat(self, token, iterable): - # This import is here to avoid problems when the itertools - # module is not built yet and tokenize is imported. 
- from itertools import chain - startline = False - prevstring = False - indents = [] - toks_append = self.tokens.append - - for tok in chain([token], iterable): - toknum, tokval = tok[:2] - if toknum == ENCODING: - self.encoding = tokval - continue - - if toknum in (NAME, NUMBER): - tokval += ' ' - - # Insert a space between two consecutive strings - if toknum == STRING: - if prevstring: - tokval = ' ' + tokval - prevstring = True - else: - prevstring = False - - if toknum == INDENT: - indents.append(tokval) - continue - elif toknum == DEDENT: - indents.pop() - continue - elif toknum in (NEWLINE, NL): - startline = True - elif startline and indents: - toks_append(indents[-1]) - startline = False - toks_append(tokval) - - -def untokenize(tokens): - """ - Convert ``tokens`` (an iterable) back into Python source code. Return - a bytes object, encoded using the encoding specified by the last - ENCODING token in ``tokens``, or UTF-8 if no ENCODING token is found. - - The result is guaranteed to tokenize back to match the input so that - the conversion is lossless and round-trips are assured. The - guarantee applies only to the token type and token string as the - spacing between tokens (column positions) may change. - - :func:`untokenize` has two modes. If the input tokens are sequences - of length 2 (``type``, ``string``) then spaces are added as necessary to - preserve the round-trip property. - - If the input tokens are sequences of length 4 or more (``type``, - ``string``, ``start``, ``end``), as returned by :func:`tokenize`, then - spaces are added so that each token appears in the result at the - position indicated by ``start`` and ``end``, if possible. - """ - return Untokenizer().untokenize(tokens) - - -def _get_normal_name(orig_enc): - """Imitates get_normal_name in tokenizer.c.""" - # Only care about the first 12 characters. - enc = orig_enc[:12].lower().replace("_", "-") - if enc == "utf-8" or enc.startswith("utf-8-"): - return "utf-8" - if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ - enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): - return "iso-8859-1" - return orig_enc - -def detect_encoding(readline): - """ - The detect_encoding() function is used to detect the encoding that should - be used to decode a Python source file. It requires one argment, readline, - in the same way as the tokenize() generator. - - It will call readline a maximum of twice, and return the encoding used - (as a string) and a list of any lines (left as bytes) it has read in. - - It detects the encoding from the presence of a utf-8 bom or an encoding - cookie as specified in pep-0263. If both a bom and a cookie are present, - but disagree, a SyntaxError will be raised. If the encoding cookie is an - invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, - 'utf-8-sig' is returned. - - If no encoding is specified, then the default of 'utf-8' will be returned. - """ - bom_found = False - encoding = None - default = 'utf-8' - def read_or_stop(): - try: - return readline() - except StopIteration: - return b'' - - def find_cookie(line): - try: - # Decode as UTF-8. Either the line is an encoding declaration, - # in which case it should be pure ASCII, or it must be UTF-8 - # per default encoding. 
- line_string = line.decode('utf-8') - except UnicodeDecodeError: - raise SyntaxError("invalid or missing encoding declaration") - - matches = cookie_re.findall(line_string) - if not matches: - return None - encoding = _get_normal_name(matches[0]) - try: - codec = lookup(encoding) - except LookupError: - # This behaviour mimics the Python interpreter - raise SyntaxError("unknown encoding: " + encoding) - - if bom_found: - if encoding != 'utf-8': - # This behaviour mimics the Python interpreter - raise SyntaxError('encoding problem: utf-8') - encoding += '-sig' - return encoding - - first = read_or_stop() - if first.startswith(BOM_UTF8): - bom_found = True - first = first[3:] - default = 'utf-8-sig' - if not first: - return default, [] - - encoding = find_cookie(first) - if encoding: - return encoding, [first] - - second = read_or_stop() - if not second: - return default, [first] - - encoding = find_cookie(second) - if encoding: - return encoding, [first, second] - - return default, [first, second] - - -def open(filename): - """Open a file in read only mode using the encoding detected by - detect_encoding(). - """ - buffer = builtins.open(filename, 'rb') - encoding, lines = detect_encoding(buffer.readline) - buffer.seek(0) - text = TextIOWrapper(buffer, encoding, line_buffering=True) - text.mode = 'r' - return text - - -def tokenize(readline): - """ - The tokenize() generator requires one argment, readline, which - must be a callable object which provides the same interface as the - readline() method of built-in file objects. Each call to the function - should return one line of input as bytes. Alternately, readline - can be a callable function terminating with StopIteration: - readline = open(myfile, 'rb').__next__ # Example of alternate readline - - The generator produces 5-tuples with these members: the token type; the - token string; a 2-tuple (srow, scol) of ints specifying the row and - column where the token begins in the source; a 2-tuple (erow, ecol) of - ints specifying the row and column where the token ends in the source; - and the line on which the token was found. The line passed is the - logical line; continuation lines are included. - - The first token sequence will always be an ENCODING token - which tells you which encoding was used to decode the bytes stream. - """ - # This import is here to avoid problems when the itertools module is not - # built yet and tokenize is imported. - from itertools import chain, repeat - encoding, consumed = detect_encoding(readline) - rl_gen = iter(readline, b"") - empty = repeat(b"") - return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding) - - -def _tokenize(readline, encoding): - lnum = parenlev = continued = 0 - numchars = '0123456789' - contstr, needcont = '', 0 - contline = None - indents = [0] - - if encoding is not None: - if encoding == "utf-8-sig": - # BOM will already have been stripped. 
- encoding = "utf-8" - yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '') - while True: # loop over lines in stream - try: - line = readline() - except StopIteration: - line = b'' - - if encoding is not None: - line = line.decode(encoding) - lnum += 1 - pos, max = 0, len(line) - - if contstr: # continued string - if not line: - raise TokenError("EOF in multi-line string", strstart) - endmatch = endprog.match(line) - if endmatch: - pos = end = endmatch.end(0) - yield TokenInfo(STRING, contstr + line[:end], - strstart, (lnum, end), contline + line) - contstr, needcont = '', 0 - contline = None - elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': - yield TokenInfo(ERRORTOKEN, contstr + line, - strstart, (lnum, len(line)), contline) - contstr = '' - contline = None - continue - else: - contstr = contstr + line - contline = contline + line - continue - - elif parenlev == 0 and not continued: # new statement - if not line: break - column = 0 - while pos < max: # measure leading whitespace - if line[pos] == ' ': - column += 1 - elif line[pos] == '\t': - column = (column//tabsize + 1)*tabsize - elif line[pos] == '\f': - column = 0 - else: - break - pos += 1 - if pos == max: - break - - if line[pos] in '#\r\n': # skip comments or blank lines - if line[pos] == '#': - comment_token = line[pos:].rstrip('\r\n') - nl_pos = pos + len(comment_token) - yield TokenInfo(COMMENT, comment_token, - (lnum, pos), (lnum, pos + len(comment_token)), line) - yield TokenInfo(NEWLINE, line[nl_pos:], - (lnum, nl_pos), (lnum, len(line)), line) - else: - yield TokenInfo(NEWLINE, line[pos:], - (lnum, pos), (lnum, len(line)), line) - continue - - if column > indents[-1]: # count indents or dedents - indents.append(column) - yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line) - while column < indents[-1]: - if column not in indents: - raise IndentationError( - "unindent does not match any outer indentation level", - ("", lnum, pos, line)) - indents = indents[:-1] - yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line) - - else: # continued statement - if not line: - raise TokenError("EOF in multi-line statement", (lnum, 0)) - continued = 0 - - while pos < max: - pseudomatch = pseudoprog.match(line, pos) - if pseudomatch: # scan for tokens - start, end = pseudomatch.span(1) - spos, epos, pos = (lnum, start), (lnum, end), end - token, initial = line[start:end], line[start] - - if (initial in numchars or # ordinary number - (initial == '.' and token != '.' 
and token != '...')): - yield TokenInfo(NUMBER, token, spos, epos, line) - elif initial in '\r\n': - yield TokenInfo(NL if parenlev > 0 else NEWLINE, - token, spos, epos, line) - elif initial == '#': - assert not token.endswith("\n") - yield TokenInfo(COMMENT, token, spos, epos, line) - elif token in triple_quoted: - endprog = endprogs[token] - endmatch = endprog.match(line, pos) - if endmatch: # all on one line - pos = endmatch.end(0) - token = line[start:pos] - yield TokenInfo(STRING, token, spos, (lnum, pos), line) - else: - strstart = (lnum, start) # multiple lines - contstr = line[start:] - contline = line - break - elif initial in single_quoted or \ - token[:2] in single_quoted or \ - token[:3] in single_quoted: - if token[-1] == '\n': # continued string - strstart = (lnum, start) - endprog = (endprogs[initial] or endprogs[token[1]] or - endprogs[token[2]]) - contstr, needcont = line[start:], 1 - contline = line - break - else: # ordinary string - yield TokenInfo(STRING, token, spos, epos, line) - elif initial.isidentifier(): # ordinary name - yield TokenInfo(NAME, token, spos, epos, line) - elif initial == '\\': # continued stmt - continued = 1 - else: - if initial in '([{': - parenlev += 1 - elif initial in ')]}': - parenlev -= 1 - yield TokenInfo(OP, token, spos, epos, line) - else: - yield TokenInfo(ERRORTOKEN, line[pos], - (lnum, pos), (lnum, pos+1), line) - pos += 1 - - for indent in indents[1:]: # pop remaining indent levels - yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '') - yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '') - - -# An undocumented, backwards compatible, API for all the places in the standard -# library that expect to be able to use tokenize with strings -def generate_tokens(readline): - return _tokenize(readline, None) - -if __name__ == "__main__": - # Quick sanity check - s = b'''def parseline(self, line): - """Parse the line into a command name and a string containing - the arguments. Returns a tuple containing (command, args, line). - 'command' and 'args' may be None if the line couldn't be parsed. - """ - line = line.strip() - if not line: - return None, None, line - elif line[0] == '?': - line = 'help ' + line[1:] - elif line[0] == '!': - if hasattr(self, 'do_shell'): - line = 'shell ' + line[1:] - else: - return None, None, line - i, n = 0, len(line) - while i < n and line[i] in self.identchars: i = i+1 - cmd, arg = line[:i], line[i:].strip() - return cmd, arg, line - ''' - for tok in tokenize(iter(s.splitlines()).__next__): - print(tok) diff --git a/IPython/utils/capture.py b/IPython/utils/capture.py index d129c03..64044ce 100644 --- a/IPython/utils/capture.py +++ b/IPython/utils/capture.py @@ -4,7 +4,6 @@ # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. -from __future__ import print_function, absolute_import import sys diff --git a/IPython/utils/colorable.py b/IPython/utils/colorable.py index 611f19f..1e3caef 100644 --- a/IPython/utils/colorable.py +++ b/IPython/utils/colorable.py @@ -4,7 +4,6 @@ # Distributed under the terms of the BSD License. The full license is in # the file COPYING, distributed as part of this software. 
#***************************************************************************** -from __future__ import absolute_import """ Color managing related utilities diff --git a/IPython/utils/eventful.py b/IPython/utils/eventful.py index fc0f7ae..9f904ae 100644 --- a/IPython/utils/eventful.py +++ b/IPython/utils/eventful.py @@ -1,4 +1,3 @@ -from __future__ import absolute_import from warnings import warn diff --git a/IPython/utils/frame.py b/IPython/utils/frame.py index 76ccc71..60cd642 100644 --- a/IPython/utils/frame.py +++ b/IPython/utils/frame.py @@ -2,7 +2,6 @@ """ Utilities for working with stack frames. """ -from __future__ import print_function #----------------------------------------------------------------------------- # Copyright (C) 2008-2011 The IPython Development Team diff --git a/IPython/utils/io.py b/IPython/utils/io.py index 3eaa680..6e86670 100644 --- a/IPython/utils/io.py +++ b/IPython/utils/io.py @@ -6,8 +6,6 @@ IO related utilities. # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. -from __future__ import print_function -from __future__ import absolute_import import atexit diff --git a/IPython/utils/log.py b/IPython/utils/log.py index 3eb9bda..4c820dd 100644 --- a/IPython/utils/log.py +++ b/IPython/utils/log.py @@ -1,4 +1,3 @@ -from __future__ import absolute_import from warnings import warn diff --git a/IPython/utils/module_paths.py b/IPython/utils/module_paths.py index 45a711c..f984580 100644 --- a/IPython/utils/module_paths.py +++ b/IPython/utils/module_paths.py @@ -23,7 +23,6 @@ path to module and not an open file object as well. #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- -from __future__ import print_function # Stdlib imports import imp diff --git a/IPython/utils/openpy.py b/IPython/utils/openpy.py index 0a7cc0f..1ec22a4 100644 --- a/IPython/utils/openpy.py +++ b/IPython/utils/openpy.py @@ -4,7 +4,6 @@ as per PEP 263. Much of the code is taken from the tokenize module in Python 3.2. """ -from __future__ import absolute_import import io from io import TextIOWrapper, BytesIO diff --git a/IPython/utils/process.py b/IPython/utils/process.py index bdcf8ef..169d3c3 100644 --- a/IPython/utils/process.py +++ b/IPython/utils/process.py @@ -6,7 +6,6 @@ Utilities for working with external processes. # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. -from __future__ import print_function import os import sys diff --git a/IPython/utils/tempdir.py b/IPython/utils/tempdir.py index 951abd6..98681b4 100644 --- a/IPython/utils/tempdir.py +++ b/IPython/utils/tempdir.py @@ -2,7 +2,6 @@ This is copied from the stdlib and will be standard in Python 3.2 and onwards. """ -from __future__ import print_function import os as _os import warnings as _warnings diff --git a/IPython/utils/tests/test_capture.py b/IPython/utils/tests/test_capture.py index 30345d7..4794a80 100644 --- a/IPython/utils/tests/test_capture.py +++ b/IPython/utils/tests/test_capture.py @@ -12,7 +12,6 @@ # Imports #----------------------------------------------------------------------------- -from __future__ import print_function import sys diff --git a/IPython/utils/tests/test_io.py b/IPython/utils/tests/test_io.py index 04c4e9e..5f9e87e 100644 --- a/IPython/utils/tests/test_io.py +++ b/IPython/utils/tests/test_io.py @@ -4,8 +4,6 @@ # Copyright (c) IPython Development Team. 
# Distributed under the terms of the Modified BSD License. -from __future__ import print_function -from __future__ import absolute_import import io as stdlib_io import os.path diff --git a/IPython/utils/tests/test_module_paths.py b/IPython/utils/tests/test_module_paths.py index 98fac78..5b24647 100644 --- a/IPython/utils/tests/test_module_paths.py +++ b/IPython/utils/tests/test_module_paths.py @@ -12,7 +12,6 @@ # Imports #----------------------------------------------------------------------------- -from __future__ import with_statement import os import shutil diff --git a/IPython/utils/tests/test_process.py b/IPython/utils/tests/test_process.py index 7228482..249f90c 100644 --- a/IPython/utils/tests/test_process.py +++ b/IPython/utils/tests/test_process.py @@ -93,8 +93,7 @@ def test_arg_split_win32(): class SubProcessTestCase(TestCase, tt.TempFileMixin): def setUp(self): """Make a valid python temp file.""" - lines = ["from __future__ import print_function", - "import sys", + lines = [ "import sys", "print('on stdout', end='', file=sys.stdout)", "print('on stderr', end='', file=sys.stderr)", "sys.stdout.flush()", diff --git a/IPython/utils/tests/test_text.py b/IPython/utils/tests/test_text.py index 57171a9..0a2f567 100644 --- a/IPython/utils/tests/test_text.py +++ b/IPython/utils/tests/test_text.py @@ -1,6 +1,5 @@ # encoding: utf-8 """Tests for IPython.utils.text""" -from __future__ import print_function #----------------------------------------------------------------------------- # Copyright (C) 2011 The IPython Development Team diff --git a/IPython/utils/text.py b/IPython/utils/text.py index 5ed1a84..f61808f 100644 --- a/IPython/utils/text.py +++ b/IPython/utils/text.py @@ -7,7 +7,6 @@ Inheritance diagram: .. inheritance-diagram:: IPython.utils.text :parts: 3 """ -from __future__ import absolute_import import os import re @@ -20,7 +19,6 @@ except ImportError: # Python 2 backport from pathlib2 import Path -from IPython.testing.skipdoctest import skip_doctest_py3, skip_doctest from IPython.utils import py3compat # datetime.strftime date format for ipython @@ -517,7 +515,6 @@ class EvalFormatter(Formatter): # inside [], so EvalFormatter can handle slicing. Once we only support 3.4 and # above, it should be possible to remove FullEvalFormatter. -@skip_doctest_py3 class FullEvalFormatter(Formatter): """A String Formatter that allows evaluation of simple expressions. @@ -533,13 +530,13 @@ class FullEvalFormatter(Formatter): In [1]: f = FullEvalFormatter() In [2]: f.format('{n//4}', n=8) - Out[2]: u'2' + Out[2]: '2' In [3]: f.format('{list(range(5))[2:4]}') - Out[3]: u'[2, 3]' + Out[3]: '[2, 3]' In [4]: f.format('{3*2}') - Out[4]: u'6' + Out[4]: '6' """ # copied from Formatter._vformat with minor changes to allow eval # and replace the format_spec code with slicing @@ -574,7 +571,6 @@ class FullEvalFormatter(Formatter): return u''.join(py3compat.cast_unicode(s) for s in result) -@skip_doctest_py3 class DollarFormatter(FullEvalFormatter): """Formatter allowing Itpl style $foo replacement, for names and attribute access only. 
Standard {foo} replacement also works, and allows full @@ -586,13 +582,13 @@ class DollarFormatter(FullEvalFormatter): In [1]: f = DollarFormatter() In [2]: f.format('{n//4}', n=8) - Out[2]: u'2' + Out[2]: '2' In [3]: f.format('23 * 76 is $result', result=23*76) - Out[3]: u'23 * 76 is 1748' + Out[3]: '23 * 76 is 1748' In [4]: f.format('$a or {b}', a=1, b=2) - Out[4]: u'1 or 2' + Out[4]: '1 or 2' """ _dollar_pattern = re.compile("(.*?)\$(\$?[\w\.]+)") def parse(self, fmt_string): diff --git a/IPython/utils/tokenize2.py b/IPython/utils/tokenize2.py index cbb5292..510dbf1 100644 --- a/IPython/utils/tokenize2.py +++ b/IPython/utils/tokenize2.py @@ -1,9 +1,594 @@ -"""Load our patched versions of tokenize. +"""Patched version of standard library tokenize, to deal with various bugs. + +Based on Python 3.2 code. + +Patches: + +- Gareth Rees' patch for Python issue #12691 (untokenizing) + - Except we don't encode the output of untokenize + - Python 2 compatible syntax, so that it can be byte-compiled at installation +- Newlines in comments and blank lines should be either NL or NEWLINE, depending + on whether they are in a multi-line statement. Filed as Python issue #17061. +- Export generate_tokens & TokenError +- u and rb literals are allowed under Python 3.3 and above. + +------------------------------------------------------------------------------ +Tokenization help for Python programs. + +tokenize(readline) is a generator that breaks a stream of bytes into +Python tokens. It decodes the bytes according to PEP-0263 for +determining source file encoding. + +It accepts a readline-like method which is called repeatedly to get the +next line of input (or b"" for EOF). It generates 5-tuples with these +members: + + the token type (see token.py) + the token (a string) + the starting (row, column) indices of the token (a 2-tuple of ints) + the ending (row, column) indices of the token (a 2-tuple of ints) + the original line (string) + +It is designed to match the working of the Python tokenizer exactly, except +that it produces COMMENT tokens for comments and gives type OP for all +operators. Additionally, all token lists start with an ENCODING token +which tells you which encoding was used to decode the bytes stream. """ +__author__ = 'Ka-Ping Yee ' +__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, ' + 'Skip Montanaro, Raymond Hettinger, Trent Nelson, ' + 'Michael Foord') +import builtins +import re import sys +from token import * +from codecs import lookup, BOM_UTF8 +import collections +from io import TextIOWrapper +cookie_re = re.compile("coding[:=]\s*([-\w.]+)") + +import token +__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding", + "NL", "untokenize", "ENCODING", "TokenInfo"] +del token + +__all__ += ["generate_tokens", "TokenError"] -if sys.version_info[0] >= 3: - from ._tokenize_py3 import * +COMMENT = N_TOKENS +tok_name[COMMENT] = 'COMMENT' +NL = N_TOKENS + 1 +tok_name[NL] = 'NL' +ENCODING = N_TOKENS + 2 +tok_name[ENCODING] = 'ENCODING' +N_TOKENS += 3 + +class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')): + def __repr__(self): + annotated_type = '%d (%s)' % (self.type, tok_name[self.type]) + return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' % + self._replace(type=annotated_type)) + +def group(*choices): return '(' + '|'.join(choices) + ')' +def any(*choices): return group(*choices) + '*' +def maybe(*choices): return group(*choices) + '?' 
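+# For example (brief illustration of the three helpers above):
+#   group('a', 'b')  -> '(a|b)'
+#   any('a', 'b')    -> '(a|b)*'
+#   maybe('a', 'b')  -> '(a|b)?'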
+ +# Note: we use unicode matching for names ("\w") but ascii matching for +# number literals. +Whitespace = r'[ \f\t]*' +Comment = r'#[^\r\n]*' +Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment) +Name = r'\w+' + +Hexnumber = r'0[xX][0-9a-fA-F]+' +Binnumber = r'0[bB][01]+' +Octnumber = r'0[oO][0-7]+' +Decnumber = r'(?:0+|[1-9][0-9]*)' +Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber) +Exponent = r'[eE][-+]?[0-9]+' +Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent) +Expfloat = r'[0-9]+' + Exponent +Floatnumber = group(Pointfloat, Expfloat) +Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]') +Number = group(Imagnumber, Floatnumber, Intnumber) + +if sys.version_info.minor >= 3: + StringPrefix = r'(?:[bB][rR]?|[rR][bB]?|[uU])?' else: - from ._tokenize_py2 import * + StringPrefix = r'(?:[bB]?[rR]?)?' + +# Tail end of ' string. +Single = r"[^'\\]*(?:\\.[^'\\]*)*'" +# Tail end of " string. +Double = r'[^"\\]*(?:\\.[^"\\]*)*"' +# Tail end of ''' string. +Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" +# Tail end of """ string. +Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' +Triple = group(StringPrefix + "'''", StringPrefix + '"""') +# Single-line ' or " string. +String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'", + StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"') + +# Because of leftmost-then-longest match semantics, be sure to put the +# longest operators first (e.g., if = came before ==, == would get +# recognized as two instances of =). +Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=", + r"//=?", r"->", + r"[+\-*/%&|^=<>]=?", + r"~") + +Bracket = '[][(){}]' +Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]') +Funny = group(Operator, Bracket, Special) + +PlainToken = group(Number, Funny, String, Name) +Token = Ignore + PlainToken + +# First (or only) line of ' or " string. 
+ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" + + group("'", r'\\\r?\n'), + StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' + + group('"', r'\\\r?\n')) +PseudoExtras = group(r'\\\r?\n', Comment, Triple) +PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) + +def _compile(expr): + return re.compile(expr, re.UNICODE) + +tokenprog, pseudoprog, single3prog, double3prog = map( + _compile, (Token, PseudoToken, Single3, Double3)) +endprogs = {"'": _compile(Single), '"': _compile(Double), + "'''": single3prog, '"""': double3prog, + "r'''": single3prog, 'r"""': double3prog, + "b'''": single3prog, 'b"""': double3prog, + "R'''": single3prog, 'R"""': double3prog, + "B'''": single3prog, 'B"""': double3prog, + "br'''": single3prog, 'br"""': double3prog, + "bR'''": single3prog, 'bR"""': double3prog, + "Br'''": single3prog, 'Br"""': double3prog, + "BR'''": single3prog, 'BR"""': double3prog, + 'r': None, 'R': None, 'b': None, 'B': None} + +triple_quoted = {} +for t in ("'''", '"""', + "r'''", 'r"""', "R'''", 'R"""', + "b'''", 'b"""', "B'''", 'B"""', + "br'''", 'br"""', "Br'''", 'Br"""', + "bR'''", 'bR"""', "BR'''", 'BR"""'): + triple_quoted[t] = t +single_quoted = {} +for t in ("'", '"', + "r'", 'r"', "R'", 'R"', + "b'", 'b"', "B'", 'B"', + "br'", 'br"', "Br'", 'Br"', + "bR'", 'bR"', "BR'", 'BR"' ): + single_quoted[t] = t + +if sys.version_info.minor >= 3: + # Python 3.3 + for _prefix in ['rb', 'rB', 'Rb', 'RB', 'u', 'U']: + _t2 = _prefix+'"""' + endprogs[_t2] = double3prog + triple_quoted[_t2] = _t2 + _t1 = _prefix + "'''" + endprogs[_t1] = single3prog + triple_quoted[_t1] = _t1 + single_quoted[_prefix+'"'] = _prefix+'"' + single_quoted[_prefix+"'"] = _prefix+"'" + del _prefix, _t2, _t1 + endprogs['u'] = None + endprogs['U'] = None + +del _compile + +tabsize = 8 + +class TokenError(Exception): pass + +class StopTokenizing(Exception): pass + + +class Untokenizer: + + def __init__(self): + self.tokens = [] + self.prev_row = 1 + self.prev_col = 0 + self.encoding = 'utf-8' + + def add_whitespace(self, tok_type, start): + row, col = start + assert row >= self.prev_row + col_offset = col - self.prev_col + if col_offset > 0: + self.tokens.append(" " * col_offset) + elif row > self.prev_row and tok_type not in (NEWLINE, NL, ENDMARKER): + # Line was backslash-continued. + self.tokens.append(" ") + + def untokenize(self, tokens): + iterable = iter(tokens) + for t in iterable: + if len(t) == 2: + self.compat(t, iterable) + break + tok_type, token, start, end = t[:4] + if tok_type == ENCODING: + self.encoding = token + continue + self.add_whitespace(tok_type, start) + self.tokens.append(token) + self.prev_row, self.prev_col = end + if tok_type in (NEWLINE, NL): + self.prev_row += 1 + self.prev_col = 0 + return "".join(self.tokens) + + def compat(self, token, iterable): + # This import is here to avoid problems when the itertools + # module is not built yet and tokenize is imported. 
+        from itertools import chain
+        startline = False
+        prevstring = False
+        indents = []
+        toks_append = self.tokens.append
+
+        for tok in chain([token], iterable):
+            toknum, tokval = tok[:2]
+            if toknum == ENCODING:
+                self.encoding = tokval
+                continue
+
+            if toknum in (NAME, NUMBER):
+                tokval += ' '
+
+            # Insert a space between two consecutive strings
+            if toknum == STRING:
+                if prevstring:
+                    tokval = ' ' + tokval
+                prevstring = True
+            else:
+                prevstring = False
+
+            if toknum == INDENT:
+                indents.append(tokval)
+                continue
+            elif toknum == DEDENT:
+                indents.pop()
+                continue
+            elif toknum in (NEWLINE, NL):
+                startline = True
+            elif startline and indents:
+                toks_append(indents[-1])
+                startline = False
+            toks_append(tokval)
+
+
+def untokenize(tokens):
+    """
+    Convert ``tokens`` (an iterable) back into Python source code. Unlike
+    the standard library version, the result is returned as a string rather
+    than encoded to bytes (see the patch notes in the module docstring).
+
+    The result is guaranteed to tokenize back to match the input so that
+    the conversion is lossless and round-trips are assured. The
+    guarantee applies only to the token type and token string, as the
+    spacing between tokens (column positions) may change.
+
+    :func:`untokenize` has two modes. If the input tokens are sequences
+    of length 2 (``type``, ``string``) then spaces are added as necessary to
+    preserve the round-trip property.
+
+    If the input tokens are sequences of length 4 or more (``type``,
+    ``string``, ``start``, ``end``), as returned by :func:`tokenize`, then
+    spaces are added so that each token appears in the result at the
+    position indicated by ``start`` and ``end``, if possible.
+    """
+    return Untokenizer().untokenize(tokens)
+
+
+def _get_normal_name(orig_enc):
+    """Imitates get_normal_name in tokenizer.c."""
+    # Only care about the first 12 characters.
+    enc = orig_enc[:12].lower().replace("_", "-")
+    if enc == "utf-8" or enc.startswith("utf-8-"):
+        return "utf-8"
+    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
+       enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
+        return "iso-8859-1"
+    return orig_enc
+
+def detect_encoding(readline):
+    """
+    The detect_encoding() function is used to detect the encoding that should
+    be used to decode a Python source file. It requires one argument, readline,
+    in the same way as the tokenize() generator.
+
+    It will call readline a maximum of twice, and return the encoding used
+    (as a string) and a list of any lines (left as bytes) it has read in.
+
+    It detects the encoding from the presence of a utf-8 bom or an encoding
+    cookie as specified in pep-0263. If both a bom and a cookie are present,
+    but disagree, a SyntaxError will be raised. If the encoding cookie is an
+    invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
+    'utf-8-sig' is returned.
+
+    If no encoding is specified, then the default of 'utf-8' will be returned.
+    """
+    bom_found = False
+    encoding = None
+    default = 'utf-8'
+    def read_or_stop():
+        try:
+            return readline()
+        except StopIteration:
+            return b''
+
+    def find_cookie(line):
+        try:
+            # Decode as UTF-8. Either the line is an encoding declaration,
+            # in which case it should be pure ASCII, or it must be UTF-8
+            # per default encoding.
+            line_string = line.decode('utf-8')
+        except UnicodeDecodeError:
+            raise SyntaxError("invalid or missing encoding declaration")
+
+        matches = cookie_re.findall(line_string)
+        if not matches:
+            return None
+        encoding = _get_normal_name(matches[0])
+        try:
+            codec = lookup(encoding)
+        except LookupError:
+            # This behaviour mimics the Python interpreter
+            raise SyntaxError("unknown encoding: " + encoding)
+
+        if bom_found:
+            if encoding != 'utf-8':
+                # This behaviour mimics the Python interpreter
+                raise SyntaxError('encoding problem: utf-8')
+            encoding += '-sig'
+        return encoding
+
+    first = read_or_stop()
+    if first.startswith(BOM_UTF8):
+        bom_found = True
+        first = first[3:]
+        default = 'utf-8-sig'
+    if not first:
+        return default, []
+
+    encoding = find_cookie(first)
+    if encoding:
+        return encoding, [first]
+
+    second = read_or_stop()
+    if not second:
+        return default, [first]
+
+    encoding = find_cookie(second)
+    if encoding:
+        return encoding, [first, second]
+
+    return default, [first, second]
+
+
+def open(filename):
+    """Open a file in read only mode using the encoding detected by
+    detect_encoding().
+    """
+    buffer = builtins.open(filename, 'rb')
+    encoding, lines = detect_encoding(buffer.readline)
+    buffer.seek(0)
+    text = TextIOWrapper(buffer, encoding, line_buffering=True)
+    text.mode = 'r'
+    return text
+
+
+def tokenize(readline):
+    """
+    The tokenize() generator requires one argument, readline, which
+    must be a callable object which provides the same interface as the
+    readline() method of built-in file objects. Each call to the function
+    should return one line of input as bytes. Alternately, readline
+    can be a callable function terminating with StopIteration:
+        readline = open(myfile, 'rb').__next__  # Example of alternate readline
+
+    The generator produces 5-tuples with these members: the token type; the
+    token string; a 2-tuple (srow, scol) of ints specifying the row and
+    column where the token begins in the source; a 2-tuple (erow, ecol) of
+    ints specifying the row and column where the token ends in the source;
+    and the line on which the token was found. The line passed is the
+    logical line; continuation lines are included.
+
+    The first token sequence will always be an ENCODING token
+    which tells you which encoding was used to decode the bytes stream.
+    """
+    # This import is here to avoid problems when the itertools module is not
+    # built yet and tokenize is imported.
+    from itertools import chain, repeat
+    encoding, consumed = detect_encoding(readline)
+    rl_gen = iter(readline, b"")
+    empty = repeat(b"")
+    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)
+
+
+def _tokenize(readline, encoding):
+    lnum = parenlev = continued = 0
+    numchars = '0123456789'
+    contstr, needcont = '', 0
+    contline = None
+    indents = [0]
+
+    if encoding is not None:
+        if encoding == "utf-8-sig":
+            # BOM will already have been stripped.
+ encoding = "utf-8" + yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '') + while True: # loop over lines in stream + try: + line = readline() + except StopIteration: + line = b'' + + if encoding is not None: + line = line.decode(encoding) + lnum += 1 + pos, max = 0, len(line) + + if contstr: # continued string + if not line: + raise TokenError("EOF in multi-line string", strstart) + endmatch = endprog.match(line) + if endmatch: + pos = end = endmatch.end(0) + yield TokenInfo(STRING, contstr + line[:end], + strstart, (lnum, end), contline + line) + contstr, needcont = '', 0 + contline = None + elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': + yield TokenInfo(ERRORTOKEN, contstr + line, + strstart, (lnum, len(line)), contline) + contstr = '' + contline = None + continue + else: + contstr = contstr + line + contline = contline + line + continue + + elif parenlev == 0 and not continued: # new statement + if not line: break + column = 0 + while pos < max: # measure leading whitespace + if line[pos] == ' ': + column += 1 + elif line[pos] == '\t': + column = (column//tabsize + 1)*tabsize + elif line[pos] == '\f': + column = 0 + else: + break + pos += 1 + if pos == max: + break + + if line[pos] in '#\r\n': # skip comments or blank lines + if line[pos] == '#': + comment_token = line[pos:].rstrip('\r\n') + nl_pos = pos + len(comment_token) + yield TokenInfo(COMMENT, comment_token, + (lnum, pos), (lnum, pos + len(comment_token)), line) + yield TokenInfo(NEWLINE, line[nl_pos:], + (lnum, nl_pos), (lnum, len(line)), line) + else: + yield TokenInfo(NEWLINE, line[pos:], + (lnum, pos), (lnum, len(line)), line) + continue + + if column > indents[-1]: # count indents or dedents + indents.append(column) + yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line) + while column < indents[-1]: + if column not in indents: + raise IndentationError( + "unindent does not match any outer indentation level", + ("", lnum, pos, line)) + indents = indents[:-1] + yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line) + + else: # continued statement + if not line: + raise TokenError("EOF in multi-line statement", (lnum, 0)) + continued = 0 + + while pos < max: + pseudomatch = pseudoprog.match(line, pos) + if pseudomatch: # scan for tokens + start, end = pseudomatch.span(1) + spos, epos, pos = (lnum, start), (lnum, end), end + token, initial = line[start:end], line[start] + + if (initial in numchars or # ordinary number + (initial == '.' and token != '.' 
and token != '...')): + yield TokenInfo(NUMBER, token, spos, epos, line) + elif initial in '\r\n': + yield TokenInfo(NL if parenlev > 0 else NEWLINE, + token, spos, epos, line) + elif initial == '#': + assert not token.endswith("\n") + yield TokenInfo(COMMENT, token, spos, epos, line) + elif token in triple_quoted: + endprog = endprogs[token] + endmatch = endprog.match(line, pos) + if endmatch: # all on one line + pos = endmatch.end(0) + token = line[start:pos] + yield TokenInfo(STRING, token, spos, (lnum, pos), line) + else: + strstart = (lnum, start) # multiple lines + contstr = line[start:] + contline = line + break + elif initial in single_quoted or \ + token[:2] in single_quoted or \ + token[:3] in single_quoted: + if token[-1] == '\n': # continued string + strstart = (lnum, start) + endprog = (endprogs[initial] or endprogs[token[1]] or + endprogs[token[2]]) + contstr, needcont = line[start:], 1 + contline = line + break + else: # ordinary string + yield TokenInfo(STRING, token, spos, epos, line) + elif initial.isidentifier(): # ordinary name + yield TokenInfo(NAME, token, spos, epos, line) + elif initial == '\\': # continued stmt + continued = 1 + else: + if initial in '([{': + parenlev += 1 + elif initial in ')]}': + parenlev -= 1 + yield TokenInfo(OP, token, spos, epos, line) + else: + yield TokenInfo(ERRORTOKEN, line[pos], + (lnum, pos), (lnum, pos+1), line) + pos += 1 + + for indent in indents[1:]: # pop remaining indent levels + yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '') + yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '') + + +# An undocumented, backwards compatible, API for all the places in the standard +# library that expect to be able to use tokenize with strings +def generate_tokens(readline): + return _tokenize(readline, None) + +if __name__ == "__main__": + # Quick sanity check + s = b'''def parseline(self, line): + """Parse the line into a command name and a string containing + the arguments. Returns a tuple containing (command, args, line). + 'command' and 'args' may be None if the line couldn't be parsed. + """ + line = line.strip() + if not line: + return None, None, line + elif line[0] == '?': + line = 'help ' + line[1:] + elif line[0] == '!': + if hasattr(self, 'do_shell'): + line = 'shell ' + line[1:] + else: + return None, None, line + i, n = 0, len(line) + while i < n and line[i] in self.identchars: i = i+1 + cmd, arg = line[:i], line[i:].strip() + return cmd, arg, line + ''' + for tok in tokenize(iter(s.splitlines()).__next__): + print(tok) diff --git a/IPython/utils/tokenutil.py b/IPython/utils/tokenutil.py index f52d3b7..e7fbc5d 100644 --- a/IPython/utils/tokenutil.py +++ b/IPython/utils/tokenutil.py @@ -3,7 +3,6 @@ # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. -from __future__ import absolute_import, print_function from collections import namedtuple from io import StringIO diff --git a/IPython/utils/traitlets.py b/IPython/utils/traitlets.py index b4ff7a2..64b5937 100644 --- a/IPython/utils/traitlets.py +++ b/IPython/utils/traitlets.py @@ -1,4 +1,3 @@ -from __future__ import absolute_import from warnings import warn diff --git a/docs/sphinxext/apigen.py b/docs/sphinxext/apigen.py index 2155ace..0b3c80d 100644 --- a/docs/sphinxext/apigen.py +++ b/docs/sphinxext/apigen.py @@ -17,7 +17,6 @@ NOTE: this is a modified version of a script originally shipped with the PyMVPA project, which we've adapted for NIPY use. 
 PyMVPA is an MIT-licensed project."""
-from __future__ import print_function
 
 # Stdlib imports
 import ast
diff --git a/examples/Embedding/embed_class_long.py b/examples/Embedding/embed_class_long.py
index cbe9acb..943fa8c 100755
--- a/examples/Embedding/embed_class_long.py
+++ b/examples/Embedding/embed_class_long.py
@@ -8,7 +8,6 @@ embedding which you can cut and paste in your code once you understand
 how things work.
 
 The code in this file is deliberately extra-verbose, meant for learning."""
-from __future__ import print_function
 
 # The basics to get you going:
diff --git a/examples/IPython Kernel/example-demo.py b/examples/IPython Kernel/example-demo.py
index fcdce76..89e3741 100644
--- a/examples/IPython Kernel/example-demo.py
+++ b/examples/IPython Kernel/example-demo.py
@@ -8,7 +8,6 @@ resuming execution later.
 
 This is a unicode test, åäö
 """
-from __future__ import print_function
 
 print('Hello, welcome to an interactive IPython demo.')
 print('Executing this block should require confirmation before proceeding,')
diff --git a/scripts/ipython_win_post_install.py b/scripts/ipython_win_post_install.py
index 42a8bc5..99e3952 100755
--- a/scripts/ipython_win_post_install.py
+++ b/scripts/ipython_win_post_install.py
@@ -5,7 +5,6 @@
 http://docs.python.org/2/distutils/builtdist.html#the-postinstallation-script
 """
-from __future__ import print_function
 
 import os
 import sys
diff --git a/setup.py b/setup.py
index 8429aa9..4d418db 100755
--- a/setup.py
+++ b/setup.py
@@ -41,8 +41,6 @@ See IPython `README.rst` file for more information:
     print(error, file=sys.stderr)
     sys.exit(1)
 
-PY3 = (sys.version_info[0] >= 3)
-
 # At least we're on the python version we need, move on.
 
 #-------------------------------------------------------------------------------
diff --git a/setupbase.py b/setupbase.py
index dc94cb8..0fedf16 100644
--- a/setupbase.py
+++ b/setupbase.py
@@ -12,7 +12,6 @@ This includes:
 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.
-from __future__ import print_function
 
 import re
 import os
diff --git a/tools/backport_pr.py b/tools/backport_pr.py
index a8da3af..6eee816 100755
--- a/tools/backport_pr.py
+++ b/tools/backport_pr.py
@@ -26,7 +26,6 @@ of that repo.
 
 """
-from __future__ import print_function
 
 import os
 import re
diff --git a/tools/check_sources.py b/tools/check_sources.py
index dc0761e..333d2a7 100755
--- a/tools/check_sources.py
+++ b/tools/check_sources.py
@@ -8,7 +8,6 @@ Usage:
 It prints summaries and if chosen, line-by-line info of where \\t or \\r
 characters can be found in our source tree.
 """
-from __future__ import print_function
 
 # Config
 # If true, all lines that have tabs are printed, with line number
diff --git a/tools/gen_latex_symbols.py b/tools/gen_latex_symbols.py
index 00cd842..9a2421f 100644
--- a/tools/gen_latex_symbols.py
+++ b/tools/gen_latex_symbols.py
@@ -9,7 +9,6 @@
 #
 # The original mapping of latex symbols to unicode comes from the `latex_symbols.jl` files from Julia.
-from __future__ import print_function
 
 import os, sys
 if not sys.version_info[0] == 3:
diff --git a/tools/gh_api.py b/tools/gh_api.py
index a21506b..52c6621 100644
--- a/tools/gh_api.py
+++ b/tools/gh_api.py
@@ -1,5 +1,4 @@
 """Functions for Github API requests."""
-from __future__ import print_function
 
 try:
     input = raw_input
diff --git a/tools/git-mpr.py b/tools/git-mpr.py
index 372455d..4d8423e 100755
--- a/tools/git-mpr.py
+++ b/tools/git-mpr.py
@@ -7,7 +7,6 @@ Usage:
 
 Type `git mpr -h` for details.
""" -from __future__ import print_function import io, os import argparse diff --git a/tools/github_stats.py b/tools/github_stats.py index a333dd2..bbacc0a 100755 --- a/tools/github_stats.py +++ b/tools/github_stats.py @@ -9,7 +9,6 @@ To generate a report for IPython 2.0, run: # Imports #----------------------------------------------------------------------------- -from __future__ import print_function import codecs import sys @@ -114,9 +113,6 @@ if __name__ == "__main__": print("DEPRECATE: backport_pr.py is deprecated and is is now recommended" "to install `ghpro` from PyPI.", file=sys.stderr) - # deal with unicode - if sys.version_info < (3,): - sys.stdout = codecs.getwriter('utf8')(sys.stdout) # Whether to add reST urls for all issues in printout. show_urls = True diff --git a/tools/test_pr.py b/tools/test_pr.py index a0785f5..9739f47 100755 --- a/tools/test_pr.py +++ b/tools/test_pr.py @@ -7,7 +7,6 @@ Python, and posts the results to Gist if any tests fail. Usage: python test_pr.py 1657 """ -from __future__ import print_function import errno from glob import glob diff --git a/tools/tests/embed/embed_division.py b/tools/tests/embed/embed_division.py deleted file mode 100644 index 373846a..0000000 --- a/tools/tests/embed/embed_division.py +++ /dev/null @@ -1,5 +0,0 @@ -"""This tests that future compiler flags are passed to the embedded IPython.""" -from __future__ import division -from IPython import embed -embed(banner1='', header='check 1/2 == 0.5 in Python 2') -embed(banner1='', header='check 1/2 = 0 in Python 2', compile_flags=0) diff --git a/tools/tests/embed/embed_flufl.py b/tools/tests/embed/embed_flufl.py new file mode 100644 index 0000000..76682fa --- /dev/null +++ b/tools/tests/embed/embed_flufl.py @@ -0,0 +1,5 @@ +"""This tests that future compiler flags are passed to the embedded IPython.""" +from __future__ import barry_as_FLUFL +from IPython import embed +embed(banner1='', header='check 1 <> 2 == True') +embed(banner1='', header='check 1 <> 2 cause SyntaxError', compile_flags=0) diff --git a/tools/tests/embed/embed_no_division.py b/tools/tests/embed/embed_no_flufl.py similarity index 50% rename from tools/tests/embed/embed_no_division.py rename to tools/tests/embed/embed_no_flufl.py index cdfd804..a65061c 100644 --- a/tools/tests/embed/embed_no_division.py +++ b/tools/tests/embed/embed_no_flufl.py @@ -1,6 +1,6 @@ """This tests that future compiler flags are passed to the embedded IPython.""" from IPython import embed import __future__ -embed(banner1='', header='check 1/2 == 0 in Python 2') -embed(banner1='', header='check 1/2 == 0.5 in Python 2', - compile_flags=__future__.division.compiler_flag) +embed(banner1='', header='check 1 <> 2 cause SyntaxError') +embed(banner1='', header='check 1 <> 2 == True', + compile_flags=__future__.barry_as_FLUFL.compiler_flag) diff --git a/tools/toollib.py b/tools/toollib.py index bc0ed61..f5a117d 100644 --- a/tools/toollib.py +++ b/tools/toollib.py @@ -1,6 +1,5 @@ """Various utilities common to IPython release and maintenance tools. """ -from __future__ import print_function # Library imports import os