diff --git a/IPython/config/application.py b/IPython/config/application.py
index 264d379..3b848c3 100644
--- a/IPython/config/application.py
+++ b/IPython/config/application.py
@@ -14,7 +14,7 @@ import sys
 from copy import deepcopy
 from collections import defaultdict
-from IPython.external.decorator import decorator
+from decorator import decorator
 from IPython.config.configurable import SingletonConfigurable
 from IPython.config.loader import (
diff --git a/IPython/core/formatters.py b/IPython/core/formatters.py
index f5126c9..3c622cf 100644
--- a/IPython/core/formatters.py
+++ b/IPython/core/formatters.py
@@ -17,7 +17,7 @@ import sys
 import traceback
 import warnings
-from IPython.external.decorator import decorator
+from decorator import decorator
 from IPython.config.configurable import Configurable
 from IPython.core.getipython import get_ipython
diff --git a/IPython/core/history.py b/IPython/core/history.py
index c615ad8..7c1076c 100644
--- a/IPython/core/history.py
+++ b/IPython/core/history.py
@@ -28,7 +28,7 @@ import threading
 # Our own packages
 from IPython.config.configurable import Configurable
-from IPython.external.decorator import decorator
+from decorator import decorator
 from IPython.utils.decorators import undoc
 from IPython.utils.path import locate_profile
 from IPython.utils import py3compat
diff --git a/IPython/core/interactiveshell.py b/IPython/core/interactiveshell.py
index 13d1cb5..0c2d4df 100644
--- a/IPython/core/interactiveshell.py
+++ b/IPython/core/interactiveshell.py
@@ -27,6 +27,8 @@ import types
 import subprocess
 from io import open as io_open
+from pickleshare import PickleShareDB
+
 from IPython.config.configurable import SingletonConfigurable
 from IPython.core import debugger, oinspect
 from IPython.core import magic
@@ -63,7 +65,6 @@ from IPython.utils.decorators import undoc
 from IPython.utils.io import ask_yes_no
 from IPython.utils.ipstruct import Struct
 from IPython.utils.path import get_home_dir, get_ipython_dir, get_py_filename, unquote_filename, ensure_dir_exists
-from IPython.utils.pickleshare import PickleShareDB
 from IPython.utils.process import system, getoutput
 from IPython.utils.py3compat import (builtin_mod, unicode_type, string_types, with_metaclass, iteritems)
diff --git a/IPython/core/magic.py b/IPython/core/magic.py
index f085717..7d8b981 100644
--- a/IPython/core/magic.py
+++ b/IPython/core/magic.py
@@ -27,7 +27,7 @@ from IPython.config.configurable import Configurable
 from IPython.core import oinspect
 from IPython.core.error import UsageError
 from IPython.core.inputsplitter import ESC_MAGIC, ESC_MAGIC2
-from IPython.external.decorator import decorator
+from decorator import decorator
 from IPython.utils.ipstruct import Struct
 from IPython.utils.process import arg_split
 from IPython.utils.py3compat import string_types, iteritems
diff --git a/IPython/core/tests/test_oinspect.py b/IPython/core/tests/test_oinspect.py
index d8f1dd3..e0b2905 100644
--- a/IPython/core/tests/test_oinspect.py
+++ b/IPython/core/tests/test_oinspect.py
@@ -26,7 +26,7 @@ from IPython.core.magic import (Magics, magics_class, line_magic, cell_magic, line_cell_magic, register_line_magic, register_cell_magic, register_line_cell_magic)
-from IPython.external.decorator import decorator
+from decorator import decorator
 from IPython.testing.decorators import skipif
 from IPython.testing.tools import AssertPrints
 from IPython.utils.path import compress_user
diff --git a/IPython/extensions/rmagic.py b/IPython/extensions/rmagic.py index d0501a1..6e052ad 100644 ---
a/IPython/extensions/rmagic.py +++ b/IPython/extensions/rmagic.py @@ -73,7 +73,7 @@ from IPython.testing.skipdoctest import skip_doctest from IPython.core.magic_arguments import ( argument, magic_arguments, parse_argstring ) -from IPython.external.simplegeneric import generic +from simplegeneric import generic from IPython.utils.py3compat import (str_to_unicode, unicode_to_str, PY3, unicode_type) from IPython.utils.text import dedent diff --git a/IPython/external/appnope/__init__.py b/IPython/external/appnope/__init__.py deleted file mode 100644 index e1f1e68..0000000 --- a/IPython/external/appnope/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ - -try: - from appnope import * -except ImportError: - __version__ = '0.0.5' - import sys - import platform - from distutils.version import LooseVersion as V - - if sys.platform != "darwin" or V(platform.mac_ver()[0]) < V("10.9"): - from ._dummy import * - else: - from ._nope import * - - del sys, platform, V diff --git a/IPython/external/appnope/_dummy.py b/IPython/external/appnope/_dummy.py deleted file mode 100644 index a55ec5b..0000000 --- a/IPython/external/appnope/_dummy.py +++ /dev/null @@ -1,30 +0,0 @@ -#----------------------------------------------------------------------------- -# Copyright (C) 2013 Min RK -# -# Distributed under the terms of the 2-clause BSD License. -#----------------------------------------------------------------------------- - -from contextlib import contextmanager - -def beginActivityWithOptions(options, reason=""): - return - -def endActivity(activity): - return - -def nope(): - return - -def nap(): - return - - -@contextmanager -def nope_scope( - options=0, - reason="Because Reasons" - ): - yield - -def napping_allowed(): - return True \ No newline at end of file diff --git a/IPython/external/appnope/_nope.py b/IPython/external/appnope/_nope.py deleted file mode 100644 index 70a3493..0000000 --- a/IPython/external/appnope/_nope.py +++ /dev/null @@ -1,126 +0,0 @@ -#----------------------------------------------------------------------------- -# Copyright (C) 2013 Min RK -# -# Distributed under the terms of the 2-clause BSD License. 
-#----------------------------------------------------------------------------- - -from contextlib import contextmanager - -import ctypes -import ctypes.util - -objc = ctypes.cdll.LoadLibrary(ctypes.util.find_library('objc')) - -void_p = ctypes.c_void_p -ull = ctypes.c_uint64 - -objc.objc_getClass.restype = void_p -objc.sel_registerName.restype = void_p -objc.objc_msgSend.restype = void_p -objc.objc_msgSend.argtypes = [void_p, void_p] - -msg = objc.objc_msgSend - -def _utf8(s): - """ensure utf8 bytes""" - if not isinstance(s, bytes): - s = s.encode('utf8') - return s - -def n(name): - """create a selector name (for methods)""" - return objc.sel_registerName(_utf8(name)) - -def C(classname): - """get an ObjC Class by name""" - return objc.objc_getClass(_utf8(classname)) - -# constants from Foundation - -NSActivityIdleDisplaySleepDisabled = (1 << 40) -NSActivityIdleSystemSleepDisabled = (1 << 20) -NSActivitySuddenTerminationDisabled = (1 << 14) -NSActivityAutomaticTerminationDisabled = (1 << 15) -NSActivityUserInitiated = (0x00FFFFFF | NSActivityIdleSystemSleepDisabled) -NSActivityUserInitiatedAllowingIdleSystemSleep = (NSActivityUserInitiated & ~NSActivityIdleSystemSleepDisabled) -NSActivityBackground = 0x000000FF -NSActivityLatencyCritical = 0xFF00000000 - -def beginActivityWithOptions(options, reason=""): - """Wrapper for: - - [ [ NSProcessInfo processInfo] - beginActivityWithOptions: (uint64)options - reason: (str)reason - ] - """ - NSProcessInfo = C('NSProcessInfo') - NSString = C('NSString') - - reason = msg(NSString, n("stringWithUTF8String:"), _utf8(reason)) - info = msg(NSProcessInfo, n('processInfo')) - activity = msg(info, - n('beginActivityWithOptions:reason:'), - ull(options), - void_p(reason) - ) - return activity - -def endActivity(activity): - """end a process activity assertion""" - NSProcessInfo = C('NSProcessInfo') - info = msg(NSProcessInfo, n('processInfo')) - msg(info, n("endActivity:"), void_p(activity)) - -_theactivity = None - -def nope(): - """disable App Nap by setting NSActivityUserInitiatedAllowingIdleSystemSleep""" - global _theactivity - _theactivity = beginActivityWithOptions( - NSActivityUserInitiatedAllowingIdleSystemSleep, - "Because Reasons" - ) - -def nap(): - """end the caffeinated state started by `nope`""" - global _theactivity - if _theactivity is not None: - endActivity(_theactivity) - _theactivity = None - -def napping_allowed(): - """is napping allowed?""" - return _theactivity is None - -@contextmanager -def nope_scope( - options=NSActivityUserInitiatedAllowingIdleSystemSleep, - reason="Because Reasons" - ): - """context manager for beginActivityWithOptions. - - Within this context, App Nap will be disabled. 
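(The `_nope.py` being deleted above is the same code published on PyPI as `appnope`, which IPython now depends on directly. A minimal usage sketch, assuming the standalone package keeps the `nope()`/`nap()`/`nope_scope()` API shown in the deleted file and, like the deleted wrapper, degrades to no-ops off macOS; the `time.sleep` calls merely stand in for a blocking event loop::

    import time
    import appnope

    appnope.nope()          # disable App Nap for the rest of the process
    time.sleep(1)           # stand-in for a long-running event loop
    appnope.nap()           # allow App Nap again

    # or scope the assertion, mirroring nope_scope() above
    with appnope.nope_scope(reason="long computation"):
        time.sleep(1)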
- """ - activity = beginActivityWithOptions(options, reason) - try: - yield - finally: - endActivity(activity) - -__all__ = [ - "NSActivityIdleDisplaySleepDisabled", - "NSActivityIdleSystemSleepDisabled", - "NSActivitySuddenTerminationDisabled", - "NSActivityAutomaticTerminationDisabled", - "NSActivityUserInitiated", - "NSActivityUserInitiatedAllowingIdleSystemSleep", - "NSActivityBackground", - "NSActivityLatencyCritical", - "beginActivityWithOptions", - "endActivity", - "nope", - "nap", - "napping_allowed", - "nope_scope", -] diff --git a/IPython/external/decorator/__init__.py b/IPython/external/decorator/__init__.py deleted file mode 100644 index d0e95b4..0000000 --- a/IPython/external/decorator/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -try: - from decorator import * -except ImportError: - from ._decorator import * diff --git a/IPython/external/decorator/_decorator.py b/IPython/external/decorator/_decorator.py deleted file mode 100644 index ef56922..0000000 --- a/IPython/external/decorator/_decorator.py +++ /dev/null @@ -1,229 +0,0 @@ -########################## LICENCE ############################### - -# Copyright (c) 2005-2012, Michele Simionato -# All rights reserved. - -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: - -# Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# Redistributions in bytecode form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. - -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS -# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR -# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -# DAMAGE. - -""" -Decorator module, see http://pypi.python.org/pypi/decorator -for the documentation. - -NOTE: this is an IPython-patched version to work on IronPython. See - FIXED comment below. 
-""" -from __future__ import print_function - -__version__ = '3.3.3' - -__all__ = ["decorator", "FunctionMaker", "partial"] - -import sys, re, inspect - -try: - from functools import partial -except ImportError: # for Python version < 2.5 - class partial(object): - "A simple replacement of functools.partial" - def __init__(self, func, *args, **kw): - self.func = func - self.args = args - self.keywords = kw - def __call__(self, *otherargs, **otherkw): - kw = self.keywords.copy() - kw.update(otherkw) - return self.func(*(self.args + otherargs), **kw) - -if sys.version >= '3': - from inspect import getfullargspec -else: - class getfullargspec(object): - "A quick and dirty replacement for getfullargspec for Python 2.X" - def __init__(self, f): - self.args, self.varargs, self.varkw, self.defaults = \ - inspect.getargspec(f) - self.kwonlyargs = [] - self.kwonlydefaults = None - def __iter__(self): - yield self.args - yield self.varargs - yield self.varkw - yield self.defaults - -DEF = re.compile('\s*def\s*([_\w][_\w\d]*)\s*\(') - -# basic functionality -class FunctionMaker(object): - """ - An object with the ability to create functions with a given signature. - It has attributes name, doc, module, signature, defaults, dict and - methods update and make. - """ - def __init__(self, func=None, name=None, signature=None, - defaults=None, doc=None, module=None, funcdict=None): - self.shortsignature = signature - if func: - # func can be a class or a callable, but not an instance method - self.name = func.__name__ - if self.name == '': # small hack for lambda functions - self.name = '_lambda_' - self.doc = func.__doc__ - self.module = func.__module__ - if inspect.isfunction(func): - argspec = getfullargspec(func) - self.annotations = getattr(func, '__annotations__', {}) - for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs', - 'kwonlydefaults'): - setattr(self, a, getattr(argspec, a)) - for i, arg in enumerate(self.args): - setattr(self, 'arg%d' % i, arg) - if sys.version < '3': # easy way - self.shortsignature = self.signature = \ - inspect.formatargspec( - formatvalue=lambda val: "", *argspec)[1:-1] - else: # Python 3 way - self.signature = self.shortsignature = ', '.join(self.args) - if self.varargs: - self.signature += ', *' + self.varargs - self.shortsignature += ', *' + self.varargs - if self.kwonlyargs: - for a in self.kwonlyargs: - self.signature += ', %s=None' % a - self.shortsignature += ', %s=%s' % (a, a) - if self.varkw: - self.signature += ', **' + self.varkw - self.shortsignature += ', **' + self.varkw - self.dict = func.__dict__.copy() - # func=None happens when decorating a caller - if name: - self.name = name - if signature is not None: - self.signature = signature - if defaults: - self.defaults = defaults - if doc: - self.doc = doc - if module: - self.module = module - if funcdict: - self.dict = funcdict - # check existence required attributes - assert hasattr(self, 'name') - if not hasattr(self, 'signature'): - raise TypeError('You are decorating a non function: %s' % func) - - def update(self, func, **kw): - "Update the signature of func with the data in self" - func.__name__ = self.name - func.__doc__ = getattr(self, 'doc', None) - func.__dict__ = getattr(self, 'dict', {}) - func.__defaults__ = getattr(self, 'defaults', ()) - func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None) - func.__annotations__ = getattr(self, 'annotations', None) - # FIXED: The following is try/excepted in IPython to work - # with IronPython. 
- try: - callermodule = sys._getframe(3).f_globals.get('__name__', '?') - except AttributeError: # IronPython _getframe only exists with FullFrames - callermodule = '?' - func.__module__ = getattr(self, 'module', callermodule) - func.__dict__.update(kw) - - def make(self, src_templ, evaldict=None, addsource=False, **attrs): - "Make a new function from a given template and update the signature" - src = src_templ % vars(self) # expand name and signature - evaldict = evaldict or {} - mo = DEF.match(src) - if mo is None: - raise SyntaxError('not a valid function template\n%s' % src) - name = mo.group(1) # extract the function name - names = set([name] + [arg.strip(' *') for arg in - self.shortsignature.split(',')]) - for n in names: - if n in ('_func_', '_call_'): - raise NameError('%s is overridden in\n%s' % (n, src)) - if not src.endswith('\n'): # add a newline just for safety - src += '\n' # this is needed in old versions of Python - try: - code = compile(src, '', 'single') - # print >> sys.stderr, 'Compiling %s' % src - exec(code, evaldict) - except: - print('Error in generated code:', file=sys.stderr) - print(src, file=sys.stderr) - raise - func = evaldict[name] - if addsource: - attrs['__source__'] = src - self.update(func, **attrs) - return func - - @classmethod - def create(cls, obj, body, evaldict, defaults=None, - doc=None, module=None, addsource=True, **attrs): - """ - Create a function from the strings name, signature and body. - evaldict is the evaluation dictionary. If addsource is true an attribute - __source__ is added to the result. The attributes attrs are added, - if any. - """ - if isinstance(obj, str): # "name(signature)" - name, rest = obj.strip().split('(', 1) - signature = rest[:-1] #strip a right parens - func = None - else: # a function - name = None - signature = None - func = obj - self = cls(func, name, signature, defaults, doc, module) - ibody = '\n'.join(' ' + line for line in body.splitlines()) - return self.make('def %(name)s(%(signature)s):\n' + ibody, - evaldict, addsource, **attrs) - -def decorator(caller, func=None): - """ - decorator(caller) converts a caller function into a decorator; - decorator(caller, func) decorates a function using a caller. 
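(The docstring above describes the two calling conventions of `decorator()`; the imports elsewhere in this patch now pull the same function from the PyPI `decorator` package instead of this bundled copy. A small sketch of the caller pattern it implements, with illustrative names `trace` and `add`::

    from decorator import decorator

    @decorator
    def trace(func, *args, **kw):
        # the caller runs around every invocation of the decorated function
        print('calling %s with %r %r' % (func.__name__, args, kw))
        return func(*args, **kw)

    @trace
    def add(x, y=1):
        "Add two numbers."
        return x + y

    print(add(2, y=3))    # prints the trace line, then 5
    print(add.__name__)   # 'add': name, docstring and signature are preserved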
- """ - if func is not None: # returns a decorated function - evaldict = func.__globals__.copy() - evaldict['_call_'] = caller - evaldict['_func_'] = func - return FunctionMaker.create( - func, "return _call_(_func_, %(shortsignature)s)", - evaldict, undecorated=func, __wrapped__=func) - else: # returns a decorator - if isinstance(caller, partial): - return partial(decorator, caller) - # otherwise assume caller is a function - first = inspect.getargspec(caller)[0][0] # first arg - evaldict = caller.__globals__.copy() - evaldict['_call_'] = caller - evaldict['decorator'] = decorator - return FunctionMaker.create( - '%s(%s)' % (caller.__name__, first), - 'return decorator(_call_, %s)' % first, - evaldict, undecorated=caller, __wrapped__=caller, - doc=caller.__doc__, module=caller.__module__) diff --git a/IPython/external/path/__init__.py b/IPython/external/path/__init__.py deleted file mode 100644 index 391a808..0000000 --- a/IPython/external/path/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -try: - from path import * -except ImportError: - from ._path import * diff --git a/IPython/external/path/_path.py b/IPython/external/path/_path.py deleted file mode 100644 index 3591850..0000000 --- a/IPython/external/path/_path.py +++ /dev/null @@ -1,1267 +0,0 @@ -# -# Copyright (c) 2010 Mikhail Gusarov -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -# - -""" path.py - An object representing a path to a file or directory. - -Original author: - Jason Orendorff - -Current maintainer: - Jason R. Coombs - -Contributors: - Mikhail Gusarov - Marc Abramowitz - Jason R. Coombs - Jason Chu - Vojislav Stojkovic - -Example:: - - from path import path - d = path('/home/guido/bin') - for f in d.files('*.py'): - f.chmod(0755) - -path.py requires Python 2.5 or later. 
-""" - -from __future__ import with_statement - -import sys -import warnings -import os -import fnmatch -import glob -import shutil -import codecs -import hashlib -import errno -import tempfile -import functools -import operator -import re - -try: - import win32security -except ImportError: - pass - -try: - import pwd -except ImportError: - pass - -################################ -# Monkey patchy python 3 support -try: - basestring -except NameError: - basestring = str - -try: - unicode -except NameError: - unicode = str - -try: - getcwdu = os.getcwdu -except AttributeError: - getcwdu = os.getcwd - -if sys.version < '3': - def u(x): - return codecs.unicode_escape_decode(x)[0] -else: - def u(x): - return x - -o777 = 511 -o766 = 502 -o666 = 438 -o554 = 364 -################################ - -__version__ = '4.3' -__all__ = ['path'] - - -class TreeWalkWarning(Warning): - pass - - -def simple_cache(func): - """ - Save results for the 'using_module' classmethod. - When Python 3.2 is available, use functools.lru_cache instead. - """ - saved_results = {} - - def wrapper(cls, module): - if module in saved_results: - return saved_results[module] - saved_results[module] = func(cls, module) - return saved_results[module] - return wrapper - - -class ClassProperty(property): - def __get__(self, cls, owner): - return self.fget.__get__(None, owner)() - - -class multimethod(object): - """ - Acts like a classmethod when invoked from the class and like an - instancemethod when invoked from the instance. - """ - def __init__(self, func): - self.func = func - - def __get__(self, instance, owner): - return ( - functools.partial(self.func, owner) if instance is None - else functools.partial(self.func, owner, instance) - ) - - -class path(unicode): - """ Represents a filesystem path. - - For documentation on individual methods, consult their - counterparts in os.path. - """ - - module = os.path - "The path module to use for path operations." - - def __init__(self, other=''): - if other is None: - raise TypeError("Invalid initial value for path: None") - - @classmethod - @simple_cache - def using_module(cls, module): - subclass_name = cls.__name__ + '_' + module.__name__ - bases = (cls,) - ns = {'module': module} - return type(subclass_name, bases, ns) - - @ClassProperty - @classmethod - def _next_class(cls): - """ - What class should be used to construct new instances from this class - """ - return cls - - # --- Special Python methods. - - def __repr__(self): - return '%s(%s)' % (type(self).__name__, super(path, self).__repr__()) - - # Adding a path and a string yields a path. - def __add__(self, more): - try: - return self._next_class(super(path, self).__add__(more)) - except TypeError: # Python bug - return NotImplemented - - def __radd__(self, other): - if not isinstance(other, basestring): - return NotImplemented - return self._next_class(other.__add__(self)) - - # The / operator joins paths. - def __div__(self, rel): - """ fp.__div__(rel) == fp / rel == fp.joinpath(rel) - - Join two path components, adding a separator character if - needed. - """ - return self._next_class(self.module.join(self, rel)) - - # Make the / operator work even when true division is enabled. - __truediv__ = __div__ - - def __enter__(self): - self._old_dir = self.getcwd() - os.chdir(self) - return self - - def __exit__(self, *_): - os.chdir(self._old_dir) - - @classmethod - def getcwd(cls): - """ Return the current working directory as a path object. """ - return cls(getcwdu()) - - # - # --- Operations on path strings. 
- - def abspath(self): - return self._next_class(self.module.abspath(self)) - - def normcase(self): - return self._next_class(self.module.normcase(self)) - - def normpath(self): - return self._next_class(self.module.normpath(self)) - - def realpath(self): - return self._next_class(self.module.realpath(self)) - - def expanduser(self): - return self._next_class(self.module.expanduser(self)) - - def expandvars(self): - return self._next_class(self.module.expandvars(self)) - - def dirname(self): - return self._next_class(self.module.dirname(self)) - - def basename(self): - return self._next_class(self.module.basename(self)) - - def expand(self): - """ Clean up a filename by calling expandvars(), - expanduser(), and normpath() on it. - - This is commonly everything needed to clean up a filename - read from a configuration file, for example. - """ - return self.expandvars().expanduser().normpath() - - @property - def namebase(self): - """ The same as path.name, but with one file extension stripped off. - - For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz', - but path('/home/guido/python.tar.gz').namebase == 'python.tar' - """ - base, ext = self.module.splitext(self.name) - return base - - @property - def ext(self): - """ The file extension, for example '.py'. """ - f, ext = self.module.splitext(self) - return ext - - @property - def drive(self): - """ The drive specifier, for example 'C:'. - This is always empty on systems that don't use drive specifiers. - """ - drive, r = self.module.splitdrive(self) - return self._next_class(drive) - - parent = property( - dirname, None, None, - """ This path's parent directory, as a new path object. - - For example, - path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib') - """) - - name = property( - basename, None, None, - """ The name of this file or directory without the full path. - - For example, path('/usr/local/lib/libpython.so').name == 'libpython.so' - """) - - def splitpath(self): - """ p.splitpath() -> Return (p.parent, p.name). """ - parent, child = self.module.split(self) - return self._next_class(parent), child - - def splitdrive(self): - """ p.splitdrive() -> Return (p.drive, ). - - Split the drive specifier from this path. If there is - no drive specifier, p.drive is empty, so the return value - is simply (path(''), p). This is always the case on Unix. - """ - drive, rel = self.module.splitdrive(self) - return self._next_class(drive), rel - - def splitext(self): - """ p.splitext() -> Return (p.stripext(), p.ext). - - Split the filename extension from this path and return - the two parts. Either part may be empty. - - The extension is everything from '.' to the end of the - last path segment. This has the property that if - (a, b) == p.splitext(), then a + b == p. - """ - filename, ext = self.module.splitext(self) - return self._next_class(filename), ext - - def stripext(self): - """ p.stripext() -> Remove one file extension from the path. - - For example, path('/home/guido/python.tar.gz').stripext() - returns path('/home/guido/python.tar'). - """ - return self.splitext()[0] - - def splitunc(self): - unc, rest = self.module.splitunc(self) - return self._next_class(unc), rest - - @property - def uncshare(self): - """ - The UNC mount point for this path. - This is empty for paths on local drives. 
- """ - unc, r = self.module.splitunc(self) - return self._next_class(unc) - - @multimethod - def joinpath(cls, first, *others): - """ - Join first to zero or more path components, adding a separator - character (first.module.sep) if needed. Returns a new instance of - first._next_class. - """ - if not isinstance(first, cls): - first = cls(first) - return first._next_class(first.module.join(first, *others)) - - def splitall(self): - r""" Return a list of the path components in this path. - - The first item in the list will be a path. Its value will be - either os.curdir, os.pardir, empty, or the root directory of - this path (for example, ``'/'`` or ``'C:\\'``). The other items in - the list will be strings. - - ``path.path.joinpath(*result)`` will yield the original path. - """ - parts = [] - loc = self - while loc != os.curdir and loc != os.pardir: - prev = loc - loc, child = prev.splitpath() - if loc == prev: - break - parts.append(child) - parts.append(loc) - parts.reverse() - return parts - - def relpath(self, start='.'): - """ Return this path as a relative path, - based from start, which defaults to the current working directory. - """ - cwd = self._next_class(start) - return cwd.relpathto(self) - - def relpathto(self, dest): - """ Return a relative path from self to dest. - - If there is no relative path from self to dest, for example if - they reside on different drives in Windows, then this returns - dest.abspath(). - """ - origin = self.abspath() - dest = self._next_class(dest).abspath() - - orig_list = origin.normcase().splitall() - # Don't normcase dest! We want to preserve the case. - dest_list = dest.splitall() - - if orig_list[0] != self.module.normcase(dest_list[0]): - # Can't get here from there. - return dest - - # Find the location where the two paths start to differ. - i = 0 - for start_seg, dest_seg in zip(orig_list, dest_list): - if start_seg != self.module.normcase(dest_seg): - break - i += 1 - - # Now i is the point where the two paths diverge. - # Need a certain number of "os.pardir"s to work up - # from the origin to the point of divergence. - segments = [os.pardir] * (len(orig_list) - i) - # Need to add the diverging part of dest_list. - segments += dest_list[i:] - if len(segments) == 0: - # If they happen to be identical, use os.curdir. - relpath = os.curdir - else: - relpath = self.module.join(*segments) - return self._next_class(relpath) - - # --- Listing, searching, walking, and matching - - def listdir(self, pattern=None): - """ D.listdir() -> List of items in this directory. - - Use D.files() or D.dirs() instead if you want a listing - of just files or just subdirectories. - - The elements of the list are path objects. - - With the optional 'pattern' argument, this only lists - items whose names match the given pattern. - """ - names = os.listdir(self) - if pattern is not None: - names = fnmatch.filter(names, pattern) - return [self / child for child in names] - - def dirs(self, pattern=None): - """ D.dirs() -> List of this directory's subdirectories. - - The elements of the list are path objects. - This does not walk recursively into subdirectories - (but see path.walkdirs). - - With the optional 'pattern' argument, this only lists - directories whose names match the given pattern. For - example, ``d.dirs('build-*')``. - """ - return [p for p in self.listdir(pattern) if p.isdir()] - - def files(self, pattern=None): - """ D.files() -> List of the files in this directory. - - The elements of the list are path objects. 
- This does not walk into subdirectories (see path.walkfiles). - - With the optional 'pattern' argument, this only lists files - whose names match the given pattern. For example, - ``d.files('*.pyc')``. - """ - - return [p for p in self.listdir(pattern) if p.isfile()] - - def walk(self, pattern=None, errors='strict'): - """ D.walk() -> iterator over files and subdirs, recursively. - - The iterator yields path objects naming each child item of - this directory and its descendants. This requires that - D.isdir(). - - This performs a depth-first traversal of the directory tree. - Each directory is returned just before all its children. - - The errors= keyword argument controls behavior when an - error occurs. The default is 'strict', which causes an - exception. The other allowed values are 'warn', which - reports the error via warnings.warn(), and 'ignore'. - """ - if errors not in ('strict', 'warn', 'ignore'): - raise ValueError("invalid errors parameter") - - try: - childList = self.listdir() - except Exception: - if errors == 'ignore': - return - elif errors == 'warn': - warnings.warn( - "Unable to list directory '%s': %s" - % (self, sys.exc_info()[1]), - TreeWalkWarning) - return - else: - raise - - for child in childList: - if pattern is None or child.fnmatch(pattern): - yield child - try: - isdir = child.isdir() - except Exception: - if errors == 'ignore': - isdir = False - elif errors == 'warn': - warnings.warn( - "Unable to access '%s': %s" - % (child, sys.exc_info()[1]), - TreeWalkWarning) - isdir = False - else: - raise - - if isdir: - for item in child.walk(pattern, errors): - yield item - - def walkdirs(self, pattern=None, errors='strict'): - """ D.walkdirs() -> iterator over subdirs, recursively. - - With the optional 'pattern' argument, this yields only - directories whose names match the given pattern. For - example, ``mydir.walkdirs('*test')`` yields only directories - with names ending in 'test'. - - The errors= keyword argument controls behavior when an - error occurs. The default is 'strict', which causes an - exception. The other allowed values are 'warn', which - reports the error via warnings.warn(), and 'ignore'. - """ - if errors not in ('strict', 'warn', 'ignore'): - raise ValueError("invalid errors parameter") - - try: - dirs = self.dirs() - except Exception: - if errors == 'ignore': - return - elif errors == 'warn': - warnings.warn( - "Unable to list directory '%s': %s" - % (self, sys.exc_info()[1]), - TreeWalkWarning) - return - else: - raise - - for child in dirs: - if pattern is None or child.fnmatch(pattern): - yield child - for subsubdir in child.walkdirs(pattern, errors): - yield subsubdir - - def walkfiles(self, pattern=None, errors='strict'): - """ D.walkfiles() -> iterator over files in D, recursively. - - The optional argument, pattern, limits the results to files - with names that match the pattern. For example, - ``mydir.walkfiles('*.tmp')`` yields only files with the .tmp - extension. 
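(walk(), walkdirs() and walkfiles() above all take the same glob-style `pattern` and `errors` arguments; a short usage sketch under the same `path.py` assumption as above::

    from path import path

    src = path('.')
    for py in src.walkfiles('*.py'):    # depth-first, pattern-filtered
        print(py.relpath())
    print(src.dirs('build-*'))          # immediate subdirectories only, no recursion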
- """ - if errors not in ('strict', 'warn', 'ignore'): - raise ValueError("invalid errors parameter") - - try: - childList = self.listdir() - except Exception: - if errors == 'ignore': - return - elif errors == 'warn': - warnings.warn( - "Unable to list directory '%s': %s" - % (self, sys.exc_info()[1]), - TreeWalkWarning) - return - else: - raise - - for child in childList: - try: - isfile = child.isfile() - isdir = not isfile and child.isdir() - except: - if errors == 'ignore': - continue - elif errors == 'warn': - warnings.warn( - "Unable to access '%s': %s" - % (self, sys.exc_info()[1]), - TreeWalkWarning) - continue - else: - raise - - if isfile: - if pattern is None or child.fnmatch(pattern): - yield child - elif isdir: - for f in child.walkfiles(pattern, errors): - yield f - - def fnmatch(self, pattern): - """ Return True if self.name matches the given pattern. - - pattern - A filename pattern with wildcards, - for example ``'*.py'``. - """ - return fnmatch.fnmatch(self.name, pattern) - - def glob(self, pattern): - """ Return a list of path objects that match the pattern. - - pattern - a path relative to this directory, with wildcards. - - For example, path('/users').glob('*/bin/*') returns a list - of all the files users have in their bin directories. - """ - cls = self._next_class - return [cls(s) for s in glob.glob(self / pattern)] - - # - # --- Reading or writing an entire file at once. - - def open(self, *args, **kwargs): - """ Open this file. Return a file object. """ - return open(self, *args, **kwargs) - - def bytes(self): - """ Open this file, read all bytes, return them as a string. """ - with self.open('rb') as f: - return f.read() - - def chunks(self, size, *args, **kwargs): - """ Returns a generator yielding chunks of the file, so it can - be read piece by piece with a simple for loop. - - Any argument you pass after `size` will be passed to `open()`. - - :example: - - >>> for chunk in path("file.txt").chunk(8192): - ... print(chunk) - - This will read the file by chunks of 8192 bytes. - """ - with open(self, *args, **kwargs) as f: - while True: - d = f.read(size) - if not d: - break - yield d - - def write_bytes(self, bytes, append=False): - """ Open this file and write the given bytes to it. - - Default behavior is to overwrite any existing file. - Call p.write_bytes(bytes, append=True) to append instead. - """ - if append: - mode = 'ab' - else: - mode = 'wb' - with self.open(mode) as f: - f.write(bytes) - - def text(self, encoding=None, errors='strict'): - r""" Open this file, read it in, return the content as a string. - - This method uses 'U' mode, so '\r\n' and '\r' are automatically - translated to '\n'. - - Optional arguments: - - encoding - The Unicode encoding (or character set) of - the file. If present, the content of the file is - decoded and returned as a unicode object; otherwise - it is returned as an 8-bit str. - errors - How to handle Unicode errors; see help(str.decode) - for the options. Default is 'strict'. - """ - if encoding is None: - # 8-bit - with self.open('U') as f: - return f.read() - else: - # Unicode - with codecs.open(self, 'r', encoding, errors) as f: - # (Note - Can't use 'U' mode here, since codecs.open - # doesn't support 'U' mode.) 
- t = f.read() - return (t.replace(u('\r\n'), u('\n')) - .replace(u('\r\x85'), u('\n')) - .replace(u('\r'), u('\n')) - .replace(u('\x85'), u('\n')) - .replace(u('\u2028'), u('\n'))) - - def write_text(self, text, encoding=None, errors='strict', - linesep=os.linesep, append=False): - r""" Write the given text to this file. - - The default behavior is to overwrite any existing file; - to append instead, use the 'append=True' keyword argument. - - There are two differences between path.write_text() and - path.write_bytes(): newline handling and Unicode handling. - See below. - - Parameters: - - - text - str/unicode - The text to be written. - - - encoding - str - The Unicode encoding that will be used. - This is ignored if 'text' isn't a Unicode string. - - - errors - str - How to handle Unicode encoding errors. - Default is 'strict'. See help(unicode.encode) for the - options. This is ignored if 'text' isn't a Unicode - string. - - - linesep - keyword argument - str/unicode - The sequence of - characters to be used to mark end-of-line. The default is - os.linesep. You can also specify None; this means to - leave all newlines as they are in 'text'. - - - append - keyword argument - bool - Specifies what to do if - the file already exists (True: append to the end of it; - False: overwrite it.) The default is False. - - - --- Newline handling. - - write_text() converts all standard end-of-line sequences - ('\n', '\r', and '\r\n') to your platform's default end-of-line - sequence (see os.linesep; on Windows, for example, the - end-of-line marker is '\r\n'). - - If you don't like your platform's default, you can override it - using the 'linesep=' keyword argument. If you specifically want - write_text() to preserve the newlines as-is, use 'linesep=None'. - - This applies to Unicode text the same as to 8-bit text, except - there are three additional standard Unicode end-of-line sequences: - u'\x85', u'\r\x85', and u'\u2028'. - - (This is slightly different from when you open a file for - writing with fopen(filename, "w") in C or open(filename, 'w') - in Python.) - - - --- Unicode - - If 'text' isn't Unicode, then apart from newline handling, the - bytes are written verbatim to the file. The 'encoding' and - 'errors' arguments are not used and must be omitted. - - If 'text' is Unicode, it is first converted to bytes using the - specified 'encoding' (or the default encoding if 'encoding' - isn't specified). The 'errors' argument applies only to this - conversion. - - """ - if isinstance(text, unicode): - if linesep is not None: - # Convert all standard end-of-line sequences to - # ordinary newline characters. - text = (text.replace(u('\r\n'), u('\n')) - .replace(u('\r\x85'), u('\n')) - .replace(u('\r'), u('\n')) - .replace(u('\x85'), u('\n')) - .replace(u('\u2028'), u('\n'))) - text = text.replace(u('\n'), linesep) - if encoding is None: - encoding = sys.getdefaultencoding() - bytes = text.encode(encoding, errors) - else: - # It is an error to specify an encoding if 'text' is - # an 8-bit string. - assert encoding is None - - if linesep is not None: - text = (text.replace('\r\n', '\n') - .replace('\r', '\n')) - bytes = text.replace('\n', linesep) - - self.write_bytes(bytes, append) - - def lines(self, encoding=None, errors='strict', retain=True): - r""" Open this file, read all lines, return them in a list. - - Optional arguments: - encoding - The Unicode encoding (or character set) of - the file. 
The default is None, meaning the content - of the file is read as 8-bit characters and returned - as a list of (non-Unicode) str objects. - errors - How to handle Unicode errors; see help(str.decode) - for the options. Default is 'strict' - retain - If true, retain newline characters; but all newline - character combinations ('\r', '\n', '\r\n') are - translated to '\n'. If false, newline characters are - stripped off. Default is True. - - This uses 'U' mode. - """ - if encoding is None and retain: - with self.open('U') as f: - return f.readlines() - else: - return self.text(encoding, errors).splitlines(retain) - - def write_lines(self, lines, encoding=None, errors='strict', - linesep=os.linesep, append=False): - r""" Write the given lines of text to this file. - - By default this overwrites any existing file at this path. - - This puts a platform-specific newline sequence on every line. - See 'linesep' below. - - lines - A list of strings. - - encoding - A Unicode encoding to use. This applies only if - 'lines' contains any Unicode strings. - - errors - How to handle errors in Unicode encoding. This - also applies only to Unicode strings. - - linesep - The desired line-ending. This line-ending is - applied to every line. If a line already has any - standard line ending ('\r', '\n', '\r\n', u'\x85', - u'\r\x85', u'\u2028'), that will be stripped off and - this will be used instead. The default is os.linesep, - which is platform-dependent ('\r\n' on Windows, '\n' on - Unix, etc.) Specify None to write the lines as-is, - like file.writelines(). - - Use the keyword argument append=True to append lines to the - file. The default is to overwrite the file. Warning: - When you use this with Unicode data, if the encoding of the - existing data in the file is different from the encoding - you specify with the encoding= parameter, the result is - mixed-encoding data, which can really confuse someone trying - to read the file later. - """ - if append: - mode = 'ab' - else: - mode = 'wb' - with self.open(mode) as f: - for line in lines: - isUnicode = isinstance(line, unicode) - if linesep is not None: - # Strip off any existing line-end and add the - # specified linesep string. - if isUnicode: - if line[-2:] in (u('\r\n'), u('\x0d\x85')): - line = line[:-2] - elif line[-1:] in (u('\r'), u('\n'), - u('\x85'), u('\u2028')): - line = line[:-1] - else: - if line[-2:] == '\r\n': - line = line[:-2] - elif line[-1:] in ('\r', '\n'): - line = line[:-1] - line += linesep - if isUnicode: - if encoding is None: - encoding = sys.getdefaultencoding() - line = line.encode(encoding, errors) - f.write(line) - - def read_md5(self): - """ Calculate the md5 hash for this file. - - This reads through the entire file. - """ - return self.read_hash('md5') - - def _hash(self, hash_name): - """ Returns a hash object for the file at the current path. - - `hash_name` should be a hash algo name such as 'md5' or 'sha1' - that's available in the `hashlib` module. - """ - m = hashlib.new(hash_name) - for chunk in self.chunks(8192): - m.update(chunk) - return m - - def read_hash(self, hash_name): - """ Calculate given hash for this file. - - List of supported hashes can be obtained from hashlib package. This - reads the entire file. - """ - return self._hash(hash_name).digest() - - def read_hexhash(self, hash_name): - """ Calculate given hash for this file, returning hexdigest. - - List of supported hashes can be obtained from hashlib package. This - reads the entire file. 
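(write_text(), lines() and the hash helpers above combine naturally; a sketch under the same `path.py` assumption, with an illustrative filename::

    from path import path

    f = path('notes.txt')
    f.write_text(u'alpha\nbeta\n')      # overwrite; pass append=True to append
    print(f.lines(retain=False))        # ['alpha', 'beta']
    print(f.read_hexhash('sha1'))       # hex digest of the file contents
    f.remove()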
- """ - return self._hash(hash_name).hexdigest() - - # --- Methods for querying the filesystem. - # N.B. On some platforms, the os.path functions may be implemented in C - # (e.g. isdir on Windows, Python 3.2.2), and compiled functions don't get - # bound. Playing it safe and wrapping them all in method calls. - - def isabs(self): - return self.module.isabs(self) - - def exists(self): - return self.module.exists(self) - - def isdir(self): - return self.module.isdir(self) - - def isfile(self): - return self.module.isfile(self) - - def islink(self): - return self.module.islink(self) - - def ismount(self): - return self.module.ismount(self) - - def samefile(self, other): - return self.module.samefile(self, other) - - def getatime(self): - return self.module.getatime(self) - - atime = property( - getatime, None, None, - """ Last access time of the file. """) - - def getmtime(self): - return self.module.getmtime(self) - - mtime = property( - getmtime, None, None, - """ Last-modified time of the file. """) - - def getctime(self): - return self.module.getctime(self) - - ctime = property( - getctime, None, None, - """ Creation time of the file. """) - - def getsize(self): - return self.module.getsize(self) - - size = property( - getsize, None, None, - """ Size of the file, in bytes. """) - - if hasattr(os, 'access'): - def access(self, mode): - """ Return true if current user has access to this path. - - mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK - """ - return os.access(self, mode) - - def stat(self): - """ Perform a stat() system call on this path. """ - return os.stat(self) - - def lstat(self): - """ Like path.stat(), but do not follow symbolic links. """ - return os.lstat(self) - - def __get_owner_windows(self): - r""" - Return the name of the owner of this file or directory. Follow - symbolic links. - - Return a name of the form ur'DOMAIN\User Name'; may be a group. - """ - desc = win32security.GetFileSecurity( - self, win32security.OWNER_SECURITY_INFORMATION) - sid = desc.GetSecurityDescriptorOwner() - account, domain, typecode = win32security.LookupAccountSid(None, sid) - return domain + u('\\') + account - - def __get_owner_unix(self): - """ - Return the name of the owner of this file or directory. Follow - symbolic links. - """ - st = self.stat() - return pwd.getpwuid(st.st_uid).pw_name - - def __get_owner_not_implemented(self): - raise NotImplementedError("Ownership not available on this platform.") - - if 'win32security' in globals(): - get_owner = __get_owner_windows - elif 'pwd' in globals(): - get_owner = __get_owner_unix - else: - get_owner = __get_owner_not_implemented - - owner = property( - get_owner, None, None, - """ Name of the owner of this file or directory. """) - - if hasattr(os, 'statvfs'): - def statvfs(self): - """ Perform a statvfs() system call on this path. """ - return os.statvfs(self) - - if hasattr(os, 'pathconf'): - def pathconf(self, name): - return os.pathconf(self, name) - - # - # --- Modifying operations on files and directories - - def utime(self, times): - """ Set the access and modified times of this file. 
""" - os.utime(self, times) - return self - - def chmod(self, mode): - os.chmod(self, mode) - return self - - if hasattr(os, 'chown'): - def chown(self, uid=-1, gid=-1): - os.chown(self, uid, gid) - return self - - def rename(self, new): - os.rename(self, new) - return self._next_class(new) - - def renames(self, new): - os.renames(self, new) - return self._next_class(new) - - # - # --- Create/delete operations on directories - - def mkdir(self, mode=o777): - os.mkdir(self, mode) - return self - - def mkdir_p(self, mode=o777): - try: - self.mkdir(mode) - except OSError: - _, e, _ = sys.exc_info() - if e.errno != errno.EEXIST: - raise - return self - - def makedirs(self, mode=o777): - os.makedirs(self, mode) - return self - - def makedirs_p(self, mode=o777): - try: - self.makedirs(mode) - except OSError: - _, e, _ = sys.exc_info() - if e.errno != errno.EEXIST: - raise - return self - - def rmdir(self): - os.rmdir(self) - return self - - def rmdir_p(self): - try: - self.rmdir() - except OSError: - _, e, _ = sys.exc_info() - if e.errno != errno.ENOTEMPTY and e.errno != errno.EEXIST: - raise - return self - - def removedirs(self): - os.removedirs(self) - return self - - def removedirs_p(self): - try: - self.removedirs() - except OSError: - _, e, _ = sys.exc_info() - if e.errno != errno.ENOTEMPTY and e.errno != errno.EEXIST: - raise - return self - - # --- Modifying operations on files - - def touch(self): - """ Set the access/modified times of this file to the current time. - Create the file if it does not exist. - """ - fd = os.open(self, os.O_WRONLY | os.O_CREAT, o666) - os.close(fd) - os.utime(self, None) - return self - - def remove(self): - os.remove(self) - return self - - def remove_p(self): - try: - self.unlink() - except OSError: - _, e, _ = sys.exc_info() - if e.errno != errno.ENOENT: - raise - return self - - def unlink(self): - os.unlink(self) - return self - - def unlink_p(self): - self.remove_p() - return self - - # --- Links - - if hasattr(os, 'link'): - def link(self, newpath): - """ Create a hard link at 'newpath', pointing to this file. """ - os.link(self, newpath) - return self._next_class(newpath) - - if hasattr(os, 'symlink'): - def symlink(self, newlink): - """ Create a symbolic link at 'newlink', pointing here. """ - os.symlink(self, newlink) - return self._next_class(newlink) - - if hasattr(os, 'readlink'): - def readlink(self): - """ Return the path to which this symbolic link points. - - The result may be an absolute or a relative path. - """ - return self._next_class(os.readlink(self)) - - def readlinkabs(self): - """ Return the path to which this symbolic link points. - - The result is always an absolute path. 
- """ - p = self.readlink() - if p.isabs(): - return p - else: - return (self.parent / p).abspath() - - # - # --- High-level functions from shutil - - copyfile = shutil.copyfile - copymode = shutil.copymode - copystat = shutil.copystat - copy = shutil.copy - copy2 = shutil.copy2 - copytree = shutil.copytree - if hasattr(shutil, 'move'): - move = shutil.move - rmtree = shutil.rmtree - - def rmtree_p(self): - try: - self.rmtree() - except OSError: - _, e, _ = sys.exc_info() - if e.errno != errno.ENOENT: - raise - return self - - def chdir(self): - os.chdir(self) - - cd = chdir - - # - # --- Special stuff from os - - if hasattr(os, 'chroot'): - def chroot(self): - os.chroot(self) - - if hasattr(os, 'startfile'): - def startfile(self): - os.startfile(self) - return self - - -class tempdir(path): - """ - A temporary directory via tempfile.mkdtemp, and constructed with the - same parameters that you can use as a context manager. - - Example: - - with tempdir() as d: - # do stuff with the path object "d" - - # here the directory is deleted automatically - """ - - @ClassProperty - @classmethod - def _next_class(cls): - return path - - def __new__(cls, *args, **kwargs): - dirname = tempfile.mkdtemp(*args, **kwargs) - return super(tempdir, cls).__new__(cls, dirname) - - def __init__(self, *args, **kwargs): - pass - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - if not exc_value: - self.rmtree() - - -def _permission_mask(mode): - """ - Convert a Unix chmod symbolic mode like 'ugo+rwx' to a function - suitable for applying to a mask to affect that change. - - >>> mask = _permission_mask('ugo+rwx') - >>> oct(mask(o554)) - 'o777' - - >>> oct(_permission_mask('gw-x')(o777)) - 'o766' - """ - parsed = re.match('(?P[ugo]+)(?P[-+])(?P[rwx]+)$', mode) - if not parsed: - raise ValueError("Unrecognized symbolic mode", mode) - spec_map = dict(r=4, w=2, x=1) - spec = reduce(operator.or_, [spec_map[perm] - for perm in parsed.group('what')]) - # now apply spec to each in who - shift_map = dict(u=6, g=3, o=0) - mask = reduce(operator.or_, [spec << shift_map[subj] - for subj in parsed.group('who')]) - - op = parsed.group('op') - # if op is -, invert the mask - if op == '-': - mask ^= o777 - - op_map = {'+': operator.or_, '-': operator.and_} - return functools.partial(op_map[op], mask) diff --git a/IPython/external/pexpect/__init__.py b/IPython/external/pexpect/__init__.py deleted file mode 100644 index a0cf671..0000000 --- a/IPython/external/pexpect/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -try: - import pexpect - from pexpect import * -except ImportError: - from ._pexpect import * diff --git a/IPython/external/pexpect/_pexpect.py b/IPython/external/pexpect/_pexpect.py deleted file mode 100644 index cace43b..0000000 --- a/IPython/external/pexpect/_pexpect.py +++ /dev/null @@ -1,2123 +0,0 @@ -'''Pexpect is a Python module for spawning child applications and controlling -them automatically. Pexpect can be used for automating interactive applications -such as ssh, ftp, passwd, telnet, etc. It can be used to a automate setup -scripts for duplicating software package installations on different servers. It -can be used for automated software testing. Pexpect is in the spirit of Don -Libes' Expect, but Pexpect is pure Python. Other Expect-like modules for Python -require TCL and Expect or require C extensions to be compiled. Pexpect does not -use C, Expect, or TCL extensions. It should work on any platform that supports -the standard Python pty module. 
The Pexpect interface focuses on ease of use so -that simple tasks are easy. - -There are two main interfaces to the Pexpect system; these are the function, -run() and the class, spawn. The spawn class is more powerful. The run() -function is simpler than spawn, and is good for quickly calling program. When -you call the run() function it executes a given program and then returns the -output. This is a handy replacement for os.system(). - -For example:: - - pexpect.run('ls -la') - -The spawn class is the more powerful interface to the Pexpect system. You can -use this to spawn a child program then interact with it by sending input and -expecting responses (waiting for patterns in the child's output). - -For example:: - - child = pexpect.spawn('scp foo user@example.com:.') - child.expect('Password:') - child.sendline(mypassword) - -This works even for commands that ask for passwords or other input outside of -the normal stdio streams. For example, ssh reads input directly from the TTY -device which bypasses stdin. - -Credits: Noah Spurrier, Richard Holden, Marco Molteni, Kimberley Burchett, -Robert Stone, Hartmut Goebel, Chad Schroeder, Erick Tryzelaar, Dave Kirby, Ids -vander Molen, George Todd, Noel Taylor, Nicolas D. Cesar, Alexander Gattin, -Jacques-Etienne Baudoux, Geoffrey Marshall, Francisco Lourenco, Glen Mabey, -Karthik Gurusamy, Fernando Perez, Corey Minyard, Jon Cohen, Guillaume -Chazarain, Andrew Ryan, Nick Craig-Wood, Andrew Stone, Jorgen Grahn, John -Spiegel, Jan Grant, and Shane Kerr. Let me know if I forgot anyone. - -Pexpect is free, open source, and all that good stuff. -http://pexpect.sourceforge.net/ - -PEXPECT LICENSE - - This license is approved by the OSI and FSF as GPL-compatible. - http://opensource.org/licenses/isc-license.txt - - Copyright (c) 2012, Noah Spurrier - PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY - PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE - COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES. - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -''' - -try: - import os - import sys - import time - import select - import re - import struct - import resource - import types - import pty - import tty - import termios - import fcntl - import errno - import traceback - import signal - import codecs - import stat -except ImportError: # pragma: no cover - err = sys.exc_info()[1] - raise ImportError(str(err) + ''' - -A critical module was not found. Probably this operating system does not -support it. Pexpect is intended for UNIX-like operating systems.''') - -__version__ = '3.3' -__revision__ = '' -__all__ = ['ExceptionPexpect', 'EOF', 'TIMEOUT', 'spawn', 'spawnu', 'run', 'runu', - 'which', 'split_command_line', '__version__', '__revision__'] - -PY3 = (sys.version_info[0] >= 3) - -# Exception classes used by this module. -class ExceptionPexpect(Exception): - '''Base class for all exceptions raised by this module. 
- ''' - - def __init__(self, value): - super(ExceptionPexpect, self).__init__(value) - self.value = value - - def __str__(self): - return str(self.value) - - def get_trace(self): - '''This returns an abbreviated stack trace with lines that only concern - the caller. In other words, the stack trace inside the Pexpect module - is not included. ''' - - tblist = traceback.extract_tb(sys.exc_info()[2]) - tblist = [item for item in tblist if 'pexpect/__init__' not in item[0]] - tblist = traceback.format_list(tblist) - return ''.join(tblist) - - -class EOF(ExceptionPexpect): - '''Raised when EOF is read from a child. - This usually means the child has exited.''' - - -class TIMEOUT(ExceptionPexpect): - '''Raised when a read time exceeds the timeout. ''' - -##class TIMEOUT_PATTERN(TIMEOUT): -## '''Raised when the pattern match time exceeds the timeout. -## This is different than a read TIMEOUT because the child process may -## give output, thus never give a TIMEOUT, but the output -## may never match a pattern. -## ''' -##class MAXBUFFER(ExceptionPexpect): -## '''Raised when a buffer fills before matching an expected pattern.''' - - -def run(command, timeout=-1, withexitstatus=False, events=None, - extra_args=None, logfile=None, cwd=None, env=None): - - ''' - This function runs the given command; waits for it to finish; then - returns all output as a string. STDERR is included in output. If the full - path to the command is not given then the path is searched. - - Note that lines are terminated by CR/LF (\\r\\n) combination even on - UNIX-like systems because this is the standard for pseudottys. If you set - 'withexitstatus' to true, then run will return a tuple of (command_output, - exitstatus). If 'withexitstatus' is false then this returns just - command_output. - - The run() function can often be used instead of creating a spawn instance. - For example, the following code uses spawn:: - - from pexpect import * - child = spawn('scp foo user@example.com:.') - child.expect('(?i)password') - child.sendline(mypassword) - - The previous code can be replace with the following:: - - from pexpect import * - run('scp foo user@example.com:.', events={'(?i)password': mypassword}) - - **Examples** - - Start the apache daemon on the local machine:: - - from pexpect import * - run("/usr/local/apache/bin/apachectl start") - - Check in a file using SVN:: - - from pexpect import * - run("svn ci -m 'automatic commit' my_file.py") - - Run a command and capture exit status:: - - from pexpect import * - (command_output, exitstatus) = run('ls -l /bin', withexitstatus=1) - - The following will run SSH and execute 'ls -l' on the remote machine. The - password 'secret' will be sent if the '(?i)password' pattern is ever seen:: - - run("ssh username@machine.example.com 'ls -l'", - events={'(?i)password':'secret\\n'}) - - This will start mencoder to rip a video from DVD. This will also display - progress ticks every 5 seconds as it runs. For example:: - - from pexpect import * - def print_ticks(d): - print d['event_count'], - run("mencoder dvd://1 -o video.avi -oac copy -ovc copy", - events={TIMEOUT:print_ticks}, timeout=5) - - The 'events' argument should be a dictionary of patterns and responses. - Whenever one of the patterns is seen in the command out run() will send the - associated response string. Note that you should put newlines in your - string if Enter is necessary. The responses may also contain callback - functions. Any callback is function that takes a dictionary as an argument. 
- The dictionary contains all the locals from the run() function, so you can - access the child spawn object or any other variable defined in run() - (event_count, child, and extra_args are the most useful). A callback may - return True to stop the current run process otherwise run() continues until - the next event. A callback may also return a string which will be sent to - the child. 'extra_args' is not used by directly run(). It provides a way to - pass data to a callback function through run() through the locals - dictionary passed to a callback. - ''' - return _run(command, timeout=timeout, withexitstatus=withexitstatus, - events=events, extra_args=extra_args, logfile=logfile, cwd=cwd, - env=env, _spawn=spawn) - -def runu(command, timeout=-1, withexitstatus=False, events=None, - extra_args=None, logfile=None, cwd=None, env=None, **kwargs): - """This offers the same interface as :func:`run`, but using unicode. - - Like :class:`spawnu`, you can pass ``encoding`` and ``errors`` parameters, - which will be used for both input and output. - """ - return _run(command, timeout=timeout, withexitstatus=withexitstatus, - events=events, extra_args=extra_args, logfile=logfile, cwd=cwd, - env=env, _spawn=spawnu, **kwargs) - -def _run(command, timeout, withexitstatus, events, extra_args, logfile, cwd, - env, _spawn, **kwargs): - if timeout == -1: - child = _spawn(command, maxread=2000, logfile=logfile, cwd=cwd, env=env, - **kwargs) - else: - child = _spawn(command, timeout=timeout, maxread=2000, logfile=logfile, - cwd=cwd, env=env, **kwargs) - if events is not None: - patterns = list(events.keys()) - responses = list(events.values()) - else: - # This assumes EOF or TIMEOUT will eventually cause run to terminate. - patterns = None - responses = None - child_result_list = [] - event_count = 0 - while True: - try: - index = child.expect(patterns) - if isinstance(child.after, child.allowed_string_types): - child_result_list.append(child.before + child.after) - else: - # child.after may have been a TIMEOUT or EOF, - # which we don't want appended to the list. - child_result_list.append(child.before) - if isinstance(responses[index], child.allowed_string_types): - child.send(responses[index]) - elif isinstance(responses[index], types.FunctionType): - callback_result = responses[index](locals()) - sys.stdout.flush() - if isinstance(callback_result, child.allowed_string_types): - child.send(callback_result) - elif callback_result: - break - else: - raise TypeError('The callback must be a string or function.') - event_count = event_count + 1 - except TIMEOUT: - child_result_list.append(child.before) - break - except EOF: - child_result_list.append(child.before) - break - child_result = child.string_type().join(child_result_list) - if withexitstatus: - child.close() - return (child_result, child.exitstatus) - else: - return child_result - -class spawn(object): - '''This is the main class interface for Pexpect. Use this class to start - and control child applications. 
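# Illustrative sketch (editorial addition, not part of the original pexpect
# source): calling run() with an 'events' dictionary as described in the
# docstring above. The host, password and print_tick callback are
# hypothetical placeholders.
import pexpect

def print_tick(run_locals):
    # Called on each TIMEOUT event; run() passes its locals() to callbacks.
    print('tick %d' % run_locals['event_count'])

output = pexpect.run(
    "ssh user@example.com 'ls -l'",
    events={'(?i)password': 'secret\n',    # string response: sent to the child
            pexpect.TIMEOUT: print_tick},  # callable response: invoked with run()'s locals
    timeout=5)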
''' - string_type = bytes - if PY3: - allowed_string_types = (bytes, str) - @staticmethod - def _chr(c): - return bytes([c]) - linesep = os.linesep.encode('ascii') - crlf = '\r\n'.encode('ascii') - - @staticmethod - def write_to_stdout(b): - try: - return sys.stdout.buffer.write(b) - except AttributeError: - # If stdout has been replaced, it may not have .buffer - return sys.stdout.write(b.decode('ascii', 'replace')) - else: - allowed_string_types = (basestring,) # analysis:ignore - _chr = staticmethod(chr) - linesep = os.linesep - crlf = '\r\n' - write_to_stdout = sys.stdout.write - - encoding = None - - def __init__(self, command, args=[], timeout=30, maxread=2000, - searchwindowsize=None, logfile=None, cwd=None, env=None, - ignore_sighup=True, echo=True): - - '''This is the constructor. The command parameter may be a string that - includes a command and any arguments to the command. For example:: - - child = pexpect.spawn('/usr/bin/ftp') - child = pexpect.spawn('/usr/bin/ssh user@example.com') - child = pexpect.spawn('ls -latr /tmp') - - You may also construct it with a list of arguments like so:: - - child = pexpect.spawn('/usr/bin/ftp', []) - child = pexpect.spawn('/usr/bin/ssh', ['user@example.com']) - child = pexpect.spawn('ls', ['-latr', '/tmp']) - - After this the child application will be created and will be ready to - talk to. For normal use, see expect() and send() and sendline(). - - Remember that Pexpect does NOT interpret shell meta characters such as - redirect, pipe, or wild cards (``>``, ``|``, or ``*``). This is a - common mistake. If you want to run a command and pipe it through - another command then you must also start a shell. For example:: - - child = pexpect.spawn('/bin/bash -c "ls -l | grep LOG > logs.txt"') - child.expect(pexpect.EOF) - - The second form of spawn (where you pass a list of arguments) is useful - in situations where you wish to spawn a command and pass it its own - argument list. This can make syntax more clear. For example, the - following is equivalent to the previous example:: - - shell_cmd = 'ls -l | grep LOG > logs.txt' - child = pexpect.spawn('/bin/bash', ['-c', shell_cmd]) - child.expect(pexpect.EOF) - - The maxread attribute sets the read buffer size. This is maximum number - of bytes that Pexpect will try to read from a TTY at one time. Setting - the maxread size to 1 will turn off buffering. Setting the maxread - value higher may help performance in cases where large amounts of - output are read back from the child. This feature is useful in - conjunction with searchwindowsize. - - The searchwindowsize attribute sets the how far back in the incoming - seach buffer Pexpect will search for pattern matches. Every time - Pexpect reads some data from the child it will append the data to the - incoming buffer. The default is to search from the beginning of the - incoming buffer each time new data is read from the child. But this is - very inefficient if you are running a command that generates a large - amount of data where you want to match. The searchwindowsize does not - affect the size of the incoming data buffer. You will still have - access to the full buffer after expect() returns. - - The logfile member turns on or off logging. All input and output will - be copied to the given file object. Set logfile to None to stop - logging. This is the default. Set logfile to sys.stdout to echo - everything to standard output. The logfile is flushed after each write. 
- - Example log input and output to a file:: - - child = pexpect.spawn('some_command') - fout = file('mylog.txt','w') - child.logfile = fout - - Example log to stdout:: - - child = pexpect.spawn('some_command') - child.logfile = sys.stdout - - The logfile_read and logfile_send members can be used to separately log - the input from the child and output sent to the child. Sometimes you - don't want to see everything you write to the child. You only want to - log what the child sends back. For example:: - - child = pexpect.spawn('some_command') - child.logfile_read = sys.stdout - - To separately log output sent to the child use logfile_send:: - - self.logfile_send = fout - - If ``ignore_sighup`` is True, the child process will ignore SIGHUP - signals. For now, the default is True, to preserve the behaviour of - earlier versions of Pexpect, but you should pass this explicitly if you - want to rely on it. - - The delaybeforesend helps overcome a weird behavior that many users - were experiencing. The typical problem was that a user would expect() a - "Password:" prompt and then immediately call sendline() to send the - password. The user would then see that their password was echoed back - to them. Passwords don't normally echo. The problem is caused by the - fact that most applications print out the "Password" prompt and then - turn off stdin echo, but if you send your password before the - application turned off echo, then you get your password echoed. - Normally this wouldn't be a problem when interacting with a human at a - real keyboard. If you introduce a slight delay just before writing then - this seems to clear up the problem. This was such a common problem for - many users that I decided that the default pexpect behavior should be - to sleep just before writing to the child application. 1/20th of a - second (50 ms) seems to be enough to clear up the problem. You can set - delaybeforesend to 0 to return to the old behavior. Most Linux machines - don't like this to be below 0.03. I don't know why. - - Note that spawn is clever about finding commands on your path. - It uses the same logic that "which" uses to find executables. - - If you wish to get the exit status of the child you must call the - close() method. The exit or signal status of the child will be stored - in self.exitstatus or self.signalstatus. If the child exited normally - then exitstatus will store the exit return code and signalstatus will - be None. If the child was terminated abnormally with a signal then - signalstatus will store the signal value and exitstatus will be None. - If you need more detail you can also read the self.status member which - stores the status returned by os.waitpid. You can interpret this using - os.WIFEXITED/os.WEXITSTATUS or os.WIFSIGNALED/os.TERMSIG. - - The echo attribute may be set to False to disable echoing of input. - As a pseudo-terminal, all input echoed by the "keyboard" (send() - or sendline()) will be repeated to output. For many cases, it is - not desirable to have echo enabled, and it may be later disabled - using setecho(False) followed by waitnoecho(). However, for some - platforms such as Solaris, this is not possible, and should be - disabled immediately on spawn. 
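# Illustrative sketch (editorial addition, not part of the original pexpect
# source): running a shell pipeline through /bin/bash and mirroring the whole
# session to a log file, per the constructor docstring above. The command and
# file names are placeholders; the log file is opened in binary mode because
# this spawn class works with bytes.
import pexpect

shell_cmd = 'ls -l | grep LOG > logs.txt'
child = pexpect.spawn('/bin/bash', ['-c', shell_cmd])
with open('mylog.txt', 'wb') as fout:
    child.logfile = fout           # every byte read from or sent to the child
    child.expect(pexpect.EOF)      # wait for the pipeline to finish
child.close()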
- ''' - - self.STDIN_FILENO = pty.STDIN_FILENO - self.STDOUT_FILENO = pty.STDOUT_FILENO - self.STDERR_FILENO = pty.STDERR_FILENO - self.stdin = sys.stdin - self.stdout = sys.stdout - self.stderr = sys.stderr - - self.searcher = None - self.ignorecase = False - self.before = None - self.after = None - self.match = None - self.match_index = None - self.terminated = True - self.exitstatus = None - self.signalstatus = None - # status returned by os.waitpid - self.status = None - self.flag_eof = False - self.pid = None - # the child file descriptor is initially closed - self.child_fd = -1 - self.timeout = timeout - self.delimiter = EOF - self.logfile = logfile - # input from child (read_nonblocking) - self.logfile_read = None - # output to send (send, sendline) - self.logfile_send = None - # max bytes to read at one time into buffer - self.maxread = maxread - # This is the read buffer. See maxread. - self.buffer = self.string_type() - # Data before searchwindowsize point is preserved, but not searched. - self.searchwindowsize = searchwindowsize - # Delay used before sending data to child. Time in seconds. - # Most Linux machines don't like this to be below 0.03 (30 ms). - self.delaybeforesend = 0.05 - # Used by close() to give kernel time to update process status. - # Time in seconds. - self.delayafterclose = 0.1 - # Used by terminate() to give kernel time to update process status. - # Time in seconds. - self.delayafterterminate = 0.1 - self.softspace = False - self.name = '<' + repr(self) + '>' - self.closed = True - self.cwd = cwd - self.env = env - self.echo = echo - self.ignore_sighup = ignore_sighup - _platform = sys.platform.lower() - # This flags if we are running on irix - self.__irix_hack = _platform.startswith('irix') - # Solaris uses internal __fork_pty(). All others use pty.fork(). - self.use_native_pty_fork = not ( - _platform.startswith('solaris') or - _platform.startswith('sunos')) - # inherit EOF and INTR definitions from controlling process. - try: - from termios import VEOF, VINTR - fd = sys.__stdin__.fileno() - self._INTR = ord(termios.tcgetattr(fd)[6][VINTR]) - self._EOF = ord(termios.tcgetattr(fd)[6][VEOF]) - except (ImportError, OSError, IOError, termios.error): - # unless the controlling process is also not a terminal, - # such as cron(1). Fall-back to using CEOF and CINTR. - try: - from termios import CEOF, CINTR - (self._INTR, self._EOF) = (CINTR, CEOF) - except ImportError: - # ^C, ^D - (self._INTR, self._EOF) = (3, 4) - # Support subclasses that do not use command or args. - if command is None: - self.command = None - self.args = None - self.name = '' - else: - self._spawn(command, args) - - @staticmethod - def _coerce_expect_string(s): - if not isinstance(s, bytes): - return s.encode('ascii') - return s - - @staticmethod - def _coerce_send_string(s): - if not isinstance(s, bytes): - return s.encode('utf-8') - return s - - @staticmethod - def _coerce_read_string(s): - return s - - def __del__(self): - '''This makes sure that no system resources are left open. Python only - garbage collects Python objects. OS file descriptors are not Python - objects, so they must be handled explicitly. If the child file - descriptor was opened outside of this class (passed to the constructor) - then this does not close it. ''' - - if not self.closed: - # It is possible for __del__ methods to execute during the - # teardown of the Python VM itself. Thus self.close() may - # trigger an exception because os.close may be None. 
- try: - self.close() - # which exception, shouldnt' we catch explicitly .. ? - except: - pass - - def __str__(self): - '''This returns a human-readable string that represents the state of - the object. ''' - - s = [] - s.append(repr(self)) - s.append('version: ' + __version__) - s.append('command: ' + str(self.command)) - s.append('args: %r' % (self.args,)) - s.append('searcher: %r' % (self.searcher,)) - s.append('buffer (last 100 chars): %r' % (self.buffer)[-100:],) - s.append('before (last 100 chars): %r' % (self.before)[-100:],) - s.append('after: %r' % (self.after,)) - s.append('match: %r' % (self.match,)) - s.append('match_index: ' + str(self.match_index)) - s.append('exitstatus: ' + str(self.exitstatus)) - s.append('flag_eof: ' + str(self.flag_eof)) - s.append('pid: ' + str(self.pid)) - s.append('child_fd: ' + str(self.child_fd)) - s.append('closed: ' + str(self.closed)) - s.append('timeout: ' + str(self.timeout)) - s.append('delimiter: ' + str(self.delimiter)) - s.append('logfile: ' + str(self.logfile)) - s.append('logfile_read: ' + str(self.logfile_read)) - s.append('logfile_send: ' + str(self.logfile_send)) - s.append('maxread: ' + str(self.maxread)) - s.append('ignorecase: ' + str(self.ignorecase)) - s.append('searchwindowsize: ' + str(self.searchwindowsize)) - s.append('delaybeforesend: ' + str(self.delaybeforesend)) - s.append('delayafterclose: ' + str(self.delayafterclose)) - s.append('delayafterterminate: ' + str(self.delayafterterminate)) - return '\n'.join(s) - - def _spawn(self, command, args=[]): - '''This starts the given command in a child process. This does all the - fork/exec type of stuff for a pty. This is called by __init__. If args - is empty then command will be parsed (split on spaces) and args will be - set to parsed arguments. ''' - - # The pid and child_fd of this object get set by this method. - # Note that it is difficult for this method to fail. - # You cannot detect if the child process cannot start. - # So the only way you can tell if the child process started - # or not is to try to read from the file descriptor. If you get - # EOF immediately then it means that the child is already dead. - # That may not necessarily be bad because you may have spawned a child - # that performs some task; creates no stdout output; and then dies. - - # If command is an int type then it may represent a file descriptor. - if isinstance(command, type(0)): - raise ExceptionPexpect('Command is an int type. ' + - 'If this is a file descriptor then maybe you want to ' + - 'use fdpexpect.fdspawn which takes an existing ' + - 'file descriptor instead of a command string.') - - if not isinstance(args, type([])): - raise TypeError('The argument, args, must be a list.') - - if args == []: - self.args = split_command_line(command) - self.command = self.args[0] - else: - # Make a shallow copy of the args list. - self.args = args[:] - self.args.insert(0, command) - self.command = command - - command_with_path = which(self.command) - if command_with_path is None: - raise ExceptionPexpect('The command was not found or was not ' + - 'executable: %s.' % self.command) - self.command = command_with_path - self.args[0] = self.command - - self.name = '<' + ' '.join(self.args) + '>' - - assert self.pid is None, 'The pid member must be None.' - assert self.command is not None, 'The command member must not be None.' 
- - if self.use_native_pty_fork: - try: - self.pid, self.child_fd = pty.fork() - except OSError: # pragma: no cover - err = sys.exc_info()[1] - raise ExceptionPexpect('pty.fork() failed: ' + str(err)) - else: - # Use internal __fork_pty - self.pid, self.child_fd = self.__fork_pty() - - # Some platforms must call setwinsize() and setecho() from the - # child process, and others from the master process. We do both, - # allowing IOError for either. - - if self.pid == pty.CHILD: - # Child - self.child_fd = self.STDIN_FILENO - - # set default window size of 24 rows by 80 columns - try: - self.setwinsize(24, 80) - except IOError as err: - if err.args[0] not in (errno.EINVAL, errno.ENOTTY): - raise - - # disable echo if spawn argument echo was unset - if not self.echo: - try: - self.setecho(self.echo) - except (IOError, termios.error) as err: - if err.args[0] not in (errno.EINVAL, errno.ENOTTY): - raise - - # Do not allow child to inherit open file descriptors from parent. - max_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[0] - os.closerange(3, max_fd) - - if self.ignore_sighup: - signal.signal(signal.SIGHUP, signal.SIG_IGN) - - if self.cwd is not None: - os.chdir(self.cwd) - if self.env is None: - os.execv(self.command, self.args) - else: - os.execvpe(self.command, self.args, self.env) - - # Parent - try: - self.setwinsize(24, 80) - except IOError as err: - if err.args[0] not in (errno.EINVAL, errno.ENOTTY): - raise - - - self.terminated = False - self.closed = False - - def __fork_pty(self): - '''This implements a substitute for the forkpty system call. This - should be more portable than the pty.fork() function. Specifically, - this should work on Solaris. - - Modified 10.06.05 by Geoff Marshall: Implemented __fork_pty() method to - resolve the issue with Python's pty.fork() not supporting Solaris, - particularly ssh. Based on patch to posixmodule.c authored by Noah - Spurrier:: - - http://mail.python.org/pipermail/python-dev/2003-May/035281.html - - ''' - - parent_fd, child_fd = os.openpty() - if parent_fd < 0 or child_fd < 0: - raise ExceptionPexpect("Could not open with os.openpty().") - - pid = os.fork() - if pid == pty.CHILD: - # Child. - os.close(parent_fd) - self.__pty_make_controlling_tty(child_fd) - - os.dup2(child_fd, self.STDIN_FILENO) - os.dup2(child_fd, self.STDOUT_FILENO) - os.dup2(child_fd, self.STDERR_FILENO) - - else: - # Parent. - os.close(child_fd) - - return pid, parent_fd - - def __pty_make_controlling_tty(self, tty_fd): - '''This makes the pseudo-terminal the controlling tty. This should be - more portable than the pty.fork() function. Specifically, this should - work on Solaris. ''' - - child_name = os.ttyname(tty_fd) - - # Disconnect from controlling tty, if any. Raises OSError of ENXIO - # if there was no controlling tty to begin with, such as when - # executed by a cron(1) job. - try: - fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY) - os.close(fd) - except OSError as err: - if err.errno != errno.ENXIO: - raise - - os.setsid() - - # Verify we are disconnected from controlling tty by attempting to open - # it again. We expect that OSError of ENXIO should always be raised. - try: - fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY) - os.close(fd) - raise ExceptionPexpect("OSError of errno.ENXIO should be raised.") - except OSError as err: - if err.errno != errno.ENXIO: - raise - - # Verify we can open child pty. - fd = os.open(child_name, os.O_RDWR) - os.close(fd) - - # Verify we now have a controlling tty. 
- fd = os.open("/dev/tty", os.O_WRONLY) - os.close(fd) - - - def fileno(self): - '''This returns the file descriptor of the pty for the child. - ''' - return self.child_fd - - def close(self, force=True): - '''This closes the connection with the child application. Note that - calling close() more than once is valid. This emulates standard Python - behavior with files. Set force to True if you want to make sure that - the child is terminated (SIGKILL is sent if the child ignores SIGHUP - and SIGINT). ''' - - if not self.closed: - self.flush() - os.close(self.child_fd) - # Give kernel time to update process status. - time.sleep(self.delayafterclose) - if self.isalive(): - if not self.terminate(force): - raise ExceptionPexpect('Could not terminate the child.') - self.child_fd = -1 - self.closed = True - #self.pid = None - - def flush(self): - '''This does nothing. It is here to support the interface for a - File-like object. ''' - - pass - - def isatty(self): - '''This returns True if the file descriptor is open and connected to a - tty(-like) device, else False. - - On SVR4-style platforms implementing streams, such as SunOS and HP-UX, - the child pty may not appear as a terminal device. This means - methods such as setecho(), setwinsize(), getwinsize() may raise an - IOError. ''' - - return os.isatty(self.child_fd) - - def waitnoecho(self, timeout=-1): - '''This waits until the terminal ECHO flag is set False. This returns - True if the echo mode is off. This returns False if the ECHO flag was - not set False before the timeout. This can be used to detect when the - child is waiting for a password. Usually a child application will turn - off echo mode when it is waiting for the user to enter a password. For - example, instead of expecting the "password:" prompt you can wait for - the child to set ECHO off:: - - p = pexpect.spawn('ssh user@example.com') - p.waitnoecho() - p.sendline(mypassword) - - If timeout==-1 then this method will use the value in self.timeout. - If timeout==None then this method to block until ECHO flag is False. - ''' - - if timeout == -1: - timeout = self.timeout - if timeout is not None: - end_time = time.time() + timeout - while True: - if not self.getecho(): - return True - if timeout < 0 and timeout is not None: - return False - if timeout is not None: - timeout = end_time - time.time() - time.sleep(0.1) - - def getecho(self): - '''This returns the terminal echo mode. This returns True if echo is - on or False if echo is off. Child applications that are expecting you - to enter a password often set ECHO False. See waitnoecho(). - - Not supported on platforms where ``isatty()`` returns False. ''' - - try: - attr = termios.tcgetattr(self.child_fd) - except termios.error as err: - errmsg = 'getecho() may not be called on this platform' - if err.args[0] == errno.EINVAL: - raise IOError(err.args[0], '%s: %s.' % (err.args[1], errmsg)) - raise - - self.echo = bool(attr[3] & termios.ECHO) - return self.echo - - def setecho(self, state): - '''This sets the terminal echo mode on or off. Note that anything the - child sent before the echo will be lost, so you should be sure that - your input buffer is empty before you call setecho(). For example, the - following will work as expected:: - - p = pexpect.spawn('cat') # Echo is on by default. - p.sendline('1234') # We expect see this twice from the child... - p.expect(['1234']) # ... once from the tty echo... - p.expect(['1234']) # ... and again from cat itself. 
- p.setecho(False) # Turn off tty echo - p.sendline('abcd') # We will set this only once (echoed by cat). - p.sendline('wxyz') # We will set this only once (echoed by cat) - p.expect(['abcd']) - p.expect(['wxyz']) - - The following WILL NOT WORK because the lines sent before the setecho - will be lost:: - - p = pexpect.spawn('cat') - p.sendline('1234') - p.setecho(False) # Turn off tty echo - p.sendline('abcd') # We will set this only once (echoed by cat). - p.sendline('wxyz') # We will set this only once (echoed by cat) - p.expect(['1234']) - p.expect(['1234']) - p.expect(['abcd']) - p.expect(['wxyz']) - - - Not supported on platforms where ``isatty()`` returns False. - ''' - - errmsg = 'setecho() may not be called on this platform' - - try: - attr = termios.tcgetattr(self.child_fd) - except termios.error as err: - if err.args[0] == errno.EINVAL: - raise IOError(err.args[0], '%s: %s.' % (err.args[1], errmsg)) - raise - - if state: - attr[3] = attr[3] | termios.ECHO - else: - attr[3] = attr[3] & ~termios.ECHO - - try: - # I tried TCSADRAIN and TCSAFLUSH, but these were inconsistent and - # blocked on some platforms. TCSADRAIN would probably be ideal. - termios.tcsetattr(self.child_fd, termios.TCSANOW, attr) - except IOError as err: - if err.args[0] == errno.EINVAL: - raise IOError(err.args[0], '%s: %s.' % (err.args[1], errmsg)) - raise - - self.echo = state - - def _log(self, s, direction): - if self.logfile is not None: - self.logfile.write(s) - self.logfile.flush() - second_log = self.logfile_send if (direction=='send') else self.logfile_read - if second_log is not None: - second_log.write(s) - second_log.flush() - - def read_nonblocking(self, size=1, timeout=-1): - '''This reads at most size characters from the child application. It - includes a timeout. If the read does not complete within the timeout - period then a TIMEOUT exception is raised. If the end of file is read - then an EOF exception will be raised. If a log file was set using - setlog() then all data will also be written to the log file. - - If timeout is None then the read may block indefinitely. - If timeout is -1 then the self.timeout value is used. If timeout is 0 - then the child is polled and if there is no data immediately ready - then this will raise a TIMEOUT exception. - - The timeout refers only to the amount of time to read at least one - character. This is not effected by the 'size' parameter, so if you call - read_nonblocking(size=100, timeout=30) and only one character is - available right away then one character will be returned immediately. - It will not wait for 30 seconds for another 99 characters to come in. - - This is a wrapper around os.read(). It uses select.select() to - implement the timeout. ''' - - if self.closed: - raise ValueError('I/O operation on closed file.') - - if timeout == -1: - timeout = self.timeout - - # Note that some systems such as Solaris do not give an EOF when - # the child dies. In fact, you can still try to read - # from the child_fd -- it will block forever or until TIMEOUT. - # For this case, I test isalive() before doing any reading. - # If isalive() is false, then I pretend that this is the same as EOF. - if not self.isalive(): - # timeout of 0 means "poll" - r, w, e = self.__select([self.child_fd], [], [], 0) - if not r: - self.flag_eof = True - raise EOF('End Of File (EOF). Braindead platform.') - elif self.__irix_hack: - # Irix takes a long time before it realizes a child was terminated. 
- # FIXME So does this mean Irix systems are forced to always have - # FIXME a 2 second delay when calling read_nonblocking? That sucks. - r, w, e = self.__select([self.child_fd], [], [], 2) - if not r and not self.isalive(): - self.flag_eof = True - raise EOF('End Of File (EOF). Slow platform.') - - r, w, e = self.__select([self.child_fd], [], [], timeout) - - if not r: - if not self.isalive(): - # Some platforms, such as Irix, will claim that their - # processes are alive; timeout on the select; and - # then finally admit that they are not alive. - self.flag_eof = True - raise EOF('End of File (EOF). Very slow platform.') - else: - raise TIMEOUT('Timeout exceeded.') - - if self.child_fd in r: - try: - s = os.read(self.child_fd, size) - except OSError as err: - if err.args[0] == errno.EIO: - # Linux-style EOF - self.flag_eof = True - raise EOF('End Of File (EOF). Exception style platform.') - raise - if s == b'': - # BSD-style EOF - self.flag_eof = True - raise EOF('End Of File (EOF). Empty string style platform.') - - s = self._coerce_read_string(s) - self._log(s, 'read') - return s - - raise ExceptionPexpect('Reached an unexpected state.') # pragma: no cover - - def read(self, size=-1): - '''This reads at most "size" bytes from the file (less if the read hits - EOF before obtaining size bytes). If the size argument is negative or - omitted, read all data until EOF is reached. The bytes are returned as - a string object. An empty string is returned when EOF is encountered - immediately. ''' - - if size == 0: - return self.string_type() - if size < 0: - # delimiter default is EOF - self.expect(self.delimiter) - return self.before - - # I could have done this more directly by not using expect(), but - # I deliberately decided to couple read() to expect() so that - # I would catch any bugs early and ensure consistant behavior. - # It's a little less efficient, but there is less for me to - # worry about if I have to later modify read() or expect(). - # Note, it's OK if size==-1 in the regex. That just means it - # will never match anything in which case we stop only on EOF. - cre = re.compile(self._coerce_expect_string('.{%d}' % size), re.DOTALL) - # delimiter default is EOF - index = self.expect([cre, self.delimiter]) - if index == 0: - ### FIXME self.before should be ''. Should I assert this? - return self.after - return self.before - - def readline(self, size=-1): - '''This reads and returns one entire line. The newline at the end of - line is returned as part of the string, unless the file ends without a - newline. An empty string is returned if EOF is encountered immediately. - This looks for a newline as a CR/LF pair (\\r\\n) even on UNIX because - this is what the pseudotty device returns. So contrary to what you may - expect you will receive newlines as \\r\\n. - - If the size argument is 0 then an empty string is returned. In all - other cases the size argument is ignored, which is not standard - behavior for a file-like object. ''' - - if size == 0: - return self.string_type() - # delimiter default is EOF - index = self.expect([self.crlf, self.delimiter]) - if index == 0: - return self.before + self.crlf - else: - return self.before - - def __iter__(self): - '''This is to support iterators over a file-like object. - ''' - return iter(self.readline, self.string_type()) - - def readlines(self, sizehint=-1): - '''This reads until EOF using readline() and returns a list containing - the lines thus read. The optional 'sizehint' argument is ignored. 
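# Illustrative sketch (editorial addition, not part of the original pexpect
# source): iterating a spawn object calls readline() until EOF, so lines
# arrive CR/LF-terminated and, on Python 3, as bytes. The command is a
# placeholder.
import pexpect

child = pexpect.spawn('ls -1 /')
for line in child:                 # iter(child.readline, b'') under the hood
    print(line.rstrip())           # strip the trailing b'\r\n'
child.close()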
- Remember, because this reads until EOF that means the child - process should have closed its stdout. If you run this method on - a child that is still running with its stdout open then this - method will block until it timesout.''' - - lines = [] - while True: - line = self.readline() - if not line: - break - lines.append(line) - return lines - - def write(self, s): - '''This is similar to send() except that there is no return value. - ''' - - self.send(s) - - def writelines(self, sequence): - '''This calls write() for each element in the sequence. The sequence - can be any iterable object producing strings, typically a list of - strings. This does not add line separators. There is no return value. - ''' - - for s in sequence: - self.write(s) - - def send(self, s): - '''Sends string ``s`` to the child process, returning the number of - bytes written. If a logfile is specified, a copy is written to that - log. ''' - - time.sleep(self.delaybeforesend) - - s = self._coerce_send_string(s) - self._log(s, 'send') - - return self._send(s) - - def _send(self, s): - return os.write(self.child_fd, s) - - def sendline(self, s=''): - '''Wraps send(), sending string ``s`` to child process, with os.linesep - automatically appended. Returns number of bytes written. ''' - - n = self.send(s) - n = n + self.send(self.linesep) - return n - - def sendcontrol(self, char): - - '''Helper method that wraps send() with mnemonic access for sending control - character to the child (such as Ctrl-C or Ctrl-D). For example, to send - Ctrl-G (ASCII 7, bell, '\a'):: - - child.sendcontrol('g') - - See also, sendintr() and sendeof(). - ''' - - char = char.lower() - a = ord(char) - if a >= 97 and a <= 122: - a = a - ord('a') + 1 - return self.send(self._chr(a)) - d = {'@': 0, '`': 0, - '[': 27, '{': 27, - '\\': 28, '|': 28, - ']': 29, '}': 29, - '^': 30, '~': 30, - '_': 31, - '?': 127} - if char not in d: - return 0 - return self.send(self._chr(d[char])) - - def sendeof(self): - - '''This sends an EOF to the child. This sends a character which causes - the pending parent output buffer to be sent to the waiting child - program without waiting for end-of-line. If it is the first character - of the line, the read() in the user program returns 0, which signifies - end-of-file. This means to work as expected a sendeof() has to be - called at the beginning of a line. This method does not send a newline. - It is the responsibility of the caller to ensure the eof is sent at the - beginning of a line. ''' - - self.send(self._chr(self._EOF)) - - def sendintr(self): - - '''This sends a SIGINT to the child. It does not require - the SIGINT to be the first character on a line. ''' - - self.send(self._chr(self._INTR)) - - def eof(self): - - '''This returns True if the EOF exception was ever raised. - ''' - - return self.flag_eof - - def terminate(self, force=False): - - '''This forces a child process to terminate. It starts nicely with - SIGHUP and SIGINT. If "force" is True then moves onto SIGKILL. This - returns True if the child was terminated. This returns False if the - child could not be terminated. 
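# Illustrative sketch (editorial addition, not part of the original pexpect
# source): the send helpers defined above, assuming a 'python' interpreter is
# on PATH. sendline() appends os.linesep and sendcontrol('d') sends Ctrl-D,
# which the interpreter reads as end-of-input.
import pexpect

child = pexpect.spawn('python')
child.expect('>>> ')               # wait for the interactive prompt
child.sendline('1 + 1')            # send text plus a line separator
child.expect('>>> ')               # prompt returns after the result prints
child.sendcontrol('d')             # Ctrl-D: ask the interpreter to exit
child.expect(pexpect.EOF)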
''' - - if not self.isalive(): - return True - try: - self.kill(signal.SIGHUP) - time.sleep(self.delayafterterminate) - if not self.isalive(): - return True - self.kill(signal.SIGCONT) - time.sleep(self.delayafterterminate) - if not self.isalive(): - return True - self.kill(signal.SIGINT) - time.sleep(self.delayafterterminate) - if not self.isalive(): - return True - if force: - self.kill(signal.SIGKILL) - time.sleep(self.delayafterterminate) - if not self.isalive(): - return True - else: - return False - return False - except OSError: - # I think there are kernel timing issues that sometimes cause - # this to happen. I think isalive() reports True, but the - # process is dead to the kernel. - # Make one last attempt to see if the kernel is up to date. - time.sleep(self.delayafterterminate) - if not self.isalive(): - return True - else: - return False - - def wait(self): - - '''This waits until the child exits. This is a blocking call. This will - not read any data from the child, so this will block forever if the - child has unread output and has terminated. In other words, the child - may have printed output then called exit(), but, the child is - technically still alive until its output is read by the parent. ''' - - if self.isalive(): - pid, status = os.waitpid(self.pid, 0) - else: - raise ExceptionPexpect('Cannot wait for dead child process.') - self.exitstatus = os.WEXITSTATUS(status) - if os.WIFEXITED(status): - self.status = status - self.exitstatus = os.WEXITSTATUS(status) - self.signalstatus = None - self.terminated = True - elif os.WIFSIGNALED(status): - self.status = status - self.exitstatus = None - self.signalstatus = os.WTERMSIG(status) - self.terminated = True - elif os.WIFSTOPPED(status): # pragma: no cover - # You can't call wait() on a child process in the stopped state. - raise ExceptionPexpect('Called wait() on a stopped child ' + - 'process. This is not supported. Is some other ' + - 'process attempting job control with our child pid?') - return self.exitstatus - - def isalive(self): - - '''This tests if the child process is running or not. This is - non-blocking. If the child was terminated then this will read the - exitstatus or signalstatus of the child. This returns True if the child - process appears to be running or False if not. It can take literally - SECONDS for Solaris to return the right status. ''' - - if self.terminated: - return False - - if self.flag_eof: - # This is for Linux, which requires the blocking form - # of waitpid to get the status of a defunct process. - # This is super-lame. The flag_eof would have been set - # in read_nonblocking(), so this should be safe. - waitpid_options = 0 - else: - waitpid_options = os.WNOHANG - - try: - pid, status = os.waitpid(self.pid, waitpid_options) - except OSError: - err = sys.exc_info()[1] - # No child processes - if err.errno == errno.ECHILD: - raise ExceptionPexpect('isalive() encountered condition ' + - 'where "terminated" is 0, but there was no child ' + - 'process. Did someone else call waitpid() ' + - 'on our process?') - else: - raise err - - # I have to do this twice for Solaris. - # I can't even believe that I figured this out... - # If waitpid() returns 0 it means that no child process - # wishes to report, and the value of status is undefined. - if pid == 0: - try: - ### os.WNOHANG) # Solaris! - pid, status = os.waitpid(self.pid, waitpid_options) - except OSError as e: # pragma: no cover - # This should never happen... 
- if e.errno == errno.ECHILD: - raise ExceptionPexpect('isalive() encountered condition ' + - 'that should never happen. There was no child ' + - 'process. Did someone else call waitpid() ' + - 'on our process?') - else: - raise - - # If pid is still 0 after two calls to waitpid() then the process - # really is alive. This seems to work on all platforms, except for - # Irix which seems to require a blocking call on waitpid or select, - # so I let read_nonblocking take care of this situation - # (unfortunately, this requires waiting through the timeout). - if pid == 0: - return True - - if pid == 0: - return True - - if os.WIFEXITED(status): - self.status = status - self.exitstatus = os.WEXITSTATUS(status) - self.signalstatus = None - self.terminated = True - elif os.WIFSIGNALED(status): - self.status = status - self.exitstatus = None - self.signalstatus = os.WTERMSIG(status) - self.terminated = True - elif os.WIFSTOPPED(status): - raise ExceptionPexpect('isalive() encountered condition ' + - 'where child process is stopped. This is not ' + - 'supported. Is some other process attempting ' + - 'job control with our child pid?') - return False - - def kill(self, sig): - - '''This sends the given signal to the child application. In keeping - with UNIX tradition it has a misleading name. It does not necessarily - kill the child unless you send the right signal. ''' - - # Same as os.kill, but the pid is given for you. - if self.isalive(): - os.kill(self.pid, sig) - - def _pattern_type_err(self, pattern): - raise TypeError('got {badtype} ({badobj!r}) as pattern, must be one' - ' of: {goodtypes}, pexpect.EOF, pexpect.TIMEOUT'\ - .format(badtype=type(pattern), - badobj=pattern, - goodtypes=', '.join([str(ast)\ - for ast in self.allowed_string_types]) - ) - ) - - def compile_pattern_list(self, patterns): - - '''This compiles a pattern-string or a list of pattern-strings. - Patterns must be a StringType, EOF, TIMEOUT, SRE_Pattern, or a list of - those. Patterns may also be None which results in an empty list (you - might do this if waiting for an EOF or TIMEOUT condition without - expecting any pattern). - - This is used by expect() when calling expect_list(). Thus expect() is - nothing more than:: - - cpl = self.compile_pattern_list(pl) - return self.expect_list(cpl, timeout) - - If you are using expect() within a loop it may be more - efficient to compile the patterns first and then call expect_list(). - This avoid calls in a loop to compile_pattern_list():: - - cpl = self.compile_pattern_list(my_pattern) - while some_condition: - ... - i = self.expect_list(clp, timeout) - ... - ''' - - if patterns is None: - return [] - if not isinstance(patterns, list): - patterns = [patterns] - - # Allow dot to match \n - compile_flags = re.DOTALL - if self.ignorecase: - compile_flags = compile_flags | re.IGNORECASE - compiled_pattern_list = [] - for idx, p in enumerate(patterns): - if isinstance(p, self.allowed_string_types): - p = self._coerce_expect_string(p) - compiled_pattern_list.append(re.compile(p, compile_flags)) - elif p is EOF: - compiled_pattern_list.append(EOF) - elif p is TIMEOUT: - compiled_pattern_list.append(TIMEOUT) - elif isinstance(p, type(re.compile(''))): - compiled_pattern_list.append(p) - else: - self._pattern_type_err(p) - return compiled_pattern_list - - def expect(self, pattern, timeout=-1, searchwindowsize=-1): - - '''This seeks through the stream until a pattern is matched. The - pattern is overloaded and may take several types. 
The pattern can be a - StringType, EOF, a compiled re, or a list of any of those types. - Strings will be compiled to re types. This returns the index into the - pattern list. If the pattern was not a list this returns index 0 on a - successful match. This may raise exceptions for EOF or TIMEOUT. To - avoid the EOF or TIMEOUT exceptions add EOF or TIMEOUT to the pattern - list. That will cause expect to match an EOF or TIMEOUT condition - instead of raising an exception. - - If you pass a list of patterns and more than one matches, the first - match in the stream is chosen. If more than one pattern matches at that - point, the leftmost in the pattern list is chosen. For example:: - - # the input is 'foobar' - index = p.expect(['bar', 'foo', 'foobar']) - # returns 1('foo') even though 'foobar' is a "better" match - - Please note, however, that buffering can affect this behavior, since - input arrives in unpredictable chunks. For example:: - - # the input is 'foobar' - index = p.expect(['foobar', 'foo']) - # returns 0('foobar') if all input is available at once, - # but returs 1('foo') if parts of the final 'bar' arrive late - - After a match is found the instance attributes 'before', 'after' and - 'match' will be set. You can see all the data read before the match in - 'before'. You can see the data that was matched in 'after'. The - re.MatchObject used in the re match will be in 'match'. If an error - occurred then 'before' will be set to all the data read so far and - 'after' and 'match' will be None. - - If timeout is -1 then timeout will be set to the self.timeout value. - - A list entry may be EOF or TIMEOUT instead of a string. This will - catch these exceptions and return the index of the list entry instead - of raising the exception. The attribute 'after' will be set to the - exception type. The attribute 'match' will be None. This allows you to - write code like this:: - - index = p.expect(['good', 'bad', pexpect.EOF, pexpect.TIMEOUT]) - if index == 0: - do_something() - elif index == 1: - do_something_else() - elif index == 2: - do_some_other_thing() - elif index == 3: - do_something_completely_different() - - instead of code like this:: - - try: - index = p.expect(['good', 'bad']) - if index == 0: - do_something() - elif index == 1: - do_something_else() - except EOF: - do_some_other_thing() - except TIMEOUT: - do_something_completely_different() - - These two forms are equivalent. It all depends on what you want. You - can also just expect the EOF if you are waiting for all output of a - child to finish. For example:: - - p = pexpect.spawn('/bin/ls') - p.expect(pexpect.EOF) - print p.before - - If you are trying to optimize for speed then see expect_list(). - ''' - - compiled_pattern_list = self.compile_pattern_list(pattern) - return self.expect_list(compiled_pattern_list, - timeout, searchwindowsize) - - def expect_list(self, pattern_list, timeout=-1, searchwindowsize=-1): - - '''This takes a list of compiled regular expressions and returns the - index into the pattern_list that matched the child output. The list may - also contain EOF or TIMEOUT(which are not compiled regular - expressions). This method is similar to the expect() method except that - expect_list() does not recompile the pattern list on every call. This - may help if you are trying to optimize for speed, otherwise just use - the expect() method. This is called by expect(). If timeout==-1 then - the self.timeout value is used. If searchwindowsize==-1 then the - self.searchwindowsize value is used. 
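# Illustrative sketch (editorial addition, not part of the original pexpect
# source): pre-compiling patterns once and reusing them with expect_list()
# inside a loop, as recommended in the compile_pattern_list() docstring above.
# The ping command and reply pattern are placeholders.
import pexpect

child = pexpect.spawn('ping -c 3 localhost')
cpl = child.compile_pattern_list([pexpect.EOF, 'bytes from'])
replies = 0
while True:
    i = child.expect_list(cpl, timeout=10)
    if i == 0:                     # EOF: ping has exited
        break
    replies += 1                   # matched another 'bytes from' reply line
print('replies seen: %d' % replies)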
''' - - return self.expect_loop(searcher_re(pattern_list), - timeout, searchwindowsize) - - def expect_exact(self, pattern_list, timeout=-1, searchwindowsize=-1): - - '''This is similar to expect(), but uses plain string matching instead - of compiled regular expressions in 'pattern_list'. The 'pattern_list' - may be a string; a list or other sequence of strings; or TIMEOUT and - EOF. - - This call might be faster than expect() for two reasons: string - searching is faster than RE matching and it is possible to limit the - search to just the end of the input buffer. - - This method is also useful when you don't want to have to worry about - escaping regular expression characters that you want to match.''' - - if (isinstance(pattern_list, self.allowed_string_types) or - pattern_list in (TIMEOUT, EOF)): - pattern_list = [pattern_list] - - def prepare_pattern(pattern): - if pattern in (TIMEOUT, EOF): - return pattern - if isinstance(pattern, self.allowed_string_types): - return self._coerce_expect_string(pattern) - self._pattern_type_err(pattern) - - try: - pattern_list = iter(pattern_list) - except TypeError: - self._pattern_type_err(pattern_list) - pattern_list = [prepare_pattern(p) for p in pattern_list] - return self.expect_loop(searcher_string(pattern_list), - timeout, searchwindowsize) - - def expect_loop(self, searcher, timeout=-1, searchwindowsize=-1): - - '''This is the common loop used inside expect. The 'searcher' should be - an instance of searcher_re or searcher_string, which describes how and - what to search for in the input. - - See expect() for other arguments, return value and exceptions. ''' - - self.searcher = searcher - - if timeout == -1: - timeout = self.timeout - if timeout is not None: - end_time = time.time() + timeout - if searchwindowsize == -1: - searchwindowsize = self.searchwindowsize - - try: - incoming = self.buffer - freshlen = len(incoming) - while True: - # Keep reading until exception or return. - index = searcher.search(incoming, freshlen, searchwindowsize) - if index >= 0: - self.buffer = incoming[searcher.end:] - self.before = incoming[: searcher.start] - self.after = incoming[searcher.start: searcher.end] - self.match = searcher.match - self.match_index = index - return self.match_index - # No match at this point - if (timeout is not None) and (timeout < 0): - raise TIMEOUT('Timeout exceeded in expect_any().') - # Still have time left, so read more data - c = self.read_nonblocking(self.maxread, timeout) - freshlen = len(c) - time.sleep(0.0001) - incoming = incoming + c - if timeout is not None: - timeout = end_time - time.time() - except EOF: - err = sys.exc_info()[1] - self.buffer = self.string_type() - self.before = incoming - self.after = EOF - index = searcher.eof_index - if index >= 0: - self.match = EOF - self.match_index = index - return self.match_index - else: - self.match = None - self.match_index = None - raise EOF(str(err) + '\n' + str(self)) - except TIMEOUT: - err = sys.exc_info()[1] - self.buffer = incoming - self.before = incoming - self.after = TIMEOUT - index = searcher.timeout_index - if index >= 0: - self.match = TIMEOUT - self.match_index = index - return self.match_index - else: - self.match = None - self.match_index = None - raise TIMEOUT(str(err) + '\n' + str(self)) - except: - self.before = incoming - self.after = None - self.match = None - self.match_index = None - raise - - def getwinsize(self): - - '''This returns the terminal window size of the child tty. The return - value is a tuple of (rows, cols). 
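# Illustrative sketch (editorial addition, not part of the original pexpect
# source): expect_exact() matches plain substrings, so characters like '$'
# need no regex escaping. Assumes /bin/sh showing its default '$ ' prompt.
import pexpect

child = pexpect.spawn('/bin/sh')
child.expect_exact('$ ')           # literal match; expect() would need re.escape
child.sendline('echo hi')
child.expect_exact('$ ')           # wait for the next prompt
child.sendline('exit')
child.expect(pexpect.EOF)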
''' - - TIOCGWINSZ = getattr(termios, 'TIOCGWINSZ', 1074295912) - s = struct.pack('HHHH', 0, 0, 0, 0) - x = fcntl.ioctl(self.child_fd, TIOCGWINSZ, s) - return struct.unpack('HHHH', x)[0:2] - - def setwinsize(self, rows, cols): - - '''This sets the terminal window size of the child tty. This will cause - a SIGWINCH signal to be sent to the child. This does not change the - physical window size. It changes the size reported to TTY-aware - applications like vi or curses -- applications that respond to the - SIGWINCH signal. ''' - - # Some very old platforms have a bug that causes the value for - # termios.TIOCSWINSZ to be truncated. There was a hack here to work - # around this, but it caused problems with newer platforms so has been - # removed. For details see https://github.com/pexpect/pexpect/issues/39 - TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561) - # Note, assume ws_xpixel and ws_ypixel are zero. - s = struct.pack('HHHH', rows, cols, 0, 0) - fcntl.ioctl(self.fileno(), TIOCSWINSZ, s) - - def interact(self, escape_character=chr(29), - input_filter=None, output_filter=None): - - '''This gives control of the child process to the interactive user (the - human at the keyboard). Keystrokes are sent to the child process, and - the stdout and stderr output of the child process is printed. This - simply echos the child stdout and child stderr to the real stdout and - it echos the real stdin to the child stdin. When the user types the - escape_character this method will stop. The default for - escape_character is ^]. This should not be confused with ASCII 27 -- - the ESC character. ASCII 29 was chosen for historical merit because - this is the character used by 'telnet' as the escape character. The - escape_character will not be sent to the child process. - - You may pass in optional input and output filter functions. These - functions should take a string and return a string. The output_filter - will be passed all the output from the child process. The input_filter - will be passed all the keyboard input from the user. The input_filter - is run BEFORE the check for the escape_character. - - Note that if you change the window size of the parent the SIGWINCH - signal will not be passed through to the child. If you want the child - window size to change when the parent's window size changes then do - something like the following example:: - - import pexpect, struct, fcntl, termios, signal, sys - def sigwinch_passthrough (sig, data): - s = struct.pack("HHHH", 0, 0, 0, 0) - a = struct.unpack('hhhh', fcntl.ioctl(sys.stdout.fileno(), - termios.TIOCGWINSZ , s)) - global p - p.setwinsize(a[0],a[1]) - # Note this 'p' global and used in sigwinch_passthrough. - p = pexpect.spawn('/bin/bash') - signal.signal(signal.SIGWINCH, sigwinch_passthrough) - p.interact() - ''' - - # Flush the buffer. - self.write_to_stdout(self.buffer) - self.stdout.flush() - self.buffer = self.string_type() - mode = tty.tcgetattr(self.STDIN_FILENO) - tty.setraw(self.STDIN_FILENO) - if PY3: - escape_character = escape_character.encode('latin-1') - try: - self.__interact_copy(escape_character, input_filter, output_filter) - finally: - tty.tcsetattr(self.STDIN_FILENO, tty.TCSAFLUSH, mode) - - def __interact_writen(self, fd, data): - '''This is used by the interact() method. - ''' - - while data != b'' and self.isalive(): - n = os.write(fd, data) - data = data[n:] - - def __interact_read(self, fd): - '''This is used by the interact() method. 
- ''' - - return os.read(fd, 1000) - - def __interact_copy(self, escape_character=None, - input_filter=None, output_filter=None): - - '''This is used by the interact() method. - ''' - - while self.isalive(): - r, w, e = self.__select([self.child_fd, self.STDIN_FILENO], [], []) - if self.child_fd in r: - try: - data = self.__interact_read(self.child_fd) - except OSError as err: - if err.args[0] == errno.EIO: - # Linux-style EOF - break - raise - if data == b'': - # BSD-style EOF - break - if output_filter: - data = output_filter(data) - if self.logfile is not None: - self.logfile.write(data) - self.logfile.flush() - os.write(self.STDOUT_FILENO, data) - if self.STDIN_FILENO in r: - data = self.__interact_read(self.STDIN_FILENO) - if input_filter: - data = input_filter(data) - i = data.rfind(escape_character) - if i != -1: - data = data[:i] - self.__interact_writen(self.child_fd, data) - break - self.__interact_writen(self.child_fd, data) - - def __select(self, iwtd, owtd, ewtd, timeout=None): - - '''This is a wrapper around select.select() that ignores signals. If - select.select raises a select.error exception and errno is an EINTR - error then it is ignored. Mainly this is used to ignore sigwinch - (terminal resize). ''' - - # if select() is interrupted by a signal (errno==EINTR) then - # we loop back and enter the select() again. - if timeout is not None: - end_time = time.time() + timeout - while True: - try: - return select.select(iwtd, owtd, ewtd, timeout) - except select.error: - err = sys.exc_info()[1] - if err.args[0] == errno.EINTR: - # if we loop back we have to subtract the - # amount of time we already waited. - if timeout is not None: - timeout = end_time - time.time() - if timeout < 0: - return([], [], []) - else: - # something else caused the select.error, so - # this actually is an exception. - raise - -############################################################################## -# The following methods are no longer supported or allowed. - - def setmaxread(self, maxread): # pragma: no cover - - '''This method is no longer supported or allowed. I don't like getters - and setters without a good reason. ''' - - raise ExceptionPexpect('This method is no longer supported ' + - 'or allowed. Just assign a value to the ' + - 'maxread member variable.') - - def setlog(self, fileobject): # pragma: no cover - - '''This method is no longer supported or allowed. - ''' - - raise ExceptionPexpect('This method is no longer supported ' + - 'or allowed. Just assign a value to the logfile ' + - 'member variable.') - -############################################################################## -# End of spawn class -############################################################################## - -class spawnu(spawn): - """Works like spawn, but accepts and returns unicode strings. - - Extra parameters: - - :param encoding: The encoding to use for communications (default: 'utf-8') - :param errors: How to handle encoding/decoding errors; one of 'strict' - (the default), 'ignore', or 'replace', as described - for :meth:`~bytes.decode` and :meth:`~str.encode`. 
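# Illustrative sketch (editorial addition, not part of the original pexpect
# source): spawnu exchanges unicode strings with the child, decoding its
# output and encoding sent text with the given codec. 'cat' is used here as a
# simple echoing child.
import pexpect

child = pexpect.spawnu('cat', encoding='utf-8', errors='replace')
child.sendline(u'caf\xe9')         # encoded to UTF-8 before writing
child.expect(u'caf\xe9')           # matched against decoded unicode output
child.sendeof()                    # Ctrl-D at the start of a line ends cat
child.expect(pexpect.EOF)
child.close()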
- """ - if PY3: - string_type = str - allowed_string_types = (str, ) - _chr = staticmethod(chr) - linesep = os.linesep - crlf = '\r\n' - else: - string_type = unicode - allowed_string_types = (unicode, ) - _chr = staticmethod(unichr) - linesep = os.linesep.decode('ascii') - crlf = '\r\n'.decode('ascii') - # This can handle unicode in both Python 2 and 3 - write_to_stdout = sys.stdout.write - - def __init__(self, *args, **kwargs): - self.encoding = kwargs.pop('encoding', 'utf-8') - self.errors = kwargs.pop('errors', 'strict') - self._decoder = codecs.getincrementaldecoder(self.encoding)(errors=self.errors) - super(spawnu, self).__init__(*args, **kwargs) - - @staticmethod - def _coerce_expect_string(s): - return s - - @staticmethod - def _coerce_send_string(s): - return s - - def _coerce_read_string(self, s): - return self._decoder.decode(s, final=False) - - def _send(self, s): - return os.write(self.child_fd, s.encode(self.encoding, self.errors)) - - -class searcher_string(object): - - '''This is a plain string search helper for the spawn.expect_any() method. - This helper class is for speed. For more powerful regex patterns - see the helper class, searcher_re. - - Attributes: - - eof_index - index of EOF, or -1 - timeout_index - index of TIMEOUT, or -1 - - After a successful match by the search() method the following attributes - are available: - - start - index into the buffer, first byte of match - end - index into the buffer, first byte after match - match - the matching string itself - - ''' - - def __init__(self, strings): - - '''This creates an instance of searcher_string. This argument 'strings' - may be a list; a sequence of strings; or the EOF or TIMEOUT types. ''' - - self.eof_index = -1 - self.timeout_index = -1 - self._strings = [] - for n, s in enumerate(strings): - if s is EOF: - self.eof_index = n - continue - if s is TIMEOUT: - self.timeout_index = n - continue - self._strings.append((n, s)) - - def __str__(self): - - '''This returns a human-readable string that represents the state of - the object.''' - - ss = [(ns[0], ' %d: "%s"' % ns) for ns in self._strings] - ss.append((-1, 'searcher_string:')) - if self.eof_index >= 0: - ss.append((self.eof_index, ' %d: EOF' % self.eof_index)) - if self.timeout_index >= 0: - ss.append((self.timeout_index, - ' %d: TIMEOUT' % self.timeout_index)) - ss.sort() - ss = list(zip(*ss))[1] - return '\n'.join(ss) - - def search(self, buffer, freshlen, searchwindowsize=None): - - '''This searches 'buffer' for the first occurence of one of the search - strings. 'freshlen' must indicate the number of bytes at the end of - 'buffer' which have not been searched before. It helps to avoid - searching the same, possibly big, buffer over and over again. - - See class spawn for the 'searchwindowsize' argument. - - If there is a match this returns the index of that string, and sets - 'start', 'end' and 'match'. Otherwise, this returns -1. ''' - - first_match = None - - # 'freshlen' helps a lot here. Further optimizations could - # possibly include: - # - # using something like the Boyer-Moore Fast String Searching - # Algorithm; pre-compiling the search through a list of - # strings into something that can scan the input once to - # search for all N strings; realize that if we search for - # ['bar', 'baz'] and the input is '...foo' we need not bother - # rescanning until we've read three more bytes. - # - # Sadly, I don't know enough about this interesting topic. 
/grahn - - for index, s in self._strings: - if searchwindowsize is None: - # the match, if any, can only be in the fresh data, - # or at the very end of the old data - offset = -(freshlen + len(s)) - else: - # better obey searchwindowsize - offset = -searchwindowsize - n = buffer.find(s, offset) - if n >= 0 and (first_match is None or n < first_match): - first_match = n - best_index, best_match = index, s - if first_match is None: - return -1 - self.match = best_match - self.start = first_match - self.end = self.start + len(self.match) - return best_index - - -class searcher_re(object): - - '''This is regular expression string search helper for the - spawn.expect_any() method. This helper class is for powerful - pattern matching. For speed, see the helper class, searcher_string. - - Attributes: - - eof_index - index of EOF, or -1 - timeout_index - index of TIMEOUT, or -1 - - After a successful match by the search() method the following attributes - are available: - - start - index into the buffer, first byte of match - end - index into the buffer, first byte after match - match - the re.match object returned by a succesful re.search - - ''' - - def __init__(self, patterns): - - '''This creates an instance that searches for 'patterns' Where - 'patterns' may be a list or other sequence of compiled regular - expressions, or the EOF or TIMEOUT types.''' - - self.eof_index = -1 - self.timeout_index = -1 - self._searches = [] - for n, s in zip(list(range(len(patterns))), patterns): - if s is EOF: - self.eof_index = n - continue - if s is TIMEOUT: - self.timeout_index = n - continue - self._searches.append((n, s)) - - def __str__(self): - - '''This returns a human-readable string that represents the state of - the object.''' - - #ss = [(n, ' %d: re.compile("%s")' % - # (n, repr(s.pattern))) for n, s in self._searches] - ss = list() - for n, s in self._searches: - try: - ss.append((n, ' %d: re.compile("%s")' % (n, s.pattern))) - except UnicodeEncodeError: - # for test cases that display __str__ of searches, dont throw - # another exception just because stdout is ascii-only, using - # repr() - ss.append((n, ' %d: re.compile(%r)' % (n, s.pattern))) - ss.append((-1, 'searcher_re:')) - if self.eof_index >= 0: - ss.append((self.eof_index, ' %d: EOF' % self.eof_index)) - if self.timeout_index >= 0: - ss.append((self.timeout_index, ' %d: TIMEOUT' % - self.timeout_index)) - ss.sort() - ss = list(zip(*ss))[1] - return '\n'.join(ss) - - def search(self, buffer, freshlen, searchwindowsize=None): - - '''This searches 'buffer' for the first occurence of one of the regular - expressions. 'freshlen' must indicate the number of bytes at the end of - 'buffer' which have not been searched before. - - See class spawn for the 'searchwindowsize' argument. - - If there is a match this returns the index of that string, and sets - 'start', 'end' and 'match'. Otherwise, returns -1.''' - - first_match = None - # 'freshlen' doesn't help here -- we cannot predict the - # length of a match, and the re module provides no help. 
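The comment block above describes the one optimization `searcher_string.search()` does implement: only the freshly read bytes, plus enough overlap to catch a match that straddles the boundary with older data, ever need to be rescanned. A hedged, self-contained illustration of that windowing idea (the function name is made up)::

    def find_in_window(buffer, needle, freshlen, searchwindowsize=None):
        # Only data overlapping the fresh tail of the buffer can contain
        # a match that was not already found on a previous call.
        if searchwindowsize is None:
            offset = -(freshlen + len(needle))
        else:
            offset = -searchwindowsize
        return buffer.find(needle, offset)

    buf = b'hello wor' + b'ld!'                  # 3 fresh bytes arrived at the end
    find_in_window(buf, b'world', freshlen=3)    # -> 6, found across the boundary

As the following regex-based searcher notes, the same trick does not apply to `searcher_re`, because a regular-expression match has no fixed length to overlap by.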
- if searchwindowsize is None: - searchstart = 0 - else: - searchstart = max(0, len(buffer) - searchwindowsize) - for index, s in self._searches: - match = s.search(buffer, searchstart) - if match is None: - continue - n = match.start() - if first_match is None or n < first_match: - first_match = n - the_match = match - best_index = index - if first_match is None: - return -1 - self.start = first_match - self.match = the_match - self.end = self.match.end() - return best_index - - -def is_executable_file(path): - """Checks that path is an executable regular file (or a symlink to a file). - - This is roughly ``os.path isfile(path) and os.access(path, os.X_OK)``, but - on some platforms :func:`os.access` gives us the wrong answer, so this - checks permission bits directly. - """ - # follow symlinks, - fpath = os.path.realpath(path) - - # return False for non-files (directories, fifo, etc.) - if not os.path.isfile(fpath): - return False - - # On Solaris, etc., "If the process has appropriate privileges, an - # implementation may indicate success for X_OK even if none of the - # execute file permission bits are set." - # - # For this reason, it is necessary to explicitly check st_mode - - # get file mode using os.stat, and check if `other', - # that is anybody, may read and execute. - mode = os.stat(fpath).st_mode - if mode & stat.S_IROTH and mode & stat.S_IXOTH: - return True - - # get current user's group ids, and check if `group', - # when matching ours, may read and execute. - user_gids = os.getgroups() + [os.getgid()] - if (os.stat(fpath).st_gid in user_gids and - mode & stat.S_IRGRP and mode & stat.S_IXGRP): - return True - - # finally, if file owner matches our effective userid, - # check if `user', may read and execute. - user_gids = os.getgroups() + [os.getgid()] - if (os.stat(fpath).st_uid == os.geteuid() and - mode & stat.S_IRUSR and mode & stat.S_IXUSR): - return True - - return False - -def which(filename): - '''This takes a given filename; tries to find it in the environment path; - then checks if it is executable. This returns the full path to the filename - if found and executable. Otherwise this returns None.''' - - # Special case where filename contains an explicit path. - if os.path.dirname(filename) != '' and is_executable_file(filename): - return filename - if 'PATH' not in os.environ or os.environ['PATH'] == '': - p = os.defpath - else: - p = os.environ['PATH'] - pathlist = p.split(os.pathsep) - for path in pathlist: - ff = os.path.join(path, filename) - if is_executable_file(ff): - return ff - return None - - -def split_command_line(command_line): - - '''This splits a command line into a list of arguments. It splits arguments - on spaces, but handles embedded quotes, doublequotes, and escaped - characters. It's impossible to do this with a regular expression, so I - wrote a little state machine to parse the command line. ''' - - arg_list = [] - arg = '' - - # Constants to name the states we can be in. - state_basic = 0 - state_esc = 1 - state_singlequote = 2 - state_doublequote = 3 - # The state when consuming whitespace between commands. - state_whitespace = 4 - state = state_basic - - for c in command_line: - if state == state_basic or state == state_whitespace: - if c == '\\': - # Escape the next character - state = state_esc - elif c == r"'": - # Handle single quote - state = state_singlequote - elif c == r'"': - # Handle double quote - state = state_doublequote - elif c.isspace(): - # Add arg to arg_list if we aren't in the middle of whitespace. 
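The deleted `is_executable_file()` and `which()` helpers above reimplement PATH lookup because `os.access()` is unreliable on some platforms. On Python 3.3+ most of this is covered by the standard library; a hedged sketch of the equivalent lookup (names are illustrative, not part of pexpect)::

    import os
    import shutil

    def find_executable(name):
        # Explicit path: accept it only if it is an executable regular file.
        if os.path.dirname(name):
            if os.path.isfile(name) and os.access(name, os.X_OK):
                return name
            return None
        # Otherwise search PATH, like the deleted which() does by hand.
        return shutil.which(name)

    find_executable('python')    # e.g. '/usr/bin/python', or None if not found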
- if state == state_whitespace: - # Do nothing. - None - else: - arg_list.append(arg) - arg = '' - state = state_whitespace - else: - arg = arg + c - state = state_basic - elif state == state_esc: - arg = arg + c - state = state_basic - elif state == state_singlequote: - if c == r"'": - state = state_basic - else: - arg = arg + c - elif state == state_doublequote: - if c == r'"': - state = state_basic - else: - arg = arg + c - - if arg != '': - arg_list.append(arg) - return arg_list - -# vim: set shiftround expandtab tabstop=4 shiftwidth=4 ft=python autoindent : diff --git a/IPython/external/simplegeneric/__init__.py b/IPython/external/simplegeneric/__init__.py deleted file mode 100644 index a81972a..0000000 --- a/IPython/external/simplegeneric/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -try: - from simplegeneric import * -except ImportError: - from ._simplegeneric import * diff --git a/IPython/external/simplegeneric/_simplegeneric.py b/IPython/external/simplegeneric/_simplegeneric.py deleted file mode 100644 index d417cc9..0000000 --- a/IPython/external/simplegeneric/_simplegeneric.py +++ /dev/null @@ -1,109 +0,0 @@ -"""This is version 0.7 of Philip J. Eby's simplegeneric module -(http://pypi.python.org/pypi/simplegeneric), patched to work with Python 3, -which doesn't support old-style classes. -""" - -#Name: simplegeneric -#Version: 0.7 -#Summary: Simple generic functions (similar to Python's own len(), pickle.dump(), etc.) -#Home-page: http://pypi.python.org/pypi/simplegeneric -#Author: Phillip J. Eby -#Author-email: peak@eby-sarna.com -#License: PSF or ZPL - -__all__ = ["generic"] - -try: - from types import ClassType, InstanceType -except ImportError: - classtypes = type -else: - classtypes = type, ClassType - -def generic(func): - """Create a simple generic function""" - - _sentinel = object() - - def _by_class(*args, **kw): - cls = args[0].__class__ - for t in type(cls.__name__, (cls,object), {}).__mro__: - f = _gbt(t, _sentinel) - if f is not _sentinel: - return f(*args, **kw) - else: - return func(*args, **kw) - - _by_type = {object: func} - try: - _by_type[InstanceType] = _by_class - except NameError: # Python 3 - pass - - _gbt = _by_type.get - - def when_type(*types): - """Decorator to add a method that will be called for the given types""" - for t in types: - if not isinstance(t, classtypes): - raise TypeError( - "%r is not a type or class" % (t,) - ) - def decorate(f): - for t in types: - if _by_type.setdefault(t,f) is not f: - raise TypeError( - "%r already has method for type %r" % (func, t) - ) - return f - return decorate - - - - - _by_object = {} - _gbo = _by_object.get - - def when_object(*obs): - """Decorator to add a method to be called for the given object(s)""" - def decorate(f): - for o in obs: - if _by_object.setdefault(id(o), (o,f))[1] is not f: - raise TypeError( - "%r already has method for object %r" % (func, o) - ) - return f - return decorate - - - def dispatch(*args, **kw): - f = _gbo(id(args[0]), _sentinel) - if f is _sentinel: - for t in type(args[0]).__mro__: - f = _gbt(t, _sentinel) - if f is not _sentinel: - return f(*args, **kw) - else: - return func(*args, **kw) - else: - return f[1](*args, **kw) - - dispatch.__name__ = func.__name__ - dispatch.__dict__ = func.__dict__.copy() - dispatch.__doc__ = func.__doc__ - dispatch.__module__ = func.__module__ - - dispatch.when_type = when_type - dispatch.when_object = when_object - dispatch.default = func - dispatch.has_object = lambda o: id(o) in _by_object - dispatch.has_type = lambda t: t in _by_type - return 
dispatch - - -def test_suite(): - import doctest - return doctest.DocFileSuite( - 'README.txt', - optionflags=doctest.ELLIPSIS|doctest.REPORT_ONLY_FIRST_FAILURE, - ) diff --git a/IPython/kernel/zmq/eventloops.py b/IPython/kernel/zmq/eventloops.py index 33eea0e..309413e 100644 --- a/IPython/kernel/zmq/eventloops.py +++ b/IPython/kernel/zmq/eventloops.py @@ -11,19 +11,14 @@ import zmq from IPython.config.application import Application from IPython.utils import io - - -def _on_os_x_10_9(): - import platform - from distutils.version import LooseVersion as V - return sys.platform == 'darwin' and V(platform.mac_ver()[0]) >= V('10.9') +from IPython.lib.inputhook import _use_appnope def _notify_stream_qt(kernel, stream): from IPython.external.qt_for_kernel import QtCore - if _on_os_x_10_9() and kernel._darwin_app_nap: - from IPython.external.appnope import nope_scope as context + if _use_appnope() and kernel._darwin_app_nap: + from appnope import nope_scope as context else: from IPython.core.interactiveshell import NoOpContext as context @@ -93,10 +88,10 @@ def loop_wx(kernel): import wx from IPython.lib.guisupport import start_event_loop_wx - if _on_os_x_10_9() and kernel._darwin_app_nap: + if _use_appnope() and kernel._darwin_app_nap: # we don't hook up App Nap contexts for Wx, # just disable it outright. - from IPython.external.appnope import nope + from appnope import nope nope() doi = kernel.do_one_iteration diff --git a/IPython/lib/inputhook.py b/IPython/lib/inputhook.py index a3d741b..baeed8f 100644 --- a/IPython/lib/inputhook.py +++ b/IPython/lib/inputhook.py @@ -3,16 +3,8 @@ Inputhook management for GUI event loop integration. """ -#----------------------------------------------------------------------------- -# Copyright (C) 2008-2011 The IPython Development Team -# -# Distributed under the terms of the BSD License. The full license is in -# the file COPYING, distributed as part of this software. -#----------------------------------------------------------------------------- - -#----------------------------------------------------------------------------- -# Imports -#----------------------------------------------------------------------------- +# Copyright (c) IPython Development Team. +# Distributed under the terms of the Modified BSD License. try: import ctypes @@ -21,6 +13,7 @@ except ImportError: except SystemError: # IronPython issue, 2/8/2014 ctypes = None import os +import platform import sys from distutils.version import LooseVersion as V @@ -57,8 +50,14 @@ def _stdin_ready_nt(): def _stdin_ready_other(): """Return True, assuming there's something to read on stdin.""" - return True # + return True + +def _use_appnope(): + """Should we use appnope for dealing with OS X app nap? + Checks if we are on OS X 10.9 or greater. 
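The eventloops.py and inputhook.py hunks in this region drop the bundled appnope shim and import the appnope package from PyPI, guarded by the new `_use_appnope()` check. A hedged sketch of the usage pattern being adopted, assuming only the `nope`, `nap`, and `nope_scope` names that these hunks import::

    import sys

    if sys.platform == 'darwin':
        from appnope import nope, nap, nope_scope

        nope()        # keep the process out of App Nap from now on
        nap()         # allow App Nap again

        with nope_scope(reason='processing IPython kernel events'):
            pass      # App Nap is suppressed only inside this block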
+ """ + return sys.platform == 'darwin' and V(platform.mac_ver()[0]) >= V('10.9') def _ignore_CTRL_C_posix(): """Ignore CTRL+C (SIGINT).""" @@ -317,9 +316,10 @@ class WxInputHook(InputHookBase): raise ValueError("requires wxPython >= 2.8, but you have %s" % wx.__version__) from IPython.lib.inputhookwx import inputhook_wx - from IPython.external.appnope import nope self.manager.set_inputhook(inputhook_wx) - nope() + if _use_appnope(): + from appnope import nope + nope() import wx if app is None: @@ -334,8 +334,9 @@ class WxInputHook(InputHookBase): This restores appnapp on OS X """ - from IPython.external.appnope import nap - nap() + if _use_appnope(): + from appnope import nap + nap() @inputhook_manager.register('qt', 'qt4') class Qt4InputHook(InputHookBase): @@ -362,10 +363,11 @@ class Qt4InputHook(InputHookBase): app = QtGui.QApplication(sys.argv) """ from IPython.lib.inputhookqt4 import create_inputhook_qt4 - from IPython.external.appnope import nope - app, inputhook_qt4 = create_inputhook_qt4(self.manager, app) + app, inputhook_qt4 = create_inputhook_qt4(self, app) self.manager.set_inputhook(inputhook_qt4) - nope() + if _use_appnope(): + from appnope import nope + nope() return app @@ -374,8 +376,9 @@ class Qt4InputHook(InputHookBase): This restores appnapp on OS X """ - from IPython.external.appnope import nap - nap() + if _use_appnope(): + from appnope import nap + nap() @inputhook_manager.register('qt5') diff --git a/IPython/parallel/client/asyncresult.py b/IPython/parallel/client/asyncresult.py index 7c45a70..340398b 100644 --- a/IPython/parallel/client/asyncresult.py +++ b/IPython/parallel/client/asyncresult.py @@ -12,7 +12,7 @@ from datetime import datetime from zmq import MessageTracker from IPython.core.display import clear_output, display, display_pretty -from IPython.external.decorator import decorator +from decorator import decorator from IPython.parallel import error from IPython.utils.py3compat import string_types diff --git a/IPython/parallel/client/client.py b/IPython/parallel/client/client.py index 44c6d1a..ea17490 100644 --- a/IPython/parallel/client/client.py +++ b/IPython/parallel/client/client.py @@ -31,7 +31,7 @@ from IPython.utils.path import get_ipython_dir, compress_user from IPython.utils.py3compat import cast_bytes, string_types, xrange, iteritems from IPython.utils.traitlets import (HasTraits, Integer, Instance, Unicode, Dict, List, Bool, Set, Any) -from IPython.external.decorator import decorator +from decorator import decorator from IPython.parallel import Reference from IPython.parallel import error diff --git a/IPython/parallel/client/remotefunction.py b/IPython/parallel/client/remotefunction.py index ebf5c3b..4453ff2 100644 --- a/IPython/parallel/client/remotefunction.py +++ b/IPython/parallel/client/remotefunction.py @@ -8,7 +8,7 @@ from __future__ import division import sys import warnings -from IPython.external.decorator import decorator +from decorator import decorator from IPython.testing.skipdoctest import skip_doctest from . 
import map as Map diff --git a/IPython/parallel/client/view.py b/IPython/parallel/client/view.py index 63f73aa..f6b17f6 100644 --- a/IPython/parallel/client/view.py +++ b/IPython/parallel/client/view.py @@ -18,7 +18,7 @@ from IPython.utils import pickleutil from IPython.utils.traitlets import ( HasTraits, Any, Bool, List, Dict, Set, Instance, CFloat, Integer ) -from IPython.external.decorator import decorator +from decorator import decorator from IPython.parallel import util from IPython.parallel.controller.dependency import Dependency, dependent diff --git a/IPython/parallel/controller/scheduler.py b/IPython/parallel/controller/scheduler.py index 55cc37f..26589b9 100644 --- a/IPython/parallel/controller/scheduler.py +++ b/IPython/parallel/controller/scheduler.py @@ -26,7 +26,7 @@ import zmq from zmq.eventloop import ioloop, zmqstream # local imports -from IPython.external.decorator import decorator +from decorator import decorator from IPython.config.application import Application from IPython.config.loader import Config from IPython.utils.traitlets import Instance, Dict, List, Set, Integer, Enum, CBytes diff --git a/IPython/parallel/tests/clienttest.py b/IPython/parallel/tests/clienttest.py index 28cd73d..1bf3403 100644 --- a/IPython/parallel/tests/clienttest.py +++ b/IPython/parallel/tests/clienttest.py @@ -22,7 +22,7 @@ from nose import SkipTest import zmq from zmq.tests import BaseZMQTestCase -from IPython.external.decorator import decorator +from decorator import decorator from IPython.parallel import error from IPython.parallel import Client diff --git a/IPython/parallel/util.py b/IPython/parallel/util.py index 870063e..3596edf 100644 --- a/IPython/parallel/util.py +++ b/IPython/parallel/util.py @@ -28,7 +28,7 @@ import zmq from zmq.log import handlers from IPython.utils.log import get_logger -from IPython.external.decorator import decorator +from decorator import decorator from IPython.config.application import Application from IPython.utils.localinterfaces import localhost, is_public_ip, public_ips diff --git a/IPython/terminal/console/tests/test_console.py b/IPython/terminal/console/tests/test_console.py index 09f1a51..d406039 100644 --- a/IPython/terminal/console/tests/test_console.py +++ b/IPython/terminal/console/tests/test_console.py @@ -62,7 +62,7 @@ def stop_console(p, pexpect, t): def start_console(): "Start `ipython console` using pexpect" - from IPython.external import pexpect + import pexpect args = ['-m', 'IPython', 'console', '--colors=NoColor'] cmd = sys.executable diff --git a/IPython/terminal/tests/test_embed.py b/IPython/terminal/tests/test_embed.py index ec51316..2767392 100644 --- a/IPython/terminal/tests/test_embed.py +++ b/IPython/terminal/tests/test_embed.py @@ -60,7 +60,7 @@ def test_ipython_embed(): @skip_win32 def test_nest_embed(): """test that `IPython.embed()` is nestable""" - from IPython.external import pexpect + import pexpect ipy_prompt = r']:' #ansi color codes give problems matching beyond this diff --git a/IPython/testing/decorators.py b/IPython/testing/decorators.py index 9537f11..2014c07 100644 --- a/IPython/testing/decorators.py +++ b/IPython/testing/decorators.py @@ -52,7 +52,7 @@ import unittest # Third-party imports # This is Michele Simionato's decorator module, kept verbatim. 
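All of these hunks make the same one-line substitution: the vendored copy of Michele Simionato's module goes away and `decorator` is imported straight from PyPI, which setup.py now lists as a hard dependency. A hedged reminder of the API these call sites rely on (the example function is made up)::

    from decorator import decorator

    @decorator
    def log_calls(func, *args, **kwargs):
        # decorator() builds a wrapper that keeps func's original signature,
        # which a plain functools.wraps closure does not.
        print('calling %s' % func.__name__)
        return func(*args, **kwargs)

    @log_calls
    def add(a, b):
        return a + b

    add(2, 3)   # prints 'calling add', returns 5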
-from IPython.external.decorator import decorator +from decorator import decorator # Expose the unittest-driven decorators from .ipunittest import ipdoctest, ipdocstring diff --git a/IPython/testing/iptest.py b/IPython/testing/iptest.py index ecc1ee4..ef6f637 100644 --- a/IPython/testing/iptest.py +++ b/IPython/testing/iptest.py @@ -42,12 +42,6 @@ from IPython.external.decorators import KnownFailure, knownfailureif pjoin = path.join - -#----------------------------------------------------------------------------- -# Globals -#----------------------------------------------------------------------------- - - #----------------------------------------------------------------------------- # Warnings control #----------------------------------------------------------------------------- @@ -127,7 +121,7 @@ have = {} have['curses'] = test_for('_curses') have['matplotlib'] = test_for('matplotlib') have['numpy'] = test_for('numpy') -have['pexpect'] = test_for('IPython.external.pexpect') +have['pexpect'] = test_for('pexpect') have['pymongo'] = test_for('pymongo') have['pygments'] = test_for('pygments') have['qt'] = test_for('IPython.external.qt') diff --git a/IPython/utils/_process_posix.py b/IPython/utils/_process_posix.py index 07be022..ac3a9a0 100644 --- a/IPython/utils/_process_posix.py +++ b/IPython/utils/_process_posix.py @@ -21,7 +21,7 @@ import os import subprocess as sp import sys -from IPython.external import pexpect +import pexpect # Our own from ._process_common import getoutput, arg_split diff --git a/IPython/utils/generics.py b/IPython/utils/generics.py index 419e27b..5ffdc86 100644 --- a/IPython/utils/generics.py +++ b/IPython/utils/generics.py @@ -1,26 +1,11 @@ # encoding: utf-8 """Generic functions for extending IPython. -See http://cheeseshop.python.org/pypi/simplegeneric. +See http://pypi.python.org/pypi/simplegeneric. """ -#----------------------------------------------------------------------------- -# Copyright (C) 2008-2011 The IPython Development Team -# -# Distributed under the terms of the BSD License. The full license is in -# the file COPYING, distributed as part of this software. -#----------------------------------------------------------------------------- - -#----------------------------------------------------------------------------- -# Imports -#----------------------------------------------------------------------------- - from IPython.core.error import TryNext -from IPython.external.simplegeneric import generic - -#----------------------------------------------------------------------------- -# Imports -#----------------------------------------------------------------------------- +from simplegeneric import generic @generic diff --git a/IPython/utils/pickleshare.py b/IPython/utils/pickleshare.py deleted file mode 100755 index e6d2400..0000000 --- a/IPython/utils/pickleshare.py +++ /dev/null @@ -1,325 +0,0 @@ -#!/usr/bin/env python - -""" PickleShare - a small 'shelve' like datastore with concurrency support - -Like shelve, a PickleShareDB object acts like a normal dictionary. Unlike -shelve, many processes can access the database simultaneously. Changing a -value in database is immediately visible to other processes accessing the -same database. - -Concurrency is possible because the values are stored in separate files. Hence -the "database" is a directory where *all* files are governed by PickleShare. 
- -Example usage:: - - from pickleshare import * - db = PickleShareDB('~/testpickleshare') - db.clear() - print "Should be empty:",db.items() - db['hello'] = 15 - db['aku ankka'] = [1,2,313] - db['paths/are/ok/key'] = [1,(5,46)] - print db.keys() - del db['aku ankka'] - -This module is certainly not ZODB, but can be used for low-load -(non-mission-critical) situations where tiny code size trumps the -advanced features of a "real" object database. - -Installation guide: easy_install pickleshare - -Author: Ville Vainio -License: MIT open source license. - -""" -from __future__ import print_function - -from IPython.external.path import path as Path -import stat, time -import collections -try: - import cPickle as pickle -except ImportError: - import pickle -import glob - -def gethashfile(key): - return ("%02x" % abs(hash(key) % 256))[-2:] - -_sentinel = object() - -class PickleShareDB(collections.MutableMapping): - """ The main 'connection' object for PickleShare database """ - def __init__(self,root): - """ Return a db object that will manage the specied directory""" - self.root = Path(root).expanduser().abspath() - if not self.root.isdir(): - self.root.makedirs_p() - # cache has { 'key' : (obj, orig_mod_time) } - self.cache = {} - - - def __getitem__(self,key): - """ db['key'] reading """ - fil = self.root / key - try: - mtime = (fil.stat()[stat.ST_MTIME]) - except OSError: - raise KeyError(key) - - if fil in self.cache and mtime == self.cache[fil][1]: - return self.cache[fil][0] - try: - # The cached item has expired, need to read - with fil.open("rb") as f: - obj = pickle.loads(f.read()) - except: - raise KeyError(key) - - self.cache[fil] = (obj,mtime) - return obj - - def __setitem__(self,key,value): - """ db['key'] = 5 """ - fil = self.root / key - parent = fil.parent - if parent and not parent.isdir(): - parent.makedirs() - # We specify protocol 2, so that we can mostly go between Python 2 - # and Python 3. We can upgrade to protocol 3 when Python 2 is obsolete. - with fil.open('wb') as f: - pickled = pickle.dump(value, f, protocol=2) - try: - self.cache[fil] = (value,fil.mtime) - except OSError as e: - if e.errno != 2: - raise - - def hset(self, hashroot, key, value): - """ hashed set """ - hroot = self.root / hashroot - if not hroot.isdir(): - hroot.makedirs() - hfile = hroot / gethashfile(key) - d = self.get(hfile, {}) - d.update( {key : value}) - self[hfile] = d - - - - def hget(self, hashroot, key, default = _sentinel, fast_only = True): - """ hashed get """ - hroot = self.root / hashroot - hfile = hroot / gethashfile(key) - - d = self.get(hfile, _sentinel ) - #print "got dict",d,"from",hfile - if d is _sentinel: - if fast_only: - if default is _sentinel: - raise KeyError(key) - - return default - - # slow mode ok, works even after hcompress() - d = self.hdict(hashroot) - - return d.get(key, default) - - def hdict(self, hashroot): - """ Get all data contained in hashed category 'hashroot' as dict """ - hfiles = self.keys(hashroot + "/*") - hfiles.sort() - last = len(hfiles) and hfiles[-1] or '' - if last.endswith('xx'): - # print "using xx" - hfiles = [last] + hfiles[:-1] - - all = {} - - for f in hfiles: - # print "using",f - try: - all.update(self[f]) - except KeyError: - print("Corrupt",f,"deleted - hset is not threadsafe!") - del self[f] - - self.uncache(f) - - return all - - def hcompress(self, hashroot): - """ Compress category 'hashroot', so hset is fast again - - hget will fail if fast_only is True for compressed items (that were - hset before hcompress). 
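The module being removed here lives on as the standalone `pickleshare` distribution that setup.py now declares in install_requires, with the same dictionary-like API that interactiveshell.py imports. A hedged usage sketch against that external package (the directory name is illustrative)::

    from pickleshare import PickleShareDB

    db = PickleShareDB('~/testpickleshare')    # one small file per key in this directory
    db.clear()
    db['hello'] = 15
    db['paths/nest/ok/key'] = [1, (5, 46)]     # nested keys map to subdirectories
    print(list(db.keys()))
    del db['hello']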
- - """ - hfiles = self.keys(hashroot + "/*") - all = {} - for f in hfiles: - # print "using",f - all.update(self[f]) - self.uncache(f) - - self[hashroot + '/xx'] = all - for f in hfiles: - p = self.root / f - if p.basename() == 'xx': - continue - p.remove() - - - - def __delitem__(self,key): - """ del db["key"] """ - fil = self.root / key - self.cache.pop(fil,None) - try: - fil.remove() - except OSError: - # notfound and permission denied are ok - we - # lost, the other process wins the conflict - pass - - def _normalized(self, p): - """ Make a key suitable for user's eyes """ - return str(self.root.relpathto(p)).replace('\\','/') - - def keys(self, globpat = None): - """ All keys in DB, or all keys matching a glob""" - - if globpat is None: - files = self.root.walkfiles() - else: - files = [Path(p) for p in glob.glob(self.root/globpat)] - return [self._normalized(p) for p in files if p.isfile()] - - def __iter__(self): - return iter(self.keys()) - - def __len__(self): - return len(self.keys()) - - def uncache(self,*items): - """ Removes all, or specified items from cache - - Use this after reading a large amount of large objects - to free up memory, when you won't be needing the objects - for a while. - - """ - if not items: - self.cache = {} - for it in items: - self.cache.pop(it,None) - - def waitget(self,key, maxwaittime = 60 ): - """ Wait (poll) for a key to get a value - - Will wait for `maxwaittime` seconds before raising a KeyError. - The call exits normally if the `key` field in db gets a value - within the timeout period. - - Use this for synchronizing different processes or for ensuring - that an unfortunately timed "db['key'] = newvalue" operation - in another process (which causes all 'get' operation to cause a - KeyError for the duration of pickling) won't screw up your program - logic. - """ - - wtimes = [0.2] * 3 + [0.5] * 2 + [1] - tries = 0 - waited = 0 - while 1: - try: - val = self[key] - return val - except KeyError: - pass - - if waited > maxwaittime: - raise KeyError(key) - - time.sleep(wtimes[tries]) - waited+=wtimes[tries] - if tries < len(wtimes) -1: - tries+=1 - - def getlink(self,folder): - """ Get a convenient link for accessing items """ - return PickleShareLink(self, folder) - - def __repr__(self): - return "PickleShareDB('%s')" % self.root - - - -class PickleShareLink: - """ A shortdand for accessing nested PickleShare data conveniently. 
- - Created through PickleShareDB.getlink(), example:: - - lnk = db.getlink('myobjects/test') - lnk.foo = 2 - lnk.bar = lnk.foo + 5 - - """ - def __init__(self, db, keydir ): - self.__dict__.update(locals()) - - def __getattr__(self,key): - return self.__dict__['db'][self.__dict__['keydir']+'/' + key] - def __setattr__(self,key,val): - self.db[self.keydir+'/' + key] = val - def __repr__(self): - db = self.__dict__['db'] - keys = db.keys( self.__dict__['keydir'] +"/*") - return "" % ( - self.__dict__['keydir'], - ";".join([Path(k).basename() for k in keys])) - -def main(): - import textwrap - usage = textwrap.dedent("""\ - pickleshare - manage PickleShare databases - - Usage: - - pickleshare dump /path/to/db > dump.txt - pickleshare load /path/to/db < dump.txt - pickleshare test /path/to/db - """) - DB = PickleShareDB - import sys - if len(sys.argv) < 2: - print(usage) - return - - cmd = sys.argv[1] - args = sys.argv[2:] - if cmd == 'dump': - if not args: args= ['.'] - db = DB(args[0]) - import pprint - pprint.pprint(db.items()) - elif cmd == 'load': - cont = sys.stdin.read() - db = DB(args[0]) - data = eval(cont) - db.clear() - for k,v in db.items(): - db[k] = v - elif cmd == 'testwait': - db = DB(args[0]) - db.clear() - print(db.waitget('250')) - elif cmd == 'test': - test() - stress() - -if __name__== "__main__": - main() - - diff --git a/IPython/utils/tests/test_pickleshare.py b/IPython/utils/tests/test_pickleshare.py deleted file mode 100644 index 7d4f577..0000000 --- a/IPython/utils/tests/test_pickleshare.py +++ /dev/null @@ -1,61 +0,0 @@ -from __future__ import print_function - -import os -from unittest import TestCase - -from IPython.testing.decorators import skip -from IPython.utils.tempdir import TemporaryDirectory -from IPython.utils.pickleshare import PickleShareDB - - -class PickleShareDBTestCase(TestCase): - def setUp(self): - self.tempdir = TemporaryDirectory() - - def tearDown(self): - self.tempdir.cleanup() - - def test_picklesharedb(self): - db = PickleShareDB(self.tempdir.name) - db.clear() - print("Should be empty:",db.items()) - db['hello'] = 15 - db['aku ankka'] = [1,2,313] - db['paths/nest/ok/keyname'] = [1,(5,46)] - db.hset('hash', 'aku', 12) - db.hset('hash', 'ankka', 313) - self.assertEqual(db.hget('hash','aku'), 12) - self.assertEqual(db.hget('hash','ankka'), 313) - print("all hashed",db.hdict('hash')) - print(db.keys()) - print(db.keys('paths/nest/ok/k*')) - print(dict(db)) # snapsot of whole db - db.uncache() # frees memory, causes re-reads later - - # shorthand for accessing deeply nested files - lnk = db.getlink('myobjects/test') - lnk.foo = 2 - lnk.bar = lnk.foo + 5 - self.assertEqual(lnk.bar, 7) - - @skip("Too slow for regular running.") - def test_stress(self): - db = PickleShareDB('~/fsdbtest') - import time,sys - for i in range(1000): - for j in range(1000): - if i % 15 == 0 and i < 200: - if str(j) in db: - del db[str(j)] - continue - - if j%33 == 0: - time.sleep(0.02) - - db[str(j)] = db.get(str(j), []) + [(i,j,"proc %d" % os.getpid())] - db.hset('hash',j, db.hget('hash',j,15) + 1 ) - - print(i, end=' ') - sys.stdout.flush() - if i % 10 == 0: - db.uncache() \ No newline at end of file diff --git a/IPython/utils/text.py b/IPython/utils/text.py index 6cd9955..59b8e7e 100644 --- a/IPython/utils/text.py +++ b/IPython/utils/text.py @@ -8,42 +8,21 @@ Inheritance diagram: :parts: 3 """ -#----------------------------------------------------------------------------- -# Copyright (C) 2008-2011 The IPython Development Team -# -# Distributed under the terms of the 
BSD License. The full license is in -# the file COPYING, distributed as part of this software. -#----------------------------------------------------------------------------- - -#----------------------------------------------------------------------------- -# Imports -#----------------------------------------------------------------------------- - import os import re import sys import textwrap from string import Formatter -from IPython.external.path import path from IPython.testing.skipdoctest import skip_doctest_py3, skip_doctest from IPython.utils import py3compat -#----------------------------------------------------------------------------- -# Declarations -#----------------------------------------------------------------------------- - # datetime.strftime date format for ipython if sys.platform == 'win32': date_format = "%B %d, %Y" else: date_format = "%B %-d, %Y" - -#----------------------------------------------------------------------------- -# Code -#----------------------------------------------------------------------------- - class LSString(str): """String derivative with a special access attributes. @@ -52,7 +31,7 @@ class LSString(str): .l (or .list) : value as list (split on newlines). .n (or .nlstr): original value (the string itself). .s (or .spstr): value as whitespace-separated string. - .p (or .paths): list of path objects + .p (or .paths): list of path objects (requires path.py package) Any values which require transformations are computed only once and cached. @@ -84,6 +63,7 @@ class LSString(str): n = nlstr = property(get_nlstr) def get_paths(self): + from path import path try: return self.__paths except AttributeError: @@ -113,7 +93,7 @@ class SList(list): * .l (or .list) : value as list (the list itself). * .n (or .nlstr): value as a string, joined on newlines. * .s (or .spstr): value as a string, joined on spaces. 
- * .p (or .paths): list of path objects + * .p (or .paths): list of path objects (requires path.py package) Any values which require transformations are computed only once and cached.""" @@ -142,6 +122,7 @@ class SList(list): n = nlstr = property(get_nlstr) def get_paths(self): + from path import path try: return self.__paths except AttributeError: diff --git a/setup.py b/setup.py index d57540d..a2887bd 100755 --- a/setup.py +++ b/setup.py @@ -63,7 +63,7 @@ from setupbase import ( find_entry_points, build_scripts_entrypt, find_data_files, - check_for_dependencies, + check_for_readline, git_prebuild, check_submodule_status, update_submodules, @@ -78,7 +78,6 @@ from setupbase import ( install_scripts_for_symlink, unsymlink, ) -from setupext import setupext isfile = os.path.isfile pjoin = os.path.join @@ -268,14 +267,22 @@ if sys.version_info < (3, 3): extras_require['notebook'].extend(extras_require['nbformat']) extras_require['nbconvert'].extend(extras_require['nbformat']) -install_requires = [] +install_requires = [ + 'decorator', + 'pickleshare', + 'simplegeneric>0.8', +] -# add readline +# add platform-specific dependencies if sys.platform == 'darwin': - if 'bdist_wheel' in sys.argv[1:] or not setupext.check_for_readline(): + install_requires.append('appnope') + if 'bdist_wheel' in sys.argv[1:] or not check_for_readline(): install_requires.append('gnureadline') -elif sys.platform.startswith('win'): + +if sys.platform.startswith('win'): extras_require['terminal'].append('pyreadline>=2.0') +else: + install_requires.append('pexpect') everything = set() for deps in extras_require.values(): @@ -317,13 +324,6 @@ if 'setuptools' in sys.modules: "ipython_win_post_install.py"}} else: - # If we are installing without setuptools, call this function which will - # check for dependencies an inform the user what is needed. This is - # just to make life easy for users. - for install_cmd in ('install', 'symlink'): - if install_cmd in sys.argv: - check_for_dependencies() - break # scripts has to be a non-empty list, or install_scripts isn't called setup_args['scripts'] = [e.split('=')[0].strip() for e in find_entry_points()] diff --git a/setupbase.py b/setupbase.py index 4fe4666..956a51e 100644 --- a/setupbase.py +++ b/setupbase.py @@ -490,37 +490,23 @@ class install_scripts_for_symlink(install_scripts): # Verify all dependencies #--------------------------------------------------------------------------- -def check_for_dependencies(): - """Check for IPython's dependencies. - - This function should NOT be called if running under setuptools! 
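The text.py hunks above turn path.py into an optional dependency: the docstrings now note the requirement, and the import moves inside `get_paths` so it only runs when `.paths` is actually accessed. A hedged sketch of that lazy-import property pattern (the class and names are illustrative)::

    class Output(str):
        @property
        def paths(self):
            # Imported here rather than at module level, so ordinary use of
            # the string never needs the optional path.py package installed.
            from path import path
            return [path(p) for p in self.split('\n') if p.strip()]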
- """ - from setupext.setupext import ( - print_line, print_raw, print_status, - check_for_sphinx, check_for_pygments, - check_for_nose, check_for_pexpect, - check_for_pyzmq, check_for_readline, - check_for_jinja2, check_for_tornado - ) - print_line() - print_raw("BUILDING IPYTHON") - print_status('python', sys.version) - print_status('platform', sys.platform) - if sys.platform == 'win32': - print_status('Windows version', sys.getwindowsversion()) - - print_raw("") - print_raw("OPTIONAL DEPENDENCIES") - - check_for_sphinx() - check_for_pygments() - check_for_nose() - if os.name == 'posix': - check_for_pexpect() - check_for_pyzmq() - check_for_tornado() - check_for_readline() - check_for_jinja2() +def check_for_readline(): + """Check for GNU readline""" + try: + import gnureadline as readline + except ImportError: + pass + else: + return True + try: + import readline + except ImportError: + return False + else: + if sys.platform == 'darwin' and 'libedit' in readline.__doc__: + print("Ignoring readline linked to libedit", file=sys.stderr) + return False + return True #--------------------------------------------------------------------------- # VCS related @@ -670,7 +656,7 @@ def get_bdist_wheel(): if found: lis.pop(idx) - for pkg in ("gnureadline", "pyreadline", "mock", "terminado"): + for pkg in ("gnureadline", "pyreadline", "mock", "terminado", "appnope", "pexpect"): _remove_startswith(requires, pkg) requires.append("gnureadline; sys.platform == 'darwin' and platform.python_implementation == 'CPython'") requires.append("terminado (>=0.3.3); extra == 'notebook' and sys.platform != 'win32'") @@ -678,6 +664,8 @@ def get_bdist_wheel(): requires.append("pyreadline (>=2.0); extra == 'terminal' and sys.platform == 'win32' and platform.python_implementation == 'CPython'") requires.append("pyreadline (>=2.0); extra == 'all' and sys.platform == 'win32' and platform.python_implementation == 'CPython'") requires.append("mock; extra == 'test' and python_version < '3.3'") + requires.append("appnope; sys.platform == 'darwin'") + requires.append("pexpect; sys.platform != 'win32'") for r in requires: pkg_info['Requires-Dist'] = r write_pkg_info(metadata_path, pkg_info) diff --git a/setupext/setupext.py b/setupext/setupext.py deleted file mode 100644 index 070b3e3..0000000 --- a/setupext/setupext.py +++ /dev/null @@ -1,177 +0,0 @@ -# encoding: utf-8 -from __future__ import print_function - -__docformat__ = "restructuredtext en" - -#------------------------------------------------------------------------------- -# Copyright (C) 2008 The IPython Development Team -# -# Distributed under the terms of the BSD License. The full license is in -# the file COPYING, distributed as part of this software. 
-#------------------------------------------------------------------------------- - -#------------------------------------------------------------------------------- -# Imports -#------------------------------------------------------------------------------- - -import sys -from textwrap import fill - -display_status=True - -def check_display(f): - """decorator to allow display methods to be muted by mod.display_status""" - def maybe_display(*args, **kwargs): - if display_status: - return f(*args, **kwargs) - return maybe_display - -@check_display -def print_line(char='='): - print(char * 76) - -@check_display -def print_status(package, status): - initial_indent = "%22s: " % package - indent = ' ' * 24 - print(fill(str(status), width=76, - initial_indent=initial_indent, - subsequent_indent=indent)) - -@check_display -def print_message(message): - indent = ' ' * 24 + "* " - print(fill(str(message), width=76, - initial_indent=indent, - subsequent_indent=indent)) - -@check_display -def print_raw(section): - print(section) - -#------------------------------------------------------------------------------- -# Tests for specific packages -#------------------------------------------------------------------------------- - -def check_for_ipython(): - try: - import IPython - except ImportError: - print_status("IPython", "Not found") - return False - else: - print_status("IPython", IPython.__version__) - return True - -def check_for_sphinx(): - try: - import sphinx - except ImportError: - print_status('sphinx', "Not found (required for docs and nbconvert)") - return False - else: - print_status('sphinx', sphinx.__version__) - return True - -def check_for_pygments(): - try: - import pygments - except ImportError: - print_status('pygments', "Not found (required for docs and nbconvert)") - return False - else: - print_status('pygments', pygments.__version__) - return True - -def check_for_jinja2(): - try: - import jinja2 - except ImportError: - print_status('jinja2', "Not found (required for notebook and nbconvert)") - return False - else: - print_status('jinja2', jinja2.__version__) - return True - -def check_for_nose(): - try: - import nose - except ImportError: - print_status('nose', "Not found (required for running the test suite)") - return False - else: - print_status('nose', nose.__version__) - return True - -def check_for_pexpect(): - try: - import pexpect - except ImportError: - print_status("pexpect", "no (will use bundled version in IPython.external)") - return False - else: - print_status("pexpect", pexpect.__version__) - return True - -def check_for_pyzmq(): - try: - import zmq - except ImportError: - print_status('pyzmq', "no (required for qtconsole, notebook, and parallel computing capabilities)") - return False - else: - # pyzmq 2.1.10 adds pyzmq_version_info funtion for returning - # version as a tuple - if hasattr(zmq, 'pyzmq_version_info') and zmq.pyzmq_version_info() >= (2,1,11): - print_status("pyzmq", zmq.__version__) - return True - else: - print_status('pyzmq', "no (have %s, but require >= 2.1.11 for" - " qtconsole, notebook, and parallel computing capabilities)" % zmq.__version__) - return False - -def check_for_tornado(): - try: - import tornado - except ImportError: - print_status('tornado', "no (required for notebook)") - return False - else: - if getattr(tornado, 'version_info', (0,)) < (3,1): - print_status('tornado', "no (have %s, but require >= 3.1.0)" % tornado.version) - return False - else: - print_status('tornado', tornado.version) - return True - -def 
check_for_readline(): - from distutils.version import LooseVersion - readline = None - try: - import gnureadline as readline - except ImportError: - pass - if readline is None: - try: - import readline - except ImportError: - pass - if readline is None: - try: - import pyreadline - vs = pyreadline.release.version - except (ImportError, AttributeError): - print_status('readline', "no (required for good interactive behavior)") - return False - if LooseVersion(vs).version >= [1,7,1]: - print_status('readline', "yes pyreadline-" + vs) - return True - else: - print_status('readline', "no pyreadline-%s < 1.7.1" % vs) - return False - else: - if sys.platform == 'darwin' and 'libedit' in readline.__doc__: - print_status('readline', "no (libedit detected)") - return False - print_status('readline', "yes") - return True
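With the optional-dependency report gone, the only question setup.py still needs answered at build time on OS X is whether a real GNU readline is importable or only libedit, which is exactly what the new, smaller `check_for_readline()` in setupbase.py reports. A hedged, simplified sketch of how it is consumed (the import path follows the setup.py hunk earlier in the diff; the real hunk also forces gnureadline for bdist_wheel builds)::

    import sys
    from setupbase import check_for_readline

    install_requires = ['decorator', 'pickleshare', 'simplegeneric>0.8']
    if sys.platform == 'darwin' and not check_for_readline():
        # Only libedit (or no readline at all) was found: depend on gnureadline.
        install_requires.append('gnureadline')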