diff --git a/IPython/testing/iptest.py b/IPython/testing/iptest.py index d0e9350..9f41a88 100644 --- a/IPython/testing/iptest.py +++ b/IPython/testing/iptest.py @@ -145,7 +145,7 @@ have['zmq'] = test_for('zmq.pyzmq_version_info', min_zmq, callback=lambda x: x() # Test suite definitions #----------------------------------------------------------------------------- -test_group_names = ['parallel', 'kernel', 'kernel.inprocess', 'config', 'core', +test_group_names = ['parallel', 'config', 'core', 'extensions', 'lib', 'terminal', 'testing', 'utils', 'qt', 'html', 'nbconvert' ] @@ -172,8 +172,6 @@ class TestSection(object): shims = { 'parallel': 'ipython_parallel', - 'kernel': 'ipython_kernel', - 'kernel.inprocess': 'ipython_kernel.inprocess', 'config': 'traitlets', 'html': 'jupyter_notebook', } @@ -228,20 +226,6 @@ if not have['pymongo']: sec.exclude('controller.mongodb') sec.exclude('tests.test_mongodb') -# kernel: -sec = test_sections['kernel'] -sec.requires('zmq') -# The in-process kernel tests are done in a separate section -sec.exclude('inprocess') -# importing gtk sets the default encoding, which we want to avoid -sec.exclude('gui.gtkembed') -sec.exclude('gui.gtk3embed') -if not have['matplotlib']: - sec.exclude('pylab') - -# kernel.inprocess: -test_sections['kernel.inprocess'].requires('zmq') - # extensions: sec = test_sections['extensions'] # This is deprecated in favour of rpy2 diff --git a/jupyter_client/__init__.py b/jupyter_client/__init__.py deleted file mode 100644 index 69ab55f..0000000 --- a/jupyter_client/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -"""IPython kernels and associated utilities""" - -from .connect import * -from .launcher import * -from .client import KernelClient -from .manager import KernelManager, run_kernel -from .blocking import BlockingKernelClient -from .multikernelmanager import MultiKernelManager diff --git a/jupyter_client/adapter.py b/jupyter_client/adapter.py deleted file mode 100644 index 18d9984..0000000 --- a/jupyter_client/adapter.py +++ /dev/null @@ -1,401 +0,0 @@ -"""Adapters for IPython msg spec versions.""" - -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. - -import re -import json - -from IPython.core.release import kernel_protocol_version_info - - -def code_to_line(code, cursor_pos): - """Turn a multiline code block and cursor position into a single line - and new cursor position. - - For adapting ``complete_`` and ``object_info_request``. - """ - if not code: - return "", 0 - for line in code.splitlines(True): - n = len(line) - if cursor_pos > n: - cursor_pos -= n - else: - break - return line, cursor_pos - - -_match_bracket = re.compile(r'\([^\(\)]+\)', re.UNICODE) -_end_bracket = re.compile(r'\([^\(]*$', re.UNICODE) -_identifier = re.compile(r'[a-z_][0-9a-z._]*', re.I|re.UNICODE) - -def extract_oname_v4(code, cursor_pos): - """Reimplement token-finding logic from IPython 2.x javascript - - for adapting object_info_request from v5 to v4 - """ - - line, _ = code_to_line(code, cursor_pos) - - oldline = line - line = _match_bracket.sub(u'', line) - while oldline != line: - oldline = line - line = _match_bracket.sub(u'', line) - - # remove everything after last open bracket - line = _end_bracket.sub('', line) - matches = _identifier.findall(line) - if matches: - return matches[-1] - else: - return '' - - -class Adapter(object): - """Base class for adapting messages - - Override message_type(msg) methods to create adapters. 
- """ - - msg_type_map = {} - - def update_header(self, msg): - return msg - - def update_metadata(self, msg): - return msg - - def update_msg_type(self, msg): - header = msg['header'] - msg_type = header['msg_type'] - if msg_type in self.msg_type_map: - msg['msg_type'] = header['msg_type'] = self.msg_type_map[msg_type] - return msg - - def handle_reply_status_error(self, msg): - """This will be called *instead of* the regular handler - - on any reply with status != ok - """ - return msg - - def __call__(self, msg): - msg = self.update_header(msg) - msg = self.update_metadata(msg) - msg = self.update_msg_type(msg) - header = msg['header'] - - handler = getattr(self, header['msg_type'], None) - if handler is None: - return msg - - # handle status=error replies separately (no change, at present) - if msg['content'].get('status', None) in {'error', 'aborted'}: - return self.handle_reply_status_error(msg) - return handler(msg) - -def _version_str_to_list(version): - """convert a version string to a list of ints - - non-int segments are excluded - """ - v = [] - for part in version.split('.'): - try: - v.append(int(part)) - except ValueError: - pass - return v - -class V5toV4(Adapter): - """Adapt msg protocol v5 to v4""" - - version = '4.1' - - msg_type_map = { - 'execute_result' : 'pyout', - 'execute_input' : 'pyin', - 'error' : 'pyerr', - 'inspect_request' : 'object_info_request', - 'inspect_reply' : 'object_info_reply', - } - - def update_header(self, msg): - msg['header'].pop('version', None) - return msg - - # shell channel - - def kernel_info_reply(self, msg): - v4c = {} - content = msg['content'] - for key in ('language_version', 'protocol_version'): - if key in content: - v4c[key] = _version_str_to_list(content[key]) - if content.get('implementation', '') == 'ipython' \ - and 'implementation_version' in content: - v4c['ipython_version'] = _version_str_to_list(content['implementation_version']) - language_info = content.get('language_info', {}) - language = language_info.get('name', '') - v4c.setdefault('language', language) - if 'version' in language_info: - v4c.setdefault('language_version', _version_str_to_list(language_info['version'])) - msg['content'] = v4c - return msg - - def execute_request(self, msg): - content = msg['content'] - content.setdefault('user_variables', []) - return msg - - def execute_reply(self, msg): - content = msg['content'] - content.setdefault('user_variables', {}) - # TODO: handle payloads - return msg - - def complete_request(self, msg): - content = msg['content'] - code = content['code'] - cursor_pos = content['cursor_pos'] - line, cursor_pos = code_to_line(code, cursor_pos) - - new_content = msg['content'] = {} - new_content['text'] = '' - new_content['line'] = line - new_content['block'] = None - new_content['cursor_pos'] = cursor_pos - return msg - - def complete_reply(self, msg): - content = msg['content'] - cursor_start = content.pop('cursor_start') - cursor_end = content.pop('cursor_end') - match_len = cursor_end - cursor_start - content['matched_text'] = content['matches'][0][:match_len] - content.pop('metadata', None) - return msg - - def object_info_request(self, msg): - content = msg['content'] - code = content['code'] - cursor_pos = content['cursor_pos'] - line, _ = code_to_line(code, cursor_pos) - - new_content = msg['content'] = {} - new_content['oname'] = extract_oname_v4(code, cursor_pos) - new_content['detail_level'] = content['detail_level'] - return msg - - def object_info_reply(self, msg): - """inspect_reply can't be easily backward 
compatible""" - msg['content'] = {'found' : False, 'oname' : 'unknown'} - return msg - - # iopub channel - - def stream(self, msg): - content = msg['content'] - content['data'] = content.pop('text') - return msg - - def display_data(self, msg): - content = msg['content'] - content.setdefault("source", "display") - data = content['data'] - if 'application/json' in data: - try: - data['application/json'] = json.dumps(data['application/json']) - except Exception: - # warn? - pass - return msg - - # stdin channel - - def input_request(self, msg): - msg['content'].pop('password', None) - return msg - - -class V4toV5(Adapter): - """Convert msg spec V4 to V5""" - version = '5.0' - - # invert message renames above - msg_type_map = {v:k for k,v in V5toV4.msg_type_map.items()} - - def update_header(self, msg): - msg['header']['version'] = self.version - return msg - - # shell channel - - def kernel_info_reply(self, msg): - content = msg['content'] - for key in ('protocol_version', 'ipython_version'): - if key in content: - content[key] = '.'.join(map(str, content[key])) - - content.setdefault('protocol_version', '4.1') - - if content['language'].startswith('python') and 'ipython_version' in content: - content['implementation'] = 'ipython' - content['implementation_version'] = content.pop('ipython_version') - - language = content.pop('language') - language_info = content.setdefault('language_info', {}) - language_info.setdefault('name', language) - if 'language_version' in content: - language_version = '.'.join(map(str, content.pop('language_version'))) - language_info.setdefault('version', language_version) - - content['banner'] = '' - return msg - - def execute_request(self, msg): - content = msg['content'] - user_variables = content.pop('user_variables', []) - user_expressions = content.setdefault('user_expressions', {}) - for v in user_variables: - user_expressions[v] = v - return msg - - def execute_reply(self, msg): - content = msg['content'] - user_expressions = content.setdefault('user_expressions', {}) - user_variables = content.pop('user_variables', {}) - if user_variables: - user_expressions.update(user_variables) - - # Pager payloads became a mime bundle - for payload in content.get('payload', []): - if payload.get('source', None) == 'page' and ('text' in payload): - if 'data' not in payload: - payload['data'] = {} - payload['data']['text/plain'] = payload.pop('text') - - return msg - - def complete_request(self, msg): - old_content = msg['content'] - - new_content = msg['content'] = {} - new_content['code'] = old_content['line'] - new_content['cursor_pos'] = old_content['cursor_pos'] - return msg - - def complete_reply(self, msg): - # complete_reply needs more context than we have to get cursor_start and end. - # use special end=null to indicate current cursor position and negative offset - # for start relative to the cursor. - # start=None indicates that start == end (accounts for no -0). 
- content = msg['content'] - new_content = msg['content'] = {'status' : 'ok'} - new_content['matches'] = content['matches'] - if content['matched_text']: - new_content['cursor_start'] = -len(content['matched_text']) - else: - # no -0, use None to indicate that start == end - new_content['cursor_start'] = None - new_content['cursor_end'] = None - new_content['metadata'] = {} - return msg - - def inspect_request(self, msg): - content = msg['content'] - name = content['oname'] - - new_content = msg['content'] = {} - new_content['code'] = name - new_content['cursor_pos'] = len(name) - new_content['detail_level'] = content['detail_level'] - return msg - - def inspect_reply(self, msg): - """inspect_reply can't be easily backward compatible""" - content = msg['content'] - new_content = msg['content'] = {'status' : 'ok'} - found = new_content['found'] = content['found'] - new_content['name'] = content['oname'] - new_content['data'] = data = {} - new_content['metadata'] = {} - if found: - lines = [] - for key in ('call_def', 'init_definition', 'definition'): - if content.get(key, False): - lines.append(content[key]) - break - for key in ('call_docstring', 'init_docstring', 'docstring'): - if content.get(key, False): - lines.append(content[key]) - break - if not lines: - lines.append("") - data['text/plain'] = '\n'.join(lines) - return msg - - # iopub channel - - def stream(self, msg): - content = msg['content'] - content['text'] = content.pop('data') - return msg - - def display_data(self, msg): - content = msg['content'] - content.pop("source", None) - data = content['data'] - if 'application/json' in data: - try: - data['application/json'] = json.loads(data['application/json']) - except Exception: - # warn? - pass - return msg - - # stdin channel - - def input_request(self, msg): - msg['content'].setdefault('password', False) - return msg - - - -def adapt(msg, to_version=kernel_protocol_version_info[0]): - """Adapt a single message to a target version - - Parameters - ---------- - - msg : dict - An IPython message. - to_version : int, optional - The target major version. - If unspecified, adapt to the current version for IPython. - - Returns - ------- - - msg : dict - An IPython message appropriate in the new version. - """ - header = msg['header'] - if 'version' in header: - from_version = int(header['version'].split('.')[0]) - else: - # assume last version before adding the key to the header - from_version = 4 - adapter = adapters.get((from_version, to_version), None) - if adapter is None: - return msg - return adapter(msg) - - -# one adapter per major version from,to -adapters = { - (5,4) : V5toV4(), - (4,5) : V4toV5(), -} diff --git a/jupyter_client/blocking/__init__.py b/jupyter_client/blocking/__init__.py deleted file mode 100644 index dc38f24..0000000 --- a/jupyter_client/blocking/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .client import BlockingKernelClient \ No newline at end of file diff --git a/jupyter_client/blocking/channels.py b/jupyter_client/blocking/channels.py deleted file mode 100644 index 0612314..0000000 --- a/jupyter_client/blocking/channels.py +++ /dev/null @@ -1,91 +0,0 @@ -"""Blocking channels - -Useful for test suites and blocking terminal interfaces. -""" - -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. 
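The adapter module removed above translated live messages between protocol majors in both directions (V5toV4 and V4toV5), dispatching on the major version recorded in the message header. A minimal sketch of how it was driven, assuming the module is importable from the standalone jupyter_client package that now hosts this code; the message below carries only the fields the adapter touches:

from jupyter_client.adapter import adapt

# a v5 message; adapt() reads the major version from the header
v5_msg = {
    'header': {'msg_type': 'execute_input', 'version': '5.0'},
    'parent_header': {},
    'metadata': {},
    'content': {'code': 'print(1)', 'execution_count': 1},
}

v4_msg = adapt(v5_msg, to_version=4)
# V5toV4 renames execute_input back to the old 'pyin' type and drops
# the 'version' key, which v4 headers never carried
assert v4_msg['header']['msg_type'] == 'pyin'
assert 'version' not in v4_msg['header']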
- -try: - from queue import Queue, Empty # Py 3 -except ImportError: - from Queue import Queue, Empty # Py 2 - - -class ZMQSocketChannel(object): - """A ZMQ socket in a simple blocking API""" - session = None - socket = None - stream = None - _exiting = False - proxy_methods = [] - - def __init__(self, socket, session, loop=None): - """Create a channel. - - Parameters - ---------- - socket : :class:`zmq.Socket` - The ZMQ socket to use. - session : :class:`session.Session` - The session to use. - loop - Unused here, for other implementations - """ - super(ZMQSocketChannel, self).__init__() - - self.socket = socket - self.session = session - - def _recv(self, **kwargs): - msg = self.socket.recv_multipart(**kwargs) - ident,smsg = self.session.feed_identities(msg) - return self.session.deserialize(smsg) - - def get_msg(self, block=True, timeout=None): - """ Gets a message if there is one that is ready. """ - if block: - if timeout is not None: - timeout *= 1000 # seconds to ms - ready = self.socket.poll(timeout) - else: - ready = self.socket.poll(timeout=0) - - if ready: - return self._recv() - else: - raise Empty - - def get_msgs(self): - """ Get all messages that are currently ready. """ - msgs = [] - while True: - try: - msgs.append(self.get_msg(block=False)) - except Empty: - break - return msgs - - def msg_ready(self): - """ Is there a message that has been received? """ - return bool(self.socket.poll(timeout=0)) - - def close(self): - if self.socket is not None: - try: - self.socket.close(linger=0) - except Exception: - pass - self.socket = None - stop = close - - def is_alive(self): - return (self.socket is not None) - - def send(self, msg): - """Pass a message to the ZMQ socket to send - """ - self.session.send(self.socket, msg) - - def start(self): - pass diff --git a/jupyter_client/blocking/client.py b/jupyter_client/blocking/client.py deleted file mode 100644 index 1315abe..0000000 --- a/jupyter_client/blocking/client.py +++ /dev/null @@ -1,38 +0,0 @@ -"""Implements a fully blocking kernel client. - -Useful for test suites and blocking terminal interfaces. -""" -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. - -try: - from queue import Empty # Python 3 -except ImportError: - from Queue import Empty # Python 2 - -from IPython.utils.traitlets import Type -from jupyter_client.channels import HBChannel -from jupyter_client.client import KernelClient -from .channels import ZMQSocketChannel - -class BlockingKernelClient(KernelClient): - def wait_for_ready(self): - # Wait for kernel info reply on shell channel - while True: - msg = self.shell_channel.get_msg(block=True) - if msg['msg_type'] == 'kernel_info_reply': - self._handle_kernel_info_reply(msg) - break - - # Flush IOPub channel - while True: - try: - msg = self.iopub_channel.get_msg(block=True, timeout=0.2) - except Empty: - break - - # The classes to use for the various channels - shell_channel_class = Type(ZMQSocketChannel) - iopub_channel_class = Type(ZMQSocketChannel) - stdin_channel_class = Type(ZMQSocketChannel) - hb_channel_class = Type(HBChannel) diff --git a/jupyter_client/channels.py b/jupyter_client/channels.py deleted file mode 100644 index f9b8d93..0000000 --- a/jupyter_client/channels.py +++ /dev/null @@ -1,203 +0,0 @@ -"""Base classes to manage a Client's interaction with a running kernel""" - -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. 
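The blocking client deleted above was the workhorse for test suites and simple terminal frontends: each get_msg call polls the underlying socket directly instead of running an event loop. A sketch of a typical round trip, assuming the module's new home in the standalone jupyter_client package and a kernel that has already written the hypothetical connection file named below:

from jupyter_client.blocking import BlockingKernelClient

kc = BlockingKernelClient(connection_file='kernel-1234.json')  # hypothetical file
kc.load_connection_file()
kc.start_channels()     # also fires the kernel_info_request consumed below
kc.wait_for_ready()     # blocks for kernel_info_reply, then flushes IOPub

msg_id = kc.execute('1 + 1')
reply = kc.get_shell_msg(timeout=5)   # blocking recv on the shell channel
assert reply['parent_header']['msg_id'] == msg_id
kc.stop_channels()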
- -from __future__ import absolute_import - -import atexit -import errno -from threading import Thread -import time - -import zmq -# import ZMQError in top-level namespace, to avoid ugly attribute-error messages -# during garbage collection of threads at exit: -from zmq import ZMQError - -from IPython.core.release import kernel_protocol_version_info - -from .channelsabc import HBChannelABC - -#----------------------------------------------------------------------------- -# Constants and exceptions -#----------------------------------------------------------------------------- - -major_protocol_version = kernel_protocol_version_info[0] - -class InvalidPortNumber(Exception): - pass - -class HBChannel(Thread): - """The heartbeat channel which monitors the kernel heartbeat. - - Note that the heartbeat channel is paused by default. As long as you start - this channel, the kernel manager will ensure that it is paused and un-paused - as appropriate. - """ - context = None - session = None - socket = None - address = None - _exiting = False - - time_to_dead = 1. - poller = None - _running = None - _pause = None - _beating = None - - def __init__(self, context=None, session=None, address=None): - """Create the heartbeat monitor thread. - - Parameters - ---------- - context : :class:`zmq.Context` - The ZMQ context to use. - session : :class:`session.Session` - The session to use. - address : zmq url - Standard (ip, port) tuple that the kernel is listening on. - """ - super(HBChannel, self).__init__() - self.daemon = True - - self.context = context - self.session = session - if isinstance(address, tuple): - if address[1] == 0: - message = 'The port number for a channel cannot be 0.' - raise InvalidPortNumber(message) - address = "tcp://%s:%i" % address - self.address = address - atexit.register(self._notice_exit) - - self._running = False - self._pause = True - self.poller = zmq.Poller() - - def _notice_exit(self): - self._exiting = True - - def _create_socket(self): - if self.socket is not None: - # close previous socket, before opening a new one - self.poller.unregister(self.socket) - self.socket.close() - self.socket = self.context.socket(zmq.REQ) - self.socket.linger = 1000 - self.socket.connect(self.address) - - self.poller.register(self.socket, zmq.POLLIN) - - def _poll(self, start_time): - """poll for heartbeat replies until we reach self.time_to_dead. - - Ignores interrupts, and returns the result of poll(), which - will be an empty list if no messages arrived before the timeout, - or the event tuple if there is a message to receive. - """ - - until_dead = self.time_to_dead - (time.time() - start_time) - # ensure poll at least once - until_dead = max(until_dead, 1e-3) - events = [] - while True: - try: - events = self.poller.poll(1000 * until_dead) - except ZMQError as e: - if e.errno == errno.EINTR: - # ignore interrupts during heartbeat - # this may never actually happen - until_dead = self.time_to_dead - (time.time() - start_time) - until_dead = max(until_dead, 1e-3) - pass - else: - raise - except Exception: - if self._exiting: - break - else: - raise - else: - break - return events - - def run(self): - """The thread's main activity. 
Call start() instead.""" - self._create_socket() - self._running = True - self._beating = True - - while self._running: - if self._pause: - # just sleep, and skip the rest of the loop - time.sleep(self.time_to_dead) - continue - - since_last_heartbeat = 0.0 - # io.rprint('Ping from HB channel') # dbg - # no need to catch EFSM here, because the previous event was - # either a recv or connect, which cannot be followed by EFSM - self.socket.send(b'ping') - request_time = time.time() - ready = self._poll(request_time) - if ready: - self._beating = True - # the poll above guarantees we have something to recv - self.socket.recv() - # sleep the remainder of the cycle - remainder = self.time_to_dead - (time.time() - request_time) - if remainder > 0: - time.sleep(remainder) - continue - else: - # nothing was received within the time limit, signal heart failure - self._beating = False - since_last_heartbeat = time.time() - request_time - self.call_handlers(since_last_heartbeat) - # and close/reopen the socket, because the REQ/REP cycle has been broken - self._create_socket() - continue - - def pause(self): - """Pause the heartbeat.""" - self._pause = True - - def unpause(self): - """Unpause the heartbeat.""" - self._pause = False - - def is_beating(self): - """Is the heartbeat running and responsive (and not paused).""" - if self.is_alive() and not self._pause and self._beating: - return True - else: - return False - - def stop(self): - """Stop the channel's event loop and join its thread.""" - self._running = False - self.join() - self.close() - - def close(self): - if self.socket is not None: - try: - self.socket.close(linger=0) - except Exception: - pass - self.socket = None - - def call_handlers(self, since_last_heartbeat): - """This method is called in the ioloop thread when a message arrives. - - Subclasses should override this method to handle incoming messages. - It is important to remember that this method is called in the thread - so that some logic must be done to ensure that the application level - handlers are called in the application thread. - """ - pass - - -HBChannelABC.register(HBChannel) diff --git a/jupyter_client/channelsabc.py b/jupyter_client/channelsabc.py deleted file mode 100644 index e7f388a..0000000 --- a/jupyter_client/channelsabc.py +++ /dev/null @@ -1,49 +0,0 @@ -"""Abstract base classes for kernel client channels""" - -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. - -import abc - -from IPython.utils.py3compat import with_metaclass - - -class ChannelABC(with_metaclass(abc.ABCMeta, object)): - """A base class for all channel ABCs.""" - - @abc.abstractmethod - def start(self): - pass - - @abc.abstractmethod - def stop(self): - pass - - @abc.abstractmethod - def is_alive(self): - pass - - -class HBChannelABC(ChannelABC): - """HBChannel ABC. - - The docstrings for this class can be found in the base implementation: - - `jupyter_client.channels.HBChannel` - """ - - @abc.abstractproperty - def time_to_dead(self): - pass - - @abc.abstractmethod - def pause(self): - pass - - @abc.abstractmethod - def unpause(self): - pass - - @abc.abstractmethod - def is_beating(self): - pass diff --git a/jupyter_client/client.py b/jupyter_client/client.py deleted file mode 100644 index 6dc99c2..0000000 --- a/jupyter_client/client.py +++ /dev/null @@ -1,390 +0,0 @@ -"""Base class to manage the interaction with a running kernel""" - -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. 
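The heartbeat channel above runs in its own thread and starts paused; a frontend subclasses it and overrides call_handlers to be told when pings go unanswered. A sketch under those assumptions (the heartbeat address below is hypothetical):

import zmq
from jupyter_client.channels import HBChannel
from jupyter_client.session import Session

class LoggingHBChannel(HBChannel):
    def call_handlers(self, since_last_heartbeat):
        # runs in the heartbeat thread; marshal to the app thread if needed
        print('kernel unresponsive for %.1fs' % since_last_heartbeat)

hb = LoggingHBChannel(zmq.Context.instance(), Session(), 'tcp://127.0.0.1:5557')
hb.start()      # the thread starts paused...
hb.unpause()    # ...so unpause it to begin the once-per-time_to_dead ping cycle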
- -from __future__ import absolute_import -from jupyter_client.channels import major_protocol_version -from IPython.utils.py3compat import string_types, iteritems - -import zmq - -from IPython.utils.traitlets import ( - Any, Instance, Type, -) - -from .channelsabc import (ChannelABC, HBChannelABC) -from .clientabc import KernelClientABC -from .connect import ConnectionFileMixin - - -# some utilities to validate message structure, these might get moved elsewhere -# if they prove to have more generic utility - -def validate_string_dict(dct): - """Validate that the input is a dict with string keys and values. - - Raises ValueError if not.""" - for k,v in iteritems(dct): - if not isinstance(k, string_types): - raise ValueError('key %r in dict must be a string' % k) - if not isinstance(v, string_types): - raise ValueError('value %r in dict must be a string' % v) - - -class KernelClient(ConnectionFileMixin): - """Communicates with a single kernel on any host via zmq channels. - - There are four channels associated with each kernel: - - * shell: for request/reply calls to the kernel. - * iopub: for the kernel to publish results to frontends. - * hb: for monitoring the kernel's heartbeat. - * stdin: for frontends to reply to raw_input calls in the kernel. - - The methods of the channels are exposed as methods of the client itself - (KernelClient.execute, complete, history, etc.). - See the channels themselves for documentation of these methods. - - """ - - # The PyZMQ Context to use for communication with the kernel. - context = Instance(zmq.Context) - def _context_default(self): - return zmq.Context.instance() - - # The classes to use for the various channels - shell_channel_class = Type(ChannelABC) - iopub_channel_class = Type(ChannelABC) - stdin_channel_class = Type(ChannelABC) - hb_channel_class = Type(HBChannelABC) - - # Protected traits - _shell_channel = Any - _iopub_channel = Any - _stdin_channel = Any - _hb_channel = Any - - # flag for whether execute requests should be allowed to call raw_input: - allow_stdin = True - - #-------------------------------------------------------------------------- - # Channel proxy methods - #-------------------------------------------------------------------------- - - def _get_msg(channel, *args, **kwargs): - return channel.get_msg(*args, **kwargs) - - def get_shell_msg(self, *args, **kwargs): - """Get a message from the shell channel""" - return self.shell_channel.get_msg(*args, **kwargs) - - def get_iopub_msg(self, *args, **kwargs): - """Get a message from the iopub channel""" - return self.iopub_channel.get_msg(*args, **kwargs) - - def get_stdin_msg(self, *args, **kwargs): - """Get a message from the stdin channel""" - return self.stdin_channel.get_msg(*args, **kwargs) - - #-------------------------------------------------------------------------- - # Channel management methods - #-------------------------------------------------------------------------- - - def start_channels(self, shell=True, iopub=True, stdin=True, hb=True): - """Starts the channels for this kernel. - - This will create the channels if they do not exist and then start - them (their activity runs in a thread). If port numbers of 0 are - being used (random ports) then you must first call - :meth:`start_kernel`. If the channels have been stopped and you - call this, :class:`RuntimeError` will be raised. 
- """ - if shell: - self.shell_channel.start() - self.kernel_info() - if iopub: - self.iopub_channel.start() - if stdin: - self.stdin_channel.start() - self.allow_stdin = True - else: - self.allow_stdin = False - if hb: - self.hb_channel.start() - - def stop_channels(self): - """Stops all the running channels for this kernel. - - This stops their event loops and joins their threads. - """ - if self.shell_channel.is_alive(): - self.shell_channel.stop() - if self.iopub_channel.is_alive(): - self.iopub_channel.stop() - if self.stdin_channel.is_alive(): - self.stdin_channel.stop() - if self.hb_channel.is_alive(): - self.hb_channel.stop() - - @property - def channels_running(self): - """Are any of the channels created and running?""" - return (self.shell_channel.is_alive() or self.iopub_channel.is_alive() or - self.stdin_channel.is_alive() or self.hb_channel.is_alive()) - - ioloop = None # Overridden in subclasses that use pyzmq event loop - - @property - def shell_channel(self): - """Get the shell channel object for this kernel.""" - if self._shell_channel is None: - url = self._make_url('shell') - self.log.debug("connecting shell channel to %s", url) - socket = self.connect_shell(identity=self.session.bsession) - self._shell_channel = self.shell_channel_class( - socket, self.session, self.ioloop - ) - return self._shell_channel - - @property - def iopub_channel(self): - """Get the iopub channel object for this kernel.""" - if self._iopub_channel is None: - url = self._make_url('iopub') - self.log.debug("connecting iopub channel to %s", url) - socket = self.connect_iopub() - self._iopub_channel = self.iopub_channel_class( - socket, self.session, self.ioloop - ) - return self._iopub_channel - - @property - def stdin_channel(self): - """Get the stdin channel object for this kernel.""" - if self._stdin_channel is None: - url = self._make_url('stdin') - self.log.debug("connecting stdin channel to %s", url) - socket = self.connect_stdin(identity=self.session.bsession) - self._stdin_channel = self.stdin_channel_class( - socket, self.session, self.ioloop - ) - return self._stdin_channel - - @property - def hb_channel(self): - """Get the hb channel object for this kernel.""" - if self._hb_channel is None: - url = self._make_url('hb') - self.log.debug("connecting heartbeat channel to %s", url) - self._hb_channel = self.hb_channel_class( - self.context, self.session, url - ) - return self._hb_channel - - def is_alive(self): - """Is the kernel process still running?""" - if self._hb_channel is not None: - # We didn't start the kernel with this KernelManager so we - # use the heartbeat. - return self._hb_channel.is_beating() - else: - # no heartbeat and not local, we can't tell if it's running, - # so naively return True - return True - - - # Methods to send specific messages on channels - def execute(self, code, silent=False, store_history=True, - user_expressions=None, allow_stdin=None, stop_on_error=True): - """Execute code in the kernel. - - Parameters - ---------- - code : str - A string of Python code. - - silent : bool, optional (default False) - If set, the kernel will execute the code as quietly possible, and - will force store_history to be False. - - store_history : bool, optional (default True) - If set, the kernel will store command history. This is forced - to be False if silent is True. - - user_expressions : dict, optional - A dict mapping names to expressions to be evaluated in the user's - dict. The expression values are returned as strings formatted using - :func:`repr`. 
- - allow_stdin : bool, optional (default self.allow_stdin) - Flag for whether the kernel can send stdin requests to frontends. - - Some frontends (e.g. the Notebook) do not support stdin requests. - If raw_input is called from code executed from such a frontend, a - StdinNotImplementedError will be raised. - - stop_on_error: bool, optional (default True) - Flag whether to abort the execution queue, if an exception is encountered. - - Returns - ------- - The msg_id of the message sent. - """ - if user_expressions is None: - user_expressions = {} - if allow_stdin is None: - allow_stdin = self.allow_stdin - - - # Don't waste network traffic if inputs are invalid - if not isinstance(code, string_types): - raise ValueError('code %r must be a string' % code) - validate_string_dict(user_expressions) - - # Create class for content/msg creation. Related to, but possibly - # not in Session. - content = dict(code=code, silent=silent, store_history=store_history, - user_expressions=user_expressions, - allow_stdin=allow_stdin, stop_on_error=stop_on_error - ) - msg = self.session.msg('execute_request', content) - self.shell_channel.send(msg) - return msg['header']['msg_id'] - - def complete(self, code, cursor_pos=None): - """Tab complete text in the kernel's namespace. - - Parameters - ---------- - code : str - The context in which completion is requested. - Can be anything between a variable name and an entire cell. - cursor_pos : int, optional - The position of the cursor in the block of code where the completion was requested. - Default: ``len(code)`` - - Returns - ------- - The msg_id of the message sent. - """ - if cursor_pos is None: - cursor_pos = len(code) - content = dict(code=code, cursor_pos=cursor_pos) - msg = self.session.msg('complete_request', content) - self.shell_channel.send(msg) - return msg['header']['msg_id'] - - def inspect(self, code, cursor_pos=None, detail_level=0): - """Get metadata information about an object in the kernel's namespace. - - It is up to the kernel to determine the appropriate object to inspect. - - Parameters - ---------- - code : str - The context in which info is requested. - Can be anything between a variable name and an entire cell. - cursor_pos : int, optional - The position of the cursor in the block of code where the info was requested. - Default: ``len(code)`` - detail_level : int, optional - The level of detail for the introspection (0-2) - - Returns - ------- - The msg_id of the message sent. - """ - if cursor_pos is None: - cursor_pos = len(code) - content = dict(code=code, cursor_pos=cursor_pos, - detail_level=detail_level, - ) - msg = self.session.msg('inspect_request', content) - self.shell_channel.send(msg) - return msg['header']['msg_id'] - - def history(self, raw=True, output=False, hist_access_type='range', **kwargs): - """Get entries from the kernel's history list. - - Parameters - ---------- - raw : bool - If True, return the raw input. - output : bool - If True, then return the output as well. - hist_access_type : str - 'range' (fill in session, start and stop params), 'tail' (fill in n) - or 'search' (fill in pattern param). - - session : int - For a range request, the session from which to get lines. Session - numbers are positive integers; negative ones count back from the - current session. - start : int - The first line number of a history range. - stop : int - The final (excluded) line number of a history range. - - n : int - The number of lines of history to get for a tail request. 
- - pattern : str - The glob-syntax pattern for a search request. - - Returns - ------- - The msg_id of the message sent. - """ - content = dict(raw=raw, output=output, hist_access_type=hist_access_type, - **kwargs) - msg = self.session.msg('history_request', content) - self.shell_channel.send(msg) - return msg['header']['msg_id'] - - def kernel_info(self): - """Request kernel info.""" - msg = self.session.msg('kernel_info_request') - self.shell_channel.send(msg) - return msg['header']['msg_id'] - - def _handle_kernel_info_reply(self, msg): - """handle kernel info reply - - sets protocol adaptation version. This might - be run from a separate thread. - """ - adapt_version = int(msg['content']['protocol_version'].split('.')[0]) - if adapt_version != major_protocol_version: - self.session.adapt_version = adapt_version - - def shutdown(self, restart=False): - """Request an immediate kernel shutdown. - - Upon receipt of the (empty) reply, client code can safely assume that - the kernel has shut down and it's safe to forcefully terminate it if - it's still alive. - - The kernel will send the reply via a function registered with Python's - atexit module, ensuring it's truly done as the kernel is done with all - normal operation. - """ - # Send quit message to kernel. Once we implement kernel-side setattr, - # this should probably be done that way, but for now this will do. - msg = self.session.msg('shutdown_request', {'restart':restart}) - self.shell_channel.send(msg) - return msg['header']['msg_id'] - - def is_complete(self, code): - msg = self.session.msg('is_complete_request', {'code': code}) - self.shell_channel.send(msg) - return msg['header']['msg_id'] - - def input(self, string): - """Send a string of raw input to the kernel.""" - content = dict(value=string) - msg = self.session.msg('input_reply', content) - self.stdin_channel.send(msg) - - -KernelClientABC.register(KernelClient) diff --git a/jupyter_client/clientabc.py b/jupyter_client/clientabc.py deleted file mode 100644 index af8339e..0000000 --- a/jupyter_client/clientabc.py +++ /dev/null @@ -1,80 +0,0 @@ -"""Abstract base class for kernel clients""" - -#----------------------------------------------------------------------------- -# Copyright (C) 2013 The IPython Development Team -# -# Distributed under the terms of the BSD License. The full license is in -# the file COPYING, distributed as part of this software. -#----------------------------------------------------------------------------- - -#----------------------------------------------------------------------------- -# Imports -#----------------------------------------------------------------------------- - -import abc - -from IPython.utils.py3compat import with_metaclass - -#----------------------------------------------------------------------------- -# Main kernel client class -#----------------------------------------------------------------------------- - -class KernelClientABC(with_metaclass(abc.ABCMeta, object)): - """KernelManager ABC. 
- - The docstrings for this class can be found in the base implementation: - - `jupyter_client.client.KernelClient` - """ - - @abc.abstractproperty - def kernel(self): - pass - - @abc.abstractproperty - def shell_channel_class(self): - pass - - @abc.abstractproperty - def iopub_channel_class(self): - pass - - @abc.abstractproperty - def hb_channel_class(self): - pass - - @abc.abstractproperty - def stdin_channel_class(self): - pass - - #-------------------------------------------------------------------------- - # Channel management methods - #-------------------------------------------------------------------------- - - @abc.abstractmethod - def start_channels(self, shell=True, iopub=True, stdin=True, hb=True): - pass - - @abc.abstractmethod - def stop_channels(self): - pass - - @abc.abstractproperty - def channels_running(self): - pass - - @abc.abstractproperty - def shell_channel(self): - pass - - @abc.abstractproperty - def iopub_channel(self): - pass - - @abc.abstractproperty - def stdin_channel(self): - pass - - @abc.abstractproperty - def hb_channel(self): - pass diff --git a/jupyter_client/connect.py b/jupyter_client/connect.py deleted file mode 100644 index 23a00fb..0000000 --- a/jupyter_client/connect.py +++ /dev/null @@ -1,448 +0,0 @@ -"""Utilities for connecting to jupyter kernels - -The :class:`ConnectionFileMixin` class in this module encapsulates the logic -related to writing and reading connections files. -""" - -# Copyright (c) Jupyter Development Team. -# Distributed under the terms of the Modified BSD License. - - -from __future__ import absolute_import - -import glob -import json -import os -import socket -from getpass import getpass -import tempfile - -import zmq - -from IPython.config import LoggingConfigurable -from .localinterfaces import localhost -from IPython.utils.path import filefind -from IPython.utils.py3compat import (str_to_bytes, bytes_to_str, cast_bytes_py2, - string_types) -from IPython.utils.traitlets import ( - Bool, Integer, Unicode, CaselessStrEnum, Instance, -) - - -def write_connection_file(fname=None, shell_port=0, iopub_port=0, stdin_port=0, hb_port=0, - control_port=0, ip='', key=b'', transport='tcp', - signature_scheme='hmac-sha256', - ): - """Generates a JSON config file, including the selection of random ports. - - Parameters - ---------- - - fname : unicode - The path to the file to write - - shell_port : int, optional - The port to use for ROUTER (shell) channel. - - iopub_port : int, optional - The port to use for the SUB channel. - - stdin_port : int, optional - The port to use for the ROUTER (raw input) channel. - - control_port : int, optional - The port to use for the ROUTER (control) channel. - - hb_port : int, optional - The port to use for the heartbeat REP channel. - - ip : str, optional - The ip address the kernel will bind to. - - key : str, optional - The Session key used for message authentication. - - signature_scheme : str, optional - The scheme used for message authentication. - This has the form 'digest-hash', where 'digest' - is the scheme used for digests, and 'hash' is the name of the hash function - used by the digest scheme. - Currently, 'hmac' is the only supported digest scheme, - and 'sha256' is the default hash function. - - """ - if not ip: - ip = localhost() - # default to temporary connector file - if not fname: - fd, fname = tempfile.mkstemp('.json') - os.close(fd) - - # Find open ports as necessary. 
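write_connection_file fills in every port that is still 0 by binding throwaway sockets, as the loop that follows shows, then dumps the whole config as JSON. A usage sketch; the file name and key are hypothetical, and any port not passed in comes back randomly assigned in the returned dict:

from jupyter_client.connect import write_connection_file

fname, cfg = write_connection_file(fname='kernel-demo.json', key=b'secret')
print(cfg['shell_port'])        # randomly chosen, since shell_port defaulted to 0
print(cfg['signature_scheme'])  # 'hmac-sha256'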
- - ports = [] - ports_needed = int(shell_port <= 0) + \ - int(iopub_port <= 0) + \ - int(stdin_port <= 0) + \ - int(control_port <= 0) + \ - int(hb_port <= 0) - if transport == 'tcp': - for i in range(ports_needed): - sock = socket.socket() - # struct.pack('ii', (0,0)) is 8 null bytes - sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, b'\0' * 8) - sock.bind(('', 0)) - ports.append(sock) - for i, sock in enumerate(ports): - port = sock.getsockname()[1] - sock.close() - ports[i] = port - else: - N = 1 - for i in range(ports_needed): - while os.path.exists("%s-%s" % (ip, str(N))): - N += 1 - ports.append(N) - N += 1 - if shell_port <= 0: - shell_port = ports.pop(0) - if iopub_port <= 0: - iopub_port = ports.pop(0) - if stdin_port <= 0: - stdin_port = ports.pop(0) - if control_port <= 0: - control_port = ports.pop(0) - if hb_port <= 0: - hb_port = ports.pop(0) - - cfg = dict( shell_port=shell_port, - iopub_port=iopub_port, - stdin_port=stdin_port, - control_port=control_port, - hb_port=hb_port, - ) - cfg['ip'] = ip - cfg['key'] = bytes_to_str(key) - cfg['transport'] = transport - cfg['signature_scheme'] = signature_scheme - - with open(fname, 'w') as f: - f.write(json.dumps(cfg, indent=2)) - - return fname, cfg - - -def find_connection_file(filename='kernel-*.json', path=None): - """find a connection file, and return its absolute path. - - The current working directory and the profile's security - directory will be searched for the file if it is not given by - absolute path. - - If profile is unspecified, then the current running application's - profile will be used, or 'default', if not run from IPython. - - If the argument does not match an existing file, it will be interpreted as a - fileglob, and the matching file in the profile's security dir with - the latest access time will be used. - - Parameters - ---------- - filename : str - The connection file or fileglob to search for. - path : str or list of str [optional] - Paths in which to search for connection files. - - Returns - ------- - str : The absolute path of the connection file. - """ - if path is None: - path = ['.'] - if isinstance(path, string_types): - path = [path] - - try: - # first, try explicit name - return filefind(filename, path) - except IOError: - pass - - # not found by full name - - if '*' in filename: - # given as a glob already - pat = filename - else: - # accept any substring match - pat = '*%s*' % filename - - matches = [] - for p in path: - matches.extend(glob.glob(os.path.join(p, pat))) - - if not matches: - raise IOError("Could not find %r in %r" % (filename, path)) - elif len(matches) == 1: - return matches[0] - else: - # get most recent match, by access time: - return sorted(matches, key=lambda f: os.stat(f).st_atime)[-1] - - -def tunnel_to_kernel(connection_info, sshserver, sshkey=None): - """tunnel connections to a kernel via ssh - - This will open four SSH tunnels from localhost on this machine to the - ports associated with the kernel. They can be either direct - localhost-localhost tunnels, or if an intermediate server is necessary, - the kernel must be listening on a public IP. - - Parameters - ---------- - connection_info : dict or str (path) - Either a connection dict, or the path to a JSON connection file - sshserver : str - The ssh server to use to tunnel to the kernel. Can be a full - `user@server:port` string. ssh config aliases are respected. - sshkey : str [optional] - Path to file containing ssh key to use for authentication.
- Only necessary if your ssh config does not already associate - a keyfile with the host. - - Returns - ------- - - (shell, iopub, stdin, hb) : ints - The four ports on localhost that have been forwarded to the kernel. - """ - from zmq.ssh import tunnel - if isinstance(connection_info, string_types): - # it's a path, unpack it - with open(connection_info) as f: - connection_info = json.loads(f.read()) - - cf = connection_info - - lports = tunnel.select_random_ports(4) - rports = cf['shell_port'], cf['iopub_port'], cf['stdin_port'], cf['hb_port'] - - remote_ip = cf['ip'] - - if tunnel.try_passwordless_ssh(sshserver, sshkey): - password=False - else: - password = getpass("SSH Password for %s: " % cast_bytes_py2(sshserver)) - - for lp,rp in zip(lports, rports): - tunnel.ssh_tunnel(lp, rp, sshserver, remote_ip, sshkey, password) - - return tuple(lports) - - -#----------------------------------------------------------------------------- -# Mixin for classes that work with connection files -#----------------------------------------------------------------------------- - -channel_socket_types = { - 'hb' : zmq.REQ, - 'shell' : zmq.DEALER, - 'iopub' : zmq.SUB, - 'stdin' : zmq.DEALER, - 'control': zmq.DEALER, -} - -port_names = [ "%s_port" % channel for channel in ('shell', 'stdin', 'iopub', 'hb', 'control')] - -class ConnectionFileMixin(LoggingConfigurable): - """Mixin for configurable classes that work with connection files""" - - # The addresses for the communication channels - connection_file = Unicode('', config=True, - help="""JSON file in which to store connection info [default: kernel-.json] - - This file will contain the IP, ports, and authentication key needed to connect - clients to this kernel. By default, this file will be created in the security dir - of the current profile, but can be specified by absolute path. - """) - _connection_file_written = Bool(False) - - transport = CaselessStrEnum(['tcp', 'ipc'], default_value='tcp', config=True) - - ip = Unicode(config=True, - help="""Set the kernel\'s IP address [default localhost]. - If the IP address is something other than localhost, then - Consoles on other machines will be able to connect - to the Kernel, so be careful!""" - ) - - def _ip_default(self): - if self.transport == 'ipc': - if self.connection_file: - return os.path.splitext(self.connection_file)[0] + '-ipc' - else: - return 'kernel-ipc' - else: - return localhost() - - def _ip_changed(self, name, old, new): - if new == '*': - self.ip = '0.0.0.0' - - # protected traits - - hb_port = Integer(0, config=True, - help="set the heartbeat port [default: random]") - shell_port = Integer(0, config=True, - help="set the shell (ROUTER) port [default: random]") - iopub_port = Integer(0, config=True, - help="set the iopub (PUB) port [default: random]") - stdin_port = Integer(0, config=True, - help="set the stdin (ROUTER) port [default: random]") - control_port = Integer(0, config=True, - help="set the control (ROUTER) port [default: random]") - - @property - def ports(self): - return [ getattr(self, name) for name in port_names ] - - # The Session to use for communication with the kernel. 
- session = Instance('jupyter_client.session.Session') - def _session_default(self): - from jupyter_client.session import Session - return Session(parent=self) - - #-------------------------------------------------------------------------- - # Connection and ipc file management - #-------------------------------------------------------------------------- - - def get_connection_info(self): - """return the connection info as a dict""" - return dict( - transport=self.transport, - ip=self.ip, - shell_port=self.shell_port, - iopub_port=self.iopub_port, - stdin_port=self.stdin_port, - hb_port=self.hb_port, - control_port=self.control_port, - signature_scheme=self.session.signature_scheme, - key=self.session.key, - ) - - def cleanup_connection_file(self): - """Cleanup connection file *if we wrote it* - - Will not raise if the connection file was already removed somehow. - """ - if self._connection_file_written: - # cleanup connection files on full shutdown of kernel we started - self._connection_file_written = False - try: - os.remove(self.connection_file) - except (IOError, OSError, AttributeError): - pass - - def cleanup_ipc_files(self): - """Cleanup ipc files if we wrote them.""" - if self.transport != 'ipc': - return - for port in self.ports: - ipcfile = "%s-%i" % (self.ip, port) - try: - os.remove(ipcfile) - except (IOError, OSError): - pass - - def write_connection_file(self): - """Write connection info to JSON dict in self.connection_file.""" - if self._connection_file_written and os.path.exists(self.connection_file): - return - - self.connection_file, cfg = write_connection_file(self.connection_file, - transport=self.transport, ip=self.ip, key=self.session.key, - stdin_port=self.stdin_port, iopub_port=self.iopub_port, - shell_port=self.shell_port, hb_port=self.hb_port, - control_port=self.control_port, - signature_scheme=self.session.signature_scheme, - ) - # write_connection_file also sets default ports: - for name in port_names: - setattr(self, name, cfg[name]) - - self._connection_file_written = True - - def load_connection_file(self): - """Load connection info from JSON dict in self.connection_file.""" - self.log.debug(u"Loading connection file %s", self.connection_file) - with open(self.connection_file) as f: - cfg = json.load(f) - self.transport = cfg.get('transport', self.transport) - self.ip = cfg.get('ip', self._ip_default()) - - for name in port_names: - if getattr(self, name) == 0 and name in cfg: - # not overridden by config or cl_args - setattr(self, name, cfg[name]) - - if 'key' in cfg: - self.session.key = str_to_bytes(cfg['key']) - if 'signature_scheme' in cfg: - self.session.signature_scheme = cfg['signature_scheme'] - - #-------------------------------------------------------------------------- - # Creating connected sockets - #-------------------------------------------------------------------------- - - def _make_url(self, channel): - """Make a ZeroMQ URL for a given channel.""" - transport = self.transport - ip = self.ip - port = getattr(self, '%s_port' % channel) - - if transport == 'tcp': - return "tcp://%s:%i" % (ip, port) - else: - return "%s://%s-%s" % (transport, ip, port) - - def _create_connected_socket(self, channel, identity=None): - """Create a zmq Socket and connect it to the kernel.""" - url = self._make_url(channel) - socket_type = channel_socket_types[channel] - self.log.debug("Connecting to: %s" % url) - sock = self.context.socket(socket_type) - # set linger to 1s to prevent hangs at exit - sock.linger = 1000 - if identity: - sock.identity = identity - 
sock.connect(url) - return sock - - def connect_iopub(self, identity=None): - """return zmq Socket connected to the IOPub channel""" - sock = self._create_connected_socket('iopub', identity=identity) - sock.setsockopt(zmq.SUBSCRIBE, b'') - return sock - - def connect_shell(self, identity=None): - """return zmq Socket connected to the Shell channel""" - return self._create_connected_socket('shell', identity=identity) - - def connect_stdin(self, identity=None): - """return zmq Socket connected to the StdIn channel""" - return self._create_connected_socket('stdin', identity=identity) - - def connect_hb(self, identity=None): - """return zmq Socket connected to the Heartbeat channel""" - return self._create_connected_socket('hb', identity=identity) - - def connect_control(self, identity=None): - """return zmq Socket connected to the Control channel""" - return self._create_connected_socket('control', identity=identity) - - -__all__ = [ - 'write_connection_file', - 'find_connection_file', - 'tunnel_to_kernel', -] diff --git a/jupyter_client/consoleapp.py b/jupyter_client/consoleapp.py deleted file mode 100644 index 6bb09bb..0000000 --- a/jupyter_client/consoleapp.py +++ /dev/null @@ -1,331 +0,0 @@ -""" A minimal application base mixin for all ZMQ based IPython frontends. - -This is not a complete console app, as subprocess will not be able to receive -input, there is no real readline support, among other limitations. This is a -refactoring of what used to be the IPython/qt/console/qtconsoleapp.py -""" -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. - -import atexit -import os -import signal -import sys -import uuid - - -from IPython.config.application import boolean_flag -from IPython.core.profiledir import ProfileDir -from IPython.utils.path import filefind -from IPython.utils.traitlets import ( - Dict, List, Unicode, CUnicode, CBool, Any -) - -from .blocking import BlockingKernelClient -from . import KernelManager, tunnel_to_kernel, find_connection_file, connect -from .kernelspec import NoSuchKernel -from .session import Session - -ConnectionFileMixin = connect.ConnectionFileMixin - -from .localinterfaces import localhost - -#----------------------------------------------------------------------------- -# Aliases and Flags -#----------------------------------------------------------------------------- - -flags = {} - -# the flags that are specific to the frontend -# these must be scrubbed before being passed to the kernel, -# or it will raise an error on unrecognized flags -app_flags = { - 'existing' : ({'IPythonConsoleApp' : {'existing' : 'kernel*.json'}}, - "Connect to an existing kernel. If no argument specified, guess most recent"), -} -app_flags.update(boolean_flag( - 'confirm-exit', 'IPythonConsoleApp.confirm_exit', - """Set to display confirmation dialog on exit. You can always use 'exit' or 'quit', - to force a direct exit without any confirmation. - """, - """Don't prompt the user when exiting. This will terminate the kernel - if it is owned by the frontend, and leave it alive if it is external. 
- """ -)) -flags.update(app_flags) - -aliases = {} - -# also scrub aliases from the frontend -app_aliases = dict( - ip = 'IPythonConsoleApp.ip', - transport = 'IPythonConsoleApp.transport', - hb = 'IPythonConsoleApp.hb_port', - shell = 'IPythonConsoleApp.shell_port', - iopub = 'IPythonConsoleApp.iopub_port', - stdin = 'IPythonConsoleApp.stdin_port', - existing = 'IPythonConsoleApp.existing', - f = 'IPythonConsoleApp.connection_file', - - kernel = 'IPythonConsoleApp.kernel_name', - - ssh = 'IPythonConsoleApp.sshserver', -) -aliases.update(app_aliases) - -#----------------------------------------------------------------------------- -# Classes -#----------------------------------------------------------------------------- - -classes = [KernelManager, ProfileDir, Session] - -class IPythonConsoleApp(ConnectionFileMixin): - name = 'ipython-console-mixin' - - description = """ - The IPython Mixin Console. - - This class contains the common portions of console client (QtConsole, - ZMQ-based terminal console, etc). It is not a full console, in that - launched terminal subprocesses will not be able to accept input. - - The Console using this mixing supports various extra features beyond - the single-process Terminal IPython shell, such as connecting to - existing kernel, via: - - ipython --existing - - as well as tunnel via SSH - - """ - - classes = classes - flags = Dict(flags) - aliases = Dict(aliases) - kernel_manager_class = KernelManager - kernel_client_class = BlockingKernelClient - - kernel_argv = List(Unicode) - # frontend flags&aliases to be stripped when building kernel_argv - frontend_flags = Any(app_flags) - frontend_aliases = Any(app_aliases) - - # create requested profiles by default, if they don't exist: - auto_create = CBool(True) - # connection info: - - sshserver = Unicode('', config=True, - help="""The SSH server to use to connect to the kernel.""") - sshkey = Unicode('', config=True, - help="""Path to the ssh key to use for logging in to the ssh server.""") - - def _connection_file_default(self): - return 'kernel-%i.json' % os.getpid() - - existing = CUnicode('', config=True, - help="""Connect to an already running kernel""") - - kernel_name = Unicode('python', config=True, - help="""The name of the default kernel to start.""") - - confirm_exit = CBool(True, config=True, - help=""" - Set to display confirmation dialog on exit. You can always use 'exit' or 'quit', - to force a direct exit without any confirmation.""", - ) - - def build_kernel_argv(self, argv=None): - """build argv to be passed to kernel subprocess - - Override in subclasses if any args should be passed to the kernel - """ - self.kernel_argv = self.extra_args - - def init_connection_file(self): - """find the connection file, and load the info if found. - - The current working directory and the current profile's security - directory will be searched for the file if it is not given by - absolute path. - - When attempting to connect to an existing kernel and the `--existing` - argument does not match an existing file, it will be interpreted as a - fileglob, and the matching file in the current profile's security dir - with the latest access time will be used. - - After this method is called, self.connection_file contains the *full path* - to the connection file, never just its name. 
- """ - if self.existing: - try: - cf = find_connection_file(self.existing) - except Exception: - self.log.critical("Could not find existing kernel connection file %s", self.existing) - self.exit(1) - self.log.debug("Connecting to existing kernel: %s" % cf) - self.connection_file = cf - else: - # not existing, check if we are going to write the file - # and ensure that self.connection_file is a full path, not just the shortname - try: - cf = find_connection_file(self.connection_file) - except Exception: - # file might not exist - if self.connection_file == os.path.basename(self.connection_file): - # just shortname, put it in security dir - cf = os.path.join(self.profile_dir.security_dir, self.connection_file) - else: - cf = self.connection_file - self.connection_file = cf - try: - self.connection_file = filefind(self.connection_file, ['.', self.profile_dir.security_dir]) - except IOError: - self.log.debug("Connection File not found: %s", self.connection_file) - return - - # should load_connection_file only be used for existing? - # as it is now, this allows reusing ports if an existing - # file is requested - try: - self.load_connection_file() - except Exception: - self.log.error("Failed to load connection file: %r", self.connection_file, exc_info=True) - self.exit(1) - - def init_ssh(self): - """set up ssh tunnels, if needed.""" - if not self.existing or (not self.sshserver and not self.sshkey): - return - self.load_connection_file() - - transport = self.transport - ip = self.ip - - if transport != 'tcp': - self.log.error("Can only use ssh tunnels with TCP sockets, not %s", transport) - sys.exit(-1) - - if self.sshkey and not self.sshserver: - # specifying just the key implies that we are connecting directly - self.sshserver = ip - ip = localhost() - - # build connection dict for tunnels: - info = dict(ip=ip, - shell_port=self.shell_port, - iopub_port=self.iopub_port, - stdin_port=self.stdin_port, - hb_port=self.hb_port - ) - - self.log.info("Forwarding connections to %s via %s"%(ip, self.sshserver)) - - # tunnels return a new set of ports, which will be on localhost: - self.ip = localhost() - try: - newports = tunnel_to_kernel(info, self.sshserver, self.sshkey) - except: - # even catch KeyboardInterrupt - self.log.error("Could not setup tunnels", exc_info=True) - self.exit(1) - - self.shell_port, self.iopub_port, self.stdin_port, self.hb_port = newports - - cf = self.connection_file - base,ext = os.path.splitext(cf) - base = os.path.basename(base) - self.connection_file = os.path.basename(base)+'-ssh'+ext - self.log.info("To connect another client via this tunnel, use:") - self.log.info("--existing %s" % self.connection_file) - - def _new_connection_file(self): - cf = '' - while not cf: - # we don't need a 128b id to distinguish kernels, use more readable - # 48b node segment (12 hex chars). Users running more than 32k simultaneous - # kernels can subclass. - ident = str(uuid.uuid4()).split('-')[-1] - cf = os.path.join(self.profile_dir.security_dir, 'kernel-%s.json' % ident) - # only keep if it's actually new. Protect against unlikely collision - # in 48b random search space - cf = cf if not os.path.exists(cf) else '' - return cf - - def init_kernel_manager(self): - # Don't let Qt or ZMQ swallow KeyboardInterupts. - if self.existing: - self.kernel_manager = None - return - signal.signal(signal.SIGINT, signal.SIG_DFL) - - # Create a KernelManager and start a kernel. 
- try: - self.kernel_manager = self.kernel_manager_class( - ip=self.ip, - session=self.session, - transport=self.transport, - shell_port=self.shell_port, - iopub_port=self.iopub_port, - stdin_port=self.stdin_port, - hb_port=self.hb_port, - connection_file=self.connection_file, - kernel_name=self.kernel_name, - parent=self, - ipython_dir=self.ipython_dir, - ) - except NoSuchKernel: - self.log.critical("Could not find kernel %s", self.kernel_name) - self.exit(1) - - self.kernel_manager.client_factory = self.kernel_client_class - # FIXME: remove special treatment of IPython kernels - kwargs = {} - if self.kernel_manager.ipython_kernel: - kwargs['extra_arguments'] = self.kernel_argv - self.kernel_manager.start_kernel(**kwargs) - atexit.register(self.kernel_manager.cleanup_ipc_files) - - if self.sshserver: - # ssh, write new connection file - self.kernel_manager.write_connection_file() - - # in case KM defaults / ssh writing changes things: - km = self.kernel_manager - self.shell_port=km.shell_port - self.iopub_port=km.iopub_port - self.stdin_port=km.stdin_port - self.hb_port=km.hb_port - self.connection_file = km.connection_file - - atexit.register(self.kernel_manager.cleanup_connection_file) - - def init_kernel_client(self): - if self.kernel_manager is not None: - self.kernel_client = self.kernel_manager.client() - else: - self.kernel_client = self.kernel_client_class( - session=self.session, - ip=self.ip, - transport=self.transport, - shell_port=self.shell_port, - iopub_port=self.iopub_port, - stdin_port=self.stdin_port, - hb_port=self.hb_port, - connection_file=self.connection_file, - parent=self, - ) - - self.kernel_client.start_channels() - - - - def initialize(self, argv=None): - """ - Classes which mix this class in should call: - IPythonConsoleApp.initialize(self,argv) - """ - self.init_connection_file() - self.init_ssh() - self.init_kernel_manager() - self.init_kernel_client() - diff --git a/jupyter_client/ioloop/__init__.py b/jupyter_client/ioloop/__init__.py deleted file mode 100644 index d64f06d..0000000 --- a/jupyter_client/ioloop/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .manager import IOLoopKernelManager -from .restarter import IOLoopKernelRestarter diff --git a/jupyter_client/ioloop/manager.py b/jupyter_client/ioloop/manager.py deleted file mode 100644 index c8c42b1..0000000 --- a/jupyter_client/ioloop/manager.py +++ /dev/null @@ -1,62 +0,0 @@ -"""A kernel manager with a tornado IOLoop""" - -#----------------------------------------------------------------------------- -# Copyright (C) 2013 The IPython Development Team -# -# Distributed under the terms of the BSD License. The full license is in -# the file COPYING, distributed as part of this software. 
-#----------------------------------------------------------------------------- - -#----------------------------------------------------------------------------- -# Imports -#----------------------------------------------------------------------------- - -from __future__ import absolute_import - -from zmq.eventloop import ioloop -from zmq.eventloop.zmqstream import ZMQStream - -from IPython.utils.traitlets import ( - Instance -) - -from jupyter_client.manager import KernelManager -from .restarter import IOLoopKernelRestarter - -#----------------------------------------------------------------------------- -# Code -#----------------------------------------------------------------------------- - - -def as_zmqstream(f): - def wrapped(self, *args, **kwargs): - socket = f(self, *args, **kwargs) - return ZMQStream(socket, self.loop) - return wrapped - -class IOLoopKernelManager(KernelManager): - - loop = Instance('zmq.eventloop.ioloop.IOLoop') - def _loop_default(self): - return ioloop.IOLoop.instance() - - _restarter = Instance('jupyter_client.ioloop.IOLoopKernelRestarter', allow_none=True) - - def start_restarter(self): - if self.autorestart and self.has_kernel: - if self._restarter is None: - self._restarter = IOLoopKernelRestarter( - kernel_manager=self, loop=self.loop, - parent=self, log=self.log - ) - self._restarter.start() - - def stop_restarter(self): - if self.autorestart: - if self._restarter is not None: - self._restarter.stop() - - connect_shell = as_zmqstream(KernelManager.connect_shell) - connect_iopub = as_zmqstream(KernelManager.connect_iopub) - connect_stdin = as_zmqstream(KernelManager.connect_stdin) - connect_hb = as_zmqstream(KernelManager.connect_hb) diff --git a/jupyter_client/ioloop/restarter.py b/jupyter_client/ioloop/restarter.py deleted file mode 100644 index 3f7b3e6..0000000 --- a/jupyter_client/ioloop/restarter.py +++ /dev/null @@ -1,53 +0,0 @@ -"""A basic in process kernel monitor with autorestarting. - -This watches a kernel's state using KernelManager.is_alive and auto -restarts the kernel if it dies. -""" - -#----------------------------------------------------------------------------- -# Copyright (C) 2013 The IPython Development Team -# -# Distributed under the terms of the BSD License. The full license is in -# the file COPYING, distributed as part of this software. 
-#----------------------------------------------------------------------------- - -#----------------------------------------------------------------------------- -# Imports -#----------------------------------------------------------------------------- - -from __future__ import absolute_import - -from zmq.eventloop import ioloop - - -from jupyter_client.restarter import KernelRestarter -from IPython.utils.traitlets import ( - Instance, -) - -#----------------------------------------------------------------------------- -# Code -#----------------------------------------------------------------------------- - -class IOLoopKernelRestarter(KernelRestarter): - """Monitor and autorestart a kernel.""" - - loop = Instance('zmq.eventloop.ioloop.IOLoop') - def _loop_default(self): - return ioloop.IOLoop.instance() - - _pcallback = None - - def start(self): - """Start the polling of the kernel.""" - if self._pcallback is None: - self._pcallback = ioloop.PeriodicCallback( - self.poll, 1000*self.time_to_dead, self.loop - ) - self._pcallback.start() - - def stop(self): - """Stop the kernel polling.""" - if self._pcallback is not None: - self._pcallback.stop() - self._pcallback = None diff --git a/jupyter_client/jsonutil.py b/jupyter_client/jsonutil.py deleted file mode 100644 index b41a422..0000000 --- a/jupyter_client/jsonutil.py +++ /dev/null @@ -1,239 +0,0 @@ -"""Utilities to manipulate JSON objects.""" - -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. - -import math -import re -import types -from datetime import datetime - -try: - # base64.encodestring is deprecated in Python 3.x - from base64 import encodebytes -except ImportError: - # Python 2.x - from base64 import encodestring as encodebytes - -from IPython.utils import py3compat -from IPython.utils.py3compat import string_types, unicode_type, iteritems -from IPython.utils.encoding import DEFAULT_ENCODING -next_attr_name = '__next__' if py3compat.PY3 else 'next' - -#----------------------------------------------------------------------------- -# Globals and constants -#----------------------------------------------------------------------------- - -# timestamp formats -ISO8601 = "%Y-%m-%dT%H:%M:%S.%f" -ISO8601_PAT=re.compile(r"^(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2})(\.\d{1,6})?Z?([\+\-]\d{2}:?\d{2})?$") - -# holy crap, strptime is not threadsafe. -# Calling it once at import seems to help. -datetime.strptime("1", "%d") - -#----------------------------------------------------------------------------- -# Classes and functions -#----------------------------------------------------------------------------- - -def rekey(dikt): - """Rekey a dict that has been forced to use str keys where there should be - ints by json.""" - for k in list(dikt): - if isinstance(k, string_types): - nk = None - try: - nk = int(k) - except ValueError: - try: - nk = float(k) - except ValueError: - continue - if nk in dikt: - raise KeyError("already have key %r" % nk) - dikt[nk] = dikt.pop(k) - return dikt - -def parse_date(s): - """parse an ISO8601 date string - - If it is None or not a valid ISO8601 timestamp, - it will be returned unmodified. - Otherwise, it will return a datetime object. 
- """ - if s is None: - return s - m = ISO8601_PAT.match(s) - if m: - # FIXME: add actual timezone support - # this just drops the timezone info - notz, ms, tz = m.groups() - if not ms: - ms = '.0' - notz = notz + ms - return datetime.strptime(notz, ISO8601) - return s - -def extract_dates(obj): - """extract ISO8601 dates from unpacked JSON""" - if isinstance(obj, dict): - new_obj = {} # don't clobber - for k,v in iteritems(obj): - new_obj[k] = extract_dates(v) - obj = new_obj - elif isinstance(obj, (list, tuple)): - obj = [ extract_dates(o) for o in obj ] - elif isinstance(obj, string_types): - obj = parse_date(obj) - return obj - -def squash_dates(obj): - """squash datetime objects into ISO8601 strings""" - if isinstance(obj, dict): - obj = dict(obj) # don't clobber - for k,v in iteritems(obj): - obj[k] = squash_dates(v) - elif isinstance(obj, (list, tuple)): - obj = [ squash_dates(o) for o in obj ] - elif isinstance(obj, datetime): - obj = obj.isoformat() - return obj - -def date_default(obj): - """default function for packing datetime objects in JSON.""" - if isinstance(obj, datetime): - return obj.isoformat() - else: - raise TypeError("%r is not JSON serializable"%obj) - - -# constants for identifying png/jpeg data -PNG = b'\x89PNG\r\n\x1a\n' -# front of PNG base64-encoded -PNG64 = b'iVBORw0KG' -JPEG = b'\xff\xd8' -# front of JPEG base64-encoded -JPEG64 = b'/9' -# front of PDF base64-encoded -PDF64 = b'JVBER' - -def encode_images(format_dict): - """b64-encodes images in a displaypub format dict - - Perhaps this should be handled in json_clean itself? - - Parameters - ---------- - - format_dict : dict - A dictionary of display data keyed by mime-type - - Returns - ------- - - format_dict : dict - A copy of the same dictionary, - but binary image data ('image/png', 'image/jpeg' or 'application/pdf') - is base64-encoded. - - """ - encoded = format_dict.copy() - - pngdata = format_dict.get('image/png') - if isinstance(pngdata, bytes): - # make sure we don't double-encode - if not pngdata.startswith(PNG64): - pngdata = encodebytes(pngdata) - encoded['image/png'] = pngdata.decode('ascii') - - jpegdata = format_dict.get('image/jpeg') - if isinstance(jpegdata, bytes): - # make sure we don't double-encode - if not jpegdata.startswith(JPEG64): - jpegdata = encodebytes(jpegdata) - encoded['image/jpeg'] = jpegdata.decode('ascii') - - pdfdata = format_dict.get('application/pdf') - if isinstance(pdfdata, bytes): - # make sure we don't double-encode - if not pdfdata.startswith(PDF64): - pdfdata = encodebytes(pdfdata) - encoded['application/pdf'] = pdfdata.decode('ascii') - - return encoded - - -def json_clean(obj): - """Clean an object to ensure it's safe to encode in JSON. - - Atomic, immutable objects are returned unmodified. Sets and tuples are - converted to lists, lists are copied and dicts are also copied. - - Note: dicts whose keys could cause collisions upon encoding (such as a dict - with both the number 1 and the string '1' as keys) will cause a ValueError - to be raised. - - Parameters - ---------- - obj : any python object - - Returns - ------- - out : object - - A version of the input which will not cause an encoding error when - encoded as JSON. Note that this function does not *encode* its inputs, - it simply sanitizes it so that there will be no encoding errors later. - - """ - # types that are 'atomic' and ok in json as-is. 
- atomic_ok = (unicode_type, type(None)) - - # containers that we need to convert into lists - container_to_list = (tuple, set, types.GeneratorType) - - if isinstance(obj, float): - # cast out-of-range floats to their reprs - if math.isnan(obj) or math.isinf(obj): - return repr(obj) - return float(obj) - - if isinstance(obj, int): - # cast int to int, in case subclasses override __str__ (e.g. boost enum, #4598) - if isinstance(obj, bool): - # bools are ints, but we don't want to cast them to 0,1 - return obj - return int(obj) - - if isinstance(obj, atomic_ok): - return obj - - if isinstance(obj, bytes): - return obj.decode(DEFAULT_ENCODING, 'replace') - - if isinstance(obj, container_to_list) or ( - hasattr(obj, '__iter__') and hasattr(obj, next_attr_name)): - obj = list(obj) - - if isinstance(obj, list): - return [json_clean(x) for x in obj] - - if isinstance(obj, dict): - # First, validate that the dict won't lose data in conversion due to - # key collisions after stringification. This can happen with keys like - # True and 'true' or 1 and '1', which collide in JSON. - nkeys = len(obj) - nkeys_collapsed = len(set(map(unicode_type, obj))) - if nkeys != nkeys_collapsed: - raise ValueError('dict cannot be safely converted to JSON: ' - 'key collision would lead to dropped values') - # If all OK, proceed by making the new dict that will be json-safe - out = {} - for k,v in iteritems(obj): - out[unicode_type(k)] = json_clean(v) - return out - - # If we get here, we don't know how to handle the object, so we just get - # its repr and return that. This will catch lambdas, open sockets, class - # objects, and any other complicated contraption that json can't encode - return repr(obj) diff --git a/jupyter_client/kernelspec.py b/jupyter_client/kernelspec.py deleted file mode 100644 index 71bb543..0000000 --- a/jupyter_client/kernelspec.py +++ /dev/null @@ -1,194 +0,0 @@ -import io -import json -import os -import shutil -import sys -import warnings - -pjoin = os.path.join - -from IPython.utils.path import get_ipython_dir -from IPython.utils.py3compat import PY3 -from IPython.utils.traitlets import HasTraits, List, Unicode, Dict, Set -from IPython.config import Configurable - -if os.name == 'nt': - programdata = os.environ.get('PROGRAMDATA', None) - if programdata: - SYSTEM_KERNEL_DIRS = [pjoin(programdata, 'jupyter', 'kernels')] - else: # PROGRAMDATA is not defined by default on XP. - SYSTEM_KERNEL_DIRS = [] -else: - SYSTEM_KERNEL_DIRS = ["/usr/share/jupyter/kernels", - "/usr/local/share/jupyter/kernels", - ] - -NATIVE_KERNEL_NAME = 'python3' if PY3 else 'python2' - - -class KernelSpec(HasTraits): - argv = List() - display_name = Unicode() - language = Unicode() - env = Dict() - resource_dir = Unicode() - - @classmethod - def from_resource_dir(cls, resource_dir): - """Create a KernelSpec object by reading kernel.json - - Pass the path to the *directory* containing kernel.json. 
- """ - kernel_file = pjoin(resource_dir, 'kernel.json') - with io.open(kernel_file, 'r', encoding='utf-8') as f: - kernel_dict = json.load(f) - return cls(resource_dir=resource_dir, **kernel_dict) - - def to_dict(self): - d = dict(argv=self.argv, - env=self.env, - display_name=self.display_name, - language=self.language, - ) - - return d - - def to_json(self): - return json.dumps(self.to_dict()) - -def _is_kernel_dir(path): - """Is ``path`` a kernel directory?""" - return os.path.isdir(path) and os.path.isfile(pjoin(path, 'kernel.json')) - -def _list_kernels_in(dir): - """Return a mapping of kernel names to resource directories from dir. - - If dir is None or does not exist, returns an empty dict. - """ - if dir is None or not os.path.isdir(dir): - return {} - return {f.lower(): pjoin(dir, f) for f in os.listdir(dir) - if _is_kernel_dir(pjoin(dir, f))} - -class NoSuchKernel(KeyError): - def __init__(self, name): - self.name = name - -class KernelSpecManager(Configurable): - ipython_dir = Unicode() - def _ipython_dir_default(self): - return get_ipython_dir() - - user_kernel_dir = Unicode() - def _user_kernel_dir_default(self): - return pjoin(self.ipython_dir, 'kernels') - - @property - def env_kernel_dir(self): - return pjoin(sys.prefix, 'share', 'jupyter', 'kernels') - - whitelist = Set(config=True, - help="""Whitelist of allowed kernel names. - - By default, all installed kernels are allowed. - """ - ) - kernel_dirs = List( - help="List of kernel directories to search. Later ones take priority over earlier." - ) - def _kernel_dirs_default(self): - dirs = SYSTEM_KERNEL_DIRS[:] - if self.env_kernel_dir not in dirs: - dirs.append(self.env_kernel_dir) - dirs.append(self.user_kernel_dir) - return dirs - - def find_kernel_specs(self): - """Returns a dict mapping kernel names to resource directories.""" - d = {} - for kernel_dir in self.kernel_dirs: - d.update(_list_kernels_in(kernel_dir)) - - if self.whitelist: - # filter if there's a whitelist - d = {name:spec for name,spec in d.items() if name in self.whitelist} - return d - # TODO: Caching? - - def get_kernel_spec(self, kernel_name): - """Returns a :class:`KernelSpec` instance for the given kernel_name. - - Raises :exc:`NoSuchKernel` if the given kernel name is not found. - """ - d = self.find_kernel_specs() - try: - resource_dir = d[kernel_name.lower()] - except KeyError: - raise NoSuchKernel(kernel_name) - return KernelSpec.from_resource_dir(resource_dir) - - def _get_destination_dir(self, kernel_name, user=False): - if user: - return os.path.join(self.user_kernel_dir, kernel_name) - else: - if SYSTEM_KERNEL_DIRS: - return os.path.join(SYSTEM_KERNEL_DIRS[-1], kernel_name) - else: - raise EnvironmentError("No system kernel directory is available") - - - def install_kernel_spec(self, source_dir, kernel_name=None, user=False, - replace=False): - """Install a kernel spec by copying its directory. - - If ``kernel_name`` is not given, the basename of ``source_dir`` will - be used. - - If ``user`` is False, it will attempt to install into the systemwide - kernel registry. If the process does not have appropriate permissions, - an :exc:`OSError` will be raised. - - If ``replace`` is True, this will replace an existing kernel of the same - name. Otherwise, if the destination already exists, an :exc:`OSError` - will be raised. 
- """ - if not kernel_name: - kernel_name = os.path.basename(source_dir) - kernel_name = kernel_name.lower() - - destination = self._get_destination_dir(kernel_name, user=user) - - if replace and os.path.isdir(destination): - shutil.rmtree(destination) - - shutil.copytree(source_dir, destination) - - def install_native_kernel_spec(self, user=False): - """DEPRECATED: Use ipython_kernel.kenelspec.install""" - warnings.warn("install_native_kernel_spec is deprecated." - " Use ipython_kernel.kernelspec import install.") - from ipython_kernel.kernelspec import install - install(self, user=user) - - -def find_kernel_specs(): - """Returns a dict mapping kernel names to resource directories.""" - return KernelSpecManager().find_kernel_specs() - -def get_kernel_spec(kernel_name): - """Returns a :class:`KernelSpec` instance for the given kernel_name. - - Raises KeyError if the given kernel name is not found. - """ - return KernelSpecManager().get_kernel_spec(kernel_name) - -def install_kernel_spec(source_dir, kernel_name=None, user=False, replace=False): - return KernelSpecManager().install_kernel_spec(source_dir, kernel_name, - user, replace) - -install_kernel_spec.__doc__ = KernelSpecManager.install_kernel_spec.__doc__ - -def install_native_kernel_spec(user=False): - return KernelSpecManager().install_native_kernel_spec(user=user) - -install_native_kernel_spec.__doc__ = KernelSpecManager.install_native_kernel_spec.__doc__ diff --git a/jupyter_client/kernelspecapp.py b/jupyter_client/kernelspecapp.py deleted file mode 100644 index 09cdee3..0000000 --- a/jupyter_client/kernelspecapp.py +++ /dev/null @@ -1,146 +0,0 @@ - -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. - -import errno -import os.path - -from IPython.config.application import Application -from IPython.core.application import ( - BaseIPythonApplication, base_flags, base_aliases -) -from IPython.utils.traitlets import Instance, Dict, Unicode, Bool - -from .kernelspec import KernelSpecManager - -class ListKernelSpecs(BaseIPythonApplication): - description = """List installed kernel specifications.""" - kernel_spec_manager = Instance(KernelSpecManager) - - # Not all of the base aliases are meaningful (e.g. profile) - aliases = {k: base_aliases[k] for k in ['ipython-dir', 'log-level']} - flags = {'debug': base_flags['debug'],} - - def _kernel_spec_manager_default(self): - return KernelSpecManager(parent=self, ipython_dir=self.ipython_dir) - - def start(self): - print("Available kernels:") - for kernelname in sorted(self.kernel_spec_manager.find_kernel_specs()): - print(" %s" % kernelname) - - -class InstallKernelSpec(BaseIPythonApplication): - description = """Install a kernel specification directory.""" - kernel_spec_manager = Instance(KernelSpecManager) - - def _kernel_spec_manager_default(self): - return KernelSpecManager(ipython_dir=self.ipython_dir) - - sourcedir = Unicode() - kernel_name = Unicode("", config=True, - help="Install the kernel spec with this name" - ) - def _kernel_name_default(self): - return os.path.basename(self.sourcedir) - - user = Bool(False, config=True, - help=""" - Try to install the kernel spec to the per-user directory instead of - the system or environment directory. - """ - ) - replace = Bool(False, config=True, - help="Replace any existing kernel spec with this name." 
- ) - - aliases = {'name': 'InstallKernelSpec.kernel_name'} - for k in ['ipython-dir', 'log-level']: - aliases[k] = base_aliases[k] - - flags = {'user': ({'InstallKernelSpec': {'user': True}}, - "Install to the per-user kernel registry"), - 'replace': ({'InstallKernelSpec': {'replace': True}}, - "Replace any existing kernel spec with this name."), - 'debug': base_flags['debug'], - } - - def parse_command_line(self, argv): - super(InstallKernelSpec, self).parse_command_line(argv) - # accept positional arg as profile name - if self.extra_args: - self.sourcedir = self.extra_args[0] - else: - print("No source directory specified.") - self.exit(1) - - def start(self): - try: - self.kernel_spec_manager.install_kernel_spec(self.sourcedir, - kernel_name=self.kernel_name, - user=self.user, - replace=self.replace, - ) - except OSError as e: - if e.errno == errno.EACCES: - print("Permission denied") - self.exit(1) - elif e.errno == errno.EEXIST: - print("A kernel spec is already present at %s" % e.filename) - self.exit(1) - raise - -class InstallNativeKernelSpec(BaseIPythonApplication): - description = """[DEPRECATED] Install the IPython kernel spec directory for this Python.""" - kernel_spec_manager = Instance(KernelSpecManager) - - def _kernel_spec_manager_default(self): - return KernelSpecManager(ipython_dir=self.ipython_dir) - - user = Bool(False, config=True, - help=""" - Try to install the kernel spec to the per-user directory instead of - the system or environment directory. - """ - ) - - # Not all of the base aliases are meaningful (e.g. profile) - aliases = {k: base_aliases[k] for k in ['ipython-dir', 'log-level']} - flags = {'user': ({'InstallNativeKernelSpec': {'user': True}}, - "Install to the per-user kernel registry"), - 'debug': base_flags['debug'], - } - - def start(self): - try: - from ipython_kernel import kernelspec - except ImportError: - print("ipython_kernel not available, can't install its spec.", file=sys.stderr) - self.exit(1) - try: - kernelspec.install(self.kernel_spec_manager, user=self.user) - except OSError as e: - self.exit(e) - -class KernelSpecApp(Application): - name = "ipython kernelspec" - description = """Manage IPython kernel specifications.""" - - subcommands = Dict({ - 'list': (ListKernelSpecs, ListKernelSpecs.description.splitlines()[0]), - 'install': (InstallKernelSpec, InstallKernelSpec.description.splitlines()[0]), - 'install-self': (InstallNativeKernelSpec, InstallNativeKernelSpec.description.splitlines()[0]), - }) - - aliases = {} - flags = {} - - def start(self): - if self.subapp is None: - print("No subcommand specified. Must specify one of: %s"% list(self.subcommands)) - print() - self.print_description() - self.print_subcommands() - self.exit(1) - else: - return self.subapp.start() diff --git a/jupyter_client/launcher.py b/jupyter_client/launcher.py deleted file mode 100644 index fbef486..0000000 --- a/jupyter_client/launcher.py +++ /dev/null @@ -1,130 +0,0 @@ -"""Utilities for launching kernels""" - -# Copyright (c) Jupyter Development Team. -# Distributed under the terms of the Modified BSD License. - -import os -import sys -from subprocess import Popen, PIPE - -from IPython.utils.encoding import getdefaultencoding -from IPython.utils.py3compat import cast_bytes_py2 - - -def launch_kernel(cmd, stdin=None, stdout=None, stderr=None, env=None, - independent=False, - cwd=None, - **kw - ): - """ Launches a localhost kernel, binding to the specified ports. 
- - Parameters - ---------- - cmd : Popen list, - A string of Python code that imports and executes a kernel entry point. - - stdin, stdout, stderr : optional (default None) - Standards streams, as defined in subprocess.Popen. - - independent : bool, optional (default False) - If set, the kernel process is guaranteed to survive if this process - dies. If not set, an effort is made to ensure that the kernel is killed - when this process dies. Note that in this case it is still good practice - to kill kernels manually before exiting. - - cwd : path, optional - The working dir of the kernel process (default: cwd of this process). - - Returns - ------- - - Popen instance for the kernel subprocess - """ - - # Popen will fail (sometimes with a deadlock) if stdin, stdout, and stderr - # are invalid. Unfortunately, there is in general no way to detect whether - # they are valid. The following two blocks redirect them to (temporary) - # pipes in certain important cases. - - # If this process has been backgrounded, our stdin is invalid. Since there - # is no compelling reason for the kernel to inherit our stdin anyway, we'll - # place this one safe and always redirect. - redirect_in = True - _stdin = PIPE if stdin is None else stdin - - # If this process in running on pythonw, we know that stdin, stdout, and - # stderr are all invalid. - redirect_out = sys.executable.endswith('pythonw.exe') - if redirect_out: - blackhole = open(os.devnull, 'w') - _stdout = blackhole if stdout is None else stdout - _stderr = blackhole if stderr is None else stderr - else: - _stdout, _stderr = stdout, stderr - - env = env if (env is not None) else os.environ.copy() - - encoding = getdefaultencoding(prefer_stream=False) - kwargs = dict( - stdin=_stdin, - stdout=_stdout, - stderr=_stderr, - cwd=cwd, - env=env, - ) - - # Spawn a kernel. - if sys.platform == 'win32': - # Popen on Python 2 on Windows cannot handle unicode args or cwd - cmd = [ cast_bytes_py2(c, encoding) for c in cmd ] - if cwd: - cwd = cast_bytes_py2(cwd, sys.getfilesystemencoding() or 'ascii') - kwargs['cwd'] = cwd - - from .win_interrupt import create_interrupt_event - # Create a Win32 event for interrupting the kernel - # and store it in an environment variable. - interrupt_event = create_interrupt_event() - env["JPY_INTERRUPT_EVENT"] = str(interrupt_event) - # deprecated old env name: - env["IPY_INTERRUPT_EVENT"] = env["JPY_INTERRUPT_EVENT"] - - try: - from _winapi import DuplicateHandle, GetCurrentProcess, \ - DUPLICATE_SAME_ACCESS, CREATE_NEW_PROCESS_GROUP - except: - from _subprocess import DuplicateHandle, GetCurrentProcess, \ - DUPLICATE_SAME_ACCESS, CREATE_NEW_PROCESS_GROUP - # Launch the kernel process - if independent: - kwargs['creationflags'] = CREATE_NEW_PROCESS_GROUP - else: - pid = GetCurrentProcess() - handle = DuplicateHandle(pid, pid, pid, 0, - True, # Inheritable by new processes. - DUPLICATE_SAME_ACCESS) - env['JPY_PARENT_PID'] = str(int(handle)) - - proc = Popen(cmd, **kwargs) - - # Attach the interrupt event to the Popen objet so it can be used later. - proc.win32_interrupt_event = interrupt_event - - else: - if independent: - kwargs['preexec_fn'] = lambda: os.setsid() - else: - env['JPY_PARENT_PID'] = str(os.getpid()) - - proc = Popen(cmd, **kwargs) - - # Clean up pipes created to work around Popen bug. 
- if redirect_in: - if stdin is None: - proc.stdin.close() - - return proc - -__all__ = [ - 'launch_kernel', -] diff --git a/jupyter_client/localinterfaces.py b/jupyter_client/localinterfaces.py deleted file mode 100644 index 40b8dfa..0000000 --- a/jupyter_client/localinterfaces.py +++ /dev/null @@ -1,270 +0,0 @@ -"""Utilities for identifying local IP addresses.""" - -# Copyright (c) Jupyter Development Team. -# Distributed under the terms of the Modified BSD License. - -import os -import re -import socket -from subprocess import Popen, PIPE - -from warnings import warn - - -LOCAL_IPS = [] -PUBLIC_IPS = [] - -LOCALHOST = '' - - -def _uniq_stable(elems): - """uniq_stable(elems) -> list - - Return from an iterable, a list of all the unique elements in the input, - maintaining the order in which they first appear. - - From IPython.utils.data - """ - seen = set() - return [x for x in elems if x not in seen and not seen.add(x)] - -def _get_output(cmd): - """Get output of a command, raising IOError if it fails""" - p = Popen(cmd, stdout=PIPE, stderr=PIPE) - stdout, stderr = p.communicate() - if p.returncode: - raise IOError("Failed to run %s: %s" % (cmd, stderr.decode('utf8', 'replace'))) - return stdout.decode('utf8', 'replace') - -def _only_once(f): - """decorator to only run a function once""" - f.called = False - def wrapped(**kwargs): - if f.called: - return - ret = f(**kwargs) - f.called = True - return ret - return wrapped - -def _requires_ips(f): - """decorator to ensure load_ips has been run before f""" - def ips_loaded(*args, **kwargs): - _load_ips() - return f(*args, **kwargs) - return ips_loaded - -# subprocess-parsing ip finders -class NoIPAddresses(Exception): - pass - -def _populate_from_list(addrs): - """populate local and public IPs from flat list of all IPs""" - if not addrs: - raise NoIPAddresses - - global LOCALHOST - public_ips = [] - local_ips = [] - - for ip in addrs: - local_ips.append(ip) - if not ip.startswith('127.'): - public_ips.append(ip) - elif not LOCALHOST: - LOCALHOST = ip - - if not LOCALHOST: - LOCALHOST = '127.0.0.1' - local_ips.insert(0, LOCALHOST) - - local_ips.extend(['0.0.0.0', '']) - - LOCAL_IPS[:] = _uniq_stable(local_ips) - PUBLIC_IPS[:] = _uniq_stable(public_ips) - -def _load_ips_ifconfig(): - """load ip addresses from `ifconfig` output (posix)""" - - try: - out = _get_output('ifconfig') - except (IOError, OSError): - # no ifconfig, it's usually in /sbin and /sbin is not on everyone's PATH - out = _get_output('/sbin/ifconfig') - - lines = out.splitlines() - addrs = [] - for line in lines: - blocks = line.lower().split() - if (len(blocks) >= 2) and (blocks[0] == 'inet'): - if blocks[1].startswith("addr:"): - addrs.append(blocks[1].split(":")[1]) - else: - addrs.append(blocks[1]) - _populate_from_list(addrs) - - -def _load_ips_ip(): - """load ip addresses from `ip addr` output (Linux)""" - out = _get_output(['ip', 'addr']) - - lines = out.splitlines() - addrs = [] - for line in lines: - blocks = line.lower().split() - if (len(blocks) >= 2) and (blocks[0] == 'inet'): - addrs.append(blocks[1].split('/')[0]) - _populate_from_list(addrs) - -_ipconfig_ipv4_pat = re.compile(r'ipv4.*?(\d+\.\d+\.\d+\.\d+)$', re.IGNORECASE) - -def _load_ips_ipconfig(): - """load ip addresses from `ipconfig` output (Windows)""" - out = _get_output('ipconfig') - - lines = out.splitlines() - addrs = [] - for line in lines: - m = _ipconfig_ipv4_pat.match(line.strip()) - if m: - addrs.append(m.group(1)) - _populate_from_list(addrs) - - -def _load_ips_netifaces(): - """load ip 
addresses with netifaces""" - import netifaces - global LOCALHOST - local_ips = [] - public_ips = [] - - # list of iface names, 'lo0', 'eth0', etc. - for iface in netifaces.interfaces(): - # list of ipv4 addrinfo dicts - ipv4s = netifaces.ifaddresses(iface).get(netifaces.AF_INET, []) - for entry in ipv4s: - addr = entry.get('addr') - if not addr: - continue - if not (iface.startswith('lo') or addr.startswith('127.')): - public_ips.append(addr) - elif not LOCALHOST: - LOCALHOST = addr - local_ips.append(addr) - if not LOCALHOST: - # we never found a loopback interface (can this ever happen?), assume common default - LOCALHOST = '127.0.0.1' - local_ips.insert(0, LOCALHOST) - local_ips.extend(['0.0.0.0', '']) - LOCAL_IPS[:] = _uniq_stable(local_ips) - PUBLIC_IPS[:] = _uniq_stable(public_ips) - - -def _load_ips_gethostbyname(): - """load ip addresses with socket.gethostbyname_ex - - This can be slow. - """ - global LOCALHOST - try: - LOCAL_IPS[:] = socket.gethostbyname_ex('localhost')[2] - except socket.error: - # assume common default - LOCAL_IPS[:] = ['127.0.0.1'] - - try: - hostname = socket.gethostname() - PUBLIC_IPS[:] = socket.gethostbyname_ex(hostname)[2] - # try hostname.local, in case hostname has been short-circuited to loopback - if not hostname.endswith('.local') and all(ip.startswith('127') for ip in PUBLIC_IPS): - PUBLIC_IPS[:] = socket.gethostbyname_ex(socket.gethostname() + '.local')[2] - except socket.error: - pass - finally: - PUBLIC_IPS[:] = _uniq_stable(PUBLIC_IPS) - LOCAL_IPS.extend(PUBLIC_IPS) - - # include all-interface aliases: 0.0.0.0 and '' - LOCAL_IPS.extend(['0.0.0.0', '']) - - LOCAL_IPS[:] = _uniq_stable(LOCAL_IPS) - - LOCALHOST = LOCAL_IPS[0] - -def _load_ips_dumb(): - """Fallback in case of unexpected failure""" - global LOCALHOST - LOCALHOST = '127.0.0.1' - LOCAL_IPS[:] = [LOCALHOST, '0.0.0.0', ''] - PUBLIC_IPS[:] = [] - -@_only_once -def _load_ips(suppress_exceptions=True): - """load the IPs that point to this machine - - This function will only ever be called once. - - It will use netifaces to do it quickly if available. - Then it will fallback on parsing the output of ifconfig / ip addr / ipconfig, as appropriate. - Finally, it will fallback on socket.gethostbyname_ex, which can be slow. - """ - - try: - # first priority, use netifaces - try: - return _load_ips_netifaces() - except ImportError: - pass - - # second priority, parse subprocess output (how reliable is this?) - - if os.name == 'nt': - try: - return _load_ips_ipconfig() - except (IOError, NoIPAddresses): - pass - else: - try: - return _load_ips_ifconfig() - except (IOError, NoIPAddresses): - pass - try: - return _load_ips_ip() - except (IOError, NoIPAddresses): - pass - - # lowest priority, use gethostbyname - - return _load_ips_gethostbyname() - except Exception as e: - if not suppress_exceptions: - raise - # unexpected error shouldn't crash, load dumb default values instead. 
- warn("Unexpected error discovering local network interfaces: %s" % e) - _load_ips_dumb() - - -@_requires_ips -def local_ips(): - """return the IP addresses that point to this machine""" - return LOCAL_IPS - -@_requires_ips -def public_ips(): - """return the IP addresses for this machine that are visible to other machines""" - return PUBLIC_IPS - -@_requires_ips -def localhost(): - """return ip for localhost (almost always 127.0.0.1)""" - return LOCALHOST - -@_requires_ips -def is_local_ip(ip): - """does `ip` point to this machine?""" - return ip in LOCAL_IPS - -@_requires_ips -def is_public_ip(ip): - """is `ip` a publicly visible address?""" - return ip in PUBLIC_IPS diff --git a/jupyter_client/manager.py b/jupyter_client/manager.py deleted file mode 100644 index 10b79cc..0000000 --- a/jupyter_client/manager.py +++ /dev/null @@ -1,442 +0,0 @@ -"""Base class to manage a running kernel""" - -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. - -from __future__ import absolute_import - -from contextlib import contextmanager -import os -import re -import signal -import sys -import time -import warnings -try: - from queue import Empty # Py 3 -except ImportError: - from Queue import Empty # Py 2 - -import zmq - -from IPython.utils.importstring import import_item -from .localinterfaces import is_local_ip, local_ips -from IPython.utils.path import get_ipython_dir -from IPython.utils.traitlets import ( - Any, Instance, Unicode, List, Bool, Type, DottedObjectName -) -from jupyter_client import ( - launch_kernel, - kernelspec, -) -from .connect import ConnectionFileMixin -from .session import Session -from .managerabc import ( - KernelManagerABC -) - - -class KernelManager(ConnectionFileMixin): - """Manages a single kernel in a subprocess on this host. - - This version starts kernels with Popen. - """ - - # The PyZMQ Context to use for communication with the kernel. - context = Instance(zmq.Context) - def _context_default(self): - return zmq.Context.instance() - - # the class to create with our `client` method - client_class = DottedObjectName('jupyter_client.blocking.BlockingKernelClient') - client_factory = Type(allow_none=True) - def _client_class_changed(self, name, old, new): - self.client_factory = import_item(str(new)) - - # The kernel process with which the KernelManager is communicating. - # generally a Popen instance - kernel = Any() - - kernel_spec_manager = Instance(kernelspec.KernelSpecManager) - - def _kernel_spec_manager_default(self): - return kernelspec.KernelSpecManager(ipython_dir=self.ipython_dir) - - kernel_name = Unicode(kernelspec.NATIVE_KERNEL_NAME) - - kernel_spec = Instance(kernelspec.KernelSpec) - - def _kernel_spec_default(self): - return self.kernel_spec_manager.get_kernel_spec(self.kernel_name) - - def _kernel_name_changed(self, name, old, new): - if new == 'python': - self.kernel_name = kernelspec.NATIVE_KERNEL_NAME - # This triggered another run of this function, so we can exit now - return - self.kernel_spec = self.kernel_spec_manager.get_kernel_spec(new) - self.ipython_kernel = new in {'python', 'python2', 'python3'} - - kernel_cmd = List(Unicode, config=True, - help="""DEPRECATED: Use kernel_name instead. - - The Popen Command to launch the kernel. - Override this if you have a custom kernel. - If kernel_cmd is specified in a configuration file, - IPython does not pass any arguments to the kernel, - because it cannot make any assumptions about the - arguments that the kernel understands. 
In particular, - this means that the kernel does not receive the - option --debug if it is given on the IPython command line. - """ - ) - - def _kernel_cmd_changed(self, name, old, new): - warnings.warn("Setting kernel_cmd is deprecated, use kernel_spec to " - "start different kernels.") - self.ipython_kernel = False - - ipython_kernel = Bool(True) - - ipython_dir = Unicode() - def _ipython_dir_default(self): - return get_ipython_dir() - - # Protected traits - _launch_args = Any() - _control_socket = Any() - - _restarter = Any() - - autorestart = Bool(False, config=True, - help="""Should we autorestart the kernel if it dies.""" - ) - - def __del__(self): - self._close_control_socket() - self.cleanup_connection_file() - - #-------------------------------------------------------------------------- - # Kernel restarter - #-------------------------------------------------------------------------- - - def start_restarter(self): - pass - - def stop_restarter(self): - pass - - def add_restart_callback(self, callback, event='restart'): - """register a callback to be called when a kernel is restarted""" - if self._restarter is None: - return - self._restarter.add_callback(callback, event) - - def remove_restart_callback(self, callback, event='restart'): - """unregister a callback to be called when a kernel is restarted""" - if self._restarter is None: - return - self._restarter.remove_callback(callback, event) - - #-------------------------------------------------------------------------- - # create a Client connected to our Kernel - #-------------------------------------------------------------------------- - - def client(self, **kwargs): - """Create a client configured to connect to our kernel""" - if self.client_factory is None: - self.client_factory = import_item(self.client_class) - - kw = {} - kw.update(self.get_connection_info()) - kw.update(dict( - connection_file=self.connection_file, - session=self.session, - parent=self, - )) - - # add kwargs last, for manual overrides - kw.update(kwargs) - return self.client_factory(**kw) - - #-------------------------------------------------------------------------- - # Kernel management - #-------------------------------------------------------------------------- - - def format_kernel_cmd(self, extra_arguments=None): - """replace templated args (e.g. {connection_file})""" - extra_arguments = extra_arguments or [] - if self.kernel_cmd: - cmd = self.kernel_cmd + extra_arguments - else: - cmd = self.kernel_spec.argv + extra_arguments - - ns = dict(connection_file=self.connection_file) - ns.update(self._launch_args) - - pat = re.compile(r'\{([A-Za-z0-9_]+)\}') - def from_ns(match): - """Get the key out of ns if it's there, otherwise no change.""" - return ns.get(match.group(1), match.group()) - - return [ pat.sub(from_ns, arg) for arg in cmd ] - - def _launch_kernel(self, kernel_cmd, **kw): - """actually launch the kernel - - override in a subclass to launch kernel subprocesses differently - """ - return launch_kernel(kernel_cmd, **kw) - - # Control socket used for polite kernel shutdown - - def _connect_control_socket(self): - if self._control_socket is None: - self._control_socket = self.connect_control() - self._control_socket.linger = 100 - - def _close_control_socket(self): - if self._control_socket is None: - return - self._control_socket.close() - self._control_socket = None - - def start_kernel(self, **kw): - """Starts a kernel on this host in a separate process.
- - If random ports (port=0) are being used, this method must be called - before the channels are created. - - Parameters - ---------- - **kw : optional - keyword arguments that are passed down to build the kernel_cmd - and launch the kernel (e.g. Popen kwargs). - """ - if self.transport == 'tcp' and not is_local_ip(self.ip): - raise RuntimeError("Can only launch a kernel on a local interface. " - "Make sure that the '*_address' attributes are " - "configured properly. " - "Currently valid addresses are: %s" % local_ips() - ) - - # write connection file / get default ports - self.write_connection_file() - - # save kwargs for use in restart - self._launch_args = kw.copy() - # build the Popen cmd - extra_arguments = kw.pop('extra_arguments', []) - kernel_cmd = self.format_kernel_cmd(extra_arguments=extra_arguments) - if self.kernel_cmd: - # If kernel_cmd has been set manually, don't refer to a kernel spec - env = os.environ - else: - # Environment variables from kernel spec are added to os.environ - env = os.environ.copy() - env.update(self.kernel_spec.env or {}) - # launch the kernel subprocess - self.kernel = self._launch_kernel(kernel_cmd, env=env, - **kw) - self.start_restarter() - self._connect_control_socket() - - def request_shutdown(self, restart=False): - """Send a shutdown request via control channel - - On Windows, this just kills kernels instead, because the shutdown - messages don't work. - """ - content = dict(restart=restart) - msg = self.session.msg("shutdown_request", content=content) - self.session.send(self._control_socket, msg) - - def finish_shutdown(self, waittime=1, pollinterval=0.1): - """Wait for kernel shutdown, then kill process if it doesn't shut down. - - This does not send shutdown requests - use :meth:`request_shutdown` - first. - """ - for i in range(int(waittime/pollinterval)): - if self.is_alive(): - time.sleep(pollinterval) - else: - break - else: - # OK, we've waited long enough. - if self.has_kernel: - self._kill_kernel() - - def cleanup(self, connection_file=True): - """Clean up resources when the kernel is shut down""" - if connection_file: - self.cleanup_connection_file() - - self.cleanup_ipc_files() - self._close_control_socket() - - def shutdown_kernel(self, now=False, restart=False): - """Attempts to stop the kernel process cleanly. - - This attempts to shut down the kernel cleanly by: - - 1. Sending it a shutdown message over the control channel. - 2. If that fails, the kernel is shut down forcibly by sending it - a signal. - - Parameters - ---------- - now : bool - Should the kernel be forcibly killed *now*. This skips the - first, nice shutdown attempt. - restart : bool - Will this kernel be restarted after it is shut down. When this - is True, connection files will not be cleaned up. - """ - # Stop monitoring for restarting while we shut down. - self.stop_restarter() - - if now: - self._kill_kernel() - else: - self.request_shutdown(restart=restart) - # Don't send any additional kernel kill messages immediately, to give - # the kernel a chance to properly execute shutdown actions. Wait for at - # most 1s, checking every 0.1s. - self.finish_shutdown() - - self.cleanup(connection_file=not restart) - - def restart_kernel(self, now=False, **kw): - """Restarts a kernel with the arguments that were used to launch it. - - If the old kernel was launched with random ports, the same ports will be - used for the new kernel. The same connection file is used again.
- - Parameters - ---------- - now : bool, optional - If True, the kernel is forcefully restarted *immediately*, without - having a chance to do any cleanup action. Otherwise the kernel is - given 1s to clean up before a forceful restart is issued. - - In all cases the kernel is restarted, the only difference is whether - it is given a chance to perform a clean shutdown or not. - - **kw : optional - Any options specified here will overwrite those used to launch the - kernel. - """ - if self._launch_args is None: - raise RuntimeError("Cannot restart the kernel. " - "No previous call to 'start_kernel'.") - else: - # Stop currently running kernel. - self.shutdown_kernel(now=now, restart=True) - - # Start new kernel. - self._launch_args.update(kw) - self.start_kernel(**self._launch_args) - - @property - def has_kernel(self): - """Has a kernel been started that we are managing.""" - return self.kernel is not None - - def _kill_kernel(self): - """Kill the running kernel. - - This is a private method, callers should use shutdown_kernel(now=True). - """ - if self.has_kernel: - - # Signal the kernel to terminate (sends SIGKILL on Unix and calls - # TerminateProcess() on Win32). - try: - self.kernel.kill() - except OSError as e: - # In Windows, we will get an Access Denied error if the process - # has already terminated. Ignore it. - if sys.platform == 'win32': - if e.winerror != 5: - raise - # On Unix, we may get an ESRCH error if the process has already - # terminated. Ignore it. - else: - from errno import ESRCH - if e.errno != ESRCH: - raise - - # Block until the kernel terminates. - self.kernel.wait() - self.kernel = None - else: - raise RuntimeError("Cannot kill kernel. No kernel is running!") - - def interrupt_kernel(self): - """Interrupts the kernel by sending it a signal. - - Unlike ``signal_kernel``, this operation is well supported on all - platforms. - """ - if self.has_kernel: - if sys.platform == 'win32': - from .win_interrupt import send_interrupt - send_interrupt(self.kernel.win32_interrupt_event) - else: - self.kernel.send_signal(signal.SIGINT) - else: - raise RuntimeError("Cannot interrupt kernel. No kernel is running!") - - def signal_kernel(self, signum): - """Sends a signal to the kernel. - - Note that since only SIGTERM is supported on Windows, this function is - only useful on Unix systems. - """ - if self.has_kernel: - self.kernel.send_signal(signum) - else: - raise RuntimeError("Cannot signal kernel. No kernel is running!") - - def is_alive(self): - """Is the kernel process still running?""" - if self.has_kernel: - if self.kernel.poll() is None: - return True - else: - return False - else: - # we don't have a kernel - return False - - -KernelManagerABC.register(KernelManager) - - -def start_new_kernel(startup_timeout=60, kernel_name='python', **kwargs): - """Start a new kernel, and return its Manager and Client""" - km = KernelManager(kernel_name=kernel_name) - km.start_kernel(**kwargs) - kc = km.client() - kc.start_channels() - kc.wait_for_ready() - - return km, kc - -@contextmanager -def run_kernel(**kwargs): - """Context manager to create a kernel in a subprocess. - - The kernel is shut down when the context exits. 
- - Returns - ------- - kernel_client: connected KernelClient instance - """ - km, kc = start_new_kernel(**kwargs) - try: - yield kc - finally: - kc.stop_channels() - km.shutdown_kernel(now=True) diff --git a/jupyter_client/managerabc.py b/jupyter_client/managerabc.py deleted file mode 100644 index 7717007..0000000 --- a/jupyter_client/managerabc.py +++ /dev/null @@ -1,53 +0,0 @@ -"""Abstract base class for kernel managers.""" - -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. - -import abc - -from IPython.utils.py3compat import with_metaclass - - -class KernelManagerABC(with_metaclass(abc.ABCMeta, object)): - """KernelManager ABC. - - The docstrings for this class can be found in the base implementation: - - `jupyter_client.manager.KernelManager` - """ - - @abc.abstractproperty - def kernel(self): - pass - - #-------------------------------------------------------------------------- - # Kernel management - #-------------------------------------------------------------------------- - - @abc.abstractmethod - def start_kernel(self, **kw): - pass - - @abc.abstractmethod - def shutdown_kernel(self, now=False, restart=False): - pass - - @abc.abstractmethod - def restart_kernel(self, now=False, **kw): - pass - - @abc.abstractproperty - def has_kernel(self): - pass - - @abc.abstractmethod - def interrupt_kernel(self): - pass - - @abc.abstractmethod - def signal_kernel(self, signum): - pass - - @abc.abstractmethod - def is_alive(self): - pass diff --git a/jupyter_client/multikernelmanager.py b/jupyter_client/multikernelmanager.py deleted file mode 100644 index 95e6e78..0000000 --- a/jupyter_client/multikernelmanager.py +++ /dev/null @@ -1,315 +0,0 @@ -"""A kernel manager for multiple kernels""" - -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. - -from __future__ import absolute_import - -import os -import uuid - -import zmq - -from IPython.config.configurable import LoggingConfigurable -from IPython.utils.importstring import import_item -from IPython.utils.traitlets import ( - Instance, Dict, List, Unicode, Any, DottedObjectName -) -from IPython.utils.py3compat import unicode_type - -from .kernelspec import NATIVE_KERNEL_NAME - -class DuplicateKernelError(Exception): - pass - - -def kernel_method(f): - """decorator for proxying MKM.method(kernel_id) to individual KMs by ID""" - def wrapped(self, kernel_id, *args, **kwargs): - # get the kernel - km = self.get_kernel(kernel_id) - method = getattr(km, f.__name__) - # call the kernel's method - r = method(*args, **kwargs) - # last thing, call anything defined in the actual class method - # such as logging messages - f(self, kernel_id, *args, **kwargs) - # return the method result - return r - return wrapped - - -class MultiKernelManager(LoggingConfigurable): - """A class for managing multiple kernels.""" - - default_kernel_name = Unicode(NATIVE_KERNEL_NAME, config=True, - help="The name of the default kernel to start" - ) - - kernel_manager_class = DottedObjectName( - "jupyter_client.ioloop.IOLoopKernelManager", config=True, - help="""The kernel manager class. This is configurable to allow - subclassing of the KernelManager for customized behavior.
- """ - ) - def _kernel_manager_class_changed(self, name, old, new): - self.kernel_manager_factory = import_item(new) - - kernel_manager_factory = Any(help="this is kernel_manager_class after import") - def _kernel_manager_factory_default(self): - return import_item(self.kernel_manager_class) - - context = Instance('zmq.Context') - def _context_default(self): - return zmq.Context.instance() - - connection_dir = Unicode('') - - _kernels = Dict() - - def list_kernel_ids(self): - """Return a list of the kernel ids of the active kernels.""" - # Create a copy so we can iterate over kernels in operations - # that delete keys. - return list(self._kernels.keys()) - - def __len__(self): - """Return the number of running kernels.""" - return len(self.list_kernel_ids()) - - def __contains__(self, kernel_id): - return kernel_id in self._kernels - - def start_kernel(self, kernel_name=None, **kwargs): - """Start a new kernel. - - The caller can pick a kernel_id by passing one in as a keyword arg, - otherwise one will be picked using a uuid. - - To silence the kernel's stdout/stderr, call this using:: - - km.start_kernel(stdout=PIPE, stderr=PIPE) - - """ - kernel_id = kwargs.pop('kernel_id', unicode_type(uuid.uuid4())) - if kernel_id in self: - raise DuplicateKernelError('Kernel already exists: %s' % kernel_id) - - if kernel_name is None: - kernel_name = self.default_kernel_name - # kernel_manager_factory is the constructor for the KernelManager - # subclass we are using. It can be configured as any Configurable, - # including things like its transport and ip. - km = self.kernel_manager_factory(connection_file=os.path.join( - self.connection_dir, "kernel-%s.json" % kernel_id), - parent=self, autorestart=True, log=self.log, kernel_name=kernel_name, - kernel_spec_manager=self.kernel_spec_manager, - ) - km.start_kernel(**kwargs) - self._kernels[kernel_id] = km - return kernel_id - - @kernel_method - def shutdown_kernel(self, kernel_id, now=False, restart=False): - """Shutdown a kernel by its kernel uuid. - - Parameters - ========== - kernel_id : uuid - The id of the kernel to shutdown. - now : bool - Should the kernel be shutdown forcibly using a signal. - restart : bool - Will the kernel be restarted? - """ - self.log.info("Kernel shutdown: %s" % kernel_id) - self.remove_kernel(kernel_id) - - @kernel_method - def request_shutdown(self, kernel_id, restart=False): - """Ask a kernel to shut down by its kernel uuid""" - - @kernel_method - def finish_shutdown(self, kernel_id, waittime=1, pollinterval=0.1): - """Wait for a kernel to finish shutting down, and kill it if it doesn't - """ - self.log.info("Kernel shutdown: %s" % kernel_id) - - @kernel_method - def cleanup(self, kernel_id, connection_file=True): - """Clean up a kernel's resources""" - - def remove_kernel(self, kernel_id): - """remove a kernel from our mapping. - - Mainly so that a kernel can be removed if it is already dead, - without having to call shutdown_kernel. - - The kernel object is returned. - """ - return self._kernels.pop(kernel_id) - - def shutdown_all(self, now=False): - """Shutdown all kernels.""" - kids = self.list_kernel_ids() - for kid in kids: - self.request_shutdown(kid) - for kid in kids: - self.finish_shutdown(kid) - self.cleanup(kid) - self.remove_kernel(kid) - - @kernel_method - def interrupt_kernel(self, kernel_id): - """Interrupt (SIGINT) the kernel by its uuid. - - Parameters - ========== - kernel_id : uuid - The id of the kernel to interrupt. 
- """ - self.log.info("Kernel interrupted: %s" % kernel_id) - - @kernel_method - def signal_kernel(self, kernel_id, signum): - """Sends a signal to the kernel by its uuid. - - Note that since only SIGTERM is supported on Windows, this function - is only useful on Unix systems. - - Parameters - ========== - kernel_id : uuid - The id of the kernel to signal. - """ - self.log.info("Signaled Kernel %s with %s" % (kernel_id, signum)) - - @kernel_method - def restart_kernel(self, kernel_id, now=False): - """Restart a kernel by its uuid, keeping the same ports. - - Parameters - ========== - kernel_id : uuid - The id of the kernel to interrupt. - """ - self.log.info("Kernel restarted: %s" % kernel_id) - - @kernel_method - def is_alive(self, kernel_id): - """Is the kernel alive. - - This calls KernelManager.is_alive() which calls Popen.poll on the - actual kernel subprocess. - - Parameters - ========== - kernel_id : uuid - The id of the kernel. - """ - - def _check_kernel_id(self, kernel_id): - """check that a kernel id is valid""" - if kernel_id not in self: - raise KeyError("Kernel with id not found: %s" % kernel_id) - - def get_kernel(self, kernel_id): - """Get the single KernelManager object for a kernel by its uuid. - - Parameters - ========== - kernel_id : uuid - The id of the kernel. - """ - self._check_kernel_id(kernel_id) - return self._kernels[kernel_id] - - @kernel_method - def add_restart_callback(self, kernel_id, callback, event='restart'): - """add a callback for the KernelRestarter""" - - @kernel_method - def remove_restart_callback(self, kernel_id, callback, event='restart'): - """remove a callback for the KernelRestarter""" - - @kernel_method - def get_connection_info(self, kernel_id): - """Return a dictionary of connection data for a kernel. - - Parameters - ========== - kernel_id : uuid - The id of the kernel. - - Returns - ======= - connection_dict : dict - A dict of the information needed to connect to a kernel. - This includes the ip address and the integer port - numbers of the different channels (stdin_port, iopub_port, - shell_port, hb_port). - """ - - @kernel_method - def connect_iopub(self, kernel_id, identity=None): - """Return a zmq Socket connected to the iopub channel. - - Parameters - ========== - kernel_id : uuid - The id of the kernel - identity : bytes (optional) - The zmq identity of the socket - - Returns - ======= - stream : zmq Socket or ZMQStream - """ - - @kernel_method - def connect_shell(self, kernel_id, identity=None): - """Return a zmq Socket connected to the shell channel. - - Parameters - ========== - kernel_id : uuid - The id of the kernel - identity : bytes (optional) - The zmq identity of the socket - - Returns - ======= - stream : zmq Socket or ZMQStream - """ - - @kernel_method - def connect_stdin(self, kernel_id, identity=None): - """Return a zmq Socket connected to the stdin channel. - - Parameters - ========== - kernel_id : uuid - The id of the kernel - identity : bytes (optional) - The zmq identity of the socket - - Returns - ======= - stream : zmq Socket or ZMQStream - """ - - @kernel_method - def connect_hb(self, kernel_id, identity=None): - """Return a zmq Socket connected to the hb channel. 
- - Parameters - ========== - kernel_id : uuid - The id of the kernel - identity : bytes (optional) - The zmq identity of the socket - - Returns - ======= - stream : zmq Socket or ZMQStream - """ diff --git a/jupyter_client/restarter.py b/jupyter_client/restarter.py deleted file mode 100644 index ca1c2ca..0000000 --- a/jupyter_client/restarter.py +++ /dev/null @@ -1,111 +0,0 @@ -"""A basic kernel monitor with autorestarting. - -This watches a kernel's state using KernelManager.is_alive and auto -restarts the kernel if it dies. - -It is an incomplete base class, and must be subclassed. -""" - -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. - -from IPython.config.configurable import LoggingConfigurable -from IPython.utils.traitlets import ( - Instance, Float, Dict, Bool, Integer, -) - - -class KernelRestarter(LoggingConfigurable): - """Monitor and autorestart a kernel.""" - - kernel_manager = Instance('jupyter_client.KernelManager') - - debug = Bool(False, config=True, - help="""Whether to include every poll event in debugging output. - - Has to be set explicitly, because there will be *a lot* of output. - """ - ) - - time_to_dead = Float(3.0, config=True, - help="""Kernel heartbeat interval in seconds.""" - ) - - restart_limit = Integer(5, config=True, - help="""The number of consecutive autorestarts before the kernel is presumed dead.""" - ) - _restarting = Bool(False) - _restart_count = Integer(0) - - callbacks = Dict() - def _callbacks_default(self): - return dict(restart=[], dead=[]) - - def start(self): - """Start the polling of the kernel.""" - raise NotImplementedError("Must be implemented in a subclass") - - def stop(self): - """Stop the kernel polling.""" - raise NotImplementedError("Must be implemented in a subclass") - - def add_callback(self, f, event='restart'): - """register a callback to fire on a particular event - - Possible values for event: - - 'restart' (default): kernel has died, and will be restarted. - 'dead': restart has failed, kernel will be left dead. - - """ - self.callbacks[event].append(f) - - def remove_callback(self, f, event='restart'): - """unregister a callback to fire on a particular event - - Possible values for event: - - 'restart' (default): kernel has died, and will be restarted. - 'dead': restart has failed, kernel will be left dead. 
- - """ - try: - self.callbacks[event].remove(f) - except ValueError: - pass - - def _fire_callbacks(self, event): - """fire our callbacks for a particular event""" - for callback in self.callbacks[event]: - try: - callback() - except Exception as e: - self.log.error("KernelRestarter: %s callback %r failed", event, callback, exc_info=True) - - def poll(self): - if self.debug: - self.log.debug('Polling kernel...') - if not self.kernel_manager.is_alive(): - if self._restarting: - self._restart_count += 1 - else: - self._restart_count = 1 - - if self._restart_count >= self.restart_limit: - self.log.warn("KernelRestarter: restart failed") - self._fire_callbacks('dead') - self._restarting = False - self._restart_count = 0 - self.stop() - else: - self.log.info('KernelRestarter: restarting kernel (%i/%i)', - self._restart_count, - self.restart_limit - ) - self._fire_callbacks('restart') - self.kernel_manager.restart_kernel(now=True) - self._restarting = True - else: - if self._restarting: - self.log.debug("KernelRestarter: restart apparently succeeded") - self._restarting = False diff --git a/jupyter_client/session.py b/jupyter_client/session.py deleted file mode 100644 index 4ff1f58..0000000 --- a/jupyter_client/session.py +++ /dev/null @@ -1,891 +0,0 @@ -"""Session object for building, serializing, sending, and receiving messages in -IPython. The Session object supports serialization, HMAC signatures, and -metadata on messages. - -Also defined here are utilities for working with Sessions: -* A SessionFactory to be used as a base class for configurables that work with -Sessions. -* A Message object for convenience that allows attribute-access to the msg dict. -""" - -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. 
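
The restarter above fires its callbacks from poll(); a hedged sketch of subscribing to those events follows. The `restarter` handle is an assumption here — how you obtain it depends on which KernelManager subclass owns the KernelRestarter instance.

    # Hypothetical wiring for the 'restart'/'dead' events defined in restarter.py.
    # `restarter` is assumed to be a KernelRestarter obtained from its owning manager.
    def on_restart():
        print("kernel died and was autorestarted")

    def on_dead():
        print("restart limit reached; kernel left dead")

    restarter.add_callback(on_restart, event='restart')  # fires each time a restart is attempted
    restarter.add_callback(on_dead, event='dead')        # fires once the restart limit is exhausted
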
- -import hashlib -import hmac -import logging -import os -import pprint -import random -import uuid -import warnings -from datetime import datetime - -try: - import cPickle - pickle = cPickle -except: - cPickle = None - import pickle - -try: - # py3 - PICKLE_PROTOCOL = pickle.DEFAULT_PROTOCOL -except AttributeError: - PICKLE_PROTOCOL = pickle.HIGHEST_PROTOCOL - -try: - # We are using compare_digest to limit the surface of timing attacks - from hmac import compare_digest -except ImportError: - # Python < 2.7.7: When digests don't match no feedback is provided, - # limiting the surface of attack - def compare_digest(a,b): return a == b - -import zmq -from zmq.utils import jsonapi -from zmq.eventloop.ioloop import IOLoop -from zmq.eventloop.zmqstream import ZMQStream - -from IPython.core.release import kernel_protocol_version -from IPython.config.configurable import Configurable, LoggingConfigurable -from IPython.utils.importstring import import_item -from jupyter_client.jsonutil import extract_dates, squash_dates, date_default -from IPython.utils.py3compat import (str_to_bytes, str_to_unicode, unicode_type, - iteritems) -from IPython.utils.traitlets import (CBytes, Unicode, Bool, Any, Instance, Set, - DottedObjectName, CUnicode, Dict, Integer, - TraitError, -) -from jupyter_client.adapter import adapt -from traitlets.log import get_logger - - -#----------------------------------------------------------------------------- -# utility functions -#----------------------------------------------------------------------------- - -def squash_unicode(obj): - """coerce unicode back to bytestrings.""" - if isinstance(obj,dict): - for key in obj.keys(): - obj[key] = squash_unicode(obj[key]) - if isinstance(key, unicode_type): - obj[squash_unicode(key)] = obj.pop(key) - elif isinstance(obj, list): - for i,v in enumerate(obj): - obj[i] = squash_unicode(v) - elif isinstance(obj, unicode_type): - obj = obj.encode('utf8') - return obj - -#----------------------------------------------------------------------------- -# globals and defaults -#----------------------------------------------------------------------------- - -# default values for the thresholds: -MAX_ITEMS = 64 -MAX_BYTES = 1024 - -# ISO8601-ify datetime objects -# allow unicode -# disallow nan, because it's not actually valid JSON -json_packer = lambda obj: jsonapi.dumps(obj, default=date_default, - ensure_ascii=False, allow_nan=False, -) -json_unpacker = lambda s: jsonapi.loads(s) - -pickle_packer = lambda o: pickle.dumps(squash_dates(o), PICKLE_PROTOCOL) -pickle_unpacker = pickle.loads - -default_packer = json_packer -default_unpacker = json_unpacker - -DELIM = b"" -# singleton dummy tracker, which will always report as done -DONE = zmq.MessageTracker() - -#----------------------------------------------------------------------------- -# Mixin tools for apps that use Sessions -#----------------------------------------------------------------------------- - -session_aliases = dict( - ident = 'Session.session', - user = 'Session.username', - keyfile = 'Session.keyfile', -) - -session_flags = { - 'secure' : ({'Session' : { 'key' : str_to_bytes(str(uuid.uuid4())), - 'keyfile' : '' }}, - """Use HMAC digests for authentication of messages. - Setting this flag will generate a new UUID to use as the HMAC key. - """), - 'no-secure' : ({'Session' : { 'key' : b'', 'keyfile' : '' }}, - """Don't authenticate messages."""), -} - -def default_secure(cfg): - """Set the default behavior for a config environment to be secure. 
- - If Session.key/keyfile have not been set, set Session.key to - a new random UUID. - """ - warnings.warn("default_secure is deprecated", DeprecationWarning) - if 'Session' in cfg: - if 'key' in cfg.Session or 'keyfile' in cfg.Session: - return - # key/keyfile not specified, generate new UUID: - cfg.Session.key = str_to_bytes(str(uuid.uuid4())) - - -#----------------------------------------------------------------------------- -# Classes -#----------------------------------------------------------------------------- - -class SessionFactory(LoggingConfigurable): - """The Base class for configurables that have a Session, Context, logger, - and IOLoop. - """ - - logname = Unicode('') - def _logname_changed(self, name, old, new): - self.log = logging.getLogger(new) - - # not configurable: - context = Instance('zmq.Context') - def _context_default(self): - return zmq.Context.instance() - - session = Instance('jupyter_client.session.Session', - allow_none=True) - - loop = Instance('zmq.eventloop.ioloop.IOLoop') - def _loop_default(self): - return IOLoop.instance() - - def __init__(self, **kwargs): - super(SessionFactory, self).__init__(**kwargs) - - if self.session is None: - # construct the session - self.session = Session(**kwargs) - - -class Message(object): - """A simple message object that maps dict keys to attributes. - - A Message can be created from a dict and a dict from a Message instance - simply by calling dict(msg_obj).""" - - def __init__(self, msg_dict): - dct = self.__dict__ - for k, v in iteritems(dict(msg_dict)): - if isinstance(v, dict): - v = Message(v) - dct[k] = v - - # Having this iterator lets dict(msg_obj) work out of the box. - def __iter__(self): - return iter(iteritems(self.__dict__)) - - def __repr__(self): - return repr(self.__dict__) - - def __str__(self): - return pprint.pformat(self.__dict__) - - def __contains__(self, k): - return k in self.__dict__ - - def __getitem__(self, k): - return self.__dict__[k] - - -def msg_header(msg_id, msg_type, username, session): - date = datetime.now() - version = kernel_protocol_version - return locals() - -def extract_header(msg_or_header): - """Given a message or header, return the header.""" - if not msg_or_header: - return {} - try: - # See if msg_or_header is the entire message. - h = msg_or_header['header'] - except KeyError: - try: - # See if msg_or_header is just the header - h = msg_or_header['msg_id'] - except KeyError: - raise - else: - h = msg_or_header - if not isinstance(h, dict): - h = dict(h) - return h - -class Session(Configurable): - """Object for handling serialization and sending of messages. - - The Session object handles building messages and sending them - with ZMQ sockets or ZMQStream objects. Objects can communicate with each - other over the network via Session objects, and only need to work with the - dict-based IPython message spec. The Session will handle - serialization/deserialization, security, and metadata. - - Sessions support configurable serialization via packer/unpacker traits, - and signing with HMAC digests via the key/keyfile traits. - - Parameters - ---------- - - debug : bool - whether to trigger extra debugging statements - packer/unpacker : str : 'json', 'pickle' or import_string - importstrings for methods to serialize message parts. If just - 'json' or 'pickle', predefined JSON and pickle packers will be used. - Otherwise, the entire importstring must be used. - - The functions must accept at least valid JSON input, and output *bytes*. 
- - For example, to use msgpack: - packer = 'msgpack.packb', unpacker='msgpack.unpackb' - pack/unpack : callables - You can also set the pack/unpack callables for serialization directly. - session : bytes - the ID of this Session object. The default is to generate a new UUID. - username : unicode - username added to message headers. The default is to ask the OS. - key : bytes - The key used to initialize an HMAC signature. If unset, messages - will not be signed or checked. - keyfile : filepath - The file containing a key. If this is set, `key` will be initialized - to the contents of the file. - - """ - - debug=Bool(False, config=True, help="""Debug output in the Session""") - - packer = DottedObjectName('json',config=True, - help="""The name of the packer for serializing messages. - Should be one of 'json', 'pickle', or an import name - for a custom callable serializer.""") - def _packer_changed(self, name, old, new): - if new.lower() == 'json': - self.pack = json_packer - self.unpack = json_unpacker - self.unpacker = new - elif new.lower() == 'pickle': - self.pack = pickle_packer - self.unpack = pickle_unpacker - self.unpacker = new - else: - self.pack = import_item(str(new)) - - unpacker = DottedObjectName('json', config=True, - help="""The name of the unpacker for unserializing messages. - Only used with custom functions for `packer`.""") - def _unpacker_changed(self, name, old, new): - if new.lower() == 'json': - self.pack = json_packer - self.unpack = json_unpacker - self.packer = new - elif new.lower() == 'pickle': - self.pack = pickle_packer - self.unpack = pickle_unpacker - self.packer = new - else: - self.unpack = import_item(str(new)) - - session = CUnicode(u'', config=True, - help="""The UUID identifying this session.""") - def _session_default(self): - u = unicode_type(uuid.uuid4()) - self.bsession = u.encode('ascii') - return u - - def _session_changed(self, name, old, new): - self.bsession = self.session.encode('ascii') - - # bsession is the session as bytes - bsession = CBytes(b'') - - username = Unicode(str_to_unicode(os.environ.get('USER', 'username')), - help="""Username for the Session. Default is your system username.""", - config=True) - - metadata = Dict({}, config=True, - help="""Metadata dictionary, which serves as the default top-level metadata dict for each message.""") - - # if 0, no adapting to do. - adapt_version = Integer(0) - - # message signature related traits: - - key = CBytes(config=True, - help="""execution key, for signing messages.""") - def _key_default(self): - return str_to_bytes(str(uuid.uuid4())) - - def _key_changed(self): - self._new_auth() - - signature_scheme = Unicode('hmac-sha256', config=True, - help="""The digest scheme used to construct the message signatures. 
- Must have the form 'hmac-HASH'.""") - def _signature_scheme_changed(self, name, old, new): - if not new.startswith('hmac-'): - raise TraitError("signature_scheme must start with 'hmac-', got %r" % new) - hash_name = new.split('-', 1)[1] - try: - self.digest_mod = getattr(hashlib, hash_name) - except AttributeError: - raise TraitError("hashlib has no such attribute: %s" % hash_name) - self._new_auth() - - digest_mod = Any() - def _digest_mod_default(self): - return hashlib.sha256 - - auth = Instance(hmac.HMAC, allow_none=True) - - def _new_auth(self): - if self.key: - self.auth = hmac.HMAC(self.key, digestmod=self.digest_mod) - else: - self.auth = None - - digest_history = Set() - digest_history_size = Integer(2**16, config=True, - help="""The maximum number of digests to remember. - - The digest history will be culled when it exceeds this value. - """ - ) - - keyfile = Unicode('', config=True, - help="""path to file containing execution key.""") - def _keyfile_changed(self, name, old, new): - with open(new, 'rb') as f: - self.key = f.read().strip() - - # for protecting against sends from forks - pid = Integer() - - # serialization traits: - - pack = Any(default_packer) # the actual packer function - def _pack_changed(self, name, old, new): - if not callable(new): - raise TypeError("packer must be callable, not %s"%type(new)) - - unpack = Any(default_unpacker) # the actual packer function - def _unpack_changed(self, name, old, new): - # unpacker is not checked - it is assumed to be - if not callable(new): - raise TypeError("unpacker must be callable, not %s"%type(new)) - - # thresholds: - copy_threshold = Integer(2**16, config=True, - help="Threshold (in bytes) beyond which a buffer should be sent without copying.") - buffer_threshold = Integer(MAX_BYTES, config=True, - help="Threshold (in bytes) beyond which an object's buffer should be extracted to avoid pickling.") - item_threshold = Integer(MAX_ITEMS, config=True, - help="""The maximum number of items for a container to be introspected for custom serialization. - Containers larger than this are pickled outright. - """ - ) - - - def __init__(self, **kwargs): - """create a Session object - - Parameters - ---------- - - debug : bool - whether to trigger extra debugging statements - packer/unpacker : str : 'json', 'pickle' or import_string - importstrings for methods to serialize message parts. If just - 'json' or 'pickle', predefined JSON and pickle packers will be used. - Otherwise, the entire importstring must be used. - - The functions must accept at least valid JSON input, and output - *bytes*. - - For example, to use msgpack: - packer = 'msgpack.packb', unpacker='msgpack.unpackb' - pack/unpack : callables - You can also set the pack/unpack callables for serialization - directly. - session : unicode (must be ascii) - the ID of this Session object. The default is to generate a new - UUID. - bsession : bytes - The session as bytes - username : unicode - username added to message headers. The default is to ask the OS. - key : bytes - The key used to initialize an HMAC signature. If unset, messages - will not be signed or checked. - signature_scheme : str - The message digest scheme. Currently must be of the form 'hmac-HASH', - where 'HASH' is a hashing function available in Python's hashlib. - The default is 'hmac-sha256'. - This is ignored if 'key' is empty. - keyfile : filepath - The file containing a key. If this is set, `key` will be - initialized to the contents of the file. 
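
The auth object built by _new_auth can be reproduced with the standard library alone; a minimal sketch of the default hmac-sha256 scheme over four made-up packed parts (the key and parts are placeholders, not real message data):

    import hashlib
    import hmac

    key = b'secret'  # stands in for Session.key
    parts = [b'{"msg_id": "1"}', b'{}', b'{}', b'{}']  # p_header, p_parent, p_metadata, p_content

    h = hmac.new(key, digestmod=hashlib.sha256)
    for part in parts:  # the same update loop Session.sign performs on a copy of self.auth
        h.update(part)
    signature = h.hexdigest().encode('ascii')  # the HMAC frame that precedes the packed parts
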
- """ - super(Session, self).__init__(**kwargs) - self._check_packers() - self.none = self.pack({}) - # ensure self._session_default() if necessary, so bsession is defined: - self.session - self.pid = os.getpid() - self._new_auth() - - @property - def msg_id(self): - """always return new uuid""" - return str(uuid.uuid4()) - - def _check_packers(self): - """check packers for datetime support.""" - pack = self.pack - unpack = self.unpack - - # check simple serialization - msg = dict(a=[1,'hi']) - try: - packed = pack(msg) - except Exception as e: - msg = "packer '{packer}' could not serialize a simple message: {e}{jsonmsg}" - if self.packer == 'json': - jsonmsg = "\nzmq.utils.jsonapi.jsonmod = %s" % jsonapi.jsonmod - else: - jsonmsg = "" - raise ValueError( - msg.format(packer=self.packer, e=e, jsonmsg=jsonmsg) - ) - - # ensure packed message is bytes - if not isinstance(packed, bytes): - raise ValueError("message packed to %r, but bytes are required"%type(packed)) - - # check that unpack is pack's inverse - try: - unpacked = unpack(packed) - assert unpacked == msg - except Exception as e: - msg = "unpacker '{unpacker}' could not handle output from packer '{packer}': {e}{jsonmsg}" - if self.packer == 'json': - jsonmsg = "\nzmq.utils.jsonapi.jsonmod = %s" % jsonapi.jsonmod - else: - jsonmsg = "" - raise ValueError( - msg.format(packer=self.packer, unpacker=self.unpacker, e=e, jsonmsg=jsonmsg) - ) - - # check datetime support - msg = dict(t=datetime.now()) - try: - unpacked = unpack(pack(msg)) - if isinstance(unpacked['t'], datetime): - raise ValueError("Shouldn't deserialize to datetime") - except Exception: - self.pack = lambda o: pack(squash_dates(o)) - self.unpack = lambda s: unpack(s) - - def msg_header(self, msg_type): - return msg_header(self.msg_id, msg_type, self.username, self.session) - - def msg(self, msg_type, content=None, parent=None, header=None, metadata=None): - """Return the nested message dict. - - This format is different from what is sent over the wire. The - serialize/deserialize methods converts this nested message dict to the wire - format, which is a list of message parts. - """ - msg = {} - header = self.msg_header(msg_type) if header is None else header - msg['header'] = header - msg['msg_id'] = header['msg_id'] - msg['msg_type'] = header['msg_type'] - msg['parent_header'] = {} if parent is None else extract_header(parent) - msg['content'] = {} if content is None else content - msg['metadata'] = self.metadata.copy() - if metadata is not None: - msg['metadata'].update(metadata) - return msg - - def sign(self, msg_list): - """Sign a message with HMAC digest. If no auth, return b''. - - Parameters - ---------- - msg_list : list - The [p_header,p_parent,p_content] part of the message list. - """ - if self.auth is None: - return b'' - h = self.auth.copy() - for m in msg_list: - h.update(m) - return str_to_bytes(h.hexdigest()) - - def serialize(self, msg, ident=None): - """Serialize the message components to bytes. - - This is roughly the inverse of deserialize. The serialize/deserialize - methods work with full message lists, whereas pack/unpack work with - the individual message parts in the message list. - - Parameters - ---------- - msg : dict or Message - The next message dict as returned by the self.msg method. - - Returns - ------- - msg_list : list - The list of bytes objects to be sent with the format:: - - [ident1, ident2, ..., DELIM, HMAC, p_header, p_parent, - p_metadata, p_content, buffer1, buffer2, ...] 
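
That wire format can be exercised round-trip with the methods defined in this file; a minimal sketch, assuming the default JSON packer and no zmq routing prefix:

    from jupyter_client.session import Session

    s = Session(key=b'secret')  # placeholder key; enables HMAC signing
    msg = s.msg('execute_request', content={'code': 'a = 1'})
    wire = s.serialize(msg)                  # [DELIM, HMAC, p_header, p_parent, p_metadata, p_content]
    idents, parts = s.feed_identities(wire)  # idents is empty: no routing prefix was added
    restored = s.deserialize(parts)
    assert restored['content'] == {'code': 'a = 1'}
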
- - In this list, the ``p_*`` entities are the packed or serialized - versions, so if JSON is used, these are utf8 encoded JSON strings. - """ - content = msg.get('content', {}) - if content is None: - content = self.none - elif isinstance(content, dict): - content = self.pack(content) - elif isinstance(content, bytes): - # content is already packed, as in a relayed message - pass - elif isinstance(content, unicode_type): - # should be bytes, but JSON often spits out unicode - content = content.encode('utf8') - else: - raise TypeError("Content incorrect type: %s"%type(content)) - - real_message = [self.pack(msg['header']), - self.pack(msg['parent_header']), - self.pack(msg['metadata']), - content, - ] - - to_send = [] - - if isinstance(ident, list): - # accept list of idents - to_send.extend(ident) - elif ident is not None: - to_send.append(ident) - to_send.append(DELIM) - - signature = self.sign(real_message) - to_send.append(signature) - - to_send.extend(real_message) - - return to_send - - def send(self, stream, msg_or_type, content=None, parent=None, ident=None, - buffers=None, track=False, header=None, metadata=None): - """Build and send a message via stream or socket. - - The message format used by this function internally is as follows: - - [ident1,ident2,...,DELIM,HMAC,p_header,p_parent,p_content, - buffer1,buffer2,...] - - The serialize/deserialize methods convert the nested message dict into this - format. - - Parameters - ---------- - - stream : zmq.Socket or ZMQStream - The socket-like object used to send the data. - msg_or_type : str or Message/dict - Normally, msg_or_type will be a msg_type unless a message is being - sent more than once. If a header is supplied, this can be set to - None and the msg_type will be pulled from the header. - - content : dict or None - The content of the message (ignored if msg_or_type is a message). - header : dict or None - The header dict for the message (ignored if msg_to_type is a message). - parent : Message or dict or None - The parent or parent header describing the parent of this message - (ignored if msg_or_type is a message). - ident : bytes or list of bytes - The zmq.IDENTITY routing path. - metadata : dict or None - The metadata describing the message - buffers : list or None - The already-serialized buffers to be appended to the message. - track : bool - Whether to track. Only for use with Sockets, because ZMQStream - objects cannot track messages. - - - Returns - ------- - msg : dict - The constructed message. - """ - if not isinstance(stream, zmq.Socket): - # ZMQStreams and dummy sockets do not support tracking. - track = False - - if isinstance(msg_or_type, (Message, dict)): - # We got a Message or message dict, not a msg_type so don't - # build a new Message. 
- msg = msg_or_type - buffers = buffers or msg.get('buffers', []) - else: - msg = self.msg(msg_or_type, content=content, parent=parent, - header=header, metadata=metadata) - if not os.getpid() == self.pid: - get_logger().warn("WARNING: attempted to send message from fork\n%s", - msg - ) - return - buffers = [] if buffers is None else buffers - if self.adapt_version: - msg = adapt(msg, self.adapt_version) - to_send = self.serialize(msg, ident) - to_send.extend(buffers) - longest = max([ len(s) for s in to_send ]) - copy = (longest < self.copy_threshold) - - if buffers and track and not copy: - # only really track when we are doing zero-copy buffers - tracker = stream.send_multipart(to_send, copy=False, track=True) - else: - # use dummy tracker, which will be done immediately - tracker = DONE - stream.send_multipart(to_send, copy=copy) - - if self.debug: - pprint.pprint(msg) - pprint.pprint(to_send) - pprint.pprint(buffers) - - msg['tracker'] = tracker - - return msg - - def send_raw(self, stream, msg_list, flags=0, copy=True, ident=None): - """Send a raw message via ident path. - - This method is used to send a already serialized message. - - Parameters - ---------- - stream : ZMQStream or Socket - The ZMQ stream or socket to use for sending the message. - msg_list : list - The serialized list of messages to send. This only includes the - [p_header,p_parent,p_metadata,p_content,buffer1,buffer2,...] portion of - the message. - ident : ident or list - A single ident or a list of idents to use in sending. - """ - to_send = [] - if isinstance(ident, bytes): - ident = [ident] - if ident is not None: - to_send.extend(ident) - - to_send.append(DELIM) - to_send.append(self.sign(msg_list)) - to_send.extend(msg_list) - stream.send_multipart(to_send, flags, copy=copy) - - def recv(self, socket, mode=zmq.NOBLOCK, content=True, copy=True): - """Receive and unpack a message. - - Parameters - ---------- - socket : ZMQStream or Socket - The socket or stream to use in receiving. - - Returns - ------- - [idents], msg - [idents] is a list of idents and msg is a nested message dict of - same format as self.msg returns. - """ - if isinstance(socket, ZMQStream): - socket = socket.socket - try: - msg_list = socket.recv_multipart(mode, copy=copy) - except zmq.ZMQError as e: - if e.errno == zmq.EAGAIN: - # We can convert EAGAIN to None as we know in this case - # recv_multipart won't return None. - return None,None - else: - raise - # split multipart message into identity list and message dict - # invalid large messages can cause very expensive string comparisons - idents, msg_list = self.feed_identities(msg_list, copy) - try: - return idents, self.deserialize(msg_list, content=content, copy=copy) - except Exception as e: - # TODO: handle it - raise e - - def feed_identities(self, msg_list, copy=True): - """Split the identities from the rest of the message. - - Feed until DELIM is reached, then return the prefix as idents and - remainder as msg_list. This is easily broken by setting an IDENT to DELIM, - but that would be silly. - - Parameters - ---------- - msg_list : a list of Message or bytes objects - The message to be split. - copy : bool - flag determining whether the arguments are bytes or Messages - - Returns - ------- - (idents, msg_list) : two lists - idents will always be a list of bytes, each of which is a ZMQ - identity. msg_list will be a list of bytes or zmq.Messages of the - form [HMAC,p_header,p_parent,p_content,buffer1,buffer2,...] 
and - should be unpackable/unserializable via self.deserialize at this - point. - """ - if copy: - idx = msg_list.index(DELIM) - return msg_list[:idx], msg_list[idx+1:] - else: - failed = True - for idx,m in enumerate(msg_list): - if m.bytes == DELIM: - failed = False - break - if failed: - raise ValueError("DELIM not in msg_list") - idents, msg_list = msg_list[:idx], msg_list[idx+1:] - return [m.bytes for m in idents], msg_list - - def _add_digest(self, signature): - """add a digest to history to protect against replay attacks""" - if self.digest_history_size == 0: - # no history, never add digests - return - - self.digest_history.add(signature) - if len(self.digest_history) > self.digest_history_size: - # threshold reached, cull 10% - self._cull_digest_history() - - def _cull_digest_history(self): - """cull the digest history - - Removes a randomly selected 10% of the digest history - """ - current = len(self.digest_history) - n_to_cull = max(int(current // 10), current - self.digest_history_size) - if n_to_cull >= current: - self.digest_history = set() - return - to_cull = random.sample(self.digest_history, n_to_cull) - self.digest_history.difference_update(to_cull) - - def deserialize(self, msg_list, content=True, copy=True): - """Unserialize a msg_list to a nested message dict. - - This is roughly the inverse of serialize. The serialize/deserialize - methods work with full message lists, whereas pack/unpack work with - the individual message parts in the message list. - - Parameters - ---------- - msg_list : list of bytes or Message objects - The list of message parts of the form [HMAC,p_header,p_parent, - p_metadata,p_content,buffer1,buffer2,...]. - content : bool (True) - Whether to unpack the content dict (True), or leave it packed - (False). - copy : bool (True) - Whether msg_list contains bytes (True) or the non-copying Message - objects in each place (False). - - Returns - ------- - msg : dict - The nested message dict with top-level keys [header, parent_header, - content, buffers]. The buffers are returned as memoryviews. - """ - minlen = 5 - message = {} - if not copy: - # pyzmq didn't copy the first parts of the message, so we'll do it - for i in range(minlen): - msg_list[i] = msg_list[i].bytes - if self.auth is not None: - signature = msg_list[0] - if not signature: - raise ValueError("Unsigned Message") - if signature in self.digest_history: - raise ValueError("Duplicate Signature: %r" % signature) - self._add_digest(signature) - check = self.sign(msg_list[1:5]) - if not compare_digest(signature, check): - raise ValueError("Invalid Signature: %r" % signature) - if not len(msg_list) >= minlen: - raise TypeError("malformed message, must have at least %i elements"%minlen) - header = self.unpack(msg_list[1]) - message['header'] = extract_dates(header) - message['msg_id'] = header['msg_id'] - message['msg_type'] = header['msg_type'] - message['parent_header'] = extract_dates(self.unpack(msg_list[2])) - message['metadata'] = self.unpack(msg_list[3]) - if content: - message['content'] = self.unpack(msg_list[4]) - else: - message['content'] = msg_list[4] - buffers = [memoryview(b) for b in msg_list[5:]] - if buffers and buffers[0].shape is None: - # force copy to workaround pyzmq #646 - buffers = [memoryview(b.bytes) for b in msg_list[5:]] - message['buffers'] = buffers - # adapt to the current version - return adapt(message) - - def unserialize(self, *args, **kwargs): - warnings.warn( - "Session.unserialize is deprecated. 
Use Session.deserialize.", - DeprecationWarning, - ) - return self.deserialize(*args, **kwargs) - - -def test_msg2obj(): - am = dict(x=1) - ao = Message(am) - assert ao.x == am['x'] - - am['y'] = dict(z=1) - ao = Message(am) - assert ao.y.z == am['y']['z'] - - k1, k2 = 'y', 'z' - assert ao[k1][k2] == am[k1][k2] - - am2 = dict(ao) - assert am['x'] == am2['x'] - assert am['y']['z'] == am2['y']['z'] diff --git a/jupyter_client/tests/__init__.py b/jupyter_client/tests/__init__.py deleted file mode 100644 index e69de29..0000000 --- a/jupyter_client/tests/__init__.py +++ /dev/null diff --git a/jupyter_client/tests/test_adapter.py b/jupyter_client/tests/test_adapter.py deleted file mode 100644 index 34f71f0..0000000 --- a/jupyter_client/tests/test_adapter.py +++ /dev/null @@ -1,389 +0,0 @@ -"""Tests for adapting IPython msg spec versions""" - -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. - -import copy -import json -from unittest import TestCase -import nose.tools as nt - -from jupyter_client.adapter import adapt, V4toV5, V5toV4, code_to_line -from jupyter_client.session import Session - - -def test_default_version(): - s = Session() - msg = s.msg("msg_type") - msg['header'].pop('version') - original = copy.deepcopy(msg) - adapted = adapt(original) - nt.assert_equal(adapted['header']['version'], V4toV5.version) - -def test_code_to_line_no_code(): - line, pos = code_to_line("", 0) - nt.assert_equal(line, "") - nt.assert_equal(pos, 0) - -class AdapterTest(TestCase): - - def setUp(self): - self.session = Session() - - def adapt(self, msg, version=None): - original = copy.deepcopy(msg) - adapted = adapt(msg, version or self.to_version) - return original, adapted - - def check_header(self, msg): - pass - - -class V4toV5TestCase(AdapterTest): - from_version = 4 - to_version = 5 - - def msg(self, msg_type, content): - """Create a v4 msg (same as v5, minus version header)""" - msg = self.session.msg(msg_type, content) - msg['header'].pop('version') - return msg - - def test_same_version(self): - msg = self.msg("execute_result", - content={'status' : 'ok'} - ) - original, adapted = self.adapt(msg, self.from_version) - - self.assertEqual(original, adapted) - - def test_no_adapt(self): - msg = self.msg("input_reply", {'value' : 'some text'}) - v4, v5 = self.adapt(msg) - self.assertEqual(v5['header']['version'], V4toV5.version) - v5['header'].pop('version') - self.assertEqual(v4, v5) - - def test_rename_type(self): - for v5_type, v4_type in [ - ('execute_result', 'pyout'), - ('execute_input', 'pyin'), - ('error', 'pyerr'), - ]: - msg = self.msg(v4_type, {'key' : 'value'}) - v4, v5 = self.adapt(msg) - self.assertEqual(v5['header']['version'], V4toV5.version) - self.assertEqual(v5['header']['msg_type'], v5_type) - self.assertEqual(v4['content'], v5['content']) - - def test_execute_request(self): - msg = self.msg("execute_request", { - 'code' : 'a=5', - 'silent' : False, - 'user_expressions' : {'a' : 'apple'}, - 'user_variables' : ['b'], - }) - v4, v5 = self.adapt(msg) - self.assertEqual(v4['header']['msg_type'], v5['header']['msg_type']) - v4c = v4['content'] - v5c = v5['content'] - self.assertEqual(v5c['user_expressions'], {'a' : 'apple', 'b': 'b'}) - self.assertNotIn('user_variables', v5c) - self.assertEqual(v5c['code'], v4c['code']) - - def test_execute_reply(self): - msg = self.msg("execute_reply", { - 'status': 'ok', - 'execution_count': 7, - 'user_variables': {'a': 1}, - 'user_expressions': {'a+a': 2}, - 'payload': [{'source':'page', 
'text':'blah'}] - }) - v4, v5 = self.adapt(msg) - v5c = v5['content'] - self.assertNotIn('user_variables', v5c) - self.assertEqual(v5c['user_expressions'], {'a': 1, 'a+a': 2}) - self.assertEqual(v5c['payload'], [{'source': 'page', - 'data': {'text/plain': 'blah'}} - ]) - - def test_complete_request(self): - msg = self.msg("complete_request", { - 'text' : 'a.is', - 'line' : 'foo = a.is', - 'block' : None, - 'cursor_pos' : 10, - }) - v4, v5 = self.adapt(msg) - v4c = v4['content'] - v5c = v5['content'] - for key in ('text', 'line', 'block'): - self.assertNotIn(key, v5c) - self.assertEqual(v5c['cursor_pos'], v4c['cursor_pos']) - self.assertEqual(v5c['code'], v4c['line']) - - def test_complete_reply(self): - msg = self.msg("complete_reply", { - 'matched_text' : 'a.is', - 'matches' : ['a.isalnum', - 'a.isalpha', - 'a.isdigit', - 'a.islower', - ], - }) - v4, v5 = self.adapt(msg) - v4c = v4['content'] - v5c = v5['content'] - - self.assertEqual(v5c['matches'], v4c['matches']) - self.assertEqual(v5c['metadata'], {}) - self.assertEqual(v5c['cursor_start'], -4) - self.assertEqual(v5c['cursor_end'], None) - - def test_object_info_request(self): - msg = self.msg("object_info_request", { - 'oname' : 'foo', - 'detail_level' : 1, - }) - v4, v5 = self.adapt(msg) - self.assertEqual(v5['header']['msg_type'], 'inspect_request') - v4c = v4['content'] - v5c = v5['content'] - self.assertEqual(v5c['code'], v4c['oname']) - self.assertEqual(v5c['cursor_pos'], len(v4c['oname'])) - self.assertEqual(v5c['detail_level'], v4c['detail_level']) - - def test_object_info_reply(self): - msg = self.msg("object_info_reply", { - 'oname' : 'foo', - 'found' : True, - 'status' : 'ok', - 'definition' : 'foo(a=5)', - 'docstring' : "the docstring", - }) - v4, v5 = self.adapt(msg) - self.assertEqual(v5['header']['msg_type'], 'inspect_reply') - v4c = v4['content'] - v5c = v5['content'] - self.assertEqual(sorted(v5c), [ 'data', 'found', 'metadata', 'name', 'status']) - text = v5c['data']['text/plain'] - self.assertEqual(text, '\n'.join([v4c['definition'], v4c['docstring']])) - - def test_kernel_info_reply(self): - msg = self.msg("kernel_info_reply", { - 'language': 'python', - 'language_version': [2,8,0], - 'ipython_version': [1,2,3], - }) - v4, v5 = self.adapt(msg) - v4c = v4['content'] - v5c = v5['content'] - self.assertEqual(v5c, { - 'protocol_version': '4.1', - 'implementation': 'ipython', - 'implementation_version': '1.2.3', - 'language_info': { - 'name': 'python', - 'version': '2.8.0', - }, - 'banner' : '', - }) - - # iopub channel - - def test_display_data(self): - jsondata = dict(a=5) - msg = self.msg("display_data", { - 'data' : { - 'text/plain' : 'some text', - 'application/json' : json.dumps(jsondata) - }, - 'metadata' : {'text/plain' : { 'key' : 'value' }}, - }) - v4, v5 = self.adapt(msg) - v4c = v4['content'] - v5c = v5['content'] - self.assertEqual(v5c['metadata'], v4c['metadata']) - self.assertEqual(v5c['data']['text/plain'], v4c['data']['text/plain']) - self.assertEqual(v5c['data']['application/json'], jsondata) - - # stdin channel - - def test_input_request(self): - msg = self.msg('input_request', {'prompt': "$>"}) - v4, v5 = self.adapt(msg) - self.assertEqual(v5['content']['prompt'], v4['content']['prompt']) - self.assertFalse(v5['content']['password']) - - -class V5toV4TestCase(AdapterTest): - from_version = 5 - to_version = 4 - - def msg(self, msg_type, content): - return self.session.msg(msg_type, content) - - def test_same_version(self): - msg = self.msg("execute_result", - content={'status' : 'ok'} - ) - original, 
adapted = self.adapt(msg, self.from_version) - - self.assertEqual(original, adapted) - - def test_no_adapt(self): - msg = self.msg("input_reply", {'value' : 'some text'}) - v5, v4 = self.adapt(msg) - self.assertNotIn('version', v4['header']) - v5['header'].pop('version') - self.assertEqual(v4, v5) - - def test_rename_type(self): - for v5_type, v4_type in [ - ('execute_result', 'pyout'), - ('execute_input', 'pyin'), - ('error', 'pyerr'), - ]: - msg = self.msg(v5_type, {'key' : 'value'}) - v5, v4 = self.adapt(msg) - self.assertEqual(v4['header']['msg_type'], v4_type) - nt.assert_not_in('version', v4['header']) - self.assertEqual(v4['content'], v5['content']) - - def test_execute_request(self): - msg = self.msg("execute_request", { - 'code' : 'a=5', - 'silent' : False, - 'user_expressions' : {'a' : 'apple'}, - }) - v5, v4 = self.adapt(msg) - self.assertEqual(v4['header']['msg_type'], v5['header']['msg_type']) - v4c = v4['content'] - v5c = v5['content'] - self.assertEqual(v4c['user_variables'], []) - self.assertEqual(v5c['code'], v4c['code']) - - def test_complete_request(self): - msg = self.msg("complete_request", { - 'code' : 'def foo():\n' - ' a.is\n' - 'foo()', - 'cursor_pos': 19, - }) - v5, v4 = self.adapt(msg) - v4c = v4['content'] - v5c = v5['content'] - self.assertNotIn('code', v4c) - self.assertEqual(v4c['line'], v5c['code'].splitlines(True)[1]) - self.assertEqual(v4c['cursor_pos'], 8) - self.assertEqual(v4c['text'], '') - self.assertEqual(v4c['block'], None) - - def test_complete_reply(self): - msg = self.msg("complete_reply", { - 'cursor_start' : 10, - 'cursor_end' : 14, - 'matches' : ['a.isalnum', - 'a.isalpha', - 'a.isdigit', - 'a.islower', - ], - 'metadata' : {}, - }) - v5, v4 = self.adapt(msg) - v4c = v4['content'] - v5c = v5['content'] - self.assertEqual(v4c['matched_text'], 'a.is') - self.assertEqual(v4c['matches'], v5c['matches']) - - def test_inspect_request(self): - msg = self.msg("inspect_request", { - 'code' : 'def foo():\n' - ' apple\n' - 'bar()', - 'cursor_pos': 18, - 'detail_level' : 1, - }) - v5, v4 = self.adapt(msg) - self.assertEqual(v4['header']['msg_type'], 'object_info_request') - v4c = v4['content'] - v5c = v5['content'] - self.assertEqual(v4c['oname'], 'apple') - self.assertEqual(v5c['detail_level'], v4c['detail_level']) - - def test_inspect_request_token(self): - line = 'something(range(10), kwarg=smth) ; xxx.xxx.xxx( firstarg, rand(234,23), kwarg1=2,' - msg = self.msg("inspect_request", { - 'code' : line, - 'cursor_pos': len(line)-1, - 'detail_level' : 1, - }) - v5, v4 = self.adapt(msg) - self.assertEqual(v4['header']['msg_type'], 'object_info_request') - v4c = v4['content'] - v5c = v5['content'] - self.assertEqual(v4c['oname'], 'xxx.xxx.xxx') - self.assertEqual(v5c['detail_level'], v4c['detail_level']) - - def test_inspect_reply(self): - msg = self.msg("inspect_reply", { - 'name' : 'foo', - 'found' : True, - 'data' : {'text/plain' : 'some text'}, - 'metadata' : {}, - }) - v5, v4 = self.adapt(msg) - self.assertEqual(v4['header']['msg_type'], 'object_info_reply') - v4c = v4['content'] - v5c = v5['content'] - self.assertEqual(sorted(v4c), ['found', 'oname']) - self.assertEqual(v4c['found'], False) - - def test_kernel_info_reply(self): - msg = self.msg("kernel_info_reply", { - 'protocol_version': '5.0', - 'implementation': 'ipython', - 'implementation_version': '1.2.3', - 'language_info': { - 'name': 'python', - 'version': '2.8.0', - 'mimetype': 'text/x-python', - }, - 'banner' : 'the banner', - }) - v5, v4 = self.adapt(msg) - v4c = v4['content'] - v5c = 
v5['content'] - info = v5c['language_info'] - self.assertEqual(v4c, { - 'protocol_version': [5,0], - 'language': 'python', - 'language_version': [2,8,0], - 'ipython_version': [1,2,3], - }) - - # iopub channel - - def test_display_data(self): - jsondata = dict(a=5) - msg = self.msg("display_data", { - 'data' : { - 'text/plain' : 'some text', - 'application/json' : jsondata, - }, - 'metadata' : {'text/plain' : { 'key' : 'value' }}, - }) - v5, v4 = self.adapt(msg) - v4c = v4['content'] - v5c = v5['content'] - self.assertEqual(v5c['metadata'], v4c['metadata']) - self.assertEqual(v5c['data']['text/plain'], v4c['data']['text/plain']) - self.assertEqual(v4c['data']['application/json'], json.dumps(jsondata)) - - # stdin channel - - def test_input_request(self): - msg = self.msg('input_request', {'prompt': "$>", 'password' : True}) - v5, v4 = self.adapt(msg) - self.assertEqual(v5['content']['prompt'], v4['content']['prompt']) - self.assertNotIn('password', v4['content']) diff --git a/jupyter_client/tests/test_connect.py b/jupyter_client/tests/test_connect.py deleted file mode 100644 index 6eaec5b..0000000 --- a/jupyter_client/tests/test_connect.py +++ /dev/null @@ -1,96 +0,0 @@ -"""Tests for kernel connection utilities""" - -# Copyright (c) Jupyter Development Team. -# Distributed under the terms of the Modified BSD License. - -import json -import os - -import nose.tools as nt - -from IPython.config import Config -from IPython.consoleapp import IPythonConsoleApp -from IPython.core.application import BaseIPythonApplication -from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory -from IPython.utils.py3compat import str_to_bytes -from jupyter_client import connect -from jupyter_client.session import Session - - -class DummyConsoleApp(BaseIPythonApplication, IPythonConsoleApp): - def initialize(self, argv=[]): - BaseIPythonApplication.initialize(self, argv=argv) - self.init_connection_file() - -sample_info = dict(ip='1.2.3.4', transport='ipc', - shell_port=1, hb_port=2, iopub_port=3, stdin_port=4, control_port=5, - key=b'abc123', signature_scheme='hmac-md5', - ) - -def test_write_connection_file(): - with TemporaryDirectory() as d: - cf = os.path.join(d, 'kernel.json') - connect.write_connection_file(cf, **sample_info) - nt.assert_true(os.path.exists(cf)) - with open(cf, 'r') as f: - info = json.load(f) - info['key'] = str_to_bytes(info['key']) - nt.assert_equal(info, sample_info) - - -def test_load_connection_file_session(): - """test load_connection_file() after """ - session = Session() - app = DummyConsoleApp(session=Session()) - app.initialize(argv=[]) - session = app.session - - with TemporaryDirectory() as d: - cf = os.path.join(d, 'kernel.json') - connect.write_connection_file(cf, **sample_info) - app.connection_file = cf - app.load_connection_file() - - nt.assert_equal(session.key, sample_info['key']) - nt.assert_equal(session.signature_scheme, sample_info['signature_scheme']) - - -def test_app_load_connection_file(): - """test `ipython console --existing` loads a connection file""" - with TemporaryDirectory() as d: - cf = os.path.join(d, 'kernel.json') - connect.write_connection_file(cf, **sample_info) - app = DummyConsoleApp(connection_file=cf) - app.initialize(argv=[]) - - for attr, expected in sample_info.items(): - if attr in ('key', 'signature_scheme'): - continue - value = getattr(app, attr) - nt.assert_equal(value, expected, "app.%s = %s != %s" % (attr, value, expected)) - - -def test_find_connection_file(): - cfg = Config() - with TemporaryDirectory() as d: - 
cfg.ProfileDir.location = d - cf = 'kernel.json' - app = DummyConsoleApp(config=cfg, connection_file=cf) - app.initialize() - - security_dir = os.path.join(app.profile_dir.location, 'security') - profile_cf = os.path.join(security_dir, cf) - - with open(profile_cf, 'w') as f: - f.write("{}") - - for query in ( - 'kernel.json', - 'kern*', - '*ernel*', - 'k*', - ): - nt.assert_equal(connect.find_connection_file(query, path=security_dir), profile_cf) - - BaseIPythonApplication._instance = None - diff --git a/jupyter_client/tests/test_jsonutil.py b/jupyter_client/tests/test_jsonutil.py deleted file mode 100644 index 4b5c9da..0000000 --- a/jupyter_client/tests/test_jsonutil.py +++ /dev/null @@ -1,167 +0,0 @@ -# coding: utf-8 -"""Test suite for our JSON utilities.""" - -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. - -import datetime -import json -from base64 import decodestring - -import nose.tools as nt - -from jupyter_client import jsonutil -from ..jsonutil import json_clean, encode_images -from IPython.utils.py3compat import unicode_to_str, str_to_bytes, iteritems - - -class Int(int): - def __str__(self): - return 'Int(%i)' % self - -def test(): - # list of input/expected output. Use None for the expected output if it - # can be the same as the input. - pairs = [(1, None), # start with scalars - (1.0, None), - ('a', None), - (True, None), - (False, None), - (None, None), - # complex numbers for now just go to strings, as otherwise they - # are unserializable - (1j, '1j'), - # Containers - ([1, 2], None), - ((1, 2), [1, 2]), - (set([1, 2]), [1, 2]), - (dict(x=1), None), - ({'x': 1, 'y':[1,2,3], '1':'int'}, None), - # More exotic objects - ((x for x in range(3)), [0, 1, 2]), - (iter([1, 2]), [1, 2]), - (Int(5), 5), - ] - - for val, jval in pairs: - if jval is None: - jval = val - out = json_clean(val) - # validate our cleanup - nt.assert_equal(out, jval) - # and ensure that what we return, indeed encodes cleanly - json.loads(json.dumps(out)) - - -def test_rekey(): - # This could fail due to modifying the dict keys in-place on Python 3 - d = { i:i for i in map(str, range(128)) } - d = jsonutil.rekey(d) - for key in d: - nt.assert_is_instance(key, int) - - -def test_encode_images(): - # invalid data, but the header and footer are from real files - pngdata = b'\x89PNG\r\n\x1a\nblahblahnotactuallyvalidIEND\xaeB`\x82' - jpegdata = b'\xff\xd8\xff\xe0\x00\x10JFIFblahblahjpeg(\xa0\x0f\xff\xd9' - pdfdata = b'%PDF-1.\ntrailer<>]>>>>>>' - - fmt = { - 'image/png' : pngdata, - 'image/jpeg' : jpegdata, - 'application/pdf' : pdfdata - } - encoded = encode_images(fmt) - for key, value in iteritems(fmt): - # encoded has unicode, want bytes - decoded = decodestring(encoded[key].encode('ascii')) - nt.assert_equal(decoded, value) - encoded2 = encode_images(encoded) - nt.assert_equal(encoded, encoded2) - - b64_str = {} - for key, encoded in iteritems(encoded): - b64_str[key] = unicode_to_str(encoded) - encoded3 = encode_images(b64_str) - nt.assert_equal(encoded3, b64_str) - for key, value in iteritems(fmt): - # encoded3 has str, want bytes - decoded = decodestring(str_to_bytes(encoded3[key])) - nt.assert_equal(decoded, value) - -def test_lambda(): - jc = json_clean(lambda : 1) - nt.assert_is_instance(jc, str) - nt.assert_in('', jc) - json.dumps(jc) - -def test_extract_dates(): - timestamps = [ - '2013-07-03T16:34:52.249482', - '2013-07-03T16:34:52.249482Z', - '2013-07-03T16:34:52.249482Z-0800', - '2013-07-03T16:34:52.249482Z+0800', - 
'2013-07-03T16:34:52.249482Z+08:00', - '2013-07-03T16:34:52.249482Z-08:00', - '2013-07-03T16:34:52.249482-0800', - '2013-07-03T16:34:52.249482+0800', - '2013-07-03T16:34:52.249482+08:00', - '2013-07-03T16:34:52.249482-08:00', - ] - extracted = jsonutil.extract_dates(timestamps) - ref = extracted[0] - for dt in extracted: - nt.assert_true(isinstance(dt, datetime.datetime)) - nt.assert_equal(dt, ref) - -def test_parse_ms_precision(): - base = '2013-07-03T16:34:52' - digits = '1234567890' - - parsed = jsonutil.parse_date(base) - nt.assert_is_instance(parsed, datetime.datetime) - for i in range(len(digits)): - ts = base + '.' + digits[:i] - parsed = jsonutil.parse_date(ts) - if i >= 1 and i <= 6: - nt.assert_is_instance(parsed, datetime.datetime) - else: - nt.assert_is_instance(parsed, str) - - -ZERO = datetime.timedelta(0) - -class tzUTC(datetime.tzinfo): - """tzinfo object for UTC (zero offset)""" - - def utcoffset(self, d): - return ZERO - - def dst(self, d): - return ZERO - -UTC = tzUTC() - -def test_date_default(): - now = today=datetime.datetime.now() - utcnow = now.replace(tzinfo=UTC) - data = dict(now=now, utcnow=utcnow) - jsondata = json.dumps(data, default=jsonutil.date_default) - nt.assert_in("+00", jsondata) - nt.assert_equal(jsondata.count("+00"), 1) - extracted = jsonutil.extract_dates(json.loads(jsondata)) - for dt in extracted.values(): - nt.assert_is_instance(dt, datetime.datetime) - -def test_exception(): - bad_dicts = [{1:'number', '1':'string'}, - {True:'bool', 'True':'string'}, - ] - for d in bad_dicts: - nt.assert_raises(ValueError, json_clean, d) - -def test_unicode_dict(): - data = {u'üniço∂e': u'üniço∂e'} - clean = jsonutil.json_clean(data) - nt.assert_equal(data, clean) diff --git a/jupyter_client/tests/test_kernelmanager.py b/jupyter_client/tests/test_kernelmanager.py deleted file mode 100644 index 067dd4f..0000000 --- a/jupyter_client/tests/test_kernelmanager.py +++ /dev/null @@ -1,53 +0,0 @@ -"""Tests for the notebook kernel and session manager""" - -from subprocess import PIPE -import time -from unittest import TestCase - -from IPython.testing import decorators as dec - -from IPython.config.loader import Config -from jupyter_client import KernelManager - -class TestKernelManager(TestCase): - - def _get_tcp_km(self): - c = Config() - km = KernelManager(config=c) - return km - - def _get_ipc_km(self): - c = Config() - c.KernelManager.transport = 'ipc' - c.KernelManager.ip = 'test' - km = KernelManager(config=c) - return km - - def _run_lifecycle(self, km): - km.start_kernel(stdout=PIPE, stderr=PIPE) - self.assertTrue(km.is_alive()) - km.restart_kernel(now=True) - self.assertTrue(km.is_alive()) - km.interrupt_kernel() - self.assertTrue(isinstance(km, KernelManager)) - km.shutdown_kernel(now=True) - - def test_tcp_lifecycle(self): - km = self._get_tcp_km() - self._run_lifecycle(km) - - @dec.skip_win32 - def test_ipc_lifecycle(self): - km = self._get_ipc_km() - self._run_lifecycle(km) - - def test_get_connect_info(self): - km = self._get_tcp_km() - cinfo = km.get_connection_info() - keys = sorted(cinfo.keys()) - expected = sorted([ - 'ip', 'transport', - 'hb_port', 'shell_port', 'stdin_port', 'iopub_port', 'control_port', - 'key', 'signature_scheme', - ]) - self.assertEqual(keys, expected) diff --git a/jupyter_client/tests/test_kernelspec.py b/jupyter_client/tests/test_kernelspec.py deleted file mode 100644 index 3625f34..0000000 --- a/jupyter_client/tests/test_kernelspec.py +++ /dev/null @@ -1,64 +0,0 @@ -import json -import os -from os.path import join as pjoin 
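
The lifecycle those TestKernelManager cases walk through, as a standalone sketch; it assumes a default kernel (e.g. ipykernel) is installed and discoverable:

    from subprocess import PIPE

    from jupyter_client import KernelManager

    km = KernelManager()
    km.start_kernel(stdout=PIPE, stderr=PIPE)  # PIPE silences the kernel's output
    try:
        assert km.is_alive()
        km.restart_kernel(now=True)   # same connection ports, fresh process
        km.interrupt_kernel()         # SIGINT on Unix; Windows uses an interrupt event
    finally:
        km.shutdown_kernel(now=True)
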
-import unittest - -from IPython.testing.decorators import onlyif -from IPython.utils.tempdir import TemporaryDirectory -from jupyter_client import kernelspec - -sample_kernel_json = {'argv':['cat', '{connection_file}'], - 'display_name':'Test kernel', - } - -class KernelSpecTests(unittest.TestCase): - def setUp(self): - td = TemporaryDirectory() - self.addCleanup(td.cleanup) - self.sample_kernel_dir = pjoin(td.name, 'kernels', 'Sample') - os.makedirs(self.sample_kernel_dir) - json_file = pjoin(self.sample_kernel_dir, 'kernel.json') - with open(json_file, 'w') as f: - json.dump(sample_kernel_json, f) - - self.ksm = kernelspec.KernelSpecManager(ipython_dir=td.name) - - td2 = TemporaryDirectory() - self.addCleanup(td2.cleanup) - self.installable_kernel = td2.name - with open(pjoin(self.installable_kernel, 'kernel.json'), 'w') as f: - json.dump(sample_kernel_json, f) - - def test_find_kernel_specs(self): - kernels = self.ksm.find_kernel_specs() - self.assertEqual(kernels['sample'], self.sample_kernel_dir) - - def test_get_kernel_spec(self): - ks = self.ksm.get_kernel_spec('SAMPLE') # Case insensitive - self.assertEqual(ks.resource_dir, self.sample_kernel_dir) - self.assertEqual(ks.argv, sample_kernel_json['argv']) - self.assertEqual(ks.display_name, sample_kernel_json['display_name']) - self.assertEqual(ks.env, {}) - - def test_install_kernel_spec(self): - self.ksm.install_kernel_spec(self.installable_kernel, - kernel_name='tstinstalled', - user=True) - self.assertIn('tstinstalled', self.ksm.find_kernel_specs()) - - with self.assertRaises(OSError): - self.ksm.install_kernel_spec(self.installable_kernel, - kernel_name='tstinstalled', - user=True) - - # Smoketest that this succeeds - self.ksm.install_kernel_spec(self.installable_kernel, - kernel_name='tstinstalled', - replace=True, user=True) - - @onlyif(os.name != 'nt' and not os.access('/usr/local/share', os.W_OK), "needs Unix system without root privileges") - def test_cant_install_kernel_spec(self): - with self.assertRaises(OSError): - self.ksm.install_kernel_spec(self.installable_kernel, - kernel_name='tstinstalled', - user=False) diff --git a/jupyter_client/tests/test_localinterfaces.py b/jupyter_client/tests/test_localinterfaces.py deleted file mode 100644 index 136e0f7..0000000 --- a/jupyter_client/tests/test_localinterfaces.py +++ /dev/null @@ -1,15 +0,0 @@ -#----------------------------------------------------------------------------- -# Copyright (C) 2013 The IPython Development Team -# -# Distributed under the terms of the BSD License. The full license is in -# the file COPYING, distributed as part of this software. -#----------------------------------------------------------------------------- - -from .. 
import localinterfaces - -def test_load_ips(): - # Override the machinery that skips it if it was called before - localinterfaces._load_ips.called = False - - # Just check this doesn't error - localinterfaces._load_ips(suppress_exceptions=False) \ No newline at end of file diff --git a/jupyter_client/tests/test_multikernelmanager.py b/jupyter_client/tests/test_multikernelmanager.py deleted file mode 100644 index 925dca2..0000000 --- a/jupyter_client/tests/test_multikernelmanager.py +++ /dev/null @@ -1,86 +0,0 @@ -"""Tests for the notebook kernel and session manager.""" - -from subprocess import PIPE -import time -from unittest import TestCase - -from IPython.testing import decorators as dec - -from IPython.config.loader import Config -from ..localinterfaces import localhost -from jupyter_client import KernelManager -from jupyter_client.multikernelmanager import MultiKernelManager - -class TestKernelManager(TestCase): - - def _get_tcp_km(self): - c = Config() - km = MultiKernelManager(config=c) - return km - - def _get_ipc_km(self): - c = Config() - c.KernelManager.transport = 'ipc' - c.KernelManager.ip = 'test' - km = MultiKernelManager(config=c) - return km - - def _run_lifecycle(self, km): - kid = km.start_kernel(stdout=PIPE, stderr=PIPE) - self.assertTrue(km.is_alive(kid)) - self.assertTrue(kid in km) - self.assertTrue(kid in km.list_kernel_ids()) - self.assertEqual(len(km),1) - km.restart_kernel(kid, now=True) - self.assertTrue(km.is_alive(kid)) - self.assertTrue(kid in km.list_kernel_ids()) - km.interrupt_kernel(kid) - k = km.get_kernel(kid) - self.assertTrue(isinstance(k, KernelManager)) - km.shutdown_kernel(kid, now=True) - self.assertTrue(not kid in km) - - def _run_cinfo(self, km, transport, ip): - kid = km.start_kernel(stdout=PIPE, stderr=PIPE) - k = km.get_kernel(kid) - cinfo = km.get_connection_info(kid) - self.assertEqual(transport, cinfo['transport']) - self.assertEqual(ip, cinfo['ip']) - self.assertTrue('stdin_port' in cinfo) - self.assertTrue('iopub_port' in cinfo) - stream = km.connect_iopub(kid) - stream.close() - self.assertTrue('shell_port' in cinfo) - stream = km.connect_shell(kid) - stream.close() - self.assertTrue('hb_port' in cinfo) - stream = km.connect_hb(kid) - stream.close() - km.shutdown_kernel(kid, now=True) - - def test_tcp_lifecycle(self): - km = self._get_tcp_km() - self._run_lifecycle(km) - - def test_shutdown_all(self): - km = self._get_tcp_km() - kid = km.start_kernel(stdout=PIPE, stderr=PIPE) - self.assertIn(kid, km) - km.shutdown_all() - self.assertNotIn(kid, km) - # shutdown again is okay, because we have no kernels - km.shutdown_all() - - def test_tcp_cinfo(self): - km = self._get_tcp_km() - self._run_cinfo(km, 'tcp', localhost()) - - @dec.skip_win32 - def test_ipc_lifecycle(self): - km = self._get_ipc_km() - self._run_lifecycle(km) - - @dec.skip_win32 - def test_ipc_cinfo(self): - km = self._get_ipc_km() - self._run_cinfo(km, 'ipc', 'test') diff --git a/jupyter_client/tests/test_public_api.py b/jupyter_client/tests/test_public_api.py deleted file mode 100644 index 66f57ef..0000000 --- a/jupyter_client/tests/test_public_api.py +++ /dev/null @@ -1,40 +0,0 @@ -"""Test the jupyter_client public API - -Authors -------- -* MinRK -""" -#----------------------------------------------------------------------------- -# Copyright (c) 2013, the IPython Development Team. -# -# Distributed under the terms of the Modified BSD License. -# -# The full license is in the file COPYING.txt, distributed with this software. 
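
For reference, the multi-kernel flow that the deleted test_multikernelmanager.py above verified looks like this under the new import path; a minimal sketch using only calls that appear in the removed tests:

    # Managing several kernels at once, mirroring the deleted
    # test_multikernelmanager.py.
    from subprocess import PIPE
    from jupyter_client.multikernelmanager import MultiKernelManager

    mkm = MultiKernelManager()
    kid = mkm.start_kernel(stdout=PIPE, stderr=PIPE)   # returns a kernel id
    assert kid in mkm and mkm.is_alive(kid)
    cinfo = mkm.get_connection_info(kid)               # transport, ip, *_port, key
    mkm.shutdown_kernel(kid, now=True)
    mkm.shutdown_all()                                 # safe even with no kernels left
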
-#----------------------------------------------------------------------------- - -import nose.tools as nt - -from jupyter_client import launcher, connect -from IPython import kernel - -#----------------------------------------------------------------------------- -# Classes and functions -#----------------------------------------------------------------------------- - -def test_kms(): - for base in ("", "Multi"): - KM = base + "KernelManager" - nt.assert_in(KM, dir(kernel)) - -def test_kcs(): - for base in ("", "Blocking"): - KM = base + "KernelClient" - nt.assert_in(KM, dir(kernel)) - -def test_launcher(): - for name in launcher.__all__: - nt.assert_in(name, dir(kernel)) - -def test_connect(): - for name in connect.__all__: - nt.assert_in(name, dir(kernel)) diff --git a/jupyter_client/tests/test_session.py b/jupyter_client/tests/test_session.py deleted file mode 100644 index 27a595d..0000000 --- a/jupyter_client/tests/test_session.py +++ /dev/null @@ -1,318 +0,0 @@ -"""test building messages with Session""" - -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. - -import hmac -import os -import uuid -from datetime import datetime - -import zmq - -from zmq.tests import BaseZMQTestCase -from zmq.eventloop.zmqstream import ZMQStream - -from jupyter_client import session as ss -from jupyter_client import jsonutil - -from IPython.testing.decorators import skipif, module_not_available -from IPython.utils.py3compat import string_types - -def _bad_packer(obj): - raise TypeError("I don't work") - -def _bad_unpacker(bytes): - raise TypeError("I don't work either") - -class SessionTestCase(BaseZMQTestCase): - - def setUp(self): - BaseZMQTestCase.setUp(self) - self.session = ss.Session() - - -class TestSession(SessionTestCase): - - def test_msg(self): - """message format""" - msg = self.session.msg('execute') - thekeys = set('header parent_header metadata content msg_type msg_id'.split()) - s = set(msg.keys()) - self.assertEqual(s, thekeys) - self.assertTrue(isinstance(msg['content'],dict)) - self.assertTrue(isinstance(msg['metadata'],dict)) - self.assertTrue(isinstance(msg['header'],dict)) - self.assertTrue(isinstance(msg['parent_header'],dict)) - self.assertTrue(isinstance(msg['msg_id'],str)) - self.assertTrue(isinstance(msg['msg_type'],str)) - self.assertEqual(msg['header']['msg_type'], 'execute') - self.assertEqual(msg['msg_type'], 'execute') - - def test_serialize(self): - msg = self.session.msg('execute', content=dict(a=10, b=1.1)) - msg_list = self.session.serialize(msg, ident=b'foo') - ident, msg_list = self.session.feed_identities(msg_list) - new_msg = self.session.deserialize(msg_list) - self.assertEqual(ident[0], b'foo') - self.assertEqual(new_msg['msg_id'],msg['msg_id']) - self.assertEqual(new_msg['msg_type'],msg['msg_type']) - self.assertEqual(new_msg['header'],msg['header']) - self.assertEqual(new_msg['content'],msg['content']) - self.assertEqual(new_msg['parent_header'],msg['parent_header']) - self.assertEqual(new_msg['metadata'],msg['metadata']) - # ensure floats don't come out as Decimal: - self.assertEqual(type(new_msg['content']['b']),type(new_msg['content']['b'])) - - def test_default_secure(self): - self.assertIsInstance(self.session.key, bytes) - self.assertIsInstance(self.session.auth, hmac.HMAC) - - def test_send(self): - ctx = zmq.Context.instance() - A = ctx.socket(zmq.PAIR) - B = ctx.socket(zmq.PAIR) - A.bind("inproc://test") - B.connect("inproc://test") - - msg = self.session.msg('execute', content=dict(a=10)) - 
self.session.send(A, msg, ident=b'foo', buffers=[b'bar']) - - ident, msg_list = self.session.feed_identities(B.recv_multipart()) - new_msg = self.session.deserialize(msg_list) - self.assertEqual(ident[0], b'foo') - self.assertEqual(new_msg['msg_id'],msg['msg_id']) - self.assertEqual(new_msg['msg_type'],msg['msg_type']) - self.assertEqual(new_msg['header'],msg['header']) - self.assertEqual(new_msg['content'],msg['content']) - self.assertEqual(new_msg['parent_header'],msg['parent_header']) - self.assertEqual(new_msg['metadata'],msg['metadata']) - self.assertEqual(new_msg['buffers'],[b'bar']) - - content = msg['content'] - header = msg['header'] - header['msg_id'] = self.session.msg_id - parent = msg['parent_header'] - metadata = msg['metadata'] - msg_type = header['msg_type'] - self.session.send(A, None, content=content, parent=parent, - header=header, metadata=metadata, ident=b'foo', buffers=[b'bar']) - ident, msg_list = self.session.feed_identities(B.recv_multipart()) - new_msg = self.session.deserialize(msg_list) - self.assertEqual(ident[0], b'foo') - self.assertEqual(new_msg['msg_id'],header['msg_id']) - self.assertEqual(new_msg['msg_type'],msg['msg_type']) - self.assertEqual(new_msg['header'],msg['header']) - self.assertEqual(new_msg['content'],msg['content']) - self.assertEqual(new_msg['metadata'],msg['metadata']) - self.assertEqual(new_msg['parent_header'],msg['parent_header']) - self.assertEqual(new_msg['buffers'],[b'bar']) - - header['msg_id'] = self.session.msg_id - - self.session.send(A, msg, ident=b'foo', buffers=[b'bar']) - ident, new_msg = self.session.recv(B) - self.assertEqual(ident[0], b'foo') - self.assertEqual(new_msg['msg_id'],header['msg_id']) - self.assertEqual(new_msg['msg_type'],msg['msg_type']) - self.assertEqual(new_msg['header'],msg['header']) - self.assertEqual(new_msg['content'],msg['content']) - self.assertEqual(new_msg['metadata'],msg['metadata']) - self.assertEqual(new_msg['parent_header'],msg['parent_header']) - self.assertEqual(new_msg['buffers'],[b'bar']) - - A.close() - B.close() - ctx.term() - - def test_args(self): - """initialization arguments for Session""" - s = self.session - self.assertTrue(s.pack is ss.default_packer) - self.assertTrue(s.unpack is ss.default_unpacker) - self.assertEqual(s.username, os.environ.get('USER', u'username')) - - s = ss.Session() - self.assertEqual(s.username, os.environ.get('USER', u'username')) - - self.assertRaises(TypeError, ss.Session, pack='hi') - self.assertRaises(TypeError, ss.Session, unpack='hi') - u = str(uuid.uuid4()) - s = ss.Session(username=u'carrot', session=u) - self.assertEqual(s.session, u) - self.assertEqual(s.username, u'carrot') - - def test_tracking(self): - """test tracking messages""" - a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR) - s = self.session - s.copy_threshold = 1 - stream = ZMQStream(a) - msg = s.send(a, 'hello', track=False) - self.assertTrue(msg['tracker'] is ss.DONE) - msg = s.send(a, 'hello', track=True) - self.assertTrue(isinstance(msg['tracker'], zmq.MessageTracker)) - M = zmq.Message(b'hi there', track=True) - msg = s.send(a, 'hello', buffers=[M], track=True) - t = msg['tracker'] - self.assertTrue(isinstance(t, zmq.MessageTracker)) - self.assertRaises(zmq.NotDone, t.wait, .1) - del M - t.wait(1) # this will raise - - - def test_unique_msg_ids(self): - """test that messages receive unique ids""" - ids = set() - for i in range(2**12): - h = self.session.msg_header('test') - msg_id = h['msg_id'] - self.assertTrue(msg_id not in ids) - ids.add(msg_id) - - def 
test_feed_identities(self): - """scrub the front for zmq IDENTITIES""" - theids = "engine client other".split() - content = dict(code='whoda',stuff=object()) - themsg = self.session.msg('execute',content=content) - pmsg = theids - - def test_session_id(self): - session = ss.Session() - # get bs before us - bs = session.bsession - us = session.session - self.assertEqual(us.encode('ascii'), bs) - session = ss.Session() - # get us before bs - us = session.session - bs = session.bsession - self.assertEqual(us.encode('ascii'), bs) - # change propagates: - session.session = 'something else' - bs = session.bsession - us = session.session - self.assertEqual(us.encode('ascii'), bs) - session = ss.Session(session='stuff') - # get us before bs - self.assertEqual(session.bsession, session.session.encode('ascii')) - self.assertEqual(b'stuff', session.bsession) - - def test_zero_digest_history(self): - session = ss.Session(digest_history_size=0) - for i in range(11): - session._add_digest(uuid.uuid4().bytes) - self.assertEqual(len(session.digest_history), 0) - - def test_cull_digest_history(self): - session = ss.Session(digest_history_size=100) - for i in range(100): - session._add_digest(uuid.uuid4().bytes) - self.assertTrue(len(session.digest_history) == 100) - session._add_digest(uuid.uuid4().bytes) - self.assertTrue(len(session.digest_history) == 91) - for i in range(9): - session._add_digest(uuid.uuid4().bytes) - self.assertTrue(len(session.digest_history) == 100) - session._add_digest(uuid.uuid4().bytes) - self.assertTrue(len(session.digest_history) == 91) - - def test_bad_pack(self): - try: - session = ss.Session(pack=_bad_packer) - except ValueError as e: - self.assertIn("could not serialize", str(e)) - self.assertIn("don't work", str(e)) - else: - self.fail("Should have raised ValueError") - - def test_bad_unpack(self): - try: - session = ss.Session(unpack=_bad_unpacker) - except ValueError as e: - self.assertIn("could not handle output", str(e)) - self.assertIn("don't work either", str(e)) - else: - self.fail("Should have raised ValueError") - - def test_bad_packer(self): - try: - session = ss.Session(packer=__name__ + '._bad_packer') - except ValueError as e: - self.assertIn("could not serialize", str(e)) - self.assertIn("don't work", str(e)) - else: - self.fail("Should have raised ValueError") - - def test_bad_unpacker(self): - try: - session = ss.Session(unpacker=__name__ + '._bad_unpacker') - except ValueError as e: - self.assertIn("could not handle output", str(e)) - self.assertIn("don't work either", str(e)) - else: - self.fail("Should have raised ValueError") - - def test_bad_roundtrip(self): - with self.assertRaises(ValueError): - session = ss.Session(unpack=lambda b: 5) - - def _datetime_test(self, session): - content = dict(t=datetime.now()) - metadata = dict(t=datetime.now()) - p = session.msg('msg') - msg = session.msg('msg', content=content, metadata=metadata, parent=p['header']) - smsg = session.serialize(msg) - msg2 = session.deserialize(session.feed_identities(smsg)[1]) - assert isinstance(msg2['header']['date'], datetime) - self.assertEqual(msg['header'], msg2['header']) - self.assertEqual(msg['parent_header'], msg2['parent_header']) - self.assertEqual(msg['parent_header'], msg2['parent_header']) - assert isinstance(msg['content']['t'], datetime) - assert isinstance(msg['metadata']['t'], datetime) - assert isinstance(msg2['content']['t'], string_types) - assert isinstance(msg2['metadata']['t'], string_types) - self.assertEqual(msg['content'], 
jsonutil.extract_dates(msg2['content'])) - self.assertEqual(msg['content'], jsonutil.extract_dates(msg2['content'])) - - def test_datetimes(self): - self._datetime_test(self.session) - - def test_datetimes_pickle(self): - session = ss.Session(packer='pickle') - self._datetime_test(session) - - @skipif(module_not_available('msgpack')) - def test_datetimes_msgpack(self): - import msgpack - - session = ss.Session( - pack=msgpack.packb, - unpack=lambda buf: msgpack.unpackb(buf, encoding='utf8'), - ) - self._datetime_test(session) - - def test_send_raw(self): - ctx = zmq.Context.instance() - A = ctx.socket(zmq.PAIR) - B = ctx.socket(zmq.PAIR) - A.bind("inproc://test") - B.connect("inproc://test") - - msg = self.session.msg('execute', content=dict(a=10)) - msg_list = [self.session.pack(msg[part]) for part in - ['header', 'parent_header', 'metadata', 'content']] - self.session.send_raw(A, msg_list, ident=b'foo') - - ident, new_msg_list = self.session.feed_identities(B.recv_multipart()) - new_msg = self.session.deserialize(new_msg_list) - self.assertEqual(ident[0], b'foo') - self.assertEqual(new_msg['msg_type'],msg['msg_type']) - self.assertEqual(new_msg['header'],msg['header']) - self.assertEqual(new_msg['parent_header'],msg['parent_header']) - self.assertEqual(new_msg['content'],msg['content']) - self.assertEqual(new_msg['metadata'],msg['metadata']) - - A.close() - B.close() - ctx.term() diff --git a/jupyter_client/threaded.py b/jupyter_client/threaded.py deleted file mode 100644 index 4f2e43e..0000000 --- a/jupyter_client/threaded.py +++ /dev/null @@ -1,230 +0,0 @@ -""" Defines a KernelClient that provides thread-safe sockets with async callbacks on message replies. -""" -from __future__ import absolute_import -import atexit -import errno -from threading import Thread -import time - -import zmq -# import ZMQError in top-level namespace, to avoid ugly attribute-error messages -# during garbage collection of threads at exit: -from zmq import ZMQError -from zmq.eventloop import ioloop, zmqstream - -# Local imports -from IPython.utils.traitlets import Type, Instance -from jupyter_client.channels import HBChannel -from jupyter_client import KernelClient -from jupyter_client.channels import HBChannel - -class ThreadedZMQSocketChannel(object): - """A ZMQ socket invoking a callback in the ioloop""" - session = None - socket = None - ioloop = None - stream = None - _inspect = None - - def __init__(self, socket, session, loop): - """Create a channel. - - Parameters - ---------- - socket : :class:`zmq.Socket` - The ZMQ socket to use. - session : :class:`session.Session` - The session to use. - loop - A pyzmq ioloop to connect the socket to using a ZMQStream - """ - super(ThreadedZMQSocketChannel, self).__init__() - - self.socket = socket - self.session = session - self.ioloop = loop - - self.stream = zmqstream.ZMQStream(self.socket, self.ioloop) - self.stream.on_recv(self._handle_recv) - - _is_alive = False - def is_alive(self): - return self._is_alive - - def start(self): - self._is_alive = True - - def stop(self): - self._is_alive = False - - def close(self): - if self.socket is not None: - try: - self.socket.close(linger=0) - except Exception: - pass - self.socket = None - - def send(self, msg): - """Queue a message to be sent from the IOLoop's thread. - - Parameters - ---------- - msg : message to send - - This is threadsafe, as it uses IOLoop.add_callback to give the loop's - thread control of the action. 
- """ - def thread_send(): - self.session.send(self.stream, msg) - self.ioloop.add_callback(thread_send) - - def _handle_recv(self, msg): - """Callback for stream.on_recv. - - Unpacks message, and calls handlers with it. - """ - ident,smsg = self.session.feed_identities(msg) - msg = self.session.deserialize(smsg) - # let client inspect messages - if self._inspect: - self._inspect(msg) - self.call_handlers(msg) - - def call_handlers(self, msg): - """This method is called in the ioloop thread when a message arrives. - - Subclasses should override this method to handle incoming messages. - It is important to remember that this method is called in the thread - so that some logic must be done to ensure that the application level - handlers are called in the application thread. - """ - pass - - def process_events(self): - """Subclasses should override this with a method - processing any pending GUI events. - """ - pass - - - def flush(self, timeout=1.0): - """Immediately processes all pending messages on this channel. - - This is only used for the IOPub channel. - - Callers should use this method to ensure that :meth:`call_handlers` - has been called for all messages that have been received on the - 0MQ SUB socket of this channel. - - This method is thread safe. - - Parameters - ---------- - timeout : float, optional - The maximum amount of time to spend flushing, in seconds. The - default is one second. - """ - # We do the IOLoop callback process twice to ensure that the IOLoop - # gets to perform at least one full poll. - stop_time = time.time() + timeout - for i in range(2): - self._flushed = False - self.ioloop.add_callback(self._flush) - while not self._flushed and time.time() < stop_time: - time.sleep(0.01) - - def _flush(self): - """Callback for :method:`self.flush`.""" - self.stream.flush() - self._flushed = True - - -class IOLoopThread(Thread): - """Run a pyzmq ioloop in a thread to send and receive messages - """ - def __init__(self, loop): - super(IOLoopThread, self).__init__() - self.daemon = True - atexit.register(self._notice_exit) - self.ioloop = loop or ioloop.IOLoop() - - def _notice_exit(self): - self._exiting = True - - def run(self): - """Run my loop, ignoring EINTR events in the poller""" - while True: - try: - self.ioloop.start() - except ZMQError as e: - if e.errno == errno.EINTR: - continue - else: - raise - except Exception: - if self._exiting: - break - else: - raise - else: - break - - def stop(self): - """Stop the channel's event loop and join its thread. - - This calls :meth:`~threading.Thread.join` and returns when the thread - terminates. :class:`RuntimeError` will be raised if - :meth:`~threading.Thread.start` is called again. - """ - if self.ioloop is not None: - self.ioloop.stop() - self.join() - self.close() - - def close(self): - if self.ioloop is not None: - try: - self.ioloop.close(all_fds=True) - except Exception: - pass - - -class ThreadedKernelClient(KernelClient): - """ A KernelClient that provides thread-safe sockets with async callbacks on message replies. 
-    """
-
-    _ioloop = None
-    @property
-    def ioloop(self):
-        if self._ioloop is None:
-            self._ioloop = ioloop.IOLoop()
-        return self._ioloop
-
-    ioloop_thread = Instance(IOLoopThread, allow_none=True)
-
-    def start_channels(self, shell=True, iopub=True, stdin=True, hb=True):
-        if shell:
-            self.shell_channel._inspect = self._check_kernel_info_reply
-
-        self.ioloop_thread = IOLoopThread(self.ioloop)
-        self.ioloop_thread.start()
-
-        super(ThreadedKernelClient, self).start_channels(shell, iopub, stdin, hb)
-
-    def _check_kernel_info_reply(self, msg):
-        """This is run in the ioloop thread when the kernel info reply is received
-        """
-        if msg['msg_type'] == 'kernel_info_reply':
-            self._handle_kernel_info_reply(msg)
-            self.shell_channel._inspect = None
-
-    def stop_channels(self):
-        super(ThreadedKernelClient, self).stop_channels()
-        if self.ioloop_thread.is_alive():
-            self.ioloop_thread.stop()
-
-    iopub_channel_class = Type(ThreadedZMQSocketChannel)
-    shell_channel_class = Type(ThreadedZMQSocketChannel)
-    stdin_channel_class = Type(ThreadedZMQSocketChannel)
-    hb_channel_class = Type(HBChannel)
diff --git a/jupyter_client/win_interrupt.py b/jupyter_client/win_interrupt.py
deleted file mode 100644
index b694295..0000000
--- a/jupyter_client/win_interrupt.py
+++ /dev/null
@@ -1,39 +0,0 @@
-"""Use a Windows event to interrupt a child process like SIGINT.
-
-The child needs to explicitly listen for this - see
-ipython_kernel.parentpoller.ParentPollerWindows for a Python implementation.
-"""
-
-import ctypes
-
-def create_interrupt_event():
-    """Create an interrupt event handle.
-
-    The parent process should call this to create the
-    interrupt event that is passed to the child process. It should store
-    this handle and use it with ``send_interrupt`` to interrupt the child
-    process.
-    """
-    # Create a security attributes struct that permits inheritance of the
-    # handle by new processes.
-    # FIXME: We can clean up this mess by requiring pywin32 for IPython.
-    class SECURITY_ATTRIBUTES(ctypes.Structure):
-        _fields_ = [ ("nLength", ctypes.c_int),
-                     ("lpSecurityDescriptor", ctypes.c_void_p),
-                     ("bInheritHandle", ctypes.c_int) ]
-    sa = SECURITY_ATTRIBUTES()
-    sa_p = ctypes.pointer(sa)
-    sa.nLength = ctypes.sizeof(SECURITY_ATTRIBUTES)
-    sa.lpSecurityDescriptor = 0
-    sa.bInheritHandle = 1
-
-    return ctypes.windll.kernel32.CreateEventA(
-        sa_p,  # lpEventAttributes
-        False, # bManualReset
-        False, # bInitialState
-        '')    # lpName
-
-def send_interrupt(interrupt_handle):
-    """ Sends an interrupt event using the specified handle.
-    """
-    ctypes.windll.kernel32.SetEvent(interrupt_handle)
diff --git a/jupyter_notebook/notebookapp.py b/jupyter_notebook/notebookapp.py
index 621739e..3b42915 100644
--- a/jupyter_notebook/notebookapp.py
+++ b/jupyter_notebook/notebookapp.py
@@ -782,6 +782,10 @@ class NotebookApp(BaseIPythonApplication):
             parent=self,
             ipython_dir=self.ipython_dir,
         )
+        # FIXME: temporarily add .ipython/kernels to the kernel search path
+        self.kernel_spec_manager.kernel_dirs.append(
+            os.path.join(self.ipython_dir, 'kernels'),
+        )
         self.kernel_manager = self.kernel_manager_class(
             parent=self,
             log=self.log,
diff --git a/requirements.txt b/requirements.txt
index 013b938..5790b72 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,8 @@
 -e git+https://github.com/ipython/ipython_genutils.git#egg=ipython_genutils
 -e git+https://github.com/ipython/traitlets.git#egg=traitlets
+# The packages below aren't actually needed by IPython itself;
+# they are only needed while the split is partial
+# and the IPython repo is still running the Jupyter tests.
 -e git+https://github.com/jupyter/jupyter_core.git#egg=jupyter_core
 -e git+https://github.com/jupyter/jupyter_nbformat.git#egg=jupyter_nbformat
+-e git+https://github.com/jupyter/jupyter_client.git#egg=jupyter_client
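
The deleted test_session.py above exercised the Session wire format end to end. A minimal sketch of the serialize/send/deserialize round trip it checked, using only calls that appear in the removed tests (the inproc endpoint name is illustrative):

    # Round-trip one message through Session over a PAIR socket pair,
    # as the deleted test_session.py did.
    import zmq
    from jupyter_client.session import Session

    ctx = zmq.Context.instance()
    a = ctx.socket(zmq.PAIR)
    b = ctx.socket(zmq.PAIR)
    a.bind("inproc://session-demo")
    b.connect("inproc://session-demo")

    session = Session()
    msg = session.msg('execute', content=dict(a=10))
    session.send(a, msg, ident=b'foo', buffers=[b'bar'])

    ident, parts = session.feed_identities(b.recv_multipart())
    new_msg = session.deserialize(parts)
    assert ident[0] == b'foo'
    assert new_msg['content'] == msg['content']
    assert new_msg['buffers'] == [b'bar']

    a.close()
    b.close()
    ctx.term()
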
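The call_handlers docstring in the deleted threaded.py is explicit that messages arrive in the ioloop thread and that subclasses must hand them to the application thread themselves. One way to do that, sketched on the assumption (matching the removed code) that channels are constructed with (socket, session, loop); the queue hand-off itself is illustrative, not part of the API:

    # Hand iopub messages from the ioloop thread to the application
    # thread with a thread-safe queue.
    from queue import Queue  # the Queue module on Python 2
    from IPython.utils.traitlets import Type
    from jupyter_client.threaded import (
        ThreadedKernelClient, ThreadedZMQSocketChannel,
    )

    class QueueingChannel(ThreadedZMQSocketChannel):
        def __init__(self, socket, session, loop):
            super(QueueingChannel, self).__init__(socket, session, loop)
            self.inbox = Queue()

        def call_handlers(self, msg):
            # runs in the ioloop thread; Queue.put is thread-safe
            self.inbox.put(msg)

    class QueueingKernelClient(ThreadedKernelClient):
        iopub_channel_class = Type(QueueingChannel)

The application thread can then drain client.iopub_channel.inbox at its own pace instead of doing work on the ioloop thread.
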
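Finally, the notebookapp.py hunk above bridges the split by appending the legacy ipython_dir kernels directory to the spec manager's search path. Its effect can be sketched directly against KernelSpecManager; '~/.ipython' stands in for ipython_dir here and is an assumption, as is the no-argument construction (the deleted test_kernelspec.py passed ipython_dir explicitly):

    # Effect of the FIXME in notebookapp.py: kernelspecs installed under
    # the legacy ipython_dir stay discoverable after the split.
    import os
    from jupyter_client.kernelspec import KernelSpecManager

    ksm = KernelSpecManager()
    ipython_dir = os.path.expanduser('~/.ipython')  # assumed location
    ksm.kernel_dirs.append(os.path.join(ipython_dir, 'kernels'))
    specs = ksm.find_kernel_specs()   # {kernel_name: resource_dir}
    print(sorted(specs))              # names are lowercased, per the tests above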