compilerop.py
"""Compiler tools with improved interactive support.
Provides compilation machinery similar to codeop, but with caching support so
we can provide interactive tracebacks.
Authors
-------
* Robert Kern
* Fernando Perez
* Thomas Kluyver
"""
# Note: though it might be more natural to name this module 'compiler', that
# name is in the stdlib and name collisions with the stdlib tend to produce
# weird problems (often with third-party tools).
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team.
#
# Distributed under the terms of the BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
from ast import PyCF_ONLY_AST
import codeop
import hashlib
import linecache
import time
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
def code_name(code, number=0):
""" Compute a (probably) unique name for code for caching.
This now expects code to be unicode.
"""
hash_digest = hashlib.md5(code.encode("utf-8")).hexdigest()
# Include the number and 12 characters of the hash in the name. It's
# pretty much impossible that in a single session we'll have collisions
# even with truncated hashes, and the full one makes tracebacks too long
return '<ipython-input-{0}-{1}>'.format(number, hash_digest[:12])
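# Illustrative example (not part of the original module): code_name produces
# names like '<ipython-input-3-a1b2c3d4e5f6>', where 3 is the supplied number
# (the execution counter) and the suffix is the first 12 hex digits of the
# MD5 of the source:
#
#   >>> name = code_name(u"x = 1\n", number=3)
#   >>> name.startswith('<ipython-input-3-') and name.endswith('>')
#   True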
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class CachingCompiler(codeop.Compile):
"""A compiler that caches code compiled from interactive statements.
"""
def __init__(self):
codeop.Compile.__init__(self)
# This is ugly, but it must be done this way to allow multiple
# simultaneous ipython instances to coexist. Since Python itself
# directly accesses the data structures in the linecache module, and
# the cache therein is global, we must work with that data structure.
# We must hold a reference to the original checkcache routine and call
# that in our own check_cache() below, but the special IPython cache
# must also be shared by all IPython instances. If we were to hold
# separate caches (one in each CachingCompiler instance), any call made
# by Python itself to linecache.checkcache() would obliterate the
# cached data from the other IPython instances.
if not hasattr(linecache, '_ipython_cache'):
linecache._ipython_cache = {}
if not hasattr(linecache, '_checkcache_ori'):
linecache._checkcache_ori = linecache.checkcache
# Now, we must monkeypatch the linecache directly so that parts of the
# stdlib that call it outside our control go through our codepath
# (otherwise we'd lose our tracebacks).
linecache.checkcache = self.check_cache
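    # Illustrative sketch (added comment, not in the original source): because
    # the cache lives on the linecache module itself, every CachingCompiler
    # sees the same entries, regardless of which instance stored them:
    #
    #   c1, c2 = CachingCompiler(), CachingCompiler()
    #   name = c1.cache(u'x = 1\n')
    #   # the entry is visible through the shared module-level structures
    #   assert name in linecache.cache and name in linecache._ipython_cache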
def ast_parse(self, source, filename='<unknown>', symbol='exec'):
"""Parse code to an AST with the current compiler flags active.
Arguments are exactly the same as ast.parse (in the standard library),
and are passed to the built-in compile function."""
return compile(source, filename, symbol, self.flags | PyCF_ONLY_AST, 1)
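    # Hedged usage sketch (illustration only, not in the original file):
    #
    #   cc = CachingCompiler()
    #   tree = cc.ast_parse(u"x = 1\n", filename='<test>', symbol='exec')
    #   # `tree` is an ast.Module object, parsed with whatever __future__
    #   # flags this compiler has accumulated so far.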
def reset_compiler_flags(self):
"""Reset compiler flags to default state."""
# This value is copied from codeop.Compile.__init__, so if that ever
# changes, it will need to be updated.
self.flags = codeop.PyCF_DONT_IMPLY_DEDENT
@property
def compiler_flags(self):
"""Flags currently active in the compilation process.
"""
return self.flags
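    # Illustrative note (not part of the original source): codeop.Compile
    # accumulates __future__ flags from the code it compiles, so
    # compiler_flags grows as the user executes `from __future__ import ...`
    # statements, and reset_compiler_flags() drops it back to the default:
    #
    #   cc = CachingCompiler()
    #   cc(u'from __future__ import division\n', '<test>', 'exec')
    #   cc.compiler_flags      # now includes the division flag (on Python 2)
    #   cc.reset_compiler_flags()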
def cache(self, code, number=0):
"""Make a name for a block of code, and cache the code.
Parameters
----------
code : str
The Python source code to cache.
number : int
A number which forms part of the code's name. Used for the execution
counter.
Returns
-------
The name of the cached code (as a string). Pass this as the filename
argument to compilation, so that tracebacks are correctly hooked up.
"""
name = code_name(code, number)
entry = (len(code), time.time(),
[line+'\n' for line in code.splitlines()], name)
linecache.cache[name] = entry
linecache._ipython_cache[name] = entry
return name
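    # Hedged usage sketch (illustration only, not in the original module):
    #
    #   cc = CachingCompiler()
    #   src = u"def f():\n    return 1/0\n"
    #   name = cc.cache(src, number=7)    # e.g. '<ipython-input-7-...>'
    #   code = cc(src, name, 'exec')      # codeop.Compile instances are callable
    #   exec(code)                        # f's co_filename is now `name`
    #   # A traceback raised inside f() will show the cached source lines,
    #   # because linecache.cache[name] holds them.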
def check_cache(self, *args):
"""Call linecache.checkcache() safely protecting our cached values.
"""
        # First call the original checkcache as intended
linecache._checkcache_ori(*args)
        # Then restore our own data to the cache, so that tracebacks for
        # our compiled code can still be produced.
linecache.cache.update(linecache._ipython_cache)
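# Illustrative sketch (appended for documentation, not in the original file):
# without the patch installed in CachingCompiler.__init__, a plain call to
# linecache.checkcache() would drop entries whose filenames (such as
# '<ipython-input-1-...>') do not exist on disk, and tracebacks would lose
# their source lines. With the patch the entries are restored afterwards:
#
#   cc = CachingCompiler()
#   name = cc.cache(u'y = 2\n')
#   linecache.checkcache()          # routed through cc.check_cache()
#   assert name in linecache.cache  # the IPython entry is still there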