Reset the interactive namespace __warningregistry__ before executing code. Fixes #6611.

Idea: Right now, people often don't see important warnings when running code in IPython, because (to a first approximation) any given warning will only be issued once per session. Blink and you'll miss it! This is a very common contributor to confused emails to numpy-discussion. E.g.:

    In [5]: 1 / my_array_with_random_contents
    /home/njs/.user-python2.7-64bit-3/bin/ipython:1: RuntimeWarning: divide by zero encountered in divide
      #!/home/njs/.user-python2.7-64bit-3/bin/python
    Out[5]: array([ 1.77073316, -2.29765021, -2.01800811, ...,  1.13871243, -1.08302964, -8.6185091 ])

Oo, right, guess I gotta be careful of those zeros -- thanks, numpy, for giving me that warning! A few days later:

    In [592]: 1 / some_other_array
    Out[592]: array([ 3.07735763,  0.50769289,  0.83984078, ..., -0.67563917, -0.85736257, -1.36511271])

Oops, it turns out that this array had a zero in it too, and that's going to bite me later. But no warning this time!

The effect of this commit is to make it so that warnings triggered by the code in cell 5 do *not* suppress warnings triggered by the code in cell 592.

Note that this only applies to warnings triggered *directly* by code entered interactively -- if somepkg.foo() calls anotherpkg.bad_func() which issues a warning, then that warning will still only be displayed once, even if multiple cells call somepkg.foo(). But if cell 5 and cell 592 both call anotherpkg.bad_func() directly, then both will get warnings. (Important exception: if foo() is defined *interactively* and calls anotherpkg.bad_func(), then every cell that calls foo() will display the warning again. This is unavoidable without fixes to CPython upstream.)

Explanation: Python's warning system has some weird quirks. By default, it tries to suppress duplicate warnings, where "duplicate" means the same warning message triggered twice by the same line of code. This requires determining which line of code is responsible for triggering a warning, and that is controlled by the stacklevel= argument to warnings.warn. Basically, though, the idea is that if foo() calls bar() which calls baz() which calls some_deprecated_api(), then baz() will get counted as being "responsible", and the warning system will make a note that the usage of some_deprecated_api() inside baz() has already been warned about and doesn't need to be warned about again. So far so good.

To accomplish this, obviously, there has to be a record somewhere of which line this was. You might think that this would be done by recording the filename:linenumber pair in a dict inside the warnings module, or something like that. You would be wrong. What actually happens is that the warnings module uses stack introspection to reach into baz()'s execution environment, create a global (module-level) variable there named __warningregistry__, and then, inside this dictionary, record just the line number. Basically, it assumes that any given module contains only one line 1, only one line 2, etc., so storing the filename is irrelevant.

For interactive code this is totally wrong -- all cells share the same execution environment and global namespace, yet every cell contains a new line 1. Currently the warnings module treats these as if they were all the same line. In fact they are not the same line; once we have executed a given chunk of code, we will never see those particular lines again. As soon as a given chunk of code finishes executing, its line number labels become meaningless, and the corresponding warning registry entries become meaningless as well. Therefore, with this patch we delete the __warningregistry__ each time we execute a new block of code.
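As a concrete illustration of the mechanism (a minimal sketch, not the actual patch: run_cell and user_ns below are toy stand-ins for IPython's machinery), dropping __warningregistry__ from the shared namespace before each block of code means stale per-line records from earlier cells can no longer suppress warnings raised directly by later cells:

    import warnings

    def run_cell(source, user_ns):
        # Registry entries are keyed by line number only, and every new cell
        # starts again at line 1, so clear stale entries before executing.
        user_ns.pop('__warningregistry__', None)
        exec(compile(source, '<ipython-input>', 'exec'), user_ns)

    ns = {'warnings': warnings}
    run_cell("warnings.warn('divide by zero encountered')", ns)  # warning shown
    run_cell("warnings.warn('divide by zero encountered')", ns)  # shown again, not suppressed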

launcher.py
"""Utilities for launching kernels
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import sys
from subprocess import Popen, PIPE
from IPython.utils.encoding import getdefaultencoding
from IPython.utils.py3compat import cast_bytes_py2
def swallow_argv(argv, aliases=None, flags=None):
"""strip frontend-specific aliases and flags from an argument list
For use primarily in frontend apps that want to pass a subset of command-line
arguments through to a subprocess, where frontend-specific flags and aliases
should be removed from the list.
Parameters
----------
argv : list(str)
The starting argv, to be filtered
aliases : container of aliases (dict, list, set, etc.)
The frontend-specific aliases to be removed
flags : container of flags (dict, list, set, etc.)
The frontend-specific flags to be removed
Returns
-------
argv : list(str)
The argv list, excluding flags and aliases that have been stripped
"""
if aliases is None:
aliases = set()
if flags is None:
flags = set()
stripped = list(argv) # copy
swallow_next = False
was_flag = False
for a in argv:
if a == '--':
break
if swallow_next:
swallow_next = False
# last arg was an alias, remove the next one
# *unless* the last alias has a no-arg flag version, in which
# case, don't swallow the next arg if it's also a flag:
if not (was_flag and a.startswith('-')):
stripped.remove(a)
continue
if a.startswith('-'):
split = a.lstrip('-').split('=')
name = split[0]
# we use startswith because argparse accepts abbreviated arguments,
# as long as the abbreviation is unambiguous,
# so `--no-br` means `--no-browser` in the notebook, etc.
if any(alias.startswith(name) for alias in aliases):
stripped.remove(a)
if len(split) == 1:
# alias passed with arg via space
swallow_next = True
# could have been a flag that matches an alias, e.g. `existing`
# in which case, we might not swallow the next arg
was_flag = name in flags
elif len(split) == 1 and any(flag.startswith(name) for flag in flags):
# strip flag, but don't swallow next, as flags don't take args
stripped.remove(a)
# return shortened list
return stripped
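# Illustrative usage (a sketch; 'port' and 'no-browser' are hypothetical
# frontend-specific names, not anything defined in this module):
#
#   swallow_argv(['--no-browser', '--port=8888', 'notebook'],
#                aliases={'port'}, flags={'no-browser'})
#   # -> ['notebook']
#
# '--port=8888' matches the 'port' alias and '--no-browser' matches the
# 'no-browser' flag, so only the positional argument is passed through.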
def make_ipkernel_cmd(code, executable=None, extra_arguments=[], **kw):
"""Build Popen command list for launching an IPython kernel.
Parameters
----------
code : str,
A string of Python code that imports and executes a kernel entry point.
executable : str, optional (default sys.executable)
The Python executable to use for the kernel process.
extra_arguments : list, optional
A list of extra arguments to pass when executing the launch code.
Returns
-------
A Popen command list
"""
if executable is None:
executable = sys.executable
arguments = [ executable, '-c', code, '-f', '{connection_file}' ]
arguments.extend(extra_arguments)
if sys.platform == 'win32':
# If the kernel is running on pythonw and stdout/stderr have not been
# redirected, it will crash when more than 4KB of data is written to
# stdout or stderr. This is a bug that has been with Python for a very
# long time; see http://bugs.python.org/issue706263.
# A cleaner solution to this problem would be to pass os.devnull to
# Popen directly. Unfortunately, that does not work.
if executable.endswith('pythonw.exe'):
arguments.append('--no-stdout')
arguments.append('--no-stderr')
return arguments
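# Illustrative usage (a sketch; the entry-point string and extra argument
# are hypothetical):
#
#   cmd = make_ipkernel_cmd('from mykernel import main; main()',
#                           extra_arguments=['--matplotlib=inline'])
#   # -> [sys.executable, '-c', 'from mykernel import main; main()',
#   #     '-f', '{connection_file}', '--matplotlib=inline']
#
# The '{connection_file}' placeholder is left literal here; the caller is
# expected to substitute the real connection file path before launching.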
def launch_kernel(cmd, stdin=None, stdout=None, stderr=None, env=None,
independent=False,
cwd=None, ipython_kernel=True,
**kw
):
""" Launches a localhost kernel, binding to the specified ports.
Parameters
----------
cmd : Popen list,
The command list to pass to Popen, e.g. as returned by make_ipkernel_cmd.
stdin, stdout, stderr : optional (default None)
Standard streams, as defined in subprocess.Popen.
independent : bool, optional (default False)
If set, the kernel process is guaranteed to survive if this process
dies. If not set, an effort is made to ensure that the kernel is killed
when this process dies. Note that in this case it is still good practice
to kill kernels manually before exiting.
cwd : path, optional
The working dir of the kernel process (default: cwd of this process).
ipython_kernel : bool, optional
Whether the kernel is an official IPython one,
and should get a bit of special treatment.
Returns
-------
Popen instance for the kernel subprocess
"""
# Popen will fail (sometimes with a deadlock) if stdin, stdout, and stderr
# are invalid. Unfortunately, there is in general no way to detect whether
# they are valid. The following two blocks redirect them to (temporary)
# pipes in certain important cases.
# If this process has been backgrounded, our stdin is invalid. Since there
# is no compelling reason for the kernel to inherit our stdin anyway, we'll
# play it safe and always redirect.
redirect_in = True
_stdin = PIPE if stdin is None else stdin
# If this process is running on pythonw, we know that stdin, stdout, and
# stderr are all invalid.
redirect_out = sys.executable.endswith('pythonw.exe')
if redirect_out:
_stdout = PIPE if stdout is None else stdout
_stderr = PIPE if stderr is None else stderr
else:
_stdout, _stderr = stdout, stderr
env = env if (env is not None) else os.environ.copy()
encoding = getdefaultencoding(prefer_stream=False)
# Spawn a kernel.
if sys.platform == 'win32':
# Popen on Python 2 on Windows cannot handle unicode args or cwd
cmd = [ cast_bytes_py2(c, encoding) for c in cmd ]
if cwd:
cwd = cast_bytes_py2(cwd, sys.getfilesystemencoding() or 'ascii')
from IPython.kernel.zmq.parentpoller import ParentPollerWindows
# Create a Win32 event for interrupting the kernel.
interrupt_event = ParentPollerWindows.create_interrupt_event()
# Store this in an environment variable for third party kernels, but at
# present, our own kernel expects this as a command line argument.
env["IPY_INTERRUPT_EVENT"] = str(interrupt_event)
if ipython_kernel:
cmd += [ '--interrupt=%i' % interrupt_event ]
# If the kernel is running on pythonw and stdout/stderr have not been
# redirected, it will crash when more than 4KB of data is written to
# stdout or stderr. This is a bug that has been with Python for a very
# long time; see http://bugs.python.org/issue706263.
# A cleaner solution to this problem would be to pass os.devnull to
# Popen directly. Unfortunately, that does not work.
if cmd[0].endswith('pythonw.exe'):
if stdout is None:
cmd.append('--no-stdout')
if stderr is None:
cmd.append('--no-stderr')
# Launch the kernel process.
if independent:
proc = Popen(cmd,
creationflags=512, # CREATE_NEW_PROCESS_GROUP
stdin=_stdin, stdout=_stdout, stderr=_stderr, env=env)
else:
if ipython_kernel:
try:
from _winapi import DuplicateHandle, GetCurrentProcess, \
DUPLICATE_SAME_ACCESS
except:
from _subprocess import DuplicateHandle, GetCurrentProcess, \
DUPLICATE_SAME_ACCESS
pid = GetCurrentProcess()
handle = DuplicateHandle(pid, pid, pid, 0,
True, # Inheritable by new processes.
DUPLICATE_SAME_ACCESS)
cmd += [ '--parent=%i' % handle ]
proc = Popen(cmd,
stdin=_stdin, stdout=_stdout, stderr=_stderr, cwd=cwd, env=env)
# Attach the interrupt event to the Popen object so it can be used later.
proc.win32_interrupt_event = interrupt_event
else:
if independent:
proc = Popen(cmd, preexec_fn=lambda: os.setsid(),
stdin=_stdin, stdout=_stdout, stderr=_stderr, cwd=cwd, env=env)
else:
if ipython_kernel:
cmd += ['--parent=1']
proc = Popen(cmd,
stdin=_stdin, stdout=_stdout, stderr=_stderr, cwd=cwd, env=env)
# Clean up pipes created to work around Popen bug.
if redirect_in:
if stdin is None:
proc.stdin.close()
if redirect_out:
if stdout is None:
proc.stdout.close()
if stderr is None:
proc.stderr.close()
return proc
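# Illustrative usage (a sketch, assuming cmd was built by make_ipkernel_cmd
# and '{connection_file}' has already been replaced with a real path):
#
#   proc = launch_kernel(cmd, cwd='/tmp')
#   # ... talk to the kernel via its connection file ...
#   proc.terminate()  # proc is an ordinary subprocess.Popen instance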
__all__ = [
'swallow_argv',
'make_ipkernel_cmd',
'launch_kernel',
]