##// END OF EJS Templates
Reset the interactive namespace __warningregistry__ before executing code...
Reset the interactive namespace __warningregistry__ before executing code Fixes #6611. Idea: Right now, people often don't see important warnings when running code in IPython, because (to a first approximation) any given warning will only issue once per session. Blink and you'll miss it! This is a very common contributor to confused emails to numpy-discussion. E.g.: In [5]: 1 / my_array_with_random_contents /home/njs/.user-python2.7-64bit-3/bin/ipython:1: RuntimeWarning: divide by zero encountered in divide #!/home/njs/.user-python2.7-64bit-3/bin/python Out[5]: array([ 1.77073316, -2.29765021, -2.01800811, ..., 1.13871243, -1.08302964, -8.6185091 ]) Oo, right, guess I gotta be careful of those zeros -- thanks, numpy, for giving me that warning! A few days later: In [592]: 1 / some_other_array Out[592]: array([ 3.07735763, 0.50769289, 0.83984078, ..., -0.67563917, -0.85736257, -1.36511271]) Oops, it turns out that this array had a zero in it too, and that's going to bite me later. But no warning this time! The effect of this commit is to make it so that warnings triggered by the code in cell 5 do *not* suppress warnings triggered by the code in cell 592. Note that this only applies to warnings triggered *directly* by code entered interactively -- if somepkg.foo() calls anotherpkg.bad_func() which issues a warning, then this warning will still only be displayed once, even if multiple cells call somepkg.foo(). But if cell 5 and cell 592 both call anotherpkg.bad_func() directly, then both will get warnings. (Important exception: if foo() is defined *interactively*, and calls anotherpkg.bad_func(), then every cell that calls foo() will display the warning again. This is unavoidable without fixes to CPython upstream.) Explanation: Python's warning system has some weird quirks. By default, it tries to suppress duplicate warnings, where "duplicate" means the same warning message triggered twice by the same line of code. 
This requires determining which line of code is responsible for triggering a warning, and this is controlled by the stacklevel= argument to warnings.warn. Basically, though, the idea is that if foo() calls bar() which calls baz() which calls some_deprecated_api(), then baz() will get counted as being "responsible", and the warning system will make a note that the usage of some_deprecated_api() inside baz() has already been warned about and doesn't need to be warned about again. So far so good. To accomplish this, obviously, there has to be a record somewhere of which line this was. You might think that this would be done by recording the filename:linenumber pair in a dict inside the warnings module, or something like that. You would be wrong. What actually happens is that the warnings module will use stack introspection to reach into baz()'s execution environment, create a global (module-level) variable there named __warningregistry__, and then, inside this dictionary, record just the line number. Basically, it assumes that any given module contains only one line 1, only one line 2, etc., so storing the filename is irrelevant. Obviously for interactive code this is totally wrong -- all cells share the same execution environment and global namespace, and they all contain a new line 1. Currently the warnings module treats these as if they were all the same line. In fact they are not the same line; once we have executed a given chunk of code, we will never see those particular lines again. As soon as a given chunk of code finishes executing, its line number labels become meaningless, and the corresponding warning registry entries become meaningless as well. Therefore, with this patch we delete the __warningregistry__ each time we execute a new block of code.

File last commit:

r17148:99cdf189 merge
r18548:61431d7d
Show More
test_jsonutil.py
151 lines | 4.8 KiB | text/x-python | PythonLexer
# coding: utf-8
"""Test suite for our JSON utilities."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import datetime
import json
from base64 import decodestring
import nose.tools as nt
from IPython.utils import jsonutil, tz
from ..jsonutil import json_clean, encode_images
from ..py3compat import unicode_to_str, str_to_bytes, iteritems
class Int(int):
    """An ``int`` subclass whose ``str()`` output is distinguishable
    from a plain int, so tests can tell whether json_clean coerced it."""

    def __str__(self):
        # Coerce to a plain int before formatting to avoid recursing
        # through our own __str__.
        return 'Int(%i)' % int(self)
def test():
    """Round-trip a variety of values through json_clean and json."""
    # Table of (input, expected-output) cases; an expected value of None
    # means "json_clean should return the input unchanged".
    cases = [
        # scalars pass through untouched
        (1, None),
        (1.0, None),
        ('a', None),
        (True, None),
        (False, None),
        (None, None),
        # complex numbers for now just go to strings, as otherwise they
        # are unserializable
        (1j, '1j'),
        # containers: tuples and sets are normalized to lists
        ([1, 2], None),
        ((1, 2), [1, 2]),
        ({1, 2}, [1, 2]),
        ({'x': 1}, None),
        ({'x': 1, 'y': [1, 2, 3], '1': 'int'}, None),
        # more exotic objects: iterables are materialized, int subclasses
        # are flattened to plain ints
        ((n for n in range(3)), [0, 1, 2]),
        (iter([1, 2]), [1, 2]),
        (Int(5), 5),
    ]
    for value, expected in cases:
        if expected is None:
            expected = value
        cleaned = json_clean(value)
        # validate our cleanup
        nt.assert_equal(cleaned, expected)
        # and ensure that what we return, indeed encodes cleanly
        json.loads(json.dumps(cleaned))
def test_rekey():
    """rekey should convert numeric string keys back to ints.

    This could fail due to modifying the dict keys in-place on Python 3.
    """
    str_keyed = {s: s for s in map(str, range(128))}
    rekeyed = jsonutil.rekey(str_keyed)
    for key in rekeyed:
        nt.assert_is_instance(key, int)
def test_encode_images():
    """encode_images must base64-encode binary mime data, idempotently."""
    # invalid data, but the header and footer are from real files
    pngdata = b'\x89PNG\r\n\x1a\nblahblahnotactuallyvalidIEND\xaeB`\x82'
    jpegdata = b'\xff\xd8\xff\xe0\x00\x10JFIFblahblahjpeg(\xa0\x0f\xff\xd9'
    pdfdata = b'%PDF-1.\ntrailer<</Root<</Pages<</Kids[<</MediaBox[0 0 3 3]>>]>>>>>>'
    raw = {
        'image/png': pngdata,
        'image/jpeg': jpegdata,
        'application/pdf': pdfdata,
    }

    encoded = encode_images(raw)
    for mime, data in iteritems(raw):
        # encoded values are unicode; decoding must restore the raw bytes
        decoded = decodestring(encoded[mime].encode('ascii'))
        nt.assert_equal(decoded, data)

    # encoding already-encoded data must be a no-op
    encoded2 = encode_images(encoded)
    nt.assert_equal(encoded, encoded2)

    # same check with native-str (rather than unicode) base64 values
    b64_str = {mime: unicode_to_str(b64) for mime, b64 in iteritems(encoded)}
    encoded3 = encode_images(b64_str)
    nt.assert_equal(encoded3, b64_str)
    for mime, data in iteritems(raw):
        decoded = decodestring(str_to_bytes(encoded3[mime]))
        nt.assert_equal(decoded, data)
def test_lambda():
    """Lambdas are unserializable, so json_clean should stringify them."""
    cleaned = json_clean(lambda : 1)
    nt.assert_is_instance(cleaned, str)
    # the string form should mention the anonymous-function marker
    nt.assert_in('<lambda>', cleaned)
    # and the result must itself be JSON-encodable
    json.dumps(cleaned)
def test_extract_dates():
    """Every ISO8601 spelling of the same instant should parse equal."""
    timestamps = [
        '2013-07-03T16:34:52.249482',
        '2013-07-03T16:34:52.249482Z',
        '2013-07-03T16:34:52.249482Z-0800',
        '2013-07-03T16:34:52.249482Z+0800',
        '2013-07-03T16:34:52.249482Z+08:00',
        '2013-07-03T16:34:52.249482Z-08:00',
        '2013-07-03T16:34:52.249482-0800',
        '2013-07-03T16:34:52.249482+0800',
        '2013-07-03T16:34:52.249482+08:00',
        '2013-07-03T16:34:52.249482-08:00',
    ]
    extracted = jsonutil.extract_dates(timestamps)
    reference = extracted[0]
    for parsed in extracted:
        nt.assert_true(isinstance(parsed, datetime.datetime))
        nt.assert_equal(parsed, reference)
def test_parse_ms_precision():
    """parse_date handles 1-6 fractional-second digits; anything else
    falls back to returning the original string."""
    base = '2013-07-03T16:34:52'
    digits = '1234567890'

    # no fractional part at all parses fine
    nt.assert_is_instance(jsonutil.parse_date(base), datetime.datetime)

    for ndigits in range(len(digits)):
        stamp = base + '.' + digits[:ndigits]
        parsed = jsonutil.parse_date(stamp)
        # a bare trailing '.' (0 digits) or >6 digits is not a valid date
        expected = datetime.datetime if 1 <= ndigits <= 6 else str
        nt.assert_is_instance(parsed, expected)
def test_date_default():
    """date_default serializes datetimes; only tz-aware ones get an offset."""
    data = {'today': datetime.datetime.now(), 'utcnow': tz.utcnow()}
    jsondata = json.dumps(data, default=jsonutil.date_default)
    # exactly one of the two datetimes (the aware utcnow) carries "+00"
    nt.assert_in("+00", jsondata)
    nt.assert_equal(jsondata.count("+00"), 1)
    # and the round trip through extract_dates restores datetime objects
    extracted = jsonutil.extract_dates(json.loads(jsondata))
    for value in extracted.values():
        nt.assert_is_instance(value, datetime.datetime)
def test_exception():
    """Dicts whose keys collide once stringified must raise ValueError."""
    colliding = [
        {1: 'number', '1': 'string'},
        {True: 'bool', 'True': 'string'},
    ]
    for bad in colliding:
        nt.assert_raises(ValueError, json_clean, bad)
def test_unicode_dict():
    """Unicode keys and values should pass through json_clean untouched."""
    original = {u'üniço∂e': u'üniço∂e'}
    cleaned = jsonutil.json_clean(original)
    nt.assert_equal(original, cleaned)