Fix showing SystemExit exception raised inside an except handler (#14503)

Doing something like this:

```python
try:
    5 / 0
except Exception as e:
    raise SystemExit
```

was hitting an error inside UltraTB, creating a long traceback of its internals (which, ironically, UltraTB itself then displays correctly :-).

`ListTB.get_exception_only()` calls the `ListTB.structured_traceback()` method *specifically*: even if `self` is a subclass, it won't use the subclass's method. However, the exception chaining in that method uses recursion by calling `self.structured_traceback()`, which *will* use a subclass's method. Tuples were added as an option there to support exception chaining, but not all of the machinery in the connected classes expects a tuple.

This change just skips the exception chaining logic for the `etb=None` case, when we're showing the exception only. I'm not sure this is necessarily the best fix, but I didn't want to spend too much time following code around a module that's old enough to vote.

Closes #12104
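The dispatch quirk described above is easy to reproduce in isolation: an explicit `ListTB.structured_traceback(self, ...)` call bypasses a subclass override, but as soon as that method recurses through `self.structured_traceback()`, the override runs anyway and receives whatever argument (here, a tuple) the base class passed. The sketch below uses made-up stand-in classes, not IPython's actual ultratb code:

```python
# Minimal stand-ins for ListTB and a subclass; the names and bodies are
# invented purely to illustrate the dispatch behaviour described above.
class BaseTB:
    def structured_traceback(self, etb=None):
        if etb is None:
            # The "exception chaining" recursion goes through self, so a
            # subclass override is what actually runs, and it receives a
            # tuple rather than a real traceback object.
            return self.structured_traceback(etb=("chained",))
        return [f"BaseTB got etb={etb!r}"]

    def get_exception_only(self):
        # Explicitly calls the base-class implementation...
        return BaseTB.structured_traceback(self)


class FancyTB(BaseTB):
    def structured_traceback(self, etb=None):
        # ...but the recursion above still lands here.
        return [f"FancyTB got etb={etb!r}"]


print(FancyTB().get_exception_only())  # ["FancyTB got etb=('chained',)"]
```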

openpy.py | 105 lines | 3.3 KiB | text/x-python

```python
"""
Tools to open .py files as Unicode, using the encoding specified within the file,
as per PEP 263.
Much of the code is taken from the tokenize module in Python 3.2.
"""
import io
from io import TextIOWrapper, BytesIO
from pathlib import Path
import re
from tokenize import open, detect_encoding
cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)", re.UNICODE)
cookie_comment_re = re.compile(r"^\s*#.*coding[:=]\s*([-\w.]+)", re.UNICODE)
def source_to_unicode(txt, errors='replace', skip_encoding_cookie=True):
"""Converts a bytes string with python source code to unicode.
Unicode strings are passed through unchanged. Byte strings are checked
for the python source file encoding cookie to determine encoding.
txt can be either a bytes buffer or a string containing the source
code.
"""
if isinstance(txt, str):
return txt
if isinstance(txt, bytes):
buffer = BytesIO(txt)
else:
buffer = txt
try:
encoding, _ = detect_encoding(buffer.readline)
except SyntaxError:
encoding = "ascii"
buffer.seek(0)
with TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True) as text:
text.mode = 'r'
if skip_encoding_cookie:
return u"".join(strip_encoding_cookie(text))
else:
return text.read()
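
# Illustrative example, not part of the original openpy.py: a byte string whose
# PEP 263 cookie declares latin-1 is decoded with that codec, and the cookie
# line itself is dropped unless skip_encoding_cookie=False.
#
#     src = "# -*- coding: latin-1 -*-\nname = 'caf\xe9'\n".encode("latin-1")
#     source_to_unicode(src)                              # "name = 'café'\n"
#     source_to_unicode(src, skip_encoding_cookie=False)  # keeps the cookie line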

def strip_encoding_cookie(filelike):
    """Generator to pull lines from a text-mode file, skipping the encoding
    cookie if it is found in the first two lines.
    """
    it = iter(filelike)
    try:
        first = next(it)
        if not cookie_comment_re.match(first):
            yield first
        second = next(it)
        if not cookie_comment_re.match(second):
            yield second
    except StopIteration:
        return

    for line in it:
        yield line


def read_py_file(filename, skip_encoding_cookie=True):
    """Read a Python file, using the encoding declared inside the file.

    Parameters
    ----------
    filename : str
        The path to the file to read.
    skip_encoding_cookie : bool
        If True (the default), and the encoding declaration is found in the first
        two lines, that line will be excluded from the output.

    Returns
    -------
    A unicode string containing the contents of the file.
    """
    filepath = Path(filename)
    with open(filepath) as f:  # the open function defined in this module.
        if skip_encoding_cookie:
            return "".join(strip_encoding_cookie(f))
        else:
            return f.read()


def read_py_url(url, errors='replace', skip_encoding_cookie=True):
    """Read a Python file from a URL, using the encoding declared inside the file.

    Parameters
    ----------
    url : str
        The URL from which to fetch the file.
    errors : str
        How to handle decoding errors in the file. Options are the same as for
        bytes.decode(), but here 'replace' is the default.
    skip_encoding_cookie : bool
        If True (the default), and the encoding declaration is found in the first
        two lines, that line will be excluded from the output.

    Returns
    -------
    A unicode string containing the contents of the file.
    """
    # Deferred import for faster start
    from urllib.request import urlopen
    response = urlopen(url)
    buffer = io.BytesIO(response.read())
    return source_to_unicode(buffer, errors, skip_encoding_cookie)
```
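
For reference, a minimal usage sketch of the helpers above. It assumes the module lives at `IPython.utils.openpy`, as in the IPython source tree; the file path is hypothetical, and `read_py_url` (not shown) would perform a real network fetch:

```python
from IPython.utils.openpy import read_py_file, source_to_unicode

# Read a local file, honouring any PEP 263 encoding declaration inside it
# (the path here is hypothetical).
src = read_py_file("some_script.py")

# Decode an in-memory byte string the same way, dropping the cookie line.
text = source_to_unicode(b"# -*- coding: utf-8 -*-\nprint('hi')\n")
print(text)  # print('hi')
```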