# charencode.py - miscellaneous character encoding
#
# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import array

from .. import (
    pycompat,
)

def isasciistr(s):
    try:
        s.decode('ascii')
        return True
    except UnicodeDecodeError:
        return False
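
# Illustrative behaviour (added commentary, not part of the original module):
#   isasciistr(b'hello')        -> True
#   isasciistr(b'caf\xc3\xa9')  -> False  (contains non-ASCII bytes)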

def asciilower(s):
    '''convert a string to lowercase if ASCII

    Raises UnicodeDecodeError if non-ASCII characters are found.'''
    s.decode('ascii')
    return s.lower()
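
# Illustrative behaviour (added commentary, not part of the original module):
#   asciilower(b'MiXeD')        -> b'mixed'
#   asciilower(b'caf\xc3\xa9')  -> raises UnicodeDecodeError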

def asciiupper(s):
    '''convert a string to uppercase if ASCII

    Raises UnicodeDecodeError if non-ASCII characters are found.'''
    s.decode('ascii')
    return s.upper()

_jsonmap = []
_jsonmap.extend("\\u%04x" % x for x in range(32))
_jsonmap.extend(pycompat.bytechr(x) for x in range(32, 127))
_jsonmap.append('\\u007f')
_jsonmap[0x09] = '\\t'
_jsonmap[0x0a] = '\\n'
_jsonmap[0x22] = '\\"'
_jsonmap[0x5c] = '\\\\'
_jsonmap[0x08] = '\\b'
_jsonmap[0x0c] = '\\f'
_jsonmap[0x0d] = '\\r'

_paranoidjsonmap = _jsonmap[:]
_paranoidjsonmap[0x3c] = '\\u003c' # '<' (e.g. escape "</script>")
_paranoidjsonmap[0x3e] = '\\u003e' # '>'
_jsonmap.extend(pycompat.bytechr(x) for x in range(128, 256))
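
# Note (added commentary, not part of the original module): _jsonmap maps
# every byte value 0-255 to its JSON form: control characters become short
# escapes ('\\t', '\\n', ...) or '\\uXXXX', printable ASCII maps to itself,
# and bytes 128-255 pass through unescaped.  _paranoidjsonmap is copied
# before that last extend(), so it only covers 0-127 and also escapes '<'
# and '>'; a high byte therefore makes the paranoid fast path below raise
# ValueError and hand off to the fallback.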

def jsonescapeu8fast(u8chars, paranoid):
    """Convert a UTF-8 byte string to JSON-escaped form (fast path)

    Raises ValueError if non-ASCII characters have to be escaped.
    """
    if paranoid:
        jm = _paranoidjsonmap
    else:
        jm = _jsonmap
    try:
        return ''.join(jm[x] for x in bytearray(u8chars))
    except IndexError:
        raise ValueError
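
# Illustrative usage (added commentary, not part of the original module):
#   jsonescapeu8fast(b'<b>\n', paranoid=True)
#       -> the text  \u003cb\u003e\n  (literal backslash escapes)
#   jsonescapeu8fast(b'caf\xc3\xa9', paranoid=True)
#       -> raises ValueError, telling the caller to use the fallback below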

if pycompat.ispy3:
    _utf8strict = r'surrogatepass'
else:
    _utf8strict = r'strict'

def jsonescapeu8fallback(u8chars, paranoid):
    """Convert a UTF-8 byte string to JSON-escaped form (slow path)

    Escapes all non-ASCII characters even if paranoid is False.
    """
    if paranoid:
        jm = _paranoidjsonmap
    else:
        jm = _jsonmap
    # non-BMP char is represented as UTF-16 surrogate pair
    u16b = u8chars.decode('utf-8', _utf8strict).encode('utf-16', _utf8strict)
    u16codes = array.array(r'H', u16b)
    u16codes.pop(0)  # drop BOM
    return ''.join(jm[x] if x < 128 else '\\u%04x' % x for x in u16codes)
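
# Illustrative usage (added commentary, not part of the original module):
# the fallback escapes every non-ASCII character as one or two \uXXXX
# code units, e.g.
#   jsonescapeu8fallback(b'caf\xc3\xa9', paranoid=False)
#       -> the text  caf\u00e9
# and a non-BMP character such as U+1F600 becomes the UTF-16 surrogate
# pair \ud83d\ude00.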