tests: use sha256line.py instead of /dev/random in test-censor.t (issue6858)

Sometimes the systems that run our test suite don't have enough entropy, and they cannot produce a target file of the expected size using /dev/random, which results in test failures. Switching to /dev/urandom would give us far more available data at the cost of it being less "random", but we don't really need entropy for this task at all, since we only care whether the file size after compression is big enough to not be stored inline in the revlog. So let's use something we have already used to generate this kind of data in other tests.
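The underlying idea is simply to emit deterministic but poorly compressible bytes instead of drawing from the kernel entropy pool. Below is a minimal illustrative sketch of that approach; the helper name, default line count, and output format are assumptions for illustration, not the actual tests/sha256line.py script.

import hashlib
import sys


def hashed_lines(count):
    """Yield one sha256 hex digest per line.

    The output is fully reproducible (no entropy needed), but each line is
    unique pseudo-random-looking hex, so the resulting file stays large
    even after revlog compression.
    """
    for i in range(count):
        yield hashlib.sha256(str(i).encode('ascii')).hexdigest()


if __name__ == '__main__':
    # the default line count here is a hypothetical value, not what
    # test-censor.t actually uses
    count = int(sys.argv[1]) if len(sys.argv) > 1 else 1024
    sys.stdout.write('\n'.join(hashed_lines(count)) + '\n')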

highlight.py
100 lines | 3.1 KiB | text/x-python | PythonLexer
# highlight.py - highlight extension implementation file
#
# Copyright 2007-2009 Adam Hupp <adam@hupp.org> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
#
# The original module was split in an interface and an implementation
# file to defer pygments loading and speedup extension setup.
from mercurial import demandimport
demandimport.IGNORES.update(['pkgutil', 'pkg_resources', '__main__'])
from mercurial import (
    encoding,
    pycompat,
)
from mercurial.utils import stringutil
with demandimport.deactivated():
    import pygments
    import pygments.formatters
    import pygments.lexers
    import pygments.plugin
    import pygments.util

    for unused in pygments.plugin.find_plugin_lexers():
        pass
highlight = pygments.highlight
ClassNotFound = pygments.util.ClassNotFound
guess_lexer = pygments.lexers.guess_lexer
guess_lexer_for_filename = pygments.lexers.guess_lexer_for_filename
TextLexer = pygments.lexers.TextLexer
HtmlFormatter = pygments.formatters.HtmlFormatter
SYNTAX_CSS = (
    b'\n<link rel="stylesheet" href="{url}highlightcss" type="text/css" />'
)
def pygmentize(field, fctx, style, tmpl, guessfilenameonly=False):
    # append a <link ...> to the syntax highlighting css
    tmpl.load(b'header')
    old_header = tmpl.cache[b'header']
    if SYNTAX_CSS not in old_header:
        new_header = old_header + SYNTAX_CSS
        tmpl.cache[b'header'] = new_header

    text = fctx.data()
    if stringutil.binary(text):
        return

    # str.splitlines() != unicode.splitlines() because "reasons"
    for c in b"\x0c", b"\x1c", b"\x1d", b"\x1e":
        if c in text:
            text = text.replace(c, b'')

    # Pygments is best used with Unicode strings:
    # <http://pygments.org/docs/unicode/>
    text = text.decode(pycompat.sysstr(encoding.encoding), 'replace')

    # To get multi-line strings right, we can't format line-by-line
    try:
        path = pycompat.sysstr(fctx.path())
        lexer = guess_lexer_for_filename(path, text[:1024], stripnl=False)
    except (ClassNotFound, ValueError):
        # guess_lexer will return a lexer if *any* lexer matches. There is
        # no way to specify a minimum match score. This can give a high rate
        # of false positives on files with an unknown filename pattern.
        if guessfilenameonly:
            return

        try:
            lexer = guess_lexer(text[:1024], stripnl=False)
        except (ClassNotFound, ValueError):
            # Don't highlight unknown files
            return

    # Don't highlight text files
    if isinstance(lexer, TextLexer):
        return

    formatter = HtmlFormatter(nowrap=True, style=pycompat.sysstr(style))

    colorized = highlight(text, lexer, formatter)
    coloriter = (
        s.encode(pycompat.sysstr(encoding.encoding), 'replace')
        for s in colorized.splitlines()
    )

    tmpl._filters[b'colorize'] = lambda x: next(coloriter)

    oldl = tmpl.cache[field]
    newl = oldl.replace(b'line|escape', b'line|colorize')
    tmpl.cache[field] = newl