compression: introduce a `storage.revlog.zlib.level` configuration

This option controls the zlib compression level used when compressing revlog chunks. It is also a good excuse to pave the way for a similar configuration option for the zstd compression engine. Having a dedicated option for each compression algorithm is useful because they do not support the same range of values.

Using a higher zlib compression level increases CPU consumption at compression time, but does not directly affect decompression time. However, dealing with smaller compressed chunks can directly help decompression and indirectly help other revlog logic.

I ran some basic tests on repositories using different levels, using the mercurial, pypy, netbeans and mozilla-central clones from our benchmark suite. All tested repositories use sparse-revlog and had all their deltas recomputed.

The compression level has a small effect on repository size (about 10% variation across the tested range). My quick analysis is that revlogs mostly store small deltas, which are not much affected by the compression level, so the variation probably comes mostly from better compression of the snapshot revisions, and snapshot revisions represent only a small portion of the repository content.

I also made some basic timing measurements. The "read" timings are gathered with a simple run of `hg perfrevlogrevisions`, the "write" timings with `hg perfrevlogwrite` (restricted to the last 5000 revisions for netbeans and mozilla-central). The timings were gathered on a generic machine (not one of our performance-locked machines), so small variations might not be meaningful; large trends remain relevant, however.

Keep in mind that these numbers are not pure compression/decompression times. They also involve the full revlog logic. In particular, the difference in chunk size has an impact on the delta chain structure, which affects performance when writing or reading them.

The compression level has a bigger impact on read/write performance than on repository size. Counter-intuitively, the higher compression levels improve "write" performance for the large repositories in our tested setting. Maybe the last 5000 delta chains end up having a very different shape in this specific spot, or maybe this reflects a more general trend of better delta chains thanks to the smaller chunks and snapshots.

This series does not intend to change the default compression level. However, these results call for a deeper analysis of this performance difference in the future.

Full data
=========

repo       level  .hg/store size  00manifest.d        read       write
-----------------------------------------------------------------------
mercurial      1      49,402,813     5,963,475    0.170159   53.250304
mercurial      6      47,197,397     5,875,730    0.182820   56.264320
mercurial      9      47,121,596     5,849,781    0.189219   56.293612
pypy           1     370,830,572    28,462,425    2.679217  460.721984
pypy           6     340,112,317    27,648,747    2.768691  467.537158
pypy           9     338,360,736    27,639,003    2.763495  476.589918
netbeans       1   1,281,847,810   165,495,457  122.477027  520.560316
netbeans       6   1,205,284,353   159,161,207  139.876147  715.930400
netbeans       9   1,197,135,671   155,034,586  141.620281  678.297064
mozilla        1   2,775,497,186   298,527,987  147.867662  751.263721
mozilla        6   2,596,856,420   286,597,671  170.572118  987.056093
mozilla        9   2,587,542,494   287,018,264  163.622338  739.803002
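For reference, the option name implies it is set in the `[storage]` section of an hgrc file. A minimal sketch (the value 6 is only an illustration, matching the middle level benchmarked above; zlib accepts levels 1 through 9):

    [storage]
    revlog.zlib.level = 6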

formatter.py
# Copyright 2016-present Facebook. All Rights Reserved.
#
# format: defines the format used to output annotate result
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from mercurial import (
    encoding,
    node,
    pycompat,
    templatefilters,
    util,
)
from mercurial.utils import (
    dateutil,
)

# imitating mercurial.commands.annotate, not using the vanilla formatter since
# the data structures are a bit different, and we have some fast paths.
class defaultformatter(object):
    """the default formatter that does leftpad and support some common flags"""

    def __init__(self, ui, repo, opts):
        self.ui = ui
        self.opts = opts

        if ui.quiet:
            datefunc = dateutil.shortdate
        else:
            datefunc = dateutil.datestr
        datefunc = util.cachefunc(datefunc)
        getctx = util.cachefunc(lambda x: repo[x[0]])
        hexfunc = self._hexfunc

        # special handling working copy "changeset" and "rev" functions
        if self.opts.get('rev') == 'wdir()':
            orig = hexfunc
            hexfunc = lambda x: None if x is None else orig(x)
            wnode = hexfunc(repo['.'].node()) + '+'
            wrev = '%d' % repo['.'].rev()
            wrevpad = ''
            if not opts.get('changeset'):  # only show + if changeset is hidden
                wrev += '+'
                wrevpad = ' '
            revenc = lambda x: wrev if x is None else ('%d' % x) + wrevpad

            def csetenc(x):
                if x is None:
                    return wnode
                return pycompat.bytestr(x) + ' '
        else:
            revenc = csetenc = pycompat.bytestr

        # opt name, separator, raw value (for json/plain), encoder (for plain)
        opmap = [('user', ' ', lambda x: getctx(x).user(), ui.shortuser),
                 ('number', ' ', lambda x: getctx(x).rev(), revenc),
                 ('changeset', ' ', lambda x: hexfunc(x[0]), csetenc),
                 ('date', ' ', lambda x: getctx(x).date(), datefunc),
                 ('file', ' ', lambda x: x[2], pycompat.bytestr),
                 ('line_number', ':', lambda x: x[1] + 1, pycompat.bytestr)]
        fieldnamemap = {'number': 'rev', 'changeset': 'node'}
        funcmap = [(get, sep, fieldnamemap.get(op, op), enc)
                   for op, sep, get, enc in opmap
                   if opts.get(op)]
        # no separator for first column
        funcmap[0] = list(funcmap[0])
        funcmap[0][1] = ''
        self.funcmap = funcmap

    def write(self, annotatedresult, lines=None, existinglines=None):
        """(annotateresult, [str], set([rev, linenum])) -> None. write output.
        annotateresult can be [(node, linenum, path)], or [(node, linenum)]
        """
        pieces = []  # [[str]]
        maxwidths = []  # [int]

        # calculate padding
        for f, sep, name, enc in self.funcmap:
            l = [enc(f(x)) for x in annotatedresult]
            pieces.append(l)
            if name in ['node', 'date']:  # node and date has fixed size
                l = l[:1]
            widths = pycompat.maplist(encoding.colwidth, set(l))
            maxwidth = (max(widths) if widths else 0)
            maxwidths.append(maxwidth)

        # buffered output
        result = ''
        for i in pycompat.xrange(len(annotatedresult)):
            for j, p in enumerate(pieces):
                sep = self.funcmap[j][1]
                padding = ' ' * (maxwidths[j] - len(p[i]))
                result += sep + padding + p[i]
            if lines:
                if existinglines is None:
                    result += ': ' + lines[i]
                else:  # extra formatting showing whether a line exists
                    key = (annotatedresult[i][0], annotatedresult[i][1])
                    if key in existinglines:
                        result += ': ' + lines[i]
                    else:
                        result += ': ' + self.ui.label('-' + lines[i],
                                                       'diff.deleted')

        if result[-1:] != '\n':
            result += '\n'
        self.ui.write(result)

    @util.propertycache
    def _hexfunc(self):
        if self.ui.debugflag or self.opts.get('long_hash'):
            return node.hex
        else:
            return node.short

    def end(self):
        pass

class jsonformatter(defaultformatter):

    def __init__(self, ui, repo, opts):
        super(jsonformatter, self).__init__(ui, repo, opts)
        self.ui.write('[')
        self.needcomma = False

    def write(self, annotatedresult, lines=None, existinglines=None):
        if annotatedresult:
            self._writecomma()

        pieces = [(name, pycompat.maplist(f, annotatedresult))
                  for f, sep, name, enc in self.funcmap]
        if lines is not None:
            pieces.append(('line', lines))
        pieces.sort()

        seps = [','] * len(pieces[:-1]) + ['']

        result = ''
        lasti = len(annotatedresult) - 1
        for i in pycompat.xrange(len(annotatedresult)):
            result += '\n {\n'
            for j, p in enumerate(pieces):
                k, vs = p
                result += (' "%s": %s%s\n'
                           % (k, templatefilters.json(vs[i], paranoid=False),
                              seps[j]))
            result += ' }%s' % ('' if i == lasti else ',')
        if lasti >= 0:
            self.needcomma = True

        self.ui.write(result)

    def _writecomma(self):
        if self.needcomma:
            self.ui.write(',')
            self.needcomma = False

    @util.propertycache
    def _hexfunc(self):
        return node.hex

    def end(self):
        self.ui.write('\n]\n')
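As a usage note, the write() API documented above expects an annotatedresult list of (node, linenum[, path]) tuples plus the matching text lines. The following is only a hypothetical driver sketch with the classes above in scope; the `ui`, `repo`, and option values are placeholders for what a command invocation would normally supply, not code from the fastannotate extension itself:

    # pick the output columns via the same opt names used in opmap above
    opts = {'user': True, 'number': True, 'line_number': True}
    fm = defaultformatter(ui, repo, opts)

    # one (node, linenum) key per output line, plus the line contents
    ctx = repo['.']
    annotatedresult = [(ctx.node(), 0), (ctx.node(), 1)]
    lines = ['first line\n', 'second line\n']

    fm.write(annotatedresult, lines=lines)
    fm.end()

Substituting jsonformatter for defaultformatter in the same sketch emits the same fields as a JSON array instead of padded columns.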