# Perforce source for convert extension.
#
# Copyright 2009, Frank Kingswood <frank@kingswood-consulting.co.uk>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import marshal
import re

from mercurial.i18n import _
from mercurial import (
    error,
    util,
)
from mercurial.utils import (
    dateutil,
    procutil,
    stringutil,
)

from . import common

def loaditer(f):
"Yield the dictionary objects generated by p4"
try:
while True:
d = marshal.load(f)
if not d:
break
yield d
except EOFError:
pass
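
# `p4 -G` emits a stream of marshalled Python dictionaries, one per record.
# The exact keys depend on the subcommand; a `p4 -G changes` record looks
# roughly like this (illustrative values only):
#   {'code': 'stat', 'change': '12345', 'user': 'alice',
#    'time': '1500000000', 'status': 'submitted', 'desc': '...'}
# loaditer() yields these dictionaries until the stream is exhausted.
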
def decodefilename(filename):
"""Perforce escapes special characters @, #, *, or %
with %40, %23, %2A, or %25 respectively
>>> decodefilename(b'portable-net45%252Bnetcore45%252Bwp8%252BMonoAndroid')
'portable-net45%2Bnetcore45%2Bwp8%2BMonoAndroid'
>>> decodefilename(b'//Depot/Directory/%2525/%2523/%23%40.%2A')
'//Depot/Directory/%25/%23/#@.*'
"""
replacements = [('%2A', '*'), ('%23', '#'), ('%40', '@'), ('%25', '%')]
for k, v in replacements:
filename = filename.replace(k, v)
return filename

class p4_source(common.converter_source):
def __init__(self, ui, repotype, path, revs=None):
# avoid import cycle
from . import convcmd
super(p4_source, self).__init__(ui, repotype, path, revs=revs)
if "/" in path and not path.startswith('//'):
raise common.NoRepo(_('%s does not look like a P4 repository') %
path)
common.checktool('p4', abort=False)
self.revmap = {}
self.encoding = self.ui.config('convert', 'p4.encoding',
convcmd.orig_encoding)
self.re_type = re.compile(
br"([a-z]+)?(text|binary|symlink|apple|resource|unicode|utf\d+)"
br"(\+\w+)?$")
self.re_keywords = re.compile(
br"\$(Id|Header|Date|DateTime|Change|File|Revision|Author)"
br":[^$\n]*\$")
self.re_keywords_old = re.compile(br"\$(Id|Header):[^$\n]*\$")
if revs and len(revs) > 1:
raise error.Abort(_("p4 source does not support specifying "
"multiple revisions"))

    def setrevmap(self, revmap):
"""Sets the parsed revmap dictionary.
Revmap stores mappings from a source revision to a target revision.
It is set in convertcmd.convert and provided by the user as a file
on the commandline.
Revisions in the map are considered beeing present in the
repository and ignored during _parse(). This allows for incremental
imports if a revmap is provided.
"""
self.revmap = revmap
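
    # The revmap is supplied as the REVMAP argument to `hg convert`. Its
    # assumed shape is one "<source revision> <destination revision>" pair
    # per line, which for a p4 source would look like (hypothetical):
    #   12345 c1f85958ad3c0215dbb5d1a24d6d0c5e0a3d9e1f
    # i.e. a p4 changelist number mapped to a converted changeset id.
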
def _parse_view(self, path):
"Read changes affecting the path"
cmd = 'p4 -G changes -s submitted %s' % procutil.shellquote(path)
stdout = procutil.popen(cmd, mode='rb')
p4changes = {}
for d in loaditer(stdout):
c = d.get("change", None)
if c:
p4changes[c] = True
return p4changes
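
    # Only submitted changelists are collected (`-s submitted`), and the
    # result is used purely as a set of changelist numbers; a hypothetical
    # call like _parse_view('//depot/project/...') would return something
    # along the lines of {'12001': True, '12002': True}.
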
def _parse(self, ui, path):
"Prepare list of P4 filenames and revisions to import"
p4changes = {}
changeset = {}
files_map = {}
copies_map = {}
localname = {}
depotname = {}
heads = []
ui.status(_('reading p4 views\n'))
# read client spec or view
if "/" in path:
p4changes.update(self._parse_view(path))
if path.startswith("//") and path.endswith("/..."):
views = {path[:-3]:""}
else:
views = {"//": ""}
else:
cmd = 'p4 -G client -o %s' % procutil.shellquote(path)
clientspec = marshal.load(procutil.popen(cmd, mode='rb'))
views = {}
for client in clientspec:
if client.startswith("View"):
sview, cview = clientspec[client].split()
p4changes.update(self._parse_view(sview))
if sview.endswith("...") and cview.endswith("..."):
sview = sview[:-3]
cview = cview[:-3]
cview = cview[2:]
cview = cview[cview.find("/") + 1:]
views[sview] = cview
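
            # Client specs keep their mappings in numbered "View0", "View1",
            # ... entries, each holding "<depot path> <client path>", e.g.
            # (hypothetical) "//depot/proj/... //myclient/proj/...". The
            # loop above strips the trailing "..." and the client name so
            # that depot prefixes can be swapped for local names below.
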
        # list of changes that affect our source files
        p4changes = p4changes.keys()
        p4changes.sort(key=int)

        # list with depot pathnames, longest first, so that the most
        # specific view prefix wins when matching filenames below
        vieworder = views.keys()
        vieworder.sort(key=len, reverse=True)

        # handle revision limiting
        startrev = self.ui.config('convert', 'p4.startrev')

        # now read the full changelists to get the list of file revisions
        ui.status(_('collecting p4 changelists\n'))
        lastid = None
for change in p4changes:
if startrev and int(change) < int(startrev):
continue
if self.revs and int(change) > int(self.revs[0]):
continue
if change in self.revmap:
# Ignore already present revisions, but set the parent pointer.
lastid = change
continue
if lastid:
parents = [lastid]
else:
parents = []
d = self._fetch_revision(change)
c = self._construct_commit(d, parents)
descarr = c.desc.splitlines(True)
if len(descarr) > 0:
shortdesc = descarr[0].rstrip('\r\n')
else:
shortdesc = '**empty changelist description**'
t = '%s %s' % (c.rev, repr(shortdesc)[1:-1])
ui.status(stringutil.ellipsis(t, 80) + '\n')
files = []
copies = {}
copiedfiles = []
i = 0
while ("depotFile%d" % i) in d and ("rev%d" % i) in d:
oldname = d["depotFile%d" % i]
filename = None
for v in vieworder:
if oldname.lower().startswith(v.lower()):
filename = decodefilename(views[v] + oldname[len(v):])
break
if filename:
files.append((filename, d["rev%d" % i]))
depotname[filename] = oldname
if (d.get("action%d" % i) == "move/add"):
copiedfiles.append(filename)
localname[oldname] = filename
i += 1
            # Collect information about copied files: for each move/add,
            # run `p4 filelog` on the depot name and look for the matching
            # "moved from" entry to recover the copy source.
for filename in copiedfiles:
oldname = depotname[filename]
flcmd = ('p4 -G filelog %s'
% procutil.shellquote(oldname))
flstdout = procutil.popen(flcmd, mode='rb')
copiedfilename = None
for d in loaditer(flstdout):
copiedoldname = None
i = 0
while ("change%d" % i) in d:
if (d["change%d" % i] == change and
d["action%d" % i] == "move/add"):
j = 0
while ("file%d,%d" % (i, j)) in d:
if d["how%d,%d" % (i, j)] == "moved from":
copiedoldname = d["file%d,%d" % (i, j)]
break
j += 1
i += 1
if copiedoldname and copiedoldname in localname:
copiedfilename = localname[copiedoldname]
break
if copiedfilename:
copies[filename] = copiedfilename
else:
ui.warn(_("cannot find source for copied file: %s@%s\n")
% (filename, change))
changeset[change] = c
files_map[change] = files
copies_map[change] = copies
lastid = change
if lastid and len(changeset) > 0:
heads = [lastid]
return {
'changeset': changeset,
'files': files_map,
'copies': copies_map,
'heads': heads,
'depotname': depotname,
}
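
    # _parse() shells out to p4 repeatedly and is therefore expensive; it
    # runs at most once per source, and the propertycache attributes below
    # all read from the same memoized result.
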
    @util.propertycache
    def _parse_once(self):
        return self._parse(self.ui, self.path)

    @util.propertycache
    def copies(self):
        return self._parse_once['copies']

    @util.propertycache
    def files(self):
        return self._parse_once['files']

    @util.propertycache
    def changeset(self):
        return self._parse_once['changeset']

    @util.propertycache
    def heads(self):
        return self._parse_once['heads']

    @util.propertycache
    def depotname(self):
        return self._parse_once['depotname']

    def getheads(self):
        return self.heads

def getfile(self, name, rev):
cmd = ('p4 -G print %s'
% procutil.shellquote("%s#%s" % (self.depotname[name], rev)))
lasterror = None
while True:
stdout = procutil.popen(cmd, mode='rb')
mode = None
contents = []
keywords = None
for d in loaditer(stdout):
code = d["code"]
data = d.get("data")
if code == "error":
                    # if this is the first time the error has happened,
                    # re-attempt getting the file
if not lasterror:
lasterror = IOError(d["generic"], data)
# this will exit inner-most for-loop
break
else:
raise lasterror
elif code == "stat":
action = d.get("action")
if action in ["purge", "delete", "move/delete"]:
return None, None
p4type = self.re_type.match(d["type"])
if p4type:
mode = ""
flags = ((p4type.group(1) or "")
+ (p4type.group(3) or ""))
if "x" in flags:
mode = "x"
if p4type.group(2) == "symlink":
mode = "l"
if "ko" in flags:
keywords = self.re_keywords_old
elif "k" in flags:
keywords = self.re_keywords
elif code == "text" or code == "binary":
contents.append(data)
lasterror = None
if not lasterror:
break
if mode is None:
return None, None
contents = ''.join(contents)
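        # Collapse expanded RCS-style keywords ("$Id: ...$") back to their
        # bare form ("$Id$"); presumably this keeps converted file contents
        # independent of Perforce's keyword expansion.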
if keywords:
contents = keywords.sub("$\\1$", contents)
if mode == "l" and contents.endswith("\n"):
contents = contents[:-1]
return contents, mode
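
    # For reference, `p4 -G print` first emits a "stat" record describing
    # the file (type, action, ...) and then one or more "text"/"binary"
    # records carrying the content, e.g. (illustrative):
    #   {'code': 'stat', 'depotFile': '//depot/a.c', 'type': 'ktext', ...}
    #   {'code': 'text', 'data': '...file contents...'}
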
def getchanges(self, rev, full):
if full:
raise error.Abort(_("convert from p4 does not support --full"))
return self.files[rev], self.copies[rev], set()

    def _construct_commit(self, obj, parents=None):
"""
Constructs a common.commit object from an unmarshalled
`p4 describe` output
"""
desc = self.recode(obj.get("desc", ""))
date = (int(obj["time"]), 0) # timezone not set
if parents is None:
parents = []
return common.commit(author=self.recode(obj["user"]),
date=dateutil.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'),
parents=parents, desc=desc, branch=None, rev=obj['change'],
extra={"p4": obj['change'], "convert_revision": obj['change']})
def _fetch_revision(self, rev):
"""Return an output of `p4 describe` including author, commit date as
a dictionary."""
cmd = "p4 -G describe -s %s" % rev
stdout = procutil.popen(cmd, mode='rb')
return marshal.load(stdout)

    def getcommit(self, rev):
if rev in self.changeset:
return self.changeset[rev]
elif rev in self.revmap:
d = self._fetch_revision(rev)
return self._construct_commit(d, parents=None)
raise error.Abort(
_("cannot find %s in the revmap or parsed changesets") % rev)

    def gettags(self):
return {}

    def getchangedfiles(self, rev, i):
return sorted([x[0] for x in self.files[rev]])
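
# Typical invocations of this source (illustrative): either a depot path or
# a client specification name can be given to `hg convert`, e.g.
#   hg convert //depot/project/... project-hg
#   hg convert myclient project-hg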