branchmap: update cache of 'unserved' filter on new changesets

The `commitctx` and `addchangegroup` methods of repo update the branchcache
after completion. This behavior aims to keep the branchcache in sync for
read-only processes such as hgweb. See ee317dbfb9d0 for details.

Since changelog filtering is used, those calls only update the cache of the
unfiltered repo, one of no interest to a typical read-only process like
hgweb. Note: by chance, in the basic case,
`repo.unfiltered() == repo.filtered('unserved')`.

This changeset updates the "unserved" cache instead. I think this is the only
cache that matters for hgweb. We could imagine updating all possible
branchcaches instead, but:

- I'm not sure it would have any beneficial impact; it may even increase the
  odds of all caches being invalidated.
- It is a more complicated change.

So I'm going for updating a single cache only, which is already better than
updating a cache nobody cares about. A sketch of the idea follows.

This changeset has a few expected impacts on the test suite, as a different
cache is now updated.
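
The gist of the change, as a minimal sketch (not the actual patch; the hook
name `_afternewchangesets` is made up for illustration, while
`branchmap.updatecache` and `repo.filtered('unserved')` are the APIs the
message refers to):

    from mercurial import branchmap

    def _afternewchangesets(repo):
        # before: branchmap.updatecache(repo) -- refreshed only the
        # unfiltered view, which hgweb never reads
        branchmap.updatecache(repo.filtered('unserved'))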

# parsers.py - Python implementation of parsers.c
#
# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from mercurial.node import bin, nullid
from mercurial import util
import struct, zlib

_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress
_sha = util.sha1

def parse_manifest(mfdict, fdict, lines):
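    # each manifest line is "<path>\0<40-char hex node>", optionally followed
    # by a one-character flag, which is stored separately in fdict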
    for l in lines.splitlines():
        f, n = l.split('\0')
        if len(n) > 40:
            fdict[f] = n[40:]
            mfdict[f] = bin(n[:40])
        else:
            mfdict[f] = bin(n)

def parse_index2(data, inline):
    def gettype(q):
        return int(q & 0xFFFF)

    def offset_type(offset, type):
        return long(long(offset) << 16 | type)
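
    # revlogng index entry: 6-byte offset plus 2-byte flags (packed together
    # as one uint64), compressed length, uncompressed length, base rev,
    # link rev, parent 1 rev, parent 2 rev, 20-byte node, 12 bytes of padding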
    indexformatng = ">Qiiiiii20s12x"

    s = struct.calcsize(indexformatng)
    index = []
    cache = None
    off = 0
    l = len(data) - s
    append = index.append
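
    # an inline revlog stores each data chunk right after its index entry,
    # so the compressed-length field (e[1]) is added to skip over the data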
    if inline:
        cache = (0, data)
        while off <= l:
            e = _unpack(indexformatng, data[off:off + s])
            append(e)
            if e[1] < 0:
                break
            off += e[1] + s
    else:
        while off <= l:
            e = _unpack(indexformatng, data[off:off + s])
            append(e)
            off += s

    if off != len(data):
        raise ValueError('corrupt index file')
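
    # on disk, the offset field of the first entry doubles as the revlog
    # version header; rev 0 always starts at offset 0, so rebuild the entry
    # with a zero offset while keeping its flag bits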
    if index:
        e = list(index[0])
        type = gettype(e[0])
        e[0] = offset_type(0, type)
        index[0] = tuple(e)

    # add the magic null revision at -1
    index.append((0, 0, 0, -1, -1, -1, -1, nullid))

    return index, cache

def parse_dirstate(dmap, copymap, st):
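    # the dirstate file starts with the two 20-byte parent nodes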
    parents = [st[:20], st[20: 40]]
    # dereference fields so they will be local in loop
    format = ">cllll"
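cll">
    # each entry: status char, mode, size, mtime, filename length; the
    # filename may be "<name>\0<copy source>" for copied or renamed files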
    e_size = struct.calcsize(format)
    pos1 = 40
    l = len(st)

    # the inner loop
    while pos1 < l:
        pos2 = pos1 + e_size
        e = _unpack(">cllll", st[pos1:pos2]) # a literal here is faster
        pos1 = pos2 + e[4]
        f = st[pos2:pos1]
        if '\0' in f:
            f, c = f.split('\0')
            copymap[f] = c
        dmap[f] = e[:4]
    return parents