branchmap.py
634 lines
| 23.7 KiB
| text/x-python
|
PythonLexer
/ mercurial / branchmap.py
Pierre-Yves David
|
r18116 | # branchmap.py - logic to computes, maintain and stores branchmap for local repo | ||
# | ||||
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com> | ||||
# | ||||
# This software may be used and distributed according to the terms of the | ||||
# GNU General Public License version 2 or any later version. | ||||
Pierre-Yves David
|
r18117 | |||
Gregory Szorc
|
r25918 | from __future__ import absolute_import | ||
import struct | ||||
from .node import ( | ||||
bin, | ||||
hex, | ||||
nullid, | ||||
nullrev, | ||||
) | ||||
from . import ( | ||||
encoding, | ||||
Pierre-Yves David
|
r26587 | error, | ||
Augie Fackler
|
r35849 | pycompat, | ||
Gregory Szorc
|
r25918 | scmutil, | ||
Simon Farnsworth
|
r30975 | util, | ||
Gregory Szorc
|
r25918 | ) | ||
Yuya Nishihara
|
r37102 | from .utils import ( | ||
stringutil, | ||||
) | ||||
Gregory Szorc
|
r25918 | |||
calcsize = struct.calcsize | ||||
Mads Kiilerich
|
r31370 | pack_into = struct.pack_into | ||
unpack_from = struct.unpack_from | ||||
Pierre-Yves David
|
r18117 | |||
Pierre-Yves David
|
r18118 | |||
Augie Fackler
|
### Nearest subset relation
# Nearest subset of filter X is a filter Y so that:
# * Y is included in X,
# * X - Y is as small as possible.
# This creates an ordering used for branchmap purposes.
# The ordering may be partial.
#
# Used to seed the branchmap of a filtered view from the branchmap of its
# nearest computed subset instead of recomputing from scratch.
subsettable = {None: 'visible',
               'visible-hidden': 'visible',
               'visible': 'served',
               'served': 'immutable',
               'immutable': 'base'}
Martijn Pieters
|
r41764 | |||
class BranchMapCache(object):
    """mapping of filtered views of repo with their branchcache

    Indexed by a repo's filter name (``repo.filtername``); ``__getitem__``
    takes the repo object itself and refreshes the cache before returning it.
    """
    def __init__(self):
        # filtername -> branchcache instance
        self._per_filter = {}

    def __getitem__(self, repo):
        # Always refresh before handing the cache out, so callers get a
        # cache that is valid for the current state of this filtered view.
        self.updatecache(repo)
        return self._per_filter[repo.filtername]

    def updatecache(self, repo):
        """Update the cache for the given filtered view on a repository"""
        # This can trigger updates for the caches for subsets of the filtered
        # view, e.g. when there is no cache for this filtered view or the cache
        # is stale.

        cl = repo.changelog
        filtername = repo.filtername
        bcache = self._per_filter.get(filtername)
        if bcache is None or not bcache.validfor(repo):
            # cache object missing or cache object stale? Read from disk
            bcache = branchcache.fromfile(repo)

        revs = []
        if bcache is None:
            # no (fresh) cache available anymore, perhaps we can re-use
            # the cache for a subset, then extend that to add info on missing
            # revisions.
            subsetname = subsettable.get(filtername)
            if subsetname is not None:
                # NOTE: self[subset] recurses into updatecache for the
                # subset view, so the subset's cache is up to date here.
                subset = repo.filtered(subsetname)
                bcache = self[subset].copy()
                # revisions visible here but filtered out of the subset must
                # be (re)considered, as long as they are not newer than the
                # copied cache's tip.
                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                revs.extend(r for r in extrarevs if r <= bcache.tiprev)
            else:
                # nothing to fall back on, start empty.
                bcache = branchcache()

        # everything past the cached tip still needs to be scanned
        revs.extend(cl.revs(start=bcache.tiprev + 1))
        if revs:
            bcache.update(repo, revs)

        assert bcache.validfor(repo), filtername
        self._per_filter[repo.filtername] = bcache

    def replace(self, repo, remotebranchmap):
        """Replace the branchmap cache for a repo with a branch mapping.

        This is likely only called during clone with a branch map from a
        remote.

        """
        cl = repo.changelog
        clrev = cl.rev
        clbranchinfo = cl.branchinfo
        rbheads = []
        closed = []
        for bheads in remotebranchmap.itervalues():
            rbheads += bheads
            for h in bheads:
                r = clrev(h)
                b, c = clbranchinfo(r)
                if c:
                    closed.append(h)

        if rbheads:
            rtiprev = max((int(clrev(node)) for node in rbheads))
            cache = branchcache(
                remotebranchmap, repo[rtiprev].node(), rtiprev,
                closednodes=closed)

            # Try to stick it as low as possible
            # filter above served are unlikely to be fetch from a clone
            for candidate in ('base', 'immutable', 'served'):
                rview = repo.filtered(candidate)
                if cache.validfor(rview):
                    self._per_filter[candidate] = cache
                    cache.write(rview)
                    return

    def clear(self):
        # drop every per-filter cache; next access will rebuild from disk
        self._per_filter.clear()
Gregory Szorc
|
r26460 | |||
Pulkit Goyal
|
class branchcache(object):
    """A dict like object that hold branches heads cache.

    This cache is used to avoid costly computations to determine all the
    branch heads of a repo.

    The cache is serialized on disk in the following format:

    <tip hex node> <tip rev number> [optional filtered repo hex hash]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. If the
    branch cache is for a filtered repo view, an optional third hash is
    included that hashes the hashes of all filtered revisions.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.
    """

    def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                 filteredhash=None, closednodes=None, hasnode=None):
        """ hasnode is a function which can be used to verify whether changelog
        has a given node or not. If it's not provided, we assume that every node
        we have exists in changelog """
        self.tipnode = tipnode
        self.tiprev = tiprev
        self.filteredhash = filteredhash
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.
        if closednodes is None:
            self._closednodes = set()
        else:
            self._closednodes = closednodes
        # branch name -> list of head nodes
        self._entries = dict(entries)
        # whether closed nodes are verified or not
        self._closedverified = False
        # branches for which nodes are verified
        self._verifiedbranches = set()
        self._hasnode = hasnode
        if self._hasnode is None:
            # no verification function supplied: trust every node
            self._hasnode = lambda x: True

    def __iter__(self):
        # iterating a branchcache yields branch names, like a dict
        return iter(self._entries)

    def __setitem__(self, key, value):
        self._entries[key] = value

    def __getitem__(self, key):
        return self._entries[key]

    def iteritems(self):
        # (branch name, head node list) pairs
        return self._entries.iteritems()

    def hasbranch(self, label):
        """ checks whether a branch of this name exists or not """
        return label in self._entries

    @classmethod
    def fromfile(cls, repo):
        """Read the on-disk cache for ``repo`` and return a branchcache.

        Returns None when the cache file is missing, unreadable, stale or
        corrupt; callers are expected to rebuild in that case.
        """
        f = None
        try:
            f = repo.cachevfs(cls._filename(repo))
            lineiter = iter(f)
            # first line is the cache key: tip node, tip rev and an
            # optional filtered-revs hash (see class docstring)
            cachekey = next(lineiter).rstrip('\n').split(" ", 2)
            last, lrev = cachekey[:2]
            last, lrev = bin(last), int(lrev)
            filteredhash = None
            hasnode = repo.changelog.hasnode
            if len(cachekey) > 2:
                filteredhash = bin(cachekey[2])
            bcache = cls(tipnode=last, tiprev=lrev, filteredhash=filteredhash,
                         hasnode=hasnode)
            if not bcache.validfor(repo):
                # invalidate the cache
                raise ValueError(r'tip differs')
            bcache.load(repo, lineiter)
        except (IOError, OSError):
            # missing/unreadable cache file: silently fall back to rebuild
            return None

        except Exception as inst:
            # any parse/validation failure invalidates the cache
            if repo.ui.debugflag:
                msg = 'invalid branchheads cache'
                if repo.filtername is not None:
                    msg += ' (%s)' % repo.filtername
                msg += ': %s\n'
                repo.ui.debug(msg % pycompat.bytestr(inst))
            bcache = None

        finally:
            if f:
                f.close()

        return bcache

    def load(self, repo, lineiter):
        """ fully loads the branchcache by reading from the file using the line
        iterator passed"""
        cl = repo.changelog
        for line in lineiter:
            line = line.rstrip('\n')
            if not line:
                continue
            # each line: "<hex node> <o|c> <branch name>"
            node, state, label = line.split(" ", 2)
            if state not in 'oc':
                raise ValueError(r'invalid branch state')
            label = encoding.tolocal(label.strip())
            node = bin(node)
            if not cl.hasnode(node):
                raise ValueError(
                    r'node %s does not exist' % pycompat.sysstr(hex(node)))
            self._entries.setdefault(label, []).append(node)
            self._verifiedbranches.add(label)
            if state == 'c':
                self._closednodes.add(node)
        # every node came straight from the (checked) file, so closed
        # nodes are fully verified at this point
        self._closedverified = True

    @staticmethod
    def _filename(repo):
        """name of a branchcache file for a given repo or repoview"""
        filename = "branch2"
        if repo.filtername:
            filename = '%s-%s' % (filename, repo.filtername)
        return filename

    def validfor(self, repo):
        """Is the cache content valid regarding a repo

        - False when cached tipnode is unknown or if we detect a strip.
        - True when cache is up to date or a subset of current repo."""
        try:
            # both the tip node and the filtered-revisions hash must match
            return ((self.tipnode == repo.changelog.node(self.tiprev))
                    and (self.filteredhash ==
                         scmutil.filteredhash(repo, self.tiprev)))
        except IndexError:
            # tiprev no longer exists (e.g. after a strip)
            return False

    def _branchtip(self, heads):
        '''Return tuple with last open head in heads and false,
        otherwise return last closed head and true.'''
        tip = heads[-1]
        closed = True
        # scan from the tip-most head backwards for the first open one
        for h in reversed(heads):
            if h not in self._closednodes:
                tip = h
                closed = False
                break
        return tip, closed

    def branchtip(self, branch):
        '''Return the tipmost open head on branch head, otherwise return the
        tipmost closed head on branch.

        Raise KeyError for unknown branch.'''
        return self._branchtip(self[branch])[0]

    def iteropen(self, nodes):
        # lazily filter out heads that close their branch
        return (n for n in nodes if n not in self._closednodes)

    def branchheads(self, branch, closed=False):
        """Return the heads of ``branch``; include closed heads only when
        ``closed`` is true. Raises KeyError for an unknown branch."""
        heads = self[branch]
        if not closed:
            heads = list(self.iteropen(heads))
        return heads

    def iterbranches(self):
        # yields (branch name, heads, tip node, is-closed) tuples
        for bn, heads in self.iteritems():
            yield (bn, heads) + self._branchtip(heads)

    def iterheads(self):
        """ returns all the heads """
        return self._entries.itervalues()

    def copy(self):
        """return a deep copy of the branchcache object"""
        # NOTE(review): the head lists themselves are shared with the copy
        # (dict() is a shallow copy) — "deep" here refers to the cache-level
        # fields, not the per-branch lists; confirm before relying on it.
        return branchcache(
            self._entries, self.tipnode, self.tiprev, self.filteredhash,
            self._closednodes)

    def write(self, repo):
        """Serialize the cache to disk (see class docstring for format).

        Write failures are logged and otherwise ignored: the cache is an
        optimization and must not break read-only operation.
        """
        try:
            f = repo.cachevfs(self._filename(repo), "w", atomictemp=True)
            cachekey = [hex(self.tipnode), '%d' % self.tiprev]
            if self.filteredhash is not None:
                cachekey.append(hex(self.filteredhash))
            f.write(" ".join(cachekey) + '\n')
            nodecount = 0
            for label, nodes in sorted(self.iteritems()):
                label = encoding.fromlocal(label)
                for node in nodes:
                    nodecount += 1
                    if node in self._closednodes:
                        state = 'c'
                    else:
                        state = 'o'
                    f.write("%s %s %s\n" % (hex(node), state, label))
            f.close()
            repo.ui.log('branchcache',
                        'wrote %s branch cache with %d labels and %d nodes\n',
                        repo.filtername, len(self._entries), nodecount)
        except (IOError, OSError, error.Abort) as inst:
            # Abort may be raised by read only opener, so log and continue
            repo.ui.debug("couldn't write branch cache: %s\n" %
                          stringutil.forcebytestr(inst))

    def update(self, repo, revgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of nodes that are strictly a superset of
        heads missing, this function updates self to be correct.
        """
        starttime = util.timer()
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        getbranchinfo = repo.revbranchcache().branchinfo
        for r in revgen:
            branch, closesbranch = getbranchinfo(r)
            newbranches.setdefault(branch, []).append(r)
            if closesbranch:
                self._closednodes.add(cl.node(r))

        # fetch current topological heads to speed up filtering
        topoheads = set(cl.headrevs())

        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newheadrevs in newbranches.iteritems():
            bheads = self._entries.setdefault(branch, [])
            bheadset = set(cl.rev(node) for node in bheads)

            # This have been tested True on all internal usage of this
            # function. Run it again in case of doubt
            # assert not (set(bheadrevs) & set(newheadrevs))
            bheadset.update(newheadrevs)

            # This prunes out two kinds of heads - heads that are superseded by
            # a head in newheadrevs, and newheadrevs that are not heads because
            # an existing head is their descendant.
            uncertain = bheadset - topoheads
            if uncertain:
                floorrev = min(uncertain)
                ancestors = set(cl.ancestors(newheadrevs, floorrev))
                bheadset -= ancestors
            bheadrevs = sorted(bheadset)
            self[branch] = [cl.node(rev) for rev in bheadrevs]
            tiprev = bheadrevs[-1]
            if tiprev > self.tiprev:
                self.tipnode = cl.node(tiprev)
                self.tiprev = tiprev

        if not self.validfor(repo):
            # cache key are not valid anymore
            self.tipnode = nullid
            self.tiprev = nullrev
            # recompute the tip from the surviving heads
            for heads in self.iterheads():
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = scmutil.filteredhash(repo, self.tiprev)

        duration = util.timer() - starttime
        repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
                    repo.filtername or b'None', duration)

        # persist immediately; write() swallows I/O errors itself
        self.write(repo)
class remotebranchcache(branchcache):
    """Branchmap info for a remote connection, should not write locally"""
    def write(self, repo):
        # deliberately a no-op: remote-derived data must never overwrite
        # the local on-disk cache
        pass
Mads Kiilerich
|
# Revision branch info cache
_rbcversion = '-v1'
_rbcnames = 'rbc-names' + _rbcversion
_rbcrevs = 'rbc-revs' + _rbcversion
# [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
_rbcrecfmt = '>4sI'
_rbcrecsize = calcsize(_rbcrecfmt)
_rbcnodelen = 4
# lower 31 bits: index into the branch-name list
_rbcbranchidxmask = 0x7fffffff
# high bit: this revision closes its branch
_rbccloseflag = 0x80000000
class revbranchcache(object):
    """Persistent cache, mapping from revision number to branch name and close.
    This is a low level cache, independent of filtering.

    Branch names are stored in rbc-names in internal encoding separated by 0.
    rbc-names is append-only, and each branch name is only stored once and will
    thus have a unique index.

    The branch info for each revision is stored in rbc-revs as constant size
    records. The whole file is read into memory, but it is only 'parsed' on
    demand. The file is usually append-only but will be truncated if repo
    modification is detected.
    The record for each revision contains the first 4 bytes of the
    corresponding node hash, and the record is only used if it still matches.
    Even a completely trashed rbc-revs fill thus still give the right result
    while converging towards full recovery ... assuming no incorrectly matching
    node hashes.
    The record also contains 4 bytes where 31 bits contains the index of the
    branch and the last bit indicate that it is a branch close commit.
    The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
    and will grow with it but be 1/8th of its size.
    """

    def __init__(self, repo, readonly=True):
        # this cache is filter-independent; require the unfiltered repo
        assert repo.filtername is None
        self._repo = repo
        self._names = [] # branch names in local encoding with static index
        self._rbcrevs = bytearray()
        self._rbcsnameslen = 0 # length of names read at _rbcsnameslen
        try:
            bndata = repo.cachevfs.read(_rbcnames)
            self._rbcsnameslen = len(bndata) # for verification before writing
            if bndata:
                self._names = [encoding.tolocal(bn)
                               for bn in bndata.split('\0')]
        except (IOError, OSError):
            if readonly:
                # don't try to use cache - fall back to the slow path
                self.branchinfo = self._branchinfo

        if self._names:
            try:
                data = repo.cachevfs.read(_rbcrevs)
                self._rbcrevs[:] = data
            except (IOError, OSError) as inst:
                repo.ui.debug("couldn't read revision branch cache: %s\n" %
                              stringutil.forcebytestr(inst))
        # remember number of good records on disk
        self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
                               len(repo.changelog))
        if self._rbcrevslen == 0:
            # no usable records: branch-name indexes would be dangling
            self._names = []
        self._rbcnamescount = len(self._names) # number of names read at
                                               # _rbcsnameslen

    def _clear(self):
        # reset in-memory state so the cache is rebuilt from scratch
        self._rbcsnameslen = 0
        del self._names[:]
        self._rbcnamescount = 0
        self._rbcrevslen = len(self._repo.changelog)
        self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
        util.clearcachedproperty(self, '_namesreverse')

    @util.propertycache
    def _namesreverse(self):
        # branch name -> index, lazily derived from self._names
        return dict((b, r) for r, b in enumerate(self._names))

    def branchinfo(self, rev):
        """Return branch name and close flag for rev, using and updating
        persistent cache."""
        changelog = self._repo.changelog
        rbcrevidx = rev * _rbcrecsize

        # avoid negative index, changelog.read(nullrev) is fast without cache
        if rev == nullrev:
            return changelog.branchinfo(rev)

        # if requested rev isn't allocated, grow and cache the rev info
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            return self._branchinfo(rev)

        # fast path: extract data from cache, use it if node is matching
        reponode = changelog.node(rev)[:_rbcnodelen]
        cachenode, branchidx = unpack_from(
            _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx)
        close = bool(branchidx & _rbccloseflag)
        if close:
            branchidx &= _rbcbranchidxmask
        if cachenode == '\0\0\0\0':
            # record never written: fall through to the slow path below
            pass
        elif cachenode == reponode:
            try:
                return self._names[branchidx], close
            except IndexError:
                # recover from invalid reference to unknown branch
                self._repo.ui.debug("referenced branch names not found"
                    " - rebuilding revision branch cache from scratch\n")
                self._clear()
        else:
            # rev/node map has changed, invalidate the cache from here up
            self._repo.ui.debug("history modification detected - truncating "
                                "revision branch cache to revision %d\n" % rev)
            truncate = rbcrevidx + _rbcrecsize
            del self._rbcrevs[truncate:]
            self._rbcrevslen = min(self._rbcrevslen, truncate)

        # fall back to slow path and make sure it will be written to disk
        return self._branchinfo(rev)

    def _branchinfo(self, rev):
        """Retrieve branch info from changelog and update _rbcrevs"""
        changelog = self._repo.changelog
        b, close = changelog.branchinfo(rev)
        if b in self._namesreverse:
            branchidx = self._namesreverse[b]
        else:
            # first time we see this branch name: assign the next index
            branchidx = len(self._names)
            self._names.append(b)
            self._namesreverse[b] = branchidx
        reponode = changelog.node(rev)
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, reponode, branchidx)
        return b, close

    def setdata(self, branch, rev, node, close):
        """add new data information to the cache"""
        if branch in self._namesreverse:
            branchidx = self._namesreverse[branch]
        else:
            branchidx = len(self._names)
            self._names.append(branch)
            self._namesreverse[branch] = branchidx
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, node, branchidx)
        # If no cache data were readable (non exists, bad permission, etc)
        # the cache was bypassing itself by setting:
        #
        #   self.branchinfo = self._branchinfo
        #
        # Since we now have data in the cache, we need to drop this bypassing.
        if r'branchinfo' in vars(self):
            del self.branchinfo

    def _setcachedata(self, rev, node, branchidx):
        """Writes the node's branch data to the in-memory cache data."""
        if rev == nullrev:
            # nullrev has no record; branchinfo() short-circuits it too
            return
        rbcrevidx = rev * _rbcrecsize
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            # grow the buffer to cover the whole changelog in one step
            self._rbcrevs.extend('\0' *
                                 (len(self._repo.changelog) * _rbcrecsize -
                                  len(self._rbcrevs)))
        pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
        # everything from this rev up is now dirty relative to disk
        self._rbcrevslen = min(self._rbcrevslen, rev)

        tr = self._repo.currenttransaction()
        if tr:
            # defer the actual disk write to transaction finalization
            tr.addfinalize('write-revbranchcache', self.write)

    def write(self, tr=None):
        """Save branch cache if it is dirty."""
        repo = self._repo
        wlock = None
        step = ''
        try:
            # step 1: append any new branch names to rbc-names
            if self._rbcnamescount < len(self._names):
                step = ' names'
                wlock = repo.wlock(wait=False)
                if self._rbcnamescount != 0:
                    f = repo.cachevfs.open(_rbcnames, 'ab')
                    if f.tell() == self._rbcsnameslen:
                        f.write('\0')
                    else:
                        # file changed underneath us: rewrite from scratch
                        f.close()
                        repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
                        self._rbcnamescount = 0
                        self._rbcrevslen = 0
                if self._rbcnamescount == 0:
                    # before rewriting names, make sure references are removed
                    repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
                    f = repo.cachevfs.open(_rbcnames, 'wb')
                f.write('\0'.join(encoding.fromlocal(b)
                                  for b in self._names[self._rbcnamescount:]))
                self._rbcsnameslen = f.tell()
                f.close()
                self._rbcnamescount = len(self._names)

            # step 2: append/overwrite dirty records in rbc-revs
            start = self._rbcrevslen * _rbcrecsize
            if start != len(self._rbcrevs):
                step = ''
                if wlock is None:
                    wlock = repo.wlock(wait=False)
                revs = min(len(repo.changelog),
                           len(self._rbcrevs) // _rbcrecsize)
                f = repo.cachevfs.open(_rbcrevs, 'ab')
                if f.tell() != start:
                    repo.ui.debug("truncating cache/%s to %d\n"
                                  % (_rbcrevs, start))
                    f.seek(start)
                    if f.tell() != start:
                        # seek failed (e.g. file shorter than expected):
                        # rewrite the whole file
                        start = 0
                        f.seek(start)
                    f.truncate()
                end = revs * _rbcrecsize
                f.write(self._rbcrevs[start:end])
                f.close()
                self._rbcrevslen = revs
        except (IOError, OSError, error.Abort, error.LockError) as inst:
            # best effort: the cache is an optimization, never fatal
            repo.ui.debug("couldn't write revision branch cache%s: %s\n"
                          % (step, stringutil.forcebytestr(inst)))
        finally:
            if wlock is not None:
                wlock.release()