branchmap.py
756 lines
| 25.9 KiB
| text/x-python
|
PythonLexer
/ mercurial / branchmap.py
Pierre-Yves David
|
# branchmap.py - logic to compute, maintain and store branchmaps for a local repo
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
Pierre-Yves David
|
r18117 | |||
Gregory Szorc
|
r25918 | from __future__ import absolute_import | ||
import struct | ||||
from .node import ( | ||||
bin, | ||||
hex, | ||||
nullid, | ||||
nullrev, | ||||
) | ||||
from . import ( | ||||
encoding, | ||||
Pierre-Yves David
|
r26587 | error, | ||
Augie Fackler
|
r35849 | pycompat, | ||
Gregory Szorc
|
r25918 | scmutil, | ||
Simon Farnsworth
|
r30975 | util, | ||
Gregory Szorc
|
r25918 | ) | ||
Yuya Nishihara
|
r37102 | from .utils import ( | ||
r42314 | repoviewutil, | |||
Yuya Nishihara
|
r37102 | stringutil, | ||
) | ||||
Gregory Szorc
|
r25918 | |||
Yuya Nishihara
|
# Import typing names only for static analysis; mercurial supports Python 2,
# so typing cannot be imported unconditionally at runtime.
if pycompat.TYPE_CHECKING:
    from typing import (
        Any,
        Callable,
        Dict,
        Iterable,
        List,
        Optional,
        Set,
        Tuple,
        Union,
    )

    # Reference every imported name so pyflakes does not flag them as unused
    # (they are only consumed inside "# type:" comments).
    assert any(
        (Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union,)
    )

# mapping of filter level name -> next broader filter level name, used to
# fall back to a superset's branchcache when a view has no cache yet
subsettable = repoviewutil.subsettable

# hoist struct helpers to module level; they are hit in hot loops below
calcsize = struct.calcsize
pack_into = struct.pack_into
unpack_from = struct.unpack_from
Pierre-Yves David
|
r18117 | |||
Pierre-Yves David
|
r18118 | |||
Martijn Pieters
|
class BranchMapCache(object):
    """mapping of filtered views of repo with their branchcache"""

    def __init__(self):
        # one branchcache instance per filter level name
        # (e.g. None, b'served', b'visible', ...)
        self._per_filter = {}

    def __getitem__(self, repo):
        """Return an up-to-date branchcache for ``repo``'s filtered view."""
        self.updatecache(repo)
        return self._per_filter[repo.filtername]

    def updatecache(self, repo):
        """Update the cache for the given filtered view on a repository"""
        # This can trigger updates for the caches for subsets of the filtered
        # view, e.g. when there is no cache for this filtered view or the cache
        # is stale.

        cl = repo.changelog
        filtername = repo.filtername
        bcache = self._per_filter.get(filtername)
        if bcache is None or not bcache.validfor(repo):
            # cache object missing or cache object stale? Read from disk
            bcache = branchcache.fromfile(repo)

        revs = []
        if bcache is None:
            # no (fresh) cache available anymore, perhaps we can re-use
            # the cache for a subset, then extend that to add info on missing
            # revisions.
            subsetname = subsettable.get(filtername)
            if subsetname is not None:
                # note: self[subset] recurses into updatecache() for the
                # broader view, so subsets are brought up to date first
                subset = repo.filtered(subsetname)
                bcache = self[subset].copy()
                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                revs.extend(r for r in extrarevs if r <= bcache.tiprev)
            else:
                # nothing to fall back on, start empty.
                bcache = branchcache()

        revs.extend(cl.revs(start=bcache.tiprev + 1))
        if revs:
            bcache.update(repo, revs)

        assert bcache.validfor(repo), filtername
        self._per_filter[repo.filtername] = bcache

    def replace(self, repo, remotebranchmap):
        """Replace the branchmap cache for a repo with a branch mapping.

        This is likely only called during clone with a branch map from a
        remote.

        """
        cl = repo.changelog
        clrev = cl.rev
        clbranchinfo = cl.branchinfo
        rbheads = []
        closed = set()
        for bheads in pycompat.itervalues(remotebranchmap):
            rbheads += bheads
            for h in bheads:
                r = clrev(h)
                b, c = clbranchinfo(r)
                # record heads that close their branch
                if c:
                    closed.add(h)

        if rbheads:
            rtiprev = max((int(clrev(node)) for node in rbheads))
            cache = branchcache(
                remotebranchmap,
                repo[rtiprev].node(),
                rtiprev,
                closednodes=closed,
            )

            # Try to stick it as low as possible
            # filter above served are unlikely to be fetch from a clone
            for candidate in (b'base', b'immutable', b'served'):
                rview = repo.filtered(candidate)
                if cache.validfor(rview):
                    self._per_filter[candidate] = cache
                    cache.write(rview)
                    return

    def clear(self):
        """Drop every cached branchcache (all filter levels)."""
        self._per_filter.clear()
Augie Fackler
|
r43346 | |||
Pulkit Goyal
|
def _unknownnode(node):
    """Signal that the branchcache references *node* while the changelog does
    not contain it, by raising ValueError."""
    message = 'node %s does not exist' % pycompat.sysstr(hex(node))
    raise ValueError(message)
Gregory Szorc
|
r26460 | |||
Augie Fackler
|
r43346 | |||
Martin von Zweigbergk
|
r42805 | def _branchcachedesc(repo): | ||
if repo.filtername is not None: | ||||
Augie Fackler
|
r43347 | return b'branch cache (%s)' % repo.filtername | ||
Martin von Zweigbergk
|
r42805 | else: | ||
Augie Fackler
|
r43347 | return b'branch cache' | ||
Martin von Zweigbergk
|
r42805 | |||
Augie Fackler
|
r43346 | |||
Pulkit Goyal
|
class branchcache(object):
    """A dict like object that hold branches heads cache.

    This cache is used to avoid costly computations to determine all the
    branch heads of a repo.

    The cache is serialized on disk in the following format:

    <tip hex node> <tip rev number> [optional filtered repo hex hash]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. If the
    branch cache is for a filtered repo view, an optional third hash is
    included that hashes the hashes of all filtered revisions.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.
    """

    def __init__(
        self,
        entries=(),
        tipnode=nullid,
        tiprev=nullrev,
        filteredhash=None,
        closednodes=None,
        hasnode=None,
    ):
        # type: (Union[Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]], bytes, int, Optional[bytes], Optional[Set[bytes]], Optional[Callable[[bytes], bool]]) -> None
        """ hasnode is a function which can be used to verify whether changelog
        has a given node or not. If it's not provided, we assume that every node
        we have exists in changelog """
        # tipnode/tiprev/filteredhash together form the on-disk cache key
        # (first line of the serialized format documented above)
        self.tipnode = tipnode
        self.tiprev = tiprev
        self.filteredhash = filteredhash
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.
        if closednodes is None:
            self._closednodes = set()
        else:
            self._closednodes = closednodes
        # mapping of branch name (bytes) -> list of head nodes
        self._entries = dict(entries)
        # whether closed nodes are verified or not
        self._closedverified = False
        # branches for which nodes are verified
        self._verifiedbranches = set()
        self._hasnode = hasnode
        if self._hasnode is None:
            # no verifier supplied: trust every node (see docstring)
            self._hasnode = lambda x: True

    def _verifyclosed(self):
        """ verify the closed nodes we have """
        if self._closedverified:
            return
        for node in self._closednodes:
            if not self._hasnode(node):
                _unknownnode(node)

        self._closedverified = True

    def _verifybranch(self, branch):
        """ verify head nodes for the given branch. """
        # verification is lazy and memoized per branch, so each branch's
        # heads are checked against the changelog at most once
        if branch not in self._entries or branch in self._verifiedbranches:
            return
        for n in self._entries[branch]:
            if not self._hasnode(n):
                _unknownnode(n)

        self._verifiedbranches.add(branch)

    def _verifyall(self):
        """ verifies nodes of all the branches """
        needverification = set(self._entries.keys()) - self._verifiedbranches
        for b in needverification:
            self._verifybranch(b)

    def __iter__(self):
        # iterating does not trigger verification; only value access does
        return iter(self._entries)

    def __setitem__(self, key, value):
        self._entries[key] = value

    def __getitem__(self, key):
        # verify lazily before handing out heads for this branch
        self._verifybranch(key)
        return self._entries[key]

    def __contains__(self, key):
        self._verifybranch(key)
        return key in self._entries

    def iteritems(self):
        """Yield (branch, heads) pairs, verifying each branch lazily."""
        for k, v in pycompat.iteritems(self._entries):
            self._verifybranch(k)
            yield k, v

    items = iteritems

    def hasbranch(self, label):
        """ checks whether a branch of this name exists or not """
        self._verifybranch(label)
        return label in self._entries

    @classmethod
    def fromfile(cls, repo):
        """Build a branchcache from the on-disk cache file of ``repo``.

        Returns None when no usable cache file exists; on any malformed or
        stale content the partial cache is discarded (also None).
        """
        f = None
        try:
            f = repo.cachevfs(cls._filename(repo))
            lineiter = iter(f)
            # first line: "<tip hex> <tip rev> [filtered hash]"
            cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2)
            last, lrev = cachekey[:2]
            last, lrev = bin(last), int(lrev)
            filteredhash = None
            hasnode = repo.changelog.hasnode
            if len(cachekey) > 2:
                filteredhash = bin(cachekey[2])
            bcache = cls(
                tipnode=last,
                tiprev=lrev,
                filteredhash=filteredhash,
                hasnode=hasnode,
            )
            if not bcache.validfor(repo):
                # invalidate the cache
                raise ValueError('tip differs')
            bcache.load(repo, lineiter)
        except (IOError, OSError):
            # no cache file (or unreadable): caller will rebuild
            return None

        except Exception as inst:
            if repo.ui.debugflag:
                msg = b'invalid %s: %s\n'
                repo.ui.debug(
                    msg
                    % (
                        _branchcachedesc(repo),
                        pycompat.bytestr(
                            inst  # pytype: disable=wrong-arg-types
                        ),
                    )
                )
            bcache = None

        finally:
            if f:
                f.close()

        return bcache

    def load(self, repo, lineiter):
        """ fully loads the branchcache by reading from the file using the line
        iterator passed"""
        for line in lineiter:
            line = line.rstrip(b'\n')
            if not line:
                continue
            # each line: "<head hex node> <o|c> <branch name>"
            node, state, label = line.split(b" ", 2)
            if state not in b'oc':
                raise ValueError('invalid branch state')
            label = encoding.tolocal(label.strip())
            node = bin(node)
            self._entries.setdefault(label, []).append(node)
            if state == b'c':
                self._closednodes.add(node)

    @staticmethod
    def _filename(repo):
        """name of a branchcache file for a given repo or repoview"""
        filename = b"branch2"
        if repo.filtername:
            # each filter level gets its own cache file, e.g. branch2-served
            filename = b'%s-%s' % (filename, repo.filtername)
        return filename

    def validfor(self, repo):
        """Is the cache content valid regarding a repo

        - False when cached tipnode is unknown or if we detect a strip.
        - True when cache is up to date or a subset of current repo."""
        try:
            return (self.tipnode == repo.changelog.node(self.tiprev)) and (
                self.filteredhash == scmutil.filteredhash(repo, self.tiprev)
            )
        except IndexError:
            # tiprev is beyond the current changelog: a strip happened
            return False

    def _branchtip(self, heads):
        '''Return tuple with last open head in heads and false,
        otherwise return last closed head and true.'''
        tip = heads[-1]
        closed = True
        # scan from the tipmost head down, stopping at the first open one
        for h in reversed(heads):
            if h not in self._closednodes:
                tip = h
                closed = False
                break
        return tip, closed

    def branchtip(self, branch):
        '''Return the tipmost open head on branch head, otherwise return the
        tipmost closed head on branch.
        Raise KeyError for unknown branch.'''
        return self._branchtip(self[branch])[0]

    def iteropen(self, nodes):
        """Filter ``nodes`` down to those that do not close their branch."""
        return (n for n in nodes if n not in self._closednodes)

    def branchheads(self, branch, closed=False):
        """Return the head nodes of ``branch``; include closed heads only
        when ``closed`` is True. Raises KeyError for an unknown branch."""
        self._verifybranch(branch)
        heads = self._entries[branch]
        if not closed:
            heads = list(self.iteropen(heads))
        return heads

    def iterbranches(self):
        """Yield (branch, heads, branchtip, closed) tuples for all branches."""
        for bn, heads in pycompat.iteritems(self):
            yield (bn, heads) + self._branchtip(heads)

    def iterheads(self):
        """ returns all the heads """
        self._verifyall()
        return pycompat.itervalues(self._entries)

    def copy(self):
        """return an deep copy of the branchcache object"""
        return type(self)(
            self._entries,
            self.tipnode,
            self.tiprev,
            self.filteredhash,
            self._closednodes,
        )

    def write(self, repo):
        """Serialize this cache to ``repo``'s cache file (format documented
        on the class). Write failures are logged and otherwise ignored."""
        try:
            f = repo.cachevfs(self._filename(repo), b"w", atomictemp=True)
            cachekey = [hex(self.tipnode), b'%d' % self.tiprev]
            if self.filteredhash is not None:
                cachekey.append(hex(self.filteredhash))
            f.write(b" ".join(cachekey) + b'\n')
            nodecount = 0
            for label, nodes in sorted(pycompat.iteritems(self._entries)):
                label = encoding.fromlocal(label)
                for node in nodes:
                    nodecount += 1
                    if node in self._closednodes:
                        state = b'c'
                    else:
                        state = b'o'
                    f.write(b"%s %s %s\n" % (hex(node), state, label))
            f.close()
            repo.ui.log(
                b'branchcache',
                b'wrote %s with %d labels and %d nodes\n',
                _branchcachedesc(repo),
                len(self._entries),
                nodecount,
            )
        except (IOError, OSError, error.Abort) as inst:
            # Abort may be raised by read only opener, so log and continue
            repo.ui.debug(
                b"couldn't write branch cache: %s\n"
                % stringutil.forcebytestr(inst)
            )

    def update(self, repo, revgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of nodes that are strictly a superset of
        heads missing, this function updates self to be correct.
        """
        starttime = util.timer()
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        getbranchinfo = repo.revbranchcache().branchinfo
        for r in revgen:
            branch, closesbranch = getbranchinfo(r)
            newbranches.setdefault(branch, []).append(r)
            if closesbranch:
                self._closednodes.add(cl.node(r))

        # fetch current topological heads to speed up filtering
        topoheads = set(cl.headrevs())

        # new tip revision which we found after iterating items from new
        # branches
        ntiprev = self.tiprev

        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newheadrevs in pycompat.iteritems(newbranches):
            bheads = self._entries.setdefault(branch, [])
            bheadset = set(cl.rev(node) for node in bheads)

            # This have been tested True on all internal usage of this function.
            # run it again in case of doubt
            # assert not (set(bheadrevs) & set(newheadrevs))
            bheadset.update(newheadrevs)

            # This prunes out two kinds of heads - heads that are superseded by
            # a head in newheadrevs, and newheadrevs that are not heads because
            # an existing head is their descendant.
            uncertain = bheadset - topoheads
            if uncertain:
                floorrev = min(uncertain)
                ancestors = set(cl.ancestors(newheadrevs, floorrev))
                bheadset -= ancestors
            bheadrevs = sorted(bheadset)
            self[branch] = [cl.node(rev) for rev in bheadrevs]
            tiprev = bheadrevs[-1]
            if tiprev > ntiprev:
                ntiprev = tiprev

        if ntiprev > self.tiprev:
            self.tiprev = ntiprev
            self.tipnode = cl.node(ntiprev)

        if not self.validfor(repo):
            # cache key are not valid anymore
            # recompute the tip from scratch over all known heads
            self.tipnode = nullid
            self.tiprev = nullrev
            for heads in self.iterheads():
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = scmutil.filteredhash(repo, self.tiprev)

        duration = util.timer() - starttime
        repo.ui.log(
            b'branchcache',
            b'updated %s in %.4f seconds\n',
            _branchcachedesc(repo),
            duration,
        )

        # persist the refreshed cache immediately
        self.write(repo)
class remotebranchcache(branchcache):
    """Branchmap info for a remote connection, should not write locally"""

    def write(self, repo):
        # deliberately a no-op: data describing a remote repository must
        # never overwrite the local on-disk branch cache
        pass
Mads Kiilerich
|
# Revision branch info cache
_rbcversion = b'-v1'
# cache file names (versioned so a format change invalidates old files)
_rbcnames = b'rbc-names' + _rbcversion
_rbcrevs = b'rbc-revs' + _rbcversion
# [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
_rbcrecfmt = b'>4sI'
_rbcrecsize = calcsize(_rbcrecfmt)
# number of leading node-hash bytes stored per record
_rbcnodelen = 4
# low 31 bits of the second field: index into the rbc-names list
_rbcbranchidxmask = 0x7FFFFFFF
# high bit of the second field: set when the revision closes its branch
_rbccloseflag = 0x80000000
Augie Fackler
|
r43346 | |||
Mads Kiilerich
|
class revbranchcache(object):
    """Persistent cache, mapping from revision number to branch name and close.
    This is a low level cache, independent of filtering.

    Branch names are stored in rbc-names in internal encoding separated by 0.
    rbc-names is append-only, and each branch name is only stored once and will
    thus have a unique index.

    The branch info for each revision is stored in rbc-revs as constant size
    records. The whole file is read into memory, but it is only 'parsed' on
    demand. The file is usually append-only but will be truncated if repo
    modification is detected.
    The record for each revision contains the first 4 bytes of the
    corresponding node hash, and the record is only used if it still matches.
    Even a completely trashed rbc-revs fill thus still give the right result
    while converging towards full recovery ... assuming no incorrectly matching
    node hashes.
    The record also contains 4 bytes where 31 bits contains the index of the
    branch and the last bit indicate that it is a branch close commit.
    The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
    and will grow with it but be 1/8th of its size.
    """

    def __init__(self, repo, readonly=True):
        # this cache is only meaningful on the unfiltered repo
        assert repo.filtername is None
        self._repo = repo
        self._names = []  # branch names in local encoding with static index
        self._rbcrevs = bytearray()  # raw rbc-revs records, parsed on demand
        self._rbcsnameslen = 0  # length of names read at _rbcsnameslen
        try:
            bndata = repo.cachevfs.read(_rbcnames)
            self._rbcsnameslen = len(bndata)  # for verification before writing
            if bndata:
                self._names = [
                    encoding.tolocal(bn) for bn in bndata.split(b'\0')
                ]
        except (IOError, OSError):
            if readonly:
                # don't try to use cache - fall back to the slow path
                self.branchinfo = self._branchinfo

        if self._names:
            try:
                data = repo.cachevfs.read(_rbcrevs)
                self._rbcrevs[:] = data
            except (IOError, OSError) as inst:
                repo.ui.debug(
                    b"couldn't read revision branch cache: %s\n"
                    % stringutil.forcebytestr(inst)
                )
        # remember number of good records on disk
        self._rbcrevslen = min(
            len(self._rbcrevs) // _rbcrecsize, len(repo.changelog)
        )
        if self._rbcrevslen == 0:
            # no usable rev records: the name indices are unreferenced,
            # drop them so the files get rewritten consistently
            self._names = []
        self._rbcnamescount = len(self._names)  # number of names read at
        # _rbcsnameslen

    def _clear(self):
        """Reset the in-memory cache state, allocating empty records for
        every changelog revision."""
        self._rbcsnameslen = 0
        del self._names[:]
        self._rbcnamescount = 0
        self._rbcrevslen = len(self._repo.changelog)
        self._rbcrevs = bytearray(self._rbcrevslen * _rbcrecsize)
        # _namesreverse is derived from _names; drop the memoized value
        util.clearcachedproperty(self, b'_namesreverse')

    @util.propertycache
    def _namesreverse(self):
        # branch name -> index in self._names, built lazily
        return dict((b, r) for r, b in enumerate(self._names))

    def branchinfo(self, rev):
        """Return branch name and close flag for rev, using and updating
        persistent cache."""
        changelog = self._repo.changelog
        rbcrevidx = rev * _rbcrecsize

        # avoid negative index, changelog.read(nullrev) is fast without cache
        if rev == nullrev:
            return changelog.branchinfo(rev)

        # if requested rev isn't allocated, grow and cache the rev info
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            return self._branchinfo(rev)

        # fast path: extract data from cache, use it if node is matching
        reponode = changelog.node(rev)[:_rbcnodelen]
        cachenode, branchidx = unpack_from(
            _rbcrecfmt, util.buffer(self._rbcrevs), rbcrevidx
        )
        close = bool(branchidx & _rbccloseflag)
        if close:
            branchidx &= _rbcbranchidxmask
        if cachenode == b'\0\0\0\0':
            # record was never filled in; fall through to the slow path
            pass
        elif cachenode == reponode:
            try:
                return self._names[branchidx], close
            except IndexError:
                # recover from invalid reference to unknown branch
                self._repo.ui.debug(
                    b"referenced branch names not found"
                    b" - rebuilding revision branch cache from scratch\n"
                )
                self._clear()
        else:
            # rev/node map has changed, invalidate the cache from here up
            self._repo.ui.debug(
                b"history modification detected - truncating "
                b"revision branch cache to revision %d\n" % rev
            )
            truncate = rbcrevidx + _rbcrecsize
            del self._rbcrevs[truncate:]
            self._rbcrevslen = min(self._rbcrevslen, truncate)

        # fall back to slow path and make sure it will be written to disk
        return self._branchinfo(rev)

    def _branchinfo(self, rev):
        """Retrieve branch info from changelog and update _rbcrevs"""
        changelog = self._repo.changelog
        b, close = changelog.branchinfo(rev)
        if b in self._namesreverse:
            branchidx = self._namesreverse[b]
        else:
            # first time we see this branch: append it (rbc-names is
            # append-only, see class docstring)
            branchidx = len(self._names)
            self._names.append(b)
            self._namesreverse[b] = branchidx
        reponode = changelog.node(rev)
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, reponode, branchidx)
        return b, close

    def setdata(self, branch, rev, node, close):
        """add new data information to the cache"""
        if branch in self._namesreverse:
            branchidx = self._namesreverse[branch]
        else:
            branchidx = len(self._names)
            self._names.append(branch)
            self._namesreverse[branch] = branchidx
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, node, branchidx)
        # If no cache data were readable (non exists, bad permission, etc)
        # the cache was bypassing itself by setting:
        #
        #   self.branchinfo = self._branchinfo
        #
        # Since we now have data in the cache, we need to drop this bypassing.
        if 'branchinfo' in vars(self):
            del self.branchinfo

    def _setcachedata(self, rev, node, branchidx):
        """Writes the node's branch data to the in-memory cache data."""
        if rev == nullrev:
            # nullrev has no record slot (see branchinfo fast path)
            return
        rbcrevidx = rev * _rbcrecsize
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            # grow the buffer with zeroed records up to the changelog length
            self._rbcrevs.extend(
                b'\0'
                * (len(self._repo.changelog) * _rbcrecsize - len(self._rbcrevs))
            )
        pack_into(_rbcrecfmt, self._rbcrevs, rbcrevidx, node, branchidx)
        # mark everything from rev upward as dirty (not yet on disk)
        self._rbcrevslen = min(self._rbcrevslen, rev)

        tr = self._repo.currenttransaction()
        if tr:
            tr.addfinalize(b'write-revbranchcache', self.write)

    def write(self, tr=None):
        """Save branch cache if it is dirty."""
        repo = self._repo
        wlock = None
        step = b''
        try:
            # write the new names
            if self._rbcnamescount < len(self._names):
                wlock = repo.wlock(wait=False)
                step = b' names'
                self._writenames(repo)

            # write the new revs
            start = self._rbcrevslen * _rbcrecsize
            if start != len(self._rbcrevs):
                # NOTE(review): this label looks like it should be b' revs'
                # to mirror the b' names' step above — confirm against
                # upstream before changing
                step = b''
                if wlock is None:
                    wlock = repo.wlock(wait=False)
                self._writerevs(repo, start)

        except (IOError, OSError, error.Abort, error.LockError) as inst:
            # best-effort cache: failure to persist is only debug-logged
            repo.ui.debug(
                b"couldn't write revision branch cache%s: %s\n"
                % (step, stringutil.forcebytestr(inst))
            )
        finally:
            if wlock is not None:
                wlock.release()

    def _writenames(self, repo):
        """ write the new branch names to revbranchcache """
        if self._rbcnamescount != 0:
            f = repo.cachevfs.open(_rbcnames, b'ab')
            if f.tell() == self._rbcsnameslen:
                # file matches what we last read: append a separator and
                # continue appending the new names below
                f.write(b'\0')
            else:
                # file changed behind our back: rewrite it from scratch
                f.close()
                repo.ui.debug(b"%s changed - rewriting it\n" % _rbcnames)
                self._rbcnamescount = 0
                self._rbcrevslen = 0
        if self._rbcnamescount == 0:
            # before rewriting names, make sure references are removed
            repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
            f = repo.cachevfs.open(_rbcnames, b'wb')
        f.write(
            b'\0'.join(
                encoding.fromlocal(b)
                for b in self._names[self._rbcnamescount :]
            )
        )
        self._rbcsnameslen = f.tell()
        f.close()
        self._rbcnamescount = len(self._names)

    def _writerevs(self, repo, start):
        """ write the new revs to revbranchcache """
        revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
        with repo.cachevfs.open(_rbcrevs, b'ab') as f:
            if f.tell() != start:
                repo.ui.debug(
                    b"truncating cache/%s to %d\n" % (_rbcrevs, start)
                )
                f.seek(start)
                if f.tell() != start:
                    # seek failed to land where expected: rewrite fully
                    start = 0
                    f.seek(start)
                f.truncate()
            end = revs * _rbcrecsize
            f.write(self._rbcrevs[start:end])
        self._rbcrevslen = revs