branchmap.py
931 lines
| 33.1 KiB
| text/x-python
|
PythonLexer
/ mercurial / branchmap.py
Pierre-Yves David
|
# branchmap.py - logic to compute, maintain and store the branchmap of a local repo
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
Pierre-Yves David
|
r18117 | |||
Gregory Szorc
|
r25918 | |||
import struct | ||||
from .node import ( | ||||
bin, | ||||
hex, | ||||
nullrev, | ||||
) | ||||
r52178 | ||||
from typing import ( | ||||
Callable, | ||||
Dict, | ||||
Iterable, | ||||
List, | ||||
Optional, | ||||
Set, | ||||
TYPE_CHECKING, | ||||
Tuple, | ||||
Union, | ||||
) | ||||
Gregory Szorc
|
r25918 | from . import ( | ||
encoding, | ||||
Pierre-Yves David
|
r26587 | error, | ||
r49536 | obsolete, | |||
Gregory Szorc
|
r25918 | scmutil, | ||
Simon Farnsworth
|
r30975 | util, | ||
Gregory Szorc
|
r25918 | ) | ||
r52178 | ||||
Yuya Nishihara
|
r37102 | from .utils import ( | ||
r42314 | repoviewutil, | |||
Yuya Nishihara
|
r37102 | stringutil, | ||
) | ||||
Gregory Szorc
|
r25918 | |||
r52178 | if TYPE_CHECKING: | |||
Matt Harbison
|
r47552 | from . import localrepo | ||
Augie Fackler
|
r44035 | |||
r52178 | assert [localrepo] | |||
Augie Fackler
|
r44035 | |||
Augie Fackler
|
r43346 | subsettable = repoviewutil.subsettable | ||
r42314 | ||||
Gregory Szorc
|
r25918 | calcsize = struct.calcsize | ||
Mads Kiilerich
|
r31370 | pack_into = struct.pack_into | ||
unpack_from = struct.unpack_from | ||||
Pierre-Yves David
|
r18117 | |||
Pierre-Yves David
|
r18118 | |||
Gregory Szorc
|
r49801 | class BranchMapCache: | ||
Pulkit Goyal
|
r41867 | """mapping of filtered views of repo with their branchcache""" | ||
Augie Fackler
|
r43346 | |||
Martijn Pieters
|
r41764 | def __init__(self): | ||
self._per_filter = {} | ||||
Martijn Pieters
|
r41708 | |||
Martijn Pieters
|
r41764 | def __getitem__(self, repo): | ||
self.updatecache(repo) | ||||
return self._per_filter[repo.filtername] | ||||
def updatecache(self, repo): | ||||
"""Update the cache for the given filtered view on a repository""" | ||||
# This can trigger updates for the caches for subsets of the filtered | ||||
# view, e.g. when there is no cache for this filtered view or the cache | ||||
# is stale. | ||||
Pierre-Yves David
|
r18121 | |||
Martijn Pieters
|
r41764 | cl = repo.changelog | ||
filtername = repo.filtername | ||||
bcache = self._per_filter.get(filtername) | ||||
if bcache is None or not bcache.validfor(repo): | ||||
# cache object missing or cache object stale? Read from disk | ||||
bcache = branchcache.fromfile(repo) | ||||
Martijn Pieters
|
r41708 | |||
Martijn Pieters
|
r41764 | revs = [] | ||
if bcache is None: | ||||
# no (fresh) cache available anymore, perhaps we can re-use | ||||
# the cache for a subset, then extend that to add info on missing | ||||
# revisions. | ||||
subsetname = subsettable.get(filtername) | ||||
if subsetname is not None: | ||||
subset = repo.filtered(subsetname) | ||||
bcache = self[subset].copy() | ||||
extrarevs = subset.changelog.filteredrevs - cl.filteredrevs | ||||
revs.extend(r for r in extrarevs if r <= bcache.tiprev) | ||||
else: | ||||
# nothing to fall back on, start empty. | ||||
Joerg Sonnenberger
|
r47538 | bcache = branchcache(repo) | ||
Durham Goode
|
r24373 | |||
Martijn Pieters
|
r41764 | revs.extend(cl.revs(start=bcache.tiprev + 1)) | ||
if revs: | ||||
bcache.update(repo, revs) | ||||
Pierre-Yves David
|
r18124 | |||
Martijn Pieters
|
r41764 | assert bcache.validfor(repo), filtername | ||
self._per_filter[repo.filtername] = bcache | ||||
def replace(self, repo, remotebranchmap): | ||||
"""Replace the branchmap cache for a repo with a branch mapping. | ||||
This is likely only called during clone with a branch map from a | ||||
remote. | ||||
Gregory Szorc
|
r26460 | |||
Martijn Pieters
|
r41764 | """ | ||
cl = repo.changelog | ||||
clrev = cl.rev | ||||
clbranchinfo = cl.branchinfo | ||||
rbheads = [] | ||||
Martin von Zweigbergk
|
r44086 | closed = set() | ||
Gregory Szorc
|
r49790 | for bheads in remotebranchmap.values(): | ||
Martijn Pieters
|
r41764 | rbheads += bheads | ||
for h in bheads: | ||||
r = clrev(h) | ||||
b, c = clbranchinfo(r) | ||||
if c: | ||||
Martin von Zweigbergk
|
r44086 | closed.add(h) | ||
Gregory Szorc
|
r26460 | |||
Martijn Pieters
|
r41764 | if rbheads: | ||
rtiprev = max((int(clrev(node)) for node in rbheads)) | ||||
cache = branchcache( | ||||
Joerg Sonnenberger
|
r47538 | repo, | ||
Augie Fackler
|
r43346 | remotebranchmap, | ||
repo[rtiprev].node(), | ||||
rtiprev, | ||||
Martin von Zweigbergk
|
r44086 | closednodes=closed, | ||
Augie Fackler
|
r43346 | ) | ||
Gregory Szorc
|
r26460 | |||
Martijn Pieters
|
r41764 | # Try to stick it as low as possible | ||
# filter above served are unlikely to be fetch from a clone | ||||
Augie Fackler
|
r43347 | for candidate in (b'base', b'immutable', b'served'): | ||
Martijn Pieters
|
r41764 | rview = repo.filtered(candidate) | ||
if cache.validfor(rview): | ||||
self._per_filter[candidate] = cache | ||||
cache.write(rview) | ||||
return | ||||
def clear(self): | ||||
self._per_filter.clear() | ||||
r49526 | def write_delayed(self, repo): | |||
unfi = repo.unfiltered() | ||||
for filtername, cache in self._per_filter.items(): | ||||
if cache._delayed: | ||||
repo = unfi.filtered(filtername) | ||||
cache.write(repo) | ||||
Augie Fackler
|
r43346 | |||
Pulkit Goyal
|
r42289 | def _unknownnode(node): | ||
Augie Fackler
|
r46554 | """raises ValueError when branchcache found a node which does not exists""" | ||
Manuel Jacob
|
r50195 | raise ValueError('node %s does not exist' % node.hex()) | ||
Gregory Szorc
|
r26460 | |||
Augie Fackler
|
r43346 | |||
Martin von Zweigbergk
|
r42805 | def _branchcachedesc(repo): | ||
if repo.filtername is not None: | ||||
Augie Fackler
|
r43347 | return b'branch cache (%s)' % repo.filtername | ||
Martin von Zweigbergk
|
r42805 | else: | ||
Augie Fackler
|
r43347 | return b'branch cache' | ||
Martin von Zweigbergk
|
r42805 | |||
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
r49801 | class branchcache: | ||
Brodie Rao
|
r20181 | """A dict like object that hold branches heads cache. | ||
This cache is used to avoid costly computations to determine all the | ||||
branch heads of a repo. | ||||
The cache is serialized on disk in the following format: | ||||
<tip hex node> <tip rev number> [optional filtered repo hex hash] | ||||
Brodie Rao
|
r20185 | <branch head hex node> <open/closed state> <branch name> | ||
<branch head hex node> <open/closed state> <branch name> | ||||
Brodie Rao
|
r20181 | ... | ||
The first line is used to check if the cache is still valid. If the | ||||
branch cache is for a filtered repo view, an optional third hash is | ||||
r49536 | included that hashes the hashes of all filtered and obsolete revisions. | |||
Brodie Rao
|
r20185 | |||
The open/closed state is represented by a single letter 'o' or 'c'. | ||||
This field can be used to avoid changelog reads when determining if a | ||||
branch head closes a branch or not. | ||||
Brodie Rao
|
r20181 | """ | ||
Pulkit Goyal
|
r41826 | |||
Augie Fackler
|
r43346 | def __init__( | ||
self, | ||||
r52180 | repo: "localrepo.localrepository", | |||
entries: Union[ | ||||
Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]] | ||||
] = (), | ||||
tipnode: Optional[bytes] = None, | ||||
tiprev: Optional[int] = nullrev, | ||||
filteredhash: Optional[bytes] = None, | ||||
closednodes: Optional[Set[bytes]] = None, | ||||
hasnode: Optional[Callable[[bytes], bool]] = None, | ||||
) -> None: | ||||
Augie Fackler
|
r46554 | """hasnode is a function which can be used to verify whether changelog | ||
Pulkit Goyal
|
r42174 | has a given node or not. If it's not provided, we assume that every node | ||
Augie Fackler
|
r46554 | we have exists in changelog""" | ||
Joerg Sonnenberger
|
r47538 | self._repo = repo | ||
r49526 | self._delayed = False | |||
Joerg Sonnenberger
|
r47771 | if tipnode is None: | ||
self.tipnode = repo.nullid | ||||
else: | ||||
self.tipnode = tipnode | ||||
Pulkit Goyal
|
r41826 | self.tiprev = tiprev | ||
self.filteredhash = filteredhash | ||||
# closednodes is a set of nodes that close their branch. If the branch | ||||
# cache has been updated, it may contain nodes that are no longer | ||||
# heads. | ||||
if closednodes is None: | ||||
self._closednodes = set() | ||||
else: | ||||
Augie Fackler
|
r44035 | self._closednodes = closednodes | ||
Pulkit Goyal
|
r42172 | self._entries = dict(entries) | ||
Pulkit Goyal
|
r42173 | # whether closed nodes are verified or not | ||
self._closedverified = False | ||||
# branches for which nodes are verified | ||||
self._verifiedbranches = set() | ||||
Pulkit Goyal
|
r42174 | self._hasnode = hasnode | ||
if self._hasnode is None: | ||||
self._hasnode = lambda x: True | ||||
Pulkit Goyal
|
r42168 | |||
Pulkit Goyal
|
r42289 | def _verifyclosed(self): | ||
Kyle Lippincott
|
r47856 | """verify the closed nodes we have""" | ||
Pulkit Goyal
|
r42289 | if self._closedverified: | ||
return | ||||
for node in self._closednodes: | ||||
if not self._hasnode(node): | ||||
_unknownnode(node) | ||||
self._closedverified = True | ||||
def _verifybranch(self, branch): | ||||
Kyle Lippincott
|
r47856 | """verify head nodes for the given branch.""" | ||
Pulkit Goyal
|
r42289 | if branch not in self._entries or branch in self._verifiedbranches: | ||
return | ||||
for n in self._entries[branch]: | ||||
if not self._hasnode(n): | ||||
_unknownnode(n) | ||||
self._verifiedbranches.add(branch) | ||||
def _verifyall(self): | ||||
Kyle Lippincott
|
r47856 | """verifies nodes of all the branches""" | ||
Pulkit Goyal
|
r42302 | needverification = set(self._entries.keys()) - self._verifiedbranches | ||
for b in needverification: | ||||
Pulkit Goyal
|
r42289 | self._verifybranch(b) | ||
Pulkit Goyal
|
r42168 | def __iter__(self): | ||
Pulkit Goyal
|
r42172 | return iter(self._entries) | ||
Pulkit Goyal
|
r42168 | |||
def __setitem__(self, key, value): | ||||
Pulkit Goyal
|
r42172 | self._entries[key] = value | ||
Pulkit Goyal
|
r42168 | |||
def __getitem__(self, key): | ||||
Pulkit Goyal
|
r42290 | self._verifybranch(key) | ||
Pulkit Goyal
|
r42172 | return self._entries[key] | ||
Pulkit Goyal
|
r42168 | |||
Pulkit Goyal
|
r42282 | def __contains__(self, key): | ||
Pulkit Goyal
|
r42290 | self._verifybranch(key) | ||
Pulkit Goyal
|
r42282 | return key in self._entries | ||
Pulkit Goyal
|
r42168 | def iteritems(self): | ||
Gregory Szorc
|
r49768 | for k, v in self._entries.items(): | ||
Pulkit Goyal
|
r42303 | self._verifybranch(k) | ||
yield k, v | ||||
Pulkit Goyal
|
r42168 | |||
Martin von Zweigbergk
|
r42809 | items = iteritems | ||
Pulkit Goyal
|
r42171 | def hasbranch(self, label): | ||
Kyle Lippincott
|
r47856 | """checks whether a branch of this name exists or not""" | ||
Pulkit Goyal
|
r42290 | self._verifybranch(label) | ||
Pulkit Goyal
|
r42172 | return label in self._entries | ||
Pulkit Goyal
|
r42171 | |||
Martijn Pieters
|
r41706 | @classmethod | ||
def fromfile(cls, repo): | ||||
f = None | ||||
try: | ||||
f = repo.cachevfs(cls._filename(repo)) | ||||
lineiter = iter(f) | ||||
Augie Fackler
|
r43347 | cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2) | ||
Martijn Pieters
|
r41706 | last, lrev = cachekey[:2] | ||
last, lrev = bin(last), int(lrev) | ||||
filteredhash = None | ||||
Pulkit Goyal
|
r42174 | hasnode = repo.changelog.hasnode | ||
Martijn Pieters
|
r41706 | if len(cachekey) > 2: | ||
filteredhash = bin(cachekey[2]) | ||||
Augie Fackler
|
r43346 | bcache = cls( | ||
Joerg Sonnenberger
|
r47538 | repo, | ||
Augie Fackler
|
r43346 | tipnode=last, | ||
tiprev=lrev, | ||||
filteredhash=filteredhash, | ||||
hasnode=hasnode, | ||||
) | ||||
Martijn Pieters
|
r41706 | if not bcache.validfor(repo): | ||
# invalidate the cache | ||||
Augie Fackler
|
r43906 | raise ValueError('tip differs') | ||
Pulkit Goyal
|
r41974 | bcache.load(repo, lineiter) | ||
Martijn Pieters
|
r41706 | except (IOError, OSError): | ||
return None | ||||
except Exception as inst: | ||||
if repo.ui.debugflag: | ||||
Augie Fackler
|
r43347 | msg = b'invalid %s: %s\n' | ||
Augie Fackler
|
r43346 | repo.ui.debug( | ||
Augie Fackler
|
r43805 | msg | ||
% ( | ||||
_branchcachedesc(repo), | ||||
Matt Harbison
|
r47512 | stringutil.forcebytestr(inst), | ||
Augie Fackler
|
r43805 | ) | ||
Augie Fackler
|
r43346 | ) | ||
Martijn Pieters
|
r41706 | bcache = None | ||
finally: | ||||
if f: | ||||
f.close() | ||||
return bcache | ||||
Pulkit Goyal
|
r41974 | def load(self, repo, lineiter): | ||
Augie Fackler
|
r46554 | """fully loads the branchcache by reading from the file using the line | ||
Pulkit Goyal
|
r41974 | iterator passed""" | ||
Pulkit Goyal
|
r41959 | for line in lineiter: | ||
Augie Fackler
|
r43347 | line = line.rstrip(b'\n') | ||
Pulkit Goyal
|
r41959 | if not line: | ||
continue | ||||
Augie Fackler
|
r43347 | node, state, label = line.split(b" ", 2) | ||
if state not in b'oc': | ||||
Augie Fackler
|
r43906 | raise ValueError('invalid branch state') | ||
Pulkit Goyal
|
r41959 | label = encoding.tolocal(label.strip()) | ||
node = bin(node) | ||||
Pulkit Goyal
|
r42172 | self._entries.setdefault(label, []).append(node) | ||
Augie Fackler
|
r43347 | if state == b'c': | ||
Pulkit Goyal
|
r41959 | self._closednodes.add(node) | ||
Martijn Pieters
|
r41706 | @staticmethod | ||
def _filename(repo): | ||||
"""name of a branchcache file for a given repo or repoview""" | ||||
Augie Fackler
|
r43347 | filename = b"branch2" | ||
Martijn Pieters
|
r41706 | if repo.filtername: | ||
Augie Fackler
|
r43347 | filename = b'%s-%s' % (filename, repo.filtername) | ||
Martijn Pieters
|
r41706 | return filename | ||
Pierre-Yves David
|
r18124 | |||
Pierre-Yves David
|
r18132 | def validfor(self, repo): | ||
r49568 | """check that cache contents are valid for (a subset of) this repo | |||
Pierre-Yves David
|
r18132 | |||
r49568 | - False when the order of changesets changed or if we detect a strip. | |||
- True when cache is up-to-date for the current repo or its subset.""" | ||||
Pierre-Yves David
|
r18132 | try: | ||
r49568 | node = repo.changelog.node(self.tiprev) | |||
Pierre-Yves David
|
r18132 | except IndexError: | ||
r49568 | # changesets were stripped and now we don't even have enough to | |||
# find tiprev | ||||
Pierre-Yves David
|
r18132 | return False | ||
r49568 | if self.tipnode != node: | |||
# tiprev doesn't correspond to tipnode: repo was stripped, or this | ||||
# repo has a different order of changesets | ||||
return False | ||||
tiphash = scmutil.filteredhash(repo, self.tiprev, needobsolete=True) | ||||
# hashes don't match if this repo view has a different set of filtered | ||||
# revisions (e.g. due to phase changes) or obsolete revisions (e.g. | ||||
# history was rewritten) | ||||
return self.filteredhash == tiphash | ||||
Pierre-Yves David
|
r18132 | |||
Brodie Rao
|
r20186 | def _branchtip(self, heads): | ||
Augie Fackler
|
r46554 | """Return tuple with last open head in heads and false, | ||
otherwise return last closed head and true.""" | ||||
Brodie Rao
|
r20186 | tip = heads[-1] | ||
closed = True | ||||
for h in reversed(heads): | ||||
if h not in self._closednodes: | ||||
tip = h | ||||
closed = False | ||||
break | ||||
return tip, closed | ||||
def branchtip(self, branch): | ||||
Augie Fackler
|
r46554 | """Return the tipmost open head on branch head, otherwise return the | ||
Mads Kiilerich
|
r20245 | tipmost closed head on branch. | ||
Augie Fackler
|
r46554 | Raise KeyError for unknown branch.""" | ||
Brodie Rao
|
r20186 | return self._branchtip(self[branch])[0] | ||
the31k
|
r34076 | def iteropen(self, nodes): | ||
return (n for n in nodes if n not in self._closednodes) | ||||
Brodie Rao
|
r20188 | def branchheads(self, branch, closed=False): | ||
Pulkit Goyal
|
r42290 | self._verifybranch(branch) | ||
Pulkit Goyal
|
r42281 | heads = self._entries[branch] | ||
Brodie Rao
|
r20188 | if not closed: | ||
the31k
|
r34076 | heads = list(self.iteropen(heads)) | ||
Brodie Rao
|
r20188 | return heads | ||
Brodie Rao
|
r20190 | def iterbranches(self): | ||
Gregory Szorc
|
r49768 | for bn, heads in self.items(): | ||
Brodie Rao
|
r20190 | yield (bn, heads) + self._branchtip(heads) | ||
Pulkit Goyal
|
r42169 | def iterheads(self): | ||
Kyle Lippincott
|
r47856 | """returns all the heads""" | ||
Pulkit Goyal
|
r42290 | self._verifyall() | ||
Gregory Szorc
|
r49790 | return self._entries.values() | ||
Pulkit Goyal
|
r42169 | |||
Pierre-Yves David
|
r18232 | def copy(self): | ||
"""return an deep copy of the branchcache object""" | ||||
Pulkit Goyal
|
r42280 | return type(self)( | ||
Joerg Sonnenberger
|
r47538 | self._repo, | ||
Augie Fackler
|
r43346 | self._entries, | ||
self.tipnode, | ||||
self.tiprev, | ||||
self.filteredhash, | ||||
self._closednodes, | ||||
) | ||||
Pierre-Yves David
|
r18132 | |||
Pierre-Yves David
|
r18128 | def write(self, repo): | ||
r49526 | tr = repo.currenttransaction() | |||
if not getattr(tr, 'finalized', True): | ||||
# Avoid premature writing. | ||||
# | ||||
# (The cache warming setup by localrepo will update the file later.) | ||||
self._delayed = True | ||||
return | ||||
Pierre-Yves David
|
r18128 | try: | ||
r50098 | filename = self._filename(repo) | |||
with repo.cachevfs(filename, b"w", atomictemp=True) as f: | ||||
cachekey = [hex(self.tipnode), b'%d' % self.tiprev] | ||||
if self.filteredhash is not None: | ||||
cachekey.append(hex(self.filteredhash)) | ||||
f.write(b" ".join(cachekey) + b'\n') | ||||
nodecount = 0 | ||||
for label, nodes in sorted(self._entries.items()): | ||||
label = encoding.fromlocal(label) | ||||
for node in nodes: | ||||
nodecount += 1 | ||||
if node in self._closednodes: | ||||
state = b'c' | ||||
else: | ||||
state = b'o' | ||||
f.write(b"%s %s %s\n" % (hex(node), state, label)) | ||||
Augie Fackler
|
r43346 | repo.ui.log( | ||
Augie Fackler
|
r43347 | b'branchcache', | ||
b'wrote %s with %d labels and %d nodes\n', | ||||
Augie Fackler
|
r43346 | _branchcachedesc(repo), | ||
len(self._entries), | ||||
nodecount, | ||||
) | ||||
r49526 | self._delayed = False | |||
Pierre-Yves David
|
r26587 | except (IOError, OSError, error.Abort) as inst: | ||
Augie Fackler
|
r34369 | # Abort may be raised by read only opener, so log and continue | ||
Augie Fackler
|
r43346 | repo.ui.debug( | ||
Augie Fackler
|
r43347 | b"couldn't write branch cache: %s\n" | ||
Augie Fackler
|
r43346 | % stringutil.forcebytestr(inst) | ||
) | ||||
Pierre-Yves David
|
r18131 | |||
Pierre-Yves David
|
r18305 | def update(self, repo, revgen): | ||
Pierre-Yves David
|
r18131 | """Given a branchhead cache, self, that may have extra nodes or be | ||
Pierre-Yves David
|
r20263 | missing heads, and a generator of nodes that are strictly a superset of | ||
Pierre-Yves David
|
r18131 | heads missing, this function updates self to be correct. | ||
""" | ||||
Simon Farnsworth
|
r30975 | starttime = util.timer() | ||
Pierre-Yves David
|
r18131 | cl = repo.changelog | ||
# collect new branch entries | ||||
newbranches = {} | ||||
Durham Goode
|
r24373 | getbranchinfo = repo.revbranchcache().branchinfo | ||
Pierre-Yves David
|
r18307 | for r in revgen: | ||
Yuya Nishihara
|
r40455 | branch, closesbranch = getbranchinfo(r) | ||
Pierre-Yves David
|
r20262 | newbranches.setdefault(branch, []).append(r) | ||
Brodie Rao
|
r20185 | if closesbranch: | ||
Pierre-Yves David
|
r20262 | self._closednodes.add(cl.node(r)) | ||
Pierre-Yves David
|
r22357 | |||
Pulkit Goyal
|
r42400 | # new tip revision which we found after iterating items from new | ||
# branches | ||||
ntiprev = self.tiprev | ||||
Joerg Sonnenberger
|
r46880 | # Delay fetching the topological heads until they are needed. | ||
# A repository without non-continous branches can skip this part. | ||||
topoheads = None | ||||
# If a changeset is visible, its parents must be visible too, so | ||||
# use the faster unfiltered parent accessor. | ||||
parentrevs = repo.unfiltered().changelog.parentrevs | ||||
r49536 | # Faster than using ctx.obsolete() | |||
obsrevs = obsolete.getrevs(repo, b'obsolete') | ||||
Gregory Szorc
|
r49768 | for branch, newheadrevs in newbranches.items(): | ||
Joerg Sonnenberger
|
r46880 | # For every branch, compute the new branchheads. | ||
# A branchhead is a revision such that no descendant is on | ||||
# the same branch. | ||||
# | ||||
# The branchheads are computed iteratively in revision order. | ||||
# This ensures topological order, i.e. parents are processed | ||||
# before their children. Ancestors are inclusive here, i.e. | ||||
# any revision is an ancestor of itself. | ||||
# | ||||
# Core observations: | ||||
# - The current revision is always a branchhead for the | ||||
# repository up to that point. | ||||
# - It is the first revision of the branch if and only if | ||||
# there was no branchhead before. In that case, it is the | ||||
# only branchhead as there are no possible ancestors on | ||||
# the same branch. | ||||
# - If a parent is on the same branch, a branchhead can | ||||
# only be an ancestor of that parent, if it is parent | ||||
# itself. Otherwise it would have been removed as ancestor | ||||
# of that parent before. | ||||
# - Therefore, if all parents are on the same branch, they | ||||
# can just be removed from the branchhead set. | ||||
# - If one parent is on the same branch and the other is not | ||||
# and there was exactly one branchhead known, the existing | ||||
# branchhead can only be an ancestor if it is the parent. | ||||
# Otherwise it would have been removed as ancestor of | ||||
# the parent before. The other parent therefore can't have | ||||
# a branchhead as ancestor. | ||||
# - In all other cases, the parents on different branches | ||||
# could have a branchhead as ancestor. Those parents are | ||||
# kept in the "uncertain" set. If all branchheads are also | ||||
# topological heads, they can't have descendants and further | ||||
# checks can be skipped. Otherwise, the ancestors of the | ||||
# "uncertain" set are removed from branchheads. | ||||
# This computation is heavy and avoided if at all possible. | ||||
r49567 | bheads = self._entries.get(branch, []) | |||
Augie Fackler
|
r44937 | bheadset = {cl.rev(node) for node in bheads} | ||
Joerg Sonnenberger
|
r46880 | uncertain = set() | ||
for newrev in sorted(newheadrevs): | ||||
r49536 | if newrev in obsrevs: | |||
# We ignore obsolete changesets as they shouldn't be | ||||
# considered heads. | ||||
continue | ||||
Joerg Sonnenberger
|
r46880 | if not bheadset: | ||
bheadset.add(newrev) | ||||
continue | ||||
Pierre-Yves David
|
r18131 | |||
Joerg Sonnenberger
|
r46880 | parents = [p for p in parentrevs(newrev) if p != nullrev] | ||
samebranch = set() | ||||
otherbranch = set() | ||||
r49536 | obsparents = set() | |||
Joerg Sonnenberger
|
r46880 | for p in parents: | ||
r49536 | if p in obsrevs: | |||
# We ignored this obsolete changeset earlier, but now | ||||
# that it has non-ignored children, we need to make | ||||
# sure their ancestors are not considered heads. To | ||||
# achieve that, we will simply treat this obsolete | ||||
# changeset as a parent from other branch. | ||||
obsparents.add(p) | ||||
elif p in bheadset or getbranchinfo(p)[0] == branch: | ||||
Joerg Sonnenberger
|
r46880 | samebranch.add(p) | ||
else: | ||||
otherbranch.add(p) | ||||
r49536 | if not (len(bheadset) == len(samebranch) == 1): | |||
Joerg Sonnenberger
|
r46880 | uncertain.update(otherbranch) | ||
r49536 | uncertain.update(obsparents) | |||
Joerg Sonnenberger
|
r46880 | bheadset.difference_update(samebranch) | ||
bheadset.add(newrev) | ||||
Pierre-Yves David
|
r22357 | if uncertain: | ||
Joerg Sonnenberger
|
r46880 | if topoheads is None: | ||
topoheads = set(cl.headrevs()) | ||||
if bheadset - topoheads: | ||||
floorrev = min(bheadset) | ||||
r49536 | if floorrev <= max(uncertain): | |||
ancestors = set(cl.ancestors(uncertain, floorrev)) | ||||
bheadset -= ancestors | ||||
r49567 | if bheadset: | |||
self[branch] = [cl.node(rev) for rev in sorted(bheadset)] | ||||
r49536 | tiprev = max(newheadrevs) | |||
Pulkit Goyal
|
r42400 | if tiprev > ntiprev: | ||
ntiprev = tiprev | ||||
if ntiprev > self.tiprev: | ||||
self.tiprev = ntiprev | ||||
self.tipnode = cl.node(ntiprev) | ||||
Pierre-Yves David
|
r18131 | |||
Pierre-Yves David
|
r19838 | if not self.validfor(repo): | ||
r49536 | # old cache key is now invalid for the repo, but we've just updated | |||
# the cache and we assume it's valid, so let's make the cache key | ||||
# valid as well by recomputing it from the cached data | ||||
Joerg Sonnenberger
|
r47771 | self.tipnode = repo.nullid | ||
Pierre-Yves David
|
r18131 | self.tiprev = nullrev | ||
Pulkit Goyal
|
r42169 | for heads in self.iterheads(): | ||
r49536 | if not heads: | |||
# all revisions on a branch are obsolete | ||||
continue | ||||
# note: tiprev is not necessarily the tip revision of repo, | ||||
# because the tip could be obsolete (i.e. not a head) | ||||
Pierre-Yves David
|
r18131 | tiprev = max(cl.rev(node) for node in heads) | ||
if tiprev > self.tiprev: | ||||
self.tipnode = cl.node(tiprev) | ||||
self.tiprev = tiprev | ||||
r49536 | self.filteredhash = scmutil.filteredhash( | |||
repo, self.tiprev, needobsolete=True | ||||
) | ||||
Gregory Szorc
|
r21031 | |||
Simon Farnsworth
|
r30975 | duration = util.timer() - starttime | ||
Augie Fackler
|
r43346 | repo.ui.log( | ||
Augie Fackler
|
r43347 | b'branchcache', | ||
b'updated %s in %.4f seconds\n', | ||||
Augie Fackler
|
r43346 | _branchcachedesc(repo), | ||
duration, | ||||
) | ||||
Mads Kiilerich
|
r23785 | |||
Martijn Pieters
|
r41707 | self.write(repo) | ||
class remotebranchcache(branchcache): | ||||
"""Branchmap info for a remote connection, should not write locally""" | ||||
Augie Fackler
|
r43346 | |||
Martijn Pieters
|
r41707 | def write(self, repo): | ||
pass | ||||
Mads Kiilerich
|
r23785 | # Revision branch info cache | ||
Augie Fackler
|
r43347 | _rbcversion = b'-v1' | ||
_rbcnames = b'rbc-names' + _rbcversion | ||||
_rbcrevs = b'rbc-revs' + _rbcversion | ||||
Mads Kiilerich
|
r23785 | # [4 byte hash prefix][4 byte branch name number with sign bit indicating open] | ||
Augie Fackler
|
r43347 | _rbcrecfmt = b'>4sI' | ||
Mads Kiilerich
|
r23785 | _rbcrecsize = calcsize(_rbcrecfmt) | ||
Joerg Sonnenberger
|
r47069 | _rbcmininc = 64 * _rbcrecsize | ||
Mads Kiilerich
|
r23785 | _rbcnodelen = 4 | ||
Augie Fackler
|
r43346 | _rbcbranchidxmask = 0x7FFFFFFF | ||
Mads Kiilerich
|
r23785 | _rbccloseflag = 0x80000000 | ||
Augie Fackler
|
r43346 | |||
Arseniy Alekseyev
|
r52268 | class rbcrevs: | ||
"""a byte string consisting of an immutable prefix followed by a mutable suffix""" | ||||
def __init__(self, revs): | ||||
self._prefix = revs | ||||
self._rest = bytearray() | ||||
def __len__(self): | ||||
return len(self._prefix) + len(self._rest) | ||||
def unpack_record(self, rbcrevidx): | ||||
if rbcrevidx < len(self._prefix): | ||||
return unpack_from(_rbcrecfmt, util.buffer(self._prefix), rbcrevidx) | ||||
else: | ||||
return unpack_from( | ||||
_rbcrecfmt, | ||||
util.buffer(self._rest), | ||||
rbcrevidx - len(self._prefix), | ||||
) | ||||
def make_mutable(self): | ||||
if len(self._prefix) > 0: | ||||
entirety = bytearray() | ||||
entirety[:] = self._prefix | ||||
entirety.extend(self._rest) | ||||
self._rest = entirety | ||||
self._prefix = bytearray() | ||||
def truncate(self, pos): | ||||
self.make_mutable() | ||||
del self._rest[pos:] | ||||
def pack_into(self, rbcrevidx, node, branchidx): | ||||
if rbcrevidx < len(self._prefix): | ||||
self.make_mutable() | ||||
buf = self._rest | ||||
start_offset = rbcrevidx - len(self._prefix) | ||||
end_offset = start_offset + _rbcrecsize | ||||
if len(self._rest) < end_offset: | ||||
# bytearray doesn't allocate extra space at least in Python 3.7. | ||||
# When multiple changesets are added in a row, precise resize would | ||||
# result in quadratic complexity. Overallocate to compensate by | ||||
# using the classic doubling technique for dynamic arrays instead. | ||||
# If there was a gap in the map before, less space will be reserved. | ||||
self._rest.extend(b'\0' * end_offset) | ||||
return pack_into( | ||||
_rbcrecfmt, | ||||
buf, | ||||
start_offset, | ||||
node, | ||||
branchidx, | ||||
) | ||||
def extend(self, extension): | ||||
return self._rest.extend(extension) | ||||
def slice(self, begin, end): | ||||
if begin < len(self._prefix): | ||||
acc = bytearray() | ||||
acc[:] = self._prefix[begin:end] | ||||
acc.extend( | ||||
self._rest[begin - len(self._prefix) : end - len(self._prefix)] | ||||
) | ||||
return acc | ||||
return self._rest[begin - len(self._prefix) : end - len(self._prefix)] | ||||
Gregory Szorc
|
r49801 | class revbranchcache: | ||
Mads Kiilerich
|
r23785 | """Persistent cache, mapping from revision number to branch name and close. | ||
This is a low level cache, independent of filtering. | ||||
Branch names are stored in rbc-names in internal encoding separated by 0. | ||||
rbc-names is append-only, and each branch name is only stored once and will | ||||
thus have a unique index. | ||||
The branch info for each revision is stored in rbc-revs as constant size | ||||
records. The whole file is read into memory, but it is only 'parsed' on | ||||
demand. The file is usually append-only but will be truncated if repo | ||||
modification is detected. | ||||
The record for each revision contains the first 4 bytes of the | ||||
corresponding node hash, and the record is only used if it still matches. | ||||
Even a completely trashed rbc-revs fill thus still give the right result | ||||
while converging towards full recovery ... assuming no incorrectly matching | ||||
node hashes. | ||||
The record also contains 4 bytes where 31 bits contains the index of the | ||||
branch and the last bit indicate that it is a branch close commit. | ||||
The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i | ||||
and will grow with it but be 1/8th of its size. | ||||
""" | ||||
Mads Kiilerich
|
r24159 | def __init__(self, repo, readonly=True): | ||
Mads Kiilerich
|
r23785 | assert repo.filtername is None | ||
Durham Goode
|
r24374 | self._repo = repo | ||
Augie Fackler
|
r43346 | self._names = [] # branch names in local encoding with static index | ||
Arseniy Alekseyev
|
r52268 | self._rbcrevs = rbcrevs(bytearray()) | ||
Augie Fackler
|
r43346 | self._rbcsnameslen = 0 # length of names read at _rbcsnameslen | ||
Mads Kiilerich
|
r23785 | try: | ||
Boris Feld
|
r33535 | bndata = repo.cachevfs.read(_rbcnames) | ||
Augie Fackler
|
r43346 | self._rbcsnameslen = len(bndata) # for verification before writing | ||
Mads Kiilerich
|
r31371 | if bndata: | ||
Augie Fackler
|
r43346 | self._names = [ | ||
Augie Fackler
|
r43347 | encoding.tolocal(bn) for bn in bndata.split(b'\0') | ||
Augie Fackler
|
r43346 | ] | ||
Gregory Szorc
|
r29423 | except (IOError, OSError): | ||
Mads Kiilerich
|
r24159 | if readonly: | ||
# don't try to use cache - fall back to the slow path | ||||
self.branchinfo = self._branchinfo | ||||
Mads Kiilerich
|
r23785 | if self._names: | ||
try: | ||||
r52340 | if repo.ui.configbool(b'storage', b'revbranchcache.mmap'): | |||
Arseniy Alekseyev
|
r52268 | with repo.cachevfs(_rbcrevs) as fp: | ||
data = util.buffer(util.mmapread(fp)) | ||||
else: | ||||
data = repo.cachevfs.read(_rbcrevs) | ||||
self._rbcrevs = rbcrevs(data) | ||||
Gregory Szorc
|
r25660 | except (IOError, OSError) as inst: | ||
Augie Fackler
|
r43346 | repo.ui.debug( | ||
Augie Fackler
|
r43347 | b"couldn't read revision branch cache: %s\n" | ||
Augie Fackler
|
r43346 | % stringutil.forcebytestr(inst) | ||
) | ||||
Mads Kiilerich
|
r23785 | # remember number of good records on disk | ||
Augie Fackler
|
r43346 | self._rbcrevslen = min( | ||
len(self._rbcrevs) // _rbcrecsize, len(repo.changelog) | ||||
) | ||||
Mads Kiilerich
|
r23785 | if self._rbcrevslen == 0: | ||
self._names = [] | ||||
Augie Fackler
|
r43346 | self._rbcnamescount = len(self._names) # number of names read at | ||
# _rbcsnameslen | ||||
Mads Kiilerich
|
r23785 | |||
Mads Kiilerich
|
r28558 | def _clear(self): | ||
self._rbcsnameslen = 0 | ||||
del self._names[:] | ||||
self._rbcnamescount = 0 | ||||
self._rbcrevslen = len(self._repo.changelog) | ||||
Arseniy Alekseyev
|
r52268 | self._rbcrevs = rbcrevs(bytearray(self._rbcrevslen * _rbcrecsize)) | ||
Augie Fackler
|
r43347 | util.clearcachedproperty(self, b'_namesreverse') | ||
Pulkit Goyal
|
r40746 | |||
@util.propertycache | ||||
def _namesreverse(self): | ||||
Augie Fackler
|
r44937 | return {b: r for r, b in enumerate(self._names)} | ||
Mads Kiilerich
|
r28558 | |||
Yuya Nishihara
|
r40455 | def branchinfo(self, rev): | ||
Mads Kiilerich
|
r23785 | """Return branch name and close flag for rev, using and updating | ||
persistent cache.""" | ||||
Yuya Nishihara
|
r40455 | changelog = self._repo.changelog | ||
Mads Kiilerich
|
r23785 | rbcrevidx = rev * _rbcrecsize | ||
Yuya Nishihara
|
r25266 | # avoid negative index, changelog.read(nullrev) is fast without cache | ||
if rev == nullrev: | ||||
return changelog.branchinfo(rev) | ||||
Mads Kiilerich
|
r29604 | # if requested rev isn't allocated, grow and cache the rev info | ||
Mads Kiilerich
|
r23785 | if len(self._rbcrevs) < rbcrevidx + _rbcrecsize: | ||
Yuya Nishihara
|
r40455 | return self._branchinfo(rev) | ||
Mads Kiilerich
|
r23785 | |||
# fast path: extract data from cache, use it if node is matching | ||||
reponode = changelog.node(rev)[:_rbcnodelen] | ||||
Arseniy Alekseyev
|
r52268 | cachenode, branchidx = self._rbcrevs.unpack_record(rbcrevidx) | ||
Mads Kiilerich
|
r23785 | close = bool(branchidx & _rbccloseflag) | ||
if close: | ||||
branchidx &= _rbcbranchidxmask | ||||
Augie Fackler
|
r43347 | if cachenode == b'\0\0\0\0': | ||
Durham Goode
|
r24376 | pass | ||
elif cachenode == reponode: | ||||
Mads Kiilerich
|
r29615 | try: | ||
Mads Kiilerich
|
r28558 | return self._names[branchidx], close | ||
Mads Kiilerich
|
r29615 | except IndexError: | ||
# recover from invalid reference to unknown branch | ||||
Augie Fackler
|
r43346 | self._repo.ui.debug( | ||
Augie Fackler
|
r43347 | b"referenced branch names not found" | ||
b" - rebuilding revision branch cache from scratch\n" | ||||
Augie Fackler
|
r43346 | ) | ||
Mads Kiilerich
|
r29615 | self._clear() | ||
Durham Goode
|
r24376 | else: | ||
# rev/node map has changed, invalidate the cache from here up | ||||
Augie Fackler
|
r43346 | self._repo.ui.debug( | ||
Augie Fackler
|
r43347 | b"history modification detected - truncating " | ||
b"revision branch cache to revision %d\n" % rev | ||||
Augie Fackler
|
r43346 | ) | ||
Durham Goode
|
r24376 | truncate = rbcrevidx + _rbcrecsize | ||
Arseniy Alekseyev
|
r52268 | self._rbcrevs.truncate(truncate) | ||
Durham Goode
|
r24376 | self._rbcrevslen = min(self._rbcrevslen, truncate) | ||
Mads Kiilerich
|
r23785 | # fall back to slow path and make sure it will be written to disk | ||
Yuya Nishihara
|
r40455 | return self._branchinfo(rev) | ||
Mads Kiilerich
|
r23785 | |||
Yuya Nishihara
|
r40455 | def _branchinfo(self, rev): | ||
Mads Kiilerich
|
r23785 | """Retrieve branch info from changelog and update _rbcrevs""" | ||
Yuya Nishihara
|
r40455 | changelog = self._repo.changelog | ||
Mads Kiilerich
|
r23785 | b, close = changelog.branchinfo(rev) | ||
if b in self._namesreverse: | ||||
branchidx = self._namesreverse[b] | ||||
else: | ||||
branchidx = len(self._names) | ||||
self._names.append(b) | ||||
self._namesreverse[b] = branchidx | ||||
reponode = changelog.node(rev) | ||||
if close: | ||||
branchidx |= _rbccloseflag | ||||
Yuya Nishihara
|
r40455 | self._setcachedata(rev, reponode, branchidx) | ||
Durham Goode
|
r24375 | return b, close | ||
Joerg Sonnenberger
|
r47084 | def setdata(self, rev, changelogrevision): | ||
Boris Feld
|
r36980 | """add new data information to the cache""" | ||
Joerg Sonnenberger
|
r47084 | branch, close = changelogrevision.branchinfo | ||
Boris Feld
|
r36980 | if branch in self._namesreverse: | ||
branchidx = self._namesreverse[branch] | ||||
else: | ||||
branchidx = len(self._names) | ||||
self._names.append(branch) | ||||
self._namesreverse[branch] = branchidx | ||||
if close: | ||||
branchidx |= _rbccloseflag | ||||
Joerg Sonnenberger
|
r47084 | self._setcachedata(rev, self._repo.changelog.node(rev), branchidx) | ||
Boris Feld
|
r36980 | # If no cache data were readable (non exists, bad permission, etc) | ||
# the cache was bypassing itself by setting: | ||||
# | ||||
# self.branchinfo = self._branchinfo | ||||
# | ||||
# Since we now have data in the cache, we need to drop this bypassing. | ||||
Augie Fackler
|
r43906 | if 'branchinfo' in vars(self): | ||
Boris Feld
|
r36980 | del self.branchinfo | ||
Yuya Nishihara
|
r40455 | def _setcachedata(self, rev, node, branchidx): | ||
Durham Goode
|
r24375 | """Writes the node's branch data to the in-memory cache data.""" | ||
Durham Goode
|
r31454 | if rev == nullrev: | ||
return | ||||
Mads Kiilerich
|
r23785 | rbcrevidx = rev * _rbcrecsize | ||
Arseniy Alekseyev
|
r52268 | self._rbcrevs.pack_into(rbcrevidx, node, branchidx) | ||
Durham Goode
|
r24376 | self._rbcrevslen = min(self._rbcrevslen, rev) | ||
Mads Kiilerich
|
r23785 | |||
Durham Goode
|
r24377 | tr = self._repo.currenttransaction() | ||
if tr: | ||||
Augie Fackler
|
r43347 | tr.addfinalize(b'write-revbranchcache', self.write) | ||
Durham Goode
|
r24377 | |||
def write(self, tr=None): | ||||
Mads Kiilerich
|
r23785 | """Save branch cache if it is dirty.""" | ||
Durham Goode
|
r24374 | repo = self._repo | ||
Pierre-Yves David
|
r29744 | wlock = None | ||
Augie Fackler
|
r43347 | step = b'' | ||
Pierre-Yves David
|
r29744 | try: | ||
Pulkit Goyal
|
r42363 | # write the new names | ||
Pierre-Yves David
|
r29743 | if self._rbcnamescount < len(self._names): | ||
Pierre-Yves David
|
r29744 | wlock = repo.wlock(wait=False) | ||
Augie Fackler
|
r43347 | step = b' names' | ||
Pulkit Goyal
|
r42363 | self._writenames(repo) | ||
Mads Kiilerich
|
r23785 | |||
Pulkit Goyal
|
r42363 | # write the new revs | ||
Pierre-Yves David
|
r29743 | start = self._rbcrevslen * _rbcrecsize | ||
if start != len(self._rbcrevs): | ||||
Augie Fackler
|
r43347 | step = b'' | ||
Pierre-Yves David
|
r29744 | if wlock is None: | ||
wlock = repo.wlock(wait=False) | ||||
Pulkit Goyal
|
r42363 | self._writerevs(repo, start) | ||
Pierre-Yves David
|
r29745 | except (IOError, OSError, error.Abort, error.LockError) as inst: | ||
Augie Fackler
|
r43346 | repo.ui.debug( | ||
Augie Fackler
|
r43347 | b"couldn't write revision branch cache%s: %s\n" | ||
Augie Fackler
|
r43346 | % (step, stringutil.forcebytestr(inst)) | ||
) | ||||
Pierre-Yves David
|
r29744 | finally: | ||
if wlock is not None: | ||||
wlock.release() | ||||
Pulkit Goyal
|
r42363 | |||
def _writenames(self, repo): | ||||
Kyle Lippincott
|
r47856 | """write the new branch names to revbranchcache""" | ||
Pulkit Goyal
|
r42363 | if self._rbcnamescount != 0: | ||
Augie Fackler
|
r43347 | f = repo.cachevfs.open(_rbcnames, b'ab') | ||
Pulkit Goyal
|
r42363 | if f.tell() == self._rbcsnameslen: | ||
Augie Fackler
|
r43347 | f.write(b'\0') | ||
Pulkit Goyal
|
r42363 | else: | ||
f.close() | ||||
Augie Fackler
|
r43347 | repo.ui.debug(b"%s changed - rewriting it\n" % _rbcnames) | ||
Pulkit Goyal
|
r42363 | self._rbcnamescount = 0 | ||
self._rbcrevslen = 0 | ||||
if self._rbcnamescount == 0: | ||||
# before rewriting names, make sure references are removed | ||||
repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True) | ||||
Augie Fackler
|
r43347 | f = repo.cachevfs.open(_rbcnames, b'wb') | ||
Augie Fackler
|
r43346 | f.write( | ||
Augie Fackler
|
r43347 | b'\0'.join( | ||
Augie Fackler
|
r43346 | encoding.fromlocal(b) | ||
for b in self._names[self._rbcnamescount :] | ||||
) | ||||
) | ||||
Pulkit Goyal
|
r42363 | self._rbcsnameslen = f.tell() | ||
f.close() | ||||
self._rbcnamescount = len(self._names) | ||||
def _writerevs(self, repo, start): | ||||
Kyle Lippincott
|
r47856 | """write the new revs to revbranchcache""" | ||
Pulkit Goyal
|
r42364 | revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize) | ||
Augie Fackler
|
r43347 | with repo.cachevfs.open(_rbcrevs, b'ab') as f: | ||
Pulkit Goyal
|
r42363 | if f.tell() != start: | ||
Augie Fackler
|
r43347 | repo.ui.debug( | ||
b"truncating cache/%s to %d\n" % (_rbcrevs, start) | ||||
) | ||||
Pulkit Goyal
|
r42363 | f.seek(start) | ||
Pulkit Goyal
|
r42364 | if f.tell() != start: | ||
start = 0 | ||||
f.seek(start) | ||||
f.truncate() | ||||
end = revs * _rbcrecsize | ||||
Arseniy Alekseyev
|
r52268 | f.write(self._rbcrevs.slice(start, end)) | ||
Pulkit Goyal
|
r42363 | self._rbcrevslen = revs | ||