# branchmap.py - logic to compute, maintain and store the branchmap for a local repo
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import struct

from .node import (
    bin,
    hex,
    nullrev,
)

from typing import (
    Any,
    Callable,
    Dict,
    Iterable,
    List,
    Optional,
    Set,
    TYPE_CHECKING,
    Tuple,
    Union,
)

from . import (
    encoding,
    error,
    obsolete,
    scmutil,
    util,
)

from .utils import (
    repoviewutil,
    stringutil,
)

if TYPE_CHECKING:
    from . import localrepo

    assert [localrepo]


subsettable = repoviewutil.subsettable

calcsize = struct.calcsize
pack_into = struct.pack_into
unpack_from = struct.unpack_from


class BranchMapCache:
    """mapping of filtered views of repo with their branchcache"""

    def __init__(self):
        self._per_filter = {}

    def __getitem__(self, repo):
        self.updatecache(repo)
        bcache = self._per_filter[repo.filtername]
        assert bcache._filtername == repo.filtername, (
            bcache._filtername,
            repo.filtername,
        )
        return bcache
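
    # Usage sketch (a minimal illustration, not part of the original module):
    # localrepo keeps an instance of this class and indexes it with a repo
    # view; in current Mercurial that instance is exposed as
    # `repo._branchcaches`, so a lookup reads as:
    #
    #   cache = repo._branchcaches[repo]      # calls __getitem__ above
    #   heads = cache.branchheads(b'default')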

    def update_disk(self, repo):
        """ensure an up-to-date cache is (or will be) written on disk

        The cache for this repository view is updated if needed and written on
        disk.

        If a transaction is in progress, the write is scheduled at transaction
        close. See the `BranchMapCache.write_dirty` method.

        This method exists independently of __getitem__ as it is sometimes
        useful to signal that we have no intent to use the data in memory yet.
        """
        self.updatecache(repo)
        bcache = self._per_filter[repo.filtername]
        assert bcache._filtername == repo.filtername, (
            bcache._filtername,
            repo.filtername,
        )
        bcache.write(repo)

    def updatecache(self, repo):
        """Update the cache for the given filtered view on a repository"""
        # This can trigger updates for the caches for subsets of the filtered
        # view, e.g. when there is no cache for this filtered view or the cache
        # is stale.

        cl = repo.changelog
        filtername = repo.filtername
        bcache = self._per_filter.get(filtername)
        if bcache is None or not bcache.validfor(repo):
            # cache object missing or cache object stale? Read from disk
            bcache = branchcache.fromfile(repo)

        revs = []
        if bcache is None:
            # no (fresh) cache available anymore, perhaps we can re-use
            # the cache for a subset, then extend that to add info on missing
            # revisions.
            subsetname = subsettable.get(filtername)
            if subsetname is not None:
                subset = repo.filtered(subsetname)
                bcache = self[subset].copy(repo)
                extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                revs.extend(r for r in extrarevs if r <= bcache.tiprev)
            else:
                # nothing to fall back on, start empty.
                bcache = branchcache(repo)

        revs.extend(cl.revs(start=bcache.tiprev + 1))
        if revs:
            bcache.update(repo, revs)

        assert bcache.validfor(repo), filtername
        self._per_filter[repo.filtername] = bcache

    def replace(self, repo, remotebranchmap):
        """Replace the branchmap cache for a repo with a branch mapping.

        This is likely only called during clone with a branch map from a
        remote.

        """
        cl = repo.changelog
        clrev = cl.rev
        clbranchinfo = cl.branchinfo
        rbheads = []
        closed = set()
        for bheads in remotebranchmap.values():
            rbheads += bheads
            for h in bheads:
                r = clrev(h)
                b, c = clbranchinfo(r)
                if c:
                    closed.add(h)

        if rbheads:
            rtiprev = max((int(clrev(node)) for node in rbheads))
            cache = branchcache(
                repo,
                remotebranchmap,
                repo[rtiprev].node(),
                rtiprev,
                closednodes=closed,
            )

            # Try to stick it as low as possible
            # filters above "served" are unlikely to be fetched from a clone
            for candidate in (b'base', b'immutable', b'served'):
                rview = repo.filtered(candidate)
                if cache.validfor(rview):
                    cache._filtername = candidate
                    self._per_filter[candidate] = cache
                    cache._dirty = True
                    cache.write(rview)
                    return

    def clear(self):
        self._per_filter.clear()

    def write_dirty(self, repo):
        """write all dirty per-filter caches to disk"""
        unfi = repo.unfiltered()
        for filtername in repoviewutil.get_ordered_subset():
            cache = self._per_filter.get(filtername)
            if cache is None:
                continue
            if cache._dirty:
                if filtername is None:
                    repo = unfi
                else:
                    repo = unfi.filtered(filtername)
                cache.write(repo)


def _unknownnode(node):
    """raises ValueError when branchcache found a node which does not exist"""
    raise ValueError('node %s does not exist' % node.hex())


def _branchcachedesc(repo):
    if repo.filtername is not None:
        return b'branch cache (%s)' % repo.filtername
    else:
        return b'branch cache'


class _BaseBranchCache:
    """A dict-like object that holds a cache of branch heads.

    This cache is used to avoid costly computations to determine all the
    branch heads of a repo.

    The cache is serialized on disk in the following format:

    <tip hex node> <tip rev number> [optional filtered repo hex hash]
    <branch head hex node> <open/closed state> <branch name>
    <branch head hex node> <open/closed state> <branch name>
    ...

    The first line is used to check if the cache is still valid. If the
    branch cache is for a filtered repo view, an optional third hash is
    included that hashes the hashes of all filtered and obsolete revisions.

    The open/closed state is represented by a single letter 'o' or 'c'.
    This field can be used to avoid changelog reads when determining if a
    branch head closes a branch or not.
    """

    def __init__(
        self,
        repo: "localrepo.localrepository",
        entries: Union[
            Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
        ] = (),
        closed_nodes: Optional[Set[bytes]] = None,
    ) -> None:
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.
        if closed_nodes is None:
            closed_nodes = set()
        self._closednodes = set(closed_nodes)
        self._entries = dict(entries)

    def __iter__(self):
        return iter(self._entries)

    def __setitem__(self, key, value):
        self._entries[key] = value

    def __getitem__(self, key):
        return self._entries[key]

    def __contains__(self, key):
        return key in self._entries

    def iteritems(self):
        return self._entries.items()

    items = iteritems

    def hasbranch(self, label):
        """checks whether a branch of this name exists or not"""
        return label in self._entries

    def _branchtip(self, heads):
        """Return tuple with last open head in heads and false,
        otherwise return last closed head and true."""
        tip = heads[-1]
        closed = True
        for h in reversed(heads):
            if h not in self._closednodes:
                tip = h
                closed = False
                break
        return tip, closed
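
    # Illustration (hypothetical nodes): with heads == [n1, n2, n3], where n3
    # closes its branch but n2 does not, _branchtip returns (n2, False); if
    # every head in the list is closed, it returns (n3, True).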

    def branchtip(self, branch):
        """Return the tipmost open head on branch head, otherwise return the
        tipmost closed head on branch.

        Raise KeyError for unknown branch."""
        return self._branchtip(self[branch])[0]

    def iteropen(self, nodes):
        return (n for n in nodes if n not in self._closednodes)

    def branchheads(self, branch, closed=False):
        heads = self._entries[branch]
        if not closed:
            heads = list(self.iteropen(heads))
        return heads

    def iterbranches(self):
        for bn, heads in self.items():
            yield (bn, heads) + self._branchtip(heads)

    def iterheads(self):
        """returns all the heads"""
        return self._entries.values()

    def update(self, repo, revgen):
        """Given a branchhead cache, self, that may have extra nodes or be
        missing heads, and a generator of revisions that is strictly a
        superset of the missing heads, this function updates self to be
        correct.
        """
        starttime = util.timer()
        cl = repo.changelog
        # collect new branch entries
        newbranches = {}
        getbranchinfo = repo.revbranchcache().branchinfo
        max_rev = -1
        for r in revgen:
            branch, closesbranch = getbranchinfo(r)
            newbranches.setdefault(branch, []).append(r)
            if closesbranch:
                self._closednodes.add(cl.node(r))
            max_rev = max(max_rev, r)
        if max_rev < 0:
            msg = "running branchcache.update without revision to update"
            raise error.ProgrammingError(msg)

        # Delay fetching the topological heads until they are needed.
        # A repository without non-continuous branches can skip this part.
        topoheads = None

        # If a changeset is visible, its parents must be visible too, so
        # use the faster unfiltered parent accessor.
        parentrevs = repo.unfiltered().changelog.parentrevs

        # Faster than using ctx.obsolete()
        obsrevs = obsolete.getrevs(repo, b'obsolete')

        for branch, newheadrevs in newbranches.items():
            # For every branch, compute the new branchheads.
            # A branchhead is a revision such that no descendant is on
            # the same branch.
            #
            # The branchheads are computed iteratively in revision order.
            # This ensures topological order, i.e. parents are processed
            # before their children. Ancestors are inclusive here, i.e.
            # any revision is an ancestor of itself.
            #
            # Core observations:
            # - The current revision is always a branchhead for the
            #   repository up to that point.
            # - It is the first revision of the branch if and only if
            #   there was no branchhead before. In that case, it is the
            #   only branchhead as there are no possible ancestors on
            #   the same branch.
            # - If a parent is on the same branch, a branchhead can
            #   only be an ancestor of that parent if it is the parent
            #   itself. Otherwise it would have been removed as an
            #   ancestor of that parent before.
            # - Therefore, if all parents are on the same branch, they
            #   can just be removed from the branchhead set.
            # - If one parent is on the same branch and the other is not
            #   and there was exactly one branchhead known, the existing
            #   branchhead can only be an ancestor if it is the parent.
            #   Otherwise it would have been removed as an ancestor of
            #   the parent before. The other parent therefore can't have
            #   a branchhead as ancestor.
            # - In all other cases, the parents on different branches
            #   could have a branchhead as ancestor. Those parents are
            #   kept in the "uncertain" set. If all branchheads are also
            #   topological heads, they can't have descendants and further
            #   checks can be skipped. Otherwise, the ancestors of the
            #   "uncertain" set are removed from branchheads.
            #   This computation is heavy and avoided if at all possible.
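            #
            # Worked micro-example (hypothetical revisions): for a linear
            # branch 1 <- 2 <- 3 entirely on b'default' with bheadset == {1},
            # processing rev 2 finds its only parent 1 in bheadset, so 1 is
            # dropped and bheadset becomes {2}; rev 3 then leaves {3}, the
            # single head, without ever touching the "uncertain" machinery.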
            bheads = self._entries.get(branch, [])
            bheadset = {cl.rev(node) for node in bheads}
            uncertain = set()
            for newrev in sorted(newheadrevs):
                if newrev in obsrevs:
                    # We ignore obsolete changesets as they shouldn't be
                    # considered heads.
                    continue

                if not bheadset:
                    bheadset.add(newrev)
                    continue

                parents = [p for p in parentrevs(newrev) if p != nullrev]
                samebranch = set()
                otherbranch = set()
                obsparents = set()
                for p in parents:
                    if p in obsrevs:
                        # We ignored this obsolete changeset earlier, but now
                        # that it has non-ignored children, we need to make
                        # sure their ancestors are not considered heads. To
                        # achieve that, we will simply treat this obsolete
                        # changeset as a parent from another branch.
                        obsparents.add(p)
                    elif p in bheadset or getbranchinfo(p)[0] == branch:
                        samebranch.add(p)
                    else:
                        otherbranch.add(p)
                if not (len(bheadset) == len(samebranch) == 1):
                    uncertain.update(otherbranch)
                    uncertain.update(obsparents)
                bheadset.difference_update(samebranch)
                bheadset.add(newrev)

            if uncertain:
                if topoheads is None:
                    topoheads = set(cl.headrevs())
                if bheadset - topoheads:
                    floorrev = min(bheadset)
                    if floorrev <= max(uncertain):
                        ancestors = set(cl.ancestors(uncertain, floorrev))
                        bheadset -= ancestors
            if bheadset:
                self[branch] = [cl.node(rev) for rev in sorted(bheadset)]

        duration = util.timer() - starttime
        repo.ui.log(
            b'branchcache',
            b'updated %s in %.4f seconds\n',
            _branchcachedesc(repo),
            duration,
        )
        return max_rev


class branchcache(_BaseBranchCache):
    """Branchmap info for a local repo or repoview"""

    _base_filename = b"branch2"

    def __init__(
        self,
        repo: "localrepo.localrepository",
        entries: Union[
            Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
        ] = (),
        tipnode: Optional[bytes] = None,
        tiprev: Optional[int] = nullrev,
        filteredhash: Optional[bytes] = None,
        closednodes: Optional[Set[bytes]] = None,
        hasnode: Optional[Callable[[bytes], bool]] = None,
        verify_node: bool = False,
    ) -> None:
        """hasnode is a function which can be used to verify whether changelog
        has a given node or not. If it's not provided, we assume that every node
        we have exists in changelog"""
        self._filtername = repo.filtername
        if tipnode is None:
            self.tipnode = repo.nullid
        else:
            self.tipnode = tipnode
        self.tiprev = tiprev
        self.filteredhash = filteredhash
        self._dirty = False

        super().__init__(repo=repo, entries=entries, closed_nodes=closednodes)
        # closednodes is a set of nodes that close their branch. If the branch
        # cache has been updated, it may contain nodes that are no longer
        # heads.

        # Do we need to verify branches at all?
        self._verify_node = verify_node
        # branches for which nodes are verified
        self._verifiedbranches = set()
        self._hasnode = None
        if self._verify_node:
            self._hasnode = repo.changelog.hasnode

    def validfor(self, repo):
        """check that cache contents are valid for (a subset of) this repo

        - False when the order of changesets changed or if we detect a strip.
        - True when cache is up-to-date for the current repo or its subset."""
        try:
            node = repo.changelog.node(self.tiprev)
        except IndexError:
            # changesets were stripped and now we don't even have enough to
            # find tiprev
            return False
        if self.tipnode != node:
            # tiprev doesn't correspond to tipnode: repo was stripped, or this
            # repo has a different order of changesets
            return False
        tiphash = scmutil.filteredhash(repo, self.tiprev, needobsolete=True)
        # hashes don't match if this repo view has a different set of filtered
        # revisions (e.g. due to phase changes) or obsolete revisions (e.g.
        # history was rewritten)
        return self.filteredhash == tiphash

    @classmethod
    def fromfile(cls, repo):
        f = None
        try:
            f = repo.cachevfs(cls._filename(repo))
            lineiter = iter(f)
            init_kwargs = cls._load_header(repo, lineiter)
            bcache = cls(
                repo,
                verify_node=True,
                **init_kwargs,
            )
            if not bcache.validfor(repo):
                # invalidate the cache
                raise ValueError('tip differs')
            bcache._load_heads(repo, lineiter)
        except (IOError, OSError):
            return None

        except Exception as inst:
            if repo.ui.debugflag:
                msg = b'invalid %s: %s\n'
                msg %= (
                    _branchcachedesc(repo),
                    stringutil.forcebytestr(inst),
                )
                repo.ui.debug(msg)
            bcache = None

        finally:
            if f:
                f.close()

        return bcache

    @classmethod
    def _load_header(cls, repo, lineiter) -> "dict[str, Any]":
        """parse the head of a branchmap file

        return parameters to pass to a newly created class instance.
        """
        cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2)
        last, lrev = cachekey[:2]
        last, lrev = bin(last), int(lrev)
        filteredhash = None
        if len(cachekey) > 2:
            filteredhash = bin(cachekey[2])
        return {
            "tipnode": last,
            "tiprev": lrev,
            "filteredhash": filteredhash,
        }
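
    # Header parsing example (hypothetical line, truncated hashes):
    #
    #   >>> b'63250e2e7556 5 ae1b0b49bf01\n'.rstrip(b'\n').split(b' ', 2)
    #   [b'63250e2e7556', b'5', b'ae1b0b49bf01']
    #
    # The three fields become tipnode, tiprev and filteredhash after bin()
    # and int() conversion; a header without the optional third field yields
    # filteredhash = None.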

    def _load_heads(self, repo, lineiter):
        """fully loads the branchcache by reading from the file using the line
        iterator passed"""
        for line in lineiter:
            line = line.rstrip(b'\n')
            if not line:
                continue
            node, state, label = line.split(b" ", 2)
            if state not in b'oc':
                raise ValueError('invalid branch state')
            label = encoding.tolocal(label.strip())
            node = bin(node)
            self._entries.setdefault(label, []).append(node)
            if state == b'c':
                self._closednodes.add(node)

    @classmethod
    def _filename(cls, repo):
        """name of a branchcache file for a given repo or repoview"""
        filename = cls._base_filename
        if repo.filtername:
            filename = b'%s-%s' % (filename, repo.filtername)
        return filename

    def copy(self, repo):
        """return a deep copy of the branchcache object"""
        assert repo.filtername != self._filtername
        other = type(self)(
            repo=repo,
            # we always do a shallow copy of self._entries, and the values
            # are always replaced, so there is no need to deepcopy as long
            # as the above remains true.
            entries=self._entries,
            tipnode=self.tipnode,
            tiprev=self.tiprev,
            filteredhash=self.filteredhash,
            closednodes=set(self._closednodes),
            verify_node=self._verify_node,
        )
        # the copy will likely schedule a write anyway, but it does not seem
        # to hurt to overschedule
        other._dirty = self._dirty
        # also copy information about the current verification state
        other._verifiedbranches = set(self._verifiedbranches)
        return other

    def write(self, repo):
        assert self._filtername == repo.filtername, (
            self._filtername,
            repo.filtername,
        )
        tr = repo.currenttransaction()
        if not getattr(tr, 'finalized', True):
            # Avoid premature writing.
            #
            # (The cache warming setup by localrepo will update the file later.)
            return
        try:
            filename = self._filename(repo)
            with repo.cachevfs(filename, b"w", atomictemp=True) as f:
                self._write_header(f)
                nodecount = self._write_heads(f)
            repo.ui.log(
                b'branchcache',
                b'wrote %s with %d labels and %d nodes\n',
                _branchcachedesc(repo),
                len(self._entries),
                nodecount,
            )
            self._dirty = False
        except (IOError, OSError, error.Abort) as inst:
            # Abort may be raised by read only opener, so log and continue
            repo.ui.debug(
                b"couldn't write branch cache: %s\n"
                % stringutil.forcebytestr(inst)
            )

    def _write_header(self, fp) -> None:
        """write the branch cache header to a file"""
        cachekey = [hex(self.tipnode), b'%d' % self.tiprev]
        if self.filteredhash is not None:
            cachekey.append(hex(self.filteredhash))
        fp.write(b" ".join(cachekey) + b'\n')

    def _write_heads(self, fp) -> int:
        """write list of heads to a file

        Return the number of heads written."""
        nodecount = 0
        for label, nodes in sorted(self._entries.items()):
            label = encoding.fromlocal(label)
            for node in nodes:
                nodecount += 1
                if node in self._closednodes:
                    state = b'c'
                else:
                    state = b'o'
                fp.write(b"%s %s %s\n" % (hex(node), state, label))
        return nodecount

    def _verifybranch(self, branch):
        """verify head nodes for the given branch."""
        if not self._verify_node:
            return
        if branch not in self._entries or branch in self._verifiedbranches:
            return
        assert self._hasnode is not None
        for n in self._entries[branch]:
            if not self._hasnode(n):
                _unknownnode(n)

        self._verifiedbranches.add(branch)

    def _verifyall(self):
        """verifies nodes of all the branches"""
        for b in self._entries.keys():
            if b not in self._verifiedbranches:
                self._verifybranch(b)

    def __getitem__(self, key):
        self._verifybranch(key)
        return super().__getitem__(key)

    def __contains__(self, key):
        self._verifybranch(key)
        return super().__contains__(key)

    def iteritems(self):
        self._verifyall()
        return super().iteritems()

    items = iteritems

    def iterheads(self):
        """returns all the heads"""
        self._verifyall()
        return super().iterheads()

    def hasbranch(self, label):
        """checks whether a branch of this name exists or not"""
        self._verifybranch(label)
        return super().hasbranch(label)

    def branchheads(self, branch, closed=False):
        self._verifybranch(branch)
        return super().branchheads(branch, closed=closed)

    def update(self, repo, revgen):
        assert self._filtername == repo.filtername, (
            self._filtername,
            repo.filtername,
        )
        cl = repo.changelog
        max_rev = super().update(repo, revgen)
        # new tip revision which we found after iterating items from new
        # branches
        if max_rev is not None and max_rev > self.tiprev:
            self.tiprev = max_rev
            self.tipnode = cl.node(max_rev)

        if not self.validfor(repo):
            # old cache key is now invalid for the repo, but we've just updated
            # the cache and we assume it's valid, so let's make the cache key
            # valid as well by recomputing it from the cached data
            self.tipnode = repo.nullid
            self.tiprev = nullrev
            for heads in self.iterheads():
                if not heads:
                    # all revisions on a branch are obsolete
                    continue
                # note: tiprev is not necessarily the tip revision of repo,
                # because the tip could be obsolete (i.e. not a head)
                tiprev = max(cl.rev(node) for node in heads)
                if tiprev > self.tiprev:
                    self.tipnode = cl.node(tiprev)
                    self.tiprev = tiprev
        self.filteredhash = scmutil.filteredhash(
            repo, self.tiprev, needobsolete=True
        )
        self._dirty = True
        self.write(repo)


class remotebranchcache(_BaseBranchCache):
    """Branchmap info for a remote connection, should not write locally"""

    def __init__(
        self,
        repo: "localrepo.localrepository",
        entries: Union[
            Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]]
        ] = (),
        closednodes: Optional[Set[bytes]] = None,
    ) -> None:
        super().__init__(repo=repo, entries=entries, closed_nodes=closednodes)


# Revision branch info cache
_rbcversion = b'-v1'
_rbcnames = b'rbc-names' + _rbcversion
_rbcrevs = b'rbc-revs' + _rbcversion
# [4 byte hash prefix][4 byte branch name number with sign bit indicating a closed branch head]
_rbcrecfmt = b'>4sI'
_rbcrecsize = calcsize(_rbcrecfmt)
_rbcmininc = 64 * _rbcrecsize
_rbcnodelen = 4
_rbcbranchidxmask = 0x7FFFFFFF
_rbccloseflag = 0x80000000
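
# Record layout illustration (a minimal sketch; the node prefix and branch
# index are hypothetical):
#
#   >>> import struct
#   >>> struct.pack('>4sI', b'\xde\xad\xbe\xef', 5 | _rbccloseflag)
#   b'\xde\xad\xbe\xef\x80\x00\x00\x05'
#
# i.e. 4 node-prefix bytes, then a big-endian uint32 whose high bit marks a
# branch-closing revision and whose low 31 bits index into rbc-names.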


class rbcrevs:
    """a byte string consisting of an immutable prefix followed by a mutable suffix"""

    def __init__(self, revs):
        self._prefix = revs
        self._rest = bytearray()

    def __len__(self):
        return len(self._prefix) + len(self._rest)

    def unpack_record(self, rbcrevidx):
        if rbcrevidx < len(self._prefix):
            return unpack_from(_rbcrecfmt, util.buffer(self._prefix), rbcrevidx)
        else:
            return unpack_from(
                _rbcrecfmt,
                util.buffer(self._rest),
                rbcrevidx - len(self._prefix),
            )

    def make_mutable(self):
        if len(self._prefix) > 0:
            entirety = bytearray()
            entirety[:] = self._prefix
            entirety.extend(self._rest)
            self._rest = entirety
            self._prefix = bytearray()

    def truncate(self, pos):
        self.make_mutable()
        del self._rest[pos:]

    def pack_into(self, rbcrevidx, node, branchidx):
        if rbcrevidx < len(self._prefix):
            self.make_mutable()
        buf = self._rest
        start_offset = rbcrevidx - len(self._prefix)
        end_offset = start_offset + _rbcrecsize
        if len(self._rest) < end_offset:
            # bytearray doesn't allocate extra space at least in Python 3.7.
            # When multiple changesets are added in a row, precise resize would
            # result in quadratic complexity. Overallocate to compensate by
            # using the classic doubling technique for dynamic arrays instead.
            # If there was a gap in the map before, less space will be reserved.
            self._rest.extend(b'\0' * end_offset)
        return pack_into(
            _rbcrecfmt,
            buf,
            start_offset,
            node,
            branchidx,
        )

    def extend(self, extension):
        return self._rest.extend(extension)

    def slice(self, begin, end):
        if begin < len(self._prefix):
            acc = bytearray()
            acc[:] = self._prefix[begin:end]
            acc.extend(
                self._rest[begin - len(self._prefix) : end - len(self._prefix)]
            )
            return acc
        return self._rest[begin - len(self._prefix) : end - len(self._prefix)]


class revbranchcache:
    """Persistent cache, mapping from revision number to branch name and close.
    This is a low level cache, independent of filtering.

    Branch names are stored in rbc-names in internal encoding separated by 0.
    rbc-names is append-only, and each branch name is only stored once and will
    thus have a unique index.

    The branch info for each revision is stored in rbc-revs as constant size
    records. The whole file is read into memory, but it is only 'parsed' on
    demand. The file is usually append-only but will be truncated if repo
    modification is detected.
    The record for each revision contains the first 4 bytes of the
    corresponding node hash, and the record is only used if it still matches.
    Even a completely trashed rbc-revs file will thus still give the right
    result while converging towards full recovery ... assuming no incorrectly
    matching node hashes.
    The record also contains 4 bytes where 31 bits contain the index of the
    branch and the last bit indicates that it is a branch close commit.
    The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
    and will grow with it but be 1/8th of its size.
    """

    def __init__(self, repo, readonly=True):
        assert repo.filtername is None
        self._repo = repo
        self._names = []  # branch names in local encoding with static index
        self._rbcrevs = rbcrevs(bytearray())
        self._rbcsnameslen = 0  # length of names read at _rbcsnameslen
        try:
            bndata = repo.cachevfs.read(_rbcnames)
            self._rbcsnameslen = len(bndata)  # for verification before writing
            if bndata:
                self._names = [
                    encoding.tolocal(bn) for bn in bndata.split(b'\0')
                ]
        except (IOError, OSError):
            if readonly:
                # don't try to use cache - fall back to the slow path
                self.branchinfo = self._branchinfo

        if self._names:
            try:
                if repo.ui.configbool(b'format', b'mmap-revbranchcache'):
                    with repo.cachevfs(_rbcrevs) as fp:
                        data = util.buffer(util.mmapread(fp))
                else:
                    data = repo.cachevfs.read(_rbcrevs)
                self._rbcrevs = rbcrevs(data)
            except (IOError, OSError) as inst:
                repo.ui.debug(
                    b"couldn't read revision branch cache: %s\n"
                    % stringutil.forcebytestr(inst)
                )
        # remember number of good records on disk
        self._rbcrevslen = min(
            len(self._rbcrevs) // _rbcrecsize, len(repo.changelog)
        )
        if self._rbcrevslen == 0:
            self._names = []
        self._rbcnamescount = len(self._names)  # number of names read at
        # _rbcsnameslen

    def _clear(self):
        self._rbcsnameslen = 0
        del self._names[:]
        self._rbcnamescount = 0
        self._rbcrevslen = len(self._repo.changelog)
        self._rbcrevs = rbcrevs(bytearray(self._rbcrevslen * _rbcrecsize))
        util.clearcachedproperty(self, b'_namesreverse')

    @util.propertycache
    def _namesreverse(self):
        return {b: r for r, b in enumerate(self._names)}

    def branchinfo(self, rev):
        """Return branch name and close flag for rev, using and updating
        persistent cache."""
        changelog = self._repo.changelog
        rbcrevidx = rev * _rbcrecsize

        # avoid negative index, changelog.read(nullrev) is fast without cache
        if rev == nullrev:
            return changelog.branchinfo(rev)

        # if requested rev isn't allocated, grow and cache the rev info
        if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
            return self._branchinfo(rev)

        # fast path: extract data from cache, use it if node is matching
        reponode = changelog.node(rev)[:_rbcnodelen]
        cachenode, branchidx = self._rbcrevs.unpack_record(rbcrevidx)
        close = bool(branchidx & _rbccloseflag)
        if close:
            branchidx &= _rbcbranchidxmask
        if cachenode == b'\0\0\0\0':
            pass
        elif cachenode == reponode:
            try:
                return self._names[branchidx], close
            except IndexError:
                # recover from invalid reference to unknown branch
                self._repo.ui.debug(
                    b"referenced branch names not found"
                    b" - rebuilding revision branch cache from scratch\n"
                )
                self._clear()
        else:
            # rev/node map has changed, invalidate the cache from here up
            self._repo.ui.debug(
                b"history modification detected - truncating "
                b"revision branch cache to revision %d\n" % rev
            )
            truncate = rbcrevidx + _rbcrecsize
            self._rbcrevs.truncate(truncate)
            self._rbcrevslen = min(self._rbcrevslen, truncate)

        # fall back to slow path and make sure it will be written to disk
        return self._branchinfo(rev)

    def _branchinfo(self, rev):
        """Retrieve branch info from changelog and update _rbcrevs"""
        changelog = self._repo.changelog
        b, close = changelog.branchinfo(rev)
        if b in self._namesreverse:
            branchidx = self._namesreverse[b]
        else:
            branchidx = len(self._names)
            self._names.append(b)
            self._namesreverse[b] = branchidx
        reponode = changelog.node(rev)
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, reponode, branchidx)
        return b, close

    def setdata(self, rev, changelogrevision):
        """add new data information to the cache"""
        branch, close = changelogrevision.branchinfo
        if branch in self._namesreverse:
            branchidx = self._namesreverse[branch]
        else:
            branchidx = len(self._names)
            self._names.append(branch)
            self._namesreverse[branch] = branchidx
        if close:
            branchidx |= _rbccloseflag
        self._setcachedata(rev, self._repo.changelog.node(rev), branchidx)
        # If no cache data were readable (none exists, bad permissions, etc),
        # the cache was bypassing itself by setting:
        #
        #   self.branchinfo = self._branchinfo
        #
        # Since we now have data in the cache, we need to drop this bypassing.
        if 'branchinfo' in vars(self):
            del self.branchinfo

    def _setcachedata(self, rev, node, branchidx):
        """Writes the node's branch data to the in-memory cache data."""
        if rev == nullrev:
            return
        rbcrevidx = rev * _rbcrecsize
        self._rbcrevs.pack_into(rbcrevidx, node, branchidx)
        self._rbcrevslen = min(self._rbcrevslen, rev)

        tr = self._repo.currenttransaction()
        if tr:
            tr.addfinalize(b'write-revbranchcache', self.write)

    def write(self, tr=None):
        """Save branch cache if it is dirty."""
        repo = self._repo
        wlock = None
        step = b''
        try:
            # write the new names
            if self._rbcnamescount < len(self._names):
                wlock = repo.wlock(wait=False)
                step = b' names'
                self._writenames(repo)

            # write the new revs
            start = self._rbcrevslen * _rbcrecsize
            if start != len(self._rbcrevs):
                step = b' revs'
                if wlock is None:
                    wlock = repo.wlock(wait=False)
                self._writerevs(repo, start)

        except (IOError, OSError, error.Abort, error.LockError) as inst:
            repo.ui.debug(
                b"couldn't write revision branch cache%s: %s\n"
                % (step, stringutil.forcebytestr(inst))
            )
        finally:
            if wlock is not None:
                wlock.release()

    def _writenames(self, repo):
        """write the new branch names to revbranchcache"""
        if self._rbcnamescount != 0:
            f = repo.cachevfs.open(_rbcnames, b'ab')
            if f.tell() == self._rbcsnameslen:
                f.write(b'\0')
            else:
                f.close()
                repo.ui.debug(b"%s changed - rewriting it\n" % _rbcnames)
                self._rbcnamescount = 0
                self._rbcrevslen = 0
        if self._rbcnamescount == 0:
            # before rewriting names, make sure references are removed
            repo.cachevfs.unlinkpath(_rbcrevs, ignoremissing=True)
            f = repo.cachevfs.open(_rbcnames, b'wb')
        f.write(
            b'\0'.join(
                encoding.fromlocal(b)
                for b in self._names[self._rbcnamescount :]
            )
        )
        self._rbcsnameslen = f.tell()
        f.close()
        self._rbcnamescount = len(self._names)

    def _writerevs(self, repo, start):
        """write the new revs to revbranchcache"""
        revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
        with repo.cachevfs.open(_rbcrevs, b'ab') as f:
            if f.tell() != start:
                repo.ui.debug(
                    b"truncating cache/%s to %d\n" % (_rbcrevs, start)
                )
                f.seek(start)
                if f.tell() != start:
                    start = 0
                    f.seek(start)
                f.truncate()
            end = revs * _rbcrecsize
            f.write(self._rbcrevs.slice(start, end))
        self._rbcrevslen = revs