manifest: avoid corruption by dropping removed files with pure (issue5801)

Previously, removed files were simply marked by overwriting the first byte of
their entry with NUL and dropping their entry in `self.positions`. But no
effort was made to skip them when compacting the dictionary into text form.
This allowed them to slip into the manifest revision, since the code appears
to minimize string operations by copying as large a chunk as possible. As part
of this, _compact() walks the existing text based on entries in the
`positions` list, and consumes everything up to the next position entry. This
typically resulted in a ValueError complaining about unsorted manifest
entries. Sometimes files seem to get silently dropped in large repos; this
appears to correspond to a new entry that would take the same slot.

A more trivial problem is that if the only changes were removals, _compact()
didn't even run, because __delitem__ doesn't add anything to
`self.extradata`. Now there's an explicit variable to flag removals, both to
allow _compact() to run and to avoid searching the manifest in cases where
there are no removals.

In practice, this behavior was mostly obscured by the check in fastdelta(),
which takes a different path that explicitly drops removed files if there are
fewer than 1000 changes. However, timeless has a repo where, after rebasing
tens of commits, a totally different path[1] is taken that bypasses the
change-count check and hits this problem.

[1] https://www.mercurial-scm.org/repo/hg/file/2338bdea4474/mercurial/manifest.py#l1511
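
To make the failure mode concrete, here is a minimal sketch of the scheme the
message describes. It is an illustration only: the entry layout and the names
`positions`, `extradata`, and `hasremovals` are simplified assumptions, not
the actual mercurial/manifest.py code, and it walks entries one at a time
where the real _compact() copies multi-entry chunks (which is exactly how
NUL-marked entries could be swallowed).

    # Toy model of the pure-Python lazy manifest; names are illustrative.
    class lazymanifestsketch(object):
        def __init__(self, text):
            # text is b"<file>\0<hash>\n" entries; positions indexes them.
            self.data = text
            self.positions = []
            pos = 0
            while pos < len(self.data):
                self.positions.append(pos)
                pos = self.data.index(b'\n', pos) + 1
            self.extradata = []       # pending additions; empty for removals
            self.hasremovals = False  # the explicit flag added by the fix

        def delitem(self, filename):
            # A removal NULs the entry's first byte and drops its offset;
            # the dead bytes are still present in self.data.
            for i, pos in enumerate(self.positions):
                end = self.data.index(b'\n', pos)
                if self.data[pos:end].split(b'\0')[0] == filename:
                    self.data = self.data[:pos] + b'\0' + self.data[pos + 1:]
                    del self.positions[i]
                    self.hasremovals = True  # nothing recorded this before
                    return
            raise KeyError(filename)

        def compact(self):
            # Pure removals leave extradata empty, so the flag must also
            # gate this early return (the second bug in the message).
            if not self.extradata and not self.hasremovals:
                return self.data
            out, pos = [], 0
            while pos < len(self.data):
                end = self.data.index(b'\n', pos) + 1
                # The core fix: never copy an entry whose first byte is
                # NUL, so removed files cannot leak into the revision text.
                if self.data[pos:pos + 1] != b'\0':
                    out.append(self.data[pos:end])
                pos = end
            return b''.join(out)

    m = lazymanifestsketch(b'a\x00aa\nb\x00bb\n')
    m.delitem(b'a')
    assert m.compact() == b'b\x00bb\n'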

metadatastore.py
from __future__ import absolute_import

from mercurial.node import hex, nullid
from . import (
    basestore,
    shallowutil,
)

class unionmetadatastore(basestore.baseunionstore):
    def __init__(self, *args, **kwargs):
        super(unionmetadatastore, self).__init__(*args, **kwargs)

        self.stores = args
        self.writestore = kwargs.get(r'writestore')

        # If allowincomplete==True then the union store can return partial
        # ancestor lists, otherwise it will throw a KeyError if a full
        # history can't be found.
        self.allowincomplete = kwargs.get(r'allowincomplete', False)

    def getancestors(self, name, node, known=None):
        """Returns as many ancestors as we're aware of.

        return value: {
            node: (p1, p2, linknode, copyfrom),
            ...
        }
        """
        if known is None:
            known = set()
        if node in known:
            return []

        ancestors = {}

        def traverse(curname, curnode):
            # TODO: this algorithm has the potential to traverse parts of
            # history twice. Ex: with A->B->C->F and A->B->D->F, both D and C
            # may be queued as missing, then B and A are traversed for both.
            queue = [(curname, curnode)]
            missing = []
            seen = set()
            while queue:
                name, node = queue.pop()
                if (name, node) in seen:
                    continue
                seen.add((name, node))

                value = ancestors.get(node)
                if not value:
                    missing.append((name, node))
                    continue
                p1, p2, linknode, copyfrom = value
                if p1 != nullid and p1 not in known:
                    queue.append((copyfrom or curname, p1))
                if p2 != nullid and p2 not in known:
                    queue.append((curname, p2))
            return missing
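
        # Fetch-and-expand: pull whatever ancestor data the stores already
        # hold for the current key, then traverse it to queue any parents
        # that are still missing for another round of fetching.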
        missing = [(name, node)]
        while missing:
            curname, curnode = missing.pop()
            try:
                ancestors.update(self._getpartialancestors(curname, curnode,
                                                           known=known))
                newmissing = traverse(curname, curnode)
                missing.extend(newmissing)
            except KeyError:
                # If we allow incomplete histories, don't throw.
                if not self.allowincomplete:
                    raise
                # If the requested name+node doesn't exist, always throw.
                if (curname, curnode) == (name, node):
                    raise

        # TODO: ancestors should probably be (name, node) -> (value)
        return ancestors

    @basestore.baseunionstore.retriable
    def _getpartialancestors(self, name, node, known=None):
        for store in self.stores:
            try:
                return store.getancestors(name, node, known=known)
            except KeyError:
                pass

        raise KeyError((name, hex(node)))

    @basestore.baseunionstore.retriable
    def getnodeinfo(self, name, node):
        for store in self.stores:
            try:
                return store.getnodeinfo(name, node)
            except KeyError:
                pass

        raise KeyError((name, hex(node)))

    def add(self, name, node, data):
        raise RuntimeError("cannot add metadata only to remotefilelog "
                           "metadatastore")

    def getmissing(self, keys):
        missing = keys
        for store in self.stores:
            if missing:
                missing = store.getmissing(missing)

        return missing

    def markledger(self, ledger, options=None):
        for store in self.stores:
            store.markledger(ledger, options)

    def getmetrics(self):
        metrics = [s.getmetrics() for s in self.stores]
        return shallowutil.sumdicts(*metrics)

class remotefilelogmetadatastore(basestore.basestore):
    def getancestors(self, name, node, known=None):
        """Returns as many ancestors as we're aware of.

        return value: {
            node: (p1, p2, linknode, copyfrom),
            ...
        }
        """
        data = self._getdata(name, node)
        ancestors = shallowutil.ancestormap(data)
        return ancestors

    def getnodeinfo(self, name, node):
        return self.getancestors(name, node)[node]

    def add(self, name, node, parents, linknode):
        raise RuntimeError("cannot add metadata only to remotefilelog "
                           "metadatastore")

class remotemetadatastore(object):
    def __init__(self, ui, fileservice, shared):
        self._fileservice = fileservice
        self._shared = shared

    def getancestors(self, name, node, known=None):
        self._fileservice.prefetch([(name, hex(node))], force=True,
                                   fetchdata=False, fetchhistory=True)
        return self._shared.getancestors(name, node, known=known)

    def getnodeinfo(self, name, node):
        return self.getancestors(name, node)[node]

    def add(self, name, node, data):
        raise RuntimeError("cannot add to a remote store")

    def getmissing(self, keys):
        return keys

    def markledger(self, ledger, options=None):
        pass
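
To illustrate the fallthrough contract unionmetadatastore relies on, here is a
short, self-contained sketch. The dict-backed stores and node strings are
hypothetical stand-ins for real remotefilelog stores, and the @retriable
wrapper is omitted; this shows the lookup pattern, not an actual mercurial
session.

    class dictmetadatastore(object):
        def __init__(self, entries):
            # entries: {(name, node): (p1, p2, linknode, copyfrom)}
            self._entries = entries

        def getnodeinfo(self, name, node):
            return self._entries[(name, node)]

        def getmissing(self, keys):
            return [k for k in keys if k not in self._entries]

    def uniongetnodeinfo(stores, name, node):
        # Mirrors unionmetadatastore.getnodeinfo: the first store wins, a
        # KeyError falls through, and a miss everywhere raises to the caller.
        for store in stores:
            try:
                return store.getnodeinfo(name, node)
            except KeyError:
                pass
        raise KeyError((name, node))

    local = dictmetadatastore({('f', 'n1'): ('n0', None, 'l1', None)})
    shared = dictmetadatastore({('f', 'n2'): ('n1', None, 'l2', None)})

    # 'n2' misses in local and is served by shared.
    assert uniongetnodeinfo([local, shared], 'f', 'n2')[0] == 'n1'

    # getmissing threads the unresolved keys through every store in turn.
    missing = [('f', 'n3')]
    for store in (local, shared):
        if missing:
            missing = store.getmissing(missing)
    assert missing == [('f', 'n3')]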