tracked-key: remove the dual write and rename to tracked-hint

The dual-write approach was mostly useless. As explained in the previous version of the help, the key had to be read twice before we could cache a value. However, this "read twice" limitation actually also applies to any usage of the key. If some operation wants to rely on the "same value == same tracked set" property, it would need to read the value before and after running that operation (or at least after, in all cases). So it cannot be sure the operation it did is "valid" until checking the key after the operation. As a result, such an operation can only be read-only or rollbackable, which greatly reduces the utility of the "same value == same tracked set" property.

So it seems simpler to drop the double write and to update the documentation to highlight that this file does not guarantee race-free operation. As a result, the "key" is demoted to a "hint". Documentation is updated accordingly.

Differential Revision: https://phab.mercurial-scm.org/D12201
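
To make the constraint concrete, here is a minimal sketch of the read-before/read-after pattern described above. The file path and helper names are assumptions for illustration only, not Mercurial's actual API; the point is that a consumer may only trust (and cache) a value derived from the tracked set if the hint reads identically before and after its read-only or rollbackable work.

import os

# Hypothetical hint location, for illustration only -- not Mercurial's API.
TRACKED_HINT = '.hg/dirstate-tracked-hint'

_cache = {}  # {'hint': bytes, 'tracked': frozenset}


def read_hint(repo_root):
    """Return the current hint value, or None if the file does not exist."""
    try:
        with open(os.path.join(repo_root, TRACKED_HINT), 'rb') as fh:
            return fh.read()
    except FileNotFoundError:
        return None


def tracked_set(repo_root, compute_tracked_set):
    """Return the tracked set, reusing a cached value only when safe.

    The hint is read before and after the computation; the result is cached
    only if the two reads are identical, i.e. no change to the tracked set
    was observed while the (read-only) computation ran.
    """
    before = read_hint(repo_root)
    if before is not None and _cache.get('hint') == before:
        return _cache['tracked']
    tracked = frozenset(compute_tracked_set())  # must be read-only or rollbackable
    after = read_hint(repo_root)
    if before is not None and before == after:
        _cache['hint'] = before
        _cache['tracked'] = tracked
    return tracked

Note that even this check is not race-free (the hint can change between either read and the surrounding work), which is exactly why the file is demoted from a "key" to a "hint".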

File last commit: r47758:07b9ebea default
r49644:6e559391 default
metadatastore.py
167 lines | 5.2 KiB | text/x-python
from __future__ import absolute_import

from mercurial.node import (
    hex,
    sha1nodeconstants,
)
from . import (
    basestore,
    shallowutil,
)


class unionmetadatastore(basestore.baseunionstore):
    def __init__(self, *args, **kwargs):
        super(unionmetadatastore, self).__init__(*args, **kwargs)

        self.stores = args
        self.writestore = kwargs.get('writestore')

        # If allowincomplete==True then the union store can return partial
        # ancestor lists, otherwise it will throw a KeyError if a full
        # history can't be found.
        self.allowincomplete = kwargs.get('allowincomplete', False)

    def getancestors(self, name, node, known=None):
        """Returns as many ancestors as we're aware of.

        return value: {
            node: (p1, p2, linknode, copyfrom),
            ...
        }
        """
        if known is None:
            known = set()
        if node in known:
            return []

        ancestors = {}

        def traverse(curname, curnode):
            # TODO: this algorithm has the potential to traverse parts of
            # history twice. Ex: with A->B->C->F and A->B->D->F, both D and C
            # may be queued as missing, then B and A are traversed for both.
            queue = [(curname, curnode)]
            missing = []
            seen = set()
            while queue:
                name, node = queue.pop()
                if (name, node) in seen:
                    continue
                seen.add((name, node))
                value = ancestors.get(node)
                if not value:
                    missing.append((name, node))
                    continue
                p1, p2, linknode, copyfrom = value
                if p1 != sha1nodeconstants.nullid and p1 not in known:
                    queue.append((copyfrom or curname, p1))
                if p2 != sha1nodeconstants.nullid and p2 not in known:
                    queue.append((curname, p2))
            return missing

        missing = [(name, node)]
        while missing:
            curname, curnode = missing.pop()
            try:
                ancestors.update(
                    self._getpartialancestors(curname, curnode, known=known)
                )
                newmissing = traverse(curname, curnode)
                missing.extend(newmissing)
            except KeyError:
                # If we allow incomplete histories, don't throw.
                if not self.allowincomplete:
                    raise
                # If the requested name+node doesn't exist, always throw.
                if (curname, curnode) == (name, node):
                    raise

        # TODO: ancestors should probably be (name, node) -> (value)
        return ancestors

    @basestore.baseunionstore.retriable
    def _getpartialancestors(self, name, node, known=None):
        for store in self.stores:
            try:
                return store.getancestors(name, node, known=known)
            except KeyError:
                pass

        raise KeyError((name, hex(node)))

    @basestore.baseunionstore.retriable
    def getnodeinfo(self, name, node):
        for store in self.stores:
            try:
                return store.getnodeinfo(name, node)
            except KeyError:
                pass

        raise KeyError((name, hex(node)))

    def add(self, name, node, data):
        raise RuntimeError(
            b"cannot add content only to remotefilelog contentstore"
        )

    def getmissing(self, keys):
        missing = keys
        for store in self.stores:
            if missing:
                missing = store.getmissing(missing)
        return missing

    def markledger(self, ledger, options=None):
        for store in self.stores:
            store.markledger(ledger, options)

    def getmetrics(self):
        metrics = [s.getmetrics() for s in self.stores]
        return shallowutil.sumdicts(*metrics)


class remotefilelogmetadatastore(basestore.basestore):
    def getancestors(self, name, node, known=None):
        """Returns as many ancestors as we're aware of.

        return value: {
            node: (p1, p2, linknode, copyfrom),
            ...
        }
        """
        data = self._getdata(name, node)
        ancestors = shallowutil.ancestormap(data)
        return ancestors

    def getnodeinfo(self, name, node):
        return self.getancestors(name, node)[node]

    def add(self, name, node, parents, linknode):
        raise RuntimeError(
            b"cannot add metadata only to remotefilelog metadatastore"
        )


class remotemetadatastore(object):
    def __init__(self, ui, fileservice, shared):
        self._fileservice = fileservice
        self._shared = shared

    def getancestors(self, name, node, known=None):
        self._fileservice.prefetch(
            [(name, hex(node))], force=True, fetchdata=False, fetchhistory=True
        )
        return self._shared.getancestors(name, node, known=known)

    def getnodeinfo(self, name, node):
        return self.getancestors(name, node)[node]

    def add(self, name, node, data):
        raise RuntimeError(b"cannot add to a remote store")

    def getmissing(self, keys):
        return keys

    def markledger(self, ledger, options=None):
        pass
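
For context, the first-hit-wins fallback that unionmetadatastore.getnodeinfo and getmissing implement can be illustrated with a small self-contained sketch. The dictmetadatastore and simpleunionstore classes below are stand-ins invented for illustration; they are not part of remotefilelog and use none of Mercurial's APIs.

# Simplified stand-in for the union-store fallback pattern above: each
# backing store is tried in order, the first hit wins, and a KeyError is
# raised only if no store has the requested (name, node) entry.
class dictmetadatastore(object):
    def __init__(self, entries):
        # entries: {(name, node): (p1, p2, linknode, copyfrom)}
        self._entries = entries

    def getnodeinfo(self, name, node):
        return self._entries[(name, node)]

    def getmissing(self, keys):
        return [k for k in keys if k not in self._entries]


class simpleunionstore(object):
    def __init__(self, *stores):
        self.stores = stores

    def getnodeinfo(self, name, node):
        for store in self.stores:
            try:
                return store.getnodeinfo(name, node)
            except KeyError:
                pass
        raise KeyError((name, node))

    def getmissing(self, keys):
        # Each store narrows the missing set, mirroring
        # unionmetadatastore.getmissing above.
        missing = keys
        for store in self.stores:
            if missing:
                missing = store.getmissing(missing)
        return missing


local = dictmetadatastore({('a.txt', 'n1'): ('p1', 'p2', 'l1', None)})
shared = dictmetadatastore({('a.txt', 'n2'): ('n1', 'null', 'l2', None)})
union = simpleunionstore(local, shared)

assert union.getnodeinfo('a.txt', 'n2')[0] == 'n1'   # found in the second store
assert union.getmissing([('a.txt', 'n1'), ('a.txt', 'n3')]) == [('a.txt', 'n3')]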