remotefilelog.py
516 lines
| 15.6 KiB
| text/x-python
|
PythonLexer
Augie Fackler
|
r40530 | # remotefilelog.py - filelog implementation where filelog history is stored | ||
# remotely | ||||
# | ||||
# Copyright 2013 Facebook, Inc. | ||||
# | ||||
# This software may be used and distributed according to the terms of the | ||||
# GNU General Public License version 2 or any later version. | ||||
from __future__ import absolute_import | ||||
import collections | ||||
import os | ||||
Augie Fackler
|
r42264 | from mercurial.node import ( | ||
bin, | ||||
nullid, | ||||
wdirfilenodeids, | ||||
wdirid, | ||||
) | ||||
Augie Fackler
|
r40530 | from mercurial.i18n import _ | ||
from mercurial import ( | ||||
ancestor, | ||||
error, | ||||
mdiff, | ||||
Gregory Szorc
|
r43375 | pycompat, | ||
Augie Fackler
|
r40530 | revlog, | ||
r43263 | util, | |||
Augie Fackler
|
r40530 | ) | ||
from mercurial.utils import storageutil | ||||
r43141 | from mercurial.revlogutils import flagutil | |||
Augie Fackler
|
r40530 | |||
from . import ( | ||||
constants, | ||||
fileserverclient, | ||||
shallowutil, | ||||
) | ||||
Augie Fackler
|
r43346 | |||
Augie Fackler
|
class remotefilelognodemap(object):
    """Minimal node-membership map backed by a remotefilelog content store.

    Supports only what remotefilelog itself needs: membership tests and
    identity lookups.
    """

    def __init__(self, filename, store):
        self._filename = filename
        self._store = store

    def __contains__(self, node):
        # A node is "known" when the store reports nothing missing for it.
        return not self._store.getmissing([(self._filename, node)])

    def __get__(self, node):
        # NOTE(review): named __get__ (descriptor protocol) but used like a
        # mapping lookup; presumably meant as __getitem__ — confirm callers.
        if node not in self:
            raise KeyError(node)
        return node
class remotefilelog(object):
    """A filelog-like store whose file history lives on a remote server.

    Implements enough of the revlog/filelog interface for shallow
    repositories; contents are fetched on demand through the repo's
    content/metadata stores and file service.
    """

    # advertise generaldelta so callers pick delta-friendly code paths
    _generaldelta = True
    # error class the flagutil machinery expects on processor failure
    _flagserrorclass = error.RevlogError

    def __init__(self, opener, path, repo):
        self.opener = opener
        self.filename = path
        self.repo = repo
        # node membership is answered by the shared content store
        self.nodemap = remotefilelognodemap(self.filename, repo.contentstore)
        self.version = 1
        # per-instance copy of the globally registered revlog flag processors
        self._flagprocessors = dict(flagutil.flagprocessors)
r40530 | def read(self, node): | ||
"""returns the file contents at this node""" | ||||
t = self.revision(node) | ||||
Augie Fackler
|
r43347 | if not t.startswith(b'\1\n'): | ||
Augie Fackler
|
r40530 | return t | ||
Augie Fackler
|
r43347 | s = t.index(b'\1\n', 2) | ||
Augie Fackler
|
r43346 | return t[s + 2 :] | ||
Augie Fackler
|
r40530 | |||
    def add(self, text, meta, transaction, linknode, p1=None, p2=None):
        """Add a revision, hashing it the same way a vanilla filelog would.

        ``meta`` may carry ``copy``/``copyrev`` keys; they are folded into
        the hashed text so the resulting node matches core Mercurial.
        Returns the new node.
        """
        # hash with the metadata, like in vanilla filelogs
        hashtext = shallowutil.createrevlogtext(
            text, meta.get(b'copy'), meta.get(b'copyrev')
        )

        node = storageutil.hashrevisionsha1(hashtext, p1, p2)

        return self.addrevision(
            hashtext, transaction, linknode, p1, p2, node=node
        )
def _createfileblob(self, text, meta, flags, p1, p2, node, linknode): | ||||
# text passed to "_createfileblob" does not include filelog metadata | ||||
header = shallowutil.buildfileblobheader(len(text), flags) | ||||
Augie Fackler
|
r43347 | data = b"%s\0%s" % (header, text) | ||
Augie Fackler
|
r40530 | |||
realp1 = p1 | ||||
Augie Fackler
|
r43347 | copyfrom = b"" | ||
if meta and b'copy' in meta: | ||||
copyfrom = meta[b'copy'] | ||||
realp1 = bin(meta[b'copyrev']) | ||||
Augie Fackler
|
r40530 | |||
Augie Fackler
|
r43347 | data += b"%s%s%s%s%s\0" % (node, realp1, p2, linknode, copyfrom) | ||
Augie Fackler
|
r40530 | |||
visited = set() | ||||
pancestors = {} | ||||
queue = [] | ||||
if realp1 != nullid: | ||||
p1flog = self | ||||
if copyfrom: | ||||
p1flog = remotefilelog(self.opener, copyfrom, self.repo) | ||||
pancestors.update(p1flog.ancestormap(realp1)) | ||||
queue.append(realp1) | ||||
visited.add(realp1) | ||||
if p2 != nullid: | ||||
pancestors.update(self.ancestormap(p2)) | ||||
queue.append(p2) | ||||
visited.add(p2) | ||||
Augie Fackler
|
r43347 | ancestortext = b"" | ||
Augie Fackler
|
r40530 | |||
# add the ancestors in topological order | ||||
while queue: | ||||
c = queue.pop(0) | ||||
pa1, pa2, ancestorlinknode, pacopyfrom = pancestors[c] | ||||
Augie Fackler
|
r43347 | pacopyfrom = pacopyfrom or b'' | ||
ancestortext += b"%s%s%s%s%s\0" % ( | ||||
Augie Fackler
|
r43346 | c, | ||
pa1, | ||||
pa2, | ||||
ancestorlinknode, | ||||
pacopyfrom, | ||||
) | ||||
Augie Fackler
|
r40530 | |||
if pa1 != nullid and pa1 not in visited: | ||||
queue.append(pa1) | ||||
visited.add(pa1) | ||||
if pa2 != nullid and pa2 not in visited: | ||||
queue.append(pa2) | ||||
visited.add(pa2) | ||||
data += ancestortext | ||||
return data | ||||
Augie Fackler
|
    def addrevision(
        self,
        text,
        transaction,
        linknode,
        p1,
        p2,
        cachedelta=None,
        node=None,
        flags=revlog.REVIDX_DEFAULT_FLAGS,
        sidedata=None,
    ):
        """Add a revision whose ``text`` still carries filelog metadata.

        Runs the write-side flag processors, then defers to
        ``addrawrevision``.  Returns the node.
        """
        # text passed to "addrevision" includes hg filelog metadata header
        if node is None:
            node = storageutil.hashrevisionsha1(text, p1, p2)

        meta, metaoffset = storageutil.parsemeta(text)
        rawtext, validatehash = flagutil.processflagswrite(
            self,
            text,
            flags,
        )
        return self.addrawrevision(
            rawtext,
            transaction,
            linknode,
            p1,
            p2,
            node,
            flags,
            cachedelta,
            _metatuple=(meta, metaoffset),
        )

    def addrawrevision(
        self,
        rawtext,
        transaction,
        linknode,
        p1,
        p2,
        node,
        flags,
        cachedelta=None,
        _metatuple=None,
    ):
        """Store ``rawtext`` (post flag processing) as a fileblob.

        ``_metatuple`` is a private fast path used by ``addrevision`` when
        the filelog metadata was already parsed with confidence.
        Returns the node.
        """
        if _metatuple:
            # _metatuple: used by "addrevision" internally by remotefilelog
            # meta was parsed confidently
            meta, metaoffset = _metatuple
        else:
            # not from self.addrevision, but something else (repo._filecommit)
            # calls addrawrevision directly. remotefilelog needs to get and
            # strip filelog metadata.
            # we don't have confidence about whether rawtext contains filelog
            # metadata or not (flag processor could replace it), so we just
            # parse it as best-effort.
            # in LFS (flags != 0)'s case, the best way is to call LFS code to
            # get the meta information, instead of storageutil.parsemeta.
            meta, metaoffset = storageutil.parsemeta(rawtext)
        if flags != 0:
            # when flags != 0, be conservative and do not mangle rawtext, since
            # a read flag processor expects the text not being mangled at all.
            metaoffset = 0
        if metaoffset:
            # remotefilelog fileblob stores copy metadata in its ancestortext,
            # not its main blob. so we need to remove filelog metadata
            # (containing copy information) from text.
            blobtext = rawtext[metaoffset:]
        else:
            blobtext = rawtext

        data = self._createfileblob(
            blobtext, meta, flags, p1, p2, node, linknode
        )

        self.repo.contentstore.addremotefilelognode(self.filename, node, data)
        return node
def renamed(self, node): | ||||
ancestors = self.repo.metadatastore.getancestors(self.filename, node) | ||||
p1, p2, linknode, copyfrom = ancestors[node] | ||||
if copyfrom: | ||||
return (copyfrom, p1) | ||||
return False | ||||
def size(self, node): | ||||
"""return the size of a given revision""" | ||||
return len(self.read(node)) | ||||
rawsize = size | ||||
def cmp(self, node, text): | ||||
"""compare text with a given file revision | ||||
returns True if text is different than what is stored. | ||||
""" | ||||
if node == nullid: | ||||
return True | ||||
nodetext = self.read(node) | ||||
return nodetext != text | ||||
def __nonzero__(self): | ||||
return True | ||||
Augie Fackler
|
r41287 | __bool__ = __nonzero__ | ||
Augie Fackler
|
r40530 | def __len__(self): | ||
Augie Fackler
|
r43347 | if self.filename == b'.hgtags': | ||
Augie Fackler
|
r40530 | # The length of .hgtags is used to fast path tag checking. | ||
# remotefilelog doesn't support .hgtags since the entire .hgtags | ||||
# history is needed. Use the excludepattern setting to make | ||||
# .hgtags a normal filelog. | ||||
return 0 | ||||
Augie Fackler
|
r43347 | raise RuntimeError(b"len not supported") | ||
Augie Fackler
|
r40530 | |||
Augie Fackler
|
r45060 | def heads(self): | ||
# Fake heads of the filelog to satisfy hgweb. | ||||
return [] | ||||
Augie Fackler
|
r40530 | def empty(self): | ||
return False | ||||
    def flags(self, node):
        """Return the revlog flag bits stored for ``node``.

        Accepts only nodes; integer revisions are a programming error
        because remotefilelog has no stable rev numbering.
        """
        if isinstance(node, int):
            raise error.ProgrammingError(
                b'remotefilelog does not accept integer rev for flags'
            )
        store = self.repo.contentstore
        return store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)

    def parents(self, node):
        """Return ``(p1, p2)`` for ``node``; a copy reports p1 as null."""
        if node == nullid:
            return nullid, nullid

        ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
        p1, p2, linknode, copyfrom = ancestormap[node]
        if copyfrom:
            # match filelog behaviour: a copy severs the p1 link
            p1 = nullid

        return p1, p2

    def parentrevs(self, rev):
        # TODO(augie): this is a node and should be a rev, but for now
        # nothing in core seems to actually break.
        return self.parents(rev)

    def linknode(self, node):
        """Return the changelog node this file revision links to."""
        ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
        p1, p2, linknode, copyfrom = ancestormap[node]
        return linknode

    def linkrev(self, node):
        """Return the unfiltered changelog rev of the linknode."""
        return self.repo.unfiltered().changelog.rev(self.linknode(node))
    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltaprevious=False,
        deltamode=None,
        sidedata_helpers=None,
    ):
        """Yield ``revlogrevisiondelta`` objects for ``nodes``.

        NOTE(review): ``basenode``/``prevnode`` are assigned only on the
        first iteration, so every later node deltas against the first
        node's p1 (or emits a full revision when that base equals the node
        or is null) — presumably intentional, but worth confirming against
        the core emitrevisions contract.
        """
        # we don't use any of these parameters here
        del nodesorder, revisiondata, assumehaveparentrevisions, deltaprevious
        del deltamode
        prevnode = None
        for node in nodes:
            p1, p2 = self.parents(node)
            if prevnode is None:
                basenode = prevnode = p1
            if basenode == node:
                basenode = nullid
            if basenode != nullid:
                revision = None
                delta = self.revdiff(basenode, node)
            else:
                revision = self.rawdata(node)
                delta = None
            yield revlog.revlogrevisiondelta(
                node=node,
                p1node=p1,
                p2node=p2,
                linknode=self.linknode(node),
                basenode=basenode,
                flags=self.flags(node),
                baserevisionsize=None,
                revision=revision,
                delta=delta,
                # Sidedata is not supported yet
                sidedata=None,
            )
    def revdiff(self, node1, node2):
        """Return a delta transforming node1's raw text into node2's."""
        return mdiff.textdiff(self.rawdata(node1), self.rawdata(node2))

    def lookup(self, node):
        """Normalize ``node`` given as hex (40 chars) or binary (20 bytes).

        Raises LookupError for anything else.
        """
        if len(node) == 40:
            node = bin(node)
        if len(node) != 20:
            raise error.LookupError(
                node, self.filename, _(b'invalid lookup input')
            )

        return node

    def rev(self, node):
        # This is a hack to make TortoiseHG work.
        return node

    def node(self, rev):
        # This is a hack: there is no rev numbering, so the "rev" is
        # already a node; reject real integers loudly.
        if isinstance(rev, int):
            raise error.ProgrammingError(
                b'remotefilelog does not convert integer rev to node'
            )
        return rev

    def _processflags(self, text, flags, operation, raw=False):
        """deprecated entry point to access flag processors"""
        msg = b'_processflag(...) use the specialized variant'
        util.nouideprecwarn(msg, b'5.2', stacklevel=2)
        if raw:
            return text, flagutil.processflagsraw(self, text, flags)
        elif operation == b'read':
            return flagutil.processflagsread(self, text, flags)
        else:  # write operation
            return flagutil.processflagswrite(self, text, flags)
    def revision(self, node, raw=False):
        """returns the revlog contents at this node.
        this includes the meta data traditionally included in file revlogs.
        this is generally only used for bundling and communicating with vanilla
        hg clients.

        With ``raw=True`` the stored text is returned without running the
        read-side flag processors.
        """
        if node == nullid:
            return b""
        if len(node) != 20:
            raise error.LookupError(
                node, self.filename, _(b'invalid revision input')
            )
        # working-directory pseudo-nodes have no stored content
        if node == wdirid or node in wdirfilenodeids:
            raise error.WdirUnsupported

        store = self.repo.contentstore
        rawtext = store.get(self.filename, node)
        if raw:
            return rawtext
        flags = store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
        if flags == 0:
            return rawtext
        return flagutil.processflagsread(self, rawtext, flags)[0]

    def rawdata(self, node):
        # NOTE(review): delegates with raw=False, so read flag processors DO
        # run — the opposite of revlog.rawdata's raw=True semantics.  Confirm
        # this asymmetry is intentional before relying on it.
        return self.revision(node, raw=False)

    def _read(self, id):
        """reads the raw file blob from disk, cache, or server"""
        fileservice = self.repo.fileservice
        localcache = fileservice.localcache
        cachekey = fileserverclient.getcachekey(
            self.repo.name, self.filename, id
        )
        # 1) shared local cache
        try:
            return localcache.read(cachekey)
        except KeyError:
            pass

        # 2) repo-local store.  NOTE(review): self.localpath is not assigned
        # in __init__ — verify this attribute is provided elsewhere or this
        # path is dead code.
        localkey = fileserverclient.getlocalkey(self.filename, id)
        localpath = os.path.join(self.localpath, localkey)
        try:
            return shallowutil.readfile(localpath)
        except IOError:
            pass

        # 3) fetch from the server, then re-check the cache
        fileservice.prefetch([(self.filename, id)])
        try:
            return localcache.read(cachekey)
        except KeyError:
            pass

        raise error.LookupError(id, self.filename, _(b'no node'))
    def ancestormap(self, node):
        """Return the ancestor mapping for ``node``.

        Maps ancestor node -> (p1, p2, linknode, copyfrom).
        """
        return self.repo.metadatastore.getancestors(self.filename, node)

    def ancestor(self, a, b):
        """Return the greatest common ancestor node of ``a`` and ``b``."""
        if a == nullid or b == nullid:
            return nullid

        revmap, parentfunc = self._buildrevgraph(a, b)
        nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)}

        ancs = ancestor.ancestors(parentfunc, revmap[a], revmap[b])
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(nodemap.__getitem__, ancs))
        return nullid

    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""
        if a == nullid or b == nullid:
            # NOTE(review): returns a bare node here rather than an iterable
            # like the normal path below — confirm callers tolerate this.
            return nullid

        revmap, parentfunc = self._buildrevgraph(a, b)
        nodemap = {v: k for (k, v) in pycompat.iteritems(revmap)}

        ancs = ancestor.commonancestorsheads(parentfunc, revmap[a], revmap[b])
        return map(nodemap.__getitem__, ancs)

    def _buildrevgraph(self, a, b):
        """Builds a numeric revision graph for the given two nodes.
        Returns a node->rev map and a rev->[revs] parent function.
        """
        amap = self.ancestormap(a)
        bmap = self.ancestormap(b)

        # Union the two maps
        parentsmap = collections.defaultdict(list)
        allparents = set()
        for mapping in (amap, bmap):
            for node, pdata in pycompat.iteritems(mapping):
                parents = parentsmap[node]
                p1, p2, linknode, copyfrom = pdata
                # Don't follow renames (copyfrom).
                # remotefilectx.ancestor does that.
                if p1 != nullid and not copyfrom:
                    parents.append(p1)
                    allparents.add(p1)
                if p2 != nullid:
                    parents.append(p2)
                    allparents.add(p2)

        # Breadth first traversal to build linkrev graph
        parentrevs = collections.defaultdict(list)
        revmap = {}
        # start from the nodes that are not anyone's parent (the "heads")
        queue = collections.deque(
            ((None, n) for n in parentsmap if n not in allparents)
        )
        while queue:
            prevrev, current = queue.pop()
            if current in revmap:
                if prevrev:
                    parentrevs[prevrev].append(revmap[current])
                continue

            # Assign linkrevs in reverse order, so start at
            # len(parentsmap) and work backwards.
            currentrev = len(parentsmap) - len(revmap) - 1
            revmap[current] = currentrev

            if prevrev:
                parentrevs[prevrev].append(currentrev)

            for parent in parentsmap.get(current):
                queue.appendleft((currentrev, parent))

        return revmap, parentrevs.__getitem__
def strip(self, minlink, transaction): | ||||
pass | ||||
# misc unused things | ||||
def files(self): | ||||
return [] | ||||
def checksize(self): | ||||
return 0, 0 | ||||