# sqlitestore.py - Storage backend that uses SQLite
#
# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""store repository data in SQLite (EXPERIMENTAL) | ||||
The sqlitestore extension enables the storage of repository data in SQLite. | ||||
This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY | ||||
GUARANTEES. This means that repositories created with this extension may | ||||
only be usable with the exact version of this extension/Mercurial that was | ||||
used. The extension attempts to enforce this in order to prevent repository | ||||
corruption. | ||||
In addition, several features are not yet supported or have known bugs: | ||||
* Only some data is stored in SQLite. Changeset, manifest, and other repository | ||||
data is not yet stored in SQLite. | ||||
* Transactions are not robust. If the process is aborted at the right time | ||||
during transaction close/rollback, the repository could be in an inconsistent | ||||
state. This problem will diminish once all repository data is tracked by | ||||
SQLite. | ||||
* Bundle repositories do not work (the ability to use e.g. | ||||
`hg -R <bundle-file> log` to automatically overlay a bundle on top of the | ||||
existing repository). | ||||
* Various other features don't work. | ||||
This extension should work for basic clone/pull, update, and commit workflows. | ||||
Some history rewriting operations may fail due to lack of support for bundle | ||||
repositories. | ||||
To use, activate the extension and set the ``storage.new-repo-backend`` config | ||||
option to ``sqlite`` to enable new repositories to use SQLite for storage. | ||||
""" | ||||
# To run the test suite with repos using SQLite by default, execute the
# following:
#
#   HGREPOFEATURES="sqlitestore" run-tests.py \
#       --extra-config-opt extensions.sqlitestore= \
#       --extra-config-opt storage.new-repo-backend=sqlite
from __future__ import absolute_import | ||||
import hashlib | ||||
import sqlite3 | ||||
import struct | ||||
import threading | ||||
import zlib | ||||
from mercurial.i18n import _ | ||||
from mercurial.node import ( | ||||
nullid, | ||||
nullrev, | ||||
short, | ||||
) | ||||
Augie Fackler
|
r43346 | from mercurial.thirdparty import attr | ||
Gregory Szorc
|
r40362 | from mercurial import ( | ||
ancestor, | ||||
dagop, | ||||
Pulkit Goyal
|
r40446 | encoding, | ||
Gregory Szorc
|
r40362 | error, | ||
extensions, | ||||
localrepo, | ||||
mdiff, | ||||
pycompat, | ||||
registrar, | ||||
util, | ||||
verify, | ||||
) | ||||
Pulkit Goyal
|
r43078 | from mercurial.interfaces import ( | ||
repository, | ||||
Pulkit Goyal
|
r43079 | util as interfaceutil, | ||
Pulkit Goyal
|
r43078 | ) | ||
Augie Fackler
|
r43346 | from mercurial.utils import storageutil | ||
Gregory Szorc
|
r40362 | |||
try:
    from mercurial import zstd

    # NOTE(review): this attribute access presumably forces Mercurial's lazy
    # importer to resolve the module now, so an unusable zstd is detected
    # here rather than at first use — confirm against demandimport behavior.
    zstd.__version__
except ImportError:
    # zstd support is optional; fall back to zlib (see configitem default).
    zstd = None
configtable = {}
configitem = registrar.configitem(configtable)

# experimental config: storage.sqlite.compression
# Default compression engine prefers zstd when the optional module imported.
configitem(
    b'storage',
    b'sqlite.compression',
    default=b'zstd' if zstd else b'zlib',
    experimental=True,
)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

# Repository requirements advertising SQLite storage and the compression
# engine the store was created with.
REQUIREMENT = b'exp-sqlite-001'
REQUIREMENT_ZSTD = b'exp-sqlite-comp-001=zstd'
REQUIREMENT_ZLIB = b'exp-sqlite-comp-001=zlib'
REQUIREMENT_NONE = b'exp-sqlite-comp-001=none'
REQUIREMENT_SHALLOW_FILES = b'exp-sqlite-shallow-files'

CURRENT_SCHEMA_VERSION = 1

# Values stored in the delta.compression column.
COMPRESSION_NONE = 1
COMPRESSION_ZSTD = 2
COMPRESSION_ZLIB = 3

# Bitwise flags stored in fileindex.flags.
FLAG_CENSORED = 1
# Parent was unknown when the revision was received (shallow/widening pulls);
# may be repaired later by addgroup().
FLAG_MISSING_P1 = 2
FLAG_MISSING_P2 = 4
# DDL statements executed, in order, when creating a new store.
CREATE_SCHEMA = [
    # Deltas are stored as content-indexed blobs.
    # compression column holds COMPRESSION_* constant for how the
    # delta is encoded.
    'CREATE TABLE delta ('
    '    id INTEGER PRIMARY KEY, '
    '    compression INTEGER NOT NULL, '
    '    hash BLOB UNIQUE ON CONFLICT ABORT, '
    '    delta BLOB NOT NULL '
    ')',
    # Tracked paths are denormalized to integers to avoid redundant
    # storage of the path name.
    'CREATE TABLE filepath ('
    '    id INTEGER PRIMARY KEY, '
    '    path BLOB NOT NULL '
    ')',
    'CREATE UNIQUE INDEX filepath_path ON filepath (path)',
    # We have a single table for all file revision data.
    # Each file revision is uniquely described by a (path, rev) and
    # (path, node).
    #
    # Revision data is stored as a pointer to the delta producing this
    # revision and the file revision whose delta should be applied before
    # that one. One can reconstruct the delta chain by recursively following
    # the delta base revision pointers until one encounters NULL.
    #
    # flags column holds bitwise integer flags controlling storage options.
    # These flags are defined by the FLAG_* constants.
    'CREATE TABLE fileindex ('
    '    id INTEGER PRIMARY KEY, '
    '    pathid INTEGER REFERENCES filepath(id), '
    '    revnum INTEGER NOT NULL, '
    '    p1rev INTEGER NOT NULL, '
    '    p2rev INTEGER NOT NULL, '
    '    linkrev INTEGER NOT NULL, '
    '    flags INTEGER NOT NULL, '
    '    deltaid INTEGER REFERENCES delta(id), '
    '    deltabaseid INTEGER REFERENCES fileindex(id), '
    '    node BLOB NOT NULL '
    ')',
    'CREATE UNIQUE INDEX fileindex_pathrevnum '
    '    ON fileindex (pathid, revnum)',
    'CREATE UNIQUE INDEX fileindex_pathnode ON fileindex (pathid, node)',
    # Provide a view over all file data for convenience.
    'CREATE VIEW filedata AS '
    'SELECT '
    '    fileindex.id AS id, '
    '    filepath.id AS pathid, '
    '    filepath.path AS path, '
    '    fileindex.revnum AS revnum, '
    '    fileindex.node AS node, '
    '    fileindex.p1rev AS p1rev, '
    '    fileindex.p2rev AS p2rev, '
    '    fileindex.linkrev AS linkrev, '
    '    fileindex.flags AS flags, '
    '    fileindex.deltaid AS deltaid, '
    '    fileindex.deltabaseid AS deltabaseid '
    'FROM filepath, fileindex '
    'WHERE fileindex.pathid=filepath.id',
    'PRAGMA user_version=%d' % CURRENT_SCHEMA_VERSION,
]
Augie Fackler
|
r43346 | |||
def resolvedeltachain(db, pathid, node, revisioncache, stoprids, zstddctx=None):
    """Resolve a delta chain for a file node.

    Walks the chain recorded in ``fileindex``/``delta`` for ``(pathid,
    node)`` via a recursive CTE, decompresses each delta, and patches them
    together into the revision fulltext.

    ``stoprids`` maps fileindex row ids to nodes whose fulltexts are present
    in ``revisioncache``; traversal stops at such a row so the cached
    fulltext can serve as the base text. ``zstddctx`` is the decompressor
    used for COMPRESSION_ZSTD blobs.
    """
    # TODO the "not in ({stops})" here is possibly slowing down the query
    # because it needs to perform the lookup on every recursive invocation.
    # This could possibly be faster if we created a temporary query with
    # baseid "poisoned" to null and limited the recursive filter to
    # "is not null".
    res = db.execute(
        'WITH RECURSIVE '
        '    deltachain(deltaid, baseid) AS ('
        '        SELECT deltaid, deltabaseid FROM fileindex '
        '            WHERE pathid=? AND node=? '
        '        UNION ALL '
        '        SELECT fileindex.deltaid, deltabaseid '
        '            FROM fileindex, deltachain '
        '            WHERE '
        '                fileindex.id=deltachain.baseid '
        '                AND deltachain.baseid IS NOT NULL '
        '                AND fileindex.id NOT IN ({stops}) '
        '    ) '
        'SELECT deltachain.baseid, compression, delta '
        'FROM deltachain, delta '
        'WHERE delta.id=deltachain.deltaid'.format(
            stops=','.join(['?'] * len(stoprids))
        ),
        tuple([pathid, node] + list(stoprids.keys())),
    )

    deltas = []
    lastdeltabaseid = None

    for deltabaseid, compression, delta in res:
        lastdeltabaseid = deltabaseid

        if compression == COMPRESSION_ZSTD:
            delta = zstddctx.decompress(delta)
        elif compression == COMPRESSION_NONE:
            delta = delta
        elif compression == COMPRESSION_ZLIB:
            delta = zlib.decompress(delta)
        else:
            raise SQLiteStoreError(
                b'unhandled compression type: %d' % compression
            )

        deltas.append(delta)

    # If the walk terminated on a cached revision, use its fulltext as the
    # base; otherwise the deepest "delta" is itself the base fulltext.
    if lastdeltabaseid in stoprids:
        basetext = revisioncache[stoprids[lastdeltabaseid]]
    else:
        basetext = deltas.pop()

    deltas.reverse()
    fulltext = mdiff.patches(basetext, deltas)

    # SQLite returns buffer instances for blob columns on Python 2. This
    # type can propagate through the delta application layer. Because
    # downstream callers assume revisions are bytes, cast as needed.
    if not isinstance(fulltext, bytes):
        # Fix: cast the assembled fulltext, not the last raw delta — the
        # original `bytes(delta)` returned the wrong content entirely
        # whenever the cast path was taken.
        fulltext = bytes(fulltext)

    return fulltext
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
def insertdelta(db, compression, hash, delta):
    """Insert a delta blob and return its row id, de-duplicating on hash.

    The delta table declares ``hash BLOB UNIQUE ON CONFLICT ABORT``, so a
    duplicate insert raises IntegrityError; in that case the id of the
    already-stored row is returned instead.
    """
    try:
        cursor = db.execute(
            'INSERT INTO delta (compression, hash, delta) VALUES (?, ?, ?)',
            (compression, hash, delta),
        )
        return cursor.lastrowid
    except sqlite3.IntegrityError:
        existing = db.execute(
            'SELECT id FROM delta WHERE hash=?', (hash,)
        ).fetchone()
        return existing[0]
Gregory Szorc
|
r40362 | |||
class SQLiteStoreError(error.StorageError):
    """Raised for integrity and consistency failures in the SQLite store."""

    pass
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
@attr.s
class revisionentry(object):
    """In-memory record of one fileindex row (see _refreshindex)."""

    # fileindex primary key for this revision's row.
    rid = attr.ib()
    # Local revision number (dense, starting at 0).
    rev = attr.ib()
    # Binary node of this revision.
    node = attr.ib()
    # Parent revision numbers (nullrev when absent).
    p1rev = attr.ib()
    p2rev = attr.ib()
    # Parent nodes (nullid when absent).
    p1node = attr.ib()
    p2node = attr.ib()
    # Changelog revision that introduced this file revision.
    linkrev = attr.ib()
    # Bitwise FLAG_* values from the flags column.
    flags = attr.ib()
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class sqliterevisiondelta(object):
    """irevisiondelta implementation emitted by emitrevisions()."""

    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    linknode = attr.ib(default=None)
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class sqliteproblem(object):
    """iverifyproblem implementation yielded by verifyintegrity()."""

    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
r40362 | @interfaceutil.implementer(repository.ifilestorage) | ||
class sqlitefilestore(object): | ||||
"""Implements storage for an individual tracked path.""" | ||||
    def __init__(self, db, path, compression):
        """Initialize storage for one tracked *path*.

        ``db`` is the shared sqlite3 connection, ``compression`` is one of
        b'zstd', b'zlib', or b'none' (see censorrevision's engine dispatch).
        """
        self._db = db
        self._path = path

        self._pathid = None

        # revnum -> node
        self._revtonode = {}

        # node -> revnum
        self._nodetorev = {}

        # node -> data structure
        self._revisions = {}

        # Small fulltext cache used to short-circuit delta chain resolution.
        self._revisioncache = util.lrucachedict(10)

        self._compengine = compression

        if compression == b'zstd':
            self._cctx = zstd.ZstdCompressor(level=3)
            self._dctx = zstd.ZstdDecompressor()
        else:
            self._cctx = None
            self._dctx = None

        self._refreshindex()
    def _refreshindex(self):
        """Rebuild the in-memory revision maps from the database.

        Raises SQLiteStoreError if stored revision numbers are not a dense
        sequence starting at 0.
        """
        self._revtonode = {}
        self._nodetorev = {}
        self._revisions = {}

        res = list(
            self._db.execute(
                'SELECT id FROM filepath WHERE path=?', (self._path,)
            )
        )

        # Path not yet tracked; leave maps empty.
        if not res:
            self._pathid = None
            return

        self._pathid = res[0][0]

        res = self._db.execute(
            'SELECT id, revnum, node, p1rev, p2rev, linkrev, flags '
            'FROM fileindex '
            'WHERE pathid=? '
            'ORDER BY revnum ASC',
            (self._pathid,),
        )

        for i, row in enumerate(res):
            rid, rev, node, p1rev, p2rev, linkrev, flags = row

            if i != rev:
                raise SQLiteStoreError(
                    _(b'sqlite database has inconsistent revision numbers')
                )

            # Parents sort before children (revnum ASC), so parent nodes are
            # already in _revtonode by the time a child row is processed.
            if p1rev == nullrev:
                p1node = nullid
            else:
                p1node = self._revtonode[p1rev]

            if p2rev == nullrev:
                p2node = nullid
            else:
                p2node = self._revtonode[p2rev]

            entry = revisionentry(
                rid=rid,
                rev=rev,
                node=node,
                p1rev=p1rev,
                p2rev=p2rev,
                p1node=p1node,
                p2node=p2node,
                linkrev=linkrev,
                flags=flags,
            )

            self._revtonode[rev] = node
            self._nodetorev[node] = rev
            self._revisions[node] = entry
# Start of ifileindex interface. | ||||
    def __len__(self):
        """Return the number of revisions stored for this path."""
        return len(self._revisions)
    def __iter__(self):
        """Iterate over revision numbers 0..len(self)-1."""
        return iter(pycompat.xrange(len(self._revisions)))
Gregory Szorc
|
r40423 | def hasnode(self, node): | ||
if node == nullid: | ||||
return False | ||||
return node in self._nodetorev | ||||
Gregory Szorc
|
    def revs(self, start=0, stop=None):
        """Iterate revision numbers, optionally bounded by start/stop."""
        return storageutil.iterrevs(
            len(self._revisions), start=start, stop=stop
        )
Gregory Szorc
|
r40362 | |||
def parents(self, node): | ||||
if node == nullid: | ||||
return nullid, nullid | ||||
if node not in self._revisions: | ||||
Augie Fackler
|
r43347 | raise error.LookupError(node, self._path, _(b'no node')) | ||
Gregory Szorc
|
r40362 | |||
entry = self._revisions[node] | ||||
return entry.p1node, entry.p2node | ||||
def parentrevs(self, rev): | ||||
if rev == nullrev: | ||||
return nullrev, nullrev | ||||
if rev not in self._revtonode: | ||||
raise IndexError(rev) | ||||
entry = self._revisions[self._revtonode[rev]] | ||||
return entry.p1rev, entry.p2rev | ||||
def rev(self, node): | ||||
if node == nullid: | ||||
return nullrev | ||||
if node not in self._nodetorev: | ||||
Augie Fackler
|
r43347 | raise error.LookupError(node, self._path, _(b'no node')) | ||
Gregory Szorc
|
r40362 | |||
return self._nodetorev[node] | ||||
def node(self, rev): | ||||
if rev == nullrev: | ||||
return nullid | ||||
if rev not in self._revtonode: | ||||
raise IndexError(rev) | ||||
return self._revtonode[rev] | ||||
    def lookup(self, node):
        """Resolve a node identifier to a binary node via storageutil."""
        return storageutil.fileidlookup(self, node, self._path)
def linkrev(self, rev): | ||||
if rev == nullrev: | ||||
return nullrev | ||||
if rev not in self._revtonode: | ||||
raise IndexError(rev) | ||||
entry = self._revisions[self._revtonode[rev]] | ||||
return entry.linkrev | ||||
def iscensored(self, rev): | ||||
if rev == nullrev: | ||||
return False | ||||
if rev not in self._revtonode: | ||||
raise IndexError(rev) | ||||
return self._revisions[self._revtonode[rev]].flags & FLAG_CENSORED | ||||
def commonancestorsheads(self, node1, node2): | ||||
rev1 = self.rev(node1) | ||||
rev2 = self.rev(node2) | ||||
ancestors = ancestor.commonancestorsheads(self.parentrevs, rev1, rev2) | ||||
return pycompat.maplist(self.node, ancestors) | ||||
    def descendants(self, revs):
        """Yield revision numbers of descendants of *revs*."""
        # TODO we could implement this using a recursive SQL query, which
        # might be faster.
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)
    def heads(self, start=None, stop=None):
        """Return head nodes, optionally restricted by start/stop nodes."""
        # Only an unrestricted query on an empty store short-circuits to
        # [nullid]; a non-empty store falls through to the DAG walk.
        if start is None and stop is None:
            if not len(self):
                return [nullid]

        startrev = self.rev(start) if start is not None else nullrev
        stoprevs = {self.rev(n) for n in stop or []}

        revs = dagop.headrevssubset(
            self.revs, self.parentrevs, startrev=startrev, stoprevs=stoprevs
        )

        return [self.node(rev) for rev in revs]
    def children(self, node):
        """Return nodes having *node* as p1 or p2, in revnum order."""
        rev = self.rev(node)

        res = self._db.execute(
            'SELECT'
            '  node '
            '  FROM filedata '
            '  WHERE path=? AND (p1rev=? OR p2rev=?) '
            '  ORDER BY revnum ASC',
            (self._path, rev, rev),
        )

        return [row[0] for row in res]
# End of ifileindex interface. | ||||
# Start of ifiledata interface. | ||||
    def size(self, rev):
        """Return the length of the revision's data.

        For copies/renames the copy metadata is filtered out first (via
        read()); otherwise the raw fulltext length is reported.
        """
        if rev == nullrev:
            return 0

        if rev not in self._revtonode:
            raise IndexError(rev)

        node = self._revtonode[rev]

        if self.renamed(node):
            return len(self.read(node))

        return len(self.revision(node))
    def revision(self, node, raw=False, _verifyhash=True):
        """Return the fulltext of *node*.

        ``node`` may also be a revision number. ``raw`` is accepted for
        interface compatibility and ignored. ``_verifyhash`` controls SHA-1
        verification of the resolved text; it is forced off for revisions
        whose parents were rewritten (FLAG_MISSING_P1/P2) since the hash
        cannot match.
        """
        if node in (nullid, nullrev):
            return b''

        if isinstance(node, int):
            node = self.node(node)

        if node not in self._nodetorev:
            raise error.LookupError(node, self._path, _(b'no node'))

        if node in self._revisioncache:
            return self._revisioncache[node]

        # Because we have a fulltext revision cache, we are able to
        # short-circuit delta chain traversal and decompression as soon as
        # we encounter a revision in the cache.

        stoprids = {self._revisions[n].rid: n for n in self._revisioncache}

        # -1 is a sentinel that matches no fileindex row.
        if not stoprids:
            stoprids[-1] = None

        fulltext = resolvedeltachain(
            self._db,
            self._pathid,
            node,
            self._revisioncache,
            stoprids,
            zstddctx=self._dctx,
        )

        # Don't verify hashes if parent nodes were rewritten, as the hash
        # wouldn't verify.
        if self._revisions[node].flags & (FLAG_MISSING_P1 | FLAG_MISSING_P2):
            _verifyhash = False

        # Only verified fulltexts are cached.
        if _verifyhash:
            self._checkhash(fulltext, node)
            self._revisioncache[node] = fulltext

        return fulltext
    def rawdata(self, *args, **kwargs):
        """Alias of revision(); this store applies no raw transforms."""
        return self.revision(*args, **kwargs)
Gregory Szorc
|
    def read(self, node):
        """Return revision data with copy metadata filtered out."""
        return storageutil.filtermetadata(self.revision(node))
    def renamed(self, node):
        """Return copy info for *node*, per storageutil.filerevisioncopied."""
        return storageutil.filerevisioncopied(self, node)
    def cmp(self, node, fulltext):
        """Return True if *fulltext* differs from the stored revision."""
        return not storageutil.filedataequivalent(self, node, fulltext)
Augie Fackler
|
r43346 | def emitrevisions( | ||
self, | ||||
nodes, | ||||
nodesorder=None, | ||||
revisiondata=False, | ||||
assumehaveparentrevisions=False, | ||||
deltamode=repository.CG_DELTAMODE_STD, | ||||
): | ||||
Augie Fackler
|
r43347 | if nodesorder not in (b'nodes', b'storage', b'linear', None): | ||
Augie Fackler
|
r43346 | raise error.ProgrammingError( | ||
Augie Fackler
|
r43347 | b'unhandled value for nodesorder: %s' % nodesorder | ||
Augie Fackler
|
r43346 | ) | ||
Gregory Szorc
|
r40362 | |||
nodes = [n for n in nodes if n != nullid] | ||||
if not nodes: | ||||
return | ||||
# TODO perform in a single query. | ||||
res = self._db.execute( | ||||
Augie Fackler
|
r43907 | 'SELECT revnum, deltaid FROM fileindex ' | ||
'WHERE pathid=? ' | ||||
' AND node in (%s)' % (','.join(['?'] * len(nodes))), | ||||
Augie Fackler
|
r43346 | tuple([self._pathid] + nodes), | ||
) | ||||
Gregory Szorc
|
r40362 | |||
deltabases = {} | ||||
for rev, deltaid in res: | ||||
res = self._db.execute( | ||||
Augie Fackler
|
r43907 | 'SELECT revnum from fileindex WHERE pathid=? AND deltaid=?', | ||
Augie Fackler
|
r43346 | (self._pathid, deltaid), | ||
) | ||||
Gregory Szorc
|
r40362 | deltabases[rev] = res.fetchone()[0] | ||
# TODO define revdifffn so we can use delta from storage. | ||||
for delta in storageutil.emitrevisions( | ||||
Augie Fackler
|
r43346 | self, | ||
nodes, | ||||
nodesorder, | ||||
sqliterevisiondelta, | ||||
Gregory Szorc
|
r40362 | deltaparentfn=deltabases.__getitem__, | ||
revisiondata=revisiondata, | ||||
assumehaveparentrevisions=assumehaveparentrevisions, | ||||
Augie Fackler
|
r43346 | deltamode=deltamode, | ||
): | ||||
Gregory Szorc
|
r40362 | |||
yield delta | ||||
# End of ifiledata interface. | ||||
# Start of ifilemutation interface. | ||||
def add(self, filedata, meta, transaction, linkrev, p1, p2): | ||||
if meta or filedata.startswith(b'\x01\n'): | ||||
filedata = storageutil.packmeta(meta, filedata) | ||||
return self.addrevision(filedata, transaction, linkrev, p1, p2) | ||||
Augie Fackler
|
    def addrevision(
        self,
        revisiondata,
        transaction,
        linkrev,
        p1,
        p2,
        node=None,
        flags=0,
        cachedelta=None,
    ):
        """Add a revision from its fulltext, returning its node.

        When ``node`` is supplied the fulltext is verified against it.
        Storage flags are rejected; ``cachedelta`` is accepted for interface
        compatibility but unused here.
        """
        if flags:
            raise SQLiteStoreError(_(b'flags not supported on revisions'))

        validatehash = node is not None
        node = node or storageutil.hashrevisionsha1(revisiondata, p1, p2)

        if validatehash:
            self._checkhash(revisiondata, node, p1, p2)

        # Already stored; adding is a no-op.
        if node in self._nodetorev:
            return node

        node = self._addrawrevision(
            node, revisiondata, transaction, linkrev, p1, p2
        )

        self._revisioncache[node] = revisiondata
        return node
Augie Fackler
|
r43346 | def addgroup( | ||
self, | ||||
deltas, | ||||
linkmapper, | ||||
transaction, | ||||
addrevisioncb=None, | ||||
maybemissingparents=False, | ||||
): | ||||
Gregory Szorc
|
r40362 | nodes = [] | ||
for node, p1, p2, linknode, deltabase, delta, wireflags in deltas: | ||||
storeflags = 0 | ||||
if wireflags & repository.REVISION_FLAG_CENSORED: | ||||
storeflags |= FLAG_CENSORED | ||||
if wireflags & ~repository.REVISION_FLAG_CENSORED: | ||||
Augie Fackler
|
r43347 | raise SQLiteStoreError(b'unhandled revision flag') | ||
Gregory Szorc
|
r40362 | |||
Gregory Szorc
|
r40428 | if maybemissingparents: | ||
if p1 != nullid and not self.hasnode(p1): | ||||
p1 = nullid | ||||
storeflags |= FLAG_MISSING_P1 | ||||
if p2 != nullid and not self.hasnode(p2): | ||||
p2 = nullid | ||||
storeflags |= FLAG_MISSING_P2 | ||||
Gregory Szorc
|
r40362 | baserev = self.rev(deltabase) | ||
# If base is censored, delta must be full replacement in a single | ||||
# patch operation. | ||||
if baserev != nullrev and self.iscensored(baserev): | ||||
Augie Fackler
|
r43347 | hlen = struct.calcsize(b'>lll') | ||
r43040 | oldlen = len(self.rawdata(deltabase, _verifyhash=False)) | |||
Gregory Szorc
|
r40362 | newlen = len(delta) - hlen | ||
if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen): | ||||
Augie Fackler
|
r43346 | raise error.CensoredBaseError(self._path, deltabase) | ||
Gregory Szorc
|
r40362 | |||
Augie Fackler
|
r43346 | if not (storeflags & FLAG_CENSORED) and storageutil.deltaiscensored( | ||
delta, baserev, lambda x: len(self.rawdata(x)) | ||||
): | ||||
Gregory Szorc
|
r40362 | storeflags |= FLAG_CENSORED | ||
linkrev = linkmapper(linknode) | ||||
nodes.append(node) | ||||
if node in self._revisions: | ||||
Gregory Szorc
|
r40428 | # Possibly reset parents to make them proper. | ||
entry = self._revisions[node] | ||||
if entry.flags & FLAG_MISSING_P1 and p1 != nullid: | ||||
entry.p1node = p1 | ||||
entry.p1rev = self._nodetorev[p1] | ||||
entry.flags &= ~FLAG_MISSING_P1 | ||||
self._db.execute( | ||||
Augie Fackler
|
r43907 | 'UPDATE fileindex SET p1rev=?, flags=? WHERE id=?', | ||
Augie Fackler
|
r43346 | (self._nodetorev[p1], entry.flags, entry.rid), | ||
) | ||||
Gregory Szorc
|
r40428 | |||
if entry.flags & FLAG_MISSING_P2 and p2 != nullid: | ||||
entry.p2node = p2 | ||||
entry.p2rev = self._nodetorev[p2] | ||||
entry.flags &= ~FLAG_MISSING_P2 | ||||
self._db.execute( | ||||
Augie Fackler
|
r43907 | 'UPDATE fileindex SET p2rev=?, flags=? WHERE id=?', | ||
Augie Fackler
|
r43346 | (self._nodetorev[p1], entry.flags, entry.rid), | ||
) | ||||
Gregory Szorc
|
r40428 | |||
Gregory Szorc
|
r40362 | continue | ||
if deltabase == nullid: | ||||
text = mdiff.patch(b'', delta) | ||||
storedelta = None | ||||
else: | ||||
text = None | ||||
storedelta = (deltabase, delta) | ||||
Augie Fackler
|
r43346 | self._addrawrevision( | ||
node, | ||||
text, | ||||
transaction, | ||||
linkrev, | ||||
p1, | ||||
p2, | ||||
storedelta=storedelta, | ||||
flags=storeflags, | ||||
) | ||||
Gregory Szorc
|
r40362 | |||
if addrevisioncb: | ||||
addrevisioncb(self, node) | ||||
return nodes | ||||
    def censorrevision(self, tr, censornode, tombstone=b''):
        """Replace the stored content of *censornode* with a tombstone.

        Delta chain children of the censored revision are first re-stored as
        standalone fulltexts so they remain reconstructable, then the
        censored delta is swapped for the tombstone and deleted.
        """
        tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

        # This restriction is cargo culted from revlogs and makes no sense for
        # SQLite, since columns can be resized at will.
        if len(tombstone) > len(self.rawdata(censornode)):
            raise error.Abort(
                _(b'censor tombstone must be no longer than censored data')
            )

        # We need to replace the censored revision's data with the tombstone.
        # But replacing that data will have implications for delta chains that
        # reference it.
        #
        # While "better," more complex strategies are possible, we do something
        # simple: we find delta chain children of the censored revision and we
        # replace those incremental deltas with fulltexts of their corresponding
        # revision. Then we delete the now-unreferenced delta and original
        # revision and insert a replacement.

        # Find the delta to be censored.
        censoreddeltaid = self._db.execute(
            'SELECT deltaid FROM fileindex WHERE id=?',
            (self._revisions[censornode].rid,),
        ).fetchone()[0]

        # Find all its delta chain children.
        # TODO once we support storing deltas for !files, we'll need to look
        # for those delta chains too.
        rows = list(
            self._db.execute(
                'SELECT id, pathid, node FROM fileindex '
                'WHERE deltabaseid=? OR deltaid=?',
                (censoreddeltaid, censoreddeltaid),
            )
        )

        for row in rows:
            rid, pathid, node = row

            # Resolve the child's fulltext without cache assistance
            # ({-1: None} is the no-stop sentinel; see revision()).
            fulltext = resolvedeltachain(
                self._db, pathid, node, {}, {-1: None}, zstddctx=self._dctx
            )

            deltahash = hashlib.sha1(fulltext).digest()

            if self._compengine == b'zstd':
                deltablob = self._cctx.compress(fulltext)
                compression = COMPRESSION_ZSTD
            elif self._compengine == b'zlib':
                deltablob = zlib.compress(fulltext)
                compression = COMPRESSION_ZLIB
            elif self._compengine == b'none':
                deltablob = fulltext
                compression = COMPRESSION_NONE
            else:
                raise error.ProgrammingError(
                    b'unhandled compression engine: %s' % self._compengine
                )

            # Compression didn't help; store uncompressed.
            if len(deltablob) >= len(fulltext):
                deltablob = fulltext
                compression = COMPRESSION_NONE

            deltaid = insertdelta(self._db, compression, deltahash, deltablob)

            self._db.execute(
                'UPDATE fileindex SET deltaid=?, deltabaseid=NULL '
                'WHERE id=?',
                (deltaid, rid),
            )

        # Now create the tombstone delta and replace the delta on the censored
        # node.
        deltahash = hashlib.sha1(tombstone).digest()
        tombstonedeltaid = insertdelta(
            self._db, COMPRESSION_NONE, deltahash, tombstone
        )

        flags = self._revisions[censornode].flags
        flags |= FLAG_CENSORED

        self._db.execute(
            'UPDATE fileindex SET flags=?, deltaid=?, deltabaseid=NULL '
            'WHERE pathid=? AND node=?',
            (flags, tombstonedeltaid, self._pathid, censornode),
        )

        self._db.execute('DELETE FROM delta WHERE id=?', (censoreddeltaid,))

        self._refreshindex()
        self._revisioncache.clear()
    def getstrippoint(self, minlink):
        """Compute strip information for *minlink*.

        Delegates to storageutil.resolvestripinfo with this store's heads
        and DAG accessors.
        """
        return storageutil.resolvestripinfo(
            minlink,
            len(self) - 1,
            [self.rev(n) for n in self.heads()],
            self.linkrev,
            self.parentrevs,
        )
Gregory Szorc
|
r40362 | |||
    def strip(self, minlink, transaction):
        """Delete fileindex rows from the strip point for *minlink* onward."""
        if not len(self):
            return

        rev, _ignored = self.getstrippoint(minlink)

        # Strip point past the end: nothing to remove.
        if rev == len(self):
            return

        for rev in self.revs(rev):
            self._db.execute(
                'DELETE FROM fileindex WHERE pathid=? AND node=?',
                (self._pathid, self.node(rev)),
            )

        # TODO how should we garbage collect data in delta table?

        self._refreshindex()
# End of ifilemutation interface. | ||||
# Start of ifilestorage interface. | ||||
    def files(self):
        """Return per-path storage files; none exist with SQLite backing."""
        return []
Augie Fackler
|
    def storageinfo(
        self,
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        """Return a dict of requested storage metadata fields."""
        d = {}

        if exclusivefiles:
            d[b'exclusivefiles'] = []

        if sharedfiles:
            # TODO list sqlite file(s) here.
            d[b'sharedfiles'] = []

        if revisionscount:
            d[b'revisionscount'] = len(self)

        if trackedsize:
            # Sum of fulltext lengths; resolves every revision, so this can
            # be expensive on large histories.
            d[b'trackedsize'] = sum(
                len(self.revision(node)) for node in self._nodetorev
            )

        if storedsize:
            # TODO implement this?
            d[b'storedsize'] = None

        return d
    def verifyintegrity(self, state):
        """Yield sqliteproblem instances for revisions that fail to resolve.

        Failing nodes are also recorded in state[b'skipread'] so later
        verification stages skip them.
        """
        state[b'skipread'] = set()

        for rev in self:
            node = self.node(rev)

            try:
                self.revision(node)
            except Exception as e:
                yield sqliteproblem(
                    error=_(b'unpacking %s: %s') % (short(node), e), node=node
                )

                state[b'skipread'].add(node)
Gregory Szorc
|
r40362 | |||
# End of ifilestorage interface. | ||||
def _checkhash(self, fulltext, node, p1=None, p2=None):
    """Verify that ``fulltext`` hashes to ``node``; raise on mismatch.

    Parents are looked up when neither is supplied. On mismatch the cached
    fulltext is dropped; censored content raises ``CensoredNodeError``,
    anything else ``SQLiteStoreError``.
    """
    if p1 is None and p2 is None:
        p1, p2 = self.parents(node)

    expected = storageutil.hashrevisionsha1(fulltext, p1, p2)
    if expected == node:
        return

    # The cached fulltext can no longer be trusted; discard it.
    self._revisioncache.pop(node, None)

    if storageutil.iscensoredtext(fulltext):
        raise error.CensoredNodeError(self._path, node, fulltext)

    raise SQLiteStoreError(_(b'integrity check failed on %s') % self._path)
Gregory Szorc
|
r40362 | |||
Augie Fackler
|
def _addrawrevision(
    self,
    node,
    revisiondata,
    transaction,
    linkrev,
    p1,
    p2,
    storedelta=None,
    flags=0,
):
    """Insert a new revision into the SQLite file index.

    Either ``revisiondata`` (a fulltext) or ``storedelta`` (a
    ``(deltabase, delta)`` pair; deltabase may be a rev number or node)
    must be provided. Returns ``node``. Updates the in-memory
    node/rev/entry caches as a side effect.

    NOTE(review): ``transaction`` is accepted but unused here — presumably
    commit/rollback is driven by the enclosing repository transaction;
    confirm against sqliterepository.transaction.
    """
    # Lazily register this file's path, remembering the assigned rowid.
    if self._pathid is None:
        res = self._db.execute(
            'INSERT INTO filepath (path) VALUES (?)', (self._path,)
        )
        self._pathid = res.lastrowid

    # For simplicity, always store a delta against p1.
    # TODO we need a lot more logic here to make behavior reasonable.
    if storedelta:
        deltabase, delta = storedelta

        # An integer deltabase is a revision number; normalize to a node.
        if isinstance(deltabase, int):
            deltabase = self.node(deltabase)
    else:
        assert revisiondata is not None
        deltabase = p1

        if deltabase == nullid:
            # No parent: the "delta" is simply the fulltext.
            delta = revisiondata
        else:
            delta = mdiff.textdiff(
                self.revision(self.rev(deltabase)), revisiondata
            )

    # File index stores a pointer to its delta and the parent delta.
    # The parent delta is stored via a pointer to the fileindex PK.
    if deltabase == nullid:
        baseid = None
    else:
        baseid = self._revisions[deltabase].rid

    # Deltas are stored with a hash of their content. This allows
    # us to de-duplicate. The table is configured to ignore conflicts
    # and it is faster to just insert and silently noop than to look
    # first.
    deltahash = hashlib.sha1(delta).digest()

    if self._compengine == b'zstd':
        deltablob = self._cctx.compress(delta)
        compression = COMPRESSION_ZSTD
    elif self._compengine == b'zlib':
        deltablob = zlib.compress(delta)
        compression = COMPRESSION_ZLIB
    elif self._compengine == b'none':
        deltablob = delta
        compression = COMPRESSION_NONE
    else:
        raise error.ProgrammingError(
            b'unhandled compression engine: %s' % self._compengine
        )

    # Don't store compressed data if it isn't practical.
    if len(deltablob) >= len(delta):
        deltablob = delta
        compression = COMPRESSION_NONE

    deltaid = insertdelta(self._db, compression, deltahash, deltablob)

    rev = len(self)

    # Map parent nodes to revision numbers; nullid maps to nullrev.
    if p1 == nullid:
        p1rev = nullrev
    else:
        p1rev = self._nodetorev[p1]

    if p2 == nullid:
        p2rev = nullrev
    else:
        p2rev = self._nodetorev[p2]

    rid = self._db.execute(
        'INSERT INTO fileindex ('
        '    pathid, revnum, node, p1rev, p2rev, linkrev, flags, '
        '    deltaid, deltabaseid) '
        '    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
        (
            self._pathid,
            rev,
            node,
            p1rev,
            p2rev,
            linkrev,
            flags,
            deltaid,
            baseid,
        ),
    ).lastrowid

    entry = revisionentry(
        rid=rid,
        rev=rev,
        node=node,
        p1rev=p1rev,
        p2rev=p2rev,
        p1node=p1,
        p2node=p2,
        linkrev=linkrev,
        flags=flags,
    )

    # Keep the in-memory index caches consistent with the database.
    self._nodetorev[node] = rev
    self._revtonode[rev] = node
    self._revisions[node] = entry

    return node
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
class sqliterepository(localrepo.localrepository):
    """localrepository variant whose file storage lives in SQLite."""

    def cancopy(self):
        """Hardlink-clone is unsupported: the SQLite db cannot be shared."""
        return False

    def transaction(self, *args, **kwargs):
        """Open a repo transaction, tying a SQLite transaction to it."""
        existing = self.currenttransaction()
        txn = super(sqliterepository, self).transaction(*args, **kwargs)

        # A transaction was already running; the SQLite side is set up.
        if existing:
            return txn

        self._dbconn.execute('BEGIN TRANSACTION')

        def committransaction(_):
            self._dbconn.commit()

        txn.addfinalize(b'sqlitestore', committransaction)

        return txn

    @property
    def _dbconn(self):
        # SQLite connections can only be used on the thread that created
        # them. In most cases, this "just works." However, hgweb uses
        # multiple threads.
        threadid = threading.current_thread().ident

        if self._db and self._db[0] == threadid:
            return self._db[1]

        conn = makedb(self.svfs.join(b'db.sqlite'))
        self._db = (threadid, conn)
        return conn
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
def makedb(path):
    """Open (creating the schema if needed) the SQLite database at ``path``.

    Aborts when the on-disk schema version is neither fresh (0) nor the
    current version this code understands.
    """
    conn = sqlite3.connect(encoding.strfromlocal(path))
    # All text comes back as bytes, matching Mercurial's internal model.
    conn.text_factory = bytes

    version = conn.execute('PRAGMA user_version').fetchone()[0]

    # New database.
    if version == 0:
        for statement in CREATE_SCHEMA:
            conn.execute(statement)

        conn.commit()
    elif version == CURRENT_SCHEMA_VERSION:
        pass
    else:
        raise error.Abort(_(b'sqlite database has unrecognized version'))

    conn.execute('PRAGMA journal_mode=WAL')

    return conn
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
def featuresetup(ui, supported):
    """Register the storage requirements this extension can service."""
    supported.update(
        {
            REQUIREMENT,
            REQUIREMENT_ZLIB,
            REQUIREMENT_NONE,
            REQUIREMENT_SHALLOW_FILES,
            repository.NARROW_REQUIREMENT,
        }
    )

    # zstd is only advertised when the bindings are actually importable.
    if zstd:
        supported.add(REQUIREMENT_ZSTD)
Gregory Szorc
|
r40362 | |||
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
def newreporequirements(orig, ui, createopts):
    """Compute requirements for a new repo using the SQLite backend.

    Falls through to the wrapped implementation for non-sqlite backends.
    Aborts on unsupported creation options or compression configuration.
    """
    if createopts[b'backend'] != b'sqlite':
        return orig(ui, createopts)

    # This restriction can be lifted once we have more confidence.
    if b'sharedrepo' in createopts:
        raise error.Abort(
            _(b'shared repositories not supported with SQLite store')
        )

    # This filtering is out of an abundance of caution: we want to ensure
    # we honor creation options and we do that by annotating exactly the
    # creation options we recognize.
    recognized = {
        b'narrowfiles',
        b'backend',
        b'shallowfilestore',
    }

    unsupported = set(createopts) - recognized
    if unsupported:
        raise error.Abort(
            _(b'SQLite store does not support repo creation option: %s')
            % b', '.join(sorted(unsupported))
        )

    # Since we're a hybrid store that still relies on revlogs, we fall back
    # to using the revlogv1 backend's storage requirements then adding our
    # own requirement.
    createopts[b'backend'] = b'revlogv1'
    requirements = orig(ui, createopts)
    requirements.add(REQUIREMENT)

    compression = ui.config(b'storage', b'sqlite.compression')

    if compression == b'zstd' and not zstd:
        raise error.Abort(
            _(
                b'storage.sqlite.compression set to "zstd" but '
                b'zstandard compression not available to this '
                b'Mercurial install'
            )
        )

    # Map the configured engine to its requirement string.
    enginereqs = {
        b'zstd': REQUIREMENT_ZSTD,
        b'zlib': REQUIREMENT_ZLIB,
        b'none': REQUIREMENT_NONE,
    }

    if compression not in enginereqs:
        raise error.Abort(
            _(
                b'unknown compression engine defined in '
                b'storage.sqlite.compression: %s'
            )
            % compression
        )

    requirements.add(enginereqs[compression])

    if createopts.get(b'shallowfilestore'):
        requirements.add(REQUIREMENT_SHALLOW_FILES)

    return requirements
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class sqlitefilestorage(object):
    """Repository file storage backed by SQLite."""

    def file(self, path):
        """Return the ``sqlitefilestore`` for the tracked file at ``path``.

        The compression engine is inferred from the repository
        requirements (zstd preferred, then zlib, then none); aborts if
        none of the known compression requirements is present.
        """
        # BUG FIX: on Python 3 indexing bytes yields an int, so the
        # original ``path[0] == b'/'`` was always False and a leading
        # slash was never stripped. Slicing yields bytes and compares
        # correctly on both Python 2 and 3.
        if path[0:1] == b'/':
            path = path[1:]

        if REQUIREMENT_ZSTD in self.requirements:
            compression = b'zstd'
        elif REQUIREMENT_ZLIB in self.requirements:
            compression = b'zlib'
        elif REQUIREMENT_NONE in self.requirements:
            compression = b'none'
        else:
            raise error.Abort(
                _(
                    b'unable to determine what compression engine '
                    b'to use for SQLite storage'
                )
            )

        return sqlitefilestore(self._dbconn, path, compression)
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
def makefilestorage(orig, requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    if REQUIREMENT not in requirements:
        # Not a SQLite-backed repository; defer to the wrapped factory.
        return orig(requirements=requirements, features=features, **kwargs)

    if REQUIREMENT_SHALLOW_FILES in requirements:
        features.add(repository.REPO_FEATURE_SHALLOW_FILE_STORAGE)

    return sqlitefilestorage
Gregory Szorc
|
r40362 | |||
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
def makemain(orig, ui, requirements, **kwargs):
    """Select the repository class, honoring the SQLite requirement."""
    if REQUIREMENT not in requirements:
        return orig(requirements=requirements, **kwargs)

    # The repo was written with zstd; refuse to open it without support.
    if REQUIREMENT_ZSTD in requirements and not zstd:
        raise error.Abort(
            _(
                b'repository uses zstandard compression, which '
                b'is not available to this Mercurial install'
            )
        )

    return sqliterepository
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
def verifierinit(orig, self, *args, **kwargs):
    """Wrap ``verify.verifier.__init__`` to tune verification for SQLite."""
    orig(self, *args, **kwargs)

    # We don't care that files in the store don't align with what is
    # advertised. So suppress these warnings.
    self.warnorphanstorefiles = False
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
def extsetup(ui):
    """Install the hooks that route repository storage through SQLite."""
    localrepo.featuresetupfuncs.add(featuresetup)

    # (container, attribute, wrapper) triples to install.
    wrappers = [
        (localrepo, b'newreporequirements', newreporequirements),
        (localrepo, b'makefilestorage', makefilestorage),
        (localrepo, b'makemain', makemain),
        (verify.verifier, b'__init__', verifierinit),
    ]
    for container, attr, wrapper in wrappers:
        extensions.wrapfunction(container, attr, wrapper)
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
r40362 | |||
def reposetup(ui, repo): | ||||
if isinstance(repo, sqliterepository): | ||||
repo._db = None | ||||
# TODO check for bundlerepository? | ||||