rawdata: update callers in sqlitestore...
marmoute
r43017:ac9fed51 default draft
@@ -1,1175 +1,1174 b''
# sqlitestore.py - Storage backend that uses SQLite
#
# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""store repository data in SQLite (EXPERIMENTAL)

The sqlitestore extension enables the storage of repository data in SQLite.

This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
GUARANTEES. This means that repositories created with this extension may
only be usable with the exact version of this extension/Mercurial that was
used. The extension attempts to enforce this in order to prevent repository
corruption.

In addition, several features are not yet supported or have known bugs:

* Only some data is stored in SQLite. Changeset, manifest, and other repository
  data is not yet stored in SQLite.
* Transactions are not robust. If the process is aborted at the right time
  during transaction close/rollback, the repository could be in an inconsistent
  state. This problem will diminish once all repository data is tracked by
  SQLite.
* Bundle repositories do not work (the ability to use e.g.
  `hg -R <bundle-file> log` to automatically overlay a bundle on top of the
  existing repository).
* Various other features don't work.

This extension should work for basic clone/pull, update, and commit workflows.
Some history rewriting operations may fail due to lack of support for bundle
repositories.

To use, activate the extension and set the ``storage.new-repo-backend`` config
option to ``sqlite`` to enable new repositories to use SQLite for storage.
"""

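# An illustrative hgrc snippet for the setup the docstring describes
# (activate the extension, then opt new repositories into the backend):
#
#   [extensions]
#   sqlitestore =
#
#   [storage]
#   new-repo-backend = sqlite
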
# To run the test suite with repos using SQLite by default, execute the
# following:
#
# HGREPOFEATURES="sqlitestore" run-tests.py \
#     --extra-config-opt extensions.sqlitestore= \
#     --extra-config-opt storage.new-repo-backend=sqlite

from __future__ import absolute_import

import hashlib
import sqlite3
import struct
import threading
import zlib

from mercurial.i18n import _
from mercurial.node import (
    nullid,
    nullrev,
    short,
)
from mercurial.thirdparty import (
    attr,
)
from mercurial import (
    ancestor,
    dagop,
    encoding,
    error,
    extensions,
    localrepo,
    mdiff,
    pycompat,
    registrar,
    repository,
    util,
    verify,
)
from mercurial.utils import (
    interfaceutil,
    storageutil,
)

try:
    from mercurial import zstd
    zstd.__version__
except ImportError:
    zstd = None

configtable = {}
configitem = registrar.configitem(configtable)

# experimental config: storage.sqlite.compression
configitem('storage', 'sqlite.compression',
           default='zstd' if zstd else 'zlib',
           experimental=True)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

REQUIREMENT = b'exp-sqlite-001'
REQUIREMENT_ZSTD = b'exp-sqlite-comp-001=zstd'
REQUIREMENT_ZLIB = b'exp-sqlite-comp-001=zlib'
REQUIREMENT_NONE = b'exp-sqlite-comp-001=none'
REQUIREMENT_SHALLOW_FILES = b'exp-sqlite-shallow-files'

CURRENT_SCHEMA_VERSION = 1

COMPRESSION_NONE = 1
COMPRESSION_ZSTD = 2
COMPRESSION_ZLIB = 3

FLAG_CENSORED = 1
FLAG_MISSING_P1 = 2
FLAG_MISSING_P2 = 4

CREATE_SCHEMA = [
    # Deltas are stored as content-indexed blobs.
    # compression column holds COMPRESSION_* constant for how the
    # delta is encoded.

    r'CREATE TABLE delta ('
    r'    id INTEGER PRIMARY KEY, '
    r'    compression INTEGER NOT NULL, '
    r'    hash BLOB UNIQUE ON CONFLICT ABORT, '
    r'    delta BLOB NOT NULL '
    r')',

    # Tracked paths are denormalized to integers to avoid redundant
    # storage of the path name.
    r'CREATE TABLE filepath ('
    r'    id INTEGER PRIMARY KEY, '
    r'    path BLOB NOT NULL '
    r')',

    r'CREATE UNIQUE INDEX filepath_path '
    r'    ON filepath (path)',

    # We have a single table for all file revision data.
    # Each file revision is uniquely described by a (path, rev) and
    # (path, node).
    #
    # Revision data is stored as a pointer to the delta producing this
    # revision and the file revision whose delta should be applied before
    # that one. One can reconstruct the delta chain by recursively following
    # the delta base revision pointers until one encounters NULL.
    #
    # flags column holds bitwise integer flags controlling storage options.
    # These flags are defined by the FLAG_* constants.
    r'CREATE TABLE fileindex ('
    r'    id INTEGER PRIMARY KEY, '
    r'    pathid INTEGER REFERENCES filepath(id), '
    r'    revnum INTEGER NOT NULL, '
    r'    p1rev INTEGER NOT NULL, '
    r'    p2rev INTEGER NOT NULL, '
    r'    linkrev INTEGER NOT NULL, '
    r'    flags INTEGER NOT NULL, '
    r'    deltaid INTEGER REFERENCES delta(id), '
    r'    deltabaseid INTEGER REFERENCES fileindex(id), '
    r'    node BLOB NOT NULL '
    r')',

    r'CREATE UNIQUE INDEX fileindex_pathrevnum '
    r'    ON fileindex (pathid, revnum)',

    r'CREATE UNIQUE INDEX fileindex_pathnode '
    r'    ON fileindex (pathid, node)',

    # Provide a view over all file data for convenience.
    r'CREATE VIEW filedata AS '
    r'SELECT '
    r'    fileindex.id AS id, '
    r'    filepath.id AS pathid, '
    r'    filepath.path AS path, '
    r'    fileindex.revnum AS revnum, '
    r'    fileindex.node AS node, '
    r'    fileindex.p1rev AS p1rev, '
    r'    fileindex.p2rev AS p2rev, '
    r'    fileindex.linkrev AS linkrev, '
    r'    fileindex.flags AS flags, '
    r'    fileindex.deltaid AS deltaid, '
    r'    fileindex.deltabaseid AS deltabaseid '
    r'FROM filepath, fileindex '
    r'WHERE fileindex.pathid=filepath.id',

    r'PRAGMA user_version=%d' % CURRENT_SCHEMA_VERSION,
]
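
# Illustrative example (hypothetical data): a file with three stored
# revisions, each kept as a delta against the previous one, would yield
# fileindex rows roughly like:
#
#   id | revnum | deltaid | deltabaseid
#   ---+--------+---------+-------------
#    1 |      0 |      10 | NULL     (fulltext stored as delta 10)
#    2 |      1 |      11 | 1        (delta 11 applies on top of row 1)
#    3 |      2 |      12 | 2        (delta 12 applies on top of row 2)
#
# Reconstructing revnum 2 follows deltabaseid pointers until NULL, takes
# delta 10 as the base fulltext, then applies deltas 11 and 12 in order --
# which is what resolvedeltachain() below does via a recursive CTE.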

def resolvedeltachain(db, pathid, node, revisioncache,
                      stoprids, zstddctx=None):
    """Resolve a delta chain for a file node."""

    # TODO the "not in ({stops})" here is possibly slowing down the query
    # because it needs to perform the lookup on every recursive invocation.
    # This could possibly be faster if we created a temporary query with
    # baseid "poisoned" to null and limited the recursive filter to
    # "is not null".
    res = db.execute(
        r'WITH RECURSIVE '
        r'    deltachain(deltaid, baseid) AS ('
        r'        SELECT deltaid, deltabaseid FROM fileindex '
        r'            WHERE pathid=? AND node=? '
        r'        UNION ALL '
        r'        SELECT fileindex.deltaid, deltabaseid '
        r'            FROM fileindex, deltachain '
        r'            WHERE '
        r'                fileindex.id=deltachain.baseid '
        r'                AND deltachain.baseid IS NOT NULL '
        r'                AND fileindex.id NOT IN ({stops}) '
        r'    ) '
        r'SELECT deltachain.baseid, compression, delta '
        r'FROM deltachain, delta '
        r'WHERE delta.id=deltachain.deltaid'.format(
            stops=r','.join([r'?'] * len(stoprids))),
        tuple([pathid, node] + list(stoprids.keys())))

    deltas = []
    lastdeltabaseid = None

    for deltabaseid, compression, delta in res:
        lastdeltabaseid = deltabaseid

        if compression == COMPRESSION_ZSTD:
            delta = zstddctx.decompress(delta)
        elif compression == COMPRESSION_NONE:
            delta = delta
        elif compression == COMPRESSION_ZLIB:
            delta = zlib.decompress(delta)
        else:
            raise SQLiteStoreError('unhandled compression type: %d' %
                                   compression)

        deltas.append(delta)

    if lastdeltabaseid in stoprids:
        basetext = revisioncache[stoprids[lastdeltabaseid]]
    else:
        basetext = deltas.pop()

    deltas.reverse()
    fulltext = mdiff.patches(basetext, deltas)

    # SQLite returns buffer instances for blob columns on Python 2. This
    # type can propagate through the delta application layer. Because
    # downstream callers assume revisions are bytes, cast as needed.
    if not isinstance(fulltext, bytes):
        fulltext = bytes(delta)

    return fulltext

def insertdelta(db, compression, hash, delta):
    try:
        return db.execute(
            r'INSERT INTO delta (compression, hash, delta) '
            r'VALUES (?, ?, ?)',
            (compression, hash, delta)).lastrowid
    except sqlite3.IntegrityError:
        return db.execute(
            r'SELECT id FROM delta WHERE hash=?',
            (hash,)).fetchone()[0]

class SQLiteStoreError(error.StorageError):
    pass

@attr.s
class revisionentry(object):
    rid = attr.ib()
    rev = attr.ib()
    node = attr.ib()
    p1rev = attr.ib()
    p2rev = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    linkrev = attr.ib()
    flags = attr.ib()

@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class sqliterevisiondelta(object):
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    linknode = attr.ib(default=None)

@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class sqliteproblem(object):
    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)

@interfaceutil.implementer(repository.ifilestorage)
class sqlitefilestore(object):
    """Implements storage for an individual tracked path."""

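    # Each instance wraps a single tracked path. Construction shown here is
    # illustrative; instances are normally created by the repository's file
    # storage factory (not part of this hunk):
    #
    #   store = sqlitefilestore(db, b'path/to/file', 'zstd')
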
    def __init__(self, db, path, compression):
        self._db = db
        self._path = path

        self._pathid = None

        # revnum -> node
        self._revtonode = {}
        # node -> revnum
        self._nodetorev = {}
        # node -> data structure
        self._revisions = {}

        self._revisioncache = util.lrucachedict(10)

        self._compengine = compression

        if compression == 'zstd':
            self._cctx = zstd.ZstdCompressor(level=3)
            self._dctx = zstd.ZstdDecompressor()
        else:
            self._cctx = None
            self._dctx = None

        self._refreshindex()

    def _refreshindex(self):
        self._revtonode = {}
        self._nodetorev = {}
        self._revisions = {}

        res = list(self._db.execute(
            r'SELECT id FROM filepath WHERE path=?', (self._path,)))

        if not res:
            self._pathid = None
            return

        self._pathid = res[0][0]

        res = self._db.execute(
            r'SELECT id, revnum, node, p1rev, p2rev, linkrev, flags '
            r'FROM fileindex '
            r'WHERE pathid=? '
            r'ORDER BY revnum ASC',
            (self._pathid,))

        for i, row in enumerate(res):
            rid, rev, node, p1rev, p2rev, linkrev, flags = row

            if i != rev:
                raise SQLiteStoreError(_('sqlite database has inconsistent '
                                         'revision numbers'))

            if p1rev == nullrev:
                p1node = nullid
            else:
                p1node = self._revtonode[p1rev]

            if p2rev == nullrev:
                p2node = nullid
            else:
                p2node = self._revtonode[p2rev]

            entry = revisionentry(
                rid=rid,
                rev=rev,
                node=node,
                p1rev=p1rev,
                p2rev=p2rev,
                p1node=p1node,
                p2node=p2node,
                linkrev=linkrev,
                flags=flags)

            self._revtonode[rev] = node
            self._nodetorev[node] = rev
            self._revisions[node] = entry

    # Start of ifileindex interface.

    def __len__(self):
        return len(self._revisions)

    def __iter__(self):
        return iter(pycompat.xrange(len(self._revisions)))

    def hasnode(self, node):
        if node == nullid:
            return False

        return node in self._nodetorev

    def revs(self, start=0, stop=None):
        return storageutil.iterrevs(len(self._revisions), start=start,
                                    stop=stop)

    def parents(self, node):
        if node == nullid:
            return nullid, nullid

        if node not in self._revisions:
            raise error.LookupError(node, self._path, _('no node'))

        entry = self._revisions[node]
        return entry.p1node, entry.p2node

    def parentrevs(self, rev):
        if rev == nullrev:
            return nullrev, nullrev

        if rev not in self._revtonode:
            raise IndexError(rev)

        entry = self._revisions[self._revtonode[rev]]
        return entry.p1rev, entry.p2rev

    def rev(self, node):
        if node == nullid:
            return nullrev

        if node not in self._nodetorev:
            raise error.LookupError(node, self._path, _('no node'))

        return self._nodetorev[node]

    def node(self, rev):
        if rev == nullrev:
            return nullid

        if rev not in self._revtonode:
            raise IndexError(rev)

        return self._revtonode[rev]

    def lookup(self, node):
        return storageutil.fileidlookup(self, node, self._path)

    def linkrev(self, rev):
        if rev == nullrev:
            return nullrev

        if rev not in self._revtonode:
            raise IndexError(rev)

        entry = self._revisions[self._revtonode[rev]]
        return entry.linkrev

    def iscensored(self, rev):
        if rev == nullrev:
            return False

        if rev not in self._revtonode:
            raise IndexError(rev)

        return self._revisions[self._revtonode[rev]].flags & FLAG_CENSORED

    def commonancestorsheads(self, node1, node2):
        rev1 = self.rev(node1)
        rev2 = self.rev(node2)

        ancestors = ancestor.commonancestorsheads(self.parentrevs, rev1, rev2)
        return pycompat.maplist(self.node, ancestors)

    def descendants(self, revs):
        # TODO we could implement this using a recursive SQL query, which
        # might be faster.
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)

    def heads(self, start=None, stop=None):
        if start is None and stop is None:
            if not len(self):
                return [nullid]

        startrev = self.rev(start) if start is not None else nullrev
        stoprevs = {self.rev(n) for n in stop or []}

        revs = dagop.headrevssubset(self.revs, self.parentrevs,
                                    startrev=startrev, stoprevs=stoprevs)

        return [self.node(rev) for rev in revs]

    def children(self, node):
        rev = self.rev(node)

        res = self._db.execute(
            r'SELECT'
            r'  node '
            r'  FROM filedata '
            r'  WHERE path=? AND (p1rev=? OR p2rev=?) '
            r'  ORDER BY revnum ASC',
            (self._path, rev, rev))

        return [row[0] for row in res]

    # End of ifileindex interface.

    # Start of ifiledata interface.

    def size(self, rev):
        if rev == nullrev:
            return 0

        if rev not in self._revtonode:
            raise IndexError(rev)

        node = self._revtonode[rev]

        if self.renamed(node):
            return len(self.read(node))

        return len(self.revision(node))

    def revision(self, node, raw=False, _verifyhash=True):
        if node in (nullid, nullrev):
            return b''

        if isinstance(node, int):
            node = self.node(node)

        if node not in self._nodetorev:
            raise error.LookupError(node, self._path, _('no node'))

        if node in self._revisioncache:
            return self._revisioncache[node]

        # Because we have a fulltext revision cache, we are able to
        # short-circuit delta chain traversal and decompression as soon as
        # we encounter a revision in the cache.

        stoprids = {self._revisions[n].rid: n
                    for n in self._revisioncache}

        if not stoprids:
            stoprids[-1] = None

        fulltext = resolvedeltachain(self._db, self._pathid, node,
                                     self._revisioncache, stoprids,
                                     zstddctx=self._dctx)

        # Don't verify hashes if parent nodes were rewritten, as the hash
        # wouldn't verify.
        if self._revisions[node].flags & (FLAG_MISSING_P1 | FLAG_MISSING_P2):
            _verifyhash = False

        if _verifyhash:
            self._checkhash(fulltext, node)
            self._revisioncache[node] = fulltext

        return fulltext

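    # rawdata() is the newer storage API entry point for raw fulltexts; this
    # changeset moves internal callers onto it. For this store it is simply an
    # alias for revision(): no flag processing transforms the stored text
    # (addrevision() rejects flags), so raw and rendered fulltexts coincide.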
    def rawdata(self, *args, **kwargs):
        return self.revision(*args, **kwargs)

    def read(self, node):
        return storageutil.filtermetadata(self.revision(node))

    def renamed(self, node):
        return storageutil.filerevisioncopied(self, node)

    def cmp(self, node, fulltext):
        return not storageutil.filedataequivalent(self, node, fulltext)

    def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
                      assumehaveparentrevisions=False,
                      deltamode=repository.CG_DELTAMODE_STD):
        if nodesorder not in ('nodes', 'storage', 'linear', None):
            raise error.ProgrammingError('unhandled value for nodesorder: %s' %
                                         nodesorder)

        nodes = [n for n in nodes if n != nullid]

        if not nodes:
            return

        # TODO perform in a single query.
        res = self._db.execute(
            r'SELECT revnum, deltaid FROM fileindex '
            r'WHERE pathid=? '
            r'    AND node in (%s)' % (r','.join([r'?'] * len(nodes))),
            tuple([self._pathid] + nodes))

        deltabases = {}

        for rev, deltaid in res:
            res = self._db.execute(
                r'SELECT revnum from fileindex WHERE pathid=? AND deltaid=?',
                (self._pathid, deltaid))
            deltabases[rev] = res.fetchone()[0]

        # TODO define revdifffn so we can use delta from storage.
        for delta in storageutil.emitrevisions(
            self, nodes, nodesorder, sqliterevisiondelta,
            deltaparentfn=deltabases.__getitem__,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            deltamode=deltamode):

            yield delta

    # End of ifiledata interface.

    # Start of ifilemutation interface.

    def add(self, filedata, meta, transaction, linkrev, p1, p2):
        if meta or filedata.startswith(b'\x01\n'):
            filedata = storageutil.packmeta(meta, filedata)

        return self.addrevision(filedata, transaction, linkrev, p1, p2)

    def addrevision(self, revisiondata, transaction, linkrev, p1, p2, node=None,
                    flags=0, cachedelta=None):
        if flags:
            raise SQLiteStoreError(_('flags not supported on revisions'))

        validatehash = node is not None
        node = node or storageutil.hashrevisionsha1(revisiondata, p1, p2)

        if validatehash:
            self._checkhash(revisiondata, node, p1, p2)

        if node in self._nodetorev:
            return node

        node = self._addrawrevision(node, revisiondata, transaction, linkrev,
                                    p1, p2)

        self._revisioncache[node] = revisiondata
        return node

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None,
                 maybemissingparents=False):
        nodes = []

        for node, p1, p2, linknode, deltabase, delta, wireflags in deltas:
            storeflags = 0

            if wireflags & repository.REVISION_FLAG_CENSORED:
                storeflags |= FLAG_CENSORED

            if wireflags & ~repository.REVISION_FLAG_CENSORED:
                raise SQLiteStoreError('unhandled revision flag')

            if maybemissingparents:
                if p1 != nullid and not self.hasnode(p1):
                    p1 = nullid
                    storeflags |= FLAG_MISSING_P1

                if p2 != nullid and not self.hasnode(p2):
                    p2 = nullid
                    storeflags |= FLAG_MISSING_P2

            baserev = self.rev(deltabase)

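            # A sketch of the check below, for orientation (the '>lll' layout
            # is the mdiff/mpatch hunk header: big-endian int32 start, end and
            # length fields, followed by the replacement bytes):
            #
            #   hlen = struct.calcsize('>lll')     # 12 bytes
            #   start, end, length = struct.unpack('>lll', delta[:hlen])
            #   # full replacement <=> (start, end, length) == (0, oldlen, newlen)
            #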
            # If base is censored, delta must be full replacement in a single
            # patch operation.
            if baserev != nullrev and self.iscensored(baserev):
                hlen = struct.calcsize('>lll')
-               oldlen = len(self.revision(deltabase, raw=True,
-                                          _verifyhash=False))
+               oldlen = len(self.rawdata(deltabase, _verifyhash=False))
                newlen = len(delta) - hlen

                if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                    raise error.CensoredBaseError(self._path,
                                                  deltabase)

            if (not (storeflags & FLAG_CENSORED)
                and storageutil.deltaiscensored(
-                   delta, baserev, lambda x: len(self.revision(x, raw=True)))):
+                   delta, baserev, lambda x: len(self.rawdata(x)))):
                storeflags |= FLAG_CENSORED

            linkrev = linkmapper(linknode)

            nodes.append(node)

            if node in self._revisions:
                # Possibly reset parents to make them proper.
                entry = self._revisions[node]

                if entry.flags & FLAG_MISSING_P1 and p1 != nullid:
                    entry.p1node = p1
                    entry.p1rev = self._nodetorev[p1]
                    entry.flags &= ~FLAG_MISSING_P1

                    self._db.execute(
                        r'UPDATE fileindex SET p1rev=?, flags=? '
                        r'WHERE id=?',
                        (self._nodetorev[p1], entry.flags, entry.rid))

                if entry.flags & FLAG_MISSING_P2 and p2 != nullid:
                    entry.p2node = p2
                    entry.p2rev = self._nodetorev[p2]
                    entry.flags &= ~FLAG_MISSING_P2

                    self._db.execute(
                        r'UPDATE fileindex SET p2rev=?, flags=? '
                        r'WHERE id=?',
                        (self._nodetorev[p1], entry.flags, entry.rid))

                continue

            if deltabase == nullid:
                text = mdiff.patch(b'', delta)
                storedelta = None
            else:
                text = None
                storedelta = (deltabase, delta)

            self._addrawrevision(node, text, transaction, linkrev, p1, p2,
                                 storedelta=storedelta, flags=storeflags)

            if addrevisioncb:
                addrevisioncb(self, node)

        return nodes

    def censorrevision(self, tr, censornode, tombstone=b''):
        tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

        # This restriction is cargo culted from revlogs and makes no sense for
        # SQLite, since columns can be resized at will.
-       if len(tombstone) > len(self.revision(censornode, raw=True)):
+       if len(tombstone) > len(self.rawdata(censornode)):
            raise error.Abort(_('censor tombstone must be no longer than '
                                'censored data'))

        # We need to replace the censored revision's data with the tombstone.
        # But replacing that data will have implications for delta chains that
        # reference it.
        #
        # While "better," more complex strategies are possible, we do something
        # simple: we find delta chain children of the censored revision and we
        # replace those incremental deltas with fulltexts of their corresponding
        # revision. Then we delete the now-unreferenced delta and original
        # revision and insert a replacement.

        # Find the delta to be censored.
        censoreddeltaid = self._db.execute(
            r'SELECT deltaid FROM fileindex WHERE id=?',
            (self._revisions[censornode].rid,)).fetchone()[0]

        # Find all its delta chain children.
        # TODO once we support storing deltas for !files, we'll need to look
        # for those delta chains too.
        rows = list(self._db.execute(
            r'SELECT id, pathid, node FROM fileindex '
            r'WHERE deltabaseid=? OR deltaid=?',
            (censoreddeltaid, censoreddeltaid)))

        for row in rows:
            rid, pathid, node = row

            fulltext = resolvedeltachain(self._db, pathid, node, {}, {-1: None},
                                         zstddctx=self._dctx)

            deltahash = hashlib.sha1(fulltext).digest()

            if self._compengine == 'zstd':
                deltablob = self._cctx.compress(fulltext)
                compression = COMPRESSION_ZSTD
            elif self._compengine == 'zlib':
                deltablob = zlib.compress(fulltext)
                compression = COMPRESSION_ZLIB
            elif self._compengine == 'none':
                deltablob = fulltext
                compression = COMPRESSION_NONE
            else:
                raise error.ProgrammingError('unhandled compression engine: %s'
                                             % self._compengine)

            if len(deltablob) >= len(fulltext):
                deltablob = fulltext
                compression = COMPRESSION_NONE

            deltaid = insertdelta(self._db, compression, deltahash, deltablob)

            self._db.execute(
                r'UPDATE fileindex SET deltaid=?, deltabaseid=NULL '
                r'WHERE id=?', (deltaid, rid))

        # Now create the tombstone delta and replace the delta on the censored
        # node.
        deltahash = hashlib.sha1(tombstone).digest()
        tombstonedeltaid = insertdelta(self._db, COMPRESSION_NONE,
                                       deltahash, tombstone)

        flags = self._revisions[censornode].flags
        flags |= FLAG_CENSORED

        self._db.execute(
            r'UPDATE fileindex SET flags=?, deltaid=?, deltabaseid=NULL '
            r'WHERE pathid=? AND node=?',
            (flags, tombstonedeltaid, self._pathid, censornode))

        self._db.execute(
            r'DELETE FROM delta WHERE id=?', (censoreddeltaid,))

        self._refreshindex()
        self._revisioncache.clear()

    def getstrippoint(self, minlink):
        return storageutil.resolvestripinfo(minlink, len(self) - 1,
                                            [self.rev(n) for n in self.heads()],
                                            self.linkrev,
                                            self.parentrevs)

    def strip(self, minlink, transaction):
        if not len(self):
            return

        rev, _ignored = self.getstrippoint(minlink)

        if rev == len(self):
            return

        for rev in self.revs(rev):
            self._db.execute(
                r'DELETE FROM fileindex WHERE pathid=? AND node=?',
                (self._pathid, self.node(rev)))

        # TODO how should we garbage collect data in delta table?

        self._refreshindex()

    # End of ifilemutation interface.

    # Start of ifilestorage interface.

    def files(self):
        return []

    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        d = {}

        if exclusivefiles:
            d['exclusivefiles'] = []

        if sharedfiles:
            # TODO list sqlite file(s) here.
            d['sharedfiles'] = []

        if revisionscount:
            d['revisionscount'] = len(self)

        if trackedsize:
            d['trackedsize'] = sum(len(self.revision(node))
                                   for node in self._nodetorev)

        if storedsize:
            # TODO implement this?
            d['storedsize'] = None

        return d

    def verifyintegrity(self, state):
        state['skipread'] = set()

        for rev in self:
            node = self.node(rev)

            try:
                self.revision(node)
            except Exception as e:
                yield sqliteproblem(
                    error=_('unpacking %s: %s') % (short(node), e),
                    node=node)

                state['skipread'].add(node)

    # End of ifilestorage interface.

874 def _checkhash(self, fulltext, node, p1=None, p2=None):
873 def _checkhash(self, fulltext, node, p1=None, p2=None):
875 if p1 is None and p2 is None:
874 if p1 is None and p2 is None:
876 p1, p2 = self.parents(node)
875 p1, p2 = self.parents(node)
877
876
878 if node == storageutil.hashrevisionsha1(fulltext, p1, p2):
877 if node == storageutil.hashrevisionsha1(fulltext, p1, p2):
879 return
878 return
880
879
881 try:
880 try:
882 del self._revisioncache[node]
881 del self._revisioncache[node]
883 except KeyError:
882 except KeyError:
884 pass
883 pass
885
884
886 if storageutil.iscensoredtext(fulltext):
885 if storageutil.iscensoredtext(fulltext):
887 raise error.CensoredNodeError(self._path, node, fulltext)
886 raise error.CensoredNodeError(self._path, node, fulltext)
888
887
889 raise SQLiteStoreError(_('integrity check failed on %s') %
888 raise SQLiteStoreError(_('integrity check failed on %s') %
890 self._path)
889 self._path)
891
890
892 def _addrawrevision(self, node, revisiondata, transaction, linkrev,
891 def _addrawrevision(self, node, revisiondata, transaction, linkrev,
893 p1, p2, storedelta=None, flags=0):
892 p1, p2, storedelta=None, flags=0):
894 if self._pathid is None:
893 if self._pathid is None:
895 res = self._db.execute(
894 res = self._db.execute(
896 r'INSERT INTO filepath (path) VALUES (?)', (self._path,))
895 r'INSERT INTO filepath (path) VALUES (?)', (self._path,))
897 self._pathid = res.lastrowid
896 self._pathid = res.lastrowid
898
897
899 # For simplicity, always store a delta against p1.
898 # For simplicity, always store a delta against p1.
900 # TODO we need a lot more logic here to make behavior reasonable.
899 # TODO we need a lot more logic here to make behavior reasonable.
901
900
902 if storedelta:
901 if storedelta:
903 deltabase, delta = storedelta
902 deltabase, delta = storedelta
904
903
905 if isinstance(deltabase, int):
904 if isinstance(deltabase, int):
906 deltabase = self.node(deltabase)
905 deltabase = self.node(deltabase)
907
906
908 else:
907 else:
909 assert revisiondata is not None
908 assert revisiondata is not None
910 deltabase = p1
909 deltabase = p1
911
910
912 if deltabase == nullid:
911 if deltabase == nullid:
913 delta = revisiondata
912 delta = revisiondata
914 else:
913 else:
915 delta = mdiff.textdiff(self.revision(self.rev(deltabase)),
914 delta = mdiff.textdiff(self.revision(self.rev(deltabase)),
916 revisiondata)
915 revisiondata)
917
916
918 # File index stores a pointer to its delta and the parent delta.
917 # File index stores a pointer to its delta and the parent delta.
919 # The parent delta is stored via a pointer to the fileindex PK.
918 # The parent delta is stored via a pointer to the fileindex PK.
920 if deltabase == nullid:
919 if deltabase == nullid:
921 baseid = None
920 baseid = None
922 else:
921 else:
923 baseid = self._revisions[deltabase].rid
922 baseid = self._revisions[deltabase].rid
924
923
925 # Deltas are stored with a hash of their content. This allows
924 # Deltas are stored with a hash of their content. This allows
926 # us to de-duplicate. The table is configured to ignore conflicts
925 # us to de-duplicate. The table is configured to ignore conflicts
927 # and it is faster to just insert and silently noop than to look
926 # and it is faster to just insert and silently noop than to look
928 # first.
927 # first.
929 deltahash = hashlib.sha1(delta).digest()
928 deltahash = hashlib.sha1(delta).digest()
930
929
931 if self._compengine == 'zstd':
930 if self._compengine == 'zstd':
932 deltablob = self._cctx.compress(delta)
931 deltablob = self._cctx.compress(delta)
933 compression = COMPRESSION_ZSTD
932 compression = COMPRESSION_ZSTD
934 elif self._compengine == 'zlib':
933 elif self._compengine == 'zlib':
935 deltablob = zlib.compress(delta)
934 deltablob = zlib.compress(delta)
936 compression = COMPRESSION_ZLIB
935 compression = COMPRESSION_ZLIB
937 elif self._compengine == 'none':
936 elif self._compengine == 'none':
938 deltablob = delta
937 deltablob = delta
939 compression = COMPRESSION_NONE
938 compression = COMPRESSION_NONE
940 else:
939 else:
941 raise error.ProgrammingError('unhandled compression engine: %s' %
940 raise error.ProgrammingError('unhandled compression engine: %s' %
942 self._compengine)
941 self._compengine)
943
942
944 # Don't store compressed data if it isn't practical.
943 # Don't store compressed data if it isn't practical.
945 if len(deltablob) >= len(delta):
944 if len(deltablob) >= len(delta):
946 deltablob = delta
945 deltablob = delta
947 compression = COMPRESSION_NONE
946 compression = COMPRESSION_NONE
948
947
949 deltaid = insertdelta(self._db, compression, deltahash, deltablob)
948 deltaid = insertdelta(self._db, compression, deltahash, deltablob)
950
949
951 rev = len(self)
950 rev = len(self)
952
951
953 if p1 == nullid:
952 if p1 == nullid:
954 p1rev = nullrev
953 p1rev = nullrev
955 else:
954 else:
956 p1rev = self._nodetorev[p1]
955 p1rev = self._nodetorev[p1]
957
956
958 if p2 == nullid:
957 if p2 == nullid:
959 p2rev = nullrev
958 p2rev = nullrev
960 else:
959 else:
961 p2rev = self._nodetorev[p2]
960 p2rev = self._nodetorev[p2]
962
961
963 rid = self._db.execute(
962 rid = self._db.execute(
964 r'INSERT INTO fileindex ('
963 r'INSERT INTO fileindex ('
965 r' pathid, revnum, node, p1rev, p2rev, linkrev, flags, '
964 r' pathid, revnum, node, p1rev, p2rev, linkrev, flags, '
966 r' deltaid, deltabaseid) '
965 r' deltaid, deltabaseid) '
967 r' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
966 r' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
968 (self._pathid, rev, node, p1rev, p2rev, linkrev, flags,
967 (self._pathid, rev, node, p1rev, p2rev, linkrev, flags,
969 deltaid, baseid)
968 deltaid, baseid)
970 ).lastrowid
969 ).lastrowid
971
970
        entry = revisionentry(
            rid=rid,
            rev=rev,
            node=node,
            p1rev=p1rev,
            p2rev=p2rev,
            p1node=p1,
            p2node=p2,
            linkrev=linkrev,
            flags=flags)

        self._nodetorev[node] = rev
        self._revtonode[rev] = node
        self._revisions[node] = entry

        return node

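# sqliterepository glues the SQLite store into Mercurial's repository type:
# it disables copy/hardlink-based clones and ties the SQLite transaction to
# Mercurial's transaction lifecycle. Only the outermost Mercurial
# transaction issues BEGIN; the matching commit() runs from a transaction
# finalizer.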
class sqliterepository(localrepo.localrepository):
    def cancopy(self):
        return False

    def transaction(self, *args, **kwargs):
        current = self.currenttransaction()

        tr = super(sqliterepository, self).transaction(*args, **kwargs)

        if current:
            return tr

        self._dbconn.execute(r'BEGIN TRANSACTION')

        def committransaction(_):
            self._dbconn.commit()

        tr.addfinalize('sqlitestore', committransaction)

        return tr

    @property
    def _dbconn(self):
        # SQLite connections can only be used on the thread that created
        # them. In most cases, this "just works." However, hgweb uses
        # multiple threads. So cache the most recent (thread id, connection)
        # pair and open a fresh connection whenever a different thread asks.
        tid = threading.current_thread().ident

        if self._db:
            if self._db[0] == tid:
                return self._db[1]

        db = makedb(self.svfs.join('db.sqlite'))
        self._db = (tid, db)

        return db

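# A fresh SQLite database reports PRAGMA user_version == 0, which is how a
# brand-new store is detected below. One of the CREATE_SCHEMA statements
# (defined earlier in this file) is assumed to set user_version to
# CURRENT_SCHEMA_VERSION. WAL journaling is enabled so readers can proceed
# concurrently with a writer.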
def makedb(path):
    """Construct a database handle for a database at path."""

    db = sqlite3.connect(encoding.strfromlocal(path))
    db.text_factory = bytes

    res = db.execute(r'PRAGMA user_version').fetchone()[0]

    # New database.
    if res == 0:
        for statement in CREATE_SCHEMA:
            db.execute(statement)

        db.commit()

    elif res == CURRENT_SCHEMA_VERSION:
        pass

    else:
        raise error.Abort(_('sqlite database has unrecognized version'))

    db.execute(r'PRAGMA journal_mode=WAL')

    return db

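# featuresetup() advertises the requirements this extension understands.
# Note that REQUIREMENT_ZSTD is only advertised when the zstd module
# imported successfully, so a repository requiring zstd will refuse to open
# on an install without it.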
def featuresetup(ui, supported):
    supported.add(REQUIREMENT)

    if zstd:
        supported.add(REQUIREMENT_ZSTD)

    supported.add(REQUIREMENT_ZLIB)
    supported.add(REQUIREMENT_NONE)
    supported.add(REQUIREMENT_SHALLOW_FILES)
    supported.add(repository.NARROW_REQUIREMENT)

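# newreporequirements() runs at repo creation time and decides which
# requirement strings get written to .hg/requires. A hypothetical
# configuration exercising this path might look like the following (hgrc
# sketch; the option names appear in this file, the values are one valid
# choice):
#
#   [storage]
#   new-repo-backend = sqlite
#   sqlite.compression = zstd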
def newreporequirements(orig, ui, createopts):
    if createopts['backend'] != 'sqlite':
        return orig(ui, createopts)

    # This restriction can be lifted once we have more confidence.
    if 'sharedrepo' in createopts:
        raise error.Abort(_('shared repositories not supported with SQLite '
                            'store'))

    # This filtering is out of an abundance of caution: we want to ensure
    # we honor creation options, and we do that by whitelisting exactly the
    # creation options we recognize.
    known = {
        'narrowfiles',
        'backend',
        'shallowfilestore',
    }

    unsupported = set(createopts) - known
    if unsupported:
        raise error.Abort(_('SQLite store does not support repo creation '
                            'option: %s') % ', '.join(sorted(unsupported)))

    # Since we're a hybrid store that still relies on revlogs, we fall back
    # to the revlogv1 backend's storage requirements and then add our own
    # requirement on top.
    createopts['backend'] = 'revlogv1'
    requirements = orig(ui, createopts)
    requirements.add(REQUIREMENT)

    compression = ui.config('storage', 'sqlite.compression')

    if compression == 'zstd' and not zstd:
        raise error.Abort(_('storage.sqlite.compression set to "zstd" but '
                            'zstandard compression is not available to this '
                            'Mercurial install'))

    if compression == 'zstd':
        requirements.add(REQUIREMENT_ZSTD)
    elif compression == 'zlib':
        requirements.add(REQUIREMENT_ZLIB)
    elif compression == 'none':
        requirements.add(REQUIREMENT_NONE)
    else:
        raise error.Abort(_('unknown compression engine defined in '
                            'storage.sqlite.compression: %s') % compression)

    if createopts.get('shallowfilestore'):
        requirements.add(REQUIREMENT_SHALLOW_FILES)

    return requirements

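# sqlitefilestorage is mixed into the repository type by makefilestorage()
# below; file() is the entry point per-file storage requests go through.
# The compression engine is derived from the repository requirements, so
# the choice made at creation time stays in force for the repo's lifetime.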
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class sqlitefilestorage(object):
    """Repository file storage backed by SQLite."""
    def file(self, path):
        # Strip any leading slash so path lookups are consistent.
        if path[0] == b'/':
            path = path[1:]

        if REQUIREMENT_ZSTD in self.requirements:
            compression = 'zstd'
        elif REQUIREMENT_ZLIB in self.requirements:
            compression = 'zlib'
        elif REQUIREMENT_NONE in self.requirements:
            compression = 'none'
        else:
            raise error.Abort(_('unable to determine what compression engine '
                                'to use for SQLite storage'))

        return sqlitefilestore(self._dbconn, path, compression)

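# makefilestorage() is the wrapped factory that picks the file-storage
# mixin. Shallow file storage is only advertised as a repo feature when the
# corresponding requirement was written at creation time.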
def makefilestorage(orig, requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    if REQUIREMENT in requirements:
        if REQUIREMENT_SHALLOW_FILES in requirements:
            features.add(repository.REPO_FEATURE_SHALLOW_FILE_STORAGE)

        return sqlitefilestorage
    else:
        return orig(requirements=requirements, features=features, **kwargs)

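# makemain() picks the repository class. The zstd check here fails fast at
# repo-open time instead of deferring the error to the first attempted
# (de)compression.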
def makemain(orig, ui, requirements, **kwargs):
    if REQUIREMENT in requirements:
        if REQUIREMENT_ZSTD in requirements and not zstd:
            raise error.Abort(_('repository uses zstandard compression, which '
                                'is not available to this Mercurial install'))

        return sqliterepository

    return orig(requirements=requirements, **kwargs)

def verifierinit(orig, self, *args, **kwargs):
    orig(self, *args, **kwargs)

    # We don't care that files in the store don't align with what is
    # advertised. So suppress these warnings.
    self.warnorphanstorefiles = False

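# extsetup() performs the monkeypatching. extensions.wrapfunction() arranges
# for each wrapper above to receive the original function as its first
# argument (the ``orig`` parameter), which is how non-SQLite repositories
# fall through to stock Mercurial behavior.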
def extsetup(ui):
    localrepo.featuresetupfuncs.add(featuresetup)
    extensions.wrapfunction(localrepo, 'newreporequirements',
                            newreporequirements)
    extensions.wrapfunction(localrepo, 'makefilestorage',
                            makefilestorage)
    extensions.wrapfunction(localrepo, 'makemain',
                            makemain)
    extensions.wrapfunction(verify.verifier, '__init__',
                            verifierinit)

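# reposetup() runs for every repository the extension sees. For
# SQLite-backed repos it seeds the per-instance ``_db`` slot consumed by
# the ``_dbconn`` property above (a (thread id, connection) tuple once
# populated).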
def reposetup(ui, repo):
    if isinstance(repo, sqliterepository):
        repo._db = None

    # TODO check for bundlerepository?