rawdata: implement `rawdata` for `sqlitestore` too...
marmoute
r42948:c9f3f4c8 default
@@ -1,1171 +1,1174 @@
# sqlitestore.py - Storage backend that uses SQLite
#
# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""store repository data in SQLite (EXPERIMENTAL)

The sqlitestore extension enables the storage of repository data in SQLite.

This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
GUARANTEES. This means that repositories created with this extension may
only be usable with the exact version of this extension/Mercurial that was
used. The extension attempts to enforce this in order to prevent repository
corruption.

In addition, several features are not yet supported or have known bugs:

* Only some data is stored in SQLite. Changeset, manifest, and other repository
  data is not yet stored in SQLite.
* Transactions are not robust. If the process is aborted at the right time
  during transaction close/rollback, the repository could be in an inconsistent
  state. This problem will diminish once all repository data is tracked by
  SQLite.
* Bundle repositories do not work (the ability to use e.g.
  `hg -R <bundle-file> log` to automatically overlay a bundle on top of the
  existing repository).
* Various other features don't work.

This extension should work for basic clone/pull, update, and commit workflows.
Some history rewriting operations may fail due to lack of support for bundle
repositories.

To use, activate the extension and set the ``storage.new-repo-backend`` config
option to ``sqlite`` to enable new repositories to use SQLite for storage.
"""

# To run the test suite with repos using SQLite by default, execute the
# following:
#
# HGREPOFEATURES="sqlitestore" run-tests.py \
#     --extra-config-opt extensions.sqlitestore= \
#     --extra-config-opt storage.new-repo-backend=sqlite

from __future__ import absolute_import

import hashlib
import sqlite3
import struct
import threading
import zlib

from mercurial.i18n import _
from mercurial.node import (
    nullid,
    nullrev,
    short,
)
from mercurial.thirdparty import (
    attr,
)
from mercurial import (
    ancestor,
    dagop,
    encoding,
    error,
    extensions,
    localrepo,
    mdiff,
    pycompat,
    registrar,
    repository,
    util,
    verify,
)
from mercurial.utils import (
    interfaceutil,
    storageutil,
)

try:
    from mercurial import zstd
    zstd.__version__
except ImportError:
    zstd = None

configtable = {}
configitem = registrar.configitem(configtable)

# experimental config: storage.sqlite.compression
configitem('storage', 'sqlite.compression',
           default='zstd' if zstd else 'zlib')

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

REQUIREMENT = b'exp-sqlite-001'
REQUIREMENT_ZSTD = b'exp-sqlite-comp-001=zstd'
REQUIREMENT_ZLIB = b'exp-sqlite-comp-001=zlib'
REQUIREMENT_NONE = b'exp-sqlite-comp-001=none'
REQUIREMENT_SHALLOW_FILES = b'exp-sqlite-shallow-files'

CURRENT_SCHEMA_VERSION = 1

COMPRESSION_NONE = 1
COMPRESSION_ZSTD = 2
COMPRESSION_ZLIB = 3

FLAG_CENSORED = 1
FLAG_MISSING_P1 = 2
FLAG_MISSING_P2 = 4

CREATE_SCHEMA = [
    # Deltas are stored as content-indexed blobs.
    # compression column holds COMPRESSION_* constant for how the
    # delta is encoded.

    r'CREATE TABLE delta ('
    r'    id INTEGER PRIMARY KEY, '
    r'    compression INTEGER NOT NULL, '
    r'    hash BLOB UNIQUE ON CONFLICT ABORT, '
    r'    delta BLOB NOT NULL '
    r')',

    # Tracked paths are denormalized to integers to avoid redundant
    # storage of the path name.
    r'CREATE TABLE filepath ('
    r'    id INTEGER PRIMARY KEY, '
    r'    path BLOB NOT NULL '
    r')',

    r'CREATE UNIQUE INDEX filepath_path '
    r'    ON filepath (path)',

    # We have a single table for all file revision data.
    # Each file revision is uniquely described by a (path, rev) and
    # (path, node).
    #
    # Revision data is stored as a pointer to the delta producing this
    # revision and the file revision whose delta should be applied before
    # that one. One can reconstruct the delta chain by recursively following
    # the delta base revision pointers until one encounters NULL.
    #
    # flags column holds bitwise integer flags controlling storage options.
    # These flags are defined by the FLAG_* constants.
    r'CREATE TABLE fileindex ('
    r'    id INTEGER PRIMARY KEY, '
    r'    pathid INTEGER REFERENCES filepath(id), '
    r'    revnum INTEGER NOT NULL, '
    r'    p1rev INTEGER NOT NULL, '
    r'    p2rev INTEGER NOT NULL, '
    r'    linkrev INTEGER NOT NULL, '
    r'    flags INTEGER NOT NULL, '
    r'    deltaid INTEGER REFERENCES delta(id), '
    r'    deltabaseid INTEGER REFERENCES fileindex(id), '
    r'    node BLOB NOT NULL '
    r')',

    r'CREATE UNIQUE INDEX fileindex_pathrevnum '
    r'    ON fileindex (pathid, revnum)',

    r'CREATE UNIQUE INDEX fileindex_pathnode '
    r'    ON fileindex (pathid, node)',

    # Provide a view over all file data for convenience.
    r'CREATE VIEW filedata AS '
    r'SELECT '
    r'    fileindex.id AS id, '
    r'    filepath.id AS pathid, '
    r'    filepath.path AS path, '
    r'    fileindex.revnum AS revnum, '
    r'    fileindex.node AS node, '
    r'    fileindex.p1rev AS p1rev, '
    r'    fileindex.p2rev AS p2rev, '
    r'    fileindex.linkrev AS linkrev, '
    r'    fileindex.flags AS flags, '
    r'    fileindex.deltaid AS deltaid, '
    r'    fileindex.deltabaseid AS deltabaseid '
    r'FROM filepath, fileindex '
    r'WHERE fileindex.pathid=filepath.id',

    r'PRAGMA user_version=%d' % CURRENT_SCHEMA_VERSION,
]
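
# A minimal sketch (editor's addition, not part of the extension) of the
# schema above: create it in an in-memory database and read rows back
# through the filedata view, which joins fileindex with filepath.
# Illustrative only; never called.
def _schema_sketch():
    db = sqlite3.connect(':memory:')
    db.text_factory = bytes
    for stmt in CREATE_SCHEMA:
        db.execute(stmt)

    # One tracked path plus one (delta-less) file revision; -1 is nullrev.
    pathid = db.execute(
        r'INSERT INTO filepath (path) VALUES (?)', (b'a.txt',)).lastrowid
    db.execute(
        r'INSERT INTO fileindex (pathid, revnum, p1rev, p2rev, linkrev, '
        r'    flags, node) VALUES (?, 0, -1, -1, 0, 0, ?)',
        (pathid, b'\x00' * 20))

    rows = list(db.execute(r'SELECT path, revnum, node FROM filedata'))
    assert rows == [(b'a.txt', 0, b'\x00' * 20)]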

def resolvedeltachain(db, pathid, node, revisioncache,
                      stoprids, zstddctx=None):
    """Resolve a delta chain for a file node."""

    # TODO the "not in ({stops})" here is possibly slowing down the query
    # because it needs to perform the lookup on every recursive invocation.
    # This could possibly be faster if we created a temporary query with
    # baseid "poisoned" to null and limited the recursive filter to
    # "is not null".
    res = db.execute(
        r'WITH RECURSIVE '
        r'    deltachain(deltaid, baseid) AS ('
        r'        SELECT deltaid, deltabaseid FROM fileindex '
        r'            WHERE pathid=? AND node=? '
        r'        UNION ALL '
        r'        SELECT fileindex.deltaid, deltabaseid '
        r'            FROM fileindex, deltachain '
        r'            WHERE '
        r'                fileindex.id=deltachain.baseid '
        r'                AND deltachain.baseid IS NOT NULL '
        r'                AND fileindex.id NOT IN ({stops}) '
        r'    ) '
        r'SELECT deltachain.baseid, compression, delta '
        r'FROM deltachain, delta '
        r'WHERE delta.id=deltachain.deltaid'.format(
            stops=r','.join([r'?'] * len(stoprids))),
        tuple([pathid, node] + list(stoprids.keys())))

    deltas = []
    lastdeltabaseid = None

    for deltabaseid, compression, delta in res:
        lastdeltabaseid = deltabaseid

        if compression == COMPRESSION_ZSTD:
            delta = zstddctx.decompress(delta)
        elif compression == COMPRESSION_NONE:
            delta = delta
        elif compression == COMPRESSION_ZLIB:
            delta = zlib.decompress(delta)
        else:
            raise SQLiteStoreError('unhandled compression type: %d' %
                                   compression)

        deltas.append(delta)

    if lastdeltabaseid in stoprids:
        basetext = revisioncache[stoprids[lastdeltabaseid]]
    else:
        basetext = deltas.pop()

    deltas.reverse()
    fulltext = mdiff.patches(basetext, deltas)

    # SQLite returns buffer instances for blob columns on Python 2. This
    # type can propagate through the delta application layer. Because
    # downstream callers assume revisions are bytes, cast as needed.
    if not isinstance(fulltext, bytes):
        fulltext = bytes(fulltext)

    return fulltext

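# A minimal sketch (editor's addition) of the delta-chain reconstruction
# resolvedeltachain() performs: a base text plus an ordered list of deltas
# yields the requested fulltext. Illustrative only; never called.
def _deltachain_sketch():
    base = b'line 1\nline 2\n'
    rev1 = b'line 1\nline 2\nline 3\n'
    rev2 = b'line 0\nline 1\nline 2\nline 3\n'

    # Store each revision as a delta against its predecessor...
    deltas = [mdiff.textdiff(base, rev1), mdiff.textdiff(rev1, rev2)]

    # ...and rebuild the newest fulltext by applying the chain in
    # oldest-to-newest order, as resolvedeltachain() does after reversing
    # the rows returned by its recursive query.
    assert mdiff.patches(base, deltas) == rev2
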
def insertdelta(db, compression, hash, delta):
    try:
        return db.execute(
            r'INSERT INTO delta (compression, hash, delta) '
            r'VALUES (?, ?, ?)',
            (compression, hash, delta)).lastrowid
    except sqlite3.IntegrityError:
        return db.execute(
            r'SELECT id FROM delta WHERE hash=?',
            (hash,)).fetchone()[0]

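# A minimal sketch (editor's addition) of the content-addressed storage
# property of the delta table: because hash is declared UNIQUE, inserting
# an identical payload twice resolves to the existing row id instead of
# creating a duplicate. Illustrative only; never called.
def _insertdelta_sketch():
    db = sqlite3.connect(':memory:')
    for stmt in CREATE_SCHEMA:
        db.execute(stmt)

    blob = b'example delta payload'
    deltahash = hashlib.sha1(blob).digest()

    first = insertdelta(db, COMPRESSION_NONE, deltahash, blob)
    second = insertdelta(db, COMPRESSION_NONE, deltahash, blob)
    assert first == second
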
class SQLiteStoreError(error.StorageError):
    pass

@attr.s
class revisionentry(object):
    rid = attr.ib()
    rev = attr.ib()
    node = attr.ib()
    p1rev = attr.ib()
    p2rev = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    linkrev = attr.ib()
    flags = attr.ib()

@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class sqliterevisiondelta(object):
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    linknode = attr.ib(default=None)

@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class sqliteproblem(object):
    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)

@interfaceutil.implementer(repository.ifilestorage)
class sqlitefilestore(object):
    """Implements storage for an individual tracked path."""

    def __init__(self, db, path, compression):
        self._db = db
        self._path = path

        self._pathid = None

        # revnum -> node
        self._revtonode = {}
        # node -> revnum
        self._nodetorev = {}
        # node -> data structure
        self._revisions = {}

        self._revisioncache = util.lrucachedict(10)

        self._compengine = compression

        if compression == 'zstd':
            self._cctx = zstd.ZstdCompressor(level=3)
            self._dctx = zstd.ZstdDecompressor()
        else:
            self._cctx = None
            self._dctx = None

        self._refreshindex()

    def _refreshindex(self):
        self._revtonode = {}
        self._nodetorev = {}
        self._revisions = {}

        res = list(self._db.execute(
            r'SELECT id FROM filepath WHERE path=?', (self._path,)))

        if not res:
            self._pathid = None
            return

        self._pathid = res[0][0]

        res = self._db.execute(
            r'SELECT id, revnum, node, p1rev, p2rev, linkrev, flags '
            r'FROM fileindex '
            r'WHERE pathid=? '
            r'ORDER BY revnum ASC',
            (self._pathid,))

        for i, row in enumerate(res):
            rid, rev, node, p1rev, p2rev, linkrev, flags = row

            if i != rev:
                raise SQLiteStoreError(_('sqlite database has inconsistent '
                                         'revision numbers'))

            if p1rev == nullrev:
                p1node = nullid
            else:
                p1node = self._revtonode[p1rev]

            if p2rev == nullrev:
                p2node = nullid
            else:
                p2node = self._revtonode[p2rev]

            entry = revisionentry(
                rid=rid,
                rev=rev,
                node=node,
                p1rev=p1rev,
                p2rev=p2rev,
                p1node=p1node,
                p2node=p2node,
                linkrev=linkrev,
                flags=flags)

            self._revtonode[rev] = node
            self._nodetorev[node] = rev
            self._revisions[node] = entry

    # Start of ifileindex interface.

    def __len__(self):
        return len(self._revisions)

    def __iter__(self):
        return iter(pycompat.xrange(len(self._revisions)))

    def hasnode(self, node):
        if node == nullid:
            return False

        return node in self._nodetorev

    def revs(self, start=0, stop=None):
        return storageutil.iterrevs(len(self._revisions), start=start,
                                    stop=stop)

    def parents(self, node):
        if node == nullid:
            return nullid, nullid

        if node not in self._revisions:
            raise error.LookupError(node, self._path, _('no node'))

        entry = self._revisions[node]
        return entry.p1node, entry.p2node

    def parentrevs(self, rev):
        if rev == nullrev:
            return nullrev, nullrev

        if rev not in self._revtonode:
            raise IndexError(rev)

        entry = self._revisions[self._revtonode[rev]]
        return entry.p1rev, entry.p2rev

    def rev(self, node):
        if node == nullid:
            return nullrev

        if node not in self._nodetorev:
            raise error.LookupError(node, self._path, _('no node'))

        return self._nodetorev[node]

    def node(self, rev):
        if rev == nullrev:
            return nullid

        if rev not in self._revtonode:
            raise IndexError(rev)

        return self._revtonode[rev]

    def lookup(self, node):
        return storageutil.fileidlookup(self, node, self._path)

    def linkrev(self, rev):
        if rev == nullrev:
            return nullrev

        if rev not in self._revtonode:
            raise IndexError(rev)

        entry = self._revisions[self._revtonode[rev]]
        return entry.linkrev

    def iscensored(self, rev):
        if rev == nullrev:
            return False

        if rev not in self._revtonode:
            raise IndexError(rev)

        return self._revisions[self._revtonode[rev]].flags & FLAG_CENSORED

    def commonancestorsheads(self, node1, node2):
        rev1 = self.rev(node1)
        rev2 = self.rev(node2)

        ancestors = ancestor.commonancestorsheads(self.parentrevs, rev1, rev2)
        return pycompat.maplist(self.node, ancestors)

    def descendants(self, revs):
        # TODO we could implement this using a recursive SQL query, which
        # might be faster.
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)

    def heads(self, start=None, stop=None):
        if start is None and stop is None:
            if not len(self):
                return [nullid]

        startrev = self.rev(start) if start is not None else nullrev
        stoprevs = {self.rev(n) for n in stop or []}

        revs = dagop.headrevssubset(self.revs, self.parentrevs,
                                    startrev=startrev, stoprevs=stoprevs)

        return [self.node(rev) for rev in revs]

    def children(self, node):
        rev = self.rev(node)

        res = self._db.execute(
            r'SELECT'
            r'    node '
            r'    FROM filedata '
            r'    WHERE path=? AND (p1rev=? OR p2rev=?) '
            r'    ORDER BY revnum ASC',
            (self._path, rev, rev))

        return [row[0] for row in res]

    # End of ifileindex interface.

    # Start of ifiledata interface.

    def size(self, rev):
        if rev == nullrev:
            return 0

        if rev not in self._revtonode:
            raise IndexError(rev)

        node = self._revtonode[rev]

        if self.renamed(node):
            return len(self.read(node))

        return len(self.revision(node))

    def revision(self, node, raw=False, _verifyhash=True):
        if node in (nullid, nullrev):
            return b''

        if isinstance(node, int):
            node = self.node(node)

        if node not in self._nodetorev:
            raise error.LookupError(node, self._path, _('no node'))

        if node in self._revisioncache:
            return self._revisioncache[node]

        # Because we have a fulltext revision cache, we are able to
        # short-circuit delta chain traversal and decompression as soon as
        # we encounter a revision in the cache.

        stoprids = {self._revisions[n].rid: n
                    for n in self._revisioncache}

        if not stoprids:
            stoprids[-1] = None

        fulltext = resolvedeltachain(self._db, self._pathid, node,
                                     self._revisioncache, stoprids,
                                     zstddctx=self._dctx)

        # Don't verify hashes if parent nodes were rewritten, as the hash
        # wouldn't verify.
        if self._revisions[node].flags & (FLAG_MISSING_P1 | FLAG_MISSING_P2):
            _verifyhash = False

        if _verifyhash:
            self._checkhash(fulltext, node)
            self._revisioncache[node] = fulltext

        return fulltext

    def rawdata(self, *args, **kwargs):
        return self.revision(*args, **kwargs)

    def read(self, node):
        return storageutil.filtermetadata(self.revision(node))

    def renamed(self, node):
        return storageutil.filerevisioncopied(self, node)

    def cmp(self, node, fulltext):
        return not storageutil.filedataequivalent(self, node, fulltext)

    def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
                      assumehaveparentrevisions=False,
                      deltamode=repository.CG_DELTAMODE_STD):
        if nodesorder not in ('nodes', 'storage', 'linear', None):
            raise error.ProgrammingError('unhandled value for nodesorder: %s' %
                                         nodesorder)

        nodes = [n for n in nodes if n != nullid]

        if not nodes:
            return

        # TODO perform in a single query.
        res = self._db.execute(
            r'SELECT revnum, deltaid FROM fileindex '
            r'WHERE pathid=? '
            r'    AND node in (%s)' % (r','.join([r'?'] * len(nodes))),
            tuple([self._pathid] + nodes))

        deltabases = {}

        for rev, deltaid in res:
            res = self._db.execute(
                r'SELECT revnum from fileindex WHERE pathid=? AND deltaid=?',
                (self._pathid, deltaid))
            deltabases[rev] = res.fetchone()[0]

        # TODO define revdifffn so we can use delta from storage.
        for delta in storageutil.emitrevisions(
            self, nodes, nodesorder, sqliterevisiondelta,
            deltaparentfn=deltabases.__getitem__,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            deltamode=deltamode):

            yield delta

    # End of ifiledata interface.

    # Start of ifilemutation interface.

    def add(self, filedata, meta, transaction, linkrev, p1, p2):
        if meta or filedata.startswith(b'\x01\n'):
            filedata = storageutil.packmeta(meta, filedata)

        return self.addrevision(filedata, transaction, linkrev, p1, p2)

    def addrevision(self, revisiondata, transaction, linkrev, p1, p2, node=None,
                    flags=0, cachedelta=None):
        if flags:
            raise SQLiteStoreError(_('flags not supported on revisions'))

        validatehash = node is not None
        node = node or storageutil.hashrevisionsha1(revisiondata, p1, p2)

        if validatehash:
            self._checkhash(revisiondata, node, p1, p2)

        if node in self._nodetorev:
            return node

        node = self._addrawrevision(node, revisiondata, transaction, linkrev,
                                    p1, p2)

        self._revisioncache[node] = revisiondata
        return node

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None,
                 maybemissingparents=False):
        nodes = []

        for node, p1, p2, linknode, deltabase, delta, wireflags in deltas:
            storeflags = 0

            if wireflags & repository.REVISION_FLAG_CENSORED:
                storeflags |= FLAG_CENSORED

            if wireflags & ~repository.REVISION_FLAG_CENSORED:
                raise SQLiteStoreError('unhandled revision flag')

            if maybemissingparents:
                if p1 != nullid and not self.hasnode(p1):
                    p1 = nullid
                    storeflags |= FLAG_MISSING_P1

                if p2 != nullid and not self.hasnode(p2):
                    p2 = nullid
                    storeflags |= FLAG_MISSING_P2

            baserev = self.rev(deltabase)

            # If base is censored, delta must be full replacement in a single
            # patch operation.
            if baserev != nullrev and self.iscensored(baserev):
                hlen = struct.calcsize('>lll')
                oldlen = len(self.revision(deltabase, raw=True,
                                           _verifyhash=False))
                newlen = len(delta) - hlen

                if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                    raise error.CensoredBaseError(self._path,
                                                  deltabase)

            if (not (storeflags & FLAG_CENSORED)
                and storageutil.deltaiscensored(
                    delta, baserev, lambda x: len(self.revision(x, raw=True)))):
                storeflags |= FLAG_CENSORED

            linkrev = linkmapper(linknode)

            nodes.append(node)

            if node in self._revisions:
                # Possibly reset parents to make them proper.
                entry = self._revisions[node]

                if entry.flags & FLAG_MISSING_P1 and p1 != nullid:
                    entry.p1node = p1
                    entry.p1rev = self._nodetorev[p1]
                    entry.flags &= ~FLAG_MISSING_P1

                    self._db.execute(
                        r'UPDATE fileindex SET p1rev=?, flags=? '
                        r'WHERE id=?',
                        (self._nodetorev[p1], entry.flags, entry.rid))

                if entry.flags & FLAG_MISSING_P2 and p2 != nullid:
                    entry.p2node = p2
                    entry.p2rev = self._nodetorev[p2]
                    entry.flags &= ~FLAG_MISSING_P2

                    self._db.execute(
                        r'UPDATE fileindex SET p2rev=?, flags=? '
                        r'WHERE id=?',
                        (self._nodetorev[p2], entry.flags, entry.rid))

                continue

            if deltabase == nullid:
                text = mdiff.patch(b'', delta)
                storedelta = None
            else:
                text = None
                storedelta = (deltabase, delta)

            self._addrawrevision(node, text, transaction, linkrev, p1, p2,
                                 storedelta=storedelta, flags=storeflags)

            if addrevisioncb:
                addrevisioncb(self, node)

        return nodes

    def censorrevision(self, tr, censornode, tombstone=b''):
        tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

        # This restriction is cargo culted from revlogs and makes no sense for
        # SQLite, since columns can be resized at will.
        if len(tombstone) > len(self.revision(censornode, raw=True)):
            raise error.Abort(_('censor tombstone must be no longer than '
                                'censored data'))

        # We need to replace the censored revision's data with the tombstone.
        # But replacing that data will have implications for delta chains that
        # reference it.
        #
        # While "better," more complex strategies are possible, we do something
        # simple: we find delta chain children of the censored revision and we
        # replace those incremental deltas with fulltexts of their corresponding
        # revision. Then we delete the now-unreferenced delta and original
        # revision and insert a replacement.

        # Find the delta to be censored.
        censoreddeltaid = self._db.execute(
            r'SELECT deltaid FROM fileindex WHERE id=?',
            (self._revisions[censornode].rid,)).fetchone()[0]

        # Find all its delta chain children.
        # TODO once we support storing deltas for !files, we'll need to look
        # for those delta chains too.
        rows = list(self._db.execute(
            r'SELECT id, pathid, node FROM fileindex '
            r'WHERE deltabaseid=? OR deltaid=?',
            (censoreddeltaid, censoreddeltaid)))

        for row in rows:
            rid, pathid, node = row

            fulltext = resolvedeltachain(self._db, pathid, node, {}, {-1: None},
                                         zstddctx=self._dctx)

            deltahash = hashlib.sha1(fulltext).digest()

            if self._compengine == 'zstd':
                deltablob = self._cctx.compress(fulltext)
                compression = COMPRESSION_ZSTD
            elif self._compengine == 'zlib':
                deltablob = zlib.compress(fulltext)
                compression = COMPRESSION_ZLIB
            elif self._compengine == 'none':
                deltablob = fulltext
                compression = COMPRESSION_NONE
            else:
                raise error.ProgrammingError('unhandled compression engine: %s'
                                             % self._compengine)

            if len(deltablob) >= len(fulltext):
                deltablob = fulltext
                compression = COMPRESSION_NONE

            deltaid = insertdelta(self._db, compression, deltahash, deltablob)

            self._db.execute(
                r'UPDATE fileindex SET deltaid=?, deltabaseid=NULL '
                r'WHERE id=?', (deltaid, rid))

        # Now create the tombstone delta and replace the delta on the censored
        # node.
        deltahash = hashlib.sha1(tombstone).digest()
        tombstonedeltaid = insertdelta(self._db, COMPRESSION_NONE,
                                       deltahash, tombstone)

        flags = self._revisions[censornode].flags
        flags |= FLAG_CENSORED

        self._db.execute(
            r'UPDATE fileindex SET flags=?, deltaid=?, deltabaseid=NULL '
            r'WHERE pathid=? AND node=?',
            (flags, tombstonedeltaid, self._pathid, censornode))

        self._db.execute(
            r'DELETE FROM delta WHERE id=?', (censoreddeltaid,))

        self._refreshindex()
        self._revisioncache.clear()

    def getstrippoint(self, minlink):
        return storageutil.resolvestripinfo(minlink, len(self) - 1,
                                            [self.rev(n) for n in self.heads()],
                                            self.linkrev,
                                            self.parentrevs)

    def strip(self, minlink, transaction):
        if not len(self):
            return

        rev, _ignored = self.getstrippoint(minlink)

        if rev == len(self):
            return

        for rev in self.revs(rev):
            self._db.execute(
                r'DELETE FROM fileindex WHERE pathid=? AND node=?',
                (self._pathid, self.node(rev)))

        # TODO how should we garbage collect data in delta table?

        self._refreshindex()

    # End of ifilemutation interface.

    # Start of ifilestorage interface.

    def files(self):
        return []

    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        d = {}

        if exclusivefiles:
            d['exclusivefiles'] = []

        if sharedfiles:
            # TODO list sqlite file(s) here.
            d['sharedfiles'] = []

        if revisionscount:
            d['revisionscount'] = len(self)

        if trackedsize:
            d['trackedsize'] = sum(len(self.revision(node))
                                   for node in self._nodetorev)

        if storedsize:
            # TODO implement this?
            d['storedsize'] = None

        return d

    def verifyintegrity(self, state):
        state['skipread'] = set()

        for rev in self:
            node = self.node(rev)

            try:
                self.revision(node)
            except Exception as e:
                yield sqliteproblem(
                    error=_('unpacking %s: %s') % (short(node), e),
                    node=node)

                state['skipread'].add(node)

    # End of ifilestorage interface.

    def _checkhash(self, fulltext, node, p1=None, p2=None):
        if p1 is None and p2 is None:
            p1, p2 = self.parents(node)

        if node == storageutil.hashrevisionsha1(fulltext, p1, p2):
            return

        try:
            del self._revisioncache[node]
        except KeyError:
            pass

        if storageutil.iscensoredtext(fulltext):
            raise error.CensoredNodeError(self._path, node, fulltext)

        raise SQLiteStoreError(_('integrity check failed on %s') %
                               self._path)

    def _addrawrevision(self, node, revisiondata, transaction, linkrev,
                        p1, p2, storedelta=None, flags=0):
        if self._pathid is None:
            res = self._db.execute(
                r'INSERT INTO filepath (path) VALUES (?)', (self._path,))
            self._pathid = res.lastrowid

        # For simplicity, always store a delta against p1.
        # TODO we need a lot more logic here to make behavior reasonable.

        if storedelta:
            deltabase, delta = storedelta

            if isinstance(deltabase, int):
                deltabase = self.node(deltabase)

        else:
            assert revisiondata is not None
            deltabase = p1

            if deltabase == nullid:
                delta = revisiondata
            else:
                delta = mdiff.textdiff(self.revision(self.rev(deltabase)),
                                       revisiondata)

        # File index stores a pointer to its delta and the parent delta.
        # The parent delta is stored via a pointer to the fileindex PK.
        if deltabase == nullid:
            baseid = None
        else:
            baseid = self._revisions[deltabase].rid

        # Deltas are stored with a hash of their content. This allows
        # us to de-duplicate. The table is configured to ignore conflicts
        # and it is faster to just insert and silently noop than to look
        # first.
        deltahash = hashlib.sha1(delta).digest()

        if self._compengine == 'zstd':
            deltablob = self._cctx.compress(delta)
            compression = COMPRESSION_ZSTD
        elif self._compengine == 'zlib':
            deltablob = zlib.compress(delta)
            compression = COMPRESSION_ZLIB
        elif self._compengine == 'none':
            deltablob = delta
            compression = COMPRESSION_NONE
        else:
            raise error.ProgrammingError('unhandled compression engine: %s' %
                                         self._compengine)

        # Don't store compressed data if it isn't practical.
        if len(deltablob) >= len(delta):
            deltablob = delta
            compression = COMPRESSION_NONE

        deltaid = insertdelta(self._db, compression, deltahash, deltablob)

        rev = len(self)

        if p1 == nullid:
            p1rev = nullrev
        else:
            p1rev = self._nodetorev[p1]

        if p2 == nullid:
            p2rev = nullrev
        else:
            p2rev = self._nodetorev[p2]

        rid = self._db.execute(
            r'INSERT INTO fileindex ('
            r'    pathid, revnum, node, p1rev, p2rev, linkrev, flags, '
            r'    deltaid, deltabaseid) '
            r'    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
            (self._pathid, rev, node, p1rev, p2rev, linkrev, flags,
             deltaid, baseid)
        ).lastrowid

        entry = revisionentry(
            rid=rid,
            rev=rev,
            node=node,
            p1rev=p1rev,
            p2rev=p2rev,
            p1node=p1,
            p2node=p2,
            linkrev=linkrev,
            flags=flags)

        self._nodetorev[node] = rev
        self._revtonode[rev] = node
        self._revisions[node] = entry

        return node

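# A minimal sketch (editor's addition) of using sqlitefilestore directly
# against a throwaway in-memory database, outside of any repository. The
# 'none' compression engine is chosen so neither zstd nor zlib matters
# here. Illustrative only; never called.
def _filestore_roundtrip_sketch():
    db = sqlite3.connect(':memory:')
    db.text_factory = bytes
    for stmt in CREATE_SCHEMA:
        db.execute(stmt)

    store = sqlitefilestore(db, b'a.txt', 'none')

    # Store one revision with null parents (the transaction argument is
    # unused by the underlying INSERTs, so None suffices for this sketch).
    node = store.addrevision(b'hello\n', None, 0, nullid, nullid)

    # The fulltext comes back unchanged, and rawdata() is, by
    # construction, an alias for revision().
    assert store.revision(node) == b'hello\n'
    assert store.rawdata(node) == b'hello\n'
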
class sqliterepository(localrepo.localrepository):
    def cancopy(self):
        return False

    def transaction(self, *args, **kwargs):
        current = self.currenttransaction()

        tr = super(sqliterepository, self).transaction(*args, **kwargs)

        if current:
            return tr

        self._dbconn.execute(r'BEGIN TRANSACTION')

        def committransaction(_):
            self._dbconn.commit()

        tr.addfinalize('sqlitestore', committransaction)

        return tr

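    # Note: the SQLite commit above is deferred to Mercurial transaction
    # finalization via ``tr.addfinalize``, so the SQLite data is committed
    # only when the enclosing Mercurial transaction closes successfully.
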
    @property
    def _dbconn(self):
        # SQLite connections can only be used on the thread that created
        # them. In most cases, this "just works." However, hgweb uses
        # multiple threads.
        tid = threading.current_thread().ident

        if self._db:
            if self._db[0] == tid:
                return self._db[1]

        db = makedb(self.svfs.join('db.sqlite'))
        self._db = (tid, db)

        return db

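# ``_dbconn`` above caches the connection keyed by the creating thread's id
# and opens a fresh one when accessed from another thread, since sqlite3
# connections refuse cross-thread use by default. An equivalent standalone
# sketch using thread-local storage (hypothetical helper, not part of this
# extension):
#
#     import sqlite3, threading
#     _local = threading.local()
#
#     def getconn(path):
#         if not hasattr(_local, 'db'):
#             _local.db = sqlite3.connect(path)
#         return _local.db
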
def makedb(path):
    """Construct a database handle for a database at path."""

    db = sqlite3.connect(encoding.strfromlocal(path))
    db.text_factory = bytes

    res = db.execute(r'PRAGMA user_version').fetchone()[0]

    # New database.
    if res == 0:
        for statement in CREATE_SCHEMA:
            db.execute(statement)

        db.commit()

    elif res == CURRENT_SCHEMA_VERSION:
        pass

    else:
        raise error.Abort(_('sqlite database has unrecognized version'))

    db.execute(r'PRAGMA journal_mode=WAL')

    return db

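# WAL journaling lets concurrent readers proceed while a write transaction
# is open. Example use of ``makedb`` (hypothetical path; the handle is a
# plain sqlite3 connection, so standard DB-API calls work against it):
#
#     db = makedb(b'/path/to/repo/.hg/store/db.sqlite')
#     version = db.execute(r'PRAGMA user_version').fetchone()[0]
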
def featuresetup(ui, supported):
    supported.add(REQUIREMENT)

    if zstd:
        supported.add(REQUIREMENT_ZSTD)

    supported.add(REQUIREMENT_ZLIB)
    supported.add(REQUIREMENT_NONE)
    supported.add(REQUIREMENT_SHALLOW_FILES)
    supported.add(repository.NARROW_REQUIREMENT)

def newreporequirements(orig, ui, createopts):
    if createopts['backend'] != 'sqlite':
        return orig(ui, createopts)

    # This restriction can be lifted once we have more confidence.
    if 'sharedrepo' in createopts:
        raise error.Abort(_('shared repositories not supported with SQLite '
                            'store'))

    # This filtering is out of an abundance of caution: we want to ensure
    # we honor creation options and we do that by annotating exactly the
    # creation options we recognize.
    known = {
        'narrowfiles',
        'backend',
        'shallowfilestore',
    }

    unsupported = set(createopts) - known
    if unsupported:
        raise error.Abort(_('SQLite store does not support repo creation '
                            'option: %s') % ', '.join(sorted(unsupported)))

    # Since we're a hybrid store that still relies on revlogs, we fall back
    # to using the revlogv1 backend's storage requirements then adding our
    # own requirement.
    createopts['backend'] = 'revlogv1'
    requirements = orig(ui, createopts)
    requirements.add(REQUIREMENT)

    compression = ui.config('storage', 'sqlite.compression')

    if compression == 'zstd' and not zstd:
        raise error.Abort(_('storage.sqlite.compression set to "zstd" but '
                            'zstandard compression not available to this '
                            'Mercurial install'))

    if compression == 'zstd':
        requirements.add(REQUIREMENT_ZSTD)
    elif compression == 'zlib':
        requirements.add(REQUIREMENT_ZLIB)
    elif compression == 'none':
        requirements.add(REQUIREMENT_NONE)
    else:
        raise error.Abort(_('unknown compression engine defined in '
                            'storage.sqlite.compression: %s') % compression)

    if createopts.get('shallowfilestore'):
        requirements.add(REQUIREMENT_SHALLOW_FILES)

    return requirements

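# The requirements derived above come from config at repo creation time.
# For example, to create repositories with zstd-compressed SQLite storage
# (hgrc sketch):
#
#     [extensions]
#     sqlitestore =
#
#     [storage]
#     new-repo-backend = sqlite
#     sqlite.compression = zstd
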
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class sqlitefilestorage(object):
    """Repository file storage backed by SQLite."""
    def file(self, path):
        if path.startswith(b'/'):
            path = path[1:]

        if REQUIREMENT_ZSTD in self.requirements:
            compression = 'zstd'
        elif REQUIREMENT_ZLIB in self.requirements:
            compression = 'zlib'
        elif REQUIREMENT_NONE in self.requirements:
            compression = 'none'
        else:
            raise error.Abort(_('unable to determine what compression engine '
                                'to use for SQLite storage'))

        return sqlitefilestore(self._dbconn, path, compression)

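# ``file()`` is how the rest of Mercurial obtains per-file storage. The
# requirements recorded at creation time (see ``newreporequirements``
# above) pin the compression engine for the life of the repository.
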
def makefilestorage(orig, requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    if REQUIREMENT in requirements:
        if REQUIREMENT_SHALLOW_FILES in requirements:
            features.add(repository.REPO_FEATURE_SHALLOW_FILE_STORAGE)

        return sqlitefilestorage
    else:
        return orig(requirements=requirements, features=features, **kwargs)

def makemain(orig, ui, requirements, **kwargs):
    if REQUIREMENT in requirements:
        if REQUIREMENT_ZSTD in requirements and not zstd:
            raise error.Abort(_('repository uses zstandard compression, which '
                                'is not available to this Mercurial install'))

        return sqliterepository

    return orig(requirements=requirements, **kwargs)

def verifierinit(orig, self, *args, **kwargs):
    orig(self, *args, **kwargs)

    # We don't care that files in the store don't align with what is
    # advertised. So suppress these warnings.
    self.warnorphanstorefiles = False

def extsetup(ui):
    localrepo.featuresetupfuncs.add(featuresetup)
    extensions.wrapfunction(localrepo, 'newreporequirements',
                            newreporequirements)
    extensions.wrapfunction(localrepo, 'makefilestorage',
                            makefilestorage)
    extensions.wrapfunction(localrepo, 'makemain',
                            makemain)
    extensions.wrapfunction(verify.verifier, '__init__',
                            verifierinit)

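# ``extensions.wrapfunction(module, name, wrapper)`` replaces the named
# attribute with ``wrapper`` and passes the original callable as the
# wrapper's first argument, which is the ``orig`` parameter threaded
# through the wrappers above.
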
def reposetup(ui, repo):
    if isinstance(repo, sqliterepository):
        repo._db = None

    # TODO check for bundlerepository?