wrapfunction: use sysstr instead of bytes as argument in "sqlitestore"...
marmoute - r51686:1fbae268 default
# sqlitestore.py - Storage backend that uses SQLite
#
# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""store repository data in SQLite (EXPERIMENTAL)

The sqlitestore extension enables the storage of repository data in SQLite.

This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
GUARANTEES. This means that repositories created with this extension may
only be usable with the exact version of this extension/Mercurial that was
used. The extension attempts to enforce this in order to prevent repository
corruption.

In addition, several features are not yet supported or have known bugs:

* Only some data is stored in SQLite. Changeset, manifest, and other repository
  data is not yet stored in SQLite.
* Transactions are not robust. If the process is aborted at the right time
  during transaction close/rollback, the repository could be in an inconsistent
  state. This problem will diminish once all repository data is tracked by
  SQLite.
* Bundle repositories do not work (the ability to use e.g.
  `hg -R <bundle-file> log` to automatically overlay a bundle on top of the
  existing repository).
* Various other features don't work.

This extension should work for basic clone/pull, update, and commit workflows.
Some history rewriting operations may fail due to lack of support for bundle
repositories.

To use, activate the extension and set the ``storage.new-repo-backend`` config
option to ``sqlite`` to enable new repositories to use SQLite for storage.
"""

# To run the test suite with repos using SQLite by default, execute the
# following:
#
# HGREPOFEATURES="sqlitestore" run-tests.py \
#     --extra-config-opt extensions.sqlitestore= \
#     --extra-config-opt storage.new-repo-backend=sqlite


import sqlite3
import struct
import threading
import zlib

from mercurial.i18n import _
from mercurial.node import (
    nullrev,
    sha1nodeconstants,
    short,
)
from mercurial.thirdparty import attr
from mercurial import (
    ancestor,
    dagop,
    encoding,
    error,
    extensions,
    localrepo,
    mdiff,
    pycompat,
    registrar,
    requirements,
    util,
    verify,
)
from mercurial.interfaces import (
    repository,
    util as interfaceutil,
)
from mercurial.utils import (
    hashutil,
    storageutil,
)

try:
    from mercurial import zstd  # pytype: disable=import-error

    zstd.__version__
except ImportError:
    zstd = None

configtable = {}
configitem = registrar.configitem(configtable)

# experimental config: storage.sqlite.compression
configitem(
    b'storage',
    b'sqlite.compression',
    default=b'zstd' if zstd else b'zlib',
    experimental=True,
)
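
# Example of selecting the compression engine explicitly (a sketch; the
# supported values are zstd, zlib, and none, mirroring the REQUIREMENT_*
# constants below, and zstd requires the bundled zstd module):
#
#   [storage]
#   sqlite.compression = zlib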

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

REQUIREMENT = b'exp-sqlite-001'
REQUIREMENT_ZSTD = b'exp-sqlite-comp-001=zstd'
REQUIREMENT_ZLIB = b'exp-sqlite-comp-001=zlib'
REQUIREMENT_NONE = b'exp-sqlite-comp-001=none'
REQUIREMENT_SHALLOW_FILES = b'exp-sqlite-shallow-files'

CURRENT_SCHEMA_VERSION = 1

COMPRESSION_NONE = 1
COMPRESSION_ZSTD = 2
COMPRESSION_ZLIB = 3

FLAG_CENSORED = 1
FLAG_MISSING_P1 = 2
FLAG_MISSING_P2 = 4

CREATE_SCHEMA = [
    # Deltas are stored as content-indexed blobs.
    # compression column holds COMPRESSION_* constant for how the
    # delta is encoded.
    'CREATE TABLE delta ('
    '    id INTEGER PRIMARY KEY, '
    '    compression INTEGER NOT NULL, '
    '    hash BLOB UNIQUE ON CONFLICT ABORT, '
    '    delta BLOB NOT NULL '
    ')',
    # Tracked paths are denormalized to integers to avoid redundant
    # storage of the path name.
    'CREATE TABLE filepath ('
    '    id INTEGER PRIMARY KEY, '
    '    path BLOB NOT NULL '
    ')',
    'CREATE UNIQUE INDEX filepath_path ON filepath (path)',
    # We have a single table for all file revision data.
    # Each file revision is uniquely described by a (path, rev) and
    # (path, node).
    #
    # Revision data is stored as a pointer to the delta producing this
    # revision and the file revision whose delta should be applied before
    # that one. One can reconstruct the delta chain by recursively following
    # the delta base revision pointers until one encounters NULL; an
    # illustrative walk of this chain follows the schema list below.
    #
    # flags column holds bitwise integer flags controlling storage options.
    # These flags are defined by the FLAG_* constants.
    'CREATE TABLE fileindex ('
    '    id INTEGER PRIMARY KEY, '
    '    pathid INTEGER REFERENCES filepath(id), '
    '    revnum INTEGER NOT NULL, '
    '    p1rev INTEGER NOT NULL, '
    '    p2rev INTEGER NOT NULL, '
    '    linkrev INTEGER NOT NULL, '
    '    flags INTEGER NOT NULL, '
    '    deltaid INTEGER REFERENCES delta(id), '
    '    deltabaseid INTEGER REFERENCES fileindex(id), '
    '    node BLOB NOT NULL '
    ')',
    'CREATE UNIQUE INDEX fileindex_pathrevnum '
    '    ON fileindex (pathid, revnum)',
    'CREATE UNIQUE INDEX fileindex_pathnode ON fileindex (pathid, node)',
    # Provide a view over all file data for convenience.
    'CREATE VIEW filedata AS '
    'SELECT '
    '    fileindex.id AS id, '
    '    filepath.id AS pathid, '
    '    filepath.path AS path, '
    '    fileindex.revnum AS revnum, '
    '    fileindex.node AS node, '
    '    fileindex.p1rev AS p1rev, '
    '    fileindex.p2rev AS p2rev, '
    '    fileindex.linkrev AS linkrev, '
    '    fileindex.flags AS flags, '
    '    fileindex.deltaid AS deltaid, '
    '    fileindex.deltabaseid AS deltabaseid '
    'FROM filepath, fileindex '
    'WHERE fileindex.pathid=filepath.id',
    'PRAGMA user_version=%d' % CURRENT_SCHEMA_VERSION,
]
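

def _naivedeltachain(db, fileindexid):
    """Illustrative sketch only: walk a delta chain without a recursive CTE.

    This mirrors the recursion described in the schema comments above by
    following deltabaseid pointers until NULL. It is an example under
    assumptions, not part of the extension: ``db`` is assumed to be a plain
    sqlite3 connection to a store created with CREATE_SCHEMA, the name
    _naivedeltachain is hypothetical, and nothing here calls it. The real
    implementation is ``resolvedeltachain`` below.
    """
    deltas = []
    rowid = fileindexid

    while rowid is not None:
        deltaid, rowid = db.execute(
            'SELECT deltaid, deltabaseid FROM fileindex WHERE id=?',
            (rowid,),
        ).fetchone()
        # Compression is ignored here for brevity; resolvedeltachain
        # decodes blobs according to the delta.compression column.
        deltas.append(
            db.execute(
                'SELECT delta FROM delta WHERE id=?', (deltaid,)
            ).fetchone()[0]
        )

    # The entry reached via a NULL base is the full text; the others are
    # incremental deltas to apply on top of it, oldest first.
    deltas.reverse()
    return deltas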


def resolvedeltachain(db, pathid, node, revisioncache, stoprids, zstddctx=None):
    """Resolve a delta chain for a file node."""

    # TODO the "not in ({stops})" here is possibly slowing down the query
    # because it needs to perform the lookup on every recursive invocation.
    # This could possibly be faster if we created a temporary query with
    # baseid "poisoned" to null and limited the recursive filter to
    # "is not null".
    res = db.execute(
        'WITH RECURSIVE '
        '    deltachain(deltaid, baseid) AS ('
        '        SELECT deltaid, deltabaseid FROM fileindex '
        '            WHERE pathid=? AND node=? '
        '        UNION ALL '
        '        SELECT fileindex.deltaid, deltabaseid '
        '            FROM fileindex, deltachain '
        '            WHERE '
        '                fileindex.id=deltachain.baseid '
        '                AND deltachain.baseid IS NOT NULL '
        '                AND fileindex.id NOT IN ({stops}) '
        '    ) '
        'SELECT deltachain.baseid, compression, delta '
        'FROM deltachain, delta '
        'WHERE delta.id=deltachain.deltaid'.format(
            stops=','.join(['?'] * len(stoprids))
        ),
        tuple([pathid, node] + list(stoprids.keys())),
    )

    deltas = []
    lastdeltabaseid = None

    for deltabaseid, compression, delta in res:
        lastdeltabaseid = deltabaseid

        if compression == COMPRESSION_ZSTD:
            delta = zstddctx.decompress(delta)
        elif compression == COMPRESSION_NONE:
            delta = delta
        elif compression == COMPRESSION_ZLIB:
            delta = zlib.decompress(delta)
        else:
            raise SQLiteStoreError(
                b'unhandled compression type: %d' % compression
            )

        deltas.append(delta)

    if lastdeltabaseid in stoprids:
        basetext = revisioncache[stoprids[lastdeltabaseid]]
    else:
        basetext = deltas.pop()

    deltas.reverse()
    fulltext = mdiff.patches(basetext, deltas)

    # SQLite returns buffer instances for blob columns on Python 2. This
    # type can propagate through the delta application layer. Because
    # downstream callers assume revisions are bytes, cast as needed.
    if not isinstance(fulltext, bytes):
        fulltext = bytes(fulltext)

    return fulltext


def insertdelta(db, compression, hash, delta):
    try:
        return db.execute(
            'INSERT INTO delta (compression, hash, delta) VALUES (?, ?, ?)',
            (compression, hash, delta),
        ).lastrowid
    except sqlite3.IntegrityError:
        return db.execute(
            'SELECT id FROM delta WHERE hash=?', (hash,)
        ).fetchone()[0]
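

def _example_delta_dedup(db):
    """Illustrative sketch only: demonstrate insertdelta() de-duplication.

    Inserting the same content-addressed delta twice returns the same row
    id: the UNIQUE constraint on delta.hash aborts the second INSERT and
    insertdelta() falls back to looking up the existing row. ``db`` is
    assumed to be a sqlite3 connection with CREATE_SCHEMA applied; the
    function name is hypothetical and nothing in the extension calls it.
    """
    h = hashutil.sha1(b'example delta').digest()
    first = insertdelta(db, COMPRESSION_NONE, h, b'example delta')
    second = insertdelta(db, COMPRESSION_NONE, h, b'example delta')
    assert first == second
    return first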


class SQLiteStoreError(error.StorageError):
    pass


@attr.s
class revisionentry:
    rid = attr.ib()
    rev = attr.ib()
    node = attr.ib()
    p1rev = attr.ib()
    p2rev = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    linkrev = attr.ib()
    flags = attr.ib()


@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class sqliterevisiondelta:
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    sidedata = attr.ib()
    protocol_flags = attr.ib()
    linknode = attr.ib(default=None)


@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class sqliteproblem:
    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)


@interfaceutil.implementer(repository.ifilestorage)
class sqlitefilestore:
    """Implements storage for an individual tracked path."""

    def __init__(self, db, path, compression):
        self.nullid = sha1nodeconstants.nullid
        self._db = db
        self._path = path

        self._pathid = None

        # revnum -> node
        self._revtonode = {}
        # node -> revnum
        self._nodetorev = {}
        # node -> data structure
        self._revisions = {}

        self._revisioncache = util.lrucachedict(10)

        self._compengine = compression

        if compression == b'zstd':
            self._cctx = zstd.ZstdCompressor(level=3)
            self._dctx = zstd.ZstdDecompressor()
        else:
            self._cctx = None
            self._dctx = None

        self._refreshindex()

    def _refreshindex(self):
        self._revtonode = {}
        self._nodetorev = {}
        self._revisions = {}

        res = list(
            self._db.execute(
                'SELECT id FROM filepath WHERE path=?', (self._path,)
            )
        )

        if not res:
            self._pathid = None
            return

        self._pathid = res[0][0]

        res = self._db.execute(
            'SELECT id, revnum, node, p1rev, p2rev, linkrev, flags '
            'FROM fileindex '
            'WHERE pathid=? '
            'ORDER BY revnum ASC',
            (self._pathid,),
        )

        for i, row in enumerate(res):
            rid, rev, node, p1rev, p2rev, linkrev, flags = row

            if i != rev:
                raise SQLiteStoreError(
                    _(b'sqlite database has inconsistent revision numbers')
                )

            if p1rev == nullrev:
                p1node = sha1nodeconstants.nullid
            else:
                p1node = self._revtonode[p1rev]

            if p2rev == nullrev:
                p2node = sha1nodeconstants.nullid
            else:
                p2node = self._revtonode[p2rev]

            entry = revisionentry(
                rid=rid,
                rev=rev,
                node=node,
                p1rev=p1rev,
                p2rev=p2rev,
                p1node=p1node,
                p2node=p2node,
                linkrev=linkrev,
                flags=flags,
            )

            self._revtonode[rev] = node
            self._nodetorev[node] = rev
            self._revisions[node] = entry

    # Start of ifileindex interface.

    def __len__(self):
        return len(self._revisions)

    def __iter__(self):
        return iter(range(len(self._revisions)))

    def hasnode(self, node):
        if node == sha1nodeconstants.nullid:
            return False

        return node in self._nodetorev

    def revs(self, start=0, stop=None):
        return storageutil.iterrevs(
            len(self._revisions), start=start, stop=stop
        )

    def parents(self, node):
        if node == sha1nodeconstants.nullid:
            return sha1nodeconstants.nullid, sha1nodeconstants.nullid

        if node not in self._revisions:
            raise error.LookupError(node, self._path, _(b'no node'))

        entry = self._revisions[node]
        return entry.p1node, entry.p2node

    def parentrevs(self, rev):
        if rev == nullrev:
            return nullrev, nullrev

        if rev not in self._revtonode:
            raise IndexError(rev)

        entry = self._revisions[self._revtonode[rev]]
        return entry.p1rev, entry.p2rev

    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse revision order.
        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""

        # first, make sure start revisions aren't filtered
        revs = list(revs)
        checkrev = self.node
        for r in revs:
            checkrev(r)

        return ancestor.lazyancestors(
            self.parentrevs,
            revs,
            stoprev=stoprev,
            inclusive=inclusive,
        )

    def rev(self, node):
        if node == sha1nodeconstants.nullid:
            return nullrev

        if node not in self._nodetorev:
            raise error.LookupError(node, self._path, _(b'no node'))

        return self._nodetorev[node]

    def node(self, rev):
        if rev == nullrev:
            return sha1nodeconstants.nullid

        if rev not in self._revtonode:
            raise IndexError(rev)

        return self._revtonode[rev]

    def lookup(self, node):
        return storageutil.fileidlookup(self, node, self._path)

    def linkrev(self, rev):
        if rev == nullrev:
            return nullrev

        if rev not in self._revtonode:
            raise IndexError(rev)

        entry = self._revisions[self._revtonode[rev]]
        return entry.linkrev

    def iscensored(self, rev):
        if rev == nullrev:
            return False

        if rev not in self._revtonode:
            raise IndexError(rev)

        return self._revisions[self._revtonode[rev]].flags & FLAG_CENSORED

    def commonancestorsheads(self, node1, node2):
        rev1 = self.rev(node1)
        rev2 = self.rev(node2)

        ancestors = ancestor.commonancestorsheads(self.parentrevs, rev1, rev2)
        return pycompat.maplist(self.node, ancestors)

    def descendants(self, revs):
        # TODO we could implement this using a recursive SQL query, which
        # might be faster.
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)

    def heads(self, start=None, stop=None):
        if start is None and stop is None:
            if not len(self):
                return [sha1nodeconstants.nullid]

        startrev = self.rev(start) if start is not None else nullrev
        stoprevs = {self.rev(n) for n in stop or []}

        revs = dagop.headrevssubset(
            self.revs, self.parentrevs, startrev=startrev, stoprevs=stoprevs
        )

        return [self.node(rev) for rev in revs]

    def children(self, node):
        rev = self.rev(node)

        res = self._db.execute(
            'SELECT'
            '    node '
            '    FROM filedata '
            '    WHERE path=? AND (p1rev=? OR p2rev=?) '
            '    ORDER BY revnum ASC',
            (self._path, rev, rev),
        )

        return [row[0] for row in res]

    # End of ifileindex interface.

    # Start of ifiledata interface.

    def size(self, rev):
        if rev == nullrev:
            return 0

        if rev not in self._revtonode:
            raise IndexError(rev)

        node = self._revtonode[rev]

        if self.renamed(node):
            return len(self.read(node))

        return len(self.revision(node))

    def revision(self, node, raw=False, _verifyhash=True):
        if node in (sha1nodeconstants.nullid, nullrev):
            return b''

        if isinstance(node, int):
            node = self.node(node)

        if node not in self._nodetorev:
            raise error.LookupError(node, self._path, _(b'no node'))

        if node in self._revisioncache:
            return self._revisioncache[node]

        # Because we have a fulltext revision cache, we are able to
        # short-circuit delta chain traversal and decompression as soon as
        # we encounter a revision in the cache.

        stoprids = {self._revisions[n].rid: n for n in self._revisioncache}

        if not stoprids:
            stoprids[-1] = None
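            # (-1 never matches a real fileindex id, so this sentinel lets
            # the recursive query in resolvedeltachain() run the chain all
            # the way down to its NULL base.)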

        fulltext = resolvedeltachain(
            self._db,
            self._pathid,
            node,
            self._revisioncache,
            stoprids,
            zstddctx=self._dctx,
        )

        # Don't verify hashes if parent nodes were rewritten, as the hash
        # wouldn't verify.
        if self._revisions[node].flags & (FLAG_MISSING_P1 | FLAG_MISSING_P2):
            _verifyhash = False

        if _verifyhash:
            self._checkhash(fulltext, node)
            self._revisioncache[node] = fulltext

        return fulltext

    def rawdata(self, *args, **kwargs):
        return self.revision(*args, **kwargs)

    def read(self, node):
        return storageutil.filtermetadata(self.revision(node))

    def renamed(self, node):
        return storageutil.filerevisioncopied(self, node)

    def cmp(self, node, fulltext):
        return not storageutil.filedataequivalent(self, node, fulltext)

    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=repository.CG_DELTAMODE_STD,
        sidedata_helpers=None,
        debug_info=None,
    ):
        if nodesorder not in (b'nodes', b'storage', b'linear', None):
            raise error.ProgrammingError(
                b'unhandled value for nodesorder: %s' % nodesorder
            )

        nodes = [n for n in nodes if n != sha1nodeconstants.nullid]

        if not nodes:
            return

        # TODO perform in a single query.
        res = self._db.execute(
            'SELECT revnum, deltaid FROM fileindex '
            'WHERE pathid=? '
            '    AND node in (%s)' % (','.join(['?'] * len(nodes))),
            tuple([self._pathid] + nodes),
        )

        deltabases = {}

        for rev, deltaid in res:
            res = self._db.execute(
                'SELECT revnum from fileindex WHERE pathid=? AND deltaid=?',
                (self._pathid, deltaid),
            )
            deltabases[rev] = res.fetchone()[0]

        # TODO define revdifffn so we can use delta from storage.
        for delta in storageutil.emitrevisions(
            self,
            nodes,
            nodesorder,
            sqliterevisiondelta,
            deltaparentfn=deltabases.__getitem__,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            deltamode=deltamode,
            sidedata_helpers=sidedata_helpers,
        ):
            yield delta

    # End of ifiledata interface.

    # Start of ifilemutation interface.

    def add(self, filedata, meta, transaction, linkrev, p1, p2):
        if meta or filedata.startswith(b'\x01\n'):
            filedata = storageutil.packmeta(meta, filedata)

        rev = self.addrevision(filedata, transaction, linkrev, p1, p2)
        return self.node(rev)

    def addrevision(
        self,
        revisiondata,
        transaction,
        linkrev,
        p1,
        p2,
        node=None,
        flags=0,
        cachedelta=None,
    ):
        if flags:
            raise SQLiteStoreError(_(b'flags not supported on revisions'))

        validatehash = node is not None
        node = node or storageutil.hashrevisionsha1(revisiondata, p1, p2)

        if validatehash:
            self._checkhash(revisiondata, node, p1, p2)

        rev = self._nodetorev.get(node)
        if rev is not None:
            return rev

        rev = self._addrawrevision(
            node, revisiondata, transaction, linkrev, p1, p2
        )

        self._revisioncache[node] = revisiondata
        return rev

    def addgroup(
        self,
        deltas,
        linkmapper,
        transaction,
        addrevisioncb=None,
        duplicaterevisioncb=None,
        maybemissingparents=False,
    ):
        empty = True

        for (
            node,
            p1,
            p2,
            linknode,
            deltabase,
            delta,
            wireflags,
            sidedata,
        ) in deltas:
            storeflags = 0

            if wireflags & repository.REVISION_FLAG_CENSORED:
                storeflags |= FLAG_CENSORED

            if wireflags & ~repository.REVISION_FLAG_CENSORED:
                raise SQLiteStoreError(b'unhandled revision flag')

            if maybemissingparents:
                if p1 != sha1nodeconstants.nullid and not self.hasnode(p1):
                    p1 = sha1nodeconstants.nullid
                    storeflags |= FLAG_MISSING_P1

                if p2 != sha1nodeconstants.nullid and not self.hasnode(p2):
                    p2 = sha1nodeconstants.nullid
                    storeflags |= FLAG_MISSING_P2

            baserev = self.rev(deltabase)

            # If base is censored, delta must be full replacement in a single
            # patch operation.
            if baserev != nullrev and self.iscensored(baserev):
                hlen = struct.calcsize(b'>lll')
                oldlen = len(self.rawdata(deltabase, _verifyhash=False))
                newlen = len(delta) - hlen

                if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                    raise error.CensoredBaseError(self._path, deltabase)

            if not (storeflags & FLAG_CENSORED) and storageutil.deltaiscensored(
                delta, baserev, lambda x: len(self.rawdata(x))
            ):
                storeflags |= FLAG_CENSORED

            linkrev = linkmapper(linknode)

            if node in self._revisions:
                # Possibly reset parents to make them proper.
                entry = self._revisions[node]

                if (
                    entry.flags & FLAG_MISSING_P1
                    and p1 != sha1nodeconstants.nullid
                ):
                    entry.p1node = p1
                    entry.p1rev = self._nodetorev[p1]
                    entry.flags &= ~FLAG_MISSING_P1

                    self._db.execute(
                        'UPDATE fileindex SET p1rev=?, flags=? WHERE id=?',
                        (self._nodetorev[p1], entry.flags, entry.rid),
                    )

                if (
                    entry.flags & FLAG_MISSING_P2
                    and p2 != sha1nodeconstants.nullid
                ):
                    entry.p2node = p2
                    entry.p2rev = self._nodetorev[p2]
                    entry.flags &= ~FLAG_MISSING_P2

                    self._db.execute(
                        'UPDATE fileindex SET p2rev=?, flags=? WHERE id=?',
                        (self._nodetorev[p2], entry.flags, entry.rid),
782 )
782 )
783
783
784 if duplicaterevisioncb:
784 if duplicaterevisioncb:
785 duplicaterevisioncb(self, self.rev(node))
785 duplicaterevisioncb(self, self.rev(node))
786 empty = False
786 empty = False
787 continue
787 continue
788
788
789 if deltabase == sha1nodeconstants.nullid:
789 if deltabase == sha1nodeconstants.nullid:
790 text = mdiff.patch(b'', delta)
790 text = mdiff.patch(b'', delta)
791 storedelta = None
791 storedelta = None
792 else:
792 else:
793 text = None
793 text = None
794 storedelta = (deltabase, delta)
794 storedelta = (deltabase, delta)
795
795
796 rev = self._addrawrevision(
796 rev = self._addrawrevision(
797 node,
797 node,
798 text,
798 text,
799 transaction,
799 transaction,
800 linkrev,
800 linkrev,
801 p1,
801 p1,
802 p2,
802 p2,
803 storedelta=storedelta,
803 storedelta=storedelta,
804 flags=storeflags,
804 flags=storeflags,
805 )
805 )
806
806
807 if addrevisioncb:
807 if addrevisioncb:
808 addrevisioncb(self, rev)
808 addrevisioncb(self, rev)
809 empty = False
809 empty = False
810
810
811 return not empty
811 return not empty
812
812
813 def censorrevision(self, tr, censornode, tombstone=b''):
813 def censorrevision(self, tr, censornode, tombstone=b''):
814 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
814 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
815
815
816 # This restriction is cargo culted from revlogs and makes no sense for
816 # This restriction is cargo culted from revlogs and makes no sense for
817 # SQLite, since columns can be resized at will.
817 # SQLite, since columns can be resized at will.
818 if len(tombstone) > len(self.rawdata(censornode)):
818 if len(tombstone) > len(self.rawdata(censornode)):
819 raise error.Abort(
819 raise error.Abort(
820 _(b'censor tombstone must be no longer than censored data')
820 _(b'censor tombstone must be no longer than censored data')
821 )
821 )
822
822
823 # We need to replace the censored revision's data with the tombstone.
823 # We need to replace the censored revision's data with the tombstone.
824 # But replacing that data will have implications for delta chains that
824 # But replacing that data will have implications for delta chains that
825 # reference it.
825 # reference it.
826 #
826 #
827 # While "better," more complex strategies are possible, we do something
827 # While "better," more complex strategies are possible, we do something
828 # simple: we find delta chain children of the censored revision and we
828 # simple: we find delta chain children of the censored revision and we
829 # replace those incremental deltas with fulltexts of their corresponding
829 # replace those incremental deltas with fulltexts of their corresponding
830 # revision. Then we delete the now-unreferenced delta and original
830 # revision. Then we delete the now-unreferenced delta and original
831 # revision and insert a replacement.
831 # revision and insert a replacement.
832
832
833 # Find the delta to be censored.
833 # Find the delta to be censored.
834 censoreddeltaid = self._db.execute(
834 censoreddeltaid = self._db.execute(
835 'SELECT deltaid FROM fileindex WHERE id=?',
835 'SELECT deltaid FROM fileindex WHERE id=?',
836 (self._revisions[censornode].rid,),
836 (self._revisions[censornode].rid,),
837 ).fetchone()[0]
837 ).fetchone()[0]
838
838
839 # Find all its delta chain children.
839 # Find all its delta chain children.
840 # TODO once we support storing deltas for !files, we'll need to look
840 # TODO once we support storing deltas for !files, we'll need to look
841 # for those delta chains too.
841 # for those delta chains too.
842 rows = list(
842 rows = list(
843 self._db.execute(
843 self._db.execute(
844 'SELECT id, pathid, node FROM fileindex '
844 'SELECT id, pathid, node FROM fileindex '
845 'WHERE deltabaseid=? OR deltaid=?',
845 'WHERE deltabaseid=? OR deltaid=?',
846 (censoreddeltaid, censoreddeltaid),
846 (censoreddeltaid, censoreddeltaid),
847 )
847 )
848 )
848 )
849
849
850 for row in rows:
850 for row in rows:
851 rid, pathid, node = row
851 rid, pathid, node = row
852
852
853 fulltext = resolvedeltachain(
853 fulltext = resolvedeltachain(
854 self._db, pathid, node, {}, {-1: None}, zstddctx=self._dctx
854 self._db, pathid, node, {}, {-1: None}, zstddctx=self._dctx
855 )
855 )
856
856
857 deltahash = hashutil.sha1(fulltext).digest()
857 deltahash = hashutil.sha1(fulltext).digest()
858
858
859 if self._compengine == b'zstd':
859 if self._compengine == b'zstd':
860 deltablob = self._cctx.compress(fulltext)
860 deltablob = self._cctx.compress(fulltext)
861 compression = COMPRESSION_ZSTD
861 compression = COMPRESSION_ZSTD
862 elif self._compengine == b'zlib':
862 elif self._compengine == b'zlib':
863 deltablob = zlib.compress(fulltext)
863 deltablob = zlib.compress(fulltext)
864 compression = COMPRESSION_ZLIB
864 compression = COMPRESSION_ZLIB
865 elif self._compengine == b'none':
865 elif self._compengine == b'none':
866 deltablob = fulltext
866 deltablob = fulltext
867 compression = COMPRESSION_NONE
867 compression = COMPRESSION_NONE
868 else:
868 else:
869 raise error.ProgrammingError(
869 raise error.ProgrammingError(
870 b'unhandled compression engine: %s' % self._compengine
870 b'unhandled compression engine: %s' % self._compengine
871 )
871 )
872
872
873 if len(deltablob) >= len(fulltext):
873 if len(deltablob) >= len(fulltext):
874 deltablob = fulltext
874 deltablob = fulltext
875 compression = COMPRESSION_NONE
875 compression = COMPRESSION_NONE
876
876
877 deltaid = insertdelta(self._db, compression, deltahash, deltablob)
877 deltaid = insertdelta(self._db, compression, deltahash, deltablob)
878
878
879 self._db.execute(
879 self._db.execute(
880 'UPDATE fileindex SET deltaid=?, deltabaseid=NULL '
880 'UPDATE fileindex SET deltaid=?, deltabaseid=NULL '
881 'WHERE id=?',
881 'WHERE id=?',
882 (deltaid, rid),
882 (deltaid, rid),
883 )
883 )
884
884
885 # Now create the tombstone delta and replace the delta on the censored
885 # Now create the tombstone delta and replace the delta on the censored
886 # node.
886 # node.
887 deltahash = hashutil.sha1(tombstone).digest()
887 deltahash = hashutil.sha1(tombstone).digest()
888 tombstonedeltaid = insertdelta(
888 tombstonedeltaid = insertdelta(
889 self._db, COMPRESSION_NONE, deltahash, tombstone
889 self._db, COMPRESSION_NONE, deltahash, tombstone
890 )
890 )
891
891
892 flags = self._revisions[censornode].flags
892 flags = self._revisions[censornode].flags
893 flags |= FLAG_CENSORED
893 flags |= FLAG_CENSORED
894
894
895 self._db.execute(
895 self._db.execute(
896 'UPDATE fileindex SET flags=?, deltaid=?, deltabaseid=NULL '
896 'UPDATE fileindex SET flags=?, deltaid=?, deltabaseid=NULL '
897 'WHERE pathid=? AND node=?',
897 'WHERE pathid=? AND node=?',
898 (flags, tombstonedeltaid, self._pathid, censornode),
898 (flags, tombstonedeltaid, self._pathid, censornode),
899 )
899 )
900
900
901 self._db.execute('DELETE FROM delta WHERE id=?', (censoreddeltaid,))
901 self._db.execute('DELETE FROM delta WHERE id=?', (censoreddeltaid,))
902
902
903 self._refreshindex()
903 self._refreshindex()
904 self._revisioncache.clear()
904 self._revisioncache.clear()
905
905
906 def getstrippoint(self, minlink):
906 def getstrippoint(self, minlink):
907 return storageutil.resolvestripinfo(
907 return storageutil.resolvestripinfo(
908 minlink,
908 minlink,
909 len(self) - 1,
909 len(self) - 1,
910 [self.rev(n) for n in self.heads()],
910 [self.rev(n) for n in self.heads()],
911 self.linkrev,
911 self.linkrev,
912 self.parentrevs,
912 self.parentrevs,
913 )
913 )
914
914
915 def strip(self, minlink, transaction):
915 def strip(self, minlink, transaction):
916 if not len(self):
916 if not len(self):
917 return
917 return
918
918
919 rev, _ignored = self.getstrippoint(minlink)
919 rev, _ignored = self.getstrippoint(minlink)
920
920
921 if rev == len(self):
921 if rev == len(self):
922 return
922 return
923
923
924 for rev in self.revs(rev):
924 for rev in self.revs(rev):
925 self._db.execute(
925 self._db.execute(
926 'DELETE FROM fileindex WHERE pathid=? AND node=?',
926 'DELETE FROM fileindex WHERE pathid=? AND node=?',
927 (self._pathid, self.node(rev)),
927 (self._pathid, self.node(rev)),
928 )
928 )
929
929
930 # TODO how should we garbage collect data in delta table?
930 # TODO how should we garbage collect data in delta table?
931
931
932 self._refreshindex()
932 self._refreshindex()
933
933
934 # End of ifilemutation interface.
934 # End of ifilemutation interface.
935
935
936 # Start of ifilestorage interface.
936 # Start of ifilestorage interface.
937
937
938 def files(self):
938 def files(self):
939 return []
939 return []
940
940
941 def sidedata(self, nodeorrev, _df=None):
941 def sidedata(self, nodeorrev, _df=None):
942 # Not supported for now
942 # Not supported for now
943 return {}
943 return {}
944
944
945 def storageinfo(
945 def storageinfo(
946 self,
946 self,
947 exclusivefiles=False,
947 exclusivefiles=False,
948 sharedfiles=False,
948 sharedfiles=False,
949 revisionscount=False,
949 revisionscount=False,
950 trackedsize=False,
950 trackedsize=False,
951 storedsize=False,
951 storedsize=False,
952 ):
952 ):
953 d = {}
953 d = {}
954
954
955 if exclusivefiles:
955 if exclusivefiles:
956 d[b'exclusivefiles'] = []
956 d[b'exclusivefiles'] = []
957
957
958 if sharedfiles:
958 if sharedfiles:
959 # TODO list sqlite file(s) here.
959 # TODO list sqlite file(s) here.
960 d[b'sharedfiles'] = []
960 d[b'sharedfiles'] = []
961
961
962 if revisionscount:
962 if revisionscount:
963 d[b'revisionscount'] = len(self)
963 d[b'revisionscount'] = len(self)
964
964
965 if trackedsize:
965 if trackedsize:
966 d[b'trackedsize'] = sum(
966 d[b'trackedsize'] = sum(
967 len(self.revision(node)) for node in self._nodetorev
967 len(self.revision(node)) for node in self._nodetorev
968 )
968 )
969
969
970 if storedsize:
970 if storedsize:
971 # TODO implement this?
971 # TODO implement this?
972 d[b'storedsize'] = None
972 d[b'storedsize'] = None
973
973
974 return d
974 return d
975
975
976 def verifyintegrity(self, state):
976 def verifyintegrity(self, state):
977 state[b'skipread'] = set()
977 state[b'skipread'] = set()
978
978
979 for rev in self:
979 for rev in self:
980 node = self.node(rev)
980 node = self.node(rev)
981
981
982 try:
982 try:
983 self.revision(node)
983 self.revision(node)
984 except Exception as e:
984 except Exception as e:
985 yield sqliteproblem(
985 yield sqliteproblem(
986 error=_(b'unpacking %s: %s') % (short(node), e), node=node
986 error=_(b'unpacking %s: %s') % (short(node), e), node=node
987 )
987 )
988
988
989 state[b'skipread'].add(node)
989 state[b'skipread'].add(node)
990
990
991 # End of ifilestorage interface.
991 # End of ifilestorage interface.
992
992
993 def _checkhash(self, fulltext, node, p1=None, p2=None):
993 def _checkhash(self, fulltext, node, p1=None, p2=None):
994 if p1 is None and p2 is None:
994 if p1 is None and p2 is None:
995 p1, p2 = self.parents(node)
995 p1, p2 = self.parents(node)
996
996
997 if node == storageutil.hashrevisionsha1(fulltext, p1, p2):
997 if node == storageutil.hashrevisionsha1(fulltext, p1, p2):
998 return
998 return
999
999
1000 try:
1000 try:
1001 del self._revisioncache[node]
1001 del self._revisioncache[node]
1002 except KeyError:
1002 except KeyError:
1003 pass
1003 pass
1004
1004
1005 if storageutil.iscensoredtext(fulltext):
1005 if storageutil.iscensoredtext(fulltext):
1006 raise error.CensoredNodeError(self._path, node, fulltext)
1006 raise error.CensoredNodeError(self._path, node, fulltext)
1007
1007
1008 raise SQLiteStoreError(_(b'integrity check failed on %s') % self._path)
1008 raise SQLiteStoreError(_(b'integrity check failed on %s') % self._path)
1009
1009
    def _addrawrevision(
        self,
        node,
        revisiondata,
        transaction,
        linkrev,
        p1,
        p2,
        storedelta=None,
        flags=0,
    ):
        if self._pathid is None:
            res = self._db.execute(
                'INSERT INTO filepath (path) VALUES (?)', (self._path,)
            )
            self._pathid = res.lastrowid

        # For simplicity, always store a delta against p1.
        # TODO we need a lot more logic here to make behavior reasonable.

        if storedelta:
            deltabase, delta = storedelta

            if isinstance(deltabase, int):
                deltabase = self.node(deltabase)

        else:
            assert revisiondata is not None
            deltabase = p1

            if deltabase == sha1nodeconstants.nullid:
                delta = revisiondata
            else:
                delta = mdiff.textdiff(
                    self.revision(self.rev(deltabase)), revisiondata
                )

        # File index stores a pointer to its delta and the parent delta.
        # The parent delta is stored via a pointer to the fileindex PK.
        if deltabase == sha1nodeconstants.nullid:
            baseid = None
        else:
            baseid = self._revisions[deltabase].rid

        # Deltas are stored with a hash of their content. This allows
        # us to de-duplicate. The table is configured to ignore conflicts
        # and it is faster to just insert and silently noop than to look
        # first.
        deltahash = hashutil.sha1(delta).digest()

        if self._compengine == b'zstd':
            deltablob = self._cctx.compress(delta)
            compression = COMPRESSION_ZSTD
        elif self._compengine == b'zlib':
            deltablob = zlib.compress(delta)
            compression = COMPRESSION_ZLIB
        elif self._compengine == b'none':
            deltablob = delta
            compression = COMPRESSION_NONE
        else:
            raise error.ProgrammingError(
                b'unhandled compression engine: %s' % self._compengine
            )

        # Don't store compressed data if it isn't practical.
        if len(deltablob) >= len(delta):
            deltablob = delta
            compression = COMPRESSION_NONE

        deltaid = insertdelta(self._db, compression, deltahash, deltablob)

        rev = len(self)

        if p1 == sha1nodeconstants.nullid:
            p1rev = nullrev
        else:
            p1rev = self._nodetorev[p1]

        if p2 == sha1nodeconstants.nullid:
            p2rev = nullrev
        else:
            p2rev = self._nodetorev[p2]

        rid = self._db.execute(
            'INSERT INTO fileindex ('
            '    pathid, revnum, node, p1rev, p2rev, linkrev, flags, '
            '    deltaid, deltabaseid) '
            '    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
            (
                self._pathid,
                rev,
                node,
                p1rev,
                p2rev,
                linkrev,
                flags,
                deltaid,
                baseid,
            ),
        ).lastrowid

        entry = revisionentry(
            rid=rid,
            rev=rev,
            node=node,
            p1rev=p1rev,
            p2rev=p2rev,
            p1node=p1,
            p2node=p2,
            linkrev=linkrev,
            flags=flags,
        )

        self._nodetorev[node] = rev
        self._revtonode[rev] = node
        self._revisions[node] = entry

        return rev

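The de-duplication comment above relies on a content-addressed `delta` table keyed by the SHA-1 of the delta, so identical deltas are stored once. The `insertdelta` helper is defined elsewhere in this file; a plausible sketch of the pattern, assuming a UNIQUE constraint on the hash column configured to ignore conflicts, looks like:

def insertdelta_sketch(db, compression, deltahash, deltablob):
    # With a UNIQUE(hash) constraint, the insert is a silent no-op
    # when an identical delta is already stored.
    db.execute(
        'INSERT OR IGNORE INTO delta (compression, hash, delta) '
        'VALUES (?, ?, ?)',
        (compression, deltahash, deltablob),
    )
    # Either way, return the id of the (possibly pre-existing) row.
    return db.execute(
        'SELECT id FROM delta WHERE hash=?', (deltahash,)
    ).fetchone()[0]
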
class sqliterepository(localrepo.localrepository):
    def cancopy(self):
        return False

    def transaction(self, *args, **kwargs):
        current = self.currenttransaction()

        tr = super(sqliterepository, self).transaction(*args, **kwargs)

        if current:
            return tr

        self._dbconn.execute('BEGIN TRANSACTION')

        def committransaction(_):
            self._dbconn.commit()

        tr.addfinalize(b'sqlitestore', committransaction)

        return tr

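The method above ties SQLite's transaction to Mercurial's: a SQLite transaction is opened when the outermost hg transaction begins, and a finalize callback commits it when the hg transaction closes, so both succeed or fail together. A toy illustration of the finalize-callback shape (the transaction class is a stand-in, not Mercurial's):

import sqlite3


class toytransaction:
    def __init__(self):
        self._finalizers = []

    def addfinalize(self, name, callback):
        self._finalizers.append((name, callback))

    def close(self):
        # On a successful close, run callbacks in registration order.
        for name, callback in self._finalizers:
            callback(self)


db = sqlite3.connect(':memory:', isolation_level=None)  # manual transactions
db.execute('CREATE TABLE t (v)')

tr = toytransaction()
db.execute('BEGIN TRANSACTION')
db.execute('INSERT INTO t VALUES (1)')
tr.addfinalize('sqlitestore', lambda _tr: db.commit())
tr.close()  # the SQLite commit rides along with the transaction close
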
    @property
    def _dbconn(self):
        # SQLite connections can only be used on the thread that created
        # them. In most cases, this "just works." However, hgweb uses
        # multiple threads.
        tid = threading.current_thread().ident

        if self._db:
            if self._db[0] == tid:
                return self._db[1]

        db = makedb(self.svfs.join(b'db.sqlite'))
        self._db = (tid, db)

        return db

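The thread restriction the comment mentions is enforced by the sqlite3 module itself: with the default `check_same_thread=True`, using a connection from another thread raises `sqlite3.ProgrammingError`, which is why `_dbconn` caches one connection per thread ident. A quick demonstration:

import sqlite3
import threading

conn = sqlite3.connect(':memory:')  # created on the main thread


def use_from_other_thread():
    try:
        conn.execute('SELECT 1')
    except sqlite3.ProgrammingError as e:
        # sqlite3 objects may only be used in their creating thread.
        print('expected failure:', e)


t = threading.Thread(target=use_from_other_thread)
t.start()
t.join()
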
def makedb(path):
    """Construct a database handle for a database at path."""

    db = sqlite3.connect(encoding.strfromlocal(path))
    db.text_factory = bytes

    res = db.execute('PRAGMA user_version').fetchone()[0]

    # New database.
    if res == 0:
        for statement in CREATE_SCHEMA:
            db.execute(statement)

        db.commit()

    elif res == CURRENT_SCHEMA_VERSION:
        pass

    else:
        raise error.Abort(_(b'sqlite database has unrecognized version'))

    db.execute('PRAGMA journal_mode=WAL')

    return db

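`PRAGMA user_version` reads a 32-bit integer stored in the SQLite database header; it defaults to 0, which is why zero reliably identifies a freshly created database. A self-contained sketch of the same versioning scheme (the schema and version number here are illustrative):

import sqlite3

CURRENT_SCHEMA_VERSION = 1
CREATE_SCHEMA = [
    'CREATE TABLE kv (key TEXT PRIMARY KEY, value BLOB)',
    'PRAGMA user_version=1',  # stamp the schema version last
]


def makedb_sketch(path):
    db = sqlite3.connect(path)
    version = db.execute('PRAGMA user_version').fetchone()[0]
    if version == 0:
        for statement in CREATE_SCHEMA:
            db.execute(statement)
        db.commit()
    elif version != CURRENT_SCHEMA_VERSION:
        raise RuntimeError('unrecognized schema version %d' % version)
    return db
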
def featuresetup(ui, supported):
    supported.add(REQUIREMENT)

    if zstd:
        supported.add(REQUIREMENT_ZSTD)

    supported.add(REQUIREMENT_ZLIB)
    supported.add(REQUIREMENT_NONE)
    supported.add(REQUIREMENT_SHALLOW_FILES)
    supported.add(requirements.NARROW_REQUIREMENT)

def newreporequirements(orig, ui, createopts):
    if createopts[b'backend'] != b'sqlite':
        return orig(ui, createopts)

    # This restriction can be lifted once we have more confidence.
    if b'sharedrepo' in createopts:
        raise error.Abort(
            _(b'shared repositories not supported with SQLite store')
        )

    # This filtering is out of an abundance of caution: we want to ensure
    # we honor creation options and we do that by annotating exactly the
    # creation options we recognize.
    known = {
        b'narrowfiles',
        b'backend',
        b'shallowfilestore',
    }

    unsupported = set(createopts) - known
    if unsupported:
        raise error.Abort(
            _(b'SQLite store does not support repo creation option: %s')
            % b', '.join(sorted(unsupported))
        )

    # Since we're a hybrid store that still relies on revlogs, we fall back
    # to using the revlogv1 backend's storage requirements then adding our
    # own requirement.
    createopts[b'backend'] = b'revlogv1'
    requirements = orig(ui, createopts)
    requirements.add(REQUIREMENT)

    compression = ui.config(b'storage', b'sqlite.compression')

    if compression == b'zstd' and not zstd:
        raise error.Abort(
            _(
                b'storage.sqlite.compression set to "zstd" but '
                b'zstandard compression not available to this '
                b'Mercurial install'
            )
        )

    if compression == b'zstd':
        requirements.add(REQUIREMENT_ZSTD)
    elif compression == b'zlib':
        requirements.add(REQUIREMENT_ZLIB)
    elif compression == b'none':
        requirements.add(REQUIREMENT_NONE)
    else:
        raise error.Abort(
            _(
                b'unknown compression engine defined in '
                b'storage.sqlite.compression: %s'
            )
            % compression
        )

    if createopts.get(b'shallowfilestore'):
        requirements.add(REQUIREMENT_SHALLOW_FILES)

    return requirements

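The compression requirement is picked at repo-creation time from the `storage.sqlite.compression` config option read above. To create new repositories with this backend using zstandard deltas, an hgrc along these lines should work (assuming zstd support is available in the Mercurial install):

[extensions]
sqlitestore =

[storage]
new-repo-backend = sqlite
sqlite.compression = zstd
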
@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class sqlitefilestorage:
    """Repository file storage backed by SQLite."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        if REQUIREMENT_ZSTD in self.requirements:
            compression = b'zstd'
        elif REQUIREMENT_ZLIB in self.requirements:
            compression = b'zlib'
        elif REQUIREMENT_NONE in self.requirements:
            compression = b'none'
        else:
            raise error.Abort(
                _(
                    b'unable to determine what compression engine '
                    b'to use for SQLite storage'
                )
            )

        return sqlitefilestore(self._dbconn, path, compression)

def makefilestorage(orig, requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    if REQUIREMENT in requirements:
        if REQUIREMENT_SHALLOW_FILES in requirements:
            features.add(repository.REPO_FEATURE_SHALLOW_FILE_STORAGE)

        return sqlitefilestorage
    else:
        return orig(requirements=requirements, features=features, **kwargs)

def makemain(orig, ui, requirements, **kwargs):
    if REQUIREMENT in requirements:
        if REQUIREMENT_ZSTD in requirements and not zstd:
            raise error.Abort(
                _(
                    b'repository uses zstandard compression, which '
                    b'is not available to this Mercurial install'
                )
            )

        return sqliterepository

    return orig(requirements=requirements, **kwargs)

def verifierinit(orig, self, *args, **kwargs):
    orig(self, *args, **kwargs)

    # We don't care that files in the store don't align with what is
    # advertised. So suppress these warnings.
    self.warnorphanstorefiles = False

def extsetup(ui):
    localrepo.featuresetupfuncs.add(featuresetup)
    extensions.wrapfunction(
        localrepo, b'newreporequirements', newreporequirements
    )
-    extensions.wrapfunction(localrepo, b'makefilestorage', makefilestorage)
-    extensions.wrapfunction(localrepo, b'makemain', makemain)
-    extensions.wrapfunction(verify.verifier, b'__init__', verifierinit)
+    extensions.wrapfunction(localrepo, 'makefilestorage', makefilestorage)
+    extensions.wrapfunction(localrepo, 'makemain', makemain)
+    extensions.wrapfunction(verify.verifier, '__init__', verifierinit)

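This hunk is the substance of the revision: the attribute name passed to `extensions.wrapfunction` is now a native string (sysstr) rather than bytes, matching what `getattr`/`setattr` expect. The wrapper it installs receives the original callable as its first argument; roughly, in a simplified sketch (the stand-in module and names are illustrative, not Mercurial's implementation):

import types

fakemodule = types.SimpleNamespace(greet=lambda name: 'hello %s' % name)


def wrapfunction_sketch(container, funcname, wrapper):
    # Look up by native-string name, swap in a closure over the original.
    origfn = getattr(container, funcname)

    def wrapped(*args, **kwargs):
        return wrapper(origfn, *args, **kwargs)

    setattr(container, funcname, wrapped)
    return origfn


def loggedgreet(orig, name):
    print('calling greet')  # extra behavior around the original
    return orig(name)


wrapfunction_sketch(fakemodule, 'greet', loggedgreet)
print(fakemodule.greet('world'))  # logs, then prints 'hello world'
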
def reposetup(ui, repo):
    if isinstance(repo, sqliterepository):
        repo._db = None

    # TODO check for bundlerepository?