# sqlitestore.py - Storage backend that uses SQLite
#
# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""store repository data in SQLite (EXPERIMENTAL)

The sqlitestore extension enables the storage of repository data in SQLite.

This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
GUARANTEES. This means that repositories created with this extension may
only be usable with the exact version of this extension/Mercurial that was
used. The extension attempts to enforce this in order to prevent repository
corruption.

In addition, several features are not yet supported or have known bugs:

* Only some data is stored in SQLite. Changeset, manifest, and other repository
  data is not yet stored in SQLite.
* Transactions are not robust. If the process is aborted at the right time
  during transaction close/rollback, the repository could be in an inconsistent
  state. This problem will diminish once all repository data is tracked by
  SQLite.
* Bundle repositories do not work (the ability to use e.g.
  `hg -R <bundle-file> log` to automatically overlay a bundle on top of the
  existing repository).
* Various other features don't work.

This extension should work for basic clone/pull, update, and commit workflows.
Some history rewriting operations may fail due to lack of support for bundle
repositories.

To use, activate the extension and set the ``storage.new-repo-backend`` config
option to ``sqlite`` to enable new repositories to use SQLite for storage.
"""

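# An hgrc enabling this backend for newly created repositories might look
# like the following (illustrative; the option names are the ones this
# module defines and documents above):
#
#   [extensions]
#   sqlitestore =
#
#   [storage]
#   new-repo-backend = sqlite
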
# To run the test suite with repos using SQLite by default, execute the
# following:
#
# HGREPOFEATURES="sqlitestore" run-tests.py \
#     --extra-config-opt extensions.sqlitestore= \
#     --extra-config-opt storage.new-repo-backend=sqlite

from __future__ import absolute_import

import hashlib
import sqlite3
import struct
import threading
import zlib

from mercurial.i18n import _
from mercurial.node import (
    nullid,
    nullrev,
    short,
)
from mercurial.thirdparty import (
    attr,
)
from mercurial import (
    ancestor,
    dagop,
    error,
    extensions,
    localrepo,
    mdiff,
    pycompat,
    registrar,
    repository,
    util,
    verify,
)
from mercurial.utils import (
    interfaceutil,
    storageutil,
)

try:
    from mercurial import zstd
    zstd.__version__
except ImportError:
    zstd = None

configtable = {}
configitem = registrar.configitem(configtable)

# experimental config: storage.sqlite.compression
configitem('storage', 'sqlite.compression',
           default='zstd' if zstd else 'zlib')

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

REQUIREMENT = b'exp-sqlite-001'
REQUIREMENT_ZSTD = b'exp-sqlite-comp-001=zstd'
REQUIREMENT_ZLIB = b'exp-sqlite-comp-001=zlib'
REQUIREMENT_NONE = b'exp-sqlite-comp-001=none'
REQUIREMENT_SHALLOW_FILES = b'exp-sqlite-shallow-files'

CURRENT_SCHEMA_VERSION = 1

COMPRESSION_NONE = 1
COMPRESSION_ZSTD = 2
COMPRESSION_ZLIB = 3

FLAG_CENSORED = 1
FLAG_MISSING_P1 = 2
FLAG_MISSING_P2 = 4

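# The FLAG_* values are bits in the fileindex.flags bitfield and may be
# combined. For example, a censored revision whose first parent was unknown
# when it was received would store FLAG_CENSORED | FLAG_MISSING_P1 == 3.
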
CREATE_SCHEMA = [
    # Deltas are stored as content-indexed blobs.
    # compression column holds COMPRESSION_* constant for how the
    # delta is encoded.

    r'CREATE TABLE delta ('
    r'  id INTEGER PRIMARY KEY, '
    r'  compression INTEGER NOT NULL, '
    r'  hash BLOB UNIQUE ON CONFLICT ABORT, '
    r'  delta BLOB NOT NULL '
    r')',

    # Tracked paths are denormalized to integers to avoid redundant
    # storage of the path name.
    r'CREATE TABLE filepath ('
    r'  id INTEGER PRIMARY KEY, '
    r'  path BLOB NOT NULL '
    r')',

    r'CREATE UNIQUE INDEX filepath_path '
    r'  ON filepath (path)',

    # We have a single table for all file revision data.
    # Each file revision is uniquely described by a (path, rev) and
    # (path, node).
    #
    # Revision data is stored as a pointer to the delta producing this
    # revision and the file revision whose delta should be applied before
    # that one. One can reconstruct the delta chain by recursively following
    # the delta base revision pointers until one encounters NULL.
    #
    # flags column holds bitwise integer flags controlling storage options.
    # These flags are defined by the FLAG_* constants.
    r'CREATE TABLE fileindex ('
    r'  id INTEGER PRIMARY KEY, '
    r'  pathid INTEGER REFERENCES filepath(id), '
    r'  revnum INTEGER NOT NULL, '
    r'  p1rev INTEGER NOT NULL, '
    r'  p2rev INTEGER NOT NULL, '
    r'  linkrev INTEGER NOT NULL, '
    r'  flags INTEGER NOT NULL, '
    r'  deltaid INTEGER REFERENCES delta(id), '
    r'  deltabaseid INTEGER REFERENCES fileindex(id), '
    r'  node BLOB NOT NULL '
    r')',

    r'CREATE UNIQUE INDEX fileindex_pathrevnum '
    r'  ON fileindex (pathid, revnum)',

    r'CREATE UNIQUE INDEX fileindex_pathnode '
    r'  ON fileindex (pathid, node)',

    # Provide a view over all file data for convenience.
    r'CREATE VIEW filedata AS '
    r'SELECT '
    r'  fileindex.id AS id, '
    r'  filepath.id AS pathid, '
    r'  filepath.path AS path, '
    r'  fileindex.revnum AS revnum, '
    r'  fileindex.node AS node, '
    r'  fileindex.p1rev AS p1rev, '
    r'  fileindex.p2rev AS p2rev, '
    r'  fileindex.linkrev AS linkrev, '
    r'  fileindex.flags AS flags, '
    r'  fileindex.deltaid AS deltaid, '
    r'  fileindex.deltabaseid AS deltabaseid '
    r'FROM filepath, fileindex '
    r'WHERE fileindex.pathid=filepath.id',

    r'PRAGMA user_version=%d' % CURRENT_SCHEMA_VERSION,
]
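
# A minimal sketch of executing this schema against a fresh database
# (illustrative only; the real setup happens during repository creation,
# outside this excerpt):
#
#   db = sqlite3.connect(':memory:')
#   for statement in CREATE_SCHEMA:
#       db.execute(statement)
#   db.commit()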

def resolvedeltachain(db, pathid, node, revisioncache,
                      stoprids, zstddctx=None):
    """Resolve a delta chain for a file node."""

    # TODO the "not in ({stops})" here is possibly slowing down the query
    # because it needs to perform the lookup on every recursive invocation.
    # This could possibly be faster if we created a temporary query with
    # baseid "poisoned" to null and limited the recursive filter to
    # "is not null".
    res = db.execute(
        r'WITH RECURSIVE '
        r'  deltachain(deltaid, baseid) AS ('
        r'    SELECT deltaid, deltabaseid FROM fileindex '
        r'      WHERE pathid=? AND node=? '
        r'    UNION ALL '
        r'    SELECT fileindex.deltaid, deltabaseid '
        r'      FROM fileindex, deltachain '
        r'      WHERE '
        r'        fileindex.id=deltachain.baseid '
        r'        AND deltachain.baseid IS NOT NULL '
        r'        AND fileindex.id NOT IN ({stops}) '
        r'  ) '
        r'SELECT deltachain.baseid, compression, delta '
        r'FROM deltachain, delta '
        r'WHERE delta.id=deltachain.deltaid'.format(
            stops=r','.join([r'?'] * len(stoprids))),
        tuple([pathid, node] + list(stoprids.keys())))

    deltas = []
    lastdeltabaseid = None

    for deltabaseid, compression, delta in res:
        lastdeltabaseid = deltabaseid

        if compression == COMPRESSION_ZSTD:
            delta = zstddctx.decompress(delta)
        elif compression == COMPRESSION_NONE:
            delta = delta
        elif compression == COMPRESSION_ZLIB:
            delta = zlib.decompress(delta)
        else:
            raise SQLiteStoreError('unhandled compression type: %d' %
                                   compression)

        deltas.append(delta)

    if lastdeltabaseid in stoprids:
        basetext = revisioncache[stoprids[lastdeltabaseid]]
    else:
        basetext = deltas.pop()

    deltas.reverse()
    fulltext = mdiff.patches(basetext, deltas)

    # SQLite returns buffer instances for blob columns on Python 2. This
    # type can propagate through the delta application layer. Because
    # downstream callers assume revisions are bytes, cast as needed.
    if not isinstance(fulltext, bytes):
        fulltext = bytes(fulltext)

    return fulltext
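
# To illustrate the traversal above with assumed data: if revision C is stored
# as a delta against B, and B against A (a fulltext row whose deltabaseid is
# NULL), the recursive query returns the C, B, A rows in that order. The loop
# collects the decompressed blobs, pops A off as the base text, reverses the
# remainder to [B-delta, C-delta], and mdiff.patches() applies them
# oldest-first to produce C's fulltext.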

def insertdelta(db, compression, hash, delta):
    try:
        return db.execute(
            r'INSERT INTO delta (compression, hash, delta) '
            r'VALUES (?, ?, ?)',
            (compression, hash, delta)).lastrowid
    except sqlite3.IntegrityError:
        return db.execute(
            r'SELECT id FROM delta WHERE hash=?',
            (hash,)).fetchone()[0]

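# Deltas are content addressed: the UNIQUE constraint on delta.hash means
# inserting an already-stored blob raises sqlite3.IntegrityError, which
# insertdelta() resolves to the existing row's id instead of duplicating it.
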
class SQLiteStoreError(error.StorageError):
    pass

@attr.s
class revisionentry(object):
    rid = attr.ib()
    rev = attr.ib()
    node = attr.ib()
    p1rev = attr.ib()
    p2rev = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    linkrev = attr.ib()
    flags = attr.ib()

@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class sqliterevisiondelta(object):
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    linknode = attr.ib(default=None)

@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class sqliteproblem(object):
    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)

@interfaceutil.implementer(repository.ifilestorage)
class sqlitefilestore(object):
    """Implements storage for an individual tracked path."""

    def __init__(self, db, path, compression):
        self._db = db
        self._path = path

        self._pathid = None

        # revnum -> node
        self._revtonode = {}
        # node -> revnum
        self._nodetorev = {}
        # node -> data structure
        self._revisions = {}

        self._revisioncache = util.lrucachedict(10)

        self._compengine = compression

        if compression == 'zstd':
            self._cctx = zstd.ZstdCompressor(level=3)
            self._dctx = zstd.ZstdDecompressor()
        else:
            self._cctx = None
            self._dctx = None

        self._refreshindex()

    def _refreshindex(self):
        self._revtonode = {}
        self._nodetorev = {}
        self._revisions = {}

        res = list(self._db.execute(
            r'SELECT id FROM filepath WHERE path=?', (self._path,)))

        if not res:
            self._pathid = None
            return

        self._pathid = res[0][0]

        res = self._db.execute(
            r'SELECT id, revnum, node, p1rev, p2rev, linkrev, flags '
            r'FROM fileindex '
            r'WHERE pathid=? '
            r'ORDER BY revnum ASC',
            (self._pathid,))

        for i, row in enumerate(res):
            rid, rev, node, p1rev, p2rev, linkrev, flags = row

            if i != rev:
                raise SQLiteStoreError(_('sqlite database has inconsistent '
                                         'revision numbers'))

            if p1rev == nullrev:
                p1node = nullid
            else:
                p1node = self._revtonode[p1rev]

            if p2rev == nullrev:
                p2node = nullid
            else:
                p2node = self._revtonode[p2rev]

            entry = revisionentry(
                rid=rid,
                rev=rev,
                node=node,
                p1rev=p1rev,
                p2rev=p2rev,
                p1node=p1node,
                p2node=p2node,
                linkrev=linkrev,
                flags=flags)

            self._revtonode[rev] = node
            self._nodetorev[node] = rev
            self._revisions[node] = entry

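    # To make the index shape above concrete: for a store holding a single
    # revision with node N, _refreshindex() would leave _revtonode == {0: N},
    # _nodetorev == {N: 0}, and _revisions == {N: revisionentry(...)}.
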
    # Start of ifileindex interface.

    def __len__(self):
        return len(self._revisions)

    def __iter__(self):
        return iter(pycompat.xrange(len(self._revisions)))

    def hasnode(self, node):
        if node == nullid:
            return False

        return node in self._nodetorev

    def revs(self, start=0, stop=None):
        return storageutil.iterrevs(len(self._revisions), start=start,
                                    stop=stop)

    def parents(self, node):
        if node == nullid:
            return nullid, nullid

        if node not in self._revisions:
            raise error.LookupError(node, self._path, _('no node'))

        entry = self._revisions[node]
        return entry.p1node, entry.p2node

    def parentrevs(self, rev):
        if rev == nullrev:
            return nullrev, nullrev

        if rev not in self._revtonode:
            raise IndexError(rev)

        entry = self._revisions[self._revtonode[rev]]
        return entry.p1rev, entry.p2rev

    def rev(self, node):
        if node == nullid:
            return nullrev

        if node not in self._nodetorev:
            raise error.LookupError(node, self._path, _('no node'))

        return self._nodetorev[node]

    def node(self, rev):
        if rev == nullrev:
            return nullid

        if rev not in self._revtonode:
            raise IndexError(rev)

        return self._revtonode[rev]

    def lookup(self, node):
        return storageutil.fileidlookup(self, node, self._path)

    def linkrev(self, rev):
        if rev == nullrev:
            return nullrev

        if rev not in self._revtonode:
            raise IndexError(rev)

        entry = self._revisions[self._revtonode[rev]]
        return entry.linkrev

    def iscensored(self, rev):
        if rev == nullrev:
            return False

        if rev not in self._revtonode:
            raise IndexError(rev)

        return self._revisions[self._revtonode[rev]].flags & FLAG_CENSORED

    def commonancestorsheads(self, node1, node2):
        rev1 = self.rev(node1)
        rev2 = self.rev(node2)

        ancestors = ancestor.commonancestorsheads(self.parentrevs, rev1, rev2)
        return pycompat.maplist(self.node, ancestors)

    def descendants(self, revs):
        # TODO we could implement this using a recursive SQL query, which
        # might be faster.
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)

    def heads(self, start=None, stop=None):
        if start is None and stop is None:
            if not len(self):
                return [nullid]

        startrev = self.rev(start) if start is not None else nullrev
        stoprevs = {self.rev(n) for n in stop or []}

        revs = dagop.headrevssubset(self.revs, self.parentrevs,
                                    startrev=startrev, stoprevs=stoprevs)

        return [self.node(rev) for rev in revs]

    def children(self, node):
        rev = self.rev(node)

        res = self._db.execute(
            r'SELECT'
            r'  node '
            r'  FROM filedata '
            r'  WHERE path=? AND (p1rev=? OR p2rev=?) '
            r'  ORDER BY revnum ASC',
            (self._path, rev, rev))

        return [row[0] for row in res]

    # End of ifileindex interface.

    # Start of ifiledata interface.

    def size(self, rev):
        if rev == nullrev:
            return 0

        if rev not in self._revtonode:
            raise IndexError(rev)

        node = self._revtonode[rev]

        if self.renamed(node):
            return len(self.read(node))

        return len(self.revision(node))

    def revision(self, node, raw=False, _verifyhash=True):
        if node in (nullid, nullrev):
            return b''

        if isinstance(node, int):
            node = self.node(node)

        if node not in self._nodetorev:
            raise error.LookupError(node, self._path, _('no node'))

        if node in self._revisioncache:
            return self._revisioncache[node]

        # Because we have a fulltext revision cache, we are able to
        # short-circuit delta chain traversal and decompression as soon as
        # we encounter a revision in the cache.

        stoprids = {self._revisions[n].rid: n
                    for n in self._revisioncache}

        if not stoprids:
            stoprids[-1] = None

        fulltext = resolvedeltachain(self._db, self._pathid, node,
                                     self._revisioncache, stoprids,
                                     zstddctx=self._dctx)

        # Don't verify hashes if parent nodes were rewritten, as the hash
        # wouldn't verify.
        if self._revisions[node].flags & (FLAG_MISSING_P1 | FLAG_MISSING_P2):
            _verifyhash = False

        if _verifyhash:
            self._checkhash(fulltext, node)
            self._revisioncache[node] = fulltext

        return fulltext

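    # A note on stoprids above: it maps fileindex row ids of cached revisions
    # to their nodes so the SQL walk can stop early. The {-1: None} sentinel
    # (presumably chosen because row ids are never -1) keeps the NOT IN (...)
    # clause non-empty when the cache is cold.
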
    def read(self, node):
        return storageutil.filtermetadata(self.revision(node))

    def renamed(self, node):
        return storageutil.filerevisioncopied(self, node)

    def cmp(self, node, fulltext):
        return not storageutil.filedataequivalent(self, node, fulltext)

    def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
                      assumehaveparentrevisions=False, deltaprevious=False):
        if nodesorder not in ('nodes', 'storage', None):
            raise error.ProgrammingError('unhandled value for nodesorder: %s' %
                                         nodesorder)

        nodes = [n for n in nodes if n != nullid]

        if not nodes:
            return

        # TODO perform in a single query.
        res = self._db.execute(
            r'SELECT revnum, deltaid FROM fileindex '
            r'WHERE pathid=? '
            r'  AND node in (%s)' % (r','.join([r'?'] * len(nodes))),
            tuple([self._pathid] + nodes))

        deltabases = {}

        for rev, deltaid in res:
            res = self._db.execute(
                r'SELECT revnum from fileindex WHERE pathid=? AND deltaid=?',
                (self._pathid, deltaid))
            deltabases[rev] = res.fetchone()[0]

        # TODO define revdifffn so we can use delta from storage.
        for delta in storageutil.emitrevisions(
            self, nodes, nodesorder, sqliterevisiondelta,
            deltaparentfn=deltabases.__getitem__,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            deltaprevious=deltaprevious):

            yield delta

    # End of ifiledata interface.

    # Start of ifilemutation interface.

    def add(self, filedata, meta, transaction, linkrev, p1, p2):
        if meta or filedata.startswith(b'\x01\n'):
            filedata = storageutil.packmeta(meta, filedata)

        return self.addrevision(filedata, transaction, linkrev, p1, p2)

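    # add() wraps file data in a metadata header when needed: content that
    # itself begins with b'\x01\n' must be escaped even with empty meta,
    # since that byte sequence is what delimits the metadata block.
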
    def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
                    node=None, flags=0, cachedelta=None):
        if flags:
            raise SQLiteStoreError(_('flags not supported on revisions'))

        validatehash = node is not None
        node = node or storageutil.hashrevisionsha1(revisiondata, p1, p2)

        if validatehash:
            self._checkhash(revisiondata, node, p1, p2)

        if node in self._nodetorev:
            return node

        node = self._addrawrevision(node, revisiondata, transaction, linkrev,
                                    p1, p2)

        self._revisioncache[node] = revisiondata
        return node

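    # Node identity is storageutil.hashrevisionsha1(text, p1, p2), so when a
    # caller supplies an explicit node, addrevision() can cross-check it via
    # _checkhash() before anything is written.
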
    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None,
                 maybemissingparents=False):
        nodes = []

        for node, p1, p2, linknode, deltabase, delta, wireflags in deltas:
            storeflags = 0

            if wireflags & repository.REVISION_FLAG_CENSORED:
                storeflags |= FLAG_CENSORED

            if wireflags & ~repository.REVISION_FLAG_CENSORED:
                raise SQLiteStoreError('unhandled revision flag')

            if maybemissingparents:
                if p1 != nullid and not self.hasnode(p1):
                    p1 = nullid
                    storeflags |= FLAG_MISSING_P1

                if p2 != nullid and not self.hasnode(p2):
                    p2 = nullid
                    storeflags |= FLAG_MISSING_P2

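            # A parent recorded as nullid with a FLAG_MISSING_P* bit set is
            # provisional: if the true parent arrives in a later group, the
            # "node in self._revisions" branch below restores the parent
            # pointers and clears the flag.
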
            baserev = self.rev(deltabase)

            # If base is censored, delta must be full replacement in a single
            # patch operation.
            if baserev != nullrev and self.iscensored(baserev):
                hlen = struct.calcsize('>lll')
                oldlen = len(self.revision(deltabase, raw=True,
                                           _verifyhash=False))
                newlen = len(delta) - hlen

                if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                    raise error.CensoredBaseError(self._path,
                                                  deltabase)

            if (not (storeflags & FLAG_CENSORED)
                and storageutil.deltaiscensored(
                    delta, baserev, lambda x: len(self.revision(x, raw=True)))):
                storeflags |= FLAG_CENSORED

            linkrev = linkmapper(linknode)

            nodes.append(node)

            if node in self._revisions:
                # Possibly reset parents to make them proper.
                entry = self._revisions[node]

                if entry.flags & FLAG_MISSING_P1 and p1 != nullid:
                    entry.p1node = p1
                    entry.p1rev = self._nodetorev[p1]
                    entry.flags &= ~FLAG_MISSING_P1

                    self._db.execute(
                        r'UPDATE fileindex SET p1rev=?, flags=? '
                        r'WHERE id=?',
                        (self._nodetorev[p1], entry.flags, entry.rid))

                if entry.flags & FLAG_MISSING_P2 and p2 != nullid:
                    entry.p2node = p2
                    entry.p2rev = self._nodetorev[p2]
                    entry.flags &= ~FLAG_MISSING_P2

                    self._db.execute(
                        r'UPDATE fileindex SET p2rev=?, flags=? '
                        r'WHERE id=?',
                        (self._nodetorev[p2], entry.flags, entry.rid))

                continue

            if deltabase == nullid:
                text = mdiff.patch(b'', delta)
                storedelta = None
            else:
                text = None
                storedelta = (deltabase, delta)

            self._addrawrevision(node, text, transaction, linkrev, p1, p2,
                                 storedelta=storedelta, flags=storeflags)

            if addrevisioncb:
                addrevisioncb(self, node)

        return nodes

    def censorrevision(self, tr, censornode, tombstone=b''):
        tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

        # This restriction is cargo culted from revlogs and makes no sense for
        # SQLite, since columns can be resized at will.
        if len(tombstone) > len(self.revision(censornode, raw=True)):
            raise error.Abort(_('censor tombstone must be no longer than '
                                'censored data'))

        # We need to replace the censored revision's data with the tombstone.
        # But replacing that data will have implications for delta chains that
        # reference it.
        #
        # While "better," more complex strategies are possible, we do something
        # simple: we find delta chain children of the censored revision and we
        # replace those incremental deltas with fulltexts of their
        # corresponding revision. Then we delete the now-unreferenced delta
        # and original revision and insert a replacement.

        # Find the delta to be censored.
        censoreddeltaid = self._db.execute(
            r'SELECT deltaid FROM fileindex WHERE id=?',
            (self._revisions[censornode].rid,)).fetchone()[0]

        # Find all its delta chain children.
        # TODO once we support storing deltas for !files, we'll need to look
        # for those delta chains too.
        rows = list(self._db.execute(
            r'SELECT id, pathid, node FROM fileindex '
            r'WHERE deltabaseid=? OR deltaid=?',
            (censoreddeltaid, censoreddeltaid)))

        for row in rows:
            rid, pathid, node = row

            fulltext = resolvedeltachain(self._db, pathid, node, {},
                                         {-1: None}, zstddctx=self._dctx)

            deltahash = hashlib.sha1(fulltext).digest()

            if self._compengine == 'zstd':
                deltablob = self._cctx.compress(fulltext)
                compression = COMPRESSION_ZSTD
            elif self._compengine == 'zlib':
                deltablob = zlib.compress(fulltext)
                compression = COMPRESSION_ZLIB
            elif self._compengine == 'none':
                deltablob = fulltext
                compression = COMPRESSION_NONE
            else:
                raise error.ProgrammingError('unhandled compression engine: '
                                             '%s' % self._compengine)

            if len(deltablob) >= len(fulltext):
                deltablob = fulltext
                compression = COMPRESSION_NONE

            deltaid = insertdelta(self._db, compression, deltahash, deltablob)

            self._db.execute(
                r'UPDATE fileindex SET deltaid=?, deltabaseid=NULL '
                r'WHERE id=?', (deltaid, rid))

        # Now create the tombstone delta and replace the delta on the censored
        # node.
        deltahash = hashlib.sha1(tombstone).digest()
        tombstonedeltaid = insertdelta(self._db, COMPRESSION_NONE,
                                       deltahash, tombstone)

        flags = self._revisions[censornode].flags
        flags |= FLAG_CENSORED

        self._db.execute(
            r'UPDATE fileindex SET flags=?, deltaid=?, deltabaseid=NULL '
            r'WHERE pathid=? AND node=?',
            (flags, tombstonedeltaid, self._pathid, censornode))

        self._db.execute(
            r'DELETE FROM delta WHERE id=?', (censoreddeltaid,))

        self._refreshindex()
        self._revisioncache.clear()

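    # The net effect of censorrevision(): no surviving revision's delta chain
    # passes through the censored data, because every delta-chain child was
    # re-stored as a fulltext before the tombstone replaced the original
    # delta.
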
760 | def getstrippoint(self, minlink): |
|
795 | def getstrippoint(self, minlink): | |
761 | return storageutil.resolvestripinfo(minlink, len(self) - 1, |
|
796 | return storageutil.resolvestripinfo(minlink, len(self) - 1, | |
762 | [self.rev(n) for n in self.heads()], |
|
797 | [self.rev(n) for n in self.heads()], | |
763 | self.linkrev, |
|
798 | self.linkrev, | |
764 | self.parentrevs) |
|
799 | self.parentrevs) | |
765 |
|
800 | |||
766 | def strip(self, minlink, transaction): |
|
801 | def strip(self, minlink, transaction): | |
767 | if not len(self): |
|
802 | if not len(self): | |
768 | return |
|
803 | return | |
769 |
|
804 | |||
770 | rev, _ignored = self.getstrippoint(minlink) |
|
805 | rev, _ignored = self.getstrippoint(minlink) | |
771 |
|
806 | |||
772 | if rev == len(self): |
|
807 | if rev == len(self): | |
773 | return |
|
808 | return | |
774 |
|
809 | |||
775 | for rev in self.revs(rev): |
|
810 | for rev in self.revs(rev): | |
776 | self._db.execute( |
|
811 | self._db.execute( | |
777 | r'DELETE FROM fileindex WHERE pathid=? AND node=?', |
|
812 | r'DELETE FROM fileindex WHERE pathid=? AND node=?', | |
778 | (self._pathid, self.node(rev))) |
|
813 | (self._pathid, self.node(rev))) | |
779 |
|
814 | |||
780 | # TODO how should we garbage collect data in delta table? |
|
815 | # TODO how should we garbage collect data in delta table? | |

        self._refreshindex()

    # End of ifilemutation interface.

    # Start of ifilestorage interface.

    def files(self):
        return []

    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        d = {}

        if exclusivefiles:
            d['exclusivefiles'] = []

        if sharedfiles:
            # TODO list sqlite file(s) here.
            d['sharedfiles'] = []

        if revisionscount:
            d['revisionscount'] = len(self)

        if trackedsize:
            d['trackedsize'] = sum(len(self.revision(node))
                                   for node in self._nodetorev)

        if storedsize:
            # TODO implement this?
            d['storedsize'] = None

        return d

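    # revision() re-validates each fulltext against its stored parents
    # (via _checkhash() below), so simply requesting every revision is
    # enough to surface corruption as an exception.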
    def verifyintegrity(self, state):
        state['skipread'] = set()

        for rev in self:
            node = self.node(rev)

            try:
                self.revision(node)
            except Exception as e:
                yield sqliteproblem(
                    error=_('unpacking %s: %s') % (short(node), e),
                    node=node)

                state['skipread'].add(node)

    # End of ifilestorage interface.

    def _checkhash(self, fulltext, node, p1=None, p2=None):
        if p1 is None and p2 is None:
            p1, p2 = self.parents(node)

        if node == storageutil.hashrevisionsha1(fulltext, p1, p2):
            return

        try:
            del self._revisioncache[node]
        except KeyError:
            pass

        if storageutil.iscensoredtext(fulltext):
            raise error.CensoredNodeError(self._path, node, fulltext)

        raise SQLiteStoreError(_('integrity check failed on %s') %
                               self._path)

    def _addrawrevision(self, node, revisiondata, transaction, linkrev,
                        p1, p2, storedelta=None, flags=0):
        if self._pathid is None:
            res = self._db.execute(
                r'INSERT INTO filepath (path) VALUES (?)', (self._path,))
            self._pathid = res.lastrowid

        # For simplicity, always store a delta against p1.
        # TODO we need a lot more logic here to make behavior reasonable.

        if storedelta:
            deltabase, delta = storedelta

            if isinstance(deltabase, int):
                deltabase = self.node(deltabase)

        else:
            assert revisiondata is not None
            deltabase = p1

            if deltabase == nullid:
                delta = revisiondata
            else:
                delta = mdiff.textdiff(self.revision(self.rev(deltabase)),
                                       revisiondata)

        # File index stores a pointer to its delta and the parent delta.
        # The parent delta is stored via a pointer to the fileindex PK.
        if deltabase == nullid:
            baseid = None
        else:
            baseid = self._revisions[deltabase].rid

        # Deltas are stored with a hash of their content. This allows
        # us to de-duplicate. The table is configured to ignore conflicts
        # and it is faster to just insert and silently noop than to look
        # first.
        deltahash = hashlib.sha1(delta).digest()

        if self._compengine == 'zstd':
            deltablob = self._cctx.compress(delta)
            compression = COMPRESSION_ZSTD
        elif self._compengine == 'zlib':
            deltablob = zlib.compress(delta)
            compression = COMPRESSION_ZLIB
        elif self._compengine == 'none':
            deltablob = delta
            compression = COMPRESSION_NONE
        else:
            raise error.ProgrammingError('unhandled compression engine: %s' %
                                         self._compengine)

        # Don't store compressed data if it isn't practical.
        if len(deltablob) >= len(delta):
            deltablob = delta
            compression = COMPRESSION_NONE

        deltaid = insertdelta(self._db, compression, deltahash, deltablob)

        rev = len(self)

        if p1 == nullid:
            p1rev = nullrev
        else:
            p1rev = self._nodetorev[p1]

        if p2 == nullid:
            p2rev = nullrev
        else:
            p2rev = self._nodetorev[p2]

        rid = self._db.execute(
            r'INSERT INTO fileindex ('
            r'    pathid, revnum, node, p1rev, p2rev, linkrev, flags, '
            r'    deltaid, deltabaseid) '
            r'    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
            (self._pathid, rev, node, p1rev, p2rev, linkrev, flags,
             deltaid, baseid)
        ).lastrowid

        entry = revisionentry(
            rid=rid,
            rev=rev,
            node=node,
            p1rev=p1rev,
            p2rev=p2rev,
            p1node=p1,
            p2node=p2,
            linkrev=linkrev,
            flags=flags)

        self._nodetorev[node] = rev
        self._revtonode[rev] = node
        self._revisions[node] = entry

        return node

class sqliterepository(localrepo.localrepository):
    def cancopy(self):
        return False

    def transaction(self, *args, **kwargs):
        current = self.currenttransaction()

        tr = super(sqliterepository, self).transaction(*args, **kwargs)

        if current:
            return tr

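        # No transaction was active, so this is the outermost one: open a
        # SQLite transaction now and commit it when the Mercurial
        # transaction finalizes.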
        self._dbconn.execute(r'BEGIN TRANSACTION')

        def committransaction(_):
            self._dbconn.commit()

        tr.addfinalize('sqlitestore', committransaction)

        return tr

    @property
    def _dbconn(self):
        # SQLite connections can only be used on the thread that created
        # them. In most cases, this "just works." However, hgweb uses
        # multiple threads.
        tid = threading.current_thread().ident

        if self._db:
            if self._db[0] == tid:
                return self._db[1]

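        # Only one (thread id, connection) pair is cached; a call from a
        # different thread opens a new connection and replaces the cached
        # one.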
        db = makedb(self.svfs.join('db.sqlite'))
        self._db = (tid, db)

        return db

def makedb(path):
    """Construct a database handle for a database at path."""

    db = sqlite3.connect(path)
    db.text_factory = bytes

    res = db.execute(r'PRAGMA user_version').fetchone()[0]

    # New database.
    if res == 0:
        for statement in CREATE_SCHEMA:
            db.execute(statement)

        db.commit()

    elif res == CURRENT_SCHEMA_VERSION:
        pass

    else:
        raise error.Abort(_('sqlite database has unrecognized version'))

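    # WAL mode lets readers proceed concurrently with a writer, which
    # should suit multi-threaded consumers such as hgweb better than the
    # default rollback journal.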
    db.execute(r'PRAGMA journal_mode=WAL')

    return db

def featuresetup(ui, supported):
    supported.add(REQUIREMENT)

    if zstd:
        supported.add(REQUIREMENT_ZSTD)

    supported.add(REQUIREMENT_ZLIB)
    supported.add(REQUIREMENT_NONE)
    supported.add(REQUIREMENT_SHALLOW_FILES)
    supported.add(repository.NARROW_REQUIREMENT)

def newreporequirements(orig, ui, createopts):
    if createopts['backend'] != 'sqlite':
        return orig(ui, createopts)

    # This restriction can be lifted once we have more confidence.
    if 'sharedrepo' in createopts:
        raise error.Abort(_('shared repositories not supported with SQLite '
                            'store'))

    # This filtering is out of an abundance of caution: we want to ensure
    # we honor creation options and we do that by annotating exactly the
    # creation options we recognize.
    known = {
        'narrowfiles',
        'backend',
        'shallowfilestore',
    }

    unsupported = set(createopts) - known
    if unsupported:
        raise error.Abort(_('SQLite store does not support repo creation '
                            'option: %s') % ', '.join(sorted(unsupported)))

    # Since we're a hybrid store that still relies on revlogs, we fall back
    # to using the revlogv1 backend's storage requirements then adding our
    # own requirement.
    createopts['backend'] = 'revlogv1'
    requirements = orig(ui, createopts)
    requirements.add(REQUIREMENT)

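    # The compression engine is chosen at repo creation time from
    # configuration, e.g. in an hgrc:
    #
    #   [storage]
    #   sqlite.compression = zstd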
    compression = ui.config('storage', 'sqlite.compression')

    if compression == 'zstd' and not zstd:
        raise error.Abort(_('storage.sqlite.compression set to "zstd" but '
                            'zstandard compression not available to this '
                            'Mercurial install'))

    if compression == 'zstd':
        requirements.add(REQUIREMENT_ZSTD)
    elif compression == 'zlib':
        requirements.add(REQUIREMENT_ZLIB)
    elif compression == 'none':
        requirements.add(REQUIREMENT_NONE)
    else:
        raise error.Abort(_('unknown compression engine defined in '
                            'storage.sqlite.compression: %s') % compression)

    if createopts.get('shallowfilestore'):
        requirements.add(REQUIREMENT_SHALLOW_FILES)

    return requirements

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class sqlitefilestorage(object):
    """Repository file storage backed by SQLite."""
    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        if REQUIREMENT_ZSTD in self.requirements:
            compression = 'zstd'
        elif REQUIREMENT_ZLIB in self.requirements:
            compression = 'zlib'
        elif REQUIREMENT_NONE in self.requirements:
            compression = 'none'
        else:
            raise error.Abort(_('unable to determine what compression engine '
                                'to use for SQLite storage'))

        return sqlitefilestore(self._dbconn, path, compression)

def makefilestorage(orig, requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    if REQUIREMENT in requirements:
        if REQUIREMENT_SHALLOW_FILES in requirements:
            features.add(repository.REPO_FEATURE_SHALLOW_FILE_STORAGE)

        return sqlitefilestorage
    else:
        return orig(requirements=requirements, features=features, **kwargs)

def makemain(orig, ui, requirements, **kwargs):
    if REQUIREMENT in requirements:
        if REQUIREMENT_ZSTD in requirements and not zstd:
            raise error.Abort(_('repository uses zstandard compression, which '
                                'is not available to this Mercurial install'))

        return sqliterepository

    return orig(requirements=requirements, **kwargs)

def verifierinit(orig, self, *args, **kwargs):
    orig(self, *args, **kwargs)

    # We don't care that files in the store don't align with what is
    # advertised. So suppress these warnings.
    self.warnorphanstorefiles = False

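# extsetup() runs once when the extension loads and wraps the repository
# construction entry points so SQLite-backed repositories are recognized,
# given the proper requirements, and instantiated with the SQLite-aware
# classes defined above.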
def extsetup(ui):
    localrepo.featuresetupfuncs.add(featuresetup)
    extensions.wrapfunction(localrepo, 'newreporequirements',
                            newreporequirements)
    extensions.wrapfunction(localrepo, 'makefilestorage',
                            makefilestorage)
    extensions.wrapfunction(localrepo, 'makemain',
                            makemain)
    extensions.wrapfunction(verify.verifier, '__init__',
                            verifierinit)

def reposetup(ui, repo):
    if isinstance(repo, sqliterepository):
        repo._db = None

    # TODO check for bundlerepository?