repository: teach addgroup() to receive data with missing parents...
Gregory Szorc
r40425:1b183edb default
@@ -1,1119 +1,1124 @@
# sqlitestore.py - Storage backend that uses SQLite
#
# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""store repository data in SQLite (EXPERIMENTAL)

The sqlitestore extension enables the storage of repository data in SQLite.

This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
GUARANTEES. This means that repositories created with this extension may
only be usable with the exact version of this extension/Mercurial that was
used. The extension attempts to enforce this in order to prevent repository
corruption.

In addition, several features are not yet supported or have known bugs:

* Only some data is stored in SQLite. Changeset, manifest, and other repository
  data is not yet stored in SQLite.
* Transactions are not robust. If the process is aborted at the right time
  during transaction close/rollback, the repository could be in an inconsistent
  state. This problem will diminish once all repository data is tracked by
  SQLite.
* Bundle repositories do not work (the ability to use e.g.
  `hg -R <bundle-file> log` to automatically overlay a bundle on top of the
  existing repository).
* Various other features don't work.

This extension should work for basic clone/pull, update, and commit workflows.
Some history rewriting operations may fail due to lack of support for bundle
repositories.

To use, activate the extension and set the ``storage.new-repo-backend`` config
option to ``sqlite`` to enable new repositories to use SQLite for storage.
"""

# To run the test suite with repos using SQLite by default, execute the
# following:
#
# HGREPOFEATURES="sqlitestore" run-tests.py \
#     --extra-config-opt extensions.sqlitestore= \
#     --extra-config-opt storage.new-repo-backend=sqlite

from __future__ import absolute_import

import hashlib
import sqlite3
import struct
import threading
import zlib

from mercurial.i18n import _
from mercurial.node import (
    nullid,
    nullrev,
    short,
)
from mercurial.thirdparty import (
    attr,
)
from mercurial import (
    ancestor,
    dagop,
    error,
    extensions,
    localrepo,
    mdiff,
    pycompat,
    registrar,
    repository,
    util,
    verify,
)
from mercurial.utils import (
    interfaceutil,
    storageutil,
)

try:
    from mercurial import zstd
    zstd.__version__
except ImportError:
    zstd = None

configtable = {}
configitem = registrar.configitem(configtable)

# experimental config: storage.sqlite.compression
configitem('storage', 'sqlite.compression',
           default='zstd' if zstd else 'zlib')

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

REQUIREMENT = b'exp-sqlite-001'
REQUIREMENT_ZSTD = b'exp-sqlite-comp-001=zstd'
REQUIREMENT_ZLIB = b'exp-sqlite-comp-001=zlib'
REQUIREMENT_NONE = b'exp-sqlite-comp-001=none'

CURRENT_SCHEMA_VERSION = 1

COMPRESSION_NONE = 1
COMPRESSION_ZSTD = 2
COMPRESSION_ZLIB = 3

FLAG_CENSORED = 1

CREATE_SCHEMA = [
    # Deltas are stored as content-indexed blobs.
    # compression column holds COMPRESSION_* constant for how the
    # delta is encoded.

    r'CREATE TABLE delta ('
    r'    id INTEGER PRIMARY KEY, '
    r'    compression INTEGER NOT NULL, '
    r'    hash BLOB UNIQUE ON CONFLICT ABORT, '
    r'    delta BLOB NOT NULL '
    r')',

    # Tracked paths are denormalized to integers to avoid redundant
    # storage of the path name.
    r'CREATE TABLE filepath ('
    r'    id INTEGER PRIMARY KEY, '
    r'    path BLOB NOT NULL '
    r')',

    r'CREATE UNIQUE INDEX filepath_path '
    r'    ON filepath (path)',

    # We have a single table for all file revision data.
    # Each file revision is uniquely described by a (path, rev) and
    # (path, node).
    #
    # Revision data is stored as a pointer to the delta producing this
    # revision and the file revision whose delta should be applied before
    # that one. One can reconstruct the delta chain by recursively following
    # the delta base revision pointers until one encounters NULL.
    #
    # flags column holds bitwise integer flags controlling storage options.
    # These flags are defined by the FLAG_* constants.
    r'CREATE TABLE fileindex ('
    r'    id INTEGER PRIMARY KEY, '
    r'    pathid INTEGER REFERENCES filepath(id), '
    r'    revnum INTEGER NOT NULL, '
    r'    p1rev INTEGER NOT NULL, '
    r'    p2rev INTEGER NOT NULL, '
    r'    linkrev INTEGER NOT NULL, '
    r'    flags INTEGER NOT NULL, '
    r'    deltaid INTEGER REFERENCES delta(id), '
    r'    deltabaseid INTEGER REFERENCES fileindex(id), '
    r'    node BLOB NOT NULL '
    r')',

    r'CREATE UNIQUE INDEX fileindex_pathrevnum '
    r'    ON fileindex (pathid, revnum)',

    r'CREATE UNIQUE INDEX fileindex_pathnode '
    r'    ON fileindex (pathid, node)',

    # Provide a view over all file data for convenience.
    r'CREATE VIEW filedata AS '
    r'SELECT '
    r'    fileindex.id AS id, '
    r'    filepath.id AS pathid, '
    r'    filepath.path AS path, '
    r'    fileindex.revnum AS revnum, '
    r'    fileindex.node AS node, '
    r'    fileindex.p1rev AS p1rev, '
    r'    fileindex.p2rev AS p2rev, '
    r'    fileindex.linkrev AS linkrev, '
    r'    fileindex.flags AS flags, '
    r'    fileindex.deltaid AS deltaid, '
    r'    fileindex.deltabaseid AS deltabaseid '
    r'FROM filepath, fileindex '
    r'WHERE fileindex.pathid=filepath.id',

    r'PRAGMA user_version=%d' % CURRENT_SCHEMA_VERSION,
]
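
# As an illustration (hypothetical ids and values), a three-revision file
# whose later revisions are stored as deltas might produce fileindex rows
# like:
#
#   id | revnum | deltaid | deltabaseid
#   ---+--------+---------+------------
#    1 |      0 |      10 |        NULL   (fulltext stored in the delta slot)
#    2 |      1 |      11 |           1
#    3 |      2 |      12 |           2
#
# Reconstructing revnum 2 follows the deltabaseid pointers from row 3 to
# row 2 to row 1 to NULL, then applies deltas 11 and 12 on top of the
# fulltext stored as delta 10.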

def resolvedeltachain(db, pathid, node, revisioncache,
                      stoprids, zstddctx=None):
    """Resolve a delta chain for a file node."""

    # TODO the "not in ({stops})" here is possibly slowing down the query
    # because it needs to perform the lookup on every recursive invocation.
    # This could possibly be faster if we created a temporary query with
    # baseid "poisoned" to null and limited the recursive filter to
    # "is not null".
    res = db.execute(
        r'WITH RECURSIVE '
        r'    deltachain(deltaid, baseid) AS ('
        r'        SELECT deltaid, deltabaseid FROM fileindex '
        r'            WHERE pathid=? AND node=? '
        r'        UNION ALL '
        r'        SELECT fileindex.deltaid, deltabaseid '
        r'            FROM fileindex, deltachain '
        r'            WHERE '
        r'                fileindex.id=deltachain.baseid '
        r'                AND deltachain.baseid IS NOT NULL '
        r'                AND fileindex.id NOT IN ({stops}) '
        r'    ) '
        r'SELECT deltachain.baseid, compression, delta '
        r'FROM deltachain, delta '
        r'WHERE delta.id=deltachain.deltaid'.format(
            stops=r','.join([r'?'] * len(stoprids))),
        tuple([pathid, node] + list(stoprids.keys())))

    deltas = []
    lastdeltabaseid = None

    for deltabaseid, compression, delta in res:
        lastdeltabaseid = deltabaseid

        if compression == COMPRESSION_ZSTD:
            delta = zstddctx.decompress(delta)
        elif compression == COMPRESSION_NONE:
            delta = delta
        elif compression == COMPRESSION_ZLIB:
            delta = zlib.decompress(delta)
        else:
            raise SQLiteStoreError('unhandled compression type: %d' %
                                   compression)

        deltas.append(delta)

    if lastdeltabaseid in stoprids:
        basetext = revisioncache[stoprids[lastdeltabaseid]]
    else:
        basetext = deltas.pop()

    deltas.reverse()
    fulltext = mdiff.patches(basetext, deltas)

    # SQLite returns buffer instances for blob columns on Python 2. This
    # type can propagate through the delta application layer. Because
    # downstream callers assume revisions are bytes, cast as needed.
    if not isinstance(fulltext, bytes):
        fulltext = bytes(fulltext)

    return fulltext

def insertdelta(db, compression, hash, delta):
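    # The delta table declares hash as UNIQUE ON CONFLICT ABORT, so
    # inserting a blob we already have raises IntegrityError. We lean on
    # that to de-duplicate: on conflict, return the id of the existing row
    # instead.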
    try:
        return db.execute(
            r'INSERT INTO delta (compression, hash, delta) '
            r'VALUES (?, ?, ?)',
            (compression, hash, delta)).lastrowid
    except sqlite3.IntegrityError:
        return db.execute(
            r'SELECT id FROM delta WHERE hash=?',
            (hash,)).fetchone()[0]

class SQLiteStoreError(error.StorageError):
    pass

@attr.s
class revisionentry(object):
    rid = attr.ib()
    rev = attr.ib()
    node = attr.ib()
    p1rev = attr.ib()
    p2rev = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    linkrev = attr.ib()
    flags = attr.ib()

@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class sqliterevisiondelta(object):
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    linknode = attr.ib(default=None)

@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class sqliteproblem(object):
    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)

@interfaceutil.implementer(repository.ifilestorage)
class sqlitefilestore(object):
    """Implements storage for an individual tracked path."""

    def __init__(self, db, path, compression):
        self._db = db
        self._path = path

        self._pathid = None

        # revnum -> node
        self._revtonode = {}
        # node -> revnum
        self._nodetorev = {}
        # node -> data structure
        self._revisions = {}

        self._revisioncache = util.lrucachedict(10)

        self._compengine = compression

        if compression == 'zstd':
            self._cctx = zstd.ZstdCompressor(level=3)
            self._dctx = zstd.ZstdDecompressor()
        else:
            self._cctx = None
            self._dctx = None

        self._refreshindex()

    def _refreshindex(self):
        self._revtonode = {}
        self._nodetorev = {}
        self._revisions = {}

        res = list(self._db.execute(
            r'SELECT id FROM filepath WHERE path=?', (self._path,)))

        if not res:
            self._pathid = None
            return

        self._pathid = res[0][0]

        res = self._db.execute(
            r'SELECT id, revnum, node, p1rev, p2rev, linkrev, flags '
            r'FROM fileindex '
            r'WHERE pathid=? '
            r'ORDER BY revnum ASC',
            (self._pathid,))

        for i, row in enumerate(res):
            rid, rev, node, p1rev, p2rev, linkrev, flags = row

            if i != rev:
                raise SQLiteStoreError(_('sqlite database has inconsistent '
                                         'revision numbers'))

            if p1rev == nullrev:
                p1node = nullid
            else:
                p1node = self._revtonode[p1rev]

            if p2rev == nullrev:
                p2node = nullid
            else:
                p2node = self._revtonode[p2rev]

            entry = revisionentry(
                rid=rid,
                rev=rev,
                node=node,
                p1rev=p1rev,
                p2rev=p2rev,
                p1node=p1node,
                p2node=p2node,
                linkrev=linkrev,
                flags=flags)

            self._revtonode[rev] = node
            self._nodetorev[node] = rev
            self._revisions[node] = entry

    # Start of ifileindex interface.

    def __len__(self):
        return len(self._revisions)

    def __iter__(self):
        return iter(pycompat.xrange(len(self._revisions)))

    def hasnode(self, node):
        if node == nullid:
            return False

        return node in self._nodetorev

    def revs(self, start=0, stop=None):
        return storageutil.iterrevs(len(self._revisions), start=start,
                                    stop=stop)

    def parents(self, node):
        if node == nullid:
            return nullid, nullid

        if node not in self._revisions:
            raise error.LookupError(node, self._path, _('no node'))

        entry = self._revisions[node]
        return entry.p1node, entry.p2node

    def parentrevs(self, rev):
        if rev == nullrev:
            return nullrev, nullrev

        if rev not in self._revtonode:
            raise IndexError(rev)

        entry = self._revisions[self._revtonode[rev]]
        return entry.p1rev, entry.p2rev

    def rev(self, node):
        if node == nullid:
            return nullrev

        if node not in self._nodetorev:
            raise error.LookupError(node, self._path, _('no node'))

        return self._nodetorev[node]

    def node(self, rev):
        if rev == nullrev:
            return nullid

        if rev not in self._revtonode:
            raise IndexError(rev)

        return self._revtonode[rev]

    def lookup(self, node):
        return storageutil.fileidlookup(self, node, self._path)

    def linkrev(self, rev):
        if rev == nullrev:
            return nullrev

        if rev not in self._revtonode:
            raise IndexError(rev)

        entry = self._revisions[self._revtonode[rev]]
        return entry.linkrev

    def iscensored(self, rev):
        if rev == nullrev:
            return False

        if rev not in self._revtonode:
            raise IndexError(rev)

        return self._revisions[self._revtonode[rev]].flags & FLAG_CENSORED

    def commonancestorsheads(self, node1, node2):
        rev1 = self.rev(node1)
        rev2 = self.rev(node2)

        ancestors = ancestor.commonancestorsheads(self.parentrevs, rev1, rev2)
        return pycompat.maplist(self.node, ancestors)

    def descendants(self, revs):
        # TODO we could implement this using a recursive SQL query, which
        # might be faster.
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)

    def heads(self, start=None, stop=None):
        if start is None and stop is None:
            if not len(self):
                return [nullid]

        startrev = self.rev(start) if start is not None else nullrev
        stoprevs = {self.rev(n) for n in stop or []}

        revs = dagop.headrevssubset(self.revs, self.parentrevs,
                                    startrev=startrev, stoprevs=stoprevs)

        return [self.node(rev) for rev in revs]

    def children(self, node):
        rev = self.rev(node)

        res = self._db.execute(
            r'SELECT'
            r'    node '
            r'    FROM filedata '
            r'    WHERE path=? AND (p1rev=? OR p2rev=?) '
            r'    ORDER BY revnum ASC',
            (self._path, rev, rev))

        return [row[0] for row in res]

    # End of ifileindex interface.

    # Start of ifiledata interface.

    def size(self, rev):
        if rev == nullrev:
            return 0

        if rev not in self._revtonode:
            raise IndexError(rev)

        node = self._revtonode[rev]

        if self.renamed(node):
            return len(self.read(node))

        return len(self.revision(node))

    def revision(self, node, raw=False, _verifyhash=True):
        if node in (nullid, nullrev):
            return b''

        if isinstance(node, int):
            node = self.node(node)

        if node not in self._nodetorev:
            raise error.LookupError(node, self._path, _('no node'))

        if node in self._revisioncache:
            return self._revisioncache[node]

        # Because we have a fulltext revision cache, we are able to
        # short-circuit delta chain traversal and decompression as soon as
        # we encounter a revision in the cache.

        stoprids = {self._revisions[n].rid: n
                    for n in self._revisioncache}

        if not stoprids:
            stoprids[-1] = None
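        # -1 is a sentinel: no fileindex row can have that id, so the
        # "NOT IN" filter in resolvedeltachain() never matches it, and it
        # keeps the generated placeholder list from being empty.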

        fulltext = resolvedeltachain(self._db, self._pathid, node,
                                     self._revisioncache, stoprids,
                                     zstddctx=self._dctx)

        if _verifyhash:
            self._checkhash(fulltext, node)
            self._revisioncache[node] = fulltext

        return fulltext

    def read(self, node):
        return storageutil.filtermetadata(self.revision(node))

    def renamed(self, node):
        return storageutil.filerevisioncopied(self, node)

    def cmp(self, node, fulltext):
        return not storageutil.filedataequivalent(self, node, fulltext)

    def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
                      assumehaveparentrevisions=False, deltaprevious=False):
        if nodesorder not in ('nodes', 'storage', None):
            raise error.ProgrammingError('unhandled value for nodesorder: %s' %
                                         nodesorder)

        nodes = [n for n in nodes if n != nullid]

        if not nodes:
            return

        # TODO perform in a single query.
        res = self._db.execute(
            r'SELECT revnum, deltaid FROM fileindex '
            r'WHERE pathid=? '
            r'    AND node in (%s)' % (r','.join([r'?'] * len(nodes))),
            tuple([self._pathid] + nodes))

        deltabases = {}

        for rev, deltaid in res:
            res = self._db.execute(
                r'SELECT revnum from fileindex WHERE pathid=? AND deltaid=?',
                (self._pathid, deltaid))
            deltabases[rev] = res.fetchone()[0]

        # TODO define revdifffn so we can use delta from storage.
        for delta in storageutil.emitrevisions(
            self, nodes, nodesorder, sqliterevisiondelta,
            deltaparentfn=deltabases.__getitem__,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            deltaprevious=deltaprevious):

            yield delta

    # End of ifiledata interface.

    # Start of ifilemutation interface.

    def add(self, filedata, meta, transaction, linkrev, p1, p2):
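        # Copy metadata, if present, is stored inline using the filelog
        # convention of a leading "\x01\n"-delimited header; content that
        # itself starts with that marker must also be packed so it
        # round-trips.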
        if meta or filedata.startswith(b'\x01\n'):
            filedata = storageutil.packmeta(meta, filedata)

        return self.addrevision(filedata, transaction, linkrev, p1, p2)

    def addrevision(self, revisiondata, transaction, linkrev, p1, p2, node=None,
                    flags=0, cachedelta=None):
        if flags:
            raise SQLiteStoreError(_('flags not supported on revisions'))

        validatehash = node is not None
        node = node or storageutil.hashrevisionsha1(revisiondata, p1, p2)

        if validatehash:
            self._checkhash(revisiondata, node, p1, p2)

        if node in self._nodetorev:
            return node

        node = self._addrawrevision(node, revisiondata, transaction, linkrev,
                                    p1, p2)

        self._revisioncache[node] = revisiondata
        return node

-    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
+    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None,
+                 maybemissingparents=False):
+        if maybemissingparents:
+            raise error.Abort(_('SQLite storage does not support missing '
+                                'parents write mode'))
+
        nodes = []

        for node, p1, p2, linknode, deltabase, delta, wireflags in deltas:
            storeflags = 0

            if wireflags & repository.REVISION_FLAG_CENSORED:
                storeflags |= FLAG_CENSORED

            if wireflags & ~repository.REVISION_FLAG_CENSORED:
                raise SQLiteStoreError('unhandled revision flag')

            baserev = self.rev(deltabase)

            # If base is censored, delta must be full replacement in a single
            # patch operation.
            if baserev != nullrev and self.iscensored(baserev):
                hlen = struct.calcsize('>lll')
                oldlen = len(self.revision(deltabase, raw=True,
                                           _verifyhash=False))
                newlen = len(delta) - hlen

                if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                    raise error.CensoredBaseError(self._path,
                                                  deltabase)

            if (not (storeflags & FLAG_CENSORED)
                and storageutil.deltaiscensored(
                    delta, baserev, lambda x: len(self.revision(x, raw=True)))):
                storeflags |= FLAG_CENSORED

            linkrev = linkmapper(linknode)

            nodes.append(node)
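            # nodes accumulates every node in the incoming deltas, including
            # ones already stored; the check below skips only the storage
            # work, so callers still get the full list back.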

            if node in self._revisions:
                continue

            if deltabase == nullid:
                text = mdiff.patch(b'', delta)
                storedelta = None
            else:
                text = None
                storedelta = (deltabase, delta)

            self._addrawrevision(node, text, transaction, linkrev, p1, p2,
                                 storedelta=storedelta, flags=storeflags)

            if addrevisioncb:
                addrevisioncb(self, node)

        return nodes

    def censorrevision(self, tr, censornode, tombstone=b''):
        tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

        # This restriction is cargo culted from revlogs and makes no sense for
        # SQLite, since columns can be resized at will.
        if len(tombstone) > len(self.revision(censornode, raw=True)):
            raise error.Abort(_('censor tombstone must be no longer than '
                                'censored data'))

        # We need to replace the censored revision's data with the tombstone.
        # But replacing that data will have implications for delta chains that
        # reference it.
        #
        # While "better," more complex strategies are possible, we do something
        # simple: we find delta chain children of the censored revision and we
        # replace those incremental deltas with fulltexts of their corresponding
        # revision. Then we delete the now-unreferenced delta and original
        # revision and insert a replacement.

        # Find the delta to be censored.
        censoreddeltaid = self._db.execute(
            r'SELECT deltaid FROM fileindex WHERE id=?',
            (self._revisions[censornode].rid,)).fetchone()[0]

        # Find all its delta chain children.
        # TODO once we support storing deltas for !files, we'll need to look
        # for those delta chains too.
        rows = list(self._db.execute(
            r'SELECT id, pathid, node FROM fileindex '
            r'WHERE deltabaseid=? OR deltaid=?',
            (censoreddeltaid, censoreddeltaid)))

        for row in rows:
            rid, pathid, node = row

            fulltext = resolvedeltachain(self._db, pathid, node, {}, {-1: None},
                                         zstddctx=self._dctx)

            deltahash = hashlib.sha1(fulltext).digest()

            if self._compengine == 'zstd':
                deltablob = self._cctx.compress(fulltext)
                compression = COMPRESSION_ZSTD
            elif self._compengine == 'zlib':
                deltablob = zlib.compress(fulltext)
                compression = COMPRESSION_ZLIB
            elif self._compengine == 'none':
                deltablob = fulltext
                compression = COMPRESSION_NONE
            else:
                raise error.ProgrammingError('unhandled compression engine: %s'
                                             % self._compengine)

            if len(deltablob) >= len(fulltext):
                deltablob = fulltext
                compression = COMPRESSION_NONE

            deltaid = insertdelta(self._db, compression, deltahash, deltablob)

            self._db.execute(
                r'UPDATE fileindex SET deltaid=?, deltabaseid=NULL '
                r'WHERE id=?', (deltaid, rid))

        # Now create the tombstone delta and replace the delta on the censored
        # node.
        deltahash = hashlib.sha1(tombstone).digest()
        tombstonedeltaid = insertdelta(self._db, COMPRESSION_NONE,
                                       deltahash, tombstone)

        flags = self._revisions[censornode].flags
        flags |= FLAG_CENSORED

        self._db.execute(
            r'UPDATE fileindex SET flags=?, deltaid=?, deltabaseid=NULL '
            r'WHERE pathid=? AND node=?',
            (flags, tombstonedeltaid, self._pathid, censornode))

        self._db.execute(
            r'DELETE FROM delta WHERE id=?', (censoreddeltaid,))

        self._refreshindex()
        self._revisioncache.clear()

    def getstrippoint(self, minlink):
        return storageutil.resolvestripinfo(minlink, len(self) - 1,
                                            [self.rev(n) for n in self.heads()],
                                            self.linkrev,
                                            self.parentrevs)

    def strip(self, minlink, transaction):
        if not len(self):
            return

        rev, _ignored = self.getstrippoint(minlink)

        if rev == len(self):
            return

        for rev in self.revs(rev):
            self._db.execute(
                r'DELETE FROM fileindex WHERE pathid=? AND node=?',
                (self._pathid, self.node(rev)))

        # TODO how should we garbage collect data in delta table?

        self._refreshindex()

    # End of ifilemutation interface.

    # Start of ifilestorage interface.

    def files(self):
        return []

    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        d = {}

        if exclusivefiles:
            d['exclusivefiles'] = []

        if sharedfiles:
            # TODO list sqlite file(s) here.
            d['sharedfiles'] = []

        if revisionscount:
            d['revisionscount'] = len(self)

        if trackedsize:
            d['trackedsize'] = sum(len(self.revision(node))
                                   for node in self._nodetorev)

        if storedsize:
            # TODO implement this?
            d['storedsize'] = None

        return d

    def verifyintegrity(self, state):
        state['skipread'] = set()

        for rev in self:
            node = self.node(rev)

            try:
                self.revision(node)
            except Exception as e:
                yield sqliteproblem(
                    error=_('unpacking %s: %s') % (short(node), e),
                    node=node)

                state['skipread'].add(node)

    # End of ifilestorage interface.

    def _checkhash(self, fulltext, node, p1=None, p2=None):
        if p1 is None and p2 is None:
            p1, p2 = self.parents(node)

        if node == storageutil.hashrevisionsha1(fulltext, p1, p2):
            return

        try:
            del self._revisioncache[node]
        except KeyError:
            pass

        if storageutil.iscensoredtext(fulltext):
            raise error.CensoredNodeError(self._path, node, fulltext)

        raise SQLiteStoreError(_('integrity check failed on %s') %
                               self._path)

    def _addrawrevision(self, node, revisiondata, transaction, linkrev,
                        p1, p2, storedelta=None, flags=0):
        if self._pathid is None:
            res = self._db.execute(
                r'INSERT INTO filepath (path) VALUES (?)', (self._path,))
            self._pathid = res.lastrowid

        # For simplicity, always store a delta against p1.
        # TODO we need a lot more logic here to make behavior reasonable.

        if storedelta:
            deltabase, delta = storedelta

            if isinstance(deltabase, int):
                deltabase = self.node(deltabase)

        else:
            assert revisiondata is not None
            deltabase = p1

            if deltabase == nullid:
                delta = revisiondata
            else:
                delta = mdiff.textdiff(self.revision(self.rev(deltabase)),
                                       revisiondata)

        # File index stores a pointer to its delta and the parent delta.
        # The parent delta is stored via a pointer to the fileindex PK.
        if deltabase == nullid:
            baseid = None
        else:
            baseid = self._revisions[deltabase].rid

        # Deltas are stored with a hash of their content. This allows
        # us to de-duplicate. The table is configured to ignore conflicts
        # and it is faster to just insert and silently noop than to look
        # first.
        deltahash = hashlib.sha1(delta).digest()

        if self._compengine == 'zstd':
            deltablob = self._cctx.compress(delta)
            compression = COMPRESSION_ZSTD
        elif self._compengine == 'zlib':
            deltablob = zlib.compress(delta)
            compression = COMPRESSION_ZLIB
        elif self._compengine == 'none':
            deltablob = delta
            compression = COMPRESSION_NONE
        else:
            raise error.ProgrammingError('unhandled compression engine: %s' %
                                         self._compengine)

        # Don't store compressed data if it isn't practical.
        if len(deltablob) >= len(delta):
            deltablob = delta
            compression = COMPRESSION_NONE

        deltaid = insertdelta(self._db, compression, deltahash, deltablob)

        rev = len(self)

        if p1 == nullid:
            p1rev = nullrev
        else:
            p1rev = self._nodetorev[p1]

        if p2 == nullid:
            p2rev = nullrev
        else:
            p2rev = self._nodetorev[p2]

        rid = self._db.execute(
            r'INSERT INTO fileindex ('
            r'    pathid, revnum, node, p1rev, p2rev, linkrev, flags, '
            r'    deltaid, deltabaseid) '
            r'    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
            (self._pathid, rev, node, p1rev, p2rev, linkrev, flags,
             deltaid, baseid)
        ).lastrowid

        entry = revisionentry(
            rid=rid,
            rev=rev,
            node=node,
            p1rev=p1rev,
            p2rev=p2rev,
            p1node=p1,
            p2node=p2,
            linkrev=linkrev,
            flags=flags)

        self._nodetorev[node] = rev
        self._revtonode[rev] = node
        self._revisions[node] = entry

        return node

class sqliterepository(localrepo.localrepository):
    def cancopy(self):
        return False

    def transaction(self, *args, **kwargs):
        current = self.currenttransaction()

        tr = super(sqliterepository, self).transaction(*args, **kwargs)

        if current:
            return tr

        self._dbconn.execute(r'BEGIN TRANSACTION')

        def committransaction(_):
            self._dbconn.commit()

        tr.addfinalize('sqlitestore', committransaction)
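        # Committing via the transaction finalizer means the SQLite commit
        # happens only if the Mercurial transaction closes successfully;
        # if the transaction aborts, the BEGIN TRANSACTION above is simply
        # left uncommitted (an inference from the code, not a documented
        # guarantee).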

        return tr

    @property
    def _dbconn(self):
        # SQLite connections can only be used on the thread that created
        # them. In most cases, this "just works." However, hgweb uses
        # multiple threads.
        tid = threading.current_thread().ident

        if self._db:
            if self._db[0] == tid:
                return self._db[1]

        db = makedb(self.svfs.join('db.sqlite'))
        self._db = (tid, db)
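        # Only a single (thread id, connection) pair is cached, so threads
        # that alternate will each open a fresh connection rather than
        # reuse one another's.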

        return db

def makedb(path):
    """Construct a database handle for a database at path."""

    db = sqlite3.connect(path)
    db.text_factory = bytes

    res = db.execute(r'PRAGMA user_version').fetchone()[0]
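    # SQLite reports user_version 0 for a freshly created database, which
    # is how a brand new store is distinguished below from one already at
    # the current schema version.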
986
991
987 # New database.
992 # New database.
988 if res == 0:
993 if res == 0:
989 for statement in CREATE_SCHEMA:
994 for statement in CREATE_SCHEMA:
990 db.execute(statement)
995 db.execute(statement)
991
996
992 db.commit()
997 db.commit()
993
998
994 elif res == CURRENT_SCHEMA_VERSION:
999 elif res == CURRENT_SCHEMA_VERSION:
995 pass
1000 pass
996
1001
997 else:
1002 else:
998 raise error.Abort(_('sqlite database has unrecognized version'))
1003 raise error.Abort(_('sqlite database has unrecognized version'))
999
1004
1000 db.execute(r'PRAGMA journal_mode=WAL')
1005 db.execute(r'PRAGMA journal_mode=WAL')
1001
1006
1002 return db
1007 return db
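``PRAGMA user_version`` is a writable integer in the SQLite file header that defaults to 0, which is why a zero can double as "fresh database, create the schema". A sketch of the same bootstrap; the schema statements and version number below are illustrative, not the extension's real ones:

import sqlite3

CURRENT_SCHEMA_VERSION = 1  # illustrative value
CREATE_SCHEMA = [
    'CREATE TABLE filepath (id INTEGER PRIMARY KEY, path BLOB UNIQUE)',
    'PRAGMA user_version=%d' % CURRENT_SCHEMA_VERSION,
]

def opendb(path):
    db = sqlite3.connect(path)
    db.text_factory = bytes  # Mercurial deals in bytes, not unicode

    version = db.execute('PRAGMA user_version').fetchone()[0]

    if version == 0:  # brand new database
        for statement in CREATE_SCHEMA:
            db.execute(statement)
        db.commit()
    elif version != CURRENT_SCHEMA_VERSION:
        raise RuntimeError('unrecognized schema version: %d' % version)

    db.execute('PRAGMA journal_mode=WAL')  # concurrent readers during writes
    return db

db = opendb(':memory:')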

def featuresetup(ui, supported):
    supported.add(REQUIREMENT)

    if zstd:
        supported.add(REQUIREMENT_ZSTD)

    supported.add(REQUIREMENT_ZLIB)
    supported.add(REQUIREMENT_NONE)

def newreporequirements(orig, ui, createopts):
    if createopts['backend'] != 'sqlite':
        return orig(ui, createopts)

    # This restriction can be lifted once we have more confidence.
    if 'sharedrepo' in createopts:
        raise error.Abort(_('shared repositories not supported with SQLite '
                            'store'))

    # This filtering is out of an abundance of caution: we want to ensure
    # we honor creation options and we do that by annotating exactly the
    # creation options we recognize.
    known = {
        'narrowfiles',
        'backend',
    }

    unsupported = set(createopts) - known
    if unsupported:
        raise error.Abort(_('SQLite store does not support repo creation '
                            'option: %s') % ', '.join(sorted(unsupported)))

    # Since we're a hybrid store that still relies on revlogs, we fall back
    # to using the revlogv1 backend's storage requirements then adding our
    # own requirement.
    createopts['backend'] = 'revlogv1'
    requirements = orig(ui, createopts)
    requirements.add(REQUIREMENT)

    compression = ui.config('storage', 'sqlite.compression')

    if compression == 'zstd' and not zstd:
        raise error.Abort(_('storage.sqlite.compression set to "zstd" but '
                            'zstandard compression not available to this '
                            'Mercurial install'))

    if compression == 'zstd':
        requirements.add(REQUIREMENT_ZSTD)
    elif compression == 'zlib':
        requirements.add(REQUIREMENT_ZLIB)
    elif compression == 'none':
        requirements.add(REQUIREMENT_NONE)
    else:
        raise error.Abort(_('unknown compression engine defined in '
                            'storage.sqlite.compression: %s') % compression)

    return requirements
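For reference, the two knobs this function consumes live in the user's configuration; for example, to create new repositories on the SQLite backend with zstandard compression (honored only when the zstandard module is importable):

[storage]
new-repo-backend = sqlite
sqlite.compression = zstd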

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class sqlitefilestorage(object):
    """Repository file storage backed by SQLite."""
    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        if REQUIREMENT_ZSTD in self.requirements:
            compression = 'zstd'
        elif REQUIREMENT_ZLIB in self.requirements:
            compression = 'zlib'
        elif REQUIREMENT_NONE in self.requirements:
            compression = 'none'
        else:
            raise error.Abort(_('unable to determine what compression engine '
                                'to use for SQLite storage'))

        return sqlitefilestore(self._dbconn, path, compression)

def makefilestorage(orig, requirements, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    if REQUIREMENT in requirements:
        return sqlitefilestorage
    else:
        return orig(requirements=requirements, **kwargs)

def makemain(orig, ui, requirements, **kwargs):
    if REQUIREMENT in requirements:
        if REQUIREMENT_ZSTD in requirements and not zstd:
            raise error.Abort(_('repository uses zstandard compression, which '
                                'is not available to this Mercurial install'))

        return sqliterepository

    return orig(requirements=requirements, **kwargs)

def verifierinit(orig, self, *args, **kwargs):
    orig(self, *args, **kwargs)

    # We don't care that files in the store don't align with what is
    # advertised. So suppress these warnings.
    self.warnorphanstorefiles = False

def extsetup(ui):
    localrepo.featuresetupfuncs.add(featuresetup)
    extensions.wrapfunction(localrepo, 'newreporequirements',
                            newreporequirements)
    extensions.wrapfunction(localrepo, 'makefilestorage',
                            makefilestorage)
    extensions.wrapfunction(localrepo, 'makemain',
                            makemain)
    extensions.wrapfunction(verify.verifier, '__init__',
                            verifierinit)

def reposetup(ui, repo):
    if isinstance(repo, sqliterepository):
        repo._db = None

    # TODO check for bundlerepository?
@@ -1,233 +1,239 @@
# filelog.py - file history class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

+from .i18n import _
from .node import (
    nullid,
    nullrev,
)
from . import (
    error,
    repository,
    revlog,
)
from .utils import (
    interfaceutil,
    storageutil,
)

@interfaceutil.implementer(repository.ifilestorage)
class filelog(object):
    def __init__(self, opener, path):
        self._revlog = revlog.revlog(opener,
                                     '/'.join(('data', path + '.i')),
                                     censorable=True)
        # Full name of the user visible file, relative to the repository root.
        # Used by LFS.
        self._revlog.filename = path

    def __len__(self):
        return len(self._revlog)

    def __iter__(self):
        return self._revlog.__iter__()

    def hasnode(self, node):
        if node in (nullid, nullrev):
            return False

        try:
            self._revlog.rev(node)
            return True
        except (TypeError, ValueError, IndexError, error.LookupError):
            return False

    def revs(self, start=0, stop=None):
        return self._revlog.revs(start=start, stop=stop)

    def parents(self, node):
        return self._revlog.parents(node)

    def parentrevs(self, rev):
        return self._revlog.parentrevs(rev)

    def rev(self, node):
        return self._revlog.rev(node)

    def node(self, rev):
        return self._revlog.node(rev)

    def lookup(self, node):
        return storageutil.fileidlookup(self._revlog, node,
                                        self._revlog.indexfile)

    def linkrev(self, rev):
        return self._revlog.linkrev(rev)

    def commonancestorsheads(self, node1, node2):
        return self._revlog.commonancestorsheads(node1, node2)

    # Used by dagop.blockdescendants().
    def descendants(self, revs):
        return self._revlog.descendants(revs)

    def heads(self, start=None, stop=None):
        return self._revlog.heads(start, stop)

    # Used by hgweb, children extension.
    def children(self, node):
        return self._revlog.children(node)

    def iscensored(self, rev):
        return self._revlog.iscensored(rev)

    def revision(self, node, _df=None, raw=False):
        return self._revlog.revision(node, _df=_df, raw=raw)

    def emitrevisions(self, nodes, nodesorder=None,
                      revisiondata=False, assumehaveparentrevisions=False,
                      deltaprevious=False):
        return self._revlog.emitrevisions(
            nodes, nodesorder=nodesorder, revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            deltaprevious=deltaprevious)

    def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
                    node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
                    cachedelta=None):
        return self._revlog.addrevision(revisiondata, transaction, linkrev,
                                        p1, p2, node=node, flags=flags,
                                        cachedelta=cachedelta)

-    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
+    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None,
+                 maybemissingparents=False):
+        if maybemissingparents:
+            raise error.Abort(_('revlog storage does not support missing '
+                                'parents write mode'))
+
        return self._revlog.addgroup(deltas, linkmapper, transaction,
                                     addrevisioncb=addrevisioncb)
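This hunk is the point of the series: ``addgroup()`` grows a ``maybemissingparents`` flag so that storage backends able to model missing parents (such as the SQLite store, e.g. when receiving shallow data) can be told to expect them, while revlog-backed filelogs refuse outright rather than record a broken DAG. A hedged caller-side sketch; ``repo``, ``deltas``, ``linkmapper``, and ``tr`` are assumed to be in scope as in any changegroup application:

fl = repo.file(b'path/to/file')

# Ordinary exchange: every parent is either already stored or arrives
# in this same group, so the default (False) applies.
fl.addgroup(deltas, linkmapper, tr)

# A store that can represent absent parents opts in explicitly. On a
# plain filelog this raises error.Abort ('revlog storage does not
# support missing parents write mode').
fl.addgroup(deltas, linkmapper, tr, maybemissingparents=True)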

    def getstrippoint(self, minlink):
        return self._revlog.getstrippoint(minlink)

    def strip(self, minlink, transaction):
        return self._revlog.strip(minlink, transaction)

    def censorrevision(self, tr, node, tombstone=b''):
        return self._revlog.censorrevision(tr, node, tombstone=tombstone)

    def files(self):
        return self._revlog.files()

    def read(self, node):
        return storageutil.filtermetadata(self.revision(node))

    def add(self, text, meta, transaction, link, p1=None, p2=None):
        if meta or text.startswith('\1\n'):
            text = storageutil.packmeta(meta, text)
        return self.addrevision(text, transaction, link, p1, p2)

    def renamed(self, node):
        return storageutil.filerevisioncopied(self, node)

    def size(self, rev):
        """return the size of a given revision"""

        # for revisions with renames, we have to go the slow way
        node = self.node(rev)
        if self.renamed(node):
            return len(self.read(node))
        if self.iscensored(rev):
            return 0

        # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
        return self._revlog.size(rev)

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """
        return not storageutil.filedataequivalent(self, node, text)

    def verifyintegrity(self, state):
        return self._revlog.verifyintegrity(state)

    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        return self._revlog.storageinfo(
            exclusivefiles=exclusivefiles, sharedfiles=sharedfiles,
            revisionscount=revisionscount, trackedsize=trackedsize,
            storedsize=storedsize)

    # TODO these aren't part of the interface and aren't internal methods.
    # Callers should be fixed to not use them.

    # Used by bundlefilelog, unionfilelog.
    @property
    def indexfile(self):
        return self._revlog.indexfile

    @indexfile.setter
    def indexfile(self, value):
        self._revlog.indexfile = value

    # Used by repo upgrade.
    def clone(self, tr, destrevlog, **kwargs):
        if not isinstance(destrevlog, filelog):
            raise error.ProgrammingError('expected filelog to clone()')

        return self._revlog.clone(tr, destrevlog._revlog, **kwargs)

class narrowfilelog(filelog):
    """Filelog variation to be used with narrow stores."""

    def __init__(self, opener, path, narrowmatch):
        super(narrowfilelog, self).__init__(opener, path)
        self._narrowmatch = narrowmatch

    def renamed(self, node):
        res = super(narrowfilelog, self).renamed(node)

        # Renames that come from outside the narrowspec are problematic
        # because we may lack the base text for the rename. This can result
        # in code attempting to walk the ancestry or compute a diff
        # encountering a missing revision. We address this by silently
        # removing rename metadata if the source file is outside the
        # narrow spec.
        #
        # A better solution would be to see if the base revision is available,
        # rather than assuming it isn't.
        #
        # An even better solution would be to teach all consumers of rename
        # metadata that the base revision may not be available.
        #
        # TODO consider better ways of doing this.
        if res and not self._narrowmatch(res[0]):
            return None

        return res

    def size(self, rev):
        # Because we have a custom renamed() that may lie, we need to call
        # the base renamed() to report accurate results.
        node = self.node(rev)
        if super(narrowfilelog, self).renamed(node):
            return len(self.read(node))
        else:
            return super(narrowfilelog, self).size(rev)

    def cmp(self, node, text):
        different = super(narrowfilelog, self).cmp(node, text)

        # Because renamed() may lie, we may get false positives for
        # different content. Check for this by comparing against the original
        # renamed() implementation.
        if different:
            if super(narrowfilelog, self).renamed(node):
                t2 = self.read(node)
                return t2 != text

        return different
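The copy-metadata filtering in ``renamed()`` above reduces to one predicate: drop the rename source when it falls outside the narrow matcher. A toy, self-contained illustration (``narrowmatch`` and the paths are invented for the example):

def narrowmatch(path):
    # Stand-in for the repository's narrowspec matcher.
    return path.startswith(b'included/')

def filterrename(copysource):
    """Mimic narrowfilelog.renamed(): hide out-of-narrow rename sources.

    ``copysource`` is the (path, filenode) pair renamed() yields, or None.
    """
    if copysource and not narrowmatch(copysource[0]):
        return None
    return copysource

assert filterrename((b'included/a.txt', b'\x11' * 20)) is not None
assert filterrename((b'excluded/a.txt', b'\x11' * 20)) is None
assert filterrename(None) is None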
@@ -1,1845 +1,1851 @@
# repository.py - Interfaces and base classes for repositories and peers.
#
# Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from .i18n import _
from . import (
    error,
)
from .utils import (
    interfaceutil,
)

# When narrowing is finalized and no longer subject to format changes,
# we should move this to just "narrow" or similar.
NARROW_REQUIREMENT = 'narrowhg-experimental'

# Local repository feature string.

# Revlogs are being used for file storage.
REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
# The storage part of the repository is shared from an external source.
REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
# LFS supported for backing file storage.
REPO_FEATURE_LFS = b'lfs'
# Repository supports being stream cloned.
REPO_FEATURE_STREAM_CLONE = b'streamclone'

REVISION_FLAG_CENSORED = 1 << 15
REVISION_FLAG_ELLIPSIS = 1 << 14
REVISION_FLAG_EXTSTORED = 1 << 13

REVISION_FLAGS_KNOWN = (
    REVISION_FLAG_CENSORED | REVISION_FLAG_ELLIPSIS | REVISION_FLAG_EXTSTORED)
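Because the flag constants are single high bits, a consumer can mask incoming flags against ``REVISION_FLAGS_KNOWN`` to detect anything it does not understand. A small sketch (the validation helper is hypothetical, not part of this module):

REVISION_FLAG_CENSORED = 1 << 15
REVISION_FLAG_ELLIPSIS = 1 << 14
REVISION_FLAG_EXTSTORED = 1 << 13

REVISION_FLAGS_KNOWN = (
    REVISION_FLAG_CENSORED | REVISION_FLAG_ELLIPSIS | REVISION_FLAG_EXTSTORED)

def checkknownflags(flags):
    unknown = flags & ~REVISION_FLAGS_KNOWN
    if unknown:
        raise ValueError('unknown revision flags: %#x' % unknown)

checkknownflags(REVISION_FLAG_CENSORED | REVISION_FLAG_ELLIPSIS)  # fine

try:
    checkknownflags(1 << 12)  # a bit outside the known mask
except ValueError as e:
    print(e)  # unknown revision flags: 0x1000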

class ipeerconnection(interfaceutil.Interface):
    """Represents a "connection" to a repository.

    This is the base interface for representing a connection to a repository.
    It holds basic properties and methods applicable to all peer types.

    This is not a complete interface definition and should not be used
    outside of this module.
    """
    ui = interfaceutil.Attribute("""ui.ui instance""")

    def url():
        """Returns a URL string representing this peer.

        Currently, implementations expose the raw URL used to construct the
        instance. It may contain credentials as part of the URL. The
        expectations of the value aren't well-defined and this could lead to
        data leakage.

        TODO audit/clean consumers and more clearly define the contents of this
        value.
        """

    def local():
        """Returns a local repository instance.

        If the peer represents a local repository, returns an object that
        can be used to interface with it. Otherwise returns ``None``.
        """

    def peer():
        """Returns an object conforming to this interface.

        Most implementations will ``return self``.
        """

    def canpush():
        """Returns a boolean indicating if this peer can be pushed to."""

    def close():
        """Close the connection to this peer.

        This is called when the peer will no longer be used. Resources
        associated with the peer should be cleaned up.
        """

class ipeercapabilities(interfaceutil.Interface):
    """Peer sub-interface related to capabilities."""

    def capable(name):
        """Determine support for a named capability.

        Returns ``False`` if capability not supported.

        Returns ``True`` if boolean capability is supported. Returns a string
        if capability support is non-boolean.

        Capability strings may or may not map to wire protocol capabilities.
        """

    def requirecap(name, purpose):
        """Require a capability to be present.

        Raises a ``CapabilityError`` if the capability isn't present.
        """

class ipeercommands(interfaceutil.Interface):
    """Client-side interface for communicating over the wire protocol.

    This interface is used as a gateway to the Mercurial wire protocol.
    Methods commonly call wire protocol commands of the same name.
    """

    def branchmap():
        """Obtain heads in named branches.

        Returns a dict mapping branch name to an iterable of nodes that are
        heads on that branch.
        """

    def capabilities():
        """Obtain capabilities of the peer.

        Returns a set of string capabilities.
        """

    def clonebundles():
        """Obtains the clone bundles manifest for the repo.

        Returns the manifest as unparsed bytes.
        """

    def debugwireargs(one, two, three=None, four=None, five=None):
        """Used to facilitate debugging of arguments passed over the wire."""

    def getbundle(source, **kwargs):
        """Obtain remote repository data as a bundle.

        This command is how the bulk of repository data is transferred from
        the peer to the local repository.

        Returns a generator of bundle data.
        """

    def heads():
        """Determine all known head revisions in the peer.

        Returns an iterable of binary nodes.
        """

    def known(nodes):
        """Determine whether multiple nodes are known.

        Accepts an iterable of nodes whose presence to check for.

        Returns an iterable of booleans indicating whether the corresponding
        node at that index is known to the peer.
        """

    def listkeys(namespace):
        """Obtain all keys in a pushkey namespace.

        Returns an iterable of key names.
        """

    def lookup(key):
        """Resolve a value to a known revision.

        Returns a binary node of the resolved revision on success.
        """

    def pushkey(namespace, key, old, new):
        """Set a value using the ``pushkey`` protocol.

        Arguments correspond to the pushkey namespace and key to operate on and
        the old and new values for that key.

        Returns a string with the peer result. The value inside varies by the
        namespace.
        """

    def stream_out():
        """Obtain streaming clone data.

        Successful result should be a generator of data chunks.
        """

    def unbundle(bundle, heads, url):
        """Transfer repository data to the peer.

        This is how the bulk of data during a push is transferred.

        Returns the integer number of heads added to the peer.
        """

class ipeerlegacycommands(interfaceutil.Interface):
    """Interface for implementing support for legacy wire protocol commands.

    Wire protocol commands transition to legacy status when they are no longer
    used by modern clients. To facilitate identifying which commands are
    legacy, the interfaces are split.
    """

    def between(pairs):
        """Obtain nodes between pairs of nodes.

        ``pairs`` is an iterable of node pairs.

        Returns an iterable of iterables of nodes corresponding to each
        requested pair.
        """

    def branches(nodes):
        """Obtain ancestor changesets of specific nodes back to a branch point.

        For each requested node, the peer finds the first ancestor node that is
        a DAG root or is a merge.

        Returns an iterable of iterables with the resolved values for each node.
        """

    def changegroup(nodes, source):
        """Obtain a changegroup with data for descendants of specified nodes."""

    def changegroupsubset(bases, heads, source):
        pass

class ipeercommandexecutor(interfaceutil.Interface):
    """Represents a mechanism to execute remote commands.

    This is the primary interface for requesting that wire protocol commands
    be executed. Instances of this interface are active in a context manager
    and have a well-defined lifetime. When the context manager exits, all
    outstanding requests are waited on.
    """

    def callcommand(name, args):
        """Request that a named command be executed.

        Receives the command name and a dictionary of command arguments.

        Returns a ``concurrent.futures.Future`` that will resolve to the
        result of that command request. That exact value is left up to
        the implementation and possibly varies by command.

        Not all commands can coexist with other commands in an executor
        instance: it depends on the underlying wire protocol transport being
        used and the command itself.

        Implementations MAY call ``sendcommands()`` automatically if the
        requested command cannot coexist with other commands in this executor.

        Implementations MAY call ``sendcommands()`` automatically when the
        future's ``result()`` is called. So, consumers using multiple
        commands with an executor MUST ensure that ``result()`` is not called
        until all command requests have been issued.
        """

    def sendcommands():
        """Trigger submission of queued command requests.

        Not all transports submit commands as soon as they are requested to
        run. When called, this method forces queued command requests to be
        issued. It will no-op if all commands have already been sent.

        When called, no more new commands may be issued with this executor.
        """

    def close():
        """Signal that this command request is finished.

        When called, no more new commands may be issued. All outstanding
        commands that have previously been issued are waited on before
        returning. This not only includes waiting for the futures to resolve,
        but also waiting for all response data to arrive. In other words,
        calling this waits for all on-wire state for issued command requests
        to finish.

        When used as a context manager, this method is called when exiting the
        context manager.

        This method may call ``sendcommands()`` if there are buffered commands.
        """

class ipeerrequests(interfaceutil.Interface):
    """Interface for executing commands on a peer."""

    def commandexecutor():
        """A context manager that resolves to an ipeercommandexecutor.

        The object this resolves to can be used to issue command requests
        to the peer.

        Callers should call its ``callcommand`` method to issue command
        requests.

        A new executor should be obtained for each distinct set of commands
        (possibly just a single command) that the consumer wants to execute
        as part of a single operation or round trip. This is because some
        peers are half-duplex and/or don't support persistent connections.
        e.g. in the case of HTTP peers, commands sent to an executor represent
        a single HTTP request. While some peers may support multiple command
        sends over the wire per executor, consumers need to code to the least
        capable peer. So it should be assumed that command executors buffer
        called commands until they are told to send them and that each
        command executor could result in a new connection or wire-level request
        being issued.
        """

class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified interface for peer repositories.

    All peer instances must conform to this interface.
    """

class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified peer interface for wire protocol version 2 peers."""

    apidescriptor = interfaceutil.Attribute(
        """Data structure holding description of server API.""")

@interfaceutil.implementer(ipeerbase)
class peer(object):
    """Base class for peer repositories."""

    def capable(self, name):
        caps = self.capabilities()
        if name in caps:
            return True

        name = '%s=' % name
        for cap in caps:
            if cap.startswith(name):
                return cap[len(name):]

        return False

    def requirecap(self, name, purpose):
        if self.capable(name):
            return

        raise error.CapabilityError(
            _('cannot %s; remote repository does not support the %r '
              'capability') % (purpose, name))
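``capable()`` thus has a three-valued result: ``True`` for a bare capability, the text after ``=`` for a valued one, and ``False`` when absent. A sketch against a stub subclass; the capability names here are made up for illustration:

class stubpeer(peer):
    def capabilities(self):
        return {'branchmap', 'answer=42'}

p = stubpeer()
assert p.capable('branchmap') is True   # bare capability
assert p.capable('answer') == '42'      # valued capability: text after '='
assert p.capable('unbundle') is False   # absent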

class iverifyproblem(interfaceutil.Interface):
    """Represents a problem with the integrity of the repository.

    Instances of this interface are emitted to describe an integrity issue
    with a repository (e.g. corrupt storage, missing data, etc).

    Instances are essentially messages associated with severity.
    """
    warning = interfaceutil.Attribute(
        """Message indicating a non-fatal problem.""")

    error = interfaceutil.Attribute(
        """Message indicating a fatal problem.""")

    node = interfaceutil.Attribute(
        """Revision encountering the problem.

        ``None`` means the problem doesn't apply to a single revision.
        """)

class irevisiondelta(interfaceutil.Interface):
    """Represents a delta between one revision and another.

    Instances convey enough information to allow a revision to be exchanged
    with another repository.

    Instances represent the fulltext revision data or a delta against
    another revision. Therefore the ``revision`` and ``delta`` attributes
    are mutually exclusive.

    Typically used for changegroup generation.
    """

    node = interfaceutil.Attribute(
        """20 byte node of this revision.""")

    p1node = interfaceutil.Attribute(
        """20 byte node of 1st parent of this revision.""")

    p2node = interfaceutil.Attribute(
        """20 byte node of 2nd parent of this revision.""")

    linknode = interfaceutil.Attribute(
        """20 byte node of the changelog revision this node is linked to.""")

    flags = interfaceutil.Attribute(
        """2 bytes of integer flags that apply to this revision.

        This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
        """)

    basenode = interfaceutil.Attribute(
        """20 byte node of the revision this data is a delta against.

        ``nullid`` indicates that the revision is a full revision and not
        a delta.
        """)

    baserevisionsize = interfaceutil.Attribute(
        """Size of base revision this delta is against.

        May be ``None`` if ``basenode`` is ``nullid``.
        """)

    revision = interfaceutil.Attribute(
        """Raw fulltext of revision data for this node.""")

    delta = interfaceutil.Attribute(
        """Delta between ``basenode`` and ``node``.

        Stored in the bdiff delta format.
        """)

class ifilerevisionssequence(interfaceutil.Interface):
    """Contains index data for all revisions of a file.

    Types implementing this behave like lists of tuples. The index
    in the list corresponds to the revision number. The values contain
    index metadata.

    The *null* revision (revision number -1) is always the last item
    in the index.
    """

    def __len__():
        """The total number of revisions."""

    def __getitem__(rev):
        """Returns the object having a specific revision number.

        Returns an 8-tuple with the following fields:

        offset+flags
            Contains the offset and flags for the revision. 64-bit unsigned
            integer where first 6 bytes are the offset and the next 2 bytes
            are flags. The offset can be 0 if it is not used by the store.
        compressed size
            Size of the revision data in the store. It can be 0 if it isn't
            needed by the store.
        uncompressed size
            Fulltext size. It can be 0 if it isn't needed by the store.
        base revision
            Revision number of revision the delta for storage is encoded
            against. -1 indicates not encoded against a base revision.
        link revision
            Revision number of changelog revision this entry is related to.
        p1 revision
            Revision number of 1st parent. -1 if no 1st parent.
        p2 revision
            Revision number of 2nd parent. -1 if no 2nd parent.
        node
            Binary node value for this revision number.

        Negative values should index off the end of the sequence. ``-1``
        should return the null revision. ``-2`` should return the most
        recent revision.
        """

    def __contains__(rev):
        """Whether a revision number exists."""

    def insert(self, i, entry):
        """Add an item to the index at specific revision."""

class ifileindex(interfaceutil.Interface):
    """Storage interface for index data of a single file.

    File storage data is divided into index metadata and data storage.
    This interface defines the index portion of the interface.

    The index logically consists of:

    * A mapping between revision numbers and nodes.
    * DAG data (storing and querying the relationship between nodes).
    * Metadata to facilitate storage.
    """
    def __len__():
        """Obtain the number of revisions stored for this file."""

    def __iter__():
        """Iterate over revision numbers for this file."""

    def hasnode(node):
        """Returns a bool indicating if a node is known to this store.

        Implementations must only return True for full, binary node values:
        hex nodes, revision numbers, and partial node matches must be
        rejected.

        The null node is never present.
        """

    def revs(start=0, stop=None):
        """Iterate over revision numbers for this file, with control."""

    def parents(node):
        """Returns a 2-tuple of parent nodes for a revision.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def rev(node):
        """Obtain the revision number given a node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``IndexError`` if the node is not known.
        """

    def lookup(node):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a string
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def iscensored(rev):
        """Return whether a revision's content has been censored."""

    def commonancestorsheads(node1, node2):
        """Obtain an iterable of nodes containing heads of common ancestors.

        See ``ancestor.commonancestorsheads()``.
        """

    def descendants(revs):
        """Obtain descendant revision numbers for a set of revision numbers.

        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
        """

    def heads(start=None, stop=None):
        """Obtain a list of nodes that are DAG heads, with control.

        The set of revisions examined can be limited by specifying
        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
        iterable of nodes. DAG traversal starts at earlier revision
        ``start`` and iterates forward until any node in ``stop`` is
        encountered.
        """

    def children(node):
        """Obtain nodes that are children of a node.

        Returns a list of nodes.
        """

class ifiledata(interfaceutil.Interface):
    """Storage interface for data storage of a specific file.

    This complements ``ifileindex`` and provides an interface for accessing
    data for a tracked file.
    """
    def size(rev):
        """Obtain the fulltext size of file data.

        Any metadata is excluded from size measurements.
        """

    def revision(node, raw=False):
        """Obtain fulltext data for a node.

        By default, any storage transformations are applied before the data
579 By default, any storage transformations are applied before the data
580 is returned. If ``raw`` is True, non-raw storage transformations
580 is returned. If ``raw`` is True, non-raw storage transformations
581 are not applied.
581 are not applied.
582
582
583 The fulltext data may contain a header containing metadata. Most
583 The fulltext data may contain a header containing metadata. Most
584 consumers should use ``read()`` to obtain the actual file data.
584 consumers should use ``read()`` to obtain the actual file data.
585 """
585 """
586
586
587 def read(node):
587 def read(node):
588 """Resolve file fulltext data.
588 """Resolve file fulltext data.
589
589
590 This is similar to ``revision()`` except any metadata in the data
590 This is similar to ``revision()`` except any metadata in the data
591 headers is stripped.
591 headers is stripped.
592 """
592 """
593
593
594 def renamed(node):
594 def renamed(node):
595 """Obtain copy metadata for a node.
595 """Obtain copy metadata for a node.
596
596
597 Returns ``False`` if no copy metadata is stored or a 2-tuple of
597 Returns ``False`` if no copy metadata is stored or a 2-tuple of
598 (path, node) from which this revision was copied.
598 (path, node) from which this revision was copied.
599 """
599 """
600
600
601 def cmp(node, fulltext):
601 def cmp(node, fulltext):
602 """Compare fulltext to another revision.
602 """Compare fulltext to another revision.
603
603
604 Returns True if the fulltext is different from what is stored.
604 Returns True if the fulltext is different from what is stored.
605
605
606 This takes copy metadata into account.
606 This takes copy metadata into account.
607
607
608 TODO better document the copy metadata and censoring logic.
608 TODO better document the copy metadata and censoring logic.
609 """
609 """
610
610
611 def emitrevisions(nodes,
611 def emitrevisions(nodes,
612 nodesorder=None,
612 nodesorder=None,
613 revisiondata=False,
613 revisiondata=False,
614 assumehaveparentrevisions=False,
614 assumehaveparentrevisions=False,
615 deltaprevious=False):
615 deltaprevious=False):
616 """Produce ``irevisiondelta`` for revisions.
616 """Produce ``irevisiondelta`` for revisions.
617
617
618 Given an iterable of nodes, emits objects conforming to the
618 Given an iterable of nodes, emits objects conforming to the
619 ``irevisiondelta`` interface that describe revisions in storage.
619 ``irevisiondelta`` interface that describe revisions in storage.
620
620
621 This method is a generator.
621 This method is a generator.
622
622
623 The input nodes may be unordered. Implementations must ensure that a
623 The input nodes may be unordered. Implementations must ensure that a
624 node's parents are emitted before the node itself. Transitively, this
624 node's parents are emitted before the node itself. Transitively, this
625 means that a node may only be emitted once all its ancestors in
625 means that a node may only be emitted once all its ancestors in
626 ``nodes`` have also been emitted.
626 ``nodes`` have also been emitted.
627
627
628 By default, emits "index" data (the ``node``, ``p1node``, and
628 By default, emits "index" data (the ``node``, ``p1node``, and
629 ``p2node`` attributes). If ``revisiondata`` is set, revision data
629 ``p2node`` attributes). If ``revisiondata`` is set, revision data
630 will also be present on the emitted objects.
630 will also be present on the emitted objects.
631
631
632 With default argument values, implementations can choose to emit
632 With default argument values, implementations can choose to emit
633 either fulltext revision data or a delta. When emitting deltas,
633 either fulltext revision data or a delta. When emitting deltas,
634 implementations must consider whether the delta's base revision
634 implementations must consider whether the delta's base revision
635 fulltext is available to the receiver.
635 fulltext is available to the receiver.
636
636
637 The base revision fulltext is guaranteed to be available if any of
637 The base revision fulltext is guaranteed to be available if any of
638 the following are met:
638 the following are met:
639
639
640 * Its fulltext revision was emitted by this method call.
640 * Its fulltext revision was emitted by this method call.
641 * A delta for that revision was emitted by this method call.
641 * A delta for that revision was emitted by this method call.
642 * ``assumehaveparentrevisions`` is True and the base revision is a
642 * ``assumehaveparentrevisions`` is True and the base revision is a
643 parent of the node.
643 parent of the node.
644
644
645 ``nodesorder`` can be used to control the order that revisions are
645 ``nodesorder`` can be used to control the order that revisions are
646 emitted. By default, revisions can be reordered as long as they are
646 emitted. By default, revisions can be reordered as long as they are
647 in DAG topological order (see above). If the value is ``nodes``,
647 in DAG topological order (see above). If the value is ``nodes``,
648 the iteration order from ``nodes`` should be used. If the value is
648 the iteration order from ``nodes`` should be used. If the value is
649 ``storage``, then the native order from the backing storage layer
649 ``storage``, then the native order from the backing storage layer
650 is used. (Not all storage layers have a strong ordering, and the
650 is used. (Not all storage layers have a strong ordering, and the
651 behavior of this mode is storage-dependent.) ``nodes`` ordering can force
651 behavior of this mode is storage-dependent.) ``nodes`` ordering can force
652 revisions to be emitted before their ancestors, so consumers should
652 revisions to be emitted before their ancestors, so consumers should
653 use it with care.
653 use it with care.
654
654
655 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
655 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
656 be set and it is the caller's responsibility to resolve it, if needed.
656 be set and it is the caller's responsibility to resolve it, if needed.
657
657
658 If ``deltaprevious`` is True and revision data is requested, all
658 If ``deltaprevious`` is True and revision data is requested, all
659 revision data should be emitted as deltas against the revision
659 revision data should be emitted as deltas against the revision
660 emitted just prior. The initial revision should be a delta against
660 emitted just prior. The initial revision should be a delta against
661 its 1st parent.
661 its 1st parent.
662 """
662 """
663
663
664 class ifilemutation(interfaceutil.Interface):
664 class ifilemutation(interfaceutil.Interface):
665 """Storage interface for mutation events of a tracked file."""
665 """Storage interface for mutation events of a tracked file."""
666
666
667 def add(filedata, meta, transaction, linkrev, p1, p2):
667 def add(filedata, meta, transaction, linkrev, p1, p2):
668 """Add a new revision to the store.
668 """Add a new revision to the store.
669
669
670 Takes file data, dictionary of metadata, a transaction, linkrev,
670 Takes file data, dictionary of metadata, a transaction, linkrev,
671 and parent nodes.
671 and parent nodes.
672
672
673 Returns the node that was added.
673 Returns the node that was added.
674
674
675 May no-op if a revision matching the supplied data is already stored.
675 May no-op if a revision matching the supplied data is already stored.
676 """
676 """
677
677
678 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
678 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
679 flags=0, cachedelta=None):
679 flags=0, cachedelta=None):
680 """Add a new revision to the store.
680 """Add a new revision to the store.
681
681
682 This is similar to ``add()`` except it operates at a lower level.
682 This is similar to ``add()`` except it operates at a lower level.
683
683
684 The data passed in already contains a metadata header, if any.
684 The data passed in already contains a metadata header, if any.
685
685
686 ``node`` and ``flags`` can be used to define the expected node and
686 ``node`` and ``flags`` can be used to define the expected node and
687 the flags to use with storage. ``flags`` is a bitwise value composed
687 the flags to use with storage. ``flags`` is a bitwise value composed
688 of the various ``REVISION_FLAG_*`` constants.
688 of the various ``REVISION_FLAG_*`` constants.
689
689
690 ``add()`` is usually called when adding files from e.g. the working
690 ``add()`` is usually called when adding files from e.g. the working
691 directory. ``addrevision()`` is often called by ``add()`` and for
691 directory. ``addrevision()`` is often called by ``add()`` and for
692 scenarios where revision data has already been computed, such as when
692 scenarios where revision data has already been computed, such as when
693 applying raw data from a peer repo.
693 applying raw data from a peer repo.
694 """
694 """
695
695
696 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
696 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None,
697 maybemissingparents=False):
697 """Process a series of deltas for storage.
698 """Process a series of deltas for storage.
698
699
699 ``deltas`` is an iterable of 7-tuples of
700 ``deltas`` is an iterable of 7-tuples of
700 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
701 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
701 to add.
702 to add.
702
703
703 The ``delta`` field contains ``mpatch`` data to apply to a base
704 The ``delta`` field contains ``mpatch`` data to apply to a base
704 revision, identified by ``deltabase``. The base node can be
705 revision, identified by ``deltabase``. The base node can be
705 ``nullid``, in which case the header from the delta can be ignored
706 ``nullid``, in which case the header from the delta can be ignored
706 and the delta used as the fulltext.
707 and the delta used as the fulltext.
707
708
708 ``addrevisioncb`` should be called for each node as it is committed.
709 ``addrevisioncb`` should be called for each node as it is committed.
709
710
711 ``maybemissingparents`` is a bool indicating whether the incoming
712 data may reference parents/ancestor revisions that aren't present.
713 This flag is set when receiving data into a "shallow" store that
714 doesn't hold all history.
715
710 Returns a list of nodes that were processed. A node will be in the list
716 Returns a list of nodes that were processed. A node will be in the list
711 even if it existed in the store previously.
717 even if it existed in the store previously.
712 """
718 """
713
719
714 def censorrevision(tr, node, tombstone=b''):
720 def censorrevision(tr, node, tombstone=b''):
715 """Remove the content of a single revision.
721 """Remove the content of a single revision.
716
722
717 The specified ``node`` will have its content purged from storage.
723 The specified ``node`` will have its content purged from storage.
718 Future attempts to access the revision data for this node will
724 Future attempts to access the revision data for this node will
719 result in failure.
725 result in failure.
720
726
721 A ``tombstone`` message can optionally be stored. This message may be
727 A ``tombstone`` message can optionally be stored. This message may be
722 displayed to users when they attempt to access the missing revision
728 displayed to users when they attempt to access the missing revision
723 data.
729 data.
724
730
725 Storage backends may have stored deltas against the previous content
731 Storage backends may have stored deltas against the previous content
726 in this revision. As part of censoring a revision, these storage
732 in this revision. As part of censoring a revision, these storage
727 backends are expected to rewrite any internally stored deltas such
733 backends are expected to rewrite any internally stored deltas such
728 that they no longer reference the deleted content.
734 that they no longer reference the deleted content.
729 """
735 """
730
736
731 def getstrippoint(minlink):
737 def getstrippoint(minlink):
732 """Find the minimum revision that must be stripped to strip a linkrev.
738 """Find the minimum revision that must be stripped to strip a linkrev.
733
739
734 Returns a 2-tuple containing the minimum revision number and a set
740 Returns a 2-tuple containing the minimum revision number and a set
735 of all revision numbers that would be broken by this strip.
741 of all revision numbers that would be broken by this strip.
736
742
737 TODO this is highly revlog centric and should be abstracted into
743 TODO this is highly revlog centric and should be abstracted into
738 a higher-level deletion API. ``repair.strip()`` relies on this.
744 a higher-level deletion API. ``repair.strip()`` relies on this.
739 """
745 """
740
746
741 def strip(minlink, transaction):
747 def strip(minlink, transaction):
742 """Remove storage of items starting at a linkrev.
748 """Remove storage of items starting at a linkrev.
743
749
744 This uses ``getstrippoint()`` to determine the first node to remove.
750 This uses ``getstrippoint()`` to determine the first node to remove.
745 Then it effectively truncates storage for all revisions after that.
751 Then it effectively truncates storage for all revisions after that.
746
752
747 TODO this is highly revlog centric and should be abstracted into a
753 TODO this is highly revlog centric and should be abstracted into a
748 higher-level deletion API.
754 higher-level deletion API.
749 """
755 """
750
756
751 class ifilestorage(ifileindex, ifiledata, ifilemutation):
757 class ifilestorage(ifileindex, ifiledata, ifilemutation):
752 """Complete storage interface for a single tracked file."""
758 """Complete storage interface for a single tracked file."""
753
759
754 def files():
760 def files():
755 """Obtain paths that are backing storage for this file.
761 """Obtain paths that are backing storage for this file.
756
762
757 TODO this is used heavily by verify code and there should probably
763 TODO this is used heavily by verify code and there should probably
758 be a better API for that.
764 be a better API for that.
759 """
765 """
760
766
761 def storageinfo(exclusivefiles=False, sharedfiles=False,
767 def storageinfo(exclusivefiles=False, sharedfiles=False,
762 revisionscount=False, trackedsize=False,
768 revisionscount=False, trackedsize=False,
763 storedsize=False):
769 storedsize=False):
764 """Obtain information about storage for this file's data.
770 """Obtain information about storage for this file's data.
765
771
766 Returns a dict describing storage for this tracked path. The keys
772 Returns a dict describing storage for this tracked path. The keys
767 in the dict map to arguments of the same name. The arguments are bools
773 in the dict map to arguments of the same name. The arguments are bools
768 indicating whether to calculate and obtain that data.
774 indicating whether to calculate and obtain that data.
769
775
770 exclusivefiles
776 exclusivefiles
771 Iterable of (vfs, path) describing files that are exclusively
777 Iterable of (vfs, path) describing files that are exclusively
772 used to back storage for this tracked path.
778 used to back storage for this tracked path.
773
779
774 sharedfiles
780 sharedfiles
775 Iterable of (vfs, path) describing files that are used to back
781 Iterable of (vfs, path) describing files that are used to back
776 storage for this tracked path. Those files may also provide storage
782 storage for this tracked path. Those files may also provide storage
777 for other stored entities.
783 for other stored entities.
778
784
779 revisionscount
785 revisionscount
780 Number of revisions available for retrieval.
786 Number of revisions available for retrieval.
781
787
782 trackedsize
788 trackedsize
783 Total size in bytes of all tracked revisions. This is a sum of the
789 Total size in bytes of all tracked revisions. This is a sum of the
784 length of the fulltext of all revisions.
790 length of the fulltext of all revisions.
785
791
786 storedsize
792 storedsize
787 Total size in bytes used to store data for all tracked revisions.
793 Total size in bytes used to store data for all tracked revisions.
788 This is commonly less than ``trackedsize`` due to internal usage
794 This is commonly less than ``trackedsize`` due to internal usage
789 of deltas rather than fulltext revisions.
795 of deltas rather than fulltext revisions.
790
796
791 Not all storage backends may support all queries or have a reasonable
797 Not all storage backends may support all queries or have a reasonable
792 value to use. In that case, the value should be set to ``None`` and
798 value to use. In that case, the value should be set to ``None`` and
793 callers are expected to handle this special value.
799 callers are expected to handle this special value.
794 """
800 """
795
801
796 def verifyintegrity(state):
802 def verifyintegrity(state):
797 """Verifies the integrity of file storage.
803 """Verifies the integrity of file storage.
798
804
799 ``state`` is a dict holding state of the verifier process. It can be
805 ``state`` is a dict holding state of the verifier process. It can be
800 used to communicate data between invocations of multiple storage
806 used to communicate data between invocations of multiple storage
801 primitives.
807 primitives.
802
808
803 If individual revisions cannot have their revision content resolved,
809 If individual revisions cannot have their revision content resolved,
804 the method is expected to set the ``skipread`` key to a set of nodes
810 the method is expected to set the ``skipread`` key to a set of nodes
805 that encountered problems.
811 that encountered problems.
806
812
807 The method yields objects conforming to the ``iverifyproblem``
813 The method yields objects conforming to the ``iverifyproblem``
808 interface.
814 interface.
809 """
815 """
810
816
811 class idirs(interfaceutil.Interface):
817 class idirs(interfaceutil.Interface):
812 """Interface representing a collection of directories from paths.
818 """Interface representing a collection of directories from paths.
813
819
814 This interface is essentially a derived data structure representing
820 This interface is essentially a derived data structure representing
815 directories from a collection of paths.
821 directories from a collection of paths.
816 """
822 """
817
823
818 def addpath(path):
824 def addpath(path):
819 """Add a path to the collection.
825 """Add a path to the collection.
820
826
821 All directories in the path will be added to the collection.
827 All directories in the path will be added to the collection.
822 """
828 """
823
829
824 def delpath(path):
830 def delpath(path):
825 """Remove a path from the collection.
831 """Remove a path from the collection.
826
832
827 If the removed path was the last one in a particular directory, the
833 If the removed path was the last one in a particular directory, the
828 directory is removed from the collection.
834 directory is removed from the collection.
829 """
835 """
830
836
831 def __iter__():
837 def __iter__():
832 """Iterate over the directories in this collection of paths."""
838 """Iterate over the directories in this collection of paths."""
833
839
834 def __contains__(path):
840 def __contains__(path):
835 """Whether a specific directory is in this collection."""
841 """Whether a specific directory is in this collection."""
836
842
837 class imanifestdict(interfaceutil.Interface):
843 class imanifestdict(interfaceutil.Interface):
838 """Interface representing a manifest data structure.
844 """Interface representing a manifest data structure.
839
845
840 A manifest is effectively a dict mapping paths to entries. Each entry
846 A manifest is effectively a dict mapping paths to entries. Each entry
841 consists of a binary node and extra flags affecting that entry.
847 consists of a binary node and extra flags affecting that entry.
842 """
848 """
843
849
844 def __getitem__(path):
850 def __getitem__(path):
845 """Returns the binary node value for a path in the manifest.
851 """Returns the binary node value for a path in the manifest.
846
852
847 Raises ``KeyError`` if the path does not exist in the manifest.
853 Raises ``KeyError`` if the path does not exist in the manifest.
848
854
849 Equivalent to ``self.find(path)[0]``.
855 Equivalent to ``self.find(path)[0]``.
850 """
856 """
851
857
852 def find(path):
858 def find(path):
853 """Returns the entry for a path in the manifest.
859 """Returns the entry for a path in the manifest.
854
860
855 Returns a 2-tuple of (node, flags).
861 Returns a 2-tuple of (node, flags).
856
862
857 Raises ``KeyError`` if the path does not exist in the manifest.
863 Raises ``KeyError`` if the path does not exist in the manifest.
858 """
864 """
859
865
860 def __len__():
866 def __len__():
861 """Return the number of entries in the manifest."""
867 """Return the number of entries in the manifest."""
862
868
863 def __nonzero__():
869 def __nonzero__():
864 """Returns True if the manifest has entries, False otherwise."""
870 """Returns True if the manifest has entries, False otherwise."""
865
871
866 __bool__ = __nonzero__
872 __bool__ = __nonzero__
867
873
868 def __setitem__(path, node):
874 def __setitem__(path, node):
869 """Define the node value for a path in the manifest.
875 """Define the node value for a path in the manifest.
870
876
871 If the path is already in the manifest, its flags will be copied to
877 If the path is already in the manifest, its flags will be copied to
872 the new entry.
878 the new entry.
873 """
879 """
874
880
875 def __contains__(path):
881 def __contains__(path):
876 """Whether a path exists in the manifest."""
882 """Whether a path exists in the manifest."""
877
883
878 def __delitem__(path):
884 def __delitem__(path):
879 """Remove a path from the manifest.
885 """Remove a path from the manifest.
880
886
881 Raises ``KeyError`` if the path is not in the manifest.
887 Raises ``KeyError`` if the path is not in the manifest.
882 """
888 """
883
889
884 def __iter__():
890 def __iter__():
885 """Iterate over paths in the manifest."""
891 """Iterate over paths in the manifest."""
886
892
887 def iterkeys():
893 def iterkeys():
888 """Iterate over paths in the manifest."""
894 """Iterate over paths in the manifest."""
889
895
890 def keys():
896 def keys():
891 """Obtain a list of paths in the manifest."""
897 """Obtain a list of paths in the manifest."""
892
898
893 def filesnotin(other, match=None):
899 def filesnotin(other, match=None):
894 """Obtain the set of paths in this manifest but not in another.
900 """Obtain the set of paths in this manifest but not in another.
895
901
896 ``match`` is an optional matcher function to be applied to both
902 ``match`` is an optional matcher function to be applied to both
897 manifests.
903 manifests.
898
904
899 Returns a set of paths.
905 Returns a set of paths.
900 """
906 """
901
907
902 def dirs():
908 def dirs():
903 """Returns an object implementing the ``idirs`` interface."""
909 """Returns an object implementing the ``idirs`` interface."""
904
910
905 def hasdir(dir):
911 def hasdir(dir):
906 """Returns a bool indicating if a directory is in this manifest."""
912 """Returns a bool indicating if a directory is in this manifest."""
907
913
908 def matches(match):
914 def matches(match):
909 """Generate a new manifest filtered through a matcher.
915 """Generate a new manifest filtered through a matcher.
910
916
911 Returns an object conforming to the ``imanifestdict`` interface.
917 Returns an object conforming to the ``imanifestdict`` interface.
912 """
918 """
913
919
914 def walk(match):
920 def walk(match):
915 """Generator of paths in manifest satisfying a matcher.
921 """Generator of paths in manifest satisfying a matcher.
916
922
917 This is equivalent to ``self.matches(match).iterkeys()`` except a new
923 This is equivalent to ``self.matches(match).iterkeys()`` except a new
918 manifest object is not created.
924 manifest object is not created.
919
925
920 If the matcher has explicit files listed and they don't exist in
926 If the matcher has explicit files listed and they don't exist in
921 the manifest, ``match.bad()`` is called for each missing file.
927 the manifest, ``match.bad()`` is called for each missing file.
922 """
928 """
923
929
924 def diff(other, match=None, clean=False):
930 def diff(other, match=None, clean=False):
925 """Find differences between this manifest and another.
931 """Find differences between this manifest and another.
926
932
927 This manifest is compared to ``other``.
933 This manifest is compared to ``other``.
928
934
929 If ``match`` is provided, the two manifests are filtered against this
935 If ``match`` is provided, the two manifests are filtered against this
930 matcher and only entries satisfying the matcher are compared.
936 matcher and only entries satisfying the matcher are compared.
931
937
932 If ``clean`` is True, unchanged files are included in the returned
938 If ``clean`` is True, unchanged files are included in the returned
933 object.
939 object.
934
940
935 Returns a dict whose keys are paths and whose values are 2-tuples of
941 Returns a dict whose keys are paths and whose values are 2-tuples of
936 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
942 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
937 represents the node and flags for this manifest and ``(node2, flag2)``
943 represents the node and flags for this manifest and ``(node2, flag2)``
938 are the same for the other manifest.
944 are the same for the other manifest.
939 """
945 """
940
946
941 def setflag(path, flag):
947 def setflag(path, flag):
942 """Set the flag value for a given path.
948 """Set the flag value for a given path.
943
949
944 Raises ``KeyError`` if the path is not already in the manifest.
950 Raises ``KeyError`` if the path is not already in the manifest.
945 """
951 """
946
952
947 def get(path, default=None):
953 def get(path, default=None):
948 """Obtain the node value for a path or a default value if missing."""
954 """Obtain the node value for a path or a default value if missing."""
949
955
950 def flags(path, default=''):
956 def flags(path, default=''):
951 """Return the flags value for a path or a default value if missing."""
957 """Return the flags value for a path or a default value if missing."""
952
958
953 def copy():
959 def copy():
954 """Return a copy of this manifest."""
960 """Return a copy of this manifest."""
955
961
956 def items():
962 def items():
957 """Returns an iterable of (path, node) for items in this manifest."""
963 """Returns an iterable of (path, node) for items in this manifest."""
958
964
959 def iteritems():
965 def iteritems():
960 """Identical to items()."""
966 """Identical to items()."""
961
967
962 def iterentries():
968 def iterentries():
963 """Returns an iterable of (path, node, flags) for this manifest.
969 """Returns an iterable of (path, node, flags) for this manifest.
964
970
965 Similar to ``iteritems()`` except items are a 3-tuple and include
971 Similar to ``iteritems()`` except items are a 3-tuple and include
966 flags.
972 flags.
967 """
973 """
968
974
969 def text():
975 def text():
970 """Obtain the raw data representation for this manifest.
976 """Obtain the raw data representation for this manifest.
971
977
972 Result is used to create a manifest revision.
978 Result is used to create a manifest revision.
973 """
979 """
974
980
975 def fastdelta(base, changes):
981 def fastdelta(base, changes):
976 """Obtain a delta between this manifest and another given changes.
982 """Obtain a delta between this manifest and another given changes.
977
983
978 ``base`` is the raw data representation for another manifest.
984 ``base`` is the raw data representation for another manifest.
979
985
980 ``changes`` is an iterable of ``(path, to_delete)``.
986 ``changes`` is an iterable of ``(path, to_delete)``.
981
987
982 Returns a 2-tuple containing ``bytearray(self.text())`` and the
988 Returns a 2-tuple containing ``bytearray(self.text())`` and the
983 delta between ``base`` and this manifest.
989 delta between ``base`` and this manifest.
984 """
990 """
985
991
986 class imanifestrevisionbase(interfaceutil.Interface):
992 class imanifestrevisionbase(interfaceutil.Interface):
987 """Base interface representing a single revision of a manifest.
993 """Base interface representing a single revision of a manifest.
988
994
989 Should not be used as a primary interface: should always be inherited
995 Should not be used as a primary interface: should always be inherited
990 as part of a larger interface.
996 as part of a larger interface.
991 """
997 """
992
998
993 def new():
999 def new():
994 """Obtain a new manifest instance.
1000 """Obtain a new manifest instance.
995
1001
996 Returns an object conforming to the ``imanifestrevisionwritable``
1002 Returns an object conforming to the ``imanifestrevisionwritable``
997 interface. The instance will be associated with the same
1003 interface. The instance will be associated with the same
998 ``imanifestlog`` collection as this instance.
1004 ``imanifestlog`` collection as this instance.
999 """
1005 """
1000
1006
1001 def copy():
1007 def copy():
1002 """Obtain a copy of this manifest instance.
1008 """Obtain a copy of this manifest instance.
1003
1009
1004 Returns an object conforming to the ``imanifestrevisionwritable``
1010 Returns an object conforming to the ``imanifestrevisionwritable``
1005 interface. The instance will be associated with the same
1011 interface. The instance will be associated with the same
1006 ``imanifestlog`` collection as this instance.
1012 ``imanifestlog`` collection as this instance.
1007 """
1013 """
1008
1014
1009 def read():
1015 def read():
1010 """Obtain the parsed manifest data structure.
1016 """Obtain the parsed manifest data structure.
1011
1017
1012 The returned object conforms to the ``imanifestdict`` interface.
1018 The returned object conforms to the ``imanifestdict`` interface.
1013 """
1019 """
1014
1020
1015 class imanifestrevisionstored(imanifestrevisionbase):
1021 class imanifestrevisionstored(imanifestrevisionbase):
1016 """Interface representing a manifest revision committed to storage."""
1022 """Interface representing a manifest revision committed to storage."""
1017
1023
1018 def node():
1024 def node():
1019 """The binary node for this manifest."""
1025 """The binary node for this manifest."""
1020
1026
1021 parents = interfaceutil.Attribute(
1027 parents = interfaceutil.Attribute(
1022 """List of binary nodes that are parents for this manifest revision."""
1028 """List of binary nodes that are parents for this manifest revision."""
1023 )
1029 )
1024
1030
1025 def readdelta(shallow=False):
1031 def readdelta(shallow=False):
1026 """Obtain the manifest data structure representing changes from parent.
1032 """Obtain the manifest data structure representing changes from parent.
1027
1033
1028 This manifest is compared to its 1st parent. A new manifest representing
1034 This manifest is compared to its 1st parent. A new manifest representing
1029 those differences is constructed.
1035 those differences is constructed.
1030
1036
1031 The returned object conforms to the ``imanifestdict`` interface.
1037 The returned object conforms to the ``imanifestdict`` interface.
1032 """
1038 """
1033
1039
1034 def readfast(shallow=False):
1040 def readfast(shallow=False):
1035 """Calls either ``read()`` or ``readdelta()``.
1041 """Calls either ``read()`` or ``readdelta()``.
1036
1042
1037 The faster of the two options is called.
1043 The faster of the two options is called.
1038 """
1044 """
1039
1045
1040 def find(key):
1046 def find(key):
1041 """Calls self.read().find(key)``.
1047 """Calls self.read().find(key)``.
1042
1048
1043 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1049 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1044 """
1050 """
1045
1051
1046 class imanifestrevisionwritable(imanifestrevisionbase):
1052 class imanifestrevisionwritable(imanifestrevisionbase):
1047 """Interface representing a manifest revision that can be committed."""
1053 """Interface representing a manifest revision that can be committed."""
1048
1054
1049 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1055 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1050 """Add this revision to storage.
1056 """Add this revision to storage.
1051
1057
1052 Takes a transaction object, the changeset revision number it will
1058 Takes a transaction object, the changeset revision number it will
1053 be associated with, its parent nodes, and lists of added and
1059 be associated with, its parent nodes, and lists of added and
1054 removed paths.
1060 removed paths.
1055
1061
1056 If match is provided, storage can choose not to inspect or write out
1062 If match is provided, storage can choose not to inspect or write out
1057 items that do not match. Storage is still required to be able to provide
1063 items that do not match. Storage is still required to be able to provide
1058 the full manifest in the future for any directories written (these
1064 the full manifest in the future for any directories written (these
1059 manifests should not be "narrowed on disk").
1065 manifests should not be "narrowed on disk").
1060
1066
1061 Returns the binary node of the created revision.
1067 Returns the binary node of the created revision.
1062 """
1068 """
1063
1069
1064 class imanifeststorage(interfaceutil.Interface):
1070 class imanifeststorage(interfaceutil.Interface):
1065 """Storage interface for manifest data."""
1071 """Storage interface for manifest data."""
1066
1072
1067 tree = interfaceutil.Attribute(
1073 tree = interfaceutil.Attribute(
1068 """The path to the directory this manifest tracks.
1074 """The path to the directory this manifest tracks.
1069
1075
1070 The empty bytestring represents the root manifest.
1076 The empty bytestring represents the root manifest.
1071 """)
1077 """)
1072
1078
1073 index = interfaceutil.Attribute(
1079 index = interfaceutil.Attribute(
1074 """An ``ifilerevisionssequence`` instance.""")
1080 """An ``ifilerevisionssequence`` instance.""")
1075
1081
1076 indexfile = interfaceutil.Attribute(
1082 indexfile = interfaceutil.Attribute(
1077 """Path of revlog index file.
1083 """Path of revlog index file.
1078
1084
1079 TODO this is revlog specific and should not be exposed.
1085 TODO this is revlog specific and should not be exposed.
1080 """)
1086 """)
1081
1087
1082 opener = interfaceutil.Attribute(
1088 opener = interfaceutil.Attribute(
1083 """VFS opener to use to access underlying files used for storage.
1089 """VFS opener to use to access underlying files used for storage.
1084
1090
1085 TODO this is revlog specific and should not be exposed.
1091 TODO this is revlog specific and should not be exposed.
1086 """)
1092 """)
1087
1093
1088 version = interfaceutil.Attribute(
1094 version = interfaceutil.Attribute(
1089 """Revlog version number.
1095 """Revlog version number.
1090
1096
1091 TODO this is revlog specific and should not be exposed.
1097 TODO this is revlog specific and should not be exposed.
1092 """)
1098 """)
1093
1099
1094 _generaldelta = interfaceutil.Attribute(
1100 _generaldelta = interfaceutil.Attribute(
1095 """Whether generaldelta storage is being used.
1101 """Whether generaldelta storage is being used.
1096
1102
1097 TODO this is revlog specific and should not be exposed.
1103 TODO this is revlog specific and should not be exposed.
1098 """)
1104 """)
1099
1105
1100 fulltextcache = interfaceutil.Attribute(
1106 fulltextcache = interfaceutil.Attribute(
1101 """Dict with cache of fulltexts.
1107 """Dict with cache of fulltexts.
1102
1108
1103 TODO this doesn't feel appropriate for the storage interface.
1109 TODO this doesn't feel appropriate for the storage interface.
1104 """)
1110 """)
1105
1111
1106 def __len__():
1112 def __len__():
1107 """Obtain the number of revisions stored for this manifest."""
1113 """Obtain the number of revisions stored for this manifest."""
1108
1114
1109 def __iter__():
1115 def __iter__():
1110 """Iterate over revision numbers for this manifest."""
1116 """Iterate over revision numbers for this manifest."""
1111
1117
1112 def rev(node):
1118 def rev(node):
1113 """Obtain the revision number given a binary node.
1119 """Obtain the revision number given a binary node.
1114
1120
1115 Raises ``error.LookupError`` if the node is not known.
1121 Raises ``error.LookupError`` if the node is not known.
1116 """
1122 """
1117
1123
1118 def node(rev):
1124 def node(rev):
1119 """Obtain the node value given a revision number.
1125 """Obtain the node value given a revision number.
1120
1126
1121 Raises ``error.LookupError`` if the revision is not known.
1127 Raises ``error.LookupError`` if the revision is not known.
1122 """
1128 """
1123
1129
1124 def lookup(value):
1130 def lookup(value):
1125 """Attempt to resolve a value to a node.
1131 """Attempt to resolve a value to a node.
1126
1132
1127 Value can be a binary node, hex node, revision number, or a bytes
1133 Value can be a binary node, hex node, revision number, or a bytes
1128 that can be converted to an integer.
1134 that can be converted to an integer.
1129
1135
1130 Raises ``error.LookupError`` if a node could not be resolved.
1136 Raises ``error.LookupError`` if a node could not be resolved.
1131 """
1137 """
1132
1138
1133 def parents(node):
1139 def parents(node):
1134 """Returns a 2-tuple of parent nodes for a node.
1140 """Returns a 2-tuple of parent nodes for a node.
1135
1141
1136 Values will be ``nullid`` if the parent is empty.
1142 Values will be ``nullid`` if the parent is empty.
1137 """
1143 """
1138
1144
1139 def parentrevs(rev):
1145 def parentrevs(rev):
1140 """Like parents() but operates on revision numbers."""
1146 """Like parents() but operates on revision numbers."""
1141
1147
1142 def linkrev(rev):
1148 def linkrev(rev):
1143 """Obtain the changeset revision number a revision is linked to."""
1149 """Obtain the changeset revision number a revision is linked to."""
1144
1150
1145 def revision(node, _df=None, raw=False):
1151 def revision(node, _df=None, raw=False):
1146 """Obtain fulltext data for a node."""
1152 """Obtain fulltext data for a node."""
1147
1153
1148 def revdiff(rev1, rev2):
1154 def revdiff(rev1, rev2):
1149 """Obtain a delta between two revision numbers.
1155 """Obtain a delta between two revision numbers.
1150
1156
1151 The returned data is the result of ``bdiff.bdiff()`` on the raw
1157 The returned data is the result of ``bdiff.bdiff()`` on the raw
1152 revision data.
1158 revision data.
1153 """
1159 """
1154
1160
1155 def cmp(node, fulltext):
1161 def cmp(node, fulltext):
1156 """Compare fulltext to another revision.
1162 """Compare fulltext to another revision.
1157
1163
1158 Returns True if the fulltext is different from what is stored.
1164 Returns True if the fulltext is different from what is stored.
1159 """
1165 """
1160
1166
1161 def emitrevisions(nodes,
1167 def emitrevisions(nodes,
1162 nodesorder=None,
1168 nodesorder=None,
1163 revisiondata=False,
1169 revisiondata=False,
1164 assumehaveparentrevisions=False):
1170 assumehaveparentrevisions=False):
1165 """Produce ``irevisiondelta`` describing revisions.
1171 """Produce ``irevisiondelta`` describing revisions.
1166
1172
1167 See the documentation for ``ifiledata`` for more.
1173 See the documentation for ``ifiledata`` for more.
1168 """
1174 """
1169
1175
1170 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1176 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1171 """Process a series of deltas for storage.
1177 """Process a series of deltas for storage.
1172
1178
1173 See the documentation in ``ifilemutation`` for more.
1179 See the documentation in ``ifilemutation`` for more.
1174 """
1180 """
1175
1181
1176 def rawsize(rev):
1182 def rawsize(rev):
1177 """Obtain the size of tracked data.
1183 """Obtain the size of tracked data.
1178
1184
1179 Is equivalent to ``len(m.revision(node, raw=True))``.
1185 Is equivalent to ``len(m.revision(node, raw=True))``.
1180
1186
1181 TODO this method is only used by upgrade code and may be removed.
1187 TODO this method is only used by upgrade code and may be removed.
1182 """
1188 """
1183
1189
1184 def getstrippoint(minlink):
1190 def getstrippoint(minlink):
1185 """Find minimum revision that must be stripped to strip a linkrev.
1191 """Find minimum revision that must be stripped to strip a linkrev.
1186
1192
1187 See the documentation in ``ifilemutation`` for more.
1193 See the documentation in ``ifilemutation`` for more.
1188 """
1194 """
1189
1195
1190 def strip(minlink, transaction):
1196 def strip(minlink, transaction):
1191 """Remove storage of items starting at a linkrev.
1197 """Remove storage of items starting at a linkrev.
1192
1198
1193 See the documentation in ``ifilemutation`` for more.
1199 See the documentation in ``ifilemutation`` for more.
1194 """
1200 """
1195
1201
1196 def checksize():
1202 def checksize():
1197 """Obtain the expected sizes of backing files.
1203 """Obtain the expected sizes of backing files.
1198
1204
1199 TODO this is used by verify and it should not be part of the interface.
1205 TODO this is used by verify and it should not be part of the interface.
1200 """
1206 """
1201
1207
1202 def files():
1208 def files():
1203 """Obtain paths that are backing storage for this manifest.
1209 """Obtain paths that are backing storage for this manifest.
1204
1210
1205 TODO this is used by verify and there should probably be a better API
1211 TODO this is used by verify and there should probably be a better API
1206 for this functionality.
1212 for this functionality.
1207 """
1213 """
1208
1214
1209 def deltaparent(rev):
1215 def deltaparent(rev):
1210 """Obtain the revision that a revision is delta'd against.
1216 """Obtain the revision that a revision is delta'd against.
1211
1217
1212 TODO delta encoding is an implementation detail of storage and should
1218 TODO delta encoding is an implementation detail of storage and should
1213 not be exposed to the storage interface.
1219 not be exposed to the storage interface.
1214 """
1220 """
1215
1221
1216 def clone(tr, dest, **kwargs):
1222 def clone(tr, dest, **kwargs):
1217 """Clone this instance to another."""
1223 """Clone this instance to another."""
1218
1224
1219 def clearcaches(clear_persisted_data=False):
1225 def clearcaches(clear_persisted_data=False):
1220 """Clear any caches associated with this instance."""
1226 """Clear any caches associated with this instance."""
1221
1227
1222 def dirlog(d):
1228 def dirlog(d):
1223 """Obtain a manifest storage instance for a tree."""
1229 """Obtain a manifest storage instance for a tree."""
1224
1230
1225 def add(m, transaction, link, p1, p2, added, removed, readtree=None,
1231 def add(m, transaction, link, p1, p2, added, removed, readtree=None,
1226 match=None):
1232 match=None):
1227 """Add a revision to storage.
1233 """Add a revision to storage.
1228
1234
1229 ``m`` is an object conforming to ``imanifestdict``.
1235 ``m`` is an object conforming to ``imanifestdict``.
1230
1236
1231 ``link`` is the linkrev revision number.
1237 ``link`` is the linkrev revision number.
1232
1238
1233 ``p1`` and ``p2`` are the parent revision numbers.
1239 ``p1`` and ``p2`` are the parent revision numbers.
1234
1240
1235 ``added`` and ``removed`` are iterables of added and removed paths,
1241 ``added`` and ``removed`` are iterables of added and removed paths,
1236 respectively.
1242 respectively.
1237
1243
1238 ``readtree`` is a function that can be used to read the child tree(s)
1244 ``readtree`` is a function that can be used to read the child tree(s)
1239 when recursively writing the full tree structure when using
1245 when recursively writing the full tree structure when using
1240 treemanifests.
1246 treemanifests.
1241
1247
1242 ``match`` is a matcher that can be used to hint to storage that not all
1248 ``match`` is a matcher that can be used to hint to storage that not all
1243 paths must be inspected; this is an optimization and can be safely
1249 paths must be inspected; this is an optimization and can be safely
1244 ignored. Note that the storage must still be able to reproduce a full
1250 ignored. Note that the storage must still be able to reproduce a full
1245 manifest including files that did not match.
1251 manifest including files that did not match.
1246 """
1252 """
1247
1253
1248 def storageinfo(exclusivefiles=False, sharedfiles=False,
1254 def storageinfo(exclusivefiles=False, sharedfiles=False,
1249 revisionscount=False, trackedsize=False,
1255 revisionscount=False, trackedsize=False,
1250 storedsize=False):
1256 storedsize=False):
1251 """Obtain information about storage for this manifest's data.
1257 """Obtain information about storage for this manifest's data.
1252
1258
1253 See ``ifilestorage.storageinfo()`` for a description of this method.
1259 See ``ifilestorage.storageinfo()`` for a description of this method.
1254 This one behaves the same way, except for manifest data.
1260 This one behaves the same way, except for manifest data.
1255 """
1261 """
1256
1262
1257 class imanifestlog(interfaceutil.Interface):
1263 class imanifestlog(interfaceutil.Interface):
1258 """Interface representing a collection of manifest snapshots.
1264 """Interface representing a collection of manifest snapshots.
1259
1265
1260 Represents the root manifest in a repository.
1266 Represents the root manifest in a repository.
1261
1267
1262 Also serves as a means to access nested tree manifests and to cache
1268 Also serves as a means to access nested tree manifests and to cache
1263 tree manifests.
1269 tree manifests.
1264 """
1270 """
1265
1271
1266 def __getitem__(node):
1272 def __getitem__(node):
1267 """Obtain a manifest instance for a given binary node.
1273 """Obtain a manifest instance for a given binary node.
1268
1274
1269 Equivalent to calling ``self.get('', node)``.
1275 Equivalent to calling ``self.get('', node)``.
1270
1276
1271 The returned object conforms to the ``imanifestrevisionstored``
1277 The returned object conforms to the ``imanifestrevisionstored``
1272 interface.
1278 interface.
1273 """
1279 """
1274
1280
1275 def get(tree, node, verify=True):
1281 def get(tree, node, verify=True):
1276 """Retrieve the manifest instance for a given directory and binary node.
1282 """Retrieve the manifest instance for a given directory and binary node.
1277
1283
1278 ``node`` always refers to the node of the root manifest (which will be
1284 ``node`` always refers to the node of the root manifest (which will be
1279 the only manifest if flat manifests are being used).
1285 the only manifest if flat manifests are being used).
1280
1286
1281 If ``tree`` is the empty string, the root manifest is returned.
1287 If ``tree`` is the empty string, the root manifest is returned.
1282 Otherwise the manifest for the specified directory will be returned
1288 Otherwise the manifest for the specified directory will be returned
1283 (requires tree manifests).
1289 (requires tree manifests).
1284
1290
1285 If ``verify`` is True, ``LookupError`` is raised if the node is not
1291 If ``verify`` is True, ``LookupError`` is raised if the node is not
1286 known.
1292 known.
1287
1293
1288 The returned object conforms to the ``imanifestrevisionstored``
1294 The returned object conforms to the ``imanifestrevisionstored``
1289 interface.
1295 interface.
1290 """
1296 """
1291
1297
1292 def getstorage(tree):
1298 def getstorage(tree):
1293 """Retrieve an interface to storage for a particular tree.
1299 """Retrieve an interface to storage for a particular tree.
1294
1300
1295 If ``tree`` is the empty bytestring, storage for the root manifest will
1301 If ``tree`` is the empty bytestring, storage for the root manifest will
1296 be returned. Otherwise storage for a tree manifest is returned.
1302 be returned. Otherwise storage for a tree manifest is returned.
1297
1303
1298 TODO formalize interface for returned object.
1304 TODO formalize interface for returned object.
1299 """
1305 """
1300
1306
1301 def clearcaches():
1307 def clearcaches():
1302 """Clear caches associated with this collection."""
1308 """Clear caches associated with this collection."""
1303
1309
1304 def rev(node):
1310 def rev(node):
1305 """Obtain the revision number for a binary node.
1311 """Obtain the revision number for a binary node.
1306
1312
1307 Raises ``error.LookupError`` if the node is not known.
1313 Raises ``error.LookupError`` if the node is not known.
1308 """
1314 """
1309
1315
1310 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1316 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1311 """Local repository sub-interface providing access to tracked file storage.
1317 """Local repository sub-interface providing access to tracked file storage.
1312
1318
1313 This interface defines how a repository accesses storage for a single
1319 This interface defines how a repository accesses storage for a single
1314 tracked file path.
1320 tracked file path.
1315 """
1321 """
1316
1322
1317 def file(f):
1323 def file(f):
1318 """Obtain a filelog for a tracked path.
1324 """Obtain a filelog for a tracked path.
1319
1325
1320 The returned type conforms to the ``ifilestorage`` interface.
1326 The returned type conforms to the ``ifilestorage`` interface.
1321 """
1327 """
1322
1328
1323 class ilocalrepositorymain(interfaceutil.Interface):
1329 class ilocalrepositorymain(interfaceutil.Interface):
1324 """Main interface for local repositories.
1330 """Main interface for local repositories.
1325
1331
1326 This currently captures the reality of things - not how things should be.
1332 This currently captures the reality of things - not how things should be.
1327 """
1333 """
1328
1334
1329 supportedformats = interfaceutil.Attribute(
1335 supportedformats = interfaceutil.Attribute(
1330 """Set of requirements that apply to stream clone.
1336 """Set of requirements that apply to stream clone.
1331
1337
1332 This is actually a class attribute and is shared among all instances.
1338 This is actually a class attribute and is shared among all instances.
1333 """)
1339 """)
1334
1340
1335 supported = interfaceutil.Attribute(
1341 supported = interfaceutil.Attribute(
1336 """Set of requirements that this repo is capable of opening.""")
1342 """Set of requirements that this repo is capable of opening.""")
1337
1343
1338 requirements = interfaceutil.Attribute(
1344 requirements = interfaceutil.Attribute(
1339 """Set of requirements this repo uses.""")
1345 """Set of requirements this repo uses.""")
1340
1346
1341 features = interfaceutil.Attribute(
1347 features = interfaceutil.Attribute(
1342 """Set of "features" this repository supports.
1348 """Set of "features" this repository supports.
1343
1349
1344 A "feature" is a loosely-defined term. It can refer to a feature
1350 A "feature" is a loosely-defined term. It can refer to a feature
1345 in the classical sense or can describe an implementation detail
1351 in the classical sense or can describe an implementation detail
1346 of the repository. For example, a ``readonly`` feature may denote
1352 of the repository. For example, a ``readonly`` feature may denote
1347 the repository as read-only. Or a ``revlogfilestore`` feature may
1353 the repository as read-only. Or a ``revlogfilestore`` feature may
1348 denote that the repository is using revlogs for file storage.
1354 denote that the repository is using revlogs for file storage.
1349
1355
1350 The intent of features is to provide a machine-queryable mechanism
1356 The intent of features is to provide a machine-queryable mechanism
1351 for repo consumers to test for various repository characteristics.
1357 for repo consumers to test for various repository characteristics.
1352
1358
1353 Features are similar to ``requirements``. The main difference is that
1359 Features are similar to ``requirements``. The main difference is that
1354 requirements are stored on-disk and represent requirements to open the
1360 requirements are stored on-disk and represent requirements to open the
1355 repository. Features describe more granular, run-time capabilities of
1361 repository. Features describe more granular, run-time capabilities of
1356 the repository (and may be derived from requirements).
1362 the repository (and may be derived from requirements).
1357 """)
1363 """)

    filtername = interfaceutil.Attribute(
        """Name of the repoview that is active on this repo.""")

    wvfs = interfaceutil.Attribute(
        """VFS used to access the working directory.""")

    vfs = interfaceutil.Attribute(
        """VFS rooted at the .hg directory.

        Used to access repository data not in the store.
        """)

    svfs = interfaceutil.Attribute(
        """VFS rooted at the store.

        Used to access repository data in the store. Typically .hg/store.
        But can point elsewhere if the store is shared.
        """)

    root = interfaceutil.Attribute(
        """Path to the root of the working directory.""")

    path = interfaceutil.Attribute(
        """Path to the .hg directory.""")

    origroot = interfaceutil.Attribute(
        """The filesystem path that was used to construct the repo.""")

    auditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This checks if a path refers to a nested repository.

        Operates on the filesystem.
        """)

    nofsauditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This is like ``auditor`` except it doesn't do filesystem checks.
        """)

    baseui = interfaceutil.Attribute(
        """Original ui instance passed into constructor.""")

    ui = interfaceutil.Attribute(
        """Main ui instance for this instance.""")

    sharedpath = interfaceutil.Attribute(
        """Path to the .hg directory of the repo this repo was shared from.""")

    store = interfaceutil.Attribute(
        """A store instance.""")

    spath = interfaceutil.Attribute(
        """Path to the store.""")

    sjoin = interfaceutil.Attribute(
        """Alias to self.store.join.""")

    cachevfs = interfaceutil.Attribute(
        """A VFS used to access the cache directory.

        Typically .hg/cache.
        """)

    filteredrevcache = interfaceutil.Attribute(
        """Holds sets of revisions to be filtered.""")

    names = interfaceutil.Attribute(
        """A ``namespaces`` instance.""")

    def close():
        """Close the handle on this repository."""

    def peer():
        """Obtain an object conforming to the ``peer`` interface."""

    def unfiltered():
        """Obtain an unfiltered/raw view of this repo."""

    def filtered(name, visibilityexceptions=None):
        """Obtain a named view of this repository."""

    obsstore = interfaceutil.Attribute(
        """A store of obsolescence data.""")

    changelog = interfaceutil.Attribute(
        """A handle on the changelog revlog.""")

    manifestlog = interfaceutil.Attribute(
        """An instance conforming to the ``imanifestlog`` interface.

        Provides access to manifests for the repository.
        """)

    dirstate = interfaceutil.Attribute(
        """Working directory state.""")

    narrowpats = interfaceutil.Attribute(
        """Matcher patterns for this repository's narrowspec.""")

    def narrowmatch():
        """Obtain a matcher for the narrowspec."""

    def setnarrowpats(newincludes, newexcludes):
        """Define the narrowspec for this repository."""

    def __getitem__(changeid):
        """Try to resolve a changectx."""

    def __contains__(changeid):
        """Whether a changeset exists."""

    def __nonzero__():
        """Always returns True."""
        return True

    __bool__ = __nonzero__

    def __len__():
        """Returns the number of changesets in the repo."""

    def __iter__():
        """Iterate over revisions in the changelog."""

    def revs(expr, *args):
        """Evaluate a revset.

        Emits revisions.
        """

    def set(expr, *args):
        """Evaluate a revset.

        Emits changectx instances.
        """

    def anyrevs(specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets."""

    def url():
        """Returns a string representing the location of this repo."""

    def hook(name, throw=False, **args):
        """Call a hook."""

    def tags():
        """Return a mapping of tag to node."""

    def tagtype(tagname):
        """Return the type of a given tag."""

    def tagslist():
        """Return a list of tags ordered by revision."""

    def nodetags(node):
        """Return the tags associated with a node."""

    def nodebookmarks(node):
        """Return the list of bookmarks pointing to the specified node."""

    def branchmap():
        """Return a mapping of branch to heads in that branch."""

    def revbranchcache():
        pass

    def branchtip(branchtip, ignoremissing=False):
        """Return the tip node for a given branch."""

    def lookup(key):
        """Resolve the node for a revision."""

    def lookupbranch(key):
        """Look up the branch name of the given revision or branch name."""

    def known(nodes):
        """Determine whether a series of nodes is known.

        Returns a list of bools.
        """

    def local():
        """Whether the repository is local."""
        return True

    def publishing():
        """Whether the repository is a publishing repository."""

    def cancopy():
        pass

    def shared():
        """The type of shared repository or None."""

    def wjoin(f, *insidef):
        """Calls self.vfs.reljoin(self.root, f, *insidef)"""

    def setparents(p1, p2):
        """Set the parent nodes of the working directory."""

    def filectx(path, changeid=None, fileid=None):
        """Obtain a filectx for the given file revision."""

    def getcwd():
        """Obtain the current working directory from the dirstate."""

    def pathto(f, cwd=None):
        """Obtain the relative path to a file."""

    def adddatafilter(name, fltr):
        pass

    def wread(filename):
        """Read a file from wvfs, using data filters."""

    def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
        """Write data to a file in the wvfs, using data filters."""

    def wwritedata(filename, data):
        """Resolve data for writing to the wvfs, using data filters."""

    def currenttransaction():
        """Obtain the current transaction instance or None."""

    def transaction(desc, report=None):
        """Open a new transaction to write to the repository."""

    def undofiles():
        """Returns a list of (vfs, path) for files to undo transactions."""

    def recover():
        """Roll back an interrupted transaction."""

    def rollback(dryrun=False, force=False):
        """Undo the last transaction.

        DANGEROUS.
        """

    def updatecaches(tr=None, full=False):
        """Warm repo caches."""

    def invalidatecaches():
        """Invalidate cached data due to the repository mutating."""

    def invalidatevolatilesets():
        pass

    def invalidatedirstate():
        """Invalidate the dirstate."""

    def invalidate(clearfilecache=False):
        pass

    def invalidateall():
        pass

    def lock(wait=True):
        """Lock the repository store and return a lock instance."""

    def wlock(wait=True):
        """Lock the non-store parts of the repository."""

    def currentwlock():
        """Return the wlock if it's held or None."""

    def checkcommitpatterns(wctx, vdirs, match, status, fail):
        pass

    def commit(text='', user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the repository."""

    def commitctx(ctx, error=False):
        """Commit a commitctx instance to the repository."""

    def destroying():
        """Inform the repository that nodes are about to be destroyed."""

    def destroyed():
        """Inform the repository that nodes have been destroyed."""

    def status(node1='.', node2=None, match=None, ignored=False,
               clean=False, unknown=False, listsubrepos=False):
        """Convenience method to call repo[x].status()."""

    def addpostdsstatus(ps):
        pass

    def postdsstatus():
        pass

    def clearpostdsstatus():
        pass

    def heads(start=None):
        """Obtain list of nodes that are DAG heads."""

    def branchheads(branch=None, start=None, closed=False):
        pass

    def branches(nodes):
        pass

    def between(pairs):
        pass

    def checkpush(pushop):
        pass

    prepushoutgoinghooks = interfaceutil.Attribute(
        """util.hooks instance.""")

    def pushkey(namespace, key, old, new):
        pass

    def listkeys(namespace):
        pass

    def debugwireargs(one, two, three=None, four=None, five=None):
        pass

    def savecommitmessage(text):
        pass

class completelocalrepository(ilocalrepositorymain,
                              ilocalrepositoryfilestorage):
    """Complete interface for a local repository."""

class iwireprotocolcommandcacher(interfaceutil.Interface):
    """Represents a caching backend for wire protocol commands.

    Wire protocol version 2 supports transparent caching of many commands.
    To leverage this caching, servers can activate objects that cache
    command responses. Objects handle both cache writing and reading.
    This interface defines how that response caching mechanism works.

    Wire protocol version 2 commands emit a series of objects that are
    serialized and sent to the client. The caching layer exists between
    the invocation of the command function and the sending of its output
    objects to an output layer.

    Instances of this interface represent a binding to a cache that
    can serve a response (in place of calling a command function) and/or
    write responses to a cache for subsequent use.

    When a command request arrives, the following happens with regard
    to this interface:

    1. The server determines whether the command request is cacheable.
    2. If it is, an instance of this interface is spawned.
    3. The cacher is activated in a context manager (``__enter__`` is called).
    4. A cache *key* for that request is derived. This will call the
       instance's ``adjustcachekeystate()`` method so the derivation
       can be influenced.
    5. The cacher is informed of the derived cache key via a call to
       ``setcachekey()``.
    6. The cacher's ``lookup()`` method is called to test for presence of
       the derived key in the cache.
    7. If ``lookup()`` returns a hit, that cached result is used in place
       of invoking the command function. ``__exit__`` is called and the
       instance is discarded.
    8. The command function is invoked.
    9. ``onobject()`` is called for each object emitted by the command
       function.
    10. After the final object is seen, ``onfinished()`` is called.
    11. ``__exit__`` is called to signal the end of use of the instance.

    Cache *key* derivation can be influenced by the instance.

    Cache keys are initially derived by a deterministic representation of
    the command request. This includes the command name, arguments, protocol
    version, etc. This initial key derivation is performed by CBOR-encoding a
    data structure and feeding that output into a hasher.

    Instances of this interface can influence this initial key derivation
    via ``adjustcachekeystate()``.

    The instance is informed of the derived cache key via a call to
    ``setcachekey()``. The instance must store the key locally so it can
    be consulted on subsequent operations that may require it.

    When constructed, the instance has access to a callable that can be used
    for encoding response objects. This callable receives as its single
    argument an object emitted by a command function. It returns an iterable
    of bytes chunks representing the encoded object. Unless the cacher is
    caching native Python objects in memory or has a way of reconstructing
    the original Python objects, implementations typically call this function
    to produce bytes from the output objects and then store those bytes in
    the cache. When it comes time to re-emit those bytes, they are wrapped
    in a ``wireprototypes.encodedresponse`` instance to tell the output
    layer that they are pre-encoded.

    When receiving the objects emitted by the command function, instances
    can choose what to do with those objects. The simplest thing to do is
    re-emit the original objects. They will be forwarded to the output
    layer and will be processed as if the cacher did not exist.

    Implementations could also choose to not emit objects - instead locally
    buffering objects or their encoded representation. They could then emit
    a single "coalesced" object when ``onfinished()`` is called. In
    this way, the implementation would function as a filtering layer of
    sorts.

    When caching objects, typically the encoded form of the object will
    be stored. Keep in mind that if the original object is forwarded to
    the output layer, it will need to be encoded there as well. For large
    output, this redundant encoding could add overhead. Implementations
    could wrap the encoded object data in ``wireprototypes.encodedresponse``
    instances to avoid this overhead.
    """
    def __enter__():
        """Marks the instance as active.

        Should return self.
        """

    def __exit__(exctype, excvalue, exctb):
        """Called when cacher is no longer used.

        This can be used by implementations to perform cleanup actions (e.g.
        disconnecting network sockets, aborting a partially cached response).
        """

    def adjustcachekeystate(state):
        """Influences cache key derivation by adjusting state to derive key.

        A dict defining the state used to derive the cache key is passed.

        Implementations can modify this dict to record additional state that
        is wanted to influence key derivation.

        Implementations are *highly* encouraged to not modify or delete
        existing keys.
        """

    def setcachekey(key):
        """Record the derived cache key for this request.

        Instances may mutate the key for internal usage, as desired. e.g.
        instances may wish to prepend the repo name, introduce path
        components for filesystem or URL addressing, etc. Behavior is up to
        the cache.

        Returns a bool indicating if the request is cacheable by this
        instance.
        """

    def lookup():
        """Attempt to resolve an entry in the cache.

        The instance is instructed to look for the cache key that it was
        informed about via the call to ``setcachekey()``.

        If there's no cache hit or the cacher doesn't wish to use the cached
        entry, ``None`` should be returned.

        Else, a dict defining the cached result should be returned. The
        dict may have the following keys:

        objs
           An iterable of objects that should be sent to the client. That
           iterable of objects is expected to be what the command function
           would return if invoked or an equivalent representation thereof.
        """

    def onobject(obj):
        """Called when a new object is emitted from the command function.

        Receives as its argument the object that was emitted from the
        command function.

        This method returns an iterator of objects to forward to the output
        layer. The easiest implementation is a generator that just
        ``yield obj``.
        """

    def onfinished():
        """Called after all objects have been emitted from the command function.

        Implementations should return an iterator of objects to forward to
        the output layer.

        This method can be a generator.
        """
@@ -1,664 +1,669 b''
# simplestorerepo.py - Extension that swaps in alternate repository storage.
#
# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# To use this with the test suite:
#
#   $ HGREPOFEATURES="simplestore" ./run-tests.py \
#       --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py

from __future__ import absolute_import

import stat

from mercurial.i18n import _
from mercurial.node import (
    bin,
    hex,
    nullid,
    nullrev,
)
from mercurial.thirdparty import (
    attr,
    cbor,
)
from mercurial import (
    ancestor,
    bundlerepo,
    error,
    extensions,
    localrepo,
    mdiff,
    pycompat,
    repository,
    revlog,
    store,
    verify,
)
from mercurial.utils import (
    interfaceutil,
    storageutil,
)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

REQUIREMENT = 'testonly-simplestore'

def validatenode(node):
    if isinstance(node, int):
        raise ValueError('expected node; got int')

    if len(node) != 20:
        raise ValueError('expected 20 byte node')

def validaterev(rev):
    if not isinstance(rev, int):
        raise ValueError('expected int')

class simplestoreerror(error.StorageError):
    pass

@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True, frozen=True)
class simplestorerevisiondelta(object):
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    linknode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()

@interfaceutil.implementer(repository.ifilestorage)
class filestorage(object):
    """Implements storage for a tracked path.

    Data is stored in the VFS in a directory corresponding to the tracked
    path.

    Index data is stored in an ``index`` file using CBOR.

    Fulltext data is stored in files having names of the node.
    """

    def __init__(self, svfs, path):
        self._svfs = svfs
        self._path = path

        self._storepath = b'/'.join([b'data', path])
        self._indexpath = b'/'.join([self._storepath, b'index'])

        indexdata = self._svfs.tryread(self._indexpath)
        if indexdata:
            indexdata = cbor.loads(indexdata)

        self._indexdata = indexdata or []
        self._indexbynode = {}
        self._indexbyrev = {}
        self._index = []
        self._refreshindex()

    def _refreshindex(self):
        self._indexbynode.clear()
        self._indexbyrev.clear()
        self._index = []

        for i, entry in enumerate(self._indexdata):
            self._indexbynode[entry[b'node']] = entry
            self._indexbyrev[i] = entry

        self._indexbynode[nullid] = {
            b'node': nullid,
            b'p1': nullid,
            b'p2': nullid,
            b'linkrev': nullrev,
            b'flags': 0,
        }

        self._indexbyrev[nullrev] = {
            b'node': nullid,
            b'p1': nullid,
            b'p2': nullid,
            b'linkrev': nullrev,
            b'flags': 0,
        }

        for i, entry in enumerate(self._indexdata):
            p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))

            # start, length, rawsize, chainbase, linkrev, p1, p2, node
            self._index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
                                entry[b'node']))

        self._index.append((0, 0, 0, -1, -1, -1, -1, nullid))

    def __len__(self):
        return len(self._indexdata)

    def __iter__(self):
        return iter(range(len(self)))

    def revs(self, start=0, stop=None):
        step = 1
        if stop is not None:
            if start > stop:
                step = -1

            stop += step
        else:
            stop = len(self)

        return range(start, stop, step)

    def parents(self, node):
        validatenode(node)

        if node not in self._indexbynode:
            raise KeyError('unknown node')

        entry = self._indexbynode[node]

        return entry[b'p1'], entry[b'p2']

    def parentrevs(self, rev):
        p1, p2 = self.parents(self._indexbyrev[rev][b'node'])
        return self.rev(p1), self.rev(p2)

    def rev(self, node):
        validatenode(node)

        try:
            self._indexbynode[node]
        except KeyError:
            raise error.LookupError(node, self._indexpath, _('no node'))

        for rev, entry in self._indexbyrev.items():
            if entry[b'node'] == node:
                return rev

        raise error.ProgrammingError('this should not occur')

    def node(self, rev):
        validaterev(rev)

        return self._indexbyrev[rev][b'node']

    def lookup(self, node):
        if isinstance(node, int):
            return self.node(node)

        if len(node) == 20:
            self.rev(node)
            return node

        try:
            rev = int(node)
            if '%d' % rev != node:
                raise ValueError

            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError

            return self.node(rev)
        except (ValueError, OverflowError):
            pass

        if len(node) == 40:
            try:
                rawnode = bin(node)
                self.rev(rawnode)
                return rawnode
            except TypeError:
                pass

        raise error.LookupError(node, self._path, _('invalid lookup input'))

    def linkrev(self, rev):
        validaterev(rev)

        return self._indexbyrev[rev][b'linkrev']

    def _flags(self, rev):
        validaterev(rev)

        return self._indexbyrev[rev][b'flags']

    def _candelta(self, baserev, rev):
        validaterev(baserev)
        validaterev(rev)

        if ((self._flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)
            or (self._flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)):
            return False

        return True

    def _processflags(self, text, flags, operation, raw=False):
        if flags == 0:
            return text, True

        if flags & ~revlog.REVIDX_KNOWN_FLAGS:
            raise simplestoreerror(_("incompatible revision flag '%#x'") %
                                   (flags & ~revlog.REVIDX_KNOWN_FLAGS))

        validatehash = True
        # Depending on the operation (read or write), the order might be
        # reversed due to non-commutative transforms.
        orderedflags = revlog.REVIDX_FLAGS_ORDER
        if operation == 'write':
            orderedflags = reversed(orderedflags)

        for flag in orderedflags:
            # If a flagprocessor has been registered for a known flag, apply the
            # related operation transform and update result tuple.
            if flag & flags:
                vhash = True

                if flag not in revlog._flagprocessors:
                    message = _("missing processor for flag '%#x'") % (flag)
                    raise simplestoreerror(message)

                processor = revlog._flagprocessors[flag]
                if processor is not None:
                    readtransform, writetransform, rawtransform = processor

                    if raw:
                        vhash = rawtransform(self, text)
                    elif operation == 'read':
                        text, vhash = readtransform(self, text)
                    else: # write operation
                        text, vhash = writetransform(self, text)
                validatehash = validatehash and vhash

        return text, validatehash

    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        if p1 is None and p2 is None:
            p1, p2 = self.parents(node)
        if node != storageutil.hashrevisionsha1(text, p1, p2):
            raise simplestoreerror(_("integrity check failed on %s") %
                                   self._path)

    def revision(self, node, raw=False):
        validatenode(node)

        if node == nullid:
            return b''

        rev = self.rev(node)
        flags = self._flags(rev)

        path = b'/'.join([self._storepath, hex(node)])
        rawtext = self._svfs.read(path)

        text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
        if validatehash:
            self.checkhash(text, node, rev=rev)

        return text

    def read(self, node):
        validatenode(node)

        revision = self.revision(node)

        if not revision.startswith(b'\1\n'):
            return revision

        start = revision.index(b'\1\n', 2)
        return revision[start + 2:]
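
    # Orientation sketch (not part of this diff): fulltexts carrying copy
    # metadata use Mercurial's standard filelog envelope, e.g.:
    #
    #   \1\ncopy: source/path\ncopyrev: <40 hex chars>\n\1\n<file data>
    #
    # ``read()`` above strips the envelope; ``renamed()`` below parses it.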

    def renamed(self, node):
        validatenode(node)

        if self.parents(node)[0] != nullid:
            return False

        fulltext = self.revision(node)
        m = storageutil.parsemeta(fulltext)[0]

        if m and 'copy' in m:
            return m['copy'], bin(m['copyrev'])

        return False

    def cmp(self, node, text):
        validatenode(node)

        t = text

        if text.startswith(b'\1\n'):
            t = b'\1\n\1\n' + text

        p1, p2 = self.parents(node)

        if storageutil.hashrevisionsha1(t, p1, p2) == node:
            return False

        if self.iscensored(self.rev(node)):
            return text != b''

        if self.renamed(node):
            t2 = self.read(node)
            return t2 != text

        return True

    def size(self, rev):
        validaterev(rev)

        node = self._indexbyrev[rev][b'node']

        if self.renamed(node):
            return len(self.read(node))

        if self.iscensored(rev):
            return 0

        return len(self.revision(node))

    def iscensored(self, rev):
        validaterev(rev)

        return self._flags(rev) & repository.REVISION_FLAG_CENSORED

    def commonancestorsheads(self, a, b):
        validatenode(a)
        validatenode(b)

        a = self.rev(a)
        b = self.rev(b)

        ancestors = ancestor.commonancestorsheads(self.parentrevs, a, b)
        return pycompat.maplist(self.node, ancestors)

    def descendants(self, revs):
        # This is a copy of revlog.descendants()
        first = min(revs)
        if first == nullrev:
            for i in self:
                yield i
            return

        seen = set(revs)
        for i in self.revs(start=first + 1):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break

    # Required by verify.
    def files(self):
        entries = self._svfs.listdir(self._storepath)

        # Strip out undo.backup.* files created as part of transaction
        # recording.
        entries = [f for f in entries if not f.startswith('undo.backup.')]

        return [b'/'.join((self._storepath, f)) for f in entries]

    def add(self, text, meta, transaction, linkrev, p1, p2):
        if meta or text.startswith(b'\1\n'):
            text = storageutil.packmeta(meta, text)

        return self.addrevision(text, transaction, linkrev, p1, p2)

    def addrevision(self, text, transaction, linkrev, p1, p2, node=None,
                    flags=revlog.REVIDX_DEFAULT_FLAGS, cachedelta=None):
        validatenode(p1)
        validatenode(p2)

        if flags:
            node = node or storageutil.hashrevisionsha1(text, p1, p2)

        rawtext, validatehash = self._processflags(text, flags, 'write')

        node = node or storageutil.hashrevisionsha1(text, p1, p2)

        if node in self._indexbynode:
            return node

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self._addrawrevision(node, rawtext, transaction, linkrev, p1, p2,
                                    flags)

    def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
        transaction.addbackup(self._indexpath)

        path = b'/'.join([self._storepath, hex(node)])

        self._svfs.write(path, rawtext)

        self._indexdata.append({
            b'node': node,
            b'p1': p1,
            b'p2': p2,
            b'linkrev': link,
            b'flags': flags,
        })

        self._reflectindexupdate()

        return node

    def _reflectindexupdate(self):
        self._refreshindex()
        self._svfs.write(self._indexpath, cbor.dumps(self._indexdata))

-    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
+    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None,
+                 maybemissingparents=False):
+        if maybemissingparents:
+            raise error.Abort(_('simple store does not support missing parents '
+                                'write mode'))
+
        nodes = []

        transaction.addbackup(self._indexpath)

        for node, p1, p2, linknode, deltabase, delta, flags in deltas:
            linkrev = linkmapper(linknode)
            flags = flags or revlog.REVIDX_DEFAULT_FLAGS

            nodes.append(node)

            if node in self._indexbynode:
                continue

            # Need to resolve the fulltext from the delta base.
            if deltabase == nullid:
                text = mdiff.patch(b'', delta)
            else:
                text = mdiff.patch(self.revision(deltabase), delta)

            self._addrawrevision(node, text, transaction, linkrev, p1, p2,
                                 flags)

            if addrevisioncb:
                addrevisioncb(self, node)

        return nodes

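    # Context for the change above (not part of the patch itself): callers
    # may now pass ``maybemissingparents=True`` to signal that incoming
    # deltas can reference parent nodes absent from local storage. Backends
    # that support the mode (such as the sqlitestore modified earlier in
    # this series) must handle such deltas; this test-only store refuses
    # the mode up front rather than writing inconsistent data.
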

    def heads(self, start=None, stop=None):
        # This is copied from revlog.py.
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = {startrev}
        heads = {startrev}

        parentrevs = self.parentrevs
        for r in self.revs(start=startrev + 1):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                        heads.add(r)
                    if p in heads and p not in stoprevs:
                        heads.remove(p)

        return [self.node(r) for r in heads]
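
    # For illustration: with a linear history rev 0 -> 1 -> 2, heads()
    # returns only [node(2)]; if rev 0 has two children on separate branches,
    # both branch tips are returned. Passing 'start' restricts the walk to
    # descendants of that node, and 'stop' prunes the walk at the given
    # nodes.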

    def children(self, node):
        validatenode(node)

        # This is a copy of revlog.children().
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def getstrippoint(self, minlink):
        # This is largely a copy of revlog.getstrippoint().
        brokenrevs = set()
        strippoint = len(self)

        heads = {}
        futurelargelinkrevs = set()
        for head in self.heads():
            # The dict must be keyed by revision number, since the walk
            # below pops entries by revision.
            headrev = self.rev(head)
            headlinkrev = self.linkrev(headrev)
            heads[headrev] = headlinkrev
            if headlinkrev >= minlink:
                futurelargelinkrevs.add(headlinkrev)

        # This algorithm involves walking down the rev graph, starting at the
        # heads. Since the revs are topologically sorted according to linkrev,
        # once all head linkrevs are below the minlink, we know there are
        # no more revs that could have a linkrev greater than minlink.
        # So we can stop walking.
        while futurelargelinkrevs:
            strippoint -= 1
            linkrev = heads.pop(strippoint)

            if linkrev < minlink:
                brokenrevs.add(strippoint)
            else:
                futurelargelinkrevs.remove(linkrev)

            for p in self.parentrevs(strippoint):
                if p != nullrev:
                    plinkrev = self.linkrev(p)
                    heads[p] = plinkrev
                    if plinkrev >= minlink:
                        futurelargelinkrevs.add(plinkrev)

        return strippoint, brokenrevs
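
    # Worked example (hypothetical): suppose the store holds a linear history
    # of revs 0..4 whose linkrevs equal their revs, and a strip with
    # minlink=2 is requested. The walk pops revs 4, 3, and 2 (each with
    # linkrev >= 2) and stops once no remaining linkrev reaches minlink,
    # returning strippoint=2 and an empty brokenrevs set, so revs 2..4 get
    # discarded.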

    def strip(self, minlink, transaction):
        if not len(self):
            return

        rev, _ignored = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # Purge index data starting at the requested revision.
        self._indexdata[rev:] = []
        self._reflectindexupdate()
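
    # Note that stripping only truncates the index; the per-node fulltext
    # files written for the stripped revisions are left behind in the store.
    # That is presumably why verifierinit() below disables the orphan store
    # file warning.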

def issimplestorefile(f, kind, st):
    if kind != stat.S_IFREG:
        return False

    if store.isrevlog(f, kind, st):
        return False

    # Ignore transaction undo files.
    if f.startswith('undo.'):
        return False

    # Otherwise assume it belongs to the simple store.
    return True
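
# For example, 'data/foo.i' is matched by store.isrevlog() and excluded, and
# 'undo.backupfiles' is excluded as transaction bookkeeping, while the
# per-node fulltext and index files this store writes fall through to the
# final 'return True'.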

class simplestore(store.encodedstore):
    def datafiles(self):
        for x in super(simplestore, self).datafiles():
            yield x

        # Supplement with non-revlog files.
        extrafiles = self._walk('data', True, filefilter=issimplestorefile)

        for unencoded, encoded, size in extrafiles:
            try:
                unencoded = store.decodefilename(unencoded)
            except KeyError:
                unencoded = None

            yield unencoded, encoded, size

def reposetup(ui, repo):
    if not repo.local():
        return

    if isinstance(repo, bundlerepo.bundlerepository):
        raise error.Abort(_('cannot use simple store with bundlerepo'))

    class simplestorerepo(repo.__class__):
        def file(self, f):
            return filestorage(self.svfs, f)

    repo.__class__ = simplestorerepo
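
# The dynamic subclassing above is the standard Mercurial pattern for
# extending a repository object: the repo's class is swapped for a subclass
# that overrides file(), so e.g. repo.file(b'foo') now yields a filestorage
# instance backed by this extension instead of a revlog-based filelog.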

def featuresetup(ui, supported):
    supported.add(REQUIREMENT)

def newreporequirements(orig, ui):
    """Modifies default requirements for new repos to use the simple store."""
    requirements = orig(ui)

    # These requirements are only used to affect creation of the store
    # object. We have our own store. So we can remove them.
    # TODO do this once we feel like taking the test hit.
    #if 'fncache' in requirements:
    #    requirements.remove('fncache')
    #if 'dotencode' in requirements:
    #    requirements.remove('dotencode')

    requirements.add(REQUIREMENT)

    return requirements
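
# Because REQUIREMENT is written into the new repository's .hg/requires file,
# any Mercurial client that does not recognize it (i.e. does not have this
# extension enabled) will refuse to operate on the repository, guarding
# against reading the store with incompatible code.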

def makestore(orig, requirements, path, vfstype):
    if REQUIREMENT not in requirements:
        return orig(requirements, path, vfstype)

    return simplestore(path, vfstype)

def verifierinit(orig, self, *args, **kwargs):
    orig(self, *args, **kwargs)

    # We don't care that files in the store don't align with what is
    # advertised. So suppress these warnings.
    self.warnorphanstorefiles = False

def extsetup(ui):
    localrepo.featuresetupfuncs.add(featuresetup)

    extensions.wrapfunction(localrepo, 'newreporequirements',
                            newreporequirements)
    extensions.wrapfunction(store, 'store', makestore)
    extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
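
# To try the extension, it can be enabled like any other, e.g. in an hgrc
# (the module path here is hypothetical):
#
#   [extensions]
#   simplestore = /path/to/simplestorerepo.py
#
# Newly created repositories will then carry the extra requirement and store
# file data via filestorage.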