filelog: add a hasnode() method (API)...
Gregory Szorc
r40423:f1a39128 default
@@ -1,1113 +1,1119 @@
# sqlitestore.py - Storage backend that uses SQLite
#
# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""store repository data in SQLite (EXPERIMENTAL)

The sqlitestore extension enables the storage of repository data in SQLite.

This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
GUARANTEES. This means that repositories created with this extension may
only be usable with the exact version of this extension/Mercurial that was
used. The extension attempts to enforce this in order to prevent repository
corruption.

In addition, several features are not yet supported or have known bugs:

* Only some data is stored in SQLite. Changeset, manifest, and other repository
  data is not yet stored in SQLite.
* Transactions are not robust. If the process is aborted at the right time
  during transaction close/rollback, the repository could be in an inconsistent
  state. This problem will diminish once all repository data is tracked by
  SQLite.
* Bundle repositories do not work (the ability to use e.g.
  `hg -R <bundle-file> log` to automatically overlay a bundle on top of the
  existing repository).
* Various other features don't work.

This extension should work for basic clone/pull, update, and commit workflows.
Some history rewriting operations may fail due to lack of support for bundle
repositories.

To use, activate the extension and set the ``storage.new-repo-backend`` config
option to ``sqlite`` to enable new repositories to use SQLite for storage.
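
For example, a minimal configuration enabling this storage backend might
look like the following (standard hgrc syntax; illustrative only)::

  [extensions]
  sqlitestore =

  [storage]
  new-repo-backend = sqlite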
37 """
37 """

# To run the test suite with repos using SQLite by default, execute the
# following:
#
# HGREPOFEATURES="sqlitestore" run-tests.py \
#     --extra-config-opt extensions.sqlitestore= \
#     --extra-config-opt storage.new-repo-backend=sqlite

from __future__ import absolute_import

import hashlib
import sqlite3
import struct
import threading
import zlib

from mercurial.i18n import _
from mercurial.node import (
    nullid,
    nullrev,
    short,
)
from mercurial.thirdparty import (
    attr,
)
from mercurial import (
    ancestor,
    dagop,
    error,
    extensions,
    localrepo,
    mdiff,
    pycompat,
    registrar,
    repository,
    util,
    verify,
)
from mercurial.utils import (
    interfaceutil,
    storageutil,
)

try:
    from mercurial import zstd
    zstd.__version__
except ImportError:
    zstd = None

configtable = {}
configitem = registrar.configitem(configtable)

# experimental config: storage.sqlite.compression
configitem('storage', 'sqlite.compression',
           default='zstd' if zstd else 'zlib')

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

REQUIREMENT = b'exp-sqlite-001'
REQUIREMENT_ZSTD = b'exp-sqlite-comp-001=zstd'
REQUIREMENT_ZLIB = b'exp-sqlite-comp-001=zlib'
REQUIREMENT_NONE = b'exp-sqlite-comp-001=none'

CURRENT_SCHEMA_VERSION = 1

COMPRESSION_NONE = 1
COMPRESSION_ZSTD = 2
COMPRESSION_ZLIB = 3

FLAG_CENSORED = 1

CREATE_SCHEMA = [
    # Deltas are stored as content-indexed blobs.
    # compression column holds COMPRESSION_* constant for how the
    # delta is encoded.

    r'CREATE TABLE delta ('
    r' id INTEGER PRIMARY KEY, '
    r' compression INTEGER NOT NULL, '
    r' hash BLOB UNIQUE ON CONFLICT ABORT, '
    r' delta BLOB NOT NULL '
    r')',

    # Tracked paths are denormalized to integers to avoid redundant
    # storage of the path name.
    r'CREATE TABLE filepath ('
    r' id INTEGER PRIMARY KEY, '
    r' path BLOB NOT NULL '
    r')',

    r'CREATE UNIQUE INDEX filepath_path '
    r' ON filepath (path)',

    # We have a single table for all file revision data.
    # Each file revision is uniquely described by a (path, rev) and
    # (path, node).
    #
    # Revision data is stored as a pointer to the delta producing this
    # revision and the file revision whose delta should be applied before
    # that one. One can reconstruct the delta chain by recursively following
    # the delta base revision pointers until one encounters NULL.
    #
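    # To illustrate the mechanism described above: if revision 2's delta
    # base points at revision 1, and revision 1's points at revision 0
    # (whose delta base is NULL), the fulltext of revision 2 is rebuilt by
    # applying the deltas for revisions 0, 1, and 2 in that order.
    #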
    # flags column holds bitwise integer flags controlling storage options.
    # These flags are defined by the FLAG_* constants.
    r'CREATE TABLE fileindex ('
    r' id INTEGER PRIMARY KEY, '
    r' pathid INTEGER REFERENCES filepath(id), '
    r' revnum INTEGER NOT NULL, '
    r' p1rev INTEGER NOT NULL, '
    r' p2rev INTEGER NOT NULL, '
    r' linkrev INTEGER NOT NULL, '
    r' flags INTEGER NOT NULL, '
    r' deltaid INTEGER REFERENCES delta(id), '
    r' deltabaseid INTEGER REFERENCES fileindex(id), '
    r' node BLOB NOT NULL '
    r')',

    r'CREATE UNIQUE INDEX fileindex_pathrevnum '
    r' ON fileindex (pathid, revnum)',

    r'CREATE UNIQUE INDEX fileindex_pathnode '
    r' ON fileindex (pathid, node)',

    # Provide a view over all file data for convenience.
    r'CREATE VIEW filedata AS '
    r'SELECT '
    r' fileindex.id AS id, '
    r' filepath.id AS pathid, '
    r' filepath.path AS path, '
    r' fileindex.revnum AS revnum, '
    r' fileindex.node AS node, '
    r' fileindex.p1rev AS p1rev, '
    r' fileindex.p2rev AS p2rev, '
    r' fileindex.linkrev AS linkrev, '
    r' fileindex.flags AS flags, '
    r' fileindex.deltaid AS deltaid, '
    r' fileindex.deltabaseid AS deltabaseid '
    r'FROM filepath, fileindex '
    r'WHERE fileindex.pathid=filepath.id',

    r'PRAGMA user_version=%d' % CURRENT_SCHEMA_VERSION,
]

def resolvedeltachain(db, pathid, node, revisioncache,
                      stoprids, zstddctx=None):
    """Resolve a delta chain for a file node."""

    # TODO the "not in ({stops})" here is possibly slowing down the query
    # because it needs to perform the lookup on every recursive invocation.
    # This could possibly be faster if we created a temporary query with
    # baseid "poisoned" to null and limited the recursive filter to
    # "is not null".
    res = db.execute(
        r'WITH RECURSIVE '
        r' deltachain(deltaid, baseid) AS ('
        r' SELECT deltaid, deltabaseid FROM fileindex '
        r' WHERE pathid=? AND node=? '
        r' UNION ALL '
        r' SELECT fileindex.deltaid, deltabaseid '
        r' FROM fileindex, deltachain '
        r' WHERE '
        r' fileindex.id=deltachain.baseid '
        r' AND deltachain.baseid IS NOT NULL '
        r' AND fileindex.id NOT IN ({stops}) '
        r' ) '
        r'SELECT deltachain.baseid, compression, delta '
        r'FROM deltachain, delta '
        r'WHERE delta.id=deltachain.deltaid'.format(
            stops=r','.join([r'?'] * len(stoprids))),
        tuple([pathid, node] + list(stoprids.keys())))

    deltas = []
    lastdeltabaseid = None

    for deltabaseid, compression, delta in res:
        lastdeltabaseid = deltabaseid

        if compression == COMPRESSION_ZSTD:
            delta = zstddctx.decompress(delta)
        elif compression == COMPRESSION_NONE:
            delta = delta
        elif compression == COMPRESSION_ZLIB:
            delta = zlib.decompress(delta)
        else:
            raise SQLiteStoreError('unhandled compression type: %d' %
                                   compression)

        deltas.append(delta)

    if lastdeltabaseid in stoprids:
        basetext = revisioncache[stoprids[lastdeltabaseid]]
    else:
        basetext = deltas.pop()

    deltas.reverse()
    fulltext = mdiff.patches(basetext, deltas)

    # SQLite returns buffer instances for blob columns on Python 2. This
    # type can propagate through the delta application layer. Because
    # downstream callers assume revisions are bytes, cast as needed.
    if not isinstance(fulltext, bytes):
        fulltext = bytes(fulltext)

    return fulltext

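# A sketch of a typical invocation (illustrative names; revision() below
# builds stoprids from its fulltext cache, and censorrevision() passes
# {-1: None} to disable early termination, since -1 never matches a row id):
#
#   fulltext = resolvedeltachain(db, pathid, node, revisioncache,
#                                {-1: None}, zstddctx=dctx)
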
def insertdelta(db, compression, hash, delta):
    try:
        return db.execute(
            r'INSERT INTO delta (compression, hash, delta) '
            r'VALUES (?, ?, ?)',
            (compression, hash, delta)).lastrowid
    except sqlite3.IntegrityError:
        return db.execute(
            r'SELECT id FROM delta WHERE hash=?',
            (hash,)).fetchone()[0]

class SQLiteStoreError(error.StorageError):
    pass

@attr.s
class revisionentry(object):
    rid = attr.ib()
    rev = attr.ib()
    node = attr.ib()
    p1rev = attr.ib()
    p2rev = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    linkrev = attr.ib()
    flags = attr.ib()

@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class sqliterevisiondelta(object):
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    linknode = attr.ib(default=None)

@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class sqliteproblem(object):
    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)

@interfaceutil.implementer(repository.ifilestorage)
class sqlitefilestore(object):
    """Implements storage for an individual tracked path."""

    def __init__(self, db, path, compression):
        self._db = db
        self._path = path

        self._pathid = None

        # revnum -> node
        self._revtonode = {}
        # node -> revnum
        self._nodetorev = {}
        # node -> data structure
        self._revisions = {}

        self._revisioncache = util.lrucachedict(10)

        self._compengine = compression

        if compression == 'zstd':
            self._cctx = zstd.ZstdCompressor(level=3)
            self._dctx = zstd.ZstdDecompressor()
        else:
            self._cctx = None
            self._dctx = None

        self._refreshindex()

    def _refreshindex(self):
        self._revtonode = {}
        self._nodetorev = {}
        self._revisions = {}

        res = list(self._db.execute(
            r'SELECT id FROM filepath WHERE path=?', (self._path,)))

        if not res:
            self._pathid = None
            return

        self._pathid = res[0][0]

        res = self._db.execute(
            r'SELECT id, revnum, node, p1rev, p2rev, linkrev, flags '
            r'FROM fileindex '
            r'WHERE pathid=? '
            r'ORDER BY revnum ASC',
            (self._pathid,))

        for i, row in enumerate(res):
            rid, rev, node, p1rev, p2rev, linkrev, flags = row

            if i != rev:
                raise SQLiteStoreError(_('sqlite database has inconsistent '
                                         'revision numbers'))

            if p1rev == nullrev:
                p1node = nullid
            else:
                p1node = self._revtonode[p1rev]

            if p2rev == nullrev:
                p2node = nullid
            else:
                p2node = self._revtonode[p2rev]

            entry = revisionentry(
                rid=rid,
                rev=rev,
                node=node,
                p1rev=p1rev,
                p2rev=p2rev,
                p1node=p1node,
                p2node=p2node,
                linkrev=linkrev,
                flags=flags)

            self._revtonode[rev] = node
            self._nodetorev[node] = rev
            self._revisions[node] = entry

    # Start of ifileindex interface.

    def __len__(self):
        return len(self._revisions)

    def __iter__(self):
        return iter(pycompat.xrange(len(self._revisions)))

    def hasnode(self, node):
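        # The null revision is never stored, so report it as absent.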
        if node == nullid:
            return False

        return node in self._nodetorev

    def revs(self, start=0, stop=None):
        return storageutil.iterrevs(len(self._revisions), start=start,
                                    stop=stop)

    def parents(self, node):
        if node == nullid:
            return nullid, nullid

        if node not in self._revisions:
            raise error.LookupError(node, self._path, _('no node'))

        entry = self._revisions[node]
        return entry.p1node, entry.p2node

    def parentrevs(self, rev):
        if rev == nullrev:
            return nullrev, nullrev

        if rev not in self._revtonode:
            raise IndexError(rev)

        entry = self._revisions[self._revtonode[rev]]
        return entry.p1rev, entry.p2rev

    def rev(self, node):
        if node == nullid:
            return nullrev

        if node not in self._nodetorev:
            raise error.LookupError(node, self._path, _('no node'))

        return self._nodetorev[node]

    def node(self, rev):
        if rev == nullrev:
            return nullid

        if rev not in self._revtonode:
            raise IndexError(rev)

        return self._revtonode[rev]

    def lookup(self, node):
        return storageutil.fileidlookup(self, node, self._path)

    def linkrev(self, rev):
        if rev == nullrev:
            return nullrev

        if rev not in self._revtonode:
            raise IndexError(rev)

        entry = self._revisions[self._revtonode[rev]]
        return entry.linkrev

    def iscensored(self, rev):
        if rev == nullrev:
            return False

        if rev not in self._revtonode:
            raise IndexError(rev)

        return self._revisions[self._revtonode[rev]].flags & FLAG_CENSORED

    def commonancestorsheads(self, node1, node2):
        rev1 = self.rev(node1)
        rev2 = self.rev(node2)

        ancestors = ancestor.commonancestorsheads(self.parentrevs, rev1, rev2)
        return pycompat.maplist(self.node, ancestors)

    def descendants(self, revs):
        # TODO we could implement this using a recursive SQL query, which
        # might be faster.
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)

    def heads(self, start=None, stop=None):
        if start is None and stop is None:
            if not len(self):
                return [nullid]

        startrev = self.rev(start) if start is not None else nullrev
        stoprevs = {self.rev(n) for n in stop or []}

        revs = dagop.headrevssubset(self.revs, self.parentrevs,
                                    startrev=startrev, stoprevs=stoprevs)

        return [self.node(rev) for rev in revs]

    def children(self, node):
        rev = self.rev(node)

        res = self._db.execute(
            r'SELECT'
            r' node '
            r' FROM filedata '
            r' WHERE path=? AND (p1rev=? OR p2rev=?) '
            r' ORDER BY revnum ASC',
            (self._path, rev, rev))

        return [row[0] for row in res]

    # End of ifileindex interface.

    # Start of ifiledata interface.

    def size(self, rev):
        if rev == nullrev:
            return 0

        if rev not in self._revtonode:
            raise IndexError(rev)

        node = self._revtonode[rev]

        if self.renamed(node):
            return len(self.read(node))

        return len(self.revision(node))

    def revision(self, node, raw=False, _verifyhash=True):
        if node in (nullid, nullrev):
            return b''

        if isinstance(node, int):
            node = self.node(node)

        if node not in self._nodetorev:
            raise error.LookupError(node, self._path, _('no node'))

        if node in self._revisioncache:
            return self._revisioncache[node]

        # Because we have a fulltext revision cache, we are able to
        # short-circuit delta chain traversal and decompression as soon as
        # we encounter a revision in the cache.

        stoprids = {self._revisions[n].rid: n
                    for n in self._revisioncache}

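        # Row ids are positive, so a sentinel of -1 can never match and the
        # delta chain is resolved all the way back to its root.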
        if not stoprids:
            stoprids[-1] = None

        fulltext = resolvedeltachain(self._db, self._pathid, node,
                                     self._revisioncache, stoprids,
                                     zstddctx=self._dctx)

        if _verifyhash:
            self._checkhash(fulltext, node)
            self._revisioncache[node] = fulltext

        return fulltext

    def read(self, node):
        return storageutil.filtermetadata(self.revision(node))

    def renamed(self, node):
        return storageutil.filerevisioncopied(self, node)

    def cmp(self, node, fulltext):
        return not storageutil.filedataequivalent(self, node, fulltext)

    def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
                      assumehaveparentrevisions=False, deltaprevious=False):
        if nodesorder not in ('nodes', 'storage', None):
            raise error.ProgrammingError('unhandled value for nodesorder: %s' %
                                         nodesorder)

        nodes = [n for n in nodes if n != nullid]

        if not nodes:
            return

        # TODO perform in a single query.
        res = self._db.execute(
            r'SELECT revnum, deltaid FROM fileindex '
            r'WHERE pathid=? '
            r' AND node in (%s)' % (r','.join([r'?'] * len(nodes))),
            tuple([self._pathid] + nodes))

        deltabases = {}

        for rev, deltaid in res:
            res = self._db.execute(
                r'SELECT revnum from fileindex WHERE pathid=? AND deltaid=?',
                (self._pathid, deltaid))
            deltabases[rev] = res.fetchone()[0]

        # TODO define revdifffn so we can use delta from storage.
        for delta in storageutil.emitrevisions(
            self, nodes, nodesorder, sqliterevisiondelta,
            deltaparentfn=deltabases.__getitem__,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            deltaprevious=deltaprevious):

            yield delta

    # End of ifiledata interface.

    # Start of ifilemutation interface.

    def add(self, filedata, meta, transaction, linkrev, p1, p2):
        if meta or filedata.startswith(b'\x01\n'):
            filedata = storageutil.packmeta(meta, filedata)

        return self.addrevision(filedata, transaction, linkrev, p1, p2)

    def addrevision(self, revisiondata, transaction, linkrev, p1, p2, node=None,
                    flags=0, cachedelta=None):
        if flags:
            raise SQLiteStoreError(_('flags not supported on revisions'))

        validatehash = node is not None
        node = node or storageutil.hashrevisionsha1(revisiondata, p1, p2)

        if validatehash:
            self._checkhash(revisiondata, node, p1, p2)

        if node in self._nodetorev:
            return node

        node = self._addrawrevision(node, revisiondata, transaction, linkrev,
                                    p1, p2)

        self._revisioncache[node] = revisiondata
        return node

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        nodes = []

        for node, p1, p2, linknode, deltabase, delta, wireflags in deltas:
            storeflags = 0

            if wireflags & repository.REVISION_FLAG_CENSORED:
                storeflags |= FLAG_CENSORED

            if wireflags & ~repository.REVISION_FLAG_CENSORED:
                raise SQLiteStoreError('unhandled revision flag')

            baserev = self.rev(deltabase)

            # If base is censored, delta must be full replacement in a single
            # patch operation.
            if baserev != nullrev and self.iscensored(baserev):
                hlen = struct.calcsize('>lll')
                oldlen = len(self.revision(deltabase, raw=True,
                                           _verifyhash=False))
                newlen = len(delta) - hlen

                if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                    raise error.CensoredBaseError(self._path,
                                                  deltabase)

            if (not (storeflags & FLAG_CENSORED)
                and storageutil.deltaiscensored(
                    delta, baserev, lambda x: len(self.revision(x, raw=True)))):
                storeflags |= FLAG_CENSORED

            linkrev = linkmapper(linknode)

            nodes.append(node)

            if node in self._revisions:
                continue

            if deltabase == nullid:
                text = mdiff.patch(b'', delta)
                storedelta = None
            else:
                text = None
                storedelta = (deltabase, delta)

            self._addrawrevision(node, text, transaction, linkrev, p1, p2,
                                 storedelta=storedelta, flags=storeflags)

            if addrevisioncb:
                addrevisioncb(self, node)

        return nodes

    def censorrevision(self, tr, censornode, tombstone=b''):
        tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

        # This restriction is cargo culted from revlogs and makes no sense for
        # SQLite, since columns can be resized at will.
        if len(tombstone) > len(self.revision(censornode, raw=True)):
            raise error.Abort(_('censor tombstone must be no longer than '
                                'censored data'))

        # We need to replace the censored revision's data with the tombstone.
        # But replacing that data will have implications for delta chains that
        # reference it.
        #
        # While "better," more complex strategies are possible, we do something
        # simple: we find delta chain children of the censored revision and we
        # replace those incremental deltas with fulltexts of their corresponding
        # revision. Then we delete the now-unreferenced delta and original
        # revision and insert a replacement.

        # Find the delta to be censored.
        censoreddeltaid = self._db.execute(
            r'SELECT deltaid FROM fileindex WHERE id=?',
            (self._revisions[censornode].rid,)).fetchone()[0]

        # Find all its delta chain children.
        # TODO once we support storing deltas for !files, we'll need to look
        # for those delta chains too.
        rows = list(self._db.execute(
            r'SELECT id, pathid, node FROM fileindex '
            r'WHERE deltabaseid=? OR deltaid=?',
            (censoreddeltaid, censoreddeltaid)))

        for row in rows:
            rid, pathid, node = row

            fulltext = resolvedeltachain(self._db, pathid, node, {}, {-1: None},
                                         zstddctx=self._dctx)

            deltahash = hashlib.sha1(fulltext).digest()

            if self._compengine == 'zstd':
                deltablob = self._cctx.compress(fulltext)
                compression = COMPRESSION_ZSTD
            elif self._compengine == 'zlib':
                deltablob = zlib.compress(fulltext)
                compression = COMPRESSION_ZLIB
            elif self._compengine == 'none':
                deltablob = fulltext
                compression = COMPRESSION_NONE
            else:
                raise error.ProgrammingError('unhandled compression engine: %s'
                                             % self._compengine)

            if len(deltablob) >= len(fulltext):
                deltablob = fulltext
                compression = COMPRESSION_NONE

            deltaid = insertdelta(self._db, compression, deltahash, deltablob)

            self._db.execute(
                r'UPDATE fileindex SET deltaid=?, deltabaseid=NULL '
                r'WHERE id=?', (deltaid, rid))

        # Now create the tombstone delta and replace the delta on the censored
        # node.
        deltahash = hashlib.sha1(tombstone).digest()
        tombstonedeltaid = insertdelta(self._db, COMPRESSION_NONE,
                                       deltahash, tombstone)

        flags = self._revisions[censornode].flags
        flags |= FLAG_CENSORED

        self._db.execute(
            r'UPDATE fileindex SET flags=?, deltaid=?, deltabaseid=NULL '
            r'WHERE pathid=? AND node=?',
            (flags, tombstonedeltaid, self._pathid, censornode))

        self._db.execute(
            r'DELETE FROM delta WHERE id=?', (censoreddeltaid,))

        self._refreshindex()
        self._revisioncache.clear()

    def getstrippoint(self, minlink):
        return storageutil.resolvestripinfo(minlink, len(self) - 1,
                                            [self.rev(n) for n in self.heads()],
                                            self.linkrev,
                                            self.parentrevs)

    def strip(self, minlink, transaction):
        if not len(self):
            return

        rev, _ignored = self.getstrippoint(minlink)

        if rev == len(self):
            return

        for rev in self.revs(rev):
            self._db.execute(
                r'DELETE FROM fileindex WHERE pathid=? AND node=?',
                (self._pathid, self.node(rev)))

        # TODO how should we garbage collect data in delta table?

        self._refreshindex()

    # End of ifilemutation interface.
773
779
774 # Start of ifilestorage interface.
780 # Start of ifilestorage interface.
775
781
776 def files(self):
782 def files(self):
777 return []
783 return []
778
784
779 def storageinfo(self, exclusivefiles=False, sharedfiles=False,
785 def storageinfo(self, exclusivefiles=False, sharedfiles=False,
780 revisionscount=False, trackedsize=False,
786 revisionscount=False, trackedsize=False,
781 storedsize=False):
787 storedsize=False):
782 d = {}
788 d = {}
783
789
784 if exclusivefiles:
790 if exclusivefiles:
785 d['exclusivefiles'] = []
791 d['exclusivefiles'] = []
786
792
787 if sharedfiles:
793 if sharedfiles:
788 # TODO list sqlite file(s) here.
794 # TODO list sqlite file(s) here.
789 d['sharedfiles'] = []
795 d['sharedfiles'] = []
790
796
791 if revisionscount:
797 if revisionscount:
792 d['revisionscount'] = len(self)
798 d['revisionscount'] = len(self)
793
799
794 if trackedsize:
800 if trackedsize:
795 d['trackedsize'] = sum(len(self.revision(node))
801 d['trackedsize'] = sum(len(self.revision(node))
796 for node in self._nodetorev)
802 for node in self._nodetorev)
797
803
798 if storedsize:
804 if storedsize:
799 # TODO implement this?
805 # TODO implement this?
800 d['storedsize'] = None
806 d['storedsize'] = None
801
807
802 return d
808 return d
803
809
804 def verifyintegrity(self, state):
810 def verifyintegrity(self, state):
805 state['skipread'] = set()
811 state['skipread'] = set()
806
812
807 for rev in self:
813 for rev in self:
808 node = self.node(rev)
814 node = self.node(rev)
809
815
810 try:
816 try:
811 self.revision(node)
817 self.revision(node)
812 except Exception as e:
818 except Exception as e:
813 yield sqliteproblem(
819 yield sqliteproblem(
814 error=_('unpacking %s: %s') % (short(node), e),
820 error=_('unpacking %s: %s') % (short(node), e),
815 node=node)
821 node=node)
816
822
817 state['skipread'].add(node)
823 state['skipread'].add(node)
818
824
819 # End of ifilestorage interface.
825 # End of ifilestorage interface.
820
826
821 def _checkhash(self, fulltext, node, p1=None, p2=None):
827 def _checkhash(self, fulltext, node, p1=None, p2=None):
822 if p1 is None and p2 is None:
828 if p1 is None and p2 is None:
823 p1, p2 = self.parents(node)
829 p1, p2 = self.parents(node)
824
830
825 if node == storageutil.hashrevisionsha1(fulltext, p1, p2):
831 if node == storageutil.hashrevisionsha1(fulltext, p1, p2):
826 return
832 return
827
833
828 try:
834 try:
829 del self._revisioncache[node]
835 del self._revisioncache[node]
830 except KeyError:
836 except KeyError:
831 pass
837 pass
832
838
833 if storageutil.iscensoredtext(fulltext):
839 if storageutil.iscensoredtext(fulltext):
834 raise error.CensoredNodeError(self._path, node, fulltext)
840 raise error.CensoredNodeError(self._path, node, fulltext)
835
841
836 raise SQLiteStoreError(_('integrity check failed on %s') %
842 raise SQLiteStoreError(_('integrity check failed on %s') %
837 self._path)
843 self._path)
838
844
839 def _addrawrevision(self, node, revisiondata, transaction, linkrev,
845 def _addrawrevision(self, node, revisiondata, transaction, linkrev,
840 p1, p2, storedelta=None, flags=0):
846 p1, p2, storedelta=None, flags=0):
841 if self._pathid is None:
847 if self._pathid is None:
842 res = self._db.execute(
848 res = self._db.execute(
843 r'INSERT INTO filepath (path) VALUES (?)', (self._path,))
849 r'INSERT INTO filepath (path) VALUES (?)', (self._path,))
844 self._pathid = res.lastrowid
850 self._pathid = res.lastrowid
845
851
846 # For simplicity, always store a delta against p1.
852 # For simplicity, always store a delta against p1.
847 # TODO we need a lot more logic here to make behavior reasonable.
853 # TODO we need a lot more logic here to make behavior reasonable.
848
854
849 if storedelta:
855 if storedelta:
850 deltabase, delta = storedelta
856 deltabase, delta = storedelta
851
857
852 if isinstance(deltabase, int):
858 if isinstance(deltabase, int):
853 deltabase = self.node(deltabase)
859 deltabase = self.node(deltabase)
854
860
855 else:
861 else:
856 assert revisiondata is not None
862 assert revisiondata is not None
857 deltabase = p1
863 deltabase = p1
858
864
859 if deltabase == nullid:
865 if deltabase == nullid:
860 delta = revisiondata
866 delta = revisiondata
861 else:
867 else:
862 delta = mdiff.textdiff(self.revision(self.rev(deltabase)),
868 delta = mdiff.textdiff(self.revision(self.rev(deltabase)),
863 revisiondata)
869 revisiondata)
864
870
865 # File index stores a pointer to its delta and the parent delta.
871 # File index stores a pointer to its delta and the parent delta.
866 # The parent delta is stored via a pointer to the fileindex PK.
872 # The parent delta is stored via a pointer to the fileindex PK.
867 if deltabase == nullid:
873 if deltabase == nullid:
868 baseid = None
874 baseid = None
869 else:
875 else:
870 baseid = self._revisions[deltabase].rid
876 baseid = self._revisions[deltabase].rid
871
877
872 # Deltas are stored with a hash of their content. This allows
878 # Deltas are stored with a hash of their content. This allows
873 # us to de-duplicate. The table is configured to ignore conflicts
879 # us to de-duplicate. The table is configured to ignore conflicts
874 # and it is faster to just insert and silently noop than to look
880 # and it is faster to just insert and silently noop than to look
875 # first.
881 # first.
876 deltahash = hashlib.sha1(delta).digest()
882 deltahash = hashlib.sha1(delta).digest()
877
883
878 if self._compengine == 'zstd':
884 if self._compengine == 'zstd':
879 deltablob = self._cctx.compress(delta)
885 deltablob = self._cctx.compress(delta)
880 compression = COMPRESSION_ZSTD
886 compression = COMPRESSION_ZSTD
881 elif self._compengine == 'zlib':
887 elif self._compengine == 'zlib':
882 deltablob = zlib.compress(delta)
888 deltablob = zlib.compress(delta)
883 compression = COMPRESSION_ZLIB
889 compression = COMPRESSION_ZLIB
884 elif self._compengine == 'none':
890 elif self._compengine == 'none':
885 deltablob = delta
891 deltablob = delta
886 compression = COMPRESSION_NONE
892 compression = COMPRESSION_NONE
887 else:
893 else:
888 raise error.ProgrammingError('unhandled compression engine: %s' %
894 raise error.ProgrammingError('unhandled compression engine: %s' %
889 self._compengine)
895 self._compengine)
890
896
891 # Don't store compressed data if it isn't practical.
897 # Don't store compressed data if it isn't practical.
892 if len(deltablob) >= len(delta):
898 if len(deltablob) >= len(delta):
893 deltablob = delta
899 deltablob = delta
894 compression = COMPRESSION_NONE
900 compression = COMPRESSION_NONE
895
901
896 deltaid = insertdelta(self._db, compression, deltahash, deltablob)
902 deltaid = insertdelta(self._db, compression, deltahash, deltablob)
897
903
898 rev = len(self)
904 rev = len(self)
899
905
900 if p1 == nullid:
906 if p1 == nullid:
901 p1rev = nullrev
907 p1rev = nullrev
902 else:
908 else:
903 p1rev = self._nodetorev[p1]
909 p1rev = self._nodetorev[p1]
904
910
905 if p2 == nullid:
911 if p2 == nullid:
906 p2rev = nullrev
912 p2rev = nullrev
907 else:
913 else:
908 p2rev = self._nodetorev[p2]
914 p2rev = self._nodetorev[p2]
909
915
910 rid = self._db.execute(
916 rid = self._db.execute(
911 r'INSERT INTO fileindex ('
917 r'INSERT INTO fileindex ('
912 r' pathid, revnum, node, p1rev, p2rev, linkrev, flags, '
918 r' pathid, revnum, node, p1rev, p2rev, linkrev, flags, '
913 r' deltaid, deltabaseid) '
919 r' deltaid, deltabaseid) '
914 r' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
920 r' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
915 (self._pathid, rev, node, p1rev, p2rev, linkrev, flags,
921 (self._pathid, rev, node, p1rev, p2rev, linkrev, flags,
916 deltaid, baseid)
922 deltaid, baseid)
917 ).lastrowid
923 ).lastrowid
918
924
919 entry = revisionentry(
925 entry = revisionentry(
920 rid=rid,
926 rid=rid,
921 rev=rev,
927 rev=rev,
922 node=node,
928 node=node,
923 p1rev=p1rev,
929 p1rev=p1rev,
924 p2rev=p2rev,
930 p2rev=p2rev,
925 p1node=p1,
931 p1node=p1,
926 p2node=p2,
932 p2node=p2,
927 linkrev=linkrev,
933 linkrev=linkrev,
928 flags=flags)
934 flags=flags)
929
935
930 self._nodetorev[node] = rev
936 self._nodetorev[node] = rev
931 self._revtonode[rev] = node
937 self._revtonode[rev] = node
932 self._revisions[node] = entry
938 self._revisions[node] = entry
933
939
934 return node
940 return node
935
941
936 class sqliterepository(localrepo.localrepository):
942 class sqliterepository(localrepo.localrepository):
937 def cancopy(self):
943 def cancopy(self):
938 return False
944 return False
939
945
940 def transaction(self, *args, **kwargs):
946 def transaction(self, *args, **kwargs):
941 current = self.currenttransaction()
947 current = self.currenttransaction()
942
948
943 tr = super(sqliterepository, self).transaction(*args, **kwargs)
949 tr = super(sqliterepository, self).transaction(*args, **kwargs)
944
950
945 if current:
951 if current:
946 return tr
952 return tr
947
953
948 self._dbconn.execute(r'BEGIN TRANSACTION')
954 self._dbconn.execute(r'BEGIN TRANSACTION')
949
955
950 def committransaction(_):
956 def committransaction(_):
951 self._dbconn.commit()
957 self._dbconn.commit()
952
958
953 tr.addfinalize('sqlitestore', committransaction)
959 tr.addfinalize('sqlitestore', committransaction)
954
960
955 return tr
961 return tr
956
962
957 @property
963 @property
958 def _dbconn(self):
964 def _dbconn(self):
959 # SQLite connections can only be used on the thread that created
965 # SQLite connections can only be used on the thread that created
960 # them. In most cases, this "just works." However, hgweb uses
966 # them. In most cases, this "just works." However, hgweb uses
961 # multiple threads.
967 # multiple threads.
962 tid = threading.current_thread().ident
968 tid = threading.current_thread().ident
963
969
964 if self._db:
970 if self._db:
965 if self._db[0] == tid:
971 if self._db[0] == tid:
966 return self._db[1]
972 return self._db[1]
967
973
968 db = makedb(self.svfs.join('db.sqlite'))
974 db = makedb(self.svfs.join('db.sqlite'))
969 self._db = (tid, db)
975 self._db = (tid, db)
970
976
971 return db
977 return db
972
978
973 def makedb(path):
979 def makedb(path):
974 """Construct a database handle for a database at path."""
980 """Construct a database handle for a database at path."""
975
981
976 db = sqlite3.connect(path)
982 db = sqlite3.connect(path)
977 db.text_factory = bytes
983 db.text_factory = bytes
978
984
979 res = db.execute(r'PRAGMA user_version').fetchone()[0]
985 res = db.execute(r'PRAGMA user_version').fetchone()[0]
980
986
981 # New database.
987 # New database.
982 if res == 0:
988 if res == 0:
983 for statement in CREATE_SCHEMA:
989 for statement in CREATE_SCHEMA:
984 db.execute(statement)
990 db.execute(statement)
985
991
986 db.commit()
992 db.commit()
987
993
988 elif res == CURRENT_SCHEMA_VERSION:
994 elif res == CURRENT_SCHEMA_VERSION:
989 pass
995 pass
990
996
991 else:
997 else:
992 raise error.Abort(_('sqlite database has unrecognized version'))
998 raise error.Abort(_('sqlite database has unrecognized version'))
993
999
994 db.execute(r'PRAGMA journal_mode=WAL')
1000 db.execute(r'PRAGMA journal_mode=WAL')
995
1001
996 return db
1002 return db

 def featuresetup(ui, supported):
     supported.add(REQUIREMENT)

     if zstd:
         supported.add(REQUIREMENT_ZSTD)

     supported.add(REQUIREMENT_ZLIB)
     supported.add(REQUIREMENT_NONE)

 def newreporequirements(orig, ui, createopts):
     if createopts['backend'] != 'sqlite':
         return orig(ui, createopts)

     # This restriction can be lifted once we have more confidence.
     if 'sharedrepo' in createopts:
         raise error.Abort(_('shared repositories not supported with SQLite '
                             'store'))

     # This filtering is out of an abundance of caution: we want to ensure
     # we honor creation options and we do that by annotating exactly the
     # creation options we recognize.
     known = {
         'narrowfiles',
         'backend',
     }

     unsupported = set(createopts) - known
     if unsupported:
         raise error.Abort(_('SQLite store does not support repo creation '
                             'option: %s') % ', '.join(sorted(unsupported)))

     # Since we're a hybrid store that still relies on revlogs, we fall back
     # to using the revlogv1 backend's storage requirements then adding our
     # own requirement.
     createopts['backend'] = 'revlogv1'
     requirements = orig(ui, createopts)
     requirements.add(REQUIREMENT)

     compression = ui.config('storage', 'sqlite.compression')

     if compression == 'zstd' and not zstd:
         raise error.Abort(_('storage.sqlite.compression set to "zstd" but '
                             'zstandard compression not available to this '
                             'Mercurial install'))

     if compression == 'zstd':
         requirements.add(REQUIREMENT_ZSTD)
     elif compression == 'zlib':
         requirements.add(REQUIREMENT_ZLIB)
     elif compression == 'none':
         requirements.add(REQUIREMENT_NONE)
     else:
         raise error.Abort(_('unknown compression engine defined in '
                             'storage.sqlite.compression: %s') % compression)

     return requirements
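
Putting the configuration knobs together: repository creation consults `storage.new-repo-backend` to pick this backend and `storage.sqlite.compression` (one of `zstd`, `zlib`, `none`) to pick a compression requirement. A plausible hgrc enabling all of this (the `zstd` value assumes the zstandard module is importable):

    [extensions]
    sqlitestore =

    [storage]
    new-repo-backend = sqlite
    sqlite.compression = zstd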

 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
 class sqlitefilestorage(object):
     """Repository file storage backed by SQLite."""
     def file(self, path):
         if path[0] == b'/':
             path = path[1:]

         if REQUIREMENT_ZSTD in self.requirements:
             compression = 'zstd'
         elif REQUIREMENT_ZLIB in self.requirements:
             compression = 'zlib'
         elif REQUIREMENT_NONE in self.requirements:
             compression = 'none'
         else:
             raise error.Abort(_('unable to determine what compression engine '
                                 'to use for SQLite storage'))

         return sqlitefilestore(self._dbconn, path, compression)

 def makefilestorage(orig, requirements, **kwargs):
     """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
     if REQUIREMENT in requirements:
         return sqlitefilestorage
     else:
         return orig(requirements=requirements, **kwargs)

 def makemain(orig, ui, requirements, **kwargs):
     if REQUIREMENT in requirements:
         if REQUIREMENT_ZSTD in requirements and not zstd:
             raise error.Abort(_('repository uses zstandard compression, which '
                                 'is not available to this Mercurial install'))

         return sqliterepository

     return orig(requirements=requirements, **kwargs)

 def verifierinit(orig, self, *args, **kwargs):
     orig(self, *args, **kwargs)

     # We don't care that files in the store don't align with what is
     # advertised. So suppress these warnings.
     self.warnorphanstorefiles = False

 def extsetup(ui):
     localrepo.featuresetupfuncs.add(featuresetup)
     extensions.wrapfunction(localrepo, 'newreporequirements',
                             newreporequirements)
     extensions.wrapfunction(localrepo, 'makefilestorage',
                             makefilestorage)
     extensions.wrapfunction(localrepo, 'makemain',
                             makemain)
     extensions.wrapfunction(verify.verifier, '__init__',
                             verifierinit)
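
All of the hooks above rely on `extensions.wrapfunction(container, name, wrapper)`, which swaps `container.name` for a wrapper that receives the original callable first, the `orig` parameter threaded through the functions in this file. A simplified sketch of the convention (Mercurial's real implementation also records unwrap metadata):

    def wrapfunction(container, name, wrapper):
        # Replace container.name so that wrapper(orig, *args, **kwargs)
        # runs with the original function as its first argument.
        origfn = getattr(container, name)

        def wrapped(*args, **kwargs):
            return wrapper(origfn, *args, **kwargs)

        setattr(container, name, wrapped)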

 def reposetup(ui, repo):
     if isinstance(repo, sqliterepository):
         repo._db = None

     # TODO check for bundlerepository?
diff --git a/mercurial/filelog.py b/mercurial/filelog.py
@@ -1,219 +1,233 @@
 # filelog.py - file history class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

+from .node import (
+    nullid,
+    nullrev,
+)
 from . import (
     error,
     repository,
     revlog,
 )
 from .utils import (
     interfaceutil,
     storageutil,
 )

 @interfaceutil.implementer(repository.ifilestorage)
 class filelog(object):
     def __init__(self, opener, path):
         self._revlog = revlog.revlog(opener,
                                      '/'.join(('data', path + '.i')),
                                      censorable=True)
         # Full name of the user visible file, relative to the repository root.
         # Used by LFS.
         self._revlog.filename = path

     def __len__(self):
         return len(self._revlog)

     def __iter__(self):
         return self._revlog.__iter__()

+    def hasnode(self, node):
+        if node in (nullid, nullrev):
+            return False
+
+        try:
+            self._revlog.rev(node)
+            return True
+        except (TypeError, ValueError, IndexError, error.LookupError):
+            return False
+
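
The added method is the point of this change: a cheap membership test with strict input semantics, as flagged by the (API) marker in the commit message. A usage sketch (assumes an existing ``repo`` object and the helpers from ``mercurial.node``):

    from mercurial.node import hex, nullid

    fl = repo.file(b'foo')   # filelog for tracked file 'foo'
    node = fl.node(0)        # 20-byte binary node of the first revision

    fl.hasnode(node)         # -> True
    fl.hasnode(nullid)       # -> False: the null node is never present
    fl.hasnode(hex(node))    # -> False: hex strings are rejected
    fl.hasnode(0)            # -> False: revision numbers are rejected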
     def revs(self, start=0, stop=None):
         return self._revlog.revs(start=start, stop=stop)

     def parents(self, node):
         return self._revlog.parents(node)

     def parentrevs(self, rev):
         return self._revlog.parentrevs(rev)

     def rev(self, node):
         return self._revlog.rev(node)

     def node(self, rev):
         return self._revlog.node(rev)

     def lookup(self, node):
         return storageutil.fileidlookup(self._revlog, node,
                                         self._revlog.indexfile)

     def linkrev(self, rev):
         return self._revlog.linkrev(rev)

     def commonancestorsheads(self, node1, node2):
         return self._revlog.commonancestorsheads(node1, node2)

     # Used by dagop.blockdescendants().
     def descendants(self, revs):
         return self._revlog.descendants(revs)

     def heads(self, start=None, stop=None):
         return self._revlog.heads(start, stop)

     # Used by hgweb, children extension.
     def children(self, node):
         return self._revlog.children(node)

     def iscensored(self, rev):
         return self._revlog.iscensored(rev)

     def revision(self, node, _df=None, raw=False):
         return self._revlog.revision(node, _df=_df, raw=raw)

     def emitrevisions(self, nodes, nodesorder=None,
                       revisiondata=False, assumehaveparentrevisions=False,
                       deltaprevious=False):
         return self._revlog.emitrevisions(
             nodes, nodesorder=nodesorder, revisiondata=revisiondata,
             assumehaveparentrevisions=assumehaveparentrevisions,
             deltaprevious=deltaprevious)

     def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
                     node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
                     cachedelta=None):
         return self._revlog.addrevision(revisiondata, transaction, linkrev,
                                         p1, p2, node=node, flags=flags,
                                         cachedelta=cachedelta)

     def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
         return self._revlog.addgroup(deltas, linkmapper, transaction,
                                      addrevisioncb=addrevisioncb)

     def getstrippoint(self, minlink):
         return self._revlog.getstrippoint(minlink)

     def strip(self, minlink, transaction):
         return self._revlog.strip(minlink, transaction)

     def censorrevision(self, tr, node, tombstone=b''):
         return self._revlog.censorrevision(tr, node, tombstone=tombstone)

     def files(self):
         return self._revlog.files()

     def read(self, node):
         return storageutil.filtermetadata(self.revision(node))

     def add(self, text, meta, transaction, link, p1=None, p2=None):
         if meta or text.startswith('\1\n'):
             text = storageutil.packmeta(meta, text)
         return self.addrevision(text, transaction, link, p1, p2)

     def renamed(self, node):
         return storageutil.filerevisioncopied(self, node)

     def size(self, rev):
         """return the size of a given revision"""

         # for revisions with renames, we have to go the slow way
         node = self.node(rev)
         if self.renamed(node):
             return len(self.read(node))
         if self.iscensored(rev):
             return 0

         # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
         return self._revlog.size(rev)

     def cmp(self, node, text):
         """compare text with a given file revision

         returns True if text is different than what is stored.
         """
         return not storageutil.filedataequivalent(self, node, text)

     def verifyintegrity(self, state):
         return self._revlog.verifyintegrity(state)

     def storageinfo(self, exclusivefiles=False, sharedfiles=False,
                     revisionscount=False, trackedsize=False,
                     storedsize=False):
         return self._revlog.storageinfo(
             exclusivefiles=exclusivefiles, sharedfiles=sharedfiles,
             revisionscount=revisionscount, trackedsize=trackedsize,
             storedsize=storedsize)

     # TODO these aren't part of the interface and aren't internal methods.
     # Callers should be fixed to not use them.

     # Used by bundlefilelog, unionfilelog.
     @property
     def indexfile(self):
         return self._revlog.indexfile

     @indexfile.setter
     def indexfile(self, value):
         self._revlog.indexfile = value

     # Used by repo upgrade.
     def clone(self, tr, destrevlog, **kwargs):
         if not isinstance(destrevlog, filelog):
             raise error.ProgrammingError('expected filelog to clone()')

         return self._revlog.clone(tr, destrevlog._revlog, **kwargs)

 class narrowfilelog(filelog):
     """Filelog variation to be used with narrow stores."""

     def __init__(self, opener, path, narrowmatch):
         super(narrowfilelog, self).__init__(opener, path)
         self._narrowmatch = narrowmatch

     def renamed(self, node):
         res = super(narrowfilelog, self).renamed(node)

         # Renames that come from outside the narrowspec are problematic
         # because we may lack the base text for the rename. This can result
         # in code attempting to walk the ancestry or compute a diff
         # encountering a missing revision. We address this by silently
         # removing rename metadata if the source file is outside the
         # narrow spec.
         #
         # A better solution would be to see if the base revision is available,
         # rather than assuming it isn't.
         #
         # An even better solution would be to teach all consumers of rename
         # metadata that the base revision may not be available.
         #
         # TODO consider better ways of doing this.
         if res and not self._narrowmatch(res[0]):
             return None

         return res
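
The override returns `res` unchanged when the copy source is inside the narrowspec; only out-of-narrowspec sources are scrubbed. A standalone sketch of that behavior (the function name and sample values are hypothetical; `res` has the usual (source path, source filenode) shape):

    def narrow_renamed(res, narrowmatch):
        # Mirror of the override above: drop copy metadata whose source
        # path falls outside the narrowspec.
        if res and not narrowmatch(res[0]):
            return None
        return res

    # 'foo' copied from 'bar', but only 'foo' is inside the narrowspec:
    narrow_renamed((b'bar', b'\x00' * 20), lambda path: path == b'foo')  # -> None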

     def size(self, rev):
         # Because we have a custom renamed() that may lie, we need to call
         # the base renamed() to report accurate results.
         node = self.node(rev)
         if super(narrowfilelog, self).renamed(node):
             return len(self.read(node))
         else:
             return super(narrowfilelog, self).size(rev)

     def cmp(self, node, text):
         different = super(narrowfilelog, self).cmp(node, text)

         # Because renamed() may lie, we may get false positives for
         # different content. Check for this by comparing against the original
         # renamed() implementation.
         if different:
             if super(narrowfilelog, self).renamed(node):
                 t2 = self.read(node)
                 return t2 != text

         return different
diff --git a/mercurial/repository.py b/mercurial/repository.py
@@ -1,1835 +1,1845 @@
 # repository.py - Interfaces and base classes for repositories and peers.
 #
 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 from .i18n import _
 from . import (
     error,
 )
 from .utils import (
     interfaceutil,
 )

 # When narrowing is finalized and no longer subject to format changes,
 # we should move this to just "narrow" or similar.
 NARROW_REQUIREMENT = 'narrowhg-experimental'

 # Local repository feature string.

 # Revlogs are being used for file storage.
 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
 # The storage part of the repository is shared from an external source.
 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
 # LFS supported for backing file storage.
 REPO_FEATURE_LFS = b'lfs'
 # Repository supports being stream cloned.
 REPO_FEATURE_STREAM_CLONE = b'streamclone'

 REVISION_FLAG_CENSORED = 1 << 15
 REVISION_FLAG_ELLIPSIS = 1 << 14
 REVISION_FLAG_EXTSTORED = 1 << 13

 REVISION_FLAGS_KNOWN = (
     REVISION_FLAG_CENSORED | REVISION_FLAG_ELLIPSIS | REVISION_FLAG_EXTSTORED)

 class ipeerconnection(interfaceutil.Interface):
     """Represents a "connection" to a repository.

     This is the base interface for representing a connection to a repository.
     It holds basic properties and methods applicable to all peer types.

     This is not a complete interface definition and should not be used
     outside of this module.
     """
     ui = interfaceutil.Attribute("""ui.ui instance""")

     def url():
         """Returns a URL string representing this peer.

         Currently, implementations expose the raw URL used to construct the
         instance. It may contain credentials as part of the URL. The
         expectations of the value aren't well-defined and this could lead to
         data leakage.

         TODO audit/clean consumers and more clearly define the contents of this
         value.
         """

     def local():
         """Returns a local repository instance.

         If the peer represents a local repository, returns an object that
         can be used to interface with it. Otherwise returns ``None``.
         """

     def peer():
         """Returns an object conforming to this interface.

         Most implementations will ``return self``.
         """

     def canpush():
         """Returns a boolean indicating if this peer can be pushed to."""

     def close():
         """Close the connection to this peer.

         This is called when the peer will no longer be used. Resources
         associated with the peer should be cleaned up.
         """

 class ipeercapabilities(interfaceutil.Interface):
     """Peer sub-interface related to capabilities."""

     def capable(name):
         """Determine support for a named capability.

         Returns ``False`` if capability not supported.

         Returns ``True`` if boolean capability is supported. Returns a string
         if capability support is non-boolean.

         Capability strings may or may not map to wire protocol capabilities.
         """

     def requirecap(name, purpose):
         """Require a capability to be present.

         Raises a ``CapabilityError`` if the capability isn't present.
         """

 class ipeercommands(interfaceutil.Interface):
     """Client-side interface for communicating over the wire protocol.

     This interface is used as a gateway to the Mercurial wire protocol.
     Methods commonly call wire protocol commands of the same name.
111 """
111 """
112
112
113 def branchmap():
113 def branchmap():
114 """Obtain heads in named branches.
114 """Obtain heads in named branches.
115
115
116 Returns a dict mapping branch name to an iterable of nodes that are
116 Returns a dict mapping branch name to an iterable of nodes that are
117 heads on that branch.
117 heads on that branch.
118 """
118 """
119
119
120 def capabilities():
120 def capabilities():
121 """Obtain capabilities of the peer.
121 """Obtain capabilities of the peer.
122
122
123 Returns a set of string capabilities.
123 Returns a set of string capabilities.
124 """
124 """
125
125
126 def clonebundles():
126 def clonebundles():
127 """Obtains the clone bundles manifest for the repo.
127 """Obtains the clone bundles manifest for the repo.
128
128
129 Returns the manifest as unparsed bytes.
129 Returns the manifest as unparsed bytes.
130 """
130 """
131
131
132 def debugwireargs(one, two, three=None, four=None, five=None):
132 def debugwireargs(one, two, three=None, four=None, five=None):
133 """Used to facilitate debugging of arguments passed over the wire."""
133 """Used to facilitate debugging of arguments passed over the wire."""
134
134
135 def getbundle(source, **kwargs):
135 def getbundle(source, **kwargs):
136 """Obtain remote repository data as a bundle.
136 """Obtain remote repository data as a bundle.
137
137
138 This command is how the bulk of repository data is transferred from
138 This command is how the bulk of repository data is transferred from
139 the peer to the local repository
139 the peer to the local repository
140
140
141 Returns a generator of bundle data.
141 Returns a generator of bundle data.
142 """
142 """
143
143
144 def heads():
144 def heads():
145 """Determine all known head revisions in the peer.
145 """Determine all known head revisions in the peer.
146
146
147 Returns an iterable of binary nodes.
147 Returns an iterable of binary nodes.
148 """
148 """
149
149
150 def known(nodes):
150 def known(nodes):
151 """Determine whether multiple nodes are known.
151 """Determine whether multiple nodes are known.
152
152
153 Accepts an iterable of nodes whose presence to check for.
153 Accepts an iterable of nodes whose presence to check for.
154
154
155 Returns an iterable of booleans indicating of the corresponding node
155 Returns an iterable of booleans indicating of the corresponding node
156 at that index is known to the peer.
156 at that index is known to the peer.
157 """
157 """
158
158
159 def listkeys(namespace):
159 def listkeys(namespace):
160 """Obtain all keys in a pushkey namespace.
160 """Obtain all keys in a pushkey namespace.
161
161
162 Returns an iterable of key names.
162 Returns an iterable of key names.
163 """
163 """
164
164
165 def lookup(key):
165 def lookup(key):
166 """Resolve a value to a known revision.
166 """Resolve a value to a known revision.
167
167
168 Returns a binary node of the resolved revision on success.
168 Returns a binary node of the resolved revision on success.
169 """
169 """
170
170
171 def pushkey(namespace, key, old, new):
171 def pushkey(namespace, key, old, new):
172 """Set a value using the ``pushkey`` protocol.
172 """Set a value using the ``pushkey`` protocol.
173
173
174 Arguments correspond to the pushkey namespace and key to operate on and
174 Arguments correspond to the pushkey namespace and key to operate on and
175 the old and new values for that key.
175 the old and new values for that key.
176
176
177 Returns a string with the peer result. The value inside varies by the
177 Returns a string with the peer result. The value inside varies by the
178 namespace.
178 namespace.
179 """
179 """
180
180
181 def stream_out():
181 def stream_out():
182 """Obtain streaming clone data.
182 """Obtain streaming clone data.
183
183
184 Successful result should be a generator of data chunks.
184 Successful result should be a generator of data chunks.
185 """
185 """
186
186
187 def unbundle(bundle, heads, url):
187 def unbundle(bundle, heads, url):
188 """Transfer repository data to the peer.
188 """Transfer repository data to the peer.
189
189
190 This is how the bulk of data during a push is transferred.
190 This is how the bulk of data during a push is transferred.
191
191
192 Returns the integer number of heads added to the peer.
192 Returns the integer number of heads added to the peer.
193 """
193 """
194
194
195 class ipeerlegacycommands(interfaceutil.Interface):
195 class ipeerlegacycommands(interfaceutil.Interface):
196 """Interface for implementing support for legacy wire protocol commands.
196 """Interface for implementing support for legacy wire protocol commands.
197
197
198 Wire protocol commands transition to legacy status when they are no longer
198 Wire protocol commands transition to legacy status when they are no longer
199 used by modern clients. To facilitate identifying which commands are
199 used by modern clients. To facilitate identifying which commands are
200 legacy, the interfaces are split.
200 legacy, the interfaces are split.
201 """
201 """
202
202
203 def between(pairs):
203 def between(pairs):
204 """Obtain nodes between pairs of nodes.
204 """Obtain nodes between pairs of nodes.
205
205
206 ``pairs`` is an iterable of node pairs.
206 ``pairs`` is an iterable of node pairs.
207
207
208 Returns an iterable of iterables of nodes corresponding to each
208 Returns an iterable of iterables of nodes corresponding to each
209 requested pair.
209 requested pair.
210 """
210 """
211
211
212 def branches(nodes):
212 def branches(nodes):
213 """Obtain ancestor changesets of specific nodes back to a branch point.
213 """Obtain ancestor changesets of specific nodes back to a branch point.
214
214
215 For each requested node, the peer finds the first ancestor node that is
215 For each requested node, the peer finds the first ancestor node that is
216 a DAG root or is a merge.
216 a DAG root or is a merge.
217
217
218 Returns an iterable of iterables with the resolved values for each node.
218 Returns an iterable of iterables with the resolved values for each node.
219 """
219 """
220
220
221 def changegroup(nodes, source):
221 def changegroup(nodes, source):
222 """Obtain a changegroup with data for descendants of specified nodes."""
222 """Obtain a changegroup with data for descendants of specified nodes."""
223
223
224 def changegroupsubset(bases, heads, source):
224 def changegroupsubset(bases, heads, source):
225 pass
225 pass
226
226
227 class ipeercommandexecutor(interfaceutil.Interface):
227 class ipeercommandexecutor(interfaceutil.Interface):
228 """Represents a mechanism to execute remote commands.
228 """Represents a mechanism to execute remote commands.
229
229
230 This is the primary interface for requesting that wire protocol commands
230 This is the primary interface for requesting that wire protocol commands
231 be executed. Instances of this interface are active in a context manager
231 be executed. Instances of this interface are active in a context manager
232 and have a well-defined lifetime. When the context manager exits, all
232 and have a well-defined lifetime. When the context manager exits, all
233 outstanding requests are waited on.
233 outstanding requests are waited on.
234 """
234 """
235
235
236 def callcommand(name, args):
236 def callcommand(name, args):
237 """Request that a named command be executed.
237 """Request that a named command be executed.
238
238
239 Receives the command name and a dictionary of command arguments.
239 Receives the command name and a dictionary of command arguments.
240
240
241 Returns a ``concurrent.futures.Future`` that will resolve to the
241 Returns a ``concurrent.futures.Future`` that will resolve to the
242 result of that command request. That exact value is left up to
242 result of that command request. That exact value is left up to
243 the implementation and possibly varies by command.
243 the implementation and possibly varies by command.
244
244
245 Not all commands can coexist with other commands in an executor
245 Not all commands can coexist with other commands in an executor
246 instance: it depends on the underlying wire protocol transport being
246 instance: it depends on the underlying wire protocol transport being
247 used and the command itself.
247 used and the command itself.
248
248
249 Implementations MAY call ``sendcommands()`` automatically if the
249 Implementations MAY call ``sendcommands()`` automatically if the
250 requested command can not coexist with other commands in this executor.
250 requested command can not coexist with other commands in this executor.
251
251
252 Implementations MAY call ``sendcommands()`` automatically when the
252 Implementations MAY call ``sendcommands()`` automatically when the
253 future's ``result()`` is called. So, consumers using multiple
253 future's ``result()`` is called. So, consumers using multiple
254 commands with an executor MUST ensure that ``result()`` is not called
254 commands with an executor MUST ensure that ``result()`` is not called
255 until all command requests have been issued.
255 until all command requests have been issued.
256 """
256 """
257
257
258 def sendcommands():
258 def sendcommands():
259 """Trigger submission of queued command requests.
259 """Trigger submission of queued command requests.
260
260
261 Not all transports submit commands as soon as they are requested to
261 Not all transports submit commands as soon as they are requested to
262 run. When called, this method forces queued command requests to be
262 run. When called, this method forces queued command requests to be
263 issued. It will no-op if all commands have already been sent.
263 issued. It will no-op if all commands have already been sent.
264
264
265 When called, no more new commands may be issued with this executor.
265 When called, no more new commands may be issued with this executor.
266 """
266 """
267
267
268 def close():
268 def close():
269 """Signal that this command request is finished.
269 """Signal that this command request is finished.
270
270
271 When called, no more new commands may be issued. All outstanding
271 When called, no more new commands may be issued. All outstanding
272 commands that have previously been issued are waited on before
272 commands that have previously been issued are waited on before
273 returning. This not only includes waiting for the futures to resolve,
273 returning. This not only includes waiting for the futures to resolve,
274 but also waiting for all response data to arrive. In other words,
274 but also waiting for all response data to arrive. In other words,
275 calling this waits for all on-wire state for issued command requests
275 calling this waits for all on-wire state for issued command requests
276 to finish.
276 to finish.
277
277
278 When used as a context manager, this method is called when exiting the
278 When used as a context manager, this method is called when exiting the
279 context manager.
279 context manager.
280
280
281 This method may call ``sendcommands()`` if there are buffered commands.
281 This method may call ``sendcommands()`` if there are buffered commands.
282 """
282 """
283
283
284 class ipeerrequests(interfaceutil.Interface):
284 class ipeerrequests(interfaceutil.Interface):
285 """Interface for executing commands on a peer."""
285 """Interface for executing commands on a peer."""
286
286
287 def commandexecutor():
287 def commandexecutor():
288 """A context manager that resolves to an ipeercommandexecutor.
288 """A context manager that resolves to an ipeercommandexecutor.
289
289
290 The object this resolves to can be used to issue command requests
290 The object this resolves to can be used to issue command requests
291 to the peer.
291 to the peer.
292
292
293 Callers should call its ``callcommand`` method to issue command
293 Callers should call its ``callcommand`` method to issue command
294 requests.
294 requests.
295
295
296 A new executor should be obtained for each distinct set of commands
296 A new executor should be obtained for each distinct set of commands
297 (possibly just a single command) that the consumer wants to execute
297 (possibly just a single command) that the consumer wants to execute
298 as part of a single operation or round trip. This is because some
298 as part of a single operation or round trip. This is because some
299 peers are half-duplex and/or don't support persistent connections.
299 peers are half-duplex and/or don't support persistent connections.
300 e.g. in the case of HTTP peers, commands sent to an executor represent
300 e.g. in the case of HTTP peers, commands sent to an executor represent
301 a single HTTP request. While some peers may support multiple command
301 a single HTTP request. While some peers may support multiple command
302 sends over the wire per executor, consumers need to code to the least
302 sends over the wire per executor, consumers need to code to the least
303 capable peer. So it should be assumed that command executors buffer
303 capable peer. So it should be assumed that command executors buffer
304 called commands until they are told to send them and that each
304 called commands until they are told to send them and that each
305 command executor could result in a new connection or wire-level request
305 command executor could result in a new connection or wire-level request
306 being issued.
306 being issued.
307 """
307 """
308
308
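A typical consumer drives these two interfaces together: obtain an executor, queue every command before asking for any result (per the ``callcommand()`` contract above), and read results after the context manager exits. A sketch, assuming ``peer`` implements ``ipeerrequests``:

    with peer.commandexecutor() as e:
        # Queue both requests before touching any future's result().
        fheads = e.callcommand(b'heads', {})
        fknown = e.callcommand(b'known', {b'nodes': []})

    # close() has waited on all outstanding requests by now.
    heads = fheads.result()
    known = fknown.result()
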
 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
     """Unified interface for peer repositories.

     All peer instances must conform to this interface.
     """

 class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
     """Unified peer interface for wire protocol version 2 peers."""

     apidescriptor = interfaceutil.Attribute(
         """Data structure holding description of server API.""")

 @interfaceutil.implementer(ipeerbase)
 class peer(object):
     """Base class for peer repositories."""

     def capable(self, name):
         caps = self.capabilities()
         if name in caps:
             return True

         name = '%s=' % name
         for cap in caps:
             if cap.startswith(name):
                 return cap[len(name):]

         return False
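
Concretely, with a hypothetical capability set, the three possible return shapes of the method above look like this:

    # Suppose peer.capabilities() returns:
    # {'branchmap', 'unbundle=HG10GZ,HG10BZ,HG10UN'}
    peer.capable('branchmap')   # -> True: boolean capability
    peer.capable('unbundle')    # -> 'HG10GZ,HG10BZ,HG10UN': non-boolean value
    peer.capable('missing')     # -> False: capability not advertised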

     def requirecap(self, name, purpose):
         if self.capable(name):
             return

         raise error.CapabilityError(
             _('cannot %s; remote repository does not support the %r '
               'capability') % (purpose, name))

 class iverifyproblem(interfaceutil.Interface):
     """Represents a problem with the integrity of the repository.

     Instances of this interface are emitted to describe an integrity issue
     with a repository (e.g. corrupt storage, missing data, etc).

     Instances are essentially messages associated with severity.
     """
     warning = interfaceutil.Attribute(
         """Message indicating a non-fatal problem.""")

     error = interfaceutil.Attribute(
         """Message indicating a fatal problem.""")

     node = interfaceutil.Attribute(
         """Revision encountering the problem.

         ``None`` means the problem doesn't apply to a single revision.
         """)

 class irevisiondelta(interfaceutil.Interface):
     """Represents a delta between one revision and another.

     Instances convey enough information to allow a revision to be exchanged
     with another repository.

     Instances represent the fulltext revision data or a delta against
     another revision. Therefore the ``revision`` and ``delta`` attributes
     are mutually exclusive.

     Typically used for changegroup generation.
     """

     node = interfaceutil.Attribute(
         """20 byte node of this revision.""")

     p1node = interfaceutil.Attribute(
         """20 byte node of 1st parent of this revision.""")

     p2node = interfaceutil.Attribute(
         """20 byte node of 2nd parent of this revision.""")

     linknode = interfaceutil.Attribute(
         """20 byte node of the changelog revision this node is linked to.""")

     flags = interfaceutil.Attribute(
         """2 bytes of integer flags that apply to this revision.

         This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
         """)

     basenode = interfaceutil.Attribute(
         """20 byte node of the revision this data is a delta against.

         ``nullid`` indicates that the revision is a full revision and not
         a delta.
         """)

     baserevisionsize = interfaceutil.Attribute(
         """Size of base revision this delta is against.

         May be ``None`` if ``basenode`` is ``nullid``.
         """)

     revision = interfaceutil.Attribute(
         """Raw fulltext of revision data for this node.""")

     delta = interfaceutil.Attribute(
         """Delta between ``basenode`` and ``node``.

         Stored in the bdiff delta format.
         """)

 class ifilerevisionssequence(interfaceutil.Interface):
     """Contains index data for all revisions of a file.

     Types implementing this behave like lists of tuples. The index
     in the list corresponds to the revision number. The values contain
     index metadata.

     The *null* revision (revision number -1) is always the last item
     in the index.
     """

     def __len__():
         """The total number of revisions."""

     def __getitem__(rev):
         """Returns the object having a specific revision number.

         Returns an 8-tuple with the following fields:

         offset+flags
             Contains the offset and flags for the revision. 64-bit unsigned
             integer where first 6 bytes are the offset and the next 2 bytes
             are flags. The offset can be 0 if it is not used by the store.
         compressed size
             Size of the revision data in the store. It can be 0 if it isn't
             needed by the store.
         uncompressed size
             Fulltext size. It can be 0 if it isn't needed by the store.
         base revision
             Revision number of revision the delta for storage is encoded
             against. -1 indicates not encoded against a base revision.
         link revision
             Revision number of changelog revision this entry is related to.
         p1 revision
             Revision number of 1st parent. -1 if no 1st parent.
         p2 revision
             Revision number of 2nd parent. -1 if no 2nd parent.
         node
             Binary node value for this revision number.

         Negative values should index off the end of the sequence. ``-1``
         should return the null revision. ``-2`` should return the most
         recent revision.
         """

     def __contains__(rev):
         """Whether a revision number exists."""

     def insert(self, i, entry):
         """Add an item to the index at specific revision."""

 class ifileindex(interfaceutil.Interface):
     """Storage interface for index data of a single file.

     File storage data is divided into index metadata and data storage.
     This interface defines the index portion of the interface.

     The index logically consists of:

     * A mapping between revision numbers and nodes.
     * DAG data (storing and querying the relationship between nodes).
     * Metadata to facilitate storage.
     """
     def __len__():
         """Obtain the number of revisions stored for this file."""

     def __iter__():
         """Iterate over revision numbers for this file."""

+    def hasnode(node):
+        """Returns a bool indicating if a node is known to this store.
+
+        Implementations must only return True for full, binary node values:
+        hex nodes, revision numbers, and partial node matches must be
+        rejected.
+
+        The null node is never present.
+        """
+
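A toy store honoring this contract might back ``hasnode()`` with a node-to-rev mapping, so that anything other than a full binary node simply misses the lookup (sketch, entirely hypothetical):

    class memoryindex(object):
        # Hypothetical minimal implementer of the hasnode() contract.
        def __init__(self):
            self._nodetorev = {}     # 20-byte binary node -> rev

        def hasnode(self, node):
            # Hex strings, revision numbers, and node prefixes are never
            # keys of the mapping, so they naturally return False; the
            # null node is simply never inserted.
            return node in self._nodetorev
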
487 def revs(start=0, stop=None):
497 def revs(start=0, stop=None):
488 """Iterate over revision numbers for this file, with control."""
498 """Iterate over revision numbers for this file, with control."""
489
499
490 def parents(node):
500 def parents(node):
491 """Returns a 2-tuple of parent nodes for a revision.
501 """Returns a 2-tuple of parent nodes for a revision.
492
502
493 Values will be ``nullid`` if the parent is empty.
503 Values will be ``nullid`` if the parent is empty.
494 """
504 """
495
505
496 def parentrevs(rev):
506 def parentrevs(rev):
497 """Like parents() but operates on revision numbers."""
507 """Like parents() but operates on revision numbers."""
498
508
499 def rev(node):
509 def rev(node):
500 """Obtain the revision number given a node.
510 """Obtain the revision number given a node.
501
511
502 Raises ``error.LookupError`` if the node is not known.
512 Raises ``error.LookupError`` if the node is not known.
503 """
513 """
504
514
505 def node(rev):
515 def node(rev):
506 """Obtain the node value given a revision number.
516 """Obtain the node value given a revision number.
507
517
508 Raises ``IndexError`` if the node is not known.
518 Raises ``IndexError`` if the node is not known.
509 """
519 """
510
520
511 def lookup(node):
521 def lookup(node):
512 """Attempt to resolve a value to a node.
522 """Attempt to resolve a value to a node.
513
523
514 Value can be a binary node, hex node, revision number, or a string
524 Value can be a binary node, hex node, revision number, or a string
515 that can be converted to an integer.
525 that can be converted to an integer.
516
526
517 Raises ``error.LookupError`` if a node could not be resolved.
527 Raises ``error.LookupError`` if a node could not be resolved.
518 """
528 """
519
529
    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def iscensored(rev):
        """Return whether a revision's content has been censored."""

    def commonancestorsheads(node1, node2):
        """Obtain an iterable of nodes containing heads of common ancestors.

        See ``ancestor.commonancestorsheads()``.
        """

    def descendants(revs):
        """Obtain descendant revision numbers for a set of revision numbers.

        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
        """

    def heads(start=None, stop=None):
        """Obtain a list of nodes that are DAG heads, with control.

        The set of revisions examined can be limited by specifying
        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
        iterable of nodes. DAG traversal starts at earlier revision
        ``start`` and iterates forward until any node in ``stop`` is
        encountered.
        """

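    # Illustrative sketch (assumed names; not part of the interface):
    #
    #   fl.heads()                    # all DAG head nodes
    #   fl.heads(start=fl.node(2))    # heads reachable from revision 2
    #   fl.heads(stop=[fl.node(5)])   # traversal stops at revision 5
    #
    # With no arguments, every node without children is returned.
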
    def children(node):
        """Obtain nodes that are children of a node.

        Returns a list of nodes.
        """

class ifiledata(interfaceutil.Interface):
    """Storage interface for data storage of a specific file.

    This complements ``ifileindex`` and provides an interface for accessing
    data for a tracked file.
    """
    def size(rev):
        """Obtain the fulltext size of file data.

        Any metadata is excluded from size measurements.
        """

    def revision(node, raw=False):
        """Obtain fulltext data for a node.

        By default, any storage transformations are applied before the data
        is returned. If ``raw`` is True, non-raw storage transformations
        are not applied.

        The fulltext data may contain a header containing metadata. Most
        consumers should use ``read()`` to obtain the actual file data.
        """

    def read(node):
        """Resolve file fulltext data.

        This is similar to ``revision()`` except any metadata in the data
        headers is stripped.
        """

    def renamed(node):
        """Obtain copy metadata for a node.

        Returns ``False`` if no copy metadata is stored or a 2-tuple of
        (path, node) from which this revision was copied.
        """

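    # Illustrative sketch (assumed names; not part of the interface):
    # distinguishing raw storage data from file data.
    #
    #   fl.revision(n)       # fulltext, possibly with a metadata header
    #   fl.read(n)           # fulltext with any metadata header stripped
    #   copied = fl.renamed(n)
    #   if copied:
    #       srcpath, srcnode = copied   # revision was copied from elsewhere
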
    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.

        This takes copy metadata into account.

        TODO better document the copy metadata and censoring logic.
        """

    def emitrevisions(nodes,
                      nodesorder=None,
                      revisiondata=False,
                      assumehaveparentrevisions=False,
                      deltaprevious=False):
        """Produce ``irevisiondelta`` for revisions.

        Given an iterable of nodes, emits objects conforming to the
        ``irevisiondelta`` interface that describe revisions in storage.

        This method is a generator.

        The input nodes may be unordered. Implementations must ensure that a
        node's parents are emitted before the node itself. Transitively, this
        means that a node may only be emitted once all its ancestors in
        ``nodes`` have also been emitted.

        By default, emits "index" data (the ``node``, ``p1node``, and
        ``p2node`` attributes). If ``revisiondata`` is set, revision data
        will also be present on the emitted objects.

        With default argument values, implementations can choose to emit
        either fulltext revision data or a delta. When emitting deltas,
        implementations must consider whether the delta's base revision
        fulltext is available to the receiver.

        The base revision fulltext is guaranteed to be available if any of
        the following are met:

        * Its fulltext revision was emitted by this method call.
        * A delta for that revision was emitted by this method call.
        * ``assumehaveparentrevisions`` is True and the base revision is a
          parent of the node.

        ``nodesorder`` can be used to control the order that revisions are
        emitted. By default, revisions can be reordered as long as they are
        in DAG topological order (see above). If the value is ``nodes``,
        the iteration order from ``nodes`` should be used. If the value is
        ``storage``, then the native order from the backing storage layer
        is used. (Not all storage layers will have strong ordering and
        behavior of this mode is storage-dependent.) ``nodes`` ordering can
        force revisions to be emitted before their ancestors, so consumers
        should use it with care.

        The ``linknode`` attribute on the returned ``irevisiondelta`` may not
        be set and it is the caller's responsibility to resolve it, if needed.

        If ``deltaprevious`` is True and revision data is requested, all
        revision data should be emitted as deltas against the revision
        emitted just prior. The initial revision should be a delta against
        its 1st parent.
        """

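    # Illustrative sketch (assumed names; not part of the interface):
    # consuming the generator for a set of nodes.
    #
    #   for delta in fl.emitrevisions(nodes, revisiondata=True):
    #       # delta conforms to ``irevisiondelta``: node, p1node, p2node
    #       # are always set; the revision/delta payload is present because
    #       # ``revisiondata`` was requested.
    #       process(delta.node, delta.p1node, delta.p2node)
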
class ifilemutation(interfaceutil.Interface):
    """Storage interface for mutation events of a tracked file."""

    def add(filedata, meta, transaction, linkrev, p1, p2):
        """Add a new revision to the store.

        Takes file data, dictionary of metadata, a transaction, linkrev,
        and parent nodes.

        Returns the node that was added.

        May no-op if a revision matching the supplied data is already stored.
        """

    def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
                    flags=0, cachedelta=None):
        """Add a new revision to the store.

        This is similar to ``add()`` except it operates at a lower level.

        The data passed in already contains a metadata header, if any.

        ``node`` and ``flags`` can be used to define the expected node and
        the flags to use with storage. ``flags`` is a bitwise value composed
        of the various ``REVISION_FLAG_*`` constants.

        ``add()`` is usually called when adding files from e.g. the working
        directory. ``addrevision()`` is often called by ``add()`` and for
        scenarios where revision data has already been computed, such as when
        applying raw data from a peer repo.
        """

    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
        """Process a series of deltas for storage.

        ``deltas`` is an iterable of 7-tuples of
        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
        to add.

        The ``delta`` field contains ``mpatch`` data to apply to a base
        revision, identified by ``deltabase``. The base node can be
        ``nullid``, in which case the header from the delta can be ignored
        and the delta used as the fulltext.

        ``addrevisioncb`` should be called for each node as it is committed.

        Returns a list of nodes that were processed. A node will be in the list
        even if it existed in the store previously.
        """

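    # Illustrative sketch (assumed names; not part of the interface):
    # the shape of one ``deltas`` entry. A ``nullid`` ``deltabase`` marks
    # the delta as usable as a fulltext.
    #
    #   deltas = [
    #       # (node, p1, p2, linknode, deltabase, delta, flags)
    #       (node, p1, p2, clnode, nullid, fulltextdelta, 0),
    #   ]
    #   fl.addgroup(deltas, linkmapper, tr, addrevisioncb=oncommitted)
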
    def censorrevision(tr, node, tombstone=b''):
        """Remove the content of a single revision.

        The specified ``node`` will have its content purged from storage.
        Future attempts to access the revision data for this node will
        result in failure.

        A ``tombstone`` message can optionally be stored. This message may be
        displayed to users when they attempt to access the missing revision
        data.

        Storage backends may have stored deltas against the previous content
        in this revision. As part of censoring a revision, these storage
        backends are expected to rewrite any internally stored deltas such
        that they no longer reference the deleted content.
        """

    def getstrippoint(minlink):
        """Find the minimum revision that must be stripped to strip a linkrev.

        Returns a 2-tuple containing the minimum revision number and a set
        of all revision numbers that would be broken by this strip.

        TODO this is highly revlog centric and should be abstracted into
        a higher-level deletion API. ``repair.strip()`` relies on this.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        This uses ``getstrippoint()`` to determine the first node to remove.
        Then it effectively truncates storage for all revisions after that.

        TODO this is highly revlog centric and should be abstracted into a
        higher-level deletion API.
        """

class ifilestorage(ifileindex, ifiledata, ifilemutation):
    """Complete storage interface for a single tracked file."""

    def files():
        """Obtain paths that are backing storage for this file.

        TODO this is used heavily by verify code and there should probably
        be a better API for that.
        """

    def storageinfo(exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        """Obtain information about storage for this file's data.

        Returns a dict describing storage for this tracked path. The keys
        in the dict map to arguments of the same name. The arguments are
        bools indicating whether to calculate and obtain that data.

        exclusivefiles
           Iterable of (vfs, path) describing files that are exclusively
           used to back storage for this tracked path.

        sharedfiles
           Iterable of (vfs, path) describing files that are used to back
           storage for this tracked path. Those files may also provide
           storage for other stored entities.

        revisionscount
           Number of revisions available for retrieval.

        trackedsize
           Total size in bytes of all tracked revisions. This is a sum of the
           length of the fulltext of all revisions.

        storedsize
           Total size in bytes used to store data for all tracked revisions.
           This is commonly less than ``trackedsize`` due to internal usage
           of deltas rather than fulltext revisions.

        Not all storage backends may support all queries or have a reasonable
        value to use. In that case, the value should be set to ``None`` and
        callers are expected to handle this special value.
        """

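    # Illustrative sketch (assumed names; not part of the interface):
    #
    #   info = fl.storageinfo(revisionscount=True, trackedsize=True)
    #   # e.g. {'revisionscount': 3, 'trackedsize': 2048}
    #   # A backend that cannot answer a query reports None instead:
    #   # {'revisionscount': None, 'trackedsize': 2048}
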
    def verifyintegrity(state):
        """Verifies the integrity of file storage.

        ``state`` is a dict holding state of the verifier process. It can be
        used to communicate data between invocations of multiple storage
        primitives.

        If individual revisions cannot have their revision content resolved,
        the method is expected to set the ``skipread`` key to a set of nodes
        that encountered problems.

        The method yields objects conforming to the ``iverifyproblem``
        interface.
        """

class idirs(interfaceutil.Interface):
    """Interface representing a collection of directories from paths.

    This interface is essentially a derived data structure representing
    directories from a collection of paths.
    """

    def addpath(path):
        """Add a path to the collection.

        All directories in the path will be added to the collection.
        """

    def delpath(path):
        """Remove a path from the collection.

        If the removal was the last path in a particular directory, the
        directory is removed from the collection.
        """

    def __iter__():
        """Iterate over the directories in this collection of paths."""

    def __contains__(path):
        """Whether a specific directory is in this collection."""

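# Illustrative sketch (assumed behavior; not part of the interface): an
# ``idirs`` collection derives directories from the paths added to it.
#
#   d.addpath(b'a/b/c.txt')
#   b'a' in d       # True
#   b'a/b' in d     # True
#   d.delpath(b'a/b/c.txt')
#   b'a/b' in d     # False once no remaining path uses the directory
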
class imanifestdict(interfaceutil.Interface):
    """Interface representing a manifest data structure.

    A manifest is effectively a dict mapping paths to entries. Each entry
    consists of a binary node and extra flags affecting that entry.
    """

    def __getitem__(path):
        """Returns the binary node value for a path in the manifest.

        Raises ``KeyError`` if the path does not exist in the manifest.

        Equivalent to ``self.find(path)[0]``.
        """

    def find(path):
        """Returns the entry for a path in the manifest.

        Returns a 2-tuple of (node, flags).

        Raises ``KeyError`` if the path does not exist in the manifest.
        """

    def __len__():
        """Return the number of entries in the manifest."""

    def __nonzero__():
        """Returns True if the manifest has entries, False otherwise."""

    __bool__ = __nonzero__

    def __setitem__(path, node):
        """Define the node value for a path in the manifest.

        If the path is already in the manifest, its flags will be copied to
        the new entry.
        """

    def __contains__(path):
        """Whether a path exists in the manifest."""

    def __delitem__(path):
        """Remove a path from the manifest.

        Raises ``KeyError`` if the path is not in the manifest.
        """

    def __iter__():
        """Iterate over paths in the manifest."""

    def iterkeys():
        """Iterate over paths in the manifest."""

    def keys():
        """Obtain a list of paths in the manifest."""

    def filesnotin(other, match=None):
        """Obtain the set of paths in this manifest but not in another.

        ``match`` is an optional matcher function to be applied to both
        manifests.

        Returns a set of paths.
        """

    def dirs():
        """Returns an object implementing the ``idirs`` interface."""

    def hasdir(dir):
        """Returns a bool indicating if a directory is in this manifest."""

    def matches(match):
        """Generate a new manifest filtered through a matcher.

        Returns an object conforming to the ``imanifestdict`` interface.
        """

    def walk(match):
        """Generator of paths in manifest satisfying a matcher.

        This is equivalent to ``self.matches(match).iterkeys()`` except a new
        manifest object is not created.

        If the matcher has explicit files listed and they don't exist in
        the manifest, ``match.bad()`` is called for each missing file.
        """

    def diff(other, match=None, clean=False):
        """Find differences between this manifest and another.

        This manifest is compared to ``other``.

        If ``match`` is provided, the two manifests are filtered against this
        matcher and only entries satisfying the matcher are compared.

        If ``clean`` is True, unchanged files are included in the returned
        object.

        Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
        the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
        represents the node and flags for this manifest and ``(node2, flag2)``
        are the same for the other manifest.
        """

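    # Illustrative sketch (assumed names; not part of the interface):
    # the shape of a ``diff()`` result.
    #
    #   d = m1.diff(m2)
    #   # {b'changed.txt': ((node1, b''), (node2, b'')),
    #   #  b'only-in-m1.txt': ((node1, b''), (None, b''))}
    #
    # An entry missing on one side is represented with a None node.
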
    def setflag(path, flag):
        """Set the flag value for a given path.

        Raises ``KeyError`` if the path is not already in the manifest.
        """

    def get(path, default=None):
        """Obtain the node value for a path or a default value if missing."""

    def flags(path, default=''):
        """Return the flags value for a path or a default value if missing."""

    def copy():
        """Return a copy of this manifest."""

    def items():
        """Returns an iterable of (path, node) for items in this manifest."""

    def iteritems():
        """Identical to items()."""

    def iterentries():
        """Returns an iterable of (path, node, flags) for this manifest.

        Similar to ``iteritems()`` except items are a 3-tuple and include
        flags.
        """

    def text():
        """Obtain the raw data representation for this manifest.

        Result is used to create a manifest revision.
        """

    def fastdelta(base, changes):
        """Obtain a delta between this manifest and another given changes.

        ``base`` is the raw data representation for another manifest.

        ``changes`` is an iterable of ``(path, to_delete)``.

        Returns a 2-tuple containing ``bytearray(self.text())`` and the
        delta between ``base`` and this manifest.
        """

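    # Illustrative sketch (assumed names; not part of the interface):
    # basic dict-style manipulation of a manifest.
    #
    #   m[b'foo.txt'] = node          # add or update an entry
    #   m.setflag(b'foo.txt', b'x')   # mark it executable
    #   m.flags(b'foo.txt')           # b'x'
    #   node, flags = m.find(b'foo.txt')
    #   del m[b'foo.txt']
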
class imanifestrevisionbase(interfaceutil.Interface):
    """Base interface representing a single revision of a manifest.

    Should not be used as a primary interface: should always be inherited
    as part of a larger interface.
    """

    def new():
        """Obtain a new manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def copy():
        """Obtain a copy of this manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def read():
        """Obtain the parsed manifest data structure.

        The returned object conforms to the ``imanifestdict`` interface.
        """

class imanifestrevisionstored(imanifestrevisionbase):
    """Interface representing a manifest revision committed to storage."""

    def node():
        """The binary node for this manifest."""

    parents = interfaceutil.Attribute(
        """List of binary nodes that are parents for this manifest revision."""
    )

    def readdelta(shallow=False):
        """Obtain the manifest data structure representing changes from parent.

        This manifest is compared to its 1st parent. A new manifest
        representing those differences is constructed.

        The returned object conforms to the ``imanifestdict`` interface.
        """

    def readfast(shallow=False):
        """Calls either ``read()`` or ``readdelta()``.

        The faster of the two options is called.
        """

    def find(key):
        """Calls ``self.read().find(key)``.

        Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
        """

class imanifestrevisionwritable(imanifestrevisionbase):
    """Interface representing a manifest revision that can be committed."""

    def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
        """Add this revision to storage.

        Takes a transaction object, the changeset revision number it will
        be associated with, its parent nodes, and lists of added and
        removed paths.

        If match is provided, storage can choose not to inspect or write out
        items that do not match. Storage is still required to be able to
        provide the full manifest in the future for any directories written
        (these manifests should not be "narrowed on disk").

        Returns the binary node of the created revision.
        """

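# Illustrative sketch (assumed names; not part of the interface): committing
# a modified manifest copy within a transaction.
#
#   mctx = storedctx.copy()             # writable copy of a stored revision
#   m = mctx.read()
#   m[b'new.txt'] = newfilenode
#   node = mctx.write(tr, linkrev, p1node, p2node,
#                     added=[b'new.txt'], removed=[])
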
class imanifeststorage(interfaceutil.Interface):
    """Storage interface for manifest data."""

    tree = interfaceutil.Attribute(
        """The path to the directory this manifest tracks.

        The empty bytestring represents the root manifest.
        """)

    index = interfaceutil.Attribute(
        """An ``ifilerevisionssequence`` instance.""")

    indexfile = interfaceutil.Attribute(
        """Path of revlog index file.

        TODO this is revlog specific and should not be exposed.
        """)

    opener = interfaceutil.Attribute(
        """VFS opener to use to access underlying files used for storage.

        TODO this is revlog specific and should not be exposed.
        """)

    version = interfaceutil.Attribute(
        """Revlog version number.

        TODO this is revlog specific and should not be exposed.
        """)

    _generaldelta = interfaceutil.Attribute(
        """Whether generaldelta storage is being used.

        TODO this is revlog specific and should not be exposed.
        """)

    fulltextcache = interfaceutil.Attribute(
        """Dict with cache of fulltexts.

        TODO this doesn't feel appropriate for the storage interface.
        """)

    def __len__():
        """Obtain the number of revisions stored for this manifest."""

    def __iter__():
        """Iterate over revision numbers for this manifest."""

    def rev(node):
        """Obtain the revision number given a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``error.LookupError`` if the revision is not known.
        """

    def lookup(value):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a bytes
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def parents(node):
        """Returns a 2-tuple of parent nodes for a node.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def revision(node, _df=None, raw=False):
        """Obtain fulltext data for a node."""

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        The returned data is the result of ``bdiff.bdiff()`` on the raw
        revision data.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.
        """

    def emitrevisions(nodes,
                      nodesorder=None,
                      revisiondata=False,
                      assumehaveparentrevisions=False):
        """Produce ``irevisiondelta`` describing revisions.

        See the documentation for ``ifiledata`` for more.
        """

    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
        """Process a series of deltas for storage.

        See the documentation in ``ifilemutation`` for more.
        """

    def rawsize(rev):
        """Obtain the size of tracked data.

        Is equivalent to ``len(m.revision(node, raw=True))``.

        TODO this method is only used by upgrade code and may be removed.
        """

    def getstrippoint(minlink):
        """Find minimum revision that must be stripped to strip a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        See the documentation in ``ifilemutation`` for more.
        """

    def checksize():
        """Obtain the expected sizes of backing files.

        TODO this is used by verify and it should not be part of the interface.
        """

    def files():
        """Obtain paths that are backing storage for this manifest.

        TODO this is used by verify and there should probably be a better API
        for this functionality.
        """

    def deltaparent(rev):
        """Obtain the revision that a revision is delta'd against.

        TODO delta encoding is an implementation detail of storage and should
        not be exposed to the storage interface.
        """

    def clone(tr, dest, **kwargs):
        """Clone this instance to another."""

    def clearcaches(clear_persisted_data=False):
        """Clear any caches associated with this instance."""

    def dirlog(d):
        """Obtain a manifest storage instance for a tree."""

    def add(m, transaction, link, p1, p2, added, removed, readtree=None,
            match=None):
        """Add a revision to storage.

        ``m`` is an object conforming to ``imanifestdict``.

        ``link`` is the linkrev revision number.

        ``p1`` and ``p2`` are the parent revision numbers.

        ``added`` and ``removed`` are iterables of added and removed paths,
        respectively.

        ``readtree`` is a function that can be used to read the child tree(s)
        when recursively writing the full tree structure when using
        treemanifests.

        ``match`` is a matcher that can be used to hint to storage that not all
        paths must be inspected; this is an optimization and can be safely
        ignored. Note that the storage must still be able to reproduce a full
        manifest including files that did not match.
        """

    def storageinfo(exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        """Obtain information about storage for this manifest's data.

        See ``ifilestorage.storageinfo()`` for a description of this method.
        This one behaves the same way, except for manifest data.
        """

class imanifestlog(interfaceutil.Interface):
    """Interface representing a collection of manifest snapshots.

    Represents the root manifest in a repository.

    Also serves as a means to access nested tree manifests and to cache
    tree manifests.
    """

    def __getitem__(node):
        """Obtain a manifest instance for a given binary node.

        Equivalent to calling ``self.get('', node)``.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def get(tree, node, verify=True):
        """Retrieve the manifest instance for a given directory and binary node.

        ``node`` always refers to the node of the root manifest (which will be
        the only manifest if flat manifests are being used).

        If ``tree`` is the empty string, the root manifest is returned.
        Otherwise the manifest for the specified directory will be returned
        (requires tree manifests).

        If ``verify`` is True, ``LookupError`` is raised if the node is not
        known.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def getstorage(tree):
        """Retrieve an interface to storage for a particular tree.

        If ``tree`` is the empty bytestring, storage for the root manifest will
        be returned. Otherwise storage for a tree manifest is returned.

        TODO formalize interface for returned object.
        """

    def clearcaches():
        """Clear caches associated with this collection."""

    def rev(node):
        """Obtain the revision number for a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

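# Illustrative sketch (assumed names; not part of the interface): accessing
# manifests through the log.
#
#   mctx = repo.manifestlog[node]              # root manifest revision
#   sub = repo.manifestlog.get(b'dir/', node)  # tree manifest (tree repos
#                                              # only); same root node
#   store = repo.manifestlog.getstorage(b'')   # storage for the root manifest
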
class ilocalrepositoryfilestorage(interfaceutil.Interface):
    """Local repository sub-interface providing access to tracked file storage.

    This interface defines how a repository accesses storage for a single
    tracked file path.
    """

    def file(f):
        """Obtain a filelog for a tracked path.

        The returned type conforms to the ``ifilestorage`` interface.
        """

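# Illustrative sketch (assumed names; not part of the interface): the filelog
# combines the index, data, and mutation interfaces defined above.
#
#   fl = repo.file(b'path/to/f')
#   len(fl)                  # ifileindex: number of revisions
#   fl.read(fl.node(0))      # ifiledata: fulltext without metadata header
#   # ifilemutation methods (add, addgroup, ...) modify the store
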
class ilocalrepositorymain(interfaceutil.Interface):
    """Main interface for local repositories.

    This currently captures the reality of things - not how things should be.
    """

    supportedformats = interfaceutil.Attribute(
        """Set of requirements that apply to stream clone.

        This is actually a class attribute and is shared among all instances.
        """)

    supported = interfaceutil.Attribute(
        """Set of requirements that this repo is capable of opening.""")

    requirements = interfaceutil.Attribute(
        """Set of requirements this repo uses.""")

    features = interfaceutil.Attribute(
        """Set of "features" this repository supports.

        A "feature" is a loosely-defined term. It can refer to a feature
        in the classical sense or can describe an implementation detail
        of the repository. For example, a ``readonly`` feature may denote
        the repository as read-only. Or a ``revlogfilestore`` feature may
        denote that the repository is using revlogs for file storage.

        The intent of features is to provide a machine-queryable mechanism
        for repo consumers to test for various repository characteristics.

        Features are similar to ``requirements``. The main difference is that
        requirements are stored on-disk and represent requirements to open the
        repository. Features describe more granular, run-time capabilities of
        the repository (which may be derived from requirements).
        """)

    filtername = interfaceutil.Attribute(
        """Name of the repoview that is active on this repo.""")

    wvfs = interfaceutil.Attribute(
        """VFS used to access the working directory.""")

    vfs = interfaceutil.Attribute(
        """VFS rooted at the .hg directory.

        Used to access repository data not in the store.
        """)

    svfs = interfaceutil.Attribute(
        """VFS rooted at the store.

        Used to access repository data in the store. Typically .hg/store.
        But can point elsewhere if the store is shared.
        """)

    root = interfaceutil.Attribute(
        """Path to the root of the working directory.""")

    path = interfaceutil.Attribute(
        """Path to the .hg directory.""")

    origroot = interfaceutil.Attribute(
        """The filesystem path that was used to construct the repo.""")

    auditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This checks if a path refers to a nested repository.

        Operates on the filesystem.
        """)

    nofsauditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This is like ``auditor`` except it doesn't do filesystem checks.
        """)

    baseui = interfaceutil.Attribute(
        """Original ui instance passed into constructor.""")

    ui = interfaceutil.Attribute(
        """Main ui instance for this repository.""")

    sharedpath = interfaceutil.Attribute(
        """Path to the .hg directory of the repo this repo was shared from.""")

    store = interfaceutil.Attribute(
        """A store instance.""")

    spath = interfaceutil.Attribute(
        """Path to the store.""")

    sjoin = interfaceutil.Attribute(
        """Alias to self.store.join.""")

    cachevfs = interfaceutil.Attribute(
        """A VFS used to access the cache directory.

        Typically .hg/cache.
        """)

    filteredrevcache = interfaceutil.Attribute(
        """Holds sets of revisions to be filtered.""")

    names = interfaceutil.Attribute(
        """A ``namespaces`` instance.""")

    def close():
        """Close the handle on this repository."""

    def peer():
        """Obtain an object conforming to the ``peer`` interface."""

    def unfiltered():
        """Obtain an unfiltered/raw view of this repo."""

    def filtered(name, visibilityexceptions=None):
        """Obtain a named view of this repository."""

1433 obsstore = interfaceutil.Attribute(
1443 obsstore = interfaceutil.Attribute(
1434 """A store of obsolescence data.""")
1444 """A store of obsolescence data.""")
1435
1445
1436 changelog = interfaceutil.Attribute(
1446 changelog = interfaceutil.Attribute(
1437 """A handle on the changelog revlog.""")
1447 """A handle on the changelog revlog.""")
1438
1448
1439 manifestlog = interfaceutil.Attribute(
1449 manifestlog = interfaceutil.Attribute(
1440 """An instance conforming to the ``imanifestlog`` interface.
1450 """An instance conforming to the ``imanifestlog`` interface.
1441
1451
1442 Provides access to manifests for the repository.
1452 Provides access to manifests for the repository.
1443 """)
1453 """)
1444
1454
1445 dirstate = interfaceutil.Attribute(
1455 dirstate = interfaceutil.Attribute(
1446 """Working directory state.""")
1456 """Working directory state.""")
1447
1457
1448 narrowpats = interfaceutil.Attribute(
1458 narrowpats = interfaceutil.Attribute(
1449 """Matcher patterns for this repository's narrowspec.""")
1459 """Matcher patterns for this repository's narrowspec.""")
1450
1460
1451 def narrowmatch():
1461 def narrowmatch():
1452 """Obtain a matcher for the narrowspec."""
1462 """Obtain a matcher for the narrowspec."""
1453
1463
1454 def setnarrowpats(newincludes, newexcludes):
1464 def setnarrowpats(newincludes, newexcludes):
1455 """Define the narrowspec for this repository."""
1465 """Define the narrowspec for this repository."""
1456
1466
1457 def __getitem__(changeid):
1467 def __getitem__(changeid):
1458 """Try to resolve a changectx."""
1468 """Try to resolve a changectx."""
1459
1469
1460 def __contains__(changeid):
1470 def __contains__(changeid):
1461 """Whether a changeset exists."""
1471 """Whether a changeset exists."""
1462
1472
1463 def __nonzero__():
1473 def __nonzero__():
1464 """Always returns True."""
1474 """Always returns True."""
1465 return True
1475 return True
1466
1476
1467 __bool__ = __nonzero__
1477 __bool__ = __nonzero__
1468
1478
1469 def __len__():
1479 def __len__():
1470 """Returns the number of changesets in the repo."""
1480 """Returns the number of changesets in the repo."""
1471
1481
1472 def __iter__():
1482 def __iter__():
1473 """Iterate over revisions in the changelog."""
1483 """Iterate over revisions in the changelog."""
1474
1484
1475 def revs(expr, *args):
1485 def revs(expr, *args):
1476 """Evaluate a revset.
1486 """Evaluate a revset.
1477
1487
1478 Emits revisions.
1488 Emits revisions.
1479 """
1489 """
1480
1490
1481 def set(expr, *args):
1491 def set(expr, *args):
1482 """Evaluate a revset.
1492 """Evaluate a revset.
1483
1493
1484 Emits changectx instances.
1494 Emits changectx instances.
1485 """
1495 """
1486
1496
1487 def anyrevs(specs, user=False, localalias=None):
1497 def anyrevs(specs, user=False, localalias=None):
1488 """Find revisions matching one of the given revsets."""
1498 """Find revisions matching one of the given revsets."""
1489
1499
1490 def url():
1500 def url():
1491 """Returns a string representing the location of this repo."""
1501 """Returns a string representing the location of this repo."""
1492
1502
1493 def hook(name, throw=False, **args):
1503 def hook(name, throw=False, **args):
1494 """Call a hook."""
1504 """Call a hook."""
1495
1505
1496 def tags():
1506 def tags():
1497 """Return a mapping of tag to node."""
1507 """Return a mapping of tag to node."""
1498
1508
1499 def tagtype(tagname):
1509 def tagtype(tagname):
1500 """Return the type of a given tag."""
1510 """Return the type of a given tag."""
1501
1511
1502 def tagslist():
1512 def tagslist():
1503 """Return a list of tags ordered by revision."""
1513 """Return a list of tags ordered by revision."""
1504
1514
1505 def nodetags(node):
1515 def nodetags(node):
1506 """Return the tags associated with a node."""
1516 """Return the tags associated with a node."""
1507
1517
1508 def nodebookmarks(node):
1518 def nodebookmarks(node):
1509 """Return the list of bookmarks pointing to the specified node."""
1519 """Return the list of bookmarks pointing to the specified node."""
1510
1520
1511 def branchmap():
1521 def branchmap():
1512 """Return a mapping of branch to heads in that branch."""
1522 """Return a mapping of branch to heads in that branch."""
1513
1523
1514 def revbranchcache():
1524 def revbranchcache():
1515 pass
1525 pass
1516
1526
1517 def branchtip(branchtip, ignoremissing=False):
1527 def branchtip(branchtip, ignoremissing=False):
1518 """Return the tip node for a given branch."""
1528 """Return the tip node for a given branch."""
1519
1529
1520 def lookup(key):
1530 def lookup(key):
1521 """Resolve the node for a revision."""
1531 """Resolve the node for a revision."""
1522
1532
1523 def lookupbranch(key):
1533 def lookupbranch(key):
1524 """Look up the branch name of the given revision or branch name."""
1534 """Look up the branch name of the given revision or branch name."""
1525
1535
1526 def known(nodes):
1536 def known(nodes):
1527 """Determine whether a series of nodes is known.
1537 """Determine whether a series of nodes is known.
1528
1538
1529 Returns a list of bools.
1539 Returns a list of bools.
1530 """
1540 """
1531
1541
1532 def local():
1542 def local():
1533 """Whether the repository is local."""
1543 """Whether the repository is local."""
1534 return True
1544 return True
1535
1545
1536 def publishing():
1546 def publishing():
1537 """Whether the repository is a publishing repository."""
1547 """Whether the repository is a publishing repository."""
1538
1548
1539 def cancopy():
1549 def cancopy():
1540 pass
1550 pass
1541
1551
1542 def shared():
1552 def shared():
1543 """The type of shared repository or None."""
1553 """The type of shared repository or None."""
1544
1554
1545 def wjoin(f, *insidef):
1555 def wjoin(f, *insidef):
1546 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1556 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1547
1557
1548 def setparents(p1, p2):
1558 def setparents(p1, p2):
1549 """Set the parent nodes of the working directory."""
1559 """Set the parent nodes of the working directory."""
1550
1560
1551 def filectx(path, changeid=None, fileid=None):
1561 def filectx(path, changeid=None, fileid=None):
1552 """Obtain a filectx for the given file revision."""
1562 """Obtain a filectx for the given file revision."""
1553
1563
1554 def getcwd():
1564 def getcwd():
1555 """Obtain the current working directory from the dirstate."""
1565 """Obtain the current working directory from the dirstate."""
1556
1566
1557 def pathto(f, cwd=None):
1567 def pathto(f, cwd=None):
1558 """Obtain the relative path to a file."""
1568 """Obtain the relative path to a file."""
1559
1569
1560 def adddatafilter(name, fltr):
1570 def adddatafilter(name, fltr):
1561 pass
1571 pass
1562
1572
1563 def wread(filename):
1573 def wread(filename):
1564 """Read a file from wvfs, using data filters."""
1574 """Read a file from wvfs, using data filters."""
1565
1575
1566 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1576 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1567 """Write data to a file in the wvfs, using data filters."""
1577 """Write data to a file in the wvfs, using data filters."""
1568
1578
1569 def wwritedata(filename, data):
1579 def wwritedata(filename, data):
1570 """Resolve data for writing to the wvfs, using data filters."""
1580 """Resolve data for writing to the wvfs, using data filters."""
1571
1581
1572 def currenttransaction():
1582 def currenttransaction():
1573 """Obtain the current transaction instance or None."""
1583 """Obtain the current transaction instance or None."""
1574
1584
1575 def transaction(desc, report=None):
1585 def transaction(desc, report=None):
1576 """Open a new transaction to write to the repository."""
1586 """Open a new transaction to write to the repository."""
1577
1587
1578 def undofiles():
1588 def undofiles():
1579 """Returns a list of (vfs, path) for files to undo transactions."""
1589 """Returns a list of (vfs, path) for files to undo transactions."""
1580
1590
1581 def recover():
1591 def recover():
1582 """Roll back an interrupted transaction."""
1592 """Roll back an interrupted transaction."""
1583
1593
1584 def rollback(dryrun=False, force=False):
1594 def rollback(dryrun=False, force=False):
1585 """Undo the last transaction.
1595 """Undo the last transaction.
1586
1596
1587 DANGEROUS.
1597 DANGEROUS.
1588 """
1598 """
1589
1599
1590 def updatecaches(tr=None, full=False):
1600 def updatecaches(tr=None, full=False):
1591 """Warm repo caches."""
1601 """Warm repo caches."""
1592
1602
1593 def invalidatecaches():
1603 def invalidatecaches():
1594 """Invalidate cached data due to the repository mutating."""
1604 """Invalidate cached data due to the repository mutating."""
1595
1605
1596 def invalidatevolatilesets():
1606 def invalidatevolatilesets():
1597 pass
1607 pass
1598
1608
1599 def invalidatedirstate():
1609 def invalidatedirstate():
1600 """Invalidate the dirstate."""
1610 """Invalidate the dirstate."""
1601
1611
1602 def invalidate(clearfilecache=False):
1612 def invalidate(clearfilecache=False):
1603 pass
1613 pass
1604
1614
1605 def invalidateall():
1615 def invalidateall():
1606 pass
1616 pass
1607
1617
1608 def lock(wait=True):
1618 def lock(wait=True):
1609 """Lock the repository store and return a lock instance."""
1619 """Lock the repository store and return a lock instance."""
1610
1620
1611 def wlock(wait=True):
1621 def wlock(wait=True):
1612 """Lock the non-store parts of the repository."""
1622 """Lock the non-store parts of the repository."""
1613
1623
1614 def currentwlock():
1624 def currentwlock():
1615 """Return the wlock if it's held or None."""
1625 """Return the wlock if it's held or None."""
1616
1626
1617 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1627 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1618 pass
1628 pass
1619
1629
1620 def commit(text='', user=None, date=None, match=None, force=False,
1630 def commit(text='', user=None, date=None, match=None, force=False,
1621 editor=False, extra=None):
1631 editor=False, extra=None):
1622 """Add a new revision to the repository."""
1632 """Add a new revision to the repository."""
1623
1633
1624 def commitctx(ctx, error=False):
1634 def commitctx(ctx, error=False):
1625 """Commit a commitctx instance to the repository."""
1635 """Commit a commitctx instance to the repository."""
1626
1636
1627 def destroying():
1637 def destroying():
1628 """Inform the repository that nodes are about to be destroyed."""
1638 """Inform the repository that nodes are about to be destroyed."""
1629
1639
1630 def destroyed():
1640 def destroyed():
1631 """Inform the repository that nodes have been destroyed."""
1641 """Inform the repository that nodes have been destroyed."""
1632
1642
1633 def status(node1='.', node2=None, match=None, ignored=False,
1643 def status(node1='.', node2=None, match=None, ignored=False,
1634 clean=False, unknown=False, listsubrepos=False):
1644 clean=False, unknown=False, listsubrepos=False):
1635 """Convenience method to call repo[x].status()."""
1645 """Convenience method to call repo[x].status()."""
1636
1646
1637 def addpostdsstatus(ps):
1647 def addpostdsstatus(ps):
1638 pass
1648 pass
1639
1649
1640 def postdsstatus():
1650 def postdsstatus():
1641 pass
1651 pass
1642
1652
1643 def clearpostdsstatus():
1653 def clearpostdsstatus():
1644 pass
1654 pass
1645
1655
1646 def heads(start=None):
1656 def heads(start=None):
1647 """Obtain list of nodes that are DAG heads."""
1657 """Obtain list of nodes that are DAG heads."""
1648
1658
1649 def branchheads(branch=None, start=None, closed=False):
1659 def branchheads(branch=None, start=None, closed=False):
1650 pass
1660 pass
1651
1661
1652 def branches(nodes):
1662 def branches(nodes):
1653 pass
1663 pass
1654
1664
1655 def between(pairs):
1665 def between(pairs):
1656 pass
1666 pass
1657
1667
1658 def checkpush(pushop):
1668 def checkpush(pushop):
1659 pass
1669 pass
1660
1670
1661 prepushoutgoinghooks = interfaceutil.Attribute(
1671 prepushoutgoinghooks = interfaceutil.Attribute(
1662 """util.hooks instance.""")
1672 """util.hooks instance.""")
1663
1673
1664 def pushkey(namespace, key, old, new):
1674 def pushkey(namespace, key, old, new):
1665 pass
1675 pass
1666
1676
1667 def listkeys(namespace):
1677 def listkeys(namespace):
1668 pass
1678 pass
1669
1679
1670 def debugwireargs(one, two, three=None, four=None, five=None):
1680 def debugwireargs(one, two, three=None, four=None, five=None):
1671 pass
1681 pass
1672
1682
1673 def savecommitmessage(text):
1683 def savecommitmessage(text):
1674 pass
1684 pass
1675
1685
1676 class completelocalrepository(ilocalrepositorymain,
1686 class completelocalrepository(ilocalrepositorymain,
1677 ilocalrepositoryfilestorage):
1687 ilocalrepositoryfilestorage):
1678 """Complete interface for a local repository."""
1688 """Complete interface for a local repository."""
1679
1689
1680 class iwireprotocolcommandcacher(interfaceutil.Interface):
1690 class iwireprotocolcommandcacher(interfaceutil.Interface):
1681 """Represents a caching backend for wire protocol commands.
1691 """Represents a caching backend for wire protocol commands.
1682
1692
1683 Wire protocol version 2 supports transparent caching of many commands.
1693 Wire protocol version 2 supports transparent caching of many commands.
1684 To leverage this caching, servers can activate objects that cache
1694 To leverage this caching, servers can activate objects that cache
1685 command responses. Objects handle both cache writing and reading.
1695 command responses. Objects handle both cache writing and reading.
1686 This interface defines how that response caching mechanism works.
1696 This interface defines how that response caching mechanism works.
1687
1697
1688 Wire protocol version 2 commands emit a series of objects that are
1698 Wire protocol version 2 commands emit a series of objects that are
1689 serialized and sent to the client. The caching layer exists between
1699 serialized and sent to the client. The caching layer exists between
1690 the invocation of the command function and the sending of its output
1700 the invocation of the command function and the sending of its output
1691 objects to an output layer.
1701 objects to an output layer.
1692
1702
1693 Instances of this interface represent a binding to a cache that
1703 Instances of this interface represent a binding to a cache that
1694 can serve a response (in place of calling a command function) and/or
1704 can serve a response (in place of calling a command function) and/or
1695 write responses to a cache for subsequent use.
1705 write responses to a cache for subsequent use.
1696
1706
1697 When a command request arrives, the following happens with regards
1707 When a command request arrives, the following happens with regards
1698 to this interface:
1708 to this interface:
1699
1709
1700 1. The server determines whether the command request is cacheable.
1710 1. The server determines whether the command request is cacheable.
1701 2. If it is, an instance of this interface is spawned.
1711 2. If it is, an instance of this interface is spawned.
1702 3. The cacher is activated in a context manager (``__enter__`` is called).
1712 3. The cacher is activated in a context manager (``__enter__`` is called).
1703 4. A cache *key* for that request is derived. This will call the
1713 4. A cache *key* for that request is derived. This will call the
1704 instance's ``adjustcachekeystate()`` method so the derivation
1714 instance's ``adjustcachekeystate()`` method so the derivation
1705 can be influenced.
1715 can be influenced.
1706 5. The cacher is informed of the derived cache key via a call to
1716 5. The cacher is informed of the derived cache key via a call to
1707 ``setcachekey()``.
1717 ``setcachekey()``.
1708 6. The cacher's ``lookup()`` method is called to test for presence of
1718 6. The cacher's ``lookup()`` method is called to test for presence of
1709 the derived key in the cache.
1719 the derived key in the cache.
1710 7. If ``lookup()`` returns a hit, that cached result is used in place
1720 7. If ``lookup()`` returns a hit, that cached result is used in place
1711 of invoking the command function. ``__exit__`` is called and the instance
1721 of invoking the command function. ``__exit__`` is called and the instance
1712 is discarded.
1722 is discarded.
1713 8. The command function is invoked.
1723 8. The command function is invoked.
1714 9. ``onobject()`` is called for each object emitted by the command
1724 9. ``onobject()`` is called for each object emitted by the command
1715 function.
1725 function.
1716 10. After the final object is seen, ``onfinished()`` is called.
1726 10. After the final object is seen, ``onfinished()`` is called.
1717 11. ``__exit__`` is called to signal the end of use of the instance.
1727 11. ``__exit__`` is called to signal the end of use of the instance.
1718
1728
1719 Cache *key* derivation can be influenced by the instance.
1729 Cache *key* derivation can be influenced by the instance.
1720
1730
1721 Cache keys are initially derived by a deterministic representation of
1731 Cache keys are initially derived by a deterministic representation of
1722 the command request. This includes the command name, arguments, protocol
1732 the command request. This includes the command name, arguments, protocol
1723 version, etc. This initial key derivation is performed by CBOR-encoding a
1733 version, etc. This initial key derivation is performed by CBOR-encoding a
1724 data structure and feeding that output into a hasher.
1734 data structure and feeding that output into a hasher.
1725
1735
1726 Instances of this interface can influence this initial key derivation
1736 Instances of this interface can influence this initial key derivation
1727 via ``adjustcachekeystate()``.
1737 via ``adjustcachekeystate()``.
1728
1738
1729 The instance is informed of the derived cache key via a call to
1739 The instance is informed of the derived cache key via a call to
1730 ``setcachekey()``. The instance must store the key locally so it can
1740 ``setcachekey()``. The instance must store the key locally so it can
1731 be consulted on subsequent operations that may require it.
1741 be consulted on subsequent operations that may require it.
1732
1742
1733 When constructed, the instance has access to a callable that can be used
1743 When constructed, the instance has access to a callable that can be used
1734 for encoding response objects. This callable receives as its single
1744 for encoding response objects. This callable receives as its single
1735 argument an object emitted by a command function. It returns an iterable
1745 argument an object emitted by a command function. It returns an iterable
1736 of bytes chunks representing the encoded object. Unless the cacher is
1746 of bytes chunks representing the encoded object. Unless the cacher is
1737 caching native Python objects in memory or has a way of reconstructing
1747 caching native Python objects in memory or has a way of reconstructing
1738 the original Python objects, implementations typically call this function
1748 the original Python objects, implementations typically call this function
1739 to produce bytes from the output objects and then store those bytes in
1749 to produce bytes from the output objects and then store those bytes in
1740 the cache. When it comes time to re-emit those bytes, they are wrapped
1750 the cache. When it comes time to re-emit those bytes, they are wrapped
1741 in a ``wireprototypes.encodedresponse`` instance to tell the output
1751 in a ``wireprototypes.encodedresponse`` instance to tell the output
1742 layer that they are pre-encoded.
1752 layer that they are pre-encoded.
1743
1753
1744 When receiving the objects emitted by the command function, instances
1754 When receiving the objects emitted by the command function, instances
1745 can choose what to do with those objects. The simplest thing to do is
1755 can choose what to do with those objects. The simplest thing to do is
1746 re-emit the original objects. They will be forwarded to the output
1756 re-emit the original objects. They will be forwarded to the output
1747 layer and will be processed as if the cacher did not exist.
1757 layer and will be processed as if the cacher did not exist.
1748
1758
1749 Implementations could also choose to not emit objects - instead locally
1759 Implementations could also choose to not emit objects - instead locally
1750 buffering objects or their encoded representation. They could then emit
1760 buffering objects or their encoded representation. They could then emit
1751 a single "coalesced" object when ``onfinished()`` is called. In
1761 a single "coalesced" object when ``onfinished()`` is called. In
1752 this way, the implementation would function as a filtering layer of
1762 this way, the implementation would function as a filtering layer of
1753 sorts.
1763 sorts.
1754
1764
1755 When caching objects, typically the encoded form of the object will
1765 When caching objects, typically the encoded form of the object will
1756 be stored. Keep in mind that if the original object is forwarded to
1766 be stored. Keep in mind that if the original object is forwarded to
1757 the output layer, it will need to be encoded there as well. For large
1767 the output layer, it will need to be encoded there as well. For large
1758 output, this redundant encoding could add overhead. Implementations
1768 output, this redundant encoding could add overhead. Implementations
1759 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1769 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1760 instances to avoid this overhead.
1770 instances to avoid this overhead.
1761 """
1771 """
1762 def __enter__():
1772 def __enter__():
1763 """Marks the instance as active.
1773 """Marks the instance as active.
1764
1774
1765 Should return self.
1775 Should return self.
1766 """
1776 """
1767
1777
1768 def __exit__(exctype, excvalue, exctb):
1778 def __exit__(exctype, excvalue, exctb):
1769 """Called when cacher is no longer used.
1779 """Called when cacher is no longer used.
1770
1780
1771 This can be used by implementations to perform cleanup actions (e.g.
1781 This can be used by implementations to perform cleanup actions (e.g.
1772 disconnecting network sockets, aborting a partially cached response.
1782 disconnecting network sockets, aborting a partially cached response.
1773 """
1783 """
1774
1784
1775 def adjustcachekeystate(state):
1785 def adjustcachekeystate(state):
1776 """Influences cache key derivation by adjusting state to derive key.
1786 """Influences cache key derivation by adjusting state to derive key.
1777
1787
1778 A dict defining the state used to derive the cache key is passed.
1788 A dict defining the state used to derive the cache key is passed.
1779
1789
1780 Implementations can modify this dict to record additional state that
1790 Implementations can modify this dict to record additional state that
1781 is wanted to influence key derivation.
1791 is wanted to influence key derivation.
1782
1792
1783 Implementations are *highly* encouraged to not modify or delete
1793 Implementations are *highly* encouraged to not modify or delete
1784 existing keys.
1794 existing keys.
1785 """
1795 """
1786
1796
1787 def setcachekey(key):
1797 def setcachekey(key):
1788 """Record the derived cache key for this request.
1798 """Record the derived cache key for this request.
1789
1799
1790 Instances may mutate the key for internal usage, as desired. e.g.
1800 Instances may mutate the key for internal usage, as desired. e.g.
1791 instances may wish to prepend the repo name, introduce path
1801 instances may wish to prepend the repo name, introduce path
1792 components for filesystem or URL addressing, etc. Behavior is up to
1802 components for filesystem or URL addressing, etc. Behavior is up to
1793 the cache.
1803 the cache.
1794
1804
1795 Returns a bool indicating if the request is cacheable by this
1805 Returns a bool indicating if the request is cacheable by this
1796 instance.
1806 instance.
1797 """
1807 """
1798
1808
1799 def lookup():
1809 def lookup():
1800 """Attempt to resolve an entry in the cache.
1810 """Attempt to resolve an entry in the cache.
1801
1811
1802 The instance is instructed to look for the cache key that it was
1812 The instance is instructed to look for the cache key that it was
1803 informed about via the call to ``setcachekey()``.
1813 informed about via the call to ``setcachekey()``.
1804
1814
1805 If there's no cache hit or the cacher doesn't wish to use the cached
1815 If there's no cache hit or the cacher doesn't wish to use the cached
1806 entry, ``None`` should be returned.
1816 entry, ``None`` should be returned.
1807
1817
1808 Else, a dict defining the cached result should be returned. The
1818 Else, a dict defining the cached result should be returned. The
1809 dict may have the following keys:
1819 dict may have the following keys:
1810
1820
1811 objs
1821 objs
1812 An iterable of objects that should be sent to the client. That
1822 An iterable of objects that should be sent to the client. That
1813 iterable of objects is expected to be what the command function
1823 iterable of objects is expected to be what the command function
1814 would return if invoked or an equivalent representation thereof.
1824 would return if invoked or an equivalent representation thereof.
1815 """
1825 """
1816
1826
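    # Illustrative cache hit (hypothetical values): a backend holding
    # pre-encoded chunks could satisfy ``lookup()`` with something like:
    #
    #   return {'objs': [wireprototypes.encodedresponse(b'...')]}
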
    def onobject(obj):
        """Called when a new object is emitted from the command function.

        Receives as its argument the object that was emitted from the
        command function.

        This method returns an iterator of objects to forward to the output
        layer. The easiest implementation is a generator that just does
        ``yield obj``.
        """

    def onfinished():
        """Called after all objects have been emitted from the command function.

        Implementations should return an iterator of objects to forward to
        the output layer.

        This method can be a generator.
        """
@@ -1,1331 +1,1345
# storage.py - Testing of storage primitives.
#
# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import unittest

from ..node import (
    hex,
    nullid,
    nullrev,
)
from .. import (
    error,
    mdiff,
    repository,
)
from ..utils import (
    storageutil,
)

class basetestcase(unittest.TestCase):
    if not getattr(unittest.TestCase, r'assertRaisesRegex', False):
        assertRaisesRegex = (# camelcase-required
            unittest.TestCase.assertRaisesRegexp)

class ifileindextests(basetestcase):
    """Generic tests for the ifileindex interface.

    All file storage backends for index data should conform to the tests in
    this class.

    Use ``makeifileindextests()`` to create an instance of this type.
    """
    def testempty(self):
        f = self._makefilefn()
        self.assertEqual(len(f), 0, 'new file store has 0 length by default')
        self.assertEqual(list(f), [], 'iter yields nothing by default')

        gen = iter(f)
        with self.assertRaises(StopIteration):
            next(gen)

        self.assertFalse(f.hasnode(None))
        self.assertFalse(f.hasnode(0))
        self.assertFalse(f.hasnode(nullrev))
        self.assertFalse(f.hasnode(nullid))
        self.assertFalse(f.hasnode(b'0'))
        self.assertFalse(f.hasnode(b'a' * 20))

        # revs() should evaluate to an empty list.
        self.assertEqual(list(f.revs()), [])

        revs = iter(f.revs())
        with self.assertRaises(StopIteration):
            next(revs)

        self.assertEqual(list(f.revs(start=20)), [])

        # parents() and parentrevs() work with nullid/nullrev.
        self.assertEqual(f.parents(nullid), (nullid, nullid))
        self.assertEqual(f.parentrevs(nullrev), (nullrev, nullrev))

        with self.assertRaises(error.LookupError):
            f.parents(b'\x01' * 20)

        for i in range(-5, 5):
            if i == nullrev:
                continue

            with self.assertRaises(IndexError):
                f.parentrevs(i)

        # nullid/nullrev lookup always works.
        self.assertEqual(f.rev(nullid), nullrev)
        self.assertEqual(f.node(nullrev), nullid)

        with self.assertRaises(error.LookupError):
            f.rev(b'\x01' * 20)

        for i in range(-5, 5):
            if i == nullrev:
                continue

            with self.assertRaises(IndexError):
                f.node(i)

        self.assertEqual(f.lookup(nullid), nullid)
        self.assertEqual(f.lookup(nullrev), nullid)
        self.assertEqual(f.lookup(hex(nullid)), nullid)
        self.assertEqual(f.lookup(b'%d' % nullrev), nullid)

        with self.assertRaises(error.LookupError):
            f.lookup(b'badvalue')

        with self.assertRaises(error.LookupError):
            f.lookup(hex(nullid)[0:12])

        with self.assertRaises(error.LookupError):
            f.lookup(b'-2')

        with self.assertRaises(error.LookupError):
            f.lookup(b'0')

        with self.assertRaises(error.LookupError):
            f.lookup(b'1')

        with self.assertRaises(error.LookupError):
            f.lookup(b'11111111111111111111111111111111111111')

        for i in range(-5, 5):
            if i == nullrev:
                continue

            with self.assertRaises(LookupError):
                f.lookup(i)

        self.assertEqual(f.linkrev(nullrev), nullrev)

        for i in range(-5, 5):
            if i == nullrev:
                continue

            with self.assertRaises(IndexError):
                f.linkrev(i)

        self.assertFalse(f.iscensored(nullrev))

        for i in range(-5, 5):
            if i == nullrev:
                continue

            with self.assertRaises(IndexError):
                f.iscensored(i)

        self.assertEqual(list(f.commonancestorsheads(nullid, nullid)), [])

        with self.assertRaises(ValueError):
            self.assertEqual(list(f.descendants([])), [])

        self.assertEqual(list(f.descendants([nullrev])), [])

        self.assertEqual(f.heads(), [nullid])
        self.assertEqual(f.heads(nullid), [nullid])
        self.assertEqual(f.heads(None, [nullid]), [nullid])
        self.assertEqual(f.heads(nullid, [nullid]), [nullid])

        self.assertEqual(f.children(nullid), [])

        with self.assertRaises(error.LookupError):
            f.children(b'\x01' * 20)

    def testsinglerevision(self):
        f = self._makefilefn()
        with self._maketransactionfn() as tr:
            node = f.add(b'initial', None, tr, 0, nullid, nullid)

        self.assertEqual(len(f), 1)
        self.assertEqual(list(f), [0])

        gen = iter(f)
        self.assertEqual(next(gen), 0)

        with self.assertRaises(StopIteration):
            next(gen)

        self.assertTrue(f.hasnode(node))
        self.assertFalse(f.hasnode(hex(node)))
        self.assertFalse(f.hasnode(nullrev))
        self.assertFalse(f.hasnode(nullid))
        self.assertFalse(f.hasnode(node[0:12]))
        self.assertFalse(f.hasnode(hex(node)[0:20]))

        self.assertEqual(list(f.revs()), [0])
        self.assertEqual(list(f.revs(start=1)), [])
        self.assertEqual(list(f.revs(start=0)), [0])
        self.assertEqual(list(f.revs(stop=0)), [0])
        self.assertEqual(list(f.revs(stop=1)), [0])
        self.assertEqual(list(f.revs(1, 1)), [])
        # TODO buggy
        self.assertEqual(list(f.revs(1, 0)), [1, 0])
        self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])

        self.assertEqual(f.parents(node), (nullid, nullid))
        self.assertEqual(f.parentrevs(0), (nullrev, nullrev))

        with self.assertRaises(error.LookupError):
            f.parents(b'\x01' * 20)

        with self.assertRaises(IndexError):
            f.parentrevs(1)

        self.assertEqual(f.rev(node), 0)

        with self.assertRaises(error.LookupError):
            f.rev(b'\x01' * 20)

        self.assertEqual(f.node(0), node)

        with self.assertRaises(IndexError):
            f.node(1)

        self.assertEqual(f.lookup(node), node)
        self.assertEqual(f.lookup(0), node)
        self.assertEqual(f.lookup(-1), nullid)
        self.assertEqual(f.lookup(b'0'), node)
        self.assertEqual(f.lookup(hex(node)), node)

        with self.assertRaises(error.LookupError):
            f.lookup(hex(node)[0:12])

        with self.assertRaises(error.LookupError):
            f.lookup(-2)

        with self.assertRaises(error.LookupError):
            f.lookup(b'-2')

        with self.assertRaises(error.LookupError):
            f.lookup(1)

        with self.assertRaises(error.LookupError):
            f.lookup(b'1')

        self.assertEqual(f.linkrev(0), 0)

        with self.assertRaises(IndexError):
            f.linkrev(1)

        self.assertFalse(f.iscensored(0))

        with self.assertRaises(IndexError):
            f.iscensored(1)

        self.assertEqual(list(f.descendants([0])), [])

        self.assertEqual(f.heads(), [node])
        self.assertEqual(f.heads(node), [node])
        self.assertEqual(f.heads(stop=[node]), [node])

        with self.assertRaises(error.LookupError):
            f.heads(stop=[b'\x01' * 20])

        self.assertEqual(f.children(node), [])

    def testmultiplerevisions(self):
        fulltext0 = b'x' * 1024
        fulltext1 = fulltext0 + b'y'
        fulltext2 = b'y' + fulltext0 + b'z'

        f = self._makefilefn()
        with self._maketransactionfn() as tr:
            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
            node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
            node2 = f.add(fulltext2, None, tr, 3, node1, nullid)

        self.assertEqual(len(f), 3)
        self.assertEqual(list(f), [0, 1, 2])

        gen = iter(f)
        self.assertEqual(next(gen), 0)
        self.assertEqual(next(gen), 1)
        self.assertEqual(next(gen), 2)

        with self.assertRaises(StopIteration):
            next(gen)

        self.assertEqual(list(f.revs()), [0, 1, 2])
        self.assertEqual(list(f.revs(0)), [0, 1, 2])
        self.assertEqual(list(f.revs(1)), [1, 2])
        self.assertEqual(list(f.revs(2)), [2])
        self.assertEqual(list(f.revs(3)), [])
        self.assertEqual(list(f.revs(stop=1)), [0, 1])
        self.assertEqual(list(f.revs(stop=2)), [0, 1, 2])
        self.assertEqual(list(f.revs(stop=3)), [0, 1, 2])
        self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
        self.assertEqual(list(f.revs(2, 1)), [2, 1])
        # TODO this is wrong
        self.assertEqual(list(f.revs(3, 2)), [3, 2])

        self.assertEqual(f.parents(node0), (nullid, nullid))
        self.assertEqual(f.parents(node1), (node0, nullid))
        self.assertEqual(f.parents(node2), (node1, nullid))

        self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
        self.assertEqual(f.parentrevs(1), (0, nullrev))
        self.assertEqual(f.parentrevs(2), (1, nullrev))

        self.assertEqual(f.rev(node0), 0)
        self.assertEqual(f.rev(node1), 1)
        self.assertEqual(f.rev(node2), 2)

        with self.assertRaises(error.LookupError):
            f.rev(b'\x01' * 20)

        self.assertEqual(f.node(0), node0)
        self.assertEqual(f.node(1), node1)
        self.assertEqual(f.node(2), node2)

        with self.assertRaises(IndexError):
            f.node(3)

        self.assertEqual(f.lookup(node0), node0)
        self.assertEqual(f.lookup(0), node0)
        self.assertEqual(f.lookup(b'0'), node0)
        self.assertEqual(f.lookup(hex(node0)), node0)

        self.assertEqual(f.lookup(node1), node1)
        self.assertEqual(f.lookup(1), node1)
        self.assertEqual(f.lookup(b'1'), node1)
        self.assertEqual(f.lookup(hex(node1)), node1)

        self.assertEqual(f.linkrev(0), 0)
        self.assertEqual(f.linkrev(1), 1)
        self.assertEqual(f.linkrev(2), 3)

        with self.assertRaises(IndexError):
            f.linkrev(3)

        self.assertFalse(f.iscensored(0))
        self.assertFalse(f.iscensored(1))
        self.assertFalse(f.iscensored(2))

        with self.assertRaises(IndexError):
            f.iscensored(3)

        self.assertEqual(f.commonancestorsheads(node1, nullid), [])
        self.assertEqual(f.commonancestorsheads(node1, node0), [node0])
        self.assertEqual(f.commonancestorsheads(node1, node1), [node1])
        self.assertEqual(f.commonancestorsheads(node0, node1), [node0])
        self.assertEqual(f.commonancestorsheads(node1, node2), [node1])
        self.assertEqual(f.commonancestorsheads(node2, node1), [node1])

        self.assertEqual(list(f.descendants([0])), [1, 2])
        self.assertEqual(list(f.descendants([1])), [2])
        self.assertEqual(list(f.descendants([0, 1])), [1, 2])

        self.assertEqual(f.heads(), [node2])
        self.assertEqual(f.heads(node0), [node2])
        self.assertEqual(f.heads(node1), [node2])
        self.assertEqual(f.heads(node2), [node2])

        # TODO this behavior seems wonky. Is it correct? If so, the
        # docstring for heads() should be updated to reflect desired
        # behavior.
        self.assertEqual(f.heads(stop=[node1]), [node1, node2])
        self.assertEqual(f.heads(stop=[node0]), [node0, node2])
        self.assertEqual(f.heads(stop=[node1, node2]), [node1, node2])

        with self.assertRaises(error.LookupError):
            f.heads(stop=[b'\x01' * 20])

        self.assertEqual(f.children(node0), [node1])
        self.assertEqual(f.children(node1), [node2])
        self.assertEqual(f.children(node2), [])

    def testmultipleheads(self):
        f = self._makefilefn()

        with self._maketransactionfn() as tr:
            node0 = f.add(b'0', None, tr, 0, nullid, nullid)
            node1 = f.add(b'1', None, tr, 1, node0, nullid)
            node2 = f.add(b'2', None, tr, 2, node1, nullid)
            node3 = f.add(b'3', None, tr, 3, node0, nullid)
            node4 = f.add(b'4', None, tr, 4, node3, nullid)
            node5 = f.add(b'5', None, tr, 5, node0, nullid)

        self.assertEqual(len(f), 6)

        self.assertEqual(list(f.descendants([0])), [1, 2, 3, 4, 5])
        self.assertEqual(list(f.descendants([1])), [2])
        self.assertEqual(list(f.descendants([2])), [])
        self.assertEqual(list(f.descendants([3])), [4])
        self.assertEqual(list(f.descendants([0, 1])), [1, 2, 3, 4, 5])
        self.assertEqual(list(f.descendants([1, 3])), [2, 4])

        self.assertEqual(f.heads(), [node2, node4, node5])
        self.assertEqual(f.heads(node0), [node2, node4, node5])
        self.assertEqual(f.heads(node1), [node2])
        self.assertEqual(f.heads(node2), [node2])
        self.assertEqual(f.heads(node3), [node4])
        self.assertEqual(f.heads(node4), [node4])
        self.assertEqual(f.heads(node5), [node5])

        # TODO this seems wrong.
        self.assertEqual(f.heads(stop=[node0]), [node0, node2, node4, node5])
        self.assertEqual(f.heads(stop=[node1]), [node1, node2, node4, node5])

        self.assertEqual(f.children(node0), [node1, node3, node5])
        self.assertEqual(f.children(node1), [node2])
        self.assertEqual(f.children(node2), [])
        self.assertEqual(f.children(node3), [node4])
        self.assertEqual(f.children(node4), [])
        self.assertEqual(f.children(node5), [])

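# Illustrative sketch (not part of the Mercurial source): a backend could
# satisfy the hasnode() contract exercised above in terms of rev(). Note the
# tests demand that the null node reports as absent even though rev(nullid)
# succeeds, and that non-node inputs (None, integers, short or hex strings)
# return False rather than raising.
def sketch_hasnode(store, node):
    """Return True if ``node`` is a known, non-null node in ``store``."""
    if node in (nullid, nullrev):
        return False
    try:
        store.rev(node)
        return True
    except (TypeError, ValueError, IndexError, error.LookupError):
        return False
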
class ifiledatatests(basetestcase):
    """Generic tests for the ifiledata interface.

    All file storage backends for data should conform to the tests in this
    class.

    Use ``makeifiledatatests()`` to create an instance of this type.
    """
    def testempty(self):
        f = self._makefilefn()

        self.assertEqual(f.storageinfo(), {})
        self.assertEqual(f.storageinfo(revisionscount=True, trackedsize=True),
                         {'revisionscount': 0, 'trackedsize': 0})

        self.assertEqual(f.size(nullrev), 0)

        for i in range(-5, 5):
            if i == nullrev:
                continue

            with self.assertRaises(IndexError):
                f.size(i)

        self.assertEqual(f.revision(nullid), b'')
        self.assertEqual(f.revision(nullid, raw=True), b'')

        with self.assertRaises(error.LookupError):
            f.revision(b'\x01' * 20)

        self.assertEqual(f.read(nullid), b'')

        with self.assertRaises(error.LookupError):
            f.read(b'\x01' * 20)

        self.assertFalse(f.renamed(nullid))

        with self.assertRaises(error.LookupError):
            f.read(b'\x01' * 20)

        self.assertTrue(f.cmp(nullid, b''))
        self.assertTrue(f.cmp(nullid, b'foo'))

        with self.assertRaises(error.LookupError):
            f.cmp(b'\x01' * 20, b'irrelevant')

        # Emitting an empty list is an empty generator.
        gen = f.emitrevisions([])
        with self.assertRaises(StopIteration):
            next(gen)

        # Emitting the null node yields nothing.
        gen = f.emitrevisions([nullid])
        with self.assertRaises(StopIteration):
            next(gen)

        # Requesting an unknown node fails.
        with self.assertRaises(error.LookupError):
            list(f.emitrevisions([b'\x01' * 20]))

    def testsinglerevision(self):
        fulltext = b'initial'

        f = self._makefilefn()
        with self._maketransactionfn() as tr:
            node = f.add(fulltext, None, tr, 0, nullid, nullid)

        self.assertEqual(f.storageinfo(), {})
        self.assertEqual(f.storageinfo(revisionscount=True, trackedsize=True),
                         {'revisionscount': 1, 'trackedsize': len(fulltext)})

        self.assertEqual(f.size(0), len(fulltext))

        with self.assertRaises(IndexError):
            f.size(1)

        self.assertEqual(f.revision(node), fulltext)
        self.assertEqual(f.revision(node, raw=True), fulltext)

        self.assertEqual(f.read(node), fulltext)

        self.assertFalse(f.renamed(node))

        self.assertFalse(f.cmp(node, fulltext))
        self.assertTrue(f.cmp(node, fulltext + b'extra'))

        # Emitting a single revision works.
        gen = f.emitrevisions([node])
        rev = next(gen)

        self.assertEqual(rev.node, node)
        self.assertEqual(rev.p1node, nullid)
        self.assertEqual(rev.p2node, nullid)
        self.assertIsNone(rev.linknode)
        self.assertEqual(rev.basenode, nullid)
        self.assertIsNone(rev.baserevisionsize)
        self.assertIsNone(rev.revision)
        self.assertIsNone(rev.delta)

        with self.assertRaises(StopIteration):
            next(gen)

        # Requesting revision data works.
        gen = f.emitrevisions([node], revisiondata=True)
        rev = next(gen)

        self.assertEqual(rev.node, node)
        self.assertEqual(rev.p1node, nullid)
        self.assertEqual(rev.p2node, nullid)
        self.assertIsNone(rev.linknode)
        self.assertEqual(rev.basenode, nullid)
        self.assertIsNone(rev.baserevisionsize)
        self.assertEqual(rev.revision, fulltext)
        self.assertIsNone(rev.delta)

        with self.assertRaises(StopIteration):
            next(gen)

        # Emitting an unknown node after a known revision results in an error.
        with self.assertRaises(error.LookupError):
            list(f.emitrevisions([node, b'\x01' * 20]))

    def testmultiplerevisions(self):
        fulltext0 = b'x' * 1024
        fulltext1 = fulltext0 + b'y'
        fulltext2 = b'y' + fulltext0 + b'z'

        f = self._makefilefn()
        with self._maketransactionfn() as tr:
            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
            node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
            node2 = f.add(fulltext2, None, tr, 3, node1, nullid)

        self.assertEqual(f.storageinfo(), {})
        self.assertEqual(
            f.storageinfo(revisionscount=True, trackedsize=True),
            {
                'revisionscount': 3,
                'trackedsize': len(fulltext0) + len(fulltext1) + len(fulltext2),
            })

        self.assertEqual(f.size(0), len(fulltext0))
        self.assertEqual(f.size(1), len(fulltext1))
        self.assertEqual(f.size(2), len(fulltext2))

        with self.assertRaises(IndexError):
            f.size(3)

        self.assertEqual(f.revision(node0), fulltext0)
        self.assertEqual(f.revision(node0, raw=True), fulltext0)
        self.assertEqual(f.revision(node1), fulltext1)
        self.assertEqual(f.revision(node1, raw=True), fulltext1)
        self.assertEqual(f.revision(node2), fulltext2)
        self.assertEqual(f.revision(node2, raw=True), fulltext2)

        with self.assertRaises(error.LookupError):
            f.revision(b'\x01' * 20)

        self.assertEqual(f.read(node0), fulltext0)
        self.assertEqual(f.read(node1), fulltext1)
        self.assertEqual(f.read(node2), fulltext2)

        with self.assertRaises(error.LookupError):
            f.read(b'\x01' * 20)

        self.assertFalse(f.renamed(node0))
        self.assertFalse(f.renamed(node1))
        self.assertFalse(f.renamed(node2))

        with self.assertRaises(error.LookupError):
            f.renamed(b'\x01' * 20)

        self.assertFalse(f.cmp(node0, fulltext0))
        self.assertFalse(f.cmp(node1, fulltext1))
        self.assertFalse(f.cmp(node2, fulltext2))

        self.assertTrue(f.cmp(node1, fulltext0))
        self.assertTrue(f.cmp(node2, fulltext1))

        with self.assertRaises(error.LookupError):
            f.cmp(b'\x01' * 20, b'irrelevant')

        # Nodes should be emitted in order.
        gen = f.emitrevisions([node0, node1, node2], revisiondata=True)

        rev = next(gen)

        self.assertEqual(rev.node, node0)
        self.assertEqual(rev.p1node, nullid)
        self.assertEqual(rev.p2node, nullid)
        self.assertIsNone(rev.linknode)
        self.assertEqual(rev.basenode, nullid)
        self.assertIsNone(rev.baserevisionsize)
        self.assertEqual(rev.revision, fulltext0)
        self.assertIsNone(rev.delta)

        rev = next(gen)

        self.assertEqual(rev.node, node1)
        self.assertEqual(rev.p1node, node0)
        self.assertEqual(rev.p2node, nullid)
        self.assertIsNone(rev.linknode)
        self.assertEqual(rev.basenode, node0)
        self.assertIsNone(rev.baserevisionsize)
        self.assertIsNone(rev.revision)
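        # Deltas are in bdiff hunk format: a 12-byte header of three
        # big-endian 32-bit integers (start, end, new length), here
        # "replace bytes [0, 1024) with the 1025-byte fulltext1", followed
        # by the replacement data.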
        self.assertEqual(rev.delta,
                         b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
                         fulltext1)

        rev = next(gen)

        self.assertEqual(rev.node, node2)
        self.assertEqual(rev.p1node, node1)
        self.assertEqual(rev.p2node, nullid)
        self.assertIsNone(rev.linknode)
        self.assertEqual(rev.basenode, node1)
        self.assertIsNone(rev.baserevisionsize)
        self.assertIsNone(rev.revision)
        self.assertEqual(rev.delta,
                         b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
                         fulltext2)

        with self.assertRaises(StopIteration):
            next(gen)

        # A request not in DAG order is reordered to be in DAG order.
        gen = f.emitrevisions([node2, node1, node0], revisiondata=True)

        rev = next(gen)

        self.assertEqual(rev.node, node0)
        self.assertEqual(rev.p1node, nullid)
        self.assertEqual(rev.p2node, nullid)
        self.assertIsNone(rev.linknode)
        self.assertEqual(rev.basenode, nullid)
        self.assertIsNone(rev.baserevisionsize)
        self.assertEqual(rev.revision, fulltext0)
        self.assertIsNone(rev.delta)

        rev = next(gen)

        self.assertEqual(rev.node, node1)
        self.assertEqual(rev.p1node, node0)
        self.assertEqual(rev.p2node, nullid)
        self.assertIsNone(rev.linknode)
        self.assertEqual(rev.basenode, node0)
        self.assertIsNone(rev.baserevisionsize)
        self.assertIsNone(rev.revision)
        self.assertEqual(rev.delta,
                         b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
                         fulltext1)

        rev = next(gen)

        self.assertEqual(rev.node, node2)
        self.assertEqual(rev.p1node, node1)
        self.assertEqual(rev.p2node, nullid)
        self.assertIsNone(rev.linknode)
        self.assertEqual(rev.basenode, node1)
        self.assertIsNone(rev.baserevisionsize)
        self.assertIsNone(rev.revision)
        self.assertEqual(rev.delta,
                         b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
                         fulltext2)

        with self.assertRaises(StopIteration):
            next(gen)

        # An unrecognized nodesorder value raises ProgrammingError.
        with self.assertRaises(error.ProgrammingError):
            list(f.emitrevisions([], nodesorder='bad'))

        # nodesorder=storage is recognized, but we can't test it thoroughly
        # because behavior is storage-dependent.
        res = list(f.emitrevisions([node2, node1, node0],
                                   nodesorder='storage'))
        self.assertEqual(len(res), 3)
        self.assertEqual({o.node for o in res}, {node0, node1, node2})

        # nodesorder=nodes forces the requested order.
        gen = f.emitrevisions([node2, node0], nodesorder='nodes',
                              revisiondata=True)

        rev = next(gen)
        self.assertEqual(rev.node, node2)
        self.assertEqual(rev.p1node, node1)
        self.assertEqual(rev.p2node, nullid)
        self.assertEqual(rev.basenode, nullid)
        self.assertIsNone(rev.baserevisionsize)
        self.assertEqual(rev.revision, fulltext2)
        self.assertIsNone(rev.delta)

        rev = next(gen)
        self.assertEqual(rev.node, node0)
        self.assertEqual(rev.p1node, nullid)
        self.assertEqual(rev.p2node, nullid)
        # Delta behavior is storage-dependent, so we can't easily test it.

        with self.assertRaises(StopIteration):
            next(gen)

        # assumehaveparentrevisions=False (the default) won't send a delta for
        # the first revision.
        gen = f.emitrevisions({node2, node1}, revisiondata=True)

        rev = next(gen)
        self.assertEqual(rev.node, node1)
        self.assertEqual(rev.p1node, node0)
        self.assertEqual(rev.p2node, nullid)
        self.assertEqual(rev.basenode, nullid)
        self.assertIsNone(rev.baserevisionsize)
        self.assertEqual(rev.revision, fulltext1)
        self.assertIsNone(rev.delta)

        rev = next(gen)
        self.assertEqual(rev.node, node2)
        self.assertEqual(rev.p1node, node1)
        self.assertEqual(rev.p2node, nullid)
        self.assertEqual(rev.basenode, node1)
        self.assertIsNone(rev.baserevisionsize)
        self.assertIsNone(rev.revision)
        self.assertEqual(rev.delta,
                         b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
                         fulltext2)

        with self.assertRaises(StopIteration):
            next(gen)

        # assumehaveparentrevisions=True allows a delta against the initial
        # revision.
        gen = f.emitrevisions([node2, node1],
                              revisiondata=True, assumehaveparentrevisions=True)

        rev = next(gen)
        self.assertEqual(rev.node, node1)
        self.assertEqual(rev.p1node, node0)
        self.assertEqual(rev.p2node, nullid)
        self.assertEqual(rev.basenode, node0)
        self.assertIsNone(rev.baserevisionsize)
        self.assertIsNone(rev.revision)
        self.assertEqual(rev.delta,
                         b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
                         fulltext1)

        # deltaprevious=True forces a delta against the previous revision,
        # with a special case for the initial revision.
        gen = f.emitrevisions([node0], revisiondata=True, deltaprevious=True)

        rev = next(gen)
        self.assertEqual(rev.node, node0)
        self.assertEqual(rev.p1node, nullid)
        self.assertEqual(rev.p2node, nullid)
        self.assertEqual(rev.basenode, nullid)
        self.assertIsNone(rev.baserevisionsize)
        self.assertIsNone(rev.revision)
        self.assertEqual(rev.delta,
                         b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00' +
                         fulltext0)

        with self.assertRaises(StopIteration):
            next(gen)

        gen = f.emitrevisions([node0, node2], revisiondata=True,
                              deltaprevious=True)

        rev = next(gen)
        self.assertEqual(rev.node, node0)
        self.assertEqual(rev.p1node, nullid)
        self.assertEqual(rev.p2node, nullid)
        self.assertEqual(rev.basenode, nullid)
        self.assertIsNone(rev.baserevisionsize)
        self.assertIsNone(rev.revision)
        self.assertEqual(rev.delta,
                         b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00' +
                         fulltext0)

        rev = next(gen)
        self.assertEqual(rev.node, node2)
        self.assertEqual(rev.p1node, node1)
        self.assertEqual(rev.p2node, nullid)
        self.assertEqual(rev.basenode, node0)

        with self.assertRaises(StopIteration):
            next(gen)

    def testrenamed(self):
        fulltext0 = b'foo'
        fulltext1 = b'bar'
        fulltext2 = b'baz'

        meta1 = {
            b'copy': b'source0',
            b'copyrev': b'a' * 40,
        }

        meta2 = {
            b'copy': b'source1',
            b'copyrev': b'b' * 40,
        }

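        # Rename metadata is persisted inline, framed by \x01\n markers,
        # ahead of the fulltext: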
        stored1 = b''.join([
            b'\x01\ncopy: source0\n',
            b'copyrev: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n\x01\n',
            fulltext1,
        ])

        stored2 = b''.join([
            b'\x01\ncopy: source1\n',
            b'copyrev: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n\x01\n',
            fulltext2,
        ])

        f = self._makefilefn()
        with self._maketransactionfn() as tr:
            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
            node1 = f.add(fulltext1, meta1, tr, 1, node0, nullid)
            node2 = f.add(fulltext2, meta2, tr, 2, nullid, nullid)

        # The metadata header isn't recognized when the parent isn't nullid.
        self.assertEqual(f.size(1), len(stored1))
        self.assertEqual(f.size(2), len(fulltext2))

        self.assertEqual(f.revision(node1), stored1)
        self.assertEqual(f.revision(node1, raw=True), stored1)
        self.assertEqual(f.revision(node2), stored2)
        self.assertEqual(f.revision(node2, raw=True), stored2)

        self.assertEqual(f.read(node1), fulltext1)
        self.assertEqual(f.read(node2), fulltext2)

        # renamed() returns False when the first parent is set.
        self.assertFalse(f.renamed(node1))
        self.assertEqual(f.renamed(node2), (b'source1', b'\xbb' * 20))

        self.assertTrue(f.cmp(node1, fulltext1))
        self.assertTrue(f.cmp(node1, stored1))
        self.assertFalse(f.cmp(node2, fulltext2))
        self.assertTrue(f.cmp(node2, stored2))

    def testmetadataprefix(self):
        # Content with metadata prefix has extra prefix inserted in storage.
        fulltext0 = b'\x01\nfoo'
        stored0 = b'\x01\n\x01\n\x01\nfoo'

        fulltext1 = b'\x01\nbar'
        meta1 = {
            b'copy': b'source0',
            b'copyrev': b'b' * 40,
        }
        stored1 = b''.join([
            b'\x01\ncopy: source0\n',
            b'copyrev: %s\n' % (b'b' * 40),
            b'\x01\n\x01\nbar',
        ])

        f = self._makefilefn()
        with self._maketransactionfn() as tr:
            node0 = f.add(fulltext0, {}, tr, 0, nullid, nullid)
            node1 = f.add(fulltext1, meta1, tr, 1, nullid, nullid)

        # TODO this is buggy.
        self.assertEqual(f.size(0), len(fulltext0) + 4)

        self.assertEqual(f.size(1), len(fulltext1))

        self.assertEqual(f.revision(node0), stored0)
        self.assertEqual(f.revision(node0, raw=True), stored0)

        self.assertEqual(f.revision(node1), stored1)
        self.assertEqual(f.revision(node1, raw=True), stored1)

        self.assertEqual(f.read(node0), fulltext0)
        self.assertEqual(f.read(node1), fulltext1)

        self.assertFalse(f.cmp(node0, fulltext0))
        self.assertTrue(f.cmp(node0, stored0))

        self.assertFalse(f.cmp(node1, fulltext1))
        self.assertTrue(f.cmp(node1, stored1))

    def testbadnoderead(self):
        f = self._makefilefn()

        fulltext0 = b'foo\n' * 30
        fulltext1 = fulltext0 + b'bar\n'

        with self._maketransactionfn() as tr:
            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
            node1 = b'\xaa' * 20

            self._addrawrevisionfn(f, tr, node1, node0, nullid, 1,
                                   rawtext=fulltext1)

        self.assertEqual(len(f), 2)
        self.assertEqual(f.parents(node1), (node0, nullid))

        # revision() raises since it performs hash verification.
        with self.assertRaises(error.StorageError):
            f.revision(node1)

        # raw=True still verifies because there are no special storage
        # settings.
        with self.assertRaises(error.StorageError):
            f.revision(node1, raw=True)

        # read() behaves like revision().
        with self.assertRaises(error.StorageError):
            f.read(node1)

        # We can't test renamed() here because some backends may not require
        # reading/validating the fulltext to return rename metadata.

    def testbadnoderevisionraw(self):
        # Like above except we test revision(raw=True) first to isolate
        # revision caching behavior.
        f = self._makefilefn()

        fulltext0 = b'foo\n' * 30
        fulltext1 = fulltext0 + b'bar\n'

        with self._maketransactionfn() as tr:
            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
            node1 = b'\xaa' * 20

            self._addrawrevisionfn(f, tr, node1, node0, nullid, 1,
                                   rawtext=fulltext1)

        with self.assertRaises(error.StorageError):
            f.revision(node1, raw=True)

        with self.assertRaises(error.StorageError):
            f.revision(node1, raw=True)

    def testbadnodereadfirst(self):
        # Like above except we test read() first to isolate revision caching
        # behavior.
        f = self._makefilefn()

        fulltext0 = b'foo\n' * 30
        fulltext1 = fulltext0 + b'bar\n'

        with self._maketransactionfn() as tr:
            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
            node1 = b'\xaa' * 20

            self._addrawrevisionfn(f, tr, node1, node0, nullid, 1,
                                   rawtext=fulltext1)

        with self.assertRaises(error.StorageError):
            f.read(node1)

        with self.assertRaises(error.StorageError):
            f.read(node1)

    def testbadnodedelta(self):
        f = self._makefilefn()

        fulltext0 = b'foo\n' * 31
        fulltext1 = fulltext0 + b'bar\n'
        fulltext2 = fulltext1 + b'baz\n'

        with self._maketransactionfn() as tr:
            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
            node1 = b'\xaa' * 20

            self._addrawrevisionfn(f, tr, node1, node0, nullid, 1,
                                   rawtext=fulltext1)

        with self.assertRaises(error.StorageError):
            f.read(node1)

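        # Compute the valid node for fulltext2 (the SHA-1 over both parents
        # plus the text) so node2 itself verifies even though its delta base,
        # node1, does not.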
        node2 = storageutil.hashrevisionsha1(fulltext2, node1, nullid)

        with self._maketransactionfn() as tr:
            delta = mdiff.textdiff(fulltext1, fulltext2)
            self._addrawrevisionfn(f, tr, node2, node1, nullid,
                                   2, delta=(1, delta))

        self.assertEqual(len(f), 3)

        # Assuming a delta is stored, we shouldn't need to validate node1 in
        # order to retrieve node2.
        self.assertEqual(f.read(node2), fulltext2)

    def testcensored(self):
        f = self._makefilefn()

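        # packmeta() frames the metadata block with \x01\n markers ahead of
        # the (here empty) fulltext, producing a censorship tombstone with no
        # file content.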
        stored1 = storageutil.packmeta({
            b'censored': b'tombstone',
        }, b'')

        with self._maketransactionfn() as tr:
            node0 = f.add(b'foo', None, tr, 0, nullid, nullid)

            # The node value doesn't matter since we can't verify it.
            node1 = b'\xbb' * 20

            self._addrawrevisionfn(f, tr, node1, node0, nullid, 1, stored1,
                                   censored=True)

        self.assertTrue(f.iscensored(1))

        with self.assertRaises(error.CensoredNodeError):
            f.revision(1)

        with self.assertRaises(error.CensoredNodeError):
            f.revision(1, raw=True)

        with self.assertRaises(error.CensoredNodeError):
            f.read(1)

    def testcensoredrawrevision(self):
        # Like above, except we do the revision(raw=True) request first to
        # isolate revision caching behavior.

        f = self._makefilefn()

        stored1 = storageutil.packmeta({
            b'censored': b'tombstone',
        }, b'')

        with self._maketransactionfn() as tr:
            node0 = f.add(b'foo', None, tr, 0, nullid, nullid)

            # The node value doesn't matter since we can't verify it.
            node1 = b'\xbb' * 20

            self._addrawrevisionfn(f, tr, node1, node0, nullid, 1, stored1,
                                   censored=True)

        with self.assertRaises(error.CensoredNodeError):
            f.revision(1, raw=True)

class ifilemutationtests(basetestcase):
    """Generic tests for the ifilemutation interface.

    All file storage backends that support writing should conform to this
    interface.

    Use ``makeifilemutationtests()`` to create an instance of this type.
    """
    def testaddnoop(self):
        f = self._makefilefn()
        with self._maketransactionfn() as tr:
            node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
            node1 = f.add(b'foo', None, tr, 0, nullid, nullid)
            # Varying by linkrev shouldn't impact hash.
            node2 = f.add(b'foo', None, tr, 1, nullid, nullid)

        self.assertEqual(node1, node0)
        self.assertEqual(node2, node0)
        self.assertEqual(len(f), 1)

    def testaddrevisionbadnode(self):
        f = self._makefilefn()
        with self._maketransactionfn() as tr:
            # Adding a revision with a bad node value fails.
            with self.assertRaises(error.StorageError):
                f.addrevision(b'foo', tr, 0, nullid, nullid, node=b'\x01' * 20)

    def testaddrevisionunknownflag(self):
        f = self._makefilefn()
        with self._maketransactionfn() as tr:
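            # Find the highest flag bit in the 16-bit flags field that is not
            # in REVISION_FLAGS_KNOWN, to use as an unknown flag below.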
            for i in range(15, 0, -1):
                if (1 << i) & ~repository.REVISION_FLAGS_KNOWN:
                    flags = 1 << i
                    break

            with self.assertRaises(error.StorageError):
                f.addrevision(b'foo', tr, 0, nullid, nullid, flags=flags)

    def testaddgroupsimple(self):
        f = self._makefilefn()

        callbackargs = []
        def cb(*args, **kwargs):
            callbackargs.append((args, kwargs))

        def linkmapper(node):
            return 0

        with self._maketransactionfn() as tr:
            nodes = f.addgroup([], None, tr, addrevisioncb=cb)

        self.assertEqual(nodes, [])
        self.assertEqual(callbackargs, [])
        self.assertEqual(len(f), 0)

        fulltext0 = b'foo'
        delta0 = mdiff.trivialdiffheader(len(fulltext0)) + fulltext0
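        # trivialdiffheader() produces a bdiff hunk header that replaces the
        # empty range [0, 0) with len(fulltext0) new bytes, making delta0 a
        # full snapshot expressed as a delta.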

        with self._maketransactionfn() as tr:
            node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)

        f = self._makefilefn()

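        # Each addgroup() entry is a 7-tuple:
        # (node, p1, p2, linknode, deltabase, delta, flags).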
        deltas = [
            (node0, nullid, nullid, nullid, nullid, delta0, 0),
        ]

        with self._maketransactionfn() as tr:
            nodes = f.addgroup(deltas, linkmapper, tr, addrevisioncb=cb)

        self.assertEqual(nodes, [
            b'\x49\xd8\xcb\xb1\x5c\xe2\x57\x92\x04\x47'
            b'\x00\x6b\x46\x97\x8b\x7a\xf9\x80\xa9\x79'])

        self.assertEqual(len(callbackargs), 1)
        self.assertEqual(callbackargs[0][0][1], nodes[0])

        self.assertEqual(list(f.revs()), [0])
        self.assertEqual(f.rev(nodes[0]), 0)
        self.assertEqual(f.node(0), nodes[0])

    def testaddgroupmultiple(self):
        f = self._makefilefn()

        fulltexts = [
            b'foo',
            b'bar',
            b'x' * 1024,
        ]

        nodes = []
        with self._maketransactionfn() as tr:
            for fulltext in fulltexts:
                nodes.append(f.add(fulltext, None, tr, 0, nullid, nullid))

        f = self._makefilefn()
        deltas = []
        for i, fulltext in enumerate(fulltexts):
            delta = mdiff.trivialdiffheader(len(fulltext)) + fulltext

            deltas.append((nodes[i], nullid, nullid, nullid, nullid, delta, 0))

        with self._maketransactionfn() as tr:
            self.assertEqual(f.addgroup(deltas, lambda x: 0, tr), nodes)

        self.assertEqual(len(f), len(deltas))
        self.assertEqual(list(f.revs()), [0, 1, 2])
        self.assertEqual(f.rev(nodes[0]), 0)
        self.assertEqual(f.rev(nodes[1]), 1)
        self.assertEqual(f.rev(nodes[2]), 2)
        self.assertEqual(f.node(0), nodes[0])
        self.assertEqual(f.node(1), nodes[1])
        self.assertEqual(f.node(2), nodes[2])

    def testdeltaagainstcensored(self):
        # Attempt to apply a delta made against a censored revision.
        f = self._makefilefn()

        stored1 = storageutil.packmeta({
            b'censored': b'tombstone',
        }, b'')

        with self._maketransactionfn() as tr:
            node0 = f.add(b'foo\n' * 30, None, tr, 0, nullid, nullid)

            # The node value doesn't matter since we can't verify it.
            node1 = b'\xbb' * 20

            self._addrawrevisionfn(f, tr, node1, node0, nullid, 1, stored1,
                                   censored=True)

        delta = mdiff.textdiff(b'bar\n' * 30, (b'bar\n' * 30) + b'baz\n')
        deltas = [(b'\xcc' * 20, node1, nullid, b'\x01' * 20, node1, delta, 0)]

        with self._maketransactionfn() as tr:
            with self.assertRaises(error.CensoredBaseError):
                f.addgroup(deltas, lambda x: 0, tr)

    def testcensorrevisionbasic(self):
        f = self._makefilefn()

        with self._maketransactionfn() as tr:
            node0 = f.add(b'foo\n' * 30, None, tr, 0, nullid, nullid)
            node1 = f.add(b'foo\n' * 31, None, tr, 1, node0, nullid)
            node2 = f.add(b'foo\n' * 32, None, tr, 2, node1, nullid)

        with self._maketransactionfn() as tr:
            f.censorrevision(tr, node1)

        self.assertEqual(len(f), 3)
        self.assertEqual(list(f.revs()), [0, 1, 2])

        self.assertEqual(f.read(node0), b'foo\n' * 30)
        self.assertEqual(f.read(node2), b'foo\n' * 32)

        with self.assertRaises(error.CensoredNodeError):
            f.read(node1)

    def testgetstrippointnoparents(self):
        # N revisions where none have parents.
        f = self._makefilefn()

        with self._maketransactionfn() as tr:
            for rev in range(10):
                f.add(b'%d' % rev, None, tr, rev, nullid, nullid)

        for rev in range(10):
            self.assertEqual(f.getstrippoint(rev), (rev, set()))

    def testgetstrippointlinear(self):
        # N revisions in a linear chain.
        f = self._makefilefn()

        with self._maketransactionfn() as tr:
            p1 = nullid

            for rev in range(10):
                p1 = f.add(b'%d' % rev, None, tr, rev, p1, nullid)

        for rev in range(10):
            self.assertEqual(f.getstrippoint(rev), (rev, set()))

    def testgetstrippointmultipleheads(self):
        f = self._makefilefn()

        with self._maketransactionfn() as tr:
            node0 = f.add(b'0', None, tr, 0, nullid, nullid)
            node1 = f.add(b'1', None, tr, 1, node0, nullid)
            f.add(b'2', None, tr, 2, node1, nullid)
            f.add(b'3', None, tr, 3, node0, nullid)
            f.add(b'4', None, tr, 4, node0, nullid)

        for rev in range(5):
            self.assertEqual(f.getstrippoint(rev), (rev, set()))

    def testgetstrippointearlierlinkrevs(self):
        f = self._makefilefn()

        with self._maketransactionfn() as tr:
            node0 = f.add(b'0', None, tr, 0, nullid, nullid)
            f.add(b'1', None, tr, 10, node0, nullid)
            f.add(b'2', None, tr, 5, node0, nullid)

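        # getstrippoint(minlink) returns the first revision to strip plus the
        # set of revisions past that point whose linkrevs fall below minlink
        # (collateral damage that would need to be re-added). Rev 1 has
        # linkrev 10 and rev 2 has linkrev 5, so stripping with minlink in
        # 6-10 starts at rev 1 and reports rev 2 as broken.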
        self.assertEqual(f.getstrippoint(0), (0, set()))
        self.assertEqual(f.getstrippoint(1), (1, set()))
        self.assertEqual(f.getstrippoint(2), (1, set()))
        self.assertEqual(f.getstrippoint(3), (1, set()))
        self.assertEqual(f.getstrippoint(4), (1, set()))
        self.assertEqual(f.getstrippoint(5), (1, set()))
        self.assertEqual(f.getstrippoint(6), (1, {2}))
        self.assertEqual(f.getstrippoint(7), (1, {2}))
        self.assertEqual(f.getstrippoint(8), (1, {2}))
        self.assertEqual(f.getstrippoint(9), (1, {2}))
        self.assertEqual(f.getstrippoint(10), (1, {2}))
        self.assertEqual(f.getstrippoint(11), (3, set()))

    def teststripempty(self):
        f = self._makefilefn()

        with self._maketransactionfn() as tr:
            f.strip(0, tr)

        self.assertEqual(len(f), 0)

    def teststripall(self):
        f = self._makefilefn()

        with self._maketransactionfn() as tr:
            p1 = nullid
            for rev in range(10):
                p1 = f.add(b'%d' % rev, None, tr, rev, p1, nullid)

        self.assertEqual(len(f), 10)

        with self._maketransactionfn() as tr:
            f.strip(0, tr)

        self.assertEqual(len(f), 0)

    def teststrippartial(self):
        f = self._makefilefn()

        with self._maketransactionfn() as tr:
            f.add(b'0', None, tr, 0, nullid, nullid)
            node1 = f.add(b'1', None, tr, 5, nullid, nullid)
            node2 = f.add(b'2', None, tr, 10, nullid, nullid)

        self.assertEqual(len(f), 3)

        with self._maketransactionfn() as tr:
            f.strip(11, tr)

        self.assertEqual(len(f), 3)

        with self._maketransactionfn() as tr:
            f.strip(10, tr)

        self.assertEqual(len(f), 2)

        with self.assertRaises(error.LookupError):
            f.rev(node2)

        with self._maketransactionfn() as tr:
            f.strip(6, tr)

        self.assertEqual(len(f), 2)

        with self._maketransactionfn() as tr:
            f.strip(3, tr)

        self.assertEqual(len(f), 1)

        with self.assertRaises(error.LookupError):
            f.rev(node1)

def makeifileindextests(makefilefn, maketransactionfn, addrawrevisionfn):
    """Create a unittest.TestCase class suitable for testing file storage.

    ``makefilefn`` is a callable which receives the test case as an
    argument and returns an object implementing the ``ifilestorage`` interface.

    ``maketransactionfn`` is a callable which receives the test case as an
    argument and returns a transaction object.

    ``addrawrevisionfn`` is a callable which receives arguments describing a
    low-level revision to add. This callable allows the insertion of
    potentially bad data into the store in order to facilitate testing.

    Returns a type that is a ``unittest.TestCase`` that can be used for
    testing the object implementing the file storage interface. Simply
    assign the returned value to a module-level attribute and a test loader
    should find and run it automatically.
    """
    d = {
        r'_makefilefn': makefilefn,
        r'_maketransactionfn': maketransactionfn,
        r'_addrawrevisionfn': addrawrevisionfn,
    }
    return type(r'ifileindextests', (ifileindextests,), d)

def makeifiledatatests(makefilefn, maketransactionfn, addrawrevisionfn):
    d = {
        r'_makefilefn': makefilefn,
        r'_maketransactionfn': maketransactionfn,
        r'_addrawrevisionfn': addrawrevisionfn,
    }
    return type(r'ifiledatatests', (ifiledatatests,), d)

def makeifilemutationtests(makefilefn, maketransactionfn, addrawrevisionfn):
    d = {
        r'_makefilefn': makefilefn,
        r'_maketransactionfn': maketransactionfn,
        r'_addrawrevisionfn': addrawrevisionfn,
    }
    return type(r'ifilemutationtests', (ifilemutationtests,), d)
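
# A minimal usage sketch of the factories above: a backend's test module
# wires its storage primitives into the factories and assigns the results
# to module-level names so a test loader picks them up. ``mybackend`` and
# the helper signatures here are hypothetical; ``silenttestrunner`` is the
# runner used by Mercurial's own test suite.
#
#     import silenttestrunner
#
#     def makefilefn(self):
#         """Factory returning an ifilestorage instance for a dummy path."""
#         return mybackend.filestorage(b'dummy-tracked-file')
#
#     def maketransactionfn(self):
#         return mybackend.transaction()
#
#     def addrawrevisionfn(self, fl, tr, node, p1, p2, linkrev,
#                          rawtext=None, delta=None, censored=False):
#         fl._addrawrevision(node, p1, p2, linkrev, rawtext=rawtext,
#                            delta=delta, censored=censored)
#
#     ifileindextests = makeifileindextests(makefilefn, maketransactionfn,
#                                           addrawrevisionfn)
#     ifiledatatests = makeifiledatatests(makefilefn, maketransactionfn,
#                                         addrawrevisionfn)
#     ifilemutationtests = makeifilemutationtests(makefilefn,
#                                                 maketransactionfn,
#                                                 addrawrevisionfn)
#
#     if __name__ == '__main__':
#         silenttestrunner.main(__name__)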