filelog: add a hasnode() method (API)...
Gregory Szorc
r40423:f1a39128 default
@@ -1,1113 +1,1119
1 1 # sqlitestore.py - Storage backend that uses SQLite
2 2 #
3 3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """store repository data in SQLite (EXPERIMENTAL)
9 9
10 10 The sqlitestore extension enables the storage of repository data in SQLite.
11 11
12 12 This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
13 13 GUARANTEES. This means that repositories created with this extension may
14 14 only be usable with the exact version of this extension/Mercurial that was
15 15 used. The extension attempts to enforce this in order to prevent repository
16 16 corruption.
17 17
18 18 In addition, several features are not yet supported or have known bugs:
19 19
20 20 * Only some data is stored in SQLite. Changeset, manifest, and other repository
21 21 data is not yet stored in SQLite.
22 22 * Transactions are not robust. If the process is aborted at the right time
23 23 during transaction close/rollback, the repository could be in an inconsistent
24 24 state. This problem will diminish once all repository data is tracked by
25 25 SQLite.
26 26 * Bundle repositories do not work (the ability to use e.g.
27 27 `hg -R <bundle-file> log` to automatically overlay a bundle on top of the
28 28 existing repository).
29 29 * Various other features don't work.
30 30
31 31 This extension should work for basic clone/pull, update, and commit workflows.
32 32 Some history rewriting operations may fail due to lack of support for bundle
33 33 repositories.
34 34
35 35 To use, activate the extension and set the ``storage.new-repo-backend`` config
36 36 option to ``sqlite`` to enable new repositories to use SQLite for storage.
37 37 """
38 38
39 39 # To run the test suite with repos using SQLite by default, execute the
40 40 # following:
41 41 #
42 42 # HGREPOFEATURES="sqlitestore" run-tests.py \
43 43 # --extra-config-opt extensions.sqlitestore= \
44 44 # --extra-config-opt storage.new-repo-backend=sqlite
45 45
46 46 from __future__ import absolute_import
47 47
48 48 import hashlib
49 49 import sqlite3
50 50 import struct
51 51 import threading
52 52 import zlib
53 53
54 54 from mercurial.i18n import _
55 55 from mercurial.node import (
56 56 nullid,
57 57 nullrev,
58 58 short,
59 59 )
60 60 from mercurial.thirdparty import (
61 61 attr,
62 62 )
63 63 from mercurial import (
64 64 ancestor,
65 65 dagop,
66 66 error,
67 67 extensions,
68 68 localrepo,
69 69 mdiff,
70 70 pycompat,
71 71 registrar,
72 72 repository,
73 73 util,
74 74 verify,
75 75 )
76 76 from mercurial.utils import (
77 77 interfaceutil,
78 78 storageutil,
79 79 )
80 80
81 81 try:
82 82 from mercurial import zstd
83 83 zstd.__version__
84 84 except ImportError:
85 85 zstd = None
86 86
87 87 configtable = {}
88 88 configitem = registrar.configitem(configtable)
89 89
90 90 # experimental config: storage.sqlite.compression
91 91 configitem('storage', 'sqlite.compression',
92 92 default='zstd' if zstd else 'zlib')
93 93
94 94 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
95 95 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
96 96 # be specifying the version(s) of Mercurial they are tested with, or
97 97 # leave the attribute unspecified.
98 98 testedwith = 'ships-with-hg-core'
99 99
100 100 REQUIREMENT = b'exp-sqlite-001'
101 101 REQUIREMENT_ZSTD = b'exp-sqlite-comp-001=zstd'
102 102 REQUIREMENT_ZLIB = b'exp-sqlite-comp-001=zlib'
103 103 REQUIREMENT_NONE = b'exp-sqlite-comp-001=none'
104 104
105 105 CURRENT_SCHEMA_VERSION = 1
106 106
107 107 COMPRESSION_NONE = 1
108 108 COMPRESSION_ZSTD = 2
109 109 COMPRESSION_ZLIB = 3
110 110
111 111 FLAG_CENSORED = 1
112 112
113 113 CREATE_SCHEMA = [
114 114 # Deltas are stored as content-indexed blobs.
115 115 # compression column holds COMPRESSION_* constant for how the
116 116 # delta is encoded.
117 117
118 118 r'CREATE TABLE delta ('
119 119 r' id INTEGER PRIMARY KEY, '
120 120 r' compression INTEGER NOT NULL, '
121 121 r' hash BLOB UNIQUE ON CONFLICT ABORT, '
122 122 r' delta BLOB NOT NULL '
123 123 r')',
124 124
125 125 # Tracked paths are denormalized to integers to avoid redundant
126 126 # storage of the path name.
127 127 r'CREATE TABLE filepath ('
128 128 r' id INTEGER PRIMARY KEY, '
129 129 r' path BLOB NOT NULL '
130 130 r')',
131 131
132 132 r'CREATE UNIQUE INDEX filepath_path '
133 133 r' ON filepath (path)',
134 134
135 135 # We have a single table for all file revision data.
136 136 # Each file revision is uniquely described by a (path, rev) and
137 137 # (path, node).
138 138 #
139 139 # Revision data is stored as a pointer to the delta producing this
140 140 # revision and the file revision whose delta should be applied before
141 141 # that one. One can reconstruct the delta chain by recursively following
142 142 # the delta base revision pointers until one encounters NULL.
143 143 #
144 144 # flags column holds bitwise integer flags controlling storage options.
145 145 # These flags are defined by the FLAG_* constants.
146 146 r'CREATE TABLE fileindex ('
147 147 r' id INTEGER PRIMARY KEY, '
148 148 r' pathid INTEGER REFERENCES filepath(id), '
149 149 r' revnum INTEGER NOT NULL, '
150 150 r' p1rev INTEGER NOT NULL, '
151 151 r' p2rev INTEGER NOT NULL, '
152 152 r' linkrev INTEGER NOT NULL, '
153 153 r' flags INTEGER NOT NULL, '
154 154 r' deltaid INTEGER REFERENCES delta(id), '
155 155 r' deltabaseid INTEGER REFERENCES fileindex(id), '
156 156 r' node BLOB NOT NULL '
157 157 r')',
158 158
159 159 r'CREATE UNIQUE INDEX fileindex_pathrevnum '
160 160 r' ON fileindex (pathid, revnum)',
161 161
162 162 r'CREATE UNIQUE INDEX fileindex_pathnode '
163 163 r' ON fileindex (pathid, node)',
164 164
165 165 # Provide a view over all file data for convenience.
166 166 r'CREATE VIEW filedata AS '
167 167 r'SELECT '
168 168 r' fileindex.id AS id, '
169 169 r' filepath.id AS pathid, '
170 170 r' filepath.path AS path, '
171 171 r' fileindex.revnum AS revnum, '
172 172 r' fileindex.node AS node, '
173 173 r' fileindex.p1rev AS p1rev, '
174 174 r' fileindex.p2rev AS p2rev, '
175 175 r' fileindex.linkrev AS linkrev, '
176 176 r' fileindex.flags AS flags, '
177 177 r' fileindex.deltaid AS deltaid, '
178 178 r' fileindex.deltabaseid AS deltabaseid '
179 179 r'FROM filepath, fileindex '
180 180 r'WHERE fileindex.pathid=filepath.id',
181 181
182 182 r'PRAGMA user_version=%d' % CURRENT_SCHEMA_VERSION,
183 183 ]
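To make the fileindex comment above concrete, here is a minimal standalone sketch (not extension code) of following ``deltabaseid`` pointers iteratively until NULL; ``resolvedeltachain()`` below performs the same walk with a recursive CTE instead:

    def walkdeltachain(db, fileindexid):
        # Follow deltabaseid pointers from a fileindex row back to the
        # start of its chain (deltabaseid IS NULL), collecting delta ids
        # from newest to oldest.
        chain = []
        current = fileindexid
        while current is not None:
            deltaid, baseid = db.execute(
                'SELECT deltaid, deltabaseid FROM fileindex WHERE id=?',
                (current,)).fetchone()
            chain.append(deltaid)
            current = baseid
        return chain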
184 184
185 185 def resolvedeltachain(db, pathid, node, revisioncache,
186 186 stoprids, zstddctx=None):
187 187 """Resolve a delta chain for a file node."""
188 188
189 189 # TODO the "not in ({stops})" here is possibly slowing down the query
190 190 # because it needs to perform the lookup on every recursive invocation.
191 191 # This could possibly be faster if we created a temporary query with
192 192 # baseid "poisoned" to null and limited the recursive filter to
193 193 # "is not null".
194 194 res = db.execute(
195 195 r'WITH RECURSIVE '
196 196 r' deltachain(deltaid, baseid) AS ('
197 197 r' SELECT deltaid, deltabaseid FROM fileindex '
198 198 r' WHERE pathid=? AND node=? '
199 199 r' UNION ALL '
200 200 r' SELECT fileindex.deltaid, deltabaseid '
201 201 r' FROM fileindex, deltachain '
202 202 r' WHERE '
203 203 r' fileindex.id=deltachain.baseid '
204 204 r' AND deltachain.baseid IS NOT NULL '
205 205 r' AND fileindex.id NOT IN ({stops}) '
206 206 r' ) '
207 207 r'SELECT deltachain.baseid, compression, delta '
208 208 r'FROM deltachain, delta '
209 209 r'WHERE delta.id=deltachain.deltaid'.format(
210 210 stops=r','.join([r'?'] * len(stoprids))),
211 211 tuple([pathid, node] + list(stoprids.keys())))
212 212
213 213 deltas = []
214 214 lastdeltabaseid = None
215 215
216 216 for deltabaseid, compression, delta in res:
217 217 lastdeltabaseid = deltabaseid
218 218
219 219 if compression == COMPRESSION_ZSTD:
220 220 delta = zstddctx.decompress(delta)
221 221 elif compression == COMPRESSION_NONE:
222 222 delta = delta
223 223 elif compression == COMPRESSION_ZLIB:
224 224 delta = zlib.decompress(delta)
225 225 else:
226 226 raise SQLiteStoreError('unhandled compression type: %d' %
227 227 compression)
228 228
229 229 deltas.append(delta)
230 230
231 231 if lastdeltabaseid in stoprids:
232 232 basetext = revisioncache[stoprids[lastdeltabaseid]]
233 233 else:
234 234 basetext = deltas.pop()
235 235
236 236 deltas.reverse()
237 237 fulltext = mdiff.patches(basetext, deltas)
238 238
239 239 # SQLite returns buffer instances for blob columns on Python 2. This
240 240 # type can propagate through the delta application layer. Because
241 241 # downstream callers assume revisions are bytes, cast as needed.
242 242 if not isinstance(fulltext, bytes):
243 243 fulltext = bytes(fulltext)
244 244
245 245 return fulltext
246 246
247 247 def insertdelta(db, compression, hash, delta):
248 248 try:
249 249 return db.execute(
250 250 r'INSERT INTO delta (compression, hash, delta) '
251 251 r'VALUES (?, ?, ?)',
252 252 (compression, hash, delta)).lastrowid
253 253 except sqlite3.IntegrityError:
254 254 return db.execute(
255 255 r'SELECT id FROM delta WHERE hash=?',
256 256 (hash,)).fetchone()[0]
257 257
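The ``UNIQUE ON CONFLICT ABORT`` constraint on ``delta.hash`` is what makes ``insertdelta()`` idempotent: a duplicate insert raises ``IntegrityError``, which is converted into a lookup of the existing row. A small demonstration against an in-memory database built from ``CREATE_SCHEMA``:

    import hashlib
    import sqlite3

    db = sqlite3.connect(':memory:')
    for stmt in CREATE_SCHEMA:
        db.execute(stmt)

    payload = b'example delta'
    h = hashlib.sha1(payload).digest()
    first = insertdelta(db, COMPRESSION_NONE, h, payload)
    second = insertdelta(db, COMPRESSION_NONE, h, payload)
    assert first == second  # de-duplicated to a single delta row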
258 258 class SQLiteStoreError(error.StorageError):
259 259 pass
260 260
261 261 @attr.s
262 262 class revisionentry(object):
263 263 rid = attr.ib()
264 264 rev = attr.ib()
265 265 node = attr.ib()
266 266 p1rev = attr.ib()
267 267 p2rev = attr.ib()
268 268 p1node = attr.ib()
269 269 p2node = attr.ib()
270 270 linkrev = attr.ib()
271 271 flags = attr.ib()
272 272
273 273 @interfaceutil.implementer(repository.irevisiondelta)
274 274 @attr.s(slots=True)
275 275 class sqliterevisiondelta(object):
276 276 node = attr.ib()
277 277 p1node = attr.ib()
278 278 p2node = attr.ib()
279 279 basenode = attr.ib()
280 280 flags = attr.ib()
281 281 baserevisionsize = attr.ib()
282 282 revision = attr.ib()
283 283 delta = attr.ib()
284 284 linknode = attr.ib(default=None)
285 285
286 286 @interfaceutil.implementer(repository.iverifyproblem)
287 287 @attr.s(frozen=True)
288 288 class sqliteproblem(object):
289 289 warning = attr.ib(default=None)
290 290 error = attr.ib(default=None)
291 291 node = attr.ib(default=None)
292 292
293 293 @interfaceutil.implementer(repository.ifilestorage)
294 294 class sqlitefilestore(object):
295 295 """Implements storage for an individual tracked path."""
296 296
297 297 def __init__(self, db, path, compression):
298 298 self._db = db
299 299 self._path = path
300 300
301 301 self._pathid = None
302 302
303 303 # revnum -> node
304 304 self._revtonode = {}
305 305 # node -> revnum
306 306 self._nodetorev = {}
307 307 # node -> data structure
308 308 self._revisions = {}
309 309
310 310 self._revisioncache = util.lrucachedict(10)
311 311
312 312 self._compengine = compression
313 313
314 314 if compression == 'zstd':
315 315 self._cctx = zstd.ZstdCompressor(level=3)
316 316 self._dctx = zstd.ZstdDecompressor()
317 317 else:
318 318 self._cctx = None
319 319 self._dctx = None
320 320
321 321 self._refreshindex()
322 322
323 323 def _refreshindex(self):
324 324 self._revtonode = {}
325 325 self._nodetorev = {}
326 326 self._revisions = {}
327 327
328 328 res = list(self._db.execute(
329 329 r'SELECT id FROM filepath WHERE path=?', (self._path,)))
330 330
331 331 if not res:
332 332 self._pathid = None
333 333 return
334 334
335 335 self._pathid = res[0][0]
336 336
337 337 res = self._db.execute(
338 338 r'SELECT id, revnum, node, p1rev, p2rev, linkrev, flags '
339 339 r'FROM fileindex '
340 340 r'WHERE pathid=? '
341 341 r'ORDER BY revnum ASC',
342 342 (self._pathid,))
343 343
344 344 for i, row in enumerate(res):
345 345 rid, rev, node, p1rev, p2rev, linkrev, flags = row
346 346
347 347 if i != rev:
348 348 raise SQLiteStoreError(_('sqlite database has inconsistent '
349 349 'revision numbers'))
350 350
351 351 if p1rev == nullrev:
352 352 p1node = nullid
353 353 else:
354 354 p1node = self._revtonode[p1rev]
355 355
356 356 if p2rev == nullrev:
357 357 p2node = nullid
358 358 else:
359 359 p2node = self._revtonode[p2rev]
360 360
361 361 entry = revisionentry(
362 362 rid=rid,
363 363 rev=rev,
364 364 node=node,
365 365 p1rev=p1rev,
366 366 p2rev=p2rev,
367 367 p1node=p1node,
368 368 p2node=p2node,
369 369 linkrev=linkrev,
370 370 flags=flags)
371 371
372 372 self._revtonode[rev] = node
373 373 self._nodetorev[node] = rev
374 374 self._revisions[node] = entry
375 375
376 376 # Start of ifileindex interface.
377 377
378 378 def __len__(self):
379 379 return len(self._revisions)
380 380
381 381 def __iter__(self):
382 382 return iter(pycompat.xrange(len(self._revisions)))
383 383
384 def hasnode(self, node):
385 if node == nullid:
386 return False
387
388 return node in self._nodetorev
389
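Note how presence here is a plain dictionary membership test against the index loaded by ``_refreshindex()``, so no exception handling is needed, unlike the revlog-backed ``hasnode()`` added to filelog.py below. Hypothetical usage:

    # `store` is a sqlitefilestore, `somenode` a 20-byte node
    if store.hasnode(somenode):
        text = store.revision(somenode)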
384 390 def revs(self, start=0, stop=None):
385 391 return storageutil.iterrevs(len(self._revisions), start=start,
386 392 stop=stop)
387 393
388 394 def parents(self, node):
389 395 if node == nullid:
390 396 return nullid, nullid
391 397
392 398 if node not in self._revisions:
393 399 raise error.LookupError(node, self._path, _('no node'))
394 400
395 401 entry = self._revisions[node]
396 402 return entry.p1node, entry.p2node
397 403
398 404 def parentrevs(self, rev):
399 405 if rev == nullrev:
400 406 return nullrev, nullrev
401 407
402 408 if rev not in self._revtonode:
403 409 raise IndexError(rev)
404 410
405 411 entry = self._revisions[self._revtonode[rev]]
406 412 return entry.p1rev, entry.p2rev
407 413
408 414 def rev(self, node):
409 415 if node == nullid:
410 416 return nullrev
411 417
412 418 if node not in self._nodetorev:
413 419 raise error.LookupError(node, self._path, _('no node'))
414 420
415 421 return self._nodetorev[node]
416 422
417 423 def node(self, rev):
418 424 if rev == nullrev:
419 425 return nullid
420 426
421 427 if rev not in self._revtonode:
422 428 raise IndexError(rev)
423 429
424 430 return self._revtonode[rev]
425 431
426 432 def lookup(self, node):
427 433 return storageutil.fileidlookup(self, node, self._path)
428 434
429 435 def linkrev(self, rev):
430 436 if rev == nullrev:
431 437 return nullrev
432 438
433 439 if rev not in self._revtonode:
434 440 raise IndexError(rev)
435 441
436 442 entry = self._revisions[self._revtonode[rev]]
437 443 return entry.linkrev
438 444
439 445 def iscensored(self, rev):
440 446 if rev == nullrev:
441 447 return False
442 448
443 449 if rev not in self._revtonode:
444 450 raise IndexError(rev)
445 451
446 452 return self._revisions[self._revtonode[rev]].flags & FLAG_CENSORED
447 453
448 454 def commonancestorsheads(self, node1, node2):
449 455 rev1 = self.rev(node1)
450 456 rev2 = self.rev(node2)
451 457
452 458 ancestors = ancestor.commonancestorsheads(self.parentrevs, rev1, rev2)
453 459 return pycompat.maplist(self.node, ancestors)
454 460
455 461 def descendants(self, revs):
456 462 # TODO we could implement this using a recursive SQL query, which
457 463 # might be faster.
458 464 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
459 465
460 466 def heads(self, start=None, stop=None):
461 467 if start is None and stop is None:
462 468 if not len(self):
463 469 return [nullid]
464 470
465 471 startrev = self.rev(start) if start is not None else nullrev
466 472 stoprevs = {self.rev(n) for n in stop or []}
467 473
468 474 revs = dagop.headrevssubset(self.revs, self.parentrevs,
469 475 startrev=startrev, stoprevs=stoprevs)
470 476
471 477 return [self.node(rev) for rev in revs]
472 478
473 479 def children(self, node):
474 480 rev = self.rev(node)
475 481
476 482 res = self._db.execute(
477 483 r'SELECT'
478 484 r' node '
479 485 r' FROM filedata '
480 486 r' WHERE path=? AND (p1rev=? OR p2rev=?) '
481 487 r' ORDER BY revnum ASC',
482 488 (self._path, rev, rev))
483 489
484 490 return [row[0] for row in res]
485 491
486 492 # End of ifileindex interface.
487 493
488 494 # Start of ifiledata interface.
489 495
490 496 def size(self, rev):
491 497 if rev == nullrev:
492 498 return 0
493 499
494 500 if rev not in self._revtonode:
495 501 raise IndexError(rev)
496 502
497 503 node = self._revtonode[rev]
498 504
499 505 if self.renamed(node):
500 506 return len(self.read(node))
501 507
502 508 return len(self.revision(node))
503 509
504 510 def revision(self, node, raw=False, _verifyhash=True):
505 511 if node in (nullid, nullrev):
506 512 return b''
507 513
508 514 if isinstance(node, int):
509 515 node = self.node(node)
510 516
511 517 if node not in self._nodetorev:
512 518 raise error.LookupError(node, self._path, _('no node'))
513 519
514 520 if node in self._revisioncache:
515 521 return self._revisioncache[node]
516 522
517 523 # Because we have a fulltext revision cache, we are able to
518 524 # short-circuit delta chain traversal and decompression as soon as
519 525 # we encounter a revision in the cache.
520 526
521 527 stoprids = {self._revisions[n].rid: n
522 528 for n in self._revisioncache}
523 529
524 530 if not stoprids:
525 531 stoprids[-1] = None
526 532
527 533 fulltext = resolvedeltachain(self._db, self._pathid, node,
528 534 self._revisioncache, stoprids,
529 535 zstddctx=self._dctx)
530 536
531 537 if _verifyhash:
532 538 self._checkhash(fulltext, node)
533 539 self._revisioncache[node] = fulltext
534 540
535 541 return fulltext
536 542
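The ``stoprids`` mapping built above translates cached nodes into fileindex row ids so the recursive query can stop as soon as a chain reaches a revision whose fulltext is already cached; the ``{-1: None}`` sentinel simply matches no row. The cache itself is a small LRU keyed by node, e.g.:

    from mercurial import util

    cache = util.lrucachedict(10)    # same shape as self._revisioncache
    cache[b'\x00' * 20] = b'some fulltext'
    assert (b'\x00' * 20) in cache   # reads via [] also refresh recency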
537 543 def read(self, node):
538 544 return storageutil.filtermetadata(self.revision(node))
539 545
540 546 def renamed(self, node):
541 547 return storageutil.filerevisioncopied(self, node)
542 548
543 549 def cmp(self, node, fulltext):
544 550 return not storageutil.filedataequivalent(self, node, fulltext)
545 551
546 552 def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
547 553 assumehaveparentrevisions=False, deltaprevious=False):
548 554 if nodesorder not in ('nodes', 'storage', None):
549 555 raise error.ProgrammingError('unhandled value for nodesorder: %s' %
550 556 nodesorder)
551 557
552 558 nodes = [n for n in nodes if n != nullid]
553 559
554 560 if not nodes:
555 561 return
556 562
557 563 # TODO perform in a single query.
558 564 res = self._db.execute(
559 565 r'SELECT revnum, deltaid FROM fileindex '
560 566 r'WHERE pathid=? '
561 567 r' AND node in (%s)' % (r','.join([r'?'] * len(nodes))),
562 568 tuple([self._pathid] + nodes))
563 569
564 570 deltabases = {}
565 571
566 572 for rev, deltaid in res:
567 573 res = self._db.execute(
568 574 r'SELECT revnum from fileindex WHERE pathid=? AND deltaid=?',
569 575 (self._pathid, deltaid))
570 576 deltabases[rev] = res.fetchone()[0]
571 577
572 578 # TODO define revdifffn so we can use delta from storage.
573 579 for delta in storageutil.emitrevisions(
574 580 self, nodes, nodesorder, sqliterevisiondelta,
575 581 deltaparentfn=deltabases.__getitem__,
576 582 revisiondata=revisiondata,
577 583 assumehaveparentrevisions=assumehaveparentrevisions,
578 584 deltaprevious=deltaprevious):
579 585
580 586 yield delta
581 587
582 588 # End of ifiledata interface.
583 589
584 590 # Start of ifilemutation interface.
585 591
586 592 def add(self, filedata, meta, transaction, linkrev, p1, p2):
587 593 if meta or filedata.startswith(b'\x01\n'):
588 594 filedata = storageutil.packmeta(meta, filedata)
589 595
590 596 return self.addrevision(filedata, transaction, linkrev, p1, p2)
591 597
592 598 def addrevision(self, revisiondata, transaction, linkrev, p1, p2, node=None,
593 599 flags=0, cachedelta=None):
594 600 if flags:
595 601 raise SQLiteStoreError(_('flags not supported on revisions'))
596 602
597 603 validatehash = node is not None
598 604 node = node or storageutil.hashrevisionsha1(revisiondata, p1, p2)
599 605
600 606 if validatehash:
601 607 self._checkhash(revisiondata, node, p1, p2)
602 608
603 609 if node in self._nodetorev:
604 610 return node
605 611
606 612 node = self._addrawrevision(node, revisiondata, transaction, linkrev,
607 613 p1, p2)
608 614
609 615 self._revisioncache[node] = revisiondata
610 616 return node
611 617
612 618 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
613 619 nodes = []
614 620
615 621 for node, p1, p2, linknode, deltabase, delta, wireflags in deltas:
616 622 storeflags = 0
617 623
618 624 if wireflags & repository.REVISION_FLAG_CENSORED:
619 625 storeflags |= FLAG_CENSORED
620 626
621 627 if wireflags & ~repository.REVISION_FLAG_CENSORED:
622 628 raise SQLiteStoreError('unhandled revision flag')
623 629
624 630 baserev = self.rev(deltabase)
625 631
626 632 # If base is censored, delta must be full replacement in a single
627 633 # patch operation.
628 634 if baserev != nullrev and self.iscensored(baserev):
629 635 hlen = struct.calcsize('>lll')
630 636 oldlen = len(self.revision(deltabase, raw=True,
631 637 _verifyhash=False))
632 638 newlen = len(delta) - hlen
633 639
634 640 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
635 641 raise error.CensoredBaseError(self._path,
636 642 deltabase)
637 643
638 644 if (not (storeflags & FLAG_CENSORED)
639 645 and storageutil.deltaiscensored(
640 646 delta, baserev, lambda x: len(self.revision(x, raw=True)))):
641 647 storeflags |= FLAG_CENSORED
642 648
643 649 linkrev = linkmapper(linknode)
644 650
645 651 nodes.append(node)
646 652
647 653 if node in self._revisions:
648 654 continue
649 655
650 656 if deltabase == nullid:
651 657 text = mdiff.patch(b'', delta)
652 658 storedelta = None
653 659 else:
654 660 text = None
655 661 storedelta = (deltabase, delta)
656 662
657 663 self._addrawrevision(node, text, transaction, linkrev, p1, p2,
658 664 storedelta=storedelta, flags=storeflags)
659 665
660 666 if addrevisioncb:
661 667 addrevisioncb(self, node)
662 668
663 669 return nodes
664 670
665 671 def censorrevision(self, tr, censornode, tombstone=b''):
666 672 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
667 673
668 674 # This restriction is cargo culted from revlogs and makes no sense for
669 675 # SQLite, since columns can be resized at will.
670 676 if len(tombstone) > len(self.revision(censornode, raw=True)):
671 677 raise error.Abort(_('censor tombstone must be no longer than '
672 678 'censored data'))
673 679
674 680 # We need to replace the censored revision's data with the tombstone.
675 681 # But replacing that data will have implications for delta chains that
676 682 # reference it.
677 683 #
678 684 # While "better," more complex strategies are possible, we do something
679 685 # simple: we find delta chain children of the censored revision and we
680 686 # replace those incremental deltas with fulltexts of their corresponding
681 687 # revision. Then we delete the now-unreferenced delta and original
682 688 # revision and insert a replacement.
683 689
684 690 # Find the delta to be censored.
685 691 censoreddeltaid = self._db.execute(
686 692 r'SELECT deltaid FROM fileindex WHERE id=?',
687 693 (self._revisions[censornode].rid,)).fetchone()[0]
688 694
689 695 # Find all its delta chain children.
690 696 # TODO once we support storing deltas for !files, we'll need to look
691 697 # for those delta chains too.
692 698 rows = list(self._db.execute(
693 699 r'SELECT id, pathid, node FROM fileindex '
694 700 r'WHERE deltabaseid=? OR deltaid=?',
695 701 (censoreddeltaid, censoreddeltaid)))
696 702
697 703 for row in rows:
698 704 rid, pathid, node = row
699 705
700 706 fulltext = resolvedeltachain(self._db, pathid, node, {}, {-1: None},
701 707 zstddctx=self._dctx)
702 708
703 709 deltahash = hashlib.sha1(fulltext).digest()
704 710
705 711 if self._compengine == 'zstd':
706 712 deltablob = self._cctx.compress(fulltext)
707 713 compression = COMPRESSION_ZSTD
708 714 elif self._compengine == 'zlib':
709 715 deltablob = zlib.compress(fulltext)
710 716 compression = COMPRESSION_ZLIB
711 717 elif self._compengine == 'none':
712 718 deltablob = fulltext
713 719 compression = COMPRESSION_NONE
714 720 else:
715 721 raise error.ProgrammingError('unhandled compression engine: %s'
716 722 % self._compengine)
717 723
718 724 if len(deltablob) >= len(fulltext):
719 725 deltablob = fulltext
720 726 compression = COMPRESSION_NONE
721 727
722 728 deltaid = insertdelta(self._db, compression, deltahash, deltablob)
723 729
724 730 self._db.execute(
725 731 r'UPDATE fileindex SET deltaid=?, deltabaseid=NULL '
726 732 r'WHERE id=?', (deltaid, rid))
727 733
728 734 # Now create the tombstone delta and replace the delta on the censored
729 735 # node.
730 736 deltahash = hashlib.sha1(tombstone).digest()
731 737 tombstonedeltaid = insertdelta(self._db, COMPRESSION_NONE,
732 738 deltahash, tombstone)
733 739
734 740 flags = self._revisions[censornode].flags
735 741 flags |= FLAG_CENSORED
736 742
737 743 self._db.execute(
738 744 r'UPDATE fileindex SET flags=?, deltaid=?, deltabaseid=NULL '
739 745 r'WHERE pathid=? AND node=?',
740 746 (flags, tombstonedeltaid, self._pathid, censornode))
741 747
742 748 self._db.execute(
743 749 r'DELETE FROM delta WHERE id=?', (censoreddeltaid,))
744 750
745 751 self._refreshindex()
746 752 self._revisioncache.clear()
747 753
748 754 def getstrippoint(self, minlink):
749 755 return storageutil.resolvestripinfo(minlink, len(self) - 1,
750 756 [self.rev(n) for n in self.heads()],
751 757 self.linkrev,
752 758 self.parentrevs)
753 759
754 760 def strip(self, minlink, transaction):
755 761 if not len(self):
756 762 return
757 763
758 764 rev, _ignored = self.getstrippoint(minlink)
759 765
760 766 if rev == len(self):
761 767 return
762 768
763 769 for rev in self.revs(rev):
764 770 self._db.execute(
765 771 r'DELETE FROM fileindex WHERE pathid=? AND node=?',
766 772 (self._pathid, self.node(rev)))
767 773
768 774 # TODO how should we garbage collect data in delta table?
769 775
770 776 self._refreshindex()
771 777
772 778 # End of ifilemutation interface.
773 779
774 780 # Start of ifilestorage interface.
775 781
776 782 def files(self):
777 783 return []
778 784
779 785 def storageinfo(self, exclusivefiles=False, sharedfiles=False,
780 786 revisionscount=False, trackedsize=False,
781 787 storedsize=False):
782 788 d = {}
783 789
784 790 if exclusivefiles:
785 791 d['exclusivefiles'] = []
786 792
787 793 if sharedfiles:
788 794 # TODO list sqlite file(s) here.
789 795 d['sharedfiles'] = []
790 796
791 797 if revisionscount:
792 798 d['revisionscount'] = len(self)
793 799
794 800 if trackedsize:
795 801 d['trackedsize'] = sum(len(self.revision(node))
796 802 for node in self._nodetorev)
797 803
798 804 if storedsize:
799 805 # TODO implement this?
800 806 d['storedsize'] = None
801 807
802 808 return d
803 809
804 810 def verifyintegrity(self, state):
805 811 state['skipread'] = set()
806 812
807 813 for rev in self:
808 814 node = self.node(rev)
809 815
810 816 try:
811 817 self.revision(node)
812 818 except Exception as e:
813 819 yield sqliteproblem(
814 820 error=_('unpacking %s: %s') % (short(node), e),
815 821 node=node)
816 822
817 823 state['skipread'].add(node)
818 824
819 825 # End of ifilestorage interface.
820 826
821 827 def _checkhash(self, fulltext, node, p1=None, p2=None):
822 828 if p1 is None and p2 is None:
823 829 p1, p2 = self.parents(node)
824 830
825 831 if node == storageutil.hashrevisionsha1(fulltext, p1, p2):
826 832 return
827 833
828 834 try:
829 835 del self._revisioncache[node]
830 836 except KeyError:
831 837 pass
832 838
833 839 if storageutil.iscensoredtext(fulltext):
834 840 raise error.CensoredNodeError(self._path, node, fulltext)
835 841
836 842 raise SQLiteStoreError(_('integrity check failed on %s') %
837 843 self._path)
838 844
839 845 def _addrawrevision(self, node, revisiondata, transaction, linkrev,
840 846 p1, p2, storedelta=None, flags=0):
841 847 if self._pathid is None:
842 848 res = self._db.execute(
843 849 r'INSERT INTO filepath (path) VALUES (?)', (self._path,))
844 850 self._pathid = res.lastrowid
845 851
846 852 # For simplicity, always store a delta against p1.
847 853 # TODO we need a lot more logic here to make behavior reasonable.
848 854
849 855 if storedelta:
850 856 deltabase, delta = storedelta
851 857
852 858 if isinstance(deltabase, int):
853 859 deltabase = self.node(deltabase)
854 860
855 861 else:
856 862 assert revisiondata is not None
857 863 deltabase = p1
858 864
859 865 if deltabase == nullid:
860 866 delta = revisiondata
861 867 else:
862 868 delta = mdiff.textdiff(self.revision(self.rev(deltabase)),
863 869 revisiondata)
864 870
865 871 # File index stores a pointer to its delta and the parent delta.
866 872 # The parent delta is stored via a pointer to the fileindex PK.
867 873 if deltabase == nullid:
868 874 baseid = None
869 875 else:
870 876 baseid = self._revisions[deltabase].rid
871 877
872 878 # Deltas are stored with a hash of their content. This allows
873 879 # us to de-duplicate. The table is configured to ignore conflicts
874 880 # and it is faster to just insert and silently noop than to look
875 881 # first.
876 882 deltahash = hashlib.sha1(delta).digest()
877 883
878 884 if self._compengine == 'zstd':
879 885 deltablob = self._cctx.compress(delta)
880 886 compression = COMPRESSION_ZSTD
881 887 elif self._compengine == 'zlib':
882 888 deltablob = zlib.compress(delta)
883 889 compression = COMPRESSION_ZLIB
884 890 elif self._compengine == 'none':
885 891 deltablob = delta
886 892 compression = COMPRESSION_NONE
887 893 else:
888 894 raise error.ProgrammingError('unhandled compression engine: %s' %
889 895 self._compengine)
890 896
891 897 # Don't store compressed data if it isn't practical.
892 898 if len(deltablob) >= len(delta):
893 899 deltablob = delta
894 900 compression = COMPRESSION_NONE
895 901
896 902 deltaid = insertdelta(self._db, compression, deltahash, deltablob)
897 903
898 904 rev = len(self)
899 905
900 906 if p1 == nullid:
901 907 p1rev = nullrev
902 908 else:
903 909 p1rev = self._nodetorev[p1]
904 910
905 911 if p2 == nullid:
906 912 p2rev = nullrev
907 913 else:
908 914 p2rev = self._nodetorev[p2]
909 915
910 916 rid = self._db.execute(
911 917 r'INSERT INTO fileindex ('
912 918 r' pathid, revnum, node, p1rev, p2rev, linkrev, flags, '
913 919 r' deltaid, deltabaseid) '
914 920 r' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)',
915 921 (self._pathid, rev, node, p1rev, p2rev, linkrev, flags,
916 922 deltaid, baseid)
917 923 ).lastrowid
918 924
919 925 entry = revisionentry(
920 926 rid=rid,
921 927 rev=rev,
922 928 node=node,
923 929 p1rev=p1rev,
924 930 p2rev=p2rev,
925 931 p1node=p1,
926 932 p2node=p2,
927 933 linkrev=linkrev,
928 934 flags=flags)
929 935
930 936 self._nodetorev[node] = rev
931 937 self._revtonode[rev] = node
932 938 self._revisions[node] = entry
933 939
934 940 return node
935 941
936 942 class sqliterepository(localrepo.localrepository):
937 943 def cancopy(self):
938 944 return False
939 945
940 946 def transaction(self, *args, **kwargs):
941 947 current = self.currenttransaction()
942 948
943 949 tr = super(sqliterepository, self).transaction(*args, **kwargs)
944 950
945 951 if current:
946 952 return tr
947 953
948 954 self._dbconn.execute(r'BEGIN TRANSACTION')
949 955
950 956 def committransaction(_):
951 957 self._dbconn.commit()
952 958
953 959 tr.addfinalize('sqlitestore', committransaction)
954 960
955 961 return tr
956 962
957 963 @property
958 964 def _dbconn(self):
959 965 # SQLite connections can only be used on the thread that created
960 966 # them. In most cases, this "just works." However, hgweb uses
961 967 # multiple threads.
962 968 tid = threading.current_thread().ident
963 969
964 970 if self._db:
965 971 if self._db[0] == tid:
966 972 return self._db[1]
967 973
968 974 db = makedb(self.svfs.join('db.sqlite'))
969 975 self._db = (tid, db)
970 976
971 977 return db
972 978
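The ``(tid, db)`` tuple implements one-connection-per-thread by hand. A generic sketch of the same idea using ``threading.local`` (an alternative pattern, not what this property does):

    import sqlite3
    import threading

    _local = threading.local()

    def getconn(path):
        # sqlite3 connections may only be used on the thread that
        # created them (by default), so cache one per thread.
        if getattr(_local, 'db', None) is None:
            _local.db = sqlite3.connect(path)
        return _local.db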
973 979 def makedb(path):
974 980 """Construct a database handle for a database at path."""
975 981
976 982 db = sqlite3.connect(path)
977 983 db.text_factory = bytes
978 984
979 985 res = db.execute(r'PRAGMA user_version').fetchone()[0]
980 986
981 987 # New database.
982 988 if res == 0:
983 989 for statement in CREATE_SCHEMA:
984 990 db.execute(statement)
985 991
986 992 db.commit()
987 993
988 994 elif res == CURRENT_SCHEMA_VERSION:
989 995 pass
990 996
991 997 else:
992 998 raise error.Abort(_('sqlite database has unrecognized version'))
993 999
994 1000 db.execute(r'PRAGMA journal_mode=WAL')
995 1001
996 1002 return db
997 1003
998 1004 def featuresetup(ui, supported):
999 1005 supported.add(REQUIREMENT)
1000 1006
1001 1007 if zstd:
1002 1008 supported.add(REQUIREMENT_ZSTD)
1003 1009
1004 1010 supported.add(REQUIREMENT_ZLIB)
1005 1011 supported.add(REQUIREMENT_NONE)
1006 1012
1007 1013 def newreporequirements(orig, ui, createopts):
1008 1014 if createopts['backend'] != 'sqlite':
1009 1015 return orig(ui, createopts)
1010 1016
1011 1017 # This restriction can be lifted once we have more confidence.
1012 1018 if 'sharedrepo' in createopts:
1013 1019 raise error.Abort(_('shared repositories not supported with SQLite '
1014 1020 'store'))
1015 1021
1016 1022 # This filtering is out of an abundance of caution: we want to ensure
1017 1023 # we honor creation options and we do that by annotating exactly the
1018 1024 # creation options we recognize.
1019 1025 known = {
1020 1026 'narrowfiles',
1021 1027 'backend',
1022 1028 }
1023 1029
1024 1030 unsupported = set(createopts) - known
1025 1031 if unsupported:
1026 1032 raise error.Abort(_('SQLite store does not support repo creation '
1027 1033 'option: %s') % ', '.join(sorted(unsupported)))
1028 1034
1029 1035 # Since we're a hybrid store that still relies on revlogs, we fall back
1030 1036 # to using the revlogv1 backend's storage requirements then adding our
1031 1037 # own requirement.
1032 1038 createopts['backend'] = 'revlogv1'
1033 1039 requirements = orig(ui, createopts)
1034 1040 requirements.add(REQUIREMENT)
1035 1041
1036 1042 compression = ui.config('storage', 'sqlite.compression')
1037 1043
1038 1044 if compression == 'zstd' and not zstd:
1039 1045 raise error.Abort(_('storage.sqlite.compression set to "zstd" but '
1040 1046 'zstandard compression not available to this '
1041 1047 'Mercurial install'))
1042 1048
1043 1049 if compression == 'zstd':
1044 1050 requirements.add(REQUIREMENT_ZSTD)
1045 1051 elif compression == 'zlib':
1046 1052 requirements.add(REQUIREMENT_ZLIB)
1047 1053 elif compression == 'none':
1048 1054 requirements.add(REQUIREMENT_NONE)
1049 1055 else:
1050 1056 raise error.Abort(_('unknown compression engine defined in '
1051 1057 'storage.sqlite.compression: %s') % compression)
1052 1058
1053 1059 return requirements
1054 1060
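The net effect on a new repository's ``.hg/requires`` is the usual revlogv1 entries plus two from this function, e.g. for the default configuration with zstd available:

    exp-sqlite-001
    exp-sqlite-comp-001=zstd

``featuresetup()`` above registers the same strings as supported, which is how a matching Mercurial/extension pair is able to open the repository later.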
1055 1061 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
1056 1062 class sqlitefilestorage(object):
1057 1063 """Repository file storage backed by SQLite."""
1058 1064 def file(self, path):
1059 1065 if path[0] == b'/':
1060 1066 path = path[1:]
1061 1067
1062 1068 if REQUIREMENT_ZSTD in self.requirements:
1063 1069 compression = 'zstd'
1064 1070 elif REQUIREMENT_ZLIB in self.requirements:
1065 1071 compression = 'zlib'
1066 1072 elif REQUIREMENT_NONE in self.requirements:
1067 1073 compression = 'none'
1068 1074 else:
1069 1075 raise error.Abort(_('unable to determine what compression engine '
1070 1076 'to use for SQLite storage'))
1071 1077
1072 1078 return sqlitefilestore(self._dbconn, path, compression)
1073 1079
1074 1080 def makefilestorage(orig, requirements, **kwargs):
1075 1081 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
1076 1082 if REQUIREMENT in requirements:
1077 1083 return sqlitefilestorage
1078 1084 else:
1079 1085 return orig(requirements=requirements, **kwargs)
1080 1086
1081 1087 def makemain(orig, ui, requirements, **kwargs):
1082 1088 if REQUIREMENT in requirements:
1083 1089 if REQUIREMENT_ZSTD in requirements and not zstd:
1084 1090 raise error.Abort(_('repository uses zstandard compression, which '
1085 1091 'is not available to this Mercurial install'))
1086 1092
1087 1093 return sqliterepository
1088 1094
1089 1095 return orig(requirements=requirements, **kwargs)
1090 1096
1091 1097 def verifierinit(orig, self, *args, **kwargs):
1092 1098 orig(self, *args, **kwargs)
1093 1099
1094 1100 # We don't care that files in the store don't align with what is
1095 1101 # advertised. So suppress these warnings.
1096 1102 self.warnorphanstorefiles = False
1097 1103
1098 1104 def extsetup(ui):
1099 1105 localrepo.featuresetupfuncs.add(featuresetup)
1100 1106 extensions.wrapfunction(localrepo, 'newreporequirements',
1101 1107 newreporequirements)
1102 1108 extensions.wrapfunction(localrepo, 'makefilestorage',
1103 1109 makefilestorage)
1104 1110 extensions.wrapfunction(localrepo, 'makemain',
1105 1111 makemain)
1106 1112 extensions.wrapfunction(verify.verifier, '__init__',
1107 1113 verifierinit)
1108 1114
1109 1115 def reposetup(ui, repo):
1110 1116 if isinstance(repo, sqliterepository):
1111 1117 repo._db = None
1112 1118
1113 1119 # TODO check for bundlerepository?
@@ -1,219 +1,233
1 1 # filelog.py - file history class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 from .node import (
11 nullid,
12 nullrev,
13 )
10 14 from . import (
11 15 error,
12 16 repository,
13 17 revlog,
14 18 )
15 19 from .utils import (
16 20 interfaceutil,
17 21 storageutil,
18 22 )
19 23
20 24 @interfaceutil.implementer(repository.ifilestorage)
21 25 class filelog(object):
22 26 def __init__(self, opener, path):
23 27 self._revlog = revlog.revlog(opener,
24 28 '/'.join(('data', path + '.i')),
25 29 censorable=True)
26 30 # Full name of the user visible file, relative to the repository root.
27 31 # Used by LFS.
28 32 self._revlog.filename = path
29 33
30 34 def __len__(self):
31 35 return len(self._revlog)
32 36
33 37 def __iter__(self):
34 38 return self._revlog.__iter__()
35 39
40 def hasnode(self, node):
41 if node in (nullid, nullrev):
42 return False
43
44 try:
45 self._revlog.rev(node)
46 return True
47 except (TypeError, ValueError, IndexError, error.LookupError):
48 return False
49
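This is the method the commit adds. Callers previously had to probe with ``rev()`` and catch the miss themselves; ``hasnode()`` wraps that pattern, including the extra exception types ``revlog.rev()`` can raise for malformed input. A caller-side sketch (``fl`` and ``node`` are hypothetical):

    # old idiom:
    try:
        fl.rev(node)
        present = True
    except error.LookupError:
        present = False

    # new API:
    present = fl.hasnode(node)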
36 50 def revs(self, start=0, stop=None):
37 51 return self._revlog.revs(start=start, stop=stop)
38 52
39 53 def parents(self, node):
40 54 return self._revlog.parents(node)
41 55
42 56 def parentrevs(self, rev):
43 57 return self._revlog.parentrevs(rev)
44 58
45 59 def rev(self, node):
46 60 return self._revlog.rev(node)
47 61
48 62 def node(self, rev):
49 63 return self._revlog.node(rev)
50 64
51 65 def lookup(self, node):
52 66 return storageutil.fileidlookup(self._revlog, node,
53 67 self._revlog.indexfile)
54 68
55 69 def linkrev(self, rev):
56 70 return self._revlog.linkrev(rev)
57 71
58 72 def commonancestorsheads(self, node1, node2):
59 73 return self._revlog.commonancestorsheads(node1, node2)
60 74
61 75 # Used by dagop.blockdescendants().
62 76 def descendants(self, revs):
63 77 return self._revlog.descendants(revs)
64 78
65 79 def heads(self, start=None, stop=None):
66 80 return self._revlog.heads(start, stop)
67 81
68 82 # Used by hgweb, children extension.
69 83 def children(self, node):
70 84 return self._revlog.children(node)
71 85
72 86 def iscensored(self, rev):
73 87 return self._revlog.iscensored(rev)
74 88
75 89 def revision(self, node, _df=None, raw=False):
76 90 return self._revlog.revision(node, _df=_df, raw=raw)
77 91
78 92 def emitrevisions(self, nodes, nodesorder=None,
79 93 revisiondata=False, assumehaveparentrevisions=False,
80 94 deltaprevious=False):
81 95 return self._revlog.emitrevisions(
82 96 nodes, nodesorder=nodesorder, revisiondata=revisiondata,
83 97 assumehaveparentrevisions=assumehaveparentrevisions,
84 98 deltaprevious=deltaprevious)
85 99
86 100 def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
87 101 node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
88 102 cachedelta=None):
89 103 return self._revlog.addrevision(revisiondata, transaction, linkrev,
90 104 p1, p2, node=node, flags=flags,
91 105 cachedelta=cachedelta)
92 106
93 107 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
94 108 return self._revlog.addgroup(deltas, linkmapper, transaction,
95 109 addrevisioncb=addrevisioncb)
96 110
97 111 def getstrippoint(self, minlink):
98 112 return self._revlog.getstrippoint(minlink)
99 113
100 114 def strip(self, minlink, transaction):
101 115 return self._revlog.strip(minlink, transaction)
102 116
103 117 def censorrevision(self, tr, node, tombstone=b''):
104 118 return self._revlog.censorrevision(tr, node, tombstone=tombstone)
105 119
106 120 def files(self):
107 121 return self._revlog.files()
108 122
109 123 def read(self, node):
110 124 return storageutil.filtermetadata(self.revision(node))
111 125
112 126 def add(self, text, meta, transaction, link, p1=None, p2=None):
113 127 if meta or text.startswith('\1\n'):
114 128 text = storageutil.packmeta(meta, text)
115 129 return self.addrevision(text, transaction, link, p1, p2)
116 130
117 131 def renamed(self, node):
118 132 return storageutil.filerevisioncopied(self, node)
119 133
120 134 def size(self, rev):
121 135 """return the size of a given revision"""
122 136
123 137 # for revisions with renames, we have to go the slow way
124 138 node = self.node(rev)
125 139 if self.renamed(node):
126 140 return len(self.read(node))
127 141 if self.iscensored(rev):
128 142 return 0
129 143
130 144 # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
131 145 return self._revlog.size(rev)
132 146
133 147 def cmp(self, node, text):
134 148 """compare text with a given file revision
135 149
136 150 returns True if text is different from what is stored.
137 151 """
138 152 return not storageutil.filedataequivalent(self, node, text)
139 153
140 154 def verifyintegrity(self, state):
141 155 return self._revlog.verifyintegrity(state)
142 156
143 157 def storageinfo(self, exclusivefiles=False, sharedfiles=False,
144 158 revisionscount=False, trackedsize=False,
145 159 storedsize=False):
146 160 return self._revlog.storageinfo(
147 161 exclusivefiles=exclusivefiles, sharedfiles=sharedfiles,
148 162 revisionscount=revisionscount, trackedsize=trackedsize,
149 163 storedsize=storedsize)
150 164
151 165 # TODO these aren't part of the interface and aren't internal methods.
152 166 # Callers should be fixed to not use them.
153 167
154 168 # Used by bundlefilelog, unionfilelog.
155 169 @property
156 170 def indexfile(self):
157 171 return self._revlog.indexfile
158 172
159 173 @indexfile.setter
160 174 def indexfile(self, value):
161 175 self._revlog.indexfile = value
162 176
163 177 # Used by repo upgrade.
164 178 def clone(self, tr, destrevlog, **kwargs):
165 179 if not isinstance(destrevlog, filelog):
166 180 raise error.ProgrammingError('expected filelog to clone()')
167 181
168 182 return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
169 183
170 184 class narrowfilelog(filelog):
171 185 """Filelog variation to be used with narrow stores."""
172 186
173 187 def __init__(self, opener, path, narrowmatch):
174 188 super(narrowfilelog, self).__init__(opener, path)
175 189 self._narrowmatch = narrowmatch
176 190
177 191 def renamed(self, node):
178 192 res = super(narrowfilelog, self).renamed(node)
179 193
180 194 # Renames that come from outside the narrowspec are problematic
181 195 # because we may lack the base text for the rename. This can result
182 196 # in code attempting to walk the ancestry or compute a diff
183 197 # encountering a missing revision. We address this by silently
184 198 # removing rename metadata if the source file is outside the
185 199 # narrow spec.
186 200 #
187 201 # A better solution would be to see if the base revision is available,
188 202 # rather than assuming it isn't.
189 203 #
190 204 # An even better solution would be to teach all consumers of rename
191 205 # metadata that the base revision may not be available.
192 206 #
193 207 # TODO consider better ways of doing this.
194 208 if res and not self._narrowmatch(res[0]):
195 209 return None
196 210
197 211 return res
198 212
199 213 def size(self, rev):
200 214 # Because we have a custom renamed() that may lie, we need to call
201 215 # the base renamed() to report accurate results.
202 216 node = self.node(rev)
203 217 if super(narrowfilelog, self).renamed(node):
204 218 return len(self.read(node))
205 219 else:
206 220 return super(narrowfilelog, self).size(rev)
207 221
208 222 def cmp(self, node, text):
209 223 different = super(narrowfilelog, self).cmp(node, text)
210 224
211 225 # Because renamed() may lie, we may get false positives for
212 226 # different content. Check for this by comparing against the original
213 227 # renamed() implementation.
214 228 if different:
215 229 if super(narrowfilelog, self).renamed(node):
216 230 t2 = self.read(node)
217 231 return t2 != text
218 232
219 233 return different
@@ -1,1835 +1,1845
1 1 # repository.py - Interfaces and base classes for repositories and peers.
2 2 #
3 3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from .i18n import _
11 11 from . import (
12 12 error,
13 13 )
14 14 from .utils import (
15 15 interfaceutil,
16 16 )
17 17
18 18 # When narrowing is finalized and no longer subject to format changes,
19 19 # we should move this to just "narrow" or similar.
20 20 NARROW_REQUIREMENT = 'narrowhg-experimental'
21 21
22 22 # Local repository feature string.
23 23
24 24 # Revlogs are being used for file storage.
25 25 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
26 26 # The storage part of the repository is shared from an external source.
27 27 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
28 28 # LFS supported for backing file storage.
29 29 REPO_FEATURE_LFS = b'lfs'
30 30 # Repository supports being stream cloned.
31 31 REPO_FEATURE_STREAM_CLONE = b'streamclone'
32 32
33 33 REVISION_FLAG_CENSORED = 1 << 15
34 34 REVISION_FLAG_ELLIPSIS = 1 << 14
35 35 REVISION_FLAG_EXTSTORED = 1 << 13
36 36
37 37 REVISION_FLAGS_KNOWN = (
38 38 REVISION_FLAG_CENSORED | REVISION_FLAG_ELLIPSIS | REVISION_FLAG_EXTSTORED)
39 39
40 40 class ipeerconnection(interfaceutil.Interface):
41 41 """Represents a "connection" to a repository.
42 42
43 43 This is the base interface for representing a connection to a repository.
44 44 It holds basic properties and methods applicable to all peer types.
45 45
46 46 This is not a complete interface definition and should not be used
47 47 outside of this module.
48 48 """
49 49 ui = interfaceutil.Attribute("""ui.ui instance""")
50 50
51 51 def url():
52 52 """Returns a URL string representing this peer.
53 53
54 54 Currently, implementations expose the raw URL used to construct the
55 55 instance. It may contain credentials as part of the URL. The
56 56 expectations of the value aren't well-defined and this could lead to
57 57 data leakage.
58 58
59 59 TODO audit/clean consumers and more clearly define the contents of this
60 60 value.
61 61 """
62 62
63 63 def local():
64 64 """Returns a local repository instance.
65 65
66 66 If the peer represents a local repository, returns an object that
67 67 can be used to interface with it. Otherwise returns ``None``.
68 68 """
69 69
70 70 def peer():
71 71 """Returns an object conforming to this interface.
72 72
73 73 Most implementations will ``return self``.
74 74 """
75 75
76 76 def canpush():
77 77 """Returns a boolean indicating if this peer can be pushed to."""
78 78
79 79 def close():
80 80 """Close the connection to this peer.
81 81
82 82 This is called when the peer will no longer be used. Resources
83 83 associated with the peer should be cleaned up.
84 84 """
85 85
86 86 class ipeercapabilities(interfaceutil.Interface):
87 87 """Peer sub-interface related to capabilities."""
88 88
89 89 def capable(name):
90 90 """Determine support for a named capability.
91 91
92 92 Returns ``False`` if capability not supported.
93 93
94 94 Returns ``True`` if boolean capability is supported. Returns a string
95 95 if capability support is non-boolean.
96 96
97 97 Capability strings may or may not map to wire protocol capabilities.
98 98 """
99 99
100 100 def requirecap(name, purpose):
101 101 """Require a capability to be present.
102 102
103 103 Raises a ``CapabilityError`` if the capability isn't present.
104 104 """
105 105
106 106 class ipeercommands(interfaceutil.Interface):
107 107 """Client-side interface for communicating over the wire protocol.
108 108
109 109 This interface is used as a gateway to the Mercurial wire protocol.
110 110 Methods commonly call wire protocol commands of the same name.
111 111 """
112 112
113 113 def branchmap():
114 114 """Obtain heads in named branches.
115 115
116 116 Returns a dict mapping branch name to an iterable of nodes that are
117 117 heads on that branch.
118 118 """
119 119
120 120 def capabilities():
121 121 """Obtain capabilities of the peer.
122 122
123 123 Returns a set of string capabilities.
124 124 """
125 125
126 126 def clonebundles():
127 127 """Obtains the clone bundles manifest for the repo.
128 128
129 129 Returns the manifest as unparsed bytes.
130 130 """
131 131
132 132 def debugwireargs(one, two, three=None, four=None, five=None):
133 133 """Used to facilitate debugging of arguments passed over the wire."""
134 134
135 135 def getbundle(source, **kwargs):
136 136 """Obtain remote repository data as a bundle.
137 137
138 138 This command is how the bulk of repository data is transferred from
139 139 the peer to the local repository.
140 140
141 141 Returns a generator of bundle data.
142 142 """
143 143
144 144 def heads():
145 145 """Determine all known head revisions in the peer.
146 146
147 147 Returns an iterable of binary nodes.
148 148 """
149 149
150 150 def known(nodes):
151 151 """Determine whether multiple nodes are known.
152 152
153 153 Accepts an iterable of nodes whose presence to check for.
154 154
155 155 Returns an iterable of booleans indicating whether the corresponding node
156 156 at that index is known to the peer.
157 157 """
158 158
159 159 def listkeys(namespace):
160 160 """Obtain all keys in a pushkey namespace.
161 161
162 162 Returns an iterable of key names.
163 163 """
164 164
165 165 def lookup(key):
166 166 """Resolve a value to a known revision.
167 167
168 168 Returns a binary node of the resolved revision on success.
169 169 """
170 170
171 171 def pushkey(namespace, key, old, new):
172 172 """Set a value using the ``pushkey`` protocol.
173 173
174 174 Arguments correspond to the pushkey namespace and key to operate on and
175 175 the old and new values for that key.
176 176
177 177 Returns a string with the peer result. The value inside varies by the
178 178 namespace.
179 179 """
180 180
181 181 def stream_out():
182 182 """Obtain streaming clone data.
183 183
184 184 Successful result should be a generator of data chunks.
185 185 """
186 186
187 187 def unbundle(bundle, heads, url):
188 188 """Transfer repository data to the peer.
189 189
190 190 This is how the bulk of data during a push is transferred.
191 191
192 192 Returns the integer number of heads added to the peer.
193 193 """
194 194
195 195 class ipeerlegacycommands(interfaceutil.Interface):
196 196 """Interface for implementing support for legacy wire protocol commands.
197 197
198 198 Wire protocol commands transition to legacy status when they are no longer
199 199 used by modern clients. To facilitate identifying which commands are
200 200 legacy, the interfaces are split.
201 201 """
202 202
203 203 def between(pairs):
204 204 """Obtain nodes between pairs of nodes.
205 205
206 206 ``pairs`` is an iterable of node pairs.
207 207
208 208 Returns an iterable of iterables of nodes corresponding to each
209 209 requested pair.
210 210 """
211 211
212 212 def branches(nodes):
213 213 """Obtain ancestor changesets of specific nodes back to a branch point.
214 214
215 215 For each requested node, the peer finds the first ancestor node that is
216 216 a DAG root or is a merge.
217 217
218 218 Returns an iterable of iterables with the resolved values for each node.
219 219 """
220 220
221 221 def changegroup(nodes, source):
222 222 """Obtain a changegroup with data for descendants of specified nodes."""
223 223
224 224 def changegroupsubset(bases, heads, source):
225 225 pass
226 226
227 227 class ipeercommandexecutor(interfaceutil.Interface):
228 228 """Represents a mechanism to execute remote commands.
229 229
230 230 This is the primary interface for requesting that wire protocol commands
231 231 be executed. Instances of this interface are active in a context manager
232 232 and have a well-defined lifetime. When the context manager exits, all
233 233 outstanding requests are waited on.
234 234 """
235 235
236 236 def callcommand(name, args):
237 237 """Request that a named command be executed.
238 238
239 239 Receives the command name and a dictionary of command arguments.
240 240
241 241 Returns a ``concurrent.futures.Future`` that will resolve to the
242 242 result of that command request. That exact value is left up to
243 243 the implementation and possibly varies by command.
244 244
245 245 Not all commands can coexist with other commands in an executor
246 246 instance: it depends on the underlying wire protocol transport being
247 247 used and the command itself.
248 248
249 249 Implementations MAY call ``sendcommands()`` automatically if the
251 251 requested command cannot coexist with other commands in this executor.
251 251
252 252 Implementations MAY call ``sendcommands()`` automatically when the
253 253 future's ``result()`` is called. So, consumers using multiple
254 254 commands with an executor MUST ensure that ``result()`` is not called
255 255 until all command requests have been issued.
256 256 """
257 257
258 258 def sendcommands():
259 259 """Trigger submission of queued command requests.
260 260
261 261 Not all transports submit commands as soon as they are requested to
262 262 run. When called, this method forces queued command requests to be
263 263 issued. It will no-op if all commands have already been sent.
264 264
265 265 When called, no more new commands may be issued with this executor.
266 266 """
267 267
268 268 def close():
269 269 """Signal that this command request is finished.
270 270
271 271 When called, no more new commands may be issued. All outstanding
272 272 commands that have previously been issued are waited on before
273 273 returning. This not only includes waiting for the futures to resolve,
274 274 but also waiting for all response data to arrive. In other words,
275 275 calling this waits for all on-wire state for issued command requests
276 276 to finish.
277 277
278 278 When used as a context manager, this method is called when exiting the
279 279 context manager.
280 280
281 281 This method may call ``sendcommands()`` if there are buffered commands.
282 282 """
283 283
284 284 class ipeerrequests(interfaceutil.Interface):
285 285 """Interface for executing commands on a peer."""
286 286
287 287 def commandexecutor():
288 288 """A context manager that resolves to an ipeercommandexecutor.
289 289
290 290 The object this resolves to can be used to issue command requests
291 291 to the peer.
292 292
293 293 Callers should call its ``callcommand`` method to issue command
294 294 requests.
295 295
296 296 A new executor should be obtained for each distinct set of commands
297 297 (possibly just a single command) that the consumer wants to execute
298 298 as part of a single operation or round trip. This is because some
299 299 peers are half-duplex and/or don't support persistent connections.
300 300 e.g. in the case of HTTP peers, commands sent to an executor represent
301 301 a single HTTP request. While some peers may support multiple command
302 302 sends over the wire per executor, consumers need to code to the least
303 303 capable peer. So it should be assumed that command executors buffer
304 304 called commands until they are told to send them and that each
305 305 command executor could result in a new connection or wire-level request
306 306 being issued.
307 307 """
308 308
309 309 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
310 310 """Unified interface for peer repositories.
311 311
312 312 All peer instances must conform to this interface.
313 313 """
314 314
315 315 class ipeerv2(ipeerconnection, ipeercapabilities, ipeerrequests):
316 316 """Unified peer interface for wire protocol version 2 peers."""
317 317
318 318 apidescriptor = interfaceutil.Attribute(
319 319 """Data structure holding description of server API.""")
320 320
321 321 @interfaceutil.implementer(ipeerbase)
322 322 class peer(object):
323 323 """Base class for peer repositories."""
324 324
325 325 def capable(self, name):
326 326 caps = self.capabilities()
327 327 if name in caps:
328 328 return True
329 329
330 330 name = '%s=' % name
331 331 for cap in caps:
332 332 if cap.startswith(name):
333 333 return cap[len(name):]
334 334
335 335 return False
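# For illustration (the capability strings here are hypothetical):
# given ``capabilities()`` of {'lookup', 'bundle2=HG20'},
# ``capable('lookup')`` returns True, ``capable('bundle2')`` returns
# 'HG20', and ``capable('unknown')`` returns False.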
336 336
337 337 def requirecap(self, name, purpose):
338 338 if self.capable(name):
339 339 return
340 340
341 341 raise error.CapabilityError(
342 342 _('cannot %s; remote repository does not support the %r '
343 343 'capability') % (purpose, name))
344 344
345 345 class iverifyproblem(interfaceutil.Interface):
346 346 """Represents a problem with the integrity of the repository.
347 347
348 348 Instances of this interface are emitted to describe an integrity issue
349 349 with a repository (e.g. corrupt storage, missing data, etc).
350 350
351 351 Instances are essentially messages associated with severity.
352 352 """
353 353 warning = interfaceutil.Attribute(
354 354 """Message indicating a non-fatal problem.""")
355 355
356 356 error = interfaceutil.Attribute(
357 357 """Message indicating a fatal problem.""")
358 358
359 359 node = interfaceutil.Attribute(
360 360 """Revision encountering the problem.
361 361
362 362 ``None`` means the problem doesn't apply to a single revision.
363 363 """)
364 364
365 365 class irevisiondelta(interfaceutil.Interface):
366 366 """Represents a delta between one revision and another.
367 367
368 368 Instances convey enough information to allow a revision to be exchanged
369 369 with another repository.
370 370
371 371 Instances represent the fulltext revision data or a delta against
372 372 another revision. Therefore the ``revision`` and ``delta`` attributes
373 373 are mutually exclusive.
374 374
375 375 Typically used for changegroup generation.
376 376 """
377 377
378 378 node = interfaceutil.Attribute(
379 379 """20 byte node of this revision.""")
380 380
381 381 p1node = interfaceutil.Attribute(
382 382 """20 byte node of 1st parent of this revision.""")
383 383
384 384 p2node = interfaceutil.Attribute(
385 385 """20 byte node of 2nd parent of this revision.""")
386 386
387 387 linknode = interfaceutil.Attribute(
388 388 """20 byte node of the changelog revision this node is linked to.""")
389 389
390 390 flags = interfaceutil.Attribute(
391 391 """2 bytes of integer flags that apply to this revision.
392 392
393 393 This is a bitwise composition of the ``REVISION_FLAG_*`` constants.
394 394 """)
395 395
396 396 basenode = interfaceutil.Attribute(
397 397 """20 byte node of the revision this data is a delta against.
398 398
399 399 ``nullid`` indicates that the revision is a full revision and not
400 400 a delta.
401 401 """)
402 402
403 403 baserevisionsize = interfaceutil.Attribute(
404 404 """Size of base revision this delta is against.
405 405
406 406 May be ``None`` if ``basenode`` is ``nullid``.
407 407 """)
408 408
409 409 revision = interfaceutil.Attribute(
410 410 """Raw fulltext of revision data for this node.""")
411 411
412 412 delta = interfaceutil.Attribute(
413 413 """Delta between ``basenode`` and ``node``.
414 414
415 415 Stored in the bdiff delta format.
416 416 """)
417 417
418 418 class ifilerevisionssequence(interfaceutil.Interface):
419 419 """Contains index data for all revisions of a file.
420 420
421 421 Types implementing this behave like lists of tuples. The index
422 422 in the list corresponds to the revision number. The values contain
423 423 index metadata.
424 424
425 425 The *null* revision (revision number -1) is always the last item
426 426 in the index.
427 427 """
428 428
429 429 def __len__():
430 430 """The total number of revisions."""
431 431
432 432 def __getitem__(rev):
433 433 """Returns the object having a specific revision number.
434 434
435 435 Returns an 8-tuple with the following fields:
436 436
437 437 offset+flags
438 438 Contains the offset and flags for the revision. 64-bit unsigned
439 439 integer where first 6 bytes are the offset and the next 2 bytes
440 440 are flags. The offset can be 0 if it is not used by the store.
441 441 compressed size
442 442 Size of the revision data in the store. It can be 0 if it isn't
443 443 needed by the store.
444 444 uncompressed size
445 445 Fulltext size. It can be 0 if it isn't needed by the store.
446 446 base revision
447 447 Revision number of revision the delta for storage is encoded
448 448 against. -1 indicates not encoded against a base revision.
449 449 link revision
450 450 Revision number of changelog revision this entry is related to.
451 451 p1 revision
452 452 Revision number of 1st parent. -1 if no 1st parent.
453 453 p2 revision
454 454 Revision number of 2nd parent. -1 if no 2nd parent.
455 455 node
456 456 Binary node value for this revision number.
457 457
458 458 Negative values should index off the end of the sequence. ``-1``
459 459 should return the null revision. ``-2`` should return the most
460 460 recent revision.
461 461 """
462 462
463 463 def __contains__(rev):
464 464 """Whether a revision number exists."""
465 465
466 466 def insert(self, i, entry):
467 467 """Add an item to the index at specific revision."""
468 468
469 469 class ifileindex(interfaceutil.Interface):
470 470 """Storage interface for index data of a single file.
471 471
472 472 File storage data is divided into index metadata and data storage.
473 473 This interface defines the index portion of the interface.
474 474
475 475 The index logically consists of:
476 476
477 477 * A mapping between revision numbers and nodes.
478 478 * DAG data (storing and querying the relationship between nodes).
479 479 * Metadata to facilitate storage.
480 480 """
481 481 def __len__():
482 482 """Obtain the number of revisions stored for this file."""
483 483
484 484 def __iter__():
485 485 """Iterate over revision numbers for this file."""
486 486
487 def hasnode(node):
488 """Returns a bool indicating if a node is known to this store.
489
490 Implementations must only return True for full, binary node values:
491 hex nodes, revision numbers, and partial node matches must be
492 rejected.
493
494 The null node is never present.
495 """
496
487 497 def revs(start=0, stop=None):
488 498 """Iterate over revision numbers for this file, with control."""
489 499
490 500 def parents(node):
491 501 """Returns a 2-tuple of parent nodes for a revision.
492 502
493 503 Values will be ``nullid`` if the parent is empty.
494 504 """
495 505
496 506 def parentrevs(rev):
497 507 """Like parents() but operates on revision numbers."""
498 508
499 509 def rev(node):
500 510 """Obtain the revision number given a node.
501 511
502 512 Raises ``error.LookupError`` if the node is not known.
503 513 """
504 514
505 515 def node(rev):
506 516 """Obtain the node value given a revision number.
507 517
508 518 Raises ``IndexError`` if the node is not known.
509 519 """
510 520
511 521 def lookup(node):
512 522 """Attempt to resolve a value to a node.
513 523
514 524 Value can be a binary node, hex node, revision number, or a string
515 525 that can be converted to an integer.
516 526
517 527 Raises ``error.LookupError`` if a node could not be resolved.
518 528 """
519 529
520 530 def linkrev(rev):
521 531 """Obtain the changeset revision number a revision is linked to."""
522 532
523 533 def iscensored(rev):
524 534 """Return whether a revision's content has been censored."""
525 535
526 536 def commonancestorsheads(node1, node2):
527 537 """Obtain an iterable of nodes containing heads of common ancestors.
528 538
529 539 See ``ancestor.commonancestorsheads()``.
530 540 """
531 541
532 542 def descendants(revs):
533 543 """Obtain descendant revision numbers for a set of revision numbers.
534 544
535 545 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
536 546 """
537 547
538 548 def heads(start=None, stop=None):
539 549 """Obtain a list of nodes that are DAG heads, with control.
540 550
541 551 The set of revisions examined can be limited by specifying
542 552 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
543 553 iterable of nodes. DAG traversal starts at earlier revision
544 554 ``start`` and iterates forward until any node in ``stop`` is
545 555 encountered.
546 556 """
547 557
548 558 def children(node):
549 559 """Obtain nodes that are children of a node.
550 560
551 561 Returns a list of nodes.
552 562 """
553 563
554 564 class ifiledata(interfaceutil.Interface):
555 565 """Storage interface for data storage of a specific file.
556 566
557 567 This complements ``ifileindex`` and provides an interface for accessing
558 568 data for a tracked file.
559 569 """
560 570 def size(rev):
561 571 """Obtain the fulltext size of file data.
562 572
563 573 Any metadata is excluded from size measurements.
564 574 """
565 575
566 576 def revision(node, raw=False):
567 577 """"Obtain fulltext data for a node.
568 578
569 579 By default, any storage transformations are applied before the data
570 580 is returned. If ``raw`` is True, non-raw storage transformations
571 581 are not applied.
572 582
573 583 The fulltext data may contain a header containing metadata. Most
574 584 consumers should use ``read()`` to obtain the actual file data.
575 585 """
576 586
577 587 def read(node):
578 588 """Resolve file fulltext data.
579 589
580 590 This is similar to ``revision()`` except any metadata in the data
581 591 headers is stripped.
582 592 """
583 593
584 594 def renamed(node):
585 595 """Obtain copy metadata for a node.
586 596
587 597 Returns ``False`` if no copy metadata is stored or a 2-tuple of
588 598 (path, node) from which this revision was copied.
589 599 """
590 600
591 601 def cmp(node, fulltext):
592 602 """Compare fulltext to another revision.
593 603
594 604 Returns True if the fulltext is different from what is stored.
595 605
596 606 This takes copy metadata into account.
597 607
598 608 TODO better document the copy metadata and censoring logic.
599 609 """
600 610
601 611 def emitrevisions(nodes,
602 612 nodesorder=None,
603 613 revisiondata=False,
604 614 assumehaveparentrevisions=False,
605 615 deltaprevious=False):
606 616 """Produce ``irevisiondelta`` for revisions.
607 617
608 618 Given an iterable of nodes, emits objects conforming to the
609 619 ``irevisiondelta`` interface that describe revisions in storage.
610 620
611 621 This method is a generator.
612 622
613 623 The input nodes may be unordered. Implementations must ensure that a
614 624 node's parents are emitted before the node itself. Transitively, this
615 625 means that a node may only be emitted once all its ancestors in
616 626 ``nodes`` have also been emitted.
617 627
618 628 By default, emits "index" data (the ``node``, ``p1node``, and
619 629 ``p2node`` attributes). If ``revisiondata`` is set, revision data
620 630 will also be present on the emitted objects.
621 631
622 632 With default argument values, implementations can choose to emit
623 633 either fulltext revision data or a delta. When emitting deltas,
624 634 implementations must consider whether the delta's base revision
625 635 fulltext is available to the receiver.
626 636
627 637 The base revision fulltext is guaranteed to be available if any of
628 638 the following are met:
629 639
630 640 * Its fulltext revision was emitted by this method call.
631 641 * A delta for that revision was emitted by this method call.
632 642 * ``assumehaveparentrevisions`` is True and the base revision is a
633 643 parent of the node.
634 644
635 645 ``nodesorder`` can be used to control the order that revisions are
636 646 emitted. By default, revisions can be reordered as long as they are
637 647 in DAG topological order (see above). If the value is ``nodes``,
638 648 the iteration order from ``nodes`` should be used. If the value is
639 649 ``storage``, then the native order from the backing storage layer
640 650 is used. (Not all storage layers will have strong ordering and behavior
641 651 of this mode is storage-dependent.) ``nodes`` ordering can force
642 652 revisions to be emitted before their ancestors, so consumers should
643 653 use it with care.
644 654
645 655 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
646 656 be set and it is the caller's responsibility to resolve it, if needed.
647 657
648 658 If ``deltaprevious`` is True and revision data is requested, all
649 659 revision data should be emitted as deltas against the revision
650 660 emitted just prior. The initial revision should be a delta against
651 661 its 1st parent.
652 662 """
653 663
654 664 class ifilemutation(interfaceutil.Interface):
655 665 """Storage interface for mutation events of a tracked file."""
656 666
657 667 def add(filedata, meta, transaction, linkrev, p1, p2):
658 668 """Add a new revision to the store.
659 669
660 670 Takes file data, a dictionary of metadata, a transaction, a linkrev,
661 671 and parent nodes.
662 672
663 673 Returns the node that was added.
664 674
665 675 May no-op if a revision matching the supplied data is already stored.
666 676 """
667 677
668 678 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
669 679 flags=0, cachedelta=None):
670 680 """Add a new revision to the store.
671 681
672 682 This is similar to ``add()`` except it operates at a lower level.
673 683
674 684 The data passed in already contains a metadata header, if any.
675 685
676 686 ``node`` and ``flags`` can be used to define the expected node and
677 687 the flags to use with storage. ``flags`` is a bitwise value composed
678 688 of the various ``REVISION_FLAG_*`` constants.
679 689
680 690 ``add()`` is usually called when adding files from e.g. the working
681 691 directory. ``addrevision()`` is often called by ``add()`` and for
682 692 scenarios where revision data has already been computed, such as when
683 693 applying raw data from a peer repo.
684 694 """
685 695
686 696 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
687 697 """Process a series of deltas for storage.
688 698
689 699 ``deltas`` is an iterable of 7-tuples of
690 700 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
691 701 to add.
692 702
693 703 The ``delta`` field contains ``mpatch`` data to apply to a base
694 704 revision, identified by ``deltabase``. The base node can be
695 705 ``nullid``, in which case the header from the delta can be ignored
696 706 and the delta used as the fulltext.
697 707
698 708 ``addrevisioncb`` should be called for each node as it is committed.
699 709
700 710 Returns a list of nodes that were processed. A node will be in the list
701 711 even if it existed in the store previously.
702 712 """
703 713
704 714 def censorrevision(tr, node, tombstone=b''):
705 715 """Remove the content of a single revision.
706 716
707 717 The specified ``node`` will have its content purged from storage.
708 718 Future attempts to access the revision data for this node will
709 719 result in failure.
710 720
711 721 A ``tombstone`` message can optionally be stored. This message may be
712 722 displayed to users when they attempt to access the missing revision
713 723 data.
714 724
715 725 Storage backends may have stored deltas against the previous content
716 726 in this revision. As part of censoring a revision, these storage
717 727 backends are expected to rewrite any internally stored deltas such
718 728 that they no longer reference the deleted content.
719 729 """
720 730
721 731 def getstrippoint(minlink):
722 732 """Find the minimum revision that must be stripped to strip a linkrev.
723 733
724 734 Returns a 2-tuple containing the minimum revision number and a set
725 735 of all revision numbers that would be broken by this strip.
726 736
727 737 TODO this is highly revlog centric and should be abstracted into
728 738 a higher-level deletion API. ``repair.strip()`` relies on this.
729 739 """
730 740
731 741 def strip(minlink, transaction):
732 742 """Remove storage of items starting at a linkrev.
733 743
734 744 This uses ``getstrippoint()`` to determine the first node to remove.
735 745 Then it effectively truncates storage for all revisions after that.
736 746
737 747 TODO this is highly revlog centric and should be abstracted into a
738 748 higher-level deletion API.
739 749 """
740 750
741 751 class ifilestorage(ifileindex, ifiledata, ifilemutation):
742 752 """Complete storage interface for a single tracked file."""
743 753
744 754 def files():
745 755 """Obtain paths that are backing storage for this file.
746 756
747 757 TODO this is used heavily by verify code and there should probably
748 758 be a better API for that.
749 759 """
750 760
751 761 def storageinfo(exclusivefiles=False, sharedfiles=False,
752 762 revisionscount=False, trackedsize=False,
753 763 storedsize=False):
754 764 """Obtain information about storage for this file's data.
755 765
756 766 Returns a dict describing storage for this tracked path. The keys
757 767 in the dict map to arguments of the same. The arguments are bools
758 768 indicating whether to calculate and obtain that data.
759 769
760 770 exclusivefiles
761 771 Iterable of (vfs, path) describing files that are exclusively
762 772 used to back storage for this tracked path.
763 773
764 774 sharedfiles
765 775 Iterable of (vfs, path) describing files that are used to back
766 776 storage for this tracked path. Those files may also provide storage
767 777 for other stored entities.
768 778
769 779 revisionscount
770 780 Number of revisions available for retrieval.
771 781
772 782 trackedsize
773 783 Total size in bytes of all tracked revisions. This is a sum of the
774 784 length of the fulltext of all revisions.
775 785
776 786 storedsize
777 787 Total size in bytes used to store data for all tracked revisions.
778 788 This is commonly less than ``trackedsize`` due to internal usage
779 789 of deltas rather than fulltext revisions.
780 790
781 791 Not all storage backends may support all queries or have a reasonable
782 792 value to use. In that case, the value should be set to ``None`` and
783 793 callers are expected to handle this special value.
784 794 """
785 795
786 796 def verifyintegrity(state):
787 797 """Verifies the integrity of file storage.
788 798
789 799 ``state`` is a dict holding state of the verifier process. It can be
790 800 used to communicate data between invocations of multiple storage
791 801 primitives.
792 802
793 803 If individual revisions cannot have their revision content resolved,
794 804 the method is expected to set the ``skipread`` key to a set of nodes
795 805 that encountered problems.
796 806
797 807 The method yields objects conforming to the ``iverifyproblem``
798 808 interface.
799 809 """
800 810
801 811 class idirs(interfaceutil.Interface):
802 812 """Interface representing a collection of directories from paths.
803 813
804 814 This interface is essentially a derived data structure representing
805 815 directories from a collection of paths.
806 816 """
807 817
808 818 def addpath(path):
809 819 """Add a path to the collection.
810 820
811 821 All directories in the path will be added to the collection.
812 822 """
813 823
814 824 def delpath(path):
815 825 """Remove a path from the collection.
816 826
817 827 If the removal was the last path in a particular directory, the
818 828 directory is removed from the collection.
819 829 """
820 830
821 831 def __iter__():
822 832 """Iterate over the directories in this collection of paths."""
823 833
824 834 def __contains__(path):
825 835 """Whether a specific directory is in this collection."""
826 836
827 837 class imanifestdict(interfaceutil.Interface):
828 838 """Interface representing a manifest data structure.
829 839
830 840 A manifest is effectively a dict mapping paths to entries. Each entry
831 841 consists of a binary node and extra flags affecting that entry.
832 842 """
833 843
834 844 def __getitem__(path):
835 845 """Returns the binary node value for a path in the manifest.
836 846
837 847 Raises ``KeyError`` if the path does not exist in the manifest.
838 848
839 849 Equivalent to ``self.find(path)[0]``.
840 850 """
841 851
842 852 def find(path):
843 853 """Returns the entry for a path in the manifest.
844 854
845 855 Returns a 2-tuple of (node, flags).
846 856
847 857 Raises ``KeyError`` if the path does not exist in the manifest.
848 858 """
849 859
850 860 def __len__():
851 861 """Return the number of entries in the manifest."""
852 862
853 863 def __nonzero__():
854 864 """Returns True if the manifest has entries, False otherwise."""
855 865
856 866 __bool__ = __nonzero__
857 867
858 868 def __setitem__(path, node):
859 869 """Define the node value for a path in the manifest.
860 870
861 871 If the path is already in the manifest, its flags will be copied to
862 872 the new entry.
863 873 """
864 874
865 875 def __contains__(path):
866 876 """Whether a path exists in the manifest."""
867 877
868 878 def __delitem__(path):
869 879 """Remove a path from the manifest.
870 880
871 881 Raises ``KeyError`` if the path is not in the manifest.
872 882 """
873 883
874 884 def __iter__():
875 885 """Iterate over paths in the manifest."""
876 886
877 887 def iterkeys():
878 888 """Iterate over paths in the manifest."""
879 889
880 890 def keys():
881 891 """Obtain a list of paths in the manifest."""
882 892
883 893 def filesnotin(other, match=None):
884 894 """Obtain the set of paths in this manifest but not in another.
885 895
886 896 ``match`` is an optional matcher function to be applied to both
887 897 manifests.
888 898
889 899 Returns a set of paths.
890 900 """
891 901
892 902 def dirs():
893 903 """Returns an object implementing the ``idirs`` interface."""
894 904
895 905 def hasdir(dir):
896 906 """Returns a bool indicating if a directory is in this manifest."""
897 907
898 908 def matches(match):
899 909 """Generate a new manifest filtered through a matcher.
900 910
901 911 Returns an object conforming to the ``imanifestdict`` interface.
902 912 """
903 913
904 914 def walk(match):
905 915 """Generator of paths in manifest satisfying a matcher.
906 916
907 917 This is equivalent to ``self.matches(match).iterkeys()`` except a new
908 918 manifest object is not created.
909 919
910 920 If the matcher has explicit files listed and they don't exist in
911 921 the manifest, ``match.bad()`` is called for each missing file.
912 922 """
913 923
914 924 def diff(other, match=None, clean=False):
915 925 """Find differences between this manifest and another.
916 926
917 927 This manifest is compared to ``other``.
918 928
919 929 If ``match`` is provided, the two manifests are filtered against this
920 930 matcher and only entries satisfying the matcher are compared.
921 931
922 932 If ``clean`` is True, unchanged files are included in the returned
923 933 object.
924 934
925 935 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
926 936 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
927 937 represents the node and flags for this manifest and ``(node2, flag2)``
928 938 are the same for the other manifest.
929 939 """
930 940
931 941 def setflag(path, flag):
932 942 """Set the flag value for a given path.
933 943
934 944 Raises ``KeyError`` if the path is not already in the manifest.
935 945 """
936 946
937 947 def get(path, default=None):
938 948 """Obtain the node value for a path or a default value if missing."""
939 949
940 950 def flags(path, default=''):
941 951 """Return the flags value for a path or a default value if missing."""
942 952
943 953 def copy():
944 954 """Return a copy of this manifest."""
945 955
946 956 def items():
947 957 """Returns an iterable of (path, node) for items in this manifest."""
948 958
949 959 def iteritems():
950 960 """Identical to items()."""
951 961
952 962 def iterentries():
953 963 """Returns an iterable of (path, node, flags) for this manifest.
954 964
955 965 Similar to ``iteritems()`` except items are a 3-tuple and include
956 966 flags.
957 967 """
958 968
959 969 def text():
960 970 """Obtain the raw data representation for this manifest.
961 971
962 972 Result is used to create a manifest revision.
963 973 """
964 974
965 975 def fastdelta(base, changes):
966 976 """Obtain a delta between this manifest and another given changes.
967 977
968 978 ``base`` is the raw data representation of another manifest.
969 979
970 980 ``changes`` is an iterable of ``(path, to_delete)``.
971 981
972 982 Returns a 2-tuple containing ``bytearray(self.text())`` and the
973 983 delta between ``base`` and this manifest.
974 984 """
975 985
976 986 class imanifestrevisionbase(interfaceutil.Interface):
977 987 """Base interface representing a single revision of a manifest.
978 988
979 989 Should not be used as a primary interface: should always be inherited
980 990 as part of a larger interface.
981 991 """
982 992
983 993 def new():
984 994 """Obtain a new manifest instance.
985 995
986 996 Returns an object conforming to the ``imanifestrevisionwritable``
987 997 interface. The instance will be associated with the same
988 998 ``imanifestlog`` collection as this instance.
989 999 """
990 1000
991 1001 def copy():
992 1002 """Obtain a copy of this manifest instance.
993 1003
994 1004 Returns an object conforming to the ``imanifestrevisionwritable``
995 1005 interface. The instance will be associated with the same
996 1006 ``imanifestlog`` collection as this instance.
997 1007 """
998 1008
999 1009 def read():
1000 1010 """Obtain the parsed manifest data structure.
1001 1011
1002 1012 The returned object conforms to the ``imanifestdict`` interface.
1003 1013 """
1004 1014
1005 1015 class imanifestrevisionstored(imanifestrevisionbase):
1006 1016 """Interface representing a manifest revision committed to storage."""
1007 1017
1008 1018 def node():
1009 1019 """The binary node for this manifest."""
1010 1020
1011 1021 parents = interfaceutil.Attribute(
1012 1022 """List of binary nodes that are parents for this manifest revision."""
1013 1023 )
1014 1024
1015 1025 def readdelta(shallow=False):
1016 1026 """Obtain the manifest data structure representing changes from parent.
1017 1027
1018 1028 This manifest is compared to its 1st parent. A new manifest representing
1019 1029 those differences is constructed.
1020 1030
1021 1031 The returned object conforms to the ``imanifestdict`` interface.
1022 1032 """
1023 1033
1024 1034 def readfast(shallow=False):
1025 1035 """Calls either ``read()`` or ``readdelta()``.
1026 1036
1027 1037 The faster of the two options is called.
1028 1038 """
1029 1039
1030 1040 def find(key):
1031 1041 """Calls self.read().find(key)``.
1032 1042
1033 1043 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1034 1044 """
1035 1045
1036 1046 class imanifestrevisionwritable(imanifestrevisionbase):
1037 1047 """Interface representing a manifest revision that can be committed."""
1038 1048
1039 1049 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1040 1050 """Add this revision to storage.
1041 1051
1042 1052 Takes a transaction object, the changeset revision number it will
1043 1053 be associated with, its parent nodes, and lists of added and
1044 1054 removed paths.
1045 1055
1046 1056 If match is provided, storage can choose not to inspect or write out
1047 1057 items that do not match. Storage is still required to be able to provide
1048 1058 the full manifest in the future for any directories written (these
1049 1059 manifests should not be "narrowed on disk").
1050 1060
1051 1061 Returns the binary node of the created revision.
1052 1062 """
1053 1063
1054 1064 class imanifeststorage(interfaceutil.Interface):
1055 1065 """Storage interface for manifest data."""
1056 1066
1057 1067 tree = interfaceutil.Attribute(
1058 1068 """The path to the directory this manifest tracks.
1059 1069
1060 1070 The empty bytestring represents the root manifest.
1061 1071 """)
1062 1072
1063 1073 index = interfaceutil.Attribute(
1064 1074 """An ``ifilerevisionssequence`` instance.""")
1065 1075
1066 1076 indexfile = interfaceutil.Attribute(
1067 1077 """Path of revlog index file.
1068 1078
1069 1079 TODO this is revlog specific and should not be exposed.
1070 1080 """)
1071 1081
1072 1082 opener = interfaceutil.Attribute(
1073 1083 """VFS opener to use to access underlying files used for storage.
1074 1084
1075 1085 TODO this is revlog specific and should not be exposed.
1076 1086 """)
1077 1087
1078 1088 version = interfaceutil.Attribute(
1079 1089 """Revlog version number.
1080 1090
1081 1091 TODO this is revlog specific and should not be exposed.
1082 1092 """)
1083 1093
1084 1094 _generaldelta = interfaceutil.Attribute(
1085 1095 """Whether generaldelta storage is being used.
1086 1096
1087 1097 TODO this is revlog specific and should not be exposed.
1088 1098 """)
1089 1099
1090 1100 fulltextcache = interfaceutil.Attribute(
1091 1101 """Dict with cache of fulltexts.
1092 1102
1093 1103 TODO this doesn't feel appropriate for the storage interface.
1094 1104 """)
1095 1105
1096 1106 def __len__():
1097 1107 """Obtain the number of revisions stored for this manifest."""
1098 1108
1099 1109 def __iter__():
1100 1110 """Iterate over revision numbers for this manifest."""
1101 1111
1102 1112 def rev(node):
1103 1113 """Obtain the revision number given a binary node.
1104 1114
1105 1115 Raises ``error.LookupError`` if the node is not known.
1106 1116 """
1107 1117
1108 1118 def node(rev):
1109 1119 """Obtain the node value given a revision number.
1110 1120
1111 1121 Raises ``error.LookupError`` if the revision is not known.
1112 1122 """
1113 1123
1114 1124 def lookup(value):
1115 1125 """Attempt to resolve a value to a node.
1116 1126
1117 1127 Value can be a binary node, hex node, revision number, or a bytes
1118 1128 that can be converted to an integer.
1119 1129
1120 1130 Raises ``error.LookupError`` if a node could not be resolved.
1121 1131 """
1122 1132
1123 1133 def parents(node):
1124 1134 """Returns a 2-tuple of parent nodes for a node.
1125 1135
1126 1136 Values will be ``nullid`` if the parent is empty.
1127 1137 """
1128 1138
1129 1139 def parentrevs(rev):
1130 1140 """Like parents() but operates on revision numbers."""
1131 1141
1132 1142 def linkrev(rev):
1133 1143 """Obtain the changeset revision number a revision is linked to."""
1134 1144
1135 1145 def revision(node, _df=None, raw=False):
1136 1146 """Obtain fulltext data for a node."""
1137 1147
1138 1148 def revdiff(rev1, rev2):
1139 1149 """Obtain a delta between two revision numbers.
1140 1150
1141 1151 The returned data is the result of ``bdiff.bdiff()`` on the raw
1142 1152 revision data.
1143 1153 """
1144 1154
1145 1155 def cmp(node, fulltext):
1146 1156 """Compare fulltext to another revision.
1147 1157
1148 1158 Returns True if the fulltext is different from what is stored.
1149 1159 """
1150 1160
1151 1161 def emitrevisions(nodes,
1152 1162 nodesorder=None,
1153 1163 revisiondata=False,
1154 1164 assumehaveparentrevisions=False):
1155 1165 """Produce ``irevisiondelta`` describing revisions.
1156 1166
1157 1167 See the documentation for ``ifiledata`` for more.
1158 1168 """
1159 1169
1160 1170 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1161 1171 """Process a series of deltas for storage.
1162 1172
1163 1173 See the documentation in ``ifilemutation`` for more.
1164 1174 """
1165 1175
1166 1176 def rawsize(rev):
1167 1177 """Obtain the size of tracked data.
1168 1178
1169 1179 Is equivalent to ``len(m.revision(node, raw=True))``.
1170 1180
1171 1181 TODO this method is only used by upgrade code and may be removed.
1172 1182 """
1173 1183
1174 1184 def getstrippoint(minlink):
1175 1185 """Find minimum revision that must be stripped to strip a linkrev.
1176 1186
1177 1187 See the documentation in ``ifilemutation`` for more.
1178 1188 """
1179 1189
1180 1190 def strip(minlink, transaction):
1181 1191 """Remove storage of items starting at a linkrev.
1182 1192
1183 1193 See the documentation in ``ifilemutation`` for more.
1184 1194 """
1185 1195
1186 1196 def checksize():
1187 1197 """Obtain the expected sizes of backing files.
1188 1198
1189 1199 TODO this is used by verify and it should not be part of the interface.
1190 1200 """
1191 1201
1192 1202 def files():
1193 1203 """Obtain paths that are backing storage for this manifest.
1194 1204
1195 1205 TODO this is used by verify and there should probably be a better API
1196 1206 for this functionality.
1197 1207 """
1198 1208
1199 1209 def deltaparent(rev):
1200 1210 """Obtain the revision that a revision is delta'd against.
1201 1211
1202 1212 TODO delta encoding is an implementation detail of storage and should
1203 1213 not be exposed to the storage interface.
1204 1214 """
1205 1215
1206 1216 def clone(tr, dest, **kwargs):
1207 1217 """Clone this instance to another."""
1208 1218
1209 1219 def clearcaches(clear_persisted_data=False):
1210 1220 """Clear any caches associated with this instance."""
1211 1221
1212 1222 def dirlog(d):
1213 1223 """Obtain a manifest storage instance for a tree."""
1214 1224
1215 1225 def add(m, transaction, link, p1, p2, added, removed, readtree=None,
1216 1226 match=None):
1217 1227 """Add a revision to storage.
1218 1228
1219 1229 ``m`` is an object conforming to ``imanifestdict``.
1220 1230
1221 1231 ``link`` is the linkrev revision number.
1222 1232
1223 1233 ``p1`` and ``p2`` are the parent revision numbers.
1224 1234
1225 1235 ``added`` and ``removed`` are iterables of added and removed paths,
1226 1236 respectively.
1227 1237
1228 1238 ``readtree`` is a function that can be used to read the child tree(s)
1229 1239 when recursively writing the full tree structure when using
1230 1240 tree manifests.
1231 1241
1232 1242 ``match`` is a matcher that can be used to hint to storage that not all
1233 1243 paths must be inspected; this is an optimization and can be safely
1234 1244 ignored. Note that the storage must still be able to reproduce a full
1235 1245 manifest including files that did not match.
1236 1246 """
1237 1247
1238 1248 def storageinfo(exclusivefiles=False, sharedfiles=False,
1239 1249 revisionscount=False, trackedsize=False,
1240 1250 storedsize=False):
1241 1251 """Obtain information about storage for this manifest's data.
1242 1252
1243 1253 See ``ifilestorage.storageinfo()`` for a description of this method.
1244 1254 This one behaves the same way, except for manifest data.
1245 1255 """
1246 1256
1247 1257 class imanifestlog(interfaceutil.Interface):
1248 1258 """Interface representing a collection of manifest snapshots.
1249 1259
1250 1260 Represents the root manifest in a repository.
1251 1261
1252 1262 Also serves as a means to access nested tree manifests and to cache
1253 1263 tree manifests.
1254 1264 """
1255 1265
1256 1266 def __getitem__(node):
1257 1267 """Obtain a manifest instance for a given binary node.
1258 1268
1259 1269 Equivalent to calling ``self.get('', node)``.
1260 1270
1261 1271 The returned object conforms to the ``imanifestrevisionstored``
1262 1272 interface.
1263 1273 """
1264 1274
1265 1275 def get(tree, node, verify=True):
1266 1276 """Retrieve the manifest instance for a given directory and binary node.
1267 1277
1268 1278 ``node`` always refers to the node of the root manifest (which will be
1269 1279 the only manifest if flat manifests are being used).
1270 1280
1271 1281 If ``tree`` is the empty string, the root manifest is returned.
1272 1282 Otherwise the manifest for the specified directory will be returned
1273 1283 (requires tree manifests).
1274 1284
1275 1285 If ``verify`` is True, ``LookupError`` is raised if the node is not
1276 1286 known.
1277 1287
1278 1288 The returned object conforms to the ``imanifestrevisionstored``
1279 1289 interface.
1280 1290 """
1281 1291
1282 1292 def getstorage(tree):
1283 1293 """Retrieve an interface to storage for a particular tree.
1284 1294
1285 1295 If ``tree`` is the empty bytestring, storage for the root manifest will
1286 1296 be returned. Otherwise storage for a tree manifest is returned.
1287 1297
1288 1298 TODO formalize interface for returned object.
1289 1299 """
1290 1300
1291 1301 def clearcaches():
1292 1302 """Clear caches associated with this collection."""
1293 1303
1294 1304 def rev(node):
1295 1305 """Obtain the revision number for a binary node.
1296 1306
1297 1307 Raises ``error.LookupError`` if the node is not known.
1298 1308 """
1299 1309
1300 1310 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1301 1311 """Local repository sub-interface providing access to tracked file storage.
1302 1312
1303 1313 This interface defines how a repository accesses storage for a single
1304 1314 tracked file path.
1305 1315 """
1306 1316
1307 1317 def file(f):
1308 1318 """Obtain a filelog for a tracked path.
1309 1319
1310 1320 The returned type conforms to the ``ifilestorage`` interface.
1311 1321 """
1312 1322
1313 1323 class ilocalrepositorymain(interfaceutil.Interface):
1314 1324 """Main interface for local repositories.
1315 1325
1316 1326 This currently captures the reality of things - not how things should be.
1317 1327 """
1318 1328
1319 1329 supportedformats = interfaceutil.Attribute(
1320 1330 """Set of requirements that apply to stream clone.
1321 1331
1322 1332 This is actually a class attribute and is shared among all instances.
1323 1333 """)
1324 1334
1325 1335 supported = interfaceutil.Attribute(
1326 1336 """Set of requirements that this repo is capable of opening.""")
1327 1337
1328 1338 requirements = interfaceutil.Attribute(
1329 1339 """Set of requirements this repo uses.""")
1330 1340
1331 1341 features = interfaceutil.Attribute(
1332 1342 """Set of "features" this repository supports.
1333 1343
1334 1344 A "feature" is a loosely-defined term. It can refer to a feature
1335 1345 in the classical sense or can describe an implementation detail
1336 1346 of the repository. For example, a ``readonly`` feature may denote
1337 1347 the repository as read-only. Or a ``revlogfilestore`` feature may
1338 1348 denote that the repository is using revlogs for file storage.
1339 1349
1340 1350 The intent of features is to provide a machine-queryable mechanism
1341 1351 for repo consumers to test for various repository characteristics.
1342 1352
1343 1353 Features are similar to ``requirements``. The main difference is that
1344 1354 requirements are stored on-disk and represent requirements to open the
1345 1355 repository. Features describe run-time capabilities of the repository
1346 1356 and are more granular (and may be derived from requirements).
1347 1357 """)
1348 1358
1349 1359 filtername = interfaceutil.Attribute(
1350 1360 """Name of the repoview that is active on this repo.""")
1351 1361
1352 1362 wvfs = interfaceutil.Attribute(
1353 1363 """VFS used to access the working directory.""")
1354 1364
1355 1365 vfs = interfaceutil.Attribute(
1356 1366 """VFS rooted at the .hg directory.
1357 1367
1358 1368 Used to access repository data not in the store.
1359 1369 """)
1360 1370
1361 1371 svfs = interfaceutil.Attribute(
1362 1372 """VFS rooted at the store.
1363 1373
1364 1374 Used to access repository data in the store. Typically .hg/store.
1365 1375 But can point elsewhere if the store is shared.
1366 1376 """)
1367 1377
1368 1378 root = interfaceutil.Attribute(
1369 1379 """Path to the root of the working directory.""")
1370 1380
1371 1381 path = interfaceutil.Attribute(
1372 1382 """Path to the .hg directory.""")
1373 1383
1374 1384 origroot = interfaceutil.Attribute(
1375 1385 """The filesystem path that was used to construct the repo.""")
1376 1386
1377 1387 auditor = interfaceutil.Attribute(
1378 1388 """A pathauditor for the working directory.
1379 1389
1380 1390 This checks if a path refers to a nested repository.
1381 1391
1382 1392 Operates on the filesystem.
1383 1393 """)
1384 1394
1385 1395 nofsauditor = interfaceutil.Attribute(
1386 1396 """A pathauditor for the working directory.
1387 1397
1388 1398 This is like ``auditor`` except it doesn't do filesystem checks.
1389 1399 """)
1390 1400
1391 1401 baseui = interfaceutil.Attribute(
1392 1402 """Original ui instance passed into constructor.""")
1393 1403
1394 1404 ui = interfaceutil.Attribute(
1395 1405 """Main ui instance for this instance.""")
1396 1406
1397 1407 sharedpath = interfaceutil.Attribute(
1398 1408 """Path to the .hg directory of the repo this repo was shared from.""")
1399 1409
1400 1410 store = interfaceutil.Attribute(
1401 1411 """A store instance.""")
1402 1412
1403 1413 spath = interfaceutil.Attribute(
1404 1414 """Path to the store.""")
1405 1415
1406 1416 sjoin = interfaceutil.Attribute(
1407 1417 """Alias to self.store.join.""")
1408 1418
1409 1419 cachevfs = interfaceutil.Attribute(
1410 1420 """A VFS used to access the cache directory.
1411 1421
1412 1422 Typically .hg/cache.
1413 1423 """)
1414 1424
1415 1425 filteredrevcache = interfaceutil.Attribute(
1416 1426 """Holds sets of revisions to be filtered.""")
1417 1427
1418 1428 names = interfaceutil.Attribute(
1419 1429 """A ``namespaces`` instance.""")
1420 1430
1421 1431 def close():
1422 1432 """Close the handle on this repository."""
1423 1433
1424 1434 def peer():
1425 1435 """Obtain an object conforming to the ``peer`` interface."""
1426 1436
1427 1437 def unfiltered():
1428 1438 """Obtain an unfiltered/raw view of this repo."""
1429 1439
1430 1440 def filtered(name, visibilityexceptions=None):
1431 1441 """Obtain a named view of this repository."""
1432 1442
1433 1443 obsstore = interfaceutil.Attribute(
1434 1444 """A store of obsolescence data.""")
1435 1445
1436 1446 changelog = interfaceutil.Attribute(
1437 1447 """A handle on the changelog revlog.""")
1438 1448
1439 1449 manifestlog = interfaceutil.Attribute(
1440 1450 """An instance conforming to the ``imanifestlog`` interface.
1441 1451
1442 1452 Provides access to manifests for the repository.
1443 1453 """)
1444 1454
1445 1455 dirstate = interfaceutil.Attribute(
1446 1456 """Working directory state.""")
1447 1457
1448 1458 narrowpats = interfaceutil.Attribute(
1449 1459 """Matcher patterns for this repository's narrowspec.""")
1450 1460
1451 1461 def narrowmatch():
1452 1462 """Obtain a matcher for the narrowspec."""
1453 1463
1454 1464 def setnarrowpats(newincludes, newexcludes):
1455 1465 """Define the narrowspec for this repository."""
1456 1466
1457 1467 def __getitem__(changeid):
1458 1468 """Try to resolve a changectx."""
1459 1469
1460 1470 def __contains__(changeid):
1461 1471 """Whether a changeset exists."""
1462 1472
1463 1473 def __nonzero__():
1464 1474 """Always returns True."""
1465 1475 return True
1466 1476
1467 1477 __bool__ = __nonzero__
1468 1478
1469 1479 def __len__():
1470 1480 """Returns the number of changesets in the repo."""
1471 1481
1472 1482 def __iter__():
1473 1483 """Iterate over revisions in the changelog."""
1474 1484
1475 1485 def revs(expr, *args):
1476 1486 """Evaluate a revset.
1477 1487
1478 1488 Emits revisions.
1479 1489 """
1480 1490
1481 1491 def set(expr, *args):
1482 1492 """Evaluate a revset.
1483 1493
1484 1494 Emits changectx instances.
1485 1495 """
1486 1496
1487 1497 def anyrevs(specs, user=False, localalias=None):
1488 1498 """Find revisions matching one of the given revsets."""
1489 1499
1490 1500 def url():
1491 1501 """Returns a string representing the location of this repo."""
1492 1502
1493 1503 def hook(name, throw=False, **args):
1494 1504 """Call a hook."""
1495 1505
1496 1506 def tags():
1497 1507 """Return a mapping of tag to node."""
1498 1508
1499 1509 def tagtype(tagname):
1500 1510 """Return the type of a given tag."""
1501 1511
1502 1512 def tagslist():
1503 1513 """Return a list of tags ordered by revision."""
1504 1514
1505 1515 def nodetags(node):
1506 1516 """Return the tags associated with a node."""
1507 1517
1508 1518 def nodebookmarks(node):
1509 1519 """Return the list of bookmarks pointing to the specified node."""
1510 1520
1511 1521 def branchmap():
1512 1522 """Return a mapping of branch to heads in that branch."""
1513 1523
1514 1524 def revbranchcache():
1515 1525 pass
1516 1526
1517 1527 def branchtip(branchtip, ignoremissing=False):
1518 1528 """Return the tip node for a given branch."""
1519 1529
1520 1530 def lookup(key):
1521 1531 """Resolve the node for a revision."""
1522 1532
1523 1533 def lookupbranch(key):
1524 1534 """Look up the branch name of the given revision or branch name."""
1525 1535
1526 1536 def known(nodes):
1527 1537 """Determine whether a series of nodes is known.
1528 1538
1529 1539 Returns a list of bools.
1530 1540 """
1531 1541
1532 1542 def local():
1533 1543 """Whether the repository is local."""
1534 1544 return True
1535 1545
1536 1546 def publishing():
1537 1547 """Whether the repository is a publishing repository."""
1538 1548
1539 1549 def cancopy():
1540 1550 pass
1541 1551
1542 1552 def shared():
1543 1553 """The type of shared repository or None."""
1544 1554
1545 1555 def wjoin(f, *insidef):
1546 1556 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1547 1557
1548 1558 def setparents(p1, p2):
1549 1559 """Set the parent nodes of the working directory."""
1550 1560
1551 1561 def filectx(path, changeid=None, fileid=None):
1552 1562 """Obtain a filectx for the given file revision."""
1553 1563
1554 1564 def getcwd():
1555 1565 """Obtain the current working directory from the dirstate."""
1556 1566
1557 1567 def pathto(f, cwd=None):
1558 1568 """Obtain the relative path to a file."""
1559 1569
1560 1570 def adddatafilter(name, fltr):
1561 1571 pass
1562 1572
1563 1573 def wread(filename):
1564 1574 """Read a file from wvfs, using data filters."""
1565 1575
1566 1576 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1567 1577 """Write data to a file in the wvfs, using data filters."""
1568 1578
1569 1579 def wwritedata(filename, data):
1570 1580 """Resolve data for writing to the wvfs, using data filters."""
1571 1581
1572 1582 def currenttransaction():
1573 1583 """Obtain the current transaction instance or None."""
1574 1584
1575 1585 def transaction(desc, report=None):
1576 1586 """Open a new transaction to write to the repository."""
1577 1587
1578 1588 def undofiles():
1579 1589 """Returns a list of (vfs, path) for files to undo transactions."""
1580 1590
1581 1591 def recover():
1582 1592 """Roll back an interrupted transaction."""
1583 1593
1584 1594 def rollback(dryrun=False, force=False):
1585 1595 """Undo the last transaction.
1586 1596
1587 1597 DANGEROUS.
1588 1598 """
1589 1599
1590 1600 def updatecaches(tr=None, full=False):
1591 1601 """Warm repo caches."""
1592 1602
1593 1603 def invalidatecaches():
1594 1604 """Invalidate cached data due to the repository mutating."""
1595 1605
1596 1606 def invalidatevolatilesets():
1597 1607 pass
1598 1608
1599 1609 def invalidatedirstate():
1600 1610 """Invalidate the dirstate."""
1601 1611
1602 1612 def invalidate(clearfilecache=False):
1603 1613 pass
1604 1614
1605 1615 def invalidateall():
1606 1616 pass
1607 1617
1608 1618 def lock(wait=True):
1609 1619 """Lock the repository store and return a lock instance."""
1610 1620
1611 1621 def wlock(wait=True):
1612 1622 """Lock the non-store parts of the repository."""
1613 1623
1614 1624 def currentwlock():
1615 1625 """Return the wlock if it's held or None."""
1616 1626
1617 1627 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1618 1628 pass
1619 1629
1620 1630 def commit(text='', user=None, date=None, match=None, force=False,
1621 1631 editor=False, extra=None):
1622 1632 """Add a new revision to the repository."""
1623 1633
1624 1634 def commitctx(ctx, error=False):
1625 1635 """Commit a commitctx instance to the repository."""
1626 1636
1627 1637 def destroying():
1628 1638 """Inform the repository that nodes are about to be destroyed."""
1629 1639
1630 1640 def destroyed():
1631 1641 """Inform the repository that nodes have been destroyed."""
1632 1642
1633 1643 def status(node1='.', node2=None, match=None, ignored=False,
1634 1644 clean=False, unknown=False, listsubrepos=False):
1635 1645 """Convenience method to call repo[x].status()."""
1636 1646
1637 1647 def addpostdsstatus(ps):
1638 1648 pass
1639 1649
1640 1650 def postdsstatus():
1641 1651 pass
1642 1652
1643 1653 def clearpostdsstatus():
1644 1654 pass
1645 1655
1646 1656 def heads(start=None):
1647 1657 """Obtain list of nodes that are DAG heads."""
1648 1658
1649 1659 def branchheads(branch=None, start=None, closed=False):
1650 1660 pass
1651 1661
1652 1662 def branches(nodes):
1653 1663 pass
1654 1664
1655 1665 def between(pairs):
1656 1666 pass
1657 1667
1658 1668 def checkpush(pushop):
1659 1669 pass
1660 1670
1661 1671 prepushoutgoinghooks = interfaceutil.Attribute(
1662 1672 """util.hooks instance.""")
1663 1673
1664 1674 def pushkey(namespace, key, old, new):
1665 1675 pass
1666 1676
1667 1677 def listkeys(namespace):
1668 1678 pass
1669 1679
1670 1680 def debugwireargs(one, two, three=None, four=None, five=None):
1671 1681 pass
1672 1682
1673 1683 def savecommitmessage(text):
1674 1684 pass
1675 1685
1676 1686 class completelocalrepository(ilocalrepositorymain,
1677 1687 ilocalrepositoryfilestorage):
1678 1688 """Complete interface for a local repository."""
1679 1689
1680 1690 class iwireprotocolcommandcacher(interfaceutil.Interface):
1681 1691 """Represents a caching backend for wire protocol commands.
1682 1692
1683 1693 Wire protocol version 2 supports transparent caching of many commands.
1684 1694 To leverage this caching, servers can activate objects that cache
1685 1695 command responses. Objects handle both cache writing and reading.
1686 1696 This interface defines how that response caching mechanism works.
1687 1697
1688 1698 Wire protocol version 2 commands emit a series of objects that are
1689 1699 serialized and sent to the client. The caching layer exists between
1690 1700 the invocation of the command function and the sending of its output
1691 1701 objects to an output layer.
1692 1702
1693 1703 Instances of this interface represent a binding to a cache that
1694 1704 can serve a response (in place of calling a command function) and/or
1695 1705 write responses to a cache for subsequent use.
1696 1706
1697 1707 When a command request arrives, the following happens with regards
1698 1708 to this interface:
1699 1709
1700 1710 1. The server determines whether the command request is cacheable.
1701 1711 2. If it is, an instance of this interface is spawned.
1702 1712 3. The cacher is activated in a context manager (``__enter__`` is called).
1703 1713 4. A cache *key* for that request is derived. This will call the
1704 1714 instance's ``adjustcachekeystate()`` method so the derivation
1705 1715 can be influenced.
1706 1716 5. The cacher is informed of the derived cache key via a call to
1707 1717 ``setcachekey()``.
1708 1718 6. The cacher's ``lookup()`` method is called to test for presence of
1709 1719 the derived key in the cache.
1710 1720 7. If ``lookup()`` returns a hit, that cached result is used in place
1711 1721 of invoking the command function. ``__exit__`` is called and the instance
1712 1722 is discarded.
1713 1723 8. The command function is invoked.
1714 1724 9. ``onobject()`` is called for each object emitted by the command
1715 1725 function.
1716 1726 10. After the final object is seen, ``onfinished()`` is called.
1717 1727 11. ``__exit__`` is called to signal the end of use of the instance.
1718 1728
1719 1729 Cache *key* derivation can be influenced by the instance.
1720 1730
1721 1731 Cache keys are initially derived by a deterministic representation of
1722 1732 the command request. This includes the command name, arguments, protocol
1723 1733 version, etc. This initial key derivation is performed by CBOR-encoding a
1724 1734 data structure and feeding that output into a hasher.
1725 1735
1726 1736 Instances of this interface can influence this initial key derivation
1727 1737 via ``adjustcachekeystate()``.
1728 1738
1729 1739 The instance is informed of the derived cache key via a call to
1730 1740 ``setcachekey()``. The instance must store the key locally so it can
1731 1741 be consulted on subsequent operations that may require it.
1732 1742
1733 1743 When constructed, the instance has access to a callable that can be used
1734 1744 for encoding response objects. This callable receives as its single
1735 1745 argument an object emitted by a command function. It returns an iterable
1736 1746 of bytes chunks representing the encoded object. Unless the cacher is
1737 1747 caching native Python objects in memory or has a way of reconstructing
1738 1748 the original Python objects, implementations typically call this function
1739 1749 to produce bytes from the output objects and then store those bytes in
1740 1750 the cache. When it comes time to re-emit those bytes, they are wrapped
1741 1751 in a ``wireprototypes.encodedresponse`` instance to tell the output
1742 1752 layer that they are pre-encoded.
1743 1753
1744 1754 When receiving the objects emitted by the command function, instances
1745 1755 can choose what to do with those objects. The simplest thing to do is
1746 1756 re-emit the original objects. They will be forwarded to the output
1747 1757 layer and will be processed as if the cacher did not exist.
1748 1758
1749 1759 Implementations could also choose to not emit objects - instead locally
1750 1760 buffering objects or their encoded representation. They could then emit
1751 1761 a single "coalesced" object when ``onfinished()`` is called. In
1752 1762 this way, the implementation would function as a filtering layer of
1753 1763 sorts.
1754 1764
1755 1765 When caching objects, typically the encoded form of the object will
1756 1766 be stored. Keep in mind that if the original object is forwarded to
1757 1767 the output layer, it will need to be encoded there as well. For large
1758 1768 output, this redundant encoding could add overhead. Implementations
1759 1769 could wrap the encoded object data in ``wireprototypes.encodedresponse``
1760 1770 instances to avoid this overhead.
1761 1771 """
1762 1772 def __enter__():
1763 1773 """Marks the instance as active.
1764 1774
1765 1775 Should return self.
1766 1776 """
1767 1777
1768 1778 def __exit__(exctype, excvalue, exctb):
1769 1779 """Called when cacher is no longer used.
1770 1780
1771 1781 This can be used by implementations to perform cleanup actions (e.g.
1772 1782 disconnecting network sockets, aborting a partially cached response).
1773 1783 """
1774 1784
1775 1785 def adjustcachekeystate(state):
1776 1786 """Influences cache key derivation by adjusting state to derive key.
1777 1787
1778 1788 A dict defining the state used to derive the cache key is passed.
1779 1789
1780 1790 Implementations can modify this dict to record additional state that
1781 1791 is wanted to influence key derivation.
1782 1792
1783 1793 Implementations are *highly* encouraged to not modify or delete
1784 1794 existing keys.
1785 1795 """
1786 1796
1787 1797 def setcachekey(key):
1788 1798 """Record the derived cache key for this request.
1789 1799
1790 1800 Instances may mutate the key for internal usage, as desired. e.g.
1791 1801 instances may wish to prepend the repo name, introduce path
1792 1802 components for filesystem or URL addressing, etc. Behavior is up to
1793 1803 the cache.
1794 1804
1795 1805 Returns a bool indicating if the request is cacheable by this
1796 1806 instance.
1797 1807 """
1798 1808
1799 1809 def lookup():
1800 1810 """Attempt to resolve an entry in the cache.
1801 1811
1802 1812 The instance is instructed to look for the cache key that it was
1803 1813 informed about via the call to ``setcachekey()``.
1804 1814
1805 1815 If there's no cache hit or the cacher doesn't wish to use the cached
1806 1816 entry, ``None`` should be returned.
1807 1817
1808 1818 Else, a dict defining the cached result should be returned. The
1809 1819 dict may have the following keys:
1810 1820
1811 1821 objs
1812 1822 An iterable of objects that should be sent to the client. That
1813 1823 iterable of objects is expected to be what the command function
1814 1824 would return if invoked or an equivalent representation thereof.
1815 1825 """
1816 1826
1817 1827 def onobject(obj):
1818 1828 """Called when a new object is emitted from the command function.
1819 1829
1820 1830 Receives as its argument the object that was emitted from the
1821 1831 command function.
1822 1832
1823 1833 This method returns an iterator of objects to forward to the output
1824 1834 layer. The easiest implementation is a generator that just
1825 1835 ``yield obj``.
1826 1836 """
1827 1837
1828 1838 def onfinished():
1829 1839 """Called after all objects have been emitted from the command function.
1830 1840
1831 1841 Implementations should return an iterator of objects to forward to
1832 1842 the output layer.
1833 1843
1834 1844 This method can be a generator.
1835 1845 """
@@ -1,1331 +1,1345
1 1 # storage.py - Testing of storage primitives.
2 2 #
3 3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import unittest
11 11
12 12 from ..node import (
13 13 hex,
14 14 nullid,
15 15 nullrev,
16 16 )
17 17 from .. import (
18 18 error,
19 19 mdiff,
20 20 repository,
21 21 )
22 22 from ..utils import (
23 23 storageutil,
24 24 )
25 25
26 26 class basetestcase(unittest.TestCase):
27 27 if not getattr(unittest.TestCase, r'assertRaisesRegex', False):
28 28 assertRaisesRegex = (# camelcase-required
29 29 unittest.TestCase.assertRaisesRegexp)
30 30
31 31 class ifileindextests(basetestcase):
32 32 """Generic tests for the ifileindex interface.
33 33
34 34 All file storage backends for index data should conform to the tests in this
35 35 class.
36 36
37 37 Use ``makeifileindextests()`` to create an instance of this type.
38 38 """
39 39 def testempty(self):
40 40 f = self._makefilefn()
41 41 self.assertEqual(len(f), 0, 'new file store has 0 length by default')
42 42 self.assertEqual(list(f), [], 'iter yields nothing by default')
43 43
44 44 gen = iter(f)
45 45 with self.assertRaises(StopIteration):
46 46 next(gen)
47 47
48 self.assertFalse(f.hasnode(None))
49 self.assertFalse(f.hasnode(0))
50 self.assertFalse(f.hasnode(nullrev))
51 self.assertFalse(f.hasnode(nullid))
52 self.assertFalse(f.hasnode(b'0'))
53 self.assertFalse(f.hasnode(b'a' * 20))
54
48 55 # revs() should evaluate to an empty list.
49 56 self.assertEqual(list(f.revs()), [])
50 57
51 58 revs = iter(f.revs())
52 59 with self.assertRaises(StopIteration):
53 60 next(revs)
54 61
55 62 self.assertEqual(list(f.revs(start=20)), [])
56 63
57 64 # parents() and parentrevs() work with nullid/nullrev.
58 65 self.assertEqual(f.parents(nullid), (nullid, nullid))
59 66 self.assertEqual(f.parentrevs(nullrev), (nullrev, nullrev))
60 67
61 68 with self.assertRaises(error.LookupError):
62 69 f.parents(b'\x01' * 20)
63 70
64 71 for i in range(-5, 5):
65 72 if i == nullrev:
66 73 continue
67 74
68 75 with self.assertRaises(IndexError):
69 76 f.parentrevs(i)
70 77
71 78 # nullid/nullrev lookup always works.
72 79 self.assertEqual(f.rev(nullid), nullrev)
73 80 self.assertEqual(f.node(nullrev), nullid)
74 81
75 82 with self.assertRaises(error.LookupError):
76 83 f.rev(b'\x01' * 20)
77 84
78 85 for i in range(-5, 5):
79 86 if i == nullrev:
80 87 continue
81 88
82 89 with self.assertRaises(IndexError):
83 90 f.node(i)
84 91
85 92 self.assertEqual(f.lookup(nullid), nullid)
86 93 self.assertEqual(f.lookup(nullrev), nullid)
87 94 self.assertEqual(f.lookup(hex(nullid)), nullid)
88 95 self.assertEqual(f.lookup(b'%d' % nullrev), nullid)
89 96
90 97 with self.assertRaises(error.LookupError):
91 98 f.lookup(b'badvalue')
92 99
93 100 with self.assertRaises(error.LookupError):
94 101 f.lookup(hex(nullid)[0:12])
95 102
96 103 with self.assertRaises(error.LookupError):
97 104 f.lookup(b'-2')
98 105
99 106 with self.assertRaises(error.LookupError):
100 107 f.lookup(b'0')
101 108
102 109 with self.assertRaises(error.LookupError):
103 110 f.lookup(b'1')
104 111
105 112 with self.assertRaises(error.LookupError):
106 113 f.lookup(b'11111111111111111111111111111111111111')
107 114
108 115 for i in range(-5, 5):
109 116 if i == nullrev:
110 117 continue
111 118
112 119 with self.assertRaises(LookupError):
113 120 f.lookup(i)
114 121
115 122 self.assertEqual(f.linkrev(nullrev), nullrev)
116 123
117 124 for i in range(-5, 5):
118 125 if i == nullrev:
119 126 continue
120 127
121 128 with self.assertRaises(IndexError):
122 129 f.linkrev(i)
123 130
124 131 self.assertFalse(f.iscensored(nullrev))
125 132
126 133 for i in range(-5, 5):
127 134 if i == nullrev:
128 135 continue
129 136
130 137 with self.assertRaises(IndexError):
131 138 f.iscensored(i)
132 139
133 140 self.assertEqual(list(f.commonancestorsheads(nullid, nullid)), [])
134 141
135 142 with self.assertRaises(ValueError):
136 143 self.assertEqual(list(f.descendants([])), [])
137 144
138 145 self.assertEqual(list(f.descendants([nullrev])), [])
139 146
140 147 self.assertEqual(f.heads(), [nullid])
141 148 self.assertEqual(f.heads(nullid), [nullid])
142 149 self.assertEqual(f.heads(None, [nullid]), [nullid])
143 150 self.assertEqual(f.heads(nullid, [nullid]), [nullid])
144 151
145 152 self.assertEqual(f.children(nullid), [])
146 153
147 154 with self.assertRaises(error.LookupError):
148 155 f.children(b'\x01' * 20)
149 156
150 157 def testsinglerevision(self):
151 158 f = self._makefilefn()
152 159 with self._maketransactionfn() as tr:
153 160 node = f.add(b'initial', None, tr, 0, nullid, nullid)
154 161
155 162 self.assertEqual(len(f), 1)
156 163 self.assertEqual(list(f), [0])
157 164
158 165 gen = iter(f)
159 166 self.assertEqual(next(gen), 0)
160 167
161 168 with self.assertRaises(StopIteration):
162 169 next(gen)
163 170
171 self.assertTrue(f.hasnode(node))
172 self.assertFalse(f.hasnode(hex(node)))
173 self.assertFalse(f.hasnode(nullrev))
174 self.assertFalse(f.hasnode(nullid))
175 self.assertFalse(f.hasnode(node[0:12]))
176 self.assertFalse(f.hasnode(hex(node)[0:20]))
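# Note that hasnode() answers True only for a full 20-byte binary node
# known to the store; revision numbers, hex digests, nullid and node
# prefixes all answer False rather than raising.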
177
164 178 self.assertEqual(list(f.revs()), [0])
165 179 self.assertEqual(list(f.revs(start=1)), [])
166 180 self.assertEqual(list(f.revs(start=0)), [0])
167 181 self.assertEqual(list(f.revs(stop=0)), [0])
168 182 self.assertEqual(list(f.revs(stop=1)), [0])
169 183 self.assertEqual(list(f.revs(1, 1)), [])
170 184 # TODO buggy
171 185 self.assertEqual(list(f.revs(1, 0)), [1, 0])
172 186 self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
173 187
174 188 self.assertEqual(f.parents(node), (nullid, nullid))
175 189 self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
176 190
177 191 with self.assertRaises(error.LookupError):
178 192 f.parents(b'\x01' * 20)
179 193
180 194 with self.assertRaises(IndexError):
181 195 f.parentrevs(1)
182 196
183 197 self.assertEqual(f.rev(node), 0)
184 198
185 199 with self.assertRaises(error.LookupError):
186 200 f.rev(b'\x01' * 20)
187 201
188 202 self.assertEqual(f.node(0), node)
189 203
190 204 with self.assertRaises(IndexError):
191 205 f.node(1)
192 206
193 207 self.assertEqual(f.lookup(node), node)
194 208 self.assertEqual(f.lookup(0), node)
195 209 self.assertEqual(f.lookup(-1), nullid)
196 210 self.assertEqual(f.lookup(b'0'), node)
197 211 self.assertEqual(f.lookup(hex(node)), node)
198 212
199 213 with self.assertRaises(error.LookupError):
200 214 f.lookup(hex(node)[0:12])
201 215
202 216 with self.assertRaises(error.LookupError):
203 217 f.lookup(-2)
204 218
205 219 with self.assertRaises(error.LookupError):
206 220 f.lookup(b'-2')
207 221
208 222 with self.assertRaises(error.LookupError):
209 223 f.lookup(1)
210 224
211 225 with self.assertRaises(error.LookupError):
212 226 f.lookup(b'1')
213 227
214 228 self.assertEqual(f.linkrev(0), 0)
215 229
216 230 with self.assertRaises(IndexError):
217 231 f.linkrev(1)
218 232
219 233 self.assertFalse(f.iscensored(0))
220 234
221 235 with self.assertRaises(IndexError):
222 236 f.iscensored(1)
223 237
224 238 self.assertEqual(list(f.descendants([0])), [])
225 239
226 240 self.assertEqual(f.heads(), [node])
227 241 self.assertEqual(f.heads(node), [node])
228 242 self.assertEqual(f.heads(stop=[node]), [node])
229 243
230 244 with self.assertRaises(error.LookupError):
231 245 f.heads(stop=[b'\x01' * 20])
232 246
233 247 self.assertEqual(f.children(node), [])
234 248
235 249 def testmultiplerevisions(self):
236 250 fulltext0 = b'x' * 1024
237 251 fulltext1 = fulltext0 + b'y'
238 252 fulltext2 = b'y' + fulltext0 + b'z'
239 253
240 254 f = self._makefilefn()
241 255 with self._maketransactionfn() as tr:
242 256 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
243 257 node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
244 258 node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
245 259
246 260 self.assertEqual(len(f), 3)
247 261 self.assertEqual(list(f), [0, 1, 2])
248 262
249 263 gen = iter(f)
250 264 self.assertEqual(next(gen), 0)
251 265 self.assertEqual(next(gen), 1)
252 266 self.assertEqual(next(gen), 2)
253 267
254 268 with self.assertRaises(StopIteration):
255 269 next(gen)
256 270
257 271 self.assertEqual(list(f.revs()), [0, 1, 2])
258 272 self.assertEqual(list(f.revs(0)), [0, 1, 2])
259 273 self.assertEqual(list(f.revs(1)), [1, 2])
260 274 self.assertEqual(list(f.revs(2)), [2])
261 275 self.assertEqual(list(f.revs(3)), [])
262 276 self.assertEqual(list(f.revs(stop=1)), [0, 1])
263 277 self.assertEqual(list(f.revs(stop=2)), [0, 1, 2])
264 278 self.assertEqual(list(f.revs(stop=3)), [0, 1, 2])
265 279 self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
266 280 self.assertEqual(list(f.revs(2, 1)), [2, 1])
267 281 # TODO this is wrong
268 282 self.assertEqual(list(f.revs(3, 2)), [3, 2])
269 283
270 284 self.assertEqual(f.parents(node0), (nullid, nullid))
271 285 self.assertEqual(f.parents(node1), (node0, nullid))
272 286 self.assertEqual(f.parents(node2), (node1, nullid))
273 287
274 288 self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
275 289 self.assertEqual(f.parentrevs(1), (0, nullrev))
276 290 self.assertEqual(f.parentrevs(2), (1, nullrev))
277 291
278 292 self.assertEqual(f.rev(node0), 0)
279 293 self.assertEqual(f.rev(node1), 1)
280 294 self.assertEqual(f.rev(node2), 2)
281 295
282 296 with self.assertRaises(error.LookupError):
283 297 f.rev(b'\x01' * 20)
284 298
285 299 self.assertEqual(f.node(0), node0)
286 300 self.assertEqual(f.node(1), node1)
287 301 self.assertEqual(f.node(2), node2)
288 302
289 303 with self.assertRaises(IndexError):
290 304 f.node(3)
291 305
292 306 self.assertEqual(f.lookup(node0), node0)
293 307 self.assertEqual(f.lookup(0), node0)
294 308 self.assertEqual(f.lookup(b'0'), node0)
295 309 self.assertEqual(f.lookup(hex(node0)), node0)
296 310
297 311 self.assertEqual(f.lookup(node1), node1)
298 312 self.assertEqual(f.lookup(1), node1)
299 313 self.assertEqual(f.lookup(b'1'), node1)
300 314 self.assertEqual(f.lookup(hex(node1)), node1)
301 315
302 316 self.assertEqual(f.linkrev(0), 0)
303 317 self.assertEqual(f.linkrev(1), 1)
304 318 self.assertEqual(f.linkrev(2), 3)
305 319
306 320 with self.assertRaises(IndexError):
307 321 f.linkrev(3)
308 322
309 323 self.assertFalse(f.iscensored(0))
310 324 self.assertFalse(f.iscensored(1))
311 325 self.assertFalse(f.iscensored(2))
312 326
313 327 with self.assertRaises(IndexError):
314 328 f.iscensored(3)
315 329
316 330 self.assertEqual(f.commonancestorsheads(node1, nullid), [])
317 331 self.assertEqual(f.commonancestorsheads(node1, node0), [node0])
318 332 self.assertEqual(f.commonancestorsheads(node1, node1), [node1])
319 333 self.assertEqual(f.commonancestorsheads(node0, node1), [node0])
320 334 self.assertEqual(f.commonancestorsheads(node1, node2), [node1])
321 335 self.assertEqual(f.commonancestorsheads(node2, node1), [node1])
322 336
323 337 self.assertEqual(list(f.descendants([0])), [1, 2])
324 338 self.assertEqual(list(f.descendants([1])), [2])
325 339 self.assertEqual(list(f.descendants([0, 1])), [1, 2])
326 340
327 341 self.assertEqual(f.heads(), [node2])
328 342 self.assertEqual(f.heads(node0), [node2])
329 343 self.assertEqual(f.heads(node1), [node2])
330 344 self.assertEqual(f.heads(node2), [node2])
331 345
332 346 # TODO this behavior seems wonky. Is it correct? If so, the
333 347 # docstring for heads() should be updated to reflect desired
334 348 # behavior.
335 349 self.assertEqual(f.heads(stop=[node1]), [node1, node2])
336 350 self.assertEqual(f.heads(stop=[node0]), [node0, node2])
337 351 self.assertEqual(f.heads(stop=[node1, node2]), [node1, node2])
338 352
339 353 with self.assertRaises(error.LookupError):
340 354 f.heads(stop=[b'\x01' * 20])
341 355
342 356 self.assertEqual(f.children(node0), [node1])
343 357 self.assertEqual(f.children(node1), [node2])
344 358 self.assertEqual(f.children(node2), [])
345 359
346 360 def testmultipleheads(self):
347 361 f = self._makefilefn()
348 362
349 363 with self._maketransactionfn() as tr:
350 364 node0 = f.add(b'0', None, tr, 0, nullid, nullid)
351 365 node1 = f.add(b'1', None, tr, 1, node0, nullid)
352 366 node2 = f.add(b'2', None, tr, 2, node1, nullid)
353 367 node3 = f.add(b'3', None, tr, 3, node0, nullid)
354 368 node4 = f.add(b'4', None, tr, 4, node3, nullid)
355 369 node5 = f.add(b'5', None, tr, 5, node0, nullid)
356 370
357 371 self.assertEqual(len(f), 6)
358 372
359 373 self.assertEqual(list(f.descendants([0])), [1, 2, 3, 4, 5])
360 374 self.assertEqual(list(f.descendants([1])), [2])
361 375 self.assertEqual(list(f.descendants([2])), [])
362 376 self.assertEqual(list(f.descendants([3])), [4])
363 377 self.assertEqual(list(f.descendants([0, 1])), [1, 2, 3, 4, 5])
364 378 self.assertEqual(list(f.descendants([1, 3])), [2, 4])
365 379
366 380 self.assertEqual(f.heads(), [node2, node4, node5])
367 381 self.assertEqual(f.heads(node0), [node2, node4, node5])
368 382 self.assertEqual(f.heads(node1), [node2])
369 383 self.assertEqual(f.heads(node2), [node2])
370 384 self.assertEqual(f.heads(node3), [node4])
371 385 self.assertEqual(f.heads(node4), [node4])
372 386 self.assertEqual(f.heads(node5), [node5])
373 387
374 388 # TODO this seems wrong.
375 389 self.assertEqual(f.heads(stop=[node0]), [node0, node2, node4, node5])
376 390 self.assertEqual(f.heads(stop=[node1]), [node1, node2, node4, node5])
377 391
378 392 self.assertEqual(f.children(node0), [node1, node3, node5])
379 393 self.assertEqual(f.children(node1), [node2])
380 394 self.assertEqual(f.children(node2), [])
381 395 self.assertEqual(f.children(node3), [node4])
382 396 self.assertEqual(f.children(node4), [])
383 397 self.assertEqual(f.children(node5), [])
384 398
385 399 class ifiledatatests(basetestcase):
386 400 """Generic tests for the ifiledata interface.
387 401
388 402 All file storage backends for data should conform to the tests in this
389 403 class.
390 404
391 405 Use ``makeifiledatatests()`` to create an instance of this type.
392 406 """
393 407 def testempty(self):
394 408 f = self._makefilefn()
395 409
396 410 self.assertEqual(f.storageinfo(), {})
397 411 self.assertEqual(f.storageinfo(revisionscount=True, trackedsize=True),
398 412 {'revisionscount': 0, 'trackedsize': 0})
399 413
400 414 self.assertEqual(f.size(nullrev), 0)
401 415
402 416 for i in range(-5, 5):
403 417 if i == nullrev:
404 418 continue
405 419
406 420 with self.assertRaises(IndexError):
407 421 f.size(i)
408 422
409 423 self.assertEqual(f.revision(nullid), b'')
410 424 self.assertEqual(f.revision(nullid, raw=True), b'')
411 425
412 426 with self.assertRaises(error.LookupError):
413 427 f.revision(b'\x01' * 20)
414 428
415 429 self.assertEqual(f.read(nullid), b'')
416 430
417 431 with self.assertRaises(error.LookupError):
418 432 f.read(b'\x01' * 20)
419 433
420 434 self.assertFalse(f.renamed(nullid))
421 435
422 436 with self.assertRaises(error.LookupError):
423 437 f.read(b'\x01' * 20)
424 438
425 439 self.assertTrue(f.cmp(nullid, b''))
426 440 self.assertTrue(f.cmp(nullid, b'foo'))
427 441
428 442 with self.assertRaises(error.LookupError):
429 443 f.cmp(b'\x01' * 20, b'irrelevant')
430 444
431 445 # Emitting empty list is an empty generator.
432 446 gen = f.emitrevisions([])
433 447 with self.assertRaises(StopIteration):
434 448 next(gen)
435 449
436 450 # Emitting null node yields nothing.
437 451 gen = f.emitrevisions([nullid])
438 452 with self.assertRaises(StopIteration):
439 453 next(gen)
440 454
441 455 # Requesting unknown node fails.
442 456 with self.assertRaises(error.LookupError):
443 457 list(f.emitrevisions([b'\x01' * 20]))
444 458
445 459 def testsinglerevision(self):
446 460 fulltext = b'initial'
447 461
448 462 f = self._makefilefn()
449 463 with self._maketransactionfn() as tr:
450 464 node = f.add(fulltext, None, tr, 0, nullid, nullid)
451 465
452 466 self.assertEqual(f.storageinfo(), {})
453 467 self.assertEqual(f.storageinfo(revisionscount=True, trackedsize=True),
454 468 {'revisionscount': 1, 'trackedsize': len(fulltext)})
455 469
456 470 self.assertEqual(f.size(0), len(fulltext))
457 471
458 472 with self.assertRaises(IndexError):
459 473 f.size(1)
460 474
461 475 self.assertEqual(f.revision(node), fulltext)
462 476 self.assertEqual(f.revision(node, raw=True), fulltext)
463 477
464 478 self.assertEqual(f.read(node), fulltext)
465 479
466 480 self.assertFalse(f.renamed(node))
467 481
468 482 self.assertFalse(f.cmp(node, fulltext))
469 483 self.assertTrue(f.cmp(node, fulltext + b'extra'))
470 484
471 485 # Emitting a single revision works.
472 486 gen = f.emitrevisions([node])
473 487 rev = next(gen)
474 488
475 489 self.assertEqual(rev.node, node)
476 490 self.assertEqual(rev.p1node, nullid)
477 491 self.assertEqual(rev.p2node, nullid)
478 492 self.assertIsNone(rev.linknode)
479 493 self.assertEqual(rev.basenode, nullid)
480 494 self.assertIsNone(rev.baserevisionsize)
481 495 self.assertIsNone(rev.revision)
482 496 self.assertIsNone(rev.delta)
483 497
484 498 with self.assertRaises(StopIteration):
485 499 next(gen)
486 500
487 501 # Requesting revision data works.
488 502 gen = f.emitrevisions([node], revisiondata=True)
489 503 rev = next(gen)
490 504
491 505 self.assertEqual(rev.node, node)
492 506 self.assertEqual(rev.p1node, nullid)
493 507 self.assertEqual(rev.p2node, nullid)
494 508 self.assertIsNone(rev.linknode)
495 509 self.assertEqual(rev.basenode, nullid)
496 510 self.assertIsNone(rev.baserevisionsize)
497 511 self.assertEqual(rev.revision, fulltext)
498 512 self.assertIsNone(rev.delta)
499 513
500 514 with self.assertRaises(StopIteration):
501 515 next(gen)
502 516
503 517 # Emitting an unknown node after a known revision results in error.
504 518 with self.assertRaises(error.LookupError):
505 519 list(f.emitrevisions([node, b'\x01' * 20]))
506 520
507 521 def testmultiplerevisions(self):
508 522 fulltext0 = b'x' * 1024
509 523 fulltext1 = fulltext0 + b'y'
510 524 fulltext2 = b'y' + fulltext0 + b'z'
511 525
512 526 f = self._makefilefn()
513 527 with self._maketransactionfn() as tr:
514 528 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
515 529 node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
516 530 node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
517 531
518 532 self.assertEqual(f.storageinfo(), {})
519 533 self.assertEqual(
520 534 f.storageinfo(revisionscount=True, trackedsize=True),
521 535 {
522 536 'revisionscount': 3,
523 537 'trackedsize': len(fulltext0) + len(fulltext1) + len(fulltext2),
524 538 })
525 539
526 540 self.assertEqual(f.size(0), len(fulltext0))
527 541 self.assertEqual(f.size(1), len(fulltext1))
528 542 self.assertEqual(f.size(2), len(fulltext2))
529 543
530 544 with self.assertRaises(IndexError):
531 545 f.size(3)
532 546
533 547 self.assertEqual(f.revision(node0), fulltext0)
534 548 self.assertEqual(f.revision(node0, raw=True), fulltext0)
535 549 self.assertEqual(f.revision(node1), fulltext1)
536 550 self.assertEqual(f.revision(node1, raw=True), fulltext1)
537 551 self.assertEqual(f.revision(node2), fulltext2)
538 552 self.assertEqual(f.revision(node2, raw=True), fulltext2)
539 553
540 554 with self.assertRaises(error.LookupError):
541 555 f.revision(b'\x01' * 20)
542 556
543 557 self.assertEqual(f.read(node0), fulltext0)
544 558 self.assertEqual(f.read(node1), fulltext1)
545 559 self.assertEqual(f.read(node2), fulltext2)
546 560
547 561 with self.assertRaises(error.LookupError):
548 562 f.read(b'\x01' * 20)
549 563
550 564 self.assertFalse(f.renamed(node0))
551 565 self.assertFalse(f.renamed(node1))
552 566 self.assertFalse(f.renamed(node2))
553 567
554 568 with self.assertRaises(error.LookupError):
555 569 f.renamed(b'\x01' * 20)
556 570
557 571 self.assertFalse(f.cmp(node0, fulltext0))
558 572 self.assertFalse(f.cmp(node1, fulltext1))
559 573 self.assertFalse(f.cmp(node2, fulltext2))
560 574
561 575 self.assertTrue(f.cmp(node1, fulltext0))
562 576 self.assertTrue(f.cmp(node2, fulltext1))
563 577
564 578 with self.assertRaises(error.LookupError):
565 579 f.cmp(b'\x01' * 20, b'irrelevant')
566 580
567 581 # Nodes should be emitted in order.
568 582 gen = f.emitrevisions([node0, node1, node2], revisiondata=True)
569 583
570 584 rev = next(gen)
571 585
572 586 self.assertEqual(rev.node, node0)
573 587 self.assertEqual(rev.p1node, nullid)
574 588 self.assertEqual(rev.p2node, nullid)
575 589 self.assertIsNone(rev.linknode)
576 590 self.assertEqual(rev.basenode, nullid)
577 591 self.assertIsNone(rev.baserevisionsize)
578 592 self.assertEqual(rev.revision, fulltext0)
579 593 self.assertIsNone(rev.delta)
580 594
581 595 rev = next(gen)
582 596
583 597 self.assertEqual(rev.node, node1)
584 598 self.assertEqual(rev.p1node, node0)
585 599 self.assertEqual(rev.p2node, nullid)
586 600 self.assertIsNone(rev.linknode)
587 601 self.assertEqual(rev.basenode, node0)
588 602 self.assertIsNone(rev.baserevisionsize)
589 603 self.assertIsNone(rev.revision)
590 604 self.assertEqual(rev.delta,
591 605 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
592 606 fulltext1)
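# The delta above is a bdiff hunk: three big-endian 32-bit integers
# giving the start offset (0x0) and end offset (0x400) of the replaced
# range in the base text, then the length of the new data (0x401),
# followed by the replacement bytes -- i.e. all 1024 bytes of fulltext0
# are replaced by the 1025 bytes of fulltext1.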
593 607
594 608 rev = next(gen)
595 609
596 610 self.assertEqual(rev.node, node2)
597 611 self.assertEqual(rev.p1node, node1)
598 612 self.assertEqual(rev.p2node, nullid)
599 613 self.assertIsNone(rev.linknode)
600 614 self.assertEqual(rev.basenode, node1)
601 615 self.assertIsNone(rev.baserevisionsize)
602 616 self.assertIsNone(rev.revision)
603 617 self.assertEqual(rev.delta,
604 618 b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
605 619 fulltext2)
606 620
607 621 with self.assertRaises(StopIteration):
608 622 next(gen)
609 623
610 624 # Request not in DAG order is reordered to be in DAG order.
611 625 gen = f.emitrevisions([node2, node1, node0], revisiondata=True)
612 626
613 627 rev = next(gen)
614 628
615 629 self.assertEqual(rev.node, node0)
616 630 self.assertEqual(rev.p1node, nullid)
617 631 self.assertEqual(rev.p2node, nullid)
618 632 self.assertIsNone(rev.linknode)
619 633 self.assertEqual(rev.basenode, nullid)
620 634 self.assertIsNone(rev.baserevisionsize)
621 635 self.assertEqual(rev.revision, fulltext0)
622 636 self.assertIsNone(rev.delta)
623 637
624 638 rev = next(gen)
625 639
626 640 self.assertEqual(rev.node, node1)
627 641 self.assertEqual(rev.p1node, node0)
628 642 self.assertEqual(rev.p2node, nullid)
629 643 self.assertIsNone(rev.linknode)
630 644 self.assertEqual(rev.basenode, node0)
631 645 self.assertIsNone(rev.baserevisionsize)
632 646 self.assertIsNone(rev.revision)
633 647 self.assertEqual(rev.delta,
634 648 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
635 649 fulltext1)
636 650
637 651 rev = next(gen)
638 652
639 653 self.assertEqual(rev.node, node2)
640 654 self.assertEqual(rev.p1node, node1)
641 655 self.assertEqual(rev.p2node, nullid)
642 656 self.assertIsNone(rev.linknode)
643 657 self.assertEqual(rev.basenode, node1)
644 658 self.assertIsNone(rev.baserevisionsize)
645 659 self.assertIsNone(rev.revision)
646 660 self.assertEqual(rev.delta,
647 661 b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
648 662 fulltext2)
649 663
650 664 with self.assertRaises(StopIteration):
651 665 next(gen)
652 666
653 667 # Unrecognized nodesorder value raises ProgrammingError.
654 668 with self.assertRaises(error.ProgrammingError):
655 669 list(f.emitrevisions([], nodesorder='bad'))
656 670
657 671 # nodesorder=storage is recognized. But we can't test it thoroughly
658 672 # because behavior is storage-dependent.
659 673 res = list(f.emitrevisions([node2, node1, node0],
660 674 nodesorder='storage'))
661 675 self.assertEqual(len(res), 3)
662 676 self.assertEqual({o.node for o in res}, {node0, node1, node2})
663 677
664 678 # nodesorder=nodes forces the order.
665 679 gen = f.emitrevisions([node2, node0], nodesorder='nodes',
666 680 revisiondata=True)
667 681
668 682 rev = next(gen)
669 683 self.assertEqual(rev.node, node2)
670 684 self.assertEqual(rev.p1node, node1)
671 685 self.assertEqual(rev.p2node, nullid)
672 686 self.assertEqual(rev.basenode, nullid)
673 687 self.assertIsNone(rev.baserevisionsize)
674 688 self.assertEqual(rev.revision, fulltext2)
675 689 self.assertIsNone(rev.delta)
676 690
677 691 rev = next(gen)
678 692 self.assertEqual(rev.node, node0)
679 693 self.assertEqual(rev.p1node, nullid)
680 694 self.assertEqual(rev.p2node, nullid)
681 695 # Delta behavior is storage dependent, so we can't easily test it.
682 696
683 697 with self.assertRaises(StopIteration):
684 698 next(gen)
685 699
686 700 # assumehaveparentrevisions=False (the default) won't send a delta for
687 701 # the first revision.
688 702 gen = f.emitrevisions({node2, node1}, revisiondata=True)
689 703
690 704 rev = next(gen)
691 705 self.assertEqual(rev.node, node1)
692 706 self.assertEqual(rev.p1node, node0)
693 707 self.assertEqual(rev.p2node, nullid)
694 708 self.assertEqual(rev.basenode, nullid)
695 709 self.assertIsNone(rev.baserevisionsize)
696 710 self.assertEqual(rev.revision, fulltext1)
697 711 self.assertIsNone(rev.delta)
698 712
699 713 rev = next(gen)
700 714 self.assertEqual(rev.node, node2)
701 715 self.assertEqual(rev.p1node, node1)
702 716 self.assertEqual(rev.p2node, nullid)
703 717 self.assertEqual(rev.basenode, node1)
704 718 self.assertIsNone(rev.baserevisionsize)
705 719 self.assertIsNone(rev.revision)
706 720 self.assertEqual(rev.delta,
707 721 b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
708 722 fulltext2)
709 723
710 724 with self.assertRaises(StopIteration):
711 725 next(gen)
712 726
713 727 # assumehaveparentrevisions=True allows delta against initial revision.
714 728 gen = f.emitrevisions([node2, node1],
715 729 revisiondata=True, assumehaveparentrevisions=True)
716 730
717 731 rev = next(gen)
718 732 self.assertEqual(rev.node, node1)
719 733 self.assertEqual(rev.p1node, node0)
720 734 self.assertEqual(rev.p2node, nullid)
721 735 self.assertEqual(rev.basenode, node0)
722 736 self.assertIsNone(rev.baserevisionsize)
723 737 self.assertIsNone(rev.revision)
724 738 self.assertEqual(rev.delta,
725 739 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
726 740 fulltext1)
727 741
728 742 # deltaprevious=True forces a delta against the previous revision.
729 743 # Special case for initial revision.
730 744 gen = f.emitrevisions([node0], revisiondata=True, deltaprevious=True)
731 745
732 746 rev = next(gen)
733 747 self.assertEqual(rev.node, node0)
734 748 self.assertEqual(rev.p1node, nullid)
735 749 self.assertEqual(rev.p2node, nullid)
736 750 self.assertEqual(rev.basenode, nullid)
737 751 self.assertIsNone(rev.baserevisionsize)
738 752 self.assertIsNone(rev.revision)
739 753 self.assertEqual(rev.delta,
740 754 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00' +
741 755 fulltext0)
742 756
743 757 with self.assertRaises(StopIteration):
744 758 next(gen)
745 759
746 760 gen = f.emitrevisions([node0, node2], revisiondata=True,
747 761 deltaprevious=True)
748 762
749 763 rev = next(gen)
750 764 self.assertEqual(rev.node, node0)
751 765 self.assertEqual(rev.p1node, nullid)
752 766 self.assertEqual(rev.p2node, nullid)
753 767 self.assertEqual(rev.basenode, nullid)
754 768 self.assertIsNone(rev.baserevisionsize)
755 769 self.assertIsNone(rev.revision)
756 770 self.assertEqual(rev.delta,
757 771 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00' +
758 772 fulltext0)
759 773
760 774 rev = next(gen)
761 775 self.assertEqual(rev.node, node2)
762 776 self.assertEqual(rev.p1node, node1)
763 777 self.assertEqual(rev.p2node, nullid)
764 778 self.assertEqual(rev.basenode, node0)
765 779
766 780 with self.assertRaises(StopIteration):
767 781 next(gen)
768 782
769 783 def testrenamed(self):
770 784 fulltext0 = b'foo'
771 785 fulltext1 = b'bar'
772 786 fulltext2 = b'baz'
773 787
774 788 meta1 = {
775 789 b'copy': b'source0',
776 790 b'copyrev': b'a' * 40,
777 791 }
778 792
779 793 meta2 = {
780 794 b'copy': b'source1',
781 795 b'copyrev': b'b' * 40,
782 796 }
783 797
784 798 stored1 = b''.join([
785 799 b'\x01\ncopy: source0\n',
786 800 b'copyrev: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n\x01\n',
787 801 fulltext1,
788 802 ])
789 803
790 804 stored2 = b''.join([
791 805 b'\x01\ncopy: source1\n',
792 806 b'copyrev: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n\x01\n',
793 807 fulltext2,
794 808 ])
795 809
796 810 f = self._makefilefn()
797 811 with self._maketransactionfn() as tr:
798 812 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
799 813 node1 = f.add(fulltext1, meta1, tr, 1, node0, nullid)
800 814 node2 = f.add(fulltext2, meta2, tr, 2, nullid, nullid)
801 815
802 816 # Metadata header isn't recognized when parent isn't nullid.
803 817 self.assertEqual(f.size(1), len(stored1))
804 818 self.assertEqual(f.size(2), len(fulltext2))
805 819
806 820 self.assertEqual(f.revision(node1), stored1)
807 821 self.assertEqual(f.revision(node1, raw=True), stored1)
808 822 self.assertEqual(f.revision(node2), stored2)
809 823 self.assertEqual(f.revision(node2, raw=True), stored2)
810 824
811 825 self.assertEqual(f.read(node1), fulltext1)
812 826 self.assertEqual(f.read(node2), fulltext2)
813 827
814 828 # Returns False when first parent is set.
815 829 self.assertFalse(f.renamed(node1))
816 830 self.assertEqual(f.renamed(node2), (b'source1', b'\xbb' * 20))
817 831
818 832 self.assertTrue(f.cmp(node1, fulltext1))
819 833 self.assertTrue(f.cmp(node1, stored1))
820 834 self.assertFalse(f.cmp(node2, fulltext2))
821 835 self.assertTrue(f.cmp(node2, stored2))
822 836
823 837 def testmetadataprefix(self):
824 838 # Content with metadata prefix has extra prefix inserted in storage.
825 839 fulltext0 = b'\x01\nfoo'
826 840 stored0 = b'\x01\n\x01\n\x01\nfoo'
827 841
828 842 fulltext1 = b'\x01\nbar'
829 843 meta1 = {
830 844 b'copy': b'source0',
831 845 b'copyrev': b'b' * 40,
832 846 }
833 847 stored1 = b''.join([
834 848 b'\x01\ncopy: source0\n',
835 849 b'copyrev: %s\n' % (b'b' * 40),
836 850 b'\x01\n\x01\nbar',
837 851 ])
838 852
839 853 f = self._makefilefn()
840 854 with self._maketransactionfn() as tr:
841 855 node0 = f.add(fulltext0, {}, tr, 0, nullid, nullid)
842 856 node1 = f.add(fulltext1, meta1, tr, 1, nullid, nullid)
843 857
844 858 # TODO this is buggy.
845 859 self.assertEqual(f.size(0), len(fulltext0) + 4)
846 860
847 861 self.assertEqual(f.size(1), len(fulltext1))
848 862
849 863 self.assertEqual(f.revision(node0), stored0)
850 864 self.assertEqual(f.revision(node0, raw=True), stored0)
851 865
852 866 self.assertEqual(f.revision(node1), stored1)
853 867 self.assertEqual(f.revision(node1, raw=True), stored1)
854 868
855 869 self.assertEqual(f.read(node0), fulltext0)
856 870 self.assertEqual(f.read(node1), fulltext1)
857 871
858 872 self.assertFalse(f.cmp(node0, fulltext0))
859 873 self.assertTrue(f.cmp(node0, stored0))
860 874
861 875 self.assertFalse(f.cmp(node1, fulltext1))
862 876 self.assertTrue(f.cmp(node1, stored0))
863 877
864 878 def testbadnoderead(self):
865 879 f = self._makefilefn()
866 880
867 881 fulltext0 = b'foo\n' * 30
868 882 fulltext1 = fulltext0 + b'bar\n'
869 883
870 884 with self._maketransactionfn() as tr:
871 885 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
872 886 node1 = b'\xaa' * 20
873 887
874 888 self._addrawrevisionfn(f, tr, node1, node0, nullid, 1,
875 889 rawtext=fulltext1)
876 890
877 891 self.assertEqual(len(f), 2)
878 892 self.assertEqual(f.parents(node1), (node0, nullid))
879 893
880 894 # revision() raises since it performs hash verification.
881 895 with self.assertRaises(error.StorageError):
882 896 f.revision(node1)
883 897
884 898 # raw=True still verifies because there are no special storage
885 899 # settings.
886 900 with self.assertRaises(error.StorageError):
887 901 f.revision(node1, raw=True)
888 902
889 903 # read() behaves like revision().
890 904 with self.assertRaises(error.StorageError):
891 905 f.read(node1)
892 906
893 907 # We can't test renamed() here because some backends may not require
894 908 # reading/validating the fulltext to return rename metadata.
895 909
896 910 def testbadnoderevisionraw(self):
897 911 # Like above except we test revision(raw=True) first to isolate
898 912 # revision caching behavior.
899 913 f = self._makefilefn()
900 914
901 915 fulltext0 = b'foo\n' * 30
902 916 fulltext1 = fulltext0 + b'bar\n'
903 917
904 918 with self._maketransactionfn() as tr:
905 919 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
906 920 node1 = b'\xaa' * 20
907 921
908 922 self._addrawrevisionfn(f, tr, node1, node0, nullid, 1,
909 923 rawtext=fulltext1)
910 924
911 925 with self.assertRaises(error.StorageError):
912 926 f.revision(node1, raw=True)
913 927
914 928 with self.assertRaises(error.StorageError):
915 929 f.revision(node1, raw=True)
916 930
917 931 def testbadnodereadfirst(self):
918 932 # Like above except we test read() first to isolate revision caching
919 933 # behavior.
920 934 f = self._makefilefn()
921 935
922 936 fulltext0 = b'foo\n' * 30
923 937 fulltext1 = fulltext0 + b'bar\n'
924 938
925 939 with self._maketransactionfn() as tr:
926 940 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
927 941 node1 = b'\xaa' * 20
928 942
929 943 self._addrawrevisionfn(f, tr, node1, node0, nullid, 1,
930 944 rawtext=fulltext1)
931 945
932 946 with self.assertRaises(error.StorageError):
933 947 f.read(node1)
934 948
935 949 with self.assertRaises(error.StorageError):
936 950 f.read(node1)
937 951
938 952 def testbadnodedelta(self):
939 953 f = self._makefilefn()
940 954
941 955 fulltext0 = b'foo\n' * 31
942 956 fulltext1 = fulltext0 + b'bar\n'
943 957 fulltext2 = fulltext1 + b'baz\n'
944 958
945 959 with self._maketransactionfn() as tr:
946 960 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
947 961 node1 = b'\xaa' * 20
948 962
949 963 self._addrawrevisionfn(f, tr, node1, node0, nullid, 1,
950 964 rawtext=fulltext1)
951 965
952 966 with self.assertRaises(error.StorageError):
953 967 f.read(node1)
954 968
955 969 node2 = storageutil.hashrevisionsha1(fulltext2, node1, nullid)
956 970
957 971 with self._maketransactionfn() as tr:
958 972 delta = mdiff.textdiff(fulltext1, fulltext2)
959 973 self._addrawrevisionfn(f, tr, node2, node1, nullid,
960 974 2, delta=(1, delta))
961 975
962 976 self.assertEqual(len(f), 3)
963 977
964 978 # Assuming a delta is stored, we shouldn't need to validate node1 in
965 979 # order to retrieve node2.
966 980 self.assertEqual(f.read(node2), fulltext2)
967 981
968 982 def testcensored(self):
969 983 f = self._makefilefn()
970 984
971 985 stored1 = storageutil.packmeta({
972 986 b'censored': b'tombstone',
973 987 }, b'')
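# packmeta() wraps the metadata in the same \x01\n envelope shown in
# testrenamed() above, so stored1 should be
# b'\x01\ncensored: tombstone\n\x01\n'.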
974 988
975 989 with self._maketransactionfn() as tr:
976 990 node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
977 991
978 992 # The node value doesn't matter since we can't verify it.
979 993 node1 = b'\xbb' * 20
980 994
981 995 self._addrawrevisionfn(f, tr, node1, node0, nullid, 1, stored1,
982 996 censored=True)
983 997
984 998 self.assertTrue(f.iscensored(1))
985 999
986 1000 with self.assertRaises(error.CensoredNodeError):
987 1001 f.revision(1)
988 1002
989 1003 with self.assertRaises(error.CensoredNodeError):
990 1004 f.revision(1, raw=True)
991 1005
992 1006 with self.assertRaises(error.CensoredNodeError):
993 1007 f.read(1)
994 1008
995 1009 def testcensoredrawrevision(self):
996 1010 # Like above, except we do the revision(raw=True) request first to
997 1011 # isolate revision caching behavior.
998 1012
999 1013 f = self._makefilefn()
1000 1014
1001 1015 stored1 = storageutil.packmeta({
1002 1016 b'censored': b'tombstone',
1003 1017 }, b'')
1004 1018
1005 1019 with self._maketransactionfn() as tr:
1006 1020 node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
1007 1021
1008 1022 # The node value doesn't matter since we can't verify it.
1009 1023 node1 = b'\xbb' * 20
1010 1024
1011 1025 self._addrawrevisionfn(f, tr, node1, node0, nullid, 1, stored1,
1012 1026 censored=True)
1013 1027
1014 1028 with self.assertRaises(error.CensoredNodeError):
1015 1029 f.revision(1, raw=True)
1016 1030
1017 1031 class ifilemutationtests(basetestcase):
1018 1032 """Generic tests for the ifilemutation interface.
1019 1033
1020 1034 All file storage backends that support writing should conform to this
1021 1035 interface.
1022 1036
1023 1037 Use ``makeifilemutationtests()`` to create an instance of this type.
1024 1038 """
1025 1039 def testaddnoop(self):
1026 1040 f = self._makefilefn()
1027 1041 with self._maketransactionfn() as tr:
1028 1042 node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
1029 1043 node1 = f.add(b'foo', None, tr, 0, nullid, nullid)
1030 1044 # Varying by linkrev shouldn't impact hash.
1031 1045 node2 = f.add(b'foo', None, tr, 1, nullid, nullid)
1032 1046
1033 1047 self.assertEqual(node1, node0)
1034 1048 self.assertEqual(node2, node0)
1035 1049 self.assertEqual(len(f), 1)
1036 1050
1037 1051 def testaddrevisionbadnode(self):
1038 1052 f = self._makefilefn()
1039 1053 with self._maketransactionfn() as tr:
1040 1054 # Adding a revision with bad node value fails.
1041 1055 with self.assertRaises(error.StorageError):
1042 1056 f.addrevision(b'foo', tr, 0, nullid, nullid, node=b'\x01' * 20)
1043 1057
1044 1058 def testaddrevisionunknownflag(self):
1045 1059 f = self._makefilefn()
1046 1060 with self._maketransactionfn() as tr:
1047 1061 for i in range(15, 0, -1):
1048 1062 if (1 << i) & ~repository.REVISION_FLAGS_KNOWN:
1049 1063 flags = 1 << i
1050 1064 break
1051 1065
1052 1066 with self.assertRaises(error.StorageError):
1053 1067 f.addrevision(b'foo', tr, 0, nullid, nullid, flags=flags)
1054 1068
1055 1069 def testaddgroupsimple(self):
1056 1070 f = self._makefilefn()
1057 1071
1058 1072 callbackargs = []
1059 1073 def cb(*args, **kwargs):
1060 1074 callbackargs.append((args, kwargs))
1061 1075
1062 1076 def linkmapper(node):
1063 1077 return 0
1064 1078
1065 1079 with self._maketransactionfn() as tr:
1066 1080 nodes = f.addgroup([], None, tr, addrevisioncb=cb)
1067 1081
1068 1082 self.assertEqual(nodes, [])
1069 1083 self.assertEqual(callbackargs, [])
1070 1084 self.assertEqual(len(f), 0)
1071 1085
1072 1086 fulltext0 = b'foo'
1073 1087 delta0 = mdiff.trivialdiffheader(len(fulltext0)) + fulltext0
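# trivialdiffheader(n) is the (start=0, end=0, length=n) bdiff header,
# making delta0 a full-text "delta" that inserts fulltext0 at offset 0.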
1074 1088
1075 1089 with self._maketransactionfn() as tr:
1076 1090 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
1077 1091
1078 1092 f = self._makefilefn()
1079 1093
1080 1094 deltas = [
1081 1095 (node0, nullid, nullid, nullid, nullid, delta0, 0),
1082 1096 ]
1083 1097
1084 1098 with self._maketransactionfn() as tr:
1085 1099 nodes = f.addgroup(deltas, linkmapper, tr, addrevisioncb=cb)
1086 1100
1087 1101 self.assertEqual(nodes, [
1088 1102 b'\x49\xd8\xcb\xb1\x5c\xe2\x57\x92\x04\x47'
1089 1103 b'\x00\x6b\x46\x97\x8b\x7a\xf9\x80\xa9\x79'])
1090 1104
1091 1105 self.assertEqual(len(callbackargs), 1)
1092 1106 self.assertEqual(callbackargs[0][0][1], nodes[0])
1093 1107
1094 1108 self.assertEqual(list(f.revs()), [0])
1095 1109 self.assertEqual(f.rev(nodes[0]), 0)
1096 1110 self.assertEqual(f.node(0), nodes[0])
1097 1111
1098 1112 def testaddgroupmultiple(self):
1099 1113 f = self._makefilefn()
1100 1114
1101 1115 fulltexts = [
1102 1116 b'foo',
1103 1117 b'bar',
1104 1118 b'x' * 1024,
1105 1119 ]
1106 1120
1107 1121 nodes = []
1108 1122 with self._maketransactionfn() as tr:
1109 1123 for fulltext in fulltexts:
1110 1124 nodes.append(f.add(fulltext, None, tr, 0, nullid, nullid))
1111 1125
1112 1126 f = self._makefilefn()
1113 1127 deltas = []
1114 1128 for i, fulltext in enumerate(fulltexts):
1115 1129 delta = mdiff.trivialdiffheader(len(fulltext)) + fulltext
1116 1130
1117 1131 deltas.append((nodes[i], nullid, nullid, nullid, nullid, delta, 0))
1118 1132
1119 1133 with self._maketransactionfn() as tr:
1120 1134 self.assertEqual(f.addgroup(deltas, lambda x: 0, tr), nodes)
1121 1135
1122 1136 self.assertEqual(len(f), len(deltas))
1123 1137 self.assertEqual(list(f.revs()), [0, 1, 2])
1124 1138 self.assertEqual(f.rev(nodes[0]), 0)
1125 1139 self.assertEqual(f.rev(nodes[1]), 1)
1126 1140 self.assertEqual(f.rev(nodes[2]), 2)
1127 1141 self.assertEqual(f.node(0), nodes[0])
1128 1142 self.assertEqual(f.node(1), nodes[1])
1129 1143 self.assertEqual(f.node(2), nodes[2])
1130 1144
1131 1145 def testdeltaagainstcensored(self):
1132 1146 # Attempt to apply a delta made against a censored revision.
1133 1147 f = self._makefilefn()
1134 1148
1135 1149 stored1 = storageutil.packmeta({
1136 1150 b'censored': b'tombstone',
1137 1151 }, b'')
1138 1152
1139 1153 with self._maketransactionfn() as tr:
1140 1154 node0 = f.add(b'foo\n' * 30, None, tr, 0, nullid, nullid)
1141 1155
1142 1156 # The node value doesn't matter since we can't verify it.
1143 1157 node1 = b'\xbb' * 20
1144 1158
1145 1159 self._addrawrevisionfn(f, tr, node1, node0, nullid, 1, stored1,
1146 1160 censored=True)
1147 1161
1148 1162 delta = mdiff.textdiff(b'bar\n' * 30, (b'bar\n' * 30) + b'baz\n')
1149 1163 deltas = [(b'\xcc' * 20, node1, nullid, b'\x01' * 20, node1, delta, 0)]
1150 1164
1151 1165 with self._maketransactionfn() as tr:
1152 1166 with self.assertRaises(error.CensoredBaseError):
1153 1167 f.addgroup(deltas, lambda x: 0, tr)
1154 1168
1155 1169 def testcensorrevisionbasic(self):
1156 1170 f = self._makefilefn()
1157 1171
1158 1172 with self._maketransactionfn() as tr:
1159 1173 node0 = f.add(b'foo\n' * 30, None, tr, 0, nullid, nullid)
1160 1174 node1 = f.add(b'foo\n' * 31, None, tr, 1, node0, nullid)
1161 1175 node2 = f.add(b'foo\n' * 32, None, tr, 2, node1, nullid)
1162 1176
1163 1177 with self._maketransactionfn() as tr:
1164 1178 f.censorrevision(tr, node1)
1165 1179
1166 1180 self.assertEqual(len(f), 3)
1167 1181 self.assertEqual(list(f.revs()), [0, 1, 2])
1168 1182
1169 1183 self.assertEqual(f.read(node0), b'foo\n' * 30)
1170 1184 self.assertEqual(f.read(node2), b'foo\n' * 32)
1171 1185
1172 1186 with self.assertRaises(error.CensoredNodeError):
1173 1187 f.read(node1)
1174 1188
1175 1189 def testgetstrippointnoparents(self):
1176 1190 # N revisions where none have parents.
1177 1191 f = self._makefilefn()
1178 1192
1179 1193 with self._maketransactionfn() as tr:
1180 1194 for rev in range(10):
1181 1195 f.add(b'%d' % rev, None, tr, rev, nullid, nullid)
1182 1196
1183 1197 for rev in range(10):
1184 1198 self.assertEqual(f.getstrippoint(rev), (rev, set()))
1185 1199
1186 1200 def testgetstrippointlinear(self):
1187 1201 # N revisions in a linear chain.
1188 1202 f = self._makefilefn()
1189 1203
1190 1204 with self._maketransactionfn() as tr:
1191 1205 p1 = nullid
1192 1206
1193 1207 for rev in range(10):
1194 1208 f.add(b'%d' % rev, None, tr, rev, p1, nullid)
1195 1209
1196 1210 for rev in range(10):
1197 1211 self.assertEqual(f.getstrippoint(rev), (rev, set()))
1198 1212
1199 1213 def testgetstrippointmultipleheads(self):
1200 1214 f = self._makefilefn()
1201 1215
1202 1216 with self._maketransactionfn() as tr:
1203 1217 node0 = f.add(b'0', None, tr, 0, nullid, nullid)
1204 1218 node1 = f.add(b'1', None, tr, 1, node0, nullid)
1205 1219 f.add(b'2', None, tr, 2, node1, nullid)
1206 1220 f.add(b'3', None, tr, 3, node0, nullid)
1207 1221 f.add(b'4', None, tr, 4, node0, nullid)
1208 1222
1209 1223 for rev in range(5):
1210 1224 self.assertEqual(f.getstrippoint(rev), (rev, set()))
1211 1225
1212 1226 def testgetstrippointearlierlinkrevs(self):
1213 1227 f = self._makefilefn()
1214 1228
1215 1229 with self._maketransactionfn() as tr:
1216 1230 node0 = f.add(b'0', None, tr, 0, nullid, nullid)
1217 1231 f.add(b'1', None, tr, 10, node0, nullid)
1218 1232 f.add(b'2', None, tr, 5, node0, nullid)
1219 1233
1220 1234 self.assertEqual(f.getstrippoint(0), (0, set()))
1221 1235 self.assertEqual(f.getstrippoint(1), (1, set()))
1222 1236 self.assertEqual(f.getstrippoint(2), (1, set()))
1223 1237 self.assertEqual(f.getstrippoint(3), (1, set()))
1224 1238 self.assertEqual(f.getstrippoint(4), (1, set()))
1225 1239 self.assertEqual(f.getstrippoint(5), (1, set()))
1226 1240 self.assertEqual(f.getstrippoint(6), (1, {2}))
1227 1241 self.assertEqual(f.getstrippoint(7), (1, {2}))
1228 1242 self.assertEqual(f.getstrippoint(8), (1, {2}))
1229 1243 self.assertEqual(f.getstrippoint(9), (1, {2}))
1230 1244 self.assertEqual(f.getstrippoint(10), (1, {2}))
1231 1245 self.assertEqual(f.getstrippoint(11), (3, set()))
1232 1246
1233 1247 def teststripempty(self):
1234 1248 f = self._makefilefn()
1235 1249
1236 1250 with self._maketransactionfn() as tr:
1237 1251 f.strip(0, tr)
1238 1252
1239 1253 self.assertEqual(len(f), 0)
1240 1254
1241 1255 def teststripall(self):
1242 1256 f = self._makefilefn()
1243 1257
1244 1258 with self._maketransactionfn() as tr:
1245 1259 p1 = nullid
1246 1260 for rev in range(10):
1247 1261 p1 = f.add(b'%d' % rev, None, tr, rev, p1, nullid)
1248 1262
1249 1263 self.assertEqual(len(f), 10)
1250 1264
1251 1265 with self._maketransactionfn() as tr:
1252 1266 f.strip(0, tr)
1253 1267
1254 1268 self.assertEqual(len(f), 0)
1255 1269
1256 1270 def teststrippartial(self):
1257 1271 f = self._makefilefn()
1258 1272
1259 1273 with self._maketransactionfn() as tr:
1260 1274 f.add(b'0', None, tr, 0, nullid, nullid)
1261 1275 node1 = f.add(b'1', None, tr, 5, nullid, nullid)
1262 1276 node2 = f.add(b'2', None, tr, 10, nullid, nullid)
1263 1277
1264 1278 self.assertEqual(len(f), 3)
1265 1279
1266 1280 with self._maketransactionfn() as tr:
1267 1281 f.strip(11, tr)
1268 1282
1269 1283 self.assertEqual(len(f), 3)
1270 1284
1271 1285 with self._maketransactionfn() as tr:
1272 1286 f.strip(10, tr)
1273 1287
1274 1288 self.assertEqual(len(f), 2)
1275 1289
1276 1290 with self.assertRaises(error.LookupError):
1277 1291 f.rev(node2)
1278 1292
1279 1293 with self._maketransactionfn() as tr:
1280 1294 f.strip(6, tr)
1281 1295
1282 1296 self.assertEqual(len(f), 2)
1283 1297
1284 1298 with self._maketransactionfn() as tr:
1285 1299 f.strip(3, tr)
1286 1300
1287 1301 self.assertEqual(len(f), 1)
1288 1302
1289 1303 with self.assertRaises(error.LookupError):
1290 1304 f.rev(node1)
1291 1305
1292 1306 def makeifileindextests(makefilefn, maketransactionfn, addrawrevisionfn):
1293 1307 """Create a unittest.TestCase class suitable for testing file storage.
1294 1308
1295 1309 ``makefilefn`` is a callable which receives the test case as an
1296 1310 argument and returns an object implementing the ``ifilestorage`` interface.
1297 1311
1298 1312 ``maketransactionfn`` is a callable which receives the test case as an
1299 1313 argument and returns a transaction object.
1300 1314
1301 1315 ``addrawrevisionfn`` is a callable which receives arguments describing a
1302 1316 low-level revision to add. This callable allows the insertion of
1303 1317 potentially bad data into the store in order to facilitate testing.
1304 1318
1305 1319 Returns a type that is a ``unittest.TestCase`` that can be used for
1306 1320 testing the object implementing the file storage interface. Simply
1307 1321 assign the returned value to a module-level attribute and a test loader
1308 1322 should find and run it automatically.
1309 1323 """
1310 1324 d = {
1311 1325 r'_makefilefn': makefilefn,
1312 1326 r'_maketransactionfn': maketransactionfn,
1313 1327 r'_addrawrevisionfn': addrawrevisionfn,
1314 1328 }
1315 1329 return type(r'ifileindextests', (ifileindextests,), d)
1316 1330
1317 1331 def makeifiledatatests(makefilefn, maketransactionfn, addrawrevisionfn):
1318 1332 d = {
1319 1333 r'_makefilefn': makefilefn,
1320 1334 r'_maketransactionfn': maketransactionfn,
1321 1335 r'_addrawrevisionfn': addrawrevisionfn,
1322 1336 }
1323 1337 return type(r'ifiledatatests', (ifiledatatests,), d)
1324 1338
1325 1339 def makeifilemutationtests(makefilefn, maketransactionfn, addrawrevisionfn):
1326 1340 d = {
1327 1341 r'_makefilefn': makefilefn,
1328 1342 r'_maketransactionfn': maketransactionfn,
1329 1343 r'_addrawrevisionfn': addrawrevisionfn,
1330 1344 }
1331 1345 return type(r'ifilemutationtests', (ifilemutationtests,), d)
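As the docstring above describes, a consumer assigns the returned classes to module-level names so a test loader discovers them. A sketch of that wiring, assuming ``makefilefn``, ``maketransactionfn`` and ``addrawrevisionfn`` are defined in the importing test module (Mercurial's own harness runs such modules with ``silenttestrunner``):

from mercurial.testing import storage

ifileindextests = storage.makeifileindextests(
    makefilefn, maketransactionfn, addrawrevisionfn)
ifiledatatests = storage.makeifiledatatests(
    makefilefn, maketransactionfn, addrawrevisionfn)
ifilemutationtests = storage.makeifilemutationtests(
    makefilefn, maketransactionfn, addrawrevisionfn)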